#include "caffe/layers/winograd_layer.hpp"
#include "caffe/util/winograd.hpp"
namespace caffe {
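// winograd_input_im2col_gpu_kernel: scatters the padded input into overlapping
// tiles of size tile_h_in x tile_w_in, laid out in col_buff as
// [channel][image][tile_h][tile_w][y][x]. Adjacent tiles are strided by
// tile_h_out/tile_w_out (the output tile size), so neighbouring input tiles
// overlap by (tile_h_in - tile_h_out) rows / (tile_w_in - tile_w_out) columns;
// e.g. for the common F(2x2, 3x3) Winograd case, tile_h_in = tile_w_in = 4 and
// tile_h_out = tile_w_out = 2. Positions outside the image are written as zero.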
template <typename Dtype>
__global__ void winograd_input_im2col_gpu_kernel(
const int n,
const Dtype *data, Dtype *col_buff,
int height, int width,
int pad_h, int pad_w,
int ntiles_h, int ntiles_w,
int tile_h_in, int tile_w_in,
int tile_h_out, int tile_w_out,
int nchannels, int batch_size)
{
CUDA_KERNEL_LOOP(index, n) {
const int x = index%tile_w_in;
const int y = index/tile_w_in%tile_h_in;
const int tile_w = index/tile_w_in/tile_h_in%ntiles_w;
const int tile_h = index/tile_w_in/tile_h_in/ntiles_w%ntiles_h;
const int c = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h%nchannels;
const int image_idx = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h/nchannels;
int in_y = tile_h*tile_h_out + y - pad_h;
int in_x = tile_w*tile_w_out + x - pad_w;
if (in_y < 0 || in_x < 0 || in_y >= height || in_x >= width) {
col_buff[((((image_idx + c*batch_size)*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x] = 0;
}
else {
col_buff[((((image_idx + c*batch_size)*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x] = data[((image_idx*nchannels + c)*height + in_y)*width + in_x];
}
}
}
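// winograd_output_col2im_gpu_kernel: the output-side inverse of the tiling
// above -- gathers non-overlapping tile_h_out x tile_w_out output tiles from
// col_buff (layout [channel][image][tile_h][tile_w][y][x]) back into the top
// blob, dropping positions that fall past the right/bottom border when the
// output size is not a multiple of the tile size.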
template <typename Dtype>
__global__ void winograd_output_col2im_gpu_kernel(
const int n,
const Dtype *col_buff, Dtype *data,
int output_h, int output_w,
int ntiles_h, int ntiles_w,
int tile_h_out, int tile_w_out,
int nchannels, int batch_size)
{
CUDA_KERNEL_LOOP(index, n) {
const int x = index%tile_w_out;
const int y = index/tile_w_out%tile_h_out;
const int tile_w = index/tile_w_out/tile_h_out%ntiles_w;
const int tile_h = index/tile_w_out/tile_h_out/ntiles_w%ntiles_h;
const int c = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h%nchannels;
const int image_idx = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h/nchannels;
int out_y = tile_h*tile_h_out + y;
int out_x = tile_w*tile_w_out + x;
if (out_y < output_h && out_x < output_w) {
data[((image_idx*nchannels + c)*output_h + out_y)*output_w + out_x] =
col_buff[((((image_idx + c*batch_size)*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x];
}
}
}
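// winograd_output_im2col_gpu_kernel: used in the backward pass -- tiles the
// output gradient (top_diff) into the same col_buff layout as above, writing
// zeros for tile positions that lie outside the output map.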
template <typename Dtype>
__global__ void winograd_output_im2col_gpu_kernel(
const int n,
const Dtype *data, Dtype *col_buff,
int output_h, int output_w,
int ntiles_h, int ntiles_w,
int tile_h_out, int tile_w_out,
int nchannels, int batch_size)
{
CUDA_KERNEL_LOOP(index, n) {
const int x = index%tile_w_out;
const int y = index/tile_w_out%tile_h_out;
const int tile_w = index/tile_w_out/tile_h_out%ntiles_w;
const int tile_h = index/tile_w_out/tile_h_out/ntiles_w%ntiles_h;
const int c = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h%nchannels;
const int image_idx = index/tile_w_out/tile_h_out/ntiles_w/ntiles_h/nchannels;
int out_y = tile_h*tile_h_out + y;
int out_x = tile_w*tile_w_out + x;
if (out_y < 0 || out_x < 0 || out_y >= output_h || out_x >= output_w) {
col_buff[((((image_idx + c*batch_size)*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x] = 0;
}
else {
col_buff[((((image_idx + c*batch_size)*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_out + y)*tile_w_out + x] =
data[((image_idx*nchannels + c)*output_h + out_y)*output_w + out_x];
}
}
}
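// winograd_input_col2im_gpu_kernel: scatters the input-tile gradients from
// col_buff back into the bottom gradient, accumulating where neighbouring
// tiles overlap. The destination buffer is zeroed by the same launch before
// the accumulation loop runs.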
template <typename Dtype>
__global__ void winograd_input_col2im_gpu_kernel(
const int n,
const Dtype *col_buff, Dtype *data,
int height, int width,
int pad_h, int pad_w,
int ntiles_h, int ntiles_w,
int tile_h_in, int tile_w_in,
int tile_h_out, int tile_w_out,
int nchannels, int batch_size)
{
int m = batch_size*nchannels*height*width;
CUDA_KERNEL_LOOP(index, m) {
data[index] = 0;
}
CUDA_KERNEL_LOOP(index, n) {
const int x = index%tile_w_in;
const int y = index/tile_w_in%tile_h_in;
const int tile_w = index/tile_w_in/tile_h_in%ntiles_w;
const int tile_h = index/tile_w_in/tile_h_in/ntiles_w%ntiles_h;
const int c = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h%nchannels;
const int image_idx = index/tile_w_in/tile_h_in/ntiles_w/ntiles_h/nchannels;
int in_y = tile_h*tile_h_out + y - pad_h;
int in_x = tile_w*tile_w_out + x - pad_w;
if (in_y >= 0 && in_x >= 0 && in_y < height && in_x < width) {
data[((image_idx*nchannels + c)*height + in_y)*width + in_x] +=
col_buff[((((image_idx + c*batch_size)*ntiles_h + tile_h)*ntiles_w + tile_w)*tile_h_in + y)*tile_w_in + x];
}
}
}
template <>
void WinogradLayer<double>::Forward_gpu(const vector<Blob<double>*>& bottom,
const vector<Blob<double>*>& top) {
NOT_IMPLEMENTED;
}
//#define PROFILE_WINOGRAD
template <>
void WinogradLayer<float>::Forward_gpu(const vector<Blob<float>*>& bottom,
const vector<Blob<float>*>& top) {
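// Forward pipeline (summary of the steps below):
//  1. winograd_input_im2col: tile the padded input into temp2_.
//  2. GEMM with (B kron B): transform every input tile into the Winograd
//     domain, giving temp1_ with shape
//     (tile_h_in*tile_w_in) x (conv_in_channels * num_ * ntiles_h * ntiles_w).
//  3. One batched GEMM per Winograd frequency element (and per group):
//     multiply the transformed activations by the matching slice of the
//     transformed weights.
//  4. GEMM with (A kron A): transform the products back to the spatial domain.
//  5. winograd_output_col2im + bias (if present): assemble the output tiles
//     into top.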
int kernel_h = this->kernel_shape_.cpu_data()[0], kernel_w = this->kernel_shape_.cpu_data()[1];
WinogradAKronA<float> *AKronA = WinogradAKronA<float>::getInstance(kernel_h);
WinogradBKronB<float> *BKronB = WinogradBKronB<float>::getInstance(kernel_h);
WinogradGKronG<float> *GKronG = WinogradGKronG<float>::getInstance(kernel_h);
const float* weight = this->blobs_[0]->gpu_data();
#ifdef PROFILE_WINOGRAD
CPUTimer timer;
#endif
for (int i = 0; i < bottom.size(); ++i) {
const float* bottom_data = bottom[i]->gpu_data();
float* top_data = top[i]->mutable_gpu_data();
int M = this->conv_in_channels_*ntiles_h_*ntiles_w_;
int num_kernels = this->conv_in_channels_*this->num_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_;
int height = this->conv_input_shape_.cpu_data()[1], width = this->conv_input_shape_.cpu_data()[2];
int pad_h = this->pad_.cpu_data()[0], pad_w = this->pad_.cpu_data()[1];
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
winograd_input_im2col_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, bottom_data, temp2_.mutable_gpu_data(),
height, width,
pad_h, pad_w,
ntiles_h_, ntiles_w_,
tile_h_in_, tile_w_in_,
tile_h_out_, tile_w_out_,
this->conv_in_channels_, this->num_);
CUDA_POST_KERNEL_CHECK;
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "winograd_input_im2col takes " << timer.MicroSeconds()/1e6;
#endif
// Transform input to Winograd domain
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
caffe_gpu_gemm<float>(CblasTrans, CblasTrans,
tile_h_in_*tile_w_in_, this->num_*M, tile_h_in_*tile_w_in_,
(float)1, BKronB->get()->gpu_data(), temp2_.mutable_gpu_data(),
(float)0, temp1_.mutable_gpu_data());
// temp1_ has (tile_h_in*tile_w_in) x conv_in_channels x num_ x (ntiles_h*ntiles_w) dimension
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Transformation of bottom takes " << timer.MicroSeconds()/1e6;
#endif
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
// Convolution in Winograd domain
{
float alpha = 1, beta = 0;
int M = this->conv_out_channels_/this->group_;
int N = this->num_*ntiles_h_*ntiles_w_;
int K = this->conv_in_channels_/this->group_;
if (!weight_ptrs_initialized_) {
float **weight_ptrs = (float **)weight_ptrs_->mutable_cpu_data();
for (int j = 0; j < tile_h_in_*tile_w_in_*this->group_; ++j) {
weight_ptrs[j] =
this->blobs_[0]->mutable_gpu_data() +
j*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_);
}
weight_ptrs_initialized_ = true;
}
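// cuBLAS is column-major, so passing the dimensions as (N, M, K) below computes,
// for each batch (one per Winograd frequency element, times group), the
// row-major product out[M x N] = weight[M x K] * in[K x N] with
// M = conv_out_channels/group, N = num_*ntiles_h*ntiles_w and
// K = conv_in_channels/group.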
CUBLAS_CHECK(cublasSgemmBatched(
Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
N, M, K,
&alpha,
(const float **)in_activation_ptrs_->gpu_data(), N,
(const float **)weight_ptrs_->gpu_data(), K,
&beta,
(float **)out_activation_ptrs_->mutable_gpu_data(), N,
in_activation_ptrs_->count()));
}
// col_buff has (tile_h_in*tile_w_in) x conv_out_channels x num_ x (ntiles_h*ntiles_w)
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Convolution takes " << timer.MicroSeconds()/1e6;
#endif
// Transform back to time domain
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
caffe_gpu_gemm<float>(CblasTrans, CblasNoTrans,
this->conv_out_channels_*this->num_*ntiles_h_*ntiles_w_, tile_h_out_*tile_w_out_, tile_h_in_*tile_w_in_,
(float)1, temp2_.gpu_data(), AKronA->get()->gpu_data(),
(float)0, temp1_.mutable_gpu_data());
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Inverse transformation of top takes " << timer.MicroSeconds()/1e6;
#endif
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
num_kernels = this->conv_out_channels_*this->num_*ntiles_h_*ntiles_w_*tile_h_out_*tile_w_out_;
const int output_h = this->output_shape_[0], output_w = this->output_shape_[1];
winograd_output_col2im_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels,
temp1_.gpu_data(), top_data,
output_h, output_w,
ntiles_h_, ntiles_w_,
tile_h_out_, tile_w_out_,
this->conv_out_channels_, this->num_);
CUDA_POST_KERNEL_CHECK;
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "winograd_output_col2im takes " << timer.MicroSeconds()/1e6;
#endif
for (int n = 0; n < this->num_; ++n) { // JSP: this->num_ is batch size
if (this->bias_term_) {
const float* bias = this->blobs_[1]->gpu_data();
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
}
}
template <>
void WinogradLayer<double>::Backward_gpu(const vector<Blob<double>*>& top,
const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom) {
NOT_IMPLEMENTED;
}
template <>
void WinogradLayer<float>::Backward_gpu(const vector<Blob<float>*>& top,
const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom) {
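// Backward pipeline (summary of the steps below), per top blob:
//  1. Bias gradient via backward_gpu_bias (if bias is used).
//  2. winograd_output_im2col + GEMM with (A kron A): transform top_diff into
//     the Winograd domain (temp2_).
//  3. Weight gradient: transform the input with (B kron B), then run a batched
//     GEMM against the transformed top_diff, accumulating (beta = 1) into the
//     Winograd-domain weight diff.
//  4. Bottom gradient: batched GEMM of the transformed top_diff with the
//     transposed weights, inverse transform with (B kron B), then
//     winograd_input_col2im into bottom_diff.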
int kernel_h = this->kernel_shape_.cpu_data()[0], kernel_w = this->kernel_shape_.cpu_data()[1];
WinogradAKronA<float> *AKronA = WinogradAKronA<float>::getInstance(kernel_h);
WinogradBKronB<float> *BKronB = WinogradBKronB<float>::getInstance(kernel_h);
WinogradGKronG<float> *GKronG = WinogradGKronG<float>::getInstance(kernel_h);
const float* weight = this->blobs_[0]->gpu_data();
float* weight_diff = this->blobs_[0]->mutable_gpu_diff();
/*const float *weight_cpu = this->blobs_[0]->cpu_data();
fprintf(stderr, "weight_winograd\n");
for (int j = 0; j < tile_h_in_*tile_w_in_; ++j) {
for (int n = 0; n < this->conv_out_channels_; ++n) {
for (int c = 0; c < this->conv_in_channels_; ++c) {
fprintf(stderr, "%g ", weight_cpu[(j*this->conv_out_channels_ + n)*this->conv_in_channels_ + c]);
}
}
fprintf(stderr, "\n");
}*/
#ifdef PROFILE_WINOGRAD
CPUTimer timer;
#endif
for (int i = 0; i < top.size(); ++i) {
const float* top_diff = top[i]->gpu_diff();
const float* bottom_data = bottom[i]->gpu_data();
float* bottom_diff = bottom[i]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
float* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
int M = this->conv_out_channels_*ntiles_h_*ntiles_w_;
int num_kernels = this->num_*this->conv_out_channels_*ntiles_h_*ntiles_w_*tile_h_out_*tile_w_out_;
const int output_h = this->output_shape_[0], output_w = this->output_shape_[1];
const int height = this->conv_input_shape_.cpu_data()[1], width = this->conv_input_shape_.cpu_data()[2];
const int pad_h = this->pad_.cpu_data()[0], pad_w = this->pad_.cpu_data()[1];
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
winograd_output_im2col_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels,
top_diff, temp1_.mutable_gpu_data(),
output_h, output_w,
ntiles_h_, ntiles_w_,
tile_h_out_, tile_w_out_,
this->conv_out_channels_, this->num_);
CUDA_POST_KERNEL_CHECK;
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "winograd_output_im2col takes " << timer.MicroSeconds()/1e6;
#endif
// Transform out_diff to Winograd domain
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
caffe_gpu_gemm<float>(CblasNoTrans, CblasTrans,
tile_h_in_*tile_w_in_, this->num_*M, tile_h_out_*tile_w_out_,
(float)1, AKronA->get()->gpu_data(), temp1_.mutable_gpu_data(),
(float)0, temp2_.mutable_gpu_data());
// temp2_ has (tile_h_in*tile_w_in) x conv_out_channels x num_ x (ntiles_h*ntiles_w) dimension
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Transformation of top_diff takes " << timer.MicroSeconds()/1e6;
#endif
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
int num_kernels = this->conv_in_channels_*this->num_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_;
winograd_input_im2col_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, bottom_data, this->col_buffer_.mutable_gpu_data(),
height, width,
pad_h, pad_w,
ntiles_h_, ntiles_w_,
tile_h_in_, tile_w_in_,
tile_h_out_, tile_w_out_,
this->conv_in_channels_, this->num_);
CUDA_POST_KERNEL_CHECK;
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "winograd_input_im2col takes " << timer.MicroSeconds()/1e6;
#endif
// Transform input to Winograd domain
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
caffe_gpu_gemm<float>(CblasTrans, CblasTrans,
tile_h_in_*tile_w_in_, this->conv_in_channels_*this->num_*ntiles_h_*ntiles_w_, tile_h_in_*tile_w_in_,
(float)1, BKronB->get()->gpu_data(), this->col_buffer_.mutable_gpu_data(),
(float)0, temp1_.mutable_gpu_data());
// temp1_ has (tile_h_in*tile_w_in) x conv_in_channels x num_ x (ntiles_h*ntiles_w) dimension
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Transformation of bottom takes " << timer.MicroSeconds()/1e6;
#endif
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
if (!weight_diff_ptrs_initialized_) {
float **weight_diff_ptrs = (float **)weight_diff_ptrs_->mutable_cpu_data();
for (int j = 0; j < tile_h_in_*tile_w_in_*this->group_; ++j) {
weight_diff_ptrs[j] =
this->blobs_[0]->mutable_gpu_diff() +
j*(this->conv_out_channels_/this->group_)*(this->conv_in_channels_/this->group_);
}
weight_diff_ptrs_initialized_ = true;
}
float alpha = 1, beta = 1;
int M = this->conv_out_channels_/this->group_;
int N = this->conv_in_channels_/this->group_;
int K = this->num_*ntiles_h_*ntiles_w_;
CUBLAS_CHECK(cublasSgemmBatched(
Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N,
N, M, K,
&alpha,
(const float **)in_activation_ptrs_->gpu_data(), K,
(const float **)out_activation_ptrs_->gpu_data(), K,
&beta,
(float **)weight_diff_ptrs_->mutable_gpu_data(), N,
tile_h_in_*tile_w_in_*this->group_));
// weight_diff has (tile_h_in*tile_w_in) x (conv_out_channels) x (conv_in_channels/group) dimension
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Convolution for weight gradient takes " << timer.MicroSeconds()/1e6;
#endif
} // param_propagate_down_[0]
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
// Convolution in Winograd domain
float alpha = 1, beta = 0;
int M = this->conv_in_channels_/this->group_;
int N = this->num_*ntiles_h_*ntiles_w_;
int K = this->conv_out_channels_/this->group_;
CUBLAS_CHECK(cublasSgemmBatched(
Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T,
N, M, K,
&alpha,
(const float **)out_activation_ptrs_->gpu_data(), N,
(const float **)weight_ptrs_->gpu_data(), M,
&beta,
(float **)in_activation_ptrs_->mutable_gpu_data(), N,
in_activation_ptrs_->count()));
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Convolution for bottom gradient takes " << timer.MicroSeconds()/1e6;
#endif
// Transform back to time domain
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
caffe_gpu_gemm<float>(CblasTrans, CblasTrans,
this->conv_in_channels_*this->num_*ntiles_h_*ntiles_w_, tile_h_in_*tile_w_in_, tile_h_in_*tile_w_in_,
(float)1, temp1_.mutable_gpu_data(), BKronB->get()->gpu_data(),
(float)0, this->col_buffer_.mutable_gpu_data());
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "Inverse transformation of bottom_diff takes " << timer.MicroSeconds()/1e6;
#endif
#ifdef PROFILE_WINOGRAD
timer.Start();
#endif
num_kernels = this->conv_in_channels_*this->num_*ntiles_h_*ntiles_w_*tile_h_in_*tile_w_in_;
winograd_input_col2im_gpu_kernel<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels,
this->col_buffer_.gpu_data(), bottom_diff,
height, width,
pad_h, pad_w,
ntiles_h_, ntiles_w_,
tile_h_in_, tile_w_in_,
tile_h_out_, tile_w_out_,
this->conv_in_channels_, this->num_);
#ifdef PROFILE_WINOGRAD
LOG(INFO) << "winograd_input_col2im takes " << timer.MicroSeconds()/1e6;
#endif
} // propagate_down_[i]
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WinogradLayer);
} // namespace caffe
#include<ops/declarable/helpers/batchnorm.h>
#include <helpers/ShapeUtils.h>
#include <helpers/OmpLaunchHelper.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
// template<typename T>
// __global__ static void batchnormCuda(const void* vx, const Nd4jLong* xShapeInfo,
// const void* vMean, const Nd4jLong* meanShapeInfo,
// const void* vVariance, const Nd4jLong* varianceShapeInfo,
// const void* vGamma, const Nd4jLong* gammaShapeInfo,
// const void* vBeta, const Nd4jLong* betaShapeInfo,
// void* vz, const Nd4jLong* zShapeInfo,
// const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
// const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
// const T epsilon) {
// const auto x = reinterpret_cast<const T*>(vx);
// auto z = reinterpret_cast<T*>(vz);
// const auto mean = reinterpret_cast<const T*>(vMean);
// const auto variance = reinterpret_cast<const T*>(vVariance);
// const auto gamma = reinterpret_cast<const T*>(vGamma);
// const auto beta = reinterpret_cast<const T*>(vBeta);
// // maxRank = xRank = zRank, minRank = meanRank = varianceRank = gammaRank = betaRank
// __shared__ Nd4jLong minLen, tadLen, totalThreads;
// if (threadIdx.x == 0) {
// totalThreads = gridDim.x * blockDim.x;
// minLen = shape::length(meanShapeInfo);
// tadLen = shape::length(xShapeInfo) / minLen;
// }
// __syncthreads();
// const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// for (uint i = tid; i < minLen; i += totalThreads) {
// const auto meanOffset = shape::getIndexOffset(i, meanShapeInfo);
// const auto varianceOffset = shape::getIndexOffset(i, varianceShapeInfo);
// T sigmaInvGam = 1. / sd::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon);
// if(gamma != nullptr)
// sigmaInvGam *= gamma[shape::getIndexOffset(i, gammaShapeInfo)];
// auto betaOffset = 0;
// if(beta != nullptr)
// betaOffset = shape::getIndexOffset(i, betaShapeInfo);
// const auto xTad = x + xTadOffsets[i];
// auto zTad = z + zTadOffsets[i];
// for (uint j = 0; j < tadLen; ++j) {
// const auto xTadOffset = shape::getIndexOffset(j, xTadShapeInfo);
// const auto zTadOffset = shape::getIndexOffset(j, zTadShapeInfo);
// zTad[zTadOffset] = (xTad[xTadOffset] - mean[meanOffset]) * sigmaInvGam;
// if(beta != nullptr)
// zTad[zTadOffset] += beta[betaOffset];
// }
// }
// }
//////////////////////////////////////////////////////////////////////////
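// batchnormCuda2: one thread per element of the output (grid-strided). Each
// thread decodes its linear index into the coordinates of x/z, zeroes the
// coordinates of the axes that are not in dims so they can index
// mean/variance/gamma/beta, and computes
//   z = (x - mean) / sqrt(variance + epsilon) * gamma + beta
// where gamma and beta are optional.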
template<typename T>
__global__ static void batchnormCuda2(const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int numDims, const int* dims,
const T epsilon) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto mean = reinterpret_cast<const T*>(vMean);
const auto variance = reinterpret_cast<const T*>(vVariance);
const auto gamma = reinterpret_cast<const T*>(vGamma);
const auto beta = reinterpret_cast<const T*>(vBeta);
__shared__ int xRank, minRank; // xRank == zRank, minRank = meanRank = varianceRank = gammaRank = betaRank
__shared__ Nd4jLong xLen, totalThreads; // xLen = zLen
if (threadIdx.x == 0) {
totalThreads = gridDim.x * blockDim.x;
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
minRank = shape::rank(meanShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (uint i = tid; i < xLen; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto xOffset = shape::getOffset(xShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if(minRank == xRank) {
for (uint i = 0, j = 0; i < xRank; ++i) {
if(j < numDims && i != dims[j])
coords[i] = 0;
else
++j;
}
}
else // minRank = numDims = 1 in this case
coords[0] = coords[dims[0]];
const auto meanOffset = shape::getOffset(meanShapeInfo, coords);
const auto varianceOffset = shape::getOffset(varianceShapeInfo, coords);
T sigmaInvGam = 1. / sd::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon);
if(gamma != nullptr) {
const auto gammaOffset = shape::getOffset(gammaShapeInfo, coords);
sigmaInvGam *= gamma[gammaOffset];
}
z[zOffset] = (x[xOffset] - mean[meanOffset]) * sigmaInvGam;
if(beta != nullptr) {
const auto betaOffset = shape::getOffset(betaShapeInfo, coords);
z[zOffset] += beta[betaOffset];
}
}
}
///////////////////////////////////////////////////////////////////
// template<typename T>
// __host__ static void batchnormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
// const void* vx, const Nd4jLong* xShapeInfo,
// const void* vMean, const Nd4jLong* meanShapeInfo,
// const void* vVariance, const Nd4jLong* varianceShapeInfo,
// const void* vGamma, const Nd4jLong* gammaShapeInfo,
// const void* vBeta, const Nd4jLong* betaShapeInfo,
// void* vz, const Nd4jLong* zShapeInfo,
// const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
// const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
// const double epsilon) {
// batchnormCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, xTadShapeInfo, xTadOffsets, zTadShapeInfo, zTadOffsets, static_cast<T>(epsilon));
// }
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void batchnormCudaLauncher2(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int numDims, const int* dims,
const double epsilon) {
batchnormCuda2<T><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, numDims, dims, static_cast<T>(epsilon));
}
//////////////////////////////////////////////////////////////////////////
void batchnorm(const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output, const std::vector<int>& axes, const double epsilon) {
// std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), axes);
// auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimsToExclude);
// auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimsToExclude);
// const int threadsPerBlock = MAX_NUM_THREADS / 2;
// const int blocksPerGrid = (mean->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// PointersManager manager(input->getContext(), "batchnorm");
// NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher, (blocksPerGrid, threadsPerBlock, input->getContext()->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), mean->specialBuffer(), mean->specialShapeInfo(), variance->specialBuffer(), variance->specialShapeInfo(), gamma ? gamma->specialBuffer() : nullptr, gamma ? gamma->specialShapeInfo() : nullptr, beta ? beta->specialBuffer() : nullptr, beta ? beta->specialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), epsilon), FLOAT_TYPES);
// NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
// manager.synchronize();
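// Current path: one thread per input/output element. The reduction axes are
// replicated to device memory (dims) so the kernel knows which coordinates to
// keep when indexing mean/variance/gamma/beta.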
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (input->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(input->getContext(), "batchnorm");
const int* dims = reinterpret_cast<int*>(manager.replicatePointer(axes.data(), axes.size() * sizeof(int)));
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher2, (blocksPerGrid, threadsPerBlock, input->getContext()->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), mean->specialBuffer(), mean->specialShapeInfo(), variance->specialBuffer(), variance->specialShapeInfo(), gamma ? gamma->specialBuffer() : nullptr, gamma ? gamma->specialShapeInfo() : nullptr, beta ? beta->specialBuffer() : nullptr, beta ? beta->specialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), axes.size(), dims, epsilon), FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
manager.synchronize();
}
}
}
}
#ifndef THC_REDUCE_INC
#define THC_REDUCE_INC
//
// This file contains dimension reduction operation functions and
// kernels that work on both contiguous and non-contiguous tensor
// arguments of arbitrary (up to MAX_CUTORCH_DIMS) dimensioned
// arguments without copying or temporary storage.
//
#include <THC/THCTensorTypeUtils.cuh>
#include <THC/THCReduceApplyUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
// Threads per thread block
#define THC_NONCONTIG_REDUCE_BLOCK_SIZE 32 * 16
#define CHUNKPERBLOCK 256
template <typename IndexType>
__device__ __forceinline__ IndexType getReduceNoncontigDimSliceIndex() {
// Each thread handles one slice
return getLinearBlockId<IndexType>() * THC_NONCONTIG_REDUCE_BLOCK_SIZE + threadIdx.x;
}
// quick hack to enable two-stage use of reduceChunk
template <typename T>
struct SimpleCopyOp
{
__device__ __forceinline__ T operator()(volatile const T val) const volatile
{
return val;
}
};
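// lastpow2(n): largest power of two strictly less than n
// (e.g. lastpow2(32) == 16, lastpow2(33) == 32). Used to size the strides of
// the shared-memory tree reduction in reduceChunk.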
__device__ __forceinline__ int lastpow2(int n)
{
int out = 1 << (31 - __clz(n));
if(n == out)
out >>= 1;
return out;
}
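// reduceChunk: each thread strides through its slice along reductionStride,
// manually unrolled 4 elements per iteration, accumulating into a register.
// The per-thread partials are then combined with a tree reduction over
// threadIdx.y through shared memory (partners are i*blockDim.x apart), and the
// thread with threadIdx.y == 0 writes the finalized value to out[outOffset].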
template
<typename T,
typename U,
typename IndexType,
typename AccT,
typename ModifyOp,
typename ReduceOp,
typename FinalizeOp>
__device__ __forceinline__ void reduceChunk
(T* out,
U* in,
const int& inbounds,
const IndexType& reductionStride,
const IndexType& reductionSize,
const IndexType& inOffset,
const IndexType& outOffset,
const int& shmem_lim,
AccT init,
AccT* shmem,
ModifyOp modifyOp,
ReduceOp reduceOp,
FinalizeOp finalizeOp)
{
AccT load_reg[4];
AccT local_reg = init;
//Unroll this loop
//for(IndexType i=threadIdx.y; i<reductionSize; i+=blockDim.y){
// local_reg += in[inOffset + i*reductionStride];
//}
if(inbounds)
for(IndexType i = threadIdx.y; i < reductionSize; i += blockDim.y*4)
{
if (i + blockDim.y*3 < reductionSize)
{
const AccT val0 = scalar_cast<AccT>(in[inOffset + i*reductionStride]);
load_reg[0] = modifyOp(val0);
const AccT val1 = scalar_cast<AccT>(in[inOffset + (i + blockDim.y)*reductionStride]);
load_reg[1] = modifyOp(val1);
const AccT val2 = scalar_cast<AccT>(in[inOffset + (i + blockDim.y*2)*reductionStride]);
load_reg[2] = modifyOp(val2);
const AccT val3 = scalar_cast<AccT>(in[inOffset + (i + blockDim.y*3)*reductionStride]);
load_reg[3] = modifyOp(val3);
local_reg = reduceOp(local_reg, load_reg[0]);
local_reg = reduceOp(local_reg, load_reg[1]);
local_reg = reduceOp(local_reg, load_reg[2]);
local_reg = reduceOp(local_reg, load_reg[3]);
}
else if (i + blockDim.y*2 < reductionSize)
{
const AccT val0 = scalar_cast<AccT>(in[inOffset + i*reductionStride]);
load_reg[0] = modifyOp(val0);
const AccT val1 = scalar_cast<AccT>(in[inOffset + (i + blockDim.y)*reductionStride]);
load_reg[1] = modifyOp(val1);
const AccT val2 = scalar_cast<AccT>(in[inOffset + (i + blockDim.y*2)*reductionStride]);
load_reg[2] = modifyOp(val2);
local_reg = reduceOp(local_reg, load_reg[0]);
local_reg = reduceOp(local_reg, load_reg[1]);
local_reg = reduceOp(local_reg, load_reg[2]);
}
else if (i + blockDim.y < reductionSize)
{
const AccT val0 = scalar_cast<AccT>(in[inOffset + i*reductionStride]);
load_reg[0] = modifyOp(val0);
const AccT val1 = scalar_cast<AccT>(in[inOffset + (i + blockDim.y)*reductionStride]);
load_reg[1] = modifyOp(val1);
local_reg = reduceOp(local_reg, load_reg[0]);
local_reg = reduceOp(local_reg, load_reg[1]);
}
else if (i < reductionSize)
{
const AccT val0 = scalar_cast<AccT>(in[inOffset + i*reductionStride]);
local_reg = reduceOp(local_reg, modifyOp(val0));
}
}
*shmem = local_reg;
for(int i = lastpow2(shmem_lim); i > 0; i >>= 1)
{
__syncthreads();
if(threadIdx.y < i && threadIdx.y + i < shmem_lim)
*shmem = reduceOp(*shmem, *(shmem + i*blockDim.x));
}
if(threadIdx.y == 0 && inbounds) {
T &&o_ele = static_cast<T>(finalizeOp(*shmem));
out[outOffset] = o_ele;
}
}
// Kernel that handles an entire reduction of a slice of a tensor per each thread
template
<typename T,
typename IndexType,
typename AccT,
typename ModifyOp,
typename ReduceOp,
typename FinalizeOp,
int ADims, int BDims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(512, 4)
#endif
__global__ void kernelReduceNoncontigDim_shared
(TensorInfo<T, IndexType> out,
TensorInfo<T, IndexType> in,
IndexType reductionStride,
IndexType reductionSize,
IndexType totalSlices,
AccT init,
ModifyOp modifyOp,
ReduceOp reduceOp,
FinalizeOp finalizeOp,
volatile AccT* stagingData,
int* semaphores)
{
IndexType sliceIndex = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int isLastBlockDone;
__shared__ AccT local_reduce[THC_NONCONTIG_REDUCE_BLOCK_SIZE];
AccT* shmem = &local_reduce[threadIdx.x + threadIdx.y*blockDim.x];
// This kernel is intended for the latency-bound case, so we want to launch enough blocks
// to cover the entire output. This means we don't need grid-stride loops.
const IndexType outOffset =
IndexToOffset<T, IndexType, ADims>::get(sliceIndex, out);
const IndexType inOffset =
IndexToOffset<T, IndexType, BDims>::get(sliceIndex, in);
const int inbounds = (sliceIndex < totalSlices);
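// When gridDim.y > 1, the reduction is split into chunks of CHUNKPERBLOCK
// elements: each y-block reduces its chunk into the staging buffer, and the
// last block to arrive (tracked with an atomic semaphore per x-block) performs
// the final reduction over the staging area and writes the result.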
if(gridDim.y == 1)
reduceChunk
(out.data,
in.data,
inbounds,
reductionStride,
reductionSize,
inOffset,
outOffset,
reductionSize < blockDim.y ? reductionSize : blockDim.y,
init,
shmem,
modifyOp,
reduceOp,
finalizeOp);
else
{
int* semaphore = semaphores + blockIdx.x;
const IndexType chunkStart = blockIdx.y*CHUNKPERBLOCK;
const IndexType chunkSize = reductionSize - chunkStart < CHUNKPERBLOCK ?
reductionSize - chunkStart : CHUNKPERBLOCK;
const IndexType reductionStrideStaging = totalSlices;
const IndexType stagingOffset = sliceIndex;
reduceChunk
(stagingData,
in.data,
inbounds,
reductionStride,
chunkSize,
inOffset + chunkStart*reductionStride,
stagingOffset + blockIdx.y*reductionStrideStaging,
chunkSize < blockDim.y ? chunkSize : blockDim.y,
init,
shmem,
modifyOp,
reduceOp,
SimpleCopyOp<AccT>());
__threadfence(); // make sure writes are globally visible
__syncthreads(); // if multiple warps in this block wrote to staging, make sure they're all done
if(threadIdx.x == 0 && threadIdx.y == 0)
{
int old = atomicAdd(semaphore, 1);
isLastBlockDone = (old == gridDim.y - 1);
}
__syncthreads();
// The staging area contains gridDim.y elements along each slice. The final reduction
// begins by treating the first blockDim.y elements as "init" values.
if(isLastBlockDone)
{
if(threadIdx.y < gridDim.y)
init = stagingData[stagingOffset + threadIdx.y*reductionStrideStaging];
IndexType remaining = gridDim.y < blockDim.y ? 0 : gridDim.y - blockDim.y;
reduceChunk
(out.data,
stagingData,
inbounds,
reductionStrideStaging,
remaining, // if 0, loop in reduceChunk is skipped, otherwise...
stagingOffset + blockDim.y*reductionStrideStaging, // ...loop begins at blockDim+1th element
outOffset,
gridDim.y < blockDim.y ? gridDim.y : blockDim.y,
init,
shmem,
SimpleCopyOp<AccT>(),
reduceOp,
finalizeOp);
}
}
}
// Kernel that handles an entire reduction of a slice of a tensor per each thread
template <typename T,
typename IndexType,
typename AccT,
typename ModifyOp,
typename ReduceOp,
typename FinalizeOp,
int ADims, int BDims>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(512, 4)
#endif
__global__ void
kernelReduceNoncontigDim(TensorInfo<T, IndexType> out,
TensorInfo<T, IndexType> in,
IndexType reductionStride,
IndexType reductionSize,
IndexType totalSlices,
AccT init,
ModifyOp modifyOp,
ReduceOp reduceOp,
FinalizeOp finalizeOp) {
const IndexType sliceIndex = getReduceNoncontigDimSliceIndex<IndexType>();
if (sliceIndex >= totalSlices) {
return;
}
// Each thread picks a point in `out` and `in` for which it is
// producing the reduction
const IndexType outOffset =
IndexToOffset<T, IndexType, ADims>::get(sliceIndex, out);
const IndexType inBaseOffset =
IndexToOffset<T, IndexType, BDims>::get(sliceIndex, in);
// For each point in reductionSize, reduce into `r`
IndexType inOffset = inBaseOffset;
AccT r = init;
for (IndexType i = 0; i < reductionSize; ++i) {
const AccT val = scalar_cast<AccT>(in.data[inOffset]);
r = reduceOp(r, modifyOp(val));
inOffset += reductionStride;
}
// Write out reduced value
out.data[outOffset] = scalar_cast<T>(finalizeOp(r));
}
template <typename IndexType>
__device__ __forceinline__ IndexType getReduceContigDimSliceIndex() {
// Each block handles one slice
return getLinearBlockId<IndexType>();
}
// Kernel that handles an entire reduction of a slice of a tensor per
// each block
template <typename T,
typename IndexType,
typename AccT,
typename ModifyOp,
typename ReduceOp,
typename FinalizeOp,
int ADims, int BDims>
__global__ void
kernelReduceContigDim(TensorInfo<T, IndexType> out,
TensorInfo<T, IndexType> in,
IndexType reductionSize,
IndexType totalSlices,
AccT init,
ModifyOp modifyOp,
ReduceOp reduceOp,
FinalizeOp finalizeOp) {
const IndexType sliceIndex = getReduceContigDimSliceIndex<IndexType>();
if (sliceIndex >= totalSlices) {
return;
}
// Get the offset in `out` for the reduction
const IndexType outOffset =
IndexToOffset<T, IndexType, ADims>::get(sliceIndex, out);
// Get the base offset in `in` for this block's reduction
const IndexType inBaseOffset =
IndexToOffset<T, IndexType, BDims>::get(sliceIndex, in);
// Each thread in the block will reduce some subset of elements in
// the slice. The elements are guaranteed contiguous starting at
// `inBaseOffset`.
AccT r = init;
for (IndexType i = threadIdx.x; i < reductionSize; i += blockDim.x) {
const AccT val = scalar_cast<AccT>(in.data[inBaseOffset + i]);
r = reduceOp(r, modifyOp(val));
}
// Reduce within the block
// FIXME: extern name
extern __shared__ char smemChar[];
AccT* smem = (AccT*) smemChar;
r = reduceBlock<AccT, ReduceOp>(smem, blockDim.x, r, reduceOp, init);
if (threadIdx.x == 0) {
// Write out reduced value
out.data[outOffset] = scalar_cast<T>(finalizeOp(r));
}
}
inline dim3 getNoncontigReduceBlock() {
return dim3(THC_NONCONTIG_REDUCE_BLOCK_SIZE);
}
inline dim3 getContigReduceBlock(ptrdiff_t numSlices, int64_t reductionSize) {
// If the number of slices is low but the reduction dimension size
// is high, then we should increase block size for greater parallelism.
// Aim for at least 32 warps per SM (assume 15 SMs; don't bother
// inquiring the real number for now).
int maxWarps = 4; // better occupancy if many blocks are around
// For numSlices > 15 * 8, there are > 32 warps active per SM.
if (numSlices < 15 * 8) {
maxWarps = 8;
if (numSlices < 15 * 4) {
maxWarps = 16;
if (numSlices < 15 * 2) {
maxWarps = 32;
}
}
}
// Scale up block size based on the reduction dimension size
int64_t warpsInReductionSize = THCCeilDiv(reductionSize, (int64_t) 32);
int numWarps = warpsInReductionSize > (int64_t) maxWarps ?
maxWarps : (int) warpsInReductionSize;
return dim3(numWarps * 32);
}
inline bool getNoncontigReduceGrid(ptrdiff_t elements, dim3& grid) {
// One output point per thread
return THC_getGridFromTiles(THCCeilDiv(elements,
(ptrdiff_t) THC_NONCONTIG_REDUCE_BLOCK_SIZE), grid);
}
inline bool getContigReduceGrid(ptrdiff_t elements, dim3& grid) {
// One output point per block
return THC_getGridFromTiles(elements, grid);
}
// Performs a reduction out[..., 0, ...] = reduce_i(modify(in[..., i, ...])) for
// all in where i and the out's 0 are indexed at dimension `dim`
template <typename ScalarType,
typename TensorType,
typename ModifyOp,
typename ReduceOp,
typename FinalizeOp,
typename AccT>
bool THC_reduceDim(THCState* state,
TensorType* out,
TensorType* in,
const ModifyOp modifyOp,
const ReduceOp reduceOp,
const FinalizeOp finalizeOp,
AccT init,
int dim,
int keepdim) {
ptrdiff_t inElements = THCTensor_nElement(state, in);
int64_t reductionSize = THTensor_sizeLegacyNoScalars(in, dim);
int64_t reductionStride = THTensor_strideLegacyNoScalars(in, dim);
ptrdiff_t outElements = inElements / reductionSize;
if (THCTensor_nDimensionLegacyAll(state, out) > MAX_CUTORCH_DIMS ||
THCTensor_nDimensionLegacyAll(state, in) > MAX_CUTORCH_DIMS) {
return false;
}
if (THCTensor_nDimensionLegacyAll(state, in) == 0) {
// Zero-dim tensor; do nothing
return true;
}
// Is the reduction dimension contiguous? If so, then we can use a
// shared memory reduction kernel to increase performance.
bool contigReduction = (reductionStride == 1);
dim3 block;
dim3 grid;
int smemSize = 0; // contiguous reduction uses smem
if (contigReduction) {
if (!getContigReduceGrid(outElements, grid)) {
return false;
}
block = getContigReduceBlock(outElements, reductionSize);
smemSize = sizeof(AccT) * block.x;
} else {
if (!getNoncontigReduceGrid(outElements, grid)) {
return false;
}
block = getNoncontigReduceBlock();
if(outElements <= 4096)
{
// gridDim.x and blockDim.x parallelize work across slices.
// blockDim.y enables some intra-block reduction within slices.
// gridDim.y enables inter-block reduction within slices.
// Each block covers 32 output elements.
int blockdimx = 32;
int griddimx = THCCeilDiv((int64_t)outElements, (int64_t)blockdimx);
// Each warp reduces at most 4 slices. This heuristic can be tuned,
// but locking blockdimy to 16 is robust and reasonably performant.
int blockdimy = 16;
int griddimy = 1;
bool coop = false;
// Rough heuristics to decide if using cooperating blocks is worthwhile
if( outElements <= 32 && reductionSize >= 4096) coop = true;
if( 32 < outElements && outElements <= 64 && reductionSize >= 4096) coop = true;
if( 64 < outElements && outElements <= 128 && reductionSize >= 4096) coop = true;
if( 128 < outElements && outElements <= 256 && reductionSize >= 4096) coop = true;
if( 256 < outElements && outElements <= 512 && reductionSize >= 4096) coop = true;
if( 512 < outElements && outElements <= 1024 && reductionSize >= 4096) coop = true;
if(1024 < outElements && outElements <= 2048 && reductionSize >= 2048) coop = true;
if(2048 < outElements && outElements <= 4096 && reductionSize >= 2048) coop = true;
// Each block reduces at most CHUNKPERBLOCK (currently 256) slices.
if(coop)
griddimy = THCCeilDiv((int64_t)reductionSize, (int64_t)CHUNKPERBLOCK);
grid = dim3(griddimx, griddimy, 1);
block = dim3(blockdimx, blockdimy, 1);
}
}
// Resize out to correspond to the reduced size with keepdim=True.
// Preserve noncontiguities by unsqueezing out if necessary
THCTensor_preserveReduceDimSemantics(
state, out, THCTensor_nDimensionLegacyAll(state, in), dim, keepdim);
// Resize out
std::vector<int64_t> sizes = THTensor_sizesLegacyNoScalars(in);
sizes[dim] = 1;
THCTensor_resize(state, out, sizes, {});
// It is possible that the tensor dimensions are able to be collapsed,
// and thus we can reduce the actual code complexity of the copy by
// exploiting this knowledge statically, since the div/mod is the
// most expensive part of the operation, more so than memory accesses.
// For instance, when copying a non-contiguous to a contiguous tensor
// (or vice versa), the contiguous tensor can be collapsed to one
// dimension, and the loop to translate the linear index to the array
// index can be similarly collapsed. That is what this unrolling is for.
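// HANDLE_CASE instantiates the kernels for the statically known (collapsed)
// dimensionalities: 1, 2, or -1 for the generic n-dimensional fallback.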
#define HANDLE_CASE(TYPE, OUT, IN) \
if (contigReduction) { \
kernelReduceContigDim<ScalarType, \
TYPE, AccT, ModifyOp, ReduceOp, FinalizeOp, \
OUT, IN> \
<<<grid, block, smemSize, c10::cuda::getCurrentCUDAStream()>>> \
(outInfo, inInfo, reductionSize, \
(TYPE) outElements, init, modifyOp, reduceOp, finalizeOp); \
} else { \
if(block.y == 1){ \
kernelReduceNoncontigDim< \
ScalarType, \
TYPE, AccT, ModifyOp, ReduceOp, FinalizeOp, \
OUT, IN> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>> \
(outInfo, inInfo, reductionStride, reductionSize, \
(TYPE) outElements, init, modifyOp, reduceOp, finalizeOp); \
} \
else \
{ \
void* stagingData = nullptr; \
void* semaphores = nullptr; \
\
if(grid.y > 1) \
{ \
stagingData = THCudaMalloc(state, sizeof(AccT)*outElements*grid.y);\
semaphores = THCudaMalloc(state, sizeof(int)*grid.x); \
THCudaCheck(cudaMemsetAsync \
(semaphores, \
0, \
sizeof(int)*grid.x, \
c10::cuda::getCurrentCUDAStream())); \
} \
\
kernelReduceNoncontigDim_shared \
<ScalarType, TYPE, AccT, ModifyOp, ReduceOp, FinalizeOp, OUT, IN> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>> \
(outInfo, \
inInfo, \
reductionStride, \
reductionSize, \
(TYPE) outElements, \
init, \
modifyOp, \
reduceOp, \
finalizeOp, \
(volatile AccT*)stagingData, \
(int*)semaphores); \
\
if(grid.y > 1) \
{ \
THCudaFree(state, stagingData); \
THCudaFree(state, semaphores); \
} \
} \
}
#define HANDLE_IN_CASE(TYPE, OUT, IN) \
{ \
switch (IN) { \
case 1: \
HANDLE_CASE(TYPE, OUT, 1); \
break; \
case 2: \
HANDLE_CASE(TYPE, OUT, 2); \
break; \
default: \
HANDLE_CASE(TYPE, OUT, -1); \
break; \
} \
}
#define HANDLE_OUT_CASE(TYPE, OUT, IN) \
{ \
switch (OUT) { \
case 1: \
HANDLE_IN_CASE(TYPE, 1, IN); \
break; \
case 2: \
HANDLE_IN_CASE(TYPE, 2, IN); \
break; \
default: \
HANDLE_IN_CASE(TYPE, -1, IN); \
break; \
} \
}
if(THCTensor_canUse32BitIndexMath(state, out) &&
THCTensor_canUse32BitIndexMath(state, in))
{
TensorInfo<ScalarType,
unsigned int> outInfo =
getTensorInfo<ScalarType, TensorType, unsigned int>(state, out);
outInfo.collapseDims();
TensorInfo<ScalarType,
unsigned int> inInfo =
getTensorInfo<ScalarType, TensorType, unsigned int>(state, in);
inInfo.reduceDim(dim);
inInfo.collapseDims();
HANDLE_OUT_CASE(unsigned int, outInfo.dims, inInfo.dims);
}
else
{
TensorInfo<ScalarType,
uint64_t> outInfo =
getTensorInfo<ScalarType, TensorType, uint64_t>(state, out);
outInfo.collapseDims();
TensorInfo<ScalarType,
uint64_t> inInfo =
getTensorInfo<ScalarType, TensorType, uint64_t>(state, in);
inInfo.reduceDim(dim);
inInfo.collapseDims();
/*
Only instantiates the all 1D special case and the fallback all nD case for
large (64-bit indexed) tensors to reduce compilation time.
*/
if (outInfo.dims == 1 && inInfo.dims == 1) {
HANDLE_CASE(uint64_t, 1, 1);
} else {
HANDLE_CASE(uint64_t, -1, -1);
}
}
#undef HANDLE_CASE
#undef HANDLE_IN_CASE
#undef HANDLE_OUT_CASE
if (!keepdim) {
THCTensor_squeeze1d(state, out, out, dim);
}
return true;
}
#undef THC_NONCONTIG_REDUCE_BLOCK_SIZE
#undef CHUNKPERBLOCK
#endif // THC_REDUCE_INC
// add_QKV_bias kernel code modified from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_attention.cu#L1342-L1395
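// add_QKV_bias_opt: adds the per-channel bias to Q/K/V and writes them,
// transposed from [batch, seq, head, size_per_head] to
// [batch, head, seq, size_per_head], into q_buf_/k_buf_/v_buf_.
// grid.x covers 3 * batch_size * seq_len rows (blockIdx.x / (batch*seq)
// selects Q, K or V); grid.y folds the hidden dimension whenever
// head_num * size_per_head exceeds the number of threads per block.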
template<typename T>
__global__
void add_QKV_bias_opt(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x / m ;
int row_offset = (blockIdx.x % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = (threadIdx.x + blockIdx.y * blockDim.x) / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x ) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x + blockDim.x * blockIdx.y]);
for(int i = word_start_id; i < word_start_id + 1; ++i)
{
T tmp = data_ptr[threadIdx.x + blockDim.x * blockIdx.y] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
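// half specialization: works on half2 pairs, so the launcher passes
// size_per_head / 2 and each thread handles two fp16 values with __hadd2.
// Unlike the generic version, one thread produces the Q, K and V outputs for
// its position; the transposed offset comes from target_index(), a helper
// assumed to be defined elsewhere in this translation unit.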
template<>
__global__
void add_QKV_bias_opt<half>( half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
half* q_buf_, half* k_buf_, half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * (size_per_head * head_num) + threadIdx.x + blockDim.x * blockIdx.y;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x + blockDim.x * blockIdx.y;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
template<typename T>
void add_QKV_bias_opt_kernel( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream){
int qkv_types = 3;
int m = batch_size * seq_len;
int k = head_num * size_per_head;
//assert(m * qkv_types <= 65536 && "batch_size * seq_len must <= 65536");
int fold_coeff = 1;
dim3 grid;
dim3 block;
//TODO - int8
if (sizeof(T) == sizeof(float)){
if (k <= 1024){
fold_coeff = 1;
}else if( k <= 2048){
fold_coeff = 2;
}else if(k <= 4096){
fold_coeff = 4;
}else if(k <= 8192){
fold_coeff = 8;
}else if(k <= 16384){
fold_coeff = 16;
}
grid.x = m * qkv_types;
grid.y = fold_coeff;
block.x = k / fold_coeff;
add_QKV_bias_opt<<<grid, block, 0, stream>>>((float*)Q, (float*)bias_Q, (float*)K, (float*)bias_K, (float*)V, (float*)bias_V, (float*)q_buf_, (float*)k_buf_,
(float*)v_buf_, batch_size, seq_len, head_num, size_per_head);
}else{
if (k <= 2048){
fold_coeff = 2;
}else if( k <= 4096){
fold_coeff = 2;
}else if(k <= 8192){
fold_coeff = 4;
}else if(k <= 16384){
fold_coeff = 8;
}else if(k <= 16384 * 2){
fold_coeff = 16;
}
grid.x = m;
grid.y = fold_coeff;
block.x = k / (2 * fold_coeff);
add_QKV_bias_opt<<<grid, block, 0, stream>>>((half*)Q, (half*)bias_Q, (half*)K, (half*)bias_K, (half*)V, (half*)bias_V, (half*)q_buf_, (half*)k_buf_,
(half*)v_buf_, batch_size, seq_len, head_num, size_per_head / 2);
}
}
template void add_QKV_bias_opt_kernel<float>( void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
template void add_QKV_bias_opt_kernel<half>(void* Q, const void* bias_Q, void* K, const void* bias_K, void* V, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
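// fused_add_QKV_bias: same bias-add + transpose as above, but the inputs come
// from a single fused projection buffer. The pointer arithmetic below
// corresponds to a QKV layout of
// [batch, seq, {Q, K, V}, head_num * size_per_head], i.e. Q, K and V are
// interleaved per token.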
template<typename T>
__global__
void fused_add_QKV_bias(T* QKV, const T* bias_Q, const T* bias_K, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int batch_id = (blockIdx.x % m) / seq_len;
int word_start_id = (blockIdx.x ) % seq_len;
int qkv_id = blockIdx.x / m ;
int row_offset = (blockIdx.x % m) * n;
int bid_offset = 2 * n * seq_len;
if(qkv_id == 0)
{
data_ptr = QKV + row_offset + batch_id * bid_offset + 2 * word_start_id * n;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = QKV + row_offset + batch_id * bid_offset + 2 * word_start_id * n + n;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = QKV + row_offset + batch_id * bid_offset + 2 * word_start_id * n + 2 * n;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int head_id = (threadIdx.x + blockIdx.y * blockDim.x) / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
T bias = __ldg(&bias_ptr[threadIdx.x + blockDim.x * blockIdx.y]);
for(int i = word_start_id; i < word_start_id + 1; ++i)
{
T tmp = data_ptr[threadIdx.x + blockDim.x * blockIdx.y] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
__global__
void fused_add_QKV_bias( half* QKV, const half* bias_Q, const half* bias_K, const half* bias_V,
half* q_buf_, half* k_buf_, half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * (size_per_head * head_num) + threadIdx.x + blockDim.x * blockIdx.y;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x + blockDim.x * blockIdx.y;
int q_tid = tid + 2 * batch_id * size_per_head * head_num * seq_len + 2 * seq_id * size_per_head * head_num;
int k_tid = tid + 2 * batch_id * size_per_head * head_num * seq_len + size_per_head * head_num + 2 * seq_id * size_per_head * head_num;
int v_tid = tid + 2 * batch_id * size_per_head * head_num * seq_len + 2 * size_per_head * head_num + 2 * seq_id * size_per_head * head_num;
half2* src_ptr = (half2*)QKV;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[q_tid], __ldg(&bias_ptr[bias_id]));
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[k_tid], __ldg(&bias_ptr[bias_id]));
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[v_tid], __ldg(&bias_ptr[bias_id]));
}
template<typename T>
void fused_add_QKV_bias_kernel( void* QKV, const void* bias_Q, const void* bias_K, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream){
int m = batch_size * seq_len;
int k = head_num * size_per_head;
int qkv_types = 3;
//assert(m * qkv_types <= 65536 && "batch_size * seq_len must <= 65536");
int fold_coeff = 1;
dim3 grid;
dim3 block;
if (sizeof(T) == sizeof(float)){
if (k <= 1024){
fold_coeff = 1;
}else if( k <= 2048){
fold_coeff = 2;
}else if(k <= 4096){
fold_coeff = 4;
}else if(k <= 8192){
fold_coeff = 8;
}else if(k <= 16384){
fold_coeff = 16;
}
grid.x = m * qkv_types;
grid.y = fold_coeff;
block.x = k / fold_coeff;
fused_add_QKV_bias<<<grid, block, 0, stream>>>((float*)QKV, (float*)bias_Q, (float*)bias_K, (float*)bias_V, (float*)q_buf_, (float*)k_buf_,
(float*)v_buf_, batch_size, seq_len, head_num, size_per_head);
}else{
if (k <= 2048){
fold_coeff = 2;
}else if( k <= 4096){
fold_coeff = 2;
}else if(k <= 8192){
fold_coeff = 4;
}else if(k <= 16384){
fold_coeff = 8;
}else if(k <= 16384 * 2){
fold_coeff = 16;
}
grid.x = m;
grid.y = fold_coeff;
block.x = k / (2 * fold_coeff);
fused_add_QKV_bias<<<grid, block, 0, stream>>>((half*)QKV, (half*)bias_Q, (half*)bias_K, (half*)bias_V, (half*)q_buf_, (half*)k_buf_,
(half*)v_buf_, batch_size, seq_len, head_num, size_per_head / 2);
}
}
template void fused_add_QKV_bias_kernel<float>( void* QKV, const void* bias_Q, const void* bias_K, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
template void fused_add_QKV_bias_kernel<half>( void* QKV, const void* bias_Q, const void* bias_K, const void* bias_V, void* q_buf_, void* k_buf_, void* v_buf_,
const int& batch_size, const int& seq_len, const int& head_num, const int& size_per_head, const cudaStream_t stream);
// Pseudo-random number generator
namespace amgx
{
__host__ __device__ unsigned int hash(unsigned int a, unsigned int seed)
{
a ^= seed;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return a;
}
struct is_less_than_zero
{
__host__ __device__
bool operator()(int x)
{
return x < 0;
}
};
// ---------------------------
// Kernels
// ---------------------------
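// colorRowsMultiHashKernel_1step: single-pass coloring. For each row i and
// each of the num_hash hash functions k, the row may take color 2k if
// hash(i, k) is larger than the hash of every neighbour, or 2k+1 if it is
// smaller than every neighbour. One of the feasible colors is picked
// pseudo-randomly; rows with no feasible color fall back to color 2*num_hash.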
template <typename IndexType, int num_hash>
__global__
void colorRowsMultiHashKernel_1step(const IndexType *A_offsets, const IndexType *A_column_indices, IndexType *row_colors, const int num_rows, const int next_color)
{
int hash_j;
int hash_i;
int my_colors[num_hash];
int my_row_color;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_rows; i += blockDim.x * gridDim.x)
{
int num_possible_colors = 0;
bool max_i, min_i;
int row_start = A_offsets[i];
int row_end = A_offsets[i + 1];
//#pragma unroll //can't unroll because it is not the innermost loop
for (int k = 0; k < num_hash; k++)
{
max_i = true;
min_i = true;
for (int r = row_start; r < row_end; r++)
{
int j = A_column_indices[r];
if (j >= num_rows) { continue; }
hash_j = hash(j, k);
hash_i = hash(i, k);
// There is an uncolored neighbour that is greater
if ( hash_j > hash_i)
{
max_i = false;
}
// There is an uncolored neighbour that is smaller
if (hash_j < hash_i)
{
min_i = false;
}
}
// If not colored or colored but coin flip decides color should be changed
if (max_i)
{
my_colors[num_possible_colors++] = 2 * k;
}
else if (min_i)
{
my_colors[num_possible_colors++] = 2 * k + 1;
}
}
if (num_possible_colors)
{
int rand_pick = hash(i, 0) % num_possible_colors;
my_row_color = my_colors[rand_pick];
}
else
{
my_row_color = 2 * num_hash;
}
row_colors[i] = my_row_color;
}
}
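// colorRowsMultiHashKernel: iterative variant. Already-colored rows are
// skipped, and neighbours colored in an earlier round (color < next_color) are
// ignored. The not_min/not_max bitmasks record, per hash function, whether the
// row has lost the min/max competition; if any feasible color remains, the row
// picks one deterministically from its index and takes a color offset by
// next_color.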
template <typename IndexType, int num_hash>
__global__
void colorRowsMultiHashKernel(const IndexType *A_offsets, const IndexType *A_column_indices, IndexType *row_colors, const int num_rows, const int next_color, const int seed)
{
unsigned int i_rand[num_hash];
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_rows; i += blockDim.x * gridDim.x)
{
// skip if previously colored
if (row_colors[i] != -1) { continue; }
int not_min = 0, not_max = 0;
int t;
#pragma unroll
for (t = 0; t < num_hash; t++)
{
i_rand[t] = hash(i, seed + 1043 * t);
}
// have we been proved to be not min or max
int row_start = A_offsets[i];
int row_end = A_offsets[i + 1];
int possible_colors = 2 * num_hash;
for (int r = row_start; r < row_end; r++)
{
int j = A_column_indices[r];
// skip diagonal
if (j == i || j >= num_rows)
{
continue;
}
int j_color = row_colors[j];
if (j_color != -1 && j_color < next_color)
{
continue;
}
#pragma unroll
for (t = 0; t < num_hash; t++)
{
unsigned int j_rand = hash(j, seed + 1043 * t);
// bail if any neighbor is greater
if (i_rand[t] <= j_rand && !(not_max & (0x1 << t) ))
{
not_max |= (0x1 << t);
possible_colors--;
}
if (i_rand[t] >= j_rand && !(not_min & (0x1 << t) ))
{
not_min |= (0x1 << t);
possible_colors--;
}
}
if (possible_colors == 0)
{
break;
}
}
if (possible_colors == 0) { continue; }
// made it here, so possible_colors > 0
// pick one of the possible colors pseudo-randomly
int col_id = i % possible_colors;
int this_col_id = 0;
for (t = 0; t < num_hash; t++)
{
if (!(not_min & (0x1 << t) ) && col_id == this_col_id)
{
row_colors[i] = 2 * t + next_color;
return;
}
this_col_id += !(not_min & (0x1 << t));
if (!(not_max & (0x1 << t) ) && col_id == this_col_id)
{
row_colors[i] = 2 * t + 1 + next_color;
return;
}
this_col_id += !(not_max & (0x1 << t));
}
}
}
// ---------------------------
// Methods
// ---------------------------
template<class T_Config>
MultiHashMatrixColoringBase<T_Config>::MultiHashMatrixColoringBase(AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope)
{
if (cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default"))
{
m_uncolored_fraction = 0.;
}
else
{
m_uncolored_fraction = cfg.AMG_Config::getParameter<double>("max_uncolored_percentage", cfg_scope);
}
max_num_hash = cfg.AMG_Config::getParameter<int>("max_num_hash", cfg_scope);
}
template<class TConfig>
void MultiHashMatrixColoringBase<TConfig>::colorMatrix(Matrix<TConfig> &A)
{
ViewType oldView = A.currentView();
this->m_row_colors.resize(A.row_offsets.size() - 1, 0);
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
if (this->m_coloring_level == 0)
{
FatalError("Calling coloring scheme but coloring level==0", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
else if (this->m_coloring_level == 1)
{
this->colorMatrixOneRing(A);
}
else
{
FatalError("Multi-hash coloring algorithm can only do one ring coloring", AMGX_ERR_NOT_IMPLEMENTED);
}
A.setView(oldView);
}
// Block version
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiHashMatrixColoring<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::colorMatrixOneRing(Matrix_d &A)
{
profileSubphaseMatrixColoring();
// One thread per row
const int num_rows = A.get_num_rows();
int max_uncolored_rows = (int) (this->m_uncolored_fraction * ((ValueType) num_rows));
thrust::fill(this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, -1);
cudaCheckError();
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
IndexType *row_colors_ptr = this->m_row_colors.raw();
const int threads_per_block = 256;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / threads_per_block + 1);
this->m_num_colors = 0;
        // Heuristic for setting the number of hash functions to use
int avg_nonzero = 1.5 * A.row_offsets[num_rows] / num_rows;
this->num_hash = min(avg_nonzero, this->max_num_hash);
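        // E.g. for a 5-point-stencil matrix (about 5 nonzeros per row) avg_nonzero
        // truncates to 7, so at most 7 hash functions (fewer if max_num_hash is
        // smaller) are tried per sweep.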
int next_color = 0;
int it = 0;
int seed = 1012;
if (avg_nonzero != 0)
{
for ( int num_uncolored = num_rows; num_uncolored > max_uncolored_rows ; )
{
it++;
// Assign all nodes to 0 colors by default
            // Dispatch on the compile-time number of hash functions (1..25).
            switch (this->num_hash)
            {
#define AMGX_LAUNCH_MULTIHASH_CASE(N)                                                        \
    case N:                                                                                  \
        colorRowsMultiHashKernel<IndexType, N> <<< num_blocks, threads_per_block>>> (        \
            A_row_offsets_ptr, A_column_indices_ptr, row_colors_ptr, num_rows,               \
            next_color, seed);                                                               \
        break;
                AMGX_LAUNCH_MULTIHASH_CASE(1)
                AMGX_LAUNCH_MULTIHASH_CASE(2)
                AMGX_LAUNCH_MULTIHASH_CASE(3)
                AMGX_LAUNCH_MULTIHASH_CASE(4)
                AMGX_LAUNCH_MULTIHASH_CASE(5)
                AMGX_LAUNCH_MULTIHASH_CASE(6)
                AMGX_LAUNCH_MULTIHASH_CASE(7)
                AMGX_LAUNCH_MULTIHASH_CASE(8)
                AMGX_LAUNCH_MULTIHASH_CASE(9)
                AMGX_LAUNCH_MULTIHASH_CASE(10)
                AMGX_LAUNCH_MULTIHASH_CASE(11)
                AMGX_LAUNCH_MULTIHASH_CASE(12)
                AMGX_LAUNCH_MULTIHASH_CASE(13)
                AMGX_LAUNCH_MULTIHASH_CASE(14)
                AMGX_LAUNCH_MULTIHASH_CASE(15)
                AMGX_LAUNCH_MULTIHASH_CASE(16)
                AMGX_LAUNCH_MULTIHASH_CASE(17)
                AMGX_LAUNCH_MULTIHASH_CASE(18)
                AMGX_LAUNCH_MULTIHASH_CASE(19)
                AMGX_LAUNCH_MULTIHASH_CASE(20)
                AMGX_LAUNCH_MULTIHASH_CASE(21)
                AMGX_LAUNCH_MULTIHASH_CASE(22)
                AMGX_LAUNCH_MULTIHASH_CASE(23)
                AMGX_LAUNCH_MULTIHASH_CASE(24)
                AMGX_LAUNCH_MULTIHASH_CASE(25)
#undef AMGX_LAUNCH_MULTIHASH_CASE
                default:
                    if (this->num_hash > 25)
                    {
                        FatalError("Multi-hash coloring algorithm currently can't handle more than 25 hash functions", AMGX_ERR_NOT_IMPLEMENTED);
                    }
                    break;
            }
cudaCheckError();
seed = hash(seed, 0);
next_color += 2 * this->num_hash;
num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_less_than_zero() );
cudaCheckError();
}
}
else
{
thrust::fill(this->m_row_colors.begin(), this->m_row_colors.end(), 0);
cudaCheckError();
}
this->m_num_colors = *thrust::max_element(this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows) + 1;
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void MultiHashMatrixColoring<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::colorMatrixOneRing(Matrix_h &A)
{
FatalError("Haven't implemented MultiHash matrix coloring for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
#define AMGX_CASE_LINE(CASE) template class MultiHashMatrixColoringBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class MultiHashMatrixColoring<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // end namespace amgx
#include "FreckleFilter.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Device function: _getMaxMatchValueDev (maximum similarity match of length
// `length` between two histograms)
// Slides a length-`length` window over the circle-boundary and circle-interior
// histograms and returns the maximum correlation value.
static __device__ void           // Return value: none
_getMaxMatchValueDev(
unsigned int *histogram1,        // histogram of the circle boundary (ring)
unsigned int *histogram2,        // histogram of the circle interior
float &maxmatchvalue,            // maximum match value for this pixel
int length,                      // window length used for the similarity match
int hisnum = 256                 // number of histogram bins (256 in this method)
);
// Kernel function: _freckleFilterByVarSumCountKer (accumulate the per-pixel
// mean sum and the accumulation count for the output image)
// Based on the variance threshold, accumulates the per-pixel mean sum and the
// accumulation count for the output image.
static __global__ void           // Kernel functions have no return value
_freckleFilterByVarSumCountKer(
ImageCuda inimg,                 // input image
Template radtpl,                 // circular template specifying the circle-interior neighborhood
Template archtpl,                // ring template specifying the circle-boundary neighborhood
float varTh,                     // externally specified variance threshold
float *sum,                      // accumulated sum of per-pixel means
int *count                       // number of accumulations per pixel
);
// Kernel function: _freckleFilterSetPixelKer (set the pixel values of the
// output image)
// Sets each output pixel to the mean value derived from its accumulated sum
// and accumulation count.
static __global__ void           // Kernel functions have no return value
_freckleFilterSetPixelKer(
ImageCuda inimg,                 // input image
ImageCuda outimg,                // output image
float *sum,                      // accumulated sum of per-pixel means
int *count,                      // number of accumulations per pixel
int select                       // selection parameter used for the final assignment
);
// Kernel function: _freckleFilterByStrMscKer (accumulate the per-pixel mean
// sum and the accumulation count for the output image)
// Using similarity matching against a match-error threshold, accumulates the
// per-pixel mean sum and the accumulation count for the output image.
static __global__ void           // Kernel functions have no return value
_freckleFilterByStrMscKer(
ImageCuda inimg,                 // input image
Template radtpl,                 // circular template specifying the circle-interior neighborhood
Template archtpl,                // ring template specifying the circle-boundary neighborhood
float matchErrTh,                // externally specified match-error threshold
int length,                      // window length used for the similarity match
int radius,                      // radius of the circular neighborhood
float *sum,                      // accumulated sum of per-pixel means
int *count                       // number of accumulations per pixel
);
// Kernel function: _freckleFilterByVarSumCountKer (accumulate the per-pixel
// mean sum and the accumulation count, based on the variance threshold)
static __global__ void _freckleFilterByVarSumCountKer(
ImageCuda inimg, Template radtpl, Template archtpl, float varTh,
float *sum, int *count)
{
// dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量
// dstc 表示 column, dstr 表示 row)。由于采用并行度缩减策略 ,令一个线程
// 处理 4 个输出像素,这四个像素位于统一列的相邻 4 行上,因此,对于
// dstr 需要进行乘 4 的计算
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,
// 另一方面防止由于段错误导致系统崩溃
if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height)
return;
// 用来保存临时像素点的坐标的 x 和 y 分量
int dx, dy;
// 用来记录当前模版所在位置的指针
int *curtplptr;
// 用来记录当前输入图像所在位置的指针
unsigned char *curinptr;
// 计数器,用来记录某点在模版范围内拥有的点的个数
int statistic[4] = { 0 , 0, 0, 0 };
// 迭代求平均值和方差使用的中间值
float m[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
// 计算得到的平均值
float mean[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
// 计算得到的拱圆模板领域方差
float variance[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
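    // The scan below uses Welford's online update for each of the 4 rows:
    //   mean_k = mean_{k-1} + (x_k - mean_{k-1}) / k
    //   M_k    = M_{k-1} + (x_k - mean_{k-1}) * (x_k - mean_k)
    // so that after the scan, variance = M_n / n (computed further below).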
int pix; // 局部变量,临时存储像素值
// 指定当前环形模版所在位置
curtplptr = archtpl.tplData;
// 扫描环形模版范围内的每个输入图像的像素点
for (int i = 0; i < archtpl.count; i++) {
// 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的
// 数组表示一个点,所以使用当前模版位置的指针加一操作
dx = dstc + *(curtplptr++);
dy = dstr + *(curtplptr++);
float temp; // 局部变量,在进行迭代时的中间变量
// 先判断当前像素的 x 分量是否越界,如果越界,则跳过,扫描下一个模版点,
// 如果没有越界,则分别处理当前列的相邻的 4 个像素
if (dx >= 0 && dx < inimg.imgMeta.width) {
// 根据 dx 和 dy 获取第一个像素的指针
curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes;
// 检测此像素点的 y 分量是否越界
if (dy >= 0 && dy < inimg.imgMeta.height) {
// 对第一个点进行迭代
pix = *(curinptr);
statistic[0]++;
temp = pix - mean[0];
mean[0] += temp / statistic[0];
m[0] += temp * (pix - mean[0]);
}
// 获取第二个像素点的指针
curinptr = curinptr + inimg.pitchBytes;
dy++;
// 检测第二个像素点的 y 分量是否越界
if (dy >= 0 && dy < inimg.imgMeta.height) {
// 对第二个点进行迭代
pix = *(curinptr);
statistic[1]++;
temp = pix - mean[1];
mean[1] += temp / statistic[1];
m[1] += temp * (pix - mean[1]);
}
// 获取第三个像素点的指针
curinptr = curinptr + inimg.pitchBytes;
dy++;
// 检测第三个像素点的 y 分量是否越界
if (dy >= 0 && dy < inimg.imgMeta.height) {
// 对第三个点进行迭代
pix = *(curinptr);
statistic[2]++;
temp = pix - mean[2];
mean[2] += temp / statistic[2];
m[2] += temp * (pix - mean[2]);
}
// 获取第四个像素点的指针
curinptr = curinptr + inimg.pitchBytes;
dy++;
// 检测第四个像素点的 y 分量是否越界
if (dy >= 0 && dy < inimg.imgMeta.height) {
// 对第四个点进行迭代
pix = *(curinptr);
statistic[3]++;
temp = pix - mean[3];
mean[3] += temp / statistic[3];
m[3] += temp * (pix - mean[3]);
}
}
}
// 计算输出坐标点对应的图像数据数组下标。
int index;
// 对每个像素点求圆周上点的方差大小,根据方差与阈值大小给输出点累加和
for(int i = 0; i < 4; i++) {
// 如果圆周领域内的的点个数为 0,则判断下一个像素点
if(statistic[i] == 0)
continue;
// 计算环形模板领域的方差
variance[i] = m[i] / statistic[i];
// 如果方差小于给定阈值,则对圆形模板里的所有点赋平均值
if (variance[i] < varTh) {
// 指定当前圆形模版所在位置
curtplptr = radtpl.tplData;
// 扫描圆形模版范围内的每个输入图像的像素点
for (int j = 0; j < radtpl.count; j++) {
// 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个
// 下标的数组表示一个点,所以使用当前模版位置的指针加一操作
dx = dstc + *(curtplptr++);
dy = dstr + *(curtplptr++);
// 根据 dx 和 dy 获取像素下标
dy = dy + i;
index = dx + dy * inimg.imgMeta.width;
// 如果没有越界,则分别处理当前列的相邻的符合条件的像素
// 给累加和累加平均值,累加次数相应加 1
if (dx >= 0 && dx < inimg.imgMeta.width &&
dy >= 0 && dy < inimg.imgMeta.height) {
atomicAdd(&sum[index], mean[i]);
atomicAdd(&count[index], 1);
}
}
}
}
}
// Kernel function: _freckleFilterSetPixelKer (set the pixel values of the output image)
static __global__ void _freckleFilterSetPixelKer(
ImageCuda inimg, ImageCuda outimg, float *sum, int *count, int select)
{
// dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,
// c 表示 column, r 表示 row)。由于采用并行度缩减策略 ,令一个线程
// 处理 4 个输出像素,这四个像素位于统一列的相邻 4 行上,因此,对于
// dstr 需要进行乘 4 的计算
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算
// 资源,另一方面防止由于段错误导致程序崩溃
if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height)
return;
// 计算第一个输入坐标点对应的图像数据数组下标。
int outidx = dstr * outimg.imgMeta.width + dstc;
int out = dstr * outimg.pitchBytes + dstc;
int temp; // 临时变量用于 float 型数据转 int 型,需要四舍五入
// 计算每一个点的像素平均值,并且四舍五入 float 转 int 型
if (count[outidx] == 0) {
// 如果该点没有被累加和,如果为 FRECKLE_OPEN 则应该赋值为
// 原图像对应灰度值,如果为 FRECKLE_CLOSE,则赋值为 0
if (select == FRECKLE_OPEN)
temp = inimg.imgMeta.imgData[out];
else if (select == FRECKLE_CLOSE)
temp = 0;
} else {
// 如果被累加和,则按以下方式求像素平均值并按要求处理
temp = (int)(sum[outidx] / count[outidx] + 0.5f);
}
// 对图像每点像素值赋上对应值
outimg.imgMeta.imgData[out] = (unsigned char)temp;
// 处理剩下的三个像素点。
for (int i = 0; i < 3; i++) {
// 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因
// 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各
// 点之间没有变化,故不用检查。
if (++dstr >= outimg.imgMeta.height)
return;
// 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y
// 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计
// 算。
outidx += outimg.imgMeta.width;
out += outimg.pitchBytes;
// 计算每一个点的像素平均值,并且四舍五入 float 转 int 型
if (count[outidx] == 0) {
// 如果该点没有被累加和,如果为 FRECKLE_OPEN 则应该赋值为
// 原图像对应灰度值,如果为 FRECKLE_CLOSE,则赋值为 0
if (select == FRECKLE_OPEN)
temp = inimg.imgMeta.imgData[out];
else if (select == FRECKLE_CLOSE)
temp = 0;
} else {
// 如果被累加和,则按以下方式求像素平均值并按要求处理
temp = (int)(sum[outidx] / count[outidx] + 0.5f);
}
// 对图像每点像素值赋上对应值
outimg.imgMeta.imgData[out] = (unsigned char)temp;
}
}
// Device function: _getMaxMatchValueDev (maximum similarity match of length
// `length` between two histograms)
static __device__ void _getMaxMatchValueDev(
unsigned int *histogram1, unsigned int *histogram2,
float &maxmatchvalue, int length, int hisnum)
{
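    // For each window position j, the loop below computes the Pearson
    // correlation coefficient between the two length-`length` windows:
    //   r = (n*Sxy - Sx*Sy) / sqrt((n*Sxx - Sx^2) * (n*Syy - Sy^2))
    // with n = length, Sx = sum1, Sy = sum2, Sxy = sum3, Sxx = sum4, Syy = sum5,
    // and keeps the maximum r over all positions.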
// 临时变量 matchvalue,存储匹配的结果值
float matchvalue = 0.0f;
// 从左端开始匹配
// 临时变量 location,用于定位匹配最右位置
int location = hisnum - length;
for (int j = 0; j <= location; j++) {
// 临时变量,存储计算相关系数的和
unsigned int sum1 = { 0 };
unsigned int sum2 = { 0 };
unsigned int sum3 = { 0 };
unsigned int sum4 = { 0 };
unsigned int sum5 = { 0 };
// 临时变量,存储获得数组对应值
unsigned int tmp1, tmp2;
// 临时变量,存储计算相关系数算法的分母
float m1, m2;
// 计算相似度需要用到的临时变量
for (int k = 0; k < length; k++) {
// 取得对应直方图值
tmp1 = *(histogram1 + j + k);
tmp2 = *(histogram2 + j + k);
// 计算相似度要用到的累加和
sum1 += tmp1;
sum2 += tmp2;
sum3 += tmp1 * tmp2;
sum4 += tmp1 * tmp1;
sum5 += tmp2 * tmp2;
}
// 计算相似度的分母临时变量
m1 = sqrtf((float)(length * sum4 - sum1 * sum1));
m2 = sqrtf((float)(length * sum5 - sum2 * sum2));
// 计算匹配的相似度
if (m1 <= 0.000001f || m2 <= 0.000001f)
matchvalue = 0.0f;
else
matchvalue = ((int)(length * sum3 - sum1 * sum2)) /
(m1 * m2);
// 取相似度最大值
if (matchvalue > maxmatchvalue) {
maxmatchvalue = matchvalue;
}
}
}
// Kernel function: _freckleFilterByStrMscKer (accumulate the per-pixel mean
// sum and the accumulation count via similarity matching)
static __global__ void _freckleFilterByStrMscKer(
ImageCuda inimg, Template radtpl, Template archtpl, float matchErrTh,
int length, int radius, float *sum, int *count)
{
    // dstc and dstr are the x and y coordinates of the pixel handled by this
    // thread (c stands for column, r for row). Unlike the kernels above, this
    // kernel processes a single pixel per thread, so dstr is not multiplied by 4.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = blockIdx.y * blockDim.y + threadIdx.y;
    // Only pixels that qualify as circle centers (lying on a grid with spacing
    // `radius`, strictly inside the image border) are processed; skip the rest.
if (dstc % radius != 0 || dstr % radius != 0 || dstc <= 0 || dstr <= 0 ||
dstc >= inimg.imgMeta.width - 1 || dstr >= inimg.imgMeta.height - 1)
return;
// 用来保存临时像素点的坐标的 x 和 y 分量
int dx, dy;
// 用来记录当前模版所在位置的指针
int *curtplptr;
// 用来记录当前输入图像所在位置的指针
unsigned char *curinptr;
// 圆周上的图像直方图 histogram1
unsigned int histogram1[256] = { 0 };
// 圆内的图像直方图 histogram2
unsigned int histogram2[256] = { 0 };
// 计数器,用来记录某点在圆周上和园内拥有的点的个数
int statistic = 0;
unsigned int pix; // 局部变量,临时存储像素值
// 指定当前环形模版所在位置
curtplptr = archtpl.tplData;
// 扫描环形模版范围内的每个输入图像的像素点
for (int i = 0; i < archtpl.count; i++) {
// 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的
// 数组表示一个点,所以使用当前模版位置的指针加一操作
dx = dstc + *(curtplptr++);
dy = dstr + *(curtplptr++);
// 先判断当前像素的 x 分量,y 分量是否越界,如果越界,则跳过,扫描
// 下一个模版点,如果没有越界,则分别处理当前列的相邻的 4 个像素
if (dx >= 0 && dx < inimg.imgMeta.width &&
dy >= 0 && dy < inimg.imgMeta.height) {
// 根据 dx 和 dy 获取像素的指针
curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes;
pix = *(curinptr);
histogram1[pix]++;
statistic++;
}
}
// 如果圆周领域内的的点个数为 0 这直接返回
if(statistic == 0)
return;
// 指定当前圆形模版所在位置
curtplptr = radtpl.tplData;
// 扫描环形模版范围内的每个输入图像的像素点
for (int i = 0; i < radtpl.count; i++) {
// 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的
// 数组表示一个点,所以使用当前模版位置的指针加一操作
dx = dstc + *(curtplptr++);
dy = dstr + *(curtplptr++);
        // Check whether the x and y components of the current pixel are out of
        // bounds; if so, skip this template point.
        if (dx >= 0 && dx < inimg.imgMeta.width &&
            dy >= 0 && dy < inimg.imgMeta.height) {
            // Get the pixel pointer from dx and dy.
            curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes;
            pix = *(curinptr);
            histogram2[pix]++;
        }
}
// 存储以四个像素圆心得到两直方图的匹配最大值
float maxmatchvalue = 0.0f;
// 得到四个像素的两直方图的匹配最大值
_getMaxMatchValueDev(histogram1, histogram2, maxmatchvalue, length, 256);
// 计算输出坐标点对应的图像数据数组下标。
int index;
// 根据匹配差与阈值大小对符合条件像素点对其圆周上点进行排序,
// 取中间 50% 灰度平均,给输出点累加和累加赋值
// 如果匹配差大于给定阈值,则对圆形模板里的所有点赋平均值
if (1 - maxmatchvalue > matchErrTh) {
// 存储圆周上的图像值的中值平均(取排序后中间 50% 平均)
float mean;
// 去掉排序结果中前端的数量
int lownum = (int)(statistic * 0.25f + 0.5f);
// 去掉排序结果中末端端的数量
int highnum = (int)(statistic * 0.25f + 0.5f);
// 对直方图前后端个数统计
int lowcount = 0, highcount = 0;
// 在前后端统计时,中间段少加的值
int lowvalue = 0, highvalue = 0;
// 前后端统计时的开关
bool lowmask = false, highmask = false;
// 直方图中间段的两端索引
int lowindex = 0, highindex = 0;
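        // The scan below computes a trimmed mean of the ring histogram: the
        // lowest `lownum` and highest `highnum` samples (25% each) are dropped
        // and the remaining middle ~50% are averaged. lowvalue/highvalue add
        // back the surplus samples of the bins that straddle the cut points,
        // e.g. if the low cut falls inside bin k, the (lowcount - lownum)
        // samples of bin k that were not trimmed still contribute k each.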
for (int k = 0; k < 256; k++) {
// 计算直方图前端的个数
lowcount += histogram1[k];
if (!lowmask && lowcount >= lownum) {
lowindex = k + 1;
lowvalue = (lowcount - lownum) * k;
lowmask = true;
}
// 直方图后端的循环索引
int high = 255 - k;
// 计算直方图后端的个数
highcount += histogram1[high];
if (!highmask && highcount >= highnum) {
highindex = high - 1;
highvalue = (highcount - highnum) * high;
highmask = true;
}
// 如果前后端开关都打开,表示都找到了对应位置,就退出循环
if (lowmask && highmask)
break;
}
// 如果 lowindex 大于 highindex,表示没有要处理的元素,则返回
if (lowindex > highindex)
return;
// 计算领域内的像素值总和
float tmpsum = (float)(lowvalue + highvalue);
for (int k = lowindex; k <= highindex; k++)
tmpsum += k * histogram1[k];
// 计算平均值
mean = tmpsum / (statistic - lownum - highnum);
// 指定当前圆形模版所在位置
curtplptr = radtpl.tplData;
// 扫描圆形模版范围内的每个输入图像的像素点
for (int j = 0; j < radtpl.count; j++) {
// 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个
// 下标的数组表示一个点,所以使用当前模版位置的指针加一操作
dx = dstc + *(curtplptr++);
dy = dstr + *(curtplptr++);
            // Compute the array index from dx and dy (no extra row offset is
            // needed here, since this kernel handles one pixel per thread).
            index = dx + dy * inimg.imgMeta.width;
// 如果没有越界,则分别处理当前列的相邻的符合条件的像素
// 给累加和累加平均值,累加次数相应加 1
if (dx >= 0 && dx < inimg.imgMeta.width &&
dy >= 0 && dy < inimg.imgMeta.height) {
atomicAdd(&sum[index], mean);
atomicAdd(&count[index], 1);
}
}
}
}
// Host member method: freckleFilter (generalized median-style filtering)
__host__ int FreckleFilter::freckleFilter(Image *inimg, Image *outimg)
{
// 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
// 开关错误检查,如果既不是开选择也不是闭选择,则返回错误
if (select != FRECKLE_OPEN && select != FRECKLE_CLOSE)
return INVALID_DATA;
int errcode; // 局部变量,错误码
// 将输入图像拷贝到 Device 内存中。
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// 将输出图像拷贝入 Device 内存。
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
// 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图
// 像的 ROI 子图像尺寸相同的图像。
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->roiX2 - inimg->roiX1,
inimg->roiY2 - inimg->roiY1);
// 如果创建图像也操作失败,则说明操作彻底失败,报错退出。
if (errcode != NO_ERROR)
return errcode;
}
// 提取输入图像的 ROI 子图像。
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// 提取输出图像的 ROI 子图像。
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一
if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
else
outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
else
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
// 定义模板 radtpl 用于获取圆形领域模板
Template *radtpl;
// 定义圆形模板的尺寸
dim3 radsize(this->radius * 2 + 1, this->radius * 2 + 1, 1);
// 通过模板工厂得到圆形领域模板
errcode = TemplateFactory::getTemplate(&radtpl, TF_SHAPE_CIRCLE,
radsize, NULL);
// 检查圆形模板是否为 NULL,如果为 NULL 直接报错返回。
if (errcode != NO_ERROR)
return errcode;
// 将模板拷贝到 Device 内存中
errcode = TemplateBasicOp::copyToCurrentDevice(radtpl);
if (errcode != NO_ERROR) {
// 放回 radtpl 模板
TemplateFactory::putTemplate(radtpl);
return errcode;
}
// 定义模板 archtpl 用于获取环形领域模板
Template *archtpl;
// 定义环形模板的尺寸
dim3 arcsize(this->radius * 2 + 1, (this->radius + 4) * 2 + 1, 1);
// 得到环形领域模板
errcode = TemplateFactory::getTemplate(&archtpl, TF_SHAPE_ARC,
arcsize, NULL);
// 检查环形模板是否为 NULL,如果为 NULL 报错返回。
if (errcode != NO_ERROR) {
// 放回 radtpl 模板
TemplateFactory::putTemplate(radtpl);
return errcode;
}
// 将模板拷贝到 Device 内存中
errcode = TemplateBasicOp::copyToCurrentDevice(archtpl);
if (errcode != NO_ERROR) {
// 放回模板
TemplateFactory::putTemplate(radtpl);
TemplateFactory::putTemplate(archtpl);
return errcode;
}
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
dim3 blocksize, gridsize1, gridsize2;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize1.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize1.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
gridsize2.x = gridsize1.x;
gridsize2.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
// 得到要处理的像素总个数
size_t datasize = outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height;
cudaError_t cuerrcode; // CUDA 调用返回的错误码。
// 定义 sum 全局变量指针,申请一个 outsubimgCud.imgMeta.width *
// outsubimgCud.imgMeta.height 的 float 型数组,用于存储每点像素平均值累加
// 总和。
float *sum;
// 定义 count 全局变量指针,申请一个 outsubimgCud.imgMeta.width *
// outsubimgCud.imgMeta.height 的 int 型数组,用于存储每点像素平均值累加
// 次数。
int *count;
// 定义局部变量,用于多份数据的一份申请
void *temp_dev;
// 在设备端申请内存,然后分配给各个变量
cuerrcode = cudaMalloc(
(void **)&temp_dev,
datasize * sizeof (float) + datasize * sizeof (int));
if (cuerrcode != cudaSuccess) {
// 放回模板
TemplateFactory::putTemplate(radtpl);
TemplateFactory::putTemplate(archtpl);
return CUDA_ERROR;
}
// 为变量分配内存
sum = (float *)temp_dev;
count = (int *)(sum + datasize);
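    // Layout of the single allocation: the first datasize floats hold the
    // per-pixel mean sums, immediately followed by datasize ints holding the
    // accumulation counts, i.e. [ sum[0..datasize) | count[0..datasize) ].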
// 初始化累加和的所有值为 0
cuerrcode = cudaMemset(sum, 0, datasize * sizeof (float));
if (cuerrcode != cudaSuccess) {
// 放回模板
TemplateFactory::putTemplate(radtpl);
TemplateFactory::putTemplate(archtpl);
// 释放累加和与累加次数的总空间
cudaFree(temp_dev);
return CUDA_ERROR;
}
// 初始化累加次数的所有值为 0
cuerrcode = cudaMemset(count, 0, datasize * sizeof (int));
if (cuerrcode != cudaSuccess) {
// 放回模板
TemplateFactory::putTemplate(radtpl);
TemplateFactory::putTemplate(archtpl);
// 释放累加和与累加次数的总空间
cudaFree(temp_dev);
return CUDA_ERROR;
}
if (method == FRECKLE_VAR_TH) {
// 若方法为方差阈值法,则调用相应方差阈值法的 Kernel 获得
// 输出图像的每点像素平均值累加总和与累加次数。
_freckleFilterByVarSumCountKer<<<gridsize1, blocksize>>>(
insubimgCud, *radtpl, *archtpl, this->varTh, sum, count);
} else if (method == FRECKLE_MATCH_ERRTH) {
// 若方法为相似度匹配法,则调用相应相似度匹配法的 Kernel 获得
// 输出图像的每点像素平均值累加总和与累加次数。
_freckleFilterByStrMscKer<<<gridsize2, blocksize>>>(
insubimgCud, *radtpl, *archtpl, this->matchErrTh, this->length,
this->radius, sum, count);
    } else {
        // Invalid method setting: release the templates and the scratch
        // buffer before reporting the error.
        TemplateFactory::putTemplate(radtpl);
        TemplateFactory::putTemplate(archtpl);
        cudaFree(temp_dev);
        return INVALID_DATA;
    }
// 检查核函数运行是否出错
if (cudaGetLastError() != cudaSuccess) {
// 放回模板
TemplateFactory::putTemplate(radtpl);
TemplateFactory::putTemplate(archtpl);
// 释放累加和与累加次数的总空间
cudaFree(temp_dev);
return CUDA_ERROR;
}
// 放回模板
TemplateFactory::putTemplate(radtpl);
TemplateFactory::putTemplate(archtpl);
// 调用 Kernel 函数实现给输出图像设定像素值。
_freckleFilterSetPixelKer<<<gridsize1, blocksize>>>(
insubimgCud, outsubimgCud, sum, count, this->select);
// 检查核函数运行是否出错
if (cudaGetLastError() != cudaSuccess) {
// 释放累加和与累加次数的总空间
cudaFree(temp_dev);
return CUDA_ERROR;
}
// 释放累加和与累加次数的总空间
cudaFree(temp_dev);
// 处理完毕,退出。
return NO_ERROR;
}
#include <ops/declarable/helpers/roll.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static void _CUDA_D rollKernelLinearStage1Dev(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift) {
auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
auto xEws = shape::elementWiseStride(xShapeInfo);
auto zEws = shape::elementWiseStride(zShapeInfo);
auto xOrder = shape::order(xShapeInfo);
auto zOrder = shape::order(zShapeInfo);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) {
int sourceIndex = fullLength - actualShift + i;
auto eA = x[sourceIndex * xEws];
auto eB = x[i * xEws];
z[i * zEws] = eA;
z[sourceIndex * zEws] = eB;
}
} else {
for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) {
int sourceIndex = fullLength - actualShift + i;
auto xOffsetA = shape::getIndexOffset(i, xShapeInfo);
auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo);
auto zOffsetA = shape::getIndexOffset(i, zShapeInfo);
auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo);
auto eA = x[xOffsetA];
auto eB = x[xOffsetB];
z[zOffsetA] = eB;
z[zOffsetB] = eA;
}
}
}
template <typename T>
static void _CUDA_G rollKernelLinearStage1(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift) {
rollKernelLinearStage1Dev<T>(vx, xShapeInfo, vz, zShapeInfo, fullLength, actualShift);
}
template <typename T>
static void _CUDA_G rollKernelLinearStage2(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift, int shiftCount) {
auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
auto xEws = shape::elementWiseStride(xShapeInfo);
auto zEws = shape::elementWiseStride(zShapeInfo);
auto xOrder = shape::order(xShapeInfo);
auto zOrder = shape::order(zShapeInfo);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int count = 1; count < shiftCount; ++count) {
for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) {
int destinationIndex = fullLength - (count + 1) * actualShift + i;
int sourceIndex = fullLength - count * actualShift + i;
auto eA = x[sourceIndex * xEws];
auto eB = x[destinationIndex * xEws];
z[destinationIndex * zEws] = eA;
z[sourceIndex * zEws] = eB;
}
__syncthreads();
}
} else {
for (int count = 1; count < shiftCount; ++count) {
for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) {
int destinationIndex = fullLength - (count + 1) * actualShift + i;
int sourceIndex = fullLength - count * actualShift + i;
auto xOffsetA = shape::getIndexOffset(destinationIndex, xShapeInfo);
auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo);
auto zOffsetA = shape::getIndexOffset(destinationIndex, zShapeInfo);
auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo);
auto eA = x[xOffsetA];
auto eB = x[xOffsetB];
z[zOffsetA] = eB;
z[zOffsetB] = eA;
}
__syncthreads();
}
}
}
template <typename T>
static void _CUDA_G rollKernelLinearStage3(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift, int remainShift) {
auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
auto xEws = shape::elementWiseStride(xShapeInfo);
auto zEws = shape::elementWiseStride(zShapeInfo);
auto xOrder = shape::order(xShapeInfo);
auto zOrder = shape::order(zShapeInfo);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int i = tid ; i < actualShift; i += blockDim.x * gridDim.x) {
int remainIdx = i + actualShift;
int sourceIndex = remainIdx + remainShift;
auto eA = x[sourceIndex * xEws];
auto eB = x[remainIdx * xEws];
z[remainIdx * zEws] = eA;
z[sourceIndex * zEws] = eB;
}
} else {
for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) {
int remainIdx = i + actualShift;
int sourceIndex = remainIdx + remainShift;
auto xOffsetA = shape::getIndexOffset(remainIdx, xShapeInfo);
auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo);
auto zOffsetA = shape::getIndexOffset(remainIdx, zShapeInfo);
auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo);
auto eA = x[xOffsetA];
auto eB = x[xOffsetB];
z[zOffsetA] = eB;
z[zOffsetB] = eA;
}
}
}
template <typename T>
static void _CUDA_D swapTadsKernel(void *vx, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong tadLength) {
auto x = reinterpret_cast<T*>(vx);
auto z = reinterpret_cast<T*>(vz);
auto zEws = shape::elementWiseStride(zShapeInfo);
auto zOrder = shape::order(zShapeInfo);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (zEws > 0) {
for (int e = threadIdx.x; e < tadLength; e += blockDim.x) {
auto eA = x[e * zEws];
auto eB = z[e * zEws];
x[e * zEws] = eB;
z[e * zEws] = eA;
}
} else {
for (int e = threadIdx.x; e < tadLength; e += blockDim.x) {
auto zOffset = shape::getIndexOffset(e, zShapeInfo);
auto eA = x[zOffset];
auto eB = z[zOffset];
x[zOffset] = eB;
z[zOffset] = eA;
}
}
}
template <typename T>
static void _CUDA_G rollKernelFullAnyDimensionStage1(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets, int numTads, Nd4jLong tadLength, int dim, Nd4jLong sizeAt, int theShift) {
auto x = reinterpret_cast<const T *>(vx);
auto z = reinterpret_cast<T *>(vz);
for (int e = blockIdx.x + theShift; e < sizeAt - theShift; e += gridDim.x) {
int sourceIndex = dim * sizeAt + e - theShift;
int targetIndex = dim * sizeAt + e;
swapTadsKernel<T>(z + xTadOffsets[sourceIndex], z + xTadOffsets[targetIndex], zTadShapeInfo, tadLength);
}
}
template <typename T>
static void _CUDA_G rollKernelFullAnyDimensionStage2(void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets, int numTads, Nd4jLong tadLength, int dim, Nd4jLong sizeAt, int theShift) {
auto x = reinterpret_cast<const T *>(vx);
auto z = reinterpret_cast<T *>(vz);
for (int e = blockIdx.x; e < theShift; e += gridDim.x) {
int sourceIndex = dim * sizeAt + sizeAt - theShift + e;
int targetIndex = dim * sizeAt + e;
swapTadsKernel<T>(z + zTadOffsets[sourceIndex], z + zTadOffsets[targetIndex], zTadShapeInfo, tadLength);
}
}
template <typename T>
static void rollFunctorFull_(NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace){
if (!inplace)
output->assign(input);
for (size_t i = 0; i < axes.size(); i++) {
int axe = axes[i];
if (axe == input->rankOf() - 1) { // last dimension
ResultSet listOfTensors = output->allTensorsAlongDimension({axe});
ResultSet listOfOutTensors = output->allTensorsAlongDimension({axe});
int fullLen = listOfTensors.size();
int theShift = shifts[i];
// if (theShift > 0) {
// theShift %= fullLen;
// }
// else {
// theShift -= fullLen * (theShift / fullLen - 1);
// }
for (int k = 0; k < fullLen; k++) {
rollFunctorLinear(output->getContext(), listOfTensors.at(k), listOfOutTensors.at(k), theShift, true);
}
} else {
std::vector<int> dims(input->rankOf() - axe - 1);
for (int i = 0; i < dims.size(); ++i)
dims[i] = axe + 1 + i;
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dims);
int numTads = packZ.numberOfTads();
int sizeAt = input->sizeAt(axe);
auto tadLength = shape::length(packZ.primaryShapeInfo());
int theShift = shifts[i];
// if (theShift > 0)
// theShift %= sizeAt;
// else
// theShift -= sizeAt * (theShift / sizeAt - 1);
if (theShift) {
for (int dim = 0; dim < numTads / sizeAt; ++dim) {
rollKernelFullAnyDimensionStage1<T><<<1, 256, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLength, dim, sizeAt, theShift);
rollKernelFullAnyDimensionStage2<T><<<1, 256, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLength, dim, sizeAt, theShift);
}
}
}
}
}
template <typename T>
static void rollFunctorLinear_(NDArray* input, NDArray* output, int shift, bool inplace){
if (!inplace)
output->assign(input);
auto fullLen = input->lengthOf();
int actualShift = shift; // % fullLen; // shift already non-negative then
if (actualShift < 0) {
actualShift -= fullLen * (actualShift / fullLen - 1);
}
else
actualShift %= fullLen;
if (actualShift) {
int shiftCount = fullLen / actualShift - 1;
int remainShift = fullLen % actualShift;
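        // Worked example: fullLen = 7, actualShift = 3 gives shiftCount = 1 and
        // remainShift = 1. Stage 1 swaps the last 3 elements with the first 3:
        // [0 1 2 3 4 5 6] -> [4 5 6 3 0 1 2]; stage 2's inner loop runs
        // shiftCount - 1 = 0 times; stage 3 then swaps elements 3..5 with
        // 4..6 one step at a time, yielding the rolled result [4 5 6 0 1 2 3].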
// stage 1) swap last actualShift elements with first ones.
rollKernelLinearStage1<T><<<1, 1, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift);
        // stage 2) keep swapping the actualShift-sized block down through the remaining full blocks (the kernel's inner loop runs shiftCount - 1 times).
rollKernelLinearStage2<T><<<1, 1, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift, shiftCount);
// FIXME: no parallelism here :(
        // stage 3) swap the remainder of the items.
if (remainShift && shiftCount)
rollKernelLinearStage3<T><<<1, 1, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift, remainShift);
}
}
void rollFunctorFull(sd::LaunchContext * context, NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace){
input->syncToDevice();
BUILD_SINGLE_SELECTOR(input->dataType(), rollFunctorFull_, (input, output, shifts, axes, inplace), LIBND4J_TYPES);
output->tickWriteDevice();
}
void rollFunctorLinear(sd::LaunchContext * context, NDArray* input, NDArray* output, int shift, bool inplace){
input->syncToDevice();
BUILD_SINGLE_SELECTOR(input->dataType(), rollFunctorLinear_, (input, output, shift, inplace), LIBND4J_TYPES);
output->tickWriteDevice();
}
BUILD_SINGLE_TEMPLATE(template void rollFunctorLinear_, (NDArray* input, NDArray* output, int shift, bool inplace), LIBND4J_TYPES);
BUILD_SINGLE_TEMPLATE(template void rollFunctorFull_, (NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace), LIBND4J_TYPES);
}
}
}
/**
 * @file TopKSort.cu
* @author Minggang Yu <miyu@mapd.com>
* @brief Top-k sorting on streaming top-k heaps on VRAM
*
* Copyright (c) 2017 MapD Technologies, Inc. All rights reserved.
*/
#include "BufferEntryUtils.h"
#include "GpuMemUtils.h"
#include "ResultSetBufferAccessors.h"
#include "SortUtils.cuh"
#include "StreamingTopN.h"
#include "TopKSort.h"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/partition.h>
#include <thrust/sort.h>
#include <cuda.h>
CUstream getQueryEngineCudaStreamForDevice(int device_num);
#define checkCudaErrors(err) CHECK_EQ(err, CUDA_SUCCESS)
#include <iostream>
template <class K, class I = int32_t>
struct is_taken_entry {
is_taken_entry(const int8_t* buff, const size_t stride)
: buff_ptr(buff), key_stride(stride) {}
__host__ __device__ bool operator()(const I index) {
return !is_empty_entry<K>(static_cast<size_t>(index), buff_ptr, key_stride);
}
const int8_t* buff_ptr;
const size_t key_stride;
};
template <class K, class I = int32_t>
struct is_null_order_entry {
typedef I argument_type;
is_null_order_entry(const int8_t* base, const size_t stride, const int64_t nul)
: oe_base(base), oe_stride(stride), null_val(nul) {}
__host__ __device__ bool operator()(const I index) {
const auto oe_val = *reinterpret_cast<const K*>(oe_base + index * oe_stride);
switch (sizeof(K)) {
case 4:
return *reinterpret_cast<const int32_t*>(&oe_val) ==
static_cast<int32_t>(null_val);
case 8:
return *reinterpret_cast<const int64_t*>(&oe_val) == null_val;
default:
return false;
}
}
const int8_t* oe_base;
const size_t oe_stride;
const int64_t null_val;
};
template <typename ForwardIterator>
ForwardIterator partition_by_null(ForwardIterator first,
ForwardIterator last,
const int64_t null_val,
const bool nulls_first,
const int8_t* rows_ptr,
const GroupByBufferLayoutInfo& layout) {
if (nulls_first) {
return (layout.col_bytes == 4)
? thrust::partition(
first,
last,
is_null_order_entry<int32_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val))
: thrust::partition(
first,
last,
is_null_order_entry<int64_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val));
} else {
return (layout.col_bytes == 4)
? thrust::partition(
first,
last,
thrust::not1(is_null_order_entry<int32_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val)))
: thrust::partition(
first,
last,
thrust::not1(is_null_order_entry<int64_t>(
rows_ptr + layout.col_off, layout.row_bytes, null_val)));
}
}
template <class K, class I>
struct KeyFetcher {
KeyFetcher(K* out_base,
const int8_t* src_oe_base,
const size_t stride,
const I* indices)
: key_base(out_base), oe_base(src_oe_base), oe_stride(stride), idx_base(indices) {}
__host__ __device__ void operator()(const I index) {
key_base[index] = *reinterpret_cast<const K*>(oe_base + idx_base[index] * oe_stride);
}
K* key_base;
const int8_t* oe_base;
const size_t oe_stride;
const I* idx_base;
};
template <class K>
struct KeyReseter {
KeyReseter(int8_t* out_base, const size_t stride, const K emp_key)
: rows_base(out_base), key_stride(stride), empty_key(emp_key) {}
__host__ __device__ void operator()(const size_t index) {
K* key_ptr = reinterpret_cast<K*>(rows_base + index * key_stride);
*key_ptr = empty_key;
}
int8_t* rows_base;
const size_t key_stride;
const K empty_key;
};
// TODO(miyu) : switch to shared version in ResultSetSortImpl.cu.
template <class K, class I>
void collect_order_entry_column(thrust::device_ptr<K>& d_oe_col_buffer,
const int8_t* d_src_buffer,
const thrust::device_ptr<I>& d_idx_first,
const size_t idx_count,
const size_t oe_offset,
const size_t oe_stride,
ThrustAllocator& allocator,
const int device_id) {
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
thrust::for_each(thrust::cuda::par(allocator).on(qe_cuda_stream),
thrust::make_counting_iterator(size_t(0)),
thrust::make_counting_iterator(idx_count),
KeyFetcher<K, I>(thrust::raw_pointer_cast(d_oe_col_buffer),
d_src_buffer + oe_offset,
oe_stride,
thrust::raw_pointer_cast(d_idx_first)));
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
}
template <class K, class I>
void sort_indices_by_key(thrust::device_ptr<I> d_idx_first,
const size_t idx_count,
const thrust::device_ptr<K>& d_key_buffer,
const bool desc,
ThrustAllocator& allocator,
const int device_id) {
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
if (desc) {
thrust::sort_by_key(thrust::cuda::par(allocator).on(qe_cuda_stream),
d_key_buffer,
d_key_buffer + idx_count,
d_idx_first,
thrust::greater<K>());
} else {
thrust::sort_by_key(thrust::cuda::par(allocator).on(qe_cuda_stream),
d_key_buffer,
d_key_buffer + idx_count,
d_idx_first);
}
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
}
template <class I = int32_t>
void do_radix_sort(thrust::device_ptr<I> d_idx_first,
const size_t idx_count,
const int8_t* d_src_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
ThrustAllocator& allocator,
const int device_id) {
const auto& oe_type = layout.oe_target_info.sql_type;
if (oe_type.is_fp()) {
switch (layout.col_bytes) {
case 4: {
auto d_oe_buffer = get_device_ptr<float>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes,
allocator,
device_id);
sort_indices_by_key(
d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator, device_id);
break;
}
case 8: {
auto d_oe_buffer = get_device_ptr<double>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes,
allocator,
device_id);
sort_indices_by_key(
d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator, device_id);
break;
}
default:
CHECK(false);
}
return;
}
CHECK(oe_type.is_number() || oe_type.is_time());
switch (layout.col_bytes) {
case 4: {
auto d_oe_buffer = get_device_ptr<int32_t>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes,
allocator,
device_id);
sort_indices_by_key(
d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator, device_id);
break;
}
case 8: {
auto d_oe_buffer = get_device_ptr<int64_t>(idx_count, allocator);
collect_order_entry_column(d_oe_buffer,
d_src_buffer,
d_idx_first,
idx_count,
layout.col_off,
layout.row_bytes,
allocator,
device_id);
sort_indices_by_key(
d_idx_first, idx_count, d_oe_buffer, oe.is_desc, allocator, device_id);
break;
}
default:
CHECK(false);
}
}
template <class I>
struct RowFetcher {
RowFetcher(int8_t* out_base,
const int8_t* in_base,
const I* indices,
const size_t row_sz)
: dst_base(out_base), src_base(in_base), idx_base(indices), row_size(row_sz) {}
__host__ __device__ void operator()(const I index) {
memcpy(dst_base + index * row_size, src_base + idx_base[index] * row_size, row_size);
}
int8_t* dst_base;
const int8_t* src_base;
const I* idx_base;
const size_t row_size;
};
template <typename DerivedPolicy>
void reset_keys_in_row_buffer(
const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
int8_t* row_buffer,
const size_t key_width,
const size_t row_size,
const size_t first,
const size_t last) {
switch (key_width) {
case 4:
thrust::for_each(
exec,
thrust::make_counting_iterator(first),
thrust::make_counting_iterator(last),
KeyReseter<int32_t>(row_buffer, row_size, static_cast<int32_t>(EMPTY_KEY_32)));
break;
case 8:
thrust::for_each(
exec,
thrust::make_counting_iterator(first),
thrust::make_counting_iterator(last),
KeyReseter<int64_t>(row_buffer, row_size, static_cast<int64_t>(EMPTY_KEY_64)));
break;
default:
CHECK(false);
}
}
std::vector<int8_t> pop_n_rows_from_merged_heaps_gpu(
Data_Namespace::DataMgr* data_mgr,
const int64_t* dev_heaps,
const size_t heaps_size,
const size_t n,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t group_key_bytes,
const size_t thread_count,
const int device_id) {
const auto row_size = layout.row_bytes;
CHECK_EQ(heaps_size, streaming_top_n::get_heap_size(row_size, n, thread_count));
const int8_t* rows_ptr = reinterpret_cast<const int8_t*>(dev_heaps) +
streaming_top_n::get_rows_offset_of_heaps(n, thread_count);
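  // Implied buffer layout: dev_heaps begins with the per-thread streaming
  // top-n heaps, and at get_rows_offset_of_heaps(n, thread_count) the storage
  // for the n * thread_count candidate rows starts; rows_ptr points at that
  // row region, which the index-based partition/sort below operates on.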
const auto total_entry_count = n * thread_count;
ThrustAllocator thrust_allocator(data_mgr, device_id);
auto d_indices = get_device_ptr<int32_t>(total_entry_count, thrust_allocator);
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
thrust::sequence(thrust::cuda::par(thrust_allocator).on(qe_cuda_stream),
d_indices,
d_indices + total_entry_count);
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
auto separator =
(group_key_bytes == 4)
? thrust::partition(thrust::cuda::par(thrust_allocator).on(qe_cuda_stream),
d_indices,
d_indices + total_entry_count,
is_taken_entry<int32_t>(rows_ptr, row_size))
: thrust::partition(thrust::cuda::par(thrust_allocator).on(qe_cuda_stream),
d_indices,
d_indices + total_entry_count,
is_taken_entry<int64_t>(rows_ptr, row_size));
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
const size_t actual_entry_count = separator - d_indices;
if (!actual_entry_count) {
std::vector<int8_t> top_rows(row_size * n);
reset_keys_in_row_buffer(
thrust::host, &top_rows[0], layout.col_bytes, row_size, 0, n);
return top_rows;
}
const auto& oe_type = layout.oe_target_info.sql_type;
if (oe_type.get_notnull()) {
do_radix_sort(
d_indices, actual_entry_count, rows_ptr, oe, layout, thrust_allocator, device_id);
} else {
auto separator = partition_by_null(d_indices,
d_indices + actual_entry_count,
null_val_bit_pattern(oe_type, false),
oe.nulls_first,
rows_ptr,
layout);
if (oe.nulls_first) {
const size_t null_count = separator - d_indices;
if (null_count < actual_entry_count) {
do_radix_sort(separator,
actual_entry_count - null_count,
rows_ptr,
oe,
layout,
thrust_allocator,
device_id);
}
} else {
const size_t nonnull_count = separator - d_indices;
if (nonnull_count > 0) {
do_radix_sort(
d_indices, nonnull_count, rows_ptr, oe, layout, thrust_allocator, device_id);
}
}
}
const auto final_entry_count = std::min(n, actual_entry_count);
auto d_top_rows = get_device_ptr<int8_t>(row_size * n, thrust_allocator);
thrust::for_each(thrust::cuda::par(thrust_allocator).on(qe_cuda_stream),
thrust::make_counting_iterator(size_t(0)),
thrust::make_counting_iterator(final_entry_count),
RowFetcher<int32_t>(thrust::raw_pointer_cast(d_top_rows),
rows_ptr,
thrust::raw_pointer_cast(d_indices),
row_size));
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
if (final_entry_count < n) {
reset_keys_in_row_buffer(thrust::cuda::par(thrust_allocator).on(qe_cuda_stream),
thrust::raw_pointer_cast(d_top_rows),
layout.col_bytes,
row_size,
final_entry_count,
n);
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
}
std::vector<int8_t> top_rows(row_size * n);
thrust::copy(d_top_rows, d_top_rows + row_size * n, top_rows.begin());
return top_rows;
}
// Headers required by the cycle counter, the cuDNN benchmark and the host utilities below.
#include <cudnn.h>
#include <cuda_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <unistd.h>
#if defined (__i386__)
static __inline__ unsigned long long GetCycleCount(void)
{
unsigned long long int x;
__asm__ volatile ("rdtsc":"=A"(x));
return x;
}
#elif defined (__x86_64__)
static __inline__ unsigned long long GetCycleCount(void)
{
unsigned hi, lo;
__asm__ volatile("rdtsc":"=a"(lo), "=d"(hi));
return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
}
#endif
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// #define C 1024
// #define K 1024
// #define H 448
// #define W 448
// #define batch_size 1
// #define kernel_size 15
// #define stride 2
// #define padding 3
/*
To run this program:
export PATH=/usr/local/cuda-10.1/bin:/usr/local/cuda-10.1/NsightCompute-2019.1:/usr/local/cuda-10.1/nvvm/bin${PATH:+:${PATH}} &&
export LD_LIBRARY_PATH=/usr/local/cuda-10.1/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} &&
make &&
./cudnn_conv
*/
float conv(int C, int K, int H, int W, int batch_size, int kernel_size, int stride, int padding, int times=1000)
{
srand((unsigned)time(NULL));
auto format = CUDNN_TENSOR_NHWC;
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/format,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/batch_size,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W));
cudnnTensorDescriptor_t output_descriptor;
size_t H_out = (H + 2 * padding - kernel_size) / stride + 1;
size_t W_out = (W + 2 * padding - kernel_size) / stride + 1;
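  // Standard convolution output size: out = (in + 2*padding - kernel_size) / stride + 1.
  // E.g. for the first arg_lst entry (H = W = 448, kernel_size = 7, stride = 2,
  // padding = 3): H_out = W_out = (448 + 6 - 7) / 2 + 1 = 224.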
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/format,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/batch_size,
/*channels=*/K,
/*image_height=*/H_out,
/*image_width=*/W_out));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/format,
/*out_channels=*/K,
/*in_channels=*/C,
/*kernel_height=*/kernel_size,
/*kernel_width=*/kernel_size));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/padding,
/*pad_width=*/padding,
/*vertical_stride=*/stride,
                                             /*horizontal_stride=*/stride,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&convolution_algorithm));
size_t workspace_bytes = 0;
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm/*CUDNN_CONVOLUTION_FWD_ALGO_DIRECT*/,
&workspace_bytes));
std::cerr << "Workspace size: " << (float(workspace_bytes) / 1048576.0) << "MB"
<< std::endl;
void *d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_bytes);
size_t image_bytes = batch_size * C * H * W * sizeof(float);
float *d_input{nullptr};
cudaMalloc(&d_input, image_bytes);
float *h_input{nullptr};
h_input = (float*)malloc(image_bytes);
for(int i=0; i < batch_size * C * H * W; ++i)
{
*(h_input + i) = (float(rand()) - (RAND_MAX >> 1)) / RAND_MAX;
}
cudaMemcpy(d_input, h_input, image_bytes, cudaMemcpyHostToDevice);
size_t output_bytes = batch_size * K * H_out * W_out * sizeof(float);
float *d_output{nullptr};
cudaMalloc(&d_output, output_bytes);
cudaMemset(d_output, 0, output_bytes);
float *h_output{nullptr};
h_output = (float*)malloc(output_bytes);
size_t filter_bytes = K * C * kernel_size * kernel_size * sizeof(float);
float *d_filter{nullptr};
cudaMalloc(&d_filter, filter_bytes);
float *h_filter{nullptr};
h_filter = (float*)malloc(filter_bytes);
for(int i=0; i < K * C * kernel_size * kernel_size; ++i)
{
*(h_filter + i) = (float(rand()) - (RAND_MAX >> 1)) / RAND_MAX;
}
cudaMemcpy(d_filter, h_filter, filter_bytes, cudaMemcpyHostToDevice);
const float alpha = 1, beta = 0;
auto beg = (unsigned long long)GetCycleCount();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float sum = 0.0;
for(int i = 0; i < times + 1; ++i)
{
cudaEventRecord(start, 0);
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_filter,
convolution_descriptor,
convolution_algorithm/*CUDNN_CONVOLUTION_FWD_ALGO_DIRECT*/,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
if (i > 0)
{
sum += elapsed;
}
}
auto end = (unsigned long long)GetCycleCount();
cudaMemcpy(h_output, d_output, output_bytes, cudaMemcpyDeviceToHost);
free(h_input);
free(h_filter);
free(h_output);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_filter);
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
return sum;//float(end - beg);
}
int main(int argc, char const* argv[])
{
int arg_lst[][8] = {
//{256, 256, 14, 14, 3, 512, 1, 1},
// {1, 1024, 7, 7, 3, 1024, 1, 1},
// {8, 1024, 7, 7, 3, 1024, 1, 1},
// {64, 1024, 7, 7, 3, 1024, 1, 1},
// {256, 1024, 7, 7, 3, 1024, 1, 1},
// {1, 1024, 14, 14, 1, 512, 1, 0},
// {1, 256, 28, 28, 3, 512, 1, 1},
// {1, 512, 28, 28, 1, 256, 1, 0},
// {1, 128, 56, 56, 3, 256, 1, 1},
// {1, 192, 56, 56, 1, 128, 1, 0},
// {1, 64, 112, 112, 3, 192, 1, 1},
// {1, 3, 448, 448, 7, 64, 2, 3}
{1, 3, 448, 448, 7, 64, 2, 3},
{1, 64, 112, 112, 3, 192, 1, 1},
{1, 192, 56, 56, 1, 128, 1, 0},
{1, 128, 56, 56, 3, 256, 1, 1},
{1, 256, 56, 56, 1, 256, 1, 0},
{1, 256, 56, 56, 3, 512, 1, 1},
{1, 512, 28, 28, 1, 256, 1, 0},
{1, 256, 28, 28, 3, 512, 1, 1},
{1, 512, 28, 28, 1, 512, 1, 0}, // conv15 8
{1, 512, 28, 28, 3, 1024, 1, 1}, // conv16 9
{1, 1024, 14, 14, 1, 512, 1, 0}, // conv17 10
{1, 512, 14, 14, 3, 1024, 1, 1}, // conv18 11
{1, 1024, 14, 14, 3, 1024, 1, 1}, // conv21 12
{1, 1024, 14, 14, 3, 1024, 2, 1}, // conv22 13
{1, 1024, 7, 7, 3, 1024, 1, 1}, // conv23 14
};
for(int i=0; i < 15; ++i)
{
int batch_size = arg_lst[i][0];
int C = arg_lst[i][1];
int H = arg_lst[i][2];
int W = arg_lst[i][3];
int kernel_size = arg_lst[i][4];
int K = arg_lst[i][5];
int stride = arg_lst[i][6];
int padding = arg_lst[i][7];
int times = 10;
if(argc > 1)
{
if(strcmp(argv[1], "2")==0)
{
times *= 10;
}
else if(strcmp(argv[1], "3")==0)
{
times *= 100;
}
}
auto beg = (unsigned long long)GetCycleCount();
usleep(10);
auto end = (unsigned long long)GetCycleCount();
auto duration = float(end - beg) / 10;
std::cout << times << std::endl;
auto cost = conv(C, K, H, W, batch_size, kernel_size, stride, padding, times);
        std::cout << "(" << batch_size << "," << H << "," << W << "," << C << "," << kernel_size << "," << K << "," << stride << "," << padding << ")"
                  << " Use time " << cost / times << "ms" << std::endl;
}
return 0;
}
/*
Copyright (C) 2016 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/mexutils.h"
#include "bits/datamex.hpp"
#include "bits/data.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <sys/un.h>
#include <sys/socket.h>
#include <memory>
#include <vector>
#include <algorithm>
#include <sstream>
/**
\file vl_tmove.cu
The `vl_tmove` utility implements an efficient mechanism to exchange
tensor data between different MATLAB processes. Presently, it is
limited to processes running on the same host, but future extensions
can integrate networked environments. Even limited to a single
host, this functionality is important because MATLAB multiple GPU
support uses different processes for different GPUs.
The key idea is to implement a reduction tree, in which each MATLAB
process is connected to a parent and a number of children. When a tensor
needs to be accumulated, a node receives copies from the children,
sums them with its local copy, and sends the result to the parent.
Eventually, the data flow reaches the root of the tree and the accumulated
tensor is sent back towards the leaves. This communication mechanism
is designed to reduce the amount of data transfers from O(n^2)
for the trivial n-to-n communication of tensor copies to O(n).
A second strategy used to significantly improve the speed is to allow
the transfer of tensor data to proceed in the background, while MATLAB is busy
running the rest of the network. This is achieved by isolating
all communications in a supervisory thread.
# Notable facts
* Communications between threads use UNIX-domain sockets (extensible
to INet sockets in the future). These are used to send lightweight
coordination messages.
* Data passing on local machines uses a shared memory map between
processes. The shared memory contains a copy of each tensor for each
process. GPU tensors may either be allocated internally
by `vl_tmove` (in which case MATLAB may forget them)
or may remember pointers to MATLAB's memory (inplace).
The latter is slightly unsafe, but much faster as it saves several copies.
In any case, `vl_tmove` allocates a GPU buffer as large as
the largest tensor as scratch space (and for direct GPU communication).
* The supervisory and main threads collaborate through lock-less
synchronization for speed. This is possible because at any point in time
each tensor is managed by only one thread depending on its state.
Thus a tensor moves from one thread to the other simply by swapping
its state. There is, however, a condition variable to allow the
main thread to wait for the supervisory thread when needed.
* The supervisory thread waits by calling `poll()` on a number of sockets.
However, sometimes the main thread needs to signal the supervisor too.
This is realized by having a dummy `pipe()` between the two
threads.
**/
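/* A minimal, self-contained sketch of how the reduction tree described
   above can be derived from the lab indexes alone (the same bit
   manipulation is used later in Supervisor::init()): the parent of a lab
   is obtained by clearing its lowest set bit, and its children by setting
   bits below that one. The block is disabled and purely illustrative; the
   pool size mentioned in the trailing comment is hypothetical. */
#if 0
#include <strings.h>   // ffs()
#include <cstdio>

static void sketchReductionTree(int numLabs)
{
  for (int lab = 0 ; lab < numLabs ; ++lab) {
    int bit = ffs(lab) - 1 ;
    if (bit == -1) { bit = 31 ; }                 // lab 0 (the root) has no set bits
    int parent = (int)(lab & ~(1u << bit)) ;      // clear the lowest set bit
    std::printf("lab %d: parent ", lab) ;
    if (parent == lab) { std::printf("none") ; }
    else { std::printf("%d", parent) ; }
    std::printf(", children:") ;
    for (int k = 0 ; k < bit ; ++k) {             // set each bit below `bit`
      int child = lab | (1 << k) ;
      if (child < numLabs) { std::printf(" %d", child) ; }
    }
    std::printf("\n") ;
  }
}
// For a hypothetical pool of 8 labs this prints the tree
//   0 -> {1,2,4}, 2 -> {3}, 4 -> {5,6}, 6 -> {7},
// so each tensor travels up the tree once (accumulation) and down once
// (broadcast): O(n) transfers instead of O(n^2) for all-to-all copies.
#endif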
/* ---------------------------------------------------------------- */
/* Globals */
/* ---------------------------------------------------------------- */
enum {
IN_COMMAND, IN_END
} ;
enum {
OUT_RESULT, OUT_END
} ;
/* option codes */
enum {
opt_inplace = 0,
opt_verbose,
opt_prefix,
} ;
/* options */
VLMXOption options [] = {
{"prefix", 1, opt_prefix },
{"InPlace", 0, opt_inplace },
{"Verbose", 0, opt_verbose },
{0, 0, 0 }
} ;
int verbosity = 0 ;
vl::MexContext context ;
class SharedTensorDescriptor ;
class SharedTensorSpace ;
class ProcessPool ;
/* ---------------------------------------------------------------- */
/* Utility */
/* ---------------------------------------------------------------- */
static VLMXErrorCode vlmxParseDataType(vl::DataType & dataType, mxArray const * arg)
{
if (vlmxCompareToStringI(arg, "double") == 0) {
dataType = vl::VLDT_Double ;
return VLMXE_Success ;
} else if (vlmxCompareToStringI(arg, "single") == 0) {
dataType = vl::VLDT_Float ;
return VLMXE_Success ;
} else {
return VLMXE_IllegalArgument ;
}
}
static VLMXErrorCode vlmxParseDeviceType(vl::DeviceType & deviceType, mxArray const * arg)
{
if (vlmxCompareToStringI(arg, "cpu") == 0) {
deviceType = vl::VLDT_CPU ;
return VLMXE_Success ;
} else if (vlmxCompareToStringI(arg, "gpu") == 0) {
deviceType = vl::VLDT_GPU ;
return VLMXE_Success ;
} else {
return VLMXE_IllegalArgument ;
}
}
static VLMXErrorCode vlmxParseString(std::string & name, mxArray const * arg)
{
char str [256] ;
if (!vlmxIsString(arg, -1)) {
return VLMXE_IllegalArgument ;
}
mxGetString(arg, str, sizeof(str)) ;
name = str ;
return VLMXE_Success ;
}
static VLMXErrorCode vlmxParseTensorShape(vl::TensorShape & shape, mxArray const * arg)
{
size_t dimensions [32] ;
if (!vlmxIsVector(arg, -1) || !vlmxIsPlain(arg)) {
return VLMXE_IllegalArgument ;
}
int nd = mxGetNumberOfElements(arg) ;
for (int k = 0 ; k < nd ; ++k) { dimensions[k] = (size_t)mxGetPr(arg)[k] ; }
shape.setDimensions(dimensions, nd) ;
return VLMXE_Success ;
}
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
class Logger
{
public:
Logger() ;
~Logger() ;
std::ostringstream & getStream() ;
protected:
std::ostringstream stringStream ;
private:
// Disable
Logger(const Logger&) ;
Logger& operator= (const Logger&) ;
} ;
}
vl::Logger::Logger()
{ }
vl::Logger::~Logger()
{
printf("%s\n", stringStream.str().c_str()) ;
//fflush(stdout) ;
}
std::ostringstream &
vl::Logger::getStream()
{
return stringStream ;
}
#define LOGERROR \
vl::Logger().getStream() \
<<"[error]"<<__func__<<"::lab "<<lab<<"::"
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::lab "<<lab<<"::"
/* ---------------------------------------------------------------- */
/* SharedTensorDescriptor */
/* ---------------------------------------------------------------- */
#pragma mark -
// Describe one of the shared tensors: shape, data type,
// and device type.
class SharedTensorDescriptor
{
public:
SharedTensorDescriptor() ;
~SharedTensorDescriptor() ;
void init(vl::DeviceType deviceType,
vl::DataType dataType,
vl::TensorShape const & shape) ;
void finalize() ;
size_t getSizeInBytes() const ;
SharedTensorDescriptor & operator=(SharedTensorDescriptor const & tensor) ;
// Data.
vl::DeviceType deviceType ;
vl::DataType dataType ;
vl::TensorShape shape ;
} ;
SharedTensorDescriptor::SharedTensorDescriptor()
{ }
SharedTensorDescriptor::~SharedTensorDescriptor()
{
finalize() ;
}
SharedTensorDescriptor &
SharedTensorDescriptor::operator=(SharedTensorDescriptor const & tensor)
{
deviceType = tensor.deviceType ;
dataType = tensor.dataType ;
shape = tensor.shape ;
return *this ;
}
void SharedTensorDescriptor::init(vl::DeviceType newDeviceType,
vl::DataType newDataType,
vl::TensorShape const & newShape)
{
assert(newDeviceType == vl::VLDT_CPU || newDeviceType == vl::VLDT_GPU) ;
assert(newDataType == vl::VLDT_Float || newDataType == vl::VLDT_Double) ;
deviceType = newDeviceType ;
dataType = newDataType ;
shape = newShape ;
}
void SharedTensorDescriptor::finalize()
{ }
size_t SharedTensorDescriptor::getSizeInBytes() const
{
return shape.getNumElements() * getDataTypeSizeInBytes(dataType) ;
}
/* ---------------------------------------------------------------- */
/* SharedTensorSpace */
/* ---------------------------------------------------------------- */
#pragma mark -
// SharedTensorSpace holds a list of tensors that can be accumulated
// between different processes.
//
// It encapsulates in particular: the shared memory map,
// the GPU dispatch buffer, and, possibly, for non-inplace operations
// and GPU arrays, a copy of the GPU data.
//
// This class is not thread-safe, so the MATLAB thread and the flow
// supervisor thread must properly synchronize when accessing it.
class SharedTensorSpace
{
public:
SharedTensorSpace() ;
~SharedTensorSpace() ;
vl::ErrorCode mexInit(mxArray const *mexDescriptor) ;
void finalize() ;
vl::ErrorCode attach(std::string const & prefix, int lab, int numLabs) ;
vl::ErrorCode attachPeer(int lab) ;
void mexPrint() const ;
void dump() const ;
private:
bool initialized ;
int lab ;
int numLabs ;
enum SharedTensorState {
ready,
accumulateChildren,
waitParent,
waitChildren,
} state ;
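// Per-transaction life cycle of a tensor, as driven by beginTransaction()
// and the handle*() methods of the supervisory thread:
//
//   ready -> accumulateChildren  (push(): sum the updates received from children)
//         -> waitParent          (all children summed; notify the parent)
//         -> waitChildren        (parent's result copied; broadcast to children)
//         -> ready               (all children done; pull() may read the result)
//
// The root has no parent, so it passes through waitParent immediately.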
// This class represents an instance of a shared tensor. It contains
// its state@transaction pair and information on its memory location.
struct SharedTensorInstance
{
std::string name ;
SharedTensorDescriptor descriptor ;
SharedTensorState state ;
size_t transaction ;
size_t finalTransaction ;
int numChildrenToAccumulate ;
size_t memoryMapOffset ;
void * cpuMemory ;
void * gpuMemory ;
bool gpuMemoryIsOwned ;
#if ENABLE_GPU
cudaEvent_t gpuEvent ;
bool gpuEventIsInitialized ;
#endif
bool operator==(std::string const & theName) { return name == theName ; }
SharedTensorInstance()
: state(ready), transaction(0), finalTransaction((size_t)-1),
cpuMemory(NULL), gpuMemory(NULL), gpuMemoryIsOwned(false)
#if ENABLE_GPU
, gpuEvent(0), gpuEventIsInitialized(false)
#endif
{ }
} ;
typedef std::vector<SharedTensorInstance> tensors_t ;
tensors_t tensors ;
struct SharedTensorPeerInstance
{
int lab ;
SharedTensorState state ;
size_t transaction ;
size_t finalTransaction ;
void *mappedCpuMemory ;
void *mappedGpuMemory ;
bool accumulated ;
bool operator==(int theLab) { return lab == theLab ; }
SharedTensorPeerInstance()
: lab(-1), state(ready), transaction(0),
mappedCpuMemory(NULL), mappedGpuMemory(NULL), accumulated(false),
finalTransaction((size_t)-1) { }
} ;
typedef std::vector<std::vector<SharedTensorPeerInstance> > peerTensors_t ;
peerTensors_t peerTensors ;
SharedTensorPeerInstance & getPeerTensor(int tensorIndex, int lab) ;
// Shared CPU memory
void * memoryMap ;
size_t memoryMapSize ;
size_t memoryMapLabStride ;
std::string memoryMapName ;
int memoryMapFD ;
bool memoryMapIsCudaRegistered ;
// Additional GPU memory
void * gpuDispatchMemory ;
int gpuDevice ;
#if ENABLE_GPU
// Todo: one for each mapped peer dispatch memory
cudaIpcMemHandle_t gpuMemoryHandle ;
cudaStream_t gpuHelperStream ;
cudaEvent_t gpuHelperEvent ;
bool gpuHelperStreamInitialized ;
bool gpuHelperEventInitialized ;
#endif
friend class ProcessPool ;
} ;
SharedTensorSpace::SharedTensorSpace()
: initialized(false),
memoryMapFD(-1),
memoryMap(NULL),
memoryMapIsCudaRegistered(false),
memoryMapSize(0),
gpuDevice(-1),
gpuDispatchMemory(NULL)
#if ENABLE_GPU
, gpuHelperStream(0),
gpuHelperStreamInitialized(false),
gpuHelperEventInitialized(false)
#endif
{ }
SharedTensorSpace::~SharedTensorSpace()
{
finalize() ;
}
// This function initializes the SharedTensorSpace using
// a MATLAB cell array as descriptor for the space content.
// It can throw a MEX error, so it must be called from
// the MATLAB thread.
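// Each row of the cell array describes one tensor as
// {DATATYPE, SHAPE, NAME[, DEVICETYPE]}; for instance (values are
// purely illustrative):
//
//   { 'single', [100 100 3 16], 'x0', 'cpu' ;
//     'double', [4096 1000], 'w5', 'gpu' }
//
// DATATYPE is 'single' or 'double', SHAPE a plain vector of dimensions,
// NAME a string, and the optional DEVICETYPE is 'cpu' (default) or 'gpu'.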
vl::ErrorCode SharedTensorSpace::mexInit(mxArray const *descriptor)
{
assert(descriptor) ;
if (initialized) {
mexErrMsgTxt("Already initialized. Use 'reset' to clear.") ;
}
lab = -1 ;
numLabs = 0 ;
memoryMapName = "" ;
memoryMapSize = 0 ;
memoryMapLabStride = 0 ;
// Parse tensor list
if (!mxIsCell(descriptor)) {
mexErrMsgTxt("DESCRIPTOR is not a cell array.") ;
}
if (mxGetNumberOfDimensions(descriptor) != 2) {
mexErrMsgTxt("DESCRIPTOR does not have two dimensions.") ;
}
if (mxGetN(descriptor) != 3 &&
mxGetN(descriptor) != 4) {
mexErrMsgTxt("DESCRIPTOR does not have three or four columns.") ;
}
size_t numTensors = mxGetM(descriptor) ;
size_t offset = 0 ;
size_t const alignFactor = 16 ;
bool useGPU = false ;
for (int i = 0 ; i < numTensors ; ++i) {
VLMXErrorCode error ;
vl::DeviceType deviceType = vl::VLDT_CPU ;
vl::DataType dataType ;
vl::TensorShape shape ;
std::string name ;
error = vlmxParseDataType(dataType, mxGetCell(descriptor, 0*numTensors + i)) ;
if (error != VLMXE_Success) {
vlmxError(error, "DESCRIPTOR{%d,1} is not a valid data type.", i+1) ;
}
error = vlmxParseTensorShape(shape, mxGetCell(descriptor, 1*numTensors + i)) ;
if (error != VLMXE_Success) {
vlmxError(error, "DESCRIPTOR{%d,2} is not a valid tensor shape.", i+1) ;
}
error = vlmxParseString(name, mxGetCell(descriptor, 2*numTensors + i)) ;
if (error != VLMXE_Success) {
vlmxError(error, "DESCRIPTOR{%d,3} is not a valid tensor name.", i+1) ;
}
if (mxGetN(descriptor) == 4) {
error = vlmxParseDeviceType(deviceType, mxGetCell(descriptor, 3*numTensors + i)) ;
if (error != VLMXE_Success) {
vlmxError(error, "DESCRIPTOR{%d,4} is not a valid device type name.", i+1) ;
}
}
if (deviceType == vl::VLDT_GPU) {
#if not defined(ENABLE_GPU)
vlmxError(VLMXE_IllegalArgument, "GPU support not compiled.") ;
#endif
useGPU = true ;
}
// Add the new tensor to the table.
{
SharedTensorInstance tensor ;
tensor.name = name ;
tensor.descriptor.init(deviceType, dataType, shape) ;
tensor.memoryMapOffset = offset ;
tensors.push_back(tensor) ;
offset +=
vl::divideAndRoundUp(tensor.descriptor.getSizeInBytes(), alignFactor) * alignFactor ;
if (verbosity >= 2) {
mexPrintf("[info] %s: registered tensor %s\n", __func__, name.c_str()) ;
}
}
}
// Size of the memory allocated for one lab (with a copy of all tensors).
memoryMapName = "/mcn" ;
size_t const pageSize = getpagesize() ;
memoryMapLabStride = vl::divideAndRoundUp(offset, pageSize) * pageSize ;
memoryMapSize = 0 ;
#if ENABLE_GPU
if (useGPU) {
cudaGetDevice(&gpuDevice) ; // to inform thread
LOG(2) << "current CUDA device: " << gpuDevice ;
}
#endif
initialized = true ;
return vl::VLE_Success ;
}
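// Resulting layout of the shared memory map (allocated later in attach()):
// each lab owns a contiguous slice of memoryMapLabStride bytes holding its
// copy of every tensor, with tensor offsets aligned to 16 bytes and the
// slice rounded up to the page size; the whole map spans
// memoryMapLabStride * numLabs bytes.
//
//   | lab 0: tensor 0 | tensor 1 | ... | pad | lab 1: tensor 0 | ... | pad | ...
//
// As a purely illustrative example, two float tensors of 1000 and 3000
// elements occupy offsets 0 and 4000 within a slice, and with a 4096-byte
// page the slice becomes 16384 bytes.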
// Get the peer tensor corresponding to a given
// tensor and process index.
SharedTensorSpace::SharedTensorPeerInstance &
SharedTensorSpace::getPeerTensor(int tensorIndex, int lab)
{
std::vector<SharedTensorPeerInstance>::iterator PT
= std::find(peerTensors[tensorIndex].begin(), peerTensors[tensorIndex].end(), lab) ;
assert(PT != peerTensors[tensorIndex].end()) ;
return *PT ;
}
/// Attach the shared space. This allocates the shared memory map
/// for inter-process data transfers containing all tensors,
/// and the GPU dispatch memory.
vl::ErrorCode SharedTensorSpace::attach(std::string const & prefix, int lab, int numLabs)
{
int error ;
this->lab = lab ;
this->numLabs = numLabs ;
// Create the memory map name from the prefix.
memoryMapName = std::string("/") + prefix ;
// The root lab deletes a pre-existing memory object, if any.
if (lab == 0) {
error = shm_unlink(memoryMapName.c_str()) ;
if (error == -1) {
switch (errno) {
case ENOENT:
// Fine, there wasn't such a memory map anyways.
break ;
default:
LOGERROR
<< "could not delete the stale memory map '"
<< memoryMapName.c_str()
<< "' because '" << strerror(errno) << '\'' ;
return vl::VLE_Unknown ;
}
}
}
// Open/create the shared memory file descriptor.
memoryMapSize = memoryMapLabStride * numLabs ;
memoryMapFD = shm_open(memoryMapName.c_str(),
(lab == 0 ? O_CREAT:0)| O_RDWR, S_IRUSR | S_IWUSR) ;
if (memoryMapFD == -1) {
LOGERROR << "shm_open() failed because " << strerror(errno) ;
close(memoryMapFD) ;
memoryMapFD = -1 ;
return vl::VLE_Unknown ;
}
// The root process sets the size of the shared memory.
if (lab == 0) {
if (ftruncate(memoryMapFD, memoryMapSize) == -1) {
LOGERROR << "truncate failed because " << strerror(errno) ;
return vl::VLE_OutOfMemory ;
}
}
// Map the memory.
memoryMap = mmap(0, memoryMapSize,
PROT_READ | PROT_WRITE, MAP_SHARED,
memoryMapFD, 0) ;
if (memoryMap == MAP_FAILED) {
LOGERROR << "mmap failed because " << strerror(errno) ;
memoryMap = NULL ;
close(memoryMapFD) ;
memoryMapFD = -1 ;
return vl::VLE_Unknown ;
}
memoryMapIsCudaRegistered = false ;
// The FD is not needed after mmap.
close(memoryMapFD) ;
memoryMapFD = -1 ;
// Associate memory to tensors.
#if ENABLE_GPU
size_t maxGPUTensorSize = 0 ;
#endif
for (int t = 0 ; t < tensors.size() ; ++t) {
tensors[t].cpuMemory = (char*)memoryMap
+ tensors[t].memoryMapOffset
+ lab * memoryMapLabStride ;
#if ENABLE_GPU
if (tensors[t].descriptor.deviceType == vl::VLDT_GPU) {
// Lazy allocation (to allow inplace operations).
tensors[t].gpuMemory = NULL ;
tensors[t].gpuMemoryIsOwned = false ;
maxGPUTensorSize = std::max(maxGPUTensorSize,
tensors[t].descriptor.getSizeInBytes()) ;
cudaError_t cerror = cudaEventCreate(&tensors[t].gpuEvent) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA could not create an event because '"
<< cudaGetErrorString(cerror) << '\'' ;
return vl::VLE_Cuda ;
}
tensors[t].gpuEventIsInitialized = true ;
}
#endif
}
#if ENABLE_GPU
if (maxGPUTensorSize > 0) {
cudaError_t cerror ;
cerror = cudaMalloc(&gpuDispatchMemory, maxGPUTensorSize) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "could not allocate GPU memory for dispatch because '"
<< cudaGetErrorString(cerror) << '\'' ;
gpuDispatchMemory = NULL ;
return vl::VLE_Cuda ;
}
// To parallelize memory transfers we use a separate CUDA stream.
cerror = cudaStreamCreateWithFlags(&gpuHelperStream, cudaStreamNonBlocking) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "could not create a CUDA stream because '"
<< cudaGetErrorString(cerror) << '\'' ;
return vl::VLE_Cuda ;
}
gpuHelperStreamInitialized = true ;
// Pin all shared host memory.
cerror = cudaHostRegister(memoryMap,
memoryMapSize,
cudaHostRegisterDefault) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA generated an error while pinning the shared host memory: '"
<< cudaGetErrorString(cerror) << '\'' ;
} else {
LOG(2) << "pinned shared memory" ;
memoryMapIsCudaRegistered = true ;
}
}
#endif
return vl::VLE_Success ;
}
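/* A minimal standalone sketch of the POSIX shared-memory protocol used by
   attach() above: the root creates and sizes the object, every process maps
   it, and the file descriptor can be closed as soon as mmap() succeeds.
   Disabled and purely illustrative; error handling is reduced to returning
   NULL and the name/size are supplied by the caller. */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstddef>

static void * attachSketch(char const * name, size_t size, bool isRoot)
{
  // The root creates the object and sets its size; the others just open it.
  int fd = shm_open(name, (isRoot ? O_CREAT : 0) | O_RDWR, S_IRUSR | S_IWUSR) ;
  if (fd == -1) { return NULL ; }
  if (isRoot && ftruncate(fd, size) == -1) { close(fd) ; return NULL ; }
  void * map = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) ;
  close(fd) ;  // the mapping remains valid after the descriptor is closed
  return (map == MAP_FAILED) ? NULL : map ;
}
#endif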
// attachPeer
vl::ErrorCode
SharedTensorSpace::attachPeer(int lab)
{
if (peerTensors.size() != tensors.size()) {
peerTensors.resize(tensors.size()) ;
}
for (int t = 0 ; t < tensors.size() ; ++t) {
SharedTensorPeerInstance peerTensor ;
peerTensor.lab = lab ;
peerTensor.state = SharedTensorSpace::ready ;
peerTensor.mappedCpuMemory = (char*)memoryMap
+ tensors[t].memoryMapOffset
+ lab * memoryMapLabStride ;
peerTensor.accumulated = false ;
peerTensors[t].push_back(peerTensor) ;
}
return vl::VLE_Success ;
}
// Destroy all resources
// 1) unmap and unlink shared memory map
// 2) ...
void SharedTensorSpace::finalize()
{
int error ;
initialized = false ;
#if ENABLE_GPU
if (memoryMap && memoryMapIsCudaRegistered) {
cudaHostUnregister(memoryMap) ;
}
// if (gpuHelperEventInitialized) {
// cudaEventDestroy(gpuHelperEvent) ;
// gpuHelperEventInitialized = false ;
// }
if (gpuHelperStreamInitialized) {
cudaStreamDestroy(gpuHelperStream) ;
gpuHelperStream = 0 ;
gpuHelperStreamInitialized = false ;
}
if (gpuDispatchMemory) {
cudaFree(gpuDispatchMemory) ;
gpuDispatchMemory = NULL ;
}
for (tensors_t::iterator T = tensors.begin() ;
T != tensors.end() ;
T++)
{
if (T->gpuMemory && T->gpuMemoryIsOwned) {
cudaFree(T->gpuMemory) ;
T->gpuMemory = NULL ;
T->gpuMemoryIsOwned = false ;
}
if (T->gpuEventIsInitialized) {
cudaEventDestroy(T->gpuEvent) ;
T->gpuEvent = 0 ;
T->gpuEventIsInitialized = false ;
}
}
gpuDevice = -1 ;
#endif
if (memoryMap) {
munmap(memoryMap, memoryMapSize) ;
memoryMap = NULL ;
}
if (memoryMapFD != -1) {
// This should have been closed right after mmap().
close(memoryMapFD) ;
memoryMapFD = -1 ;
}
error = shm_unlink(memoryMapName.c_str()) ;
if (error == -1 && errno == EACCES) {
LOGERROR << "Cannot clear the shared memory map due to a permission error." ;
}
tensors.clear() ;
numLabs = -1 ;
}
// For debugging
void SharedTensorSpace::dump() const
{
for (int tensorIndex = 0 ; tensorIndex < tensors.size() ; ++tensorIndex) {
SharedTensorInstance const & T = tensors[tensorIndex] ;
char const * stateString ;
switch (T.state) {
case ready: stateString="ready" ; break ;
case accumulateChildren: stateString="accumulateChildren" ; break ;
case waitParent: stateString="waitParent" ; break ;
case waitChildren: stateString="waitChildren" ; break ;
}
LOG(0)<<"Tensor " << T.name ;
LOG(0)<<"\tState: " << stateString ;
LOG(0)<<"\ttransaction: "<<T.transaction ;
if (peerTensors.size() > tensorIndex) {
for (int p = 0 ; p < peerTensors[tensorIndex].size() ; ++p) {
SharedTensorPeerInstance const & PT = peerTensors[tensorIndex][p] ;
switch (PT.state) {
case ready: stateString="ready" ; break ;
case accumulateChildren: stateString="accumulateChildren" ; break ;
case waitParent: stateString="waitParent" ; break ;
case waitChildren: stateString="waitChildren" ; break ;
}
LOG(0)<<"\tPeer on lab " << PT.lab << ": " << stateString;
LOG(0)<<"\t\ttransaction:" << PT.transaction ;
}
}
}
}
void SharedTensorSpace::mexPrint() const
{
mexPrintf("\tlab %d of %d\n", lab, numLabs) ;
mexPrintf("\tshared memory: '%s', %d bytes mapped at address: 0x%zx\n",
memoryMapName.c_str(),memoryMapSize,memoryMap) ;
for (int tensorIndex = 0 ; tensorIndex < tensors.size() ; ++tensorIndex) {
SharedTensorInstance const & T = tensors[tensorIndex] ;
mexPrintf("\tTensor '%s'\n", T.name.c_str()) ;
mexPrintf("\t\t[") ;
for (int k = 0 ; k < T.descriptor.shape.getNumDimensions() ; ++k) {
mexPrintf(" %d", T.descriptor.shape.getDimensions()[k]) ;
}
mexPrintf("] %s %s\n",
T.descriptor.dataType == vl::VLDT_Double?"double":"single",
T.descriptor.deviceType == vl::VLDT_CPU?"CPU":"GPU") ;
mexPrintf("\t\tCPU address: 0x%zx\n", T.cpuMemory) ;
mexPrintf("\t\tGPU address: 0x%zx\n", T.gpuMemory) ;
if (peerTensors.size() > tensorIndex) {
for (int p = 0 ; p < peerTensors[tensorIndex].size() ; ++p) {
SharedTensorPeerInstance const & PT = peerTensors[tensorIndex][p] ;
mexPrintf("\t\tPeer instance %d\n", p) ;
mexPrintf("\t\t\tlab: %0d\n", PT.lab) ;
mexPrintf("\t\t\tmapped CPU address: 0x%zx\n",PT.mappedCpuMemory) ;
}
}
}
}
/* ---------------------------------------------------------------- */
/* ProcessPool */
/* ---------------------------------------------------------------- */
#pragma mark -
/// Represents a pool of collaborating MATLAB processes. Usually each
/// process corresponds to a certain MATLAB instance in a MATLAB pool.
class ProcessPool
{
public:
/// Create an uninitialized ProcessPool. Before it is used,
/// the pool must be initialized using init(). This design allows
/// catching errors during initialization without resorting to exceptions.
ProcessPool() ;
/// Automatically calls ::finalize().
~ProcessPool() ;
/// Initialize instance \a lab of a pool of \a numLabs processes. The
/// function can time out.
vl::ErrorCode init(std::string const & prefix, int lab,
int numLabs, SharedTensorSpace * space) ;
/// Gracefully shutdown the connection with the other processes,
/// waiting for them to finish updating as needed. After this, the
/// supervisory thread quits, but the object remains initialized
/// to allow reading off the final value of the tensor.
///
/// The function can time out.
vl::ErrorCode shutdown() ;
/// Immediately terminate the ProcessPool instance and release all
/// resources.
void finalize() ;
/// Print information.
///
/// This function must be called from the MATLAB thread.
void mexPrint() const ;
/// Push a tensor in the pool for accumulation.
///
/// This function must be called from the MATLAB thread. It throws
/// a MEX error on error and can time out.
void mexPush(std::string const & name, mxArray const * x,
bool inplace = false) ;
/// Pull an accumulated tensor from the pool.
///
/// This function must be called from the MATLAB thread. It throws
/// a MEX error on failure and can time out.
mxArray * mexPull(std::string const & name, bool inplace = false) ;
/// Check whether the instance is initialized or not.
bool isInitialized() const { return initialized ; }
private:
bool initialized ;
std::string prefix ;
int lab ;
int numLabs ;
size_t timeoutInterval ;
SharedTensorSpace * sharedSpace ;
// Messages between peer processes.
struct Message
{
enum MessageType {
/// Sent from root to leaves to request initialization during
/// handshake.
init,
/// Sent from leaves to root to acknowledge initialization.
initDone,
/// Sent from root to leaves to request attaching the shared
/// resources (shared memory).
attach,
/// Sent to advertise a state change for a tensor.
tensorStateChange,
/// Shutdown sequence
requestShutdown,
/// Communicate the final transaction index for quitting.
tensorFinalTransaction
}
type ;
/// The transaction number.
size_t transaction ;
/// The final transaction number.
size_t finalTransaction ;
// Sender and destination process indexes.
int16_t from ;
int16_t to ;
// Session identifier, used for sanity checks.
uint32_t session ;
// Tensor ID and state for a tensor state change.
uint32_t tensorId ;
SharedTensorSpace::SharedTensorState tensorState ;
Message() : transaction(0), finalTransaction((size_t)-1), tensorId(0) { }
} ;
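// Messages travel as raw fixed-size structures: send() writes exactly
// sizeof(Message) bytes to the peer's socket and receive() reads exactly as
// many back, so all processes must be built with the same struct layout.
// The session field, derived from a timestamp by the root during the
// handshake, lets receive() reject packets from a stale session.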
class Supervisor {
public:
Supervisor(ProcessPool& pool)
: pool(pool), thread(NULL), state(down),
socketFD(-1) { pipeFD[0] = -1 ; pipeFD[1] = -1 ; }
~Supervisor() { finalize() ; }
vl::ErrorCode init() ;
void finalize() ;
vl::ErrorCode shutdown() ;
vl::ErrorCode beginTransaction(int tensorIndex) ;
vl::ErrorCode waitTensor(int tensorIndex) ;
private:
ProcessPool & pool ;
tthread::thread * thread ;
enum State {
connecting,
running,
shuttingDown,
down} state ;
// Peer processes.
struct Peer
{
int lab ;
int socketFD ;
bool cudaCanAccessPeer ; //cudaDeviceCanAccessPeer
bool shutdownRequested ;
Peer(int lab)
: lab(lab), socketFD(-1),
cudaCanAccessPeer(false),
shutdownRequested(false)
{ }
bool operator== (int lab) { return this->lab == lab ; }
} ;
typedef std::vector<Peer> peers_t ;
peers_t peers ;
// Comms.
uint32_t session ;
int pipeFD [2] ;
int socketFD ;
tthread::mutex mutex ;
tthread::condition_variable waitingList ;
bool shutdownRequested ; // local
bool forceQuit ;
static void threadEntryPoint(void * thing) ;
void entryPoint() ;
vl::ErrorCode connect() ;
void disconnect() ;
vl::ErrorCode handshake() ;
vl::ErrorCode loop() ;
vl::ErrorCode send(Message &msg, int to) ;
vl::ErrorCode receive(Message &msg, int from, int timeout = -1) ;
vl::ErrorCode handleAccumulateChildren(int tensorIndex) ;
vl::ErrorCode handleWaitParent(int tensorIndex) ;
vl::ErrorCode handleWaitChildren(int tensorIndex) ;
} supervisor ;
} ;
ProcessPool::ProcessPool()
: supervisor(*this),
initialized(false),
lab(-1), numLabs(0)
{ }
ProcessPool::~ProcessPool()
{
finalize() ;
}
vl::ErrorCode ProcessPool::init(std::string const & newPrefix, int newLab, int newNumLabs, SharedTensorSpace * newSharedSpace)
{
vl::ErrorCode error ;
assert(newLab >= 0) ;
assert(newNumLabs > newLab) ;
assert(newSharedSpace) ;
// finalize process pool if previously initialized
finalize() ;
// set members
prefix = newPrefix ;
lab = newLab ;
numLabs = newNumLabs ;
sharedSpace = newSharedSpace ;
timeoutInterval = 30UL * 1000UL * 1000UL ; // 30s in us
error = supervisor.init() ;
if (error == vl::VLE_Success) {
initialized = true ;
}
return error ;
}
vl::ErrorCode ProcessPool::shutdown()
{
return supervisor.shutdown() ;
}
void ProcessPool::finalize()
{
supervisor.finalize() ;
if (sharedSpace) {
sharedSpace->finalize() ;
delete sharedSpace ;
sharedSpace = NULL ;
}
lab = -1 ;
numLabs = 0 ;
initialized = false ;
}
void ProcessPool::mexPrint() const
{
tthread::lock_guard<tthread::mutex> (mutex) ;
if (sharedSpace) {
sharedSpace->mexPrint() ;
} else {
mexPrintf("Uninitialized.") ;
}
}
void ProcessPool::mexPush(std::string const & name,
mxArray const * x,
bool inplace)
{
// Search tensor by name.
auto T = std::find(sharedSpace->tensors.begin(), sharedSpace->tensors.end(), name) ;
if (T == sharedSpace->tensors.end()) {
vlmxError(VLMXE_IllegalArgument, "There is no tensor '%s'.", name.c_str()) ;
}
// Encapsulate MATLAB argument and check tensor compatibility.
vl::MexTensor mtens(context) ;
mtens.init(x) ;
if (mtens.getDeviceType() != T->descriptor.deviceType) {
vlmxError(VLMXE_IllegalArgument, "The tensor device type is incorrect.") ;
}
if (mtens.getDataType() != T->descriptor.dataType) {
vlmxError(VLMXE_IllegalArgument, "The tensor data type is incorrect.") ;
}
if (mtens.getNumElements() != T->descriptor.shape.getNumElements()) {
vlmxError(VLMXE_IllegalArgument, "The tensor shape is incorrect.") ;
}
if (inplace && T->descriptor.deviceType != vl::VLDT_GPU) {
vlmxError(VLMXE_IllegalArgument, "Inplace operations are supported only for GPU arrays.") ;
}
// Wait until the tensor is in ready state
vl::ErrorCode error = supervisor.waitTensor(T - sharedSpace->tensors.begin()) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Timeout or disconnected while waiting for tensor '%s' to become ready.", T->name.c_str()) ;
}
// Copy memory to SharedSpace
if (T->descriptor.deviceType == vl::VLDT_CPU) {
memcpy(T->cpuMemory, mtens.getMemory(), T->descriptor.getSizeInBytes()) ;
} else {
#if ENABLE_GPU
cudaError_t cerror ;
// sync main thread (do not start until the parameters have been computed!)
cudaEventRecord(T->gpuEvent, 0) ;
cudaStreamWaitEvent(sharedSpace->gpuHelperStream, T->gpuEvent, 0) ;
if (inplace) {
if (T->gpuMemoryIsOwned && T->gpuMemory) {
// Free the previously allocated memory as we are going to use
// an inplace operation on this tensor.
cudaFree(T->gpuMemory) ;
T->gpuMemory = NULL ;
}
T->gpuMemoryIsOwned = false ;
T->gpuMemory = mtens.getMemory() ;
} else {
if (T->gpuMemoryIsOwned == false || T->gpuMemory == NULL) {
cerror = cudaMalloc(&T->gpuMemory,
T->descriptor.getSizeInBytes()) ;
if (cerror != cudaSuccess) {
T->gpuMemory = NULL ;
T->gpuMemoryIsOwned = false ;
vlmxError(VLMXE_Alloc, "CUDA error while allocating GPU memory (%s).",
cudaGetErrorString(cerror)) ;
}
T->gpuMemoryIsOwned = true ;
}
// Copy the pushed value on every call, whether or not the buffer
// was just allocated.
cerror = cudaMemcpyAsync (T->gpuMemory,
mtens.getMemory(),
T->descriptor.getSizeInBytes(),
cudaMemcpyDeviceToDevice,
sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
vlmxError(VLMXE_Execution, "CUDA error while copying GPU data (%s).",
cudaGetErrorString(cerror)) ;
}
}
#endif
}
supervisor.beginTransaction(T - sharedSpace->tensors.begin()) ;
}
mxArray * ProcessPool::mexPull(std::string const & name, bool inplace)
{
// Search the tensor with the specified name.
auto T = std::find(sharedSpace->tensors.begin(), sharedSpace->tensors.end(), name) ;
if (T == sharedSpace->tensors.end()) {
vlmxError(VLMXE_IllegalArgument, "There is no tensor with the specified name.") ;
}
if (inplace && T->descriptor.deviceType != vl::VLDT_GPU) {
vlmxError(VLMXE_IllegalArgument, "Inplace operations are supported only for GPU arrays.") ;
}
// Wait until the tensor is in ready state
vl::ErrorCode error = supervisor.waitTensor(T - sharedSpace->tensors.begin()) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Timeout or disconnected while waiting for tensor '%s' to become ready.", T->name.c_str()) ;
}
if (inplace) {
// With in-place operations, the only purpose of pull() is to wait until
// the tensor is ready and can be accessed.
return NULL ;
} else {
vl::MexTensor result(context) ;
result.init(T->descriptor.deviceType, T->descriptor.dataType, T->descriptor.shape) ;
if (T->descriptor.deviceType == vl::VLDT_CPU) {
memcpy(result.getMemory(),
T->cpuMemory,
T->descriptor.getSizeInBytes()) ;
} else {
#if ENABLE_GPU
// Synchronous with main thread.
cudaError_t cerror = cudaMemcpyAsync (result.getMemory(),
T->gpuMemory,
T->descriptor.getSizeInBytes(),
cudaMemcpyDeviceToDevice,
sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
vlmxError(VLMXE_Execution, "CUDA generated an error while copying GPU data: '%s'.",
cudaGetErrorString(cerror)) ;
}
cerror = cudaStreamSynchronize(sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
vlmxError(VLMXE_Execution, "CUDA generated an error while synchronizing a stream: '%s'.",
cudaGetErrorString(cerror)) ;
}
#endif
}
return result.relinquish() ;
}
}
/* ---------------------------------------------------------------- */
/* ProcessPool::Supervisor */
/* ---------------------------------------------------------------- */
#pragma mark -
#undef LOGERROR
#define LOGERROR \
vl::Logger().getStream() \
<<"[error]"<<__func__<<"::lab "<<pool.lab<<"::"
#undef LOG
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::lab "<<pool.lab<<"::"
void ProcessPool::Supervisor::threadEntryPoint(void * thing)
{
((ProcessPool::Supervisor*)thing)->entryPoint() ;
}
vl::ErrorCode ProcessPool::Supervisor::init()
{
vl::ErrorCode error = vl::VLE_Success ;
finalize() ;
// Infer parent and children labs.
int bit = ffs(pool.lab) - 1 ;
if (bit == -1) { bit = 31 ; }
int parent = pool.lab & (~(1 << bit)) ;
if (parent != pool.lab) {
// peers[0] always contain the parent (except for root)
peers.push_back(Peer(parent)) ;
}
for (int k = 0 ; k < bit ; ++k) {
int child = pool.lab | (1 << k) ;
if (child < pool.numLabs) {
// Which peers[] gets which children is determined later
// during handshake based on the random connection order.
// Here we assign a provisional lab index using negative indexes
// as these are needed to use send().
peers.push_back(Peer(-child)) ;
}
}
state = connecting ;
shutdownRequested = false ;
forceQuit = false ;
thread = new tthread::thread(threadEntryPoint, this) ;
// Wait for initialization to be complete.
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (state == connecting) {
waitingList.wait(mutex) ;
}
if (state == running) {
error = vl::VLE_Success ;
} else {
error = vl::VLE_Unknown ;
}
}
return error ;
}
void ProcessPool::Supervisor::finalize()
{
if (thread) {
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
forceQuit = true ;
if (pipeFD[1] >= 0) {
char dummy = 1 ;
write(pipeFD[1], &dummy, 1) ;
}
}
if (thread->joinable()) {
thread->join() ;
}
delete thread ;
thread = NULL ;
}
peers.clear() ;
}
vl::ErrorCode ProcessPool::Supervisor::shutdown()
{
// Signal the supervisory thread
shutdownRequested = true ;
char dummy = 1 ;
write(pipeFD[1], &dummy, 1) ;
// Wait for shutdown to complete
{
size_t start = vl::getTime() ;
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (state != down) {
if (vl::getTime() > start + pool.timeoutInterval) {
LOGERROR << "timeout while shutting down" ;
return vl::VLE_Timeout ;
}
waitingList.wait(mutex) ;
}
}
return vl::VLE_Success ;
}
vl::ErrorCode ProcessPool::Supervisor::beginTransaction(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
T.transaction ++ ;
T.numChildrenToAccumulate = 0 ;
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
SharedTensorSpace::SharedTensorPeerInstance & PT = pool.sharedSpace->peerTensors[tensorIndex][p] ;
PT.accumulated = false ;
T.numChildrenToAccumulate += 1;
}
asm volatile("": : :"memory") ; // Memory barrier: prevents compiler from reordering
T.state = SharedTensorSpace::accumulateChildren ; // Must be last to close transaction
// Signal the supervisory thread
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
char dummy = 1 ;
write(pipeFD[1], &dummy, 1) ;
}
return error ;
}
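// The hand-off to the supervisory thread is lock-free: the main thread
// first fills in the transaction data, the compiler barrier above keeps
// those writes from being reordered past the state update, and writing
// accumulateChildren into T.state is what actually transfers ownership of
// the tensor to the supervisor. The byte written to the pipe only wakes up
// poll() in the supervisory loop. (In C++11 terms the same intent would be
// expressed as a release store on an atomic state variable.)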
vl::ErrorCode ProcessPool::Supervisor::waitTensor(int tensorIndex)
{
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
size_t start = vl::getTime() ;
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (T.state != SharedTensorSpace::ready) {
if ((vl::getTime() - start) > pool.timeoutInterval) {
return vl::VLE_Timeout ;
}
if (state != running) {
return vl::VLE_Unknown ;
}
waitingList.wait(mutex) ;
}
return vl::VLE_Success ;
}
vl::ErrorCode ProcessPool::Supervisor::send(Message & msg, int to)
{
// Find connection to peer.
peers_t::const_iterator rel = std::find(peers.begin(), peers.end(), to) ;
assert(rel != peers.end()) ;
// Add complementary information to the message.
msg.session = session ;
msg.from = pool.lab ;
msg.to = to ;
// Send all bytes.
int bytesWritten = 0 ;
int status ;
char * nextByte = (char*)&msg ;
while (bytesWritten < sizeof(msg)) {
status = write(rel->socketFD, nextByte, sizeof(msg) - bytesWritten) ;
if (status == -1) {
LOGERROR
<< "could not send message to " << to
<< " because '" << strerror(errno) << '\'' ;
return vl::VLE_Unknown ;
}
bytesWritten += status ;
}
LOG(3)
<< "sent message to " << to
<< " (type " << msg.type
<< ", state " << msg.tensorState
<< " tensor " << msg.tensorId
<< ')' ;
return vl::VLE_Success ;
}
vl::ErrorCode ProcessPool::Supervisor::receive(Message & msg, int from, int timeout)
{
size_t waited = 0 ; // us
size_t const pollInterval = 1000 ; // us
if (timeout < 0) { timeout = pool.timeoutInterval ; } // us
// find connection to peer
peers_t::const_iterator rel = std::find(peers.begin(), peers.end(), from) ;
assert(rel != peers.end()) ;
// receive all bytes
{
int bytesRead = 0 ;
int status ;
char * nextByte = (char*)&msg ;
while (bytesRead < sizeof(msg)) {
status = read(rel->socketFD, nextByte, sizeof(msg) - bytesRead) ;
if (status == 0 || status == -1) {
if (status == 0 || errno == EAGAIN) {
if (timeout == 0 && bytesRead == 0) {
// non blocking operation, no message, just return no data
return vl::VLE_NoData ;
}
if (timeout > 0 && waited >= timeout) {
if (verbosity >= 1) {
LOGERROR
<< "timed out while receiving a message from lab " << from
<< " because '" << strerror(errno) << '\'' ;
}
return vl::VLE_Timeout ;
}
usleep(pollInterval) ;
waited += pollInterval ;
continue ;
}
if (verbosity >= 1) {
LOGERROR
<< "error while receiving a message from lab " << from
<< ": '" << strerror(errno) << '\'' ;
}
return vl::VLE_Unknown ;
}
bytesRead += status ;
}
}
// check message integrity
if ((msg.type != Message::init &&
msg.type != Message::initDone)
&& (msg.session != session &&
msg.from != from &&
msg.to != pool.lab)) {
LOGERROR
<< "received an unexpected message from lab " << from
<< "\n\tmsg: session:" << msg.session
<< " from:" << msg.from
<< " to:" << msg.to
<< " type:" << msg.type
<< "\n\tthis session:" << this->session ;
return vl::VLE_Unknown ;
}
LOG(3)
<< "received message from "<<from
<< " (type " << msg.type
<< ", state " << msg.tensorState
<< ", tensor " << msg.tensorId
<< ')' ;
return vl::VLE_Success ;
}
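// Note on the receive strategy: all peer sockets are set O_NONBLOCK, so
// receive() polls the descriptor in pollInterval steps (1 ms) until either
// the full message has arrived or `timeout` microseconds have elapsed;
// calling it with timeout == 0 acts as a non-blocking probe that returns
// VLE_NoData when nothing is pending.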
/// Establish connections with the peers.
vl::ErrorCode ProcessPool::Supervisor::connect()
{
vl::ErrorCode error = vl::VLE_Success ;
int result ;
char socketName [256] ;
struct sockaddr_un socketAddress ;
size_t start = vl::getTime() ;
pipeFD[0] = -1 ;
pipeFD[1] = -1 ;
socketFD = -1 ;
// Lock for entire duration of connect()
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Advertise
state = connecting ;
waitingList.notify_all() ;
// Create a pipe FD for notification between MATLAB's thread
// and the supervisory thread. This is needed to allow waking up
// the supervisory thread.
result = pipe(pipeFD) ;
if (result == -1) {
pipeFD[0] = -1 ;
pipeFD[1] = -1 ;
LOGERROR
<< "cannot create inter-threads pipe because: '"
<< strerror(errno) << '\'' ;
return vl::VLE_Unknown ;
}
// Create a socket and connect children.
size_t numChildren = peers.size() - (pool.lab > 0) ;
if (numChildren > 0) {
// Get a UNIX-domain socket.
snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
"/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), pool.lab) ;
socketFD = socket(AF_UNIX, SOCK_STREAM, 0) ;
if (socketFD == -1) {
LOGERROR
<< "cannot create socket " << socketName
<< "because: " << strerror(errno) ;
return vl::VLE_Unknown ;
}
// Copy socket path into socketAddress.
memset(&socketAddress, 0, sizeof(socketAddress)) ;
socketAddress.sun_family = AF_UNIX;
strncpy(socketAddress.sun_path, socketName,
sizeof(socketAddress.sun_path) - 1) ;
// Delete socket path if it exists before binding.
if (access(socketAddress.sun_path, F_OK) == 0) {
unlink(socketAddress.sun_path) ;
}
// Bind socket to address.
result = bind(socketFD,
(struct sockaddr *)&socketAddress,
sizeof(socketAddress)) ;
if (result == -1) {
LOGERROR
<< "cannot bind socket " << socketName
<< "because: " << strerror(errno) ;
return vl::VLE_Unknown ;
}
// Start listening for children connections
result = listen(socketFD, numChildren) ;
if (result == -1) {
LOGERROR
<< "cannot listen to socket " << socketName
<< "because: " << strerror(errno) ;
return vl::VLE_Unknown ;
}
// Do not block on accept().
fcntl(socketFD, F_SETFL, fcntl(socketFD, F_GETFL, 0) | O_NONBLOCK);
// Accept one connection per child.
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
peers[p].socketFD = -1 ;
for (;;) {
peers[p].socketFD = accept(socketFD, NULL, NULL) ;
if (peers[p].socketFD == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
if (vl::getTime() < start + pool.timeoutInterval) continue ; // retry
LOGERROR
<< "timed out while accepting connection from peer " << peers[p].lab ;
error = vl::VLE_Timeout ;
goto done ;
}
LOGERROR
<< " cannot accept connection from peer " << peers[p].lab
<< " because: " << strerror(errno) ;
error = vl::VLE_Unknown ;
goto done ;
}
break ;
}
fcntl(peers[p].socketFD, F_SETFL,
fcntl(peers[p].socketFD ,F_GETFL, 0) | O_NONBLOCK) ;
}
}
// Connect parent.
if (pool.lab > 0) {
snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
"/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), peers[0].lab) ;
for (;;) {
peers[0].socketFD = socket(AF_UNIX, SOCK_STREAM, 0) ;
if (peers[0].socketFD == -1) {
if (vl::getTime() < start + pool.timeoutInterval) {
// Wait for parent to create socket file.
usleep(100UL * 1000UL) ; // 100 ms (10 times a second)
continue ;
}
LOGERROR
<< "cannot create socket '" << socketName
<< "' because '" << strerror(errno) << '"' ;
error = vl::VLE_Unknown ;
goto done ;
}
break ;
}
fcntl(peers[0].socketFD, F_SETFL,
fcntl(peers[0].socketFD ,F_GETFL, 0) | O_NONBLOCK) ;
// Copy socket path into socketAddress.
memset(&socketAddress, 0, sizeof(socketAddress)) ;
socketAddress.sun_family = AF_UNIX;
strncpy(socketAddress.sun_path, socketName,
sizeof(socketAddress.sun_path) - 1) ;
// Establish connection with parent.
for (int trials = 0 ; ; ++trials) {
int result = ::connect(peers[0].socketFD,
(struct sockaddr *)&socketAddress,
sizeof(socketAddress)) ;
if (result == 0) break ;
if (vl::getTime() < start + pool.timeoutInterval) {
// Wait for parent to start accepting connections.
usleep(100UL * 1000UL) ; // 100 ms (10 times a second)
continue ;
}
LOGERROR
<< "cannot connect socket " << socketName
<< " after trying " << trials
<< " times because '" << strerror(errno) << '"' ;
error = vl::VLE_Unknown ;
goto done ;
}
}
done:
return error ;
}
void ProcessPool::Supervisor::disconnect()
{
// Lock for entire duration of disconnect()
tthread::lock_guard<tthread::mutex> lock(mutex) ;
for (int p = 0 ; p < peers.size() ; ++p) {
if (peers[p].socketFD != -1) {
close(peers[p].socketFD) ;
peers[p].socketFD = -1 ;
}
}
if (socketFD != -1) {
close(socketFD) ;
socketFD = -1 ;
}
char socketName [256] ;
snprintf(socketName, sizeof(socketName)/sizeof(socketName[0]),
"/%s/%s-socket-%02d", P_tmpdir, pool.prefix.c_str(), pool.lab) ;
unlink(socketName) ;
for (int t = 1 ; t >= 0 ; --t) {
if (pipeFD[t] != -1) {
close(pipeFD[t]) ;
pipeFD[t] = -1 ;
}
}
state = down ;
waitingList.notify_all() ;
}
// The purpose of the handshake sequence is to make sure that
// all processes are properly communicating and ready to go.
// It is also required to synchronize the root (which creates several
// shared resources) and the other nodes (which attach them).
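// The resulting message flow: the root attaches the shared space first and
// sends `init` down the tree; every non-root node waits for `init` from its
// parent, attaches, and forwards `init` to its own children. Each node then
// collects `initDone` from its children (which also reveals their lab
// indexes), registers the peer tensors, and, if it is not the root, replies
// `initDone` to its parent.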
vl::ErrorCode ProcessPool::Supervisor::handshake()
{
Message msg ;
vl::ErrorCode error = vl::VLE_Success ;
// Lock for entire duration of handshake()
tthread::lock_guard<tthread::mutex> lock(mutex) ;
LOG(2) << "handshake begins" ;
// receive message from parent (except for root)
if (pool.lab == 0) {
session = (uint32_t)vl::getTime() ;
// root attaches first
error = pool.sharedSpace->attach(pool.prefix, 0, pool.numLabs) ;
if (error != vl::VLE_Success) {
LOGERROR << "root could not attach the shared space" ;
error = vl::VLE_Unknown ;
goto done ;
}
LOG(2) << "root attached the shared tensor space" ;
} else {
error = receive(msg, peers[0].lab) ;
if (error != vl::VLE_Success || msg.type != Message::init) {
LOGERROR << "did not receive a message from parent" ;
error = vl::VLE_Unknown ;
goto done ;
}
session = msg.session ;
// children attach now
error = pool.sharedSpace->attach(pool.prefix, pool.lab, pool.numLabs) ;
if (error != vl::VLE_Success || msg.type != Message::init) {
LOGERROR << "could not attach shared space" ;
error = vl::VLE_Unknown ;
goto done ;
}
LOG(2) << "child attached the shared tensor space" ;
}
// send message to all children
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
msg.type = Message::init ;
error = send(msg,peers[p].lab) ;
if (error != vl::VLE_Success) {
LOGERROR << "could not send a message to a child" ;
goto done ;
}
}
// receive message from all children
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
error = receive(msg,peers[p].lab) ;
if (error != vl::VLE_Success || msg.type != Message::initDone) {
error = vl::VLE_Unknown ;
goto done ;
}
// now we can identify the child lab index
peers[p].lab = msg.from ;
LOG(2) << "connected lab " << msg.from ;
}
// register peer tensors in the same order as peer[]
for (int p = 0 ; p < peers.size() ; ++p) {
pool.sharedSpace->attachPeer(peers[p].lab) ;
}
// send message to parent (except for root)
if (pool.lab > 0) {
msg.type = Message::initDone ;
error = send(msg, peers[0].lab) ;
if (error != vl::VLE_Success) {
error = vl::VLE_Unknown ;
goto done ;
}
session = msg.session ;
}
done:
if (error != vl::VLE_Success) {
LOGERROR << "handshake failed" ;
} else {
LOG(2) << "handshake terminated successfully" ;
}
return error ;
}
void ProcessPool::Supervisor::entryPoint()
{
vl::ErrorCode error = vl::VLE_Success ;
// Make sure the supervisory thread operates on the same CUDA device
// as the main thread.
#if ENABLE_GPU
if (pool.sharedSpace->gpuDevice >= 0) {
LOG(2) << "setting CUDA device" ;
cudaError_t cerror = cudaSetDevice(pool.sharedSpace->gpuDevice) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "could not switch supervisory thread to CUDA device "
<< pool.sharedSpace->gpuDevice ;
error = vl::VLE_Cuda ;
} else {
LOG(2) << "supervisory thread switched to CUDA device " << pool.sharedSpace->gpuDevice ;
}
}
#endif
if (error == vl::VLE_Success) {
error = connect() ;
}
if (error == vl::VLE_Success) {
error = handshake() ;
}
if (error == vl::VLE_Success) {
error = loop() ;
}
disconnect() ;
}
vl::ErrorCode ProcessPool::Supervisor::handleAccumulateChildren(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
// Search for children ready to be accumulated.
for (int p = (pool.lab > 0) ; p < peers.size() && error == vl::VLE_Success ; ++p)
{
int peerLab = peers[p].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
bool thisChildReadyForAccumulation =
PT.transaction == T.transaction &&
PT.state == SharedTensorSpace::waitParent &&
PT.accumulated == false ;
if (thisChildReadyForAccumulation) {
switch (T.descriptor.deviceType) {
case vl::VLDT_CPU: {
switch (T.descriptor.dataType) {
case vl::VLDT_Float:
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0f,
(float*)PT.mappedCpuMemory, 1,
(float*)T.cpuMemory, 1) ;
break ;
case vl::VLDT_Double:
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Double>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0,
(double*)PT.mappedCpuMemory, 1,
(double*)T.cpuMemory, 1) ;
break ;
default:
assert(false) ;
break ;
}
break ;
}
case vl::VLDT_GPU: {
#if ENABLE_GPU
cudaError_t cerror ;
if (T.gpuMemory == NULL) {
LOGERROR << "internal error: GPU memory not allocated for tensor " << T.name ;
error = vl::VLE_Unknown ;
break ;
}
// Copy the tensor update from the host shared memory map
// to a dispatch buffer on the GPU.
cerror = cudaMemcpyAsync(pool.sharedSpace->gpuDispatchMemory,
PT.mappedCpuMemory,
T.descriptor.getSizeInBytes(),
cudaMemcpyHostToDevice,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA generated an error while copying data from host to device: "
<< cudaGetErrorString(cerror) ;
error = vl::VLE_Cuda ;
break ;
}
// Add the update to the current tensor value.
cudaStream_t previousStream = context.getCudaHelper().getStream() ;
error = context.getCudaHelper().setStream(pool.sharedSpace->gpuHelperStream) ;
if (error != vl::VLE_Success) {
LOGERROR
<< "CUDA generated an error while switching to a different stream:"
<< context.getLastErrorMessage() ;
break ;
}
switch (T.descriptor.dataType) {
case vl::VLDT_Float:
error = vl::impl::blas<vl::VLDT_GPU,vl::VLDT_Float>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0f,
(float*)pool.sharedSpace->gpuDispatchMemory, 1,
(float*)T.gpuMemory, 1) ;
break ;
case vl::VLDT_Double:
error = vl::impl::blas<vl::VLDT_GPU,vl::VLDT_Double>::axpy
(context,
T.descriptor.shape.getNumElements(),
1.0,
(double*)pool.sharedSpace->gpuDispatchMemory, 1,
(double*)T.gpuMemory, 1) ;
break ;
default:
assert(false) ;
break ;
}
context.getCudaHelper().setStream(previousStream) ;
if (error != vl::VLE_Success) {
LOGERROR << "summing tensors:" << context.getLastErrorMessage() ;
}
#endif
break ;
}
default:
assert(false) ;
break ;
}
PT.accumulated = true ;
-- T.numChildrenToAccumulate ;
LOG(3)
<< "accumulated child " << PT.lab
<< "; " << T.numChildrenToAccumulate << " remaining" ;
} // next peer
}
if (error != vl::VLE_Success) { return error ; }
// If all children have been accumulated, then
// notify the parent and switch to waitParent state.
// Note that we change the PT state too as the peer
// will switch to that upon receiving the notification.
//
// The root is a special case because it
// does not have a parent, so it can switch
// directly to the waitChildren state. However, in order
// to reuse the generic code above, we also set it
// to waitParent and let the next iteration pick this up.
if (T.numChildrenToAccumulate == 0) {
if (T.descriptor.deviceType == vl::VLDT_GPU) {
#if ENABLE_GPU
cudaError_t cerror ;
// Copy the GPU tensor to the shared host memory map for other
// processes to use.
cerror = cudaMemcpyAsync(T.cpuMemory,
T.gpuMemory,
T.descriptor.getSizeInBytes(),
cudaMemcpyDeviceToHost,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA error while copying from device to host ("
<< cudaGetErrorString(cerror) << ")" ;
return vl::VLE_Cuda ;
}
// Make this operation synchronous in order
// to make sure that other processes will properly read the
// update only when the copy is complete
cerror = cudaStreamSynchronize(pool.sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA error while synchronizing a stream: '"
<< cudaGetErrorString(cerror) << '\'' ;
return vl::VLE_Cuda ;
}
#endif
}
T.state = SharedTensorSpace::waitParent ;
if (pool.lab > 0) {
int parentLab = peers[0].lab ;
pool.sharedSpace->getPeerTensor(tensorIndex, parentLab).state = SharedTensorSpace::waitParent ;
Message msg ;
msg.type = Message::tensorStateChange ;
msg.tensorId = tensorIndex ;
msg.tensorState = T.state ;
msg.transaction = T.transaction ;
error = send(msg, parentLab) ;
}
}
return error ;
}
vl::ErrorCode ProcessPool::Supervisor::handleWaitParent(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
// Check if the parent has finished updating. If so, we can copy its value
// here, switch to the waitChildren state, and notify the children so that
// they can copy us. Note that we change the children peer state too
// as these peers will switch to that upon being notified.
if (pool.lab > 0) {
int parentLab = peers[0].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, parentLab) ;
bool parentDone = (PT.transaction == T.transaction &&
PT.state == SharedTensorSpace::waitChildren) ;
if (!parentDone) {
return vl::VLE_Success ;
}
switch (T.descriptor.deviceType) {
case vl::VLDT_CPU:
memcpy(T.cpuMemory, PT.mappedCpuMemory, T.descriptor.getSizeInBytes()) ;
break ;
case vl::VLDT_GPU: {
#if ENABLE_GPU
cudaError_t cerror = cudaMemcpyAsync(T.gpuMemory,
PT.mappedCpuMemory,
T.descriptor.getSizeInBytes(),
cudaMemcpyHostToDevice,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "propagating parent to children: CUDA generated an error while copying from host to device: '"
<< cudaGetErrorString(cerror) << '\'' ;
error = vl::VLE_Cuda ;
}
#endif
break ;
}
}
if (error != vl::VLE_Success) { return error ; }
}
// We have copied data from parent (or there is no parent at all)
// so we are ready to pass our data to the children and to release
// the parent from waiting on us.
#if ENABLE_GPU
if (T.descriptor.deviceType == vl::VLDT_GPU) {
cudaError_t cerror ;
if (peers.size() > (pool.lab > 0)) {
// There are children (i.e. peers other than parent), so copy data to host
// to deliver it to them.
cerror = cudaMemcpyAsync(T.cpuMemory,
T.gpuMemory,
T.descriptor.getSizeInBytes(),
cudaMemcpyDeviceToHost,
pool.sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA generated an error while copying from device to host: '"
<< cudaGetErrorString(cerror) << '\'' ;
error = vl::VLE_Cuda ;
}
}
// Synchronize, so it is safe for children on other processes to read
// the memory. Synchronize even if there are no children, so that inplace
// reads from this process are safe.
cerror = cudaStreamSynchronize(pool.sharedSpace->gpuHelperStream) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA gnereated an error while synchronizing a stream: '"
<< cudaGetErrorString(cerror) << '\'' ;
return vl::VLE_Cuda ;
}
}
#endif
// Notify the parent that we are done copying its data and the children than we are waiting
// on them to copy our data.
T.state = SharedTensorSpace::waitChildren ;
for (int p = 0 ; p < peers.size() ; ++p) {
int peerLab = peers[p].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
PT.state = (pool.lab > 0 && p == 0) ? SharedTensorSpace::ready : SharedTensorSpace::waitChildren ;
Message msg ;
msg.type = Message::tensorStateChange ;
msg.transaction = T.transaction ;
msg.tensorId = tensorIndex ;
msg.tensorState = (pool.lab > 0 && p == 0) ? SharedTensorSpace::ready : SharedTensorSpace::waitChildren ;
error = send(msg, peerLab) ;
}
return error ;
}
vl::ErrorCode ProcessPool::Supervisor::handleWaitChildren(int tensorIndex)
{
vl::ErrorCode error = vl::VLE_Success ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
// Check whether all children have finished copying our data. If so, the
// transaction is complete: switch the tensor back to the ready state and
// wake up the main thread, which may be waiting in push() or pull().
bool allChildrenDone = true ;
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
int peerLab = peers[p].lab ;
SharedTensorSpace::SharedTensorPeerInstance & PT
= pool.sharedSpace->getPeerTensor(tensorIndex, peerLab) ;
bool thisChildDone =((PT.transaction == T.transaction &&
PT.state == SharedTensorSpace::ready) ||
PT.transaction > T.transaction) ;
allChildrenDone &= thisChildDone ;
}
if (allChildrenDone) {
tthread::lock_guard<tthread::mutex> lock(mutex) ;
T.state = SharedTensorSpace::ready ;
waitingList.notify_all() ;
}
return error ;
}
vl::ErrorCode ProcessPool::Supervisor::loop()
{
vl::ErrorCode error = vl::VLE_Success ;
LOG(2) << "loop begins" ;
// Advertise. Note that we do not lock extensively in the main
// loop. Synchronization with the main thread is kept efficient
// using lock-free mechanisms.
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
state = running ;
waitingList.notify_all() ;
}
int pollStatus = 0 ;
size_t const pollInterval = 499UL ; // allow heartbeats (ms)
size_t const heartbeatInterval = 500UL * 1000UL * 1000UL ; // (ns)
size_t lastHeartbeat = vl::getTime() ;
struct pollfd * polls = new struct pollfd [peers.size() + 1] ;
for (int p = 0 ; p < peers.size() ; ++p) {
polls[p].fd = peers[p].socketFD ;
polls[p].events = POLLIN | POLLHUP | POLLERR | POLLNVAL ;
}
polls[peers.size()].fd = pipeFD[0] ;
polls[peers.size()].events = POLLIN ;
while (error == vl::VLE_Success && forceQuit == false)
{
// Generate regular heartbeats to wake up the main thread at
// regular intervals and allow it to time out on
// user commands such as pull() and push().
size_t now = vl::getTime() ;
if (now > lastHeartbeat + heartbeatInterval) {
waitingList.notify_all() ; // no need to lock
lastHeartbeat = now ;
}
// Wait for incoming messages or a timeout.
pollStatus = poll(polls, peers.size() + 1, pollInterval) ;
if (pollStatus < 0) {
error = vl::VLE_Unknown ;
continue ;
}
// Timeout!
if (pollStatus == 0) {
LOG(1) << "Polling timed out on lab " << pool.sharedSpace->lab ;
// pool.sharedSpace->dump() ;
}
// Check for messages piped from the main thread.
if (polls[peers.size()].revents & POLLIN) {
LOG(3) << "supervisory thread notified by the main thread" ;
char dummy ;
read(pipeFD[0], &dummy, 1) ;
}
// Check for messages from other processes.
for (int p = 0 ; p < peers.size() && error == vl::VLE_Success ; ++ p)
{
// Check for communication errors.
if (polls[p].revents & (POLLHUP | POLLERR | POLLNVAL)) {
LOG(3) << "one of the sockets generated an error, quitting" ;
error = vl::VLE_Unknown ;
break ;
}
// Skip this peer if there is no incoming data.
if ((polls[p].revents & POLLIN) == 0) continue ;
// Receive the message.
Message msg ;
error = receive(msg, peers[p].lab) ;
if (error != vl::VLE_Success) {
LOGERROR << "error while receiving a message from lab " << peers[p].lab ;
break ;
}
// Process the message.
switch (msg.type) {
case Message::tensorStateChange: {
// Record the new state for later.
LOG(3)
<< "received tensor state change from lab " << msg.from
<< " for tensor " << pool.sharedSpace->tensors[msg.tensorId].name.c_str()
<< " to state " << msg.tensorState
<< " for transaction " << msg.transaction ;
SharedTensorSpace::SharedTensorPeerInstance & T
= pool.sharedSpace->getPeerTensor(msg.tensorId, msg.from) ;
T.state = msg.tensorState ;
T.transaction = msg.transaction ;
break ;
}
case Message::requestShutdown: {
peers_t::iterator P = std::find(peers.begin(), peers.end(), msg.from) ;
P->shutdownRequested = true ;
break ;
}
case Message::tensorFinalTransaction: {
peers_t::iterator P = std::find(peers.begin(), peers.end(), msg.from) ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[msg.tensorId];
LOG(3)
<< "received final transaction from lab " << msg.from
<< " for tensor " << T.name.c_str()
<< " to transaction " << msg.finalTransaction ;
int sourcePeer = msg.from ;
if (msg.finalTransaction < T.finalTransaction) {
T.finalTransaction = msg.finalTransaction ;
for (int q = 0 ; q < peers.size() ; ++q) {
if (sourcePeer == peers[q].lab) continue ;
error = send(msg, peers[q].lab) ;
if (error != vl::VLE_Success) {
LOGERROR
<< "error while sending a message to lab "
<< peers[q].lab ;
break ;
}
}
}
break ;
}
default:
// Unexpected message.
LOGERROR << "received an unexpected message" ;
error = vl::VLE_Unknown ;
break ;
}
}
// Check all tensors for actions. Keep updating each tensor until its
// state does not change anymore.
for (int tensorIndex = 0 ; tensorIndex < pool.sharedSpace->tensors.size() && error == vl::VLE_Success ; ++tensorIndex)
{
SharedTensorSpace::SharedTensorState currentState ;
SharedTensorSpace::SharedTensorInstance & T = pool.sharedSpace->tensors[tensorIndex] ;
do {
currentState = T.state ;
LOG(3) << "visiting tensor " << T.name << " in state " << T.state ;
// Detect interruptions
if (T.transaction > T.finalTransaction) {
LOG(1) << "detected interrupded transaction for tensor " << T.name <<
" (transaction:"<<T.transaction<<" > final_transaction:"<<T.finalTransaction<<")";
error = vl::VLE_Interrupted ;
continue ;
}
switch (T.state) {
case SharedTensorSpace::ready:
break ;
case SharedTensorSpace::accumulateChildren:
error = handleAccumulateChildren(tensorIndex) ;
break ;
case SharedTensorSpace::waitParent :
error = handleWaitParent(tensorIndex) ;
break ;
case SharedTensorSpace::waitChildren :
error = handleWaitChildren(tensorIndex) ;
break ;
}
} while (T.state != currentState && error == vl::VLE_Success) ;
}
// Upon shutting down, propagate a message to let other nodes know that
// no further transaction can be processed for each tensor.
if (shutdownRequested && (state == running) && (error == vl::VLE_Success)) {
LOG(3) << "sending final transaction for all tensors" ;
for (int i = 0 ; i < pool.sharedSpace->tensors.size() ; ++i) {
SharedTensorSpace::SharedTensorInstance & tensor = pool.sharedSpace->tensors[i] ;
if (tensor.finalTransaction > tensor.transaction) {
tensor.finalTransaction = tensor.transaction ;
Message msg ;
msg.type = Message::tensorFinalTransaction ;
msg.tensorId = i ;
msg.finalTransaction = tensor.finalTransaction ;
for (int p = 0 ; p < peers.size() ; ++p) {
error = send(msg, peers[p].lab) ;
if (error != vl::VLE_Success) {
LOGERROR
<< "error while sending a message to lab "
<< peers[p].lab ;
break ;
}
}
}
}
}
// Check for other actions.
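      // Shutdown handshake (summary inferred from the code below): a non-root lab waits
      // until all of its children have requested shutdown, then forwards requestShutdown
      // to its parent (peers[0]); the root lab breaks out of the poll loop once the whole
      // tree has requested shutdown, and the remaining labs stop when their connections break.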
if (shutdownRequested && (state == running) && (error == vl::VLE_Success)) {
// Check if the children are also in shutdown mode
bool allDone = true ;
for (int p = (pool.lab > 0) ; p < peers.size() ; ++p) {
allDone &= peers[p].shutdownRequested ;
}
if (allDone) {
state = Supervisor::shuttingDown ; // avoid sending the same message again later
if (pool.lab > 0) {
LOG(2) << "subtree ready to shutdown, telling parent lab" ;
Message msg ;
msg.type = Message::requestShutdown ;
error = send(msg, peers[0].lab) ;
} else {
// Other processes will stop when connections are broken.
LOG(2) << "everyone requested shutdown, root lab quitting" ;
break ; // out of poll loop
}
}
}
} // back to poll
LOG(2) << "terminating supervisory thread loop (error = " << error << ')' ;
delete [] polls ;
return error ;
}
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
#pragma mark -
ProcessPool processPool ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
void atExit()
{
processPool.finalize() ;
context.clear() ;
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
int opt ;
int next = IN_END ;
mxArray const *optarg ;
enum Commands { init, stats, reset, push, pull } command ;
bool inplace = false ;
std::string tensorName ;
std::string prefix = "mcn" ;
mxArray const * arg ;
vl::ErrorCode error = vl::VLE_Success ;
size_t labIndex = 0 ;
size_t numLabs = 0 ;
verbosity = 0 ;
mexAtExit(atExit) ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
if (nin < 1) {
vlmxError(VLMXE_IllegalArgument, "Not enough input arguments.") ;
}
if (!vlmxIsString(in[0], -1)) {
vlmxError(VLMXE_IllegalArgument, "COMMAND is not a string.") ;
}
if (vlmxCompareToStringI(in[0],"init") == 0) {
command = init ;
if (nin < 4) {
vlmxError(VLMXE_IllegalArgument, "Less than three arguments passed to INIT.") ;
}
arg = in[1] ;
if (!vlmxIsPlainScalar(in[2])) {
vlmxError(VLMXE_IllegalArgument, "LABINDEX is not a plain scalar.") ;
}
labIndex = mxGetScalar(in[2]) ;
if (labIndex < 1) {
vlmxError(VLMXE_IllegalArgument, "LABINDEX must be an integer greater than 0.") ;
}
if (!vlmxIsPlainScalar(in[3])) {
vlmxError(VLMXE_IllegalArgument, "NUMLABS is not a plain scalar.") ;
}
numLabs = mxGetScalar(in[3]) ;
if (numLabs < labIndex) {
vlmxError(VLMXE_IllegalArgument, "NUMLABS must be an integer greater or equal to LABINDEX.") ;
}
next = 4 ;
} else if (vlmxCompareToStringI(in[0], "stats") == 0) {
command = stats ;
next = 1 ;
} else if (vlmxCompareToStringI(in[0], "reset") == 0) {
command = reset ;
next = 1 ;
} else if (vlmxCompareToStringI(in[0], "push") == 0) {
if (nin < 3) {
vlmxError(VLMXE_IllegalArgument, "Less than three arguments passed to PUSH.") ;
}
command = push ;
VLMXErrorCode error = vlmxParseString(tensorName, in[1]) ;
if (error != VLMXE_Success) {
vlmxError(error, "NAME is not a string.") ;
}
arg = in[2] ;
next = 3 ;
} else if (vlmxCompareToStringI(in[0], "pull") == 0) {
if (nin < 2) {
mexErrMsgTxt("Less than two arguments passed to PULL.") ;
}
command = pull ;
VLMXErrorCode error = vlmxParseString(tensorName, in[1]) ;
if (error != VLMXE_Success) {
vlmxError(error, "NAME is not a string.") ;
}
next = 2 ;
}
else {
vlmxError(VLMXE_IllegalArgument, "Unknown COMMAND.") ;
}
// optional arguments
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_prefix : {
if (!vlmxIsString(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "PREFIX is not a string.") ;
}
char str [512] ;
mxGetString (optarg, str, sizeof(str)/sizeof(str[0])) ;
prefix = str ;
break ;
}
case opt_verbose :
++ verbosity ;
break ;
case opt_inplace :
inplace = true ;
break ;
}
}
switch (command) {
case init:
{
(verbosity >= 2) && mexPrintf("vl_tmove: command 'init'\n") ;
      // Initialize shared space. mexInit() may throw a MEX error;
// the auto_ptr should avoid a leak in this case.
std::auto_ptr<SharedTensorSpace> sharedSpace(new SharedTensorSpace()) ;
sharedSpace->mexInit(arg) ;
// Initialize the pool, including attaching the shared space.
// Now the shared space is owned by the process pool.
error = processPool.init(prefix, labIndex - 1, numLabs, sharedSpace.release()) ;
if (error != vl::VLE_Success) {
mexErrMsgTxt("Could not initialize connections to other MATLAB labs.") ;
}
// At this point, sharedSpace is handled by the ProcessPool thread,
// so we interact with it indirectly
break ;
}
case stats :
(verbosity >= 2) && mexPrintf("vl_tmove: command 'stats'\n") ;
processPool.mexPrint() ;
break ;
case push :
(verbosity >= 2) && mexPrintf("vl_tmove: command 'push' on tensor '%s'%s\n", tensorName.c_str(), inplace?" (inplace)":"") ;
processPool.mexPush(tensorName, arg, inplace) ;
break ;
case pull :
(verbosity >= 2) && mexPrintf("vl_tmove: command 'pull' on tensor '%s'%s\n", tensorName.c_str(),
inplace?" (inplace)":"") ;
out[0] = processPool.mexPull(tensorName, inplace) ;
break ;
case reset :
(verbosity >= 2) && mexPrintf("vl_tmove: command 'reset'\n") ;
processPool.shutdown() ; // gracefully (wait for others to finish)
processPool.finalize() ; // no matter what
break ;
}
}
/* ------------------------- next source file ------------------------- */
// Optimize nchw2nhwc and nhwc2nchw with tiling and shared memory.
// First, the H and W dims are combined, so input and output are treated as 3D tensors.
// Second, the matrix is classified as either large or narrow, which determines the tile size.
// Reason: tiling through shared memory avoids uncoalesced global memory access.
// The tiled kernel has two stages, load-to-shm and write-to-output.
// load-to-shm: threads in a thread block cooperatively load an input tile into shared memory.
// write-to-output: threads in a thread block cooperatively write the shared-memory tile to the output.
// Because the transpose happens through shared memory, accesses to both input and output can be coalesced.
// SimpleTransposeKernel is the fallback for small matrices.
template <typename T>
__global__ void SimpleTransposeKernel(const size_t size, const T *input, const size_t *input_shape,
const size_t *input_axis, const size_t shape_size, T *output) {
size_t pos_size;
size_t temp_pos;
size_t newpos;
size_t newpos_size;
size_t pos_array[4];
// for example 4-D: pos = posArray[0] * input_shape[1] * input_shape[2] * input_shape[3] +
// posArray[1] * input_shape[2] * input_shape[3] +
// posArray[2] * input_shape[3] +
// posArray[3]
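  // Worked example (illustrative, not in the original): with input_shape = {2, 3, 4, 5}
  // and pos = 50, the decomposition below yields pos_array = {0, 2, 2, 0}, and indeed
  // 0*60 + 2*20 + 2*5 + 0 = 50.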
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
temp_pos = pos;
pos_size = size / input_shape[0]; // C * H * W
pos_array[0] = temp_pos / pos_size; // i / (CHW)
for (size_t i = 1; i < shape_size; i++) {
temp_pos -= pos_array[i - 1] * pos_size;
pos_size = pos_size / input_shape[i];
pos_array[i] = temp_pos / pos_size;
}
newpos = pos_array[input_axis[shape_size - 1]];
newpos_size = 1;
for (int64_t j = shape_size - 2; j >= 0; j--) {
newpos_size *= input_shape[input_axis[j + 1]];
newpos += pos_array[input_axis[j]] * newpos_size;
}
output[newpos] = *(input + pos);
}
return;
}
__forceinline__ __device__ int TensorIdxToOneDimIdx(int ndims, const int *idx, const int *dims) {
int flat_idx = idx[0];
for (int i = 1; i < ndims; i++) {
flat_idx = flat_idx * dims[i] + idx[i];
}
return flat_idx;
}
__forceinline__ __device__ void OneDimIdxToTensorIdx(int ndims, int idx, const int *dims, int *out_tensor_idx) {
for (int i = ndims - 1; i >= 0; i--) {
int new_idx = idx / dims[i];
out_tensor_idx[i] = idx - dims[i] * new_idx;
idx = new_idx;
}
}
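// Round-trip example (illustrative, not in the original): with ndims = 3 and
// dims = {2, 3, 4}, OneDimIdxToTensorIdx maps the flat index 11 to {0, 2, 3}
// (since (0*3 + 2)*4 + 3 == 11), and TensorIdxToOneDimIdx maps {0, 2, 3} back to 11.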
template <typename T>
__global__ void Swap3DTensorLast2DimKernel_shared(const T *input, int NumThreads, int TileHeight, int TileWidth,
int input_dims_0, int input_dims_1, int input_dims_2, T *output) {
extern __shared__ unsigned char sdata_uchar[];
// shm_tile[TileHeight][TileWidth + 1]: to avoid bank conflict in write-to-output period
T *shm_tile = reinterpret_cast<T*>(sdata_uchar);
int NumRowsPerLoadLoop = NumThreads / TileWidth; // the number of shm rows that all threads can load into shm once
int NumColsPerWriteLoop =
NumThreads / TileHeight; // the number of shm cols that all threads can write into output once
int load_thread_num_align = NumRowsPerLoadLoop * TileWidth; // use align num threads in load-to-shm period
int write_thread_num_align = NumColsPerWriteLoop * TileHeight; // use align num threads in write-to-output period
int tid = threadIdx.x;
int input_dims[3] = {input_dims_0, input_dims_1, input_dims_2};
int output_dims[3] = {input_dims[0], input_dims[2], input_dims[1]};
int input_dims_in_tiles[3] = {input_dims[0], (input_dims[1] + TileHeight - 1) / TileHeight,
(input_dims[2] + TileWidth - 1) / TileWidth};
int input_tile_idx[3];
OneDimIdxToTensorIdx(3, blockIdx.x, input_dims_in_tiles, input_tile_idx);
int input_tile_origin[3] = {input_tile_idx[0], input_tile_idx[1] * TileHeight, input_tile_idx[2] * TileWidth};
int input_block_start_idx = TensorIdxToOneDimIdx(3, input_tile_origin, input_dims); // input idx of this thread block
bool full_tile = true;
int tile_width = TileWidth;
// Only the last row or column may not have the full size
// boundary process
if (input_tile_idx[2] == input_dims_in_tiles[2] - 1) {
tile_width = input_dims[2] - (input_dims_in_tiles[2] - 1) * TileWidth;
full_tile &= false;
}
int tile_height = TileHeight;
if (input_tile_idx[1] == input_dims_in_tiles[1] - 1) {
tile_height = input_dims[1] - (input_dims_in_tiles[1] - 1) * TileHeight;
full_tile &= false;
}
// load-to-shm: each block load input data into shared mem(loop)
if (tid < load_thread_num_align) {
// Map task blocks to thread blocks.
// organize threads to n*TileWidth
    int shm_row_id = tid / TileWidth;  // shm_row_id, also the block row_id of input
    int shm_col_id = tid % TileWidth;  // shm_col_id, also the block col_id of input
int input_idx = input_block_start_idx + shm_row_id * input_dims[2] + shm_col_id; // the input idx of this thread
int input_step = NumRowsPerLoadLoop * input_dims[2];
    if (full_tile) {  // this thread block is responsible for an inner (full) tile
#pragma unroll
for (int row_id = shm_row_id; row_id < (TileHeight);
row_id += NumRowsPerLoadLoop) { // move to the next pass, loop
// shm_tile[row_id][shm_col_id]
shm_tile[row_id * (TileWidth + 1) + shm_col_id] =
input[input_idx]; // each thread load one input data into shared mem
input_idx += input_step; // calculate the next input idx this thread should load
}
    } else {  // boundary case: this thread block is responsible for an edge tile
if (shm_col_id < tile_width) {
for (int row_id = shm_row_id; row_id < (tile_height); row_id += NumRowsPerLoadLoop) {
// shm_tile[row_id][shm_col_id]
shm_tile[row_id * (TileWidth + 1) + shm_col_id] = input[input_idx];
input_idx += input_step;
}
}
}
}
__syncthreads();
// load-to-shm: end
// write-to-output: each block write shared mem into output(loop)
int output_tile_idx[3] = {input_tile_idx[0], input_tile_idx[2], input_tile_idx[1]};
int output_tile_origin[3] = {output_tile_idx[0], output_tile_idx[1] * TileWidth, output_tile_idx[2] * TileHeight};
int output_block_start_idx = TensorIdxToOneDimIdx(3, output_tile_origin, output_dims);
if (tid < write_thread_num_align) {
    // organize threads as TileHeight*n
int shm_col_id = tid / TileHeight; // shm_col_id, also the block row_id of output
int shm_row_id = tid % TileHeight; // shm_row_id, also the block col_id of output
int output_idx = output_block_start_idx + shm_col_id * output_dims[2] + shm_row_id;
int output_step = NumColsPerWriteLoop * output_dims[2];
if (full_tile) {
#pragma unroll
for (int col_id = shm_col_id; col_id < (TileWidth);
col_id += NumColsPerWriteLoop) { // move to the next pass, loop
// shm_tile[shm_row_id][col_id]
output[output_idx] = shm_tile[shm_row_id * (TileWidth + 1) + col_id]; // avoid bank conflict
output_idx += output_step;
}
} else {
if (shm_row_id < tile_height) {
for (int col_id = shm_col_id; col_id < (tile_width); col_id += NumColsPerWriteLoop) {
// shm_tile[shm_row_id][col_id];
output[output_idx] = shm_tile[shm_row_id * (TileWidth + 1) + col_id];
output_idx += output_step;
}
}
}
}
}
template <typename T>
void Swap3DTensorLast2Dim(const size_t size, const size_t shape_size, int *combined_dims, const T *d_input,
const size_t *input_shape, const size_t *input_axis, const size_t *d_input_shape,
const size_t *d_input_axis, T *d_output, cudaStream_t cuda_stream) {
static const int kMinDimensionToUseTiles = 16;
static const int kMinDimensionToUseRectTiles = 96;
auto short_side = std::min(combined_dims[1], combined_dims[2]);
auto long_side = std::max(combined_dims[1], combined_dims[2]);
  // Large matrix:
  // both dims are at least 16 and a CUDA block has enough shared memory for the tile.
constexpr int kTileSizeLargeMat = 32;
constexpr int kNumThreadsLargeMat = 256;
auto ShmemReqLargeMat = kTileSizeLargeMat * (kTileSizeLargeMat + 1) * sizeof(T);
bool is_large_matrix = short_side >= kMinDimensionToUseTiles && ShmemReqLargeMat <= SHARED_MEM_PER_BLOCK;
  // Narrow matrix:
  // one dim is smaller than 16 and the other is at least 96 (long and thin).
constexpr int kTileSizeNarrowMatLongSide = 128;
const int kTileSizeNarrowMatShortSide = short_side;
constexpr int kNumThreadsNarrowMat = kTileSizeNarrowMatLongSide;
auto ShmemReqNarrowMat = kTileSizeNarrowMatLongSide * (kTileSizeNarrowMatShortSide + 1) * sizeof(T);
bool is_narrow_matrix = short_side < kMinDimensionToUseTiles && long_side >= kMinDimensionToUseRectTiles &&
ShmemReqNarrowMat <= SHARED_MEM_PER_BLOCK;
if (is_large_matrix) {
int input_dims_in_tiles[3] = {combined_dims[0], (combined_dims[1] + kTileSizeLargeMat - 1) / kTileSizeLargeMat,
(combined_dims[2] + kTileSizeLargeMat - 1) / kTileSizeLargeMat};
int TotalNumTiles = input_dims_in_tiles[0] * input_dims_in_tiles[1] * input_dims_in_tiles[2];
Swap3DTensorLast2DimKernel_shared<T><<<TotalNumTiles, kNumThreadsLargeMat, ShmemReqLargeMat, cuda_stream>>>(
d_input, kNumThreadsLargeMat, kTileSizeLargeMat, kTileSizeLargeMat, combined_dims[0], combined_dims[1],
combined_dims[2], d_output);
} else if (is_narrow_matrix) {
int input_dims_in_tiles[3] = {combined_dims[0], 1,
(long_side + kTileSizeNarrowMatLongSide - 1) / kTileSizeNarrowMatLongSide};
int TotalNumTiles = input_dims_in_tiles[0] * input_dims_in_tiles[1] * input_dims_in_tiles[2];
int TileHeight, TileWidth;
if (long_side == combined_dims[1]) {
TileHeight = kTileSizeNarrowMatLongSide;
TileWidth = short_side;
} else {
TileHeight = short_side;
TileWidth = kTileSizeNarrowMatLongSide;
}
Swap3DTensorLast2DimKernel_shared<T><<<TotalNumTiles, kNumThreadsNarrowMat, ShmemReqNarrowMat, cuda_stream>>>(
d_input, kNumThreadsNarrowMat, TileHeight, TileWidth, combined_dims[0], combined_dims[1], combined_dims[2],
d_output);
} else {
SimpleTransposeKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, d_input, d_input_shape, d_input_axis,
shape_size, d_output);
}
return;
}
// specific for NHWC -> NCHW
template <typename T>
void CalNHWC2NCHWInterface(const size_t size, const size_t shape_size, const T *d_input, const size_t *input_shape,
const size_t *input_axis, const size_t *d_input_shape, const size_t *d_input_axis,
T *d_output, cudaStream_t cuda_stream) {
int combined_dims[3];
combined_dims[0] = input_shape[0]; // N
combined_dims[1] = input_shape[1]; // HW
for (unsigned int i = 2; i < shape_size - 1; i++) {
combined_dims[1] *= input_shape[i];
}
combined_dims[2] = input_shape[shape_size - 1]; // C
Swap3DTensorLast2Dim(size, shape_size, combined_dims, d_input, input_shape, input_axis, d_input_shape, d_input_axis,
d_output, cuda_stream);
}
// specific for NCHW -> NHWC
template <typename T>
void CalNCHW2NHWCInterface(const size_t size, const size_t shape_size, const T *d_input, const size_t *input_shape,
const size_t *input_axis, const size_t *d_input_shape, const size_t *d_input_axis,
T *d_output, cudaStream_t cuda_stream) {
int combined_dims[3];
combined_dims[0] = input_shape[0]; // N
combined_dims[1] = input_shape[1]; // C
combined_dims[2] = input_shape[2]; // HW
for (unsigned int i = 3; i < shape_size; ++i) {
combined_dims[2] *= input_shape[i];
}
Swap3DTensorLast2Dim(size, shape_size, combined_dims, d_input, input_shape, input_axis, d_input_shape, d_input_axis,
d_output, cuda_stream);
}
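// Illustrative host-side usage sketch (not part of the original file; the function name
// and the 2x3x4x5 shape are made up for the example). The tiled path reads the host-side
// input_shape to build combined_dims, while the SimpleTransposeKernel fallback reads the
// device-side copies, so both host and device shape/axis arrays must be provided.
template <typename T>
void ExampleNCHW2NHWC(const T *d_in, T *d_out, cudaStream_t stream) {
  const size_t shape[4] = {2, 3, 4, 5};  // N, C, H, W
  const size_t axis[4] = {0, 2, 3, 1};   // NCHW -> NHWC permutation
  const size_t size = 2 * 3 * 4 * 5;
  size_t *d_shape = nullptr;
  size_t *d_axis = nullptr;
  cudaMalloc(&d_shape, 4 * sizeof(size_t));
  cudaMalloc(&d_axis, 4 * sizeof(size_t));
  cudaMemcpyAsync(d_shape, shape, 4 * sizeof(size_t), cudaMemcpyHostToDevice, stream);
  cudaMemcpyAsync(d_axis, axis, 4 * sizeof(size_t), cudaMemcpyHostToDevice, stream);
  CalNCHW2NHWCInterface<T>(size, 4, d_in, shape, axis, d_shape, d_axis, d_out, stream);
  cudaStreamSynchronize(stream);
  cudaFree(d_shape);
  cudaFree(d_axis);
}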
template void CalNHWC2NCHWInterface<double>(const size_t size, const size_t shape_size, const double *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, double *d_output,
cudaStream_t cuda_stream);
template void CalNHWC2NCHWInterface<float>(const size_t size, const size_t shape_size, const float *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, float *d_output,
cudaStream_t cuda_stream);
template void CalNHWC2NCHWInterface<half>(const size_t size, const size_t shape_size, const half *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, half *d_output,
cudaStream_t cuda_stream);
template void CalNHWC2NCHWInterface<int>(const size_t size, const size_t shape_size, const int *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, int *d_output,
cudaStream_t cuda_stream);
template void CalNHWC2NCHWInterface<int64_t>(const size_t size, const size_t shape_size, const int64_t *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, int64_t *d_output,
cudaStream_t cuda_stream);
template void CalNCHW2NHWCInterface<double>(const size_t size, const size_t shape_size, const double *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, double *d_output,
cudaStream_t cuda_stream);
template void CalNCHW2NHWCInterface<float>(const size_t size, const size_t shape_size, const float *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, float *d_output,
cudaStream_t cuda_stream);
template void CalNCHW2NHWCInterface<half>(const size_t size, const size_t shape_size, const half *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, half *d_output,
cudaStream_t cuda_stream);
template void CalNCHW2NHWCInterface<int>(const size_t size, const size_t shape_size, const int *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, int *d_output,
cudaStream_t cuda_stream);
template void CalNCHW2NHWCInterface<int64_t>(const size_t size, const size_t shape_size, const int64_t *d_input,
const size_t *input_shape, const size_t *input_axis,
const size_t *d_input_shape, const size_t *d_input_axis, int64_t *d_output,
cudaStream_t cuda_stream);
/* ------------------------- next source file ------------------------- */
namespace cuHE {
// Pre-computation
static ZZ* crtPrime; // decreasing?
static ZZ* coeffModulus; // decreasing
void genCrtPrimes() {
int pnum = param.numCrtPrime;
crtPrime = new ZZ[pnum];
unsigned* h_p = new unsigned[pnum];
int logmid = param.logCoeffMin-(pnum-param.depth)*param.logCrtPrime;
// after cutting, fairly larger primes
ZZ temp = to_ZZ(0x1<<param.logCrtPrime)-1;
for (int i=0; i<=pnum-param.depth-1; i++) {
while (!ProbPrime(temp, 10))
temp --;
conv(h_p[i], temp);
crtPrime[i] = temp;
temp --;
}
// mid
ZZ tmid;
if (logmid != param.logCrtPrime)
tmid = to_ZZ(0x1<<logmid)-1;
else
tmid = temp;
while (!ProbPrime(tmid, 10))
tmid --;
conv(h_p[pnum-param.depth], tmid);
crtPrime[pnum-param.depth] = tmid;
// for cutting
if (param.logCoeffCut == logmid)
temp = tmid-1;
else if (param.logCoeffCut == param.logCrtPrime)
temp --;
else
temp = to_ZZ(0x1<<param.logCoeffCut)-1;
for (int i=pnum-param.depth+1; i<pnum; i++) {
while (!ProbPrime(temp, 10) || temp%to_ZZ(param.modMsg) != 1)
temp --;
conv(h_p[i], temp);
crtPrime[i] = temp;
temp --;
}
preload_crt_p(h_p, pnum);
delete [] h_p;
};
void genCoeffModuli() {
int d = param.depth;
int pnum = param.numCrtPrime;
coeffModulus = new ZZ[d];
for (int i=0; i<d; i++) {
coeffModulus[i] = 1;
for (int j=0; j<pnum-i; j++)
coeffModulus[i] *= crtPrime[j];
}
}
void genCrtInvPrimes() {
int pnum = param.numCrtPrime;
uint32 *h_pinv = new uint32[pnum*(pnum-1)/2];
ZZ temp;
for (int i=1; i<pnum; i++)
for (int j=0; j<i; j++)
conv(h_pinv[i*(i-1)/2+j], InvMod(crtPrime[i]%crtPrime[j], crtPrime[j]));
preload_crt_invp(h_pinv, pnum*(pnum-1)/2);
delete [] h_pinv;
}
static int* icrtLevel; // one int for each device
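// Inverse-CRT constants (summary inferred from genIcrtByLevel below, not part of the
// original comments): for level lvl with modulus Q = coeffModulus[lvl] = p_0 * ... * p_{k-1},
// a coefficient is reconstructed from its residues x_i = x mod p_i as
//   x = sum_i x_i * (Q/p_i) * ((Q/p_i)^{-1} mod p_i)   (mod Q),
// so per device the struct caches q = Q (in radix-2^32 words), qp[i] = Q/p_i and
// qpinv[i] = (Q/p_i)^{-1} mod p_i.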
static struct IcrtConst {
uint32 *q;
uint32 *qp;
uint32 *qpinv;
} **icrtConst;
void genIcrtByLevel(int lvl) {
int pnum = param._numCrtPrime(lvl);
int words_q = param._wordsCoeff(lvl);
int words_qp = param._wordsCoeff(lvl+1);
for (int dev=0; dev<numDevices(); dev++) {
CSC(cudaSetDevice(dev));
CSC(cudaMallocHost(&icrtConst[dev][lvl].q,
words_q*sizeof(uint32)));
CSC(cudaMallocHost(&icrtConst[dev][lvl].qp,
pnum*words_qp*sizeof(uint32)));
CSC(cudaMallocHost(&icrtConst[dev][lvl].qpinv,
pnum*sizeof(uint32)));
}
ZZ *z_qp = new ZZ[pnum];
for (int i=0; i<pnum; i++)
z_qp[i] = coeffModulus[lvl]/crtPrime[i];
for (int dev=0; dev<numDevices(); dev++) {
BytesFromZZ((uint8 *)icrtConst[dev][lvl].q,
coeffModulus[lvl], words_q*sizeof(uint32));
for (int i=0; i<pnum; i++) {
BytesFromZZ((uint8 *)(&icrtConst[dev][lvl].qp[words_qp*i]),
z_qp[i], words_qp*sizeof(uint32));
conv(icrtConst[dev][lvl].qpinv[i],
InvMod(z_qp[i]%crtPrime[i], crtPrime[i]));
}
}
delete [] z_qp;
};
void genIcrt() {
icrtConst = new IcrtConst *[numDevices()];
icrtLevel = new int[numDevices()];
for (int dev=0; dev<numDevices(); dev++) {
icrtConst[dev] = new IcrtConst[param.depth];
icrtLevel[dev] = -1;
}
for (int i=0; i<param.depth; i++)
genIcrtByLevel(i);
};
void loadIcrtConst(int lvl, int dev, cudaStream_t st) {
if (icrtLevel[dev] != lvl) {
int pnum = param._numCrtPrime(lvl);
int words_q = param._wordsCoeff(lvl);
int words_qp = param._wordsCoeff(lvl+1);
CSC(cudaSetDevice(dev));
load_icrt_M(icrtConst[dev][lvl].q, words_q, dev, st);
load_icrt_mi(icrtConst[dev][lvl].qp, words_qp*pnum, dev, st);
load_icrt_bi(icrtConst[dev][lvl].qpinv, pnum, dev, st);
icrtLevel[dev] = lvl;
}
};
void getCoeffModuli(ZZ* dst) {
for (int i=0; i<param.depth; i++)
dst[i] = coeffModulus[i];
}
void initCrt(ZZ* coeffModulus) {
genCrtPrimes();
genCoeffModuli();
genCrtInvPrimes();
genIcrt();
for (int dev=0; dev<numDevices(); dev++)
		loadIcrtConst(0, dev, 0);
getCoeffModuli(coeffModulus);
}
static uint64 **d_swap; // conversion buffer
static uint32 **d_hold; // intt result buffer
void initNtt() {
preload_ntt(param.nttLen);
// temporary result allocation
d_swap = new uint64 *[numDevices()];
d_hold = new uint32 *[numDevices()];
for (int dev=0; dev<numDevices(); dev++) {
cudaSetDevice(dev);
CSC(cudaMalloc(&d_swap[dev], param.nttLen*sizeof(uint64)));
CSC(cudaMalloc(&d_hold[dev],
param.numCrtPrime*param.nttLen*sizeof(uint32)));
}
}
uint32 *inttResult(int dev) {
return ptrNttHold(dev);
}
uint64 **ptrNttSwap() { return d_swap;}
uint32 **ptrNttHold() { return d_hold;}
uint64 *ptrNttSwap(int dev) { return d_swap[dev];}
uint32 *ptrNttHold(int dev) { return d_hold[dev];}
uint64 **d_barrett_ntt;
uint32 **d_barrett_crt;
uint32 **d_barrett_src;
void createBarrettTemporySpace() {
d_barrett_crt = new uint32*[numDevices()];
d_barrett_ntt = new uint64*[numDevices()];
d_barrett_src = new uint32*[numDevices()];
for (int dev=0; dev<numDevices(); dev++) {
cudaSetDevice(dev);
CSC(cudaMalloc(&d_barrett_crt[dev],
param.numCrtPrime*param.nttLen*sizeof(uint32)));
CSC(cudaMalloc(&d_barrett_ntt[dev],
param.numCrtPrime*param.nttLen*sizeof(uint64)));
CSC(cudaMalloc(&d_barrett_src[dev],
param.numCrtPrime*param.nttLen*sizeof(uint32)));
}
}
static uint32 *ptrBarrettCrt(int dev) { return d_barrett_crt[dev];}
static uint64 *ptrBarrettNtt(int dev) { return d_barrett_ntt[dev];}
static uint32 *ptrBarrettSrc(int dev) { return d_barrett_src[dev];}
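// Barrett reduction for polynomials (summary of the code below; my phrasing, not the
// original author's): with m the degree-n polynomial modulus and u = floor(x^(2n-1) / m)
// precomputed in setPolyModulus(), a product f of degree up to 2n-2 is reduced as
//   f mod m  ~  f - m * ((u * (f >> (n-1))) >> n),
// with the remaining small correction applied coefficient-wise by barrett_sub_mc.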
void setPolyModulus(ZZX m) {
// compute NTL type zm, zu
ZZ zq = coeffModulus[0];
ZZX zm = m;
ZZX zu;
SetCoeff(zu, 2*param.modLen-1, 1);
zu /= zm;
for (int i=0; i<=deg(zm); i++)
SetCoeff(zm, i, coeff(zm, i)%zq);
for (int i=0; i<=deg(zu); i++)
SetCoeff(zu, i, coeff(zu, i)%zq);
SetCoeff(zm, param.modLen, 0);
// prep m
CuCtxt c;
c.setLevel(0, 0, zm);
c.x2c();
preload_barrett_m_c(c.cRep(), param.numCrtPrime*param.crtLen*sizeof(uint32));
c.x2n();
preload_barrett_m_n(c.nRep(), param.numCrtPrime*param.nttLen*sizeof(uint64));
// prep u
CuCtxt cc;
cc.setLevel(0, 0, zu);
cc.x2n();
preload_barrett_u_n(cc.nRep(),
param.numCrtPrime*param.nttLen*sizeof(uint64));
};
void initBarrett(ZZX m) {
setPolyModulus(m);
createBarrettTemporySpace();
}
// Operations
void crt(uint32 *dst, uint32 *src, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
cudaSetDevice(dev);
crt<<<(param.modLen+63)/64, 64,
param._wordsCoeff(lvl)*sizeof(uint32)*64, st>>>(dst, src,
param._numCrtPrime(lvl), param._wordsCoeff(lvl), param.modLen,
param.crtLen);
CCE();
}
void icrt(uint32 *dst, uint32 *src, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
loadIcrtConst(lvl, dev, st);
CSC(cudaStreamSynchronize(st));
CSC(cudaSetDevice(dev));
icrt<<<(param.modLen+63)/64, 64, 0, st>>>(dst, src, param._numCrtPrime(lvl),
param._wordsCoeff(lvl), param._wordsCoeff(lvl+1), param.modLen,
param.crtLen);
CCE();
}
void crtAdd(uint32 *sum, uint32 *x, uint32 *y, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
cudaSetDevice(dev);
crt_add<<<(param.modLen+63)/64, 64, 0, st>>>(sum, x, y,
param._numCrtPrime(lvl), param.modLen, param.crtLen);
CCE();
}
void crtAddInt(uint32 *sum, uint32 *x, unsigned a, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
cudaSetDevice(dev);
crt_add_int<<<(param._numCrtPrime(lvl)+63)/64, 64, 0, st>>>(sum, x, a,
param._numCrtPrime(lvl), param.crtLen);
CCE();
}
void crtAddNX1(uint32 *sum, uint32 *x, uint32 *scalar, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
cudaSetDevice(dev);
crt_add_nx1<<<(param.modLen+63)/64, 64, 0, st>>>(sum, x, scalar,
param._numCrtPrime(lvl), param.modLen, param.crtLen);
CCE();
}
void crtMulInt(uint32 *prod, uint32 *x, int a, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
cudaSetDevice(dev);
crt_mul_int<<<(param.numCrtPrime-lvl+63)/64, 64, 0, st>>>(prod, x, a,
param._numCrtPrime(lvl), param.crtLen);
CCE();
}
void crtModSwitch(uint32 *dst, uint32 *src, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
cudaSetDevice(dev);
modswitch<<<(param.modLen+63)/64, 64, 0, st>>>(dst, src,
param._numCrtPrime(lvl), param.modLen, param.crtLen, param.modMsg);
CCE();
}
//// single crt polynomial
void _ntt(uint64 *X, uint32 *x, int dev, cudaStream_t st) {
if (param.nttLen == 16384) {
ntt_1_16k_ext<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), x);
CCE();
ntt_2_16k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
ntt_3_16k<<<param.nttLen/512, 64, 0, st>>>(X, ptrNttSwap(dev));
CCE();
}
else if (param.nttLen == 32768) {
ntt_1_32k_ext<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), x);
CCE();
ntt_2_32k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
ntt_3_32k<<<param.nttLen/512, 64, 0, st>>>(X, ptrNttSwap(dev));
CCE();
}
else if (param.nttLen == 65536) {
ntt_1_64k_ext<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), x);
CCE();
ntt_2_64k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
ntt_3_64k<<<param.nttLen/512, 64, 0, st>>>(X, ptrNttSwap(dev));
CCE();
}
}
void _nttw(uint64 *X, uint32 *x, int coeffwords, int relinIdx, int dev,
cudaStream_t st) {
if (param.nttLen == 16384) {
ntt_1_16k_ext_block<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), x,
param.logRelin, relinIdx, coeffwords);
CCE();
ntt_2_16k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
ntt_3_16k<<<param.nttLen/512, 64, 0, st>>>(X, ptrNttSwap(dev));
CCE();
}
else if (param.nttLen == 32768) {
ntt_1_32k_ext_block<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), x,
param.logRelin,relinIdx, coeffwords);
CCE();
ntt_2_32k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
ntt_3_32k<<<param.nttLen/512, 64, 0, st>>>(X, ptrNttSwap(dev));
CCE();
}
else if (param.nttLen == 65536) {
ntt_1_64k_ext_block<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), x,
param.logRelin,relinIdx, coeffwords);
CCE();
ntt_2_64k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
ntt_3_64k<<<param.nttLen/512, 64, 0, st>>>(X, ptrNttSwap(dev));
CCE();
}
}
// !!! x has length of param.nttLen
void _intt(uint32 *x, uint64 *X, int crtidx, int dev, cudaStream_t st) {
if (param.nttLen == 16384) {
intt_1_16k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), X);
CCE();
ntt_2_16k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
intt_3_16k_modcrt<<<param.nttLen/512, 64, 0, st>>>(x, ptrNttSwap(dev),
crtidx);
CCE();
}
else if (param.nttLen == 32768) {
intt_1_32k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), X);
CCE();
ntt_2_32k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
intt_3_32k_modcrt<<<param.nttLen/512, 64, 0, st>>>(x, ptrNttSwap(dev),
crtidx);
CCE();
}
else if (param.nttLen == 65536) {
intt_1_64k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev), X);
CCE();
ntt_2_64k<<<param.nttLen/512, 64, 0, st>>>(ptrNttSwap(dev));
CCE();
intt_3_64k_modcrt<<<param.nttLen/512, 64, 0, st>>>(x, ptrNttSwap(dev),
crtidx);
CCE();
}
}
// all crt polynomials
// ntt
void ntt(uint64 *X, uint32 *x, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
for (int i=0; i<param._numCrtPrime(lvl); i++)
_ntt(X+i*param.nttLen, x+i*param.crtLen, dev, st);
}
void nttw(uint64 *X, uint32 *x, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
for (int i=0; i<param._numEvalKey(lvl); i++)
_nttw(X+i*param.nttLen, x, param._wordsCoeff(lvl), i, dev, st);
}
// intt holding result
void inttHold(uint64 *X, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
for (int i=0; i<param._numCrtPrime(lvl); i++)
_intt(ptrNttHold(dev)+i*param.nttLen, X+i*param.nttLen, i, dev, st);
}
// intt without barrett copy result, x has param.nttLen
void inttDoubleDeg(uint32 *x, uint64 *X, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
for (int i=0; i<param._numCrtPrime(lvl); i++)
_intt(ptrNttHold(dev)+i*param.nttLen, X+i*param.nttLen, i, dev, st);
CSC(cudaMemcpyAsync(x, ptrNttHold(dev),
param._numCrtPrime(lvl)*param.nttLen*sizeof(uint32),
cudaMemcpyDeviceToDevice, st));
}
// intt without barrett copy result, x has param.crtLen
void intt(uint32 *x, uint64 *X, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
for (int i=0; i<param._numCrtPrime(lvl); i++) {
_intt(ptrNttHold(dev)+i*param.nttLen, X+i*param.nttLen, i, dev, st);
CSC(cudaMemcpyAsync(x+i*param.crtLen, ptrNttHold(dev)+i*param.nttLen,
param.crtLen*sizeof(uint32), cudaMemcpyDeviceToDevice, st));
}
}
// intt with barrett, x has param.crtLen
void inttMod(uint32 *x, uint64 *X, int logq, int dev, cudaStream_t st) {
int lvl = param._getLevel(logq);
for (int i=0; i<param._numCrtPrime(lvl); i++)
_intt(ptrNttHold(dev)+i*param.nttLen, X+i*param.nttLen, i, dev, st);
barrett(x, lvl, dev, st);
}
void nttMul(uint64 *z, uint64 *y, uint64 *x, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
ntt_mul<<<(param.nttLen+63)/64, 64, 0, st>>>(z, y, x,
param._numCrtPrime(lvl), param.nttLen);
}
void nttMulNX1(uint64 *z, uint64 *x, uint64 *scalar, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
ntt_mul_nx1<<<(param.nttLen+63)/64, 64, 0, st>>>(z, x, scalar,
param._numCrtPrime(lvl), param.nttLen);
}
void nttAdd(uint64 *z, uint64 *y, uint64 *x, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
ntt_add<<<(param.nttLen+63)/64, 64, 0, st>>>(z, x, y,
param._numCrtPrime(lvl), param.nttLen);
}
void nttAddNX1(uint64 *z, uint64 *x, uint64 *scalar, int logq, int dev,
cudaStream_t st) {
int lvl = param._getLevel(logq);
ntt_add_nx1<<<(param.nttLen+63)/64, 64, 0, st>>>(z, x, scalar,
param._numCrtPrime(lvl), param.nttLen);
}
void barrett(uint32 *dst, uint32 *src, int lvl, int dev, cudaStream_t st) {
cudaSetDevice(dev);
uint32 *ptrCrt = ptrBarrettCrt(dev);
uint64 *ptrNtt = ptrBarrettNtt(dev);
uint32 *ptrSrc = ptrBarrettSrc(dev);
CSC(cudaMemcpyAsync(ptrSrc, src,
param._numCrtPrime(lvl)*param.nttLen*sizeof(uint32),
cudaMemcpyDeviceToDevice, st));
// ptrSrc = f, deg = 2n-2
for (int i=0; i<param._numCrtPrime(lvl); i++)
_ntt(ptrNtt+i*param.nttLen, ptrSrc+i*param.nttLen+param.modLen-1, dev, st);
// ptrNtt = f>>(n-1), deg = n-1
barrett_mul_un<<<(param.nttLen+63)/64, 64, 0, st>>>
(ptrNtt, param._numCrtPrime(lvl), param.nttLen);
inttDoubleDeg(ptrCrt, ptrNtt, param._logCoeff(lvl), dev, st);
// ptrCrt = u * f>>(n-1), deg = 2n-2
for (int i=0; i<param._numCrtPrime(lvl); i++)
CSC(cudaMemsetAsync(ptrCrt+i*param.nttLen, 0, param.modLen*sizeof(uint32),
st));
// ptrCrt = u*f>>(2n-1)<<n
for (int i=0; i<param._numCrtPrime(lvl); i++)
_ntt(ptrNtt+i*param.nttLen, ptrCrt+i*param.nttLen+param.modLen, dev, st);
// ptrNtt = (u * f>>(n-1))>>n = u*f>>(2n-1), deg = n-2
barrett_mul_mn<<<(param.nttLen+63)/64, 64, 0, st>>>
(ptrNtt, param._numCrtPrime(lvl), param.nttLen);
// ptrNtt = (m-x^n) * (u * f>>(n-1))>>n, deg = 2n-3
barrett_sub_1<<<(param.modLen+63)/64, 64, 0, st>>>
(ptrSrc, ptrCrt, param._numCrtPrime(lvl), param.modLen, param.nttLen);
// ptrSrc = f - (u*f>>(2n-1))<<n
inttDoubleDeg(ptrCrt, ptrNtt, param._logCoeff(lvl), dev, st);
// ptrCrt = (m-x^n) * (u * f>>(2n-1)), deg = 2n-3
barrett_sub_2<<<(param.nttLen+63)/64, 64, 0, st>>>
(ptrSrc, ptrCrt, param._numCrtPrime(lvl), param.nttLen);
// ptrSrc = f - (m*u*f)>>(2n-1), deg = n
barrett_sub_mc<<<(param.nttLen+63)/64, 64,
param._numCrtPrime(lvl)*sizeof(uint32), st>>>(ptrSrc,
param._numCrtPrime(lvl), param.modLen, param.crtLen, param.nttLen);
// ptrSrc = ptrSrc - m, deg = n-1
for (int i=0; i<param._numCrtPrime(lvl); i++)
CSC(cudaMemcpyAsync(dst+i*param.crtLen, ptrSrc+i*param.nttLen,
param.crtLen*sizeof(uint32), cudaMemcpyDeviceToDevice, st));
}
void barrett(uint32 *dst, int lvl, int dev, cudaStream_t st) {
barrett(dst, inttResult(dev), lvl, dev, st);
}
} // namespace cuHE
/* ------------------------- next source file ------------------------- */
// On pytorch 1.10 and CUDA 10.2, I get compilation errors on torch/csrc/api/include/torch/nn/cloneable.h
// So we'll only include torch/python.h instead of torch/extension.h
// Similar to https://github.com/getkeops/keops/blob/3efd428b55c724b12f23982c06de00bc4d02d903/pykeops/torch_headers.h.in#L8
// #include <torch/extension.h>
#include <torch/python.h>
#include <ATen/cuda/CUDAContext.h> // For getCurrentCUDAStream
#include <THC/THCAtomics.cuh> // For atomicAdd on complex
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/util/complex.h> // For scalar_value_type
#include "map.h" // For the MAP macro, i.e. for_each over the arguments
#ifndef ITEMS_PER_THREAD_SYM_FWD_VALUES
#define ITEMS_PER_THREAD_SYM_FWD_VALUES {2, 4, 8, 16, 32, 32, 32, 64, 64, 64}
#endif
#ifndef MAX_BLOCK_SIZE_VALUE
#define MAX_BLOCK_SIZE_VALUE 256
#endif
#ifndef ITEMS_PER_THREAD_SYM_BWD_VALUE
#define ITEMS_PER_THREAD_SYM_BWD_VALUE 32
#endif
static constexpr int ITEMS_PER_THREAD_FWD = 64;
static constexpr int ITEMS_PER_THREAD_BWD = 32;
static constexpr int ITEMS_PER_THREAD_SYM_FWD[] = ITEMS_PER_THREAD_SYM_FWD_VALUES;
static constexpr int MAX_BLOCK_SIZE = MAX_BLOCK_SIZE_VALUE;
static constexpr int ITEMS_PER_THREAD_SYM_BWD = ITEMS_PER_THREAD_SYM_BWD_VALUE;
template <typename T, size_t N>
using CudaAcsr = at::GenericPackedTensorAccessor<T, N, at::RestrictPtrTraits, int32_t>;
constexpr __host__ __device__ int div_up_const(int a, int b) { return (a + b - 1) / b; }
__host__ __device__ static inline int div_up(int a, int b) { return (a + b - 1) / b;}
template<typename scalar_t>
__device__ __forceinline__ void initalize_shared_mem(scalar_t mem[], int size) {
// Assume that block only uses x and y coordinates, not z coordinate
for (int t = threadIdx.x + threadIdx.y * blockDim.x; t < size; t += blockDim.x * blockDim.y) {
mem[t] = 0;
}
}
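// What the forward kernel below computes (illustrative summary, not from the original
// comments): for each batch index b and each evaluation point z[l],
//   out[b][l] = sum_{n=0}^{N-1} v[b][n] / (z[l] - w[b][n]),
// i.e. a Cauchy matrix-vector product. Each thread block handles one batch index and a
// strip of blockDim.y consecutive l values; the N terms are split across blockDim.x
// threads and combined with a warp shuffle reduction.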
template <typename scalar_t, int log_N>
__global__ void cauchy_mult_fwd_cuda_kernel(CudaAcsr<scalar_t, 2> v,
CudaAcsr<scalar_t, 1> z,
CudaAcsr<scalar_t, 2> w,
CudaAcsr<scalar_t, 2> out,
int L) {
constexpr int N = 1 << log_N;
constexpr int blockDimx = div_up_const(N, ITEMS_PER_THREAD_FWD);
constexpr int blockDimy = MAX_BLOCK_SIZE / blockDimx;
// We just want a shared array:
// __shared__ scalar_t s_b[16];
// But it doesn't work for complex: https://github.com/pytorch/pytorch/issues/39270
// So we declare a char array and cast it.
// The casting is subtle: https://stackoverflow.com/questions/12692310/convert-array-to-two-dimensional-array-by-pointer
// TODO: generalize for N > 256
__shared__ char s_v_char[N * sizeof(scalar_t)];
scalar_t *s_v = (scalar_t *)&s_v_char;
__shared__ char s_w_char[N * sizeof(scalar_t)];
scalar_t *s_w = (scalar_t *)&s_w_char;
__shared__ char s_z_char[blockDimy * sizeof(scalar_t)];
scalar_t *s_z = (scalar_t *)&s_z_char;
__shared__ char s_out_char[blockDimy * sizeof(scalar_t)];
scalar_t *s_out = (scalar_t *)&s_out_char;
int batch_idx = blockIdx.x;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int L_idx = blockIdx.y * blockDim.y + threadIdx.y;
int L_block_start = blockIdx.y * blockDim.y;
for (int N_idx = threadIdx.x + threadIdx.y * blockDim.x; N_idx < N; N_idx += blockDim.x * blockDim.y) {
s_v[N_idx] = v[batch_idx][N_idx];
s_w[N_idx] = w[batch_idx][N_idx];
}
// for (int l = threadIdx.x + threadIdx.y * blockDim.x; l < blockDim.y && L_block_start + l < L; l += blockDim.x * blockDim.y) {
// s_z[l] = z[L_block_start + l];
// }
if (tid < blockDim.y && L_block_start + tid < L) {
s_z[tid] = z[L_block_start + tid];
}
// if (threadIdx.x == 0 && L_idx < L) {
// s_z[threadIdx.y] = z[L_idx];
// }
__syncthreads();
scalar_t result = 0;
if (L_idx < L) {
scalar_t t_z = s_z[threadIdx.y];
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD_FWD; ++item) {
int N_idx = item * blockDimx + threadIdx.x;
// result += s_v[N_idx] / (t_z - s_w[N_idx]);
scalar_t diff_inv = scalar_t(1.0) / (t_z - s_w[N_idx]);
result += s_v[N_idx] * diff_inv;
}
// #pragma unroll
// for (int N_idx = threadIdx.x; N_idx < N; N_idx += blockDimx) {
// result += s_v[N_idx] / (t_z - s_w[N_idx]);
// }
// TODO: this only works for N a power of 2
#pragma unroll
for (int offset = blockDimx / 2; offset > 0; offset /= 2) {
result += WARP_SHFL_DOWN(result, offset);
}
// if ((L_idx < L) && (threadIdx.x == 0)) {
// out[batch_idx][L_idx] = result;
// }
if (threadIdx.x == 0) {
s_out[threadIdx.y] = result;
}
}
__syncthreads();
if (tid < blockDim.y && L_block_start + tid < L) {
out[batch_idx][L_block_start + tid] = s_out[tid];
}
}
torch::Tensor cauchy_mult_fwd_cuda(torch::Tensor v,
torch::Tensor z,
torch::Tensor w) {
const int batch_size = v.size(0);
const int N = v.size(1);
const int L = z.size(0);
auto out = torch::empty({batch_size, L}, torch::dtype(v.dtype()).device(v.device()));
auto stream = at::cuda::getCurrentCUDAStream();
using scalar_t = c10::complex<float>;
const auto v_a = v.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto z_a = z.packed_accessor32<scalar_t, 1, at::RestrictPtrTraits>();
const auto w_a = w.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
auto out_a = out.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
int block_x = div_up(N, ITEMS_PER_THREAD_FWD);
dim3 block(block_x, MAX_BLOCK_SIZE / block_x);
dim3 grid(batch_size, div_up(L, block.y));
switch (N) {
case 64:
cauchy_mult_fwd_cuda_kernel<scalar_t, 6>
<<<grid, block, 0, stream>>>(v_a, z_a, w_a, out_a, L);
}
return out;
}
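// Gradients computed by the backward kernel below (illustrative summary): with
//   out[b][l] = sum_n v[b][n] / (z[l] - w[b][n]),
// the accumulated per-(b, n) gradients are
//   dv[b][n] = sum_l dout[b][l] / conj(z[l] - w[b][n])
//   dw[b][n] = conj(v[b][n]) * sum_l dout[b][l] / conj(z[l] - w[b][n])^2
// Each block handles one (batch, n) pair and reduces over l with BlockReduceSum.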
template <typename scalar_t>
__global__ void cauchy_mult_bwd_cuda_kernel(CudaAcsr<scalar_t, 2> v,
CudaAcsr<scalar_t, 1> z,
CudaAcsr<scalar_t, 2> w,
CudaAcsr<scalar_t, 2> dout,
CudaAcsr<scalar_t, 2> dv,
CudaAcsr<scalar_t, 2> dw,
int L) {
// We just want a shared array:
// __shared__ scalar_t s_b[16];
// But it doesn't work for complex: https://github.com/pytorch/pytorch/issues/39270
// So we declare a char array and cast it.
// The casting is subtle: https://stackoverflow.com/questions/12692310/convert-array-to-two-dimensional-array-by-pointer
// TODO: generalize for N > 256
__shared__ char s_v_char[sizeof(scalar_t)];
scalar_t *s_v = (scalar_t *)&s_v_char;
__shared__ char s_w_char[ sizeof(scalar_t)];
scalar_t *s_w = (scalar_t *)&s_w_char;
__shared__ char s_dv_char[C10_WARP_SIZE * sizeof(scalar_t)];
scalar_t *s_dv = (scalar_t *)&s_dv_char;
__shared__ char s_dw_char[C10_WARP_SIZE * sizeof(scalar_t)];
scalar_t *s_dw = (scalar_t *)&s_dw_char;
int batch_idx = blockIdx.x;
int N_idx = blockIdx.y;
int tid = threadIdx.x;
if (tid == 0) {
s_v[0] = v[batch_idx][N_idx];
s_w[0] = w[batch_idx][N_idx];
}
__syncthreads();
scalar_t t_v = s_v[0];
scalar_t t_w = s_w[0];
scalar_t t_dv = 0;
scalar_t t_dw = 0;
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD_BWD; ++item) {
int l = item * blockDim.x + threadIdx.x;
// if (l < L) {
scalar_t t_dout = dout[batch_idx][l];
scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(z[l] - t_w);
scalar_t prod = t_dout * diff_conj_inv;
t_dv += prod;
t_dw += prod * diff_conj_inv;
// }
}
// for (int item = 0; item < ITEMS_PER_THREAD_BWD / 2; ++item) {
// int l_1 = item * 2 * blockDim.x + threadIdx.x;
// int l_2 = (item * 2 + 1) * blockDim.x + threadIdx.x;
// scalar_t t_dout_1 = dout[batch_idx][l_1];
// scalar_t denom_1 = std::conj(z[l_1] - t_w);
// scalar_t t_dout_2 = dout[batch_idx][l_2];
// scalar_t denom_2 = std::conj(z[l_2] - t_w);
// scalar_t denom_prod_inv = scalar_t(1) / (denom_1 * denom_2);
// scalar_t denom_1_inv = denom_2 * denom_prod_inv;
// scalar_t denom_2_inv = denom_1 * denom_prod_inv;
// scalar_t prod_1 = t_dout_1 * denom_1_inv;
// scalar_t prod_2 = t_dout_2 * denom_2_inv;
// t_dv += prod_1 + prod_2;
// t_dw += prod_1 * denom_1_inv + prod_2 * denom_2_inv;
// t_dv += (t_dout_1 * denom_2 + t_dout_2 * denom_1) * denom_prod_inv;
// t_dw += (t_dout_1 * denom_2 * denom_2 + t_dout_2 * denom_1 * denom_1) * denom_prod_inv * denom_prod_inv;
// }
t_dv = at::native::cuda_utils::BlockReduceSum<scalar_t>(t_dv, s_dv);
t_dw = at::native::cuda_utils::BlockReduceSum<scalar_t>(t_dw, s_dw);
if (tid == 0) {
dv[batch_idx][N_idx] = t_dv;
dw[batch_idx][N_idx] = t_dw * std::conj(t_v);
}
}
std::tuple<torch::Tensor, torch::Tensor>
cauchy_mult_bwd_cuda(torch::Tensor v,
torch::Tensor z,
torch::Tensor w,
torch::Tensor dout) {
const int batch_size = v.size(0);
const int N = v.size(1);
const int L = z.size(0);
auto dv = torch::empty({batch_size, N}, torch::dtype(v.dtype()).device(v.device()));
auto dw = torch::empty({batch_size, N}, torch::dtype(w.dtype()).device(w.device()));
auto stream = at::cuda::getCurrentCUDAStream();
using scalar_t = c10::complex<float>;
const auto v_a = v.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto z_a = z.packed_accessor32<scalar_t, 1, at::RestrictPtrTraits>();
const auto w_a = w.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto dout_a = dout.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
auto dv_a = dv.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
auto dw_a = dw.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
  // Need to take the max, otherwise each block has fewer than one full warp, causing
  // at::native::cuda_utils::BlockReduceSum to produce a wrong result.
  // Otherwise we assume L > ITEMS_PER_THREAD_BWD * C10_WARP_SIZE.
int block_x = max(div_up(L, ITEMS_PER_THREAD_BWD), C10_WARP_SIZE);
// TODO: assume that L is a multiple of ITEMS_PER_THREAD_BWD
dim3 block(block_x);
dim3 grid(batch_size, N);
cauchy_mult_bwd_cuda_kernel<scalar_t>
<<<grid, block, 0, stream>>>(v_a, z_a, w_a, dout_a, dv_a, dw_a, L);
return std::make_tuple(dv, dw);
}
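// The "sym" kernels below exploit the fact that v and w come in conjugate pairs, of which
// only one element per pair is stored (illustrative derivation, consistent with the code
// but not part of the original comments):
//   v/(z - w) + conj(v)/(z - conj(w))
//     = 2 * (z*Re(v) - Re(v*conj(w))) / (z^2 - 2*z*Re(w) + |w|^2),
// which is why the kernel only loads Re(v), Re(w), |w|^2 and Re(v*conj(w)) into shared
// memory and multiplies the block result by 2 when writing the output.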
template <typename scalar_t, int log_N,
int items_per_thread=ITEMS_PER_THREAD_SYM_FWD[log_N - 1]>
__global__ void cauchy_mult_sym_fwd_cuda_kernel(CudaAcsr<scalar_t, 2> v,
CudaAcsr<scalar_t, 1> z,
CudaAcsr<scalar_t, 2> w,
CudaAcsr<scalar_t, 2> out,
int L) {
// Get the float type from the complex type
// https://github.com/pytorch/pytorch/blob/bceb1db885cafa87fe8d037d8f22ae9649a1bba0/aten/src/ATen/native/cpu/ReduceOpsKernel.cpp#L213
using float_t = typename at::scalar_value_type<scalar_t>::type;
constexpr int N = 1 << log_N;
constexpr int blockDimx = div_up_const(N, items_per_thread);
constexpr int blockDimy = MAX_BLOCK_SIZE / blockDimx;
// We just want a shared array:
// __shared__ scalar_t s_b[16];
// But it doesn't work for complex: https://github.com/pytorch/pytorch/issues/39270
// So we declare a char array and cast it.
// The casting is subtle: https://stackoverflow.com/questions/12692310/convert-array-to-two-dimensional-array-by-pointer
__shared__ float_t s_vr[N];
// __shared__ char s_w_char[N * sizeof(scalar_t)];
// scalar_t *s_w = (scalar_t *)&s_w_char;
__shared__ float_t s_wr[N];
__shared__ float_t s_wnorm[N];
__shared__ float_t s_vwconj_r[N];
__shared__ char s_z_char[blockDimy * sizeof(scalar_t)];
scalar_t *s_z = (scalar_t *)&s_z_char;
__shared__ char s_out_char[blockDimy * sizeof(scalar_t)];
scalar_t *s_out = (scalar_t *)&s_out_char;
int batch_idx = blockIdx.x;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int L_idx = blockIdx.y * blockDim.y + threadIdx.y;
int L_block_start = blockIdx.y * blockDim.y;
for (int N_idx = threadIdx.x + threadIdx.y * blockDim.x; N_idx < N; N_idx += blockDim.x * blockDim.y) {
scalar_t t_v = v[batch_idx][N_idx];
scalar_t t_w = w[batch_idx][N_idx];
s_vr[N_idx] = std::real(t_v);
// s_w[N_idx] = t_w;
s_wr[N_idx] = std::real(t_w);
s_wnorm[N_idx] = std::norm(t_w);
// s_vwconj_r[N_idx] = std::real(t_v) * std::real(t_w) + std::imag(t_v) * std::imag(t_w);
    // The compiler is able to optimize this, so the two versions give identical speed.
s_vwconj_r[N_idx] = std::real(t_v * std::conj(t_w));
}
if (tid < blockDim.y && L_block_start + tid < L) {
s_z[tid] = z[L_block_start + tid];
}
__syncthreads();
if (L_idx < L) {
scalar_t t_z = s_z[threadIdx.y];
scalar_t t_z_sq = t_z * t_z;
scalar_t result = 0;
#pragma unroll
// for (int item = 0; item < items_per_thread; ++item) {
// int N_idx = item * blockDimx + threadIdx.x;
// // int N_idx = item * blockDim.x + threadIdx.x;
// // scalar_t t_w = s_w[N_idx];
// // float t_vr = s_vr[N_idx];
// // scalar_t denom_inv = scalar_t(1.0) / ((t_z - t_w) * (t_z - std::conj(t_w)));
// // scalar_t denom_inv = scalar_t(1.0) / (t_z * t_z - 2 * t_z * std::real(t_w) + t_w * std::conj(t_w));
// // scalar_t denom_inv = scalar_t(1.0) / (t_z * t_z - 2 * t_z * std::real(t_w) + std::norm(t_w));
// // result += (t_z * std::real(t_v) - std::real(t_v) * std::real(t_w) - std::imag(t_v) * std::imag(t_w)) * denom_inv;
// scalar_t denom_inv = scalar_t(1.0) / (t_z_sq - 2 * t_z * s_wr[N_idx] + s_wnorm[N_idx]);
// result += (t_z * s_vr[N_idx] - s_vwconj_r[N_idx]) * denom_inv;
// // These next 2 lines assume that z is a root of unity
// // scalar_t denom_inv = scalar_t(1.0) / (t_z - 2 * std::real(t_w) + std::norm(t_w) * std::conj(t_z));
// // result += (std::real(t_v) - (std::real(t_v) * std::real(t_w) + std::imag(t_v) * std::imag(t_w)) * std::conj(t_z)) * denom_inv;
// }
for (int item = 0; item < items_per_thread / 2; ++item) {
int N_idx_1 = item * 2 * blockDimx + threadIdx.x;
int N_idx_2 = (item * 2 + 1) * blockDimx + threadIdx.x;
scalar_t denom_1 = (t_z_sq - 2 * t_z * s_wr[N_idx_1] + s_wnorm[N_idx_1]);
scalar_t nom_1 = (t_z * s_vr[N_idx_1] - s_vwconj_r[N_idx_1]);
scalar_t denom_2 = (t_z_sq - 2 * t_z * s_wr[N_idx_2] + s_wnorm[N_idx_2]);
scalar_t nom_2 = (t_z * s_vr[N_idx_2] - s_vwconj_r[N_idx_2]);
scalar_t denom_prod_inv = scalar_t(1) / (denom_1 * denom_2);
result += (nom_1 * denom_2 + nom_2 * denom_1) * denom_prod_inv;
}
// TODO: this only works for N a power of 2
#pragma unroll
for (int offset = blockDimx / 2; offset > 0; offset /= 2) {
result += WARP_SHFL_DOWN(result, offset);
}
if (threadIdx.x == 0) {
s_out[threadIdx.y] = result;
}
}
__syncthreads();
if (tid < blockDim.y && L_block_start + tid < L) {
out[batch_idx][L_block_start + tid] = 2 * s_out[tid];
}
}
torch::Tensor cauchy_mult_sym_fwd_cuda(torch::Tensor v,
torch::Tensor z,
torch::Tensor w) {
const int batch_size = v.size(0);
const int N = v.size(1);
const int L = z.size(0);
auto out = torch::empty({batch_size, L}, torch::dtype(v.dtype()).device(v.device()));
auto stream = at::cuda::getCurrentCUDAStream();
using scalar_t = c10::complex<float>;
const auto v_a = v.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto z_a = z.packed_accessor32<scalar_t, 1, at::RestrictPtrTraits>();
const auto w_a = w.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
auto out_a = out.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
int log_N = int(log2((double) N));
int block_x = div_up(N, ITEMS_PER_THREAD_SYM_FWD[log_N - 1]);
dim3 block(block_x, MAX_BLOCK_SIZE / block_x);
dim3 grid(batch_size, div_up(L, block.y));
switch (log_N) {
#define CASE_LOG_N(log_N_val) case log_N_val: \
cauchy_mult_sym_fwd_cuda_kernel<scalar_t, log_N_val> \
<<<grid, block, 0, stream>>>(v_a, z_a, w_a, out_a, L); break;
MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
}
#undef CASE_LOG_N
C10_CUDA_KERNEL_LAUNCH_CHECK();
return out;
}
template <typename scalar_t, bool check_L_boundary>
__global__ void cauchy_mult_sym_bwd_cuda_kernel(CudaAcsr<scalar_t, 2> v,
CudaAcsr<scalar_t, 1> z,
CudaAcsr<scalar_t, 2> w,
CudaAcsr<scalar_t, 2> dout,
CudaAcsr<scalar_t, 3> dv,
CudaAcsr<scalar_t, 3> dw,
int L,
int L_chunk_size) {
// We just want a shared array:
// __shared__ scalar_t s_b[16];
// But it doesn't work for complex: https://github.com/pytorch/pytorch/issues/39270
// So we declare a char array and cast it.
// The casting is subtle: https://stackoverflow.com/questions/12692310/convert-array-to-two-dimensional-array-by-pointer
// __shared__ char s_v_char[sizeof(scalar_t)];
// scalar_t *s_v = (scalar_t *)&s_v_char;
__shared__ char s_w_conj_char[ sizeof(scalar_t)];
scalar_t *s_w_conj = (scalar_t *)&s_w_conj_char;
__shared__ char s_dv_char[C10_WARP_SIZE * sizeof(scalar_t)];
scalar_t *s_dv = (scalar_t *)&s_dv_char;
__shared__ char s_dw_char[C10_WARP_SIZE * sizeof(scalar_t)];
scalar_t *s_dw = (scalar_t *)&s_dw_char;
int batch_idx = blockIdx.x;
int N_idx = blockIdx.y;
int L_chunk_idx = blockIdx.z;
int tid = threadIdx.x;
if (tid == 0) {
s_w_conj[0] = std::conj(w[batch_idx][N_idx]);
}
__syncthreads();
scalar_t t_w_conj = s_w_conj[0];
scalar_t t_w_conj_sq = t_w_conj * t_w_conj;
scalar_t t_dv = 0;
scalar_t t_dw = 0;
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD_SYM_BWD; ++item) {
int l = L_chunk_idx * L_chunk_size + item * blockDim.x + threadIdx.x;
scalar_t t_dout, t_z;
if (check_L_boundary) {
t_dout = l < L ? dout[batch_idx][l] : 0;
t_z = l < L ? z[l] : 1;
} else {// Not checking boundary can speed it up quite a bit, around 30%.
t_dout = dout[batch_idx][l];
t_z = z[l];
}
scalar_t denom_inv = scalar_t(1) / (std::norm(t_z) - 2 * std::real(t_z) * t_w_conj + t_w_conj_sq);
// auto dout_z_real = std::real(t_dout) * std::real(t_z) - std::imag(t_dout) * std::imag(t_z);
    // The compiler is able to optimize this, so the two versions give identical speed.
auto dout_z_real = std::real(t_dout * t_z);
scalar_t dv_nom = (dout_z_real - std::real(t_dout) * t_w_conj);
t_dv += dv_nom * denom_inv;
scalar_t t_z_sq = t_z * t_z;
// auto dout_z_sq_real = std::real(t_dout) * std::real(t_z_sq) - std::imag(t_dout) * std::imag(t_z_sq);
    // The compiler is able to optimize this, so the two versions give identical speed.
auto dout_z_sq_real = std::real(t_dout * t_z_sq);
scalar_t dw_nom = dout_z_sq_real - 2 * dout_z_real * t_w_conj + std::real(t_dout) * t_w_conj_sq;
t_dw += dw_nom * denom_inv * denom_inv;
}
t_dv = at::native::cuda_utils::BlockReduceSum<scalar_t>(t_dv, s_dv);
t_dw = at::native::cuda_utils::BlockReduceSum<scalar_t>(t_dw, s_dw);
if (tid == 0) {
dw[batch_idx][N_idx][L_chunk_idx] = 2 * t_dw * std::conj(v[batch_idx][N_idx]);
dv[batch_idx][N_idx][L_chunk_idx] = 2 * t_dv;
}
}
std::tuple<torch::Tensor, torch::Tensor>
cauchy_mult_sym_bwd_cuda(torch::Tensor v,
torch::Tensor z,
torch::Tensor w,
torch::Tensor dout) {
const int batch_size = v.size(0);
const int N = v.size(1);
const int L = z.size(0);
constexpr int MAX_BLOCK_SIZE = 1024;
constexpr int MAX_L_CHUNK_SIZE = ITEMS_PER_THREAD_SYM_BWD * MAX_BLOCK_SIZE;
const int n_L_chunks = div_up(L, MAX_L_CHUNK_SIZE);
auto dv = torch::empty({batch_size, N, n_L_chunks}, torch::dtype(v.dtype()).device(v.device()));
auto dw = torch::empty({batch_size, N, n_L_chunks}, torch::dtype(w.dtype()).device(w.device()));
auto stream = at::cuda::getCurrentCUDAStream();
using scalar_t = c10::complex<float>;
const auto v_a = v.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto z_a = z.packed_accessor32<scalar_t, 1, at::RestrictPtrTraits>();
const auto w_a = w.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto dout_a = dout.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
auto dv_a = dv.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto dw_a = dw.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
  // Each block needs to have a multiple of 32 threads, otherwise
  // at::native::cuda_utils::BlockReduceSum produces a wrong result.
// int block_x = max(div_up(L, ITEMS_PER_THREAD_SYM_BWD), C10_WARP_SIZE);
const int L_chunk_size = min(L, MAX_L_CHUNK_SIZE);
int block_x = div_up(L_chunk_size, ITEMS_PER_THREAD_SYM_BWD * C10_WARP_SIZE) * C10_WARP_SIZE;
bool check_L_boundary = L != block_x * ITEMS_PER_THREAD_SYM_BWD * n_L_chunks;
dim3 block(block_x);
dim3 grid(batch_size, N, n_L_chunks);
check_L_boundary
? cauchy_mult_sym_bwd_cuda_kernel<scalar_t, true>
<<<grid, block, 0, stream>>>(v_a, z_a, w_a, dout_a, dv_a, dw_a, L, L_chunk_size)
: cauchy_mult_sym_bwd_cuda_kernel<scalar_t, false>
<<<grid, block, 0, stream>>>(v_a, z_a, w_a, dout_a, dv_a, dw_a, L, L_chunk_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return std::make_tuple(dv.sum(-1), dw.sum(-1));
}
/* ------------------------- next source file ------------------------- */
texture<huffman_indx_t> huffman_indx_texture;
texture<huffman_code_t> huffman_code_texture;
texture<huffman_point_t> huffman_point_texture;
texture<window_t> context_range_texture;
texture<word> corpus_texture;
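// Illustrative summary of the two kernels below (not in the original comments): this is
// CBOW word2vec with hierarchical softmax. For a target word at corpus position i with
// context window `range`, the hidden vector is the sum of the 2*range context word
// vectors, and each Huffman tree node on the target word's path contributes
// -log sigmoid((2*code - 1) * <hidden, point_vector>) to the loss. The validation kernel
// accumulates this negative log-likelihood; the training kernel applies the SGD update
// with gradient sigmoid(<hidden, point_vector>) - code.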
__global__ void cbow_neural_network_async_validation_kernel(real* likelihood,
const real* word_distribution,const real* point_distribution,
const size_t corpus_size)
{
const unsigned int FEATURESIZE = blockDim.x;
	// forward propagation, exactly the same as in the training kernel below
extern __shared__ real out_layer_input[];
const unsigned int block_id = indx(blockIdx,gridDim);
if(block_id >= corpus_size) return;
const window_t range = tex1Dfetch(context_range_texture,block_id);
if(range == 0)
{
return;
}
const unsigned int thread_id = threadIdx.x;
// add up context predictor vector
real feature_i_mutable = 0;
huffman_point_t context_indx;
for(window_t i=1; i<= range; i++)
{
context_indx = tex1Dfetch(corpus_texture,block_id+i);
feature_i_mutable += word_distribution[context_indx*FEATURESIZE + thread_id];
context_indx = tex1Dfetch(corpus_texture,block_id-i);
feature_i_mutable += word_distribution[context_indx*FEATURESIZE + thread_id];
}
const real feature_i = feature_i_mutable;
	// the hidden activation is ready; now walk the target word's Huffman path
	// and accumulate the negative log-likelihood of each node's code
context_indx = tex1Dfetch(corpus_texture,block_id);
const huffman_indx_t start = tex1Dfetch(huffman_indx_texture, context_indx);
const huffman_indx_t end = tex1Dfetch(huffman_indx_texture, context_indx+1);
huffman_indx_t indicator = start;
real likelihood_aggregate = 0;
while(indicator < end)
{
const huffman_point_t point = tex1Dfetch(huffman_point_texture,indicator);
const real point_feature_i = point_distribution[point*FEATURESIZE + thread_id];
out_layer_input[thread_id] = feature_i*point_feature_i;
__syncthreads();
for (unsigned int i = FEATURESIZE/2; i >0; i >>= 1)
{
if(thread_id < i)
{
out_layer_input[thread_id] += out_layer_input[thread_id +i];
}
__syncthreads();
}
//compute negative loglikelihood in current corpus texture
const huffman_code_t code = tex1Dfetch(huffman_code_texture,indicator);
likelihood_aggregate += neg_log_sigmoid(out_layer_input[0]*(2*(real)code -1));
__syncthreads(); // every thread must finish reading out_layer_input[0] before the next iteration overwrites it
indicator++;
}
likelihood[block_id] = likelihood_aggregate;
}
__global__ void cbow_neural_network_async_kernel(real* word_distribution,real* point_distribution,
const size_t corpus_size,const size_t offset,const real learning_rate)
{
const unsigned int FEATURESIZE = blockDim.x;
// forward propagation, exactly the same as validation kernel
extern __shared__ real out_layer_input[];
const unsigned int block_id = indx(blockIdx,gridDim);
if(block_id >= corpus_size || block_id < offset) return;
const window_t range = tex1Dfetch(context_range_texture,block_id);
if(range == 0)
{
return;
}
const unsigned int thread_id = threadIdx.x;
// add up context predictor vector
real feature_i_mutable = 0;
huffman_point_t context_indx;
for(window_t i=1; i<= range; i++)
{
context_indx = tex1Dfetch(corpus_texture,block_id+i);
feature_i_mutable += word_distribution[context_indx*FEATURESIZE + thread_id];
context_indx = tex1Dfetch(corpus_texture,block_id-i);
feature_i_mutable += word_distribution[context_indx*FEATURESIZE + thread_id];
}
const real feature_i = feature_i_mutable;
// output the hidden activation
//compute gradient for each huffman point activation
context_indx = tex1Dfetch(corpus_texture,block_id);
const huffman_indx_t start = tex1Dfetch(huffman_indx_texture, context_indx);
const huffman_indx_t end = tex1Dfetch(huffman_indx_texture, context_indx+1);
real update_i = 0;
for(huffman_indx_t indicator = start; indicator < end; indicator++)
{
const huffman_point_t point = tex1Dfetch(huffman_point_texture,indicator);
const real point_feature_i = point_distribution[point*FEATURESIZE + thread_id];
out_layer_input[thread_id] = feature_i*point_feature_i;
__syncthreads();
for (unsigned int i = FEATURESIZE/2; i >0; i >>= 1)
{
if(thread_id < i)
{
out_layer_input[thread_id] += out_layer_input[thread_id +i];
}
__syncthreads();
}
const huffman_code_t code = tex1Dfetch(huffman_code_texture,indicator);
const real grad = sigmoid(out_layer_input[0]) - ((real)code);
point_distribution[point*FEATURESIZE + thread_id] = point_feature_i - learning_rate*grad*feature_i;
update_i += grad*point_feature_i;
__syncthreads(); // ensure out_layer_input[0] has been consumed by all threads before it is overwritten
}
update_i *= learning_rate;
for(window_t i=1; i<= range; i++)
{
context_indx = tex1Dfetch(corpus_texture,block_id+i);
word_distribution[context_indx*FEATURESIZE + thread_id] -= update_i;
}
for(window_t i=1; i<= range; i++)
{
context_indx = tex1Dfetch(corpus_texture,block_id-i);
word_distribution[context_indx*FEATURESIZE + thread_id] -= update_i;
}
}
__device__ float sigmoid(const real numbers)
{
return 1/(1+expf(-numbers));
}
__device__ real neg_log_sigmoid(const real f)
{
return logf(1+expf(-f));
}
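// Illustrative alternative (not from the original source): logf(1+expf(-f)) overflows
// to +inf when f is a large negative value. A numerically stable softplus-style variant
// that could be substituted for neg_log_sigmoid is sketched below.
__device__ real neg_log_sigmoid_stable(const real f)
{
// -log(sigmoid(f)) = log(1+exp(-f)) = max(-f,0) + log(1+exp(-|f|))
return f >= 0 ? log1pf(expf(-f)) : -f + log1pf(expf(f));
}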
__device__ indx_t indx( dim3 elem,dim3 blockD)
{
return elem.x + blockD.x*elem.y + blockD.x*blockD.y*elem.z;
}
environment::environment():
code_binder(huffman_code_texture),point_binder(huffman_point_texture),
indx_binder(huffman_indx_texture),corpus_binder(corpus_texture),
window_binder(context_range_texture),file_ptr_word_count(0) {}
environment::~environment()
{
code_binder.unbind();
point_binder.unbind();
indx_binder.unbind();
corpus_binder.unbind();
window_binder.unbind();
}
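// Illustrative launch sketch (not from the original source). The kernels above assume
// blockDim.x equals the embedding width FEATURESIZE, that this width is a power of two
// (the shared-memory tree reduction halves it each step), and that the dynamic shared
// memory holds one `real` per thread. All names below are assumptions for illustration.
static void launch_cbow_training(real* word_distribution_d, real* point_distribution_d,
size_t corpus_size, size_t offset, real learning_rate,
unsigned int feature_size, dim3 grid)
{
// one block per corpus position; indx() flattens the (possibly multi-dimensional) grid
cbow_neural_network_async_kernel<<<grid, feature_size, feature_size*sizeof(real)>>>(
word_distribution_d, point_distribution_d, corpus_size, offset, learning_rate);
}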
//---------------------------tests------------------------------------------
/*
* test kernels using global textures have to be defined here
*/
__global__ void t_corpus_texture_binding_kernel(word* out,size_t size)
{
size_t index = indx(blockIdx,gridDim);
if(index >= size) return;
word val = tex1Dfetch(corpus_texture,index);
out[index] = val;
}
__global__ void t_window_texture_binding_kernel(window_t* out,size_t size)
{
size_t index = indx(blockIdx,gridDim);
if(index >= size) return;
window_t val = tex1Dfetch(context_range_texture,index);
out[index] = val;
}
__global__ void t_huff_code_texture_binding_kernel(huffman_code_t* out,size_t size)
{
size_t index = indx(blockIdx,gridDim);
if(index >= size ) return;
huffman_code_t val = tex1Dfetch(huffman_code_texture,index);
out[index] = val;
}
__global__ void t_huff_point_texture_binding_kernel(huffman_point_t* out, size_t size)
{
size_t index = indx(blockIdx,gridDim);
if(index >= size ) return;
huffman_point_t val = tex1Dfetch(huffman_point_texture,index);
out[index] = val;
}
__global__ void t_huff_indx_texture_binding_kernel(huffman_indx_t* out, size_t size)
{
size_t index = indx(blockIdx,gridDim);
if(index >= size ) return;
huffman_indx_t val = tex1Dfetch(huffman_indx_texture,index);
out[index] = val;
}
__global__ void t_mid_layer_activation(real* midLayer,const real* word_distribution,const size_t corpus_size)
{
const unsigned int FEATURESIZE = blockDim.x;
// forward propagation, exactly the same as validation kernel
extern __shared__ real out_layer_input[];
const unsigned int block_id = indx(blockIdx,gridDim);
if(block_id >= corpus_size) return;
const window_t range = tex1Dfetch(context_range_texture,block_id);
if(range == 0)
{
return;
}
const unsigned int thread_id = threadIdx.x;
// add up context predictor vector( mid layer activation)
real feature_i = 0;
huffman_point_t context_indx;
for(window_t i=1; i<= range; i++)
{
context_indx = tex1Dfetch(corpus_texture,block_id+i);
feature_i += word_distribution[context_indx*FEATURESIZE + thread_id];
context_indx = tex1Dfetch(corpus_texture,block_id-i);
feature_i += word_distribution[context_indx*FEATURESIZE + thread_id];
}
// write out the hidden (mid layer) activation for inspection
midLayer[threadIdx.x + FEATURESIZE*block_id] = feature_i;
}
/*
* kernel implementing gradient_check
*/
__global__ void t_gradient_check_kernel(real* difference,const real* word_distribution,const real* point_distribution,
const size_t corpus_size,const size_t pertub_cord,const real epsilong)
{
const unsigned int FEATURESIZE = blockDim.x;
// forward propagation, exactly the same as validation kernel
extern __shared__ real out_layer_input[];
const unsigned int block_id = indx(blockIdx,gridDim);
if(block_id >= corpus_size) return;
const window_t range = tex1Dfetch(context_range_texture,block_id);
if(range == 0)
{
return;
}
const unsigned int thread_id = threadIdx.x;
// add up context predictor vector
real feature_i_mutable = 0;
huffman_point_t context_indx;
for(window_t i=1; i<= range; i++)
{
context_indx = tex1Dfetch(corpus_texture,block_id+i);
feature_i_mutable += word_distribution[context_indx*FEATURESIZE + thread_id];
context_indx = tex1Dfetch(corpus_texture,block_id-i);
feature_i_mutable += word_distribution[context_indx*FEATURESIZE + thread_id];
}
const real feature_i = feature_i_mutable;
// output the hidden activation
//compute gradient for each huffman point activation
context_indx = tex1Dfetch(corpus_texture,block_id);
const huffman_indx_t start = tex1Dfetch(huffman_indx_texture, context_indx);
const huffman_indx_t end = tex1Dfetch(huffman_indx_texture, context_indx+1);
huffman_indx_t indicator = start;
real aggregate_difference = 0;
while(indicator < end)
{
const huffman_point_t point = tex1Dfetch(huffman_point_texture,indicator);
const real point_feature_i = point_distribution[point*FEATURESIZE + thread_id];
out_layer_input[thread_id] = feature_i*point_feature_i;
__syncthreads();
for (unsigned int i = FEATURESIZE/2; i >0; i >>= 1)
{
if(thread_id < i)
{
out_layer_input[thread_id] += out_layer_input[thread_id +i];
}
__syncthreads();
}
huffman_code_t code = tex1Dfetch(huffman_code_texture,indicator);
real op = sigmoid(out_layer_input[0]);
real grad = op - ((real)code);
op = neg_log_sigmoid(out_layer_input[0]*(2*(real)code -1));
__syncthreads(); // every thread has read out_layer_input[0]; it is now safe to overwrite the buffer
// perturb the ith coordinate of the mid layer
out_layer_input[thread_id] = feature_i*point_feature_i;
if(thread_id==pertub_cord) out_layer_input[thread_id] +=epsilong*point_feature_i;
__syncthreads(); // make the perturbed products visible before the reduction
for (unsigned int i = FEATURESIZE/2; i >0; i >>= 1)
{
if(thread_id < i)
{
out_layer_input[thread_id] += out_layer_input[thread_id +i];
}
__syncthreads();
}
real op_pertub = neg_log_sigmoid(out_layer_input[0]*(2*(real)code -1));
real partial_derivative = (op_pertub - op)/epsilong;
if(thread_id==pertub_cord)
aggregate_difference += fabsf(partial_derivative - grad*point_feature_i);
__syncthreads(); // done reading out_layer_input[0]; safe to overwrite the buffer
// perturb the ith coordinate of point_feature
out_layer_input[thread_id] = feature_i*point_feature_i;
if(thread_id==pertub_cord) out_layer_input[thread_id] +=epsilong*feature_i;
__syncthreads(); // make the perturbed products visible before the reduction
for (unsigned int i = FEATURESIZE/2; i >0; i >>= 1)
{
if(thread_id < i)
{
out_layer_input[thread_id] += out_layer_input[thread_id +i];
}
__syncthreads();
}
op_pertub = neg_log_sigmoid(out_layer_input[0]*(2*(real)code -1));
partial_derivative = (op_pertub - op)/epsilong;
if(thread_id==pertub_cord)
aggregate_difference += fabsf(partial_derivative - grad*feature_i);
__syncthreads(); // out_layer_input[0] will be overwritten at the top of the next iteration
indicator++;
}
if(thread_id==pertub_cord)
difference[block_id] = aggregate_difference;
}
// =========================== next source file ===========================
template <typename scalar_t>
__device__ scalar_t modulated_deform_conv2d_im2col_bilinear(
const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = static_cast<scalar_t>(1.) - lh, hw = static_cast<scalar_t>(1.) - lw;
scalar_t v1 = static_cast<scalar_t>(0.);
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = static_cast<scalar_t>(0.);
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = static_cast<scalar_t>(0.);
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = static_cast<scalar_t>(0.);
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
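// Illustrative host-side reference (not from the original source), handy for
// unit-testing modulated_deform_conv2d_im2col_bilinear: same zero value outside the
// map and the same corner weights. Assumes <cmath> is available (pulled in by ATen).
template <typename T>
static T bilinear_reference(const T* img, int stride, int height, int width, T h, T w)
{
const int h_low = (int)std::floor(h), w_low = (int)std::floor(w);
const T lh = h - h_low, lw = w - w_low;
// read a pixel, treating anything outside [0,height) x [0,width) as zero
auto at = [&](int y, int x) -> T {
return (y >= 0 && y < height && x >= 0 && x < width) ? img[y * stride + x] : T(0);
};
return (1 - lh) * (1 - lw) * at(h_low, w_low) + (1 - lh) * lw * at(h_low, w_low + 1)
+ lh * (1 - lw) * at(h_low + 1, w_low) + lh * lw * at(h_low + 1, w_low + 1);
}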
template <typename scalar_t>
__global__ void modulated_deform_conv2d_im2col_gpu_kernel(
const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0.);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width){
val = modulated_deform_conv2d_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
void modulated_deform_conv2d_im2col_cuda(
at::Tensor data_im, at::Tensor data_offset, at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deform_conv2d_im2col_gpu_kernel", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
modulated_deform_conv2d_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deform_conv2d_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
at::Tensor modulated_deform_conv2d_forward_cuda(
at::Tensor input, at::Tensor weight, at::Tensor bias,
at::Tensor offset, at::Tensor mask,
const int kernel_h,const int kernel_w,const int stride_h, const int stride_w,
const int pad_h, const int pad_w, const int dilation_h,const int dilation_w,
const int group, const int deformable_group,const int in_step,
const bool with_bias) {
AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous");
AT_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous");
AT_CHECK(mask.is_contiguous(), "mask tensor has to be contiguous");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
kernel_h, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
// resize output
const int step=GET_STEP(batch,in_step);
at::Tensor output=at::zeros({batch, channels_out, height_out, width_out},input.options());
// resize temporary columns
at::Tensor columns =at::zeros({channels * kernel_h * kernel_w,
step * height_out * width_out},input.options());
input=input.view({batch/step,step,channels,height,width});
offset=offset.view({batch/step,step,deformable_group * 2 *kernel_h*kernel_w,height_out,width_out});
mask=mask.view({batch/step,step,deformable_group*kernel_h*kernel_w,height_out,width_out});
// divide into group
output = output.view({batch/step, group, channels_out / group,step,
height_out, width_out});
weight = weight.view({group, channels_out / group, channels_kernel,
kernel_h_, kernel_w_});
for (int b = 0; b < batch/step; b++) {
columns.fill_(0.0f);
modulated_deform_conv2d_im2col_cuda(
input[b], offset[b], mask[b], step, channels, height, width, height_out,
width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group, columns);
columns = columns.view({group, channels * kernel_h * kernel_w / group, step * height_out * width_out});
for (int g = 0; g < group; g++) {
output[b][g] = output[b][g].flatten(1)
.addmm_(weight[g].flatten(1), columns[g]).view_as(output[b][g]);
}
columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)});
}
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
output = output.view({batch / step, channels_out, step, height_out, width_out});
output = output.transpose(1, 2).contiguous();
output = output.view({batch , channels_out, height_out, width_out});
if (with_bias) {
output += bias.view({1, bias.size(0), 1, 1});
}
return output;
}
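// Shape note (added for clarity, not from the original source): inside the loop above,
// weight[g].flatten(1) is (channels_out/group) x (channels_kernel*kernel_h*kernel_w) and
// columns[g] is (channels*kernel_h*kernel_w/group) x (step*height_out*width_out); since
// channels == channels_kernel*group these inner dimensions agree, and addmm_ adds the
// (channels_out/group) x (step*height_out*width_out) product onto output[b][g].flatten(1).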
// input=input.view({batch,channels,height,width});
// offset=offset.view({batch,deformable_group * 2 *kernel_h*kernel_w,height_out,width_out});
// mask=mask.view({batch,deformable_group*kernel_h*kernel_w,height_out,width_out});
// py::object id = py::module_::import("builtins").attr("id");
// py::object type = py::module_::import("builtins").attr("type");
// py::print(output,id(output));
template <typename scalar_t>
__global__ void modulated_deform_conv2d_gradient_gpu_kernel(
const int n,const scalar_t *grad_col, const scalar_t *data_input,
const scalar_t *data_offset, const scalar_t *data_mask, scalar_t * columns,
const int channels_input, const int height_input, const int width_input,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int step,
const int offset_channels,const int deformable_group,
const int height_col, const int width_col,
scalar_t * grad_input,scalar_t *grad_offset, scalar_t *grad_mask)
{
// columns = {channels * kernel_h * kernel_w,step*height_out * width_out}
// grad_columns = {channels * kernel_h * kernel_w,step*height_out * width_out}
// grad_output = {step,channels_out,height_out,width_out}
// input = {step,channels,height,width}
// offset = {step,deformable_group * 2 * kernel_h * kernel_w,height_out,width_out}
// grad_offset = {step,deformable_group * 2 * kernel_h * kernel_w,height_out,width_out});
CUDA_KERNEL_LOOP(index, n)//channels*kernel_h * kernel_w * step * height_col * width_col;
{
int k = (index /step/ height_col / width_col)%(kernel_h*kernel_w);
int i=k/kernel_w;
int j=k%kernel_w;
int bpos=(index%(step*height_col*width_col))/(height_col*width_col);
int wpos_col = (index % (height_col*width_col)) % width_col;
int hpos_col = ((index %(height_col*width_col)) / width_col) % height_col;
int cpos_col = (index / step / width_col / height_col);
int cpos_in=cpos_col/kernel_h/kernel_w;
int offset_group_index=cpos_in/(channels_input/deformable_group);
//printf("index %d bpos %d cpos_col %d hpos_col %d wpos_col %d \n",index,bpos,cpos_col,hpos_col,wpos_col);
int offset_h_ptr=bpos*(deformable_group * 2 * kernel_h * kernel_w*height_col*width_col)
+offset_group_index*channel_per_deformable_group*height_col*width_col
+2*k*height_col*width_col+hpos_col*width_col+wpos_col;
int offset_w_ptr=bpos*(deformable_group * 2 * kernel_h * kernel_w*height_col*width_col)
+offset_group_index*channel_per_deformable_group*height_col*width_col
+(2*k+1)*height_col*width_col+hpos_col*width_col+wpos_col;
int mask_hw_ptr=bpos*(deformable_group * kernel_h * kernel_w*height_col*width_col)
+offset_group_index*kernel_h*kernel_w*height_col*width_col
+k*height_col*width_col+hpos_col*width_col+wpos_col;
scalar_t offset_h=data_offset[offset_h_ptr];
scalar_t offset_w=data_offset[offset_w_ptr];
int hpos_in = hpos_col * stride_h -pad_h + (i) * dilation_h;
int wpos_in = wpos_col * stride_w - pad_w + (j) * dilation_w;
scalar_t real_offset_h=hpos_in+offset_h;
scalar_t real_offset_w=wpos_in+offset_w;
int h_low = floor(real_offset_h);
int w_low = floor(real_offset_w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t dh = real_offset_h - h_low;
scalar_t dw = real_offset_w - w_low;
scalar_t v1 = static_cast<scalar_t>(0.);
if (h_low >= 0 && h_low <= height_input -1 && w_low >= 0 && w_low <= width_input - 1)
v1 = data_input[bpos*channels_input*height_input*width_input+cpos_in*height_input*width_input+h_low * width_input + w_low];
scalar_t v2 = static_cast<scalar_t>(0.);
if (h_low >= 0 && h_low <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 )
v2 = data_input[bpos*channels_input*height_input*width_input+cpos_in*height_input*width_input+h_low * width_input + w_high];
scalar_t v3 = static_cast<scalar_t>(0.);
if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 )
v3 = data_input[bpos*channels_input*height_input*width_input+cpos_in*height_input*width_input+h_high * width_input + w_low];
scalar_t v4 = static_cast<scalar_t>(0.);
if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 )
v4 = data_input[bpos*channels_input*height_input*width_input+cpos_in*height_input*width_input+h_high * width_input + w_high];
// scalar_t w1=0,w2=0,w3=0,w4=0;
// w1= (h_low+1-real_offset_h) *(w_low+1-real_offset_w);
// w2= (h_low+1-real_offset_h) *(real_offset_w+1-w_high);
// w3 = (real_offset_h+1-h_high) * (w_low+1-real_offset_w);
// w4 = (real_offset_h+1-h_high) * (real_offset_w+1-w_high);
scalar_t w1 = (1-dh) *(1- dw), w2 =(1- dh) * dw, w3 = dh*(1- dw), w4 = dh * dw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
scalar_t col=val*data_mask[mask_hw_ptr];//
scalar_t dval=data_mask[mask_hw_ptr]*grad_col[index];
if (h_low >= 0 && h_low <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1)
atomicAdd(grad_input + bpos*channels_input*height_input*width_input
+ cpos_in*height_input*width_input+h_low * width_input + w_low, (h_low+1-real_offset_h) *(w_low+1-real_offset_w)*dval);
if (h_low >= 0 && h_low <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 && dw>EPS )
atomicAdd(grad_input + bpos*channels_input*height_input*width_input
+ cpos_in*height_input*width_input+h_low * width_input + w_high, (h_low+1-real_offset_h) *(real_offset_w+1-w_high)*dval);
if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && dh>EPS )
atomicAdd(grad_input + bpos*channels_input*height_input*width_input
+ cpos_in*height_input*width_input+h_high * width_input + w_low, (real_offset_h+1-h_high) * (w_low+1-real_offset_w)*dval);
if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 && dh>EPS && dw>EPS)
atomicAdd(grad_input + bpos*channels_input*height_input*width_input
+ cpos_in*height_input*width_input+h_high * width_input + w_high,(real_offset_h+1-h_high) * (real_offset_w+1-w_high)*dval);
if (real_offset_h > static_cast<scalar_t>(-1.) && real_offset_h < height_input && real_offset_w > static_cast<scalar_t>(-1.) && real_offset_w< width_input){
scalar_t w_tmp=static_cast<scalar_t>(0.);
scalar_t v_tmp=static_cast<scalar_t>(0.);
if(h_low>=0 && w_low>=0) w_tmp+=-1*(w_low+1-real_offset_w)*v1;
if(h_low>=0 && w_high< width_input) w_tmp+=-1*(real_offset_w-w_low)*v2;
if(h_high< height_input && w_low>=0) w_tmp+=(w_low+1-real_offset_w)*v3;
if(h_low< height_input && w_high< width_input) w_tmp+=(real_offset_w-w_low)*v4;
v_tmp+=w_tmp*grad_col[index]*data_mask[mask_hw_ptr];
atomicAdd(grad_offset + offset_h_ptr,v_tmp);
w_tmp=static_cast<scalar_t>(0.);
v_tmp=static_cast<scalar_t>(0.);
if(h_low>=0 && w_low>=0) w_tmp+=-1*(h_low+1-real_offset_h)*v1;
if(h_low>=0 && w_high< width_input) w_tmp+=(h_low+1-real_offset_h)*v2;
if(h_high< height_input && w_low>=0) w_tmp+=-1*(real_offset_h-h_low)*v3;
if(h_low< height_input && w_high< width_input) w_tmp+=(real_offset_h-h_low)*v4;
v_tmp+=w_tmp*grad_col[index]*data_mask[mask_hw_ptr];
atomicAdd(grad_offset + offset_w_ptr,v_tmp);
}
atomicAdd(grad_mask + mask_hw_ptr,grad_col[index]*val);
columns[index]=col;
}
}
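// Derivation note (added for clarity, not from the original source). With
// dh = real_offset_h - h_low and dw = real_offset_w - w_low, the bilinear sample is
//   val = (1-dh)*(1-dw)*v1 + (1-dh)*dw*v2 + dh*(1-dw)*v3 + dh*dw*v4,
// so the partials accumulated into grad_offset above are
//   d val / d offset_h = -(1-dw)*v1 - dw*v2 + (1-dw)*v3 + dw*v4
//   d val / d offset_w = -(1-dh)*v1 + (1-dh)*v2 - dh*v3 + dh*v4,
// each scaled by data_mask[mask_hw_ptr]*grad_col[index] before the atomicAdd.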
// gradient offset mask input
void modulated_deform_conv2d_gradient_cuda(
at::Tensor grad_col,at::Tensor data_input,
at::Tensor data_offset, at::Tensor data_mask, at::Tensor columns,
const int channels, const int height_input, const int width_input,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int step, const int deformable_group,
at::Tensor grad_input, at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels =channels*height_col * width_col * kernel_h * kernel_w * step;
const int channel_per_deformable_group =2 * kernel_h * kernel_w ;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_col.scalar_type(), "modulated_deform_conv2d_gradient_gpu_kernel", ([&] {
const scalar_t *grad_col_ = grad_col.data<scalar_t>();
const scalar_t *data_input_ = data_input.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *columns_ = columns.data<scalar_t>();
scalar_t *grad_input_ = grad_input.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
modulated_deform_conv2d_gradient_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, grad_col_, data_input_, data_offset_, data_mask_,columns_,
channels, height_input, width_input,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group,step,
channel_per_deformable_group * deformable_group,
deformable_group, height_col, width_col,
grad_input_,grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deform_conv2d_gradient_cuda: %s\n", cudaGetErrorString(err));
}
}
py::tuple modulated_deform_conv2d_backward_cuda(
at::Tensor input, at::Tensor weight, at::Tensor bias,at::Tensor offset, at::Tensor mask,
at::Tensor grad_output,
const int kernel_h,const int kernel_w,const int stride_h,const int stride_w,
const int pad_h,const int pad_w,const int dilation_h,const int dilation_w,
const int group,const int deformable_group, const int in_step,const bool with_bias) {
AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous");
AT_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous");
AT_CHECK(mask.is_contiguous(), "mask tensor has to be contiguous");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out=weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
kernel_h, kernel_w, kernel_h_, kernel_w_);
if (channels != channels_kernel * group)
AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int step=GET_STEP(batch,in_step);
at::Tensor ones = at::ones({step,height_out, width_out}, input.options());
at::Tensor columns = at::zeros({channels * kernel_h * kernel_w, step*height_out * width_out},input.options());
at::Tensor grad_columns=at::zeros({channels * kernel_h * kernel_w, step*height_out * width_out},input.options());
grad_output=grad_output.view({batch/step,step,channels_out,height_out,width_out});
grad_output.transpose_(1, 2);
grad_output =grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
grad_output.size(2), grad_output.size(3),grad_output.size(4)});
input=input.view({batch/step,step,channels,height,width});
at::Tensor grad_input = at::zeros({batch/step,step, channels, height, width},input.options());
offset=offset.view({batch/step,step,deformable_group * 2 * kernel_h * kernel_w,height_out,width_out});
at::Tensor grad_offset=at::zeros({batch/step,step,deformable_group * 2 * kernel_h * kernel_w,height_out,width_out},offset.options());
mask=mask.view({batch/step,step,deformable_group * kernel_h * kernel_w,height_out,width_out});
at::Tensor grad_mask=at::zeros({batch/step,step,deformable_group * kernel_h * kernel_w,height_out,width_out},mask.options());
at::Tensor grad_weight=at::zeros_like(weight,weight.options());
at::Tensor grad_bias=at::zeros_like(bias,bias.options());
for (int b = 0; b < batch/step; b++) {
// divide into groups
grad_columns = grad_columns.view({group, grad_columns.size(0) / group, grad_columns.size(1)});
weight = weight.view({group, weight.size(0) / group, weight.size(1),weight.size(2), weight.size(3)});
for (int g = 0; g < group; g++) {
grad_columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),grad_output[b][g].flatten(1), 0.0f, 1.0f);
}
grad_columns = grad_columns.view({grad_columns.size(0) * grad_columns.size(1), grad_columns.size(2)});
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4)});
columns.fill_(0.0f);
modulated_deform_conv2d_gradient_cuda(
grad_columns,input[b],offset[b],mask[b],columns,
channels,height,width,height_out,width_out,kernel_h,kernel_w,
pad_h,pad_w,stride_h,stride_w,dilation_h, dilation_w,
step,deformable_group,
grad_input[b],grad_offset[b],grad_mask[b]);
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
grad_weight = grad_weight.view({group, grad_weight.size(0) / group,
grad_weight.size(1), grad_weight.size(2),
grad_weight.size(3)});
if (with_bias)
grad_bias = grad_bias.view({group, grad_bias.size(0) / group});
for (int g = 0; g < group; g++) {
grad_weight[g] = grad_weight[g].flatten(1)
.addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1),1.0f,1.0f)
.view_as(grad_weight[g]);
if (with_bias) {
at::Tensor temp=grad_bias[g].view({-1, 1});
temp.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}),1.0f,1.0f);
grad_bias[g] =temp.view(-1);
}
}
columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)});
grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1),
grad_weight.size(2), grad_weight.size(3),
grad_weight.size(4)});
if (with_bias)
grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)});
}
grad_input = grad_input.view({batch, channels, height, width});
grad_offset=grad_offset.view({batch,deformable_group * 2 * kernel_h * kernel_w,height_out,width_out});
grad_mask=grad_mask.view({batch,deformable_group * kernel_h * kernel_w,height_out,width_out});
py::tuple out=py::make_tuple(grad_input, grad_offset, grad_mask, grad_weight, grad_bias);
return out;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("modulated_deform_conv2d_forward_cuda", &modulated_deform_conv2d_forward_cuda,
"modulated_deform_conv2d_forward_cuda");
m.def("modulated_deform_conv2d_backward_cuda", &modulated_deform_conv2d_backward_cuda,
"modulated_deform_conv2d_backward_cuda");
}
// =========================== next source file ===========================
#include <iostream>
using std::cout;
using std::cerr;
using std::endl;
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>
#include <unistd.h> // for gethostname
namespace etics {
namespace scf {
extern __constant__ CacheStruct Cache;
extern Complex *PartialSum;
extern Complex A_h[(NMAX+1)*(LMAX+1)*(LMAX+2)/2];
extern Complex *PartialSum_h;
extern int k3gs, k3bs, k4gs, k4bs;
int blockSizeToDynamicSMemSize(int BlockSize);
void TestK3(Particle *ParticleList, int N, int numberoftries, double *Average, double *StandardDeviation, bool *Success);
void TestK4(Particle *ParticleList, int N, int numberoftries, double *Average, double *StandardDeviation, bool *Success);
void OptimizeLaunchConfiguration(int N);
}
}
double *Potential;
vec3 *F;
vec3 FirstParticleForce;
const double A000 = 9*(1-0.75*log(3)); // ~1.584, the theoretical A000 coefficient for a Hernquist sphere with a=1/3, which is what we get after normalizing our initial conditions to Henon units.
const double ExpectedForceX = -1e-5;
void etics::scf::GuessLaunchConfiguration(int N, int *k3gs_new, int *k3bs_new, int *k4gs_new, int *k4bs_new) {
int blockSize;
int minGridSize;
int gridSize;
cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize, CalculateCoefficientsPartial, blockSizeToDynamicSMemSize, 128);
cerr << "Warning: setting blockSizeLimit=128 for cudaOccupancyMaxPotentialBlockSizeVariableSMem." << endl;
gridSize = minGridSize;
*k3gs_new = gridSize;
*k3bs_new = blockSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, CalculateGravityFromCoefficients, 0, N);
gridSize = (N + blockSize - 1) / blockSize;
*k4gs_new = gridSize;
*k4bs_new = blockSize;
}
void etics::scf::TestK3(Particle *ParticleList, int N, int numberoftries, double *Average, double *StandardDeviation, bool *Success) {
double Average_tmp=0, StandardDeviation_tmp = 0;
for (int k=0; k<numberoftries; k++) {
LoadParticlesToCache<<<128,128>>>(ParticleList, N); // need to clear the cache
DeviceTimer Timer;
Timer.Start();
CalculateCoefficients(A_h);
Timer.Stop();
double Milliseconds = Timer.Difference()*1000;
Average_tmp += Milliseconds;
StandardDeviation_tmp += Milliseconds*Milliseconds;
}
Average_tmp /= numberoftries;
StandardDeviation_tmp = sqrt(StandardDeviation_tmp/numberoftries - Average_tmp*Average_tmp);
*Average = Average_tmp;
*StandardDeviation = StandardDeviation_tmp;
double A000 = 9*(1-0.75*log(3)); // ~1.584, the theoretical A000 coefficient for a Hernquist sphere with a=1/3, which is what we get after normalizing our initial conditions to Henon units.
*Success = (0.8 < A_h[0].x/A000) && (A_h[0].x/A000 < 1.2); // very rough success criterion.
}
void etics::scf::TestK4(Particle *ParticleList, int N, int numberoftries, double *Average, double *StandardDeviation, bool *Success) {
// Need to make sure A_h is loaded to GPU and the first particle is at (187.79, 187.79, 0); also, global arrays Potential and F should be allocated on GPU
double Average_tmp=0, StandardDeviation_tmp = 0;
SendCoeffsToGPU(A_h);
cudaMemset(F, 0, sizeof(vec3));
for (int k=0; k<numberoftries; k++) {
LoadParticlesToCache<<<128,128>>>(ParticleList, N); // need to clear the cache
DeviceTimer Timer;
Timer.Start();
CalculateGravityFromCoefficients<<<k4gs,k4bs>>>(Potential, F);
Timer.Stop();
double Milliseconds = Timer.Difference()*1000;
Average_tmp += Milliseconds;
StandardDeviation_tmp += Milliseconds*Milliseconds;
}
Average_tmp /= numberoftries;
StandardDeviation_tmp = sqrt(StandardDeviation_tmp/numberoftries - Average_tmp*Average_tmp);
*Average = Average_tmp;
*StandardDeviation = StandardDeviation_tmp;
cudaMemcpy(&FirstParticleForce, F, sizeof(vec3), cudaMemcpyDeviceToHost);
*Success = (0.8 < FirstParticleForce.x/ExpectedForceX) && (FirstParticleForce.x/ExpectedForceX < 1.2); // very rough success criterion.
}
void etics::scf::OptimizeLaunchConfiguration(int N) {
cout << "We are going to try to optimize the launch configuration for the main ETICS (SCF) kernels by a brute force search." << endl << endl;
cudaDeviceProp DeviceProperties;
cudaGetDeviceProperties(&DeviceProperties, 0); // should be DevID!!!
char HostName[256];
gethostname(HostName, 256);
cout << "Probing device GPU" << 0 << " on host " << HostName << ": " << DeviceProperties.name << endl << endl;
const int ComputeCapability = ((DeviceProperties.major << 4) + DeviceProperties.minor);
int CorePerSM = 0;
switch (ComputeCapability) { // We count FP32 cores here... not exactly what we want.
case 0x10 : CorePerSM = 8; break; // Tesla Generation (SM 1.0) G80 class
case 0x11 : CorePerSM = 8; break; // Tesla Generation (SM 1.1) G8x class
case 0x12 : CorePerSM = 8; break; // Tesla Generation (SM 1.2) G9x class
case 0x13 : CorePerSM = 8; break; // Tesla Generation (SM 1.3) GT200 class
case 0x20 : CorePerSM = 32; break; // Fermi Generation (SM 2.0) GF100 class
case 0x21 : CorePerSM = 48; break; // Fermi Generation (SM 2.1) GF10x class
case 0x30 : CorePerSM = 192; break; // Kepler Generation (SM 3.0) GK10x class
case 0x32 : CorePerSM = 192; break; // Kepler Generation (SM 3.2) GK10x class
case 0x35 : CorePerSM = 192; break; // Kepler Generation (SM 3.5) GK11x class
case 0x37 : CorePerSM = 192; break; // Kepler Generation (SM 3.7) GK21x class
case 0x50 : CorePerSM = 128; break; // Maxwell Generation (SM 5.0) GM10x class
case 0x52 : CorePerSM = 128; break; // Maxwell Generation (SM 5.2) GM20x class
case 0x60 : CorePerSM = 64; break; // Pascal Generation (SM 6.0) GP10x class
}
int Cores = CorePerSM * DeviceProperties.multiProcessorCount ;
if (Cores == 0) {
Cores = 3584;
cout << "Could not count cores! Your GPU is possibly too new. We'll take " << Cores << " but it doesn't really matter." << endl << endl;
}
const int RealMaxGS = Cores/2;
cout << "We are only going to examine grids smaller than " << RealMaxGS << " blocks, for absolutely no good reason." << endl << endl;
const int numberoftries = 10;
const int WarpSize = DeviceProperties.warpSize, MinBS=WarpSize;
const int ShmemSizePerBlock = sizeof(Complex)*(LMAX+1); // for Kernel3
int MaxBS = (int)(DeviceProperties.sharedMemPerBlock / (WarpSize*ShmemSizePerBlock))*WarpSize;
MaxBS = (MaxBS>DeviceProperties.maxThreadsPerBlock)?(DeviceProperties.maxThreadsPerBlock):(MaxBS);
Particle *ParticleList, *ParticleList_h;
cout << "Generating initial conditions (this may take a while)." << endl << endl;
etics::ic::hernquist(N, 0, &ParticleList_h);
ParticleList_h[0].pos = vec3(187.79445239392416256476, 187.79445239392416256476, 0);
cudaMalloc((void**)&ParticleList, N * sizeof(Particle));
cudaMemcpy(ParticleList, ParticleList_h, N * sizeof(Particle), cudaMemcpyHostToDevice);
cudaMalloc((void**)&Potential, N * sizeof(Real));
cudaMalloc((void**)&F, N * sizeof(vec3));
int k3gs_tmp, k3bs_tmp, k4gs_tmp, k4bs_tmp;
GuessLaunchConfiguration(N, &k3gs_tmp, &k3bs_tmp, &k4gs_tmp, &k4bs_tmp);
printf("Recommended launch configuration for Kernel3 (CalculateCoefficientsPartial): <<<%d,%d>>>\n", k3gs_tmp, k3bs_tmp);
Init(N, k3gs_tmp, k3bs_tmp, k4gs_tmp, k4bs_tmp);
double Average=0, StandardDeviation=0;
cout << "Testing..." << endl;
bool Success;
TestK3(ParticleList, N, numberoftries, &Average, &StandardDeviation, &Success);
printf("Executed in %.2f ms +/- %.2f\n\n", Average, StandardDeviation);
double k3_normal_time = Average;
printf("Recommended launch configuration for Kernel4 (CalculateGravityFromCoefficients): <<<%d,%d>>>\n", k4gs_tmp, k4bs_tmp);
cout << "Testing..." << endl;
TestK4(ParticleList, N, numberoftries, &Average, &StandardDeviation, &Success);
double k4_normal_time = Average;
printf("Executed in %.2f ms +/- %.2f\n\n", Average, StandardDeviation);
free(PartialSum_h);
cudaFree(PartialSum);
PartialSum_h = (Complex*)malloc(RealMaxGS*(LMAX+1)*sizeof(Complex)); // why not use "new"?
cudaMalloc((void**)&PartialSum, RealMaxGS*(LMAX+1)*sizeof(Complex));
int TotalTests = RealMaxGS * (MaxBS/32);
double Average_arr[TotalTests], StandardDeviation_arr[TotalTests];
int BlockSize_arr[TotalTests], GridSize_arr[TotalTests];
cout << "Optimizing K3 (block size is a power of two due to summation algorithm)" << endl;
int i = 0;
Success = true;
for (k3bs = MinBS; k3bs <= MaxBS; k3bs *= 2) {
if (!Success) break;
int MinGS = (Cores/k3bs>0)?(Cores/k3bs):1; // honestly can't remember why
int MaxGS = (N+k3bs-1)/k3bs;
MaxGS = (MaxGS>RealMaxGS)?(RealMaxGS):(MaxGS);
for (k3gs = MinGS; k3gs <= MaxGS; k3gs++) {
TestK3(ParticleList, N, numberoftries, &Average, &StandardDeviation, &Success);
if (!Success) break;
printf("<<<%04d,%03d>>> %7.2f %7.2f %.3e\n", k3gs, k3bs, Average, StandardDeviation, A_h[0].x/A000);
fflush(stdout);
BlockSize_arr[i]=k3bs; GridSize_arr[i]=k3gs;
Average_arr[i] = Average;
StandardDeviation_arr[i] = StandardDeviation;
i++;
}
}
int MaxIndex = i;
int IndexOfMininum = 0;
for (int i = 0; i < MaxIndex; i++) if (Average_arr[i] < Average_arr[IndexOfMininum]) IndexOfMininum = i;
int k3gs_opt=GridSize_arr[IndexOfMininum], k3bs_opt=BlockSize_arr[IndexOfMininum];
double k3_opt_time = Average_arr[IndexOfMininum];
printf("Fastest configuration is: <<<%04d,%03d>>>\n", GridSize_arr[IndexOfMininum], BlockSize_arr[IndexOfMininum]);
cout << "Other options:" << endl;
for (int i = 0; i < MaxIndex; i++) {
if ((Average_arr[i]-StandardDeviation_arr[i] < Average_arr[IndexOfMininum]+StandardDeviation_arr[IndexOfMininum]) && (i!=IndexOfMininum)) {
printf(" <<<%04d,%03d>>>\n", GridSize_arr[i], BlockSize_arr[i]);
}
}
cout << "Optimizing K4" << endl;
Init(N, k3gs_tmp, k3bs_tmp, k4gs_tmp, k4bs_tmp);
TestK3(ParticleList, N, 1, &Average, &StandardDeviation, &Success);
i = 0;
Success = true;
for (k4bs = MinBS; k4bs <= DeviceProperties.maxThreadsPerBlock; k4bs += WarpSize) {
if (!Success) break;
int MinGS = (Cores/k4bs>0)?(Cores/k4bs):1; // honestly can't remember why
int MaxGS = (N+k4bs-1)/k4bs;
MaxGS = (MaxGS>RealMaxGS)?(RealMaxGS):(MaxGS);
for (k4gs = MinGS; k4gs <= MaxGS; k4gs++) {
TestK4(ParticleList, N, numberoftries, &Average, &StandardDeviation, &Success);
if (!Success) break;
printf("<<<%04d,%03d>>> %7.2f %7.2f %.3e\n", k4gs, k4bs, Average, StandardDeviation, FirstParticleForce.x/ExpectedForceX);
fflush(stdout);
BlockSize_arr[i]=k4bs; GridSize_arr[i]=k4gs;
Average_arr[i] = Average;
StandardDeviation_arr[i] = StandardDeviation;
i++;
}
}
MaxIndex = i;
IndexOfMininum = 0;
for (int i = 0; i < MaxIndex; i++) if (Average_arr[i] < Average_arr[IndexOfMininum]) IndexOfMininum = i;
int k4gs_opt=GridSize_arr[IndexOfMininum], k4bs_opt=BlockSize_arr[IndexOfMininum];
double k4_opt_time = Average_arr[IndexOfMininum];
printf("Fastest configuration is: <<<%04d,%03d>>>\n", GridSize_arr[IndexOfMininum], BlockSize_arr[IndexOfMininum]);
cout << "Other options:" << endl;
for (int i = 0; i < MaxIndex; i++) {
if ((Average_arr[i]-StandardDeviation_arr[i] < Average_arr[IndexOfMininum]+StandardDeviation_arr[IndexOfMininum]) && (i!=IndexOfMininum)) {
printf(" <<<%04d,%03d>>>\n", GridSize_arr[i], BlockSize_arr[i]);
}
}
printf("=================================== SUMMARY ==================================\n");
printf("Parameters: LMAX=%d, NMAX=%d, N=%d (GPU%d=\"%s\" on %s)\n", LMAX, NMAX, N, 0, DeviceProperties.name, HostName);
printf("Recommended launch configuration for K3: <<<%d,%d>>>; execution time: %.2f ms.\n", k3gs_tmp, k3bs_tmp, k3_normal_time);
printf("Optimal launch configuration for K3: <<<%d,%d>>>; execution time: %.2f ms.\n", k3gs_opt, k3bs_opt, k3_opt_time);
printf("Recommended launch configuration for K4: <<<%d,%d>>>; execution time: %.2f ms.\n", k4gs_tmp, k4bs_tmp, k4_normal_time);
printf("Optimal launch configuration for K4: <<<%d,%d>>>; execution time: %.2f ms.\n", k4gs_opt, k4bs_opt, k4_opt_time);
printf("================================================================================\n");
cudaFree(PartialSum);
cudaFree(ParticleList);
cudaFree(Potential);
cudaFree(F);
}
int main(int argc, char* argv[]) {
if (argc < 2) {
cout << "Please specify number of particles (> 10000)." << endl;
return 1;
}
int N = atoi(argv[1]);
if (N < 10000) {
cout << "Please use more than 10000 particles." << endl;
return 1;
}
etics::scf::OptimizeLaunchConfiguration(N);
return 0;
}
// =========================== next source file ===========================
#include "bnorm.hpp"
#include "../datacu.hpp"
#include "blashelper.hpp"
#include <assert.h>
#include <float.h>
#include <stdint.h>
// MSB_WARP = log2(WARP_SIZE)
#define WARP_SIZE 32
#define MSB_WARP 5
// macro function
#define min(a,b) (((a) > (b)) ? (b) : (a))
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
static inline int getBlockSize(int dataSize)
{
int blockSize = VL_CUDA_NUM_THREADS / 2 ;
if (dataSize < blockSize) {
unsigned int numWarps = dataSize / WARP_SIZE ;
if (numWarps < 4) {
blockSize = 2 * WARP_SIZE ;
}
else if (numWarps < 8) {
blockSize = 4 * WARP_SIZE ;
}
else {
blockSize = 8 * WARP_SIZE ;
}
}
return blockSize ;
}
// The blockReduce function(s) compute the sum of the elements of the
// array mdata[] (and sdata, rdata, tdata):
//
// mdata[0] <- mdata[0] + mdata[1] + ... + mdata[blockSize-1]
//
// blockSize is a power of two.
//
// When the reduction involves a single warp of 32 threads, further
// optimisations kick in: the threads of a warp execute in lockstep, so explicit synchronisation is no longer needed.
__forceinline__ __device__ void warpReduce(volatile float * mdata,
unsigned int tid,
unsigned int blockSize)
{
if (blockSize >= 64) { mdata[tid] += mdata[tid + 32]; } // mdata[0:31] = mdata[0:31] + mdata[32:63]
if (blockSize >= 32) { mdata[tid] += mdata[tid + 16]; } // mdata[0:15] = mdata[0:15] + mdata[16:31]
if (blockSize >= 16) { mdata[tid] += mdata[tid + 8]; } // mdata[0:7] = mdata[0:7] + mdata[8:15]
if (blockSize >= 8) { mdata[tid] += mdata[tid + 4]; } // mdata[0:3] = mdata[0:3] + mdata[4:7]
if (blockSize >= 4) { mdata[tid] += mdata[tid + 2]; } // mdata[0:1] = mdata[0:1] + mdata[2:3]
if (blockSize >= 2) { mdata[tid] += mdata[tid + 1]; } // mdata[0] = mdata[0] + mdata[1]
}
__forceinline__ __device__ void blockReduce(volatile float * mdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { mdata[tid] += mdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { mdata[tid] += mdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { mdata[tid] += mdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64 ) { if (tid < 64) { mdata[tid] += mdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
warpReduce(mdata, tid, blockSize);
}
}
__forceinline__ __device__ void warpReduce2(volatile float * sdata, volatile float * mdata, unsigned int tid, unsigned int blockSize)
{
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; }
}
__forceinline__ __device__ void blockReduce2(volatile float * mdata,
volatile float * sdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
warpReduce2(sdata, mdata, tid, blockSize);
}
}
__forceinline__ __device__ void warpReduce4(volatile float * sdata,
volatile float * mdata,
volatile float * rdata,
volatile float * tdata,
unsigned int tid,
unsigned int blockSize)
{
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; rdata[tid] += rdata[tid + 32]; tdata[tid] += tdata[tid + 32];}
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; rdata[tid] += rdata[tid + 16]; tdata[tid] += tdata[tid + 16];}
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; rdata[tid] += rdata[tid + 8]; tdata[tid] += tdata[tid + 8];}
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; rdata[tid] += rdata[tid + 4]; tdata[tid] += tdata[tid + 4];}
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; rdata[tid] += rdata[tid + 2]; tdata[tid] += tdata[tid + 2];}
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; rdata[tid] += rdata[tid + 1]; tdata[tid] += tdata[tid + 1];}
}
__forceinline__ __device__ void blockReduce4(volatile float * sdata,
volatile float * mdata,
volatile float * rdata,
volatile float * tdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >= 512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; rdata[tid] += rdata[tid + 512]; tdata[tid] += tdata[tid + 512];} __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >= 256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; rdata[tid] += rdata[tid + 256]; tdata[tid] += tdata[tid + 256];} __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >= 128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; rdata[tid] += rdata[tid + 128]; tdata[tid] += tdata[tid + 128];} __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >= 64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; rdata[tid] += rdata[tid + 64]; tdata[tid] += tdata[tid + 64];} __syncthreads(); }
if (tid < 32) {
warpReduce4(sdata, mdata, rdata, tdata, tid, blockSize);
}
}
/*
In the following we often need to use blocks of threads to sum over
data which is not necessarily naturally aligned with thread blocks or even thread warps.
The trick is to look at the block as a jumping window, sliding it over the memory
that needs to be summed, but always aligned at natural block boundaries. This means
that occasionally blocks will only be partially filled with useful memory:
+-------+ +-------+ +-------+ +-------+ aligned blocks (with two warps each)
| : | | : | | : | | : | covering the data
+-------+ +-------+ +-------+ +-------+
+-------------+ +-------------+ data to sum
+-------------------------------------------------------->
increasing memory addresses
This pattern is repeated several times in the code below.
*/
// Get largest memory address that is aligned to a warp worth of float
// and smaller than x.
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
return (uintptr_t)(x) & (~((uintptr_t)(WARP_SIZE*sizeof(float)) - 1)) ;
}
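// Worked example (added for clarity, not from the original source): with
// WARP_SIZE*sizeof(float) = 128 bytes, the mask ~(128-1) clears the low 7 address bits,
// so e.g. an address of 0x1003C rounds down to 0x10000. The block therefore starts on a
// warp-aligned float, and the `block >= columnBegin` / `block >= planeBegin` guards in
// the loops below skip the few positions that fall before the real start of the data.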
// Use the current block of thread to sum over a given column of a matrix. The selected
// column is given by the thread block index in the block grid.
__forceinline__ __device__ float matrixSumHelper(float const * matrix, int numRows)
{
// One thread block per column to sum
extern __shared__ float scratch [] ;
int tid = threadIdx.x ;
int column = blockIdx.x ;
int blockSize = blockDim.x ;
scratch[tid] = 0 ;
float const * columnBegin = matrix + column * numRows ;
float const * columnEnd = columnBegin + numRows ;
float const * block = (float const*) getBlockBeginning(columnBegin) + tid ;
while (block < columnEnd) {
if (block >= columnBegin) {
scratch[tid] += *block ;
}
block += blockSize ;
}
// Now we have a block worth of partial sums for the column
// Finish by reducing and saving
blockReduce(scratch, tid, blockSize, numRows) ;
return scratch[0] ;
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* bnorm_forward */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
__global__ void divide(float * mu,
float * sigma,
float mass,
unsigned int n)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < n){
float mean = mu[idx]/mass;
mu[idx] = mean;
sigma[idx] = sigma[idx]/mass-mean*mean;
}
}
// The kernel accumulates means and variances for the data.
// Each block of thread sums over one or more data planes, resulting
// in an array accumulator[] of dimension numChunks x 2*numChannels.
//
// If each thread block scans all the images, then numChunks = 1.
// However, for efficiency different thread blocks do different
// subset of images, resulting in numChunks partial results to be integrated
// later.
//
// The first part accumulator[:,0:numChannels-1] stores the data for the mean
// and the second part accumulator[:,numChannels,2*numChannels-1] the data
// for the sigmas.
__global__ void computePartialMuSigma(float * accumulator,
float const * data,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
extern __shared__ float s[] ;
float * mdata = s ;
float * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
float const * planeBegin = data + plane * planeArea ;
float const * planeEnd = planeBegin + planeArea ;
float const * block = (float const*) getBlockBeginning(planeBegin) + tid ;
while (block < planeEnd) {
if (block >= planeBegin) {
float x = *block ;
mdata[tid] += x ;
sdata[tid] += x * x ;
}
block += blockSize ;
}
plane += planeStride ;
}
blockReduce2(sdata, mdata, tid, blockSize, planeArea) ;
if (tid == 0) {
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
accumulator[i] = mdata[0];
accumulator[i + gridDim.x] = sdata[0];
}
}
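// Indexing example (added for clarity, not from the original source): with
// numChannels = 4 and gridDim.x = 8 (so numChunks = 2), thread block 6 handles
// channel = 6 % 4 = 2 and chunk = 6 / 4 = 1, hence i = 1 + 2*2 = 5: its partial mean
// lands in accumulator[5] and its partial sum of squares in accumulator[5 + 8] = accumulator[13].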
__global__ void reduceMuSigma(float * accumulator,
float const * matrix,
int numRows)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
float x = matrixSumHelper(matrix, numRows) ;
if (tid == 0) {
accumulator[column] = x ;
}
}
__global__ void normalize(float * outputData,
float const * data,
float const * means,
float const * sigmas,
float const * multipliers,
float const * biases,
int planeArea,
int numPlanes,
int numChannels,
float epsilon)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
// Not optimized for compute capability < 1.2
float mean = means[channel];
float sigma = sigmas[channel];
float multiplier = multipliers[channel];
float bias = biases[channel];
float coefficient = multiplier * rsqrt(sigma + epsilon) ;
while (plane < numPlanes) {
float const * planeBegin = data + plane * planeArea ;
float const * planeEnd = planeBegin + planeArea ;
float const * block = (float const*) getBlockBeginning(planeBegin) + tid ;
float * oblock = outputData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = coefficient * (*block - mean) + bias ;
}
block += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
template<> vl::Error
vl::impl::bnorm_forward<vl::GPU, float>(Context& context,
float* output,
float const* data,
float const* multipliers,
float const* biases,
int height, int width, int depth, int size,
float epsilon)
{
/*
The data is organised in SIZE images, each of which is composed of DEPTH
planes. The goal is to compute the mean and std of the features in each
plane. In the following diagram, planes are enumerated from left to right
and top to bottom, listing first all the plane for one image (a row) and then
subsequent images (in different rows).
+-------+ +-------+ +-------+ +-------+
|plane 1| |p 2 | |p 3 | |p 4 | numPlanes = 12
|ch 1 | |c 2 | |c 3 | |c 4 | depth = 4
|image 1| |i 1 | |i 1 | |i 1 | planeArea = 28
+---+block 1| |b 2 | |b 3 | |b 4 | planeStride = gridSize = 8
| +-------+ +-------+ +-------+ +-------+
|
| +-------+ +-------+ +-------+ +-------+
| |p 5 | |p 6 | |p 7 | |p 8 |
| |c 1 | |c 2 | |c 3 | |c 4 |
| |i 2 | |i 2 | |i 2 | |i 2 |
| |b 5 | |b 6 | |b 7 | |b 8 |
| +-------+ +-------+ +-------+ +-------+
|
| +-------+ +-------+ +-------+ +-------+
| |p 9 | |p 10 | |p 11 | |p 12 |
| |c 1 | |c 2 | |c 3 | |c 4 |
| |i 3 | |i 3 | |i 3 | |i 3 |
+-->+b 1 | |b 2 | |b 3 | |b 4 |
+-------+ +-------+ +-------+ +-------+
We create gridSize thread blocks. Each block is assigned to sum
over a successive plane in the order above. Since there may be less blocks
than planes overall, these warp around (in the example, thread block 1
integrates planes 1 and planes 9).
*/
float *mean ;
float *sigma ;
cudaError_t status;
vl::Device type = GPU;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
// Try allocating one block for each plane. However, if
// this corresponds to too many blocks, reduce the number,
  // still making sure that the number of blocks is a multiple of
// DEPTH. The latter is needed so that a block always sums
// features belonging to the same channel,
// even across different images.
unsigned int row = 1 ;
unsigned int gridSize = depth ;
  // Avoid thread overload: each thread should execute fewer than ten thousand operations
/*if (planeArea*size > 10000*blockSize) {
row = min((depth*planeArea*size)/(9999*blockSize)+1,size) ;
// gridSize limit
if(depth >= 65536){
row = 1;
}
else if (depth*row > 65536) {
row = 65536/depth + 1 ;
}
gridSize = row * depth ;
}*/
if (gridSize != depth){
// Get intermediate buffers
unsigned int fin1 = (gridSize%WARP_SIZE==0) ? gridSize : WARP_SIZE*((gridSize>>MSB_WARP)+1);
float * intermediateOutput = (float*) context.getWorkspace(type, (gridSize+fin1+2*depth) * sizeof(float)) ;
mean = intermediateOutput + gridSize+fin1;
sigma = mean + depth;
// Compute mean and variance at the same time
computePartialMuSigma <<<gridSize, blockSize, 2*blockSize*sizeof(float)>>>
(intermediateOutput,
data,
planeArea,
numPlanes,
depth,
row) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::vlErrorCuda ;
int blockSizeSum = getBlockSize(row) ;
reduceMuSigma <<<2*depth,blockSizeSum,blockSizeSum*sizeof(float)>>>
(mean, intermediateOutput, row) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::vlErrorCuda ;
} else {
mean = (float*) context.getWorkspace(type, 2*depth * sizeof(float)) ;
sigma = mean + depth;
computePartialMuSigma<<<gridSize, blockSize, 2*blockSize*sizeof(float)>>>
(mean,
data,
planeArea,
numPlanes,
depth,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::vlErrorCuda ;
}
unsigned int mass = planeArea*size;
divide <<<divideUpwards(depth,blockSize),blockSize>>>
(mean, mean+depth, (float)mass, depth);
normalize <<<gridSize, blockSize>>>
(output, data, mean, sigma, multipliers, biases,
planeArea,
numPlanes,
depth,
epsilon) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* bnorm_backward */
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
__global__ void divideSigma(float * dzdg,
float * dzdb,
float * mu,
float * sigma,
float epsilon,
float mass,
unsigned int n
)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx<n){
float mean = mu[idx]/mass;
mu[idx] = mean;
sigma[idx] = sigma[idx]/mass-mean*mean;
dzdg[idx] = (dzdg[idx]-mean*dzdb[idx])/sqrt(sigma[idx]+epsilon);
}
}
__global__ void computePartialMuSigmaDer(float * buffer1,
float * buffer2,
float * buffer3,
float const * data,
float const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numChunks)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
extern __shared__ float s[] ;
float * mdata = s ;
float * sdata = mdata + blockSize ;
float * rdata = sdata + blockSize ;
float * tdata = rdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
rdata[tid] = 0 ;
tdata[tid] = 0 ;
while (plane < numPlanes) {
float const * planeBegin = data + plane * planeArea ;
float const * planeEnd = planeBegin + planeArea ;
float const * block = (float const*) getBlockBeginning(planeBegin) + tid ;
float const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
float x = *block ;
float dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
rdata[tid] += x * x ;
tdata[tid] += x ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
// Nothing to optimize here
blockReduce4(sdata, mdata, rdata, tdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
buffer1[blockIdx.x] = mdata[0];
buffer2[blockIdx.x] = sdata[0];
buffer3[blockIdx.x] = tdata[0];
buffer3[blockIdx.x+numChannels] = rdata[0];
} else {
      // Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numChunks ;
buffer1[i] = mdata[0]; // derMultipliers
buffer1[i + gridDim.x] = sdata[0]; // derBiases
buffer1[i + 2*gridDim.x] = tdata[0]; // means
buffer1[i + 3*gridDim.x] = rdata[0]; // sigmas
}
}
}
__global__ void reduceMuSigmaDer(float * accumulator,
float * derMultipliers,
float * derBiases,
float const * matrix,
int numRows,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
float x = matrixSumHelper(matrix, numRows) ;
if (tid == 0) {
// Recall that the matrix stores in order [derMultipliers derBiases means sigmas]
// containing four types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else if (type == 1) {
derBiases[channel] = x ;
}
else if (type == 2) {
accumulator[channel] = x ;
}
else {
accumulator[channel + numChannels] = x ;
}
}
}
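/* For reference (not part of the original code): normalizeBackward below
   appears to implement the standard batch-normalization backward identity
       dL/dx = (g / sqrt(var + eps)) * ( dL/dy - mean(dL/dy)
               - xhat * mean(dL/dy * xhat) ),   xhat = (x - mu) / sqrt(var + eps),
   with G1 = g / sqrt(var + eps), muz = mean(dL/dy), and
   G2 = g * derMultiplier / ((var + eps) * mass) folding the last term. */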
__global__ void normalizeBackward(float * derData,
float const * data,
float const * derOutput,
float const * means,
float const * sigmas,
float const * multipliers,
float const * derBiases,
float const * derMultipliers,
int planeArea,
int numPlanes,
int numChannels,
float epsilon,
float mass)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
// Not optimized for compute capability < 1.2
float mu = means[channel];
float sigma2 = sigmas[channel] + epsilon;
float multiplier = multipliers[channel] ;
float muz = derBiases[channel] / mass;
float derMultiplier = derMultipliers[channel];
float G1 = multiplier * rsqrt(sigma2);
float G2 = (multiplier * derMultiplier) / (sigma2 * mass);
while (plane < numPlanes) {
float const * planeBegin = data + plane * planeArea ;
float const * planeEnd = planeBegin + planeArea ;
float const * block = (float const*) getBlockBeginning(planeBegin) + tid ;
float const * dblock = derOutput + (block - data) ;
float * oblock = derData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = G1 * (*dblock - muz) - G2 * (*block - mu);
}
block += blockSize ;
dblock += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
template<> vl::Error
vl::impl::bnorm_backward<vl::GPU, float>(Context& context,
float* derData,
float* derMultipliers,
float* derBiases,
float const* data,
float const* multipliers,
float const* biases,
float const* derOutput,
int height, int width, int depth, int size,
float epsilon)
{
vl::Device type = GPU;
float *intermediateOutput;
float *mean ;
float *sigma;
cudaError_t status;
unsigned int planeArea = height * width ;
unsigned int numPlanes = depth * size ;
unsigned int blockSize = getBlockSize(planeArea) ;
unsigned int row = 1 ;
unsigned int gridSize = depth ;
  // Avoid thread overload: each thread should execute fewer than ten thousand operations
/*if (planeArea*size > 10000*blockSize) {
row = min((depth*planeArea*size)/(9999*blockSize)+1,size) ;
// gridSize limit
if(depth >= 65536){
row = 1;
}
else if (depth*row > 65536) {
row = 65536/depth + 1 ;
}
gridSize = row * depth ;
}*/
if(gridSize != depth){
// Get intermediate buffers
unsigned int fin1 = (gridSize%WARP_SIZE==0) ? gridSize : WARP_SIZE*((gridSize>>MSB_WARP)+1);
    // Could be optimized here to get coalesced access
intermediateOutput = (float*) context.getWorkspace(type, (3*gridSize+fin1+2*depth) * sizeof(float)) ;
mean = intermediateOutput + fin1 + 3*gridSize;
sigma = mean + depth;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::vlErrorCuda ;
// Mean, variance, derMultipliers and derBiases computation
computePartialMuSigmaDer<<<gridSize, blockSize, 4*blockSize*sizeof(float)>>>
(intermediateOutput,
NULL,
NULL,
data,
derOutput,
planeArea,
numPlanes,
depth,
row) ;
int blockSizeSum = getBlockSize(row) ;
reduceMuSigmaDer <<<4*depth,blockSizeSum,blockSizeSum*sizeof(float)>>>
(mean, derMultipliers, derBiases,
intermediateOutput, row, depth) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::vlErrorCuda ;
} else {
mean = (float*) context.getWorkspace(type, (2*depth) * sizeof(float)) ;
sigma = mean + depth;
computePartialMuSigmaDer <<<gridSize, blockSize, 4*blockSize*sizeof(float)>>>
(derMultipliers,
derBiases,
mean,
data,
derOutput,
planeArea,
numPlanes,
depth,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::vlErrorCuda ;
}
unsigned int mass = planeArea*size;
divideSigma<<<divideUpwards(depth,blockSize),blockSize>>>
(derMultipliers, derBiases, mean,sigma,epsilon,(float)mass,depth);
// Compute output
normalizeBackward <<<gridSize, blockSize>>>
(derData,
data, derOutput, mean, sigma,
multipliers, derBiases, derMultipliers,
planeArea, numPlanes, depth,
epsilon, (float)mass) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
#include "DouglasPeucker.h"
// Macro: DEF_BLOCK_1D
// Default 1D block size.
#define DEF_BLOCK_1D 512
// Macro: CH_LARGE_ENOUGH
// A positive integer large enough to be treated as infinity in this code.
#define CH_LARGE_ENOUGH ((1 << 30) - 1)
// Kernel: _initLabelAryKer (initialize the LABEL array)
// Initializes the LABEL array before the iteration starts. After
// initialization every element is 0 except the last one, which is 1.
static __global__ void      // Kernel functions return nothing
_initLabelAryKer(
        int label[],        // LABEL array to initialize.
        int cstcnt          // Length of the LABEL array.
);
// Kernel: _updateDistKer (update the perpendicular distances of the point set)
// Using the corner points found so far and the region label of each point,
// finds the start and end points of the region each point belongs to and,
// with the point-to-line distance formula, computes each point's attached
// data: its perpendicular distance to the line through that region's start
// and end points.
static __global__ void      // Kernel functions return nothing
_updateDistKer(
        int cst[],          // Input (and output) point set; its attached
                            // data, i.e. the perpendicular distance, is updated.
        int cornercst[],    // Corner points found so far, i.e. the extreme
                            // point of each segment.
        int label[],        // Input, region label of each point.
        int cstcnt,         // Input, number of points.
        float dis[]         // Perpendicular distance of every point.
);
// Kernel: _updateFoundInfoKer (update information about newly found corners)
// Using the segmented-scan results, marks whether each region has found a
// new corner point and updates the position index of the corner points
// known so far.
static __global__ void      // Kernel functions return nothing
_updateFoundInfoKer(
        int label[],        // Input, region label of each point.
        float dist[],       // Input, perpendicular distance of every point
                            // (the attachedData field of the point set).
        int maxdistidx[],   // Input, after the segmented scan, the index of
                            // the point with the largest distance known so
                            // far in each segment.
        int cstcnt,         // Number of points.
        int foundflag[],    // Output, set to 1 if a new point was found in
                            // the region.
        int startidx[],     // Output, position index of the corner points
                            // known so far, i.e. the start index of each
                            // segment.
        float threshold,    // Distance threshold.
        int foundidx[]      // 1D index of each newly found corner point.
);
// Kernel: _updateCornerCstKer (build the new corner point set)
// Using the segmented-scan results and the per-segment "new point found"
// flags, builds the new corner point set.
static __global__ void      // Kernel functions return nothing
_updateCornerCstKer(
        int cst[],          // Input point set.
        int cornercst[],    // Corner points found so far, i.e. the extreme
                            // point of each segment.
        int foundflag[],    // Input, per-region flag set to 1 if a new point
                            // was found in that region.
        int foundacc[],     // Input, offset array: prefix sum of the
                            // "new point found" flags.
        int startidx[],     // Input, position index of the known points,
                            // i.e. the start index of each segment.
        int maxdistidx[],   // Input, after the segmented scan, the index of
                            // the point with the largest distance known so
                            // far in each segment.
        int cornercnt,      // Current number of corner points.
        int newcornercst[]  // Output, updated corner point set, i.e. the
                            // extreme point of each segment.
);
// Kernel: _updateLabelKer (update the label values)
// Updates the label values according to the corner points already found.
static __global__ void      // Kernel functions return nothing
_updateLabelKer(
        int label[],        // Input, region label of each point.
        int cstcnt,         // Number of points.
        int foundidx[],     // 1D index of each newly found point.
        int foundacc[],     // Input, offset array: prefix sum of the
                            // per-region "new point found" flags.
        int tmplabel[]      // New label values.
);
// Kernel: _initLabelAryKer (initialize the LABEL array)
static __global__ void _initLabelAryKer(int label[], int cstcnt)
{
    // Compute the array index handled by the current thread.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // If the index is out of range, return immediately.
    if (idx >= cstcnt)
        return;
    // In the LABEL array, write 1 into the last element and 0 everywhere else.
    if (idx == cstcnt - 1)
        label[idx] = 1;
    else
        label[idx] = 0;
}
// Host member method: initLabelAry (initialize the LABEL array)
__host__ int DouglasPeucker::initLabelAry(int label[], int cstcnt)
{
    // Check whether the input array is NULL.
    if (label == NULL)
        return NULL_POINTER;
    // Check that the array length is at least 2.
    if (cstcnt < 2)
        return INVALID_DATA;
    // Compute the block size and grid size needed to launch the kernel.
    size_t blocksize = DEF_BLOCK_1D;
    size_t gridsize = (cstcnt + blocksize - 1) / blocksize;
    // Launch the kernel.
    _initLabelAryKer<<<gridsize, blocksize>>>(label, cstcnt);
    // Check whether the kernel executed correctly.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done.
    return NO_ERROR;
}
// Kernel: _updateDistKer (update the perpendicular distances of the point set)
static __global__ void _updateDistKer(
        int cst[], int cornercst[], int label[],
        int cstcnt, /*int foundflagDev[], int oldlabelDev[], */float dis[])
{
    // Meaning of the shared-memory slots used by this kernel:
    // SIDX_BLK_CNT holds the number of points processed by the current
    // block; since the point count is not necessarily a multiple of
    // blockDim, the last block may process fewer than blockDim points.
    // SIDX_BLK_LABEL_LOW and SIDX_BLK_LABEL_UP hold the lower and upper
    // bounds of the region labels loaded by the current block. From these
    // bounds the start and end points of each point's region can be found,
    // and the perpendicular distance to the line they define computed.
    // All shared memory from index SIDX_BLK_CORNER onward stores the corner
    // coordinates used by the current block; the i-th corner occupies
    // indices 2 * i (x coordinate) and 2 * i + 1 (y coordinate).
#define SIDX_BLK_CNT        0
#define SIDX_BLK_LABEL_LOW  1
#define SIDX_BLK_LABEL_UP   2
#define SIDX_BLK_CORNER     3
    // Shared memory declaration.
    extern __shared__ int shdmem[];
    // Base index: the index of the first point handled by this block.
    int baseidx = blockIdx.x * blockDim.x;
    // Global index.
    int idx = baseidx + threadIdx.x;
    // If the global index is out of range there is no point to process.
    if (idx >= cstcnt)
        return;
    // Thread 0 of each block initializes the data shared by the block.
    if (threadIdx.x == 0) {
        // Number of points processed by this block. Normally blockDim, but
        // the last block may process fewer points when the total count is
        // not a multiple of blockDim.
        if (baseidx + blockDim.x <= cstcnt)
            shdmem[SIDX_BLK_CNT] = blockDim.x;
        else
            shdmem[SIDX_BLK_CNT] = cstcnt - baseidx;
        // Smallest LABEL value among the points processed by this block.
        shdmem[SIDX_BLK_LABEL_LOW] = label[baseidx];
        // Largest LABEL value among the points processed by this block.
        // Since two points are needed to define a line, the "largest" label
        // used here is actually the last point's label plus one.
        if (baseidx + shdmem[SIDX_BLK_CNT] <= cstcnt)
            shdmem[SIDX_BLK_LABEL_UP] =
                    label[baseidx + shdmem[SIDX_BLK_CNT] - 1] + 1;
        else
            shdmem[SIDX_BLK_LABEL_UP] = label[cstcnt - 1];
    }
    // Synchronize so the initialization above is visible to all threads.
    __syncthreads();
    // Load the LABEL bounds into registers; this has no logical meaning and
    // only speeds up subsequent accesses.
    int labellower = shdmem[SIDX_BLK_LABEL_LOW];
    int labelupper = shdmem[SIDX_BLK_LABEL_UP];
    // For readability, use a separate pointer to the shared-memory area that
    // holds the corner points for this block.
    int *cornerShd = &shdmem[SIDX_BLK_CORNER];
    // Load the start and end corner of every LABEL used by this block; the
    // line through them is used to measure the perpendicular distances and
    // hence to pick the next round of corners. Staging them in shared memory
    // only shortens memory access time in the next step.
    if (threadIdx.x < labelupper - labellower + 1) {
        cornerShd[2 * threadIdx.x] =
                cornercst[2 * (labellower + threadIdx.x)];
        cornerShd[2 * threadIdx.x + 1] =
                cornercst[2 * (labellower + threadIdx.x) + 1];
    }
    // Synchronize so all loads above are visible to the whole block. The
    // actual computation starts below.
    __syncthreads();
    if (idx == cstcnt - 1) {
        dis[idx] = 0.0f;
        return;
    }
    // Coordinates and region label of the current point.
    int curx = cst[2 * idx];
    int cury = cst[2 * idx + 1];
    int curlabelidx = 2 * (label[idx] - labellower);
    // Coordinates of the leftmost point of the current LABEL region.
    int leftx = cornerShd[curlabelidx++];
    int lefty = cornerShd[curlabelidx++];
    // Coordinates of the rightmost point of the current LABEL region.
    int rightx = cornerShd[curlabelidx++];
    int righty = cornerShd[curlabelidx  ];
    // If the current point is itself a corner, no computation is needed.
    if ((curx == leftx && cury == lefty) ||
        (curx == rightx && cury == righty)) {
        dis[idx] = 0.0f;
        return;
    }
    // Compute the perpendicular distance to the line through the start and
    // end points of the region.
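    // For reference (not part of the original code): for a non-vertical line
    // y = k * x + b the code below uses the standard point-to-line distance
    //     dist = |k * curx - cury + b| / sqrt(k * k + 1),
    // and falls back to the horizontal distance |curx - leftx| when the
    // segment is vertical (rightx == leftx).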
    float k, dist, b, temp;
if (rightx == leftx) {
dist = fabsf(curx - leftx);
} else {
k = (righty - lefty) * 1.0f / (rightx - leftx);
b = lefty - k * leftx;
temp = fabsf(k * curx - cury + b);
dist = temp / sqrtf(k * k + 1);
}
    // Write the distance back to global memory as output.
dis[idx] = dist;
#undef SIDX_BLK_CNT
#undef SIDX_BLK_LABEL_LOW
#undef SIDX_BLK_LABEL_UP
#undef SIDX_BLK_CORNER
}
// Member method: updateDist (update the perpendicular distances of the point set)
__host__ int DouglasPeucker::updateDist(
        int *cst, int *cornercst, int label[], int cstcnt, float dis[])
{
    // Check whether the input and output point sets are NULL.
    if (cornercst == NULL || cst == NULL || label == NULL || dis == NULL)
        return NULL_POINTER;
    // A point count of zero or less is invalid.
    if (cstcnt <= 0)
        return INVALID_DATA;
    // Compute the block size, grid size, and the amount of shared memory
    // needed to launch the kernel.
    size_t blocksize = DEF_BLOCK_1D;
    size_t gridsize = (cstcnt + blocksize - 1) / blocksize;
    size_t sharedsize = (3 + 2 * blocksize) * sizeof (int);
    // Launch the kernel that updates the perpendicular distance of every point.
    _updateDistKer<<<gridsize, blocksize, sharedsize>>>(
            cst, cornercst, label, cstcnt,/* foundflagDev, oldlabelDev, */dis);
    // Check whether the kernel reported an error.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done.
    return NO_ERROR;
}
// Kernel: _updateFoundInfoKer (update information about newly found points)
static __global__ void _updateFoundInfoKer(
        int *label, float *dist, int *maxdistidx, int cstcnt,
        int *foundflag, int *startidx, float threshold, int *foundidx)
{
    // Shared memory holding the LABEL values processed by this block. Its
    // length is blockDim + 1 because the first LABEL of the next block is
    // also loaded.
    extern __shared__ int labelShd[];
    // Base index: the index of the first point handled by this block.
    int baseidx = blockIdx.x * blockDim.x;
    // Global index.
    int idx = baseidx + threadIdx.x;
    // Initialize shared memory with the LABEL values of the points handled
    // by this block. For robustness, threads whose index is out of range
    // load the LABEL value of the last point.
    if (idx < cstcnt)
        labelShd[threadIdx.x] = label[idx];
    else
        labelShd[threadIdx.x] = label[cstcnt - 1];
    // Thread 0 of each block loads the one extra LABEL value, with the same
    // robustness rule as above.
    if (threadIdx.x == 0) {
        if (baseidx + blockDim.x < cstcnt)
            labelShd[blockDim.x] = label[baseidx + blockDim.x];
        else
            labelShd[blockDim.x] = label[cstcnt - 1];
        // The first block also initializes the first start index.
        if (blockIdx.x == 0)
            startidx[0] = 0;
    }
    // Synchronize the block.
    __syncthreads();
    // Threads handling out-of-range indices return without doing anything.
    if (idx >= cstcnt)
        return;
    // LABEL value of the point handled by this thread.
    int curlabel = labelShd[threadIdx.x];
    // The very last point needs no search; mark its region as having found
    // no new point.
    if (idx == cstcnt - 1) {
        foundflag[curlabel] = 0;
        foundidx[curlabel] = CH_LARGE_ENOUGH;
        return;
    }
    // Only points on a LABEL region boundary are processed here; points in
    // the interior of a region simply return.
    if (curlabel == labelShd[threadIdx.x + 1])
        return;
    // Read the index of the point with the maximum distance in the current
    // LABEL region and the corresponding distance value.
    int curmaxdistidx = maxdistidx[idx];
    float curmaxdist = dist[curmaxdistidx];
    // If the maximum distance in this LABEL region reaches the threshold, a
    // new corner point has been found in this region.
    foundflag[curlabel] = (curmaxdist >= threshold) ? 1 : 0;
    foundidx[curlabel] = (foundflag[curlabel] == 1 ?
                          curmaxdistidx : CH_LARGE_ENOUGH);
    // Update the start index of the next LABEL region. Since this thread
    // handles the last point of the current region, the next region starts
    // at the current global index plus 1.
    startidx[curlabel + 1] = idx + 1;
}
// Member method: updateFoundInfo (update information about newly found points)
__host__ int DouglasPeucker::updateFoundInfo(
        int label[], float dist[], int maxdistidx[],
        int cstcnt, int foundflag[], int startidx[],
        float threshold, int foundidx[])
{
    // Check whether any of the input pointers or arrays is NULL.
    if (label == NULL || dist == NULL || maxdistidx == NULL ||
        foundflag == NULL || startidx == NULL || foundidx == NULL)
        return NULL_POINTER;
    // A point count of zero or less is invalid.
    if (cstcnt <= 0)
        return INVALID_DATA;
    // Compute the block size, grid size, and the amount of shared memory
    // needed to launch the kernel.
    size_t blocksize = DEF_BLOCK_1D;
    size_t gridsize = (cstcnt + blocksize - 1) / blocksize;
    size_t sharedsize = (blocksize + 1) * sizeof (int);
    // Launch the kernel.
    _updateFoundInfoKer<<<gridsize, blocksize, sharedsize>>>(
            label, dist, maxdistidx, cstcnt, foundflag,
            startidx, threshold, foundidx);
    // Check whether the kernel reported an error.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done.
    return NO_ERROR;
}
// Kernel: _updateCornerCstKer (build the new corner point set)
static __global__ void _updateCornerCstKer(
        int cst[], int cornercst[], int foundflag[],
        int foundacc[], int startidx[], int maxdistidx[], int cornercnt,
        int newcornercst[])
{
    // Global index of the current thread. In this kernel each thread handles
    // one LABEL region; for a region in which a new point was found, both
    // the old corner point and the newly found point are copied into the new
    // point set.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Threads handling out-of-range indices return without doing anything.
    if (idx >= cornercnt)
        return;
    // Index of the old corner point in the new corner set. Since the
    // preceding LABEL regions contributed foundacc[idx] new corners, the
    // index grows by that amount relative to the old index (idx).
    int newidx = idx + foundacc[idx];
    // Copy the old corner point into the new point set.
    newcornercst[2 * newidx] = cornercst[2 * idx];
    newcornercst[2 * newidx + 1] = cornercst[2 * idx + 1];
    // If no new point was found in this LABEL region, only the old corner
    // needed to be copied.
    if (foundflag[idx] == 0)
        return;
    // Compute the index of the newly found point in the corner set and its
    // index in the coordinate point set. Since the max-distance index array
    // stores segmented-scan results, the correct value is the one stored at
    // the last index of this LABEL region.
    newidx++;
    int cstidx = maxdistidx[startidx[idx + 1] - 1];
    // Copy the newly found corner point from the point set into the new
    // corner set.
    newcornercst[2 * newidx] = cst[2 * cstidx];
    newcornercst[2 * newidx + 1] = cst[2 * cstidx + 1];
}
// Host member method: updateCornerCst (build the new corner point set)
__host__ int DouglasPeucker::updateCornerCst(
        int *cst, int *cornercst, int foundflag[],
        int foundacc[], int startidx[], int maxdistidx[], int cornercnt,
        int *newcornercst)
{
    // Check whether any of the pointers or arrays is NULL.
    if (cst == NULL || cornercst == NULL || foundacc == NULL ||
        foundflag == NULL || startidx == NULL || maxdistidx == NULL ||
        newcornercst == NULL)
        return NULL_POINTER;
    // A corner count of zero or less is invalid.
    if (cornercnt <= 0)
        return INVALID_DATA;
    // Compute the block size and grid size needed to launch the kernel.
    size_t blocksize = DEF_BLOCK_1D;
    size_t gridsize = (cornercnt + blocksize - 1) / blocksize;
    // Launch the kernel.
    _updateCornerCstKer<<<gridsize, blocksize>>>(
            cst, cornercst, foundflag, foundacc, startidx,
            maxdistidx, cornercnt, newcornercst);
    // Check whether the kernel reported an error.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done.
    return NO_ERROR;
}
// Kernel: _updateLabelKer (update the label values)
static __global__ void _updateLabelKer(
        int label[], int cstcnt,
        int foundidx[], int foundacc[], int tmplabel[])
{
    // Meaning of the shared-memory slots used by this kernel:
    // SIDX_BLK_LABEL_LOW and SIDX_BLK_LABEL_UP hold the lower and upper
    // bounds of the region labels loaded by the current block.
    // The shared memory starting at SIDX_BLK_CORNER_X stores, for every
    // region handled by this block, the index of its newly found point, and
    // the memory starting at SIDX_BLK_FOUND_ACC stores the corresponding
    // prefix-sum offsets.
#define SIDX_BLK_LABEL_LOW  0
#define SIDX_BLK_LABEL_UP   1
#define SIDX_BLK_CORNER_X   2
#define SIDX_BLK_FOUND_ACC  2 + blockDim.x
    // Shared memory declaration.
    extern __shared__ int shdmem[];
    // Base index: the index handled by the first thread of this block.
    int baseidx = blockIdx.x * blockDim.x;
    // Global index of the current thread.
    int idx = baseidx + threadIdx.x;
    // Only thread 0 of each block initializes the shared data, to avoid
    // write conflicts.
    if (threadIdx.x == 0) {
        // Smallest LABEL value among the points handled by this block.
        shdmem[SIDX_BLK_LABEL_LOW] = label[baseidx];
        // Largest LABEL value among the points handled by this block.
        if (baseidx + blockDim.x <= cstcnt)
            shdmem[SIDX_BLK_LABEL_UP] = label[baseidx + blockDim.x - 1];
        else
            shdmem[SIDX_BLK_LABEL_UP] = label[cstcnt - 1];
    }
    // Synchronize so the initialization above is visible to all threads.
    __syncthreads();
    // Read the LABEL range handled by this block from shared memory. This
    // has no logical meaning; moving the data into registers only speeds up
    // subsequent accesses.
    int labellower = shdmem[SIDX_BLK_LABEL_LOW];
    int labelupper = shdmem[SIDX_BLK_LABEL_UP];
    // Not every LABEL region finds a new point in an iteration. Regions that
    // found none use a very large dummy index, which keeps all of their
    // points on the "left" side.
#define LP_DUMMY_CVXX CH_LARGE_ENOUGH
    // Pointers into shared memory for the new-point indices and the
    // prefix-sum offsets; separate names only make the code easier to read.
    int *newidx = &shdmem[SIDX_BLK_CORNER_X];
    int *foundaccShd = &shdmem[SIDX_BLK_FOUND_ACC];
    // Initialize the new-point (split-point) indices in shared memory.
    if (threadIdx.x < labelupper - labellower + 1) {
        // Index of the region handled by this thread.
        int labelidx = threadIdx.x + labellower;
        newidx[threadIdx.x] = foundidx[labelidx];
        // Read the prefix-sum offset of this region from global memory.
        foundaccShd[threadIdx.x] = foundacc[threadIdx.x + labellower];
    }
    // Synchronize so the initialization above is visible to all threads.
    __syncthreads();
    // Threads handling out-of-range indices return without doing anything.
    if (idx >= cstcnt)
        return;
    // LABEL value of the current point, shifted so it indexes shared memory.
    int curlabel = label[idx] - labellower;
    // Points whose index is smaller than the split point's index stay in the
    // region on the left of the split; the others move to the region after it.
    if (idx < newidx[curlabel]) {
        tmplabel[idx] = label[idx] + foundaccShd[curlabel];
    }
    else
        tmplabel[idx] = label[idx] + foundaccShd[curlabel] + 1;
    // Remove the local macro definitions to avoid clashes with later code.
#undef LP_DUMMY_CVXX
#undef SIDX_BLK_LABEL_LOW
#undef SIDX_BLK_LABEL_UP
#undef SIDX_BLK_CORNER_X
#undef SIDX_BLK_FOUND_ACC
}
// Host member method: updateLabel (update the label values)
__host__ int DouglasPeucker::updateLabel(
        int label[], int cstcnt, int foundidx[], int foundacc[], int tmplabel[])
{
    // Check whether any of the pointers is NULL.
    if (label == NULL || foundacc == NULL || foundidx == NULL || tmplabel == NULL)
        return NULL_POINTER;
    // A point count of zero or less is invalid.
    if (cstcnt <= 0)
        return INVALID_DATA;
    // Compute the block size and grid size needed to launch the kernel, and
    // the amount of shared memory used by each block.
    size_t blocksize = DEF_BLOCK_1D;
    size_t gridsize = (cstcnt + blocksize - 1) / blocksize;
    size_t sharedsize = (2 + 2 * blocksize) * sizeof (int);
    // Launch the kernel.
    _updateLabelKer<<<gridsize, blocksize, sharedsize>>>(
            label, cstcnt, foundidx, foundacc, tmplabel);
    // Check whether the kernel reported an error.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done.
    return NO_ERROR;
}
// Macro: FAIL_CORNER_FREE
// Frees the previously allocated memory on error.
#define FAIL_CORNER_FREE do {                 \
        if (tmpmemDev != NULL)                \
            cudaFree(tmpmemDev);              \
    } while (0)
// Member method: douglasIter (iteratively find the points of the simplified curve)
__host__ int DouglasPeucker::douglasIter(
        int *inputcst, int *cornercst, float threshold, int count, int *cornerpnt)
{
    // Check whether the input and output point sets are NULL.
    if (inputcst == NULL || cornercst == NULL)
        return NULL_POINTER;
    // Local variables.
    cudaError_t cuerrcode;  // Error code returned by CUDA calls.
    int errcode;            // Error code returned by called functions.
    // Binary operator used by the scan.
    add_class<int> add;
    int cornercnt = 2;      // Current number of corner points. The iteration
                            // starts with the first and last points of the
                            // curve already taken as corners, hence 2.
    int foundcnt;           // Number of new points found in the current
                            // iteration, not counting points found earlier.
    int *tmpmemDev = NULL;  // Device memory holding the intermediate data.
    size_t datacnt = 0;     // Number of data elements required.
    size_t datasize = 0;    // Size in bytes of the required data elements.
    // Macro: CHI_DATA_DECLARE (intermediate variable declarator)
    // Removes the repetitive code in declaring the intermediate variables,
    // keeping the code a little tidier.
#define CHI_DATA_DECLARE(dataname, type, count)   \
    type *dataname##Dev = NULL;                   \
    size_t dataname##cnt = (count);               \
    datacnt += dataname##cnt;                     \
    datasize += dataname##cnt * sizeof (type)
    // Declare the device arrays for the intermediate variables.
    CHI_DATA_DECLARE(label, int,         // Region label of each point in the
                     count);             // current iteration.
    CHI_DATA_DECLARE(maxdistidx, int,    // For each point, the index of the
                     count);             // point with the largest distance
                                         // among the preceding points of the
                                         // same LABEL region.
    CHI_DATA_DECLARE(foundflag, int,     // Whether each LABEL region found a
                     count);             // new point in this iteration.
    CHI_DATA_DECLARE(foundidx, int,      // Index of the new point found in
                     count);             // each LABEL region, if any.
    CHI_DATA_DECLARE(foundacc, int,      // For each LABEL region, the number
                     count + 1);         // of new points found by all the
                                         // regions before it; used to compute
                                         // the new index of every point (old
                                         // or new) in the new point set.
    CHI_DATA_DECLARE(startidx, int,      // Start index of each LABEL region
                     count);             // in the coordinate point set.
    CHI_DATA_DECLARE(tmplabel, int,      // New label values.
                     count);
    CHI_DATA_DECLARE(tmpcstin, int,      // Temporary array holding the input
                     count * 2);         // point set during the iteration.
    CHI_DATA_DECLARE(tmpcornerin, int,   // Temporary array holding the result
                     count * 2);         // points found up to the previous
                                         // iteration.
    CHI_DATA_DECLARE(tmpcornerout, int,  // Temporary array holding the result
                     count * 2);         // points found in this iteration.
    CHI_DATA_DECLARE(dist, float,        // Perpendicular distances.
                     count);
    CHI_DATA_DECLARE(tmpmaxdist, float,  // Results of the segmented scan.
                     count * 2);
    // Remove the declarator macro to avoid name clashes later on.
#undef CHI_DATA_DECLARE
    // Allocate one block of device memory for all intermediate variables.
    cuerrcode = cudaMalloc((void **)&tmpmemDev, datasize);
    if (cuerrcode != cudaSuccess) {
        FAIL_CORNER_FREE;
        return CUDA_ERROR;
    }
    // Hand parts of the block to the individual intermediate variables.
    // Allocating one large block reduces both the allocation overhead and
    // the waste caused by memory alignment.
    labelDev = tmpmemDev;
    maxdistidxDev = labelDev + labelcnt;
    foundflagDev = maxdistidxDev + maxdistidxcnt;
    foundidxDev = foundflagDev + foundflagcnt;
    foundaccDev = foundidxDev + foundidxcnt;
    startidxDev = foundaccDev + foundacccnt;
    tmplabelDev = startidxDev + startidxcnt;
    tmpcstinDev = tmplabelDev + tmplabelcnt;
    tmpcornerinDev = tmpcstinDev + tmpcstincnt;
    tmpcorneroutDev = tmpcornerinDev + tmpcornerincnt;
    distDev = (float *)tmpcorneroutDev + tmpcorneroutcnt;
    tmpmaxdistDev = distDev + distcnt;
    // Initialize the LABEL array: after initialization every element is 0
    // except the last one, which is 1.
    errcode = this->initLabelAry(labelDev, count);
    if (errcode != NO_ERROR) {
        FAIL_CORNER_FREE;
        return errcode;
    }
    // Initialize the point sets used during the iteration. Two point sets
    // are needed; internally allocated copies are used so that the input
    // point set is not modified.
    // Temporary array holding the first and last points of the curve, used
    // to initialize the result point set.
    int temp[4]= {inputcst[0], inputcst[1],
                  inputcst[2 * (count - 1)],
                  inputcst[2 * (count - 1) + 1]};
    // Initialize tmpcstinDev.
    cudaMemcpy(tmpcstinDev, inputcst, count * sizeof(int) * 2,
               cudaMemcpyHostToDevice);
    // Initialize the result point set.
    cuerrcode = cudaMemcpy(tmpcornerinDev, temp,
                           sizeof (int) * 4, cudaMemcpyHostToDevice);
    if (cuerrcode != cudaSuccess) {
        FAIL_CORNER_FREE;
        return CUDA_ERROR;
    }
    // All initialization is done; start iterating. Every iteration
    // recomputes the perpendicular distance of each point within its LABEL
    // region, uses the distances to decide whether each region contains a
    // new corner point (and which point it is), and then computes the index
    // of every point for the next iteration. The iteration stops when no new
    // point can be found in any of the current LABEL regions.
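    // Outline of one iteration (reference sketch, not part of the original
    // code); it mirrors the calls made in the loop below:
    //   1. updateDist         - perpendicular distance of every point
    //   2. segmentedScan      - per-region maximum distance and its index
    //   3. updateFoundInfo    - per-region "new corner found" flag and index
    //   4. scanArrayExclusive - prefix sum of the flags (new-point offsets)
    //   5. updateCornerCst    - merge old corners and new corners
    //   6. updateLabel        - split the affected regions for the next round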
    while (count >= cornercnt) {
        // Update the perpendicular distance of every point in the set.
        errcode = this->updateDist(tmpcstinDev, tmpcornerinDev, labelDev, count,
                                   distDev);
        if (errcode != NO_ERROR) {
            FAIL_CORNER_FREE;
            return errcode;
        }
        // Use a segmented scan to obtain the maximum distance of each LABEL
        // region and the index of the point attaining it.
        errcode = this->segScan.segmentedScan(
                distDev, labelDev, tmpmaxdistDev, maxdistidxDev, count, false);
        if (errcode != NO_ERROR) {
            FAIL_CORNER_FREE;
            return errcode;
        }
        // Use the distance information to decide whether each LABEL region
        // contains a new corner point.
        errcode = this->updateFoundInfo(
                labelDev, distDev, maxdistidxDev,
                count, foundflagDev, startidxDev, threshold, foundidxDev);
        if (errcode != NO_ERROR) {
            FAIL_CORNER_FREE;
            return errcode;
        }
        // Scan the per-region flags to obtain the prefix sums of the newly
        // found points.
        errcode = this->aryScan.scanArrayExclusive(foundflagDev, foundaccDev,
                                                   cornercnt, add,
                                                   false, false, false);
        if (errcode != NO_ERROR) {
            FAIL_CORNER_FREE;
            return errcode;
        }
        // Copy the last prefix-sum value back to host memory; it is the
        // number of new points found in this iteration.
        cuerrcode = cudaMemcpy(&foundcnt, &foundaccDev[cornercnt],
                               sizeof (int), cudaMemcpyDeviceToHost);
        if (cuerrcode != cudaSuccess) {
            FAIL_CORNER_FREE;
            return CUDA_ERROR;
        }
        // If no new point was found, all corner points have been found and
        // the iteration can stop.
        if (foundcnt <= 0)
            break;
        // Build the new corner point set.
        errcode = this->updateCornerCst(
                tmpcstinDev, tmpcornerinDev, foundflagDev, foundaccDev, startidxDev,
                maxdistidxDev, cornercnt, tmpcorneroutDev);
        if (errcode != NO_ERROR) {
            FAIL_CORNER_FREE;
            return errcode;
        }
        // Update the number of corner points.
        cornercnt += foundcnt;
        *cornerpnt = cornercnt;
        // Update the labels, splitting each region in which a new corner was
        // found at that corner for the next iteration.
        errcode = this->updateLabel(labelDev, count, foundidxDev,
                                    foundaccDev, tmplabelDev);
        if (errcode != NO_ERROR) {
            FAIL_CORNER_FREE;
            return errcode;
        }
        // Hand this iteration's results over as the next iteration's inputs.
        labelDev = tmplabelDev;
        int *cstswptmp = tmpcornerinDev;
        tmpcornerinDev = tmpcorneroutDev;
        tmpcorneroutDev = cstswptmp;
        // End of one iteration.
    }
    // Copy the result point set into cornercst.
    cuerrcode = cudaMemcpy(
            cornercst, tmpcornerinDev, cornercnt * sizeof(int) * 2,
            cudaMemcpyDeviceToHost);
    if (cuerrcode != cudaSuccess) {
        FAIL_CORNER_FREE;
        return CUDA_ERROR;
    }
    // Free the device memory.
    cudaFree(tmpmemDev);
    // Done.
    return NO_ERROR;
}
#undef FAIL_CORNER_FREE
// Host member method: douglasPeucker (simplify a curve with the Douglas-Peucker algorithm)
__host__ int DouglasPeucker::douglasPeucker(
        Curve *incur, Curve *outcur)
{
    // Check whether the pointer parameters are NULL.
    if (incur == NULL || outcur == NULL)
        return NULL_POINTER;
    // Local variables: error code and output point count.
    int errcode;
    int point = 0;
    // Run the Douglas-Peucker iteration on the input curve.
    errcode = this->douglasIter(incur->crvData, outcur->crvData, this->threshold,
                                incur->curveLength, &point);
    outcur->curveLength = point;
    if (errcode != NO_ERROR)
        return errcode;
    // Done.
    return NO_ERROR;
}
// gvdbBrickFunc ( gvdb, channel, nodeid, t, pos, dir, hit, norm, clr )
typedef void(*gvdbBrickFunc_t)( VDBInfo*, uchar, int, float3, float3, float3, float3&, float3&, float4& );
static const int MAXLEV = 5;
static const int MAX_ITER = 256;
// Gets the value of a given floating-point channel at a point inside a brick.
// `gvdb` is the volume's `VDBInfo` object.
// `chan` is the channel to sample.
// `p` is the location of the point within the brick.
// `offs` is the coordinate of the minimum corner of the brick in the atlas.
// TODO: Turn `offs` into an integer vector.
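// For reference (not part of the original code): with tb = frac(p)*0.5 + 0.25
// and ta = 1 - tb, the per-axis weights used below are (ta^2, 2*ta*tb, tb^2),
// which sum to (ta + tb)^2 = 1, i.e. a 3-tap quadratic reconstruction applied
// separately along x, y and z over the 3x3x3 neighborhood.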
inline __device__ float getTricubic ( VDBInfo* gvdb, uchar chan, float3 p, float3 offs )
{
static const float MID = 1.0;
static const float HI = 2.0;
// find bottom-left corner of local 3x3x3 group
float3 q = floor3(p + offs) - MID; // move to bottom-left corner
// evaluate tri-cubic
float3 tb = frac3(p) * 0.5 + 0.25;
float3 ta = (1.0-tb);
float3 ta2 = ta*ta;
float3 tb2 = tb*tb;
float3 tab = ta*tb*2.0;
// lookup 3x3x3 local neighborhood
float tv[9];
tv[0] = tex3D<float>( gvdb->volIn[chan], q.x, q.y, q.z );
tv[1] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y, q.z );
tv[2] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y, q.z );
tv[3] = tex3D<float>( gvdb->volIn[chan] , q.x, q.y+MID, q.z );
tv[4] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y+MID, q.z );
tv[5] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y+MID, q.z );
tv[6] = tex3D<float>( gvdb->volIn[chan], q.x, q.y+HI, q.z );
tv[7] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y+HI, q.z );
tv[8] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y+HI, q.z );
float3 abc = make_float3 ( tv[0]*ta2.x + tv[1]*tab.x + tv[2]*tb2.x,
tv[3]*ta2.x + tv[4]*tab.x + tv[5]*tb2.x,
tv[6]*ta2.x + tv[7]*tab.x + tv[8]*tb2.x );
tv[0] = tex3D<float>( gvdb->volIn[chan], q.x, q.y, q.z+MID );
tv[1] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y, q.z+MID );
tv[2] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y, q.z+MID );
tv[3] = tex3D<float>( gvdb->volIn[chan], q.x, q.y+MID, q.z+MID );
tv[4] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y+MID, q.z+MID );
tv[5] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y+MID, q.z+MID );
tv[6] = tex3D<float>( gvdb->volIn[chan], q.x, q.y+HI, q.z+MID );
tv[7] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y+HI, q.z+MID );
tv[8] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y+HI, q.z+MID );
float3 def = make_float3 ( tv[0]*ta2.x + tv[1]*tab.x + tv[2]*tb2.x,
tv[3]*ta2.x + tv[4]*tab.x + tv[5]*tb2.x,
tv[6]*ta2.x + tv[7]*tab.x + tv[8]*tb2.x );
tv[0] = tex3D<float>( gvdb->volIn[chan], q.x, q.y, q.z+HI );
tv[1] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y, q.z+HI );
tv[2] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y, q.z+HI );
tv[3] = tex3D<float>( gvdb->volIn[chan], q.x, q.y+MID, q.z+HI );
tv[4] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y+MID, q.z+HI );
tv[5] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y+MID, q.z+HI );
tv[6] = tex3D<float>( gvdb->volIn[chan], q.x, q.y+HI, q.z+HI );
tv[7] = tex3D<float>( gvdb->volIn[chan], q.x+MID, q.y+HI, q.z+HI );
tv[8] = tex3D<float>( gvdb->volIn[chan], q.x+HI, q.y+HI, q.z+HI );
float3 ghi = make_float3 ( tv[0]*ta2.x + tv[1]*tab.x + tv[2]*tb2.x,
tv[3]*ta2.x + tv[4]*tab.x + tv[5]*tb2.x,
tv[6]*ta2.x + tv[7]*tab.x + tv[8]*tb2.x );
float3 jkl = make_float3 ( abc.x*ta2.y + abc.y*tab.y + abc.z*tb2.y,
def.x*ta2.y + def.y*tab.y + def.z*tb2.y,
ghi.x*ta2.y + ghi.y*tab.y + ghi.z*tb2.y );
return jkl.x*ta2.z + jkl.y*tab.z + jkl.z*tb2.z;
}
// Gets the value of a given floating-point channel at a point inside a brick.
// `gvdb` is the volume's `VDBInfo` object.
// `chan` is the channel to sample.
// `wp` is the point to sample in index-space (not atlas-space!)
// `offs` is the minimum vertex of the brick's bounding box in atlas space.
// `vmin` is the minimum vertex of the brick's bounding box in index-space.
inline __device__ float getTrilinear (VDBInfo* gvdb, uchar chan, float3 wp, float3 offs, float3 vmin)
{
float3 p = offs + (wp-vmin); // sample point in index coords
return tex3D<float> ( gvdb->volIn[chan], p.x, p.y, p.z );
}
#ifdef CUDA_PATHWAY
inline __device__ unsigned char getVolSampleC ( VDBInfo* gvdb, uchar chan, float3 wpos )
{
float3 offs, vmin; uint64 nid;
VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid ); // find vdb node at point
if ( node == 0x0 ) return 0;
float3 p = offs + (wpos-vmin);
return tex3D<uchar> ( gvdb->volIn[chan], p.x, p.y, p.z );
}
inline __device__ float getVolSampleF ( VDBInfo* gvdb, uchar chan, float3 wpos )
{
float3 offs, vmin; uint64 nid;
VDBNode* node = getNodeAtPoint ( gvdb, wpos, &offs, &vmin, &nid ); // find vdb node at point
if ( node == 0x0 ) return 0;
float3 p = offs + (wpos-vmin);
return tex3D<float> ( gvdb->volIn[chan], p.x, p.y, p.z );
}
#endif
// Gets the negative of the gradient of the floating-point channel with index `chan` at the atlas-space position `p`
// using default filtering.
// This will point away from higher-density regions in a density field, and into a level set/signed distance field.
inline __device__ float3 getGradient ( VDBInfo* gvdb, uchar chan, float3 p )
{
float3 g;
// note: must use +/- 0.5 since apron may only be 1 voxel wide (cannot go beyond brick)
g.x = 1.0* (tex3D<float>( gvdb->volIn[chan], p.x-.5, p.y, p.z ) - tex3D<float>( gvdb->volIn[chan], p.x+.5, p.y, p.z ));
g.y = 1.0* (tex3D<float>( gvdb->volIn[chan], p.x, p.y-.5, p.z ) - tex3D<float>( gvdb->volIn[chan], p.x, p.y+.5, p.z ));
g.z = 1.0* (tex3D<float>( gvdb->volIn[chan], p.x, p.y, p.z-.5) - tex3D<float>( gvdb->volIn[chan], p.x, p.y, p.z+.5 ));
g = normalize ( g );
return g;
}
// Gets the gradient of the floating-point channel with index `chan` at the atlas-space position `p` using
// default filtering.
// This will approximate the normal of a level set/signed distance field, and point away from higher-density regions
// in a density field.
inline __device__ float3 getGradientLevelSet ( VDBInfo* gvdb, uchar chan, float3 p )
{
// tri-linear filtered gradient
// (assumes atlas has linear hardware filtering on)
float3 g;
g.x = 1.0* (tex3D<float>( gvdb->volIn[chan], p.x+.5, p.y, p.z ) - tex3D<float>( gvdb->volIn[chan], p.x-.5, p.y, p.z ));
g.y = 1.0* (tex3D<float>( gvdb->volIn[chan], p.x, p.y+.5, p.z ) - tex3D<float>( gvdb->volIn[chan], p.x, p.y-.5, p.z ));
g.z = 1.0* (tex3D<float>( gvdb->volIn[chan], p.x, p.y, p.z+.5) - tex3D<float>( gvdb->volIn[chan], p.x, p.y, p.z-.5 ));
g = normalize ( g );
return g;
}
// Gets the negative of the gradient of the floating-point channel with index `chan` at the atlas-space position `p`
// using tricubic interpolation.
inline __device__ float3 getGradientTricubic ( VDBInfo* gvdb, uchar chan, float3 p, float3 offs )
{
// tri-cubic filtered gradient
const float vs = 0.5;
float3 g;
g.x = (getTricubic (gvdb, chan, p+make_float3(-vs,0,0), offs) - getTricubic (gvdb, chan, p+make_float3(vs,0,0), offs))/(2*vs);
g.y = (getTricubic (gvdb, chan, p+make_float3(0,-vs,0), offs) - getTricubic (gvdb, chan, p+make_float3(0,vs,0), offs))/(2*vs);
g.z = (getTricubic (gvdb, chan, p+make_float3(0,0,-vs), offs) - getTricubic (gvdb, chan, p+make_float3(0,0,vs), offs))/(2*vs);
g = normalize ( g );
return g;
}
// Marches along the ray p + o + rdir*t in atlas space in steps of SCN_FINESTEP, using default interpolation.
// If it samples a point less than SCN_THRESH, halts and returns:
// `p`: The atlas-space coordinate of the intersection relative to `o`
// returned value: The index-space coordinate of the intersection
// Otherwise, returns (NOHIT, NOHIT, NOHIT).
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to sample
// `p`: The origin of the ray in atlas-space relative to `o`
// `o`: The minimum AABB vertex of the brick to start sampling from
// `rpos`: Unused
// `rdir`: The direction of the ray in atlas-space
// `vmin`: The minimum AABB vertex of the brick in index-space
__device__ float3 rayLevelSet ( VDBInfo* gvdb, uchar chan, float3& p, float3 o, float3 rpos, float3 rdir, float3 vmin )
{
float dt = SCN_FINESTEP;
float3 pt = dt*rdir;
for ( int i=0; i < 512; i++ ) {
if ( tex3D<float>( gvdb->volIn[chan], p.x+o.x, p.y+o.y, p.z+o.z ) < SCN_THRESH ) // trilinear test
return p + vmin;
p += pt;
}
return make_float3(NOHIT, NOHIT, NOHIT);
}
// Samples channel `chan` at atlas-space position `p`, returning a `uchar4` color.
inline __device__ uchar4 getColor ( VDBInfo* gvdb, uchar chan, float3 p )
{
return tex3D<uchar4> ( gvdb->volIn[chan], (int) p.x, (int) p.y, (int) p.z );
}
// Samples channel `chan` at atlas-space position `p`, obtaining a `uchar4` color and casting it to a `float4`.
inline __device__ float4 getColorF ( VDBInfo* gvdb, uchar chan, float3 p )
{
return make_float4 (tex3D<uchar4> ( gvdb->volIn[chan], (int) p.x, (int) p.y, (int) p.z ) );
}
//----------- RAY CASTING
// Traces a brick, rendering voxels as cubes.
// To find a surface intersection, this steps through each voxel once using DDA and stops when it finds a voxel with
// value greater than or equal to SCN_THRESH.
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to render
// `nodeid`: The index of the node at level 0
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit`: If hit.z == NOHIT, no intersection; otherwise, the coordinates of the intersection
// `norm`: The normal at the intersection
// `hclr`: The color of the color channel at the intersection point.
__device__ void raySurfaceVoxelBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& hclr )
{
float3 vmin;
VDBNode* node = getNode ( gvdb, 0, nodeid, &vmin ); // Get the VDB leaf node
float3 o = make_float3( node->mValue ) ; // Atlas sub-volume to trace
HDDAState dda;
dda.SetFromRay(pos, dir, t);
dda.PrepareLeaf(vmin);
for (int iter=0; iter < MAX_ITER
&& dda.p.x >=0 && dda.p.y >=0 && dda.p.z >=0
&& dda.p.x < gvdb->res[0] && dda.p.y < gvdb->res[0] && dda.p.z < gvdb->res[0]; iter++)
{
if ( tex3D<float> ( gvdb->volIn[chan], dda.p.x+o.x+.5, dda.p.y+o.y+.5, dda.p.z+o.z+.5 ) > SCN_THRESH) { // test texture atlas
vmin += make_float3(dda.p); // voxel location in world
dda.t = rayBoxIntersect ( pos, dir, vmin, vmin + 1 );
if (dda.t.z == NOHIT) {
hit.z = NOHIT;
continue;
}
hit = getRayPoint ( pos, dir, dda.t.x );
// Compute the normal of the voxel [vmin, vmin+gvdb->voxelsize] at the hit point
// Note: This is not normalized when the ray hits an edge of the voxel exactly
float3 fromVoxelCenter = (hit - vmin) - 0.5f; // in [-1/2, 1/2]
fromVoxelCenter -= 0.01 * dir; // Bias the sample point slightly towards the camera
const float maxCoordinate = fmaxf(fmaxf(fabsf(fromVoxelCenter.x), fabsf(fromVoxelCenter.y)), fabsf(fromVoxelCenter.z));
norm.x = (fabsf(fromVoxelCenter.x) == maxCoordinate ? copysignf(1.0f, fromVoxelCenter.x) : 0.0f);
norm.y = (fabsf(fromVoxelCenter.y) == maxCoordinate ? copysignf(1.0f, fromVoxelCenter.y) : 0.0f);
norm.z = (fabsf(fromVoxelCenter.z) == maxCoordinate ? copysignf(1.0f, fromVoxelCenter.z) : 0.0f);
if ( gvdb->clr_chan != CHAN_UNDEF ) hclr = getColorF ( gvdb, gvdb->clr_chan, make_float3(dda.p)+o );
return;
}
dda.Next();
dda.Step();
}
}
// Traces a brick, rendering the surface with trilinear interpolation.
// To find an intersection, this samples using increments of `SCN_DIRECTSTEP` in `t` and stops when it finds a point greater
// than or equal to SCN_THRESH.
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to render
// `nodeid`: The index of the node at level 0
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit`: If hit.z == NOHIT, no intersection; otherwise, the coordinates of the intersection
// `norm`: The normal at the intersection
// `hclr`: The color of the color channel at the intersection point.
__device__ void raySurfaceTrilinearBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& hclr )
{
float3 vmin;
VDBNode* node = getNode ( gvdb, 0, nodeid, &vmin ); // Get the VDB leaf node
float3 o = make_float3( node->mValue ) ; // Atlas sub-volume to trace
t.x = SCN_DIRECTSTEP * ceilf(t.x / SCN_DIRECTSTEP); // Start on sampling wavefront (avoids subvoxel banding artifacts)
float3 p = pos + t.x*dir - vmin; // sample point in index coords
for (int iter=0; iter < MAX_ITER && p.x >=0 && p.y >=0 && p.z >=0 && p.x < gvdb->res[0] && p.y < gvdb->res[0] && p.z < gvdb->res[0]; iter++)
{
if (tex3D<float>(gvdb->volIn[chan], p.x+o.x, p.y+o.y, p.z+o.z ) >= SCN_THRESH ) {
hit = p + vmin;
norm = getGradient ( gvdb, chan, p+o );
if ( gvdb->clr_chan != CHAN_UNDEF ) hclr = getColorF ( gvdb, gvdb->clr_chan, p+o );
return;
}
p += SCN_DIRECTSTEP*dir;
t.x += SCN_DIRECTSTEP;
}
}
// Traces a brick, rendering the surface with tricubic interpolation.
// To find an intersection, this samples using increments of `SCN_DIRECTSTEP` in `t` and stops when it finds a point greater
// than or equal to SCN_THRESH.
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to render
// `nodeid`: The index of the node at level 0
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit`: If hit.z == NOHIT, no intersection; otherwise, the coordinates of the intersection
// `norm`: The normal at the intersection
// `hclr`: The color of the color channel at the intersection point.
__device__ void raySurfaceTricubicBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& hclr )
{
float3 vmin;
VDBNode* node = getNode ( gvdb, 0, nodeid, &vmin ); // Get the VDB leaf node
float3 o = make_float3( node->mValue ) ; // Atlas sub-volume to trace
float3 p = pos + t.x*dir - vmin; // sample point in index coords
float3 v;
for (int iter=0; iter < MAX_ITER && p.x >=0 && p.y >=0 && p.z >=0 && p.x < gvdb->res[0] && p.y < gvdb->res[0] && p.z < gvdb->res[0]; iter++)
{
v.z = getTricubic ( gvdb, chan, p, o );
if ( v.z >= SCN_THRESH) {
v.x = getTricubic ( gvdb, chan, p - SCN_FINESTEP*dir, o );
v.y = (v.z - SCN_THRESH)/(v.z-v.x);
p += -v.y*SCN_FINESTEP*dir;
hit = p + vmin;
norm = getGradientTricubic ( gvdb, chan, p, o );
if ( gvdb->clr_chan != CHAN_UNDEF ) hclr = getColorF ( gvdb, gvdb->clr_chan, p+o );
return;
}
p += SCN_DIRECTSTEP*dir;
t.x += SCN_DIRECTSTEP;
}
}
// Looks up the stored NDC value from a depth buffer (i.e. after perspective projection), and inverts this
// to get the world-space depth of the stored fragment.
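// For reference (not part of the original code): this assumes a projection that
// stores z_ndc = f*(z_view - n) / ((f - n)*z_view), i.e. 0 at the near plane and
// 1 at the far plane; solving that for z_view gives the expression returned below,
//     z_view = (-n*f / (f - n)) / (z_ndc - f / (f - n)).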
inline __device__ float getLinearDepth(float* depthBufFloat)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; // Pixel coordinates
int y = blockIdx.y * blockDim.y + threadIdx.y;
float z = depthBufFloat[(SCN_HEIGHT - 1 - y) * SCN_WIDTH + x]; // Get depth value
float n = scn.camnear;
float f = scn.camfar;
return (-n * f / (f - n)) / (z - (f / (f - n))); // Return linear depth
}
// Get the value of t at which the ray starting at the camera position with direction `dir` intersects the depth buffer
// at the current thread. INFINITY if there is no depth buffer.
inline __device__ float getRayDepthBufferMax(const float3& rayDir) {
if (SCN_DBUF != 0x0) {
// Solve
// t * (length of rayDir in app space) == getLinearDepth(SCN_DBUF)
// for t, where (length of rayDir in app space) is length((SCN_XFORM * float4(rayDir, 0)).xyz):
float3 rayInWorldSpace;
rayInWorldSpace.x = rayDir.x * SCN_XFORM[0] + rayDir.y * SCN_XFORM[4] + rayDir.z * SCN_XFORM[ 8];
rayInWorldSpace.y = rayDir.x * SCN_XFORM[1] + rayDir.y * SCN_XFORM[5] + rayDir.z * SCN_XFORM[ 9];
rayInWorldSpace.z = rayDir.x * SCN_XFORM[2] + rayDir.y * SCN_XFORM[6] + rayDir.z * SCN_XFORM[10];
return getLinearDepth(SCN_DBUF) / length(rayInWorldSpace);
}
else {
return INFINITY;
}
}
#define EPSTEST(a,b,c) (a>b-c && a<b+c)
#define VOXEL_EPS 0.0001
// Traces a brick, rendering the level set surface with default interpolation.
// To find an intersection, this samples using increments of `SCN_DIRECTSTEP` in `t` and stops when it finds a point less
// than SCN_THRESH.
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to render
// `nodeid`: The index of the node at level 0
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit`: If hit.z == NOHIT, no intersection; otherwise, the coordinates of the intersection
// `norm`: The normal at the intersection
// `hclr`: The color of the color channel at the intersection point
__device__ void rayLevelSetBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& hclr )
{
float3 vmin;
VDBNode* node = getNode ( gvdb, 0, nodeid, &vmin ); // Get the VDB leaf node
float3 o = make_float3( node->mValue ) ; // Atlas sub-volume to trace
float3 p = pos + t.x*dir - vmin; // sample point in index coords
t.x = SCN_DIRECTSTEP * ceilf( t.x / SCN_DIRECTSTEP );
for (int iter=0; iter < MAX_ITER && p.x >=0 && p.y >=0 && p.z >=0 && p.x <= gvdb->res[0] && p.y <= gvdb->res[0] && p.z <= gvdb->res[0]; iter++) {
if (tex3D<float>(gvdb->volIn[chan], p.x+o.x, p.y+o.y, p.z+o.z ) < SCN_THRESH ) { // test atlas for zero crossing
hit = rayLevelSet ( gvdb, chan, p, o, pos, dir, vmin );
if ( hit.z != NOHIT ) {
norm = getGradientLevelSet ( gvdb, chan, p+o );
if (gvdb->clr_chan != CHAN_UNDEF) hclr = getColorF(gvdb, gvdb->clr_chan, p + o);
return;
}
}
p += SCN_DIRECTSTEP*dir;
t.x += SCN_DIRECTSTEP;
}
}
// Empty intersector that simply reports a hit at the current position.
// Inputs:
// `gvdb`: Unused
// `chan`: Unused
// `nodeid`: Unused
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit`: The coordinates of the intersection
// `norm`: Unused
// `hclr`: Unused
__device__ void rayEmptySkipBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& clr )
{
hit = pos + t.x * dir; // Return brick hit
}
// Returns deep shadow accumulation along a ray. Each sample's value is mapped to a density value using the transfer
// function. This sample density is then treated as an opaque layer with opacity
// exp( SCN_EXTINCT * density * SCN_SHADOWSTEP / (1.0 + t.x * 0.4) )
// where t.x in the above equation increments in steps of SCN_SHADOWSTEP, while the parameter of the ray increments in steps
// of SCN_DIRECTSTEP.
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to render
// `nodeid`: The index of the node at level 0
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit`: Unused
// `norm`: Unused
// `clr`: Accumulated color and transparency along the ray.
__device__ void rayShadowBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& clr )
{
float3 vmin;
VDBNode* node = getNode ( gvdb, 0, nodeid, &vmin ); // Get the VDB leaf node
t.x += gvdb->epsilon; // make sure we start inside
	t.y -= gvdb->epsilon;		// make sure we end inside
float3 o = make_float3( node->mValue ); // atlas sub-volume to trace
float3 p = pos + t.x*dir - vmin; // sample point in index coords
float3 pt = SCN_DIRECTSTEP * dir; // index increment
float val = 0;
// accumulate remaining voxels
for (; clr.w < 1 && p.x >=0 && p.y >=0 && p.z >=0 && p.x < gvdb->res[0] && p.y < gvdb->res[0] && p.z < gvdb->res[0];) {
val = exp ( SCN_EXTINCT * transfer( gvdb, tex3D<float> ( gvdb->volIn[chan], p.x+o.x, p.y+o.y, p.z+o.z )).w * SCN_SHADOWSTEP/(1.0 + t.x * 0.4) ); // 0.4 = shadow gain
clr.w = 1.0 - (1.0-clr.w) * val;
p += pt;
t.x += SCN_SHADOWSTEP;
}
}
// DeepBrick - Sample into brick for deep volume raytracing
// Accumulates colors in a volume. Handles depth buffer intersections.
// This samples in increments of `SCN_DIRECTSTEP` in `t`.
// Each sample's value is mapped to a density value using the transfer function. This sample density is then treated as
// an opaque layer with opacity
// exp(SCN_EXTINCT * val.w * SCN_DIRECTSTEP).
// Inputs:
// `gvdb`: The volume's `VDBInfo` object
// `chan`: The channel to render
// `nodeid`: The index of the node at level 0
// `t`: The current parameter of the ray
// `pos`: The origin of the ray
// `dir`: The direction of the ray
// Outputs:
// `hit.x`: Front brick intersection point, equal to t.x
// `hit.y`: Back intersection of brick (if hit.z = 0) or distance between ray origin and depth buffer intersection
// (if hit.z = 1)
// `hit.z`: 0 if ray passed through entire brick, 1 if intersected with depth buffer.
// `norm`: Unused
// `clr`: Accumulated color and transparency along the ray.
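// For reference (not part of the original code): the loop below performs standard
// front-to-back compositing. With per-step transmittance T = exp(SCN_EXTINCT * density * SCN_DIRECTSTEP),
// each sample adds color * clr.w * (1 - T) * SCN_ALBEDO to the accumulated RGB and
// multiplies the accumulated transmittance clr.w by T, until clr.w drops below SCN_ALPHACUT.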
__device__ void rayDeepBrick ( VDBInfo* gvdb, uchar chan, int nodeid, float3 t, float3 pos, float3 dir, float3& hit, float3& norm, float4& clr )
{
float3 vmin;
VDBNode* node = getNode ( gvdb, 0, nodeid, &vmin ); // Get the VDB leaf node
t.x = SCN_DIRECTSTEP * ceilf( t.x / SCN_DIRECTSTEP ); // Start on sampling wavefront (avoids subvoxel banding artifacts)
float3 o = make_float3( node->mValue ); // Atlas sub-volume to trace
float3 wp = pos + t.x*dir; // Sample position in index space
float3 p = wp - vmin; // Sample point in sub-volume (in units of voxels)
const float3 wpt = SCN_DIRECTSTEP*dir; // Increment in units of voxels
const float dt = length(wpt); // Change in t-value per step
const float tDepthIntersection = getRayDepthBufferMax(dir); // The t.x at which the ray intersects the depth buffer
// Record front hit point at first significant voxel
if (hit.x == 0) hit.x = t.x; // length(wp - pos);
// Accumulate remaining voxels
for (int iter = 0; clr.w > SCN_ALPHACUT && iter < MAX_ITER && p.x >=0 && p.y >=0 && p.z >=0 && p.x < gvdb->res[0] && p.y < gvdb->res[0] && p.z < gvdb->res[0]; iter++) {
// Test to see if we've intersected the depth buffer (if there is no depth buffer, then this will never happen):
if (t.x > tDepthIntersection) {
hit.y = length(wp - pos);
hit.z = 1;
clr = make_float4(fmin(clr.x, 1.f), fmin(clr.y, 1.f), fmin(clr.z, 1.f), fmax(clr.w, 0.f));
return;
}
// Get the value of the volume at this point. Only consider it if it's greater than SCN_MINVAL.
const float rawSample = tex3D<float>(gvdb->volIn[chan], p.x + o.x, p.y + o.y, p.z + o.z);
if (rawSample >= SCN_MINVAL) {
// Apply transfer function; integrate val.w to get transmittance according to the Beer-Lambert law:
float4 val = transfer(gvdb, rawSample);
val.w = exp(SCN_EXTINCT * val.w * SCN_DIRECTSTEP);
// RGB color from color channel (alpha component is unused):
const float4 hclr = (gvdb->clr_chan == CHAN_UNDEF) ? make_float4(1, 1, 1, 1) : getColorF(gvdb, gvdb->clr_chan, p + o);
clr.x += val.x * clr.w * (1 - val.w) * SCN_ALBEDO * hclr.x;
clr.y += val.y * clr.w * (1 - val.w) * SCN_ALBEDO * hclr.y;
clr.z += val.z * clr.w * (1 - val.w) * SCN_ALBEDO * hclr.z;
clr.w *= val.w;
}
// Step forwards.
p += wpt;
wp += wpt;
t.x += dt;
}
hit.y = t.x; // length(wp - pos);
clr = make_float4(fmin(clr.x, 1.f), fmin(clr.y, 1.f), fmin(clr.z, 1.f), fmax(clr.w, 0.f));
}
//----------------------------- MASTER RAYCAST FUNCTION
// 1. Performs empty skipping of GVDB hierarchy
// 2. Checks input depth buffer [if set]
// 3. Calls the specified 'brickFunc' when a brick is hit, for custom behavior
// 4. Returns a color and/or surface hit and normal
//
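// Usage sketch (not part of the original code): a render kernel would typically
// call rayCast with one of the brick functions defined above, e.g.
//     rayCast ( gvdb, chan, pos, dir, hit, norm, clr, raySurfaceTrilinearBrick );
// where `pos`/`dir` describe the eye ray and `hit`, `norm`, `clr` receive the result.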
__device__ void rayCast ( VDBInfo* gvdb, uchar chan, float3 pos, float3 dir, float3& hit, float3& norm, float4& clr, gvdbBrickFunc_t brickFunc )
{
int nodeid[MAXLEV]; // level variables
float tMax[MAXLEV];
int b;
// GVDB - Iterative Hierarchical 3DDA on GPU
float3 vmin;
int lev = gvdb->top_lev;
nodeid[lev] = 0; // rootid ndx
float3 tStart = rayBoxIntersect ( pos, dir, gvdb->bmin, gvdb->bmax ); // intersect ray with bounding box
VDBNode* node = getNode ( gvdb, lev, nodeid[lev], &vmin ); // get root VDB node
if ( tStart.z == NOHIT ) return;
// 3DDA variables
tStart.x += gvdb->epsilon;
tMax[lev] = tStart.y -gvdb->epsilon;
int iter;
HDDAState dda;
dda.SetFromRay(pos, dir, tStart);
dda.Prepare(vmin, gvdb->vdel[lev]);
const float tDepthIntersection = getRayDepthBufferMax(dir); // The t.x at which the ray intersects the depth buffer
for (iter=0; iter < MAX_ITER && lev > 0 && lev <= gvdb->top_lev && dda.p.x >=0 && dda.p.y >=0 && dda.p.z >=0 && dda.p.x <= gvdb->res[lev] && dda.p.y <= gvdb->res[lev] && dda.p.z <= gvdb->res[lev]; iter++ ) {
dda.Next();
// Test to see if we've intersected the depth buffer (if there is no depth buffer, then this will never happen):
if (dda.t.x > tDepthIntersection) {
hit.z = 0;
return;
}
// node active test
b = (((int(dda.p.z) << gvdb->dim[lev]) + int(dda.p.y)) << gvdb->dim[lev]) + int(dda.p.x); // bitmaskpos
if ( isBitOn ( gvdb, node, b ) ) { // check vdb bitmask for voxel occupancy
if ( lev == 1 ) { // enter brick function..
nodeid[0] = getChild ( gvdb, node, b );
dda.t.x += gvdb->epsilon;
(*brickFunc) (gvdb, chan, nodeid[0], dda.t, pos, dir, hit, norm, clr);
if ( clr.w <= 0) {
clr.w = 0;
return;
} // deep termination
if (hit.z != NOHIT) return; // surface termination
dda.Step();
} else {
lev--; // step down tree
nodeid[lev] = getChild ( gvdb, node, b ); // get child
node = getNode ( gvdb, lev, nodeid[lev], &vmin );// child node
dda.t.x += gvdb->epsilon; // make sure we start inside child
tMax[lev] = dda.t.y -gvdb->epsilon; // t.x = entry point, t.y = exit point
dda.Prepare(vmin, gvdb->vdel[lev]); // start dda at next level down
}
} else {
// empty voxel, step DDA
dda.Step();
}
while ( dda.t.x > tMax[lev] && lev <= gvdb->top_lev ) {
lev++; // step up tree
if ( lev <= gvdb->top_lev ) {
node = getNode ( gvdb, lev, nodeid[lev], &vmin );
dda.Prepare(vmin, gvdb->vdel[lev]); // restore dda at next level up
}
}
}
}
/*
 * Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "saiga/core/time/all.h"
#include "saiga/core/util/Thread/omp.h"
#include "saiga/core/util/assert.h"
#include "saiga/cuda/cudaHelper.h"
#ifdef SAIGA_VISION
#include "ORBExtractorGPU.h"
#include <thread>
#include <vector>
#if defined(SAIGA_USE_CUDA_TOOLKIT) && !defined(_WIN32)
const int PATCH_SIZE = 31;
namespace Saiga
{
ORBExtractorGPU::Level::Level() {}
ORBExtractorGPU::Level::~Level() {}
void ORBExtractorGPU::Level::Reserve(int initial_N, int final_N)
{
descriptors.resize(final_N);
h_descriptors.resize(final_N);
keypoints.resize(initial_N);
h_keypoints.resize(initial_N);
}
void ORBExtractorGPU::Level::filter()
{
SAIGA_ASSERT(image.rows == image_gauss.rows);
SAIGA_ASSERT(image.cols == image_gauss.cols);
Saiga::NPPI::GaussFilter(image.getConstImageView(), image_gauss.getImageView(), context);
}
ORBExtractorGPU::ORBExtractorGPU(int _nfeatures, float _scaleFactor, int _nlevels, int _iniThFAST, int _minThFAST)
: nlevels(_nlevels), iniThFAST(_iniThFAST), minThFAST(_minThFAST)
{
pyramid = Saiga::ScalePyramid(_nlevels, _scaleFactor, _nfeatures);
download_stream.setName("download");
orb_stream.setName("orb");
    descriptor_stream.setName("descriptor");
}
ORBExtractorGPU::~ORBExtractorGPU() {}
int ORBExtractorGPU::Detect(Saiga::ImageView<unsigned char> image, std::vector<Saiga::KeyPoint<float>>& keypoints,
std::vector<Saiga::DescriptorORB>& descriptors)
{
// SAIGA_BLOCK_TIMER();
SAIGA_ASSERT(!image.empty());
SAIGA_ASSERT(image.pitchBytes % 4 == 0);
image.cols = Saiga::iAlignDown(image.cols, 4);
ComputePyramid(image);
int total_kps = 0;
for (auto& level : levels)
{
total_kps += level->N;
}
keypoints.resize(total_kps);
descriptors.resize(total_kps);
ComputeKeypoints(keypoints, descriptors);
return total_kps;
}
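// Hedged usage sketch (values and the ImageView setup are illustrative only, not
// taken from this file):
//   ORBExtractorGPU orb(1000, 1.2f, 8, 20, 7);   // nfeatures, scaleFactor, nlevels, iniThFAST, minThFAST
//   std::vector<Saiga::KeyPoint<float>> kps;
//   std::vector<Saiga::DescriptorORB> descs;
//   int n = orb.Detect(gray_view, kps, descs);   // gray_view: Saiga::ImageView<unsigned char>
// Detect resizes kps/descs to the total keypoint count across all pyramid levels.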
void ORBExtractorGPU::DownloadAndDistribute(int level)
{
auto& level_data = levels[level];
{
SAIGA_ASSERT(level_data->N <= level_data->h_keypoints.size());
auto h_keypoints = Saiga::ArrayView<Saiga::KeyPoint<float>>(level_data->h_keypoints).head(level_data->N);
auto level_keypoints = level_data->dis.Distribute(
h_keypoints, Saiga::vec2(level_data->fast_min_x, level_data->fast_min_y),
Saiga::vec2(level_data->fast_max_x, level_data->fast_max_y), pyramid.Features(level));
SAIGA_ASSERT(level_keypoints.size() <= h_keypoints.size());
level_data->N = level_keypoints.size();
h_keypoints = h_keypoints.head(level_data->N);
for (int i = 0; i < level_data->N; ++i)
{
h_keypoints[i] = level_keypoints[i];
}
}
{
const int N = level_data->N;
SAIGA_ASSERT(level_data->h_keypoints.size() >= N);
SAIGA_ASSERT(level_data->keypoints.size() >= N);
SAIGA_ASSERT(level_data->h_descriptors.size() >= N);
SAIGA_ASSERT(level_data->descriptors.size() >= N);
auto h_keypoints = Saiga::ArrayView<Saiga::KeyPoint<float>>(level_data->h_keypoints).head(N);
auto d_keypoints = Saiga::ArrayView<Saiga::KeyPoint<float>>(level_data->keypoints).head(N);
auto h_descriptors = Saiga::ArrayView<Saiga::DescriptorORB>(level_data->h_descriptors).head(N);
auto d_descriptors = Saiga::ArrayView<Saiga::DescriptorORB>(level_data->descriptors).head(N);
auto& stream = level_data->stream;
CHECK_CUDA_ERROR(cudaMemcpyAsync(d_keypoints.data(), h_keypoints.data(), sizeof(Saiga::KeyPoint<float>) * N,
cudaMemcpyHostToDevice, stream));
{
orb.ComputeAngles(level_data->image_obj, level_data->image.getImageView(), d_keypoints, level_data->fast_min_x,
level_data->fast_min_y, level, pyramid.Scale(level) * PATCH_SIZE, stream);
}
{
# ifdef SAIGA_NPPI_HAS_STREAM_CONTEXT
# else
level_data->gauss_ready.wait(stream);
# endif
orb.ComputeDescriptors(level_data->image_gauss_obj, level_data->image_gauss.getImageView(), d_keypoints,
d_descriptors, stream);
}
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoints.data(), d_keypoints.data(), sizeof(Saiga::KeyPoint<float>) * N,
cudaMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_descriptors.data(), d_descriptors.data(), sizeof(Saiga::DescriptorORB) * N,
cudaMemcpyDeviceToHost, stream));
}
}
void ORBExtractorGPU::ComputeKeypoints(std::vector<Saiga::KeyPoint<float>>& keypoints,
std::vector<Saiga::DescriptorORB>& descriptors)
{
int current_kp = 0;
for (int level = 0; level < nlevels; ++level)
{
auto& level_data = levels[level];
int N = level_data->N;
auto h_keypoints = Saiga::ArrayView<Saiga::KeyPoint<float>>(level_data->h_keypoints).head(N);
auto h_descriptors = Saiga::ArrayView<Saiga::DescriptorORB>(level_data->h_descriptors).head(N);
level_data->stream.synchronize();
float scale = pyramid.Scale(level);
for (int i = 0; i < N; ++i)
{
auto& kp = h_keypoints[i];
kp.point *= scale;
keypoints[current_kp + i] = kp;
descriptors[current_kp + i] = h_descriptors[i];
}
current_kp += N;
}
}
void ORBExtractorGPU::AllocatePyramid(int rows, int cols)
{
SAIGA_ASSERT(cols % 4 == 0);
if (!levels.empty())
{
return;
}
levels.resize(nlevels);
// first frame, allocate the Pyramids
for (int level = 0; level < nlevels; ++level)
{
levels[level] = std::make_shared<Level>();
auto& level_data = levels[level];
float scale = pyramid.InverseScale(level);
int level_rows = Saiga::iRound(rows * scale);
int level_cols = Saiga::iRound(cols * scale);
level_data->image.create(level_rows, level_cols);
level_data->image_gauss.create(level_rows, level_cols);
level_data->fast = std::make_unique<Saiga::CUDA::Fast>(iniThFAST, minThFAST);
level_data->Reserve(level_data->fast->MaxKeypoints(), pyramid.Features(level) * 1.1);
auto fast_edge_threshold = 16;
level_data->fast_min_x = fast_edge_threshold;
level_data->fast_min_y = fast_edge_threshold;
level_data->fast_max_x = level_data->image.cols - fast_edge_threshold;
level_data->fast_max_y = level_data->image.rows - fast_edge_threshold;
level_data->fast_image_view = level_data->image.getImageView().subImageView(
level_data->fast_min_y, level_data->fast_min_x, level_data->fast_max_y - level_data->fast_min_y,
level_data->fast_max_x - level_data->fast_min_x);
level_data->stream.setName("Level " + std::to_string(level));
level_data->context = Saiga::NPPI::CreateStreamContextWithStream(level_data->stream);
level_data->image_obj = level_data->image.GetTextureObject();
level_data->image_gauss_obj = level_data->image_gauss.GetTextureObject();
}
nppSetStream(orb_stream);
}
void ORBExtractorGPU::ComputePyramid(Saiga::ImageView<unsigned char> image)
{
// SAIGA_BLOCK_TIMER();
AllocatePyramid(image.rows, image.cols);
SAIGA_ASSERT(!levels.empty());
auto& first_level = levels.front();
SAIGA_ASSERT(first_level->image.cols == image.cols);
SAIGA_ASSERT(first_level->image.rows == image.rows);
for (int level = 0; level < nlevels; ++level)
{
auto& curr_data = levels[level];
curr_data->image_ready.reset();
}
for (int level = 0; level < nlevels; ++level)
{
auto& curr_data = levels[level];
# ifdef SAIGA_NPPI_HAS_STREAM_CONTEXT
auto& stream = curr_data->stream;
# else
auto& stream = orb_stream;
# endif
if (level == 0)
{
first_level->image.upload(image, stream);
}
else
{
auto& prev_data = levels[level - 1];
stream.waitForEvent(prev_data->image_ready);
Saiga::NPPI::ResizeLinear(prev_data->image.getConstImageView(), curr_data->image.getImageView(),
curr_data->context);
}
curr_data->image_ready.record(stream);
curr_data->fast->Detect(curr_data->fast_image_view, stream);
}
# ifndef _OPENMP
#        error "ORBExtractorGPU requires OpenMP"
# endif
# pragma omp parallel for num_threads(2) schedule(static, 1)
for (int level = 0; level < nlevels; ++level)
{
auto& curr_data = levels[level];
curr_data->download();
# ifdef SAIGA_NPPI_HAS_STREAM_CONTEXT
curr_data->filter();
DownloadAndDistribute(level);
# else
if (level == 0)
{
for (int l2 = 0; l2 < nlevels; l2 += 1)
{
auto& l = levels[l2];
                l->filter();
                l->gauss_ready.record(orb_stream);
                l->stream.waitForEvent(l->gauss_ready);
}
}
DownloadAndDistribute(level);
# endif
}
}
} // namespace Saiga
#endif
#endif
extern "C" {
#include <ccv.h>
#include <ccv_internal.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <nnc/ccv_nnc_internal.h>
}
#include <nnc/gpu/ccv_nnc_compat.h>
#ifdef HAVE_CUDA
#ifdef USE_SYSTEM_CUB
#include <cub/util_type.cuh>
#include <cub/device/device_radix_sort.cuh>
#include <cub/thread/thread_load.cuh>
#include <cub/thread/thread_store.cuh>
#else
#include "3rdparty/cub/util_type.cuh.h"
#include "3rdparty/cub/device/device_radix_sort.cuh.h"
#include "3rdparty/cub/thread/thread_load.cuh.h"
#include "3rdparty/cub/thread/thread_store.cuh.h"
#endif
struct float5 {
float v[5];
};
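// How the scatter/merge pair below works (hedged reading of the code): each
// detection is a 5-float record [score, x, y, w, h]. _ccv_nnc_scatter_rank_kernel
// copies the score into a separate `rank` array and overwrites slot 0 of each
// record with its original index (stored as int bits inside the float slot).
// cub::DeviceRadixSort then sorts the float5 records by `rank` descending, and
// _ccv_nnc_merge_rank_kernel moves the sorted indices out into `c` and restores
// the scores into slot 0.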
__global__ void _ccv_nnc_scatter_rank_kernel(const int n, const float* const a, float* const b, float* const rank)
{
CUDA_1D_KERNEL_LOOP(i, n) {
rank[i] = a[i * 5];
((int *)b)[i * 5] = i;
b[i * 5 + 1] = a[i * 5 + 1];
b[i * 5 + 2] = a[i * 5 + 2];
b[i * 5 + 3] = a[i * 5 + 3];
b[i * 5 + 4] = a[i * 5 + 4];
}
}
__global__ void _ccv_nnc_merge_rank_kernel(const int n, float* const b, float* const rank, int* const c)
{
CUDA_1D_KERNEL_LOOP(i, n) {
c[i] = ((int*)b)[i * 5];
b[i * 5] = rank[i];
}
}
template<int threadsPerBlock>
__global__ void _ccv_nnc_iou_mask_kernel(const int gm, const int m, const float iou_threshold, const float* const b, uint64_t* const iou_mask)
{
// Compute only upper-left triangle.
int row_start = blockIdx.x / (gm + 1);
int col_start = blockIdx.x % (gm + 1);
if (col_start > row_start)
{
col_start = col_start - row_start - 1;
row_start = gm - 1 - row_start;
}
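	// Worked example of the remap above (illustrative): for gm = 3 the launch uses
	// (gm + 1) * gm / 2 = 6 blocks. blockIdx.x = 0..5 maps to
	// (row,col) = (0,0), (2,0), (2,1), (2,2), (1,0), (1,1), i.e. exactly the
	// gm*(gm+1)/2 group pairs with col <= row, so each IoU pair is computed once.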
const int row_size = min(m - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(m - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size)
{
boxes[threadIdx.x * 4] = b[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
boxes[threadIdx.x * 4 + 1] = b[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
boxes[threadIdx.x * 4 + 2] = b[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
boxes[threadIdx.x * 4 + 3] = b[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size)
{
const int row_idx = threadsPerBlock * row_start + threadIdx.x;
const float* const bp = b + row_idx * 5;
int i;
int end = (row_start == col_start) ? threadIdx.x : col_size;
uint64_t t = 0;
const float area1 = bp[3] * bp[4];
for (i = 0; i < end; i++)
{
const float area2 = boxes[i * 4 + 2] * boxes[i * 4 + 3];
const float xdiff = ccv_max(0, ccv_min(bp[1] + bp[3], boxes[i * 4] + boxes[i * 4 + 2]) - ccv_max(bp[1], boxes[i * 4]));
const float ydiff = ccv_max(0, ccv_min(bp[2] + bp[4], boxes[i * 4 + 1] + boxes[i * 4 + 3]) - ccv_max(bp[2], boxes[i * 4 + 1]));
const float intersection = xdiff * ydiff;
const float iou = intersection / (area1 + area2 - intersection);
if (iou >= iou_threshold)
t |= (1ULL << i);
}
iou_mask[row_idx * gm + col_start] = t;
}
}
__global__ void _ccv_nnc_nms_zero_flags(const int n, int* const flags)
{
CUDA_1D_KERNEL_LOOP(i, n) {
flags[i] = 0;
}
}
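// Hedged summary of the post-processing kernel below: each block owns one group of
// `threadsPerBlock` boxes (in descending score order). A block first spins on
// `flags[i]` for every earlier group i, so suppression decisions propagate in score
// order across blocks; a box is suppressed (c[idx] = -1) if it overlaps a
// still-surviving earlier box. The block then resolves suppression within its own
// group in shared memory, publishes its flag, and the last block compacts the
// surviving boxes/indices to the front of b/c.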
template<int threadsPerBlock>
__global__ void _ccv_nnc_iou_postproc_kernel(const int gm, const int m, const uint64_t* const iou_mask, int* const flags, float* const b, int* const c)
{
const int row_idx = threadsPerBlock * blockIdx.x + threadIdx.x;
int i;
int suppressed = (row_idx >= m);
	for (i = 0; i < blockIdx.x; i++) // Compute whether we depend on these, for each of them.
	{
		const uint64_t ious = row_idx < m ? iou_mask[row_idx * gm + i] : 0;
		if (threadIdx.x == 0) // Wait for flags to turn to 1.
while (cub::ThreadLoad<cub::LOAD_CG>(flags + i) == 0)
__threadfence_block();
__syncthreads(); // Now it is available. Sync all threads to this point.
if (suppressed)
continue;
int j;
const int col_size = min(m - i * threadsPerBlock, threadsPerBlock);
for (j = 0; j < col_size; j++)
if (ious & (1ULL << j)) // And it overlaps. Mark this one as not good.
if (c[i * threadsPerBlock + j] != -1) // If this is not marked as unavailable.
c[row_idx] = -1, suppressed = 1;
}
__shared__ int bc[threadsPerBlock];
bc[threadIdx.x] = row_idx < m ? c[row_idx] : 0;
// Now, go over it internally.
const uint64_t ious = row_idx < m ? iou_mask[row_idx * gm + blockIdx.x] : 0;
#pragma unroll threadsPerBlock
for (i = 0; i < threadsPerBlock; i++)
{
__syncthreads(); // Need to sync on every round.
if (i >= threadIdx.x)
continue;
if (ious & (1ULL << i)) // And it overlaps. Mark this one as not good.
if (bc[i] != -1) // If this is not marked as unavailable.
bc[threadIdx.x] = -1;
}
// Write back.
if (row_idx < m)
c[row_idx] = bc[threadIdx.x];
// Done mine. Mark it visible for other blocks. Store the flag.
__syncthreads();
if (threadIdx.x == 0)
cub::ThreadStore<cub::STORE_CG>(flags + blockIdx.x, 1);
// If I am the last one, I am responsible for removing suppressed values.
if (blockIdx.x == gm - 1 && threadIdx.x == 0)
{
int j;
for (i = 0, j = 0; i < m; i++)
if (c[i] != -1)
{
int k;
if (i != j)
{
for (k = 0; k < 5; k++)
b[j * 5 + k] = b[i * 5 + k];
c[j] = c[i];
}
++j;
}
for (i = j; i < m; i++)
c[i] = -1, b[i * 5] = -FLT_MAX;
}
}
static int _ccv_nnc_nms_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size == 1);
const ccv_nnc_tensor_view_t* a = (ccv_nnc_tensor_view_t*)inputs[0];
assert(output_size == 2);
ccv_nnc_tensor_view_t* b = (ccv_nnc_tensor_view_t*)outputs[0];
ccv_nnc_tensor_view_t* c = (ccv_nnc_tensor_view_t*)outputs[1];
const int a_nd = ccv_nnc_tensor_nd(a->info.dim);
const int b_nd = ccv_nnc_tensor_nd(b->info.dim);
const int c_nd = ccv_nnc_tensor_nd(c->info.dim);
assert(a_nd == b_nd);
int i;
for (i = 0; i < a_nd; i++)
{ assert(a->info.dim[i] == b->info.dim[i]); }
const int* ainc = CCV_IS_TENSOR_VIEW(a) ? a->inc : a->info.dim;
const int* binc = CCV_IS_TENSOR_VIEW(b) ? b->inc : b->info.dim;
const int* cinc = CCV_IS_TENSOR_VIEW(c) ? c->inc : c->info.dim;
const int n = a_nd >= 3 ? a->info.dim[0] : 1;
const int aninc = a_nd >= 3 ? ainc[1] * ainc[2] : 0;
const int bninc = b_nd >= 3 ? binc[1] * binc[2] : 0;
const int cninc = c_nd >= 2 ? cinc[1] : 0;
const int m = a_nd >= 3 ? a->info.dim[1] : a->info.dim[0];
if (c_nd == 1)
{ assert(m == c->info.dim[0]); }
else
{ assert(c_nd == 2 && n == c->info.dim[0] && m == c->info.dim[1]); }
const float iou_threshold = cmd.info.nms.iou_threshold;
assert((a_nd <= 1 ? 1 : a->info.dim[a_nd - 1]) == 5 && ainc[a_nd - 1] == 5 && ainc[a_nd - 1] == binc[b_nd - 1]);
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairsDescending(0, temp_storage_bytes, a->data.f32, b->data.f32, (float5*)b->data.f32, (float5*)b->data.i32, m, 0, sizeof(float) * 8, 0);
size_t aligned_temp_storage_bytes = ((temp_storage_bytes + 511) / 512) * 512;
const int gm = (m + 63) / 64;
// Use full parallelism to compute whether it overlaps or not (iou >= iou_threshold).
size_t iou_bytes = ((sizeof(uint64_t) * (m * gm) + 511) / 512) * 512;
size_t flag_bytes = sizeof(int) * gm;
size_t total_bytes = ccv_max(iou_bytes + flag_bytes, aligned_temp_storage_bytes + sizeof(float) * m);
uint8_t* const d_temp_storage = (uint8_t*)ccv_nnc_stream_context_get_workspace(stream_context, total_bytes, CCV_TENSOR_GPU_MEMORY);
float* const rank = (float*)(d_temp_storage + aligned_temp_storage_bytes);
uint64_t* const d_ious = (uint64_t*)d_temp_storage;
int* const d_flags = (int*)((uint8_t*)d_ious + iou_bytes);
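	// Hedged note on the workspace layout above: the same allocation is reused by
	// two phases that never run concurrently, hence the ccv_max() when sizing it:
	//   sort phase: [cub radix-sort temp storage | rank floats (m)]
	//   nms phase:  [iou bitmasks (m * gm uint64_t) | per-group flags (gm ints)]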
cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
for (i = 0; i < n; i++)
{
const float* const ap = a->data.f32 + i * aninc;
float* const bp = b->data.f32 + i * bninc;
int* const cp = c->data.i32 + i * cninc;
// Scatter to ranks, so we can sort by these floating-points.
_ccv_nnc_scatter_rank_kernel<<<CUDA_GET_BLOCKS(m), CUDA_NUM_THREADS, 0, stream>>>(m, ap, bp, rank);
// Sorting.
cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, rank, rank, (float5*)bp, (float5*)bp, m, 0, sizeof(float) * 8, stream);
// Merging back into respective arrays.
_ccv_nnc_merge_rank_kernel<<<CUDA_GET_BLOCKS(m), CUDA_NUM_THREADS, 0, stream>>>(m, bp, rank, cp);
// Compute whether it overlaps or not with the other. There is no dependencies between them.
const int block_size = (gm + 1) * gm / 2;
_ccv_nnc_iou_mask_kernel<64><<<block_size, 64, 0, stream>>>(gm, m, iou_threshold, bp, d_ious);
_ccv_nnc_nms_zero_flags<<<CUDA_GET_BLOCKS(gm), CUDA_NUM_THREADS, 0, stream>>>(gm, d_flags);
// Remove overlap items. There are dependencies, because we only remove items that overlap with existing items.
_ccv_nnc_iou_postproc_kernel<64><<<gm, 64, 0, stream>>>(gm, m, d_ious, d_flags, bp, cp);
}
return CCV_NNC_EXEC_SUCCESS;
}
__global__ void _ccv_nnc_nms_zero_kernel(const int n, float* const b)
{
CUDA_1D_KERNEL_LOOP(i, n) {
b[i * 5] = 0;
b[i * 5 + 1] = 0;
b[i * 5 + 2] = 0;
b[i * 5 + 3] = 0;
b[i * 5 + 4] = 0;
}
}
__global__ void _ccv_nnc_nms_back_kernel(const int n, const float* const a, const int* const idx, float* const b)
{
CUDA_1D_KERNEL_LOOP(i, n) {
const int j = idx[i];
if (j >= 0)
{
b[j * 5] = a[i * 5];
b[j * 5 + 1] = a[i * 5 + 1];
b[j * 5 + 2] = a[i * 5 + 2];
b[j * 5 + 3] = a[i * 5 + 3];
b[j * 5 + 4] = a[i * 5 + 4];
}
}
}
static int _ccv_nnc_nms_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size >= 5);
const ccv_nnc_tensor_view_t* a = (ccv_nnc_tensor_view_t*)inputs[0];
const ccv_nnc_tensor_view_t* c = (ccv_nnc_tensor_view_t*)inputs[4];
assert(output_size == 1);
ccv_nnc_tensor_view_t* b = (ccv_nnc_tensor_view_t*)outputs[0];
const int a_nd = ccv_nnc_tensor_nd(a->info.dim);
const int b_nd = ccv_nnc_tensor_nd(b->info.dim);
const int c_nd = ccv_nnc_tensor_nd(c->info.dim);
assert(a_nd == b_nd);
int i;
for (i = 0; i < a_nd; i++)
{ assert(a->info.dim[i] == b->info.dim[i]); }
const int* ainc = CCV_IS_TENSOR_VIEW(a) ? a->inc : a->info.dim;
const int* binc = CCV_IS_TENSOR_VIEW(b) ? b->inc : b->info.dim;
const int* cinc = CCV_IS_TENSOR_VIEW(c) ? c->inc : c->info.dim;
const int n = a_nd >= 3 ? a->info.dim[0] : 1;
const int aninc = a_nd >= 3 ? ainc[1] * ainc[2] : 0;
const int bninc = b_nd >= 3 ? binc[1] * binc[2] : 0;
const int cninc = c_nd >= 2 ? cinc[1] : 0;
const int m = a_nd >= 3 ? a->info.dim[1] : a->info.dim[0];
if (c_nd == 1)
{ assert(m == c->info.dim[0]); }
else
{ assert(c_nd == 2 && n == c->info.dim[0] && m == c->info.dim[1]); }
assert((a_nd <= 1 ? 1 : a->info.dim[a_nd - 1]) == 5 && ainc[a_nd - 1] == 5 && ainc[a_nd - 1] == binc[b_nd - 1]);
cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
for (i = 0; i < n; i++)
{
const float* const ap = a->data.f32 + i * aninc;
float* const bp = b->data.f32 + i * bninc;
int* const cp = c->data.i32 + i * cninc;
_ccv_nnc_nms_zero_kernel<<<CUDA_GET_BLOCKS(m), CUDA_NUM_THREADS, 0, stream>>>(m, bp);
_ccv_nnc_nms_back_kernel<<<CUDA_GET_BLOCKS(m), CUDA_NUM_THREADS, 0, stream>>>(m, ap, cp, bp);
}
return CCV_NNC_EXEC_SUCCESS;
}
#endif
REGISTER_COMMAND_BACKEND(CCV_NNC_NMS_FORWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDA
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
registry->tensor_datatypes = CCV_32F | CCV_32S;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = 1;
registry->exec = _ccv_nnc_nms_forw;
#endif
}
REGISTER_COMMAND_BACKEND(CCV_NNC_NMS_BACKWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDA
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
registry->tensor_datatypes = CCV_32F | CCV_32S;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = 1;
registry->exec = _ccv_nnc_nms_back;
#endif
}
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_CUDA
namespace lbann {
namespace gpu_lib {
// -------------------------------------------------------------
// Device properties
// -------------------------------------------------------------
dim3 max_grid_dims() {
static dim3 max_grid_dims_(0,0,0);
if (max_grid_dims_.x == 0) {
int device = 0;
cudaDeviceProp prop;
CHECK_CUDA(cudaGetDevice(&device));
CHECK_CUDA(cudaGetDeviceProperties(&prop, device));
max_grid_dims_.x = prop.maxGridSize[0];
max_grid_dims_.y = prop.maxGridSize[1];
max_grid_dims_.z = prop.maxGridSize[2];
if (max_grid_dims_.x == 0) {
LBANN_ERROR("Could not setup max CUDA grid size");
}
}
return max_grid_dims_;
}
} // namespace gpu_lib
} // namespace lbann
namespace lbann {
namespace cuda {
// -------------------------------------------------------------
// event_wrapper
// -------------------------------------------------------------
event_wrapper::event_wrapper() : m_event(nullptr), m_stream(0) {
CHECK_CUDA(cudaEventCreateWithFlags(&m_event, cudaEventDisableTiming));
}
event_wrapper::event_wrapper(const event_wrapper& other)
: m_event(nullptr), m_stream(other.m_stream) {
CHECK_CUDA(cudaEventCreateWithFlags(&m_event, cudaEventDisableTiming));
if (!other.query()) { record(m_stream); }
}
event_wrapper& event_wrapper::operator=(const event_wrapper& other) {
m_stream = other.m_stream;
if (!other.query()) { record(m_stream); }
return *this;
}
event_wrapper::~event_wrapper() {
cudaEventDestroy(m_event);
}
void event_wrapper::record(cudaStream_t stream) {
m_stream = stream;
CHECK_CUDA(cudaEventRecord(m_event, m_stream));
}
bool event_wrapper::query() const {
const auto& status = cudaEventQuery(m_event);
switch (status) {
case cudaSuccess: return true;
case cudaErrorNotReady: return false;
default:
CHECK_CUDA(status);
return false;
}
}
void event_wrapper::synchronize() {
CHECK_CUDA(cudaEventSynchronize(m_event));
}
cudaEvent_t& event_wrapper::get_event() { return m_event; }
// -----------------------------
// Graph
// -----------------------------
Graph::Graph(cudaGraph_t graph)
: graph_{graph}
{}
Graph::~Graph() {
if (graph_) {
// Don't check status to avoid exceptions
cudaGraphDestroy(graph_);
}
}
Graph::Graph(const Graph& other) {
if (other.graph_) {
CHECK_CUDA(cudaGraphClone(&graph_, other.graph_));
}
}
Graph::Graph(Graph&& other)
: graph_{other.graph_} {
other.graph_ = nullptr;
}
Graph& Graph::operator=(Graph other) {
swap(other, *this);
return *this;
}
void swap(Graph& first, Graph& second) {
std::swap(first.graph_, second.graph_);
}
void Graph::reset(cudaGraph_t graph) {
if (graph_) {
CHECK_CUDA(cudaGraphDestroy(graph_));
}
graph_ = graph;
}
cudaGraph_t Graph::release() {
auto old_graph = graph_;
graph_ = nullptr;
return old_graph;
}
cudaGraph_t Graph::get() const noexcept {
return graph_;
}
Graph::operator cudaGraph_t() const noexcept {
return get();
}
void Graph::create() {
if (!graph_) {
CHECK_CUDA(cudaGraphCreate(&graph_, 0));
}
}
void Graph::begin_capture(
cudaStream_t stream,
cudaStreamCaptureMode mode) {
// Check that stream is valid
  // Note (tym 9/22/20): As of CUDA 11.0.3, stream capture on the default
  // stream is not supported.
if (stream == 0) {
LBANN_ERROR("attempting to capture default CUDA stream");
}
// Check whether CUDA stream is already being captured
cudaStreamCaptureStatus capture_status;
CHECK_CUDA(cudaStreamIsCapturing(stream, &capture_status));
switch (capture_status) {
case cudaStreamCaptureStatusNone:
break;
case cudaStreamCaptureStatusActive:
LBANN_ERROR("CUDA stream is already being captured");
break;
case cudaStreamCaptureStatusInvalidated:
{
cudaGraph_t graph;
CHECK_CUDA(cudaStreamEndCapture(stream, &graph));
Graph temp(graph);
}
break;
default:
LBANN_ERROR(
"unrecognized status for CUDA stream capture ",
"(",static_cast<int>(capture_status),")");
}
// Start capturing CUDA stream
CHECK_CUDA(cudaStreamBeginCapture(stream, mode));
}
Graph Graph::end_capture(cudaStream_t stream) {
// Check whether CUDA stream is already being captured
cudaStreamCaptureStatus capture_status;
CHECK_CUDA(cudaStreamIsCapturing(stream, &capture_status));
switch (capture_status) {
case cudaStreamCaptureStatusNone:
LBANN_ERROR("CUDA stream is not being captured");
break;
case cudaStreamCaptureStatusActive:
break;
case cudaStreamCaptureStatusInvalidated:
{
cudaGraph_t graph;
CHECK_CUDA(cudaStreamEndCapture(stream, &graph));
Graph temp(graph);
LBANN_ERROR("CUDA stream capture has failed");
}
break;
default:
LBANN_ERROR(
"unrecognized status for CUDA stream capture ",
"(",static_cast<int>(capture_status),")");
}
// Finish capturing CUDA stream
cudaGraph_t graph;
CHECK_CUDA(cudaStreamEndCapture(stream, &graph));
return Graph(graph);
}
// -----------------------------
// ExecutableGraph
// -----------------------------
ExecutableGraph::ExecutableGraph(cudaGraphExec_t graph_exec)
: graph_exec_{graph_exec}
{}
ExecutableGraph::ExecutableGraph(cudaGraph_t graph) {
if (!graph) {
LBANN_ERROR("attempted to instantiate cudaGraphExec_t from null cudaGraph_t object");
}
constexpr size_t log_size = BUFSIZ;
char log_buffer[log_size];
const auto status
= cudaGraphInstantiate(&graph_exec_, graph, nullptr, log_buffer, log_size);
if (status != cudaSuccess && log_buffer[0] != '\0') {
log_buffer[log_size-1] = '\0';
LBANN_WARNING(log_buffer);
}
CHECK_CUDA(status);
}
ExecutableGraph::~ExecutableGraph() {
if (graph_exec_) {
// Don't check status to avoid exceptions
cudaGraphExecDestroy(graph_exec_);
}
}
ExecutableGraph::ExecutableGraph(ExecutableGraph&& other)
: graph_exec_{other.graph_exec_} {
other.graph_exec_ = nullptr;
}
ExecutableGraph& ExecutableGraph::operator=(ExecutableGraph other) {
swap(other, *this);
return *this;
}
void swap(ExecutableGraph& first, ExecutableGraph& second) {
std::swap(first.graph_exec_, second.graph_exec_);
}
void ExecutableGraph::reset(cudaGraphExec_t graph_exec) {
if (graph_exec_) {
CHECK_CUDA(cudaGraphExecDestroy(graph_exec_));
}
graph_exec_ = graph_exec;
}
cudaGraphExec_t ExecutableGraph::release() {
auto old_graph_exec = graph_exec_;
graph_exec_ = nullptr;
return old_graph_exec;
}
cudaGraphExec_t ExecutableGraph::get() const noexcept {
return graph_exec_;
}
ExecutableGraph::operator cudaGraphExec_t() const noexcept {
return get();
}
void ExecutableGraph::launch(cudaStream_t stream) const {
if (!graph_exec_) {
LBANN_ERROR("attempted to launch null cudaGraphExec_t");
}
CHECK_CUDA(cudaGraphLaunch(graph_exec_, stream));
}
void ExecutableGraph::update(cudaGraph_t graph) {
// Make sure CUDA graph is valid
if (!graph) {
LBANN_ERROR("attempting to update cudaGraphExec_t with null cudaGraph_t");
}
// Try updating executable CUDA graph
#if (__CUDACC_VER_MAJOR__*100+__CUDACC_VER_MINOR__) < 1002 // < 10.2
reset();
#else // >= 10.2
if (graph_exec_) {
cudaGraphNode_t error_node;
cudaGraphExecUpdateResult result;
auto status = cudaGraphExecUpdate(graph_exec_, graph, &error_node, &result);
switch (status) {
case cudaSuccess:
break;
case cudaErrorGraphExecUpdateFailure:
reset();
break;
default:
CHECK_CUDA(status);
reset();
}
}
  #endif // CUDA version >= 10.2
// If update failed, create new executable CUDA graph
if (!graph_exec_) {
*this = ExecutableGraph(graph);
}
}
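// Hedged usage sketch of the wrappers above (stream and kernel names are
// illustrative, not part of this file):
//   Graph::begin_capture(stream);            // stream must not be the default stream
//   /* ...enqueue kernels and memcpys on `stream`... */
//   Graph g = Graph::end_capture(stream);
//   exec_graph.update(g.get());              // reuses the cudaGraphExec_t when CUDA >= 10.2
//   exec_graph.launch(stream);
// where `exec_graph` is a persistent ExecutableGraph kept across iterations.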
// -------------------------------------------------------------
// Helper functions for tensor operations
// -------------------------------------------------------------
namespace {
using int4 = gpu_lib::array<int, 4>;
/**
* Block dimensions: bdimx x bdimy x bdimz
*
  * Grid dimensions: (dim[3] / bdimx) x (dim[2] / bdimy) x (dim[1] / bdimz)
*/
template <typename TensorDataType>
__global__ void copy_4d_kernel(
int4 dims,
const TensorDataType* __restrict__ input,
int4 input_strides,
TensorDataType* __restrict__ output,
int4 output_strides) {
// Indices
const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y;
const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z;
const auto& nthreadsx = gridDim.x * blockDim.x;
const auto& nthreadsy = gridDim.y * blockDim.y;
const auto& nthreadsz = gridDim.z * blockDim.z;
for (int i0=0; i0<dims[0]; ++i0) {
for (int i1=gidz; i1<dims[1]; i1+=nthreadsz) {
for (int i2=gidy; i2<dims[2]; i2+=nthreadsy) {
for (int i3=gidx; i3<dims[3]; i3+=nthreadsx) {
const auto& x = input[i0 * input_strides[0]
+ i1 * input_strides[1]
+ i2 * input_strides[2]
+ i3 * input_strides[3]];
auto& y = output[i0 * output_strides[0]
+ i1 * output_strides[1]
+ i2 * output_strides[2]
+ i3 * output_strides[3]];
y = x;
}
}
}
}
}
} // namespace <anon>
template <typename TensorDataType>
void copy_tensor(
cudaStream_t stream,
const std::vector<size_t>& dims,
const TensorDataType* input,
const std::vector<size_t>& input_strides,
TensorDataType* output,
const std::vector<size_t>& output_strides) {
// Check inputs
if (dims.empty() || dims.size() > 4) {
LBANN_ERROR("invalid number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != input_strides.size()) {
LBANN_ERROR(
"number of input strides (",input_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != output_strides.size()) {
LBANN_ERROR(
"number of output strides (",output_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
// Pad tensor dimensions to 4D
std::vector<int>
rdims(dims.rbegin(), dims.rend()),
input_rstrides(input_strides.rbegin(), input_strides.rend()),
output_rstrides(output_strides.rbegin(), output_strides.rend());
rdims.resize(4, 1);
input_rstrides.resize(4, input_rstrides.back());
output_rstrides.resize(4, output_rstrides.back());
// Launch CUDA kernel
const auto size = std::accumulate(
dims.begin(), dims.end(), 1, std::multiplies<int>());
if (size > 0) {
constexpr size_t block_size = 64;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
block_dims.z = 1;
grid_dims.x = (rdims[0] + block_dims.x - 1) / block_dims.x;
grid_dims.y = (rdims[1] + block_dims.y - 1) / block_dims.y;
grid_dims.z = (rdims[2] + block_dims.z - 1) / block_dims.z;
grid_dims.y = El::Min(grid_dims.y, 65535);
grid_dims.z = El::Min(grid_dims.z, 65535);
copy_4d_kernel<<<grid_dims, block_dims, 0, stream>>>(
{rdims[3], rdims[2], rdims[1], rdims[0]},
input,
{input_rstrides[3], input_rstrides[2],
input_rstrides[1], input_rstrides[0]},
output,
{output_rstrides[3], output_rstrides[2],
output_rstrides[1], output_rstrides[0]});
}
}
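// Hedged usage sketch (values are illustrative): copying a contiguous 2 x 3 x 4
// tensor with row-major strides, where d_in/d_out are device buffers of at least
// 24 elements:
//   std::vector<size_t> dims    = {2, 3, 4};
//   std::vector<size_t> strides = {12, 4, 1};
//   copy_tensor<float>(stream, dims, d_in, strides, d_out, strides);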
#if defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
template <>
void copy_tensor<cpu_fp16>(
cudaStream_t stream,
const std::vector<size_t>& dims,
const cpu_fp16* input,
const std::vector<size_t>& input_strides,
cpu_fp16* output,
const std::vector<size_t>& output_strides) {
copy_tensor<fp16>(
stream,
dims,
reinterpret_cast<const fp16*>(input),
input_strides,
reinterpret_cast<fp16*>(output),
output_strides);
}
#endif // defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
// Explicit template instantiation
#define PROTO(T) \
template void copy_tensor<T>( \
cudaStream_t stream, \
const std::vector<size_t>& dims, \
const T* input, \
const std::vector<size_t>& input_strides, \
T* output, \
const std::vector<size_t>& output_strides);
#define LBANN_INSTANTIATE_GPU_HALF
#define LBANN_INSTANTIATE_CPU_HALF
#include "lbann/macros/instantiate.hpp"
#undef PROTO
void mem_copy_async(
void* output,
const void* input,
const size_t count,
cudaMemcpyKind kind,
cudaStream_t stream) {
CHECK_CUDA(cudaMemcpyAsync(
output,
input,
count,
kind,
stream));
}
} // namespace cuda
} // namespace lbann
#endif // LBANN_HAS_CUDA
#include "cuda_error.h"
#include "cuda_runtime.h"
#include "StreamingKernels.h"
#include "MultiFitStream.h"
#include "JobWrapper.h"
#include "GpuMultiFlowFitControl.h"
#include "SignalProcessingFitterQueue.h"
using namespace std;
#define DEBUG_SIZE 0
int SimpleMultiFitStream::_bpb = 128;
int SimpleMultiFitStream::_l1type = -1; // 0: SM=L1, 1: SM>L1, 2: L1>SM, -1:GPU default
int SimpleMultiFitStream::_bpbPartialD= 128;
int SimpleMultiFitStream::_l1typePartialD =-1;
int SimpleMultiFitStream::l1DefaultSettingMultiFit()
{
// 0: SM=L1, 1: SM>L1, 2: L1>SM, -1:GPU default
if(_computeVersion == 20 ) return 2;
if(_computeVersion == 35 ) return 1;
return 0;
}
int SimpleMultiFitStream::l1DefaultSettingPartialD()
{
if(_computeVersion == 20 ) return 2;
if(_computeVersion == 35 ) return 0;
return 0;
}
/////////////////////////////////////////////////
//MULTI FIT STREAM CLASS
SimpleMultiFitStream::SimpleMultiFitStream(streamResources * res, WorkerInfoQueueItem item ) :
cudaSimpleStreamExecutionUnit(res, item),
_myJob( static_cast< BkgModelWorkInfo * >( item.private_data )->flow_key,
static_cast< BkgModelWorkInfo * >( item.private_data )->inception_state->
bkg_control.signal_chunks.flow_block_sequence.BlockAtFlow(
static_cast< BkgModelWorkInfo * >( item.private_data )->flow )->size() )
{
setName("MultiFitStream");
if(_verbose) cout << getLogHeader() << " created" << endl;
// lambda values for each lev mar iteration
_lambda_start[0] = SMALL_LAMBDA;
_lambda_start[1] = LARGER_LAMBDA;
_clonal_restriction[0] = NO_NONCLONAL_PENALTY;
_clonal_restriction[1] = FULL_NONCLONAL_PENALTY;
// calculate clonality restriction
for (int i=0; i<CUDA_MULTIFLOW_NUM_FIT; ++i)
{
CalculateClonalityRestriction(i);
}
_fitNum = 0;
_curFitLevel = 0;
}
SimpleMultiFitStream::~SimpleMultiFitStream()
{
cleanUp();
}
void SimpleMultiFitStream::cleanUp()
{
if(_verbose) cout << getLogHeader() << " clean up" << endl;
CUDA_ERROR_CHECK();
}
void SimpleMultiFitStream::resetPointers()
{
if(_verbose) cout << getLogHeader() << " resetting pointers for job with " << _myJob.getNumBeads() << "("<< _myJob.getPaddedN() <<") beads and " << _myJob.getNumFrames() << " frames" << endl;
// multiple rounds of lev mar fits
_fit_training_level[0] = _myJob.getPostKeyFitAllWellsTrainingLevel();
_fit_training_level[1] = 1;
// lev mar iterations within each training level
_fit_iterations[0] = _myJob.getPostKeyFitAllWellsTrainingStep();
_fit_iterations[1] = HAPPY_ALL_BEADS;
// fit invariant inputs
try{
if(!_resource->checkDeviceMemory( getMaxDeviceMem(_myJob.getFlowKey(),_myJob.getFlowBlockSize(),_myJob.getNumFrames(),_myJob.getNumBeads(),&_myJob)))
cout << getLogHeader() << " Successfully reallocated device memory to handle Job" << endl;
_hConstP = _resource->getHostSegment(sizeof(ConstParams));
_hdBeadParams = _resource->GetHostDevPair(_myJob.getBeadParamsSize(true));
_resource->StartNewSegGroup();
// We reuse the same buffer for both _hdFgBuffer and _dPartialDerivsOutput+_dDelta.
_hdFgBuffer = _resource->GetHostDevPair( _myJob.getReusedFgBufferPartialDerivsSize(true) );
_hdCoarseNucRise = _resource->GetHostDevPair(_myJob.getCoarseNucRiseSize(true)); // ISIG_SUB_STEPS_MULTI_FLOW * F * flow_block_size
_hdSbg = _resource->GetHostDevPair(_myJob.getShiftedBackgroundSize(true)); // flow_block_size*F
_hdEmphasis = _resource->GetHostDevPair(_myJob.getEmphVecSize(true)); // (MAX_POISSON_TABLE_COL)*F
_hdNon_integer_penalty = _resource->GetHostDevPair(_myJob.getClonalCallScaleSize(true));
_hdDarkMatterComp = _resource->GetHostDevPair(_myJob.getDarkMatterSize(true)); // NUMNUC*F
_hdInvariantCopyInGroup = _resource->GetCurrentPairGroup();
// fit variant inputs
_resource->StartNewDeviceSegGroup();
_DevFitData.Steps = _resource->getDevSegment(_myJob.getPartialDerivStepsMaxSize(true));
_DevFitData.JTJMatrixMapForDotProductComputation = _resource->getDevSegment(_myJob.getJTJMatrixMapMaxSize(true));
_DevFitData.BeadParamIdxMap = _resource->getDevSegment(_myJob.getBeadParamIdxMapMaxSize(true));
_DevFitData.LambdaForBeadFit = _resource->getDevSegment(_myJob.getFloatPerBead(true));
MemSegment FitVariantDataDeviceGroup = _resource->GetCurrentDeviceGroup();
// fit specific host memory allocations
for (int i=0; i<CUDA_MULTIFLOW_NUM_FIT; ++i)
{
_resource->StartNewHostSegGroup();
_HostDeviceFitData[i].Steps = _resource->getHostSegment( _myJob.getPartialDerivStepsMaxSize(true));
_HostDeviceFitData[i].JTJMatrixMapForDotProductComputation = _resource->getHostSegment(_myJob.getJTJMatrixMapMaxSize(true));
_HostDeviceFitData[i].BeadParamIdxMap = _resource->getHostSegment(_myJob.getBeadParamIdxMapMaxSize(true));
_HostDeviceFitData[i].LambdaForBeadFit = _resource->getHostSegment(_myJob.getFloatPerBead(true));
MemSegment FitVariantDataHostGroup = _resource->GetCurrentHostGroup();
//create copy pair for each fitting.
_HostDeviceFitData[i].hdCopyGroup = MemSegPair(FitVariantDataHostGroup, FitVariantDataDeviceGroup);
}
// Device work/scratch buffer:
_dBeadParamsEval = _resource->getDevSegment(_myJob.getBeadParamsSize(true));
_dBeadParamsTranspose = _resource->getDevSegment(_myJob.getBeadParamsSize(true));
_dFgBufferTransposed = _resource->getDevSegment(_myJob.getFgBufferSize(true));
// we need a specific struct describing this config for this well fit for GPU
_dIval = _resource->getDevSegment(_myJob.getFxB(true)); // FLxNxF
_dScratch_ival = _resource->getDevSegment(_myJob.getFxB(true)); // FLxNxF
_dResidual = _resource->getDevSegment(_myJob.getFloatPerBead(true)); // FLxNxF
// lev mar fit matrices
_dJTJ = _resource->getDevSegment(_myJob.getParamMatrixMaxSize(true) );
_dLTR = _resource->getDevSegment(_myJob.getParamMatrixMaxSize(true) );
_dRHS = _resource->getDevSegment(_myJob.getParamRHSMaxSize(true));
//re-use fgBuffer device segment
_dPartialDerivsOutput = _hdFgBuffer.getDeviceSegment();
_dDelta = _dPartialDerivsOutput.splitAt( sizeof(float)*_myJob.getMaxSteps()*_myJob.getPaddedN()*
_myJob.getNumFrames() );
}
catch (cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Resource Acquisition!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
if(_verbose)cout << getLogHeader() << " " << _resource->Status() << endl;
}
void SimpleMultiFitStream::serializeFitInvariantInputs()
{ //inputs
  if(_verbose) cout << getLogHeader() <<" serialize data for fit invariant async global mem copy" << endl;
try{
_hdFgBuffer.copyIn(_myJob.getFgBuffer(), _myJob.getFgBufferSizeShort());
_hdBeadParams.copyIn(_myJob.getBeadParams() , _myJob.getBeadParamsSize());
_hdDarkMatterComp.copyIn(_myJob.getDarkMatter(), _myJob.getDarkMatterSize());
_hdSbg.copyIn(_myJob.getShiftedBackground(), _myJob.getShiftedBackgroundSize());
_hdEmphasis.copyIn(_myJob.getEmphVec() , _myJob.getEmphVecSize());
_hdCoarseNucRise.copyIn(_myJob.getCoarseNucRise() , _myJob.getCoarseNucRiseSize());
// a little hacky but we want to fill the structure in page locked memory with data
ConstParams* tmpConstP = _hConstP.getPtr();
//init the reg_param part (all we need from the reg params is non-dynamic)
reg_params* tmpConstPCastToReg = (reg_params*)tmpConstP;
    *(tmpConstPCastToReg) = *(_myJob.getRegionParams()); // use the region params as provided by the job
// init the rest of the ConstParam buffers
memcpy( tmpConstP->coarse_nuc_start, _myJob.getCoarseNucStart() , _myJob.getStartNucSize() );
memcpy( tmpConstP->deltaFrames, _myJob.getDeltaFrames() , _myJob.getDeltaFramesSize() );
memcpy( tmpConstP->flowIdxMap, _myJob.getFlowIdxMap() , _myJob.getFlowIdxMapSize());
memcpy( tmpConstP->non_zero_crude_emphasis_frames, _myJob.GetNonZeroEmphasisFrames(),
_myJob.GetNonZeroEmphasisFramesVecSize());
memcpy(&tmpConstP->beadParamsMaxConstraints, _myJob.getBeadParamsMax(), _myJob.getBeadParamsMaxSize());
memcpy(&tmpConstP->beadParamsMinConstraints, _myJob.getBeadParamsMin(), _myJob.getBeadParamsMinSize());
tmpConstP->useDarkMatterPCA = _myJob.useDarkMatterPCA();
}
catch (cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Input Serialization!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
void SimpleMultiFitStream::serializeFitSpecificInputs(int fit_index)
{
//inputs
  if(_verbose) cout << getLogHeader() <<" serialize data for fit specific async global mem copy" << endl;
try{
_HostDeviceFitData[fit_index].Steps.copyIn(_myJob.getPartialDerivSteps(fit_index) , _myJob.getPartialDerivStepsSize(fit_index) );
_HostDeviceFitData[fit_index].JTJMatrixMapForDotProductComputation.copyIn(_myJob.getJTJMatrixMap(fit_index), _myJob.getJTJMatrixMapSize(fit_index));
_HostDeviceFitData[fit_index].BeadParamIdxMap.copyIn(_myJob.getBeadParamIdxMap(fit_index), _myJob.getBeadParamIdxMapSize(fit_index) );
}
catch (cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Fit Specific Input Serialization!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
//////////////////////////
// ASYNC CUDA FUNCTIONS, KERNEL EXECUTION AND DATA HANDLING
void SimpleMultiFitStream::prepareFitSpecificInputs(
int fit_index)
{
//prepare environment for new job
SetUpLambdaArray(fit_index);
serializeFitSpecificInputs(fit_index);
}
void SimpleMultiFitStream::copyFitInvariantInputsToDevice()
{
//cout << "Copy data to GPU" << endl;
if(_verbose) cout << getLogHeader() << " Invariant Async Copy To Device" << endl;
try{
//_hdNon_integer_penalty.copyToDeviceAsync(_stream,_myJob.getClonalCallScaleSize());
//_hdFgBuffer.copyToDeviceAsync(_stream, _myJob.getFgBufferSizeShort());
//_hdDarkMatterComp.copyToDeviceAsync(_stream, _myJob.getDarkMatterSize());
//_hdSbg.copyToDeviceAsync(_stream, _myJob.getShiftedBackgroundSize());
//_hdEmphasis.copyToDeviceAsync( _stream, _myJob.getEmphVecSize());
//_hdNucRise.copyToDeviceAsync(_stream, _myJob.getNucRiseCoarseSize());
_hdInvariantCopyInGroup.copyToDeviceAsync(_stream);
// copyMultiFlowFitConstParamAsync(_HostConstP, getStreamId(),_stream);CUDA_ERROR_CHECK();
StreamingKernels::copyFittingConstParamAsync(_hConstP.getPtr(), getStreamId(),_stream);CUDA_ERROR_CHECK();
}
catch(cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Copy to device!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
void SimpleMultiFitStream::copyFitSpecifcInputsToDevice(int fit_index)
{
//cout << "Copy data to GPU" << endl;
if(_verbose) cout << getLogHeader() << " Fit Specific Async Copy To Device" << endl;
try{
//_DevFitData.Steps.copyAsync(_HostDeviceFitData[fit_index].Steps, _stream, _myJob.getPartialDerivStepsSize(fit_index));
//_DevFitData.JTJMatrixMapForDotProductComputation.copyAsync(_HostDeviceFitData[fit_index].JTJMatrixMapForDotProductComputation, _stream, _myJob.getJTJMatrixMapSize(fit_index));
//_DevFitData.BeadParamIdxMap.copyAsync(_HostDeviceFitData[fit_index].BeadParamIdxMap, _stream, _myJob.getBeadParamIdxMapSize(fit_index));
//_DevFitData.LambdaForBeadFit.copyAsync(_HostDeviceFitData[fit_index].LambdaForBeadFit,_stream,_myJob.getFloatPerBead());
_HostDeviceFitData[fit_index].hdCopyGroup.copyToDeviceAsync(_stream);
}
catch(cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Fit Specific Copy to device!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
void SimpleMultiFitStream::executeTransposeToFloat()
{
//cout << "TransposeToFloat Kernel" << endl;
int F = _myJob.getNumFrames();
int padN = _myJob.getPaddedN();
dim3 block(32,32);
dim3 grid( (F*_myJob.getFlowBlockSize()+ block.x-1)/block.x , (padN+block.y-1)/block.y);
StreamingKernels::transposeDataToFloat(grid, block, 0 ,_stream,_dFgBufferTransposed.getPtr(), _hdFgBuffer.getPtr(), F*_myJob.getFlowBlockSize(), padN);
CUDA_ERROR_CHECK();
}
void SimpleMultiFitStream::executeTransposeParams()
{
int padN = _myJob.getPaddedN();
//cout << "TransposeParams Kernel" << endl;
dim3 block(32,32);
int StructLength = (sizeof(BeadParams)/sizeof(float));
if((sizeof(BeadParams)%sizeof(float)) != 0 )
{
cerr << getLogHeader() <<" Structure not a multiple of sizeof(float), transpose not possible" << endl;
exit(-1);
}
dim3 grid((StructLength + block.x-1)/block.x , (padN+block.y-1)/block.y);
CUDA_ERROR_CHECK();
StreamingKernels::transposeData(grid, block, 0 ,_stream,_dBeadParamsTranspose.getPtr(), (float*)_hdBeadParams.getPtr(), StructLength, padN);
// cudaThreadSynchronize();CUDA_ERROR_CHECK();
}
void SimpleMultiFitStream::executeMultiFit(int fit_index)
{
if(_verbose) cout << getLogHeader() << " Exec Async Kernels" << endl;
//cout << "MultiFit Kernels" << endl;
int F = _myJob.getNumFrames();
int N = _myJob.getNumBeads();
dim3 blockPD( getBeadsPerBlockPartialD(), 1);
dim3 gridPD( (N+blockPD.x-1)/blockPD.x, 1 );
// int StructLength = (sizeof(BeadParams)/sizeof(float));
CUDA_ERROR_CHECK();
//async device to device copy
_dBeadParamsEval.copyAsync(_dBeadParamsTranspose, _stream, _myJob.getBeadParamsSize(true));
int sharedMem = _myJob.getEmphVecSize();
for (int i=0; i<_fit_iterations[fit_index]; ++i) {
//set scratchspace to 0
_dJTJ.memSetAsync(0, _stream, _myJob.getParamMatrixMaxSize(true));
_dRHS.memSetAsync(0, _stream, _myJob.getParamRHSMaxSize(true));
StreamingKernels::ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow(
getL1SettingPartialD(),
gridPD,
blockPD,
sharedMem,
_stream,
// inputs
_myJob.getMaxEmphasis(),
// weights for frames
_restrict_clonal[fit_index],
_dFgBufferTransposed.getPtr(),
_dIval.getPtr(),
_dScratch_ival.getPtr(),
_hdCoarseNucRise.getPtr(),
_hdSbg.getPtr(),
_hdEmphasis.getPtr(),
_hdNon_integer_penalty.getPtr(),
_hdDarkMatterComp.getPtr(),
_dBeadParamsTranspose.getPtr(),
_DevFitData.Steps.getPtr(),
_DevFitData.JTJMatrixMapForDotProductComputation.getPtr(), // pxp
_dJTJ.getPtr(),
_dRHS.getPtr(),
_myJob.getNumParams(fit_index),
_myJob.getNumSteps(fit_index),
N,
F,
_dResidual.getPtr(),
_dPartialDerivsOutput.getPtr(),
getStreamId(), // stream id for offset in const memory
_myJob.getFlowBlockSize());
dim3 block( getBeadsPerBlockMultiFit(), 1);
dim3 grid( (N+block.x-1)/block.x, 1 );
StreamingKernels::MultiFlowLevMarFit(
getL1SettingMultiFit(),
grid,
block,
sharedMem,
_stream,
_myJob.getMaxEmphasis(),
_restrict_clonal[fit_index],
_dFgBufferTransposed.getPtr(),
_dIval.getPtr(),
_dScratch_ival.getPtr(),
_hdCoarseNucRise.getPtr(),
_hdSbg.getPtr(),
_hdEmphasis.getPtr(),
_hdNon_integer_penalty.getPtr(),
_hdDarkMatterComp.getPtr(),
_dBeadParamsTranspose.getPtr(), // we will be indexing directly into it from the parameter indices provide by CpuStep
_dBeadParamsEval.getPtr(),
_DevFitData.LambdaForBeadFit.getPtr(),
_dJTJ.getPtr(), // jtj matrix
_dLTR.getPtr(), // lower triangular matrix
_dRHS.getPtr(), // rhs vector
_dDelta.getPtr(),
_DevFitData.BeadParamIdxMap.getPtr(),
_myJob.getNumParams(fit_index),
N,
F,
_dResidual.getPtr(), // N
getStreamId(),
_myJob.getFlowBlockSize());
}
++_curFitLevel;
}
void SimpleMultiFitStream::executeTransposeParamsBack()
{
//cout << "TransposeParamsBack Kernel" << endl;
int padN = _myJob.getPaddedN();
dim3 block(32,32);
int StructLength = (sizeof(BeadParams)/sizeof(float));
dim3 grid ((padN+block.y-1)/block.y, (StructLength + block.x-1)/block.x );
StreamingKernels::transposeData(grid, block, 0 ,_stream, (float*)_hdBeadParams.getPtr(), _dBeadParamsTranspose.getPtr(), padN, StructLength);
CUDA_ERROR_CHECK();
}
void SimpleMultiFitStream::copyBeadParamsToDevice()
{
_hdBeadParams.copyToDeviceAsync(_stream,_myJob.getBeadParamsSize());
}
void SimpleMultiFitStream::copyBeadParamsToHost()
{
_hdBeadParams.copyToHostAsync(_stream, _myJob.getBeadParamsSize());
}
int SimpleMultiFitStream::handleResults()
{
if(_myJob.isSet()){
if(_verbose) cout << getLogHeader() << " Handling Results "<< _fitNum << endl;
_hdBeadParams.copyOut(_myJob.getBeadParams(), _myJob.getBeadParamsSize());
_myJob.KeyNormalize(); // temporary call to key normalize till we put it into a GPU kernel
postFitProcessing();
    // if not the last iteration yet, copy bead data back to page-locked mem so the device can get updated
if(_fitNum < CUDA_MULTIFLOW_NUM_FIT){
_hdBeadParams.copyIn(_myJob.getBeadParams(),_myJob.getBeadParamsSize());
return 1; //signal more work to be done;
}
_myJob.setJobToRemainRegionFit();
_myJob.putJobToCPU(_item);
}
  return 0; //signal job complete
}
void SimpleMultiFitStream::SetUpLambdaArray(int fit_index) {
for (int i=0; i<_myJob.getNumBeads(); ++i) {
_HostDeviceFitData[fit_index].LambdaForBeadFit[i] = _lambda_start[fit_index];
}
}
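// Hedged overview of the per-job flow implemented below: on the first call
// (fit 0, training level 0) the CPU pre-fit steps run, buffers are (re)acquired,
// and the fit-invariant inputs are serialized and copied to the device once.
// Every call then uploads the current bead params and the fit-specific tables,
// transposes params/traces into a GPU-friendly layout, runs the partial-derivative
// and LevMar kernels for the current fit, transposes back and downloads the bead
// params; handleResults()/postFitProcessing() advance _fitNum and _curFitLevel.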
void SimpleMultiFitStream::ExecuteJob()
{
// printInfo(); cout << " i: " << _fitNum << " numBeads: " << _myJob.getNumBeads() << " numFrames:" << _myJob.getNumFrames() << endl;
if(_fitNum == 0 && _curFitLevel == 0){
preFitCpuSteps();
resetPointers();
CalculateNonIntegerPenalty();
serializeFitInvariantInputs();
copyFitInvariantInputsToDevice();
executeTransposeToFloat();
}
copyBeadParamsToDevice();
prepareFitSpecificInputs(_fitNum);
copyFitSpecifcInputsToDevice(_fitNum);
executeTransposeParams();
executeMultiFit(_fitNum);
executeTransposeParamsBack();
copyBeadParamsToHost();
}
bool SimpleMultiFitStream::InitJob() {
_myJob.setData(static_cast<BkgModelWorkInfo *>(getJobData()));
return _myJob.ValidJob();
}
void SimpleMultiFitStream::CalculateClonalityRestriction(int fit_index)
{
_restrict_clonal[fit_index] = 0;
float hpmax = 2.0f;
if (_clonal_restriction[fit_index] > 0)
{
if (hpmax > _clonal_restriction[fit_index])
hpmax = _clonal_restriction[fit_index];
_restrict_clonal[fit_index] = hpmax-0.5f;
}
}
void SimpleMultiFitStream::CalculateNonIntegerPenalty()
{
const float *clonal_call_scale = _myJob.getClonalCallScale();
float clonal_call_penalty = _myJob.getClonalCallPenalty();
for (int i=0; i<MAGIC_CLONAL_CALL_ARRAY_SIZE; ++i)
{
_hdNon_integer_penalty[i] = clonal_call_penalty * clonal_call_scale[i];
}
}
int SimpleMultiFitStream::getBeadsPerBlockMultiFit()
{
return _bpb;
}
int SimpleMultiFitStream::getL1SettingMultiFit()
{
if(_l1type < 0 || _l1type > 2){
return l1DefaultSettingMultiFit();
}
return _l1type;
}
int SimpleMultiFitStream::getBeadsPerBlockPartialD()
{
return _bpbPartialD;
}
int SimpleMultiFitStream::getL1SettingPartialD()
{
if(_l1typePartialD < 0 || _l1typePartialD > 2){
return l1DefaultSettingPartialD();
}
return _l1typePartialD;
}
void SimpleMultiFitStream::printStatus()
{
cout << getLogHeader() << " status: " << endl
<< " +------------------------------" << endl
<< " | block size MultiFit: " << getBeadsPerBlockMultiFit() << endl
<< " | l1 setting MultiFit: " << getL1SettingMultiFit() << endl
<< " | block size PartialD: " << getBeadsPerBlockPartialD() << endl
<< " | l1 setting PartialD: " << getL1SettingPartialD() << endl
<< " | state: " << _state << endl;
if(_resource->isSet())
cout << " | streamResource acquired successfully"<< endl;
else
cout << " | streamResource not acquired"<< endl;
_myJob.printJobSummary();
cout << " +------------------------------" << endl;
}
// Static member function
void SimpleMultiFitStream::requestResources(
int global_max_flow_key,
int global_max_flow_block_size,
float deviceFraction
)
{
// We need to check values both with key=0 and key=max_key.
// That way, we cover both extremes.
size_t devAlloc = static_cast<size_t>( deviceFraction *
max( getMaxDeviceMem(global_max_flow_key, global_max_flow_block_size, 0, 0),
getMaxDeviceMem(0, global_max_flow_block_size, 0, 0) ) );
size_t hostAlloc = max( getMaxHostMem(global_max_flow_key, global_max_flow_block_size),
getMaxHostMem(0, global_max_flow_block_size) );
cout << "CUDA: MultiFitStream active and resources requested dev = "<< devAlloc/(1024.0*1024) << "MB ("<< (int)(deviceFraction*100)<<"%) host = " << hostAlloc/(1024.0*1024) << "MB" << endl;
cudaResourcePool::requestDeviceMemory(devAlloc);
cudaResourcePool::requestHostMemory(hostAlloc);
}
size_t SimpleMultiFitStream::getMaxHostMem(int flow_key, int flow_block_size)
{
WorkSet Job( flow_key, flow_block_size );
size_t ret = 0;
ret += sizeof(ConstParams);
ret += Job.getFgBufferSizeShort(true);
ret += Job.getBeadParamsSize(true);
ret += Job.getReusedFgBufferPartialDerivsSize(true);
ret += Job.getCoarseNucRiseSize(true);
ret += Job.getShiftedBackgroundSize(true);
ret += Job.getEmphVecSize(true);
ret += Job.getClonalCallScaleSize(true);
ret += Job.getDarkMatterSize(true);
for (int i=0; i<CUDA_MULTIFLOW_NUM_FIT; ++i)
{
ret += Job.getPartialDerivStepsMaxSize(true);
ret += Job.getJTJMatrixMapMaxSize(true);
ret += Job.getBeadParamIdxMapMaxSize(true);
ret += Job.getFloatPerBead(true);
}
return ret;
}
size_t SimpleMultiFitStream::getMaxDeviceMem(
int flow_key,
int flow_block_size,
int numFrames,
int numBeads,
WorkSet *curJob
)
{
// create default job
WorkSet dummyJob( flow_key, flow_block_size );
  // if numFrames/numBeads are passed, overwrite the predefined maxFrames/maxBeads
// for the size calculation
if(numFrames >0) dummyJob.setMaxFrames(numFrames);
if(numBeads> 0) dummyJob.setMaxBeads(numBeads);
WorkSet *Job = NULL;
if (curJob)
Job = curJob;
else
Job = &dummyJob;
size_t ret = 0;
ret += Job->getBeadParamsSize(true); // _hdBeadParams
ret += Job->getReusedFgBufferPartialDerivsSize(true); // _hdFgBuffer
  ret += Job->getCoarseNucRiseSize(true); // _hdCoarseNucRise
ret += Job->getShiftedBackgroundSize(true); // _hdSbg
ret += Job->getEmphVecSize(true); // _hdEmphasis
ret += Job->getClonalCallScaleSize(true); // _hdNon_integer_penalty
ret += Job->getDarkMatterSize(true); // _hdDarkMatterComp
ret += Job->getPartialDerivStepsMaxSize(true); // _DevFitData.Steps
ret += Job->getJTJMatrixMapMaxSize(true); // _D...JTJMatrixMapForDotProductComputation
ret += Job->getBeadParamIdxMapMaxSize(true); // _DevFitData.BeadParamIdxMap
ret += Job->getFloatPerBead(true); // _DevFitData.LambdaForBeadFit
ret += Job->getBeadParamsSize(true); // _dBeadParamsEval
ret += Job->getBeadParamsSize(true); // _dBeadParamsTranspose
ret += Job->getFgBufferSize(true); // _dFgBufferTransposed
ret += Job->getFxB(true); // _dIval
  ret += Job->getFxB(true); // _dScratch_ival
ret += Job->getFloatPerBead(true); // _dResidual
ret += Job->getParamMatrixMaxSize(true); // _dJTJ
ret += Job->getParamMatrixMaxSize(true); // _dLTR
ret += Job->getParamRHSMaxSize(true); // _dRHS
#if DEBUG_SIZE
cout << "BP size: " << Job->getBeadParamsSize(true) << endl;
cout << "Fgbuffer: " << Job->getReusedFgBufferPartialDerivsSize(true) << endl;
cout << "Coarse rise: " << Job->getCoarseNucRiseSize(true) << endl;
cout << "Shifted bkg: " << Job->getShiftedBackgroundSize(true) << endl;
cout << "Emp size: " << Job->getEmphVecSize(true) << endl;
cout << "Clonal scale: " << Job->getClonalCallScaleSize(true) << endl;
cout << "Dark matter: " << Job->getDarkMatterSize(true) << endl;
cout << "PartialDerivstepmax: " << Job->getPartialDerivStepsMaxSize(true) << endl;
cout << "jtjmatrixmapsize: " << Job->getJTJMatrixMapMaxSize(true) << endl;
cout << "BeadParamIdmap: " << Job->getBeadParamIdxMapMaxSize(true) << endl;
cout << "Lambda: " << Job->getFloatPerBead(true) << endl;
cout << "BP size: " << Job->getBeadParamsSize(true) << endl;
cout << "BP size: " << Job->getBeadParamsSize(true) << endl;
cout << "Fgbuffer size: " << Job->getFgBufferSize(true) << endl;
cout << "FxB: " << Job->getFxB(true) << endl;
cout << "FxB: " << Job->getFxB(true) << endl;
cout << "Floatperbead: " << Job->getFloatPerBead(true) << endl;
cout << "MatrixMax: " << Job->getParamMatrixMaxSize(true) << endl;
cout << "MatrixMax: " << Job->getParamMatrixMaxSize(true) << endl;
cout << "RHS Max: " << Job->getParamRHSMaxSize(true) << endl;
#endif
return ret;
}
void SimpleMultiFitStream::setBeadsPerBlockMultiF(int bpb)
{
_bpb = bpb;
}
void SimpleMultiFitStream::setL1SettingMultiF(int type) // 0:sm=l1, 1:sm>l1, 2:sm<l1
{
_l1type = type;
}
void SimpleMultiFitStream::setBeadsPerBlockPartialD(int bpb)
{
_bpbPartialD = bpb;
}
void SimpleMultiFitStream::setL1SettingPartialD(int type) // 0:sm=l1, 1:sm>l1, 2:sm<l1
{
_l1typePartialD = type;
}
void SimpleMultiFitStream::printSettings()
{
cout << "CUDA: MultiFitStream SETTINGS: blocksize = " << _bpb << " l1setting = " ;
switch(_l1type){
case 0:
cout << "cudaFuncCachePreferEqual" << endl;;
break;
case 1:
cout << "cudaFuncCachePreferShared" <<endl;
break;
case 2:
cout << "cudaFuncCachePreferL1" << endl;
break;
default:
cout << " GPU specific default" << endl;;
}
cout << "CUDA: PartialDerivative SETTINGS: blocksize = " << _bpbPartialD << " l1setting = ";
switch(_l1typePartialD){
case 0:
cout << "cudaFuncCachePreferEqual" << endl;;
break;
case 1:
cout << "cudaFuncCachePreferShared" <<endl;
break;
case 2:
cout << "cudaFuncCachePreferL1" << endl;
break;
default:
cout << "GPU specific default" << endl;
}
}
void SimpleMultiFitStream::preFitCpuSteps()
{
_myJob.prepareMultiFlowFitMatrixConfig();
_myJob.performPreFitStepsForMultiFitStream();
}
void SimpleMultiFitStream::postFitProcessing()
{
if (_curFitLevel == _fit_training_level[_fitNum]) {
if (_fitNum == 0 && _myJob.performCalcPCADarkMatter())
{
      // PCA on CPU
_myJob.PerformePCA();
// update PCA flag
ConstParams* tmpConstP = _hConstP.getPtr();
tmpConstP->useDarkMatterPCA = _myJob.useDarkMatterPCA();
StreamingKernels::copyFittingConstParamAsync(tmpConstP, getStreamId(),_stream);CUDA_ERROR_CHECK();
//update DarkMatterComp
_hdDarkMatterComp.copyIn(_myJob.getDarkMatter(),_myJob.getDarkMatterSize());
_hdDarkMatterComp.copyToDeviceAsync(_stream, _myJob.getDarkMatterSize());
}
// go to next fit
++_fitNum;
// reset current training level
_curFitLevel = 0;
}
}
#pragma once
#include <gunrock/app/problem_base.cuh>
namespace gunrock {
namespace app {
namespace geo {
/**
* @brief Specifying parameters for the Geo Problem
* @param parameters The util::Parameter<...> structure holding all parameter
* info \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
return retval;
}
/**
* @brief Template Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef typename util::Array1D<SizeT, ValueT> ArrayT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// ----------------------------------------------------------------
// Dataslice structure
/**
* @brief Data structure containing problem-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
// Device arrays to store latitudes and longitudes
util::Array1D<SizeT, ValueT> latitude;
util::Array1D<SizeT, ValueT> longitude;
// Used for the Stop_Condition check of a complete Geo run
util::Array1D<SizeT, SizeT> active;
SizeT active_;
// Store inverse of Haversine Distances
util::Array1D<SizeT, ValueT> Dinv;
// Run as many iterations as possible to do a
// complete geolocation -> uses atomics()
bool geo_complete;
// Number of iterations for geolocation app
int geo_iter;
// Number of iterations for a spatial median kernel
int spatial_iter;
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice() {
latitude.SetName("latitude");
longitude.SetName("longitude");
active.SetName("active");
Dinv.SetName("Dinv");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(latitude.Release(target));
GUARD_CU(longitude.Release(target));
GUARD_CU(active.Release(target));
GUARD_CU(Dinv.Release(target));
GUARD_CU(BaseDataSlice::Release(target));
return retval;
}
/**
* @brief Initializing geo-specific data on each GPU
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
* @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &sub_graph, int num_gpus, int gpu_idx,
ProblemFlag flag, bool geo_complete_,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
SizeT nodes = this->sub_graph->nodes;
SizeT edges = this->sub_graph->edges + 1;
printf("Number of nodes for allocation: %u\n", nodes);
geo_complete = geo_complete_;
GUARD_CU(latitude.Allocate(nodes, target));
GUARD_CU(longitude.Allocate(nodes, target));
GUARD_CU(active.Allocate(1, util::HOST | target));
GUARD_CU(Dinv.Allocate(edges, target));
if (target & util::DEVICE) {
GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this->stream));
}
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(ValueT *h_latitude, ValueT *h_longitude, int _geo_iter,
int _spatial_iter, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->sub_graph->nodes;
SizeT edges = this->sub_graph->edges + 1;
// Ensure data are allocated
GUARD_CU(latitude.EnsureSize_(nodes, target));
GUARD_CU(longitude.EnsureSize_(nodes, target));
GUARD_CU(active.EnsureSize_(1, util::HOST | target));
GUARD_CU(Dinv.EnsureSize_(edges, target));
this->geo_iter = _geo_iter;
this->spatial_iter = _spatial_iter;
// Reset data
// Using spatial center we can determine the invalid predicted locations.
GUARD_CU(active.ForAll(
[] __host__ __device__(SizeT * x, const VertexT &pos) { x[pos] = 0; },
1, target, this->stream));
this->active_ = 0;
// Assumes that all vertices start with invalid positions; in reality, a
// preprocessing step should assign proper positions to the nodes that
// already have them.
GUARD_CU(latitude.SetPointer(h_latitude, nodes, util::HOST));
GUARD_CU(latitude.Move(util::HOST, util::DEVICE));
GUARD_CU(longitude.SetPointer(h_longitude, nodes, util::HOST));
GUARD_CU(longitude.Move(util::HOST, util::DEVICE));
return retval;
}
}; // DataSlice
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
bool geo_complete;
// ----------------------------------------------------------------
// Problem Methods
/**
* @brief geolocation default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {
geo_complete = _parameters.Get<bool>("geo-complete");
}
/**
* @brief geolocation default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* @brief Copy the predicted latitudes/longitudes computed on GPUs back to host-side arrays.
...
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(ValueT *h_predicted_lat, ValueT *h_predicted_lon,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->org_graph->nodes;
if (this->num_gpus == 1) {
auto &data_slice = data_slices[0][0];
// Set device
if (target == util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
GUARD_CU(
data_slice.latitude.SetPointer(h_predicted_lat, nodes, util::HOST));
GUARD_CU(data_slice.latitude.Move(util::DEVICE, util::HOST));
GUARD_CU(data_slice.longitude.SetPointer(h_predicted_lon, nodes,
util::HOST));
GUARD_CU(data_slice.longitude.Move(util::DEVICE, util::HOST));
} else if (target == util::HOST) {
GUARD_CU(data_slice.latitude.ForEach(
h_predicted_lat,
[] __host__ __device__(const ValueT &device_val, ValueT &host_val) {
host_val = device_val;
},
nodes, util::HOST));
GUARD_CU(data_slice.longitude.ForEach(
h_predicted_lon,
[] __host__ __device__(const ValueT &device_val, ValueT &host_val) {
host_val = device_val;
},
nodes, util::HOST));
}
} else { // num_gpus != 1
// ============ INCOMPLETE TEMPLATE - MULTIGPU ============
// // TODO: extract the results from multiple GPUs, e.g.:
// // util::Array1D<SizeT, ValueT *> th_distances;
// // th_distances.SetName("bfs::Problem::Extract::th_distances");
// // GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST));
// for (int gpu = 0; gpu < this->num_gpus; gpu++)
// {
// auto &data_slice = data_slices[gpu][0];
// if (target == util::DEVICE)
// {
// GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
// // GUARD_CU(data_slice.distances.Move(util::DEVICE,
// util::HOST));
// }
// // th_distances[gpu] = data_slice.distances.GetPointer(util::HOST);
// } //end for(gpu)
// for (VertexT v = 0; v < nodes; v++)
// {
// int gpu = this -> org_graph -> GpT::partition_table[v];
// VertexT v_ = v;
// if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) != 0)
// v_ = this -> org_graph -> GpT::convertion_table[v];
// // h_distances[v] = th_distances[gpu][v_];
// }
// // GUARD_CU(th_distances.Release());
}
return retval;
}
/**
* @brief initialization function.
* @param graph The graph that the geolocation app processes on
* @param[in] Location Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
this->gpu_idx[gpu], this->flag,
this->geo_complete, target));
}
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] h_latitude Host array of initial latitudes
* @param[in] h_longitude Host array of initial longitudes
* @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(ValueT *h_latitude, ValueT *h_longitude, int _geo_iter,
int _spatial_iter, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
// Reset data slices
for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu]->Reset(h_latitude, h_longitude, _geo_iter,
_spatial_iter, target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
};
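// Typical single-GPU driver flow for this Problem, sketched from the public
// methods above (error handling and the Geo enactor loop are omitted):
//
//   Problem<GraphT> problem(parameters);
//   problem.Init(graph, util::DEVICE);
//   problem.Reset(h_latitude, h_longitude, geo_iter, spatial_iter);
//   // ... run the Geo enactor ...
//   problem.Extract(h_predicted_lat, h_predicted_lon);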
} // namespace geo
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
the_stack
|
#define CUB_STDERR
#include <thrust/device_vector.h>
#include <thrust/distance.h>
#include <thrust/host_vector.h>
#include <thrust/mismatch.h>
#include <thrust/scan.h>
#include <cub/device/device_spmv.cuh>
#include <cub/util_debug.cuh>
#include <iostream>
#include <type_traits>
#include <typeinfo>
#include "test_util.h"
bool g_verbose = false;
//==============================================================================
// Casts char types to int for numeric printing
template <typename T>
T print_cast(T val)
{
return val;
}
int print_cast(char val) { return static_cast<int>(val); }
int print_cast(signed char val) { return static_cast<int>(val); }
int print_cast(unsigned char val) { return static_cast<int>(val); }
//==============================================================================
// Print a vector to out
template <typename VectorT>
void print_vector(std::ostream& out, const VectorT& vec)
{
bool first = true;
for (const auto& val : vec)
{
if (!first)
{
out << ", ";
}
first = false;
out << print_cast(val);
}
}
//==============================================================================
// Simple CSR matrix implementation.
// HostStorage controls whether data is stored on the host or device.
// Use the host_csr_matrix and device_csr_matrix aliases for code clarity.
template <typename ValueT, bool HostStorage>
struct csr_matrix
{
csr_matrix(int num_rows, int num_cols)
: m_row_offsets(static_cast<size_t>(num_rows + 1), 0)
, m_num_rows(num_rows)
, m_num_columns(num_cols)
{}
// host/device conversion constructor
explicit csr_matrix(const csr_matrix<ValueT, !HostStorage>& other)
: m_values(other.m_values)
, m_row_offsets(other.m_row_offsets)
, m_column_indices(other.m_column_indices)
, m_num_rows(other.m_num_rows)
, m_num_columns(other.m_num_columns)
, m_num_nonzeros(other.m_num_nonzeros)
{}
// Values must be appended in row-major order: finish filling each row before
// starting the next, and add each row's columns in ascending order.
// Must call `finalize` once all items are added.
void append_value(int row, int col, ValueT value)
{
++m_num_nonzeros;
++m_row_offsets[row];
m_column_indices.push_back(col);
m_values.push_back(std::move(value));
}
void finalize()
{
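// append_value() accumulated per-row nonzero counts in m_row_offsets; the
// exclusive scan below turns those counts into standard CSR row offsets
// (the final entry ends up equal to the total number of nonzeros).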
thrust::exclusive_scan(m_row_offsets.cbegin(),
m_row_offsets.cend(),
m_row_offsets.begin());
AssertEquals(m_row_offsets.back(), m_num_nonzeros);
}
const ValueT* get_values() const
{
return thrust::raw_pointer_cast(m_values.data());
}
const int* get_row_offsets() const
{
return thrust::raw_pointer_cast(m_row_offsets.data());
}
int get_row_offset(int row) const { return m_row_offsets[row]; }
int get_row_num_nonzero(int row) const
{
return m_row_offsets[row + 1] - m_row_offsets[row];
}
const int* get_column_indices() const
{
return thrust::raw_pointer_cast(m_column_indices.data());
}
int get_num_rows() const { return m_num_rows; }
int get_num_columns() const { return m_num_columns; }
int get_num_nonzeros() const { return m_num_nonzeros; }
void print_internals(std::ostream& out) const
{
out << (HostStorage ? "host" : "device") << "_csr_matrix"
<< "(" << m_num_rows << ", " << m_num_columns << ")\n"
<< " - num_elems: " << (m_num_rows * m_num_columns) << "\n"
<< " - num_nonzero: " << m_num_nonzeros << "\n"
<< " - row_offsets:\n [";
print_vector(out, m_row_offsets);
out << "]\n"
<< " - column_indices:\n [";
print_vector(out, m_column_indices);
out << "]\n"
<< " - values:\n [";
print_vector(out, m_values);
out << "]\n";
}
void print_summary(std::ostream& out) const
{
const int num_elems = m_num_rows * m_num_columns;
const float fill_ratio =
num_elems == 0
? 0.f
: (static_cast<float>(m_num_nonzeros) / static_cast<float>(num_elems));
out << m_num_rows << "x" << m_num_columns << ", " << m_num_nonzeros << "/"
<< num_elems << " (" << fill_ratio << ")\n";
}
friend class csr_matrix<ValueT, !HostStorage>;
private:
template <typename VecValueT>
using vector_t =
typename std::conditional<HostStorage,
thrust::host_vector<VecValueT>,
thrust::device_vector<VecValueT>>::type;
vector_t<ValueT> m_values;
vector_t<int> m_row_offsets;
vector_t<int> m_column_indices;
int m_num_rows{0};
int m_num_columns{0};
int m_num_nonzeros{0};
};
//==============================================================================
// Convenience aliases for host/device csr_matrix types.
template <typename ValueT>
using host_csr_matrix = csr_matrix<ValueT, true>;
template <typename ValueT>
using device_csr_matrix = csr_matrix<ValueT, false>;
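//==============================================================================
// Minimal usage sketch of the append/finalize contract above (illustrative
// only; the 2x3 matrix and the helper name are arbitrary choices).
template <typename ValueT>
host_csr_matrix<ValueT> make_example_csr_matrix()
{
  // [ 1 0 2 ]
  // [ 0 3 0 ]
  host_csr_matrix<ValueT> mat{2, 3};
  mat.append_value(0, 0, ValueT{1}); // row 0 first, columns in ascending order
  mat.append_value(0, 2, ValueT{2});
  mat.append_value(1, 1, ValueT{3}); // then row 1
  mat.finalize();                    // row_offsets become {0, 2, 3}
  return mat;
}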
//==============================================================================
// Compare two floats within a tolerance.
// This mimics the approach used by Thrust's ASSERT_ALMOST_EQUAL checks.
template <typename ValueT>
struct fp_almost_equal_functor
{
__host__ __device__ bool operator()(ValueT v1, ValueT v2) const
{
constexpr double r_tol = 1e-3;
constexpr double a_tol = 1e-2;
const double limit = r_tol * (std::fabs(v1) + std::fabs(v2)) + a_tol;
return std::fabs(v1 - v2) <= limit;
}
};
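// Worked example: for v1 = 100.0 and v2 = 100.05, the limit is
// 1e-3 * (100.0 + 100.05) + 1e-2 = 0.21005, so the pair compares as almost
// equal since |v1 - v2| = 0.05 <= 0.21005.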
//==============================================================================
// Compare the reference and cub output vectors.
// Use fuzzy check for floating point values.
template <typename ValueT>
bool compare_results(std::true_type /* is_fp */,
const thrust::host_vector<ValueT>& h_vec1,
const thrust::device_vector<ValueT>& d_vec2)
{
thrust::device_vector<ValueT> d_vec1(h_vec1);
auto err = thrust::mismatch(d_vec1.cbegin(),
d_vec1.cend(),
d_vec2.cbegin(),
fp_almost_equal_functor<ValueT>{});
if (err.first == d_vec1.cend() || err.second == d_vec2.cend())
{
return true;
}
else
{
thrust::host_vector<ValueT> h_vec2(d_vec2);
const auto idx = thrust::distance(d_vec1.cbegin(), err.first);
std::cerr << "Mismatch at position " << idx << ": "
<< print_cast(ValueT{h_vec1[idx]}) << " vs "
<< print_cast(ValueT{h_vec2[idx]}) << std::endl;
return false;
}
}
template <typename ValueT>
bool compare_results(std::false_type /* is_fp */,
const thrust::host_vector<ValueT>& h_vec1,
const thrust::device_vector<ValueT>& d_vec2)
{
thrust::device_vector<ValueT> d_vec1(h_vec1);
auto err = thrust::mismatch(d_vec1.cbegin(), d_vec1.cend(), d_vec2.cbegin());
if (err.first == d_vec1.cend() || err.second == d_vec2.cend())
{
return true;
}
else
{
thrust::host_vector<ValueT> h_vec2(d_vec2);
const auto idx = thrust::distance(d_vec1.cbegin(), err.first);
std::cerr << "Mismatch at position " << idx << ": "
<< print_cast(ValueT{h_vec1[idx]}) << " vs "
<< print_cast(ValueT{h_vec2[idx]}) << std::endl;
return false;
}
}
//==============================================================================
// Generate a random host_csr_matrix<ValueT> with the specified dimensions.
// target_fill_ratio is the target fraction of non-zero elements (the actual
// ratio in the output may be somewhat higher or lower).
template <typename ValueT>
host_csr_matrix<ValueT> make_random_csr_matrix(int num_rows,
int num_cols,
float target_fill_ratio)
{
host_csr_matrix<ValueT> mat{num_rows, num_cols};
for (int row = 0; row < num_rows; ++row)
{
for (int col = 0; col < num_cols; ++col)
{
const bool is_non_zero = RandomValue<float>(1.f) < target_fill_ratio;
if (!is_non_zero)
{
continue;
}
if (std::is_floating_point<ValueT>::value)
{
// Keep fp numbers somewhat small, from -100 -> 100; otherwise we run
// into issues with nans/infs
ValueT value =
(RandomValue(static_cast<ValueT>(200)) - static_cast<ValueT>(100));
mat.append_value(row, col, value);
}
else
{
ValueT value{};
InitValue(RANDOM, value);
mat.append_value(row, col, value);
}
}
}
mat.finalize();
const int num_elements = num_rows * num_cols;
const float actual_fill_ratio = static_cast<float>(mat.get_num_nonzeros()) /
static_cast<float>(num_elements);
if (g_verbose)
{
printf("Created host_csr_matrix<%s>(%d, %d)\n"
" - NumElements: %d\n"
" - NumNonZero: %d\n"
" - Target fill: %0.2f%%\n"
" - Actual fill: %0.2f%%\n",
typeid(ValueT).name(),
num_rows,
num_cols,
num_elements,
mat.get_num_nonzeros(),
target_fill_ratio,
actual_fill_ratio);
}
return mat;
}
//==============================================================================
// Fill a vector with random values.
template <typename ValueT>
thrust::host_vector<ValueT> make_random_vector(int len)
{
thrust::host_vector<ValueT> vec(len);
for (auto& val : vec)
{
if (std::is_floating_point<ValueT>::value)
{ // Keep fp numbers somewhat small; otherwise we run into issues with
// nans/infs
val = RandomValue(static_cast<ValueT>(200)) - static_cast<ValueT>(100);
}
else
{
InitValue(RANDOM, val);
}
}
return vec;
}
//==============================================================================
// Serial y = Ax computation
template <typename ValueT>
void compute_reference_solution(const host_csr_matrix<ValueT>& a,
const thrust::host_vector<ValueT>& x,
thrust::host_vector<ValueT>& y)
{
if (a.get_num_rows() == 0 || a.get_num_columns() == 0)
{
return;
}
for (int row = 0; row < a.get_num_rows(); ++row)
{
const int row_offset = a.get_row_offset(row);
const int row_length = a.get_row_num_nonzero(row);
const int* cols = a.get_column_indices() + row_offset;
const int* cols_end = cols + row_length;
const ValueT* values = a.get_values() + row_offset;
ValueT accum{};
while (cols < cols_end)
{
accum += (*values++) * x[*cols++];
}
y[row] = accum;
}
}
//==============================================================================
// cub::DeviceSpmv::CsrMV y = Ax computation
template <typename ValueT>
void compute_cub_solution(const device_csr_matrix<ValueT>& a,
const thrust::device_vector<ValueT>& x,
thrust::device_vector<ValueT>& y)
{
thrust::device_vector<char> temp_storage;
std::size_t temp_storage_bytes{};
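// CUB's usual two-phase pattern: the first CsrMV call, with a null
// temp-storage pointer, only writes the required temp_storage_bytes; the
// second call (after resizing temp_storage) performs the actual SpMV.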
auto err = cub::DeviceSpmv::CsrMV(nullptr,
temp_storage_bytes,
a.get_values(),
a.get_row_offsets(),
a.get_column_indices(),
thrust::raw_pointer_cast(x.data()),
thrust::raw_pointer_cast(y.data()),
a.get_num_rows(),
a.get_num_columns(),
a.get_num_nonzeros());
CubDebugExit(err);
temp_storage.resize(temp_storage_bytes);
err = cub::DeviceSpmv::CsrMV(thrust::raw_pointer_cast(temp_storage.data()),
temp_storage_bytes,
a.get_values(),
a.get_row_offsets(),
a.get_column_indices(),
thrust::raw_pointer_cast(x.data()),
thrust::raw_pointer_cast(y.data()),
a.get_num_rows(),
a.get_num_columns(),
a.get_num_nonzeros(),
0,
true);
CubDebugExit(err);
}
//==============================================================================
// Compute y = Ax twice, one reference and one cub::DeviceSpmv, and compare the
// results.
template <typename ValueT>
void test_spmv(const host_csr_matrix<ValueT>& h_a,
const thrust::host_vector<ValueT>& h_x)
{
if (g_verbose)
{
std::cout << "Testing cub::DeviceSpmv on inputs:\n";
h_a.print_internals(std::cout);
std::cout << "x vector:\n [";
print_vector(std::cout, h_x);
std::cout << "]" << std::endl;
}
else
{
h_a.print_summary(std::cout);
}
const device_csr_matrix<ValueT> d_a(h_a);
const thrust::device_vector<ValueT> d_x(h_x);
thrust::host_vector<ValueT> h_y(h_a.get_num_rows());
thrust::device_vector<ValueT> d_y(d_a.get_num_rows());
compute_reference_solution(h_a, h_x, h_y);
compute_cub_solution(d_a, d_x, d_y);
if (g_verbose)
{
std::cout << "reference output:\n [";
print_vector(std::cout, h_y);
std::cout << "]\n";
thrust::host_vector<ValueT> tmp_y(d_y);
std::cout << "cub::DeviceSpmv output:\n [";
print_vector(std::cout, tmp_y);
std::cout << "]" << std::endl;
}
constexpr auto is_fp = std::is_floating_point<ValueT>{};
AssertTrue(compare_results(is_fp, h_y, d_y));
}
//==============================================================================
// Test example from cub::DeviceSpmv documentation
template <typename ValueT>
void test_doc_example()
{
std::cout << "\n\ntest_doc_example<" << typeid(ValueT).name() << ">()"
<< std::endl;
host_csr_matrix<ValueT> h_a(9, 9);
h_a.append_value(0, 1, ValueT{1});
h_a.append_value(0, 3, ValueT{1});
h_a.append_value(1, 0, ValueT{1});
h_a.append_value(1, 2, ValueT{1});
h_a.append_value(1, 4, ValueT{1});
h_a.append_value(2, 1, ValueT{1});
h_a.append_value(2, 5, ValueT{1});
h_a.append_value(3, 0, ValueT{1});
h_a.append_value(3, 4, ValueT{1});
h_a.append_value(3, 6, ValueT{1});
h_a.append_value(4, 1, ValueT{1});
h_a.append_value(4, 3, ValueT{1});
h_a.append_value(4, 5, ValueT{1});
h_a.append_value(4, 7, ValueT{1});
h_a.append_value(5, 2, ValueT{1});
h_a.append_value(5, 4, ValueT{1});
h_a.append_value(5, 8, ValueT{1});
h_a.append_value(6, 3, ValueT{1});
h_a.append_value(6, 7, ValueT{1});
h_a.append_value(7, 4, ValueT{1});
h_a.append_value(7, 6, ValueT{1});
h_a.append_value(7, 8, ValueT{1});
h_a.append_value(8, 5, ValueT{1});
h_a.append_value(8, 7, ValueT{1});
h_a.finalize();
thrust::host_vector<ValueT> h_x(9, ValueT{1});
test_spmv(h_a, h_x);
}
//==============================================================================
// Generate and test a random SpMV operation with the given parameters.
template <typename ValueT>
void test_random(int rows, int cols, float target_fill_ratio)
{
std::cout << "\n\ntest_random<" << typeid(ValueT).name() << ">(" << rows
<< ", " << cols << ", " << target_fill_ratio << ")" << std::endl;
host_csr_matrix<ValueT> h_a =
make_random_csr_matrix<ValueT>(rows, cols, target_fill_ratio);
thrust::host_vector<ValueT> h_x = make_random_vector<ValueT>(cols);
test_spmv(h_a, h_x);
}
//==============================================================================
// Dispatch many random SpMV tests over a variety of parameters.
template <typename ValueT>
void test_random()
{
test_random<ValueT>(0, 0, 1.f);
test_random<ValueT>(0, 1, 1.f);
test_random<ValueT>(1, 0, 1.f);
const int dim_min = 1;
const int dim_max = 10000;
const int max_num_elems = 100000;
const float ratio_min = 0.f;
const float ratio_max = 1.1f; // slightly above 1 so the ~1.0 ratio is still hit despite fp error
const float ratio_step = 0.3334f;
for (int rows = dim_min; rows < dim_max; rows <<= 1)
{
for (int cols = dim_min; cols < dim_max; cols <<= 1)
{
if (rows * cols >= max_num_elems)
{
continue;
}
for (float ratio = ratio_min; ratio < ratio_max; ratio += ratio_step)
{
test_random<ValueT>(rows, cols, ratio);
// Test nearby non-power-of-two dims:
test_random<ValueT>(rows + 97, cols + 83, ratio);
}
}
}
}
//==============================================================================
// Dispatch many SpMV tests for a given ValueT.
template <typename ValueT>
void test_type()
{
test_doc_example<ValueT>();
test_random<ValueT>();
}
//==============================================================================
// Dispatch many SpMV tests over a variety of types.
void test_types()
{
test_type<float>();
test_type<double>();
test_type<char>();
test_type<int>();
test_type<unsigned long long>();
}
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] verbose"
"\n",
argv[0]);
exit(0);
}
CubDebugExit(args.DeviceInit());
test_types();
}
|
the_stack
|
#pragma once
extern "C"
{
__device__ unsigned int debugCounters[1024];
}
#include <math/vector.h>
#include <math/matrix.h>
#include "triangle_buffer.cuh"
#include "index_queue.cuh"
#include "bin_raster.cuh"
#include "tile_raster.cuh"
#include "bitmask.cuh"
#include "config.h"
#include "utils.cuh"
#include "viewport.cuh"
#include "utils/work_assignment.cuh"
#include "buffer_definitions.cuh"
template <unsigned int NUM_WARPS, class RasterizerSpace, class CoverageShader, class FragmentShader, class FrameBuffer>
class RasterizationStage
{
private:
typedef typename ::FragmentShaderInputSignature<decltype(FragmentShader::shade)>::type FragmentShaderInputSignature;
static_assert(NUM_WARPS <= WARP_SIZE, "NUM_WARPS must be smaller than or equal to WARP_SIZE: the rasterization stage work assignment depends on decisions made in a single warp");
static constexpr int NUM_THREADS = NUM_WARPS*WARP_SIZE;
typedef ::BinRasterizer<NUM_WARPS, RasterizerSpace> BinRasterizer;
typedef ::TileRasterizer<NUM_WARPS, RasterizerSpace, CoverageShader, FragmentShader, FrameBuffer> TileRasterizer;
typedef ::BlockWorkAssignment<NUM_THREADS> BlockWorkAssignment;
typedef ::TileBitMask<RasterizerSpace> TileBitMask;
struct BinTrianglePack
{
static constexpr unsigned int TRI_BITS = 10U;
static constexpr unsigned int BIN_COORD_BITS = 11U;
static constexpr unsigned int TRI_OFFSET = 2U * BIN_COORD_BITS;
static constexpr unsigned int BIN_X_OFFSET = 0;
static constexpr unsigned int BIN_Y_OFFSET = BIN_COORD_BITS;
static constexpr unsigned int BIN_MASK = (1U << BIN_COORD_BITS) - 1U;
static constexpr unsigned int COMBBIN_MASK = (1U << (2*BIN_COORD_BITS))-1U;
unsigned int value;
public:
__device__
BinTrianglePack(unsigned int localTriangleId, unsigned int bin_x, unsigned int bin_y)
: value((localTriangleId << TRI_OFFSET) | (bin_x << BIN_X_OFFSET) | (bin_y << BIN_Y_OFFSET))
{ }
__device__ unsigned int triId() const { return value >> TRI_OFFSET; }
__device__ unsigned int binX() const { return (value >> BIN_X_OFFSET) & BIN_MASK; }
__device__ unsigned int binY() const { return (value >> BIN_Y_OFFSET) & BIN_MASK; }
__device__ unsigned int combbin() const { return value & COMBBIN_MASK; }
};
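// Bit layout of `value` with TRI_BITS = 10 and BIN_COORD_BITS = 11:
//   bits [31..22] local triangle id (0..1023)
//   bits [21..11] bin y coordinate  (0..2047)
//   bits [10.. 0] bin x coordinate  (0..2047)
// e.g. BinTrianglePack(5, 3, 7).value == (5 << 22) | (7 << 11) | 3,
// which decodes back to triId() == 5, binX() == 3, binY() == 7.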
public:
static constexpr size_t SHARED_MEMORY = sizeof(unsigned int)*NUM_THREADS
+ 2 * BlockWorkAssignment::SHARED_MEMORY
+ sizeof(TileBitMask)* NUM_THREADS
+ sizeof(BinTrianglePack) * NUM_THREADS
+ NUM_THREADS*sizeof(ushort2)
+ static_max<BlockWorkAssignment::SHARED_TEMP_MEMORY, BinRasterizer::SHARED_MEMORY, TileRasterizer::SHARED_MEMORY>::value;
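// SHARED_MEMORY adds up, in order, the regions that run() carves out below:
// tri_ids (one uint per thread), the bin and tile BlockWorkAssignment blocks,
// one TileBitMask and one BinTrianglePack per thread, the per-warp work
// assignment (one ushort2 per thread), and finally a scratch region sized for
// the largest consumer (BlockWorkAssignment temp, BinRasterizer, or
// TileRasterizer).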
__device__
static unsigned int enqueueTriangle(unsigned int triangle_id, const math::int4& bounds)
{
int2 start_bin = RasterizerSpace::bin(bounds.x, bounds.y);
int2 end_bin = RasterizerSpace::bin(bounds.z, bounds.w);
return RasterizerSpace::traverseRasterizers(start_bin, end_bin, [triangle_id](int r)
{
rasterizer_queue[r].enqueue(triangle_id);
});
}
__device__
static bool sufficientToRun(char* shared_memory)
{
int* num = reinterpret_cast<int*>(shared_memory + SHARED_MEMORY - 3 * sizeof(int));
if (threadIdx.x == 0)
*num = rasterizer_queue[RasterizerSpace::MyQueue()].size();
__syncthreads();
return *num >= NUM_THREADS;
}
__device__
static bool run(char* shared_memory)
{
unsigned int* tri_ids = reinterpret_cast<unsigned int*>(shared_memory);
char* bin_work_assignment_shared = shared_memory + sizeof(unsigned int)*NUM_THREADS;
char* tile_work_assignment_shared = bin_work_assignment_shared + BlockWorkAssignment::SHARED_MEMORY;
char* c_tile_bit_masks = tile_work_assignment_shared + BlockWorkAssignment::SHARED_MEMORY;
TileBitMask* tile_bit_masks = reinterpret_cast<TileBitMask*>(c_tile_bit_masks);
char* c_bin_triangle_pack = c_tile_bit_masks + sizeof(TileBitMask)* NUM_THREADS;
BinTrianglePack* bin_triangle_pack = reinterpret_cast<BinTrianglePack*>(c_bin_triangle_pack);
char* warp_work_assignment = c_bin_triangle_pack + sizeof(BinTrianglePack)* NUM_THREADS;
char* shared_temp = warp_work_assignment + sizeof(ushort2)* NUM_THREADS;
char* tile_raster_shared = shared_temp;
unsigned int triidin = 0xFFFFFFFFU;
int num_tris = rasterizer_queue[RasterizerSpace::MyQueue()].dequeueBlock(&triidin, NUM_THREADS);
tri_ids[threadIdx.x] = triidin;
if (num_tris > 0)
{
int num_bins = 0;
if (threadIdx.x < num_tris)
{
// compute num elements
math::int4 bounds = triangle_buffer.loadBounds(triidin);
int2 start_bin = RasterizerSpace::bin(bounds.x, bounds.y);
int2 end_bin = RasterizerSpace::bin(bounds.z, bounds.w);
num_bins = RasterizerSpace::numHitBinsForMyRasterizer(start_bin, end_bin);
}
BlockWorkAssignment::prepare(bin_work_assignment_shared, shared_temp, num_bins);
__syncthreads();
do
{
// process bin of triangle
int triangle, bin;
int num_tiles = 0;
if (BlockWorkAssignment::pullWorkThreads(bin_work_assignment_shared, shared_temp, triangle, bin))
{
//if (blockIdx.x == 2)
// printf("%d got %d %d\n", threadIdx.x, triangle, bin);
int triangleId = tri_ids[triangle];
math::int4 bounds = triangle_buffer.loadBounds(triangleId);
int2 start_bin = RasterizerSpace::bin(bounds.x, bounds.y);
int2 end_bin = RasterizerSpace::bin(bounds.z, bounds.w);
int2 binid = RasterizerSpace::getHitBinForMyRasterizer(bin, start_bin, end_bin);
// store meta information
bin_triangle_pack[threadIdx.x] = BinTrianglePack(triangle, binid.x, binid.y);
num_tiles = BinRasterizer::run(shared_temp, tile_bit_masks, triangleId, binid);
}
__syncthreads();
// assign tiles
BlockWorkAssignment::prepare(tile_work_assignment_shared, shared_temp, num_tiles);
__syncthreads();
do
{
int wip = threadIdx.x / WARP_SIZE;
ushort2 *warpdata = reinterpret_cast<ushort2*>(warp_work_assignment);
// process tile of triangle
BlockWorkAssignment::pullWorkSelectiveThreads(tile_work_assignment_shared, shared_temp,
[&tile_raster_shared, &tri_ids, &warpdata, &tile_bit_masks, &bin_triangle_pack, wip](int* count, int* sum_count, int2 threadWork, bool haswork)->bool
{
// write work assignment to shared
// TODO: make sure different warps are not working on the same tile in parallel!
//// one warp makes sure that we do not end up with different triangles for the same tile
//if (threadIdx.x < 32)
//{
// unsigned int mytri = bin_triangle_pack[threadWork.x].triId();
// unsigned int combbin = bin_triangle_pack[threadWork.x].combbin();
// bool canwork = true;
// #pragma unroll
// for (int i = 0; i < 32; ++i)
// {
// unsigned int vtre = __shfl_sync(~0U, mytri, i);
// unsigned int vcombbin = __shfl_sync(~0U, combbin, i);
// TileBitMask vBinmask = tile_bit_masks[threadWork.x].shfl(i);
// if (threadIdx.x > i && vtre != mytri && vcombbin == combbin && vBinmask.overlap(tile_bit_masks[threadWork.x]))
// canwork = false;
// }
// unsigned int workmask = __ballot_sync(~0U, canwork);
// unsigned int numwork = min(NUM_WARPS, __popc(workmask));
// int myworkoffset = __popc(workmask & lanemask_lt());
// if (canwork && myworkoffset < numwork)
// {
// warpdata[myworkoffset] = make_int2(threadWork.x, count[threadWork.x] - threadWork.y - 1);
// if (__shfl_down_sync(~0U, threadWork.x, 1) != threadWork.x || (myworkoffset + 1 == numwork))
// {
// count[threadWork.x] = max(0, count[threadWork.x] - threadWork.y - 1);
// }
// }
// if (threadIdx.x >= numwork && threadIdx.x < NUM_WARPS)
// warpdata[threadIdx.x] = make_int2(0, -1);
//}
warpdata[threadIdx.x] = make_ushort2(threadWork.x, threadWork.y >= 0 ? threadWork.y : 0xFFFFU);
__syncthreads();
#pragma unroll
for (int i = 0; i < NUM_THREADS; i += NUM_WARPS)
{
uint2 tw = make_uint2(warpdata[i + wip].x, warpdata[i + wip].y);
if (tw.y != 0xFFFFU)
{
int tileid = tile_bit_masks[tw.x].getSetBitWarp(tw.y);
TileRasterizer::run(tile_raster_shared, tileid,
tri_ids[bin_triangle_pack[tw.x].triId()],
bin_triangle_pack[tw.x].binX(), bin_triangle_pack[tw.x].binY());
}
}
__syncthreads();
//// for now just take next best tile and reduce count
//if (threadIdx.x < NUM_WARPS)
// warpdata[threadIdx.x] = threadWork;
// //warpdata[threadIdx.x] = make_int2(threadWork.y>=0?threadWork.x:-1, tile_bit_masks[threadWork.x].getSetBit(threadWork.y));
//count[threadIdx.x] = max(0, min(count[threadIdx.x], sum_count[threadIdx.x] - static_cast<int>(NUM_WARPS)));
count[threadIdx.x] = max(0, min(count[threadIdx.x], sum_count[threadIdx.x] - static_cast<int>(NUM_THREADS)));
return true;
}, true);
} while (BlockWorkAssignment::isWorkAvailable(tile_work_assignment_shared));
} while (BlockWorkAssignment::isWorkAvailable(bin_work_assignment_shared));
//////////////////////////////////////////////////////////////////////////////
//// vis bounding box
//__syncthreads();
//int wip = threadIdx.x / WARP_SIZE;
//for (int i = wip; i < num_tris; i += NUM_WARPS)
//{
// math::int4 bounds = triangle_buffer.loadBounds(tri_ids[i]);
// int2 start_bin = RasterizerSpace::bin(bounds.x, bounds.y);
// for (int x = bounds.x + laneid(); x < bounds.z; x += warpSize)
// {
// FrameBuffer::writeColor(x, bounds.y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(x, bounds.w, make_uchar4(255, 255, 255, 255));
// }
// for (int y = bounds.y + laneid(); y < bounds.w; y += warpSize)
// {
// FrameBuffer::writeColor(bounds.x, y, make_uchar4(255, 255, 255, 255));
// FrameBuffer::writeColor(bounds.z, y, make_uchar4(255, 255, 255, 255));
// }
//}
////////////////////////////////////////////////////////////////////////////////
__threadfence();
if (tri_ids[threadIdx.x] != 0xFFFFFFFFU)
{
triangle_buffer.release(tri_ids[threadIdx.x]);
}
return true;
}
return false;
}
};
|
the_stack
|
#include <nvbench/axes_metadata.cuh>
#include <nvbench/axis_base.cuh>
#include <nvbench/benchmark.cuh>
#include <nvbench/callable.cuh>
#include "test_asserts.cuh"
#include <fmt/format.h>
// Mock up a benchmark for testing:
void dummy_generator(nvbench::state &) {}
NVBENCH_DEFINE_CALLABLE(dummy_generator, dummy_callable);
using dummy_bench = nvbench::benchmark<dummy_callable>;
using floats = nvbench::type_list<nvbench::float32_t, nvbench::float64_t>;
using ints = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>;
using misc = nvbench::type_list<void, bool>;
using type_axes = nvbench::type_list<floats, ints, misc>;
template <typename F, typename I, typename M>
void template_generator(nvbench::state &, nvbench::type_list<F, I, M>) {}
NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_generator, template_callable);
using template_bench = nvbench::benchmark<template_callable, type_axes>;
void test_empty()
{
// no axes = one state
nvbench::detail::state_iterator sg;
ASSERT(sg.get_number_of_states() == 1);
sg.init();
ASSERT(sg.iter_valid());
sg.next();
ASSERT(!sg.iter_valid());
}
void test_single_state()
{
// one single-value axis = one state
nvbench::detail::state_iterator sg;
sg.add_axis("OnlyAxis", nvbench::axis_type::string, 1);
ASSERT(sg.get_number_of_states() == 1);
sg.init();
ASSERT(sg.iter_valid());
ASSERT(sg.get_current_indices().size() == 1);
ASSERT(sg.get_current_indices()[0].axis == "OnlyAxis");
ASSERT(sg.get_current_indices()[0].index == 0);
ASSERT(sg.get_current_indices()[0].size == 1);
ASSERT(sg.get_current_indices()[0].type == nvbench::axis_type::string);
sg.next();
ASSERT(!sg.iter_valid());
}
void test_basic()
{
nvbench::detail::state_iterator sg;
sg.add_axis("Axis1", nvbench::axis_type::string, 2);
sg.add_axis("Axis2", nvbench::axis_type::string, 3);
sg.add_axis("Axis3", nvbench::axis_type::string, 3);
sg.add_axis("Axis4", nvbench::axis_type::string, 2);
ASSERT_MSG(sg.get_number_of_states() == (2 * 3 * 3 * 2),
"Actual: {} Expected: {}",
sg.get_number_of_states(),
2 * 3 * 3 * 2);
fmt::memory_buffer buffer;
fmt::memory_buffer line;
std::size_t line_num{0};
for (sg.init(); sg.iter_valid(); sg.next())
{
line.clear();
fmt::format_to(line, "| {:^2}", line_num++);
for (auto &axis_index : sg.get_current_indices())
{
ASSERT(axis_index.type == nvbench::axis_type::string);
fmt::format_to(line,
" | {}: {}/{}",
axis_index.axis,
axis_index.index,
axis_index.size);
}
fmt::format_to(buffer, "{} |\n", fmt::to_string(line));
}
const std::string ref =
R"expected(| 0 | Axis1: 0/2 | Axis2: 0/3 | Axis3: 0/3 | Axis4: 0/2 |
| 1 | Axis1: 1/2 | Axis2: 0/3 | Axis3: 0/3 | Axis4: 0/2 |
| 2 | Axis1: 0/2 | Axis2: 1/3 | Axis3: 0/3 | Axis4: 0/2 |
| 3 | Axis1: 1/2 | Axis2: 1/3 | Axis3: 0/3 | Axis4: 0/2 |
| 4 | Axis1: 0/2 | Axis2: 2/3 | Axis3: 0/3 | Axis4: 0/2 |
| 5 | Axis1: 1/2 | Axis2: 2/3 | Axis3: 0/3 | Axis4: 0/2 |
| 6 | Axis1: 0/2 | Axis2: 0/3 | Axis3: 1/3 | Axis4: 0/2 |
| 7 | Axis1: 1/2 | Axis2: 0/3 | Axis3: 1/3 | Axis4: 0/2 |
| 8 | Axis1: 0/2 | Axis2: 1/3 | Axis3: 1/3 | Axis4: 0/2 |
| 9 | Axis1: 1/2 | Axis2: 1/3 | Axis3: 1/3 | Axis4: 0/2 |
| 10 | Axis1: 0/2 | Axis2: 2/3 | Axis3: 1/3 | Axis4: 0/2 |
| 11 | Axis1: 1/2 | Axis2: 2/3 | Axis3: 1/3 | Axis4: 0/2 |
| 12 | Axis1: 0/2 | Axis2: 0/3 | Axis3: 2/3 | Axis4: 0/2 |
| 13 | Axis1: 1/2 | Axis2: 0/3 | Axis3: 2/3 | Axis4: 0/2 |
| 14 | Axis1: 0/2 | Axis2: 1/3 | Axis3: 2/3 | Axis4: 0/2 |
| 15 | Axis1: 1/2 | Axis2: 1/3 | Axis3: 2/3 | Axis4: 0/2 |
| 16 | Axis1: 0/2 | Axis2: 2/3 | Axis3: 2/3 | Axis4: 0/2 |
| 17 | Axis1: 1/2 | Axis2: 2/3 | Axis3: 2/3 | Axis4: 0/2 |
| 18 | Axis1: 0/2 | Axis2: 0/3 | Axis3: 0/3 | Axis4: 1/2 |
| 19 | Axis1: 1/2 | Axis2: 0/3 | Axis3: 0/3 | Axis4: 1/2 |
| 20 | Axis1: 0/2 | Axis2: 1/3 | Axis3: 0/3 | Axis4: 1/2 |
| 21 | Axis1: 1/2 | Axis2: 1/3 | Axis3: 0/3 | Axis4: 1/2 |
| 22 | Axis1: 0/2 | Axis2: 2/3 | Axis3: 0/3 | Axis4: 1/2 |
| 23 | Axis1: 1/2 | Axis2: 2/3 | Axis3: 0/3 | Axis4: 1/2 |
| 24 | Axis1: 0/2 | Axis2: 0/3 | Axis3: 1/3 | Axis4: 1/2 |
| 25 | Axis1: 1/2 | Axis2: 0/3 | Axis3: 1/3 | Axis4: 1/2 |
| 26 | Axis1: 0/2 | Axis2: 1/3 | Axis3: 1/3 | Axis4: 1/2 |
| 27 | Axis1: 1/2 | Axis2: 1/3 | Axis3: 1/3 | Axis4: 1/2 |
| 28 | Axis1: 0/2 | Axis2: 2/3 | Axis3: 1/3 | Axis4: 1/2 |
| 29 | Axis1: 1/2 | Axis2: 2/3 | Axis3: 1/3 | Axis4: 1/2 |
| 30 | Axis1: 0/2 | Axis2: 0/3 | Axis3: 2/3 | Axis4: 1/2 |
| 31 | Axis1: 1/2 | Axis2: 0/3 | Axis3: 2/3 | Axis4: 1/2 |
| 32 | Axis1: 0/2 | Axis2: 1/3 | Axis3: 2/3 | Axis4: 1/2 |
| 33 | Axis1: 1/2 | Axis2: 1/3 | Axis3: 2/3 | Axis4: 1/2 |
| 34 | Axis1: 0/2 | Axis2: 2/3 | Axis3: 2/3 | Axis4: 1/2 |
| 35 | Axis1: 1/2 | Axis2: 2/3 | Axis3: 2/3 | Axis4: 1/2 |
)expected";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
void test_create()
{
dummy_bench bench;
bench.set_devices(std::vector<int>{});
bench.add_float64_axis("Radians", {3.14, 6.28});
bench.add_int64_axis("VecSize", {2, 3, 4}, nvbench::int64_axis_flags::none);
bench.add_int64_axis("NumInputs",
{10, 15, 20},
nvbench::int64_axis_flags::power_of_two);
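// With int64_axis_flags::power_of_two the listed values are exponents, so
// this axis expands to 2^10, 2^15, 2^20 (1024, 32768, 1048576), matching
// the NumInputs column in the reference table below.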
bench.add_string_axis("Strategy", {"Recursive", "Iterative"});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
// 2 (Radians) * 3 (VecSize) * 3 (NumInputs) * 2 (Strategy) = 36
ASSERT(states.size() == 36);
fmt::memory_buffer buffer;
const std::string table_format =
"| {:^5} | {:^10} | {:^7} | {:^7} | {:^9} | {:^9} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
table_format,
"State",
"TypeConfig",
"Radians",
"VecSize",
"NumInputs",
"Strategy");
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
table_format,
config++,
state.get_type_config_index(),
state.get_float64("Radians"),
state.get_int64("VecSize"),
state.get_int64("NumInputs"),
state.get_string("Strategy"));
}
const std::string ref =
R"expected(
| State | TypeConfig | Radians | VecSize | NumInputs | Strategy |
| 0 | 0 | 3.14 | 2 | 1024 | Recursive |
| 1 | 0 | 6.28 | 2 | 1024 | Recursive |
| 2 | 0 | 3.14 | 3 | 1024 | Recursive |
| 3 | 0 | 6.28 | 3 | 1024 | Recursive |
| 4 | 0 | 3.14 | 4 | 1024 | Recursive |
| 5 | 0 | 6.28 | 4 | 1024 | Recursive |
| 6 | 0 | 3.14 | 2 | 32768 | Recursive |
| 7 | 0 | 6.28 | 2 | 32768 | Recursive |
| 8 | 0 | 3.14 | 3 | 32768 | Recursive |
| 9 | 0 | 6.28 | 3 | 32768 | Recursive |
| 10 | 0 | 3.14 | 4 | 32768 | Recursive |
| 11 | 0 | 6.28 | 4 | 32768 | Recursive |
| 12 | 0 | 3.14 | 2 | 1048576 | Recursive |
| 13 | 0 | 6.28 | 2 | 1048576 | Recursive |
| 14 | 0 | 3.14 | 3 | 1048576 | Recursive |
| 15 | 0 | 6.28 | 3 | 1048576 | Recursive |
| 16 | 0 | 3.14 | 4 | 1048576 | Recursive |
| 17 | 0 | 6.28 | 4 | 1048576 | Recursive |
| 18 | 0 | 3.14 | 2 | 1024 | Iterative |
| 19 | 0 | 6.28 | 2 | 1024 | Iterative |
| 20 | 0 | 3.14 | 3 | 1024 | Iterative |
| 21 | 0 | 6.28 | 3 | 1024 | Iterative |
| 22 | 0 | 3.14 | 4 | 1024 | Iterative |
| 23 | 0 | 6.28 | 4 | 1024 | Iterative |
| 24 | 0 | 3.14 | 2 | 32768 | Iterative |
| 25 | 0 | 6.28 | 2 | 32768 | Iterative |
| 26 | 0 | 3.14 | 3 | 32768 | Iterative |
| 27 | 0 | 6.28 | 3 | 32768 | Iterative |
| 28 | 0 | 3.14 | 4 | 32768 | Iterative |
| 29 | 0 | 6.28 | 4 | 32768 | Iterative |
| 30 | 0 | 3.14 | 2 | 1048576 | Iterative |
| 31 | 0 | 6.28 | 2 | 1048576 | Iterative |
| 32 | 0 | 3.14 | 3 | 1048576 | Iterative |
| 33 | 0 | 6.28 | 3 | 1048576 | Iterative |
| 34 | 0 | 3.14 | 4 | 1048576 | Iterative |
| 35 | 0 | 6.28 | 4 | 1048576 | Iterative |
)expected";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
void test_create_with_types()
{
template_bench bench;
bench.set_devices(std::vector<int>{});
bench.set_type_axes_names({"Floats", "Ints", "Misc"});
bench.add_float64_axis("Radians", {3.14, 6.28});
bench.add_int64_axis("VecSize", {2, 3, 4}, nvbench::int64_axis_flags::none);
bench.add_int64_axis("NumInputs",
{10, 15, 20},
nvbench::int64_axis_flags::power_of_two);
bench.add_string_axis("Strategy", {"Recursive", "Iterative"});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
// - 2 (Floats) * 2 (Ints) * 2 (Misc) = 8 total type_configs
// - 2 (Radians) * 3 (VecSize) * 3 (NumInputs) * 2 (Strategy) = 36 non_type
// configs
ASSERT(states.size() == 8 * 36);
fmt::memory_buffer buffer;
std::string table_format = "| {:^5} | {:^10} | {:^6} | {:^4} | {:^4} | {:^7} "
"| {:^7} | {:^9} | {:^9} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
table_format,
"State",
"TypeConfig",
"Floats",
"Ints",
"Misc",
"Radians",
"VecSize",
"NumInputs",
"Strategy");
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
table_format,
config++,
state.get_type_config_index(),
state.get_string("Floats"),
state.get_string("Ints"),
state.get_string("Misc"),
state.get_float64("Radians"),
state.get_int64("VecSize"),
state.get_int64("NumInputs"),
state.get_string("Strategy"));
}
const std::string ref =
R"expected(
| State | TypeConfig | Floats | Ints | Misc | Radians | VecSize | NumInputs | Strategy |
| 0 | 0 | F32 | I32 | void | 3.14 | 2 | 1024 | Recursive |
| 1 | 0 | F32 | I32 | void | 6.28 | 2 | 1024 | Recursive |
| 2 | 0 | F32 | I32 | void | 3.14 | 3 | 1024 | Recursive |
| 3 | 0 | F32 | I32 | void | 6.28 | 3 | 1024 | Recursive |
| 4 | 0 | F32 | I32 | void | 3.14 | 4 | 1024 | Recursive |
| 5 | 0 | F32 | I32 | void | 6.28 | 4 | 1024 | Recursive |
| 6 | 0 | F32 | I32 | void | 3.14 | 2 | 32768 | Recursive |
| 7 | 0 | F32 | I32 | void | 6.28 | 2 | 32768 | Recursive |
| 8 | 0 | F32 | I32 | void | 3.14 | 3 | 32768 | Recursive |
| 9 | 0 | F32 | I32 | void | 6.28 | 3 | 32768 | Recursive |
| 10 | 0 | F32 | I32 | void | 3.14 | 4 | 32768 | Recursive |
| 11 | 0 | F32 | I32 | void | 6.28 | 4 | 32768 | Recursive |
| 12 | 0 | F32 | I32 | void | 3.14 | 2 | 1048576 | Recursive |
| 13 | 0 | F32 | I32 | void | 6.28 | 2 | 1048576 | Recursive |
| 14 | 0 | F32 | I32 | void | 3.14 | 3 | 1048576 | Recursive |
| 15 | 0 | F32 | I32 | void | 6.28 | 3 | 1048576 | Recursive |
| 16 | 0 | F32 | I32 | void | 3.14 | 4 | 1048576 | Recursive |
| 17 | 0 | F32 | I32 | void | 6.28 | 4 | 1048576 | Recursive |
| 18 | 0 | F32 | I32 | void | 3.14 | 2 | 1024 | Iterative |
| 19 | 0 | F32 | I32 | void | 6.28 | 2 | 1024 | Iterative |
| 20 | 0 | F32 | I32 | void | 3.14 | 3 | 1024 | Iterative |
| 21 | 0 | F32 | I32 | void | 6.28 | 3 | 1024 | Iterative |
| 22 | 0 | F32 | I32 | void | 3.14 | 4 | 1024 | Iterative |
| 23 | 0 | F32 | I32 | void | 6.28 | 4 | 1024 | Iterative |
| 24 | 0 | F32 | I32 | void | 3.14 | 2 | 32768 | Iterative |
| 25 | 0 | F32 | I32 | void | 6.28 | 2 | 32768 | Iterative |
| 26 | 0 | F32 | I32 | void | 3.14 | 3 | 32768 | Iterative |
| 27 | 0 | F32 | I32 | void | 6.28 | 3 | 32768 | Iterative |
| 28 | 0 | F32 | I32 | void | 3.14 | 4 | 32768 | Iterative |
| 29 | 0 | F32 | I32 | void | 6.28 | 4 | 32768 | Iterative |
| 30 | 0 | F32 | I32 | void | 3.14 | 2 | 1048576 | Iterative |
| 31 | 0 | F32 | I32 | void | 6.28 | 2 | 1048576 | Iterative |
| 32 | 0 | F32 | I32 | void | 3.14 | 3 | 1048576 | Iterative |
| 33 | 0 | F32 | I32 | void | 6.28 | 3 | 1048576 | Iterative |
| 34 | 0 | F32 | I32 | void | 3.14 | 4 | 1048576 | Iterative |
| 35 | 0 | F32 | I32 | void | 6.28 | 4 | 1048576 | Iterative |
| 36 | 1 | F32 | I32 | bool | 3.14 | 2 | 1024 | Recursive |
| 37 | 1 | F32 | I32 | bool | 6.28 | 2 | 1024 | Recursive |
| 38 | 1 | F32 | I32 | bool | 3.14 | 3 | 1024 | Recursive |
| 39 | 1 | F32 | I32 | bool | 6.28 | 3 | 1024 | Recursive |
| 40 | 1 | F32 | I32 | bool | 3.14 | 4 | 1024 | Recursive |
| 41 | 1 | F32 | I32 | bool | 6.28 | 4 | 1024 | Recursive |
| 42 | 1 | F32 | I32 | bool | 3.14 | 2 | 32768 | Recursive |
| 43 | 1 | F32 | I32 | bool | 6.28 | 2 | 32768 | Recursive |
| 44 | 1 | F32 | I32 | bool | 3.14 | 3 | 32768 | Recursive |
| 45 | 1 | F32 | I32 | bool | 6.28 | 3 | 32768 | Recursive |
| 46 | 1 | F32 | I32 | bool | 3.14 | 4 | 32768 | Recursive |
| 47 | 1 | F32 | I32 | bool | 6.28 | 4 | 32768 | Recursive |
| 48 | 1 | F32 | I32 | bool | 3.14 | 2 | 1048576 | Recursive |
| 49 | 1 | F32 | I32 | bool | 6.28 | 2 | 1048576 | Recursive |
| 50 | 1 | F32 | I32 | bool | 3.14 | 3 | 1048576 | Recursive |
| 51 | 1 | F32 | I32 | bool | 6.28 | 3 | 1048576 | Recursive |
| 52 | 1 | F32 | I32 | bool | 3.14 | 4 | 1048576 | Recursive |
| 53 | 1 | F32 | I32 | bool | 6.28 | 4 | 1048576 | Recursive |
| 54 | 1 | F32 | I32 | bool | 3.14 | 2 | 1024 | Iterative |
| 55 | 1 | F32 | I32 | bool | 6.28 | 2 | 1024 | Iterative |
| 56 | 1 | F32 | I32 | bool | 3.14 | 3 | 1024 | Iterative |
| 57 | 1 | F32 | I32 | bool | 6.28 | 3 | 1024 | Iterative |
| 58 | 1 | F32 | I32 | bool | 3.14 | 4 | 1024 | Iterative |
| 59 | 1 | F32 | I32 | bool | 6.28 | 4 | 1024 | Iterative |
| 60 | 1 | F32 | I32 | bool | 3.14 | 2 | 32768 | Iterative |
| 61 | 1 | F32 | I32 | bool | 6.28 | 2 | 32768 | Iterative |
| 62 | 1 | F32 | I32 | bool | 3.14 | 3 | 32768 | Iterative |
| 63 | 1 | F32 | I32 | bool | 6.28 | 3 | 32768 | Iterative |
| 64 | 1 | F32 | I32 | bool | 3.14 | 4 | 32768 | Iterative |
| 65 | 1 | F32 | I32 | bool | 6.28 | 4 | 32768 | Iterative |
| 66 | 1 | F32 | I32 | bool | 3.14 | 2 | 1048576 | Iterative |
| 67 | 1 | F32 | I32 | bool | 6.28 | 2 | 1048576 | Iterative |
| 68 | 1 | F32 | I32 | bool | 3.14 | 3 | 1048576 | Iterative |
| 69 | 1 | F32 | I32 | bool | 6.28 | 3 | 1048576 | Iterative |
| 70 | 1 | F32 | I32 | bool | 3.14 | 4 | 1048576 | Iterative |
| 71 | 1 | F32 | I32 | bool | 6.28 | 4 | 1048576 | Iterative |
| 72 | 2 | F32 | I64 | void | 3.14 | 2 | 1024 | Recursive |
| 73 | 2 | F32 | I64 | void | 6.28 | 2 | 1024 | Recursive |
| 74 | 2 | F32 | I64 | void | 3.14 | 3 | 1024 | Recursive |
| 75 | 2 | F32 | I64 | void | 6.28 | 3 | 1024 | Recursive |
| 76 | 2 | F32 | I64 | void | 3.14 | 4 | 1024 | Recursive |
| 77 | 2 | F32 | I64 | void | 6.28 | 4 | 1024 | Recursive |
| 78 | 2 | F32 | I64 | void | 3.14 | 2 | 32768 | Recursive |
| 79 | 2 | F32 | I64 | void | 6.28 | 2 | 32768 | Recursive |
| 80 | 2 | F32 | I64 | void | 3.14 | 3 | 32768 | Recursive |
| 81 | 2 | F32 | I64 | void | 6.28 | 3 | 32768 | Recursive |
| 82 | 2 | F32 | I64 | void | 3.14 | 4 | 32768 | Recursive |
| 83 | 2 | F32 | I64 | void | 6.28 | 4 | 32768 | Recursive |
| 84 | 2 | F32 | I64 | void | 3.14 | 2 | 1048576 | Recursive |
| 85 | 2 | F32 | I64 | void | 6.28 | 2 | 1048576 | Recursive |
| 86 | 2 | F32 | I64 | void | 3.14 | 3 | 1048576 | Recursive |
| 87 | 2 | F32 | I64 | void | 6.28 | 3 | 1048576 | Recursive |
| 88 | 2 | F32 | I64 | void | 3.14 | 4 | 1048576 | Recursive |
| 89 | 2 | F32 | I64 | void | 6.28 | 4 | 1048576 | Recursive |
| 90 | 2 | F32 | I64 | void | 3.14 | 2 | 1024 | Iterative |
| 91 | 2 | F32 | I64 | void | 6.28 | 2 | 1024 | Iterative |
| 92 | 2 | F32 | I64 | void | 3.14 | 3 | 1024 | Iterative |
| 93 | 2 | F32 | I64 | void | 6.28 | 3 | 1024 | Iterative |
| 94 | 2 | F32 | I64 | void | 3.14 | 4 | 1024 | Iterative |
| 95 | 2 | F32 | I64 | void | 6.28 | 4 | 1024 | Iterative |
| 96 | 2 | F32 | I64 | void | 3.14 | 2 | 32768 | Iterative |
| 97 | 2 | F32 | I64 | void | 6.28 | 2 | 32768 | Iterative |
| 98 | 2 | F32 | I64 | void | 3.14 | 3 | 32768 | Iterative |
| 99 | 2 | F32 | I64 | void | 6.28 | 3 | 32768 | Iterative |
| 100 | 2 | F32 | I64 | void | 3.14 | 4 | 32768 | Iterative |
| 101 | 2 | F32 | I64 | void | 6.28 | 4 | 32768 | Iterative |
| 102 | 2 | F32 | I64 | void | 3.14 | 2 | 1048576 | Iterative |
| 103 | 2 | F32 | I64 | void | 6.28 | 2 | 1048576 | Iterative |
| 104 | 2 | F32 | I64 | void | 3.14 | 3 | 1048576 | Iterative |
| 105 | 2 | F32 | I64 | void | 6.28 | 3 | 1048576 | Iterative |
| 106 | 2 | F32 | I64 | void | 3.14 | 4 | 1048576 | Iterative |
| 107 | 2 | F32 | I64 | void | 6.28 | 4 | 1048576 | Iterative |
| 108 | 3 | F32 | I64 | bool | 3.14 | 2 | 1024 | Recursive |
| 109 | 3 | F32 | I64 | bool | 6.28 | 2 | 1024 | Recursive |
| 110 | 3 | F32 | I64 | bool | 3.14 | 3 | 1024 | Recursive |
| 111 | 3 | F32 | I64 | bool | 6.28 | 3 | 1024 | Recursive |
| 112 | 3 | F32 | I64 | bool | 3.14 | 4 | 1024 | Recursive |
| 113 | 3 | F32 | I64 | bool | 6.28 | 4 | 1024 | Recursive |
| 114 | 3 | F32 | I64 | bool | 3.14 | 2 | 32768 | Recursive |
| 115 | 3 | F32 | I64 | bool | 6.28 | 2 | 32768 | Recursive |
| 116 | 3 | F32 | I64 | bool | 3.14 | 3 | 32768 | Recursive |
| 117 | 3 | F32 | I64 | bool | 6.28 | 3 | 32768 | Recursive |
| 118 | 3 | F32 | I64 | bool | 3.14 | 4 | 32768 | Recursive |
| 119 | 3 | F32 | I64 | bool | 6.28 | 4 | 32768 | Recursive |
| 120 | 3 | F32 | I64 | bool | 3.14 | 2 | 1048576 | Recursive |
| 121 | 3 | F32 | I64 | bool | 6.28 | 2 | 1048576 | Recursive |
| 122 | 3 | F32 | I64 | bool | 3.14 | 3 | 1048576 | Recursive |
| 123 | 3 | F32 | I64 | bool | 6.28 | 3 | 1048576 | Recursive |
| 124 | 3 | F32 | I64 | bool | 3.14 | 4 | 1048576 | Recursive |
| 125 | 3 | F32 | I64 | bool | 6.28 | 4 | 1048576 | Recursive |
| 126 | 3 | F32 | I64 | bool | 3.14 | 2 | 1024 | Iterative |
| 127 | 3 | F32 | I64 | bool | 6.28 | 2 | 1024 | Iterative |
| 128 | 3 | F32 | I64 | bool | 3.14 | 3 | 1024 | Iterative |
| 129 | 3 | F32 | I64 | bool | 6.28 | 3 | 1024 | Iterative |
| 130 | 3 | F32 | I64 | bool | 3.14 | 4 | 1024 | Iterative |
| 131 | 3 | F32 | I64 | bool | 6.28 | 4 | 1024 | Iterative |
| 132 | 3 | F32 | I64 | bool | 3.14 | 2 | 32768 | Iterative |
| 133 | 3 | F32 | I64 | bool | 6.28 | 2 | 32768 | Iterative |
| 134 | 3 | F32 | I64 | bool | 3.14 | 3 | 32768 | Iterative |
| 135 | 3 | F32 | I64 | bool | 6.28 | 3 | 32768 | Iterative |
| 136 | 3 | F32 | I64 | bool | 3.14 | 4 | 32768 | Iterative |
| 137 | 3 | F32 | I64 | bool | 6.28 | 4 | 32768 | Iterative |
| 138 | 3 | F32 | I64 | bool | 3.14 | 2 | 1048576 | Iterative |
| 139 | 3 | F32 | I64 | bool | 6.28 | 2 | 1048576 | Iterative |
| 140 | 3 | F32 | I64 | bool | 3.14 | 3 | 1048576 | Iterative |
| 141 | 3 | F32 | I64 | bool | 6.28 | 3 | 1048576 | Iterative |
| 142 | 3 | F32 | I64 | bool | 3.14 | 4 | 1048576 | Iterative |
| 143 | 3 | F32 | I64 | bool | 6.28 | 4 | 1048576 | Iterative |
| 144 | 4 | F64 | I32 | void | 3.14 | 2 | 1024 | Recursive |
| 145 | 4 | F64 | I32 | void | 6.28 | 2 | 1024 | Recursive |
| 146 | 4 | F64 | I32 | void | 3.14 | 3 | 1024 | Recursive |
| 147 | 4 | F64 | I32 | void | 6.28 | 3 | 1024 | Recursive |
| 148 | 4 | F64 | I32 | void | 3.14 | 4 | 1024 | Recursive |
| 149 | 4 | F64 | I32 | void | 6.28 | 4 | 1024 | Recursive |
| 150 | 4 | F64 | I32 | void | 3.14 | 2 | 32768 | Recursive |
| 151 | 4 | F64 | I32 | void | 6.28 | 2 | 32768 | Recursive |
| 152 | 4 | F64 | I32 | void | 3.14 | 3 | 32768 | Recursive |
| 153 | 4 | F64 | I32 | void | 6.28 | 3 | 32768 | Recursive |
| 154 | 4 | F64 | I32 | void | 3.14 | 4 | 32768 | Recursive |
| 155 | 4 | F64 | I32 | void | 6.28 | 4 | 32768 | Recursive |
| 156 | 4 | F64 | I32 | void | 3.14 | 2 | 1048576 | Recursive |
| 157 | 4 | F64 | I32 | void | 6.28 | 2 | 1048576 | Recursive |
| 158 | 4 | F64 | I32 | void | 3.14 | 3 | 1048576 | Recursive |
| 159 | 4 | F64 | I32 | void | 6.28 | 3 | 1048576 | Recursive |
| 160 | 4 | F64 | I32 | void | 3.14 | 4 | 1048576 | Recursive |
| 161 | 4 | F64 | I32 | void | 6.28 | 4 | 1048576 | Recursive |
| 162 | 4 | F64 | I32 | void | 3.14 | 2 | 1024 | Iterative |
| 163 | 4 | F64 | I32 | void | 6.28 | 2 | 1024 | Iterative |
| 164 | 4 | F64 | I32 | void | 3.14 | 3 | 1024 | Iterative |
| 165 | 4 | F64 | I32 | void | 6.28 | 3 | 1024 | Iterative |
| 166 | 4 | F64 | I32 | void | 3.14 | 4 | 1024 | Iterative |
| 167 | 4 | F64 | I32 | void | 6.28 | 4 | 1024 | Iterative |
| 168 | 4 | F64 | I32 | void | 3.14 | 2 | 32768 | Iterative |
| 169 | 4 | F64 | I32 | void | 6.28 | 2 | 32768 | Iterative |
| 170 | 4 | F64 | I32 | void | 3.14 | 3 | 32768 | Iterative |
| 171 | 4 | F64 | I32 | void | 6.28 | 3 | 32768 | Iterative |
| 172 | 4 | F64 | I32 | void | 3.14 | 4 | 32768 | Iterative |
| 173 | 4 | F64 | I32 | void | 6.28 | 4 | 32768 | Iterative |
| 174 | 4 | F64 | I32 | void | 3.14 | 2 | 1048576 | Iterative |
| 175 | 4 | F64 | I32 | void | 6.28 | 2 | 1048576 | Iterative |
| 176 | 4 | F64 | I32 | void | 3.14 | 3 | 1048576 | Iterative |
| 177 | 4 | F64 | I32 | void | 6.28 | 3 | 1048576 | Iterative |
| 178 | 4 | F64 | I32 | void | 3.14 | 4 | 1048576 | Iterative |
| 179 | 4 | F64 | I32 | void | 6.28 | 4 | 1048576 | Iterative |
| 180 | 5 | F64 | I32 | bool | 3.14 | 2 | 1024 | Recursive |
| 181 | 5 | F64 | I32 | bool | 6.28 | 2 | 1024 | Recursive |
| 182 | 5 | F64 | I32 | bool | 3.14 | 3 | 1024 | Recursive |
| 183 | 5 | F64 | I32 | bool | 6.28 | 3 | 1024 | Recursive |
| 184 | 5 | F64 | I32 | bool | 3.14 | 4 | 1024 | Recursive |
| 185 | 5 | F64 | I32 | bool | 6.28 | 4 | 1024 | Recursive |
| 186 | 5 | F64 | I32 | bool | 3.14 | 2 | 32768 | Recursive |
| 187 | 5 | F64 | I32 | bool | 6.28 | 2 | 32768 | Recursive |
| 188 | 5 | F64 | I32 | bool | 3.14 | 3 | 32768 | Recursive |
| 189 | 5 | F64 | I32 | bool | 6.28 | 3 | 32768 | Recursive |
| 190 | 5 | F64 | I32 | bool | 3.14 | 4 | 32768 | Recursive |
| 191 | 5 | F64 | I32 | bool | 6.28 | 4 | 32768 | Recursive |
| 192 | 5 | F64 | I32 | bool | 3.14 | 2 | 1048576 | Recursive |
| 193 | 5 | F64 | I32 | bool | 6.28 | 2 | 1048576 | Recursive |
| 194 | 5 | F64 | I32 | bool | 3.14 | 3 | 1048576 | Recursive |
| 195 | 5 | F64 | I32 | bool | 6.28 | 3 | 1048576 | Recursive |
| 196 | 5 | F64 | I32 | bool | 3.14 | 4 | 1048576 | Recursive |
| 197 | 5 | F64 | I32 | bool | 6.28 | 4 | 1048576 | Recursive |
| 198 | 5 | F64 | I32 | bool | 3.14 | 2 | 1024 | Iterative |
| 199 | 5 | F64 | I32 | bool | 6.28 | 2 | 1024 | Iterative |
| 200 | 5 | F64 | I32 | bool | 3.14 | 3 | 1024 | Iterative |
| 201 | 5 | F64 | I32 | bool | 6.28 | 3 | 1024 | Iterative |
| 202 | 5 | F64 | I32 | bool | 3.14 | 4 | 1024 | Iterative |
| 203 | 5 | F64 | I32 | bool | 6.28 | 4 | 1024 | Iterative |
| 204 | 5 | F64 | I32 | bool | 3.14 | 2 | 32768 | Iterative |
| 205 | 5 | F64 | I32 | bool | 6.28 | 2 | 32768 | Iterative |
| 206 | 5 | F64 | I32 | bool | 3.14 | 3 | 32768 | Iterative |
| 207 | 5 | F64 | I32 | bool | 6.28 | 3 | 32768 | Iterative |
| 208 | 5 | F64 | I32 | bool | 3.14 | 4 | 32768 | Iterative |
| 209 | 5 | F64 | I32 | bool | 6.28 | 4 | 32768 | Iterative |
| 210 | 5 | F64 | I32 | bool | 3.14 | 2 | 1048576 | Iterative |
| 211 | 5 | F64 | I32 | bool | 6.28 | 2 | 1048576 | Iterative |
| 212 | 5 | F64 | I32 | bool | 3.14 | 3 | 1048576 | Iterative |
| 213 | 5 | F64 | I32 | bool | 6.28 | 3 | 1048576 | Iterative |
| 214 | 5 | F64 | I32 | bool | 3.14 | 4 | 1048576 | Iterative |
| 215 | 5 | F64 | I32 | bool | 6.28 | 4 | 1048576 | Iterative |
| 216 | 6 | F64 | I64 | void | 3.14 | 2 | 1024 | Recursive |
| 217 | 6 | F64 | I64 | void | 6.28 | 2 | 1024 | Recursive |
| 218 | 6 | F64 | I64 | void | 3.14 | 3 | 1024 | Recursive |
| 219 | 6 | F64 | I64 | void | 6.28 | 3 | 1024 | Recursive |
| 220 | 6 | F64 | I64 | void | 3.14 | 4 | 1024 | Recursive |
| 221 | 6 | F64 | I64 | void | 6.28 | 4 | 1024 | Recursive |
| 222 | 6 | F64 | I64 | void | 3.14 | 2 | 32768 | Recursive |
| 223 | 6 | F64 | I64 | void | 6.28 | 2 | 32768 | Recursive |
| 224 | 6 | F64 | I64 | void | 3.14 | 3 | 32768 | Recursive |
| 225 | 6 | F64 | I64 | void | 6.28 | 3 | 32768 | Recursive |
| 226 | 6 | F64 | I64 | void | 3.14 | 4 | 32768 | Recursive |
| 227 | 6 | F64 | I64 | void | 6.28 | 4 | 32768 | Recursive |
| 228 | 6 | F64 | I64 | void | 3.14 | 2 | 1048576 | Recursive |
| 229 | 6 | F64 | I64 | void | 6.28 | 2 | 1048576 | Recursive |
| 230 | 6 | F64 | I64 | void | 3.14 | 3 | 1048576 | Recursive |
| 231 | 6 | F64 | I64 | void | 6.28 | 3 | 1048576 | Recursive |
| 232 | 6 | F64 | I64 | void | 3.14 | 4 | 1048576 | Recursive |
| 233 | 6 | F64 | I64 | void | 6.28 | 4 | 1048576 | Recursive |
| 234 | 6 | F64 | I64 | void | 3.14 | 2 | 1024 | Iterative |
| 235 | 6 | F64 | I64 | void | 6.28 | 2 | 1024 | Iterative |
| 236 | 6 | F64 | I64 | void | 3.14 | 3 | 1024 | Iterative |
| 237 | 6 | F64 | I64 | void | 6.28 | 3 | 1024 | Iterative |
| 238 | 6 | F64 | I64 | void | 3.14 | 4 | 1024 | Iterative |
| 239 | 6 | F64 | I64 | void | 6.28 | 4 | 1024 | Iterative |
| 240 | 6 | F64 | I64 | void | 3.14 | 2 | 32768 | Iterative |
| 241 | 6 | F64 | I64 | void | 6.28 | 2 | 32768 | Iterative |
| 242 | 6 | F64 | I64 | void | 3.14 | 3 | 32768 | Iterative |
| 243 | 6 | F64 | I64 | void | 6.28 | 3 | 32768 | Iterative |
| 244 | 6 | F64 | I64 | void | 3.14 | 4 | 32768 | Iterative |
| 245 | 6 | F64 | I64 | void | 6.28 | 4 | 32768 | Iterative |
| 246 | 6 | F64 | I64 | void | 3.14 | 2 | 1048576 | Iterative |
| 247 | 6 | F64 | I64 | void | 6.28 | 2 | 1048576 | Iterative |
| 248 | 6 | F64 | I64 | void | 3.14 | 3 | 1048576 | Iterative |
| 249 | 6 | F64 | I64 | void | 6.28 | 3 | 1048576 | Iterative |
| 250 | 6 | F64 | I64 | void | 3.14 | 4 | 1048576 | Iterative |
| 251 | 6 | F64 | I64 | void | 6.28 | 4 | 1048576 | Iterative |
| 252 | 7 | F64 | I64 | bool | 3.14 | 2 | 1024 | Recursive |
| 253 | 7 | F64 | I64 | bool | 6.28 | 2 | 1024 | Recursive |
| 254 | 7 | F64 | I64 | bool | 3.14 | 3 | 1024 | Recursive |
| 255 | 7 | F64 | I64 | bool | 6.28 | 3 | 1024 | Recursive |
| 256 | 7 | F64 | I64 | bool | 3.14 | 4 | 1024 | Recursive |
| 257 | 7 | F64 | I64 | bool | 6.28 | 4 | 1024 | Recursive |
| 258 | 7 | F64 | I64 | bool | 3.14 | 2 | 32768 | Recursive |
| 259 | 7 | F64 | I64 | bool | 6.28 | 2 | 32768 | Recursive |
| 260 | 7 | F64 | I64 | bool | 3.14 | 3 | 32768 | Recursive |
| 261 | 7 | F64 | I64 | bool | 6.28 | 3 | 32768 | Recursive |
| 262 | 7 | F64 | I64 | bool | 3.14 | 4 | 32768 | Recursive |
| 263 | 7 | F64 | I64 | bool | 6.28 | 4 | 32768 | Recursive |
| 264 | 7 | F64 | I64 | bool | 3.14 | 2 | 1048576 | Recursive |
| 265 | 7 | F64 | I64 | bool | 6.28 | 2 | 1048576 | Recursive |
| 266 | 7 | F64 | I64 | bool | 3.14 | 3 | 1048576 | Recursive |
| 267 | 7 | F64 | I64 | bool | 6.28 | 3 | 1048576 | Recursive |
| 268 | 7 | F64 | I64 | bool | 3.14 | 4 | 1048576 | Recursive |
| 269 | 7 | F64 | I64 | bool | 6.28 | 4 | 1048576 | Recursive |
| 270 | 7 | F64 | I64 | bool | 3.14 | 2 | 1024 | Iterative |
| 271 | 7 | F64 | I64 | bool | 6.28 | 2 | 1024 | Iterative |
| 272 | 7 | F64 | I64 | bool | 3.14 | 3 | 1024 | Iterative |
| 273 | 7 | F64 | I64 | bool | 6.28 | 3 | 1024 | Iterative |
| 274 | 7 | F64 | I64 | bool | 3.14 | 4 | 1024 | Iterative |
| 275 | 7 | F64 | I64 | bool | 6.28 | 4 | 1024 | Iterative |
| 276 | 7 | F64 | I64 | bool | 3.14 | 2 | 32768 | Iterative |
| 277 | 7 | F64 | I64 | bool | 6.28 | 2 | 32768 | Iterative |
| 278 | 7 | F64 | I64 | bool | 3.14 | 3 | 32768 | Iterative |
| 279 | 7 | F64 | I64 | bool | 6.28 | 3 | 32768 | Iterative |
| 280 | 7 | F64 | I64 | bool | 3.14 | 4 | 32768 | Iterative |
| 281 | 7 | F64 | I64 | bool | 6.28 | 4 | 32768 | Iterative |
| 282 | 7 | F64 | I64 | bool | 3.14 | 2 | 1048576 | Iterative |
| 283 | 7 | F64 | I64 | bool | 6.28 | 2 | 1048576 | Iterative |
| 284 | 7 | F64 | I64 | bool | 3.14 | 3 | 1048576 | Iterative |
| 285 | 7 | F64 | I64 | bool | 6.28 | 3 | 1048576 | Iterative |
| 286 | 7 | F64 | I64 | bool | 3.14 | 4 | 1048576 | Iterative |
| 287 | 7 | F64 | I64 | bool | 6.28 | 4 | 1048576 | Iterative |
)expected";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
void test_create_with_masked_types()
{
template_bench bench;
bench.set_devices(std::vector<int>{});
bench.set_type_axes_names({"Floats", "Ints", "Misc"});
bench.add_float64_axis("Radians", {3.14, 6.28});
bench.add_int64_axis("VecSize", {2, 3, 4}, nvbench::int64_axis_flags::none);
bench.add_int64_axis("NumInputs",
{10, 15, 20},
nvbench::int64_axis_flags::power_of_two);
bench.add_string_axis("Strategy", {"Recursive", "Iterative"});
// Mask out some types:
bench.get_axes().get_type_axis("Floats").set_active_inputs({"F32"});
bench.get_axes().get_type_axis("Ints").set_active_inputs({"I64"});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
fmt::memory_buffer buffer;
std::string table_format = "| {:^5} | {:^10} | {:^6} | {:^4} | {:^4} | {:^7} "
"| {:^7} | {:^9} | {:^9} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer,
table_format,
"State",
"TypeConfig",
"Floats",
"Ints",
"Misc",
"Radians",
"VecSize",
"NumInputs",
"Strategy");
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
table_format,
config++,
state.get_type_config_index(),
state.get_string("Floats"),
state.get_string("Ints"),
state.get_string("Misc"),
state.get_float64("Radians"),
state.get_int64("VecSize"),
state.get_int64("NumInputs"),
state.get_string("Strategy"));
}
const std::string ref =
R"expected(
| State | TypeConfig | Floats | Ints | Misc | Radians | VecSize | NumInputs | Strategy |
| 0 | 2 | F32 | I64 | void | 3.14 | 2 | 1024 | Recursive |
| 1 | 2 | F32 | I64 | void | 6.28 | 2 | 1024 | Recursive |
| 2 | 2 | F32 | I64 | void | 3.14 | 3 | 1024 | Recursive |
| 3 | 2 | F32 | I64 | void | 6.28 | 3 | 1024 | Recursive |
| 4 | 2 | F32 | I64 | void | 3.14 | 4 | 1024 | Recursive |
| 5 | 2 | F32 | I64 | void | 6.28 | 4 | 1024 | Recursive |
| 6 | 2 | F32 | I64 | void | 3.14 | 2 | 32768 | Recursive |
| 7 | 2 | F32 | I64 | void | 6.28 | 2 | 32768 | Recursive |
| 8 | 2 | F32 | I64 | void | 3.14 | 3 | 32768 | Recursive |
| 9 | 2 | F32 | I64 | void | 6.28 | 3 | 32768 | Recursive |
| 10 | 2 | F32 | I64 | void | 3.14 | 4 | 32768 | Recursive |
| 11 | 2 | F32 | I64 | void | 6.28 | 4 | 32768 | Recursive |
| 12 | 2 | F32 | I64 | void | 3.14 | 2 | 1048576 | Recursive |
| 13 | 2 | F32 | I64 | void | 6.28 | 2 | 1048576 | Recursive |
| 14 | 2 | F32 | I64 | void | 3.14 | 3 | 1048576 | Recursive |
| 15 | 2 | F32 | I64 | void | 6.28 | 3 | 1048576 | Recursive |
| 16 | 2 | F32 | I64 | void | 3.14 | 4 | 1048576 | Recursive |
| 17 | 2 | F32 | I64 | void | 6.28 | 4 | 1048576 | Recursive |
| 18 | 2 | F32 | I64 | void | 3.14 | 2 | 1024 | Iterative |
| 19 | 2 | F32 | I64 | void | 6.28 | 2 | 1024 | Iterative |
| 20 | 2 | F32 | I64 | void | 3.14 | 3 | 1024 | Iterative |
| 21 | 2 | F32 | I64 | void | 6.28 | 3 | 1024 | Iterative |
| 22 | 2 | F32 | I64 | void | 3.14 | 4 | 1024 | Iterative |
| 23 | 2 | F32 | I64 | void | 6.28 | 4 | 1024 | Iterative |
| 24 | 2 | F32 | I64 | void | 3.14 | 2 | 32768 | Iterative |
| 25 | 2 | F32 | I64 | void | 6.28 | 2 | 32768 | Iterative |
| 26 | 2 | F32 | I64 | void | 3.14 | 3 | 32768 | Iterative |
| 27 | 2 | F32 | I64 | void | 6.28 | 3 | 32768 | Iterative |
| 28 | 2 | F32 | I64 | void | 3.14 | 4 | 32768 | Iterative |
| 29 | 2 | F32 | I64 | void | 6.28 | 4 | 32768 | Iterative |
| 30 | 2 | F32 | I64 | void | 3.14 | 2 | 1048576 | Iterative |
| 31 | 2 | F32 | I64 | void | 6.28 | 2 | 1048576 | Iterative |
| 32 | 2 | F32 | I64 | void | 3.14 | 3 | 1048576 | Iterative |
| 33 | 2 | F32 | I64 | void | 6.28 | 3 | 1048576 | Iterative |
| 34 | 2 | F32 | I64 | void | 3.14 | 4 | 1048576 | Iterative |
| 35 | 2 | F32 | I64 | void | 6.28 | 4 | 1048576 | Iterative |
| 36 | 3 | F32 | I64 | bool | 3.14 | 2 | 1024 | Recursive |
| 37 | 3 | F32 | I64 | bool | 6.28 | 2 | 1024 | Recursive |
| 38 | 3 | F32 | I64 | bool | 3.14 | 3 | 1024 | Recursive |
| 39 | 3 | F32 | I64 | bool | 6.28 | 3 | 1024 | Recursive |
| 40 | 3 | F32 | I64 | bool | 3.14 | 4 | 1024 | Recursive |
| 41 | 3 | F32 | I64 | bool | 6.28 | 4 | 1024 | Recursive |
| 42 | 3 | F32 | I64 | bool | 3.14 | 2 | 32768 | Recursive |
| 43 | 3 | F32 | I64 | bool | 6.28 | 2 | 32768 | Recursive |
| 44 | 3 | F32 | I64 | bool | 3.14 | 3 | 32768 | Recursive |
| 45 | 3 | F32 | I64 | bool | 6.28 | 3 | 32768 | Recursive |
| 46 | 3 | F32 | I64 | bool | 3.14 | 4 | 32768 | Recursive |
| 47 | 3 | F32 | I64 | bool | 6.28 | 4 | 32768 | Recursive |
| 48 | 3 | F32 | I64 | bool | 3.14 | 2 | 1048576 | Recursive |
| 49 | 3 | F32 | I64 | bool | 6.28 | 2 | 1048576 | Recursive |
| 50 | 3 | F32 | I64 | bool | 3.14 | 3 | 1048576 | Recursive |
| 51 | 3 | F32 | I64 | bool | 6.28 | 3 | 1048576 | Recursive |
| 52 | 3 | F32 | I64 | bool | 3.14 | 4 | 1048576 | Recursive |
| 53 | 3 | F32 | I64 | bool | 6.28 | 4 | 1048576 | Recursive |
| 54 | 3 | F32 | I64 | bool | 3.14 | 2 | 1024 | Iterative |
| 55 | 3 | F32 | I64 | bool | 6.28 | 2 | 1024 | Iterative |
| 56 | 3 | F32 | I64 | bool | 3.14 | 3 | 1024 | Iterative |
| 57 | 3 | F32 | I64 | bool | 6.28 | 3 | 1024 | Iterative |
| 58 | 3 | F32 | I64 | bool | 3.14 | 4 | 1024 | Iterative |
| 59 | 3 | F32 | I64 | bool | 6.28 | 4 | 1024 | Iterative |
| 60 | 3 | F32 | I64 | bool | 3.14 | 2 | 32768 | Iterative |
| 61 | 3 | F32 | I64 | bool | 6.28 | 2 | 32768 | Iterative |
| 62 | 3 | F32 | I64 | bool | 3.14 | 3 | 32768 | Iterative |
| 63 | 3 | F32 | I64 | bool | 6.28 | 3 | 32768 | Iterative |
| 64 | 3 | F32 | I64 | bool | 3.14 | 4 | 32768 | Iterative |
| 65 | 3 | F32 | I64 | bool | 6.28 | 4 | 32768 | Iterative |
| 66 | 3 | F32 | I64 | bool | 3.14 | 2 | 1048576 | Iterative |
| 67 | 3 | F32 | I64 | bool | 6.28 | 2 | 1048576 | Iterative |
| 68 | 3 | F32 | I64 | bool | 3.14 | 3 | 1048576 | Iterative |
| 69 | 3 | F32 | I64 | bool | 6.28 | 3 | 1048576 | Iterative |
| 70 | 3 | F32 | I64 | bool | 3.14 | 4 | 1048576 | Iterative |
| 71 | 3 | F32 | I64 | bool | 6.28 | 4 | 1048576 | Iterative |
)expected";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
void test_devices()
{
const auto device_0 = nvbench::device_info{0, {}};
const auto device_1 = nvbench::device_info{1, {}};
const auto device_2 = nvbench::device_info{2, {}};
dummy_bench bench;
bench.set_devices({device_0, device_1, device_2});
bench.add_string_axis("S", {"foo", "bar"});
bench.add_int64_axis("I", {2, 4});
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
// 3 devices * 4 axis configs = 12 total states
ASSERT(states.size() == 12);
fmt::memory_buffer buffer;
const std::string table_format = "| {:^5} | {:^6} | {:^5} | {:^3} |\n";
fmt::format_to(buffer, "\n");
fmt::format_to(buffer, table_format, "State", "Device", "S", "I");
std::size_t config = 0;
for (const auto &state : states)
{
fmt::format_to(buffer,
table_format,
config++,
state.get_device()->get_id(),
state.get_string("S"),
state.get_int64("I"));
}
const std::string ref =
R"expected(
| State | Device | S | I |
| 0 | 0 | foo | 2 |
| 1 | 0 | bar | 2 |
| 2 | 0 | foo | 4 |
| 3 | 0 | bar | 4 |
| 4 | 1 | foo | 2 |
| 5 | 1 | bar | 2 |
| 6 | 1 | foo | 4 |
| 7 | 1 | bar | 4 |
| 8 | 2 | foo | 2 |
| 9 | 2 | bar | 2 |
| 10 | 2 | foo | 4 |
| 11 | 2 | bar | 4 |
)expected";
const std::string test = fmt::to_string(buffer);
ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test);
}
void test_termination_criteria()
{
const nvbench::int64_t min_samples = 1000;
const nvbench::float64_t min_time = 2000;
const nvbench::float64_t max_noise = 3000;
const nvbench::float64_t skip_time = 4000;
const nvbench::float64_t timeout = 5000;
// for comparing floats
auto within_one = [](auto a, auto b) { return std::abs(a - b) < 1.; };
dummy_bench bench;
bench.set_devices(std::vector<int>{});
bench.set_min_samples(min_samples);
bench.set_min_time(min_time);
bench.set_max_noise(max_noise);
bench.set_skip_time(skip_time);
bench.set_timeout(timeout);
const std::vector<nvbench::state> states =
nvbench::detail::state_generator::create(bench);
ASSERT(states.size() == 1);
ASSERT(min_samples == states[0].get_min_samples());
ASSERT(within_one(min_time, states[0].get_min_time()));
ASSERT(within_one(max_noise, states[0].get_max_noise()));
ASSERT(within_one(skip_time, states[0].get_skip_time()));
ASSERT(within_one(timeout, states[0].get_timeout()));
}
int main()
try
{
test_empty();
test_single_state();
test_basic();
test_create();
test_create_with_types();
test_create_with_masked_types();
test_devices();
test_termination_criteria();
return 0;
}
catch (std::exception &e)
{
fmt::print("{}\n", e.what());
return 1;
}
#include "Device/Util/Basic.cuh" //xlib::shfl
#include "Device/Util/DeviceProperties.cuh" //xlib::WARP_SIZE
namespace xlib {
namespace detail {
template<int SIZE, int INDEX = 0>
struct Unroll {
template<typename Lambda>
__device__ __forceinline__
static void apply(const Lambda& lambda) {
lambda(INDEX);
Unroll<SIZE, INDEX + 1>::apply(lambda);
}
};
template<int SIZE>
struct Unroll<SIZE, SIZE> {
template<typename Lambda>
__device__ __forceinline__
static void apply(const Lambda&) {}
};
template<int SIZE = 1, int OFFSET = 0, int LEFT_BOUND = 0>
struct SMemReordering {
template<typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__
static void run(T (&reg)[ITEMS_PER_THREAD],
void* smem_thread,
void* smem_warp) {
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; i++)
static_cast<T*>(smem_thread)[i] = reg[i];
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; i++)
reg[i] = static_cast<T*>(smem_warp)[i * xlib::WARP_SIZE];
}
//--------------------------------------------------------------------------
template<typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__
static void run(const T (&reg_in)[ITEMS_PER_THREAD],
T (&reg_out)[ITEMS_PER_THREAD],
T* smem_thread,
T* smem_warp) {
const int TH_NUM = xlib::WARP_SIZE / (ITEMS_PER_THREAD / SIZE);
const int RIGHT_BOUND = LEFT_BOUND + TH_NUM;
if (static_cast<int>(xlib::lane_id()) >= LEFT_BOUND &&
xlib::lane_id() < RIGHT_BOUND) {
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; i++)
smem_thread[i] = reg_in[i];
}
#pragma unroll
for (int i = 0; i < SIZE; i++)
reg_out[OFFSET + i] = smem_warp[i * xlib::WARP_SIZE];
const bool END_COND = (LEFT_BOUND + TH_NUM >= xlib::WARP_SIZE);
SMemReordering<END_COND ? 0 : SIZE,
OFFSET + SIZE, LEFT_BOUND + TH_NUM>
::run(reg_in, reg_out, smem_thread, smem_warp);
}
//--------------------------------------------------------------------------
template<typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__
static void run(const T (&reg_in)[ITEMS_PER_THREAD],
T (&reg_out)[ITEMS_PER_THREAD],
T* smem,
T* smem_warp,
int& offset,
int& index) {
const int WARP_ITEMS = xlib::WARP_SIZE * SIZE;
while (index < ITEMS_PER_THREAD && offset < WARP_ITEMS)
smem[offset++] = reg_in[index++];
offset -= WARP_ITEMS;
#pragma unroll
for (int j = 0; j < SIZE; j++)
reg_out[OFFSET + j] = smem_warp[j * xlib::WARP_SIZE];
const bool END_COND = (OFFSET + SIZE > ITEMS_PER_THREAD);
SMemReordering<END_COND ? 0 : SIZE, SIZE, OFFSET + SIZE>
::run(reg_in, reg_out, smem, smem_warp, offset, index);
}
};
template<int OFFSET, int LEFT_BOUND>
struct SMemReordering<0, OFFSET, LEFT_BOUND> {
template<typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__ static
void run(T (&)[ITEMS_PER_THREAD], T*, T*) {}
template<typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__ static
void run(const T (&)[ITEMS_PER_THREAD], T (&)[ITEMS_PER_THREAD], T*, T*) {}
template<typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__ static
void run(const T (&)[ITEMS_PER_THREAD], T (&)[ITEMS_PER_THREAD],
T*, T*, int&, int&) {}
};
} // namespace detail
//==============================================================================
//==============================================================================
//////////////////////////////
// SHARED MEMORY REORDERING //
//////////////////////////////
template<unsigned ITEMS_PER_THREAD, typename T, typename R>
__device__ __forceinline__
void smem_reordering(T (&reg1)[ITEMS_PER_THREAD],
R (&reg2)[ITEMS_PER_THREAD],
void* smem) {
if (ITEMS_PER_THREAD == 1)
return;
const unsigned WARP_ITEMS = xlib::WARP_SIZE * ITEMS_PER_THREAD;
T* smem_tmp = static_cast<T*>(smem) + xlib::warp_id() * WARP_ITEMS;
T* smem_thread = smem_tmp + xlib::lane_id() * ITEMS_PER_THREAD;
T* smem_warp = smem_tmp + xlib::lane_id();
detail::SMemReordering<>::run(reg1, smem_thread, smem_warp);
detail::SMemReordering<>::run(reg2, smem_thread, smem_warp);
}
//==============================================================================
template<unsigned SMEM_PER_WARP, unsigned ITEMS_PER_THREAD, typename T>
__device__ __forceinline__
void smem_reordering(T (&reg)[ITEMS_PER_THREAD], void* smem) {
if (ITEMS_PER_THREAD == 1)
return;
const unsigned SMEM_THREAD = (SMEM_PER_WARP == 0) ? ITEMS_PER_THREAD :
SMEM_PER_WARP / xlib::WARP_SIZE;
T* smem_T = static_cast<T*>(smem) +
xlib::warp_id() * xlib::WARP_SIZE * SMEM_THREAD;
T* smem_warp = smem_T + xlib::lane_id();
if (ITEMS_PER_THREAD <= SMEM_THREAD ||
ITEMS_PER_THREAD % SMEM_THREAD == 0) {
const unsigned MIN_ITEMS = xlib::min(SMEM_THREAD, ITEMS_PER_THREAD);
T* smem_thread = smem_T + xlib::lane_id() * MIN_ITEMS;
if (ITEMS_PER_THREAD <= SMEM_THREAD)
detail::SMemReordering<>::run(reg, smem_thread, smem_warp);
else {
T tmp[ITEMS_PER_THREAD];
detail::SMemReordering<MIN_ITEMS>::run(reg, tmp, smem_thread,
smem_warp);
xlib::reg_copy(tmp, reg);
}
}
else {
T tmp[ITEMS_PER_THREAD];
int offset = xlib::lane_id() * ITEMS_PER_THREAD;
int index = 0;
detail::SMemReordering<SMEM_THREAD>::run(reg, tmp, smem_T, smem_warp,
offset, index);
xlib::reg_copy(tmp, reg);
}
}
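// Illustrative usage sketch (not part of the original xlib sources; the kernel
// name and launch configuration are assumptions added for clarity). Each thread
// loads a contiguous (blocked) chunk of items, smem_reordering() redistributes
// them warp-wide through shared memory, and the warp then stores with a
// coalesced (striped) access pattern.
template<unsigned ITEMS_PER_THREAD, unsigned BLOCK_SIZE>
__global__ void smem_reordering_example(const int* __restrict__ d_in,
                                        int* __restrict__ d_out) {
    __shared__ int smem[BLOCK_SIZE * ITEMS_PER_THREAD];  // one slot per item held by the block
    const unsigned lane      = threadIdx.x % xlib::WARP_SIZE;
    const unsigned warp_base = (blockIdx.x * BLOCK_SIZE +
                                (threadIdx.x / xlib::WARP_SIZE) * xlib::WARP_SIZE)
                               * ITEMS_PER_THREAD;
    int reg[ITEMS_PER_THREAD];
    #pragma unroll
    for (unsigned i = 0; i < ITEMS_PER_THREAD; i++)      // blocked (per-thread contiguous) load
        reg[i] = d_in[warp_base + lane * ITEMS_PER_THREAD + i];
    xlib::smem_reordering<0>(reg, smem);                 // SMEM_PER_WARP == 0: ITEMS_PER_THREAD slots per lane
    #pragma unroll
    for (unsigned i = 0; i < ITEMS_PER_THREAD; i++)      // striped (coalesced) store
        d_out[warp_base + i * xlib::WARP_SIZE + lane] = reg[i];
}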
//==============================================================================
//==============================================================================
//==============================================================================
////////////////////////
// SHUFFLE REORDERING //
////////////////////////
template<typename T, int SIZE>
__device__ __forceinline__
void shuffle_reordering(T (&A)[SIZE]) {
static_assert(xlib::mcd(SIZE, xlib::WARP_SIZE) == 1 ||
xlib::is_power2(SIZE),
"Does not work if mcd(SIZE, WARP_SIZE) != 1 && SIZE is not "
"a power of 2");
using namespace xlib::detail;
if (SIZE == 1)
return;
T B[SIZE];
int laneid = xlib::lane_id();
if (xlib::mcd(SIZE, xlib::WARP_SIZE) == 1) {
/*
// !!! Enable in CUDA Toolkit >= 9.2 !!!
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int index = (i * xlib::WARP_SIZE + laneid) % SIZE;
#pragma unroll
for (int j = 0; j < SIZE; j++)
B[j] = (j == index) ? A[i] : B[j];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = (I * xlib::WARP_SIZE + laneid) % SIZE;
Unroll<SIZE>::apply([&](int J) {
B[J] = (J == index) ? A[I] : B[J];
});
});
#pragma unroll
for (int i = 0; i < SIZE; i++)
A[i] = xlib::shfl(B[i], (laneid * SIZE + i) % xlib::WARP_SIZE);
}
else if (xlib::is_power2(SIZE)) {
const unsigned NUM_GROUPS = xlib::WARP_SIZE / SIZE;
/*
// !!! Enable in CUDA Toolkit >= 9.2 !!!
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int index = (SIZE - i + laneid) % SIZE; //also: i * (SIZE - 1)
#pragma unroll
for (int j = 0; j < SIZE; j++)
B[i] = (j == index) ? A[j] : B[i];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = (SIZE - I + laneid) % SIZE;
Unroll<SIZE>::apply([&](int J) {
B[I] = (J == index) ? A[J] : B[I];
});
});
#pragma unroll
for (int i = 0; i < SIZE; i++) {
//also (laneid % NUM_GROUPS) * SIZE;
int offset = (laneid * SIZE) % xlib::WARP_SIZE;
B[i] = xlib::shfl(B[i], offset + (laneid / NUM_GROUPS + i) % SIZE);
}
/*
// !!! Enable in CUDA Toolkit >= 9.2 !!!
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int index = (i + laneid / NUM_GROUPS) % SIZE;
#pragma unroll
for (int j = 0; j < SIZE; j++)
A[j] = (j == index) ? B[i] : A[j];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = (I + laneid / NUM_GROUPS) % SIZE;
Unroll<SIZE>::apply([&](int J) {
A[J] = (J == index) ? B[I] : A[J];
});
});
}
}
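// Illustrative sketch (assumption: this kernel is not part of the original xlib
// sources). It only demonstrates the calling convention: each lane keeps SIZE
// items in registers and shuffle_reordering() permutes them across the warp
// with register shuffles, so no shared-memory staging buffer is needed.
__global__ void shuffle_reordering_example(const int* __restrict__ d_in,
                                           int* __restrict__ d_out) {
    constexpr int SIZE = 4;                  // power of two, satisfies the static_assert above
    int items[SIZE];
    const int base = (blockIdx.x * blockDim.x + threadIdx.x) * SIZE;
    #pragma unroll
    for (int i = 0; i < SIZE; i++)
        items[i] = d_in[base + i];
    xlib::shuffle_reordering(items);         // warp-wide permutation, registers only
    #pragma unroll
    for (int i = 0; i < SIZE; i++)
        d_out[base + i] = items[i];
}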
//==============================================================================
//==============================================================================
template<typename T>
__device__ __forceinline__
void shuffle_reordering_v4(T (&A)[8]) {
using namespace detail;
const unsigned SIZE = 8;
const unsigned VECT = 4;
const unsigned NUM_GROUPS = SIZE / VECT;
const unsigned GROUP_SIZE = xlib::WARP_SIZE / NUM_GROUPS;
T B[SIZE];
int laneid = xlib::lane_id();
/*
// !!! Enable in CUDA Toolkit >= 9.2 !!!
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int index = ((laneid % NUM_GROUPS) * VECT + i) % SIZE;
#pragma unroll
for (int j = 0; j < SIZE; j++)
B[i] = (j == index) ? A[j] : B[i];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = ((laneid % NUM_GROUPS) * VECT + I) % SIZE;
Unroll<SIZE>::apply([&](int J) {
B[I] = (J == index) ? A[J] : B[I];
});
});
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int offset = (laneid / GROUP_SIZE + i / VECT) % NUM_GROUPS;
int index = (offset + laneid * NUM_GROUPS) % xlib::WARP_SIZE;
B[i] = xlib::shfl(B[i], index);
}
/*
// !!! Enable in CUDA Toolkit >= 9.2 !!!
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int index = ((laneid / GROUP_SIZE) * VECT + i) % SIZE;
#pragma unroll
for (int j = 0; j < SIZE; j++)
A[i] = (j == index) ? B[j] : A[i];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = ((laneid / GROUP_SIZE) * VECT + I) % SIZE;
Unroll<SIZE>::apply([&](int J) {
A[I] = (J == index) ? B[J] : A[I];
});
});
}
//==============================================================================
template<typename T, int SIZE>
__device__ __forceinline__
void shuffle_reordering_inv(T (&A)[SIZE]) {
static_assert(xlib::WARP_SIZE % SIZE == 0,
"WARP_SIZE and SIZE must be divisible");
using namespace xlib::detail;
if (SIZE == 1)
return;
T B[SIZE];
int laneid = xlib::lane_id();
const unsigned NUM_GROUPS = xlib::WARP_SIZE / SIZE;
// !!! Enable in CUDA Toolkit >= 9.2 !!!
/*
#pragma unroll //index = (SIZE - i + laneid / NUM_GROUPS) % SIZE;
for (int i = 0; i < SIZE; i++) {
int index = (SIZE - i + laneid) % SIZE;
#pragma unroll
for (int j = 0; j < SIZE; j++)
B[i] = (j == index) ? A[j] : B[i];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = (SIZE - I + laneid / NUM_GROUPS) % SIZE;
Unroll<SIZE>::apply([&](int J) {
B[I] = (J == index) ? A[J] : B[I];
});
});
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int base = (laneid % SIZE) * NUM_GROUPS;
int index = (base + laneid / SIZE + i * NUM_GROUPS) % xlib::WARP_SIZE;
B[i] = xlib::shfl(B[i], index);
}
// !!! Enable in CUDA Toolkit >= 9.2 !!!
/*
#pragma unroll
for (int i = 0; i < SIZE; i++) {
int index = (i + laneid) % SIZE; //<-- (i + laneid % SIZE) % SIZE
#pragma unroll
for (int j = 0; j < SIZE; j++)
A[j] = (j == index) ? B[i] : A[j];
}
*/
Unroll<SIZE>::apply([&](int I) {
int index = (I + laneid) % SIZE;
Unroll<SIZE>::apply([&](int J) {
A[J] = (J == index) ? B[I] : A[J];
});
});
}
} // namespace xlib
template<typename T>
struct row_length : public thrust::unary_function<T, T>
{
__host__ __device__ T operator()(const T &x) const
{
const T *next_ptr = &x;
next_ptr++;
return (*next_ptr) - x;
}
};
namespace amgx
{
__global__ void computeRowOffsetsKernel(INDEX_TYPE num_rows, INDEX_TYPE num_nz, const INDEX_TYPE *row_indices, INDEX_TYPE *row_offsets )
{
//one thread per non-zero
int nz = blockIdx.x * blockDim.x + threadIdx.x;
if (nz == 0)
{
row_offsets[0] = 0;
row_offsets[num_rows] = num_nz;
}
while (nz < num_nz - 1)
{
int row = row_indices[nz];
int next_row = row_indices[nz + 1];
while (row < next_row) //runs once per crossed row boundary; extra iterations fill the offsets of empty rows
{
row_offsets[++row] = nz + 1;
}
nz += blockDim.x * gridDim.x;
}
}
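// Worked example (illustrative): with num_rows = 3, num_nz = 4 and
// row_indices = [0, 0, 2, 2], thread nz = 0 writes row_offsets[0] = 0 and
// row_offsets[3] = 4, while thread nz = 1 sees row = 0, next_row = 2 and its
// inner loop writes row_offsets[1] = row_offsets[2] = 2, producing the CSR
// offsets [0, 2, 2, 4] with the empty row 1 handled by the extra iteration.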
__global__ void computeRowIndicesKernel(INDEX_TYPE num_rows, const INDEX_TYPE *row_offsets, INDEX_TYPE *row_indices )
{
//one warp per row //possible optimization: multiple warps per row
int row = (blockIdx.x * blockDim.x + threadIdx.x) / AMGX_WARP_SIZE;
int warp_id = threadIdx.x % AMGX_WARP_SIZE;
while (row < num_rows)
{
int start = row_offsets[row] + warp_id;
int end = row_offsets[row + 1];
for (int nz = start; nz < end; nz += AMGX_WARP_SIZE)
{
row_indices[nz] = row;
}
row += blockDim.x * gridDim.x / AMGX_WARP_SIZE;
}
}
__global__ void computeDiagonalKernelCOO(INDEX_TYPE num_nz, INDEX_TYPE *row_indices, INDEX_TYPE *col_indices, INDEX_TYPE *diag, INDEX_TYPE *diag_end_offsets)
{
//one thread per non-zero
INDEX_TYPE nz = (blockIdx.x * blockDim.x + threadIdx.x);
while (nz < num_nz)
{
INDEX_TYPE row = row_indices[nz];
INDEX_TYPE col = col_indices[nz];
if (row == col)
{
//copy block to diag
diag[row] = nz;
diag_end_offsets[row] = nz + 1;
}
nz += blockDim.x * gridDim.x;
}
}
__global__ void computeDiagonalKernelCSR(INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *diag, INDEX_TYPE null_index, INDEX_TYPE *diag_end_offsets)
{
INDEX_TYPE row = (blockIdx.x * blockDim.x + threadIdx.x);
while (row < num_rows)
{
int nz = row_offsets[row];
int last_nz = row_offsets[row + 1];
while (nz < last_nz)
{
int col = col_indices[nz];
if (row == col)
{
diag[row] = nz;
diag_end_offsets[row] = nz + 1;
break;
}
nz++;
}
row += blockDim.x * gridDim.x;
}
}
__global__ void computeColorOffsetsKernelCSR(INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, const INDEX_TYPE *row_colors, INDEX_TYPE *smaller_color_offsets, INDEX_TYPE *larger_color_offsets, INDEX_TYPE *diag)
{
INDEX_TYPE row = (blockIdx.x * blockDim.x + threadIdx.x);
while (row < num_rows)
{
int my_color = row_colors[row];
int nz = row_offsets[row];
int last_nz = row_offsets[row + 1];
int location_small = -1;
int location_large = -1;
while (nz < last_nz)
{
int col = col_indices[nz];
if (row_colors[col] >= my_color && location_small == -1)
{
location_small = nz;
}
if (row_colors[col] > my_color && location_large == -1)
{
location_large = nz;
break;
}
nz++;
}
if (location_large == -1) { location_large = last_nz + 1; }
if (location_small == -1) { location_small = last_nz + 1; }
larger_color_offsets[row] = location_large;
smaller_color_offsets[row] = location_small;
row += blockDim.x * gridDim.x;
}
}
__global__ void computeDiagonalKernelDiagProp (INDEX_TYPE num_rows, INDEX_TYPE num_nz, INDEX_TYPE *diag, INDEX_TYPE *diag_end_offsets)
{
INDEX_TYPE r = (blockIdx.x * blockDim.x + threadIdx.x);
while (r < num_rows)
{
diag[r] = num_nz + r;
diag_end_offsets[r] = num_nz + r + 1;
r += blockDim.x * gridDim.x;
}
}
template <class T>
__global__ void reorderElements (INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *permutation, T *data, T *temp, INDEX_TYPE max_row, INDEX_TYPE blockSize)
{
INDEX_TYPE rowId = blockIdx.x;
while (rowId < num_rows)
{
INDEX_TYPE rowStart = row_offsets[rowId];
INDEX_TYPE rowLen = row_offsets[rowId + 1] - rowStart;
int i = threadIdx.x;
//copy and reorder into temp storage
while (i < rowLen * blockSize)
{
temp[max_row * blockIdx.x + i] = data[(permutation[rowStart + i / blockSize]) * blockSize + i % blockSize];
i += blockDim.x;
}
__syncthreads();
//copy back
i = threadIdx.x;
while (i < rowLen * blockSize)
{
data[rowStart * blockSize + i] = temp[max_row * blockIdx.x + i];
i += blockDim.x;
}
rowId += gridDim.x;
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::apply(const Vector<TConfig> &v, Vector<TConfig> &res, ViewType view)
{
Vector<TConfig> &v_ = const_cast<Vector<TConfig>&>(v);
multiply(*this, v_, res, view);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::apply(const Vector<TConfig> &v, Vector<TConfig> &res, ViewType view)
{
Vector<TConfig> &v_ = const_cast<Vector<TConfig>&>(v);
multiply(*this, v_, res, view);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::print(char *f, char *s, int srows, int erows, int trank)
{
typedef typename TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec>::MatPrec ValueType;
int rank = 0;
int level = 0;
char filename[1024];
FILE *fid = NULL;
int printRowsStart, printRowsEnd;
int i, j, ii, xdim, ydim, tnnz;
ValueType a;
#ifdef AMGX_WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
//check target rank
if (rank == trank)
{
//check whether to output to stdout or file
if (f == NULL)
{
fid = stdout;
}
else
{
level = this->amg_level_index;
#ifdef _WIN32
_snprintf_s(filename, 1024, 1024, "%s_l%d_r%d.mtx", f, level, rank);
#else
snprintf(filename, 1024, "%s_l%d_r%d.mtx", f, level, rank);
#endif
fid = fopen(filename, "w");
}
cudaDeviceSynchronize();
cudaCheckError();
printRowsStart = (srows < 0) ? 0 : srows;
printRowsEnd = (erows < 0) ? this->get_num_rows() : erows;
tnnz = this->get_num_nz();
//matrix might have separate diagonal (need to account for it in nnz)
if (this->hasProps(DIAG, this->props))
{
//matrix might be non-square so take min of # of rows and cols
tnnz += min(this->get_num_rows(), this->get_num_cols());
}
fprintf(fid, "%%%%MatrixMarket matrix coordinate real general\n");
fprintf(fid, "%% %s\n", s);
fprintf(fid, "%d %d %d\n", this->get_num_rows(), this->get_num_cols(), tnnz);
for (i = printRowsStart; i < printRowsEnd; i++)
{
for (ydim = 0; ydim < this->get_block_dimy(); ydim++)
{
if (this->hasProps(DIAG, this->props))
{
if (i < min(this->get_num_rows(), this->get_num_cols()))
{
for (xdim = 0; xdim < this->get_block_dimx(); xdim++)
{
a = this->values[this->diag[i] * this->get_block_dimx() * this->get_block_dimy() + this->get_block_dimy() * ydim + xdim];
fprintf(fid, "%d %d ", i + 1, i + 1);
types::util<value_type>::fprintf(fid, "%20.16f", a);
fprintf(fid, "\n");
}
}
}
for (ii = this->row_offsets[i]; ii < this->row_offsets[i + 1]; ii++)
{
j = this->col_indices[ii];
for (xdim = 0; xdim < this->get_block_dimx(); xdim++)
{
a = this->values[ii * this->get_block_dimx() * this->get_block_dimy() + this->get_block_dimy() * ydim + xdim];
fprintf(fid, "%d %d ", i + 1, j + 1);
types::util<value_type>::fprintf(fid, "%20.16f", a);
fprintf(fid, "\n");
}
}
}
}
cudaDeviceSynchronize();
cudaGetLastError();
if (fid != stdout)
{
fclose(fid);
}
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::printToFile(char *f, char *s, int srows, int erows)
{
int rank = 0;
#ifdef AMGX_WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
//notice that print will be called with a different (target) rank on different ranks/processes
this->print(f, s, srows, erows, rank);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::print(char *f, char *s, int srows, int erows, int trank)
{
typedef typename TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec>::MatPrec ValueType;
int rank = 0;
int level = 0;
char filename[1024];
FILE *fid = NULL;
int printRowsStart, printRowsEnd;
int i, j, ii, xdim, ydim, tnnz;
ValueType a;
#ifdef AMGX_WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
//check target rank
if (rank == trank)
{
//check whether to output to stdout or file
if (f == NULL)
{
fid = stdout;
}
else
{
level = this->amg_level_index;
#ifdef _WIN32
_snprintf_s(filename, 1024, 1024, "%s_r%d_l%d.mtx", f, rank, level);
#else
snprintf(filename, 1024, "%s_r%d_l%d.mtx", f, rank, level);
#endif
fid = fopen(filename, "w");
}
cudaDeviceSynchronize();
cudaCheckError();
printRowsStart = (srows < 0) ? 0 : srows;
printRowsEnd = (erows < 0) ? this->get_num_rows() : erows;
tnnz = this->get_num_nz();
//matrix might have separate diagonal (need to account for it in nnz)
if (this->hasProps(DIAG, this->props))
{
//matrix might be non-square so take min of # of rows and cols
tnnz += min(this->get_num_rows(), this->get_num_cols());
}
fprintf(fid, "%%%%MatrixMarket matrix coordinate real general\n");
fprintf(fid, "%% %s\n", s);
fprintf(fid, "%d %d %d\n", this->get_num_rows(), this->get_num_cols(), tnnz);
for (i = printRowsStart; i < printRowsEnd; i++)
{
for (ydim = 0; ydim < this->get_block_dimy(); ydim++)
{
if (this->hasProps(DIAG, this->props))
{
if (i < min(this->get_num_rows(), this->get_num_cols()))
{
for (xdim = 0; xdim < this->get_block_dimx(); xdim++)
{
a = this->values[this->diag[i] * this->get_block_dimx() * this->get_block_dimy() + this->get_block_dimy() * ydim + xdim];
fprintf(fid, "%d %d ", i + 1, i + 1);
types::util<value_type>::fprintf(fid, "%20.16f", a);
fprintf(fid, "\n");
}
}
}
for (ii = this->row_offsets[i]; ii < this->row_offsets[i + 1]; ii++)
{
j = this->col_indices[ii];
for (xdim = 0; xdim < this->get_block_dimx(); xdim++)
{
a = this->values[ii * this->get_block_dimx() * this->get_block_dimy() + this->get_block_dimy() * ydim + xdim];
fprintf(fid, "%d %d ", i + 1, j + 1);
types::util<value_type>::fprintf(fid, "%20.16f", a);
fprintf(fid, "\n");
}
}
}
}
cudaDeviceSynchronize();
cudaGetLastError();
if (fid != stdout)
{
fclose(fid);
}
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::printToFile(char *f, char *s, int srows, int erows)
{
int rank = 0;
#ifdef AMGX_WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
//notice that print will be called with a different (target) rank on different ranks/processes
this->print(f, s, srows, erows, rank);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::convert( const Matrix<TConfig> &mat, unsigned int new_props, int block_dimy, int block_dimx )
{
if ( !mat.is_initialized() )
{
FatalError("Trying to convert from the uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
this->set_initialized(0);
this->addProps(new_props);
index_type block_size = block_dimx * block_dimy;
index_type new_num_rows = ( mat.get_num_rows() * mat.block_dimy + block_dimy - 1 ) / block_dimy;
index_type new_num_cols = ( mat.get_num_cols() * mat.block_dimx + block_dimx - 1 ) / block_dimx;
MVector new_values;
IVector new_col_indices;
index_type new_num_nnz = 0;
IVector new_row_indices;
IVector new_row_offsets;
MVector new_dia_values;
new_dia_values.resize( new_num_rows * block_size, types::util<value_type>::get_zero() );
if ( mat.hasProps(COO) )
{
std::map< std::pair<index_type, index_type>, index_type> ind;
for ( index_type i = 0; i < mat.get_num_nz(); i++ )
for ( index_type loc_row = 0; loc_row < mat.block_dimy; loc_row++ )
for ( index_type loc_col = 0; loc_col < mat.block_dimx; loc_col++ )
{
index_type in_row = mat.row_indices[i] * mat.block_dimy + loc_row;
index_type in_col = mat.col_indices[i] * mat.block_dimx + loc_col;
value_type in_val = mat.values[i * mat.block_size + loc_row * mat.block_dimx + loc_col];
if ( types::util<value_type>::is_zero(in_val) ) { continue; } // skip zero entries
index_type out_br = in_row / block_dimy;
index_type out_bc = in_col / block_dimx;
index_type out_lr = in_row % block_dimy;
index_type out_lc = in_col % block_dimx;
if ( ind.find( std::pair<index_type, index_type>( out_br, out_bc ) ) == ind.end() )
{
// allocate a new block
ind.insert( std::pair< std::pair<index_type, index_type>, index_type>( std::pair<index_type, index_type>( out_br, out_bc ), new_num_nnz ) );
if (out_br != out_bc || !this->hasProps(DIAG))
{
new_row_indices.push_back( out_br );
new_col_indices.push_back( out_bc );
new_num_nnz++;
for ( int b = 0; b < block_size; b++ )
{
new_values.push_back( types::util<value_type>::get_zero() );
}
}
}
if ( out_br != out_bc || !this->hasProps(DIAG) )
{
new_values[ ind[std::pair<index_type, index_type>( out_br, out_bc )] * block_size + out_lr * block_dimx + out_lc ] = in_val;
}
else
{
new_dia_values[ out_br * block_size + out_lr * block_dimx + out_lc ] = in_val;
}
}
} // ( mat.hasProps(COO) )
else if ( mat.hasProps(CSR))
{
new_num_nnz = 0;
//MVector new_dia_values;
//new_dia_values.resize( new_num_rows*block_size, 0.0 );
new_row_offsets.resize( new_num_rows + 1 );
// process each output block row
for ( index_type i = 0; i < new_num_rows; i++ )
{
new_row_offsets[i] = new_num_nnz;
// count non zero column blocks
IVector non_zero_blocks( new_num_cols );
for ( index_type j = i * block_dimy; j < (i + 1) * block_dimy && j < mat.get_num_rows() * mat.block_dimy; j++ )
{
// input row block / local position
index_type in_br = j / mat.block_dimy;
index_type in_lr = j % mat.block_dimy;
// loop through block columns
for ( index_type r = mat.row_offsets[in_br]; r < mat.row_offsets[in_br + 1] + ( mat.hasProps(DIAG) ); r++ )
{
index_type in_bc = ( r == mat.row_offsets[in_br + 1] ) ? in_br : mat.col_indices[r];
// loop through local columns
for ( index_type in_lc = 0; in_lc < mat.block_dimx; in_lc++ )
{
index_type in_col = in_bc * mat.block_dimx + in_lc;
index_type out_bc = in_col / block_dimx;
// fetch input entry value
value_type val = ( r == mat.row_offsets[in_br + 1] ) ?
mat.values[mat.diag[in_br] * mat.block_size + in_lr * mat.block_dimx + in_lc] :
mat.values[r * mat.block_size + in_lr * mat.block_dimx + in_lc];
if ( types::util<value_type>::is_zero(val) ) { continue; } // skip zero entries
// mark non_zero column
non_zero_blocks[out_bc] = 1;
}
}
}
// populate non zero column blocks
for ( int bc = 0; bc < new_num_cols; bc++ )
if ( non_zero_blocks[bc] != 0 )
{
if ( i != bc || !this->hasProps(DIAG) ) // off-diagonal for DIAG
{
non_zero_blocks[bc] = new_num_nnz++;
new_col_indices.push_back( bc );
for ( int b = 0; b < block_size; b++ )
{
new_values.push_back( types::util<value_type>::get_zero() );
}
}
}
// fill non zero values
for ( index_type j = i * block_dimy; j < (i + 1) * block_dimy && j < mat.get_num_rows() * mat.block_dimy; j++ )
{
// output row block/local position
index_type out_br = j / block_dimy;
index_type out_lr = j % block_dimy;
// input row block/local position
index_type in_br = j / mat.block_dimy;
index_type in_lr = j % mat.block_dimy;
// loop through block columns
for ( index_type r = mat.row_offsets[in_br]; r < mat.row_offsets[in_br + 1] + ( mat.hasProps(DIAG) ); r++ )
{
index_type in_bc = ( r == mat.row_offsets[in_br + 1] ) ? in_br : mat.col_indices[r];
// loop through local columns
for ( index_type in_lc = 0; in_lc < mat.block_dimx; in_lc++ )
{
index_type in_col = in_bc * mat.block_dimx + in_lc;
index_type out_bc = in_col / block_dimx;
index_type out_lc = in_col % block_dimx;
// fetch input entry value
value_type val = ( r == mat.row_offsets[in_br + 1] ) ?
mat.values[mat.diag[in_br] * mat.block_size + in_lr * mat.block_dimx + in_lc] :
mat.values[r * mat.block_size + in_lr * mat.block_dimx + in_lc];
if ( types::util<value_type>::is_zero(val) ) { continue; } // skip zero entries
// write to new matrix
if ( out_br != out_bc || !this->hasProps(DIAG) )
{
new_values[ non_zero_blocks[out_bc] * block_size + out_lr * block_dimx + out_lc] = val;
}
else
{
new_dia_values[ out_br * block_size + out_lr * block_dimx + out_lc ] = val;
}
}
}
}
} // for( i < new_num_rows )
// fill extra diagonal for the last block
int extra_start = ( mat.get_num_rows() * mat.block_dimy ) % block_dimy;
if ( extra_start > 0 )
{
for ( int r = extra_start; r < block_dimy; r++ )
if ( this->hasProps(DIAG) )
{
new_dia_values[ (new_num_rows - 1) * block_size + r * block_dimx + r] = types::util<value_type>::get_one();
}
else
{
new_values[ (new_num_nnz - 1) * block_size + r * block_dimx + r] = types::util<value_type>::get_one();
}
}
new_row_offsets[new_num_rows] = new_num_nnz;
} // ( mat.hasProps(CSR) )
else
{
FatalError("Input matrix for conversion doesn't have COO or CSR format", AMGX_ERR_CONFIGURATION);
}
if ( this->hasProps(DIAG) )
{
new_values.insert(new_values.end(), new_dia_values.begin(), new_dia_values.end());
}
else
for ( int b = 0; b < block_size; b++ )
{
new_values.push_back( types::util<value_type>::get_zero() );
}
this->resize( new_num_rows, new_num_cols, new_num_nnz, block_dimy, block_dimx );
this->values.copy( new_values );
this->values.set_block_dimx(block_dimx);
this->values.set_block_dimy(block_dimy);
this->col_indices.copy( new_col_indices );
if ( mat.hasProps(COO) )
{
this->row_indices.copy( new_row_indices );
this->props |= COO;
}
if ( mat.hasProps(CSR) )
{
this->row_offsets.copy( new_row_offsets );
this->props |= CSR;
}
if (this->hasProps(COO) && this->row_indices.size() == 0)
{
this->row_indices.resize(new_num_nnz);
computeRowIndices();
}
if (this->hasProps(CSR) && this->row_offsets.size() == 0)
{
this->row_offsets.resize(new_num_rows + 1);
computeRowOffsets();
}
computeDiagonal();
this->set_initialized(1);
}
template <class T_Config> class MatrixBase;
template<class T_Config>
AMGX_ERROR
MatrixBase<T_Config>::resize(index_type num_rows, index_type num_cols, index_type num_nz, int skipDiaCompute)
{
if (this->is_initialized())
{
FatalError("Debug throw: resizing already initialized matrix\n", AMGX_ERR_BAD_PARAMETERS);
}
{
this->num_rows = num_rows;
this->num_cols = num_cols;
this->num_nz = num_nz;
if ( hasProps(DIAG) )
{
values.resize(num_nz * block_size + num_rows * block_size);
}
else
{
values.resize((num_nz + 1)*block_size);
//thrust::fill(values.begin() + num_nz*block_size, values.end(), static_cast<value_type>(0.0));
}
diag.resize(num_rows);
m_diag_end_offsets.resize(num_rows);
col_indices.resize(num_nz);
//if( props == NONE ) {props = CSR; props |= DIAG;}
if ( hasProps(COO) ) { row_indices.resize(num_nz); }
if ( hasProps(CSR) ) { row_offsets.resize(num_rows + 1); }
m_seq_offsets.resize(num_rows + 1);
thrust::sequence(m_seq_offsets.begin(), m_seq_offsets.end());
cudaCheckError();
if (!skipDiaCompute )
{
computeDiagonal();
}
}
return AMGX_OK;
}
template<class T_Config>
void
MatrixBase<T_Config>::setupMatrix(Solver<T_Config> *outer_solver, AMG_Config &cfg, bool reuse_matrix_structure)
{
// Check that matrix is initialized
if (!this->is_initialized())
{
FatalError("Trying to setup from the uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
this->set_initialized(0);
m_separation_interior = cfg.getParameter<ViewType>("separation_interior", "default");
m_separation_exterior = cfg.getParameter<ViewType>("separation_exterior", "default");
if (m_separation_interior > m_separation_exterior) { FatalError("Interior separation cannot be wider than the exterior separation", AMGX_ERR_CONFIGURATION); }
int min_rows_latency_hiding = cfg.getParameter<int>("min_rows_latency_hiding", "default");
if (min_rows_latency_hiding < 0 || this->get_num_rows() < min_rows_latency_hiding) { m_separation_interior = m_separation_exterior; }
bool is_coloring_needed = outer_solver->isColoringNeeded();
if (!reuse_matrix_structure)
{
// Color the matrix since the structure has changed
if ( is_coloring_needed )
{
// Get the scope of the solver that needs coloring
std::string cfg_scope_for_coloring;
outer_solver->getColoringScope(cfg_scope_for_coloring);
this->colorMatrix(cfg, cfg_scope_for_coloring);
}
}
// Set the matrix block format
BlockFormat block_format = cfg.getParameter<BlockFormat>( "block_format", "default" );
if ( this->getBlockFormat() != block_format )
{
this->setBlockFormat( block_format );
}
// Permute the values and the column indices (if necessary)
bool reorder_cols_by_color = outer_solver->getReorderColsByColorDesired();
bool insert_diagonal = outer_solver->getInsertDiagonalDesired();
if (reorder_cols_by_color)
{
if (reuse_matrix_structure) // Only permute the values
{
this->permuteValues();
}
else // Permute the values and the columns
{
this->reorderColumnsByColor(insert_diagonal);
this->permuteValues();
}
}
this->set_initialized(1);
m_is_matrix_setup = true;
}
template<class T_Config>
void
MatrixBase<T_Config>::reorderColumnsByColor(bool insert_diagonal)
{
// If columns already reordered, don't reorder again
if (this->m_cols_reordered_by_color)
{
return;
}
// Check if matrix is colored
if (!this->hasProps(COLORING))
{
FatalError("Matrix must be colored in order to be reordered by colors. Try setting coloring_level=1 in the configuration file", AMGX_ERR_CONFIGURATION);
}
set_initialized(0);
// Compute the row indices
addProps(COO);
set_allow_recompute_diag(false);
index_type num_non_zeros = num_nz;
if (hasProps(DIAG) && insert_diagonal) // Diagonal stored separately
{
num_non_zeros += num_rows;
}
// Append the diagonal if stored separately
// The new matrix will have inside diagonal
if (hasProps(DIAG) && insert_diagonal)
{
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first + num_rows;
// Create new row_indices with appended diagonal
IVector new_row_indices(num_non_zeros);
IVector new_col_indices(num_non_zeros);
thrust::copy(row_indices.begin(), row_indices.end(), new_row_indices.begin());
thrust::copy(first, last, new_row_indices.begin() + num_nz);
// Create new col_indices with appended diagonal
thrust::copy(col_indices.begin(), col_indices.end(), new_col_indices.begin());
thrust::copy(first, last, new_col_indices.begin() + num_nz);
row_indices.swap(new_row_indices);
col_indices.swap(new_col_indices);
new_row_indices.clear();
new_row_indices.shrink_to_fit();
new_col_indices.clear();
new_col_indices.shrink_to_fit();
}
// Compute the color of every column
IVector element_colors(num_non_zeros);
thrust::copy(thrust::make_permutation_iterator(this->getMatrixColoring().getRowColors().begin(), col_indices.begin()),
thrust::make_permutation_iterator(this->getMatrixColoring().getRowColors().begin(), col_indices.end()),
element_colors.begin());
// Compute the permutation vector by sorting by rows and columns
m_values_permutation_vector.resize(num_non_zeros);
thrust::sequence(m_values_permutation_vector.begin(), m_values_permutation_vector.end());
cusp::detail::sort_by_row_and_column(row_indices, element_colors, m_values_permutation_vector);
cudaCheckError();
element_colors.clear();
element_colors.shrink_to_fit();
// Compute the new column indices sorted by color
IVector new_column_indices(num_non_zeros);
thrust::copy(thrust::make_permutation_iterator(col_indices.begin(), m_values_permutation_vector.begin()),
thrust::make_permutation_iterator(col_indices.begin(), m_values_permutation_vector.end()),
new_column_indices.begin());
col_indices.swap(new_column_indices);
new_column_indices.clear();
new_column_indices.shrink_to_fit();
if (hasProps(DIAG) && insert_diagonal)
{
// Change the number of nonzeros
set_num_nz(num_non_zeros);
values.resize( (num_non_zeros + 1)*this->get_block_size());
this->m_is_permutation_inplace = false;
}
else
{
this->m_is_permutation_inplace = true;
}
if (hasProps(DIAG) && insert_diagonal)
{
delProps(DIAG);
// Force recomputation of row offsets
delProps(CSR);
}
// Compute row offsets if input matrix only had COO format or if diagonal was inserted
addProps(CSR);
// Recompute the diagonal
set_allow_recompute_diag(true);
computeDiagonal();
// Compute the color offsets
m_smaller_color_offsets.resize(this->get_num_rows());
m_larger_color_offsets.resize(this->get_num_rows());
computeColorOffsets();
this->m_cols_reordered_by_color = true;
set_initialized(1);
}
template<class T_Config>
void
MatrixBase<T_Config>::sortByRowAndColumn()
{
this->set_initialized(0);
// Add row_indices array
this->addProps(COO);
this->set_allow_recompute_diag(false);
if (this->get_block_dimx() != 1 || this->get_block_dimy() != 1)
{
FatalError("sortByRowAndColumn only works for scalar matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
size_t N = this->row_indices.size();
IVector permutation(N);
thrust::sequence(permutation.begin(), permutation.end());
cudaCheckError();
// compute permutation and sort by (I,J): stable-sorting by column first, then by row, yields row-major (I,J) order because the second sort keeps equal-row entries in their column order
{
IVector temp(this->col_indices);
thrust::stable_sort_by_key(temp.begin(), temp.end(), permutation.begin());
temp = this->row_indices;
thrust::gather(permutation.begin(), permutation.end(), temp.begin(), this->row_indices.begin());
thrust::stable_sort_by_key(this->row_indices.begin(), this->row_indices.end(), permutation.begin());
temp = this->col_indices;
thrust::gather(permutation.begin(), permutation.end(), temp.begin(), this->col_indices.begin());
}
cudaCheckError();
// use permutation to reorder the values
{
MVector temp(this->values);
thrust::gather(permutation.begin(), permutation.end(), temp.begin(), this->values.begin());
}
cudaCheckError();
this->set_allow_recompute_diag(true);
this->addProps(CSR);
// remove row indices array
this->delProps(COO);
this->computeDiagonal();
this->set_initialized(1);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDiagonal()
{
ViewType oldView = this->currentView();
if (this->m_initialized) { this->setView(ALL); }
if (this->allow_recompute_diag)
{
index_type num_rows = this->get_num_rows();
index_type num_nz = this->get_num_nz();
IVector &row_offsets = this->row_offsets;
IVector &row_indices = this->row_indices;
IVector &col_indices = this->col_indices;
IVector &diag = this->diag;
if (this->diag.size() != this->get_num_rows()) { this->diag.resize(this->get_num_rows()); }
if (this->m_diag_end_offsets.size() != this->get_num_rows()) { this->m_diag_end_offsets.resize(this->get_num_rows()); }
if ( this->hasProps(DIAG) )
{
int first = num_nz;
for (int r = 0; r < num_rows; r++)
{
diag[r] = first++;
}
}
else
{
index_type null_index = this->get_num_nz();
if ( this->hasProps(CSR) )
{
for (int r = 0; r < num_rows; r++)
{
int start = row_offsets[r];
int end = row_offsets[r + 1];
diag[r] = null_index;
for (int j = start; j < end; j++)
{
if (col_indices[j] == r)
{
diag[r] = j;
break;
}
}
}
}
else if (this->hasProps(COO) )
{
for (int i = 0; i < num_rows; i++)
{
diag[i] = null_index;
}
for (int j = 0; j < num_nz; j++)
{
int r = row_indices[j];
if (r == col_indices[j])
{
diag[r] = j;
}
}
}
}
for (int r = 0; r < num_rows; r++)
{
this->m_diag_end_offsets[r] = diag[r] + 1;
}
}
this->setView(oldView);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDiagonal()
{
ViewType oldView;
index_type null_index = 0; //this->get_num_nz();
if (!this->allow_recompute_diag || !(this->get_num_rows() > 0))
{
return;
}
if (this->m_initialized)
{
oldView = this->currentView();
this->setView(ALL);
}
if (this->diag.size() < this->get_num_rows())
{
this->diag.resize(this->get_num_rows());
}
if (this->m_diag_end_offsets.size() < this->get_num_rows())
{
this->m_diag_end_offsets.resize(this->get_num_rows());
}
if (this->hasProps(DIAG))
{
int num_blocks = min(4096, (this->get_num_rows() + 511) / 512);
computeDiagonalKernelDiagProp <<< num_blocks, 512, 0, thrust::global_thread_handle::get_stream()>>>(this->get_num_rows(), this->get_num_nz(), this->diag.raw(), this->m_diag_end_offsets.raw());
}
else if (this->hasProps(COO))
{
int num_blocks = min(4096, (this->get_num_nz() + 511) / 512);
computeDiagonalKernelCOO <<< num_blocks, 512>>>(this->get_num_nz(), this->row_indices.raw(), this->col_indices.raw(), this->diag.raw(), this->m_diag_end_offsets.raw());
}
else
{
int num_blocks = min(4096, (this->get_num_rows() + 511) / 512);
computeDiagonalKernelCSR <<< num_blocks, 512>>>(this->get_num_rows(), this->row_offsets.raw(), this->col_indices.raw(), this->diag.raw(), null_index, this->m_diag_end_offsets.raw());
}
cudaCheckError();
if (this->m_initialized)
{
this->setView(oldView);
}
#ifdef DEBUG
if (this->diag_copy.size() == 0)
{
this->diag_copy = this->diag;
}
else
{
if ((this->diag.size() != this->diag_copy.size()) || (this->diag.size() == 1)) { return; }
IVector_h h_diag = this->diag;
IVector_h h_diag_copy = this->diag_copy;
bool equal = true;
for (unsigned int i = 0; i < this->diag.size(); ++i)
{
if (h_diag[i] != h_diag_copy[i])
{
equal = false;
break;
}
}
if (equal)
{
FatalError("ComputeDiagonal was called, but diagonal hasn't changed", AMGX_ERR_UNKNOWN);
}
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void
Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::permuteValues()
{
if (this->m_cols_reordered_by_color && this->m_is_permutation_inplace)
{
reorderValuesInPlace();
}
else if (this->m_cols_reordered_by_color && !this->m_is_permutation_inplace )
{
// might use a lot of memory
MVector temp_values;
temp_values.resize(this->values.size());
temp_values.set_block_dimx(this->values.get_block_dimx());
temp_values.set_block_dimy(this->values.get_block_dimy());
amgx::unpermuteVector(this->values, temp_values, this->m_values_permutation_vector, (this->get_num_nz()) * (this->get_block_size()));
this->values.swap(temp_values);
temp_values.clear();
temp_values.shrink_to_fit();
}
else
{
FatalError("Invalid reordering level in permuteValues", AMGX_ERR_CONFIGURATION);
}
}
void computeRowOffsetsDevice(int num_blocks, INDEX_TYPE num_rows, INDEX_TYPE num_nz, const INDEX_TYPE *row_indices, INDEX_TYPE *row_offsets, INDEX_TYPE block_size )
{
computeRowOffsetsKernel <<< num_blocks, 512>>>(num_rows, num_nz, row_indices, row_offsets);
cudaCheckError();
}
extern void computeRowIndicesDevice(int num_blocks, INDEX_TYPE num_rows, const INDEX_TYPE *row_offsets, INDEX_TYPE *row_indices, INDEX_TYPE block_size )
{
computeRowIndicesKernel <<< num_blocks, 512>>>(num_rows, row_offsets, row_indices);
cudaCheckError();
}
void computeColorOffsetsDeviceCSR(int num_blocks, INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, const INDEX_TYPE *row_colors, INDEX_TYPE *smaller_color_offsets, INDEX_TYPE *larger_color_offsets, INDEX_TYPE block_size, INDEX_TYPE *diag )
{
computeColorOffsetsKernelCSR <<< num_blocks, 512>>>(num_rows, row_offsets, col_indices, row_colors, smaller_color_offsets, larger_color_offsets, diag);
cudaCheckError();
}
template <typename T>
void reorderElementsDeviceCSR(INDEX_TYPE num_rows,
INDEX_TYPE *row_offsets,
INDEX_TYPE *permutation,
INDEX_TYPE *col_indices,
T *values,
INDEX_TYPE block_size)
{
thrust::device_ptr<INDEX_TYPE> dev_ptr = thrust::device_pointer_cast(row_offsets);
INDEX_TYPE max_row_length = std::max(1, thrust::transform_reduce(dev_ptr, dev_ptr + num_rows, row_length<INDEX_TYPE>(), 0, thrust::maximum<INDEX_TYPE>()));
//TODO: optimise this in terms of storage
INDEX_TYPE storage_space = 100 * 1024 * 1024 * sizeof(T) / sizeof(cuDoubleComplex); // because the temp storage below is allocated as cuDoubleComplex
INDEX_TYPE blocks = 1500 < storage_space / (max_row_length * block_size * sizeof(T)) ? 1500 : storage_space / (max_row_length * block_size * sizeof(T));
blocks = blocks < num_rows ? blocks : num_rows;
INDEX_TYPE aligned_space = ((max_row_length * block_size * sizeof(T) / 128 + 1) * 128) / sizeof(T); //pad to 128 bytes
Vector<amgx::TemplateConfig<AMGX_device, AMGX_vecDoubleComplex, AMGX_matDoubleComplex, AMGX_indInt> > tempstorage(blocks * aligned_space);
reorderElements <<< blocks, 256>>>(num_rows, row_offsets, permutation, values, (T *)tempstorage.raw(), aligned_space, block_size);
cudaCheckError();
}
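// Worked sizing example (illustrative): with max_row_length = 100, block_size = 4
// and T = double, one reordered row needs 100 * 4 * 8 = 3200 bytes; the formula
// above yields ((3200 / 128) + 1) * 128 = 3328 bytes, i.e. aligned_space = 416
// doubles of temp storage per block (a full extra 128-byte chunk is added even
// when the row size is already 128-byte aligned).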
// explicitly instantiate reorderElementsDeviceCSR, since we call it from header and it's not a part of some class
template void reorderElementsDeviceCSR(INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *permutation, INDEX_TYPE *col_indices, float *values, INDEX_TYPE block_size);
template void reorderElementsDeviceCSR(INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *permutation, INDEX_TYPE *col_indices, double *values, INDEX_TYPE block_size);
template void reorderElementsDeviceCSR(INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *permutation, INDEX_TYPE *col_indices, cuComplex *values, INDEX_TYPE block_size);
template void reorderElementsDeviceCSR(INDEX_TYPE num_rows, INDEX_TYPE *row_offsets, INDEX_TYPE *permutation, INDEX_TYPE *col_indices, cuDoubleComplex *values, INDEX_TYPE block_size);
/****************************************
* Explict instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class MatrixBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Matrix<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}//end namespace amgx
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Average string byte-length threshold for deciding character-level vs row-level parallel
* algorithm.
*
* This value was determined by running the replace string scalar benchmark against different
* power-of-2 string lengths and observing the point at which the performance only improved for
* all trials.
*/
constexpr size_type BYTES_PER_VALID_ROW_THRESHOLD = 64;
/**
* @brief Function logic for the row-level parallelism replace API.
*
* This will perform a replace operation on each string.
*/
struct replace_row_parallel_fn {
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t const max_repl;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
const char* in_ptr = d_str.data();
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
auto max_n = (max_repl < 0) ? d_str.length() : max_repl;
auto bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while ((position >= 0) && (max_n > 0)) {
if (out_ptr) {
auto const curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
} else {
bytes += d_repl.size_bytes() - d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes());
--max_n;
}
    if (out_ptr) // copy what's left (or right, depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
else
d_offsets[idx] = bytes;
}
};
/**
* @brief Functor for detecting falsely-overlapped target positions.
*
* This functor examines target positions that have been flagged as potentially overlapped by
* a previous target position and identifies the overlaps that are false. A false overlap can occur
* when a target position is overlapped by another target position that is itself overlapped.
*
* For example, a target string of "+++" and string to search of "++++++" will generate 4 potential
* target positions at char offsets 0 through 3. The targets at offsets 1, 2, and 3 will be flagged
* as potential overlaps since a prior target position is within range of the target string length.
* The targets at offset 1 and 2 are true overlaps, since the footprint of the valid target at
* offset 0 overlaps with them. The target at offset 3 is not truly overlapped because it is only
* overlapped by invalid targets, targets that were themselves overlapped by a valid target.
*/
struct target_false_overlap_filter_fn {
size_type const* const d_overlap_pos_indices{};
size_type const* const d_target_positions{};
size_type const target_size{};
__device__ bool operator()(size_type overlap_idx) const
{
if (overlap_idx == 0) {
// The first overlap has no prior overlap to chain, so it should be kept as an overlap.
return false;
}
size_type const this_pos_idx = d_overlap_pos_indices[overlap_idx];
// Searching backwards for the first target position index of an overlap that is not adjacent
// to its overlap predecessor. The result will be the first overlap in this chain of overlaps.
size_type first_overlap_idx = overlap_idx;
size_type first_pos_idx = this_pos_idx;
while (first_overlap_idx > 0) {
size_type prev_pos_idx = d_overlap_pos_indices[--first_overlap_idx];
if (prev_pos_idx + 1 != first_pos_idx) { break; }
first_pos_idx = prev_pos_idx;
}
// The prior target position to the first overlapped position in the chain is a valid target.
size_type valid_pos_idx = first_pos_idx - 1;
size_type valid_pos = d_target_positions[valid_pos_idx];
// Walk forward from this valid target. Any targets within the range of this valid one are true
// overlaps. The first overlap beyond the range of this valid target is another valid target,
// as it was falsely overlapped by a target that was itself overlapped. Repeat until we get to
// the overlapped position being queried by this call.
while (valid_pos_idx < this_pos_idx) {
size_type next_pos_idx = valid_pos_idx + 1;
size_type next_pos = d_target_positions[next_pos_idx];
// Every target position within the range of a valid target position is a true overlap.
while (next_pos < valid_pos + target_size) {
if (next_pos_idx == this_pos_idx) { return false; }
next_pos = d_target_positions[++next_pos_idx];
}
valid_pos_idx = next_pos_idx;
valid_pos = next_pos;
}
// This was overlapped only by false overlaps and therefore is a valid target.
return true;
}
};
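// Worked trace of the example in the comment above (target "+++", input "++++++"):
// the target positions are {0, 1, 2, 3} and positions 1, 2, 3 are flagged as potential
// overlaps. For overlap index 0 the functor returns false by definition, and for
// overlap index 1 (position 2) it also returns false because position 2 lies inside
// the footprint [0, 3) of the valid target at 0. For overlap index 2 (position 3) it
// returns true, so position 3 is treated as a valid target. The caller then removes
// positions 1 and 2, leaving {0, 3}.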
/**
* @brief Functor for replacing each target string with the replacement string.
*
* This will perform a replace operation at each target position.
*/
struct target_replacer_fn {
device_span<size_type const> const d_target_positions;
char const* const d_in_chars{};
char* const d_out_chars{};
size_type const target_size{};
string_view const d_repl;
int32_t const in_char_offset = 0;
__device__ void operator()(size_type input_idx) const
{
// Calculate the adjustment from input index to output index for each prior target position.
auto const repl_size = d_repl.size_bytes();
auto const idx_delta_per_pos = repl_size - target_size;
// determine the number of target positions at or before this character position
size_type const* next_target_pos_ptr = thrust::upper_bound(
thrust::seq, d_target_positions.begin(), d_target_positions.end(), input_idx);
size_type const num_prev_targets = next_target_pos_ptr - d_target_positions.data();
size_type output_idx = input_idx - in_char_offset + idx_delta_per_pos * num_prev_targets;
if (num_prev_targets == 0) {
// not within a target string
d_out_chars[output_idx] = d_in_chars[input_idx];
} else {
// check if this input position is within a target string
size_type const prev_target_pos = *(next_target_pos_ptr - 1);
size_type target_idx = input_idx - prev_target_pos;
if (target_idx < target_size) {
// within the target string, so the original calculation was off by one target string
output_idx -= idx_delta_per_pos;
// Copy the corresponding byte from the replacement string. If the replacement string is
// larger than the target string then the thread reading the last target byte is
// responsible for copying the remainder of the replacement string.
if (target_idx < repl_size) {
d_out_chars[output_idx++] = d_repl.data()[target_idx++];
if (target_idx == target_size) {
memcpy(d_out_chars + output_idx, d_repl.data() + target_idx, repl_size - target_idx);
}
}
} else {
// not within a target string
d_out_chars[output_idx] = d_in_chars[input_idx];
}
}
}
};
/**
* @brief Filter target positions that are overlapped by other, valid target positions.
*
* This performs an in-place modification of the target positions to remove any target positions
* that are overlapped by other, valid target positions. For example, if the target string is "++"
* and the string to search is "+++" then there will be two potential targets at character offsets
* 0 and 1. The target at offset 0 is valid and overlaps the target at offset 1, invalidating the
* target at offset 1.
*
* @param[in,out] d_target_positions Potential target positions to filter in-place.
* @param[in] target_count Number of potential target positions.
* @param[in] target_size Size of the target string in bytes.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_overlap_target_positions(size_type* d_target_positions,
size_type target_count,
size_type target_size,
rmm::cuda_stream_view stream)
{
auto overlap_detector = [d_target_positions, target_size] __device__(size_type pos_idx) -> bool {
return (pos_idx > 0)
? d_target_positions[pos_idx] - d_target_positions[pos_idx - 1] < target_size
: false;
};
// count the potential number of overlapped target positions
size_type overlap_count =
thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(target_count),
overlap_detector);
if (overlap_count == 0) { return target_count; }
// create a vector indexing the potential overlapped target positions
rmm::device_uvector<size_type> potential_overlapped_pos_indices(overlap_count, stream);
auto d_potential_overlapped_pos_indices = potential_overlapped_pos_indices.data();
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(target_count),
d_potential_overlapped_pos_indices,
overlap_detector);
// filter out the false overlaps that are actually valid
rmm::device_uvector<size_type> overlapped_pos_indices(overlap_count, stream);
auto d_overlapped_pos_indices = overlapped_pos_indices.data();
auto overlap_end =
thrust::remove_copy_if(rmm::exec_policy(stream),
d_potential_overlapped_pos_indices,
d_potential_overlapped_pos_indices + overlap_count,
thrust::make_counting_iterator<size_type>(0),
d_overlapped_pos_indices,
target_false_overlap_filter_fn{
d_potential_overlapped_pos_indices, d_target_positions, target_size});
overlap_count = cudf::distance(d_overlapped_pos_indices, overlap_end);
// In-place remove any target positions that are overlapped by valid target positions
auto target_pos_end = thrust::remove_if(
rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_count,
thrust::make_counting_iterator<size_type>(0),
[d_overlapped_pos_indices, overlap_count] __device__(size_type target_position_idx) -> bool {
return thrust::binary_search(thrust::seq,
d_overlapped_pos_indices,
d_overlapped_pos_indices + overlap_count,
target_position_idx);
});
return cudf::distance(d_target_positions, target_pos_end);
}
/**
* @brief Filter target positions to remove any invalid target positions.
*
* This performs an in-place modification of the target positions to remove any target positions
* that are invalid, either by the target string overlapping a row boundary or being overlapped by
* another valid target string.
*
* @param[in,out] target_positions Potential target positions to filter in-place.
* @param[in] d_offsets_span Memory range encompassing the string column offsets.
* @param[in] target_size Size of the target string in bytes.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_false_target_positions(rmm::device_uvector<size_type>& target_positions,
device_span<int32_t const> d_offsets_span,
size_type target_size,
rmm::cuda_stream_view stream)
{
// In-place remove any positions for target strings that crossed string boundaries.
auto d_target_positions = target_positions.data();
auto target_pos_end =
thrust::remove_if(rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_positions.size(),
[d_offsets_span, target_size] __device__(size_type target_pos) -> bool {
// find the end of the string containing the start of this target
size_type const* offset_ptr = thrust::upper_bound(
thrust::seq, d_offsets_span.begin(), d_offsets_span.end(), target_pos);
return target_pos + target_size > *offset_ptr;
});
auto const target_count = cudf::distance(d_target_positions, target_pos_end);
if (target_count == 0) { return 0; }
// Filter out target positions that are the result of overlapping target matches.
return (target_count > 1)
? filter_overlap_target_positions(d_target_positions, target_count, target_size, stream)
: target_count;
}
/**
* @brief Filter target positions beyond the maximum target replacements per row limit.
*
* This performs an in-place modification of the target positions to remove any target positions
* corresponding to targets that should not be replaced due to the maximum target replacement per
* row limit.
*
* @param[in,out] target_positions Target positions to filter in-place.
* @param[in] target_count Number of target positions.
* @param[in] d_offsets_span Memory range encompassing the string column offsets.
* @param[in] max_repl_per_row Maximum target replacements per row limit.
* @param[in] stream CUDA stream to use for device operations.
* @return Number of target positions after filtering.
*/
size_type filter_maxrepl_target_positions(size_type* d_target_positions,
size_type target_count,
device_span<int32_t const> d_offsets_span,
size_type max_repl_per_row,
rmm::cuda_stream_view stream)
{
auto pos_to_row_fn = [d_offsets_span] __device__(size_type target_pos) -> size_type {
auto upper_bound =
thrust::upper_bound(thrust::seq, d_offsets_span.begin(), d_offsets_span.end(), target_pos);
return thrust::distance(d_offsets_span.begin(), upper_bound);
};
// compute the match count per row for each target position
rmm::device_uvector<size_type> match_counts(target_count, stream);
auto d_match_counts = match_counts.data();
thrust::inclusive_scan_by_key(
rmm::exec_policy(stream),
thrust::make_transform_iterator(d_target_positions, pos_to_row_fn),
thrust::make_transform_iterator(d_target_positions + target_count, pos_to_row_fn),
thrust::make_constant_iterator<size_type>(1),
d_match_counts);
// In-place remove any positions that exceed the per-row match limit
auto target_pos_end =
thrust::remove_if(rmm::exec_policy(stream),
d_target_positions,
d_target_positions + target_count,
d_match_counts,
[max_repl_per_row] __device__(size_type match_count) -> bool {
return match_count > max_repl_per_row;
});
return cudf::distance(d_target_positions, target_pos_end);
}
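// Hypothetical worked example (values chosen for illustration): with string offsets
// {0, 10, 20}, target positions {1, 4, 7, 12} and max_repl_per_row = 2, the scan by
// row key yields per-row match counts {1, 2, 3, 1}; the remove_if pass then drops
// position 7 (the third match within the first string) and returns a count of 3.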
/**
* @brief Scalar string replacement using a character-level parallel algorithm.
*
* Replaces occurrences of the target string with the replacement string using an algorithm with
* character-level parallelism. This algorithm will perform well when the strings in the string
* column are relatively long.
* @see BYTES_PER_VALID_ROW_THRESHOLD
*
* @param strings String column to search for target strings.
* @param chars_start Offset of the first character in the string column.
* @param chars_end Offset beyond the last character in the string column to search.
* @param d_target String to search for within the string column.
* @param d_repl Replacement string if target string is found.
* @param maxrepl Maximum times to replace if target appears multiple times in a string.
* @param stream CUDA stream to use for device operations
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column.
*/
std::unique_ptr<column> replace_char_parallel(strings_column_view const& strings,
size_type chars_start,
size_type chars_end,
string_view const& d_target,
string_view const& d_repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
auto const d_in_chars = strings.chars().data<char>();
auto const chars_bytes = chars_end - chars_start;
auto const target_size = d_target.size_bytes();
// detect a target match at the specified byte position
device_span<char const> const d_chars_span(d_in_chars, chars_end);
auto target_detector = [d_chars_span, d_target] __device__(size_type char_idx) {
auto target_size = d_target.size_bytes();
auto target_ptr = d_chars_span.begin() + char_idx;
return target_ptr + target_size <= d_chars_span.end() &&
d_target.compare(target_ptr, target_size) == 0;
};
// Count target string matches across all character positions, ignoring string boundaries and
// overlapping target strings. This may produce false-positives.
size_type target_count = thrust::count_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
thrust::make_counting_iterator<size_type>(chars_end),
target_detector);
if (target_count == 0) {
// nothing to replace, copy the input column
return std::make_unique<cudf::column>(strings.parent(), stream, mr);
}
// create a vector of the potential target match positions
rmm::device_uvector<size_type> target_positions(target_count, stream);
auto d_target_positions = target_positions.data();
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
thrust::make_counting_iterator<size_type>(chars_end),
d_target_positions,
target_detector);
device_span<int32_t const> d_offsets_span(d_offsets, offset_count);
if (target_size > 1) {
target_count =
filter_false_target_positions(target_positions, d_offsets_span, target_size, stream);
if (target_count == 0) {
// nothing to replace, copy the input column
return std::make_unique<cudf::column>(strings.parent(), stream, mr);
}
}
// filter out any target positions that exceed the per-row match limit
if (maxrepl > 0 && target_count > maxrepl) {
target_count = filter_maxrepl_target_positions(
d_target_positions, target_count, d_offsets_span, maxrepl, stream);
}
// build the offsets column
auto offsets_column = make_numeric_column(
data_type{type_id::INT32}, offset_count, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
auto delta_per_target = d_repl.size_bytes() - target_size;
device_span<size_type const> d_target_positions_span(d_target_positions, target_count);
auto offsets_update_fn =
[d_target_positions_span, delta_per_target, chars_start] __device__(int32_t offset) -> int32_t {
// determine the number of target positions occurring before this offset
size_type const* next_target_pos_ptr = thrust::lower_bound(
thrust::seq, d_target_positions_span.begin(), d_target_positions_span.end(), offset);
size_type num_prev_targets =
thrust::distance(d_target_positions_span.data(), next_target_pos_ptr);
return offset - chars_start + delta_per_target * num_prev_targets;
};
thrust::transform(rmm::exec_policy(stream),
d_offsets_span.begin(),
d_offsets_span.end(),
offsets_view.begin<int32_t>(),
offsets_update_fn);
// build the characters column
auto chars_column =
create_chars_child_column(chars_bytes + (delta_per_target * target_count), stream, mr);
auto d_out_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(chars_start),
chars_bytes,
target_replacer_fn{
d_target_positions_span, d_in_chars, d_out_chars, target_size, d_repl, chars_start});
// free the target positions buffer as it is no longer needed
(void)target_positions.release();
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
/**
* @brief Scalar string replacement using a row-level parallel algorithm.
*
* Replaces occurrences of the target string with the replacement string using an algorithm with
* row-level parallelism. This algorithm will perform well when the strings in the string
* column are relatively short.
* @see BYTES_PER_VALID_ROW_THRESHOLD
*
* @param strings String column to search for target strings.
* @param d_target String to search for within the string column.
* @param d_repl Replacement string if target string is found.
* @param maxrepl Maximum times to replace if target appears multiple times in a string.
* @param stream CUDA stream to use for device operations
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column.
*/
std::unique_ptr<column> replace_row_parallel(strings_column_view const& strings,
string_view const& d_target,
string_view const& d_repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto d_strings = column_device_view::create(strings.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_row_parallel_fn{*d_strings, d_target, d_repl, maxrepl}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
} // namespace
/**
* @copydoc cudf::strings::detail::replace(strings_column_view const&, string_scalar const&,
* string_scalar const&, int32_t, rmm::cuda_stream_view, rmm::mr::device_memory_resource*)
*/
template <>
std::unique_ptr<column> replace<replace_algorithm::AUTO>(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_column(data_type{type_id::STRING});
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(stream), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(stream), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
// determine range of characters in the base column
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
size_type const chars_start =
(strings.offset() == 0)
? 0
: cudf::detail::get_value<int32_t>(strings.offsets(), strings.offset(), stream);
size_type const chars_end = (offset_count == strings.offsets().size())
? strings.chars_size()
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset() + strings_count, stream);
size_type const chars_bytes = chars_end - chars_start;
auto const avg_bytes_per_row = chars_bytes / std::max(strings_count - strings.null_count(), 1);
return (avg_bytes_per_row < BYTES_PER_VALID_ROW_THRESHOLD)
? replace_row_parallel(strings, d_target, d_repl, maxrepl, stream, mr)
: replace_char_parallel(
strings, chars_start, chars_end, d_target, d_repl, maxrepl, stream, mr);
}
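// Worked dispatch example (illustrative numbers, not from the original source): with
// 1,000 non-null rows spanning 100,000 character bytes, avg_bytes_per_row == 100 >=
// BYTES_PER_VALID_ROW_THRESHOLD, so the character-parallel path is taken; 50,000 bytes
// over the same rows gives 50 and selects the row-parallel path instead.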
template <>
std::unique_ptr<column> replace<replace_algorithm::CHAR_PARALLEL>(
strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_column(data_type{type_id::STRING});
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(stream), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(stream), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
// determine range of characters in the base column
auto const strings_count = strings.size();
auto const offset_count = strings_count + 1;
auto const d_offsets = strings.offsets().data<int32_t>() + strings.offset();
size_type chars_start = (strings.offset() == 0) ? 0
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset(), stream);
size_type chars_end = (offset_count == strings.offsets().size())
? strings.chars_size()
: cudf::detail::get_value<int32_t>(
strings.offsets(), strings.offset() + strings_count, stream);
return replace_char_parallel(
strings, chars_start, chars_end, d_target, d_repl, maxrepl, stream, mr);
}
template <>
std::unique_ptr<column> replace<replace_algorithm::ROW_PARALLEL>(
strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_column(data_type{type_id::STRING});
if (maxrepl == 0) return std::make_unique<cudf::column>(strings.parent(), stream, mr);
CUDF_EXPECTS(repl.is_valid(stream), "Parameter repl must be valid.");
CUDF_EXPECTS(target.is_valid(stream), "Parameter target must be valid.");
CUDF_EXPECTS(target.size() > 0, "Parameter target must not be empty string.");
string_view d_target(target.data(), target.size());
string_view d_repl(repl.data(), repl.size());
return replace_row_parallel(strings, d_target, d_repl, maxrepl, stream, mr);
}
namespace {
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
struct replace_slice_fn {
column_device_view const d_strings;
string_view const d_repl;
size_type const start;
size_type const stop;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
auto const length = d_str.length();
char const* in_ptr = d_str.data();
auto const begin = d_str.byte_offset(((start < 0) || (start > length) ? length : start));
auto const end = d_str.byte_offset(((stop < 0) || (stop > length) ? length : stop));
if (d_chars) {
char* out_ptr = d_chars + d_offsets[idx];
out_ptr = copy_and_increment(out_ptr, in_ptr, begin); // copy beginning
out_ptr = copy_string(out_ptr, d_repl); // insert replacement
out_ptr = copy_and_increment(out_ptr, // copy end
in_ptr + end,
d_str.size_bytes() - end);
} else {
d_offsets[idx] = d_str.size_bytes() + d_repl.size_bytes() - (end - begin);
}
}
};
} // namespace
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_column(data_type{type_id::STRING});
CUDF_EXPECTS(repl.is_valid(stream), "Parameter repl must be valid.");
if (stop > 0) CUDF_EXPECTS(start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(), repl.size());
auto d_strings = column_device_view::create(strings.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_slice_fn{*d_strings, d_repl, start, stop}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
namespace {
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
struct replace_multi_fn {
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
int32_t* d_offsets{};
char* d_chars{};
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
auto const d_str = d_strings.element<string_view>(idx);
char const* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type spos = 0;
size_type lpos = 0;
char* out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
// check each character against each target
while (spos < d_str.size_bytes()) {
for (int tgt_idx = 0; tgt_idx < d_targets.size(); ++tgt_idx) {
auto const d_tgt = d_targets.element<string_view>(tgt_idx);
if ((d_tgt.size_bytes() <= (d_str.size_bytes() - spos)) && // check fit
(d_tgt.compare(in_ptr + spos, d_tgt.size_bytes()) == 0)) // and match
{
auto const d_repl = (d_repls.size() == 1) ? d_repls.element<string_view>(0)
: d_repls.element<string_view>(tgt_idx);
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
if (out_ptr) {
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes() - 1;
break;
}
}
++spos;
}
if (out_ptr) // copy remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = bytes;
}
};
} // namespace
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (strings.is_empty()) return make_empty_column(data_type{type_id::STRING});
CUDF_EXPECTS(((targets.size() > 0) && (targets.null_count() == 0)),
"Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS(((repls.size() > 0) && (repls.null_count() == 0)),
"Parameters repls must not be empty and must not have nulls");
if (repls.size() > 1)
CUDF_EXPECTS(repls.size() == targets.size(), "Sizes for targets and repls must match");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_targets = column_device_view::create(targets.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
// this utility calls the given functor to build the offsets and chars columns
auto children = cudf::strings::detail::make_strings_children(
replace_multi_fn{*d_strings, *d_targets, *d_repls}, strings.size(), stream, mr);
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
std::unique_ptr<column> replace_nulls(strings_column_view const& strings,
string_scalar const& repl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_empty_column(data_type{type_id::STRING});
CUDF_EXPECTS(repl.is_valid(stream), "Parameter repl must be valid.");
string_view d_repl(repl.data(), repl.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<int32_t>(0), [d_strings, d_repl] __device__(size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes()
: d_strings.element<string_view>(idx).size_bytes();
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_column->view(), strings_count, stream);
auto chars_column = create_chars_child_column(bytes, stream, mr);
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__(size_type idx) {
string_view d_str = d_repl;
if (!d_strings.is_null(idx)) d_str = d_strings.element<string_view>(idx);
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return make_strings_column(
strings_count, std::move(offsets_column), std::move(chars_column), 0, rmm::device_buffer{});
}
} // namespace detail
// external API
std::unique_ptr<column> replace(strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, target, repl, maxrepl, rmm::cuda_stream_default, mr);
}
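// Hedged usage sketch (not part of the original file): calling the public scalar
// replace API defined just above. `input` is a placeholder owning column.
//
//   auto const view = cudf::strings_column_view(input->view());
//   auto replaced   = cudf::strings::replace(view,
//                                            cudf::string_scalar("foo"),
//                                            cudf::string_scalar("bar"),
//                                            -1,  // maxrepl: replace all occurrences
//                                            rmm::mr::get_current_device_resource());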
std::unique_ptr<column> replace_slice(strings_column_view const& strings,
string_scalar const& repl,
size_type start,
size_type stop,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_slice(strings, repl, start, stop, rmm::cuda_stream_default, mr);
}
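// Hedged usage sketch (not part of the original file): replacing characters [1, 3) of
// every string with "XX" via the public replace_slice API above. `input` is a
// placeholder owning column.
//
//   auto const view = cudf::strings_column_view(input->view());
//   auto sliced     = cudf::strings::replace_slice(view,
//                                                  cudf::string_scalar("XX"),
//                                                  1 /*start*/, 3 /*stop*/,
//                                                  rmm::mr::get_current_device_resource());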
std::unique_ptr<column> replace(strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace(strings, targets, repls, rmm::cuda_stream_default, mr);
}
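// Hedged usage sketch (not part of the original file): column-wise multi-target
// replace via the public API above; `input`, `targets_col` and `repls_col` are
// placeholder owning columns.
//
//   auto result = cudf::strings::replace(cudf::strings_column_view(input->view()),
//                                        cudf::strings_column_view(targets_col->view()),
//                                        cudf::strings_column_view(repls_col->view()),
//                                        rmm::mr::get_current_device_resource());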
} // namespace strings
} // namespace cudf
#include <LightGBM/cuda/cuda_tree.hpp>
namespace LightGBM {
__device__ void SetDecisionTypeCUDA(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
__device__ void SetMissingTypeCUDA(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
__device__ bool GetDecisionTypeCUDA(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
__device__ int8_t GetMissingTypeCUDA(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
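// Bit-layout illustration (assuming LightGBM's usual mask values kCategoricalMask == 1
// and kDefaultLeftMask == 2, i.e. bits 0-1 hold the flags and bits 2-3 the missing
// type; the clear path (127 - mask) in SetDecisionTypeCUDA relies on single-bit masks):
//
//   int8_t dt = 0;
//   SetDecisionTypeCUDA(&dt, true, kDefaultLeftMask);  // dt == 0b0000'0010
//   SetMissingTypeCUDA(&dt, 2);                        // dt == 0b0000'1010
//   GetDecisionTypeCUDA(dt, kDefaultLeftMask);         // -> true
//   GetMissingTypeCUDA(dt);                            // -> 2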
__device__ bool IsZeroCUDA(double fval) {
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
__global__ void SplitKernel( // split information
const int leaf_index,
const int real_feature_index,
const double real_threshold,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
// tree structure
const int num_leaves,
int* leaf_parent,
int* leaf_depth,
int* left_child,
int* right_child,
int* split_feature_inner,
int* split_feature,
float* split_gain,
double* internal_weight,
double* internal_value,
data_size_t* internal_count,
double* leaf_weight,
double* leaf_value,
data_size_t* leaf_count,
int8_t* decision_type,
uint32_t* threshold_in_bin,
double* threshold) {
const int new_node_index = num_leaves - 1;
const int thread_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
const int parent_index = leaf_parent[leaf_index];
if (thread_index == 0) {
if (parent_index >= 0) {
// if cur node is left child
if (left_child[parent_index] == ~leaf_index) {
left_child[parent_index] = new_node_index;
} else {
right_child[parent_index] = new_node_index;
}
}
left_child[new_node_index] = ~leaf_index;
right_child[new_node_index] = ~num_leaves;
leaf_parent[leaf_index] = new_node_index;
leaf_parent[num_leaves] = new_node_index;
} else if (thread_index == 1) {
// add new node
split_feature_inner[new_node_index] = cuda_split_info->inner_feature_index;
} else if (thread_index == 2) {
split_feature[new_node_index] = real_feature_index;
} else if (thread_index == 3) {
split_gain[new_node_index] = static_cast<float>(cuda_split_info->gain);
} else if (thread_index == 4) {
// save current leaf value to internal node before change
internal_weight[new_node_index] = leaf_weight[leaf_index];
leaf_weight[leaf_index] = cuda_split_info->left_sum_hessians;
} else if (thread_index == 5) {
internal_value[new_node_index] = leaf_value[leaf_index];
leaf_value[leaf_index] = isnan(cuda_split_info->left_value) ? 0.0f : cuda_split_info->left_value;
} else if (thread_index == 6) {
internal_count[new_node_index] = cuda_split_info->left_count + cuda_split_info->right_count;
} else if (thread_index == 7) {
leaf_count[leaf_index] = cuda_split_info->left_count;
} else if (thread_index == 8) {
leaf_value[num_leaves] = isnan(cuda_split_info->right_value) ? 0.0f : cuda_split_info->right_value;
} else if (thread_index == 9) {
leaf_weight[num_leaves] = cuda_split_info->right_sum_hessians;
} else if (thread_index == 10) {
leaf_count[num_leaves] = cuda_split_info->right_count;
} else if (thread_index == 11) {
// update leaf depth
leaf_depth[num_leaves] = leaf_depth[leaf_index] + 1;
leaf_depth[leaf_index]++;
} else if (thread_index == 12) {
decision_type[new_node_index] = 0;
SetDecisionTypeCUDA(&decision_type[new_node_index], false, kCategoricalMask);
SetDecisionTypeCUDA(&decision_type[new_node_index], cuda_split_info->default_left, kDefaultLeftMask);
SetMissingTypeCUDA(&decision_type[new_node_index], static_cast<int8_t>(missing_type));
} else if (thread_index == 13) {
threshold_in_bin[new_node_index] = cuda_split_info->threshold;
} else if (thread_index == 14) {
threshold[new_node_index] = real_threshold;
}
}
void CUDATree::LaunchSplitKernel(const int leaf_index,
const int real_feature_index,
const double real_threshold,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info) {
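  // 3 blocks x 5 threads = 15 threads in total, one thread per branch of SplitKernel
  // (thread_index 0 through 14); each thread writes a disjoint piece of the tree
  // structure, so no synchronization between branches is needed.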
SplitKernel<<<3, 5, 0, cuda_stream_>>>(
// split information
leaf_index,
real_feature_index,
real_threshold,
missing_type,
cuda_split_info,
// tree structure
num_leaves_,
cuda_leaf_parent_,
cuda_leaf_depth_,
cuda_left_child_,
cuda_right_child_,
cuda_split_feature_inner_,
cuda_split_feature_,
cuda_split_gain_,
cuda_internal_weight_,
cuda_internal_value_,
cuda_internal_count_,
cuda_leaf_weight_,
cuda_leaf_value_,
cuda_leaf_count_,
cuda_decision_type_,
cuda_threshold_in_bin_,
cuda_threshold_);
}
__global__ void SplitCategoricalKernel( // split information
const int leaf_index,
const int real_feature_index,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
// tree structure
const int num_leaves,
int* leaf_parent,
int* leaf_depth,
int* left_child,
int* right_child,
int* split_feature_inner,
int* split_feature,
float* split_gain,
double* internal_weight,
double* internal_value,
data_size_t* internal_count,
double* leaf_weight,
double* leaf_value,
data_size_t* leaf_count,
int8_t* decision_type,
uint32_t* threshold_in_bin,
double* threshold,
size_t cuda_bitset_len,
size_t cuda_bitset_inner_len,
int num_cat,
int* cuda_cat_boundaries,
int* cuda_cat_boundaries_inner) {
const int new_node_index = num_leaves - 1;
const int thread_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
const int parent_index = leaf_parent[leaf_index];
if (thread_index == 0) {
if (parent_index >= 0) {
// if cur node is left child
if (left_child[parent_index] == ~leaf_index) {
left_child[parent_index] = new_node_index;
} else {
right_child[parent_index] = new_node_index;
}
}
left_child[new_node_index] = ~leaf_index;
right_child[new_node_index] = ~num_leaves;
leaf_parent[leaf_index] = new_node_index;
leaf_parent[num_leaves] = new_node_index;
} else if (thread_index == 1) {
// add new node
split_feature_inner[new_node_index] = cuda_split_info->inner_feature_index;
} else if (thread_index == 2) {
split_feature[new_node_index] = real_feature_index;
} else if (thread_index == 3) {
split_gain[new_node_index] = static_cast<float>(cuda_split_info->gain);
} else if (thread_index == 4) {
// save current leaf value to internal node before change
internal_weight[new_node_index] = leaf_weight[leaf_index];
leaf_weight[leaf_index] = cuda_split_info->left_sum_hessians;
} else if (thread_index == 5) {
internal_value[new_node_index] = leaf_value[leaf_index];
leaf_value[leaf_index] = isnan(cuda_split_info->left_value) ? 0.0f : cuda_split_info->left_value;
} else if (thread_index == 6) {
internal_count[new_node_index] = cuda_split_info->left_count + cuda_split_info->right_count;
} else if (thread_index == 7) {
leaf_count[leaf_index] = cuda_split_info->left_count;
} else if (thread_index == 8) {
leaf_value[num_leaves] = isnan(cuda_split_info->right_value) ? 0.0f : cuda_split_info->right_value;
} else if (thread_index == 9) {
leaf_weight[num_leaves] = cuda_split_info->right_sum_hessians;
} else if (thread_index == 10) {
leaf_count[num_leaves] = cuda_split_info->right_count;
} else if (thread_index == 11) {
// update leaf depth
leaf_depth[num_leaves] = leaf_depth[leaf_index] + 1;
leaf_depth[leaf_index]++;
} else if (thread_index == 12) {
decision_type[new_node_index] = 0;
SetDecisionTypeCUDA(&decision_type[new_node_index], true, kCategoricalMask);
SetMissingTypeCUDA(&decision_type[new_node_index], static_cast<int8_t>(missing_type));
} else if (thread_index == 13) {
threshold_in_bin[new_node_index] = num_cat;
} else if (thread_index == 14) {
threshold[new_node_index] = num_cat;
} else if (thread_index == 15) {
if (num_cat == 0) {
cuda_cat_boundaries[num_cat] = 0;
}
cuda_cat_boundaries[num_cat + 1] = cuda_cat_boundaries[num_cat] + cuda_bitset_len;
} else if (thread_index == 16) {
if (num_cat == 0) {
cuda_cat_boundaries_inner[num_cat] = 0;
}
cuda_cat_boundaries_inner[num_cat + 1] = cuda_cat_boundaries_inner[num_cat] + cuda_bitset_inner_len;
}
}
void CUDATree::LaunchSplitCategoricalKernel(const int leaf_index,
const int real_feature_index,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
size_t cuda_bitset_len,
size_t cuda_bitset_inner_len) {
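  // 3 blocks x 6 threads = 18 threads; SplitCategoricalKernel uses thread_index
  // 0 through 16 (one branch per thread), so the last thread is left idle.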
SplitCategoricalKernel<<<3, 6, 0, cuda_stream_>>>(
// split information
leaf_index,
real_feature_index,
missing_type,
cuda_split_info,
// tree structure
num_leaves_,
cuda_leaf_parent_,
cuda_leaf_depth_,
cuda_left_child_,
cuda_right_child_,
cuda_split_feature_inner_,
cuda_split_feature_,
cuda_split_gain_,
cuda_internal_weight_,
cuda_internal_value_,
cuda_internal_count_,
cuda_leaf_weight_,
cuda_leaf_value_,
cuda_leaf_count_,
cuda_decision_type_,
cuda_threshold_in_bin_,
cuda_threshold_,
cuda_bitset_len,
cuda_bitset_inner_len,
num_cat_,
cuda_cat_boundaries_.RawData(),
cuda_cat_boundaries_inner_.RawData());
}
__global__ void ShrinkageKernel(const double rate, double* cuda_leaf_value, const int num_leaves) {
const int leaf_index = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (leaf_index < num_leaves) {
cuda_leaf_value[leaf_index] *= rate;
}
}
void CUDATree::LaunchShrinkageKernel(const double rate) {
const int num_threads_per_block = 1024;
const int num_blocks = (num_leaves_ + num_threads_per_block - 1) / num_threads_per_block;
ShrinkageKernel<<<num_blocks, num_threads_per_block>>>(rate, cuda_leaf_value_, num_leaves_);
}
__global__ void AddBiasKernel(const double val, double* cuda_leaf_value, const int num_leaves) {
const int leaf_index = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (leaf_index < num_leaves) {
cuda_leaf_value[leaf_index] += val;
}
}
void CUDATree::LaunchAddBiasKernel(const double val) {
const int num_threads_per_block = 1024;
const int num_blocks = (num_leaves_ + num_threads_per_block - 1) / num_threads_per_block;
AddBiasKernel<<<num_blocks, num_threads_per_block>>>(val, cuda_leaf_value_, num_leaves_);
}
} // namespace LightGBM
#endif // USE_CUDA_EXP
* which demonstrates the use of CUDA in a multi phase sorting
* computation.
* Device code.
*/
#ifndef _RADIXSORT_KERNEL_H_
#define _RADIXSORT_KERNEL_H_
#include <stdio.h>
#include "radixsort.cuh"
#define SYNCIT __syncthreads()
static const int NUM_SMS = 16;
static const int NUM_THREADS_PER_SM = 192;
static const int NUM_THREADS_PER_BLOCK = 64;
//static const int NUM_THREADS = NUM_THREADS_PER_SM * NUM_SMS;
static const int NUM_BLOCKS = (NUM_THREADS_PER_SM / NUM_THREADS_PER_BLOCK) * NUM_SMS;
static const int RADIX = 8; // Number of bits per radix sort pass
static const int RADICES = 1 << RADIX; // Number of radices
static const int RADIXMASK = RADICES - 1; // Mask for each radix sort pass
#if SIXTEEN
static const int RADIXBITS = 16; // Number of bits to sort over
#else
static const int RADIXBITS = 32; // Number of bits to sort over
#endif
static const int RADIXTHREADS = 16; // Number of threads sharing each radix counter
static const int RADIXGROUPS = NUM_THREADS_PER_BLOCK / RADIXTHREADS; // Number of radix groups per CTA
static const int TOTALRADIXGROUPS = NUM_BLOCKS * RADIXGROUPS; // Number of radix groups for each radix
static const int SORTRADIXGROUPS = TOTALRADIXGROUPS * RADICES; // Total radix count
static const int GRFELEMENTS = (NUM_THREADS_PER_BLOCK / RADIXTHREADS) * RADICES;
static const int GRFSIZE = GRFELEMENTS * sizeof(uint);
// Prefix sum variables
static const int PREFIX_NUM_THREADS_PER_SM = NUM_THREADS_PER_SM;
static const int PREFIX_NUM_THREADS_PER_BLOCK = PREFIX_NUM_THREADS_PER_SM;
static const int PREFIX_NUM_BLOCKS = (PREFIX_NUM_THREADS_PER_SM / PREFIX_NUM_THREADS_PER_BLOCK) * NUM_SMS;
static const int PREFIX_BLOCKSIZE = SORTRADIXGROUPS / PREFIX_NUM_BLOCKS;
static const int PREFIX_GRFELEMENTS = PREFIX_BLOCKSIZE + 2 * PREFIX_NUM_THREADS_PER_BLOCK;
static const int PREFIX_GRFSIZE = PREFIX_GRFELEMENTS * sizeof(uint);
// Shuffle variables
static const int SHUFFLE_GRFOFFSET = RADIXGROUPS * RADICES;
static const int SHUFFLE_GRFELEMENTS = SHUFFLE_GRFOFFSET + PREFIX_NUM_BLOCKS;
static const int SHUFFLE_GRFSIZE = SHUFFLE_GRFELEMENTS * sizeof(uint);
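// Derived values under the default parameterisation above (pure arithmetic from the
// constants, listed for reference; byte sizes assume a 4-byte uint):
//   NUM_BLOCKS = 48, RADICES = 256, RADIXMASK = 255, RADIXGROUPS = 4,
//   TOTALRADIXGROUPS = 192, SORTRADIXGROUPS = 49152,
//   GRFELEMENTS = 1024 (GRFSIZE = 4096 bytes),
//   PREFIX_NUM_BLOCKS = 16, PREFIX_BLOCKSIZE = 3072,
//   PREFIX_GRFELEMENTS = 3456 (PREFIX_GRFSIZE = 13824 bytes),
//   SHUFFLE_GRFOFFSET = 1024, SHUFFLE_GRFELEMENTS = 1040 (SHUFFLE_GRFSIZE = 4160 bytes).
// The 3072 in elements_rounded_to_3072 below equals TOTALRADIXGROUPS * RADIXTHREADS.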
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// Prefix sum data
uint gRadixSum[TOTALRADIXGROUPS * RADICES];
__device__ uint dRadixSum[TOTALRADIXGROUPS * RADICES];
uint gRadixBlockSum[PREFIX_NUM_BLOCKS];
__device__ uint dRadixBlockSum[PREFIX_NUM_BLOCKS];
extern __shared__ uint sRadixSum[];
////////////////////////////////////////////////////////////////////////////////
//! Perform a radix sum on the list to be sorted. Each SM holds a set of
//! radix counters for each of its RADIXGROUPS groups of threads in the GRF.
//!
//! @param pData input data
//! @param elements total number of elements
//! @param elements_rounded_to_3072 total number of elements rounded up to the
//! nearest multiple of 3072
//! @param shift the shift (0 to 24) that we are using to obtain the correct
//! byte
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixSum(KeyValuePair *pData, uint elements, uint elements_rounded_to_3072, uint shift)
{
uint pos = threadIdx.x;
// Zero radix counts
while (pos < GRFELEMENTS)
{
sRadixSum[pos] = 0;
pos += NUM_THREADS_PER_BLOCK;
}
// Sum up data
// Source addresses computed so that each thread is reading from a block of
// consecutive addresses so there are no conflicts between threads
// They then loop over their combined region and the next batch works elsewhere.
// So threads 0 to 15 work on memory 0 to 320.
// First reading 0,1,2,3...15 then 16,17,18,19...31 and so on
// optimising parallel access to shared memory by a thread accessing 16*threadID
// The next radix group runs from 320 to 640 and the same applies in that region
uint tmod = threadIdx.x % RADIXTHREADS;
uint tpos = threadIdx.x / RADIXTHREADS;
// Take the rounded element list size so that all threads have a certain size dataset to work with
// and no zero size datasets confusing the issue
// By using a multiple of 3072 we ensure that all threads have elements
// to work with until the last phase, at which point we individually test
uint element_fraction = elements_rounded_to_3072 / TOTALRADIXGROUPS;
// Generate range
// Note that it is possible for both pos and end to be past the end of the element set
// which will be caught later.
pos = (blockIdx.x * RADIXGROUPS + tpos) * element_fraction;
uint end = pos + element_fraction;
pos += tmod;
//printf("pos: %d\n", pos);
__syncthreads();
while (pos < end )
{
uint key = 0;
// Read first data element if we are in the set of elements
//if( pos < elements )
//key = pData[pos].key;
KeyValuePair kvp;
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
kvp = pData[pos];
else
kvp.key = 0;
key = kvp.key;
// Calculate position of radix counter to increment
// There are RADICES radices in each pass (256)
// and hence this many counters for bin grouping
// Multiply by RADIXGROUPS (4) to spread through memory
// and into 4 radix groups
uint p = ((key >> shift) & RADIXMASK) * RADIXGROUPS;
// Increment radix counters
// Each radix group has its own set of counters
// so we add the thread position [0-3], ie the group index.
// We slow down here and take at least 16 cycles to write to the summation boxes
// but other groups will only conflict with themselves and so can also be writing
// 16 cycles here at least avoids retries.
uint ppos = p + tpos;
// If we are past the last element we don't want to do anything
// We do have to check each time, however, to ensure that all
// threads sync on each sync here.
if (tmod == 0 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 1 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 2 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 3 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 4 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 5 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 6 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 7 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 8 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 9 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 10 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 11 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 12 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 13 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 14 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
if (tmod == 15 && pos < elements)
sRadixSum[ppos]++;
SYNCIT;
pos += RADIXTHREADS;
}
__syncthreads();
__syncthreads();
// Output radix sums into separate memory regions for each radix group
// So this memory then is layed out:
// 0...... 192..... 384 ................ 192*256
// ie all 256 bins for each radix group
// in there:
// 0.............192
// 0 4 8 12... - block idx * 4
// And in the block boxes we see the 4 radix groups for that block
// So 0-192 should contain bin 0 for each radix group, and so on
uint offset = blockIdx.x * RADIXGROUPS;
uint row = threadIdx.x / RADIXGROUPS;
uint column = threadIdx.x % RADIXGROUPS;
while (row < RADICES)
{
dRadixSum[offset + row * TOTALRADIXGROUPS + column] = sRadixSum[row * RADIXGROUPS + column];
row += NUM_THREADS_PER_BLOCK / RADIXGROUPS;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Performs first part of parallel prefix sum - individual sums of each radix
//! count. By the end of this we have prefix sums on a block level in dRadixSum
//! and totals for blocks in dRadixBlockSum.
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixPrefixSum()
{
// Read radix groups in offset by one in the GRF so a zero can be inserted at the beginning
// and the final sum of all radix counts summed here is tacked onto the end for reading by
// the next stage
// Each block in this case is the full number of threads per SM (and hence the total number
// of radix groups), 192. We should then have the total set of offsets for an entire radix
// group by the end of this stage
// Device mem addressing
uint brow = blockIdx.x * (RADICES / PREFIX_NUM_BLOCKS);
uint drow = threadIdx.x / TOTALRADIXGROUPS; // In default parameterisation this is always 0
uint dcolumn = threadIdx.x % TOTALRADIXGROUPS; // And similarly this is always the same as threadIdx.x
uint dpos = (brow + drow) * TOTALRADIXGROUPS + dcolumn;
uint end = ((blockIdx.x + 1) * (RADICES / PREFIX_NUM_BLOCKS)) * TOTALRADIXGROUPS;
// Shared mem addressing
uint srow = threadIdx.x / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint scolumn = threadIdx.x % (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint spos = srow * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) + scolumn;
// Read (RADICES / PREFIX_NUM_BLOCKS) radix counts into the GRF alongside each other
while (dpos < end)
{
sRadixSum[spos] = dRadixSum[dpos];
spos += (PREFIX_NUM_THREADS_PER_BLOCK / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK)) *
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
dpos += (TOTALRADIXGROUPS / PREFIX_NUM_THREADS_PER_BLOCK) * TOTALRADIXGROUPS;
}
__syncthreads();
// Perform preliminary sum on each thread's stretch of data
// Each thread having a block of 16, with a spacer after each: 0...15 [16] 17...32 [33] and so on
int pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
end = pos + (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
uint sum = 0;
while (pos < end)
{
sum += sRadixSum[pos];
sRadixSum[pos] = sum;
pos++;
}
__syncthreads();
// Calculate internal offsets by performing a more traditional parallel
// prefix sum of the topmost member of each thread's work data. Right now,
// these are stored between the work data for each thread, allowing us to
// eliminate GRF conflicts as well as hold the offsets needed to complete the sum
// In other words we have:
// 0....15 16 17....32 33 34....
// Where this first stage updates the intermediate values (so 16=15, 33=32 etc)
int m = (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) +
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
sRadixSum[pos] = sRadixSum[pos - 1];
__syncthreads();
// This stage then performs a parallel prefix sum (ie use powers of 2 to propagate in log n stages)
// to update 17, 34 etc with the totals to that point (so 34 becomes [34] + [17]) and so on.
while (m < PREFIX_NUM_THREADS_PER_BLOCK * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1))
{
int p = pos - m;
uint t = ((p > 0) ? sRadixSum[p] : 0);
__syncthreads();
sRadixSum[pos] += t;
__syncthreads();
m *= 2;
}
__syncthreads();
// Add internal offsets to each thread's work data.
// So now we take 17 and add it to all values 18 to 33 so all offsets for that block
// are updated.
pos = threadIdx.x * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
end = pos + (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
int p = pos - 1;
sum = ((p > 0) ? sRadixSum[p] : 0);
while (pos < end)
{
sRadixSum[pos] += sum;
pos++;
}
__syncthreads();
// Write summed data back out to global memory in the same way as we read it in
// We now have prefix sum values internal to groups
brow = blockIdx.x * (RADICES / PREFIX_NUM_BLOCKS);
drow = threadIdx.x / TOTALRADIXGROUPS;
dcolumn = threadIdx.x % TOTALRADIXGROUPS;
srow = threadIdx.x / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
scolumn = threadIdx.x % (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK);
dpos = (brow + drow) * TOTALRADIXGROUPS + dcolumn + 1;
spos = srow * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) + scolumn;
end = ((blockIdx.x + 1) * RADICES / PREFIX_NUM_BLOCKS) * TOTALRADIXGROUPS;
while (dpos < end)
{
dRadixSum[dpos] = sRadixSum[spos];
dpos += (TOTALRADIXGROUPS / PREFIX_NUM_THREADS_PER_BLOCK) * TOTALRADIXGROUPS;
spos += (PREFIX_NUM_THREADS_PER_BLOCK / (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK)) *
(PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1);
}
// Write last element to summation
// Storing block sums in a separate array
if (threadIdx.x == 0) {
dRadixBlockSum[blockIdx.x] = sRadixSum[PREFIX_NUM_THREADS_PER_BLOCK * (PREFIX_BLOCKSIZE / PREFIX_NUM_THREADS_PER_BLOCK + 1) - 1];
dRadixSum[blockIdx.x * PREFIX_BLOCKSIZE] = 0;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Initially perform prefix sum of block totals to obtain final set of offsets.
//! Then make use of radix sums to perform a shuffling of the data into the
//! correct bins.
//!
//! @param pSrc input data
//! @param pDst output data
//! @param elements total number of elements
//! @param shift the shift (0 to 24) that we are using to obtain the correct
//! byte
////////////////////////////////////////////////////////////////////////////////
__global__ void RadixAddOffsetsAndShuffle(KeyValuePair* pSrc, KeyValuePair* pDst, uint elements, uint elements_rounded_to_3072, int shift)
{
// Read offsets from previous blocks
if (threadIdx.x == 0)
sRadixSum[SHUFFLE_GRFOFFSET] = 0;
if (threadIdx.x < PREFIX_NUM_BLOCKS - 1)
sRadixSum[SHUFFLE_GRFOFFSET + threadIdx.x + 1] = dRadixBlockSum[threadIdx.x];
__syncthreads();
// Parallel prefix sum over block sums
int pos = threadIdx.x;
int n = 1;
while (n < PREFIX_NUM_BLOCKS)
{
int ppos = pos - n;
uint t0 = ((pos < PREFIX_NUM_BLOCKS) && (ppos >= 0)) ? sRadixSum[SHUFFLE_GRFOFFSET + ppos] : 0;
__syncthreads();
if (pos < PREFIX_NUM_BLOCKS)
sRadixSum[SHUFFLE_GRFOFFSET + pos] += t0;
__syncthreads();
n *= 2;
}
// Read radix count data and add appropriate block offset
// for each radix at the memory location for this thread
// (where the other threads in the block will be reading
// as well, hence the large stride).
// There is one counter box per radix group per radix
// per block (4*256*3)
// We use 64 threads to read the 4 radix groups set of radices
// for the block.
int row = threadIdx.x / RADIXGROUPS;
int column = threadIdx.x % RADIXGROUPS;
int spos = row * RADIXGROUPS + column;
int dpos = row * TOTALRADIXGROUPS + column + blockIdx.x * RADIXGROUPS;
while (spos < SHUFFLE_GRFOFFSET)
{
sRadixSum[spos] = dRadixSum[dpos] + sRadixSum[SHUFFLE_GRFOFFSET + dpos / (TOTALRADIXGROUPS * RADICES / PREFIX_NUM_BLOCKS)];
spos += NUM_THREADS_PER_BLOCK;
dpos += (NUM_THREADS_PER_BLOCK / RADIXGROUPS) * TOTALRADIXGROUPS;
}
__syncthreads();
//int pos;
// Shuffle data
// Each of the subbins for a block should be filled via the counters, properly interleaved
// Then, as we now iterate over each data value, we increment the subbins (each thread in the
// radix group in turn to avoid missed writes due to conflicts) and set locations correctly.
uint element_fraction = elements_rounded_to_3072 / TOTALRADIXGROUPS;
int tmod = threadIdx.x % RADIXTHREADS;
int tpos = threadIdx.x / RADIXTHREADS;
pos = (blockIdx.x * RADIXGROUPS + tpos) * element_fraction;
uint end = pos + element_fraction; //(blockIdx.x * RADIXGROUPS + tpos + 1) * element_fraction;
pos += tmod;
__syncthreads();
while (pos < end )
{
KeyValuePair kvp;
#if 1 // old load
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
{
kvp = pSrc[pos];
}
else
kvp.key = 0;
#else // casting to float2 to get it to combine loads
int2 kvpf2;
// Read first data element, both items at once as the memory will want to coalesce like that anyway
if (pos < elements)
{
// kvp = pSrc[pos];
kvpf2 = ((int2*)pSrc)[pos];
// printf("kvp: %f %f kvpf2: %f %f\n", kvp.key, kvp.value, kvpf2.x, kvpf2.y);
}
else
//kvp.key = 0;
kvpf2.x = 0;
kvp.key = kvpf2.x;
kvp.value = kvpf2.y;
#endif
uint index;
// Calculate position of radix counter to increment
uint p = ((kvp.key >> shift) & RADIXMASK) * RADIXGROUPS;
// Move data, keeping counts updated.
// Increment radix counters, relying on hexadecathread
// warp to prevent this code from stepping all over itself.
uint ppos = p + tpos;
if (tmod == 0 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 1 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 2 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 3 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 4 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 5 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 6 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 7 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 8 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 9 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 10 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 11 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 12 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 13 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 14 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
if (tmod == 15 && pos < elements)
{
index = sRadixSum[ppos]++;
pDst[index] = kvp;
}
SYNCIT;
pos += RADIXTHREADS;
}
__syncthreads();
}
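// Illustrative sketch (not part of the original sample): each pass of this
// sort bins keys by the byte selected with `shift`, exactly as the kernel
// above does via ((kvp.key >> shift) & RADIXMASK). A full 32-bit key sort
// therefore chains the count/prefix/shuffle passes with shift = 0, 8, 16, 24,
// ping-ponging the source and destination buffers between passes. The helper
// below only captures that digit arithmetic; RADIXMASK is assumed to be the
// low-byte mask (RADICES - 1) defined earlier in this header.
static inline unsigned int radixDigit(unsigned int key, int shift)
{
return (key >> shift) & RADIXMASK;
}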
#endif // #ifndef _RADIXSORT_KERNEL_H_
namespace caffe2 {
namespace {
template <typename T>
__global__ void LRNFillScaleNCHW(const int nthreads, const T* in,
const int num, const int channels, const int height,
const int width, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// recover the pointers for the next loop.
in -= offset;
scale -= offset;
}
}
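// Reference sketch (an addition for clarity, not part of the original
// operator): per spatial location the kernel above computes, for every
// channel c,
//   scale[c] = bias + alpha_over_size * sum of in[k]^2 over the window
//              k in [c - pre_pad, c + post_pad], clipped to [0, channels),
// maintaining the windowed sum incrementally. The straightforward
// O(channels * size) host equivalent below is useful as a mental model or a
// correctness check.
template <typename T>
void LRNFillScaleReferenceRow(
    const T* in, const int channels, const int size,
    const T alpha_over_size, const T bias, T* scale) {
  const int pre_pad = (size - 1) / 2;
  for (int c = 0; c < channels; ++c) {
    T accum = 0;
    for (int k = c - pre_pad; k < c - pre_pad + size; ++k) {
      if (k >= 0 && k < channels) {
        accum += in[k] * in[k];
      }
    }
    scale[c] = bias + accum * alpha_over_size;
  }
}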
template <typename T>
__global__ void LRNFillScaleNHWC(const int nthreads, const T* in,
const int num, const int height, const int width,
const int channels, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pre_pad = (size - 1) / 2;
scale[index] = 0;
for (int i = 0; i < size; ++i) {
int raw_idx = c + i - pre_pad;
if (raw_idx >= 0 && raw_idx < channels) {
scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad];
}
}
scale[index] = bias + scale[index] * alpha_over_size;
}
}
// TODO(Yangqing): check if it would be faster to just put it into the previous
// kernel.
template <typename T>
__global__ void LRNComputeOutput(const int nthreads, const T* in,
const T* scale, const T negative_beta, T* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
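// Elementwise this evaluates the LRN forward definition out = in * scale^(-beta);
// the callers below pass negative_beta = -beta_.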
template <typename T>
__global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const T negative_beta,
const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// recover pointer for next iteration.
bottom_data -= offset;
top_data -= offset;
scale -= offset;
top_diff -= offset;
bottom_diff -= offset;
}
}
// This local response normalization gradient does one sum per output location
// and does not use the running trick for 1-d convolution: thus it might not be
// the fastest implementation.
template <typename T>
__global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int height, const int width, const int channels,
const int size, const T negative_beta, const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local channel offset
int c = index % channels;
int pre_pad = size / 2;
T accum_ratio = 0;
for (int i = -pre_pad; i < size - pre_pad; ++i) {
if (c + i >= 0 && c + i < channels) {
accum_ratio += top_diff[index + i] * top_data[index + i] /
scale[index + i];
}
}
bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) -
cache_ratio * bottom_data[index] * accum_ratio;
}
}
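// In both gradient kernels above, cache_ratio is supplied by the callers as
// 2 * alpha * beta / size, so each output is
//   bottom_diff = top_diff * scale^(-beta)
//                 - cache_ratio * bottom_data * sum(top_diff * top_data / scale)
// with the sum taken over the same cross-channel window as the forward pass.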
} // namespace
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const float* Xdata = X.data<float>();
Y->ResizeLike(X);
float* Ydata = Y->mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->mutable_data<float>();
int n_threads = N * H * W;
LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
n_threads = X.size();
LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
Y->ResizeLike(X);
float* Ydata = Y->mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->mutable_data<float>();
int n_threads = X.size();
LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.size(), Y.size());
DCHECK_EQ(X.size(), dY.size());
dX->ResizeLike(X);
const float* Xdata = X.data<float>();
const float* Ydata = Y.data<float>();
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->mutable_data<float>();
int n_threads = N * H * W;
LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
LRNComputeDiffNCHW<float><<<CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_,
2.f * alpha_ * beta_ / size_, dXdata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.size(), Y.size());
DCHECK_EQ(X.size(), dY.size());
dX->ResizeLike(X);
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->mutable_data<float>();
int n_threads = X.size();
LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
LRNComputeDiffNHWC<float><<<CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
X.size(), X.data<float>(), Y.data<float>(), scale_data,
dY.data<float>(),
X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), size_, -beta_,
2.f * alpha_ * beta_ / size_, dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>);
} // namespace caffe2
#pragma once
#ifndef INCLUDED_CURE_VERTEX_SHADER_INPUT
#define INCLUDED_CURE_VERTEX_SHADER_INPUT
#include <math/vector.h>
#include <ptx_primitives.cuh>
#include "shader.cuh"
namespace internal
{
template <unsigned int i, unsigned int j, typename... T>
struct PackedBufferAttribute;
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 0, float, Tail...>
{
static constexpr int next_i = i;
static constexpr int next_j = 1;
__device__
static inline float fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
return ldg_cs(d + i).x;
return d[i].x;
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 1, float, Tail...>
{
static constexpr int next_i = i;
static constexpr int next_j = 2;
__device__
static inline float fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
return ldg_cs(d + i).y;
return d[i].y;
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 2, float, Tail...>
{
static constexpr int next_i = i;
static constexpr int next_j = 3;
__device__
static inline float fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
return ldg_cs(d + i).z;
else
return d[i].z;
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 3, float, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 0;
__device__
static inline float fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
return ldg_cs(d + i).w;
else
return d[i].w;
}
};
template <unsigned int i, unsigned int j, typename... Tail>
struct PackedBufferAttribute<i, j, int, Tail...>
{
private:
typedef PackedBufferAttribute<i, j, float, Tail...> E;
public:
static constexpr int next_i = E::next_i;
static constexpr int next_j = E::next_j;
__device__
static inline int fetch(const float4* d)
{
return __float_as_int(E::fetch(d));
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 0, math::float2, Tail...>
{
static constexpr int next_i = i;
static constexpr int next_j = 2;
__device__
static inline math::float2 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v = ldg_cs(d + i);
return { v.x, v.y };
}
return math::float2(d[i].x, d[i].y);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 1, math::float2, Tail...>
{
static constexpr int next_i = i;
static constexpr int next_j = 3;
__device__
static inline math::float2 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v = ldg_cs(d + i);
return { v.y, v.z };
}
return math::float2(d[i].y, d[i].z);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 2, math::float2, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 0;
__device__
static inline math::float2 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v = ldg_cs(d + i);
return { v.z, v.w };
}
return math::float2(d[i].z, d[i].w);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 3, math::float2, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 1;
__device__
static inline math::float2 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v0 = ldg_cs(d + i);
auto v1 = ldg_cs(d + i + 1);
return { v0.w, v1.x };
}
return math::float2(d[i].w, d[i + 1].x);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 0, math::float3, Tail...>
{
static constexpr int next_i = i;
static constexpr int next_j = 3;
__device__
static inline math::float3 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v = ldg_cs(d + i);
return { v.x, v.y, v.z };
}
return math::float3(d[i].x, d[i].y, d[i].z);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 1, math::float3, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 0;
__device__
static inline math::float3 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v = ldg_cs(d + i);
return { v.y, v.z, v.w };
}
return math::float3(d[i].y, d[i].z, d[i].w);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 2, math::float3, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 1;
__device__
static inline math::float3 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v0 = ldg_cs(d + i);
auto v1 = ldg_cs(d + i + 1);
return { v0.z, v0.w, v1.x };
}
return math::float3(d[i].z, d[i].w, d[i + 1].x);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 3, math::float3, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 2;
__device__
static inline math::float3 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v0 = ldg_cs(d + i);
auto v1 = ldg_cs(d + i + 1);
return { v0.w, v1.x, v1.y };
}
return math::float3(d[i].w, d[i + 1].x, d[i + 1].y);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 0, math::float4, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 0;
__device__
static inline math::float4 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v = ldg_cs(d + i);
return { v.x, v.y, v.z, v.w };
}
return math::float4(d[i].x, d[i].y, d[i].z, d[i].w);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 1, math::float4, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 1;
__device__
static inline math::float4 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v0 = ldg_cs(d + i);
auto v1 = ldg_cs(d + i + 1);
return { v0.y, v0.z, v0.w, v1.x };
}
return math::float4(d[i].y, d[i].z, d[i].w, d[i + 1].x);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 2, math::float4, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 2;
__device__
static inline math::float4 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v0 = ldg_cs(d + i);
auto v1 = ldg_cs(d + i + 1);
return { v0.z, v0.w, v1.x, v1.y };
}
return math::float4(d[i].z, d[i].w, d[i + 1].x, d[i + 1].y);
}
};
template <unsigned int i, typename... Tail>
struct PackedBufferAttribute<i, 3, math::float4, Tail...>
{
static constexpr int next_i = i + 1;
static constexpr int next_j = 3;
__device__
static inline math::float4 fetch(const float4* d)
{
if (VERTEX_FETCH_CS)
{
auto v0 = ldg_cs(d + i);
auto v1 = ldg_cs(d + i + 1);
return { v0.w, v1.x, v1.y, v1.z };
}
return math::float4(d[i].w, d[i + 1].x, d[i + 1].y, d[i + 1].z);
}
};
}
template <typename... Elements>
class PackedBufferAttributes;
template <>
class PackedBufferAttributes<>
{
protected:
__device__
PackedBufferAttributes(const float4* d)
{
}
template <unsigned int i, unsigned int j>
__device__
void fetch(const float4* d)
{
}
public:
template <typename F, typename... Args>
__device__
auto read(F& reader, const Args&... args) const
{
return reader(args...);
}
};
template <typename T, typename... Tail>
class PackedBufferAttributes<T, Tail...> : private PackedBufferAttributes<Tail...>
{
private:
T data;
protected:
template <unsigned int i, unsigned int j>
__device__
void fetch(const float4* d)
{
typedef internal::PackedBufferAttribute<i, j, T, Tail...> E;
data = E::fetch(d);
PackedBufferAttributes<Tail...>::template fetch<E::next_i, E::next_j>(d);
}
public:
__device__
PackedBufferAttributes(const float4* d)
: PackedBufferAttributes<Tail...>(d)
{
fetch<0, 0>(d);
}
template <typename F, typename... Args>
__device__
auto read(F& reader, const Args&... args) const
{
return PackedBufferAttributes<Tail...>::read(reader, args..., data);
}
};
template <unsigned int STRIDE>
class VertexBuffer
{
static_assert(STRIDE % sizeof(float4) == 0, "ERROR: vertex attribute data must be 16 byte aligned");
public:
__device__
static const float4* attributes(unsigned int index)
{
return vertex_buffer + STRIDE / sizeof(float4) * index;
}
};
template <typename VertexBuffer, typename... Elements>
struct VertexBufferAttributes : PackedBufferAttributes<Elements...>
{
using Signature = ShaderSignature<Elements...>;
__device__
VertexBufferAttributes(unsigned int vertex_index)
: PackedBufferAttributes<Elements...>(VertexBuffer::attributes(vertex_index))
{
}
};
template <typename Head, typename... Tail>
struct InputVertexAttributes;
template <typename Head>
struct InputVertexAttributes<Head> : private Head
{
using Signature = typename Head::Signature;
__device__
InputVertexAttributes(unsigned int vertex_index)
: Head(vertex_index)
{
}
template <typename F>
__device__
auto read(F& reader) const
{
return Head::read(reader);
}
};
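// Usage sketch (hypothetical, assuming a vertex layout not defined in this
// header): a stream packing a float3 position and a float2 uv into a 32-byte
// stride could be declared roughly as
//
//   using MyVertexBuffer = VertexBuffer<2 * sizeof(float4)>;
//   using MyAttributes = VertexBufferAttributes<MyVertexBuffer,
//                                               math::float3,   // position
//                                               math::float2>;  // uv
//   // InputVertexAttributes<MyAttributes> attributes(vertex_index);
//   // auto result = attributes.read(vertex_shader_body);
//
// where vertex_shader_body is a hypothetical callable taking the unpacked
// attributes; only the template names come from this header.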
//template <typename Head, typename... Tail>
//struct InputVertexAttributes : private Head, private InputVertexAttributes<Tail...>
//{
// __device__
// InputVertexAttributes(unsigned int vertex_index)
// : Head(vertex_index), InputVertexAttributes<Tail...>(vertex_index)
// {
// }
//
// template <typename F, typename... Args>
// __device__
// auto read(F reader, const Args&... args) const
// {
// return Head::read(reader, args..., data);
// }
//};
#endif // INCLUDED_CURE_VERTEX_SHADER_INPUT
#include <iostream>
#include <chrono>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#include "Common.cuh"
#include "StochasticLut.cuh"
// -------------------------------------------------
// Forward
// -------------------------------------------------
// real type
template<int N=6, typename T=float, int MAX_NODE_UNIT=32>
__global__ void kernal_StochasticLut_Forward(
T const *x_buf,
T *y_buf,
int const *input_index,
T const *W_buf,
int node_size,
int frame_size,
int frame_stride,
int input_binary,
int lut_binarize,
T unbinarize_bias
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T W[(1<<N)][MAX_NODE_UNIT];
T const *x_ptr[N];
T *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1<<N); i += id_step ) {
W[i][node_id] = W_buf[node * (1<<N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > 0.5 ? 1.0 : 0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[frame_stride * input_index[N*node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
for (int frame = id; frame < frame_size; frame += id_step) {
if ( node < node_size ) {
T x[N];
if ( input_binary ) {
for ( int i = 0; i < N; ++i) {
x[i] = 0.5 + ((x_ptr[i][frame] > 0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x[i] = min(1.0, max(0.0, x_ptr[i][frame]));
}
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
// clamp
y = max(0.0, y);
y = min(1.0, y);
y_ptr[frame] = y;
}
}
}
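// Note on the math: StochasticLut<N>::NodeForward (defined in
// StochasticLut.cuh, not shown here) is expected to evaluate the soft LUT
// read
//   y = sum over k in [0, 2^N) of W[k] * prod over i of (x[i] if bit i of k
//       else (1 - x[i])),
// i.e. the expected LUT output when each input is treated as an independent
// probability of being 1. The clamps above keep the inputs and the output
// inside [0, 1] so that interpretation stays valid.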
template <int N>
BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Forward
(
const float *dev_x_buf,
float *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
int node_size,
int frame_size,
int frame_stride,
int input_binary,
int lut_binarize,
float unbinarize_bias,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 512;
unsigned int const MAX_FRAME_UNIT = 512;
unsigned int const MAX_NODE_UNIT = 64;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_StochasticLut_Forward<N, float, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
node_size,
frame_size,
frame_stride,
input_binary,
lut_binarize,
unbinarize_bias
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// bit packing
template<int N=6, typename T=float, int MAX_NODE_UNIT=32>
__global__ void kernal_bit_StochasticLut_Forward(
int const *x_buf,
T *y_buf,
int const *input_index,
T const *W_buf,
int node_size,
int frame_size,
int frame_stride,
int bin_frame_stride,
int binary_mode,
T unbinarize_bias
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
int const *x_ptr[N];
T *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( binary_mode ) {
W[i][node_id] = W[i][node_id] > 0.5 ? 1.0 : 0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[bin_frame_stride * input_index[N*node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
for (int frame = id; frame < frame_size; frame += id_step) {
if ( node < node_size ) {
int bit_mask = (1 << (frame & 0x1f));
int unit = (frame >> 5);
// read x
T x[N];
for ( int i = 0; i < N; ++i) {
x[i] = 0.5 + ((x_ptr[i][unit] & bit_mask) ? +unbinarize_bias : -unbinarize_bias);
}
// calculate
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
// clamp
y = max(0.0, y);
y = min(1.0, y);
y_ptr[frame] = y;
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Forward
(
int const *dev_x_buf,
float *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
int node_size,
int frame_size,
int frame_stride,
int bin_frame_stride,
int lut_binarize,
float unbinarize_bias,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 512;
unsigned int const MAX_FRAME_UNIT = 512;
unsigned int const MAX_NODE_UNIT = 64;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_bit_StochasticLut_Forward<N, float, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
node_size,
frame_size,
frame_stride,
bin_frame_stride,
lut_binarize,
unbinarize_bias
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// bit packing and binarize
template<int N=6, typename T=float, int MAX_NODE_UNIT=32>
__global__ void kernal_bit_bit_StochasticLut_Forward(
int const *x_buf,
int *y_buf,
int const *input_index,
T const *W_buf,
int node_size,
int frame_size,
int frame_stride,
int binary_mode,
T unbinarize_bias
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
int const *x_ptr[N];
int *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( binary_mode ) {
W[i][node_id] = W[i][node_id] > 0.5 ? 1.0 : 0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[frame_stride * input_index[N*node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
T unbinarize_hi = 0.5 + unbinarize_bias;
T unbinarize_lo = 0.5 - unbinarize_bias;
if ( node < node_size ) {
int frame_unit_size = ((frame_size + 0x1f) & ~0x1f);
for (int frame = id; frame < frame_unit_size; frame += id_step) {
int y_mask = 0;
int unit = (frame >> 5);
int bit = (frame & 0x1f);
int bit_mask = (1 << bit);
if ( frame < frame_size ) {
// read x
T x[N];
for ( int i = 0; i < N; ++i) {
x[i] = ((x_ptr[i][unit] & bit_mask) ? unbinarize_hi : unbinarize_lo);
}
// calculate
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
// binarize
if ( y > 0.5 ) {
y_mask = bit_mask;
}
}
// OR
y_mask = device_int_ShuffleOr(y_mask);
if ( bit == 0 ) {
y_ptr[unit] = y_mask;
}
}
}
}
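// device_int_ShuffleOr (declared in Common.cuh) is assumed to OR-reduce one
// int across the 32 lanes of a warp so that the lane handling bit 0 of the
// packed word can store the combined mask. A plausible shuffle-based sketch,
// which may differ from the actual helper:
//
//   __device__ __forceinline__ int warp_or(int v)
//   {
//       for (int offset = 16; offset > 0; offset >>= 1)
//           v |= __shfl_xor_sync(0xffffffff, v, offset);
//       return v;
//   }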
template <int N>
BBCU_DLL_EXPORT int bbcu_bit_bit_fp32_StochasticLut_Forward
(
int const *dev_x_buf,
int *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
float unbinarize_bias,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 512;
unsigned int const MAX_FRAME_UNIT = 512;
unsigned int const MAX_NODE_UNIT = THREAD_SIZE / 32;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_bit_bit_StochasticLut_Forward<N, float, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
node_size,
frame_size,
frame_stride,
lut_binarize,
unbinarize_bias
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// -------------------------------------------------
// Backward
// -------------------------------------------------
// real type
template<int N=6, typename T=float, int MAX_FRAME_UNIT=256, int MAX_NODE_UNIT=16>
__global__ void kernal_StochasticLut_Backward
(
T const *x_buf,
T const *dy_buf,
T *dx_buf,
int const *input_index,
T const *W_buf,
T *dW_buf,
int node_size,
int frame_size,
int frame_stride,
int dx_frame_stride,
int input_binary,
int lut_binarize,
T unbinarize_bias
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T dW_prev[(1 << N)][MAX_NODE_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T dW[(1 << N)];
T const *x_ptr[N];
T const *dy_ptr;
// initialize dW
if ( node < node_size ) {
for ( int i = 0; i < (1 << N); ++i) {
dW[i] = 0;
}
for ( int i = id; i < (1 << N); i += id_step ) {
dW_prev[i][node_id] = dW_buf[node * (1 << N) + i];
}
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > 0.5 ? 1.0 : 0.0;
}
}
// init pointer
for ( int i = 0; i < N; ++i ) {
int input_node = input_index[N*node + i];
x_ptr[i] = &x_buf[input_node * frame_stride];
}
dy_ptr = &dy_buf[node * frame_stride];
}
__syncthreads();
for ( int frame = id; frame < frame_size; frame += id_step ) {
if ( node < node_size ) {
// read x
T x[N];
if ( input_binary ) {
for ( int i = 0; i < N; ++i) {
x[i] = 0.5 +((x_ptr[i][frame] > 0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x[i] = max(0.0, min(1.0, x_ptr[i][frame]));
}
}
// read dy
T dy = dy_ptr[frame];
// calculate
StochasticLut<N, T, MAX_NODE_UNIT>::NodeBackward(node_id, x, dy, &dx_buf[node*N*dx_frame_stride + frame], W, dW, dx_frame_stride);
}
}
for ( int i = 0; i < (1 << N); ++i ) {
dW[i] = device_fp32_LocalSum(dW[i], sbuf[node_id]);
}
if ( node < node_size ) {
if ( id == 0 ) {
for ( int i = 0; i < (1 << N); ++i) {
dW_buf[node*(1 << N) + i] = dW[i] + dW_prev[i][node_id];
}
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Backward
(
float const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
float *dev_dx_tmp,
int const *dev_input_index,
int const *dev_reverse_index,
float const *dev_W,
float *dev_dW,
int reverse_index_stride,
int input_node_size,
int output_node_size,
int frame_size,
int frame_stride,
int tmp_frame_size,
int tmp_frame_stride,
int input_binary,
int lut_binarize,
float unbinarize_bias,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int frame_offset = 0;
do {
int unit_frame_size = frame_size - frame_offset;
if (unit_frame_size > tmp_frame_size) {
unit_frame_size = tmp_frame_size;
}
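// Each chunk is processed in two steps: the per-output-node backward kernel
// writes its partial dx into dev_dx_tmp, then kernal_NodeIntegrateWithTable
// gathers those partials back into dev_dx_buf via the reverse index, so the
// temporary buffer only needs tmp_frame_size frames.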
{
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (output_node_size + (block.y - 1)) / block.y);
kernal_StochasticLut_Backward<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf + frame_offset,
dev_dy_buf + frame_offset,
dev_dx_tmp,
dev_input_index,
dev_W,
dev_dW,
output_node_size,
unit_frame_size,
frame_stride,
tmp_frame_stride,
input_binary,
lut_binarize,
unbinarize_bias
);
BB_CUDA_CHECK_LAST_ERROR();
}
{
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((unit_frame_size + (block.x - 1)) / block.x, (input_node_size + (block.y - 1)) / block.y);
kernal_NodeIntegrateWithTable<float><<<grid, block>>>
(
dev_dx_tmp,
dev_dx_buf + frame_offset,
dev_reverse_index,
reverse_index_stride,
input_node_size,
unit_frame_size,
tmp_frame_stride,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
frame_offset += unit_frame_size;
} while ( frame_offset < frame_size );
return 0;
}
// bit packing
template<int N=6, typename T=float, int MAX_FRAME_UNIT=256, int MAX_NODE_UNIT=16>
__global__ void kernal_bit_StochasticLut_Backward
(
int const *x_buf,
T const *dy_buf,
T *dx_buf,
int const *input_index,
T const *W_buf,
T *dW_buf,
int node_size,
int frame_size,
int x_frame_stride,
int dy_frame_stride,
int dx_frame_stride,
int lut_binarize,
T unbinarize_bias
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T dW_prev[(1 << N)][MAX_NODE_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T dW[(1 << N)];
int const *x_ptr[N];
T const *dy_ptr;
// initialize dW
if ( node < node_size ) {
for ( int i = 0; i < (1 << N); ++i) {
dW[i] = 0;
}
for ( int i = id; i < (1 << N); i += id_step ) {
dW_prev[i][node_id] = dW_buf[node * (1 << N) + i];
}
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > 0.5 ? 1.0 : 0.0;
}
}
// init pointer
for ( int i = 0; i < N; ++i ) {
int input_node = input_index[N*node + i];
x_ptr[i] = &x_buf[input_node * x_frame_stride];
}
dy_ptr = &dy_buf[node * dy_frame_stride];
}
__syncthreads();
for ( int frame = id; frame < frame_size; frame += id_step ) {
if ( node < node_size ) {
int bit = (1 << (frame & 0x1f));
int unit = (frame >> 5);
// read x
T x[N];
for ( int i = 0; i < N; ++i) {
x[i] = 0.5 +((x_ptr[i][unit] & bit) ? +unbinarize_bias : -unbinarize_bias);
}
// read dy
T dy = dy_ptr[frame];
// calculate
StochasticLut<N, T, MAX_NODE_UNIT>::NodeBackward(node_id, x, dy, &dx_buf[node*N*dx_frame_stride + frame], W, dW, dx_frame_stride);
}
}
// write dW
for ( int i = 0; i < (1 << N); ++i ) {
dW[i] = device_fp32_LocalSum(dW[i], sbuf[node_id]);
}
if ( node < node_size ) {
if ( id == 0 ) {
for ( int i = 0; i < (1 << N); ++i) {
dW_buf[node*(1 << N) + i] = dW[i] + dW_prev[i][node_id];
}
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Backward
(
int const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
float *dev_dx_tmp,
int const *dev_input_index,
int const *dev_reverse_index,
float const *dev_W,
float *dev_dW,
int reverse_index_stride,
int input_node_size,
int output_node_size,
int frame_size,
int frame_stride,
int bin_frame_stride,
int tmp_frame_size,
int tmp_frame_stride,
int lut_binarize,
float unbinarize_bias,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
int frame_offset = 0;
do {
int unit_frame_size = frame_size - frame_offset;
if (unit_frame_size > tmp_frame_size) {
unit_frame_size = tmp_frame_size;
}
{
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= output_node_size) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (output_node_size + (block.y - 1)) / block.y);
kernal_bit_StochasticLut_Backward<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf + (frame_offset / 32),
dev_dy_buf + frame_offset,
dev_dx_tmp,
dev_input_index,
dev_W,
dev_dW,
output_node_size,
unit_frame_size,
bin_frame_stride,
frame_stride,
tmp_frame_stride,
lut_binarize,
unbinarize_bias
);
BB_CUDA_CHECK_LAST_ERROR();
}
{
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((unit_frame_size + (block.x - 1)) / block.x, (input_node_size + (block.y - 1)) / block.y);
kernal_NodeIntegrateWithTable<float><<<grid, block>>>
(
dev_dx_tmp,
dev_dx_buf + frame_offset,
dev_reverse_index,
reverse_index_stride,
input_node_size,
unit_frame_size,
tmp_frame_stride,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
frame_offset += unit_frame_size;
} while ( frame_offset < frame_size );
return 0;
}
// Explicit template instantiation
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Forward<6>(const float *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Forward<5>(const float *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Forward<4>(const float *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Forward<3>(const float *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Forward<2>(const float *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Forward<6>(int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int bin_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Forward<5>(int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int bin_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Forward<4>(int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int bin_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Forward<3>(int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int bin_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Forward<2>(int const *dev_x_buf, float *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int bin_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_bit_fp32_StochasticLut_Forward<6>(int const *dev_x_buf, int *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_bit_fp32_StochasticLut_Forward<5>(int const *dev_x_buf, int *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_bit_fp32_StochasticLut_Forward<4>(int const *dev_x_buf, int *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_bit_fp32_StochasticLut_Forward<3>(int const *dev_x_buf, int *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_bit_fp32_StochasticLut_Forward<2>(int const *dev_x_buf, int *dev_y_buf, int const *dev_input_index, float const *dev_W, int node_size, int frame_size, int frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Backward<6>(float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int tmp_frame_size, int tmp_frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Backward<5>(float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int tmp_frame_size, int tmp_frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Backward<4>(float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int tmp_frame_size, int tmp_frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Backward<3>(float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int tmp_frame_size, int tmp_frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_fp32_StochasticLut_Backward<2>(float const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int tmp_frame_size, int tmp_frame_stride, int input_binary, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Backward<6>(int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int bin_frame_stride, int tmp_frame_size, int tmp_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Backward<5>(int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int bin_frame_stride, int tmp_frame_size, int tmp_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Backward<4>(int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int bin_frame_stride, int tmp_frame_size, int tmp_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Backward<3>(int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int bin_frame_stride, int tmp_frame_size, int tmp_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_StochasticLut_Backward<2>(int const *dev_x_buf, float const *dev_dy_buf, float *dev_dx_buf, float *dev_dx_tmp, int const *dev_input_index, int const *dev_reverse_index, float const *dev_W, float *dev_dW, int reverse_index_stride, int input_node_size, int output_node_size, int frame_size, int frame_stride, int bin_frame_stride, int tmp_frame_size, int tmp_frame_stride, int lut_binarize, float unbinarize_bias, cudaStream_t streamId);
// end of file
#define FULL_MASK 0xffffffff
#define WARP_SIZE 32
/* Utility: Compute Offset Within Matrix From Addr [0,0]
*/
template<typename data_t, typename index_t>
__device__ __forceinline__
data_t* pointerOffset(data_t * base_pointer,
size_t const ld,
index_t const row,
index_t const col)
{
return base_pointer + (ld * row + col);
}
/* Utility: Nonnegative Integer Powers Of 2
*/
__device__ __forceinline__
int pow2(int exponent)
{
int val = 1;
for (int j = 0; j < exponent; j++) val *= 2;
return val;
}
/* Utility: Value Of Coef Leading
* B_{j,m}(xi) : xi = ti + delta
*/
template<typename data_t>
__device__ __forceinline__
data_t aCoef(int imj, int m, data_t delta){
return (delta + (data_t)imj) / (data_t)m;
}
/* Utility: Value Of Coef Leading
* B_{j+1,m}(xi) : xi = ti + delta
*/
template<typename data_t>
__device__ __forceinline__
data_t bCoef(int imj, int m, data_t delta){
return ((data_t)m - (data_t)imj - delta) / (data_t)m;
}
/* Each Block Is (Potentially Subset Of) A Single Row
* (2D Grid Of 1D Blocks)
*/
// TODO restrict pointers for better compiler optim
template<typename index_t>
__global__
void repeatKernel(index_t const*const g_repeats,
index_t const*const g_offsets,
index_t *const g_out)
{
// Num Repeats & Offset Shared Across Entire Block
__shared__ int s_repeat;
__shared__ int s_offset;
// Thread ID
const int xid = blockIdx.x * blockDim.x + threadIdx.x;
const int yid = blockIdx.y;
// tid0 Responsible For Global -> Shared Mem Transfer
if (threadIdx.x == 0){
s_repeat = g_repeats[yid];
s_offset = (yid > 0) ? g_offsets[yid - 1] : 0;
}
// In-Bounds Threads Write RowID As Output
__syncthreads();
if (xid < s_repeat)
g_out[s_offset + xid] = yid;
}
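/* The kernel above is the GPU form of a run-length expansion, i.e. the
 * host-side equivalent of
 *
 *   int pos = 0;
 *   for (int row = 0; row < num_rows; ++row)
 *       for (int r = 0; r < repeats[row]; ++r)
 *           out[pos++] = row;
 *
 * with g_offsets holding the inclusive prefix sums of g_repeats so that each
 * block knows where its row's run begins in the output.
 */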
///* Each Block Is A Single Nonzero Row
//* (1D Grid Of 1D Blocks)
//*/
//// TODO restrict pointers for better compiler optim
//template<typename data_t, typename index_t>
//__global__
//void pointwiseSubKernel(
//data_t *const g_convData,
//size_t const ldConvData,
//data_t const*const*const g_tempDataPtrs,
//size_t const ldTempData,
//index_t const*const*const g_tempIndPtrs,
//index_t const*const g_spikeLookup,
//index_t const*const g_spikeTemps,
//index_t const*const g_spikeTimes,
//index_t const*const g_spikeRowOffsets
//){
//// Each Thread-Block Maps To One Row, So Row-Heads Can Be Shared
//__shared__ data_t * s_convDataRowHead;
//__shared__ data_t const* s_tempDataRowHead;
//// tid0 Is Responsible For Computing Row-Heads
//if(threadIdx.x == 0)
//{
//// Lookup Spike's Template Row Offset
//const index_t spike = g_spikeLookup[blockIdx.x];
//const index_t temp = g_spikeTemps[spike];
//const index_t tempRowId = (spike > 0) ?
//(blockIdx.x - g_spikeRowOffsets[spike-1]) : blockIdx.x;
//// Compute Template & Time Offset In Convolved
//s_convDataRowHead = pointerOffset<data_t, index_t>(g_convData,
//ldConvData,
//g_tempIndPtrs[temp][tempRowId],
//g_spikeTimes[spike]);
//// Compute Nonzero-Row Offset Within Template
//s_tempDataRowHead = pointerOffset<data_t const, index_t>(g_tempDataPtrs[temp],
//ldTempData,
//tempRowId,
//0);
//}
//// In-Bounds Threads Perform Indpendent Reads->Sub->Write
////unsigned mask = __ballot_sync(FULL_MASK, threadIdx.x < ldTempData);
//__syncthreads();
//if (threadIdx.x < ldTempData)
//{
//// Compute Thread-Unique Addresses
////__syncwarp(mask);
//data_t const*const t_tempDataAddr = s_tempDataRowHead + threadIdx.x;
////__syncwarp(mask);
//data_t *const t_convDataAddr = s_convDataRowHead + threadIdx.x;
//// Perform Global Mem Reads
////__syncwarp(mask);
//// TODO handle scaling directly on templates? Alternatively as a param?
//data_t const t_tempDataElem = *t_tempDataAddr * -2;
//// Write Results To Global Mem
////__syncwarp(mask);
//atomicAdd(t_convDataAddr, t_tempDataElem);
//}
//}
/* Each Block Is A Single Nonzero Row
* (1D Grid Of 1D Blocks)
* Subtract interpolated values from energy function after being given
* coefficients for cubic B-Spline and time offset:
* Assumes:
* Equidistant knots, constant offset, Boundary padding during fit.
*/
template<typename data_t, typename index_t, int order>
__global__
void splineSubKernel(
size_t const numCoef,
data_t *const g_energyVals,
size_t const ldEnergyVals,
data_t const*const*const g_tempCoefPtrs,
size_t const ldTempCoefs,
index_t const*const*const g_tempIndPtrs,
index_t const*const g_eventIdLookup,
index_t const*const g_eventTempIds,
index_t const*const g_eventTimeIdx,
data_t const*const g_eventTimeOffset,
index_t const*const g_eventBlockOffset,
//data_t const g_tempScaling
data_t const*const g_tempScaling_array
)
{
// Basis Evals Same In Every Thread (Uniform Spacing) => Only Compute Once
__shared__ data_t s_basisVals[order + 1];
data_t t_basisVals[order + 1];
// Each Coef Used In M Different Threads => Only Accesses Gmem Once
extern __shared__ data_t s_tempCoefs[];
// Each Block Matches Single Rows => Only Access/Compute Once
__shared__ data_t const* s_tempCoefHeadAddr;
__shared__ data_t * s_energyValHeadAddr;
__shared__ data_t s_timeOffset;
__shared__ data_t s_spikeScale;
// Compute 2^order For Use In Basis Evaluation
const int pow2order = pow2(order);
// Initialize Evaled Basis Func Smem Buffer to 0
if (threadIdx.x < order + 1){
s_basisVals[threadIdx.x] = 0;
}
// tid0 Is Responsible For Computing Address Heads & Evaluating Bases
if(threadIdx.x == 0)
{
// Lookup Spike's Template Row Offset
const index_t eventId = g_eventIdLookup[blockIdx.x];
const index_t tempId = g_eventTempIds[eventId];
const index_t tempBlockId = (eventId > 0) ?
(blockIdx.x - g_eventBlockOffset[eventId-1]) : blockIdx.x;
// Read Event Time Offsets & Adjust Time Idx
index_t t_eventTimeIdx = g_eventTimeIdx[eventId];
data_t t_timeOffset = g_eventTimeOffset[eventId] * -1;
data_t t_spikeScale = g_tempScaling_array[eventId];
if(t_timeOffset < 0){
t_timeOffset += 1;
t_eventTimeIdx += 1;
}
s_timeOffset = t_timeOffset;
s_spikeScale = t_spikeScale;
// Compute Address Of First Modified Value In Energy Function
s_energyValHeadAddr = pointerOffset<data_t, index_t>(
g_energyVals,
ldEnergyVals,
g_tempIndPtrs[tempId][tempBlockId],
t_eventTimeIdx
);
// Compute Address Of First Spline Coefficient In Template
s_tempCoefHeadAddr = pointerOffset<data_t const, index_t>(
g_tempCoefPtrs[tempId],
ldTempCoefs,
tempBlockId,
0 // Assuming We Have Only Stored Interior Coeffs
);
}
// Precompute Intermediate Coefficients & Atomic Add Bases
__syncthreads();
if (threadIdx.x < pow2order)
{
// Initialize Temporary Quantities
const data_t t_timeOffset = s_timeOffset;
int id = threadIdx.x;
int split = pow2order;
int imj = 0; // i - j : B_j,m(xi + delta)
data_t coef = 1;
// Compute Intermediate Coefficients
for (int m = 1; m <= order; m++){
split /= 2;
if (id < split){
coef *= bCoef<data_t>(imj, m, t_timeOffset);
imj += 1;
} else {
coef *= aCoef<data_t>(imj, m, t_timeOffset);
}
id = id % split;
}
// Write Temporary Values To Buffer According To # Of Shifts
atomicAdd(s_basisVals + (order - imj), coef);
}
// Copy Evaluated Basis From SMem Into Thread-local Registers
__syncthreads();
for (int j = 0; j < order + 1; j++){
t_basisVals[j] = s_basisVals[j];
}
// In-Bounds Threads Perform Read/Write Of Template Coefs To Smem
__syncthreads();
data_t const*const t_tempCoefAddr = s_tempCoefHeadAddr+ threadIdx.x;
if (threadIdx.x < numCoef){
s_tempCoefs[threadIdx.x] = *t_tempCoefAddr;
}
// In-Bounds Threads Perform Reconstruction
__syncthreads();
data_t t_tempVal = 0;
if (threadIdx.x < numCoef - order - 1){
for (int j=0; j < order + 1; j++){
t_tempVal -= s_tempCoefs[threadIdx.x + j] * t_basisVals[j];
}
}
// In-Bounds Threads Perform Subtraction
__syncthreads();
data_t *const t_energyValAddr = s_energyValHeadAddr + threadIdx.x;
if (threadIdx.x < numCoef - order - 1){
//atomicAdd(t_energyValAddr, g_tempScaling * t_tempVal);
atomicAdd(t_energyValAddr, s_spikeScale * t_tempVal);
}
}
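/* Reference sketch (an illustrative addition, not part of the original kernel
 * set): evaluate the (order + 1) uniform B-spline basis weights at fractional
 * offset delta with the same a/b expansion the kernel runs across 2^order
 * threads. Each of the 2^order paths through the recursion contributes its
 * coefficient product to the bin selected by how many b-branches (imj
 * increments) it took; the coefficient formulas are inlined here because
 * aCoef/bCoef above are device-only.
 */
template<typename data_t, int order>
void evalUniformBasisReference(data_t const delta, data_t (&basis)[order + 1])
{
    for (int j = 0; j < order + 1; j++) basis[j] = 0;
    const int paths = 1 << order; // pow2(order)
    for (int path = 0; path < paths; path++){
        int id = path;
        int split = paths;
        int imj = 0;
        data_t coef = 1;
        for (int m = 1; m <= order; m++){
            split /= 2;
            if (id < split){
                coef *= ((data_t)m - (data_t)imj - delta) / (data_t)m; // bCoef
                imj += 1;
            } else {
                coef *= (delta + (data_t)imj) / (data_t)m;             // aCoef
            }
            id = id % split;
        }
        basis[order - imj] += coef;
    }
}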
/* Each Block Is A Single Spike (i.e. row of convData)
* (1D Grid Of 1D Blocks)
*/
// TODO restrict pointers for better compiler optim
template<typename data_t, typename index_t>
__global__
void refracFillKernel(
size_t const fill_length,
size_t const fill_offset,
data_t const fill_value,
data_t *const g_convData,
size_t const ldConvData,
index_t const*const g_spikeTemps,
index_t const*const g_spikeTimes
){
// Get Addr For tid0 By Looking Up Spike Time(col) & Template(row)
__shared__ data_t *s_convDataRowHead;
if(threadIdx.x == 0){
s_convDataRowHead = pointerOffset<data_t, index_t>(
g_convData,
ldConvData,
g_spikeTemps[blockIdx.x],
g_spikeTimes[blockIdx.x] + fill_offset
);
}
// In-Bounds Threads Perform Independent Writes
__syncthreads();
if (threadIdx.x < fill_length){
data_t *const t_convDataAddr = s_convDataRowHead + threadIdx.x;
// old method replaced value
//*t_convDataAddr = fill_value;
*t_convDataAddr += fill_value;
}
}
/*
*/
void launchRepeatKernel(
at::Tensor & repeat_indices,
at::Tensor const& repeats,
at::Tensor const& offsets
){
// Determine Launch Configuration
const int largestRow = at::max(repeats).item<int>();
const int block = (largestRow > 288) ? (288) : largestRow;
const dim3 grid((largestRow + 287) / 288, repeats.size(0));
// Dispatch Kernel
repeatKernel<int64_t><<<grid, block>>>(repeats.data<int64_t>(),
offsets.data<int64_t>(),
repeat_indices.data<int64_t>());
// TODO: Remove Cuda Error Checking For Performance
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
/*
*/
//void launchPointwiseSubKernel(
//float *const d_convData,
//size_t const ldConvData,
//float const*const*const d_tempDataPtrs,
//size_t const ldTempData,
//int64_t const*const*const d_tempIndPtrs,
//int64_t const*const d_spikeLookup,
//size_t const nnzRows,
//int64_t const*const d_spikeTemps,
//int64_t const*const d_spikeTimes,
//int64_t const*const d_spikeRowOffset
//){
//// Determine Launch Configuration
//const int block = ((ldTempData + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE;
//const int grid = nnzRows;
//// Dispatch Kernel
//pointwiseSubKernel<float, int64_t><<<grid, block>>>(d_convData,
//ldConvData,
//d_tempDataPtrs,
//ldTempData,
//d_tempIndPtrs,
//d_spikeLookup,
//d_spikeTemps,
//d_spikeTimes,
//d_spikeRowOffset);
//// TODO: Remove Cuda Error Checking For Performance
//cudaError_t errSync = cudaGetLastError();
//cudaError_t errAsync = cudaDeviceSynchronize();
//if (errSync != cudaSuccess)
//printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
//if (errAsync != cudaSuccess)
//printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
//}
/* Launches splineSubKernel (one block per event) to subtract the scaled,
 * spline-reconstructed templates from the energy function.
 */
void launchSplineSubKernel(
size_t const numCoef,
float *const d_energyVals,
size_t const ldEnergyVals,
float const*const*const d_tempCoefPtrs,
size_t const ldTempCoefs,
int64_t const*const*const d_tempIndPtrs,
int64_t const*const d_eventIdLookup,
size_t const blockCount,
int64_t const*const d_eventTempIds,
int64_t const*const d_eventTimeIdx,
float const*const d_eventTimeOffset,
int64_t const*const d_eventBlockOffset,
//float const d_tempScaling
float const*const d_tempScaling_array
)
{
// Determine Launch Configuration
const size_t ORDER = 3; // Hard Coded To Compile Cubic Bspline Kernel
const int block = ((numCoef + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE;
const int grid = blockCount;
const size_t smemAlloc = sizeof(float) * numCoef;
// Dispatch Kernel
splineSubKernel<float, int64_t, ORDER><<<grid, block, smemAlloc>>>(
numCoef,
d_energyVals,
ldEnergyVals,
d_tempCoefPtrs,
ldTempCoefs,
d_tempIndPtrs,
d_eventIdLookup,
d_eventTempIds,
d_eventTimeIdx,
d_eventTimeOffset,
d_eventBlockOffset,
//d_tempScaling
d_tempScaling_array
);
// TODO: Remove Cuda Error Checking For Performance
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
/* Launches refracFillKernel (one block per spike) to add `fill_value` to a
 * window of `fill_length` samples of convData, starting at each spike time
 * plus `fill_offset`.
 */
void launchRefracFillKernel(
size_t const fill_length,
size_t const fill_offset,
float const fill_value,
float *const d_convData,
size_t const ldConvData,
int64_t const*const d_spikeTemps,
int64_t const*const d_spikeTimes,
size_t const numSpikes
){
// Determine Launch Configuration
const int block = ((fill_length + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE;
const int grid = numSpikes;
// Dispatch Kernel
refracFillKernel<float, int64_t><<<grid, block>>>(fill_length,
fill_offset,
fill_value,
d_convData,
ldConvData,
d_spikeTemps,
d_spikeTimes);
// TODO: Remove Cuda Error Checking For Performance
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
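/* Illustrative call site (not from the original source; the buffer names and
 * numeric values below are placeholders). Adds `fill_value` to a window of
 * `fill_length` samples of each spike's row in d_convData; the appropriate sign
 * and magnitude of fill_value depend on how convData is consumed downstream. */
void exampleRefracFill(float *d_convData, size_t ldConvData,
                       int64_t const *d_spikeTemps, int64_t const *d_spikeTimes,
                       size_t numSpikes)
{
    launchRefracFillKernel(/*fill_length=*/ 60,
                           /*fill_offset=*/ 0,
                           /*fill_value=*/ 1e5f,
                           d_convData, ldConvData,
                           d_spikeTemps, d_spikeTimes, numSpikes);
}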
/// Should the implementation use streams to schedule kernels in parallel if possible?
#define ENOKI_CUDA_USE_STREAMS 1
/// Synchronize with the device after each kernel launch (useful for debugging)
#define ENOKI_CUDA_LAUNCH_BLOCKING 0
#if defined(NDEBUG)
# define ENOKI_CUDA_DEFAULT_LOG_LEVEL 0
#else
# define ENOKI_CUDA_DEFAULT_LOG_LEVEL 1
#endif
/// Reserved registers for grid-stride loop indexing
#define ENOKI_CUDA_REG_RESERVED 10
NAMESPACE_BEGIN(enoki)
using TimePoint = std::chrono::time_point<std::chrono::high_resolution_clock>;
// Forward declarations
void cuda_inc_ref_ext(uint32_t);
void cuda_inc_ref_int(uint32_t);
void cuda_dec_ref_ext(uint32_t);
void cuda_dec_ref_int(uint32_t);
size_t cuda_register_size(EnokiType type);
uint32_t cuda_trace_append(EnokiType type, const char *cmd, uint32_t arg1);
// -----------------------------------------------------------------------
//! @{ \name 'Variable' type that is used to record instruction traces
// -----------------------------------------------------------------------
struct Variable {
/// Data type of this variable
EnokiType type;
/// PTX instruction to compute it
std::string cmd;
/// Associated label (mainly for debugging)
std::string label;
/// Number of entries
size_t size = 0;
/// Pointer to device memory
void *data = nullptr;
/// External (i.e. by Enoki) reference count
uint32_t ref_count_ext = 0;
/// Internal (i.e. within the PTX instruction stream) reference count
uint32_t ref_count_int = 0;
/// Dependencies of this instruction
std::array<uint32_t, 3> dep = { 0, 0, 0 };
/// Extra dependency (which is not directly used in arithmetic, e.g. scatter/gather)
uint32_t extra_dep = 0;
/// Does the instruction have side effects (e.g. 'scatter')
bool side_effect = false;
/// A variable is 'dirty' if there are pending scatter operations to it
bool dirty = false;
/// Free 'data' after this variable is no longer referenced?
bool free = true;
/// Optimization: is this a direct pointer (rather than an array which stores a pointer?)
bool direct_pointer = false;
/// Size of the subtree rooted at this variable (heuristic for instruction scheduling)
uint32_t subtree_size = 0;
Variable(EnokiType type) : type(type) { }
~Variable() { if (free && data != nullptr) cuda_free(data); }
bool is_collected() const {
return ref_count_int == 0 && ref_count_ext == 0;
}
};
void cuda_shutdown();
#if ENOKI_CUDA_USE_STREAMS == 1
struct Stream {
cudaStream_t stream = nullptr;
cudaEvent_t event = nullptr;
void init() {
cuda_check(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
cuda_check(cudaEventCreateWithFlags(&event, cudaEventDisableTiming));
}
void release() {
cuda_check(cudaEventDestroy(event));
cuda_check(cudaStreamDestroy(stream));
}
};
#endif
enum MallocType { Normal, Managed, Host };
struct TaggedSize {
MallocType type;
size_t size;
TaggedSize(MallocType type, size_t size) : type(type), size(size) { }
bool operator==(const TaggedSize &ts) const { return type == ts.type && size == ts.size; }
bool operator!=(const TaggedSize &ts) const { return type != ts.type || size != ts.size; }
};
struct TaggedSizeHasher {
size_t operator()(const TaggedSize &ts) const {
return std::hash<size_t>()((ts.size << 2) + (size_t) ts.type);
}
};
struct Context {
/// Current variable index
uint32_t ctr = 0;
/// Enumerates "live" (externally referenced) variables and statements with side effects
std::set<uint32_t> live;
/// Enumerates "dirty" variables (targets of 'scatter' operations that have not yet executed)
std::vector<uint32_t> dirty;
/// Stores the mapping from variable indices to variables
std::unordered_map<uint32_t, Variable> variables;
/// Stores the mapping from pointer addresses to variable indices
std::unordered_map<const void *, uint32_t> ptr_map;
/// Current operand array for scatter/gather
uint32_t scatter_gather_operand = 0;
/// Current log level (0 == none, 1 == minimal, 2 == moderate, 3 == max.)
uint32_t log_level = ENOKI_CUDA_DEFAULT_LOG_LEVEL;
/// Callback that will be invoked before each cuda_eval() call
std::vector<std::pair<void(*)(void *), void *>> callbacks;
/// Include printf function declaration in PTX assembly?
bool include_printf = false;
/// Hash table of previously compiled kernels
std::unordered_map<std::string, std::pair<CUmodule, CUfunction>, StringHasher> kernels;
#if ENOKI_CUDA_USE_STREAMS == 1
/// Streams for parallel execution
std::vector<Stream> streams;
/// Event on default stream
cudaEvent_t stream_0_event = nullptr;
#endif
/// Default thread and block count for kernels
uint32_t block_count = 0, thread_count = 0;
/// Map of unused memory regions
std::unordered_multimap<TaggedSize, void *, TaggedSizeHasher> free_map;
/// Map of currently used memory regions
std::unordered_map<void *, TaggedSize> used_map;
/// Memory usage watermarks
size_t used = 0, used_managed = 0, used_host = 0;
size_t watermark = 0, watermark_managed = 0, watermark_host = 0;
/// Mutex protecting the malloc-related data structures
std::recursive_mutex malloc_mutex;
~Context() { clear(); }
Variable &operator[](uint32_t i) {
auto it = variables.find(i);
if (it == variables.end())
throw std::runtime_error("CUDABackend: referenced unknown variable " + std::to_string(i));
return it->second;
}
void clear() {
#if !defined(NDEBUG)
if (log_level >= 1) {
if (ctr != 0 || !variables.empty())
std::cerr << "cuda_shutdown()" << std::endl;
size_t n_live = 0;
for (auto const &var : variables) {
if (var.first < ENOKI_CUDA_REG_RESERVED)
continue;
if (n_live < 10) {
std::cerr << "cuda_shutdown(): variable " << var.first << " is still live. "<< std::endl;
if (n_live == 9)
std::cerr << "(skipping remainder)" << std::endl;
}
++n_live;
}
if (n_live > 0)
std::cerr << "cuda_shutdown(): " << n_live
<< " variables were still live at shutdown." << std::endl;
}
#endif
ctr = 0;
dirty.clear();
variables.clear();
live.clear();
scatter_gather_operand = 0;
include_printf = false;
#if ENOKI_CUDA_USE_STREAMS == 1
for (Stream &stream : streams)
stream.release();
streams.clear();
if (stream_0_event) {
cuda_check(cudaEventDestroy(stream_0_event));
stream_0_event = nullptr;
}
#endif
for (auto &kv : kernels)
cuda_check(cuModuleUnload(kv.second.first));
kernels.clear();
cuda_sync();
cuda_malloc_trim();
}
Variable& append(EnokiType type) {
return variables.emplace(ctr++, type).first->second;
}
};
static Context *__context = nullptr;
bool installed_shutdown_handler = false;
void cuda_init();
inline static Context &context() {
if (ENOKI_UNLIKELY(__context == nullptr))
cuda_init();
return *__context;
}
ENOKI_EXPORT void cuda_init() {
// initialize CUDA
cudaFree(0);
// We don't really use shared memory, so put more into L1 cache.
cuda_check(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
/// Reserve indices for reserved kernel variables
if (__context)
delete __context;
__context = new Context();
Context &ctx = *__context;
ctx.clear();
ctx.append(EnokiType::Invalid);
ctx.append(EnokiType::UInt64);
while (ctx.variables.size() != ENOKI_CUDA_REG_RESERVED)
ctx.append(EnokiType::UInt32);
#if ENOKI_CUDA_USE_STREAMS == 1
ctx.streams.resize(5);
for (size_t i = 0; i < 5; ++i)
ctx.streams[i].init();
cuda_check(cudaEventCreateWithFlags(&ctx.stream_0_event, cudaEventDisableTiming));
#endif
ctx.kernels.reserve(1000);
int device, num_sm;
cudaGetDevice(&device);
cudaDeviceGetAttribute(&num_sm, cudaDevAttrMultiProcessorCount, device);
ctx.block_count = next_power_of_two(num_sm) * 2;
ctx.thread_count = 128;
ctx.used = 0;
ctx.used_managed = 0;
ctx.used_host = 0;
ctx.watermark = 0;
ctx.watermark_managed = 0;
ctx.watermark_host = 0;
if (!installed_shutdown_handler) {
installed_shutdown_handler = true;
atexit(cuda_shutdown);
}
}
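/* Worked example (illustrative): on a device with 68 SMs, and assuming
 * next_power_of_two() rounds up, block_count = 2 * 128 = 256 and
 * thread_count = 128, i.e. 32768 grid-stride threads per generated kernel
 * (see the "Grid-stride loop setup" emitted in cuda_jit_assemble()). */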
ENOKI_EXPORT void cuda_shutdown() {
if (__context) {
__context->clear();
delete __context;
__context = nullptr;
}
}
ENOKI_EXPORT void *cuda_var_ptr(uint32_t index) {
return context()[index].data;
}
ENOKI_EXPORT size_t cuda_var_size(uint32_t index) {
return context()[index].size;
}
ENOKI_EXPORT void cuda_var_set_label(uint32_t index, const char *str) {
Context &ctx = context();
ctx[index].label = str;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_var_set_label(" << index << "): " << str << std::endl;
#endif
}
ENOKI_EXPORT uint32_t cuda_var_set_size(uint32_t index, size_t size, bool copy) {
Context &ctx = context();
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_var_set_size(" << index << "): " << size << std::endl;
#endif
Variable &var = ctx[index];
if (var.size == size)
return index;
if (var.data != nullptr || var.ref_count_int > 0) {
if (var.size == 1 && copy) {
uint32_t index_new =
cuda_trace_append(var.type, "mov.$t1 $r1, $r2", index);
ctx[index_new].size = size;
cuda_dec_ref_ext(index);
return index_new;
}
throw std::runtime_error(
"cuda_var_set_size(): attempted to resize variable " +
std::to_string(index) +
" which was already allocated (current size = " +
std::to_string(var.size) +
", requested size = " + std::to_string(size) + ")");
}
var.size = size;
return index;
}
ENOKI_EXPORT uint32_t cuda_var_register(EnokiType type, size_t size,
void *ptr, bool free) {
Context &ctx = context();
uint32_t idx = ctx.ctr;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_var_register(" << idx << "): " << ptr
<< ", size=" << size << ", free=" << free << std::endl;
#endif
if (size == 0)
throw std::runtime_error("cuda_var_register(): attempted to create a "
"variable of size zero!");
Variable &v = ctx.append(type);
v.data = ptr;
v.size = size;
v.free = free;
cuda_inc_ref_ext(idx);
return idx;
}
ENOKI_EXPORT uint32_t cuda_var_register_ptr(const void *ptr) {
Context &ctx = context();
auto it = ctx.ptr_map.find(ptr);
if (it != ctx.ptr_map.end()) {
cuda_inc_ref_ext(it->second);
return it->second;
}
uint32_t idx = ctx.ctr;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_var_register_ptr(" << idx << "): " << ptr
<< std::endl;
#endif
Variable &v = ctx.append(EnokiType::Pointer);
v.data = (void *) ptr;
v.size = 1;
v.free = false;
v.direct_pointer = true;
cuda_inc_ref_ext(idx);
ctx.ptr_map[ptr] = idx;
return idx;
}
ENOKI_EXPORT uint32_t cuda_var_copy_to_device(EnokiType type, size_t size,
const void *value) {
size_t total_size = size * cuda_register_size(type);
void *tmp = cuda_host_malloc(total_size),
*device_ptr = cuda_malloc(total_size);
memcpy(tmp, value, total_size);
cuda_check(cudaMemcpyAsync(device_ptr, tmp, total_size,
cudaMemcpyHostToDevice));
cuda_host_free(tmp);
return cuda_var_register(type, size, device_ptr, true);
}
ENOKI_EXPORT void cuda_var_free(uint32_t idx) {
Context &ctx = context();
Variable &v = ctx[idx];
#if !defined(NDEBUG)
if (ctx.log_level >= 5) {
std::cerr << "cuda_var_free(" << idx << ") = " << v.data;
if (!v.free)
std::cerr << " (not deleted)";
std::cerr << std::endl;
}
#endif
if (v.direct_pointer) {
auto it = ctx.ptr_map.find(v.data);
assert(it != ctx.ptr_map.end());
ctx.ptr_map.erase(it);
}
for (int i = 0; i < 3; ++i)
cuda_dec_ref_int(v.dep[i]);
cuda_dec_ref_ext(v.extra_dep);
ctx.variables.erase(idx); // invokes Variable destructor + cudaFree().
}
ENOKI_EXPORT void cuda_set_scatter_gather_operand(uint32_t idx, bool gather) {
Context &ctx = context();
if (idx != 0) {
Variable &v = ctx[idx];
if (v.data == nullptr || (gather && v.dirty))
cuda_eval();
}
ctx.scatter_gather_operand = idx;
}
//! @}
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
//! @{ \name Common functionality to distinguish types
// -----------------------------------------------------------------------
ENOKI_EXPORT size_t cuda_register_size(EnokiType type) {
switch (type) {
case EnokiType::UInt8:
case EnokiType::Int8:
case EnokiType::Bool: return 1;
case EnokiType::UInt16:
case EnokiType::Int16: return 2;
case EnokiType::UInt32:
case EnokiType::Int32: return 4;
case EnokiType::Pointer:
case EnokiType::UInt64:
case EnokiType::Int64: return 8;
case EnokiType::Float32: return 4;
case EnokiType::Float64: return 8;
default: return (size_t) -1;
}
}
ENOKI_EXPORT const char *cuda_register_type(EnokiType type) {
switch (type) {
case EnokiType::UInt8: return "u8";
case EnokiType::Int8: return "s8";
case EnokiType::UInt16: return "u16";
case EnokiType::Int16: return "s16";
case EnokiType::UInt32: return "u32";
case EnokiType::Int32: return "s32";
case EnokiType::Pointer:
case EnokiType::UInt64: return "u64";
case EnokiType::Int64: return "s64";
case EnokiType::Float16: return "f16";
case EnokiType::Float32: return "f32";
case EnokiType::Float64: return "f64";
case EnokiType::Bool: return "pred";
default: return nullptr;
}
}
ENOKI_EXPORT const char *cuda_register_type_bin(EnokiType type) {
switch (type) {
case EnokiType::UInt8:
case EnokiType::Int8: return "b8";
case EnokiType::UInt16:
case EnokiType::Float16:
case EnokiType::Int16: return "b16";
case EnokiType::Float32:
case EnokiType::UInt32:
case EnokiType::Int32: return "b32";
case EnokiType::Pointer:
case EnokiType::Float64:
case EnokiType::UInt64:
case EnokiType::Int64: return "b64";
case EnokiType::Bool: return "pred";
default: return nullptr;
}
}
ENOKI_EXPORT const char *cuda_register_name(EnokiType type) {
switch (type) {
case EnokiType::UInt8:
case EnokiType::Int8: return "%b";
case EnokiType::UInt16:
case EnokiType::Int16: return "%w";
case EnokiType::UInt32:
case EnokiType::Int32: return "%r";
case EnokiType::Pointer:
case EnokiType::UInt64:
case EnokiType::Int64: return "%rd";
case EnokiType::Float32: return "%f";
case EnokiType::Float64: return "%d";
case EnokiType::Bool: return "%p";
default: return nullptr;
}
}
//! @}
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
//! @{ \name Reference counting (internal means: dependency within
// JIT trace, external means: referenced by an Enoki array)
// -----------------------------------------------------------------------
ENOKI_EXPORT void cuda_inc_ref_ext(uint32_t index) {
if (index < ENOKI_CUDA_REG_RESERVED)
return;
Context &ctx = context();
Variable &v = ctx[index];
v.ref_count_ext++;
#if !defined(NDEBUG)
if (ctx.log_level >= 5)
std::cerr << "cuda_inc_ref_ext(" << index << ") -> "
<< v.ref_count_ext << std::endl;
#endif
}
ENOKI_EXPORT void cuda_inc_ref_int(uint32_t index) {
if (index < ENOKI_CUDA_REG_RESERVED)
return;
Context &ctx = context();
Variable &v = ctx[index];
v.ref_count_int++;
#if !defined(NDEBUG)
if (ctx.log_level >= 5)
std::cerr << "cuda_inc_ref_int(" << index << ") -> "
<< v.ref_count_int << std::endl;
#endif
}
ENOKI_EXPORT void cuda_dec_ref_ext(uint32_t index) {
Context &ctx = context();
if (index < ENOKI_CUDA_REG_RESERVED || ctx.variables.empty())
return;
Variable &v = ctx[index];
if (ENOKI_UNLIKELY(v.ref_count_ext == 0)) {
fprintf(stderr, "cuda_dec_ref_ext(): Node %u has no external references!\n", index);
exit(EXIT_FAILURE);
}
#if !defined(NDEBUG)
if (ctx.log_level >= 5)
std::cerr << "cuda_dec_ref_ext(" << index << ") -> "
<< (v.ref_count_ext - 1) << std::endl;
#endif
v.ref_count_ext--;
if (v.ref_count_ext == 0 && !v.side_effect)
ctx.live.erase(index);
if (v.is_collected())
cuda_var_free(index);
}
ENOKI_EXPORT void cuda_dec_ref_int(uint32_t index) {
if (index < ENOKI_CUDA_REG_RESERVED)
return;
Context &ctx = context();
Variable &v = ctx[index];
if (ENOKI_UNLIKELY(v.ref_count_int == 0)) {
fprintf(stderr, "cuda_dec_ref_int(): Node %u has no internal references!\n", index);
exit(EXIT_FAILURE);
}
#if !defined(NDEBUG)
if (ctx.log_level >= 5)
std::cerr << "cuda_dec_ref_int(" << index << ") -> "
<< (v.ref_count_int - 1) << std::endl;
#endif
v.ref_count_int--;
if (v.is_collected())
cuda_var_free(index);
}
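/* Illustrative reference-counting walkthrough (not part of the original source;
 * d_buf is a hypothetical device allocation of 1024 floats). */
static void refcount_example(void *d_buf) {
    uint32_t idx = cuda_var_register(EnokiType::Float32, 1024, d_buf, true); // ext = 1
    cuda_inc_ref_int(idx);  // a trace instruction now depends on it        -> int = 1
    cuda_dec_ref_ext(idx);  // the external handle goes away; still alive   -> ext = 0
    cuda_dec_ref_int(idx);  // last reference dropped -> cuda_var_free(idx) reclaims it
}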
ENOKI_EXPORT void cuda_var_mark_side_effect(uint32_t index) {
Context &ctx = context();
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_var_mark_side_effect(" << index << ")" << std::endl;
#endif
assert(index >= ENOKI_CUDA_REG_RESERVED);
ctx[index].side_effect = true;
}
ENOKI_EXPORT void cuda_var_mark_dirty(uint32_t index) {
Context &ctx = context();
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_var_mark_dirty(" << index << ")" << std::endl;
#endif
assert(index >= ENOKI_CUDA_REG_RESERVED);
ctx[index].dirty = true;
ctx.dirty.push_back(index);
}
//! @}
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
//! @{ \name JIT trace append routines
// -----------------------------------------------------------------------
static void strip_ftz(Variable &v) {
if (v.type != EnokiType::Float32) {
size_t offset = v.cmd.find(".ftz");
if (ENOKI_UNLIKELY(offset != std::string::npos)) {
            v.cmd.replace(offset, 4, "");
}
}
}
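/* Example (illustrative): a Float64 variable created from a template written
 * for the f32 path, e.g. cmd == "add.rn.ftz.$t1 $r1, $r2, $r3", is rewritten to
 * "add.rn.$t1 $r1, $r2, $r3", since the ".ftz" modifier only exists for f32. */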
ENOKI_EXPORT uint32_t cuda_trace_append(EnokiType type,
const char *cmd) {
Context &ctx = context();
uint32_t idx = ctx.ctr;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_trace_append(" << idx << "): " << cmd << std::endl;
#endif
Variable &v = ctx.append(type);
v.cmd = cmd;
v.subtree_size = 1;
v.size = 1;
cuda_inc_ref_ext(idx);
ctx.live.insert(idx);
strip_ftz(v);
return idx;
}
ENOKI_EXPORT uint32_t cuda_trace_append(EnokiType type,
const char *cmd,
uint32_t arg1) {
if (ENOKI_UNLIKELY(arg1 == 0))
throw std::runtime_error(
"cuda_trace_append(): arithmetic involving "
"uninitialized variable!");
Context &ctx = context();
const Variable &v1 = ctx[arg1];
if (v1.dirty)
cuda_eval();
uint32_t idx = ctx.ctr;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_trace_append(" << idx << " <- " << arg1 << "): " << cmd
<< std::endl;
#endif
Variable &v = ctx.append(type);
v.size = v1.size;
v.dep[0] = arg1;
v.cmd = cmd;
v.subtree_size = v1.subtree_size + 1;
cuda_inc_ref_int(arg1);
cuda_inc_ref_ext(idx);
ctx.live.insert(idx);
strip_ftz(v);
return idx;
}
ENOKI_EXPORT uint32_t cuda_trace_append(EnokiType type,
const char *cmd,
uint32_t arg1,
uint32_t arg2) {
if (ENOKI_UNLIKELY(arg1 == 0 || arg2 == 0))
throw std::runtime_error(
"cuda_trace_append(): arithmetic involving "
"uninitialized variable!");
Context &ctx = context();
const Variable &v1 = ctx[arg1],
&v2 = ctx[arg2];
if (v1.dirty || v2.dirty)
cuda_eval();
uint32_t idx = ctx.ctr;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_trace_append(" << idx << " <- " << arg1 << ", " << arg2
<< "): " << cmd << std::endl;
#endif
size_t size = std::max(v1.size, v2.size);
if (ENOKI_UNLIKELY((v1.size != 1 && v1.size != size) ||
(v2.size != 1 && v2.size != size)))
throw std::runtime_error("cuda_trace_append(): arithmetic involving "
"arrays of incompatible size (" +
std::to_string(v1.size) + " and " + std::to_string(v2.size) +
"). The instruction was \"" + cmd + "\".");
Variable &v = ctx.append(type);
v.size = size;
v.dep = { arg1, arg2, 0 };
v.cmd = cmd;
v.subtree_size = v1.subtree_size + v2.subtree_size + 1;
cuda_inc_ref_int(arg1);
cuda_inc_ref_int(arg2);
cuda_inc_ref_ext(idx);
ctx.live.insert(idx);
if (v.cmd.find("ld.global") != std::string::npos) {
v.extra_dep = ctx.scatter_gather_operand;
cuda_inc_ref_ext(v.extra_dep);
} else {
strip_ftz(v);
}
return idx;
}
ENOKI_EXPORT uint32_t cuda_trace_append(EnokiType type,
const char *cmd,
uint32_t arg1,
uint32_t arg2,
uint32_t arg3) {
if (ENOKI_UNLIKELY(arg1 == 0 || arg2 == 0 || arg3 == 0))
throw std::runtime_error("cuda_trace_append(): arithmetic involving "
"uninitialized variable!");
Context &ctx = context();
const Variable &v1 = ctx[arg1],
&v2 = ctx[arg2],
&v3 = ctx[arg3];
if (v1.dirty || v2.dirty || v3.dirty)
cuda_eval();
uint32_t idx = ctx.ctr;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_trace_append(" << idx << " <- " << arg1 << ", " << arg2
<< ", " << arg3 << "): " << cmd << std::endl;
#endif
size_t size = std::max(std::max(v1.size, v2.size), v3.size);
if (ENOKI_UNLIKELY((v1.size != 1 && v1.size != size) ||
(v2.size != 1 && v2.size != size) ||
(v3.size != 1 && v3.size != size)))
throw std::runtime_error("cuda_trace_append(): arithmetic involving "
"arrays of incompatible size (" +
std::to_string(v1.size) + ", " + std::to_string(v2.size) +
" and " + std::to_string(v3.size) + "). The instruction was \"" +
cmd + "\".");
Variable &v = ctx.append(type);
v.size = size;
v.dep = { arg1, arg2, arg3 };
v.cmd = cmd;
v.subtree_size = v1.subtree_size +
v2.subtree_size +
v3.subtree_size + 1;
cuda_inc_ref_int(arg1);
cuda_inc_ref_int(arg2);
cuda_inc_ref_int(arg3);
cuda_inc_ref_ext(idx);
ctx.live.insert(idx);
if (v.cmd.find("st.global") != std::string::npos ||
v.cmd.find("atom.global.add") != std::string::npos) {
v.extra_dep = ctx.scatter_gather_operand;
cuda_inc_ref_ext(v.extra_dep);
} else {
strip_ftz(v);
}
return idx;
}
ENOKI_EXPORT void cuda_trace_printf(const char *fmt, uint32_t narg, uint32_t* arg) {
auto &ctx = context();
std::ostringstream oss;
oss << "{" << std::endl
<< " .global .align 1 .b8 fmt[] = {";
for (int i = 0;; ++i) {
oss << (unsigned) fmt[i];
if (fmt[i] == '\0')
break;
oss << ", ";
}
oss << "};" << std::endl
<< " .local .align 8 .b8 buf[" << 8*narg << "];" << std::endl
<< " .reg.b64 %fmt_r, %buf_r;" << std::endl;
for (int i = 0; i < narg; ++i) {
switch (ctx[arg[i]].type) {
case EnokiType::Float32:
oss << " cvt.f64.f32 %d0, $r" << i + 2 << ";" << std::endl
<< " st.local.f64 [buf + " << i * 8 << "], %d0;" << std::endl;
break;
default:
oss << " st.local.$t" << i + 2 << " [buf + " << i * 8
<< "], $r" << i + 2 << ";" << std::endl;
break;
}
}
oss << " cvta.global.u64 %fmt_r, fmt;" << std::endl
<< " cvta.local.u64 %buf_r, buf;" << std::endl
<< " {" << std::endl
<< " .param .b64 fmt_p;" << std::endl
<< " .param .b64 buf_p;" << std::endl
<< " .param .b32 rv_p;" << std::endl
<< " st.param.b64 [fmt_p], %fmt_r;" << std::endl
<< " st.param.b64 [buf_p], %buf_r;" << std::endl
<< " call.uni (rv_p), vprintf, (fmt_p, buf_p);" << std::endl
<< " }" << std::endl
<< " }" << std::endl;
uint32_t idx = 0;
if (narg == 0)
idx = cuda_trace_append(EnokiType::UInt32, oss.str().c_str());
else if (narg == 1)
idx = cuda_trace_append(EnokiType::UInt32, oss.str().c_str(), arg[0]);
else if (narg == 2)
idx = cuda_trace_append(EnokiType::UInt32, oss.str().c_str(), arg[0], arg[1]);
else if (narg == 3)
idx = cuda_trace_append(EnokiType::UInt32, oss.str().c_str(), arg[0], arg[1], arg[2]);
else
throw std::runtime_error("cuda_trace_print(): at most 3 variables can be printed at once!");
cuda_var_mark_side_effect(idx);
ctx.include_printf = true;
}
//! @}
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
//! @{ \name JIT trace generation
// -----------------------------------------------------------------------
static void cuda_render_cmd(std::ostringstream &oss,
Context &ctx,
const std::unordered_map<uint32_t, uint32_t> ®_map,
uint32_t index) {
const Variable &var = ctx[index];
const std::string &cmd = var.cmd;
oss << " ";
for (size_t i = 0; i < cmd.length(); ++i) {
if (cmd[i] != '$' || i + 2 >= cmd.length()) {
oss << cmd[i];
continue;
} else {
uint8_t type = cmd[i + 1],
dep_offset = cmd[i + 2] - '0';
if (type != 't' && type != 'b' && type != 'r')
throw std::runtime_error("cuda_render_cmd: invalid '$' template!");
            if (dep_offset < 1 || dep_offset > 4)
throw std::runtime_error("cuda_render_cmd: out of bounds!");
uint32_t dep =
dep_offset == 1 ? index : var.dep[dep_offset - 2];
EnokiType dep_type = ctx[dep].type;
const char *result = nullptr;
if (type == 't')
result = cuda_register_type(dep_type);
else if (type == 'b')
result = cuda_register_type_bin(dep_type);
else if (type == 'r')
result = cuda_register_name(dep_type);
if (result == nullptr)
throw std::runtime_error(
"CUDABackend: internal error -- variable " +
std::to_string(index) + " references " + std::to_string(dep) +
" with unsupported type: " + std::string(cmd));
oss << result;
if (type == 'r') {
auto it = reg_map.find(dep);
if (it == reg_map.end())
throw std::runtime_error(
"CUDABackend: internal error -- variable not found!");
oss << it->second;
}
i += 2;
}
}
if (!cmd.empty() && cmd[cmd.length() - 1] != '\n')
oss << ";" << std::endl;
}
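/* Worked example (illustrative): for a Float32 variable 42 with
 *   cmd     = "add.$t1 $r1, $r2, $r3",   dep = {40, 41, 0},
 *   reg_map = {40 -> 5, 41 -> 6, 42 -> 7},
 * the loop above emits "    add.f32 %f7, %f5, %f6;" -- "$1" refers to the
 * instruction's own register and "$2".."$4" to dep[0..2]. */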
static std::pair<std::string, std::vector<void *>>
cuda_jit_assemble(size_t size, const std::vector<uint32_t> &sweep, bool include_printf) {
Context &ctx = context();
std::ostringstream oss;
std::vector<void *> ptrs;
size_t n_in = 0, n_out = 0, n_arith = 0;
oss << ".version 6.3" << std::endl
<< ".target sm_" << ENOKI_CUDA_COMPUTE_CAPABILITY << std::endl
<< ".address_size 64" << std::endl
<< std::endl;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "Register map:" << std::endl;
#endif
uint32_t n_vars = ENOKI_CUDA_REG_RESERVED;
std::unordered_map<uint32_t, uint32_t> reg_map;
for (uint32_t index : sweep) {
#if !defined(NDEBUG)
if (ctx.log_level >= 4) {
const Variable &v = ctx[index];
std::cerr << " " << cuda_register_name(v.type) << n_vars << " -> " << index;
const std::string &label = v.label;
if (!label.empty())
std::cerr << " \"" << label << "\"";
if (v.size == 1)
std::cerr << " [scalar]";
if (v.data != nullptr)
std::cerr << " [in]";
else if (v.side_effect)
std::cerr << " [se]";
else if (v.ref_count_ext > 0)
std::cerr << " [out]";
std::cerr << std::endl;
}
#endif
reg_map[index] = n_vars++;
}
reg_map[2] = 2;
/**
* rd0: ptr
* r1: N
* r2: index
* r3: stride
* r4: threadIdx
* r5: blockIdx
* r6: blockDim
* r7: gridDim
* rd8, rd9: address I/O
*/
if (include_printf) {
oss << ".extern .func (.param .b32 rv) vprintf (" << std::endl
<< " .param .b64 fmt," << std::endl
<< " .param .b64 buf" << std::endl
<< ");" << std::endl
<< std::endl;
}
oss << ".visible .entry enoki_@@@@@@@@(.param .u64 ptr," << std::endl
<< " .param .u32 size) {" << std::endl
<< " .reg.b8 %b<" << n_vars << ">;" << std::endl
<< " .reg.b16 %w<" << n_vars << ">;" << std::endl
<< " .reg.b32 %r<" << n_vars << ">;" << std::endl
<< " .reg.b64 %rd<" << n_vars << ">;" << std::endl
<< " .reg.f32 %f<" << n_vars << ">;" << std::endl
<< " .reg.f64 %d<" << n_vars << ">;" << std::endl
<< " .reg.pred %p<" << n_vars << ">;" << std::endl << std::endl
<< std::endl
<< " // Grid-stride loop setup" << std::endl
<< " ld.param.u64 %rd0, [ptr];" << std::endl
<< " ld.param.u32 %r1, [size];" << std::endl
<< " mov.u32 %r4, %tid.x;" << std::endl
<< " mov.u32 %r5, %ctaid.x;" << std::endl
<< " mov.u32 %r6, %ntid.x;" << std::endl
<< " mad.lo.u32 %r2, %r5, %r6, %r4;" << std::endl
<< " setp.ge.u32 %p0, %r2, %r1;" << std::endl
<< " @%p0 bra L0;" << std::endl
<< std::endl
<< " mov.u32 %r7, %nctaid.x;" << std::endl
<< " mul.lo.u32 %r3, %r6, %r7;" << std::endl
<< std::endl
<< "L1:" << std::endl
<< " // Loop body" << std::endl;
for (uint32_t index : sweep) {
Variable &var = ctx[index];
if (var.is_collected() || (var.cmd.empty() && var.data == nullptr && !var.direct_pointer))
throw std::runtime_error(
"CUDABackend: found invalid/expired variable " + std::to_string(index) + " in schedule! ");
if (var.size != 1 && var.size != size)
throw std::runtime_error(
"CUDABackend: encountered arrays of incompatible size! (" +
std::to_string(size) + " vs " + std::to_string(var.size) + ")");
oss << std::endl;
if (var.data || var.direct_pointer) {
size_t idx = ptrs.size();
ptrs.push_back(var.data);
oss << std::endl
<< " // Load register " << cuda_register_name(var.type) << reg_map[index];
if (!var.label.empty())
oss << ": " << var.label;
oss << std::endl;
if (!var.direct_pointer) {
oss << " ldu.global.u64 %rd8, [%rd0 + " << idx * 8 << "];" << std::endl;
const char *load_instr = "ldu";
if (var.size != 1) {
oss << " mul.wide.u32 %rd9, %r2, " << cuda_register_size(var.type) << ";" << std::endl
<< " add.u64 %rd8, %rd8, %rd9;" << std::endl;
load_instr = "ld";
}
if (var.type != EnokiType::Bool) {
oss << " " << load_instr << ".global." << cuda_register_type(var.type) << " "
<< cuda_register_name(var.type) << reg_map[index] << ", [%rd8]"
<< ";" << std::endl;
} else {
oss << " " << load_instr << ".global.u8 %w1, [%rd8];" << std::endl
<< " setp.ne.u16 " << cuda_register_name(var.type) << reg_map[index] << ", %w1, 0;";
}
} else {
oss << " ldu.global.u64 " << cuda_register_name(var.type)
<< reg_map[index] << ", [%rd0 + " << idx * 8 << "];"
<< std::endl;
}
n_in++;
} else {
if (!var.label.empty())
oss << " // Compute register "
<< cuda_register_name(var.type) << reg_map[index] << ": "
<< var.label << std::endl;
cuda_render_cmd(oss, ctx, reg_map, index);
n_arith++;
if (var.side_effect) {
n_out++;
continue;
}
if (var.ref_count_ext == 0)
continue;
if (var.size != size)
continue;
size_t size_in_bytes =
cuda_var_size(index) * cuda_register_size(var.type);
var.data = cuda_malloc(size_in_bytes);
var.subtree_size = 1;
#if !defined(NDEBUG)
if (ctx.log_level >= 4)
std::cerr << "cuda_eval(): allocated variable " << index
<< " -> " << var.data << " (" << size_in_bytes
<< " bytes)" << std::endl;
#endif
size_t idx = ptrs.size();
ptrs.push_back(var.data);
n_out++;
oss << std::endl
<< " // Store register " << cuda_register_name(var.type) << reg_map[index];
if (!var.label.empty())
oss << ": " << var.label;
oss << std::endl
<< " ldu.global.u64 %rd8, [%rd0 + " << idx * 8 << "];" << std::endl;
if (var.size != 1) {
oss << " mul.wide.u32 %rd9, %r2, " << cuda_register_size(var.type) << ";" << std::endl
<< " add.u64 %rd8, %rd8, %rd9;" << std::endl;
}
if (var.type != EnokiType::Bool) {
oss << " st.global." << cuda_register_type(var.type) << " [%rd8], "
<< cuda_register_name(var.type) << reg_map[index] << ";"
<< std::endl;
} else {
oss << " selp.u16 %w1, 1, 0, " << cuda_register_name(var.type)
<< reg_map[index] << ";" << std::endl;
oss << " st.global.u8" << " [%rd8], %w1;" << std::endl;
}
}
}
oss << std::endl
<< " add.u32 %r2, %r2, %r3;" << std::endl
<< " setp.ge.u32 %p0, %r2, %r1;" << std::endl
<< " @!%p0 bra L1;" << std::endl;
oss << std::endl
<< "L0:" << std::endl
<< " ret;" << std::endl
<< "}" << std::endl;
if (ctx.log_level >= 1)
std::cerr << "cuda_eval(): launching kernel (n=" << size << ", in="
<< n_in << ", out=" << n_out << ", ops=" << n_arith
<< ")" << std::endl;
return { oss.str(), ptrs };
}
ENOKI_EXPORT void cuda_jit_run(Context &ctx,
std::string &&source_,
const std::vector<void *> &ptrs,
uint32_t size,
uint32_t stream_idx,
TimePoint start,
TimePoint mid) {
if (source_.empty())
return;
uint32_t hash = (uint32_t) StringHasher()(source_);
char kernel_name[9];
snprintf(kernel_name, 9, "%08x", hash);
char *id = strchr((char *) source_.c_str(), '@');
memcpy(id, kernel_name, 8);
auto hash_entry = ctx.kernels.emplace(
std::move(source_), std::pair<CUmodule, CUfunction>{ nullptr, nullptr });
const std::string &source = hash_entry.first->first;
CUmodule &module = hash_entry.first->second.first;
CUfunction &kernel = hash_entry.first->second.second;
if (ctx.log_level >= 3)
std::cout << source << std::endl;
size_t duration_1 = std::chrono::duration_cast<
std::chrono::microseconds>(mid - start).count();
if (hash_entry.second) {
CUjit_option arg[5];
void *argv[5];
char error_log[8192], info_log[8192];
unsigned int logSize = 8192;
arg[0] = CU_JIT_INFO_LOG_BUFFER;
argv[0] = (void *) info_log;
arg[1] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
argv[1] = (void *) (long) logSize;
arg[2] = CU_JIT_ERROR_LOG_BUFFER;
argv[2] = (void *) error_log;
arg[3] = CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES;
argv[3] = (void *) (long) logSize;
arg[4] = CU_JIT_LOG_VERBOSE;
argv[4] = (void *) 1;
CUlinkState link_state;
cuda_check(cuLinkCreate(5, arg, argv, &link_state));
        CUresult rt = cuLinkAddData(link_state, CU_JIT_INPUT_PTX, (void *) source.c_str(),
source.length(), nullptr, 0, nullptr, nullptr);
if (rt != CUDA_SUCCESS) {
std::cerr << "PTX linker error:" << std::endl << error_log << std::endl;
exit(EXIT_FAILURE);
}
void *link_output = nullptr;
size_t link_output_size = 0;
        rt = cuLinkComplete(link_state, &link_output, &link_output_size);
        if (rt != CUDA_SUCCESS) {
std::cerr << "PTX linker error:" << std::endl << error_log << std::endl;
exit(EXIT_FAILURE);
}
TimePoint end = std::chrono::high_resolution_clock::now();
size_t duration_2 = std::chrono::duration_cast<
std::chrono::microseconds>(end - mid).count();
if (ctx.log_level >= 2) {
            char *ptxas_details = strstr(info_log, "ptxas info");
char *details = strstr(info_log, "\ninfo : used");
if (details) {
details += 16;
char *details_len = strstr(details, "registers,");
if (details_len)
details_len[9] = '\0';
std::cerr << "cuda_jit_run(): "
                          << ((ptxas_details == nullptr) ? "cache hit, " : "cache miss, ")
<< "jit: " << time_string(duration_1)
<< ", ptx compilation: " << time_string(duration_2)
<< ", " << details << std::endl;
}
if (ctx.log_level >= 3)
std::cerr << "Detailed linker output:" << std::endl
<< info_log << std::endl;
}
CUresult ret = cuModuleLoadData(&module, link_output);
if (ret == CUDA_ERROR_OUT_OF_MEMORY) {
cuda_malloc_trim();
ret = cuModuleLoadData(&module, link_output);
}
cuda_check(ret);
// Locate the kernel entry point
cuda_check(cuModuleGetFunction(&kernel, module, (std::string("enoki_") + kernel_name).c_str()));
// Destroy the linker invocation
cuda_check(cuLinkDestroy(link_state));
} else {
if (ctx.log_level >= 2) {
std::cerr << "cuda_jit_run(): cache hit, jit: "
<< time_string(duration_1) << std::endl;
}
}
cudaStream_t cuda_stream = nullptr;
#if ENOKI_CUDA_USE_STREAMS == 1
cuda_stream = ctx.streams[stream_idx].stream;
#endif
size_t total_size = ptrs.size() * sizeof(void*);
void *host_args = cuda_host_malloc(total_size),
*device_args = cuda_malloc(total_size);
memcpy(host_args, ptrs.data(), total_size);
cuda_check(cudaMemcpyAsync(device_args, host_args, total_size,
cudaMemcpyHostToDevice, cuda_stream));
uint32_t thread_count = ctx.thread_count,
block_count = ctx.block_count;
if (size == 1)
thread_count = block_count = 1;
void *args[2] = { &device_args, &size };
cuda_check(cuLaunchKernel(kernel, block_count, 1, 1, thread_count,
1, 1, 0, cuda_stream, args, nullptr));
cuda_host_free(host_args, cuda_stream);
cuda_free(device_args, cuda_stream);
#if ENOKI_CUDA_LAUNCH_BLOCKING == 1
cuda_check(cudaStreamSynchronize(cuda_stream));
#endif
}
static void sweep_recursive(Context &ctx,
std::unordered_set<uint32_t> &visited,
std::vector<uint32_t> &sweep,
uint32_t idx) {
if (visited.find(idx) != visited.end())
return;
visited.insert(idx);
std::array<uint32_t, 3> deps = ctx[idx].dep;
auto prio = [&](uint32_t i) -> uint32_t {
uint32_t k = deps[i];
if (k >= ENOKI_CUDA_REG_RESERVED)
return ctx[k].subtree_size;
else
return 0;
};
if (prio(1) < prio(2))
std::swap(deps[1], deps[2]);
if (prio(0) < prio(2))
std::swap(deps[0], deps[2]);
if (prio(0) < prio(1))
std::swap(deps[0], deps[1]);
for (uint32_t k : deps) {
if (k >= ENOKI_CUDA_REG_RESERVED)
sweep_recursive(ctx, visited, sweep, k);
}
sweep.push_back(idx);
}
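/* Example (illustrative): if variable 30 depends on {20, 25} and variable 25
 * has the larger subtree_size, the recursion visits 25 (and its dependencies)
 * first, then 20, and finally appends 30 -- so `sweep` always lists an
 * instruction after all of its inputs, scheduling the larger subtree first as
 * a register-pressure heuristic. */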
ENOKI_EXPORT void cuda_eval(bool log_assembly) {
Context &ctx = context();
for (auto callback: ctx.callbacks)
callback.first(callback.second);
std::map<size_t, std::pair<std::unordered_set<uint32_t>,
std::vector<uint32_t>>> sweeps;
for (uint32_t idx : ctx.live) {
auto &sweep = sweeps[ctx[idx].size];
sweep_recursive(ctx, std::get<0>(sweep), std::get<1>(sweep), idx);
}
for (uint32_t idx : ctx.dirty)
ctx[idx].dirty = false;
ctx.live.clear();
ctx.dirty.clear();
if (ctx.log_level >= 2 && sweeps.size() > 1)
std::cerr << "cuda_eval(): begin parallel group" << std::endl;
#if ENOKI_CUDA_USE_STREAMS == 1
if (ctx.streams.size() < sweeps.size()) {
size_t cur = ctx.streams.size();
ctx.streams.resize(sweeps.size());
for (size_t i = cur; i < ctx.streams.size(); ++i)
ctx.streams[i].init();
}
cuda_check(cudaEventRecord(ctx.stream_0_event, nullptr));
#endif
size_t stream_idx = 0;
for (auto it = sweeps.rbegin(); it != sweeps.rend(); ++it) {
size_t size = std::get<0>(*it);
const std::vector<uint32_t> &schedule = std::get<1>(std::get<1>(*it));
#if ENOKI_CUDA_USE_STREAMS == 1
Stream &stream = ctx.streams[stream_idx];
cuda_check(cudaStreamWaitEvent(stream.stream, ctx.stream_0_event, 0));
#endif
TimePoint start = std::chrono::high_resolution_clock::now();
auto result = cuda_jit_assemble(size, schedule, ctx.include_printf);
if (std::get<0>(result).empty())
continue;
TimePoint mid = std::chrono::high_resolution_clock::now();
cuda_jit_run(ctx,
std::move(std::get<0>(result)),
std::get<1>(result),
size, stream_idx, start, mid);
#if ENOKI_CUDA_USE_STREAMS == 1
cuda_check(cudaEventRecord(stream.event, stream.stream));
cuda_check(cudaStreamWaitEvent(nullptr, stream.event, 0));
#endif
stream_idx++;
}
ctx.include_printf = false;
if (ctx.log_level >= 2 && sweeps.size() > 1)
std::cerr << "cuda_eval(): end parallel group" << std::endl;
for (auto const &sweep : sweeps) {
const std::vector<uint32_t> &schedule =
std::get<1>(std::get<1>(sweep));
for (uint32_t idx : schedule) {
auto it = ctx.variables.find(idx);
if (it == ctx.variables.end())
continue;
Variable &v = it->second;
if (v.side_effect)
cuda_dec_ref_ext(idx);
if (v.data != nullptr && !v.cmd.empty()) {
for (int j = 0; j < 3; ++j) {
cuda_dec_ref_int(v.dep[j]);
v.dep[j] = 0;
}
cuda_dec_ref_ext(v.extra_dep);
v.extra_dep = 0;
}
}
}
}
ENOKI_EXPORT void cuda_eval_var(uint32_t index, bool log_assembly) {
Variable &var = context()[index];
if (var.data == nullptr || var.dirty)
cuda_eval(log_assembly);
assert(!var.dirty);
}
//! @}
// -----------------------------------------------------------------------
ENOKI_EXPORT void cuda_fetch_element(void *dst, uint32_t src, size_t offset, size_t size) {
Variable &var = context()[src];
if (var.data == nullptr || var.dirty)
cuda_eval();
if (var.dirty)
throw std::runtime_error("cuda_fetch_element(): element is still "
"marked as 'dirty' even after cuda_eval()!");
else if (var.data == nullptr)
throw std::runtime_error(
"cuda_fetch_element(): tried to read from invalid/uninitialized CUDA array!");
if (var.size == 1)
offset = 0;
cuda_check(cudaMemcpy(dst, (uint8_t *) var.data + size * offset,
size, cudaMemcpyDeviceToHost));
}
ENOKI_EXPORT void cuda_set_log_level(uint32_t level) {
#if defined(NDEBUG)
if (level >= 4)
throw std::runtime_error("cuda_set_log_level(): log levels >= 4 are only supported when Enoki is compiled in debug mode!");
#endif
context().log_level = level;
}
ENOKI_EXPORT uint32_t cuda_log_level() {
return context().log_level;
}
ENOKI_EXPORT void cuda_register_callback(void (*callback)(void *), void *payload) {
context().callbacks.emplace_back(callback, payload);
}
ENOKI_EXPORT void cuda_unregister_callback(void (*callback)(void *), void *payload) {
auto &cb = context().callbacks;
auto it = std::find(cb.begin(), cb.end(), std::make_pair(callback, payload));
if (it == cb.end())
throw std::runtime_error("cuda_unregister_callback(): entry not found!");
cb.erase(it);
}
ENOKI_EXPORT char *cuda_whos() {
std::ostringstream oss;
oss << std::endl
<< " ID Type E/I Refs Size Memory Ready Label" << std::endl
<< " =================================================================" << std::endl;
auto &ctx = context();
std::vector<uint32_t> indices;
indices.reserve(ctx.variables.size());
for (const auto& it : ctx.variables)
indices.push_back(it.first);
std::sort(indices.begin(), indices.end());
size_t mem_size_scheduled = 0,
mem_size_ready = 0,
mem_size_arith = 0;
for (uint32_t id : indices) {
if (id < ENOKI_CUDA_REG_RESERVED)
continue;
const Variable &v = ctx[id];
oss << " " << std::left << std::setw(9) << id << " ";
switch (v.type) {
case EnokiType::Int8: oss << "i8 "; break;
case EnokiType::UInt8: oss << "u8 "; break;
case EnokiType::Int16: oss << "i16"; break;
case EnokiType::UInt16: oss << "u16"; break;
case EnokiType::Int32: oss << "i32"; break;
case EnokiType::UInt32: oss << "u32"; break;
case EnokiType::Int64: oss << "i64"; break;
case EnokiType::UInt64: oss << "u64"; break;
case EnokiType::Float16: oss << "f16"; break;
case EnokiType::Float32: oss << "f32"; break;
case EnokiType::Float64: oss << "f64"; break;
case EnokiType::Bool: oss << "msk"; break;
case EnokiType::Pointer: oss << "ptr"; break;
default: throw std::runtime_error("Invalid array type!");
}
size_t mem_size = v.size * cuda_register_size(v.type);
oss << " ";
oss << std::left << std::setw(10) << (std::to_string(v.ref_count_ext) + " / " + std::to_string(v.ref_count_int)) << " ";
oss << std::left << std::setw(12) << v.size;
oss << std::left << std::setw(12) << mem_string(mem_size);
oss << (v.data ? "[x]" : "[ ]") << " ";
oss << v.label;
oss << std::endl;
if (v.data) {
mem_size_ready += mem_size;
} else {
if (v.ref_count_ext == 0)
mem_size_arith += mem_size;
else
mem_size_scheduled += mem_size;
}
}
oss << " =================================================================" << std::endl << std::endl
<< " Memory usage (ready) : " << mem_string(mem_size_ready) << std::endl
<< " Memory usage (scheduled) : " << mem_string(mem_size_ready) << " + "
<< mem_string(mem_size_scheduled) << " = " << mem_string(mem_size_ready + mem_size_scheduled) << std::endl
<< " Memory savings : " << mem_string(mem_size_arith) << std::endl << std::endl
<< " cuda_malloc() usage: "
<< mem_string(ctx.used) << " device, "
<< mem_string(ctx.used_managed) << " managed, "
<< mem_string(ctx.used_host) << " host." << std::endl
<< " max. usage: "
<< mem_string(ctx.watermark) << " device, "
<< mem_string(ctx.watermark_managed) << " managed, "
<< mem_string(ctx.watermark_host) << " host." << std::endl;
return strdup(oss.str().c_str());
}
ENOKI_EXPORT void cuda_malloc_trim() {
std::unordered_multimap<TaggedSize, void *, TaggedSizeHasher> free_map;
Context &ctx = context();
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
free_map.swap(ctx.free_map);
}
size_t freed_normal = 0, freed_managed = 0, freed_host = 0,
count = free_map.size();
for (auto kv : free_map) {
switch (kv.first.type) {
case Normal:
freed_normal += kv.first.size;
cuda_check(cudaFree(kv.second));
break;
case Managed:
freed_managed += kv.first.size;
cuda_check(cudaFree(kv.second));
break;
case Host:
freed_host += kv.first.size;
cuda_check(cudaFreeHost(kv.second));
break;
default:
throw std::runtime_error("cuda_malloc_trim(): internal error!");
}
}
if (ctx.log_level >= 4 && count > 0)
std::cerr << "cuda_malloc_trim(): freed " << count << " arrays ("
<< mem_string(freed_normal) << " device memory, "
<< mem_string(freed_managed) << " unified memory, and "
<< mem_string(freed_host) << " host memory)." << std::endl;
}
ENOKI_EXPORT void cuda_sync() {
Context &ctx = context();
if (ctx.log_level >= 4)
std::cerr << "cuda_sync()." << std::endl;
cuda_check(cudaDeviceSynchronize());
}
size_t malloc_round(size_t x) {
/* Round to next higher power of two */
x -= 1;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
x += 1;
return x;
}
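/* Quick self-check sketch (illustrative only; not part of the original source
 * and assumes <cassert> is available). */
static void malloc_round_selfcheck() {
    assert(malloc_round(1)    == 1);
    assert(malloc_round(4096) == 4096);   // exact powers of two are unchanged
    assert(malloc_round(4097) == 8192);   // everything else rounds up
}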
ENOKI_EXPORT void* cuda_malloc(size_t size) {
if (size == 0)
return nullptr;
size = malloc_round(size);
TaggedSize ts(Normal, size);
void *ptr = nullptr;
Context &ctx = context();
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto it = ctx.free_map.find(ts);
if (it != ctx.free_map.end()) {
ptr = it->second;
ctx.free_map.erase(it);
}
}
if (ptr == nullptr) {
cudaError_t ret = cudaMalloc(&ptr, size);
if (ret != cudaSuccess) {
cuda_sync();
cuda_malloc_trim();
cudaError_t ret = cudaMalloc(&ptr, size);
if (ret != cudaSuccess)
throw std::runtime_error("cuda_malloc(): out of memory!");
}
}
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto result = ctx.used_map.insert(std::make_pair(ptr, ts));
if (!result.second) {
fprintf(stderr, "cuda_malloc(): internal error!\n");
exit(EXIT_FAILURE);
}
ctx.used += size;
ctx.watermark = std::max(ctx.watermark, ctx.used);
}
return ptr;
}
ENOKI_EXPORT void* cuda_managed_malloc(size_t size) {
if (size == 0)
return nullptr;
size = malloc_round(size);
TaggedSize ts(Managed, size);
void *ptr = nullptr;
Context &ctx = context();
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto it = ctx.free_map.find(ts);
if (it != ctx.free_map.end()) {
ptr = it->second;
ctx.free_map.erase(it);
}
}
if (ptr == nullptr) {
cudaError_t ret = cudaMallocManaged(&ptr, size);
if (ret != cudaSuccess) {
cuda_sync();
cuda_malloc_trim();
cudaError_t ret = cudaMallocManaged(&ptr, size);
if (ret != cudaSuccess)
throw std::runtime_error("cuda_managed_malloc(): out of memory!");
}
}
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto result = ctx.used_map.insert(std::make_pair(ptr, ts));
if (!result.second) {
fprintf(stderr, "cuda_managed_malloc(): internal error!\n");
exit(EXIT_FAILURE);
}
ctx.used_managed += size;
ctx.watermark_managed = std::max(ctx.watermark_managed, ctx.used_managed);
}
return ptr;
}
ENOKI_EXPORT void* cuda_host_malloc(size_t size) {
if (size == 0)
return nullptr;
size = malloc_round(size);
TaggedSize ts(Host, size);
void *ptr = nullptr;
Context &ctx = context();
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto it = ctx.free_map.find(ts);
if (it != ctx.free_map.end()) {
ptr = it->second;
ctx.free_map.erase(it);
}
}
if (ptr == nullptr) {
cudaError_t ret = cudaMallocHost(&ptr, size);
if (ret != cudaSuccess) {
cuda_sync();
cuda_malloc_trim();
cudaError_t ret = cudaMallocHost(&ptr, size);
if (ret != cudaSuccess)
throw std::runtime_error("cuda_host_malloc(): out of memory!");
}
}
/* Critical section */ {
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto result = ctx.used_map.insert(std::make_pair(ptr, ts));
if (!result.second) {
fprintf(stderr, "cuda_host_malloc(): internal error!\n");
exit(EXIT_FAILURE);
}
ctx.used_host += size;
ctx.watermark_host = std::max(ctx.watermark_host, ctx.used_host);
}
return ptr;
}
ENOKI_EXPORT void cuda_free(void *ptr, cudaStream_t stream) {
if (ptr == nullptr)
return;
cudaStreamAddCallback(
stream, [](cudaStream_t stream, cudaError_t status, void *data) {
Context &ctx = context();
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto it = ctx.used_map.find(data);
if (it == ctx.used_map.end()) {
fprintf(stderr, "cuda_host_free(): unknown/unregistered pointer!\n");
exit(EXIT_FAILURE);
}
if (it->second.type == Normal) {
ctx.used -= it->second.size;
} else if (it->second.type == Managed) {
ctx.used_managed -= it->second.size;
} else {
fprintf(stderr, "cuda_host_free(): tried to free a host pointer!");
exit(EXIT_FAILURE);
}
ctx.free_map.insert(std::make_pair(it->second, data));
ctx.used_map.erase(it);
},
ptr, 0
);
}
ENOKI_EXPORT void cuda_free(void *ptr) {
cuda_free(ptr, nullptr);
}
ENOKI_EXPORT void cuda_host_free(void *ptr, cudaStream_t stream) {
if (ptr == nullptr)
return;
cudaStreamAddCallback(
stream, [](cudaStream_t stream, cudaError_t status, void *data) {
Context &ctx = context();
std::lock_guard<std::recursive_mutex> guard(ctx.malloc_mutex);
auto it = ctx.used_map.find(data);
if (it == ctx.used_map.end()) {
fprintf(stderr, "cuda_host_free(): unknown/unregistered pointer!\n");
exit(EXIT_FAILURE);
}
if (it->second.type != Host) {
fprintf(stderr, "cuda_host_free(): tried to free a device pointer!");
exit(EXIT_FAILURE);
}
            ctx.used_host -= it->second.size;
            ctx.free_map.insert(std::make_pair(it->second, data));
            ctx.used_map.erase(it);
},
ptr, 0
);
}
ENOKI_EXPORT void cuda_host_free(void *ptr) {
cuda_host_free(ptr, nullptr);
}
NAMESPACE_END(enoki)
#ifdef __CDT_PARSER__
#define __global__
#define __device__
#define __host__
#define __shared__
#endif
#define THREADS_PER_BLOCK 1024
/*****************************************************************************/
/* HELPER FUNCTIONS */
/*****************************************************************************/
/*
*
* atomicAdd for double
*
*/
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
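/* Minimal usage sketch (illustrative, not part of the original source):
 * accumulates a double-precision sum with atomicAdd, which falls back to the
 * CAS loop above on devices older than sm_60. */
__global__ void __cuda_atomicSum(const double *x, double *result, unsigned int nElements){
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < nElements)
        atomicAdd(result, x[index]);
}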
/*****************************************************************************/
/*
*
* mixed precision axpy
*
*/
__global__ void __cuda_axpy(int nElements, float alpha, const float *x, double *y){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
y[index] += alpha * x[index];
}
void _cuda_axpy(int nElements, float alpha, const float *x, double *y)
{
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_axpy <<< gridSize , THREADS_PER_BLOCK >>> (nElements, alpha, x, y);
}
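/* Host-side usage sketch (illustrative, not from the original source): d_x and
 * d_y are assumed to be device pointers holding at least n elements each. */
static void example_axpy(int n, const float *d_x, double *d_y) {
    _cuda_axpy(n, 0.5f, d_x, d_y);   // y[i] += 0.5 * x[i], accumulated in double
}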
/*
*
* exp
*
*/
template<typename T>
__global__ void __cuda_exp(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = exp(data[index]);
}
template<typename T>
void _cuda_exp(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_exp <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_exp<float>(float *, unsigned int);
template __global__ void __cuda_exp<double>(double *, unsigned int);
template void _cuda_exp<float>(float *, unsigned int, unsigned int);
template void _cuda_exp<double>(double *, unsigned int, unsigned int);
/*
*
* signedPow
*
*/
template<typename T>
__global__ void __cuda_signedPow(T *data, unsigned int nElements, T p){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if(data[index] < 0)
data[index] = -pow(-data[index], p);
else
data[index] = pow(data[index], p);
}
}
template<typename T>
void _cuda_signedPow(T *data, unsigned int nRows, unsigned int nColumns, T p)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_signedPow <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements, p);
}
template __global__ void __cuda_signedPow<float>(float *, unsigned int, float);
template __global__ void __cuda_signedPow<double>(double *, unsigned int, double);
template void _cuda_signedPow<float>(float *, unsigned int, unsigned int, float);
template void _cuda_signedPow<double>(double *, unsigned int, unsigned int, double);
/*
*
* log
*
*/
template<typename T>
__global__ void __cuda_log(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = log(data[index]);
}
template<typename T>
void _cuda_log(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_log <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_log<float>(float *, unsigned int);
template __global__ void __cuda_log<double>(double *, unsigned int);
template void _cuda_log<float>(float *, unsigned int, unsigned int);
template void _cuda_log<double>(double *, unsigned int, unsigned int);
/*
*
* sin
*
*/
template<typename T>
__global__ void __cuda_sin(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = sin(data[index]);
}
template<typename T>
void _cuda_sin(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_sin <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_sin<float>(float *, unsigned int);
template __global__ void __cuda_sin<double>(double *, unsigned int);
template void _cuda_sin<float>(float *, unsigned int, unsigned int);
template void _cuda_sin<double>(double *, unsigned int, unsigned int);
/*
*
* cos
*
*/
template<typename T>
__global__ void __cuda_cos(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = cos(data[index]);
}
template<typename T>
void _cuda_cos(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_cos <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_cos<float>(float *, unsigned int);
template __global__ void __cuda_cos<double>(double *, unsigned int);
template void _cuda_cos<float>(float *, unsigned int, unsigned int);
template void _cuda_cos<double>(double *, unsigned int, unsigned int);
/*
*
* asin
*
*/
template<typename T>
__global__ void __cuda_asin(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = asin(data[index]);
}
template<typename T>
void _cuda_asin(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_asin <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_asin<float>(float *, unsigned int);
template __global__ void __cuda_asin<double>(double *, unsigned int);
template void _cuda_asin<float>(float *, unsigned int, unsigned int);
template void _cuda_asin<double>(double *, unsigned int, unsigned int);
/*
*
* acos
*
*/
template<typename T>
__global__ void __cuda_acos(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = acos(data[index]);
}
template<typename T>
void _cuda_acos(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_acos <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_acos<float>(float *, unsigned int);
template __global__ void __cuda_acos<double>(double *, unsigned int);
template void _cuda_acos<float>(float *, unsigned int, unsigned int);
template void _cuda_acos<double>(double *, unsigned int, unsigned int);
/*
*
* abs
*
*/
template<typename T>
__global__ void __cuda_abs(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if (data[index] < 0)
data[index] = -data[index];
}
}
template<typename T>
void _cuda_abs(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_abs <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_abs<float>(float *, unsigned int);
template __global__ void __cuda_abs<double>(double *, unsigned int);
template void _cuda_abs<float>(float *, unsigned int, unsigned int);
template void _cuda_abs<double>(double *, unsigned int, unsigned int);
/*
 *
 * tanh
 *
 */
template<typename T>
__global__ void __cuda_tanh(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = tanh(data[index]);
}
template<typename T>
void _cuda_tanh(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_tanh <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template __global__ void __cuda_tanh<float>(float *, unsigned int);
template __global__ void __cuda_tanh<double>(double *, unsigned int);
template void _cuda_tanh<float>(float *, unsigned int, unsigned int);
template void _cuda_tanh<double>(double *, unsigned int, unsigned int);
/*
*
* sigmoid
*
*/
template<typename T>
__global__ void __cuda_sigmoid1(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = 1.0 / (1.0 + exp(-data[index]));
}
template<typename T>
__global__ void __cuda_sigmoid(T gamma, T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = 1.0 / (1.0 + exp(-gamma * data[index]));
}
template<typename T>
void _cuda_sigmoid(T gamma, T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
if (gamma == 1.0)
__cuda_sigmoid1 <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
else
__cuda_sigmoid <<< gridSize , THREADS_PER_BLOCK >>> (gamma, data, nElements);
}
template void _cuda_sigmoid<double>(double gamma, double *data, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_sigmoid<double>(double gamma, double *data, unsigned int nElements);
template __global__ void __cuda_sigmoid1<double>(double *data, unsigned int nElements);
template void _cuda_sigmoid<float>(float gamma, float *data, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_sigmoid<float>(float gamma, float *data, unsigned int nElements);
template __global__ void __cuda_sigmoid1<float>(float *data, unsigned int nElements);
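/*
 * Usage sketch (illustrative only): the wrappers in this file operate in place on a
 * column-major nRows x nColumns device buffer. The buffer name and sizes below are
 * made up for the example and are not part of this library.
 *
 *   float *d_activations = 0;
 *   cudaMalloc(&d_activations, nRows * nColumns * sizeof(float));
 *   // ... fill d_activations ...
 *   _cuda_sigmoid<float>(1.0f, d_activations, nRows, nColumns);  // gamma == 1 dispatches to __cuda_sigmoid1
 *   cudaDeviceSynchronize();                                     // kernel launches are asynchronous
 */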
/*
*
* triangle
*
*/
template<typename T>
__global__ void __cuda_triangle(T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if ((data[index] < -1.0) || (data[index] > 1.0))
data[index] = 0.0;
else if (data[index] < 0.0)
data[index] = 1.0 + data[index];
else
data[index] = 1.0 - data[index];
}
}
template<typename T>
void _cuda_triangle(T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_triangle <<< gridSize , THREADS_PER_BLOCK >>> (data, nElements);
}
template void _cuda_triangle<double>(double *data, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_triangle<double>(double *data, unsigned int nElements);
template void _cuda_triangle<float>(float *data, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_triangle<float>(float *data, unsigned int nElements);
/*
*
* sum
*
*/
template<typename T>
__global__ void __cuda_sum(T *data, unsigned int nRows, unsigned int nColumns, T *result){
*result = 0;
for (int i = 0; i < nRows * nColumns; i++){
*result += data[i];
}
}
template<typename T>
void _cuda_sum(T *data, unsigned int nRows, unsigned int nColumns, T *result)
{
// no parallelization, but probably not relevant
__cuda_sum <<< 1,1>>> (data, nRows, nColumns, result);
}
template __global__ void __cuda_sum<double>(double *data, unsigned int nRows, unsigned int nColumns, double *result);
template void _cuda_sum<double>(double *data, unsigned int nRows, unsigned int nColumns, double *result);
template __global__ void __cuda_sum<float>(float *data, unsigned int nRows, unsigned int nColumns, float *result);
template void _cuda_sum<float>(float *data, unsigned int nRows, unsigned int nColumns, float *result);
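/*
 * A possible parallel alternative (sketch only, not part of this file): a standard
 * shared-memory tree reduction per block, with each block adding its partial sum into
 * *result via atomicAdd. Assumes *result is zero-initialized, THREADS_PER_BLOCK is a
 * power of two, and (for T == double on devices below compute capability 6.0) that an
 * atomicAdd overload for double is available.
 *
 *   template<typename T>
 *   __global__ void __cuda_sumReduce(const T *data, unsigned int nElements, T *result){
 *       __shared__ T partial[THREADS_PER_BLOCK];
 *       unsigned int tid = threadIdx.x;
 *       unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
 *       partial[tid] = (index < nElements) ? data[index] : (T)0;
 *       __syncthreads();
 *       for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1){
 *           if (tid < s)
 *               partial[tid] += partial[tid + s];
 *           __syncthreads();
 *       }
 *       if (tid == 0)
 *           atomicAdd(result, partial[0]);
 *   }
 */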
/*
*
* columnwiseSquaredEuclideanDistance
*
*/
template<typename T>
__global__ void __cuda_columnwiseSquaredEuclideanDistance(const T *A, unsigned int nRows, unsigned int nColumns, const T *v, T *result){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nRows * nColumns) {
T d = A[index] - v[index % nRows];
d = d*d;
atomicAdd(&(result[index / nRows]), d);
}
}
template<typename T>
void _cuda_columnwiseSquaredEuclideanDistance(const T *A, unsigned int nRows, unsigned int nColumns, const T *v, T *result)
{
int gridSize = (int)ceil( (float) (nRows * nColumns)/THREADS_PER_BLOCK);
__cuda_columnwiseSquaredEuclideanDistance <<< gridSize , THREADS_PER_BLOCK >>> (A, nRows, nColumns, v, result);
}
template void _cuda_columnwiseSquaredEuclideanDistance<double>(const double *A, unsigned int nRows, unsigned int nColumns, const double *v, double *result);
template __global__ void __cuda_columnwiseSquaredEuclideanDistance<double>(const double *A, unsigned int nRows, unsigned int nColumns, const double *v, double *result);
template void _cuda_columnwiseSquaredEuclideanDistance<float>(const float *A, unsigned int nRows, unsigned int nColumns, const float *v, float *result);
template __global__ void __cuda_columnwiseSquaredEuclideanDistance<float>(const float *A, unsigned int nRows, unsigned int nColumns, const float *v, float *result);
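/*
 * Note: result must hold nColumns entries and be zero-initialized before the call,
 * since each thread accumulates its contribution with atomicAdd. For T == double,
 * native atomicAdd requires compute capability >= 6.0 (or an overload supplied
 * elsewhere in the build).
 */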
/*
*
* clone
*
*/
template<typename T>
__global__ void __cuda_clone(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nRowsB * nColumnsB) {
unsigned int nRowsA = nRowsB / nClones;
unsigned int rowA = (index % nRowsA);
unsigned int colA = index / nRowsB;
dataB[index] = dataA[colA * nRowsA + rowA];
}
}
template<typename T>
void _cuda_clone(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones)
{
int nElementsB = nRowsB * nColumnsB;
int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK);
__cuda_clone <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nRowsB, nColumnsB, nClones);
}
template void _cuda_clone<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones);
template __global__ void __cuda_clone<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones);
template void _cuda_clone<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones);
template __global__ void __cuda_clone<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones);
/*
*
* cloneElementwise
*
*/
template<typename T>
__global__ void __cuda_cloneElementwise(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int nClones){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElementsB) {
unsigned int indexA = index / nClones;
dataB[index] = dataA[indexA];
}
}
template<typename T>
void _cuda_cloneElementwise(const T *dataA, T *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones)
{
int nElementsB = nRowsB * nColumnsB;
int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK);
__cuda_cloneElementwise <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElementsB, nClones);
}
template void _cuda_cloneElementwise<double>(const double *dataA, double *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones);
template __global__ void __cuda_cloneElementwise<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int nClones);
template void _cuda_cloneElementwise<float>(const float *dataA, float *dataB, unsigned int nRowsB, unsigned int nColumnsB, unsigned int nClones);
template __global__ void __cuda_cloneElementwise<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int nClones);
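/*
 * Layout note (derived from the indexing above): with column-major storage,
 * _cuda_clone stacks nClones copies of each column of A vertically, i.e.
 * B(r, c) = A(r mod nRowsA, c) with nRowsA = nRowsB / nClones, while
 * _cuda_cloneElementwise repeats each element of A nClones times consecutively,
 * i.e. B[i] = A[i / nClones].
 */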
/*
*
* addElementsByModuloIndex
*
*/
template<typename T>
__global__ void __cuda_addElementsByModuloIndex(const T *dataA, T *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nRowsB * nColumns) {
unsigned int rowB = index % nRowsB;
unsigned int column = index / nRowsB;
for (unsigned int j = 0; j < nRowsA / nRowsB; j++) {
dataB[index] += dataA[column * nRowsA + (rowB + j * nRowsB)];
}
}
}
template<typename T>
void _cuda_addElementsByModuloIndex(const T *dataA, T *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns)
{
unsigned int nElementsB = nRowsB * nColumns;
int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK);
__cuda_addElementsByModuloIndex <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nRowsA, nRowsB, nColumns);
}
template void _cuda_addElementsByModuloIndex<double>(const double *dataA, double *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns);
template __global__ void __cuda_addElementsByModuloIndex<double>(const double *dataA, double *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns);
template void _cuda_addElementsByModuloIndex<float>(const float *dataA, float *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns);
template __global__ void __cuda_addElementsByModuloIndex<float>(const float *dataA, float *dataB, unsigned int nRowsA, unsigned int nRowsB, unsigned int nColumns);
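/*
 * Note: assumes nRowsA is a multiple of nRowsB; for each output element, all rows of
 * dataA that are congruent to rowB modulo nRowsB (within the same column) are added
 * onto dataB.
 */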
/*
*
* chiSquareFeatureMap
*
*/
template<typename T>
__global__ void __cuda_chiSquareFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElementsB) {
unsigned int j = index % (2*n + 1);
unsigned int baseIndex = index / (2*n + 1);
T x = (dataA[baseIndex] > min ? dataA[baseIndex] : min);
if (j == 0) {
dataB[index] = sqrt(samplingDistance * x);
}
else if (j % 2 == 1) {
T kappa = 1.0 / cosh(CUDART_PI * (j+1)/2 * samplingDistance);
dataB[index] = sqrt(2 * kappa * samplingDistance * x) * cos((j+1)/2 * samplingDistance * log(x));
}
else {
T kappa = 1.0 / cosh(CUDART_PI * j/2 * samplingDistance);
dataB[index] = sqrt(2 * kappa * samplingDistance * x) * sin(j/2 * samplingDistance * log(x));
}
}
}
template<typename T>
void _cuda_chiSquareFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min)
{
int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK);
__cuda_chiSquareFeatureMap <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElementsB, n, samplingDistance, min);
}
template void _cuda_chiSquareFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min);
template __global__ void __cuda_chiSquareFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min);
template void _cuda_chiSquareFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min);
template __global__ void __cuda_chiSquareFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min);
/*
*
* histogramIntersectionFeatureMap
*
*/
template<typename T>
__global__ void __cuda_histogramIntersectionFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElementsB) {
unsigned int j = index % (2*n + 1);
unsigned int baseIndex = index / (2*n + 1);
T x = (dataA[baseIndex] > min ? dataA[baseIndex] : min);
if (j == 0) {
dataB[index] = sqrt(2 / CUDART_PI * samplingDistance * x);
}
else if (j % 2 == 1) {
T kappa = 2.0 / (CUDART_PI * (1 + 4 * (j+1)/2 * samplingDistance * (j+1)/2 * samplingDistance));
dataB[index] = sqrt(2 * kappa * samplingDistance * x) * cos((j+1)/2 * samplingDistance * log(x));
}
else {
T kappa = 2.0 / (CUDART_PI * (1 + 4 * j/2 * samplingDistance * j/2 * samplingDistance));
dataB[index] = sqrt(2 * kappa * samplingDistance * x) * sin(j/2 * samplingDistance * log(x));
}
}
}
template<typename T>
void _cuda_histogramIntersectionFeatureMap(const T *dataA, T *dataB, unsigned int nElementsB, unsigned int n, T samplingDistance, T min)
{
int gridSize = (int)ceil( (float) nElementsB/THREADS_PER_BLOCK);
__cuda_histogramIntersectionFeatureMap <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElementsB, n, samplingDistance, min);
}
template void _cuda_histogramIntersectionFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min);
template __global__ void __cuda_histogramIntersectionFeatureMap<double>(const double *dataA, double *dataB, unsigned int nElementsB, unsigned int n, double samplingDistance, double min);
template void _cuda_histogramIntersectionFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min);
template __global__ void __cuda_histogramIntersectionFeatureMap<float>(const float *dataA, float *dataB, unsigned int nElementsB, unsigned int n, float samplingDistance, float min);
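/*
 * Background (for reference; cf. Vedaldi & Zisserman, "Efficient Additive Kernels via
 * Explicit Feature Maps"): both kernels above compute the (2n+1)-dimensional approximate
 * feature map of x with sampling distance L,
 *
 *   Psi(x)[0]    = sqrt(L * kappa(0) * x)
 *   Psi(x)[2k-1] = sqrt(2 * L * kappa(k*L) * x) * cos(k*L * log(x))
 *   Psi(x)[2k]   = sqrt(2 * L * kappa(k*L) * x) * sin(k*L * log(x)),   k = 1..n,
 *
 * where kappa(w) = 1/cosh(pi*w) for the chi-square kernel and
 * kappa(w) = 2 / (pi * (1 + 4*w^2)) for the histogram intersection kernel;
 * inputs are clipped from below at 'min' to keep log(x) finite.
 */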
/*
*
 * elementwiseMultiplicationWithApproximateFeatureMapDerivative
*
*/
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative(const T *dataA, T *dataB, unsigned int nElements, unsigned int n, T samplingDistance, T kappa0){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
unsigned int j = index % (2 * n + 1);
if (j == 0) {
dataB[index] *= dataA[index];
}
else if (j % 2 == 1) {
dataB[index] *= dataA[index] - (j+1) * samplingDistance * dataA[index + 1];
}
else {
dataB[index] *= dataA[index] + j * samplingDistance * dataA[index - 1];
}
dataB[index] *= samplingDistance * kappa0 / (2.0 * dataA[index - j] * dataA[index - j]);
}
}
template<typename T>
void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative(const T *dataA, T *dataB, unsigned int nElements, unsigned int n, T samplingDistance, T kappa0)
{
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, nElements, n, samplingDistance, kappa0);
}
template void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<double>(const double *dataA, double *dataB, unsigned int nElements, unsigned int n, double samplingDistance, double kappa0);
template __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<double>(const double *dataA, double *dataB, unsigned int nElements, unsigned int n, double samplingDistance, double kappa0);
template void _cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<float>(const float *dataA, float *dataB, unsigned int nElements, unsigned int n, float samplingDistance, float kappa0);
template __global__ void __cuda_elementwiseMultiplicationWithApproximateFeatureMapDerivative<float>(const float *dataA, float *dataB, unsigned int nElements, unsigned int n, float samplingDistance, float kappa0);
/*
*
* addSummedRows
*
*/
template<typename T>
__global__ void __cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (columnIndex < nColumns){
        T result = 0.0;
for (unsigned int i = 0; i < nRows; i++){
// result += matrix(i,columnIndex)
result += matrixDevPtr[columnIndex * nRows + i];
}
vectorDevPtr[columnIndex] += scale * result;
}
}
template<typename T>
void _cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
// parallelize over columns
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_addSummedRows <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns, scale);
}
template __global__ void __cuda_addSummedRows(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template void _cuda_addSummedRows(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template __global__ void __cuda_addSummedRows(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
template void _cuda_addSummedRows(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
/*
* slightly faster version using tmp array
*
*/
template<typename T>
__global__ void __cuda_summedRowsTmp(const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
T *tmpDevPtr, unsigned int tmpRows){
unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int columnPart = blockIdx.y;
if (columnIndex < nColumns){
unsigned int nRowsDiv = nRows / tmpRows;
unsigned int startRow = columnPart * nRowsDiv;
if (startRow < nRows){
unsigned int endRow = columnPart == tmpRows - 1 ? nRows : (columnPart + 1) * nRowsDiv;
T result = 0.0;
for (unsigned int i = startRow; i < endRow; i++){
// result += matrix(i, columnIndex)
result += matrixDevPtr[columnIndex * nRows + i];
}
tmpDevPtr[columnIndex*tmpRows + columnPart] = result;
}
}
}
template<typename T>
void _cuda_addSummedRows(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
T *tmpDevPtr, unsigned int tmpRows, const T scale){
int gridDimx = (int)ceil( (float) nColumns / THREADS_PER_BLOCK);
int gridDimy = tmpRows;
dim3 gridSize(gridDimx,gridDimy);
__cuda_summedRowsTmp <<< gridSize , THREADS_PER_BLOCK >>> (matrixDevPtr, nRows, nColumns, tmpDevPtr, tmpRows);
_cuda_addSummedRows<T>(vectorDevPtr, tmpDevPtr, tmpRows, nColumns, scale);
}
template __global__ void __cuda_summedRowsTmp<double>(const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
double *tmpDevPtr, unsigned int tmpRows);
template void _cuda_addSummedRows<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
double *tmpDevPtr, unsigned int tmpRows, const double scale);
template __global__ void __cuda_summedRowsTmp<float>(const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
float *tmpDevPtr, unsigned int tmpRows);
template void _cuda_addSummedRows<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
float *tmpDevPtr, unsigned int tmpRows, const float scale);
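/*
 * The two-pass variant above first lets each (column, row-block) pair write a partial
 * sum into the tmpRows x nColumns matrix tmpDevPtr (column-major, one row block per
 * blockIdx.y), and then reuses the plain _cuda_addSummedRows to reduce tmpDevPtr into
 * vectorDevPtr with the given scale.
 */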
/*
*
* addSummedColumns
*
*/
template<typename T>
__global__ void __cuda_addSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
unsigned int rowIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (rowIndex < nRows){
T result = 0.0;
for (unsigned int i = 0; i < nColumns; i++){
// result += matrix(rowIndex,i)
result += matrixDevPtr[i * nRows + rowIndex];
}
vectorDevPtr[rowIndex] += scale * result;
}
}
template<typename T>
void _cuda_addSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
// parallelize over rows
int gridSize = (int)ceil( (float) nRows/THREADS_PER_BLOCK);
__cuda_addSummedColumns <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns, scale);
}
template __global__ void __cuda_addSummedColumns<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template void _cuda_addSummedColumns<double>(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template __global__ void __cuda_addSummedColumns<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
template void _cuda_addSummedColumns<float>(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
/*
 *
 * addSummedColumnsChannelWise
 *
 */
template<typename T>
__global__ void __cuda_addSummedColumnsChannelWise(T *vector, const T* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const T scale)
{
unsigned int channelIndex = threadIdx.x + blockIdx.x * blockDim.x;
if(channelIndex < channels) {
unsigned int channelSize = nRows / channels;
for(unsigned int i=0; i < channelSize; i++) {
for(unsigned int j=0; j < nColumns; j++) {
vector[channelIndex] += scale * matrix[j * nRows + channelIndex * channelSize + i];
}
}
}
}
template<typename T>
void _cuda_addSummedColumnsChannelWise(T *vector, const T* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const T scale)
{
int gridSize = (int)ceil( (float) channels/THREADS_PER_BLOCK);
__cuda_addSummedColumnsChannelWise<<<gridSize, THREADS_PER_BLOCK>>>(vector, matrix, channels, nRows, nColumns, scale);
}
template __global__ void __cuda_addSummedColumnsChannelWise(double *vector, const double* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const double scale);
template __global__ void __cuda_addSummedColumnsChannelWise(float *vector, const float* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const float scale);
template void _cuda_addSummedColumnsChannelWise(double *vector, const double* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const double scale);
template void _cuda_addSummedColumnsChannelWise(float *vector, const float* matrix, const unsigned int channels, const unsigned int nRows, const unsigned int nColumns, const float scale);
/*
*
* addSquaredSummedColumns
*
*/
template<typename T>
__global__ void __cuda_addSquaredSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
unsigned int rowIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (rowIndex < nRows){
T result = 0.0;
for (unsigned int i = 0; i < nColumns; i++){
result += matrixDevPtr[i * nRows + rowIndex] * matrixDevPtr[i * nRows + rowIndex];
}
vectorDevPtr[rowIndex] += scale * result;
}
}
template<typename T>
void _cuda_addSquaredSummedColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const T scale){
// parallelize over rows
int gridSize = (int)ceil( (float) nRows/THREADS_PER_BLOCK);
__cuda_addSquaredSummedColumns <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns, scale);
}
template __global__ void __cuda_addSquaredSummedColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template void _cuda_addSquaredSummedColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const double scale);
template __global__ void __cuda_addSquaredSummedColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
template void _cuda_addSquaredSummedColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns, const float scale);
/*
*
* addSummedNeighborsInARow
*
*/
template<typename T>
__global__ void __cuda_addSummedNeighborsInARow(T* dataA, const T* dataB, unsigned int elementsA, unsigned int nNeighbors){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < elementsA){
for (unsigned int n = 0; n < nNeighbors; n++){
dataA[index] += dataB[index * nNeighbors + n];
}
}
}
template<typename T>
void _cuda_addSummedNeighborsInARow(T* dataA, const T* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors){
    // parallelize over all elements of A (rowsA * columnsA)
int gridSize = (int)ceil( (float) rowsA*columnsA/THREADS_PER_BLOCK);
__cuda_addSummedNeighborsInARow <<< gridSize , THREADS_PER_BLOCK >>> (dataA, dataB, rowsA * columnsA, nNeighbors);
}
template __global__ void __cuda_addSummedNeighborsInARow(double* dataA, const double* dataB, unsigned int elementsA, unsigned int nNeighbors);
template void _cuda_addSummedNeighborsInARow(double* dataA, const double* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors);
template __global__ void __cuda_addSummedNeighborsInARow(float* dataA, const float* dataB, unsigned int elementsA, unsigned int nNeighbors);
template void _cuda_addSummedNeighborsInARow(float* dataA, const float* dataB, unsigned int rowsA, unsigned int columnsA, unsigned int nNeighbors);
/*
*
* addWeighted
*
*/
template<typename T>
__global__ void __cuda_addWeighted(T *data, const T *X, const T* weights, unsigned int nRows, unsigned int nColumns){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nRows * nColumns) {
unsigned int col = index / nRows;
data[index] += X[index] * weights[col];
}
}
template<typename T>
void _cuda_addWeighted(T *data, const T *X, const T* weights, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_addWeighted <<< gridSize , THREADS_PER_BLOCK >>> (data, X, weights, nRows, nColumns);
}
template __global__ void __cuda_addWeighted<double>(double *data, const double *X, const double* weights, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_addWeighted<float>(float *data, const float *X, const float* weights, unsigned int nRows, unsigned int nColumns);
template void _cuda_addWeighted<double>(double *data, const double *X, const double* weights, unsigned int nRows, unsigned int nColumns);
template void _cuda_addWeighted<float>(float *data, const float *X, const float* weights, unsigned int nRows, unsigned int nColumns);
/*
*
* elementwise multiplication
*
*/
template<typename T>
__global__ void __cuda_elementwiseMultiplication(T *data, T *datab, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = data[index] * datab[index];
}
template<typename T>
void _cuda_elementwiseMultiplication(T *data, T *datab, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplication <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}
template __global__ void __cuda_elementwiseMultiplication<double>(double *data, double *datab, unsigned int nElements);
template __global__ void __cuda_elementwiseMultiplication<float>(float *data, float *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplication<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns);
template void _cuda_elementwiseMultiplication<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns);
/*
*
* elementwise division
*
*/
template<typename T>
__global__ void __cuda_elementwiseDivision(T *data, T *datab, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = data[index] / datab[index];
}
template<typename T>
void _cuda_elementwiseDivision(T *data, T *datab, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseDivision <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}
template __global__ void __cuda_elementwiseDivision<double>(double *data, double *datab, unsigned int nElements);
template __global__ void __cuda_elementwiseDivision<float>(float *data, float *datab, unsigned int nElements);
template void _cuda_elementwiseDivision<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns);
template void _cuda_elementwiseDivision<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns);
/*
*
* rprop Weight Update
*
*/
template<typename T>
__global__ void __cuda_rpropUpdate(T *currentValues, T *newGradients, T *oldGradients, T *updateValues, T increasingFactor, T decreasingFactor, T maxUpdateValue, T minUpdateValue, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
T change = oldGradients[index] * newGradients[index];
if (change > 0) {
updateValues[index] = updateValues[index] * increasingFactor;
if (updateValues[index] > maxUpdateValue)
updateValues[index] = maxUpdateValue;
} else if (change < 0) {
updateValues[index] = updateValues[index] * decreasingFactor;
if (updateValues[index] < minUpdateValue)
updateValues[index] = minUpdateValue;
}
if (newGradients[index] > 0)
currentValues[index] = currentValues[index] - updateValues[index];
else if (newGradients[index] < 0)
currentValues[index] = currentValues[index] + updateValues[index];
oldGradients[index] = newGradients[index];
}
}
template<typename T>
void _cuda_rpropUpdate(T *currentValues, T *newGradients, T *oldGradients, T *updateValues, T increasingFactor, T decreasingFactor, T maxUpdateValue, T minUpdateValue, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_rpropUpdate <<< gridSize , THREADS_PER_BLOCK >>> (currentValues, newGradients, oldGradients, updateValues, increasingFactor, decreasingFactor, maxUpdateValue, minUpdateValue, nElements);
}
template __global__ void __cuda_rpropUpdate<double>(double *currentValues, double *newGradients, double *oldGradients, double *updateValues, double increasingFactor, double decreasingFactor, double maxUpdateValue, double minUpdateValue, unsigned int nElements);
template __global__ void __cuda_rpropUpdate<float>(float *currentValues, float *newGradients, float *oldGradients, float *updateValues, float increasingFactor, float decreasingFactor, float maxUpdateValue, float minUpdateValue, unsigned int nElements);
template void _cuda_rpropUpdate<double>(double *currentValues, double *newGradients, double *oldGradients, double *updateValues, double increasingFactor, double decreasingFactor, double maxUpdateValue, double minUpdateValue, unsigned int nRows, unsigned int nColumns);
template void _cuda_rpropUpdate<float>(float *currentValues, float *newGradients, float *oldGradients, float *updateValues, float increasingFactor, float decreasingFactor, float maxUpdateValue, float minUpdateValue, unsigned int nRows, unsigned int nColumns);
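/*
 * Implements the element-wise Rprop step as coded above: if the new and old gradient
 * agree in sign, the per-element step size grows by increasingFactor (capped at
 * maxUpdateValue); if they disagree, it shrinks by decreasingFactor (floored at
 * minUpdateValue). The parameter then moves by one step against the sign of the new
 * gradient, and the new gradient is stored as the old one for the next iteration.
 */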
/*
*
* add constant elementwise
*
*/
template<typename T>
__global__ void __cuda_addConstantElementwise(T constant, T *data, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = data[index] + constant;
}
template<typename T>
void _cuda_addConstantElementwise(T constant, T *data, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
    int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_addConstantElementwise <<< gridSize , THREADS_PER_BLOCK >>> (constant, data, nElements);
}
template __global__ void __cuda_addConstantElementwise<double>(double constant, double *data, unsigned int nElements);
template void _cuda_addConstantElementwise<double>(double constant, double *data, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_addConstantElementwise<float>(float constant, float *data, unsigned int nElements);
template void _cuda_addConstantElementwise<float>(float constant, float *data, unsigned int nRows, unsigned int nColumns);
/*
*
* getMaxOfColumns
*
*/
template<typename T>
__global__ void __cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){
unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (columnIndex < nColumns){
T result = matrixDevPtr[columnIndex * nRows];
for (unsigned int i = 1; i < nRows; i++){
T val = matrixDevPtr[columnIndex * nRows + i];
result = fmax(result, val);
}
vectorDevPtr[columnIndex] = result;
}
}
template<typename T>
void _cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){
// parallelize over columns
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_getMaxOfColumns <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nColumns);
}
template __global__ void __cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template void _cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template void _cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
/*
* slightly faster version using tmp array
*/
template<typename T>
__global__ void __cuda_getMaxOfColumnsTmp(const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
T *tmpDevPtr, unsigned int tmpRows){
unsigned int columnIndex = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int columnPart = blockIdx.y;
if (columnIndex < nColumns){
unsigned int nRowsDiv = nRows / tmpRows;
unsigned int startRow = columnPart * nRowsDiv;
if (startRow < nRows){
unsigned int endRow = columnPart == tmpRows - 1 ? nRows : (columnPart + 1) * nRowsDiv;
            T result = matrixDevPtr[columnIndex * nRows + startRow];
            for (unsigned int i = startRow; i < endRow; i++){
                // result = max(result, matrix(i, columnIndex))
                T val = matrixDevPtr[columnIndex * nRows + i];
                result = fmax(result, val);
}
tmpDevPtr[columnIndex*tmpRows + columnPart] = result;
}
}
}
template<typename T>
void _cuda_getMaxOfColumns(T *vectorDevPtr, const T *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
T *tmpDevPtr, unsigned int tmpRows){
int gridDimx = (int)ceil( (float) nColumns / THREADS_PER_BLOCK);
int gridDimy = tmpRows;
dim3 gridSize(gridDimx,gridDimy);
__cuda_getMaxOfColumnsTmp <<< gridSize , THREADS_PER_BLOCK >>> (matrixDevPtr, nRows, nColumns, tmpDevPtr, tmpRows);
_cuda_getMaxOfColumns<T>(vectorDevPtr, tmpDevPtr, tmpRows, nColumns);
}
template __global__ void __cuda_getMaxOfColumnsTmp(const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
double *tmpDevPtr, unsigned int tmpRows);
template void _cuda_getMaxOfColumns(double *vectorDevPtr, const double *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
double *tmpDevPtr, unsigned int tmpRows);
template __global__ void __cuda_getMaxOfColumnsTmp(const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
float *tmpDevPtr, unsigned int tmpRows);
template void _cuda_getMaxOfColumns(float *vectorDevPtr, const float *matrixDevPtr, unsigned int nRows, unsigned int nColumns,
float *tmpDevPtr, unsigned int tmpRows);
/*
*
* elementwiseMultiplicationWithSigmoidDerivative
*
*/
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(T *data, T *datab, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = data[index] * (datab[index] * (1 - datab[index]));
}
template<typename T>
void _cuda_elementwiseMultiplicationWithSigmoidDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithSigmoidDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}
template __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(double *data, double *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithSigmoidDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_elementwiseMultiplicationWithSigmoidDerivative(float *data, float *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithSigmoidDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns);
/*
*
* elementwiseMultiplicationWithTriangleDerivative
*
*/
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(T *data, T *datab, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if ((datab[index] < -1.0) || (datab[index] > 1.0) || (datab[index] == 0))
data[index] = 0;
else if (datab[index] > 0.0)
data[index] = -data[index];
}
}
template<typename T>
void _cuda_elementwiseMultiplicationWithTriangleDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithTriangleDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}
template __global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(double *data, double *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithTriangleDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_elementwiseMultiplicationWithTriangleDerivative(float *data, float *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithTriangleDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns);
/*
*
* elementwiseMultiplicationWithTanhDerivative
*
*/
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(T *data, T *datab, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = data[index] * (1 - pow(datab[index],2));
}
template<typename T>
void _cuda_elementwiseMultiplicationWithTanhDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithTanhDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements);
}
template __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(double *data, double *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithTanhDerivative(double *data, double *datab, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_elementwiseMultiplicationWithTanhDerivative(float *data, float *datab, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithTanhDerivative(float *data, float *datab, unsigned int nRows, unsigned int nColumns);
/*
*
* multiplicationWithSoftmaxDerivative
*
*/
template<typename T>
__global__ void __cuda_multiplicationWithSoftmaxDerivative(T *data, T *datab, T *datac, unsigned int nElements, unsigned int nRows){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = datab[index] * (data[index] - datac[index/nRows]);
}
template<typename T>
void _cuda_multiplicationWithSoftmaxDerivative(T *data, T *datab, T *datac, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_multiplicationWithSoftmaxDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, datac, nElements, nRows);
}
template __global__ void __cuda_multiplicationWithSoftmaxDerivative(double *data, double *datab, double *datac, unsigned int nElements, unsigned int nRows);
template void _cuda_multiplicationWithSoftmaxDerivative(double *data, double *datab, double *datac, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_multiplicationWithSoftmaxDerivative(float *data, float *datab, float *datac, unsigned int nElements, unsigned int nRows);
template void _cuda_multiplicationWithSoftmaxDerivative(float *data, float *datab, float *datac, unsigned int nRows, unsigned int nColumns);
/*
* elementwiseMultiplicationWithClippedDerivative
*
*/
template <typename T>
__global__ void __cuda_elementwiseMultiplicationWithClippedDerivative(T *errOut, T *activations, unsigned int nElements, T thresholdLeft, T thresholdRight){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if ((activations[index] <= thresholdLeft) || (activations[index] >= thresholdRight)) errOut[index] = 0;
}
}
template <typename T>
void _cuda_elementwiseMultiplicationWithClippedDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T thresholdLeft, T thresholdRight) {
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithClippedDerivative<T> <<<gridSize, THREADS_PER_BLOCK>>> (data, datab, nElements, thresholdLeft, thresholdRight);
}
template __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative<float>(float*, float*, unsigned int, float, float);
template __global__ void __cuda_elementwiseMultiplicationWithClippedDerivative<double>(double*, double*, unsigned int, double, double);
template void _cuda_elementwiseMultiplicationWithClippedDerivative<float>(float*, float*, unsigned int, unsigned int, float, float);
template void _cuda_elementwiseMultiplicationWithClippedDerivative<double>(double*, double*, unsigned int, unsigned int, double, double);
/*
* elementwiseMultiplicationWithSignedPowDerivative
*
*/
template <typename T>
__global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative(T *errOut, T *activations, unsigned int nElements, T p){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if (activations[index] == 0)
errOut[index] = 0;
else if (activations[index] < 0)
errOut[index] *= p * pow(-activations[index], p - 1);
else
errOut[index] *= p * pow(activations[index], p - 1);
}
}
template <typename T>
void _cuda_elementwiseMultiplicationWithSignedPowDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T p) {
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithSignedPowDerivative<T> <<<gridSize, THREADS_PER_BLOCK>>> (data, datab, nElements, p);
}
template __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative<float>(float*, float*, unsigned int, float);
template __global__ void __cuda_elementwiseMultiplicationWithSignedPowDerivative<double>(double*, double*, unsigned int, double);
template void _cuda_elementwiseMultiplicationWithSignedPowDerivative<float>(float*, float*, unsigned int, unsigned int, float);
template void _cuda_elementwiseMultiplicationWithSignedPowDerivative<double>(double*, double*, unsigned int, unsigned int, double);
/*
* elementwiseMultiplicationWithLogDerivative
*
*/
template <typename T>
__global__ void __cuda_elementwiseMultiplicationWithLogDerivative(T *errOut, T *activations, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
errOut[index] *= exp(-activations[index]);
}
template <typename T>
void _cuda_elementwiseMultiplicationWithLogDerivative(T *data, T *datab, unsigned int nRows, unsigned int nColumns) {
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithLogDerivative<T> <<<gridSize, THREADS_PER_BLOCK>>> (data, datab, nElements);
}
template __global__ void __cuda_elementwiseMultiplicationWithLogDerivative<float>(float*, float*, unsigned int);
template __global__ void __cuda_elementwiseMultiplicationWithLogDerivative<double>(double*, double*, unsigned int);
template void _cuda_elementwiseMultiplicationWithLogDerivative<float>(float*, float*, unsigned int, unsigned int);
template void _cuda_elementwiseMultiplicationWithLogDerivative<double>(double*, double*, unsigned int, unsigned int);
/*
*
* multiplicationWithL2NormalizationDerivative
*
*/
template<typename T>
__global__ void __cuda_multiplicationWithL2NormalizationDerivative(T *data, T *datab, T *datac, T *datad, unsigned int nElements, unsigned int nRows){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = (data[index] - datab[index] * datac[index/nRows]) / datad[index/nRows];
}
template<typename T>
void _cuda_multiplicationWithL2NormalizationDerivative(T *data, T *datab, T *datac, T *datad, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_multiplicationWithL2NormalizationDerivative <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, datac, datad, nElements, nRows);
}
template __global__ void __cuda_multiplicationWithL2NormalizationDerivative(double *data, double *datab, double *datac, double *datad, unsigned int nElements, unsigned int nRows);
template void _cuda_multiplicationWithL2NormalizationDerivative(double *data, double *datab, double *datac, double *datad, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_multiplicationWithL2NormalizationDerivative(float *data, float *datab, float *datac, float *datad, unsigned int nElements, unsigned int nRows);
template void _cuda_multiplicationWithL2NormalizationDerivative(float *data, float *datab, float *datac, float *datad, unsigned int nRows, unsigned int nColumns);
/*
*
* addToAllColumns
*
*/
template<typename T>
__global__ void __cuda_addToAllColumns(T *data, T *datab, unsigned int nElements, unsigned int nRows, T alpha){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] += alpha * datab[index%nRows];
}
template<typename T>
void _cuda_addToAllColumns(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T alpha)
{
// TODO implement kernel without % operator (slow on GPU)
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_addToAllColumns <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements, nRows, alpha);
}
template __global__ void __cuda_addToAllColumns<double>(double *data, double *datab, unsigned int nElements, unsigned int nRows, double alpha);
template void _cuda_addToAllColumns<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns, double alpha);
template __global__ void __cuda_addToAllColumns<float>(float *data, float *datab, unsigned int nElements, unsigned int nRows, float alpha);
template void _cuda_addToAllColumns<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns, float alpha);
/*
* addToAllChannels
* Adds one element of vector to one channel
*/
template<typename T>
__global__ void __cuda_addToAllChannels(T *mat, T *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, T alpha)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < nElements) {
unsigned int channelSize = nRows / channels;
mat[index] += alpha * vec[(index%nRows)/channelSize];
}
}
template<typename T>
void _cuda_addToAllChannels(T *mat, T *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, T alpha)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_addToAllChannels<<<gridSize, THREADS_PER_BLOCK>>>(mat, vec, channels, nRows, nElements, alpha);
}
template __global__ void __cuda_addToAllChannels(double *mat, double *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, double alpha);
template __global__ void __cuda_addToAllChannels(float *mat, float *vec, unsigned int channels, unsigned int nRows, unsigned int nElements, float alpha);
template void _cuda_addToAllChannels(double *mat, double *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, double alpha);
template void _cuda_addToAllChannels(float *mat, float *vec, unsigned int channels, unsigned int nRows, unsigned int nColumns, float alpha);
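/*
 * Layout note: each column is an image stored channel after channel (channelSize =
 * nRows / channels contiguous rows per channel), so this adds alpha * vec[c] to every
 * element of channel c, i.e. a per-channel bias add.
 */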
/*
*
* addToAllRows
*
*/
template<typename T>
__global__ void __cuda_addToAllRows(T *data, T *datab, unsigned int nElements, unsigned int nRows, T alpha){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] += alpha * datab[index/nRows];
}
template<typename T>
void _cuda_addToAllRows(T *data, T *datab, unsigned int nRows, unsigned int nColumns, T alpha)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_addToAllRows <<< gridSize , THREADS_PER_BLOCK >>> (data, datab, nElements, nRows, alpha);
}
template __global__ void __cuda_addToAllRows<double>(double *data, double *datab, unsigned int nElements, unsigned int nRows, double alpha);
template void _cuda_addToAllRows<double>(double *data, double *datab, unsigned int nRows, unsigned int nColumns, double alpha);
template __global__ void __cuda_addToAllRows<float>(float *data, float *datab, unsigned int nElements, unsigned int nRows, float alpha);
template void _cuda_addToAllRows<float>(float *data, float *datab, unsigned int nRows, unsigned int nColumns, float alpha);
/*
*
* multiplyColumnsByScalars
*
*/
template<typename T>
__global__ void __cuda_multiplyColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int colIndex = index / nRows;
if (index < nElements)
matrixDevPtr[index] = matrixDevPtr[index] * vectorDevPtr[colIndex];
}
template<typename T>
void _cuda_multiplyColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){
    // TODO parallelization without integer division (slow on GPU)
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_multiplyColumnsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements);
}
template __global__ void __cuda_multiplyColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nElements);
template void _cuda_multiplyColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_multiplyColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements);
template void _cuda_multiplyColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
/*
*
* divideColumnsByScalars
*
*/
template<typename T>
__global__ void __cuda_divideColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int colIndex = index / nRows;
if (index < nElements)
matrixDevPtr[index] = matrixDevPtr[index] / vectorDevPtr[colIndex];
}
template<typename T>
void _cuda_divideColumnsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){
    // TODO parallelization without integer division (slow on GPU)
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_divideColumnsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements);
}
template __global__ void __cuda_divideColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nElements);
template void _cuda_divideColumnsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_divideColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements);
template void _cuda_divideColumnsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
/*
*
* multiplyRowsByScalars
*
*/
template<typename T>
__global__ void __cuda_multiplyRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int rowIndex = index % nRows;
if (index < nElements)
matrixDevPtr[index] = matrixDevPtr[index] * vectorDevPtr[rowIndex];
}
template<typename T>
void _cuda_multiplyRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){
// TODO parallelization without mod operator (mod is slow on GPU)
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_multiplyRowsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements);
}
template __global__ void __cuda_multiplyRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows,unsigned int nElements);
template void _cuda_multiplyRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_multiplyRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nElements);
template void _cuda_multiplyRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
/*
*
* divideRowsByScalars
*
*/
template<typename T>
__global__ void __cuda_divideRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int rowIndex = index % nRows;
if (index < nElements)
matrixDevPtr[index] = matrixDevPtr[index] / vectorDevPtr[rowIndex];
}
template<typename T>
void _cuda_divideRowsByScalars(const T *vectorDevPtr, T *matrixDevPtr, unsigned int nRows, unsigned int nColumns){
// TODO parallelization without mod operator (mod is slow on GPU)
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_divideRowsByScalars <<< gridSize , THREADS_PER_BLOCK >>> (vectorDevPtr, matrixDevPtr, nRows, nElements);
}
template __global__ void __cuda_divideRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows,unsigned int nElements);
template void _cuda_divideRowsByScalars<double>(const double *vectorDevPtr, double *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_divideRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows,unsigned int nElements);
template void _cuda_divideRowsByScalars<float>(const float *vectorDevPtr, float *matrixDevPtr, unsigned int nRows, unsigned int nColumns);
/*
*
* fill
*
*/
template<typename T>
__global__ void __cuda_fill(T *data, T value, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements)
data[index] = value;
}
template<typename T>
void _cuda_fill(T *data, T value, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_fill <<< gridSize , THREADS_PER_BLOCK >>> (data, value, nElements);
}
template __global__ void __cuda_fill<double>(double *data, double value, unsigned int nElements);
template void _cuda_fill<double>(double *data, double value, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_fill<float>(float *data, float value, unsigned int nElements);
template void _cuda_fill<float>(float *data, float value, unsigned int nRows, unsigned int nColumns);
/*
*
* Average Pooling
*
*/
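/*
 * Layout implied by the indexing below: one image per column, each column consisting of
 * sourceChannels blocks of sourceWidth x sourceHeight pixels (pixel index = x * sourceHeight + y).
 * Every output value is the window sum divided by poolSize*poolSize, so windows truncated at the
 * image border are still averaged over the full window size. The minValue argument is not read by
 * this kernel; it apparently only mirrors the max-pooling signature.
 */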
template<typename T>
__global__ void __cuda_avgPool(const T *source, T *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, const T minValue)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int resultWidth = ceil((float)sourceWidth/stride);
unsigned int resultHeight = ceil((float)sourceHeight/stride);
unsigned int resultRows = resultWidth * resultHeight * sourceChannels;
if(index < (resultRows * sourceColumns))
{
int imageNum = index / resultRows;
int resultPixelNum = index % resultRows;
int channelNum = resultPixelNum / (resultWidth * resultHeight);
resultPixelNum %= (resultWidth * resultHeight);
int resultPixelX = resultPixelNum / resultHeight;
int resultPixelY = resultPixelNum % resultHeight;
int sourcePixelX = resultPixelX * stride;
int sourcePixelY = resultPixelY * stride;
		T sum = 0;
		T num = 0;
		int sourceIndex = -1;
		for(int i=sourcePixelX; (i<(sourcePixelX+poolSize)) && (i<sourceWidth); i++)
		{
			for(int j=sourcePixelY; (j<(sourcePixelY + poolSize)) && (j<sourceHeight); j++)
			{
				sourceIndex = imageNum * sourceRows + channelNum * (sourceWidth * sourceHeight) +
						i * sourceHeight + j;
				sum += source[sourceIndex];
				num += 1;
			}
		}
		int resultIndex = imageNum * resultRows + channelNum * (resultWidth * resultHeight) +
				resultPixelX * resultHeight + resultPixelY;
		result[resultIndex] = sum / (poolSize * poolSize); // averages over the full window size, not over num (the count of in-bounds pixels)
}
}
template<typename T>
void _cuda_avgPool(const T *source, T *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride)
{
unsigned int nElements = (int)ceil((float)sourceWidth/stride) *
(int)ceil((float)sourceHeight/stride) * sourceChannels * sourceColumns;
int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK);
__cuda_avgPool<<<gridSize, THREADS_PER_BLOCK>>>(source, result, sourceRows,
sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride, std::numeric_limits<T>::min());
}
template __global__ void __cuda_avgPool(const double *source, double *result,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride, double minValue);
template __global__ void __cuda_avgPool(const float *source, float *result,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride, float minValue);
template void _cuda_avgPool(const double *source, double *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride);
template void _cuda_avgPool(const float *source, float *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride);
/*
*
 * Average Pooling Backpropagation
*
*/
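/*
 * Each input pixel accumulates 1/(poolSize*poolSize) of the error of every pooling window
 * that contains it; the loops below enumerate exactly those windows.
 */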
template<typename T>
__global__ void __cuda_backPropogateAvgPool(T *result, const T *errorSignal, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int errorSignalWidth = ceil((double)sourceWidth/stride);
unsigned int errorSignalHeight = ceil((double)sourceHeight/stride);
unsigned int errorSignalRows = errorSignalWidth * errorSignalHeight * sourceChannels;
if(index < (sourceRows * sourceColumns))
{
int imageNum = index / sourceRows;
int imagePixelNum = index % sourceRows;
int channel = imagePixelNum / (sourceWidth * sourceHeight);
imagePixelNum %= (sourceWidth * sourceHeight);
int pixelX = imagePixelNum / sourceHeight;
int pixelY = imagePixelNum % sourceHeight;
int indexInErrorSignal = -1;
		//compute the start (gridStartX/gridStartY) of the first pooling window that contains the current pixel
unsigned int gridStartX = (pixelX + 1 - (int)poolSize) < 0 ? 0 :
(unsigned int)(ceil((float)(pixelX + 1 - (int)poolSize)/(float)stride) * stride);
unsigned int gridStartY = (pixelY + 1 - (int)poolSize) < 0 ? 0 :
(unsigned int)(ceil((float)(pixelY + 1 - (int)poolSize)/(float)stride) * stride);
//////////////////////////////////
for(unsigned int gridX=gridStartX; gridX<=pixelX; gridX+=stride)
{
for(unsigned int gridY=gridStartY; gridY<=pixelY; gridY+=stride)
{
indexInErrorSignal = imageNum * errorSignalRows + channel * errorSignalHeight * errorSignalWidth +
(gridX/stride) * errorSignalHeight + (gridY/stride);
result[index] += errorSignal[indexInErrorSignal] / (T)(poolSize * poolSize);
}
}
}
}
template<typename T>
void _cuda_backPropogateAvgPool(T *result, const T *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride)
{
unsigned int nElements = sourceRows * sourceColumns;
int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK);
__cuda_backPropogateAvgPool<<<gridSize, THREADS_PER_BLOCK>>>(result, errorSignal,
sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride);
}
template __global__ void __cuda_backPropogateAvgPool(double *result, const double *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
template __global__ void __cuda_backPropogateAvgPool(float *result, const float *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
template void _cuda_backPropogateAvgPool(double *result, const double *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
template void _cuda_backPropogateAvgPool(float *result, const float *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
/*
*
* Max Pooling
*
*/
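/*
 * Same layout as average pooling; minValue initializes the running maximum before the
 * window is scanned.
 */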
template<typename T>
__global__ void __cuda_maxPool(const T *source, T *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride, const T minValue)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int resultWidth = ceil((float)sourceWidth/stride);
unsigned int resultHeight = ceil((float)sourceHeight/stride);
unsigned int resultRows = resultWidth * resultHeight * sourceChannels;
if(index < (resultRows * sourceColumns))
{
int imageNum = index / resultRows;
int resultPixelNum = index % resultRows;
int channelNum = resultPixelNum / (resultWidth * resultHeight);
resultPixelNum %= (resultWidth * resultHeight);
int resultPixelX = resultPixelNum / resultHeight;
int resultPixelY = resultPixelNum % resultHeight;
int sourcePixelX = resultPixelX * stride;
int sourcePixelY = resultPixelY * stride;
T maxValue = minValue;
		int sourceIndex = -1;
for(int i=sourcePixelX; (i<(sourcePixelX+poolSize)) && (i<sourceWidth); i++)
{
for(int j=sourcePixelY; (j<(sourcePixelY + poolSize)) && (j<sourceHeight); j++)
{
				sourceIndex = imageNum * sourceRows + channelNum * (sourceWidth * sourceHeight) +
						i * sourceHeight + j;
				if(source[sourceIndex] >= maxValue)
				{
					maxValue = source[sourceIndex];
}
}
}
int resultIndex = imageNum * resultRows + channelNum * (resultWidth * resultHeight) +
resultPixelX * resultHeight + resultPixelY;
result[resultIndex] = maxValue;
}
}
template<typename T>
void _cuda_maxPool(const T *source, T *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride)
{
unsigned int nElements = (int)ceil((float)sourceWidth/stride) *
(int)ceil((float)sourceHeight/stride) * sourceChannels * sourceColumns;
int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK);
	// initialize the running maximum with the most negative representable value;
	// std::numeric_limits<T>::min() is the smallest *positive* value for floating-point types
	// and would give wrong results for all-negative windows
	__cuda_maxPool<<<gridSize, THREADS_PER_BLOCK>>>(source, result, sourceRows,
			sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride, -std::numeric_limits<T>::max());
}
template __global__ void __cuda_maxPool(const double *source, double *result,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride, double minValue);
template __global__ void __cuda_maxPool(const float *source, float *result,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride, float minValue);
template void _cuda_maxPool(const double *source, double *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride);
template void _cuda_maxPool(const float *source, float *result, const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride);
/*
*
 * Max Pooling Backpropagation
*
*/
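/*
 * The error of each pooled output is routed back to the input positions that attain the
 * window maximum; if several positions tie, the error is split evenly among them (numMaxima).
 */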
template<typename T>
__global__ void __cuda_backPropogateMaxPool(T *result, const T* activationIn, const T* activationOut, const T *errorSignal,const unsigned int sourceRows,
const unsigned int sourceColumns, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int poolSize, const unsigned int stride)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int errorSignalWidth = ceil((double)sourceWidth/stride);
unsigned int errorSignalHeight = ceil((double)sourceHeight/stride);
unsigned int errorSignalRows = errorSignalWidth * errorSignalHeight * sourceChannels;
if(index < (sourceRows * sourceColumns))
{
int imageNum = index / sourceRows;
int imagePixelNum = index % sourceRows;
int channel = imagePixelNum / (sourceWidth * sourceHeight);
imagePixelNum %= (sourceWidth * sourceHeight);
int pixelX = imagePixelNum / sourceHeight;
int pixelY = imagePixelNum % sourceHeight;
int indexInErrorSignal = -1;
int numMaxima = 0;
		//compute the start (gridStartX/gridStartY) of the first pooling window that contains the current pixel
unsigned int gridStartX = (pixelX + 1 - (int)poolSize) < 0 ? 0 :
(unsigned int)(ceil((float)(pixelX + 1 - (int)poolSize)/(float)stride) * stride);
unsigned int gridStartY = (pixelY + 1 - (int)poolSize) < 0 ? 0 :
(unsigned int)(ceil((float)(pixelY + 1 - (int)poolSize)/(float)stride) * stride);
//////////////////////////////////
for(unsigned int gridX=gridStartX; gridX<=pixelX; gridX+=stride)
{
for(unsigned int gridY=gridStartY; gridY<=pixelY; gridY+=stride)
{
indexInErrorSignal = imageNum * errorSignalRows + channel * errorSignalHeight * errorSignalWidth +
(gridX/stride) * errorSignalHeight + (gridY/stride);
				//the current pixel is not the maximum of the current window
if(activationIn[index] != activationOut[indexInErrorSignal])
break;
numMaxima = 0;
for(unsigned int i=gridX; (i<(gridX + poolSize)) && i<sourceWidth; i++)
{
for(unsigned int j=gridY;(j<(gridY+poolSize)) && j<sourceHeight; j++)
{
int indexInActivationIn = imageNum * sourceRows + channel * sourceHeight * sourceWidth +
i * sourceHeight + j;
if(activationIn[indexInActivationIn] ==
activationOut[indexInErrorSignal])
{
numMaxima += 1;
}
}
}
result[index] += errorSignal[indexInErrorSignal] / (T) numMaxima;
}
}
}
}
template<typename T>
void _cuda_backPropogateMaxPool(T *result, const T* activationIn, const T* activationOut, const T *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride)
{
unsigned int nElements = sourceRows * sourceColumns;
int gridSize = (int)ceil((float) nElements/THREADS_PER_BLOCK);
__cuda_backPropogateMaxPool<<<gridSize, THREADS_PER_BLOCK>>>(result, activationIn, activationOut,
errorSignal, sourceRows, sourceColumns, sourceWidth, sourceHeight, sourceChannels, poolSize, stride);
}
template __global__ void __cuda_backPropogateMaxPool(double *result, const double* activationIn, const double* activationOut, const double *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
template __global__ void __cuda_backPropogateMaxPool(float *result, const float* activationIn, const float* activationOut, const float *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
template void _cuda_backPropogateMaxPool(double *result, const double* activationIn, const double* activationOut, const double *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
template void _cuda_backPropogateMaxPool(float *result, const float* activationIn, const float* activationOut, const float *errorSignal,
const unsigned int sourceRows, const unsigned int sourceColumns, const unsigned int sourceWidth,
const unsigned int sourceHeight, const unsigned int sourceChannels, const unsigned int poolSize,
const unsigned int stride);
/*
*
* ensure minimal value
*
*/
template<typename T>
__global__ void __cuda_ensureMinimalValue(T *data, T value, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if ((index < nElements) && (data[index] < value))
data[index] = value;
}
template<typename T>
void _cuda_ensureMinimalValue(T *data, T value, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_ensureMinimalValue <<< gridSize , THREADS_PER_BLOCK >>> (data, value, nElements);
}
template __global__ void __cuda_ensureMinimalValue(double *data, double value, unsigned int nElements);
template void _cuda_ensureMinimalValue(double *data, double value, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_ensureMinimalValue(float *data, float value, unsigned int nElements);
template void _cuda_ensureMinimalValue(float *data, float value, unsigned int nRows, unsigned int nColumns);
/*
*
* ensure maximal value
*
*/
template<typename T>
__global__ void __cuda_ensureMaximalValue(T *data, T value, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if ((index < nElements) && (data[index] > value))
data[index] = value;
}
template<typename T>
void _cuda_ensureMaximalValue(T *data, T value, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_ensureMaximalValue <<< gridSize , THREADS_PER_BLOCK >>> (data, value, nElements);
}
template __global__ void __cuda_ensureMaximalValue(double *data, double value, unsigned int nElements);
template void _cuda_ensureMaximalValue(double *data, double value, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_ensureMaximalValue(float *data, float value, unsigned int nElements);
template void _cuda_ensureMaximalValue(float *data, float value, unsigned int nRows, unsigned int nColumns);
/*
*
* prepares for convolution
*
*/
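/*
 * im2col-style expansion for a "valid" convolution: for every output pixel, the
 * kernelWidth x kernelHeight neighborhood of every input channel is copied into one block
 * of the destination column, so the convolution itself can presumably be carried out as a
 * matrix product. heightOfOneDestCh below is the output height per channel,
 * ceil((sourceHeight - kernelHeight + 1) / strideY).
 */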
template<typename T>
__global__ void __cuda_prepareConvolution(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < destRows * destCols) {
int imageNum = index / destRows;
int resultIndex = index % destRows;
int kernelMiddleX = kernelWidth / 2;
int kernelMiddleY = kernelHeight / 2;
int heightOfOneDestCh = (int)ceil((float)(sourceHeight - kernelHeight + 1) / (float)strideY);
int pixelNum = resultIndex / (kernelHeight * kernelWidth * sourceChannels);
int pixelX = (pixelNum / heightOfOneDestCh) * strideX + kernelMiddleX;
int pixelY = (pixelNum % heightOfOneDestCh) * strideY + kernelMiddleY;
int channelNum = resultIndex % (kernelHeight * kernelWidth * sourceChannels);
int neighbNum = channelNum % (kernelHeight * kernelWidth);
channelNum = channelNum / (kernelWidth * kernelHeight);
int neighX = (neighbNum / kernelHeight) - kernelMiddleX;
int neighY = (neighbNum % kernelHeight) - kernelMiddleY;
dest[index] = source[imageNum * (sourceChannels * sourceWidth * sourceHeight) +
channelNum * (sourceWidth * sourceHeight) +
(pixelX + neighX) * sourceHeight + (pixelY + neighY)];
}
}
template<typename T>
void _cuda_prepareConvolution(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY)
{
unsigned int nElements = destRows * destCols;
int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK);
__cuda_prepareConvolution<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceWidth, sourceHeight,
sourceChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY);
}
template __global__ void __cuda_prepareConvolution(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
template __global__ void __cuda_prepareConvolution(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
template void _cuda_prepareConvolution(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
template void _cuda_prepareConvolution(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
/*
 * Backpropagation of convolution
*
*/
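/*
 * Gradient counterpart of prepareConvolution: each entry of dest (input layout) sums the
 * corresponding entries of source (expanded layout) over all kernel windows that covered
 * that input pixel.
 */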
template<typename T>
__global__ void __cuda_prepareConvolutionBackProp(T* dest, const T* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < destRows * destCols) {
dest[index] = 0;
int img = index / destRows;
int ch = (index % destRows) / (destWidth * destHeight);
int pixelNum = (index % destRows) % (destWidth * destHeight);
int pixelX = pixelNum / destHeight;
int pixelY = pixelNum % destHeight;
int gridStartX = (pixelX + 1 - (int)kernelWidth) <= 0 ? 0 :
(pixelX + 1 - (int)kernelWidth);
int gridStartY = (pixelY + 1 - (int)kernelHeight) <= 0 ? 0 :
(pixelY + 1 - (int)kernelHeight);
int sourceHeight = (destHeight - (int)kernelHeight + 1);
int sizeOfOneChSource = sourceHeight * (destWidth - (int)kernelWidth + 1);
int neighNum = 0;
for(int j=gridStartX; (j<=pixelX) && ((j + kernelWidth) <= destWidth); j++) {
for(int k=gridStartY; (k<=pixelY) && ((k + kernelHeight) <= destHeight) ; k++) {
				// (Cx, Cy) = (j + kernelMiddleX, k + kernelMiddleY) are the coordinates of the center pixel of the window
				// (Rx, Ry) = (Cx - pixelX, Cy - pixelY) gives the coordinates of the pixel relative
				// to the center pixel, such that the center pixel of the window is mapped to (0,0)
neighNum = (pixelX - j) * kernelHeight + (pixelY - k);
				//(j * sourceHeight + k) is the pixel number of the window's center in the source,
				//i.e. in the result of the convolution
dest[index] += source[img * sizeOfOneChSource * destChannels * kernelWidth * kernelHeight +
(j * sourceHeight + k) * destChannels * kernelWidth * kernelHeight +
ch * kernelWidth * kernelHeight + neighNum];
}
}
}
}
template<typename T>
void _cuda_prepareConvolutionBackProp(T* dest, const T* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols)
{
unsigned int nElements = destRows * destCols;
int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK);
__cuda_prepareConvolutionBackProp<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, destWidth, destHeight,
destChannels, kernelWidth, kernelHeight, destRows, destCols);
}
template void _cuda_prepareConvolutionBackProp(double* dest, const double* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
template void _cuda_prepareConvolutionBackProp(float* dest, const float* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
template __global__ void __cuda_prepareConvolutionBackProp(double* dest, const double* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
template __global__ void __cuda_prepareConvolutionBackProp(float* dest, const float* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols);
/*
 * prepare for convolution such that the image size stays the same after the convolution ("same" padding)
*
*
*/
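/*
 * Same expansion as prepareConvolution, but neighborhoods reaching outside the image are
 * zero-padded, so the output keeps ceil(sourceWidth/strideX) x ceil(sourceHeight/strideY)
 * pixels per channel.
 */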
template<typename T>
__global__ void __cuda_prepareConvolutionSame(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < destRows * destCols) {
int destWidth = (int)ceil((float)sourceWidth / (float)strideX);
int destHeight = (int)ceil((float)sourceHeight / (float)strideY);
int imageNum = index / destRows;
int resultIndex = index % destRows;
int kernelMiddleX = kernelWidth / 2;
int kernelMiddleY = kernelHeight / 2;
int pixelNum = resultIndex / (kernelHeight * kernelWidth * sourceChannels);
int pixelX = (pixelNum / destHeight) * strideX;
int pixelY = (pixelNum % destHeight) * strideY;
int channelNum = resultIndex % (kernelHeight * kernelWidth * sourceChannels);
int neighbNum = channelNum % (kernelHeight * kernelWidth);
channelNum = channelNum / (kernelWidth * kernelHeight);
int neighX = (neighbNum / kernelHeight) - kernelMiddleX;
int neighY = (neighbNum % kernelHeight) - kernelMiddleY;
dest[index] = ( (pixelX + neighX) < 0 || (pixelY + neighY) < 0 ||
(pixelX + neighX) >= sourceWidth || (pixelY + neighY) >= sourceHeight) ? 0 :
source[imageNum * (sourceChannels * sourceWidth * sourceHeight) +
channelNum * (sourceWidth * sourceHeight) +
(pixelX + neighX) * sourceHeight + (pixelY + neighY)];
}
}
template<typename T>
void _cuda_prepareConvolutionSame(T* dest, const T* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY)
{
unsigned int nElements = destRows * destCols;
int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK);
__cuda_prepareConvolutionSame<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceWidth, sourceHeight,
sourceChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY);
}
template __global__ void __cuda_prepareConvolutionSame(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
template __global__ void __cuda_prepareConvolutionSame(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
template void _cuda_prepareConvolutionSame(double* dest, const double* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
template void _cuda_prepareConvolutionSame(float* dest, const float* source, const unsigned int sourceWidth, const unsigned int sourceHeight,
const unsigned int sourceChannels, const unsigned int kernelWidth, const unsigned int kernelHeight,
const unsigned int destRows, const unsigned int destCols, const unsigned int strideX, const unsigned int strideY);
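/*
 *
 * prepareConvolutionSameBackProp
 * gradient counterpart of prepareConvolutionSame (zero-padded, "same"-size convolution)
 *
 */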
template<typename T>
__global__ void __cuda_prepareConvolutionSameBackProp(T* dest, const T* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols,
const unsigned int strideX, const unsigned int strideY)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < (destRows * destCols)) {
dest[index] = 0;
int img = index / destRows; // destRows = destWidth * destHeight * destChannels
int ch = (index % destRows) / (destWidth * destHeight);
int pixelNum = (index % destRows) % (destWidth * destHeight);
int pixelX = pixelNum / destHeight;
int pixelY = pixelNum % destHeight;
int kernelMiddleX = (int)kernelWidth / 2;
int kernelMiddleY = (int)kernelHeight / 2;
int gridStartX = (pixelX + 1 - (int)kernelWidth) <= (-1 * kernelMiddleX) ? (-1 * kernelMiddleX) :
(pixelX + 1 - (int)kernelWidth);
int gridStartY = (pixelY + 1 - (int)kernelHeight) <= (-1 * kernelMiddleY) ? (-1 * kernelMiddleY) :
(pixelY + 1 - (int)kernelHeight);
for(int gridX=gridStartX; (gridX <= pixelX) && ((gridX + kernelMiddleX) < destWidth) ; gridX++) {
if (((gridX + kernelMiddleX) % strideX) == 0) {
for(int gridY=gridStartY; (gridY <= pixelY) && ((gridY + kernelMiddleY) < destHeight); gridY++) {
if (((gridY + kernelMiddleY) % strideY) == 0) {
int neighNum = (pixelX - gridX) * kernelHeight + (pixelY - gridY);
int centerPixel = (((gridX + kernelMiddleX) / strideX) * destHeight / strideY) + (gridY + kernelMiddleY) / strideY;
dest[index] += source[img * destChannels * (destWidth / strideX) * (destHeight / strideY) * kernelWidth * kernelHeight +
centerPixel * destChannels * kernelWidth * kernelHeight + ch * kernelWidth * kernelHeight + neighNum];
}
}
}
}
}
}
template<typename T>
void _cuda_prepareConvolutionSameBackProp(T* dest, const T* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols,
const unsigned int strideX, const unsigned int strideY)
{
unsigned int nElements = destRows * destCols;
int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK);
__cuda_prepareConvolutionSameBackProp<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, destWidth, destHeight,
destChannels, kernelWidth, kernelHeight, destRows, destCols, strideX, strideY);
}
template void _cuda_prepareConvolutionSameBackProp(double* dest, const double* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols,
const unsigned int strideX, const unsigned int strideY);
template void _cuda_prepareConvolutionSameBackProp(float* dest, const float* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols,
const unsigned int strideX, const unsigned int strideY);
template __global__ void __cuda_prepareConvolutionSameBackProp(double* dest, const double* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols,
const unsigned int strideX, const unsigned int strideY);
template __global__ void __cuda_prepareConvolutionSameBackProp(float* dest, const float* source, const unsigned int destWidth,
const unsigned int destHeight, const unsigned int destChannels, const unsigned int kernelWidth,
const unsigned int kernelHeight, const unsigned int destRows, const unsigned int destCols,
const unsigned int strideX, const unsigned int strideY);
/*
* rearrange
*
* helper for convolution
*/
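/*
 * As the indexing below implies, source holds one column per (image, pixel) pair with the
 * channels as rows (presumably the output of the convolution performed as a matrix product),
 * and dest regroups this into one column per image with channel-major pixel blocks.
 */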
template<typename T>
__global__ void __cuda_rearrange(T *dest, const T *source, const unsigned int sourceRows,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < (destRows * destColumns)) {
unsigned int img = index / (sourceRows * destNumPixels);
unsigned int ch = (index % (sourceRows * destNumPixels)) / destNumPixels;
unsigned int pix = (index % (sourceRows * destNumPixels)) % destNumPixels;
dest[index] = source[sourceRows * (img * destNumPixels + pix) + ch];
}
}
template<typename T>
void _cuda_rearrange(T *dest, const T *source, const unsigned int sourceRows,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels)
{
unsigned int nElements = destColumns * destRows;
int gridSize = (int)ceil((float)nElements/ THREADS_PER_BLOCK);
__cuda_rearrange<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceRows, destRows, destColumns, destNumPixels);
}
template __global__ void __cuda_rearrange(double *dest, const double *source, const unsigned int sourceRows,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
template __global__ void __cuda_rearrange(float *dest, const float *source, const unsigned int sourceRows,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
template void _cuda_rearrange(double *dest, const double *source, const unsigned int sourceRows,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
template void _cuda_rearrange(float *dest, const float *source, const unsigned int sourceRows,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
/*
*
 * Rearrange backpropagation
*
*/
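/*
 * Inverse rearrangement for the gradients: dest holds one column per (image, pixel) pair
 * (rows = channels = destRows), gathered from the per-image, channel-major layout of source.
 */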
template<typename T>
__global__ void __cuda_rearrangeBackProp(T *dest, const T *source, const unsigned int sourceColumns,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < (destRows * destColumns)) {
unsigned int img = index / (destNumPixels * destRows);
unsigned int pix = (index % (destNumPixels * destRows)) / destRows;
unsigned int ch = (index % (destNumPixels * destRows)) % destRows;
dest[index] = source[img*(destRows * destNumPixels) + ch * destNumPixels + pix];
}
}
template<typename T>
void _cuda_rearrangeBackProp(T *dest, const T *source, const unsigned int sourceColumns,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels)
{
unsigned int nElements = destRows * destColumns;
int gridSize = (int)ceil((float)nElements/THREADS_PER_BLOCK);
__cuda_rearrangeBackProp<<<gridSize, THREADS_PER_BLOCK>>>(dest, source, sourceColumns,
destRows, destColumns, destNumPixels);
}
template __global__ void __cuda_rearrangeBackProp(double *dest, const double *source, const unsigned int sourceColumns,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
template __global__ void __cuda_rearrangeBackProp(float *dest, const float *source, const unsigned int sourceColumns,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
template void _cuda_rearrangeBackProp(double *dest, const double *source, const unsigned int sourceColumns,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
template void _cuda_rearrangeBackProp(float *dest, const float *source, const unsigned int sourceColumns,
const unsigned int destRows, const unsigned int destColumns, const unsigned int destNumPixels);
/*
*
* argMax
*
*
*/
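/*
 * Minimal host-side usage sketch (assumes devScores is an nRows x nColumns column-major
 * score matrix on the device and devArgMax/hostArgMax hold nColumns unsigned ints; the
 * names are illustrative only):
 *
 *   _cuda_argMax(devScores, nRows, nColumns, devArgMax);
 *   cudaMemcpy(hostArgMax, devArgMax, nColumns * sizeof(unsigned int), cudaMemcpyDeviceToHost);
 */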
template<typename T>
__global__ void __cuda_argMax(T *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr){
unsigned int column= threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
int beginCol = column * nRows;
T maxVal = matrixPtr[beginCol];
resultDevPtr[column] = 0;
for (int i = 1; i < nRows; i++){
T val = matrixPtr[beginCol + i];
if (val > maxVal){
maxVal = val;
resultDevPtr[column] = i;
}
}
}
}
template<typename T>
void _cuda_argMax(T *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr)
{
// parallelization over columns only
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_argMax <<< gridSize, THREADS_PER_BLOCK>>> (matrixPtr, nRows, nColumns, resultDevPtr);
}
template __global__ void __cuda_argMax<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr);
template void _cuda_argMax<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr);
template __global__ void __cuda_argMax<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr);
template void _cuda_argMax<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, unsigned int *resultDevPtr);
/*
*
* max
 * set the maximum of each column to 1.0 and all other entries to 0.0
*
*/
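// Example: a column (0.2, 0.7, 0.1)^T becomes (0.0, 1.0, 0.0)^T; on ties the first maximum
// (smallest row index) wins, because the comparison below is strict.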
template<typename T>
__global__ void __cuda_max(T *devResult, unsigned int nRows, unsigned int nColumns){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns) {
unsigned int argMax = 0;
T max = devResult[column * nRows];
for (int i = 0; i < nRows; i++) {
if (devResult[column * nRows + i] > max) {
max = devResult[column * nRows + i];
argMax = i;
}
devResult[column * nRows + i] = 0.0;
}
devResult[column * nRows + argMax] = 1.0;
}
}
template<typename T>
void _cuda_max(T *devResult, unsigned int nRows, unsigned int nColumns)
{
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_max <<< gridSize, THREADS_PER_BLOCK>>> (devResult, nRows, nColumns);
}
template __global__ void __cuda_max<double>(double *devResult, unsigned int nRows, unsigned int nColumns);
template void _cuda_max<double>(double *devResult, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_max<float>(float *devResult, unsigned int nRows, unsigned int nColumns);
template void _cuda_max<float>(float *devResult, unsigned int nRows, unsigned int nColumns);
/*
*
* max
*
*
*/
template<typename T>
__global__ void __cuda_max(T *devResult, const T *devA, const T *devB, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if (devA[index] < devB[index])
devResult[index] = devB[index];
else
devResult[index] = devA[index];
}
}
template<typename T>
void _cuda_max(T *devResult, const T *devA, const T *devB, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_max <<< gridSize, THREADS_PER_BLOCK>>> (devResult, devA, devB, nElements);
}
template __global__ void __cuda_max<double>(double *devResult, const double *devA, const double *devB, unsigned int nElements);
template void _cuda_max<double>(double *devResult, const double *devA, const double *devB, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_max<float>(float *devResult, const float *devA, const float *devB, unsigned int nElements);
template void _cuda_max<float>(float *devResult, const float *devA, const float *devB, unsigned int nRows, unsigned int nColumns);
/*
*
* elementwiseMultiplicationWithKroneckerDelta
*
*
*/
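// Zeroes devResult wherever devA and devB differ, i.e. multiplies devResult elementwise
// with the Kronecker delta of devA and devB.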
template<typename T>
__global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta(T *devResult, const T *devA, const T *devB, unsigned int nElements){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nElements) {
if (devA[index] != devB[index])
devResult[index] = 0;
}
}
template<typename T>
void _cuda_elementwiseMultiplicationWithKroneckerDelta(T *devResult, const T *devA, const T *devB, unsigned int nRows, unsigned int nColumns)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_elementwiseMultiplicationWithKroneckerDelta <<< gridSize, THREADS_PER_BLOCK>>> (devResult, devA, devB, nElements);
}
template __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta<double>(double *devResult, const double *devA, const double *devB, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithKroneckerDelta<double>(double *devResult, const double *devA, const double *devB, unsigned int nRows, unsigned int nColumns);
template __global__ void __cuda_elementwiseMultiplicationWithKroneckerDelta<float>(float *devResult, const float *devA, const float *devB, unsigned int nElements);
template void _cuda_elementwiseMultiplicationWithKroneckerDelta<float>(float *devResult, const float *devA, const float *devB, unsigned int nRows, unsigned int nColumns);
/*
*
* nClassificationErrors
*
*
*/
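// Counts, via atomicAdd into a single device counter, the columns whose argmax row does not
// carry a 1.0 in the one-hot target matrix.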
template<typename T>
__global__ void __cuda_nClassificationErrors(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, unsigned int *resultDevPtr){
unsigned int column= threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
int beginCol = column * nRows;
T maxVal = matrixPtr[beginCol];
	unsigned int argmax = 0;
for (int i = 1; i < nRows; i++){
T val = matrixPtr[beginCol + i];
if (val > maxVal){
maxVal = val;
argmax = i;
}
}
if (targets[nRows * column + argmax] != 1.0){
atomicAdd(resultDevPtr, 1);
}
}
}
template<typename T>
void _cuda_nClassificationErrors(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, unsigned int *resultDevPtr)
{
// parallelization over columns only
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
unsigned int result = 0;
cudaMemcpy(resultDevPtr, &result, sizeof(unsigned int), cudaMemcpyHostToDevice);
__cuda_nClassificationErrors <<< gridSize, THREADS_PER_BLOCK>>> (matrixPtr, nRows, nColumns, targets, resultDevPtr);
}
template __global__ void __cuda_nClassificationErrors<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, unsigned int *resultDevPtr);
template void _cuda_nClassificationErrors<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, unsigned int *resultDevPtr);
template __global__ void __cuda_nClassificationErrors<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, unsigned int *resultDevPtr);
template void _cuda_nClassificationErrors<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, unsigned int *resultDevPtr);
// crossEntropyObjectiveFunction
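// Per column c: objFctn[c] = -sum_i targets(i,c) * log(matrix(i,c)), evaluated only where the
// one-hot target equals 1.0.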
template<typename T>
__global__ void __cuda_crossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){
unsigned int column= threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
objFctn[column] = 0;
for (int i = 0; i < nRows; i++){
if (targets[nRows * column + i] == 1.0)
objFctn[column] -= log(matrixPtr[nRows * column + i]);
}
}
}
template<typename T>
void _cuda_crossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T* targets, T *objFctn)
{
// parallelization over columns only
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_crossEntropyObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn);
}
template __global__ void __cuda_crossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn);
template void _cuda_crossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn);
template __global__ void __cuda_crossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn);
template void _cuda_crossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn);
// weightedCrossEntropyObjectiveFunction
template<typename T>
__global__ void __cuda_weightedCrossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights){
unsigned int column= threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
objFctn[column] = 0;
for (int i = 0; i < nRows; i++){
if (targets[nRows * column + i] == 1.0)
objFctn[column] -= log(matrixPtr[nRows * column + i]) * weights[column];
}
}
}
template<typename T>
void _cuda_weightedCrossEntropyObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights)
{
// parallelization over columns only
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_weightedCrossEntropyObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn, weights);
}
template __global__ void __cuda_weightedCrossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights);
template void _cuda_weightedCrossEntropyObjectiveFunction<double>(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights);
template __global__ void __cuda_weightedCrossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights);
template void _cuda_weightedCrossEntropyObjectiveFunction<float>(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights);
// squaredErrorObjectiveFunction
template<typename T>
__global__ void __cuda_squaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
objFctn[column] = 0.0f;
for (int row = 0; row < nRows; row++){
unsigned int position = column * nRows + row;
objFctn[column] += (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]);
}
}
}
template<typename T>
void _cuda_squaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn)
{
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_squaredErrorObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn);
}
template __global__ void __cuda_squaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn);
template void _cuda_squaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn);
template __global__ void __cuda_squaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn);
template void _cuda_squaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn);
// weightedSquaredErrorObjectiveFunction
template<typename T>
__global__ void __cuda_weightedSquaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
objFctn[column] = 0.0f;
for (int row = 0; row < nRows; row++){
unsigned int position = column * nRows + row;
objFctn[column] += (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]);
}
objFctn[column] *= weights[column];
}
}
template<typename T>
void _cuda_weightedSquaredErrorObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn, T *weights)
{
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_weightedSquaredErrorObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn, weights);
}
template __global__ void __cuda_weightedSquaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights);
template void _cuda_weightedSquaredErrorObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn, double *weights);
template __global__ void __cuda_weightedSquaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights);
template void _cuda_weightedSquaredErrorObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn, float *weights);
// smoothedL1ObjectiveFunction
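// Per element, with d = prediction - target: 0.5*d^2 for |d| <= 1 and |d| - 0.5 otherwise,
// summed over the rows of each column.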
template<typename T>
__global__ void __cuda_smoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
objFctn[column] = 0.0f;
for (int row = 0; row < nRows; row++){
unsigned int position = column * nRows + row;
if ((matrixPtr[position] - targets[position]) < -1.0)
objFctn[column] += (targets[position] - matrixPtr[position]) - 0.5;
else if ((matrixPtr[position] - targets[position]) > 1.0)
objFctn[column] += (matrixPtr[position] - targets[position]) - 0.5;
else
objFctn[column] += 0.5 * (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]);
}
}
}
template<typename T>
void _cuda_smoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T *objFctn)
{
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_smoothedL1ObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, objFctn);
}
template __global__ void __cuda_smoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn);
template void _cuda_smoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *objFctn);
template __global__ void __cuda_smoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn);
template void _cuda_smoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *objFctn);
// weightedSmoothedL1ObjectiveFunction
template<typename T>
__global__ void __cuda_weightedSmoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T* weights, T *objFctn){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumns){
objFctn[column] = 0.0f;
for (int row = 0; row < nRows; row++){
unsigned int position = column * nRows + row;
if ((matrixPtr[position] - targets[position]) < -1.0)
objFctn[column] += (targets[position] - matrixPtr[position]) - 0.5;
else if ((matrixPtr[position] - targets[position]) > 1.0)
objFctn[column] += (matrixPtr[position] - targets[position]) - 0.5;
else
objFctn[column] += 0.5 * (matrixPtr[position] - targets[position]) * (matrixPtr[position] - targets[position]);
}
objFctn[column] *= weights[column];
}
}
template<typename T>
void _cuda_weightedSmoothedL1ObjectiveFunction(T *matrixPtr, unsigned int nRows, unsigned int nColumns, T *targets, T* weights, T *objFctn)
{
int gridSize = (int)ceil( (float) nColumns/THREADS_PER_BLOCK);
__cuda_weightedSmoothedL1ObjectiveFunction <<< gridSize , THREADS_PER_BLOCK >>> (matrixPtr, nRows, nColumns, targets, weights, objFctn);
}
template __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *weights, double *objFctn);
template void _cuda_weightedSmoothedL1ObjectiveFunction(double *matrixPtr, unsigned int nRows, unsigned int nColumns, double *targets, double *weights, double *objFctn);
template __global__ void __cuda_weightedSmoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *weights, float *objFctn);
template void _cuda_weightedSmoothedL1ObjectiveFunction(float *matrixPtr, unsigned int nRows, unsigned int nColumns, float *targets, float *weights, float *objFctn);
/*
* appendSecondOrderFeatures
*/
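// For each column x of X, appends all products x_i * x_j with j >= i to the corresponding
// column of Y, starting at row 'offset'.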
template<typename T>
__global__ void __cuda_appendSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumnsX){
unsigned int pos = offset;
for (unsigned int i = 0; i < nRowsX; ++ i) {
for (unsigned int j = i; j < nRowsX; ++ j) {
Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + j];
pos++;
}
}
}
}
template<typename T>
void _cuda_appendSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK);
__cuda_appendSecondOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset);
}
template __global__ void __cuda_appendSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template __global__ void __cuda_appendSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
/*
* appendDiagonalSecondOrderFeatures
*/
template<typename T>
__global__ void __cuda_appendDiagonalSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumnsX){
unsigned int pos = offset;
for (unsigned int i = 0; i < nRowsX; ++ i) {
Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + i];
pos++;
}
}
}
template<typename T>
void _cuda_appendDiagonalSecondOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK);
__cuda_appendDiagonalSecondOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset);
}
template __global__ void __cuda_appendDiagonalSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendDiagonalSecondOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template __global__ void __cuda_appendDiagonalSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendDiagonalSecondOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
// appendThirdOrderFeatures
template<typename T>
__global__ void __cuda_appendThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumnsX){
unsigned int pos = offset;
for (unsigned int i = 0; i < nRowsX; ++ i) {
for (unsigned int j = i; j < nRowsX; ++ j) {
for (unsigned int k = j; k < nRowsX; ++ k) {
Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + j] * X[column * nRowsX + k];
pos++;
}
}
}
}
}
template<typename T>
void _cuda_appendThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK);
__cuda_appendThirdOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset);
}
template __global__ void __cuda_appendThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template __global__ void __cuda_appendThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
// appendDiagonalThirdOrderFeatures
template<typename T>
__global__ void __cuda_appendDiagonalThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
unsigned int column = threadIdx.x + blockIdx.x * blockDim.x;
if (column < nColumnsX){
unsigned int pos = offset;
for (unsigned int i = 0; i < nRowsX; ++ i) {
Y[column * nRowsY + pos] = X[column * nRowsX + i] * X[column * nRowsX + i] * X[column * nRowsX + i];
pos++;
}
}
}
template<typename T>
void _cuda_appendDiagonalThirdOrderFeatures(const T *X, unsigned int nRowsX, unsigned int nColumnsX, T *Y, unsigned int nRowsY, unsigned int offset){
int gridSize = (int)ceil( (float) nColumnsX/THREADS_PER_BLOCK);
__cuda_appendDiagonalThirdOrderFeatures <<< gridSize , THREADS_PER_BLOCK >>> (X, nRowsX, nColumnsX, Y, nRowsY, offset);
}
template __global__ void __cuda_appendDiagonalThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendDiagonalThirdOrderFeatures(const double *X, unsigned int nRowsX, unsigned int nColumnsX, double *Y, unsigned int nRowsY, unsigned int offset);
template __global__ void __cuda_appendDiagonalThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
template void _cuda_appendDiagonalThirdOrderFeatures(const float *X, unsigned int nRowsX, unsigned int nColumnsX, float *Y, unsigned int nRowsY, unsigned int offset);
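// Hedged sizing note (not part of the original source): each append kernel above writes a
// fixed number of rows per column starting at `offset`, so nRowsY must leave room for them.
// Assuming the full second-order kernel enumerates pairs with i <= j (mirroring the
// i <= j <= k loops of the third-order kernel), the per-column counts are:
inline unsigned int numSecondOrderFeatures(unsigned int nRowsX) {
    return nRowsX * (nRowsX + 1) / 2;                 // all pairs (i, j) with i <= j
}
inline unsigned int numDiagonalSecondOrderFeatures(unsigned int nRowsX) {
    return nRowsX;                                    // x_i * x_i only
}
inline unsigned int numThirdOrderFeatures(unsigned int nRowsX) {
    return nRowsX * (nRowsX + 1) * (nRowsX + 2) / 6;  // all triples (i, j, k) with i <= j <= k
}
inline unsigned int numDiagonalThirdOrderFeatures(unsigned int nRowsX) {
    return nRowsX;                                    // x_i^3 only
}
// Example: nRowsX = 10 gives 55 full second-order and 220 full third-order rows per column.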
/*
*
* gaussianMixturePosteriors
* computes unnormalized, unexponentiated Gaussian mixture posteriors
* -> p(c|x) can be obtained with application of softmax on the result of this function
*
*/
template<typename T>
__global__ void __cuda_gaussianMixturePosteriors(T *P, const T *X, const T *means, const T *variances, const T *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nFeatures * nMixtures) {
unsigned int k = index % nMixtures;
unsigned int n = index / nMixtures;
T expn = 0;
T det = 0;
for (unsigned int d = 0; d < featureDim; d++) {
expn += (X[n * featureDim + d] - means[d * nMixtures + k]) * (X[n * featureDim + d] - means[d * nMixtures + k])
/ variances[d * nMixtures + k];
det += log(variances[d * nMixtures + k]);
}
P[index] = log(weights[k]) - 0.5 * expn - 0.5 * log(2 * CUDART_PI) * featureDim - 0.5 * det;
}
}
template<typename T>
void _cuda_gaussianMixturePosteriors(T *P, const T *X, const T *means, const T *variances, const T *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures)
{
unsigned int nElements = nFeatures * nMixtures;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_gaussianMixturePosteriors <<< gridSize , THREADS_PER_BLOCK >>> (P, X, means, variances, weights, nFeatures, featureDim, nMixtures);
}
template __global__ void __cuda_gaussianMixturePosteriors(double *P, const double *X, const double *means, const double *variances, const double *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
template void _cuda_gaussianMixturePosteriors(double *P, const double *X, const double *means, const double *variances, const double *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
template __global__ void __cuda_gaussianMixturePosteriors(float *P, const float *X, const float *means, const float *variances, const float *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
template void _cuda_gaussianMixturePosteriors(float *P, const float *X, const float *means, const float *variances, const float *weights, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
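// Hedged host-side sketch (not part of the original source): the kernel above leaves P holding
// unnormalized log scores log(w_k) - 0.5 * Mahalanobis - 0.5 * featureDim * log(2 pi) - 0.5 * log|Sigma_k|,
// so the posteriors p(c|x) for one feature are obtained with a numerically stable softmax over
// its nMixtures entries. The helper name below is illustrative only.
#include <cmath>
template<typename T>
void softmaxPosteriors(const T *logScores, T *posteriors, unsigned int nMixtures) {
    T maxLog = logScores[0];
    for (unsigned int k = 1; k < nMixtures; ++k)
        if (logScores[k] > maxLog) maxLog = logScores[k];
    T sum = 0;
    for (unsigned int k = 0; k < nMixtures; ++k) {
        posteriors[k] = std::exp(logScores[k] - maxLog);  // shift by the maximum for numerical stability
        sum += posteriors[k];
    }
    for (unsigned int k = 0; k < nMixtures; ++k)
        posteriors[k] /= sum;
}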
/*
*
* fisher encoding
*
*/
template<typename T>
__global__ void __cuda_fisherEncoding(T *F, const T *X, const T *means, const T *variances, const T *weights, const T* gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nFeatures * nMixtures * featureDim) {
unsigned int n = index / (nMixtures * featureDim);
unsigned int k = (index % (nMixtures * featureDim)) / featureDim;
unsigned int d = (index % (nMixtures * featureDim)) % featureDim;
// first order component
F[d + k * featureDim + n * featureDim * nMixtures * 2] = gamma[k + n * nMixtures]
* (X[d + n * featureDim] - means[k + d * nMixtures]) / sqrt(variances[k + d * nMixtures] * weights[k]);
// second order component
F[d + (k + nMixtures) * featureDim + n * featureDim * nMixtures * 2] = gamma[k + n * nMixtures]
* ( (X[d + n * featureDim] - means[k + d * nMixtures]) * (X[d + n * featureDim] - means[k + d * nMixtures])
/ variances[k + d * nMixtures] - 1.0 )
/ sqrt(2 * weights[k]);
}
}
template<typename T>
void _cuda_fisherEncoding(T *F, const T *X, const T *means, const T *variances, const T *weights, const T *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures)
{
unsigned int nElements = nFeatures * nMixtures * featureDim;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_fisherEncoding <<< gridSize , THREADS_PER_BLOCK >>> (F, X, means, variances, weights, gamma, nFeatures, featureDim, nMixtures);
}
template __global__ void __cuda_fisherEncoding(double *F, const double *X, const double *means, const double *variances, const double *weights, const double *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
template void _cuda_fisherEncoding(double *F, const double *X, const double *means, const double *variances, const double *weights, const double *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
template __global__ void __cuda_fisherEncoding(float *F, const float *X, const float *means, const float *variances, const float *weights, const float *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
template void _cuda_fisherEncoding(float *F, const float *X, const float *means, const float *variances, const float *weights, const float *gamma, unsigned int nFeatures, unsigned int featureDim, unsigned int nMixtures);
/*
*
* dropout
*
*/
template<typename T>
__global__ void __cuda_dropout(T *data, const T *mask, unsigned int nElements, T dropoutProbability){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if ((index < nElements) && (mask[index] < dropoutProbability))
data[index] = 0.0;
}
template<typename T>
void _cuda_dropout(T *data, const T *mask, unsigned int nRows, unsigned int nColumns, T dropoutProbability)
{
unsigned int nElements = nRows * nColumns;
int gridSize = (int)ceil( (float) nElements/THREADS_PER_BLOCK);
__cuda_dropout <<< gridSize , THREADS_PER_BLOCK >>> (data, mask, nElements, dropoutProbability);
}
template __global__ void __cuda_dropout(double *data, const double *mask, unsigned int nElements, double dropoutProbability);
template void _cuda_dropout(double *data, const double *mask, unsigned int nRows, unsigned int nColumns, double dropoutProbability);
template __global__ void __cuda_dropout(float *data, const float *mask, unsigned int nElements, float dropoutProbability);
template void _cuda_dropout(float *data, const float *mask, unsigned int nRows, unsigned int nColumns, float dropoutProbability);
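// Hedged CPU reference (not part of the original source) mirroring the dropout kernel above:
// entries whose mask value falls below dropoutProbability are zeroed, the rest are left
// unscaled. The mask is assumed to hold uniform(0,1) draws; any inverted-dropout rescaling
// would have to happen elsewhere (assumption, not visible in this file).
template<typename T>
void dropoutReference(T *data, const T *mask, unsigned int nElements, T dropoutProbability) {
    for (unsigned int i = 0; i < nElements; ++i) {
        if (mask[i] < dropoutProbability)
            data[i] = 0;
    }
}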
|
the_stack
|
__constant__ float kPi = 3.141592654;
__constant__ float kTwoPi = 6.283185308;
__constant__ float kEpsilon = 1.0e-10; // to prevent indeterminate cases
extern "C" {
// Device helper function to compute distances between two agents
__device__ float ComputeDistance(
float * loc_x_arr,
float * loc_y_arr,
const int kThisAgentId1,
const int kThisAgentId2,
const int kEnvId,
int kNumAgents
) {
const int index1 = kEnvId * kNumAgents + kThisAgentId1;
const int index2 = kEnvId * kNumAgents + kThisAgentId2;
return sqrt(
pow(loc_x_arr[index1] - loc_x_arr[index2], 2) +
pow(loc_y_arr[index1] - loc_y_arr[index2], 2));
}
// Device helper function to generate observation
__device__ void CudaTagContinuousGenerateObservation(
float * loc_x_arr,
float * loc_y_arr,
float * speed_arr,
float * direction_arr,
float * acceleration_arr,
int * agent_types_arr,
const float kGridLength,
const float kMaxSpeed,
const int kNumOtherAgentsObserved,
int * still_in_the_game_arr,
const bool kUseFullObservation,
float * obs_arr,
float * neighbor_distances_arr,
int * neighbor_ids_sorted_by_distance_arr,
int * nearest_neighbor_ids,
int * env_timestep_arr,
const int kNumAgents,
const int kEpisodeLength,
const int kEnvId,
const int kThisAgentId,
const int kThisAgentArrayIdx
) {
int num_features = 7;
if (kThisAgentId < kNumAgents) {
if (kUseFullObservation) {
// obs shape is (num_envs, kNumAgents,
// num_features * (kNumAgents - 1) + 1)
const int kThisAgentIdxOffset = kEnvId * kNumAgents *
(num_features * (kNumAgents - 1) + 1) +
kThisAgentId * (num_features * (kNumAgents - 1) + 1);
// Initialize obs
int index = 0;
for (int other_agent_id = 0; other_agent_id < kNumAgents;
other_agent_id++) {
if (other_agent_id != kThisAgentId) {
obs_arr[kThisAgentIdxOffset + 0 * (kNumAgents - 1) + index]
= 0.0;
obs_arr[kThisAgentIdxOffset + 1 * (kNumAgents - 1) + index]
= 0.0;
obs_arr[kThisAgentIdxOffset + 2 * (kNumAgents - 1) + index]
= 0.0;
obs_arr[kThisAgentIdxOffset + 3 * (kNumAgents - 1) + index]
= 0.0;
obs_arr[kThisAgentIdxOffset + 4 * (kNumAgents - 1) + index]
= 0.0;
obs_arr[kThisAgentIdxOffset + 5 * (kNumAgents - 1) + index]
= agent_types_arr[other_agent_id];
obs_arr[kThisAgentIdxOffset + 6 * (kNumAgents - 1) + index]
= still_in_the_game_arr[kEnvId * kNumAgents + other_agent_id];
index += 1;
}
}
obs_arr[kThisAgentIdxOffset + num_features * (kNumAgents - 1)] = 0.0;
// Update obs for agents still in the game
if (still_in_the_game_arr[kThisAgentArrayIdx]) {
int index = 0;
for (int other_agent_id = 0; other_agent_id < kNumAgents;
other_agent_id++) {
if (other_agent_id != kThisAgentId) {
const int kOtherAgentArrayIdx = kEnvId * kNumAgents +
other_agent_id;
obs_arr[kThisAgentIdxOffset + 0 * (kNumAgents - 1) + index] =
static_cast<float>(loc_x_arr[kOtherAgentArrayIdx] -
loc_x_arr[kThisAgentArrayIdx]) / (sqrt(2.0) * kGridLength);
obs_arr[kThisAgentIdxOffset + 1 * (kNumAgents - 1) + index] =
static_cast<float>(loc_y_arr[kOtherAgentArrayIdx] -
loc_y_arr[kThisAgentArrayIdx]) / (sqrt(2.0) * kGridLength);
obs_arr[kThisAgentIdxOffset + 2 * (kNumAgents - 1) + index] =
static_cast<float>(speed_arr[kOtherAgentArrayIdx] -
speed_arr[kThisAgentArrayIdx]) / (kMaxSpeed + kEpsilon);
obs_arr[kThisAgentIdxOffset + 3 * (kNumAgents - 1) + index] =
static_cast<float>(acceleration_arr[kOtherAgentArrayIdx] -
acceleration_arr[kThisAgentArrayIdx]) / (kMaxSpeed + kEpsilon);
obs_arr[kThisAgentIdxOffset + 4 * (kNumAgents - 1) + index] =
static_cast<float>(direction_arr[kOtherAgentArrayIdx] -
direction_arr[kThisAgentArrayIdx]) / (kTwoPi);
index += 1;
}
}
obs_arr[kThisAgentIdxOffset + num_features * (kNumAgents - 1)] =
static_cast<float>(env_timestep_arr[kEnvId]) / kEpisodeLength;
}
} else {
// Initialize obs to all zeros
// obs shape is (num_envs, kNumAgents,
// num_features * kNumOtherAgentsObserved + 1)
const int kThisAgentIdxOffset = kEnvId * kNumAgents *
(num_features * kNumOtherAgentsObserved + 1) +
kThisAgentId * (num_features * kNumOtherAgentsObserved + 1);
for (int idx = 0; idx < kNumOtherAgentsObserved; idx++) {
obs_arr[kThisAgentIdxOffset + 0 * kNumOtherAgentsObserved + idx] =
0.0;
obs_arr[kThisAgentIdxOffset + 1 * kNumOtherAgentsObserved + idx] =
0.0;
obs_arr[kThisAgentIdxOffset + 2 * kNumOtherAgentsObserved + idx] =
0.0;
obs_arr[kThisAgentIdxOffset + 3 * kNumOtherAgentsObserved + idx] =
0.0;
obs_arr[kThisAgentIdxOffset + 4 * kNumOtherAgentsObserved + idx] =
0.0;
obs_arr[kThisAgentIdxOffset + 5 * kNumOtherAgentsObserved + idx] =
0.0;
obs_arr[kThisAgentIdxOffset + 6 * kNumOtherAgentsObserved + idx] =
0.0;
}
obs_arr[kThisAgentIdxOffset + num_features * kNumOtherAgentsObserved] =
0.0;
// Update obs for agents still in the game
if (still_in_the_game_arr[kThisAgentArrayIdx]) {
int distance_arr_idx;
int i_index;
int j_index;
int neighbor_ids_sorted_by_distance_arr_idx;
// Find the nearest agents
const int kThisAgentArrayIdxOffset = kEnvId * kNumAgents *
(kNumAgents - 1) + kThisAgentId * (kNumAgents - 1);
// Initialize neighbor_ids_sorted_by_distance_arr
// with the other agents that are still in the game
int num_valid_other_agents = 0;
for (int other_agent_id = 0; other_agent_id < kNumAgents;
other_agent_id++) {
if ((other_agent_id != kThisAgentId) &&
(still_in_the_game_arr[kEnvId * kNumAgents + other_agent_id])) {
neighbor_ids_sorted_by_distance_arr_idx =
kThisAgentArrayIdxOffset + num_valid_other_agents;
neighbor_ids_sorted_by_distance_arr[
neighbor_ids_sorted_by_distance_arr_idx] = other_agent_id;
num_valid_other_agents++;
}
}
// First, find distances to all the valid agents
for (int idx = 0; idx < num_valid_other_agents; idx++) {
distance_arr_idx = kThisAgentArrayIdxOffset + idx;
neighbor_distances_arr[distance_arr_idx] = ComputeDistance(
loc_x_arr,
loc_y_arr,
kThisAgentId,
neighbor_ids_sorted_by_distance_arr[distance_arr_idx],
kEnvId,
kNumAgents);
}
// Find the nearest neighbor agent indices
for (int i = 0; i < min(num_valid_other_agents,
kNumOtherAgentsObserved); i++) {
i_index = kThisAgentArrayIdxOffset + i;
for (int j = i + 1; j < num_valid_other_agents; j++) {
j_index = kThisAgentArrayIdxOffset + j;
if (neighbor_distances_arr[j_index] <
neighbor_distances_arr[i_index]) {
float tmp1 = neighbor_distances_arr[i_index];
neighbor_distances_arr[i_index] =
neighbor_distances_arr[j_index];
neighbor_distances_arr[j_index] = tmp1;
int tmp2 = neighbor_ids_sorted_by_distance_arr[i_index];
neighbor_ids_sorted_by_distance_arr[i_index] =
neighbor_ids_sorted_by_distance_arr[j_index];
neighbor_ids_sorted_by_distance_arr[j_index] = tmp2;
}
}
}
// Save nearest neighbor ids.
for (int idx = 0; idx < min(num_valid_other_agents,
kNumOtherAgentsObserved); idx++) {
const int kNearestNeighborsIdx =
kEnvId * kNumAgents * kNumOtherAgentsObserved +
kThisAgentId * kNumOtherAgentsObserved +
idx;
nearest_neighbor_ids[kNearestNeighborsIdx] =
neighbor_ids_sorted_by_distance_arr[
kThisAgentArrayIdxOffset + idx];
}
// Update observation
for (int idx = 0; idx < min(num_valid_other_agents,
kNumOtherAgentsObserved); idx++) {
const int kNearestNeighborsIdx =
kEnvId * kNumAgents * kNumOtherAgentsObserved +
kThisAgentId * kNumOtherAgentsObserved +
idx;
const int kOtherAgentId = nearest_neighbor_ids[
kNearestNeighborsIdx];
const int kOtherAgentArrayIdx = kEnvId * kNumAgents +
kOtherAgentId;
const int kThisAgentIdxOffset =
kEnvId * kNumAgents * (
num_features * kNumOtherAgentsObserved + 1) +
kThisAgentId * (num_features * kNumOtherAgentsObserved + 1);
obs_arr[kThisAgentIdxOffset + 0 * kNumOtherAgentsObserved + idx] =
static_cast<float>(loc_x_arr[kOtherAgentArrayIdx] -
loc_x_arr[kThisAgentArrayIdx]) / (sqrt(2.0) * kGridLength);
obs_arr[kThisAgentIdxOffset + 1 * kNumOtherAgentsObserved + idx] =
static_cast<float>(loc_y_arr[kOtherAgentArrayIdx] -
loc_y_arr[kThisAgentArrayIdx]) / (sqrt(2.0) * kGridLength);
obs_arr[kThisAgentIdxOffset + 2 * kNumOtherAgentsObserved + idx] =
static_cast<float>(speed_arr[kOtherAgentArrayIdx] -
speed_arr[kThisAgentArrayIdx]) / (kMaxSpeed + kEpsilon);
obs_arr[kThisAgentIdxOffset + 3 * kNumOtherAgentsObserved + idx] =
static_cast<float>(acceleration_arr[kOtherAgentArrayIdx] -
acceleration_arr[kThisAgentArrayIdx]) / (kMaxSpeed + kEpsilon);
obs_arr[kThisAgentIdxOffset + 4 * kNumOtherAgentsObserved + idx] =
static_cast<float>(direction_arr[kOtherAgentArrayIdx] -
direction_arr[kThisAgentArrayIdx]) / (kTwoPi);
obs_arr[kThisAgentIdxOffset + 5 * kNumOtherAgentsObserved + idx] =
agent_types_arr[kOtherAgentId];
obs_arr[kThisAgentIdxOffset + 6 * kNumOtherAgentsObserved + idx] =
still_in_the_game_arr[kOtherAgentArrayIdx];
}
obs_arr[
kThisAgentIdxOffset + num_features * kNumOtherAgentsObserved] =
static_cast<float>(env_timestep_arr[kEnvId]) / kEpisodeLength;
}
}
}
}
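// Hedged helper (not part of the original source): size of one agent's observation vector as
// laid out by CudaTagContinuousGenerateObservation above, assuming the same 7 per-neighbor
// features plus the trailing normalized-timestep entry.
inline int ObservationSizePerAgent(
  int num_agents,
  int num_other_agents_observed,
  bool use_full_observation
) {
  const int kNumFeatures = 7;
  return use_full_observation
    ? kNumFeatures * (num_agents - 1) + 1
    : kNumFeatures * num_other_agents_observed + 1;
}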
// Device helper function to compute rewards
__device__ void CudaTagContinuousComputeReward(
float * rewards_arr,
float * loc_x_arr,
float * loc_y_arr,
const float kGridLength,
float * edge_hit_reward_penalty,
float * step_rewards_arr,
int * num_runners_arr,
int * agent_types_arr,
const float kDistanceMarginForReward,
const float kTagRewardForTagger,
const float kTagPenaltyForRunner,
const float kEndOfGameRewardForRunner,
bool kRunnerExitsGameAfterTagged,
int * still_in_the_game_arr,
int * done_arr,
int * env_timestep_arr,
int kNumAgents,
int kEpisodeLength,
const int kEnvId,
const int kThisAgentId,
const int kThisAgentArrayIdx
) {
if (kThisAgentId < kNumAgents) {
// initialize rewards
rewards_arr[kThisAgentArrayIdx] = 0.0;
if (still_in_the_game_arr[kThisAgentArrayIdx]) {
// Add the edge hit penalty and the step rewards / penalties
rewards_arr[kThisAgentArrayIdx] += edge_hit_reward_penalty[
kThisAgentArrayIdx];
rewards_arr[kThisAgentArrayIdx] += step_rewards_arr[kThisAgentId];
}
// Ensure that all the agents' rewards are initialized before we proceed.
// The tagger rewards are added by the runner threads below, so this synchronization is necessary.
__syncthreads();
float min_dist = kGridLength * sqrt(2.0);
bool is_runner = !agent_types_arr[kThisAgentId];
if (is_runner && still_in_the_game_arr[kThisAgentArrayIdx]) {
int nearest_tagger_id = -1;  // defensively initialized; only read when a tagger is found below
for (int other_agent_id = 0; other_agent_id < kNumAgents;
other_agent_id++) {
bool is_tagger = (agent_types_arr[other_agent_id] == 1);
if (is_tagger) {
const float dist = ComputeDistance(
loc_x_arr,
loc_y_arr,
kThisAgentId,
other_agent_id,
kEnvId,
kNumAgents);
if (dist < min_dist) {
min_dist = dist;
nearest_tagger_id = other_agent_id;
}
}
}
if (min_dist < kDistanceMarginForReward) {
// the runner is tagged.
rewards_arr[kThisAgentArrayIdx] += kTagPenaltyForRunner;
rewards_arr[kEnvId * kNumAgents + nearest_tagger_id] +=
kTagRewardForTagger;
if (kRunnerExitsGameAfterTagged) {
still_in_the_game_arr[kThisAgentArrayIdx] = 0;
num_runners_arr[kEnvId] -= 1;
}
}
// Add end of game reward for runners at the end of the episode.
if (env_timestep_arr[kEnvId] == kEpisodeLength) {
rewards_arr[kThisAgentArrayIdx] += kEndOfGameRewardForRunner;
}
}
// Wait here to update the number of runners before determining done_arr
__syncthreads();
// Use only agent 0's thread to set done_arr
if (kThisAgentId == 0) {
if ((env_timestep_arr[kEnvId] == kEpisodeLength) ||
(num_runners_arr[kEnvId] == 0)) {
done_arr[kEnvId] = 1;
}
}
}
}
__global__ void CudaTagContinuousStep(
float * loc_x_arr,
float * loc_y_arr,
float * speed_arr,
float * direction_arr,
float * acceleration_arr,
int * agent_types_arr,
float * edge_hit_reward_penalty,
const float kEdgeHitPenalty,
const float kGridLength,
float * acceleration_actions_arr,
float * turn_actions_arr,
const float kMaxSpeed,
const int kNumOtherAgentsObserved,
float * skill_levels_arr,
const bool kRunnerExitsGameAfterTagged,
int * still_in_the_game_arr,
const bool kUseFullObservation,
float * obs_arr,
int * action_indices_arr,
float * neighbor_distances_arr,
int * neighbor_ids_sorted_by_distance_arr,
int * nearest_neighbor_ids,
float * rewards_arr,
float * step_rewards_arr,
int * num_runners_arr,
const float kDistanceMarginForReward,
const float kTagRewardForTagger,
const float kTagPenaltyForRunner,
const float kEndOfGameRewardForRunner,
int * done_arr,
int * env_timestep_arr,
int kNumAgents,
int kEpisodeLength
) {
const int kEnvId = blockIdx.x;
const int kThisAgentId = threadIdx.x;
const int kThisAgentArrayIdx = kEnvId * kNumAgents + kThisAgentId;
const int kNumActions = 2;
// Increment time ONCE -- only 1 thread can do this.
if (kThisAgentId == 0) {
env_timestep_arr[kEnvId] += 1;
}
// Wait here until timestep has been updated
__syncthreads();
assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <=
kEpisodeLength);
if (kThisAgentId < kNumAgents) {
int kThisAgentActionIdxOffset = kEnvId * kNumAgents * kNumActions +
kThisAgentId * kNumActions;
float delta_acceleration = acceleration_actions_arr[action_indices_arr[
kThisAgentActionIdxOffset + 0]];
float delta_turn = turn_actions_arr[action_indices_arr[
kThisAgentActionIdxOffset + 1]];
acceleration_arr[kThisAgentArrayIdx] += delta_acceleration;
direction_arr[kThisAgentArrayIdx] = fmod(
direction_arr[kThisAgentArrayIdx] + delta_turn, kTwoPi) *
still_in_the_game_arr[kThisAgentArrayIdx];
if (direction_arr[kThisAgentArrayIdx] < 0) {
direction_arr[kThisAgentArrayIdx] = kTwoPi + direction_arr[
kThisAgentArrayIdx];
}
// Speed clipping
speed_arr[kThisAgentArrayIdx] = min(
kMaxSpeed * skill_levels_arr[kThisAgentId],
max(
0.0,
speed_arr[kThisAgentArrayIdx] + acceleration_arr[
kThisAgentArrayIdx])) * still_in_the_game_arr[kThisAgentArrayIdx];
// Reset acceleration to 0 when speed becomes 0 or
// kMaxSpeed (multiplied by skill levels)
if ((speed_arr[kThisAgentArrayIdx] <= 0.0) ||
(speed_arr[kThisAgentArrayIdx] >=
(kMaxSpeed * skill_levels_arr[kThisAgentId]))) {
acceleration_arr[kThisAgentArrayIdx] = 0.0;
}
loc_x_arr[kThisAgentArrayIdx] += speed_arr[kThisAgentArrayIdx] *
cos(direction_arr[kThisAgentArrayIdx]);
loc_y_arr[kThisAgentArrayIdx] += speed_arr[kThisAgentArrayIdx] *
sin(direction_arr[kThisAgentArrayIdx]);
// Crossing the edge
bool has_crossed_edge = (
(loc_x_arr[kThisAgentArrayIdx] < 0) |
(loc_x_arr[kThisAgentArrayIdx] > kGridLength) |
(loc_y_arr[kThisAgentArrayIdx] < 0) |
(loc_y_arr[kThisAgentArrayIdx] > kGridLength));
// Clip x and y if agent has crossed edge
if (has_crossed_edge) {
if (loc_x_arr[kThisAgentArrayIdx] < 0) {
loc_x_arr[kThisAgentArrayIdx] = 0.0;
} else if (loc_x_arr[kThisAgentArrayIdx] > kGridLength) {
loc_x_arr[kThisAgentArrayIdx] = kGridLength;
}
if (loc_y_arr[kThisAgentArrayIdx] < 0) {
loc_y_arr[kThisAgentArrayIdx] = 0.0;
} else if (loc_y_arr[kThisAgentArrayIdx] > kGridLength) {
loc_y_arr[kThisAgentArrayIdx] = kGridLength;
}
edge_hit_reward_penalty[kThisAgentArrayIdx] = kEdgeHitPenalty;
} else {
edge_hit_reward_penalty[kThisAgentArrayIdx] = 0.0;
}
}
// Make sure all agents have updated their states
__syncthreads();
// -------------------------------
// Generate observation
// -------------------------------
CudaTagContinuousGenerateObservation(
loc_x_arr,
loc_y_arr,
speed_arr,
direction_arr,
acceleration_arr,
agent_types_arr,
kGridLength,
kMaxSpeed,
kNumOtherAgentsObserved,
still_in_the_game_arr,
kUseFullObservation,
obs_arr,
neighbor_distances_arr,
neighbor_ids_sorted_by_distance_arr,
nearest_neighbor_ids,
env_timestep_arr,
kNumAgents,
kEpisodeLength,
kEnvId,
kThisAgentId,
kThisAgentArrayIdx);
// -------------------------------
// Compute reward
// -------------------------------
CudaTagContinuousComputeReward(
rewards_arr,
loc_x_arr,
loc_y_arr,
kGridLength,
edge_hit_reward_penalty,
step_rewards_arr,
num_runners_arr,
agent_types_arr,
kDistanceMarginForReward,
kTagRewardForTagger,
kTagPenaltyForRunner,
kEndOfGameRewardForRunner,
kRunnerExitsGameAfterTagged,
still_in_the_game_arr,
done_arr,
env_timestep_arr,
kNumAgents,
kEpisodeLength,
kEnvId,
kThisAgentId,
kThisAgentArrayIdx);
}
}
|
the_stack
|
LegionRuntime::Logger::Category log_optimizer("optimizer");
__global__
void sgd_update(size_t count, float lr, float weight_decay,
float momentum, bool nesterov,
const float* WGrad, float* V, float* W)
{
// Reference: https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html#SGD
CUDA_KERNEL_LOOP(i, count)
{
float gt = WGrad[i] + weight_decay * W[i];
if (momentum > 0.0f) {
V[i] = V[i] * momentum + gt;
if (nesterov)
gt = gt + momentum * V[i];
else
gt = V[i];
}
W[i] -= lr * gt;
}
}
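// Hedged CPU reference (not part of the original source) mirroring sgd_update above, to make
// the PyTorch-style momentum / Nesterov handling explicit. All buffers are plain host arrays.
void sgd_update_reference(size_t count, float lr, float weight_decay,
                          float momentum, bool nesterov,
                          const float* w_grad, float* v, float* w)
{
  for (size_t i = 0; i < count; i++) {
    float gt = w_grad[i] + weight_decay * w[i];  // weight decay folded into the gradient
    if (momentum > 0.0f) {
      v[i] = v[i] * momentum + gt;               // velocity update
      gt = nesterov ? gt + momentum * v[i]       // Nesterov look-ahead
                    : v[i];                      // classic momentum
    }
    w[i] -= lr * gt;
  }
}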
__host__
void SGDOptimizer::ps_update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const SGDOptimizer* op = (SGDOptimizer*) task->args;
if (op->momentum > 0.0f) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
} else {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
}
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL;
size_t size = 0, num_replicas = 0;
switch(domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorR<float, DIM> accWGrad( \
regions[0], task->regions[0], FID_DATA, ctx, runtime); \
TensorAccessorW<float, DIM> accW( \
regions[1], task->regions[1], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
for (int i = 0; i < domain.get_dim()-1; i++) { \
assert(accW.rect.lo[i] == accWGrad.rect.lo[i]); \
assert(accW.rect.hi[i] == accWGrad.rect.hi[i]); \
} \
size = accW.rect.volume(); \
assert(accWGrad.rect.volume() % accW.rect.volume() == 0); \
num_replicas = accWGrad.rect.volume() / accW.rect.volume(); \
w_grad_ptr = accWGrad.ptr; \
w_ptr = accW.ptr; \
if (op->momentum > 0.0f) { \
TensorAccessorW<float, DIM> accV( \
regions[2], task->regions[2], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
assert(accW.rect == accV.rect); \
v_ptr = accV.ptr; \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dims
assert(false);
}
}
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
// Step 1: Gather gradients in the first replica
for (int i = 1; i < num_replicas; i++) {
const float* src = w_grad_ptr + i * size;
apply_add_with_scale<<<GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(
(float*) w_grad_ptr, src, size, 1.0f);
}
//checkCUDA(cudaDeviceSynchronize());
// Step 2: SGD update
sgd_update<<<GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(
size, op->lr, op->weight_decay, op->momentum, op->nesterov,
w_grad_ptr, v_ptr, w_ptr);
//checkCUDA(cudaDeviceSynchronize());
}
#ifdef FF_USE_NCCL
__host__
void SGDOptimizer::nccl_update_task(
const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const SGDOptimizer* op = (SGDOptimizer*) task->args;
const OpMeta* meta = *((OpMeta**) task->local_args);
//FFHandler handler = *((FFHandler*) task->local_args);
if (op->momentum > 0.0f) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
} else {
assert(regions.size() == 2);
assert(task->regions.size() == 2);
}
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL;
size_t size = 0;
switch(domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorR<float, DIM> accWGrad( \
regions[0], task->regions[0], FID_DATA, ctx, runtime); \
TensorAccessorW<float, DIM> accW( \
regions[1], task->regions[1], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
assert(accW.rect == accWGrad.rect); \
size = accW.rect.volume(); \
w_grad_ptr = accWGrad.ptr; \
w_ptr = accW.ptr; \
if (op->momentum > 0.0f) { \
TensorAccessorW<float, DIM> accV( \
regions[2], task->regions[2], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
assert(accW.rect == accV.rect); \
v_ptr = accV.ptr; \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dims
assert(false);
}
}
// Use NCCL to sync gradients
//fprintf(stderr, "weight(%p) Before ncclAllReduce...\n", w_grad_ptr);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkNCCL(ncclAllReduce(w_grad_ptr, (float*) w_grad_ptr, size, ncclFloat,
ncclSum, meta->handle.ncclComm, stream));
//fprintf(stderr, "weight(%p) After ncclAllReduce...\n", w_grad_ptr);
// Step 2: SGD update
sgd_update<<<GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(
size, op->lr, op->weight_decay, op->momentum, op->nesterov,
w_grad_ptr, v_ptr, w_ptr);
//checkCUDA(cudaDeviceSynchronize());
}
#endif
// ==================================================================
// Adam Optimizer
// ==================================================================
__global__
void add_kernel(int count, float scale,
const float* src,
float* dst)
{
CUDA_KERNEL_LOOP(i, count)
{
dst[i] += src[i] * scale;
}
}
__global__
void scale_kernel(int count, float a, float b,
float* ptr)
{
CUDA_KERNEL_LOOP(i, count)
{
ptr[i] = (b - a) * ptr[i] + a;
}
}
__global__
void adam_update(int count, float alpha_t,
float beta1, float beta2,
float weight_decay, float epsilon,
const float *WGrad, float *M,
float *V, float *W)
{
// Reference for weight decay
// https://www.fast.ai/2018/07/02/adam-weight-decay/
CUDA_KERNEL_LOOP(i, count)
{
//W[i] -= weight_decay * alpha_t * W[i];
//float gt = WGrad[i];
float gt = WGrad[i] + weight_decay * W[i];
float mt = beta1 * M[i] + (1 - beta1) * gt;
float vt = beta2 * V[i] + (1 - beta2) * gt * gt;
M[i] = mt;
V[i] = vt;
W[i] -= alpha_t * mt / (sqrt(vt) + epsilon);
}
}
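// Hedged CPU reference (not part of the original source) mirroring adam_update above.
// alpha_t is taken as given; presumably the caller folds the usual bias-correction terms
// into it before launching the kernel (assumption, not visible in this file).
void adam_update_reference(int count, float alpha_t, float beta1, float beta2,
                           float weight_decay, float epsilon,
                           const float* w_grad, float* m, float* v, float* w)
{
  for (int i = 0; i < count; i++) {
    float gt = w_grad[i] + weight_decay * w[i];          // weight decay folded into the gradient
    float mt = beta1 * m[i] + (1.0f - beta1) * gt;       // first-moment estimate
    float vt = beta2 * v[i] + (1.0f - beta2) * gt * gt;  // second-moment estimate
    m[i] = mt;
    v[i] = vt;
    w[i] -= alpha_t * mt / (sqrtf(vt) + epsilon);
  }
}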
__host__
void AdamOptimizer::ps_update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const AdamOptimizer* op = (AdamOptimizer*) task->args;
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL, *m_ptr = NULL;
size_t size = 0, num_replicas = 0;
switch(domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorR<float, DIM> accWGrad( \
regions[0], task->regions[0], FID_DATA, ctx, runtime); \
TensorAccessorW<float, DIM> accW( \
regions[1], task->regions[1], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
TensorAccessorW<float, DIM> accV( \
regions[2], task->regions[2], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
TensorAccessorW<float, DIM> accM( \
regions[3], task->regions[3], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
size = accW.rect.volume(); \
assert(accWGrad.rect.volume() % accW.rect.volume() == 0); \
num_replicas = accWGrad.rect.volume() / accW.rect.volume(); \
w_grad_ptr = accWGrad.ptr; \
w_ptr = accW.ptr; \
v_ptr = accV.ptr; \
m_ptr = accM.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dims
assert(false);
}
}
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
// Step 1: Gather gradients in the first replica
for (int i = 1; i < num_replicas; i++) {
const float* src = w_grad_ptr + i * size;
add_kernel<<<GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(
size, 1.0f, src, (float*)w_grad_ptr);
}
//checkCUDA(cudaDeviceSynchronize());
//fprintf(stderr, "alpha = %.8lf alpha_t = %.8lf decay = %.8lf\n",
// op->alpha, op->alpha_t, op->weight_decay);
// Step 2: Adam update
adam_update<<<GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(
size, op->alpha_t, op->beta1, op->beta2,
op->weight_decay, op->epsilon,
w_grad_ptr, m_ptr, v_ptr, w_ptr);
//checkCUDA(cudaDeviceSynchronize());
}
#ifdef FF_USE_NCCL
__host__
void AdamOptimizer::nccl_update_task(const Task* task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const AdamOptimizer* op = (AdamOptimizer*) task->args;
const OpMeta* meta = *((OpMeta**) task->local_args);
//FFHandler handler = *((FFHandler*) task->local_args);
Domain domain = runtime->get_index_space_domain(ctx,
task->regions[1].region.get_index_space());
const float *w_grad_ptr = NULL;
float *w_ptr = NULL, *v_ptr = NULL, *m_ptr = NULL;
size_t size = 0;
switch(domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
TensorAccessorR<float, DIM> accWGrad( \
regions[0], task->regions[0], FID_DATA, ctx, runtime); \
TensorAccessorW<float, DIM> accW( \
regions[1], task->regions[1], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
TensorAccessorW<float, DIM> accV( \
regions[2], task->regions[2], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
TensorAccessorW<float, DIM> accM( \
regions[3], task->regions[3], FID_DATA, ctx, runtime, \
true/*readOutput*/); \
size = accW.rect.volume(); \
assert(accWGrad.rect == accW.rect); \
assert(accWGrad.rect == accV.rect); \
assert(accWGrad.rect == accM.rect); \
w_grad_ptr = accWGrad.ptr; \
w_ptr = accW.ptr; \
v_ptr = accV.ptr; \
m_ptr = accM.ptr; \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dims
assert(false);
}
}
// Use NCCL to sync gradients
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkNCCL(ncclAllReduce(w_grad_ptr, (float*)w_grad_ptr, size, ncclFloat,
ncclSum, meta->handle.ncclComm, stream));
//fprintf(stderr, "alpha = %.8lf alpha_t = %.8lf decay = %.8lf\n",
// op->alpha, op->alpha_t, op->weight_decay);
// Step 2: Adam update
adam_update<<<GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(
size, op->alpha_t, op->beta1, op->beta2,
op->weight_decay, op->epsilon,
w_grad_ptr, m_ptr, v_ptr, w_ptr);
//checkCUDA(cudaDeviceSynchronize());
}
#endif
|
the_stack
|
#include "octnet/gpu/conv.h"
#include "octnet/gpu/gpu.h"
#include <cstdlib>
#include <cstdio>
#include <cstring>
template <int rdc_fcn>
__global__ void kernel_conv3x3x3(octree grid, int n_blocks, const octree grid_in, const ot_data_t* weights, const ot_data_t* bias, ot_size_t channels_out, ot_size_t channels_in) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&grid, grid_idx);
// ot_data_t* in_data = grid_in.data_ptrs[grid_idx];
ot_data_t* in_data = octree_get_data(&grid_in, grid_idx);
// ot_data_t* out_data = grid.data_ptrs[grid_idx];
ot_data_t* out_data = octree_get_data(&grid, grid_idx);
int gn,gd,gh,gw;
octree_split_grid_idx(&grid_in, grid_idx, &gn, &gd, &gh, &gw);
int ds = gd * 8;
int hs = gh * 8;
int ws = gw * 8;
//check if L0 split is set
if(!tree_isset_bit(tree, 0)) {
// if NOT set
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = 1;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = 1.f / (8*8*8);
}
conv3x3x3_border<INV_FILTER_FALSE, ADD_BIAS_TRUE>(0, 8, 0, 8, 0, 8, gn, ds, hs, ws, &grid_in, weights, bias, channels_out, factor, out_data);
conv3x3x3_const<INV_FILTER_FALSE, ADD_BIAS_TRUE>(in_data, weights, bias, channels_in, channels_out, factor*6*6*6, out_data);
}
else {
int bit_idx_l1 = 1;
for(int bdl1 = 0; bdl1 < 2; ++bdl1) {
for(int bhl1 = 0; bhl1 < 2; ++bhl1) {
for(int bwl1 = 0; bwl1 < 2; ++bwl1) {
if(!tree_isset_bit(tree, bit_idx_l1)) {
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = 1;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = 1.f / (4*4*4);
}
int out_data_idx = tree_data_idx(tree, bit_idx_l1, channels_out);
int in_data_idx = out_data_idx / channels_out * channels_in;
conv3x3x3_border<INV_FILTER_FALSE, ADD_BIAS_TRUE>(bdl1*4, bdl1*4+4, bhl1*4, bhl1*4+4, bwl1*4, bwl1*4+4,
gn, ds, hs, ws, &grid_in, weights, bias, channels_out, factor, out_data + out_data_idx);
conv3x3x3_const<INV_FILTER_FALSE, ADD_BIAS_TRUE>(in_data + in_data_idx, weights, bias, channels_in, channels_out, factor*2*2*2, out_data + out_data_idx);
}
else {
int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1);
for(int bdl2 = 0; bdl2 < 2; ++bdl2) {
for(int bhl2 = 0; bhl2 < 2; ++bhl2) {
for(int bwl2 = 0; bwl2 < 2; ++bwl2) {
if(!tree_isset_bit(tree, bit_idx_l2)) {
int out_data_idx = tree_data_idx(tree, bit_idx_l2, channels_out);
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = 1;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = 1.f / (2*2*2);
}
// int in_data_idx = out_data_idx / channels_out * channels_in;
conv3x3x3_border<INV_FILTER_FALSE, ADD_BIAS_TRUE>(bdl1*4+bdl2*2, bdl1*4+bdl2*2+2, bhl1*4+bhl2*2, bhl1*4+bhl2*2+2, bwl1*4+bwl2*2, bwl1*4+bwl2*2+2,
gn, ds, hs, ws, &grid_in, weights, bias, channels_out, factor, out_data + out_data_idx);
}
else {
int bit_idx_l3 = tree_child_bit_idx(bit_idx_l2);
for(int bdl3 = 0; bdl3 < 2; ++bdl3) {
for(int bhl3 = 0; bhl3 < 2; ++bhl3) {
for(int bwl3 = 0; bwl3 < 2; ++bwl3) {
int out_data_idx = tree_data_idx(tree, bit_idx_l3, channels_out);
// printf("%d, %d,%d,%d\n", bit_idx_l3, ds+bdl1*4+bdl2*2+bdl3, hs+bhl1*4+bhl2*2+bhl3, ws+bwl1*4+bwl2*2+bwl3);
conv3x3x3_point<INV_FILTER_FALSE>(gn, ds+bdl1*4+bdl2*2+bdl3, hs+bhl1*4+bhl2*2+bhl3, ws+bwl1*4+bwl2*2+bwl3,
&grid_in, weights, channels_out, 1, out_data + out_data_idx);
for(int co = 0; co < channels_out; ++co) {
out_data[out_data_idx + co] += bias[co];
}
bit_idx_l3++;
}
}
}
}
bit_idx_l2++;
}
}
}
} // else if isset L1
bit_idx_l1++;
} // for bwl1
} // for bhl1
} // for bdl1
} // if isset L0
}
}
void octree_conv3x3x3_sum_gpu(const octree* grid_in, const ot_data_t* weights, const ot_data_t* bias, int channels_out, octree* grid) {
octree_resize_gpu(grid_in->n, grid_in->grid_depth, grid_in->grid_height, grid_in->grid_width, channels_out, grid_in->n_leafs, grid);
octree_cpy_scalars(grid_in, grid);
grid->feature_size = channels_out;
octree_cpy_trees_gpu_gpu(grid_in, grid);
octree_cpy_prefix_leafs_gpu_gpu(grid_in, grid);
int n_blocks = octree_num_blocks(grid_in);
const int channels_in = grid_in->feature_size;
octree_fill_data_gpu(grid, 0);
kernel_conv3x3x3<REDUCE_SUM><<<GET_BLOCKS_T(n_blocks, 512), 512>>>(
*grid, n_blocks, *grid_in, weights, bias, channels_out, channels_in
);
CUDA_POST_KERNEL_CHECK;
}
void octree_conv3x3x3_avg_gpu(const octree* grid_in, const ot_data_t* weights, const ot_data_t* bias, int channels_out, octree* grid) {
octree_resize_gpu(grid_in->n, grid_in->grid_depth, grid_in->grid_height, grid_in->grid_width, channels_out, grid_in->n_leafs, grid);
octree_cpy_scalars(grid_in, grid);
grid->feature_size = channels_out;
octree_cpy_trees_gpu_gpu(grid_in, grid);
octree_cpy_prefix_leafs_gpu_gpu(grid_in, grid);
int n_blocks = octree_num_blocks(grid_in);
const int channels_in = grid_in->feature_size;
octree_fill_data_gpu(grid, 0);
kernel_conv3x3x3<REDUCE_AVG><<<GET_BLOCKS_T(n_blocks, 512), 512>>>(
*grid, n_blocks, *grid_in, weights, bias, channels_out, channels_in
);
CUDA_POST_KERNEL_CHECK;
}
template <int rdc_fcn>
__global__ void kernel_conv3x3x3_bwd(octree grad_in, int n_blocks, const octree grad_out, const ot_data_t* weights, const ot_data_t* bias, ot_size_t channels_in, ot_size_t channels_out) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&grad_in, grid_idx);
// ot_data_t* grad_in_data = grad_in.data_ptrs[grid_idx];
ot_data_t* grad_in_data = octree_get_data(&grad_in, grid_idx);
// ot_data_t* grad_out_data = grad_out.data_ptrs[grid_idx];
ot_data_t* grad_out_data = octree_get_data(&grad_out, grid_idx);
int gn,gd,gh,gw;
octree_split_grid_idx(&grad_in, grid_idx, &gn, &gd, &gh, &gw);
int ds = gd * 8;
int hs = gh * 8;
int ws = gw * 8;
//check if L0 split is set
if(!tree_isset_bit(tree, 0)) {
// if NOT set
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = 1;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = 1.f / (8*8*8);
}
conv3x3x3_border<INV_FILTER_TRUE, ADD_BIAS_FALSE>(0, 8, 0, 8, 0, 8, gn, ds, hs, ws, &grad_out, weights, 0, channels_in, factor, grad_in_data);
conv3x3x3_const<INV_FILTER_TRUE, ADD_BIAS_FALSE>(grad_out_data, weights, 0, channels_out, channels_in, factor*6*6*6, grad_in_data);
}
else {
int bit_idx_l1 = 1;
for(int bdl1 = 0; bdl1 < 2; ++bdl1) {
for(int bhl1 = 0; bhl1 < 2; ++bhl1) {
for(int bwl1 = 0; bwl1 < 2; ++bwl1) {
if(!tree_isset_bit(tree, bit_idx_l1)) {
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = 1;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = 1.f / (4*4*4);
}
int out_data_idx = tree_data_idx(tree, bit_idx_l1, channels_out);
int in_data_idx = out_data_idx / channels_out * channels_in;
conv3x3x3_border<INV_FILTER_TRUE, ADD_BIAS_FALSE>(bdl1*4, bdl1*4+4, bhl1*4, bhl1*4+4, bwl1*4, bwl1*4+4,
gn, ds, hs, ws, &grad_out, weights, 0, channels_in, factor, grad_in_data + in_data_idx);
conv3x3x3_const<INV_FILTER_TRUE, ADD_BIAS_FALSE>(grad_out_data + out_data_idx, weights, 0, channels_out, channels_in, factor*2*2*2, grad_in_data + in_data_idx);
}
else {
int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1);
for(int bdl2 = 0; bdl2 < 2; ++bdl2) {
for(int bhl2 = 0; bhl2 < 2; ++bhl2) {
for(int bwl2 = 0; bwl2 < 2; ++bwl2) {
if(!tree_isset_bit(tree, bit_idx_l2)) {
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = 1;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = 1.f / (2*2*2);
}
int out_data_idx = tree_data_idx(tree, bit_idx_l2, channels_out);
int in_data_idx = out_data_idx / channels_out * channels_in;
conv3x3x3_border<INV_FILTER_TRUE, ADD_BIAS_FALSE>(bdl1*4+bdl2*2, bdl1*4+bdl2*2+2, bhl1*4+bhl2*2, bhl1*4+bhl2*2+2, bwl1*4+bwl2*2, bwl1*4+bwl2*2+2,
gn, ds, hs, ws, &grad_out, weights, 0, channels_in, factor, grad_in_data + in_data_idx);
}
else {
int bit_idx_l3 = tree_child_bit_idx(bit_idx_l2);
for(int bdl3 = 0; bdl3 < 2; ++bdl3) {
for(int bhl3 = 0; bhl3 < 2; ++bhl3) {
for(int bwl3 = 0; bwl3 < 2; ++bwl3) {
int in_data_idx = tree_data_idx(tree, bit_idx_l3, channels_in);
conv3x3x3_point<INV_FILTER_TRUE>(gn, ds+bdl1*4+bdl2*2+bdl3, hs+bhl1*4+bhl2*2+bhl3, ws+bwl1*4+bwl2*2+bwl3,
&grad_out, weights, channels_in, 1, grad_in_data + in_data_idx);
bit_idx_l3++;
}
}
}
}
bit_idx_l2++;
}
}
}
} // else if isset L1
bit_idx_l1++;
} // for bwl1
} // for bhl1
} // for bdl1
} // if isset L0
}
}
void octree_conv3x3x3_sum_bwd_gpu(const ot_data_t* weights, const octree* grad_out, int channels_in, octree* grad_in) {
octree_resize_gpu(grad_out->n, grad_out->grid_depth, grad_out->grid_height, grad_out->grid_width, channels_in, grad_out->n_leafs, grad_in);
octree_cpy_scalars(grad_out, grad_in);
grad_in->feature_size = channels_in;
octree_cpy_trees_gpu_gpu(grad_out, grad_in);
octree_cpy_prefix_leafs_gpu_gpu(grad_out, grad_in);
int n_blocks = octree_num_blocks(grad_out);
const int channels_out = grad_out->feature_size;
octree_fill_data_gpu(grad_in, 0);
kernel_conv3x3x3_bwd<REDUCE_SUM><<<GET_BLOCKS_T(n_blocks, 512), 512>>>(
*grad_in, n_blocks, *grad_out, weights, 0, channels_in, channels_out
);
CUDA_POST_KERNEL_CHECK;
}
void octree_conv3x3x3_avg_bwd_gpu(const ot_data_t* weights, const octree* grad_out, int channels_in, octree* grad_in) {
octree_resize_gpu(grad_out->n, grad_out->grid_depth, grad_out->grid_height, grad_out->grid_width, channels_in, grad_out->n_leafs, grad_in);
octree_cpy_scalars(grad_out, grad_in);
grad_in->feature_size = channels_in;
octree_cpy_trees_gpu_gpu(grad_out, grad_in);
octree_cpy_prefix_leafs_gpu_gpu(grad_out, grad_in);
int n_blocks = octree_num_blocks(grad_out);
const int channels_out = grad_out->feature_size;
octree_fill_data_gpu(grad_in, 0);
kernel_conv3x3x3_bwd<REDUCE_AVG><<<GET_BLOCKS_T(n_blocks, 512), 512>>>(
*grad_in, n_blocks, *grad_out, weights, 0, channels_in, channels_out
);
CUDA_POST_KERNEL_CHECK;
}
template <int rdc_fcn>
__global__ void kernel_conv3x3x3_wbwd(ot_data_t* grad_weights, ot_data_t* grad_bias, int n_blocks, const ot_data_t scale, const octree grid_in, const octree grad_out) {
const int channels_in = grid_in.feature_size;
const int channels_out = grad_out.feature_size;
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&grid_in, grid_idx);
// ot_data_t* grid_in_data = grid_in.data_ptrs[grid_idx];
ot_data_t* grid_in_data = octree_get_data(&grid_in, grid_idx);
// ot_data_t* grad_out_data = grad_out.data_ptrs[grid_idx];
ot_data_t* grad_out_data = octree_get_data(&grad_out, grid_idx);
int gn,gd,gh,gw;
octree_split_grid_idx(&grid_in, grid_idx, &gn, &gd, &gh, &gw);
int ds = gd * 8;
int hs = gh * 8;
int ws = gw * 8;
//check if L0 split is set
if(!tree_isset_bit(tree, 0)) {
// if NOT set
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = scale;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = scale / (8*8*8);
}
conv3x3x3_border_wbwd(0, 8, 0, 8, 0, 8, gn, ds, hs, ws, &grid_in, grad_out_data, channels_out, factor, grad_weights, grad_bias);
conv3x3x3_const_wbwd(grid_in_data, grad_out_data, channels_in, channels_out, factor*6*6*6, grad_weights, grad_bias);
}
else {
int bit_idx_l1 = 1;
for(int bdl1 = 0; bdl1 < 2; ++bdl1) {
for(int bhl1 = 0; bhl1 < 2; ++bhl1) {
for(int bwl1 = 0; bwl1 < 2; ++bwl1) {
if(!tree_isset_bit(tree, bit_idx_l1)) {
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = scale;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = scale / (4*4*4);
}
int out_data_idx = tree_data_idx(tree, bit_idx_l1, channels_out);
int in_data_idx = out_data_idx / channels_out * channels_in;
conv3x3x3_border_wbwd(bdl1*4, bdl1*4+4, bhl1*4, bhl1*4+4, bwl1*4, bwl1*4+4,
gn, ds, hs, ws, &grid_in, grad_out_data + out_data_idx, channels_out, factor, grad_weights, grad_bias);
conv3x3x3_const_wbwd(grid_in_data + in_data_idx, grad_out_data + out_data_idx, channels_in, channels_out, factor*2*2*2, grad_weights, grad_bias);
}
else {
int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1);
for(int bdl2 = 0; bdl2 < 2; ++bdl2) {
for(int bhl2 = 0; bhl2 < 2; ++bhl2) {
for(int bwl2 = 0; bwl2 < 2; ++bwl2) {
if(!tree_isset_bit(tree, bit_idx_l2)) {
float factor;
if(rdc_fcn == REDUCE_SUM) {
factor = scale;
}
else if(rdc_fcn == REDUCE_AVG) {
factor = scale / (2*2*2);
}
int out_data_idx = tree_data_idx(tree, bit_idx_l2, channels_out);
conv3x3x3_border_wbwd(bdl1*4+bdl2*2, bdl1*4+bdl2*2+2, bhl1*4+bhl2*2, bhl1*4+bhl2*2+2, bwl1*4+bwl2*2, bwl1*4+bwl2*2+2,
gn, ds, hs, ws, &grid_in, grad_out_data + out_data_idx, channels_out, factor, grad_weights, grad_bias);
}
else {
int bit_idx_l3 = tree_child_bit_idx(bit_idx_l2);
for(int bdl3 = 0; bdl3 < 2; ++bdl3) {
for(int bhl3 = 0; bhl3 < 2; ++bhl3) {
for(int bwl3 = 0; bwl3 < 2; ++bwl3) {
int out_data_idx = tree_data_idx(tree, bit_idx_l3, channels_out);
conv3x3x3_point_wbwd(gn, ds+bdl1*4+bdl2*2+bdl3, hs+bhl1*4+bhl2*2+bhl3, ws+bwl1*4+bwl2*2+bwl3,
&grid_in, grad_out_data + out_data_idx, channels_out, scale, grad_weights);
for(int co = 0; co < channels_out; ++co) {
ot_data_t val = scale * grad_out_data[out_data_idx + co];
atomicAdd(grad_bias + co, val);
}
bit_idx_l3++;
}
}
}
}
bit_idx_l2++;
}
}
}
} // else if isset L1
bit_idx_l1++;
} // for bwl1
} // for bhl1
} // for bdl1
} // if isset L0
}
}
void octree_conv3x3x3_sum_wbwd_gpu(const octree* grid_in, const octree* grad_out, ot_data_t scale, ot_data_t* grad_weights, ot_data_t* grad_bias) {
int n_blocks = octree_num_blocks(grid_in);
kernel_conv3x3x3_wbwd<REDUCE_SUM><<<GET_BLOCKS_T(n_blocks, 512), 512>>>(
grad_weights, grad_bias, n_blocks, scale, *grid_in, *grad_out
);
CUDA_POST_KERNEL_CHECK;
}
void octree_conv3x3x3_avg_wbwd_gpu(const octree* grid_in, const octree* grad_out, ot_data_t scale, ot_data_t* grad_weights, ot_data_t* grad_bias) {
int n_blocks = octree_num_blocks(grid_in);
kernel_conv3x3x3_wbwd<REDUCE_AVG><<<GET_BLOCKS_T(n_blocks, 512), 512>>>(
grad_weights, grad_bias, n_blocks, scale, *grid_in, *grad_out
);
CUDA_POST_KERNEL_CHECK;
}
|
the_stack
|
#include <cub/cub.cuh>
#include <cub/device/device_scan.cuh>
#include <libvis/cuda/cuda_auto_tuner.h>
#include <math_constants.h>
#include "badslam/cuda_util.cuh"
#include "badslam/surfel_projection.cuh"
#include "badslam/util_nvcc_only.cuh"
namespace vis {
template <bool downsample_color>
__global__ void CalibrateAndDownsampleImagesCUDAKernel(
DepthParameters depth_params,
CUDABuffer_<u16> depth_buffer,
CUDABuffer_<u16> normals_buffer,
cudaTextureObject_t color_texture,
CUDABuffer_<float> downsampled_depth,
CUDABuffer_<u16> downsampled_normals,
CUDABuffer_<u8> downsampled_color) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < downsampled_depth.width() && y < downsampled_depth.height()) {
constexpr int kOffsets[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
float depths[4];
float depth_sum = 0;
int depth_count = 0;
#pragma unroll
for (int i = 0; i < 4; ++ i) {
u16 raw_depth = depth_buffer(2 * y + kOffsets[i][0], 2 * x + kOffsets[i][1]);
if (!(raw_depth & kInvalidDepthBit)) {
depths[i] = RawToCalibratedDepth(
depth_params.a,
depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size,
x / depth_params.sparse_surfel_cell_size),
depth_params.raw_to_float_depth, raw_depth);
depth_sum += depths[i];
depth_count += 1;
} else {
depths[i] = CUDART_INF_F;
}
}
if (depth_count == 0) {
// The normal does not need to be set here, since the pixel is marked invalid by setting its depth to 0.
// However, the color must be set, as it might become relevant again for further downsampling.
downsampled_depth(y, x) = 0;
} else {
float average_depth = depth_sum / depth_count;
int closest_index;
float closest_distance = CUDART_INF_F;
#pragma unroll
for (int i = 0; i < 4; ++ i) {
float distance = fabs(depths[i] - average_depth);
if (distance < closest_distance) {
closest_index = i;
closest_distance = distance;
}
}
downsampled_depth(y, x) = depths[closest_index];
downsampled_normals(y, x) = normals_buffer(2 * y + kOffsets[closest_index][0], 2 * x + kOffsets[closest_index][1]);
}
if (downsample_color) {
// Bilinearly interpolate in the middle of the original 4 pixels to get their average.
float color = tex2D<float>(color_texture, 2 * x + 1.0f, 2 * y + 1.0f);
downsampled_color(y, x) = 255.f * color + 0.5f;
} else {
float color = tex2D<float>(color_texture, x + 0.5f, y + 0.5f);
downsampled_color(y, x) = 255.f * color + 0.5f;
}
}
}
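// Hedged CPU sketch (not part of the original source) of the 2x2 depth selection used by the
// downsampling kernels in this file: average the valid depths of a 2x2 block, then keep the
// single sample closest to that average so the matching normal can be carried along via the
// returned index. Returns -1 and a zero depth if no sample is valid.
inline int SelectDownsampledDepthIndex(const float depths[4], float* selected_depth) {
  float sum = 0.0f;
  int count = 0;
  for (int i = 0; i < 4; ++ i) {
    if (depths[i] > 0.0f) { sum += depths[i]; ++ count; }
  }
  if (count == 0) { *selected_depth = 0.0f; return -1; }  // invalid output pixel
  const float average = sum / count;
  int closest_index = -1;
  float closest_distance = 3.0e38f;  // effectively +infinity for depth values
  for (int i = 0; i < 4; ++ i) {
    if (depths[i] <= 0.0f) continue;  // skip invalid samples
    const float distance = fabsf(depths[i] - average);
    if (distance < closest_distance) {
      closest_distance = distance;
      closest_index = i;
    }
  }
  *selected_depth = depths[closest_index];
  return closest_index;
}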
__global__ void DownsampleImagesCUDAKernel(
CUDABuffer_<float> depth_buffer,
CUDABuffer_<u16> normals_buffer,
cudaTextureObject_t color_texture,
CUDABuffer_<float> downsampled_depth,
CUDABuffer_<u16> downsampled_normals,
CUDABuffer_<u8> downsampled_color) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < downsampled_depth.width() && y < downsampled_depth.height()) {
constexpr int kOffsets[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
float depths[4];
float depth_sum = 0;
int depth_count = 0;
#pragma unroll
for (int i = 0; i < 4; ++ i) {
depths[i] = depth_buffer(2 * y + kOffsets[i][0], 2 * x + kOffsets[i][1]);
if (depths[i] > 0) {
depth_sum += depths[i];
depth_count += 1;
} else {
depths[i] = CUDART_INF_F;
}
}
if (depth_count == 0) {
// The normal does not need to be set here, since the pixel is marked invalid by setting its depth to 0.
// However, the color must be set, as it might become relevant again for further downsampling.
downsampled_depth(y, x) = 0;
} else {
float average_depth = depth_sum / depth_count;
int closest_index;
float closest_distance = CUDART_INF_F;
#pragma unroll
for (int i = 0; i < 4; ++ i) {
float distance = fabs(depths[i] - average_depth);
if (distance < closest_distance) {
closest_index = i;
closest_distance = distance;
}
}
downsampled_depth(y, x) = depths[closest_index];
downsampled_normals(y, x) = normals_buffer(2 * y + kOffsets[closest_index][0], 2 * x + kOffsets[closest_index][1]);
}
// Bilinearly interpolate in the middle of the original 4 pixels to get their average.
float color = tex2D<float>(color_texture, 2 * x + 1.0f, 2 * y + 1.0f);
downsampled_color(y, x) = 255.f * color + 0.5f;
}
}
// __global__ void DownsampleImagesConsistentlyCUDAKernel(
// CUDABuffer_<float> comparison_depth_buffer,
// CUDABuffer_<float> depth_buffer,
// CUDABuffer_<u16> normals_buffer,
// cudaTextureObject_t color_texture,
// CUDABuffer_<float> downsampled_depth,
// CUDABuffer_<u16> downsampled_normals,
// CUDABuffer_<uchar> downsampled_color) {
// unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (x < downsampled_depth.width() && y < downsampled_depth.height()) {
// constexpr int kOffsets[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
// float depths[4];
//
// float depth_sum = 0;
// int depth_count = 0;
//
// #pragma unroll
// for (int i = 0; i < 4; ++ i) {
// depths[i] = depth_buffer(2 * y + kOffsets[i][0], 2 * x + kOffsets[i][1]);
// if (depths[i] > 0) {
// depth_sum += depths[i];
// depth_count += 1;
// } else {
// depths[i] = CUDART_INF_F;
// }
// }
//
// if (depth_count == 0) {
// downsampled_depth(y, x) = 0;
// } else {
// float comparison_depth = comparison_depth_buffer(y, x);
// if (comparison_depth == 0) {
// // Use average if no comparison depth exists for this pixel
// comparison_depth = depth_sum / depth_count;
// }
// int closest_index;
// float closest_distance = CUDART_INF_F;
//
// #pragma unroll
// for (int i = 0; i < 4; ++ i) {
// float distance = fabs(depths[i] - comparison_depth);
// if (distance < closest_distance) {
// closest_index = i;
// closest_distance = distance;
// }
// }
//
// downsampled_depth(y, x) = depths[closest_index];
// downsampled_normals(y, x) = normals_buffer(2 * y + kOffsets[closest_index][0], 2 * x + kOffsets[closest_index][1]);
//
// // For color averaging, use only pixels with valid and similar depth to the chosen depth.
// // This is to avoid using occluded surfels for averaging which "shine through" in the surfel rendering.
// // Notice that this will not properly simulate pixels with mixed colors at occlusion boundaries!
// constexpr float kColorAveragingDistanceThreshold = 0.15f; // TODO: tune this threshold
//
// depth_sum = 0; // misused for color averaging
// depth_count = 0;
//
// #pragma unroll
// for (int i = 0; i < 4; ++ i) {
// float distance = fabs(depths[i] - depths[closest_index]);
// if (distance < kColorAveragingDistanceThreshold) {
// depth_sum += 255.f * tex2D<float>(color_texture, 2 * x + kOffsets[i][1] + 0.5f, 2 * y + kOffsets[i][0] + 0.5f);
// depth_count += 1;
// }
// }
//
// downsampled_color(y, x) = depth_sum / depth_count + 0.5f;
// }
// }
// }
void CalibrateAndDownsampleImagesCUDA(
cudaStream_t stream,
bool downsample_color,
const DepthParameters& depth_params,
const CUDABuffer_<u16>& depth_buffer,
const CUDABuffer_<u16>& normals_buffer,
cudaTextureObject_t color_texture,
CUDABuffer_<float>* downsampled_depth,
CUDABuffer_<u16>* downsampled_normals,
CUDABuffer_<u8>* downsampled_color,
bool debug) {
CUDA_CHECK();
if (debug) {
downsampled_depth->Clear(0, stream);
downsampled_normals->Clear(0, stream);
downsampled_color->Clear(0, stream);
}
COMPILE_OPTION(downsample_color,
CUDA_AUTO_TUNE_2D(
CalibrateAndDownsampleImagesCUDAKernel<_downsample_color>,
32, 32,
downsampled_depth->width(), downsampled_depth->height(),
0, stream,
/* kernel parameters */
depth_params,
depth_buffer,
normals_buffer,
color_texture,
*downsampled_depth,
*downsampled_normals,
*downsampled_color));
CUDA_CHECK();
}
void DownsampleImagesCUDA(
cudaStream_t stream,
const CUDABuffer_<float>& depth_buffer,
const CUDABuffer_<u16>& normals_buffer,
cudaTextureObject_t color_texture,
CUDABuffer_<float>* downsampled_depth,
CUDABuffer_<u16>* downsampled_normals,
CUDABuffer_<u8>* downsampled_color,
bool debug) {
CUDA_CHECK();
if (debug) {
downsampled_depth->Clear(0, stream);
downsampled_normals->Clear(0, stream);
downsampled_color->Clear(0, stream);
}
CUDA_AUTO_TUNE_2D(
DownsampleImagesCUDAKernel,
32, 32,
downsampled_depth->width(), downsampled_depth->height(),
0, stream,
/* kernel parameters */
depth_buffer,
normals_buffer,
color_texture,
*downsampled_depth,
*downsampled_normals,
*downsampled_color);
CUDA_CHECK();
}
// void DownsampleImagesConsistentlyCUDA(
// cudaStream_t stream,
// const CUDABuffer_<float>& comparison_depth_buffer,
// const CUDABuffer_<u16>& /*comparison_normals_buffer*/,
// const CUDABuffer_<float>& depth_buffer,
// const CUDABuffer_<u16>& normals_buffer,
// cudaTextureObject_t color_texture,
// CUDABuffer_<float>* downsampled_depth,
// CUDABuffer_<u16>* downsampled_normals,
// CUDABuffer_<uchar>* downsampled_color,
// bool debug) {
// // TODO: comparison_normals_buffer is not used currently, could remove it
//
// CUDA_CHECK();
//
// if (debug) {
// downsampled_depth->Clear(0, stream);
// downsampled_normals->Clear(0, stream);
// downsampled_color->Clear(0, stream);
// }
//
// CUDA_AUTO_TUNE_2D(
// DownsampleImagesConsistentlyCUDAKernel,
// 32, 32,
// downsampled_depth->width(), downsampled_depth->height(),
// 0, stream,
// /* kernel parameters */
// comparison_depth_buffer,
// depth_buffer,
// normals_buffer,
// color_texture,
// *downsampled_depth,
// *downsampled_normals,
// *downsampled_color);
// CUDA_CHECK();
// }
// -----------------------------------------------------------------------------
__global__ void CalibrateDepthAndTransformColorToDepthCUDAKernel(
DepthToColorPixelCorner depth_to_color,
DepthParameters depth_params,
CUDABuffer_<u16> depth_buffer,
cudaTextureObject_t color_texture,
CUDABuffer_<float> out_depth,
CUDABuffer_<u8> out_color) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < out_depth.width() && y < out_depth.height()) {
u16 raw_depth = depth_buffer(y, x);
float depth;
if (!(raw_depth & kInvalidDepthBit)) {
depth = RawToCalibratedDepth(
depth_params.a,
depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size,
x / depth_params.sparse_surfel_cell_size),
depth_params.raw_to_float_depth, raw_depth);
} else {
depth = 0;
}
float2 color_pxy;
bool color_in_bounds = TransformDepthToColorPixelCorner(make_float2(x + 0.5f, y + 0.5f), depth_to_color, &color_pxy);
out_depth(y, x) = color_in_bounds ? depth : 0;
float color = tex2D<float>(color_texture, color_pxy.x, color_pxy.y);
out_color(y, x) = 255.f * color + 0.5f;
}
}
void CalibrateDepthAndTransformColorToDepthCUDA(
cudaStream_t stream,
const DepthToColorPixelCorner& depth_to_color,
const DepthParameters& depth_params,
const CUDABuffer_<u16>& depth_buffer,
cudaTextureObject_t color_texture,
CUDABuffer_<float>* out_depth,
CUDABuffer_<u8>* out_color) {
CUDA_CHECK();
CUDA_AUTO_TUNE_2D(
CalibrateDepthAndTransformColorToDepthCUDAKernel,
32, 32,
out_depth->width(), out_depth->height(),
0, stream,
/* kernel parameters */
depth_to_color,
depth_params,
depth_buffer,
color_texture,
*out_depth,
*out_color);
CUDA_CHECK();
}
__global__ void CalibrateDepthCUDAKernel(
DepthParameters depth_params,
CUDABuffer_<u16> depth_buffer,
CUDABuffer_<float> out_depth) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < out_depth.width() && y < out_depth.height()) {
u16 raw_depth = depth_buffer(y, x);
float depth;
if (!(raw_depth & kInvalidDepthBit)) {
depth = RawToCalibratedDepth(
depth_params.a,
depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size,
x / depth_params.sparse_surfel_cell_size),
depth_params.raw_to_float_depth, raw_depth);
} else {
depth = 0;
}
out_depth(y, x) = depth;
}
}
void CalibrateDepthCUDA(
cudaStream_t stream,
const DepthParameters& depth_params,
const CUDABuffer_<u16>& depth_buffer,
CUDABuffer_<float>* out_depth) {
CUDA_CHECK();
CUDA_AUTO_TUNE_2D(
CalibrateDepthCUDAKernel,
32, 32,
out_depth->width(), out_depth->height(),
0, stream,
/* kernel parameters */
depth_params,
depth_buffer,
*out_depth);
CUDA_CHECK();
}
}
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
typedef uint8_t uint8;
#define TB 128
#define DISP_MAX 256
#define BLOCK_SIZE 32
#define XDIM_MAX_THREADS 1024
#define BLOCK_D_SIZE 64
#define COLOR_DIFF(x, i, j) (abs(x[i] - x[j]))
struct postparams{
float pi1;
float pi2;
float tau_so;
float alpha1;
float sgm_q1;
float sgm_q2;
float alpha2;
float sigma;
int kernel_size;
};
void parseConf(postparams &params, std::string conf){
const std::string& chars = "\t\n\v\f\r ";
std::ifstream ifs(conf.c_str());
std::string line;
if(ifs.is_open()){
while(std::getline(ifs,line )){
std::string opt = line.substr(0,line.find_last_of(":"));
opt.erase(0, opt.find_first_not_of(chars));
opt.erase(opt.find_last_not_of(chars) + 1);
int start = line.find_last_of(":")+1;
int end = line.find_first_of("#") - start;
std::string val = line.substr(start,end);
val.erase(0, val.find_first_not_of(chars));
val.erase(val.find_last_not_of(chars) + 1);
if(!strcmp(opt.c_str(),"pi1")){
params.pi1 = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"pi2")){
params.pi2 = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"tau_so")){
params.tau_so = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"alpha1")){
params.alpha1 = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"sgm_q1")){
params.sgm_q1 = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"sgm_q2")){
params.sgm_q2 = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"alpha2")){
params.alpha2 = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"sigma")){
params.sigma = atof(val.c_str());
}else if(!strcmp(opt.c_str(),"kernel_size")){
params.kernel_size = atoi(val.c_str());
}
}
}else{
std::cout << "File " << conf << " does not exist! " <<std::endl;
exit(0);
}
}
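// Example of the configuration format parseConf() expects (the numbers below are
// illustrative placeholders, not tuned values). Each line is parsed as
// "<name> : <value> # optional comment": the text before the last ':' is the
// option name and the text between the last ':' and the first '#' is its value.
//
//   pi1         : 1.0   # SGM penalty P1
//   pi2         : 8.0   # SGM penalty P2
//   tau_so      : 0.08
//   alpha1      : 1.5
//   sgm_q1      : 4.0
//   sgm_q2      : 2.0
//   alpha2      : 0.1
//   sigma       : 5.0
//   kernel_size : 5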
std::vector<std::string> getImages(std::string file){
std::vector<std::string> imageNames;
std::ifstream ifs(file.c_str());
std::string line;
if(ifs.is_open()){
while(std::getline(ifs,line )){
imageNames.push_back(line);
}
}else{
std::cout << "File " << file << " does not exist! " <<std::endl;
exit(0);
}
return imageNames;
}
template<typename T>
__global__ void argmin( float* disp_d, T* cost, int rows, int cols, int ndisp ){
int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if( Row < rows && Col < cols){
T mincost=cost[ Row*cols*ndisp+Col*ndisp ];
int d=0;
for(int i=1; i<ndisp; i++){
float cd = cost[ Row*cols*ndisp+Col*ndisp +i ];
if( cd < mincost ){
mincost = cd;
d = i;
}
}
disp_d[ Row*cols+Col ] = (float)d;
}
}
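// Hedged launch sketch: this wrapper (launch_argmin) is our illustration and is
// not part of the original pipeline. The argmin kernel expects one thread per
// output pixel, with BLOCK_SIZE x BLOCK_SIZE thread blocks, over a cost volume
// laid out as cost[row*cols*ndisp + col*ndisp + d].
template<typename T>
void launch_argmin(float* disp_d, T* cost_d, int rows, int cols, int ndisp){
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (rows + BLOCK_SIZE - 1) / BLOCK_SIZE);
    argmin<T><<< grid, block >>>(disp_d, cost_d, rows, cols, ndisp);
}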
template<typename T>
__global__ void argmin_d( float* disp_d, T* cost, int rows, int cols, int ndisp ){
int Row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int Col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if( Row < rows && Col < cols){
T mincost=cost[ Row*cols+Col ];
int d=0;
for(int i=1; i<ndisp; i++){
float cd = cost[ i*rows*cols+Row*cols+Col ];
if( cd < mincost ){
mincost = cd;
d = i;
}
}
disp_d[ Row*cols+Col ] = (float)d;
}
}
template<typename T>
__global__ void swap_axis(const T* __restrict__ cost, T* temp_cost, const int rows, const int cols, const int ndisp ){
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y;
__shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1];
if( Col< cols*rows){
#pragma unroll
for(int d=0; d<BLOCK_D_SIZE; d+=16){
if(Row+d < ndisp)
tile[threadIdx.y+d][threadIdx.x] = cost [(Row+d)*rows*cols+Col ];
}
}
__syncthreads();
Col = blockIdx.x*blockDim.x+threadIdx.y;
Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x;
#pragma unroll
for(int d=0; d<BLOCK_D_SIZE; d+=16){
if((Col+d) < cols*rows && Row<ndisp)
temp_cost[ (Col+d)*ndisp+Row ] = tile[threadIdx.x][threadIdx.y+d];
}
}
template<typename T>
__global__ void swap_axis_back(const T* __restrict__ cost, T* temp_cost, const int rows, const int cols, const int ndisp ){
int Col = blockIdx.x*blockDim.x + threadIdx.y;
int Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x;
__shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1];
if( Col< cols*rows){
#pragma unroll
for(int d=0; d<BLOCK_D_SIZE; d+=16){
tile[threadIdx.y+d][threadIdx.x] = cost [(Col+d)*ndisp+Row ];
}
}
__syncthreads();
Col = blockIdx.x*blockDim.x + threadIdx.x;
Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y;
#pragma unroll
for(int d=0; d<BLOCK_D_SIZE; d+=16){
if((Col+d) < cols*rows)
temp_cost[ (Row+d)*rows*cols+Col ] = tile[threadIdx.x][threadIdx.y+d];
}
}
template<typename T>
__global__ void transpose(const T* __restrict__ cost, T* temp_cost, const int dim1, const int dim2){
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int Row = blockIdx.y*BLOCK_D_SIZE + threadIdx.y;
int disp = blockIdx.z*dim1*dim2;
__shared__ T tile[BLOCK_D_SIZE][BLOCK_D_SIZE+1];
if( Col< dim2){
#pragma unroll
for(int d=0; d<BLOCK_D_SIZE; d+=16){
if((Row+d)<dim1)
tile[threadIdx.y+d][threadIdx.x] = cost [disp+(Row+d)*dim2+Col ];
}
}
__syncthreads();
Col = blockIdx.x*blockDim.x+threadIdx.y;
Row = blockIdx.y*BLOCK_D_SIZE+threadIdx.x;
#pragma unroll
for(int d=0; d<BLOCK_D_SIZE; d+=16){
if((Col+d) < dim2 && Row < dim1)
temp_cost[disp+(Col+d)*dim1+Row ] = tile[threadIdx.x][threadIdx.y+d];
}
}
template<typename T>
__global__ void VerticalIntegralKernel(T* output, const int rows , const int cols , const int ndisp,const int offset){
extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[];
T* slice_sm = reinterpret_cast<T *>(shared_mem);
int Row = threadIdx.x+offset;
int Col = blockIdx.y;
int disp = blockIdx.z;
T val=0,temp=0,temp1=0;
if( Row < rows){
val = output[disp*rows*cols+Row*cols+Col];
for(int i=1; i<32; i<<=1 ){
temp = __shfl_up(val,i);
if( (threadIdx.x & 31) >=i )
val +=temp;
}
if( (threadIdx.x & 31) ==31 || Row==(rows-1) )
slice_sm[threadIdx.x/32] = val;
}
__syncthreads();
temp=0;
if( threadIdx.x < 32 ){
temp = slice_sm[threadIdx.x];
for(int i=1; i<32; i<<=1){
temp1 = __shfl_up(temp,i);
if( (threadIdx.x & 31) >=i )
temp += temp1;
}
slice_sm[threadIdx.x] = temp;
}
__syncthreads();
if( Row < rows){
if(threadIdx.x >=32)
val += slice_sm[threadIdx.x/32-1];
output[disp*rows*cols+Row*cols+Col] = val;
}
}
// This kernel should be converted to an in-place integral kernel. Profiling, though, shows that it is not a bottleneck for the method.
template<typename T,typename I>
__global__ void HorizontalIntegralKernel_outofplace(T* integral_vol,const I* input, const int integrrows , const int integrcols , const int ndisp,const int offset){
extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[];
T* slice_sm = reinterpret_cast<T *>(shared_mem);
int Col = threadIdx.x+offset;
int Row = blockIdx.x;
int disp = blockIdx.z;
if( Col < integrcols && disp < ndisp){
slice_sm[threadIdx.x] = input[disp*integrrows*integrcols+Row*integrcols+Col];
}
if(offset>0 && threadIdx.x==0){
slice_sm[threadIdx.x] = integral_vol[disp*integrrows*integrcols+Row*integrcols+Col];
}
T sum;
for(int stride=1; stride< blockDim.x; stride*=2){
__syncthreads();
if((int)threadIdx.x-stride>=0 && Col < integrcols && disp < ndisp )
sum = slice_sm[threadIdx.x] + slice_sm[threadIdx.x-stride];
__syncthreads();
if((int)threadIdx.x-stride>=0 && Col < integrcols && disp < ndisp )
slice_sm[threadIdx.x] = sum;
}
if( Col<integrcols && disp < ndisp){
integral_vol[disp*integrrows*integrcols+Row*integrcols+Col] = slice_sm[threadIdx.x];
}
}
template<typename T>
__global__ void IntegralKernel(T* output, const int dim1 , const int dim2 , const int ndisp,const int offset){
extern __shared__ __align__(sizeof(T)) unsigned char shared_mem[];
T* slice_sm = reinterpret_cast<T *>(shared_mem);
int Col = threadIdx.x+offset;
int Row = blockIdx.y;
int disp = blockIdx.z;
T val=0,temp=0,temp1=0;
if( Col < dim2){
val = output[disp*dim1*dim2+Row*dim2+Col];
for(int i=1; i<32; i<<=1 ){
temp = __shfl_up(val,i);
if( (threadIdx.x & 31) >=i )
val +=temp;
}
if( (threadIdx.x & 31) ==31 || Col==(dim2-1) )
slice_sm[threadIdx.x/32] = val;
}
__syncthreads();
temp=0;
if( threadIdx.x < 32 ){
temp = slice_sm[threadIdx.x];
for(int i=1; i<32; i<<=1){
temp1 = __shfl_up(temp,i);
if( (threadIdx.x & 31) >=i )
temp += temp1;
}
slice_sm[threadIdx.x] = temp;
}
__syncthreads();
if( Col < dim2){
if(threadIdx.x >=32)
val += slice_sm[threadIdx.x/32-1];
output[disp*dim1*dim2+Row*dim2+Col] = val;
}
}
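// Worked example of the scan above (illustrative): within each warp the
// __shfl_up loop computes an inclusive prefix sum, e.g. a row segment of all
// ones becomes 1, 2, 3, ..., 32. Lane 31 of every warp (or the last valid
// element) stores its warp total into slice_sm; the second scan over slice_sm
// produces per-warp offsets, which threads of warp 1 and above add to their
// values, yielding the running sum of the whole row along dim2.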
__device__ void sort(float *x, int n)
{
for (int i = 0; i < n - 1; i++) {
int min = i;
for (int j = i + 1; j < n; j++) {
if (x[j] < x[min]) {
min = j;
}
}
float tmp = x[min];
x[min] = x[i];
x[i] = tmp;
}
}
#define INDEX_D(dim0, dim1, dim2, dim3) \
assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \
((((dim0) * size3 + (dim3)) * size1 + (dim1)) * size2 + dim2)
#define INDEX(dim0, dim1, dim2, dim3) \
assert((dim1) >= 0 && (dim1) < size1 && (dim2) >= 0 && (dim2) < size2 && (dim3) >= 0 && (dim3) < size3), \
((((dim0) * size1 + (dim1)) * size2 + (dim2)) * size3 + dim3)
template <int sgm_direction,typename T>
__global__ void sgm_loop(float *x0, float *x1, T *input, T *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step)
{
int x, y, dx, dy;
int d = threadIdx.x;
if (sgm_direction == 0) {
/* right */
x = step; //step;
y = blockIdx.x;
dx = 1;
dy = 0;
} else if (sgm_direction == 1) {
/* left */
x = size2 - 1 - step; //step;
y = blockIdx.x;
dx = -1;
dy = 0;
} else if (sgm_direction == 2) {
/* down */
x = blockIdx.x;
y = step;//step;
dx = 0;
dy = 1;
} else if (sgm_direction == 3) {
/* up */
x = blockIdx.x;
y = size1 - 1 - step; //step;
dx = 0;
dy = -1;
}
if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) {
float val = input[INDEX(0, y, x, d)];
output[INDEX(0, y, x, d)] += val;
tmp[d * size2 + blockIdx.x] = val;
return;
}
extern __shared__ float sgm_shared[];
float * output_s = &sgm_shared[0];
float * output_min= &sgm_shared[size3];
output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x];
__syncthreads();
for (int i = 256; i > 0; i /= 2) {
if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) {
output_min[d] = output_min[d + i];
}
__syncthreads();
}
int ind2 = y * size2 + x;
float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx);
float D2;
int xx = x + d * direction;
if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) {
D2 = 10;
} else {
D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx);
}
float P1, P2;
if (D1 < tau_so && D2 < tau_so) {
P1 = pi1;
P2 = pi2;
} else if (D1 > tau_so && D2 > tau_so) {
P1 = pi1 / (sgm_q1 * sgm_q2);
P2 = pi2 / (sgm_q1 * sgm_q2);
} else {
P1 = pi1 / sgm_q1;
P2 = pi2 / sgm_q1;
}
float cost = min(output_s[d], output_min[0] + P2);
if (d - 1 >= 0) {
cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1));
}
if (d + 1 < size3) {
cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1));
}
float val = (input[INDEX(0, y, x, d)] + cost - output_min[0]);
output[INDEX(0, y, x, d)] += val;
tmp[d * size2 + blockIdx.x] = val;
}
template <int sgm_direction>
__global__ void sgm2(uint8 *x0, uint8 *x1, float *input, float *output, float *tmp, float pi1, float pi2, float tau_so, float alpha1, float sgm_q1, float sgm_q2, int direction, int size1, int size2, int size3, int step)
{
int x, y, dx, dy;
int d = threadIdx.x;
if (sgm_direction == 0) {
/* right */
x = blockIdx.y; //step;
y = blockIdx.x;
dx = 1;
dy = 0;
} else if (sgm_direction == 1) {
/* left */
x = size2 - 1 - blockIdx.y; //step;
y = blockIdx.x;
dx = -1;
dy = 0;
} else if (sgm_direction == 2) {
/* down */
x = blockIdx.x;
y = blockIdx.y;//step;
dx = 0;
dy = 1;
} else if (sgm_direction == 3) {
/* up */
x = blockIdx.x;
y = size1 - 1 - blockIdx.y; //step;
dx = 0;
dy = -1;
}
if (y - dy < 0 || y - dy >= size1 || x - dx < 0 || x - dx >= size2) {
float val = input[INDEX(0, y, x, d)];
output[INDEX_D(0, y, x, d)] += val;
tmp[d * size2 + blockIdx.x] = val;
return;
}
__shared__ double output_s[400], output_min[400];
output_s[d] = output_min[d] = tmp[d * size2 + blockIdx.x];
__syncthreads();
for (int i = 256; i > 0; i /= 2) {
if (d < i && d + i < size3 && output_min[d + i] < output_min[d]) {
output_min[d] = output_min[d + i];
}
__syncthreads();
}
int ind2 = y * size2 + x;
float D1 = COLOR_DIFF(x0, ind2, ind2 - dy * size2 - dx);
float D2;
int xx = x + d * direction;
if (xx < 0 || xx >= size2 || xx - dx < 0 || xx - dx >= size2) {
D2 = 10;
} else {
D2 = COLOR_DIFF(x1, ind2 + d * direction, ind2 + d * direction - dy * size2 - dx);
}
float P1, P2;
if (D1 < tau_so && D2 < tau_so) {
P1 = pi1;
P2 = pi2;
} else if (D1 > tau_so && D2 > tau_so) {
P1 = pi1 / (sgm_q1 * sgm_q2);
P2 = pi2 / (sgm_q1 * sgm_q2);
} else {
P1 = pi1 / sgm_q1;
P2 = pi2 / sgm_q1;
}
float cost = min(output_s[d], output_min[0] + P2);
if (d - 1 >= 0) {
cost = min(cost, output_s[d - 1] + (sgm_direction == 2 ? P1 / alpha1 : P1));
}
if (d + 1 < size3) {
cost = min(cost, output_s[d + 1] + (sgm_direction == 3 ? P1 / alpha1 : P1));
}
float val = (input[INDEX(0, y, x, d)] + cost - output_min[0])*.25;
output[INDEX_D(0, y, x, d)] += val;
tmp[d * size2 + blockIdx.x] = val;
}
__global__ void cross(float *x0, float *out, int size, int dim2, int dim3, int L1, float tau1)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int dir = id;
int x = dir % dim3;
dir /= dim3;
int y = dir % dim2;
dir /= dim2;
int dx = 0;
int dy = 0;
if (dir == 0) {
dx = -1;
} else if (dir == 1) {
dx = 1;
} else if (dir == 2) {
dy = -1;
} else if (dir == 3) {
dy = 1;
} else {
assert(0);
}
int xx, yy, ind1, ind2, dist;
ind1 = y * dim3 + x;
for (xx = x + dx, yy = y + dy;;xx += dx, yy += dy) {
if (xx < 0 || xx >= dim3 || yy < 0 || yy >= dim2) break;
dist = max(abs(xx - x), abs(yy - y));
if (dist == 1) continue;
ind2 = yy * dim3 + xx;
/* rule 1 */
if (COLOR_DIFF(x0, ind1, ind2) >= tau1) break;
/* rule 2 */
if (dist >= L1) break;
}
out[id] = dir <= 1 ? xx : yy;
}
}
template<typename T>
__global__ void cbca(float *x0c, float *x1c, T *vol, T *out, int size, int dim2, int dim3, int direction)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int d = id;
int x = d % dim3;
d /= dim3;
int y = d % dim2;
d /= dim2;
if (x + d * direction < 0 || x + d * direction >= dim3) {
out[id] = vol[id];
} else {
float sum = 0;
int cnt = 0;
int yy_s = max(x0c[(2 * dim2 + y) * dim3 + x], x1c[(2 * dim2 + y) * dim3 + x + d * direction]);
int yy_t = min(x0c[(3 * dim2 + y) * dim3 + x], x1c[(3 * dim2 + y) * dim3 + x + d * direction]);
for (int yy = yy_s + 1; yy < yy_t; yy++) {
int xx_s = max(x0c[(0 * dim2 + yy) * dim3 + x], x1c[(0 * dim2 + yy) * dim3 + x + d * direction] - d * direction);
int xx_t = min(x0c[(1 * dim2 + yy) * dim3 + x], x1c[(1 * dim2 + yy) * dim3 + x + d * direction] - d * direction);
for (int xx = xx_s + 1; xx < xx_t; xx++) {
float val = vol[(d * dim2 + yy) * dim3 + xx];
assert(!isnan(val));
sum += val;
cnt++;
}
}
assert(cnt > 0);
out[id] = sum / cnt;
assert(!isnan(out[id]));
}
}
}
template <typename T>
__global__ void subpixel_enchancement(float *d0, T *c2, float *out, int size, int dim23, int disp_max) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int d = d0[id];
out[id] = d;
if (1 <= d && d < disp_max - 1) {
float cn = c2[(d - 1) * dim23 + id];
float cz = c2[d * dim23 + id];
float cp = c2[(d + 1) * dim23 + id];
float denom = 2 * (cp + cn - 2 * cz);
if (denom > 1e-5) {
out[id] = d - min(1.0, max(-1.0, (cp - cn) / denom));
}
}
}
}
__global__ void median2d(float *img, float *out, int size, int dim2, int dim3, int kernel_radius)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int x = id % dim3;
int y = id / dim3;
float xs[11 * 11];
int xs_size = 0;
for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) {
for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) {
if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) {
xs[xs_size++] = img[yy * dim3 + xx];
}
}
}
sort(xs, xs_size);
out[id] = xs[xs_size / 2];
}
}
__global__ void mean2d(float *img, float *kernel, float *out, int size, int kernel_radius, int dim2, int dim3, float alpha2)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int x = id % dim3;
int y = id / dim3;
float sum = 0;
float cnt = 0;
int i = 0;
for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) {
for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++, i++) {
if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2 && abs(img[yy * dim3 + xx] - img[y * dim3 + x]) < alpha2) {
sum += img[yy * dim3 + xx] * kernel[i];
cnt += kernel[i];
}
}
}
out[id] = sum / cnt;
}
}
#include <dgl/runtime/device_api.h>
#include "../../array/cuda/dgl_cub.cuh"
#include "../../runtime/cuda/cuda_common.h"
#include "../../runtime/workspace.h"
using namespace dgl::runtime;
namespace dgl {
namespace partition {
namespace impl {
namespace {
/**
* @brief Kernel to map global element IDs to partition IDs by remainder.
*
* @tparam IdType The type of ID.
* @param global The global element IDs.
* @param num_elements The number of element IDs.
* @param num_parts The number of partitions.
 * @param part_id The mapped partition ID (output).
*/
template<typename IdType>
__global__ void _MapProcByRemainderKernel(
const IdType * const global,
const int64_t num_elements,
const int64_t num_parts,
IdType * const part_id) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = blockDim.x*static_cast<int64_t>(blockIdx.x)+threadIdx.x;
if (idx < num_elements) {
part_id[idx] = global[idx] % num_parts;
}
}
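// Worked example (illustrative): with num_parts = 4, global IDs 0..7 map to
// partition IDs {0, 1, 2, 3, 0, 1, 2, 3}, i.e. element g lives on partition
// g % num_parts.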
/**
* @brief Kernel to map global element IDs to partition IDs, using a bit-mask.
 * The number of partitions must be a power of two.
*
* @tparam IdType The type of ID.
* @param global The global element IDs.
* @param num_elements The number of element IDs.
* @param mask The bit-mask with 1's for each bit to keep from the element ID to
* extract the partition ID (e.g., an 8 partition mask would be 0x07).
 * @param part_id The mapped partition ID (output).
*/
template<typename IdType>
__global__ void _MapProcByMaskRemainderKernel(
const IdType * const global,
const int64_t num_elements,
const IdType mask,
IdType * const part_id) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = blockDim.x*static_cast<int64_t>(blockIdx.x)+threadIdx.x;
if (idx < num_elements) {
part_id[idx] = global[idx] & mask;
}
}
/**
* @brief Kernel to map global element IDs to local element IDs.
*
* @tparam IdType The type of ID.
* @param global The global element IDs.
* @param num_elements The number of IDs.
* @param num_parts The number of partitions.
* @param local The local element IDs (output).
*/
template<typename IdType>
__global__ void _MapLocalIndexByRemainderKernel(
const IdType * const global,
const int64_t num_elements,
const int num_parts,
IdType * const local) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < num_elements) {
local[idx] = global[idx] / num_parts;
}
}
/**
* @brief Kernel to map local element IDs within a partition to their global
* IDs, using the remainder over the number of partitions.
*
* @tparam IdType The type of ID.
* @param local The local element IDs.
* @param part_id The partition to map local elements from.
* @param num_elements The number of elements to map.
* @param num_parts The number of partitions.
* @param global The global element IDs (output).
*/
template<typename IdType>
__global__ void _MapGlobalIndexByRemainderKernel(
const IdType * const local,
const int part_id,
const int64_t num_elements,
const int num_parts,
IdType * const global) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = threadIdx.x+blockDim.x*blockIdx.x;
assert(part_id < num_parts);
if (idx < num_elements) {
global[idx] = (local[idx] * num_parts) + part_id;
}
}
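// Worked example (illustrative): with num_parts = 4 and part_id = 2, local ID 3
// maps to global ID 3 * 4 + 2 = 14. _MapLocalIndexByRemainderKernel inverts
// this via 14 / 4 = 3, and _MapProcByRemainderKernel recovers the partition
// via 14 % 4 = 2.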
/**
* @brief Device function to perform a binary search to find to which partition a
* given ID belongs.
*
* @tparam RangeType The type of range.
* @param range The prefix-sum of IDs assigned to partitions.
* @param num_parts The number of partitions.
* @param target The element ID to find the partition of.
*
* @return The partition.
*/
template<typename RangeType>
__device__ RangeType _SearchRange(
const RangeType * const range,
const int num_parts,
const RangeType target) {
int start = 0;
int end = num_parts;
int cur = (end+start)/2;
assert(range[0] == 0);
assert(target < range[num_parts]);
while (start+1 < end) {
if (target < range[cur]) {
end = cur;
} else {
start = cur;
}
cur = (start+end)/2;
}
return cur;
}
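// Worked example (illustrative): for range = {0, 5, 9, 14} and num_parts = 3,
// target 6 falls in [5, 9), so the search returns partition 1; target 13 falls
// in [9, 14) and returns partition 2.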
/**
* @brief Kernel to map element IDs to partition IDs.
*
* @tparam IdType The type of element ID.
 * @tparam RangeType The type of the range.
* @param range The prefix-sum of IDs assigned to partitions.
* @param global The global element IDs.
* @param num_elements The number of element IDs.
* @param num_parts The number of partitions.
* @param part_id The partition ID assigned to each element (output).
*/
template<typename IdType, typename RangeType>
__global__ void _MapProcByRangeKernel(
const RangeType * const range,
const IdType * const global,
const int64_t num_elements,
const int64_t num_parts,
IdType * const part_id) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = blockDim.x*static_cast<int64_t>(blockIdx.x)+threadIdx.x;
// rely on caching to load the range into L1 cache
if (idx < num_elements) {
part_id[idx] = static_cast<IdType>(_SearchRange(
range,
static_cast<int>(num_parts),
static_cast<RangeType>(global[idx])));
}
}
/**
* @brief Kernel to map global element IDs to their ID within their respective
* partition.
*
* @tparam IdType The type of element ID.
* @tparam RangeType The type of the range.
* @param range The prefix-sum of IDs assigned to partitions.
* @param global The global element IDs.
* @param num_elements The number of elements.
* @param num_parts The number of partitions.
* @param local The local element IDs (output).
*/
template<typename IdType, typename RangeType>
__global__ void _MapLocalIndexByRangeKernel(
const RangeType * const range,
const IdType * const global,
const int64_t num_elements,
const int num_parts,
IdType * const local) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = threadIdx.x+blockDim.x*blockIdx.x;
// rely on caching to load the range into L1 cache
if (idx < num_elements) {
const int proc = _SearchRange(
range,
static_cast<int>(num_parts),
static_cast<RangeType>(global[idx]));
local[idx] = global[idx] - range[proc];
}
}
/**
* @brief Kernel to map local element IDs within a partition to their global
* IDs.
*
* @tparam IdType The type of ID.
* @tparam RangeType The type of the range.
 * @param range The prefix-sum of IDs assigned to partitions.
* @param local The local element IDs.
* @param part_id The partition to map local elements from.
* @param num_elements The number of elements to map.
* @param num_parts The number of partitions.
* @param global The global element IDs (output).
*/
template<typename IdType, typename RangeType>
__global__ void _MapGlobalIndexByRangeKernel(
const RangeType * const range,
const IdType * const local,
const int part_id,
const int64_t num_elements,
const int num_parts,
IdType * const global) {
assert(num_elements <= gridDim.x*blockDim.x);
const int64_t idx = threadIdx.x+blockDim.x*blockIdx.x;
assert(part_id < num_parts);
// rely on caching to load the range into L1 cache
if (idx < num_elements) {
global[idx] = local[idx] + range[part_id];
}
}
} // namespace
// Remainder Based Partition Operations
template <DLDeviceType XPU, typename IdType>
std::pair<IdArray, NDArray>
GeneratePermutationFromRemainder(
int64_t array_size,
int num_parts,
IdArray in_idx) {
std::pair<IdArray, NDArray> result;
const auto& ctx = in_idx->ctx;
auto device = DeviceAPI::Get(ctx);
cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream;
const int64_t num_in = in_idx->shape[0];
CHECK_GE(num_parts, 1) << "The number of partitions (" << num_parts <<
") must be at least 1.";
if (num_parts == 1) {
// no permutation
result.first = aten::Range(0, num_in, sizeof(IdType)*8, ctx);
result.second = aten::Full(num_in, num_parts, sizeof(int64_t)*8, ctx);
return result;
}
result.first = aten::NewIdArray(num_in, ctx, sizeof(IdType)*8);
result.second = aten::Full(0, num_parts, sizeof(int64_t)*8, ctx);
int64_t * out_counts = static_cast<int64_t*>(result.second->data);
if (num_in == 0) {
// now that we've zero'd out_counts, nothing left to do for an empty
// mapping
return result;
}
const int64_t part_bits =
static_cast<int64_t>(std::ceil(std::log2(num_parts)));
// First, generate a mapping of indexes to processors
Workspace<IdType> proc_id_in(device, ctx, num_in);
{
const dim3 block(256);
const dim3 grid((num_in+block.x-1)/block.x);
if (num_parts < (1 << part_bits)) {
// num_parts is not a power of 2
CUDA_KERNEL_CALL(_MapProcByRemainderKernel, grid, block, 0, stream,
static_cast<const IdType*>(in_idx->data),
num_in,
num_parts,
proc_id_in.get());
} else {
// num_parts is a power of 2
CUDA_KERNEL_CALL(_MapProcByMaskRemainderKernel, grid, block, 0, stream,
static_cast<const IdType*>(in_idx->data),
num_in,
static_cast<IdType>(num_parts-1), // bit mask
proc_id_in.get());
}
}
// then create a permutation array that groups processors together by
// performing a radix sort
Workspace<IdType> proc_id_out(device, ctx, num_in);
IdType * perm_out = static_cast<IdType*>(result.first->data);
{
IdArray perm_in = aten::Range(0, num_in, sizeof(IdType)*8, ctx);
size_t sort_workspace_size;
CUDA_CALL(cub::DeviceRadixSort::SortPairs(nullptr, sort_workspace_size,
proc_id_in.get(), proc_id_out.get(), static_cast<IdType*>(perm_in->data), perm_out,
num_in, 0, part_bits, stream));
Workspace<void> sort_workspace(device, ctx, sort_workspace_size);
CUDA_CALL(cub::DeviceRadixSort::SortPairs(sort_workspace.get(), sort_workspace_size,
proc_id_in.get(), proc_id_out.get(), static_cast<IdType*>(perm_in->data), perm_out,
num_in, 0, part_bits, stream));
}
// explicitly free so workspace can be re-used
proc_id_in.free();
// perform a histogram and then prefixsum on the sorted proc_id vector
// Count the number of values to be sent to each processor
{
using AtomicCount = unsigned long long; // NOLINT
static_assert(sizeof(AtomicCount) == sizeof(*out_counts),
"AtomicCount must be the same width as int64_t for atomicAdd "
"in cub::DeviceHistogram::HistogramEven() to work");
// TODO(dlasalle): Once https://github.com/NVIDIA/cub/pull/287 is merged,
// add a compile time check against the cub version to allow
// num_in > (2 << 31).
CHECK(num_in < static_cast<int64_t>(std::numeric_limits<int>::max())) <<
"number of values to insert into histogram must be less than max "
"value of int.";
size_t hist_workspace_size;
CUDA_CALL(cub::DeviceHistogram::HistogramEven(
nullptr,
hist_workspace_size,
proc_id_out.get(),
reinterpret_cast<AtomicCount*>(out_counts),
num_parts+1,
static_cast<IdType>(0),
static_cast<IdType>(num_parts+1),
static_cast<int>(num_in),
stream));
Workspace<void> hist_workspace(device, ctx, hist_workspace_size);
CUDA_CALL(cub::DeviceHistogram::HistogramEven(
hist_workspace.get(),
hist_workspace_size,
proc_id_out.get(),
reinterpret_cast<AtomicCount*>(out_counts),
num_parts+1,
static_cast<IdType>(0),
static_cast<IdType>(num_parts+1),
static_cast<int>(num_in),
stream));
}
return result;
}
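// Worked example of the intended behaviour (illustrative): for
// in_idx = {5, 2, 7, 4} and num_parts = 2, the remainder mapping gives
// proc_id = {1, 0, 1, 0}. The stable radix sort on proc_id then yields the
// permutation {1, 3, 0, 2} (partition-0 elements first, then partition-1
// elements), and result.second receives the per-partition counts {2, 2}.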
template std::pair<IdArray, IdArray>
GeneratePermutationFromRemainder<kDLGPU, int32_t>(
int64_t array_size,
int num_parts,
IdArray in_idx);
template std::pair<IdArray, IdArray>
GeneratePermutationFromRemainder<kDLGPU, int64_t>(
int64_t array_size,
int num_parts,
IdArray in_idx);
template <DLDeviceType XPU, typename IdType>
IdArray MapToLocalFromRemainder(
const int num_parts,
IdArray global_idx) {
const auto& ctx = global_idx->ctx;
cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream;
if (num_parts > 1) {
IdArray local_idx = aten::NewIdArray(global_idx->shape[0], ctx,
sizeof(IdType)*8);
const dim3 block(128);
const dim3 grid((global_idx->shape[0] +block.x-1)/block.x);
CUDA_KERNEL_CALL(
_MapLocalIndexByRemainderKernel,
grid,
block,
0,
stream,
static_cast<const IdType*>(global_idx->data),
global_idx->shape[0],
num_parts,
static_cast<IdType*>(local_idx->data));
return local_idx;
} else {
// no mapping to be done
return global_idx;
}
}
template IdArray
MapToLocalFromRemainder<kDLGPU, int32_t>(
int num_parts,
IdArray in_idx);
template IdArray
MapToLocalFromRemainder<kDLGPU, int64_t>(
int num_parts,
IdArray in_idx);
template <DLDeviceType XPU, typename IdType>
IdArray MapToGlobalFromRemainder(
const int num_parts,
IdArray local_idx,
const int part_id) {
CHECK_LT(part_id, num_parts) << "Invalid partition id " << part_id <<
"/" << num_parts;
CHECK_GE(part_id, 0) << "Invalid partition id " << part_id <<
"/" << num_parts;
const auto& ctx = local_idx->ctx;
cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream;
if (num_parts > 1) {
IdArray global_idx = aten::NewIdArray(local_idx->shape[0], ctx,
sizeof(IdType)*8);
const dim3 block(128);
const dim3 grid((local_idx->shape[0] +block.x-1)/block.x);
CUDA_KERNEL_CALL(
_MapGlobalIndexByRemainderKernel,
grid,
block,
0,
stream,
static_cast<const IdType*>(local_idx->data),
part_id,
global_idx->shape[0],
num_parts,
static_cast<IdType*>(global_idx->data));
return global_idx;
} else {
// no mapping to be done
return local_idx;
}
}
template IdArray
MapToGlobalFromRemainder<kDLGPU, int32_t>(
int num_parts,
IdArray in_idx,
int part_id);
template IdArray
MapToGlobalFromRemainder<kDLGPU, int64_t>(
int num_parts,
IdArray in_idx,
int part_id);
// Range Based Partition Operations
template <DLDeviceType XPU, typename IdType, typename RangeType>
std::pair<IdArray, NDArray>
GeneratePermutationFromRange(
int64_t array_size,
int num_parts,
IdArray range,
IdArray in_idx) {
std::pair<IdArray, NDArray> result;
const auto& ctx = in_idx->ctx;
auto device = DeviceAPI::Get(ctx);
cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream;
const int64_t num_in = in_idx->shape[0];
CHECK_GE(num_parts, 1) << "The number of partitions (" << num_parts <<
") must be at least 1.";
if (num_parts == 1) {
// no permutation
result.first = aten::Range(0, num_in, sizeof(IdType)*8, ctx);
result.second = aten::Full(num_in, num_parts, sizeof(int64_t)*8, ctx);
return result;
}
result.first = aten::NewIdArray(num_in, ctx, sizeof(IdType)*8);
result.second = aten::Full(0, num_parts, sizeof(int64_t)*8, ctx);
int64_t * out_counts = static_cast<int64_t*>(result.second->data);
if (num_in == 0) {
// now that we've zero'd out_counts, nothing left to do for an empty
// mapping
return result;
}
const int64_t part_bits =
static_cast<int64_t>(std::ceil(std::log2(num_parts)));
// First, generate a mapping of indexes to processors
Workspace<IdType> proc_id_in(device, ctx, num_in);
{
const dim3 block(256);
const dim3 grid((num_in+block.x-1)/block.x);
CUDA_KERNEL_CALL(_MapProcByRangeKernel, grid, block, 0, stream,
static_cast<const RangeType*>(range->data),
static_cast<const IdType*>(in_idx->data),
num_in,
num_parts,
proc_id_in.get());
}
// then create a permutation array that groups processors together by
// performing a radix sort
Workspace<IdType> proc_id_out(device, ctx, num_in);
IdType * perm_out = static_cast<IdType*>(result.first->data);
{
IdArray perm_in = aten::Range(0, num_in, sizeof(IdType)*8, ctx);
size_t sort_workspace_size;
CUDA_CALL(cub::DeviceRadixSort::SortPairs(nullptr, sort_workspace_size,
proc_id_in.get(), proc_id_out.get(), static_cast<IdType*>(perm_in->data), perm_out,
num_in, 0, part_bits, stream));
Workspace<void> sort_workspace(device, ctx, sort_workspace_size);
CUDA_CALL(cub::DeviceRadixSort::SortPairs(sort_workspace.get(), sort_workspace_size,
proc_id_in.get(), proc_id_out.get(), static_cast<IdType*>(perm_in->data), perm_out,
num_in, 0, part_bits, stream));
}
// explicitly free so workspace can be re-used
proc_id_in.free();
// perform a histogram and then prefixsum on the sorted proc_id vector
// Count the number of values to be sent to each processor
{
using AtomicCount = unsigned long long; // NOLINT
static_assert(sizeof(AtomicCount) == sizeof(*out_counts),
"AtomicCount must be the same width as int64_t for atomicAdd "
"in cub::DeviceHistogram::HistogramEven() to work");
// TODO(dlasalle): Once https://github.com/NVIDIA/cub/pull/287 is merged,
// add a compile time check against the cub version to allow
// num_in > (2 << 31).
CHECK(num_in < static_cast<int64_t>(std::numeric_limits<int>::max())) <<
"number of values to insert into histogram must be less than max "
"value of int.";
size_t hist_workspace_size;
CUDA_CALL(cub::DeviceHistogram::HistogramEven(
nullptr,
hist_workspace_size,
proc_id_out.get(),
reinterpret_cast<AtomicCount*>(out_counts),
num_parts+1,
static_cast<IdType>(0),
static_cast<IdType>(num_parts+1),
static_cast<int>(num_in),
stream));
Workspace<void> hist_workspace(device, ctx, hist_workspace_size);
CUDA_CALL(cub::DeviceHistogram::HistogramEven(
hist_workspace.get(),
hist_workspace_size,
proc_id_out.get(),
reinterpret_cast<AtomicCount*>(out_counts),
num_parts+1,
static_cast<IdType>(0),
static_cast<IdType>(num_parts+1),
static_cast<int>(num_in),
stream));
}
return result;
}
template std::pair<IdArray, IdArray>
GeneratePermutationFromRange<kDLGPU, int32_t, int32_t>(
int64_t array_size,
int num_parts,
IdArray range,
IdArray in_idx);
template std::pair<IdArray, IdArray>
GeneratePermutationFromRange<kDLGPU, int64_t, int32_t>(
int64_t array_size,
int num_parts,
IdArray range,
IdArray in_idx);
template std::pair<IdArray, IdArray>
GeneratePermutationFromRange<kDLGPU, int32_t, int64_t>(
int64_t array_size,
int num_parts,
IdArray range,
IdArray in_idx);
template std::pair<IdArray, IdArray>
GeneratePermutationFromRange<kDLGPU, int64_t, int64_t>(
int64_t array_size,
int num_parts,
IdArray range,
IdArray in_idx);
template <DLDeviceType XPU, typename IdType, typename RangeType>
IdArray MapToLocalFromRange(
const int num_parts,
IdArray range,
IdArray global_idx) {
const auto& ctx = global_idx->ctx;
cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream;
if (num_parts > 1 && global_idx->shape[0] > 0) {
IdArray local_idx = aten::NewIdArray(global_idx->shape[0], ctx,
sizeof(IdType)*8);
const dim3 block(128);
const dim3 grid((global_idx->shape[0] +block.x-1)/block.x);
CUDA_KERNEL_CALL(
_MapLocalIndexByRangeKernel,
grid,
block,
0,
stream,
static_cast<const RangeType*>(range->data),
static_cast<const IdType*>(global_idx->data),
global_idx->shape[0],
num_parts,
static_cast<IdType*>(local_idx->data));
return local_idx;
} else {
// no mapping to be done
return global_idx;
}
}
template IdArray
MapToLocalFromRange<kDLGPU, int32_t, int32_t>(
int num_parts,
IdArray range,
IdArray in_idx);
template IdArray
MapToLocalFromRange<kDLGPU, int64_t, int32_t>(
int num_parts,
IdArray range,
IdArray in_idx);
template IdArray
MapToLocalFromRange<kDLGPU, int32_t, int64_t>(
int num_parts,
IdArray range,
IdArray in_idx);
template IdArray
MapToLocalFromRange<kDLGPU, int64_t, int64_t>(
int num_parts,
IdArray range,
IdArray in_idx);
template <DLDeviceType XPU, typename IdType, typename RangeType>
IdArray MapToGlobalFromRange(
const int num_parts,
IdArray range,
IdArray local_idx,
const int part_id) {
CHECK_LT(part_id, num_parts) << "Invalid partition id " << part_id <<
"/" << num_parts;
CHECK_GE(part_id, 0) << "Invalid partition id " << part_id <<
"/" << num_parts;
const auto& ctx = local_idx->ctx;
cudaStream_t stream = CUDAThreadEntry::ThreadLocal()->stream;
if (num_parts > 1 && local_idx->shape[0] > 0) {
IdArray global_idx = aten::NewIdArray(local_idx->shape[0], ctx,
sizeof(IdType)*8);
const dim3 block(128);
const dim3 grid((local_idx->shape[0] +block.x-1)/block.x);
CUDA_KERNEL_CALL(
_MapGlobalIndexByRangeKernel,
grid,
block,
0,
stream,
static_cast<const RangeType*>(range->data),
static_cast<const IdType*>(local_idx->data),
part_id,
global_idx->shape[0],
num_parts,
static_cast<IdType*>(global_idx->data));
return global_idx;
} else {
// no mapping to be done
return local_idx;
}
}
template IdArray
MapToGlobalFromRange<kDLGPU, int32_t, int32_t>(
int num_parts,
IdArray range,
IdArray in_idx,
int part_id);
template IdArray
MapToGlobalFromRange<kDLGPU, int64_t, int32_t>(
int num_parts,
IdArray range,
IdArray in_idx,
int part_id);
template IdArray
MapToGlobalFromRange<kDLGPU, int32_t, int64_t>(
int num_parts,
IdArray range,
IdArray in_idx,
int part_id);
template IdArray
MapToGlobalFromRange<kDLGPU, int64_t, int64_t>(
int num_parts,
IdArray range,
IdArray in_idx,
int part_id);
} // namespace impl
} // namespace partition
} // namespace dgl
#include <assert.h>
#include <stdint.h>
#include "rxmesh/context.h"
#include "rxmesh/kernels/collective.cuh"
#include "rxmesh/kernels/loader.cuh"
#include "rxmesh/kernels/util.cuh"
#include "rxmesh/types.h"
namespace rxmesh {
template <uint32_t rowOffset,
uint32_t blockThreads,
uint32_t itemPerThread = TRANSPOSE_ITEM_PER_THREAD>
__device__ __forceinline__ void block_mat_transpose(const uint32_t num_rows,
const uint32_t num_cols,
uint16_t* mat,
uint16_t* output,
int shift = 0)
{
// 1) Load mat into registers and zero out mat
uint16_t thread_data[itemPerThread];
uint16_t local_offset[itemPerThread];
uint32_t nnz = num_rows * rowOffset;
for (uint32_t i = 0; i < itemPerThread; ++i) {
uint32_t index = itemPerThread * threadIdx.x + i;
// TODO
// int pred = int(index < nnz);
// thread_data[i] = pred * (mat[index] >> shift) + (1 - pred) *
// INVALID16;
if (index < nnz) {
thread_data[i] = mat[index] >> shift;
mat[index] = 0;
} else {
thread_data[i] = INVALID16;
}
}
/*if (num_cols > nnz) {
// zero-ing the rest of mat
for (uint32_t i = threadIdx.x + nnz; i < num_cols; i += blockThreads) {
mat[i] = 0;
}
}*/
uint32_t m = max(nnz, num_cols);
__syncthreads();
for (uint32_t i = threadIdx.x; i < m; i += blockThreads) {
mat[i] = 0;
}
__syncthreads();
#if __CUDA_ARCH__ >= 700
// 2) compute the number of items in each bucket/col
__half* mat_half = (__half*)(mat);
for (uint32_t i = 0; i < itemPerThread; ++i) {
if (thread_data[i] != INVALID16) {
local_offset[i] = ::atomicAdd(&mat_half[thread_data[i]], 1);
}
}
__syncthreads();
for (uint32_t i = threadIdx.x; i < num_cols; i += blockThreads) {
uint16_t val = uint16_t(mat_half[i]);
mat[i] = val;
}
#else
for (uint32_t i = 0; i < itemPerThread; ++i) {
if (thread_data[i] != INVALID16) {
local_offset[i] = atomicAdd(&mat[thread_data[i]], 1u);
} else {
break;
}
}
__syncthreads();
#endif
// 3) exclusive scan on mat to compute the offset
cub_block_exclusive_sum<uint16_t, blockThreads>(mat, num_cols);
// 4) actually write the values
for (uint32_t i = 0; i < itemPerThread; ++i) {
uint16_t item = thread_data[i];
if (item != INVALID16) {
uint16_t offset = mat[item] + local_offset[i];
uint16_t row = (itemPerThread * threadIdx.x + i) / rowOffset;
output[offset] = row;
} else {
break;
}
}
}
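// Worked example (illustrative): transposing an edge-vertex list with
// rowOffset = 2, num_rows = 3 edges, num_cols = 3 vertices and
// mat = {0,1, 1,2, 2,0} (two vertices per edge). The per-vertex counts are
// {2, 2, 2}; after the exclusive scan mat holds the CSR column offsets
// {0, 2, 4}; and output receives the incident edge (row) ids per vertex, e.g.
// {0,2, 0,1, 1,2} (the order within one vertex depends on the atomics).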
template <uint32_t blockThreads>
__device__ __forceinline__ void v_v_oreinted(const PatchInfo& patch_info,
uint16_t*& s_output_offset,
uint16_t*& s_output_value,
uint16_t* s_ev)
{
const uint16_t num_edges = patch_info.num_edges;
const uint16_t num_faces = patch_info.num_faces;
const uint16_t num_vertices = patch_info.num_vertices;
const uint16_t num_owned_vertices = patch_info.num_owned_vertices;
s_output_offset = &s_ev[0];
s_output_value = &s_ev[num_vertices + 1 + (num_vertices + 1) % 2];
// start by loading the faces while also transposing EV (might
// increase ILP)
uint16_t* s_fe = &s_output_value[2 * num_edges];
uint16_t* s_ef = &s_fe[3 * num_faces + (3 * num_faces) % 2];
LocalEdgeT* temp_fe = reinterpret_cast<LocalEdgeT*>(s_fe);
load_patch_FE<blockThreads>(patch_info, temp_fe);
for (uint32_t i = threadIdx.x; i < num_edges * 2; i += blockThreads) {
s_ef[i] = INVALID16;
}
block_mat_transpose<2u, blockThreads>(
num_edges, num_vertices, s_output_offset, s_output_value);
// block_mat_transpose<2u, blockThreads>(
// num_faces, num_edges, s_patch_EF_offset, s_patch_EF_output);
// We could have used block_mat_transpose to transpose FE so we can look
// up the "two" faces sharing an edge. But we can do better because we know
// that we are working on a manifold mesh, so there are only two faces per
// edge. We also want to keep FE for quick look-up of a face's three edges.
// We need to sync here to make sure that s_fe is loaded but there is
// a sync in block_mat_transpose that takes care of this
for (uint16_t e = threadIdx.x; e < 3 * num_faces; e += blockThreads) {
uint16_t edge = s_fe[e] >> 1;
uint16_t face_id = e / 3;
auto ret = atomicCAS(s_ef + 2 * edge, INVALID16, face_id);
if (ret != INVALID16) {
ret = atomicCAS(s_ef + 2 * edge + 1, INVALID16, face_id);
assert(ret == INVALID16);
}
}
// To orient, we pin the first edge and check all the subsequent edges
// For each edge, we search for the two faces containing it (should be
// only two faces since this is a manifold mesh).
__syncthreads();
for (uint32_t v = threadIdx.x; v < num_owned_vertices; v += blockDim.x) {
// if the vertex is not owned by this patch, then there is no reason
// to orient its edges because no serious computation is done on it
uint16_t start = s_output_offset[v];
uint16_t end = s_output_offset[v + 1];
for (uint16_t e_id = start; e_id < end - 1; ++e_id) {
uint16_t e_0 = s_output_value[e_id];
uint16_t f0(s_ef[2 * e_0]), f1(s_ef[2 * e_0 + 1]);
// we don't handle boundary edges here (each edge must have two valid faces)
assert(f0 != INVALID16 && f1 != INVALID16 && f0 < num_faces &&
f1 < num_faces);
// candidate next edge (only one of them will win)
uint16_t e_candid_0, e_candid_1;
if ((s_fe[3 * f0 + 0] >> 1) == e_0) {
e_candid_0 = s_fe[3 * f0 + 2] >> 1;
}
if ((s_fe[3 * f0 + 1] >> 1) == e_0) {
e_candid_0 = s_fe[3 * f0 + 0] >> 1;
}
if ((s_fe[3 * f0 + 2] >> 1) == e_0) {
e_candid_0 = s_fe[3 * f0 + 1] >> 1;
}
if ((s_fe[3 * f1 + 0] >> 1) == e_0) {
e_candid_1 = s_fe[3 * f1 + 2] >> 1;
}
if ((s_fe[3 * f1 + 1] >> 1) == e_0) {
e_candid_1 = s_fe[3 * f1 + 0] >> 1;
}
if ((s_fe[3 * f1 + 2] >> 1) == e_0) {
e_candid_1 = s_fe[3 * f1 + 1] >> 1;
}
for (uint16_t vn = e_id + 1; vn < end; ++vn) {
uint16_t e_winning_candid = s_output_value[vn];
if (e_candid_0 == e_winning_candid ||
e_candid_1 == e_winning_candid) {
uint16_t temp = s_output_value[e_id + 1];
s_output_value[e_id + 1] = e_winning_candid;
s_output_value[vn] = temp;
break;
}
}
}
}
__syncthreads();
// Load EV into s_ef since both has the same size (2*#E)
s_ev = s_ef;
LocalVertexT* temp_ev = reinterpret_cast<LocalVertexT*>(s_ef);
load_patch_EV<blockThreads>(patch_info, temp_ev);
__syncthreads();
for (uint32_t v = threadIdx.x; v < num_vertices; v += blockThreads) {
uint32_t start = s_output_offset[v];
uint32_t end = s_output_offset[v + 1];
for (uint32_t e = start; e < end; ++e) {
uint16_t edge = s_output_value[e];
uint16_t v0 = s_ev[2 * edge];
uint16_t v1 = s_ev[2 * edge + 1];
assert(v0 == v || v1 == v);
// d_output[e] = (v0 == v) ? v1 : v0;
s_output_value[e] = (v0 == v) * v1 + (v1 == v) * v0;
}
}
}
template <uint32_t blockThreads>
__device__ __forceinline__ void v_e(const uint16_t num_vertices,
const uint16_t num_edges,
uint16_t* d_edges,
uint16_t* d_output)
{
// M_ve = M_ev^{T}. M_ev is already encoded and we need to just transpose
// it
// Here we do the transpose in place. The result is that d_output
// contains the row ids of the transposed matrix (i.e., the edge ids) while
// d_edges contains the offsets, which start with zero and end with
// num_edges*2 (zero is stored and the end can be inferred). Thus,
// d_output should be allocated with size = num_edges*2
block_mat_transpose<2u, blockThreads>(
num_edges, num_vertices, d_edges, d_output);
}
template <uint32_t blockThreads>
__device__ __forceinline__ void v_v(const uint16_t num_vertices,
const uint16_t num_edges,
uint16_t* d_edges,
uint16_t* d_output)
{
// M_vv = M_EV^{T} \dot M_EV
// This requires computing M_EV^{T}, which we compute in shared memory
// similar to v_e. Doing that, we store in d_output the edges
// incident to each vertex. After that we need to replace each edge with
// the other end vertex; EV is kept in a duplicate copy (s_edges_duplicate)
// so it can still be read after the in-place transpose.
uint16_t* s_edges_duplicate = &d_edges[2 * 2 * num_edges];
assert(2 * 2 * num_edges >= num_vertices + 1 + 2 * num_edges);
for (uint16_t i = threadIdx.x; i < 2 * num_edges; i += blockThreads) {
s_edges_duplicate[i] = d_edges[i];
}
// TODO we might be able to remove this sync if transpose has a sync
// that is done before writing to mat
__syncthreads();
v_e<blockThreads>(num_vertices, num_edges, d_edges, d_output);
__syncthreads();
for (uint32_t v = threadIdx.x; v < num_vertices; v += blockThreads) {
uint32_t start = d_edges[v];
uint32_t end = d_edges[v + 1];
for (uint32_t e = start; e < end; ++e) {
uint16_t edge = d_output[e];
uint16_t v0 = s_edges_duplicate[2 * edge];
uint16_t v1 = s_edges_duplicate[2 * edge + 1];
assert(v0 == v || v1 == v);
// d_output[e] = (v0 == v) ? v1 : v0;
d_output[e] = (v0 == v) * v1 + (v1 == v) * v0;
}
}
}
__device__ __forceinline__ void f_v(const uint16_t num_edges,
const uint16_t* d_edges,
const uint16_t num_faces,
uint16_t* d_faces)
{
// M_FV = M_FE \dot M_EV
// Hint: Since a single thread is responsible of reading one
// face in d_faces (i.e., three items), then this thread
// can safely over-write what is in d_faces.
for (uint32_t f = threadIdx.x; f < num_faces; f += blockDim.x) {
uint16_t f_v[3];
uint32_t f_id = 3 * f;
// TODO use vector load and store instead of looping
for (uint32_t i = 0; i < 3; i++) {
uint16_t e = d_faces[f_id + i];
flag_t e_dir(0);
Context::unpack_edge_dir(e, e, e_dir);
// if the direction is flipped, we take the second vertex
uint16_t e_id = (2 * e) + (1 * e_dir);
assert(e_id < 2 * num_edges);
f_v[i] = d_edges[e_id];
}
for (uint32_t i = 0; i < 3; i++) {
d_faces[f * 3 + i] = f_v[i];
}
}
}
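// Worked example (illustrative): if a face stores the encoded edges
// (e0, dir = 0), (e1, dir = 1), (e2, dir = 0), then f_v replaces them with the
// vertices d_edges[2*e0], d_edges[2*e1 + 1], d_edges[2*e2], i.e. the first
// endpoint of an edge traversed in its stored direction and the second
// endpoint when the direction bit is set.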
template <uint32_t blockThreads>
__device__ __forceinline__ void v_f(const uint16_t num_faces,
const uint16_t num_edges,
const uint16_t num_vertices,
uint16_t* d_edges,
uint16_t* d_faces)
{
// M_vf = M_ev^{T} \dot M_fe^{T} = (M_fe \dot M_ev)^{T} = M_fv^{T}
// We follow the math here by computing M_fv and then transposing it.
// In doing so we reuse all the shared memory used to store d_edges
// and d_faces
// First, M_fv is computed in place, i.e., d_faces will contain the
// face vertices of each face (instead of edges)
// Second, the transpose happens in place, i.e., d_faces will hold the
// offsets and d_edges will hold the values (row ids)
f_v(num_edges, d_edges, num_faces, d_faces);
__syncthreads();
block_mat_transpose<3u, blockThreads>(
num_faces, num_vertices, d_faces, d_edges);
}
template <uint32_t blockThreads>
__device__ __forceinline__ void e_f(const uint16_t num_edges,
const uint16_t num_faces,
uint16_t* d_faces,
uint16_t* d_output,
int shift = 1)
{
// M_ef = M_fe^{T}. M_fe is already encoded and we need to just transpose
// it
// Here we do the transpose in place. The result is that d_output
// contains the row ids of the transposed matrix (i.e., the face ids) while
// d_faces contains the offsets, which start with zero and end with
// num_faces*3 (zero is stored and the end can be inferred). Thus,
// d_output should be allocated with size = num_faces*3
block_mat_transpose<3u, blockThreads>(
num_faces, num_edges, d_faces, d_output, shift);
}
template <uint32_t blockThreads>
__device__ __forceinline__ void f_f(const uint16_t num_edges,
const uint16_t num_faces,
uint16_t* s_FE,
uint16_t* s_FF_offset,
uint16_t* s_FF_output)
{
// First construct M_EF in shared memory
uint16_t* s_EF_offset = &s_FE[num_faces * 3];
uint16_t* s_EF_output = &s_EF_offset[num_edges + 1];
// copy FE in to EF_offset so we can do the transpose in place without
// losing FE
for (uint16_t i = threadIdx.x; i < num_faces * 3; i += blockThreads) {
flag_t dir(0);
uint16_t e = s_FE[i] >> 1;
s_EF_offset[i] = e;
s_FE[i] = e;
}
__syncthreads();
e_f<blockThreads>(num_edges, num_faces, s_EF_offset, s_EF_output, 0);
__syncthreads();
// Every thread (T) is responsible for a face (F)
// Each thread reads the edges (E) incident to its face (F). For each edge
// (E), we read the "number" of incident faces (FF) to this edge (num_EF).
// The number of neighbouring faces of face F due to edge E is num_EF - 1
// TODO we can store this sum of neighbor faces in registers and then do
// the exclusive sum on it and finally store it in shared memory
for (uint16_t f = threadIdx.x; f < num_faces; f += blockThreads) {
uint16_t num_neighbour_faces = 0;
for (uint16_t e = 0; e < 3; ++e) {
uint16_t edge = s_FE[3 * f + e];
// printf("\n t= %u f= %u, e= %u, b0= %u, b1= %u ", threadIdx.x, f,
// edge, s_EF_offset[edge], s_EF_offset[edge + 1]);
assert(s_EF_offset[edge + 1] >= s_EF_offset[edge]);
num_neighbour_faces +=
s_EF_offset[edge + 1] - s_EF_offset[edge] - 1;
}
s_FF_offset[f] = num_neighbour_faces;
}
__syncthreads();
cub_block_exclusive_sum<uint16_t, blockThreads>(s_FF_offset, num_faces);
for (uint16_t f = threadIdx.x; f < num_faces; f += blockThreads) {
uint16_t offset = s_FF_offset[f];
for (uint16_t e = 0; e < 3; ++e) {
uint16_t edge = s_FE[3 * f + e];
for (uint16_t ef = s_EF_offset[edge]; ef < s_EF_offset[edge + 1];
++ef) {
uint16_t n_face = s_EF_output[ef];
if (n_face != f) {
s_FF_output[offset] = n_face;
++offset;
}
}
}
assert(offset == s_FF_offset[f + 1]);
}
/*{
if (threadIdx.x == 0) {
printf("\n s_EF_output");
for (uint16_t f = 0; f < num_faces; ++f) {
printf("\n face = %u>>", f);
for (uint16_t ff = s_FF_offset[f]; ff < s_FF_offset[f + 1];
++ff) {
printf(" %u ", s_FF_output[ff]);
}
}
}
}*/
}
template <uint32_t blockThreads, Op op>
__device__ __forceinline__ void query(uint16_t*& s_output_offset,
uint16_t*& s_output_value,
uint16_t* s_ev,
uint16_t* s_fe,
const uint16_t num_vertices,
const uint16_t num_edges,
const uint16_t num_faces)
{
switch (op) {
case Op::VV: {
assert(num_vertices <= 2 * num_edges);
s_output_offset = &s_ev[0];
s_output_value = &s_ev[num_vertices + 1];
v_v<blockThreads>(num_vertices, num_edges, s_ev, s_output_value);
break;
}
case Op::VE: {
assert(num_vertices <= 2 * num_edges);
s_output_offset = &s_ev[0];
s_output_value = &s_ev[num_vertices + 1];
v_e<blockThreads>(num_vertices, num_edges, s_ev, s_output_value);
break;
}
case Op::VF: {
assert(num_vertices <= 2 * num_edges);
s_output_offset = &s_fe[0];
s_output_value = &s_ev[0];
v_f<blockThreads>(num_faces, num_edges, num_vertices, s_ev, s_fe);
break;
}
case Op::EV: {
s_output_value = s_ev;
break;
}
case Op::EF: {
assert(num_edges <= 3 * num_faces);
s_output_offset = &s_fe[0];
s_output_value = &s_fe[num_edges + 1];
e_f<blockThreads>(num_edges, num_faces, s_fe, s_output_value);
break;
}
case Op::FV: {
s_output_value = s_fe;
f_v(num_edges, s_ev, num_faces, s_fe);
break;
}
case Op::FE: {
s_output_value = s_fe;
break;
}
case Op::FF: {
assert(num_edges <= 3 * num_faces);
s_output_offset = &s_fe[3 * num_faces + 2 * 3 * num_faces];
// ^^^^FE ^^^^^EF
s_output_value = &s_output_offset[num_faces + 1];
f_f<blockThreads>(
num_edges, num_faces, s_fe, s_output_offset, s_output_value);
break;
}
default:
assert(1 != 1);
break;
}
}
} // namespace rxmesh
//------------------------------------------------------------------------
// Forward TensorFlow op.
struct AntialiasFwdOp : public OpKernel
{
AntialiasKernelParams m_attribs;
AntialiasFwdOp(OpKernelConstruction* ctx): OpKernel(ctx)
{
memset(&m_attribs, 0, sizeof(m_attribs));
OP_REQUIRES_OK(ctx, ctx->GetAttr("tri_const", &m_attribs.tri_const));
}
void Compute(OpKernelContext* ctx)
{
AntialiasKernelParams& p = m_attribs;
cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
// Get input.
const Tensor& color = ctx->input(0);
const Tensor& rasterOut = ctx->input(1);
const Tensor& pos = ctx->input(2);
const Tensor& tri = ctx->input(3);
// Instance rendering mode?
p.instance_mode = pos.dims() > 2;
// Extract input dimensions.
if (p.instance_mode)
p.numVertices = (pos.dims() > 1) ? pos.dim_size(1) : 0;
else
p.numVertices = (pos.dims() > 0) ? pos.dim_size(0) : 0;
p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
p.n = (color.dims() > 0) ? color.dim_size(0) : 0;
p.height = (color.dims() > 1) ? color.dim_size(1) : 0;
p.width = (color.dims() > 2) ? color.dim_size(2) : 0;
p.channels = (color.dims() > 3) ? color.dim_size(3) : 0;
// Sanity checks.
OP_REQUIRES(ctx, color.dims() == 4 && color.dim_size(0) > 0 && color.dim_size(1) > 0 && color.dim_size(2) > 0 && color.dim_size(3) > 0, errors::InvalidArgument("color must have shape[>0, >0, >0, >0]"));
OP_REQUIRES(ctx, rasterOut.dims() == 4 && rasterOut.dim_size(0) > 0 && rasterOut.dim_size(1) > 0 && rasterOut.dim_size(2) > 0 && rasterOut.dim_size(3) == 4, errors::InvalidArgument("raster_out must have shape[>0, >0, >0, 4]"));
OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
OP_REQUIRES(ctx, color.dim_size(1) == rasterOut.dim_size(1) && color.dim_size(2) == rasterOut.dim_size(2), errors::InvalidArgument("color and raster_out inputs must have same spatial dimensions"));
if (p.instance_mode)
{
OP_REQUIRES(ctx, pos.dims() == 3 && pos.dim_size(0) > 0 && pos.dim_size(1) > 0 && pos.dim_size(2) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n && pos.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out, pos"));
}
else
{
OP_REQUIRES(ctx, pos.dims() == 2 && pos.dim_size(0) > 0 && pos.dim_size(1) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out"));
}
// Get input pointers.
p.color = color.flat<float>().data();
p.rasterOut = rasterOut.flat<float>().data();
p.tri = tri.flat<int>().data();
p.pos = pos.flat<float>().data();
// Misc parameters.
p.xh = .5f * (float)p.width;
p.yh = .5f * (float)p.height;
// Allocate output tensor.
Tensor* outputTensor = NULL;
TensorShape outputShape;
outputShape.AddDim(p.n);
outputShape.AddDim(p.height);
outputShape.AddDim(p.width);
outputShape.AddDim(p.channels);
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, outputShape, &outputTensor));
p.output = outputTensor->flat<float>().data();
// Allocate work buffer. One extra int4 for storing counters.
Tensor* workTensor = NULL;
TensorShape workShape;
workShape.AddDim(p.n * p.width * p.height * 8 + 4); // 8 int for a maximum of two work items per pixel.
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, workShape, &workTensor));
p.workBuffer = (int4*)(workTensor->flat<int>().data());
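// Size check (illustrative): the 8 ints per pixel above correspond to at most
// two int4 work items per pixel, and the extra 4 ints form the single int4
// used for the work counters that are cleared below.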
// Clear the work counters.
OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.workBuffer, 0, sizeof(int4), stream));
// Verify that buffers are aligned to allow float2/float4 operations.
OP_REQUIRES(ctx, !((uintptr_t)p.pos & 15), errors::Internal("pos input tensor not aligned to float4"));
OP_REQUIRES(ctx, !((uintptr_t)p.rasterOut & 7), errors::Internal("raster_out input tensor not aligned to float2"));
OP_REQUIRES(ctx, !((uintptr_t)p.workBuffer & 15), errors::Internal("work_buffer internal tensor not aligned to int4"));
// Kernel parameters.
void* args[] = {&p};
// (Re-)calculate opposite vertex hash.
if (!p.evHash || !p.tri_const)
{
if (p.allocTriangles < p.numTriangles)
{
p.allocTriangles = max(p.allocTriangles, 64);
while (p.allocTriangles < p.numTriangles)
p.allocTriangles <<= 1; // Must be power of two.
// (Re-)allocate memory for the hash.
OP_CHECK_CUDA_ERROR(ctx, cudaFree(p.evHash));
OP_CHECK_CUDA_ERROR(ctx, cudaMalloc(&p.evHash, p.allocTriangles * AA_HASH_ELEMENTS_PER_TRIANGLE * sizeof(uint4)));
LOG(INFO) << "Increasing topology hash size to accommodate " << p.allocTriangles << " triangles";
}
// Clear the hash and launch the mesh kernel to populate it.
OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.evHash, 0, p.allocTriangles * AA_HASH_ELEMENTS_PER_TRIANGLE * sizeof(uint4), stream));
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasFwdMeshKernel, (p.numTriangles - 1) / AA_MESH_KERNEL_THREADS_PER_BLOCK + 1, AA_MESH_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
}
// Copy input to output as a baseline.
OP_CHECK_CUDA_ERROR(ctx, cudaMemcpyAsync(p.output, p.color, p.n * p.height * p.width * p.channels * sizeof(float), cudaMemcpyDeviceToDevice, stream));
// Choose launch parameters for the discontinuity finder kernel and launch.
dim3 blockSize(AA_DISCONTINUITY_KERNEL_BLOCK_WIDTH, AA_DISCONTINUITY_KERNEL_BLOCK_HEIGHT, 1);
dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.n);
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasFwdDiscontinuityKernel, gridSize, blockSize, args, 0, stream));
// Determine optimum block size for the persistent analysis kernel.
int device = 0;
int numCTA = 0;
int numSM = 0;
OP_CHECK_CUDA_ERROR(ctx, cudaGetDevice(&device));
OP_CHECK_CUDA_ERROR(ctx, cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numCTA, (void*)AntialiasFwdAnalysisKernel, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK, 0));
OP_CHECK_CUDA_ERROR(ctx, cudaDeviceGetAttribute(&numSM, cudaDevAttrMultiProcessorCount, device));
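        // Note: this is a persistent-kernel launch: numCTA * numSM blocks are enough to keep every SM occupied,
        // and each block then presumably pulls pixel work items that the discontinuity kernel recorded in
        // p.workBuffer (whose leading int4 holds the counters cleared earlier) until none remain.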
// Launch analysis kernel.
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasFwdAnalysisKernel, numCTA * numSM, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
}
};
REGISTER_OP("AntialiasFwd")
.Input ("color: float")
.Input ("raster_out: float")
.Input ("pos: float")
.Input ("tri: int32")
.Output ("output: float")
.Output ("work_buffer: int32")
.Attr ("tri_const: int");
REGISTER_KERNEL_BUILDER(Name("AntialiasFwd").Device(DEVICE_GPU), AntialiasFwdOp);
//------------------------------------------------------------------------
// Gradient TensorFlow op.
struct AntialiasGradOp : public OpKernel
{
AntialiasKernelParams m_attribs;
AntialiasGradOp(OpKernelConstruction* ctx): OpKernel(ctx)
{
memset(&m_attribs, 0, sizeof(m_attribs));
}
void Compute(OpKernelContext* ctx)
{
AntialiasKernelParams& p = m_attribs;
cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
// Get input.
const Tensor& color = ctx->input(0);
const Tensor& rasterOut = ctx->input(1);
const Tensor& pos = ctx->input(2);
const Tensor& tri = ctx->input(3);
const Tensor& dy = ctx->input(4);
const Tensor& workBuffer = ctx->input(5);
// Instance rendering mode?
p.instance_mode = pos.dims() > 2;
// Extract input dimensions.
if (p.instance_mode)
p.numVertices = (pos.dims() > 1) ? pos.dim_size(1) : 0;
else
p.numVertices = (pos.dims() > 0) ? pos.dim_size(0) : 0;
p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
p.n = (color.dims() > 0) ? color.dim_size(0) : 0;
p.height = (color.dims() > 1) ? color.dim_size(1) : 0;
p.width = (color.dims() > 2) ? color.dim_size(2) : 0;
p.channels = (color.dims() > 3) ? color.dim_size(3) : 0;
// Sanity checks.
OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) > 0 && dy.dim_size(1) > 0 && dy.dim_size(2) > 0 && dy.dim_size(3) > 0, errors::InvalidArgument("dy must have shape[>0, >0, >0, >0]"));
OP_REQUIRES(ctx, color.dims() == 4 && color.dim_size(0) > 0 && color.dim_size(1) > 0 && color.dim_size(2) > 0 && color.dim_size(3) > 0, errors::InvalidArgument("color must have shape[>0, >0, >0, >0]"));
OP_REQUIRES(ctx, rasterOut.dims() == 4 && rasterOut.dim_size(0) > 0 && rasterOut.dim_size(1) > 0 && rasterOut.dim_size(2) > 0 && rasterOut.dim_size(3) == 4, errors::InvalidArgument("raster_out must have shape[>0, >0, >0, 4]"));
OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
OP_REQUIRES(ctx, color.dim_size(1) == rasterOut.dim_size(1) && color.dim_size(2) == rasterOut.dim_size(2), errors::InvalidArgument("color and raster_out inputs must have same spatial dimensions"));
OP_REQUIRES(ctx, color.dim_size(1) == dy.dim_size(1) && color.dim_size(2) == dy.dim_size(2) && color.dim_size(3) == dy.dim_size(3), errors::InvalidArgument("color and dy inputs must have same dimensions"));
if (p.instance_mode)
{
OP_REQUIRES(ctx, pos.dims() == 3 && pos.dim_size(0) > 0 && pos.dim_size(1) > 0 && pos.dim_size(2) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n && pos.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out, pos"));
OP_REQUIRES(ctx, dy.dim_size(0) == p.n && rasterOut.dim_size(0) == p.n && pos.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs dy, color, raster_out, pos"));
}
else
{
OP_REQUIRES(ctx, pos.dims() == 2 && pos.dim_size(0) > 0 && pos.dim_size(1) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out"));
OP_REQUIRES(ctx, dy.dim_size(0) == p.n && rasterOut.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs dy, color, raster_out"));
}
// Get input pointers.
p.dy = dy.flat<float>().data();
p.color = color.flat<float>().data();
p.rasterOut = rasterOut.flat<float>().data();
p.tri = tri.flat<int>().data();
p.pos = pos.flat<float>().data();
p.workBuffer = (int4*)(workBuffer.flat<int>().data());
// Misc parameters.
p.xh = .5f * (float)p.width;
p.yh = .5f * (float)p.height;
// Allocate color gradient output tensor.
Tensor* gradColor = NULL;
TensorShape gradColorShape;
gradColorShape.AddDim(p.n);
gradColorShape.AddDim(p.height);
gradColorShape.AddDim(p.width);
gradColorShape.AddDim(p.channels);
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, gradColorShape, &gradColor));
p.gradColor = gradColor->flat<float>().data();
// Allocate position gradient output tensor.
Tensor* gradPos = NULL;
TensorShape gradPosShape;
if (p.instance_mode)
gradPosShape.AddDim(p.n);
gradPosShape.AddDim(p.numVertices);
gradPosShape.AddDim(4);
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, gradPosShape, &gradPos));
p.gradPos = gradPos->flat<float>().data();
        // Initialize the gradient output buffers and the gradient kernel work counter.
OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(&p.workBuffer[0].y, 0, sizeof(int), stream)); // Gradient kernel work counter.
OP_CHECK_CUDA_ERROR(ctx, cudaMemcpyAsync(p.gradColor, p.dy, p.n * p.height * p.width * p.channels * sizeof(float), cudaMemcpyDeviceToDevice, stream));
OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.gradPos, 0, (p.instance_mode ? p.n : 1) * p.numVertices * 4 * sizeof(float), stream));
// Verify that buffers are aligned to allow float2/float4 operations.
OP_REQUIRES(ctx, !((uintptr_t)p.pos & 15), errors::Internal("pos input tensor not aligned to float4"));
OP_REQUIRES(ctx, !((uintptr_t)p.workBuffer & 15), errors::Internal("work_buffer internal tensor not aligned to int4"));
// Launch the gradient kernel.
void* args[] = {&p};
int device = 0;
int numCTA = 0;
int numSM = 0;
OP_CHECK_CUDA_ERROR(ctx, cudaGetDevice(&device));
OP_CHECK_CUDA_ERROR(ctx, cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numCTA, (void*)AntialiasGradKernel, AA_GRAD_KERNEL_THREADS_PER_BLOCK, 0));
OP_CHECK_CUDA_ERROR(ctx, cudaDeviceGetAttribute(&numSM, cudaDevAttrMultiProcessorCount, device));
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasGradKernel, numCTA * numSM, AA_GRAD_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
}
};
REGISTER_OP("AntialiasGrad")
.Input ("color: float")
.Input ("raster_out: float")
.Input ("pos: float")
.Input ("tri: int32")
.Input ("dy: float")
.Input ("work_buffer: int32")
.Output ("grad_color: float")
.Output ("grad_pos: float");
REGISTER_KERNEL_BUILDER(Name("AntialiasGrad").Device(DEVICE_GPU), AntialiasGradOp);
//------------------------------------------------------------------------
/**
 * \file
* cub::DeviceReorder provides device-wide operations for partitioning and filtering lists of items residing within global memory.
*/
#pragma once
#include <stdio.h>
#include <iterator>
#include "device_scan.cuh"
#include "block/block_partition_tiles.cuh"
#include "../grid/grid_queue.cuh"
#include "../util_debug.cuh"
#include "../util_device.cuh"
#include "../util_vector.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Kernel entry points
*****************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Partition kernel entry point (multi-block)
*/
template <
typename BlockPartitionTilesPolicy, ///< Tuning policy for cub::BlockPartitionTiles abstraction
typename InputIteratorRA, ///< Random-access iterator type for input (may be a simple pointer type)
typename OutputIteratorRA, ///< Random-access iterator type for output (may be a simple pointer type)
typename LengthOutputIterator, ///< Output iterator type for recording the length of the first partition (may be a simple pointer type)
typename PredicateOp, ///< Unary predicate operator indicating membership in the first partition type having member <tt>bool operator()(const T &val)</tt>
typename SizeT> ///< Integer type used for global array indexing
__launch_bounds__ (int(BlockPartitionTilesPolicy::BLOCK_THREADS))
__global__ void PartitionKernel(
InputIteratorRA d_in, ///< Input data
OutputIteratorRA d_out, ///< Output data
LengthOutputIterator d_partition_length, ///< Number of items in the first partition
ScanTileDescriptor<PartitionScanTuple<SizeT, BlockPartitionTilesPolicy::PARTITOINS> > *d_tile_status, ///< Global list of tile status
PredicateOp pred_op, ///< Unary predicate operator indicating membership in the first partition
SizeT num_items, ///< Total number of input items for the entire problem
    int num_tiles, ///< Total number of input tiles for the entire problem
GridQueue<int> queue) ///< Descriptor for performing dynamic mapping of tile data to thread blocks
{
enum
{
TILE_STATUS_PADDING = PtxArchProps::WARP_THREADS,
};
typedef PartitionScanTuple<SizeT, BlockPartitionTilesPolicy::PARTITOINS> PartitionScanTuple;
// Thread block type for scanning input tiles
typedef BlockPartitionTiles<
BlockPartitionTilesPolicy,
InputIteratorRA,
OutputIteratorRA,
PredicateOp,
SizeT> BlockPartitionTilesT;
// Shared memory for BlockPartitionTiles
__shared__ typename BlockPartitionTilesT::TempStorage temp_storage;
// Process tiles
PartitionScanTuple partition_ends; // Ending offsets for partitions (one-after)
bool is_last_tile; // Whether or not this block handled the last tile (i.e., partition_ends is valid for the entire input)
BlockPartitionTilesT(temp_storage, d_in, d_out, d_tile_status + TILE_STATUS_PADDING, pred_op, num_items).ConsumeTiles(
queue,
num_tiles,
partition_ends,
is_last_tile);
// Record the length of the first partition
if (is_last_tile && (threadIdx.x == 0))
{
*d_partition_length = partition_ends.x;
}
}
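// Note: PartitionKernel is organized like a single-pass device-wide scan. Thread blocks obtain tiles
// dynamically through the GridQueue, and the ScanTileDescriptor array lets each tile fold in the selection
// counts of all preceding tiles (the mechanics live in BlockPartitionTiles, declared elsewhere). The block
// that ends up consuming the last tile therefore sees the global partition boundaries and records the
// length of the first partition, as done above.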
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* DeviceReorder
*****************************************************************************/
/**
* \addtogroup DeviceModule
* @{
*/
/**
* \brief DeviceReorder provides device-wide operations for partitioning and filtering lists of items residing within global memory
*/
struct DeviceReorder
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/******************************************************************************
* Constants and typedefs
******************************************************************************/
/// Generic structure for encapsulating dispatch properties. Mirrors the constants within BlockPartitionTilesPolicy.
    struct KernelDispatchParams
{
int block_threads;
int items_per_thread;
BlockScanAlgorithm scan_algorithm;
int tile_size;
template <typename BlockPartitionTilesPolicy>
__host__ __device__ __forceinline__
void Init()
{
block_threads = BlockPartitionTilesPolicy::BLOCK_THREADS;
items_per_thread = BlockPartitionTilesPolicy::ITEMS_PER_THREAD;
scan_algorithm = BlockPartitionTilesPolicy::SCAN_ALGORITHM;
tile_size = block_threads * items_per_thread;
}
};
/******************************************************************************
* Tuning policies
******************************************************************************/
/// Specializations of tuned policy types for different PTX architectures
template <
int PARTITIONS,
typename T,
typename SizeT,
int ARCH>
struct TunedPolicies;
/// SM35 tune
template <int PARTITIONS, typename T, typename SizeT>
struct TunedPolicies<PARTITIONS, T, SizeT, 350>
{
enum {
NOMINAL_4B_ITEMS_PER_THREAD = 16,
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
};
typedef BlockPartitionTilesPolicy<PARTITIONS, 128, ITEMS_PER_THREAD, LOAD_LDG, BLOCK_SCAN_RAKING_MEMOIZE> PartitionPolicy;
};
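    // Note: NOMINAL_4B_ITEMS_PER_THREAD is calibrated for 4-byte items. The ITEMS_PER_THREAD expression
    // scales it by 4/sizeof(T) so that each thread processes roughly the same number of bytes for wider
    // value types, clamped to the range [1, NOMINAL_4B_ITEMS_PER_THREAD]. The SM30/SM20/SM10 tunings
    // below follow the same pattern.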
/// SM30 tune
template <int PARTITIONS, typename T, typename SizeT>
struct TunedPolicies<PARTITIONS, T, SizeT, 300>
{
enum {
NOMINAL_4B_ITEMS_PER_THREAD = 9,
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
};
typedef BlockPartitionTilesPolicy<PARTITIONS, 256, ITEMS_PER_THREAD, LOAD_DEFAULT, BLOCK_SCAN_RAKING_MEMOIZE> PartitionPolicy;
};
/// SM20 tune
template <int PARTITIONS, typename T, typename SizeT>
struct TunedPolicies<PARTITIONS, T, SizeT, 200>
{
enum {
NOMINAL_4B_ITEMS_PER_THREAD = 15,
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
};
typedef BlockPartitionTilesPolicy<PARTITIONS, 128, ITEMS_PER_THREAD, LOAD_DEFAULT, BLOCK_SCAN_RAKING_MEMOIZE> PartitionPolicy;
};
/// SM10 tune
template <int PARTITIONS, typename T, typename SizeT>
struct TunedPolicies<PARTITIONS, T, SizeT, 100>
{
enum {
NOMINAL_4B_ITEMS_PER_THREAD = 7,
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
};
typedef BlockPartitionTilesPolicy<PARTITIONS, 128, ITEMS_PER_THREAD, LOAD_DEFAULT, BLOCK_SCAN_RAKING> PartitionPolicy;
};
/// Tuning policy for the PTX architecture that DevicePartition operations will get dispatched to
template <int PARTITIONS, typename T, typename SizeT>
struct PtxDefaultPolicies
{
static const int PTX_TUNE_ARCH = (CUB_PTX_ARCH >= 350) ?
350 :
(CUB_PTX_ARCH >= 300) ?
300 :
(CUB_PTX_ARCH >= 200) ?
200 :
100;
// Tuned policy set for the current PTX compiler pass
typedef TunedPolicies<PARTITIONS, T, SizeT, PTX_TUNE_ARCH> PtxTunedPolicies;
// PartitionPolicy that opaquely derives from the specialization corresponding to the current PTX compiler pass
struct PartitionPolicy : PtxTunedPolicies::PartitionPolicy {};
/**
* Initialize dispatch params with the policies corresponding to the PTX assembly we will use
*/
        static void InitDispatchParams(int ptx_version, KernelDispatchParams &scan_dispatch_params)
{
if (ptx_version >= 350)
{
typedef TunedPolicies<PARTITIONS, T, SizeT, 350> TunedPolicies;
scan_dispatch_params.Init<typename TunedPolicies::PartitionPolicy>();
}
else if (ptx_version >= 300)
{
typedef TunedPolicies<PARTITIONS, T, SizeT, 300> TunedPolicies;
scan_dispatch_params.Init<typename TunedPolicies::PartitionPolicy>();
}
else if (ptx_version >= 200)
{
typedef TunedPolicies<PARTITIONS, T, SizeT, 200> TunedPolicies;
scan_dispatch_params.Init<typename TunedPolicies::PartitionPolicy>();
}
else
{
typedef TunedPolicies<PARTITIONS, T, SizeT, 100> TunedPolicies;
scan_dispatch_params.Init<typename TunedPolicies::PartitionPolicy>();
}
}
};
/******************************************************************************
* Utility methods
******************************************************************************/
/**
* Internal dispatch routine
*/
template <
typename ScanInitKernelPtr, ///< Function type of cub::ScanInitKernel
typename PartitionKernelPtr, ///< Function type of cub::PartitionKernel
typename InputIteratorRA, ///< Random-access iterator type for input (may be a simple pointer type)
typename OutputIteratorRA, ///< Random-access iterator type for output (may be a simple pointer type)
typename LengthOutputIterator, ///< Output iterator type for recording the length of the first partition (may be a simple pointer type)
typename PredicateOp, ///< Unary predicate operator indicating membership in the first partition type having member <tt>bool operator()(const T &val)</tt>
typename SizeT> ///< Integer type used for global array indexing
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
int ptx_version, ///< [in] PTX version
void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Size in bytes of \p d_temp_storage allocation.
ScanInitKernelPtr init_kernel, ///< [in] Kernel function pointer to parameterization of cub::PartitionInitKernel
PartitionKernelPtr partition_kernel, ///< [in] Kernel function pointer to parameterization of cub::PartitionKernel
        KernelDispatchParams &scan_dispatch_params, ///< [in] Dispatch parameters that match the policy that \p partition_kernel was compiled for
InputIteratorRA d_in, ///< [in] Iterator pointing to scan input
OutputIteratorRA d_out, ///< [in] Iterator pointing to scan output
LengthOutputIterator d_partition_length, ///< [out] Output iterator referencing the location where the pivot offset (i.e., the length of the first partition) is to be recorded
PredicateOp pred_op, ///< [in] Unary predicate operator indicating membership in the first partition
SizeT num_items, ///< [in] Total number of items to partition
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool stream_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
{
#ifndef CUB_RUNTIME_ENABLED
// Kernel launch not supported from this device
return CubDebug(cudaErrorNotSupported);
#else
enum
{
TILE_STATUS_PADDING = 32,
};
// Data type
typedef typename std::iterator_traits<InputIteratorRA>::value_type T;
// Scan tuple type and tile status descriptor type
typedef typename VectorHelper<SizeT, 2>::Type ScanTuple;
typedef ScanTileDescriptor<ScanTuple> ScanTileDescriptorT;
cudaError error = cudaSuccess;
do
{
// Number of input tiles
int num_tiles = (num_items + scan_dispatch_params.tile_size - 1) / scan_dispatch_params.tile_size;
// Temporary storage allocation requirements
void* allocations[2];
size_t allocation_sizes[2] =
{
(num_tiles + TILE_STATUS_PADDING) * sizeof(ScanTileDescriptorT), // bytes needed for tile status descriptors
GridQueue<int>::AllocationSize() // bytes needed for grid queue descriptor
};
// Alias temporaries (or set the necessary size of the storage allocation)
if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
// Return if the caller is simply requesting the size of the storage allocation
if (d_temp_storage == NULL)
return cudaSuccess;
// Global list of tile status
ScanTileDescriptorT *d_tile_status = (ScanTileDescriptorT*) allocations[0];
// Grid queue descriptor
GridQueue<int> queue(allocations[1]);
// Log init_kernel configuration
int init_kernel_threads = 128;
int init_grid_size = (num_tiles + init_kernel_threads - 1) / init_kernel_threads;
if (stream_synchronous) CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, init_kernel_threads, (long long) stream);
// Invoke init_kernel to initialize tile descriptors and queue descriptors
init_kernel<<<init_grid_size, init_kernel_threads, 0, stream>>>(
queue,
d_tile_status,
num_tiles);
// Sync the stream if specified
if (stream_synchronous && (CubDebug(error = SyncStream(stream)))) break;
// Get grid size for multi-block kernel
int scan_grid_size;
int multi_sm_occupancy = -1;
if (ptx_version < 200)
{
// We don't have atomics (or don't have fast ones), so just assign one
// block per tile (limited to 65K tiles)
scan_grid_size = num_tiles;
}
else
{
// We have atomics and can thus reuse blocks across multiple tiles using a queue descriptor.
// Get GPU id
int device_ordinal;
if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
// Get SM count
int sm_count;
if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
// Get a rough estimate of partition_kernel SM occupancy based upon the maximum SM occupancy of the targeted PTX architecture
multi_sm_occupancy = CUB_MIN(
ArchProps<CUB_PTX_ARCH>::MAX_SM_THREADBLOCKS,
ArchProps<CUB_PTX_ARCH>::MAX_SM_THREADS / scan_dispatch_params.block_threads);
#ifndef __CUDA_ARCH__
    // We're on the host, so refine the occupancy estimate using the actual device properties
Device device_props;
if (CubDebug(error = device_props.Init(device_ordinal))) break;
if (CubDebug(error = device_props.MaxSmOccupancy(
multi_sm_occupancy,
partition_kernel,
scan_dispatch_params.block_threads))) break;
#endif
// Get device occupancy for partition_kernel
int scan_occupancy = multi_sm_occupancy * sm_count;
// Get grid size for partition_kernel
scan_grid_size = (num_tiles < scan_occupancy) ?
num_tiles : // Not enough to fill the device with threadblocks
scan_occupancy; // Fill the device with threadblocks
}
// Log partition_kernel configuration
if (stream_synchronous) CubLog("Invoking partition_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
scan_grid_size, scan_dispatch_params.block_threads, (long long) stream, scan_dispatch_params.items_per_thread, multi_sm_occupancy);
// Invoke partition_kernel
partition_kernel<<<scan_grid_size, scan_dispatch_params.block_threads, 0, stream>>>(
d_in,
d_out,
d_partition_length,
d_tile_status,
pred_op,
num_items,
num_tiles,
queue);
// Sync the stream if specified
if (stream_synchronous && (CubDebug(error = SyncStream(stream)))) break;
}
while (0);
return error;
#endif // CUB_RUNTIME_ENABLED
}
/**
* Internal partition dispatch routine for using default tuning policies
*/
template <
        int PARTITIONS, ///< Number of partitions we are keeping
typename InputIteratorRA, ///< Random-access iterator type for input (may be a simple pointer type)
typename OutputIteratorRA, ///< Random-access iterator type for output (may be a simple pointer type)
typename LengthOutputIterator, ///< Output iterator type for recording the length of the first partition (may be a simple pointer type)
typename PredicateOp, ///< Unary predicate operator indicating membership in the first partition type having member <tt>bool operator()(const T &val)</tt>
typename SizeT> ///< Integer type used for global array indexing
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Size in bytes of \p d_temp_storage allocation.
InputIteratorRA d_in, ///< [in] Iterator pointing to input items
OutputIteratorRA d_out, ///< [in] Iterator pointing to output items
LengthOutputIterator d_partition_length, ///< [out] Output iterator referencing the location where the pivot offset (i.e., the length of the first partition) is to be recorded
PredicateOp pred_op, ///< [in] Unary predicate operator indicating membership in the first partition
SizeT num_items, ///< [in] Total number of items to partition
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool stream_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
{
// Data type
typedef typename std::iterator_traits<InputIteratorRA>::value_type T;
// Tuning polices
typedef PtxDefaultPolicies<PARTITIONS, T, SizeT> PtxDefaultPolicies; // Wrapper of default kernel policies
typedef typename PtxDefaultPolicies::PartitionPolicy PartitionPolicy; // Partition kernel policy
cudaError error = cudaSuccess;
do
{
// Declare dispatch parameters
        KernelDispatchParams scan_dispatch_params;
int ptx_version;
#ifdef __CUDA_ARCH__
// We're on the device, so initialize the dispatch parameters with the PtxDefaultPolicies directly
scan_dispatch_params.Init<PartitionPolicy>();
ptx_version = CUB_PTX_ARCH;
#else
// We're on the host, so lookup and initialize the dispatch parameters with the policies that match the device's PTX version
if (CubDebug(error = PtxVersion(ptx_version))) break;
PtxDefaultPolicies::InitDispatchParams(ptx_version, scan_dispatch_params);
#endif
Dispatch(
ptx_version,
d_temp_storage,
temp_storage_bytes,
ScanInitKernel<T, SizeT>,
PartitionKernel<PartitionPolicy, InputIteratorRA, OutputIteratorRA, LengthOutputIterator, PredicateOp, SizeT>,
scan_dispatch_params,
d_in,
d_out,
d_partition_length,
pred_op,
num_items,
stream,
stream_synchronous);
if (CubDebug(error)) break;
}
while (0);
return error;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Splits a list of input items into two partitions within the given output list using the specified predicate. The relative ordering of inputs is not necessarily preserved.
*
* An item \p val is placed in the first partition if <tt>pred_op(val) == true</tt>, otherwise
* it is placed in the second partition. The offset of the partitioning pivot (equivalent to
* the total length of the first partition as well as the starting offset of the second), is
* recorded to \p d_partition_length.
*
* The length of the output referenced by \p d_out is assumed to be the same as that of \p d_in.
*
* \devicestorage
*
* \tparam InputIteratorRA <b>[inferred]</b> Random-access iterator type for input (may be a simple pointer type)
* \tparam OutputIteratorRA <b>[inferred]</b> Random-access iterator type for output (may be a simple pointer type)
* \tparam LengthOutputIterator <b>[inferred]</b> Random-access iterator type for output (may be a simple pointer type)
* \tparam PredicateOp <b>[inferred]</b> Unary predicate operator indicating membership in the first partition type having member <tt>bool operator()(const T &val)</tt>
*/
template <
typename InputIteratorRA,
typename OutputIteratorRA,
typename LengthOutputIterator,
typename PredicateOp>
__host__ __device__ __forceinline__
static cudaError_t Partition(
void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Size in bytes of \p d_temp_storage allocation.
InputIteratorRA d_in, ///< [in] Iterator pointing to input items
OutputIteratorRA d_out, ///< [in] Iterator pointing to output items
LengthOutputIterator d_pivot_offset, ///< [out] Output iterator referencing the location where the pivot offset is to be recorded
PredicateOp pred_op, ///< [in] Unary predicate operator indicating membership in the first partition
int num_items, ///< [in] Total number of items to partition
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool stream_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
        // Two partitions: the items selected by pred_op followed by the rest.
        return Dispatch<2>(d_temp_storage, temp_storage_bytes, d_in, d_out, d_pivot_offset, pred_op, num_items, stream, stream_synchronous);
}
};
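// A minimal host-side usage sketch for DeviceReorder::Partition. This is illustrative only and not part of
// the library: the IsPositive predicate, the ExamplePartition helper, and the raw cudaMalloc/cudaFree calls
// are assumptions made for the example. It shows the two-phase convention visible in Dispatch() above:
// a first call with d_temp_storage == NULL only reports the required temporary-storage size, and a second
// call with the allocated buffer performs the actual partition.
#if 0
struct IsPositive
{
    __host__ __device__ __forceinline__
    bool operator()(const int &val) const { return val > 0; }
};

void ExamplePartition(int *d_in, int *d_out, int *d_first_partition_length, int num_items)
{
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;

    // Phase 1: query the amount of temporary storage needed (no work is done)
    DeviceReorder::Partition(d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_first_partition_length, IsPositive(), num_items);

    // Phase 2: allocate the temporary storage and run the partition
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    DeviceReorder::Partition(d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_first_partition_length, IsPositive(), num_items);

    cudaFree(d_temp_storage);
}
#endif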
/** @} */ // DeviceModule
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
//------------------------------------------------------------------------
using namespace ONNXPlugin;
#define cublasCheck(op) \
do { \
auto ret = (op); \
if (ret != CUBLAS_STATUS_SUCCESS) { \
INFOF("%s fail, %d != %d", #op, ret, CUBLAS_STATUS_SUCCESS); \
} \
} while (0);
__global__ void sigmoidKernel(float* input, float* output, int edge) {
KernelPositionBlock;
output[position] = 1 / (1 + exp(-input[position]));
}
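// Note: in enqueue_native() below, this sigmoid is applied to the last third of the offset/mask input
// tensor (channels [2*C/3, C)), i.e. the DCNv2 modulation mask, before the deformable im2col and the GEMM.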
// __global__ void sigmoidKernel(__half* input, __half* output, int edge) {
// KernelPositionBlock;
// __half one = 1.0f;
// output[position] = one / (one + hexp(-input[position]));
// }
static __device__ float dmcnIm2colBilinear(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// static __device__ __half dmcnIm2colBilinear(const __half *bottom_data, const int data_width,
// const int height, const int width, __half h, __half w)
// {
// int h_low = hfloor(h);
// int w_low = hfloor(w);
// int h_high = h_low + 1;
// int w_high = w_low + 1;
// __half one = 1.0f;
// __half h_low_hf = h_low;
// __half w_low_hf = w_low;
// __half lh = h - h_low_hf;
// __half lw = w - w_low_hf;
// __half hh = one - lh, hw = one - lw;
// __half zero = 0.0f;
// __half v1 = zero;
// if (h_low >= 0 && w_low >= 0)
// v1 = bottom_data[h_low * data_width + w_low];
// __half v2 = zero;
// if (h_low >= 0 && w_high <= width - 1)
// v2 = bottom_data[h_low * data_width + w_high];
// __half v3 = zero;
// if (h_high <= height - 1 && w_low >= 0)
// v3 = bottom_data[h_high * data_width + w_low];
// __half v4 = zero;
// if (h_high <= height - 1 && w_high <= width - 1)
// v4 = bottom_data[h_high * data_width + w_high];
// __half w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
// return (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
// }
__global__ void DCNIm2colKernel(
const float *data_input, const float *data_offset, const float *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
float *data_output, int edge)
{
KernelPositionBlock;
const int f_area_input = width_input * height_input;
const int f_area_output = width_output * height_output;
// index index of output matrix
const int w_output = position % width_output;
const int h_output = (position / width_output) % height_output;
const int c_input = (position / width_output / height_output) % num_channels;
const int c_output = c_input * kernel_h * kernel_w;
const int deformable_group_index = c_input / channel_per_deformable_group;
const int h_input = h_output * stride_h - pad_h;
const int w_input = w_output * stride_w - pad_w;
int data_output_offset = c_input * kernel_h * kernel_w * f_area_output + h_output * width_output + w_output;
float *data_output_ptr = data_output + data_output_offset;
const float *data_input_ptr = data_input + c_input * f_area_input;
const float *data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * f_area_output;
const float *data_mask_ptr = data_mask + deformable_group_index * kernel_h * kernel_w * f_area_output;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int row = i + h_input;
const int col = j + w_input;
const int kernel_index = i * kernel_w + j;
const int offset_h_offset = 2 * kernel_index * f_area_output + h_output * width_output + w_output;
const int offset_w_offset = (2 * kernel_index + 1) * f_area_output + h_output * width_output + w_output;
const int mask_offset = kernel_index * f_area_output + h_output * width_output + w_output;
const float offset_h = data_offset_ptr[offset_h_offset];
const float offset_w = data_offset_ptr[offset_w_offset];
const float mask = data_mask_ptr[mask_offset];
float val = 0;
const float h_im = h_input + i * dilation_h + offset_h;
const float w_im = w_input + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height_input && w_im < width_input)
{
val = dmcnIm2colBilinear(data_input_ptr, width_input, height_input, width_input, h_im, w_im);
}
*data_output_ptr = val * mask;
data_output_ptr += f_area_output;
}
}
}
// __global__ void DCNIm2colKernel(
// const __half *data_input, const __half *data_offset, const __half *data_mask,
// const int height_input, const int width_input, const int kernel_h, const int kernel_w,
// const int pad_h, const int pad_w,
// const int stride_h, const int stride_w,
// const int dilation_h, const int dilation_w,
// const int channel_per_deformable_group,
// const int batch_size, const int num_channels, const int deformable_group,
// const int height_output, const int width_output,
// __half *data_output, int edge)
// {
// KernelPositionBlock;
// const int f_area_input = width_input * height_input;
// const int f_area_output = width_output * height_output;
// // index index of output matrix
// const int w_output = position % width_output;
// const int h_output = (position / width_output) % height_output;
// const int c_input = (position / width_output / height_output) % num_channels;
// const int c_output = c_input * kernel_h * kernel_w;
// const int deformable_group_index = c_input / channel_per_deformable_group;
// const int h_input = h_output * stride_h - pad_h;
// const int w_input = w_output * stride_w - pad_w;
// __half width_input_hf = __float2half(width_input);
// __half height_input_hf = __float2half(height_input);
// __half h_input_hf = __float2half(h_input);
// __half w_input_hf = __float2half(w_input);
// __half dilation_h_hf = __float2half(dilation_h);
// __half dilation_w_hf = __float2half(dilation_w);
// int data_output_offset = c_input * kernel_h * kernel_w * f_area_output + h_output * width_output + w_output;
// __half *data_output_ptr = data_output + data_output_offset;
// const __half *data_input_ptr = data_input + c_input * f_area_input;
// const __half *data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * f_area_output;
// const __half *data_mask_ptr = data_mask + deformable_group_index * kernel_h * kernel_w * f_area_output;
// __half n_one = -1.0f;
// __half zero = 0.0f;
// for (int i = 0; i < kernel_h; ++i)
// {
// for (int j = 0; j < kernel_w; ++j)
// {
// __half i_hf = __float2half(i);
// __half j_hf = __float2half(j);
// const int row = i + h_input;
// const int col = j + w_input;
// const int kernel_index = i * kernel_w + j;
// const int offset_h_offset = 2 * kernel_index * f_area_output + h_output * width_output + w_output;
// const int offset_w_offset = (2 * kernel_index + 1) * f_area_output + h_output * width_output + w_output;
// const int mask_offset = kernel_index * f_area_output + h_output * width_output + w_output;
// const __half offset_h = data_offset_ptr[offset_h_offset];
// const __half offset_w = data_offset_ptr[offset_w_offset];
// const __half mask = data_mask_ptr[mask_offset];
// __half val = zero;
// __half h_im = h_input_hf + i_hf * dilation_h_hf + offset_h;
// __half w_im = w_input_hf + j_hf * dilation_w_hf + offset_w;
// if (h_im > n_one && w_im > n_one && h_im < height_input_hf && w_im < width_input_hf)
// {
// val = dmcnIm2colBilinear(data_input_ptr, width_input_hf, height_input_hf, width_input_hf, h_im, w_im);
// }
// *data_output_ptr = val * mask;
// data_output_ptr += f_area_output;
// }
// }
// }
template<typename DataType>
static __global__ void biasKernel(DataType* data_input, const DataType* bias, const int f_area, int edge) {
KernelPositionBlock;
int bias_index = position / f_area;
data_input[position] += bias[bias_index];
}
inline void segemm_native(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float alpha, /* host or device pointer */
const float *A,
int lda,
const float *B,
int ldb,
float beta, /* host or device pointer */
float *C,
int ldc) {
cublasCheck(cublasSgemm(handle, transa, transb, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc));
//cublasCheck(cublasGemmEx(handle, transa, transb, m, n, k, &alpha, A, CUDA_R_32F, lda, B, CUDA_R_32F, ldb, &beta, C, CUDA_R_32F, ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT));
}
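// Note on the segemm_native() call in enqueue_native() below: cuBLAS assumes column-major storage while the
// buffers here are row-major. Passing the operands as (n, m, k) with A = im2col (lda = n) and B = weights
// (ldb = k) makes cuBLAS compute the column-major product C(n x m) = im2col(n x k) * W(k x m), whose memory
// layout is exactly the row-major result out(m x n) = W(m x k) * im2col(k x n). This is the usual
// operand-swapping trick for row-major GEMM.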
// inline void segemm_native(cublasHandle_t handle,
// cublasOperation_t transa,
// cublasOperation_t transb,
// int m,
// int n,
// int k,
// float alpha,
// const __half *A,
// int lda,
// const __half *B,
// int ldb,
// float beta,
// __half *C,
// int ldc) {
// auto halpha = __float2half(alpha);
// auto hbeta = __float2half(beta);
// //cublasCheck(cublasHgemm(handle, transa, transb, m, n, k, &halpha, A, lda, B, ldb, &hbeta, C, ldc));
// cublasCheck(cublasGemmEx(handle, transa, transb, m, n, k, &halpha, A, CUDA_R_16F, lda, B, CUDA_R_16F, ldb, &hbeta, C, CUDA_R_16F, ldc, CUDA_R_16F, CUBLAS_GEMM_DFALT));
// }
template<typename DataType>
static void enqueue_native(cublasHandle_t handle, const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) {
auto& data = inputs[0];
auto& om = inputs[1];
auto& out = outputs[0];
int kernel_size = weights[0].width();
int deformable_group = 1;
size_t maskSize = (size_t)data.height() * data.width() * kernel_size * kernel_size * deformable_group;
size_t im2colSize = (size_t)data.channel() * kernel_size * kernel_size * out.height() * out.width();
const int m = out.channel();
const int n = out.count(2);
const int k = data.channel() * kernel_size * kernel_size;
float alpha = 1.0;
float beta = 0.0;
//cublasCheck(cublasSetStream(handle, stream));
for (int ibatch = 0; ibatch < data.batch(); ++ibatch) {
DataType* maskWorkspacePtr = (DataType*)workspace + (maskSize + im2colSize) * ibatch;
DataType* im2colWorkspacePtr = (DataType*)workspace + (maskSize + im2colSize) * ibatch + maskSize;
DataType* inputMask = om.ptr<DataType>(ibatch, om.channel() / 3 * 2);
checkCudaKernel(
sigmoidKernel<<<CUDATools::grid_dims(maskSize), CUDATools::block_dims(maskSize), 0, stream>>>(inputMask, maskWorkspacePtr, maskSize);
);
DataType* datainput = data.ptr<DataType>(ibatch);
DataType* offset = om.ptr<DataType>(ibatch);
auto jobs = (size_t)data.channel() * out.height() * out.width();
checkCudaKernel(
DCNIm2colKernel<<<CUDATools::grid_dims(jobs), CUDATools::block_dims(jobs), 0, stream>>>(
datainput, offset, maskWorkspacePtr, data.height(), data.width(), kernel_size, kernel_size, 1, 1, 1, 1, 1, 1, data.channel(), data.batch(), data.channel(), deformable_group,
out.height(), out.width(), im2colWorkspacePtr, jobs
);
);
DataType* weightKernel = weights[0].ptr<DataType>();
segemm_native(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, im2colWorkspacePtr, n, weightKernel, k, beta, out.ptr<DataType>(ibatch), n);
if (weights.size() > 1) {
DataType* weightBias = weights[1].ptr<DataType>();
size_t edge = out.count(1);
size_t area = out.count(2);
checkCudaKernel(
biasKernel<<<CUDATools::grid_dims(edge), CUDATools::block_dims(edge), 0, stream>>>(
out.ptr<DataType>(ibatch), weightBias, area, edge
);
);
}
}
}
class DCNv2 : public TRTPlugin {
public:
cublasHandle_t cublasHandle_ = nullptr;
SetupPlugin(DCNv2);
virtual void attachToContext(cudnnContext* /*cudnn*/, cublasContext* cublas, nvinfer1::IGpuAllocator* /*allocator*/) noexcept override{
cublasHandle_ = cublas;
}
virtual void detachFromContext() noexcept override{
cublasHandle_ = nullptr;
}
std::shared_ptr<LayerConfig> new_config() {
auto cfg = TRTPlugin::new_config();
//cfg->supportDataType_ = {nvinfer1::DataType::kFLOAT};
//cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT};
cfg->support_dtype_set_ = {nvinfer1::DataType::kFLOAT};
return cfg;
}
virtual void config_finish() override{
// INFO("weights = %d", config_->weights_.size());
// for(int i = 0; i < config_->weights_.size(); ++i){
// auto& w = config_->weights_[i];
// if(w->type() == TRT::DataType::Float16){
// INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), float(w->at<__half>(0)));
// }else{
// INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), w->at<float>(0));
// }
// }
}
size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int32_t nbInputs, const nvinfer1::PluginTensorDesc* outputs,
int32_t nbOutputs) const noexcept{
int kernel_size = 3;
int deformable_group = 1;
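        // Note: kernel_size and deformable_group are hard-coded here; they must match the values used in
        // enqueue_native() (which reads the kernel size from weights[0].width()), otherwise the workspace
        // size computed below will not cover the per-batch mask and im2col buffers.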
size_t im2colSize = (size_t)inputs[0].dims.d[1] * kernel_size * kernel_size * outputs[0].dims.d[2] * outputs[0].dims.d[3];
size_t maskSize = (size_t)inputs[0].dims.d[2] * inputs[0].dims.d[3] * kernel_size * kernel_size * deformable_group;
config_->workspace_size_ = (im2colSize + maskSize) * config_->max_batch_size_ * TRT::data_type_size(config_->usage_dtype_);
return config_->workspace_size_;
}
nvinfer1::DimsExprs getOutputDimensions(
int32_t outputIndex, const nvinfer1::DimsExprs* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept{
nvinfer1::DimsExprs output_dims;
output_dims.nbDims = 4;
output_dims.d[0] = inputs[0].d[0];
output_dims.d[1] = exprBuilder.constant(config_->weights_[0]->size(0));
output_dims.d[2] = inputs[0].d[2];
output_dims.d[3] = inputs[0].d[3];
return output_dims;
}
int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) {
if (config_->usage_dtype_ == TRT::DataType::Float) {
enqueue_native<float>(cublasHandle_, inputs, outputs, weights, workspace, stream);
}
else if (config_->usage_dtype_ == TRT::DataType::Float16) {
// enqueue_native<__half>(cublasHandle_, inputs, outputs, weights, workspace, stream);
INFOF("not implement function");
}
else{
INFOF("not implement function");
}
return 0;
}
};
RegisterPlugin(DCNv2);
//------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */
#include <sundials/sundials_types.h> /* definition of type realtype */
#include <sundials/sundials_math.h> /* definition of ABS and EXP */
#include <nvector/nvector_cuda.h>
/* Real Constants */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
#if defined(SUNDIALS_INT64_T)
#define DSYM "ld"
#else
#define DSYM "d"
#endif
/*
* CUDA kernels
*/
__global__ void fKernel(const realtype *u, realtype *udot,
sunindextype MX, sunindextype MY,
realtype hordc, realtype horac, realtype verdc)
{
realtype uij, udn, uup, ult, urt, hdiff, hadv, vdiff;
sunindextype i, j, tid;
/* Loop over all grid points. */
tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < MX*MY) {
i = tid/MY;
j = tid%MY;
uij = u[tid];
udn = (j == 0) ? ZERO : u[tid - 1];
uup = (j == MY-1) ? ZERO : u[tid + 1];
ult = (i == 0) ? ZERO : u[tid - MY];
urt = (i == MX-1) ? ZERO : u[tid + MY];
/* Set diffusion and advection terms and load into udot */
hdiff = hordc*(ult - TWO*uij + urt);
hadv = horac*(urt - ult);
vdiff = verdc*(uup - TWO*uij + udn);
udot[tid] = hdiff + hadv + vdiff;
}
}
__global__ void jtvKernel(const realtype *vdata, realtype *Jvdata,
sunindextype MX, sunindextype MY,
realtype hordc, realtype horac, realtype verdc)
{
sunindextype i, j, tid;
/* Loop over all grid points. */
tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < MX*MY) {
i = tid/MY;
j = tid%MY;
/* set the tid-th element of Jv */
Jvdata[tid] = -TWO*(verdc+hordc) * vdata[tid];
if (i != 0) Jvdata[tid] += (hordc - horac) * vdata[tid-MY];
if (i != MX-1) Jvdata[tid] += (hordc + horac) * vdata[tid+MY];
if (j != 0) Jvdata[tid] += verdc * vdata[tid-1];
if (j != MY-1) Jvdata[tid] += verdc * vdata[tid+1];
}
}
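/* Note: the right-hand side f(t,u) above is linear in u, so this Jacobian-times-vector product is simply
   the same 5-point advection-diffusion stencil applied to v: -2*(hordc+verdc) on the diagonal,
   (hordc -/+ horac) for the left/right neighbors, and verdc for the lower/upper neighbors. */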
/* Type : _UserData (contains model and discretization parameters) */
struct _UserData {
sunindextype MX, MY, NEQ;
realtype dx, dy, XMAX, YMAX;
realtype hdcoef, hacoef, vdcoef;
};
typedef _UserData *UserData;
/* Problem setup and initialization functions */
static UserData SetUserData(int argc, char** argv);
static void SetIC(N_Vector u, UserData data);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int jtv(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *user_data, N_Vector tmp);
/* Private Helper Functions */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
int main(int argc, char** argv)
{
SUNContext sunctx;
realtype reltol, abstol, t, tout, umax;
N_Vector u;
UserData data;
SUNLinearSolver LS;
void *cvode_mem;
int iout, retval;
long int nst;
cudaStream_t stream;
cudaError_t cuerr;
u = NULL;
data = NULL;
LS = NULL;
cvode_mem = NULL;
/* optional: create a cudaStream to use with the CUDA NVector
(otherwise the default stream is used) and creating kernel
execution policies */
cuerr = cudaStreamCreate(&stream);
if (cuerr != cudaSuccess) {
printf("Error in cudaStreamCreate(): %s\n", cudaGetErrorString(cuerr));
return(1);
}
/* Create the SUNDIALS context */
retval = SUNContext_Create(NULL, &sunctx);
if(check_retval(&retval, "SUNContext_Create", 1)) return(1);
SUNCudaThreadDirectExecPolicy stream_exec_policy(256, stream);
SUNCudaBlockReduceExecPolicy reduce_exec_policy(256, 0, stream);
/* Set model parameters */
data = SetUserData(argc, argv);
if(check_retval((void *)data, "malloc", 2)) return(1);
reltol = ZERO; /* Set the tolerances */
abstol = ATOL;
/* Create a CUDA nvector with initial values using managed
memory for the vector data */
u = N_VNewManaged_Cuda(data->NEQ, sunctx);
if(check_retval((void*)u, "N_VNewManaged_Cuda", 0)) return(1);
/* Use a non-default cuda stream for kernel execution */
retval = N_VSetKernelExecPolicy_Cuda(u, &stream_exec_policy, &reduce_exec_policy);
if(check_retval(&retval, "N_VSetKernelExecPolicy_Cuda", 0)) return(1);
SetIC(u, data); /* Initialize u vector */
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula */
cvode_mem = CVodeCreate(CV_BDF, sunctx);
if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the initial time T0, and
* the initial dependent variable vector u. */
retval = CVodeInit(cvode_mem, f, T0, u);
if(check_retval(&retval, "CVodeInit", 1)) return(1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerance */
retval = CVodeSStolerances(cvode_mem, reltol, abstol);
if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);
/* Set the pointer to user-defined data */
retval = CVodeSetUserData(cvode_mem, data);
if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);
/* Create SPGMR solver structure without preconditioning
* and the maximum Krylov dimension maxl */
LS = SUNLinSol_SPGMR(u, SUN_PREC_NONE, 0, sunctx);
if(check_retval(&retval, "SUNLinSol_SPGMR", 1)) return(1);
/* Set CVode linear solver to LS */
retval = CVodeSetLinearSolver(cvode_mem, LS, NULL);
if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);
/* Set the Jacobian-times-vector function */
retval = CVodeSetJacTimes(cvode_mem, NULL, jtv);
if(check_retval(&retval, "CVodeSetJacTimesVecFn", 1)) return(1);
/* In loop over output points: call CVode, print results, test for errors */
umax = N_VMaxNorm(u);
PrintHeader(reltol, abstol, umax, data);
for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
if(check_retval(&retval, "CVode", 1)) break;
umax = N_VMaxNorm(u);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
PrintOutput(t, umax, nst);
}
PrintFinalStats(cvode_mem); /* Print some final statistics */
N_VDestroy(u); /* Free the u vector */
CVodeFree(&cvode_mem); /* Free the integrator memory */
SUNLinSolFree(LS); /* Free linear solver memory */
free(data); /* Free the user data */
SUNContext_Free(&sunctx);
cuerr = cudaStreamDestroy(stream); /* Free and cleanup the CUDA stream */
if(cuerr != cudaSuccess) { printf("Error: cudaStreamDestroy() failed\n"); return(1); }
return(0);
}
/*
*-------------------------------------------
* Problem setup and initialization functions
*-------------------------------------------
*/
/* Set model and discretization parameters */
UserData SetUserData(int argc, char *argv[])
{
const sunindextype MX = 10;
const sunindextype MY = 5;
const realtype XMAX = RCONST(2.0); /* domain boundaries */
const realtype YMAX = RCONST(1.0);
/* Allocate user data structure */
UserData ud = (UserData) malloc(sizeof *ud);
if(check_retval((void*) ud, "AllocUserData", 2)) return(NULL);
ud->MX = MX;
ud->MY = MY;
ud->NEQ = MX*MY;
ud->XMAX = XMAX;
ud->YMAX = YMAX;
ud->dx = XMAX/(MX+1); /* Set grid coefficients in data */
ud->dy = YMAX/(MY+1);
ud->hdcoef = ONE/(ud->dx*ud->dx);
ud->hacoef = HALF/(TWO*ud->dx);
ud->vdcoef = ONE/(ud->dy*ud->dy);
return ud;
}
/* Set initial conditions in u vector */
static void SetIC(N_Vector u, UserData data)
{
/* Extract needed constants from data */
const realtype dx = data->dx;
const realtype dy = data->dy;
const realtype xmax = data->XMAX;
const realtype ymax = data->YMAX;
const sunindextype MY = data->MY;
const sunindextype NEQ = data->NEQ;
/* Extract pointer to solution vector data on the host */
realtype *udata = N_VGetHostArrayPointer_Cuda(u);
sunindextype i, j, tid;
realtype x, y;
/* Load initial profile into u vector */
for (tid=0; tid < NEQ; tid++) {
i = tid / MY;
j = tid % MY;
x = (i+1)*dx;
y = (j+1)*dy;
udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y);
}
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u). */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
UserData data = (UserData) user_data;
/* Extract needed constants from data */
const sunindextype MX = data->MX;
const sunindextype MY = data->MY;
const realtype hordc = data->hdcoef;
const realtype horac = data->hacoef;
const realtype verdc = data->vdcoef;
/* Extract pointers to vector data */
const realtype *udata = N_VGetDeviceArrayPointer_Cuda(u);
realtype *dudata = N_VGetDeviceArrayPointer_Cuda(udot);
unsigned block = 256;
unsigned grid = (MX*MY + block - 1) / block;
fKernel<<<grid,block>>>(udata, dudata, MX, MY, hordc, horac, verdc);
return(0);
}
/* Jacobian-times-vector routine. */
static int jtv(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *user_data, N_Vector tmp)
{
UserData data = (UserData) user_data;
/* Extract needed constants from data */
const sunindextype MX = data->MX;
const sunindextype MY = data->MY;
const realtype hordc = data->hdcoef;
const realtype horac = data->hacoef;
const realtype verdc = data->vdcoef;
/* Extract pointers to vector data */
const realtype *vdata = N_VGetDeviceArrayPointer_Cuda(v);
realtype *Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);
unsigned block = 256;
unsigned grid = (MX*MY + block - 1) / block;
N_VConst(ZERO, Jv);
jtvKernel<<<grid,block>>>(vdata, Jvdata, MX, MY, hordc, horac, verdc);
return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Print first lines of output (problem description) */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax,
UserData data)
{
printf("\n2-D Advection-Diffusion Equation\n");
printf("Mesh dimensions = %" DSYM " X %" DSYM "\n", data->MX, data->MY);
printf("Total system size = %" DSYM "\n", data->NEQ);
printf("Tolerance parameters: reltol = %" GSYM " abstol = %" GSYM "\n\n",
reltol, abstol);
printf("At t = %" GSYM " max.norm(u) =%14.6" ESYM " \n", T0, umax);
return;
}
/* Print current value */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
printf("At t = %4.2" FSYM " max.norm(u) =%14.6" ESYM " nst = %4ld\n", t, umax, nst);
return;
}
/* Get and print some final statistics */
static void PrintFinalStats(void *cvode_mem)
{
long lenrw, leniw ;
long lenrwLS, leniwLS;
long int nst, nfe, nsetups, nni, ncfn, netf;
long int nli, npe, nps, ncfl, nfeLS;
int retval;
retval = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw);
check_retval(&retval, "CVodeGetWorkSpace", 1);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
retval = CVodeGetLinWorkSpace(cvode_mem, &lenrwLS, &leniwLS);
check_retval(&retval, "CVodeGetLinWorkSpace", 1);
retval = CVodeGetNumLinIters(cvode_mem, &nli);
check_retval(&retval, "CVodeGetNumLinIters", 1);
retval = CVodeGetNumPrecEvals(cvode_mem, &npe);
check_retval(&retval, "CVodeGetNumPrecEvals", 1);
retval = CVodeGetNumPrecSolves(cvode_mem, &nps);
check_retval(&retval, "CVodeGetNumPrecSolves", 1);
retval = CVodeGetNumLinConvFails(cvode_mem, &ncfl);
check_retval(&retval, "CVodeGetNumLinConvFails", 1);
retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS);
check_retval(&retval, "CVodeGetNumLinRhsEvals", 1);
printf("\nFinal Statistics.. \n\n");
printf("lenrw = %5ld leniw = %5ld\n" , lenrw, leniw);
printf("lenrwLS = %5ld leniwLS = %5ld\n" , lenrwLS, leniwLS);
printf("nst = %5ld\n" , nst);
printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS);
printf("nni = %5ld nli = %5ld\n" , nni, nli);
printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf);
printf("npe = %5ld nps = %5ld\n" , npe, nps);
printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl);
return;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns an integer value so check if
retval >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
int *retval;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && returnvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
/* Check if retval < 0 */
else if (opt == 1) {
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
funcname, *retval);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && returnvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
return(0);
}
//------------------------------------------------------------------------
#define FULL_MASK 0xffffffff
/*
* OWN KERNELS
*/
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_data_kernel(float2 *g_idata_global, float2 *g_idata_n, float *g_odata,
unsigned int n) {
float *sdata = SharedMemory<float>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float mySum = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
mySum += (g_idata_global[i].x - g_idata_n[i].x) * (g_idata_global[i].x - g_idata_n[i].x);
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
mySum += (g_idata_global[i + blockSize].x - g_idata_n[i + blockSize].x) *
(g_idata_global[i + blockSize].x - g_idata_n[i + blockSize].x);
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = mySum;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if (tid < 32) {
/* fetch final intermediate sum from 2nd warp */
if (blockSize >= 64)
mySum += sdata[tid + 32];
/* reduce final warp using shuffle */
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(FULL_MASK, mySum, offset);
}
}
#else
/* fully unroll reduction within a single warp */
if ((blockSize >= 64) && (tid < 32)) {
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
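/* Added sketch: reduce_data_kernel leaves one partial sum per block in g_odata, so a
 * second pass is still needed.  A minimal host-side finish, assuming 'blocks' partial
 * sums were written to 'd_odata' (hypothetical names; needs <vector> and <numeric>): */
#if 0
std::vector<float> h_partial(blocks);
cudaMemcpy(h_partial.data(), d_odata, blocks * sizeof(float), cudaMemcpyDeviceToHost);
float data_energy = std::accumulate(h_partial.begin(), h_partial.end(), 0.f);
#endif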
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_reg_sobolev_kernel(Mat4f *g_idata, float *g_odata, unsigned int n) {
float *sdata = SharedMemory<float>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float mySum = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
     * gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
mySum += norm_sq(g_idata[i].data[0]) + norm_sq(g_idata[i].data[1]) + norm_sq(g_idata[i].data[2]);
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
mySum += norm_sq(g_idata[i + blockSize].data[0]) + norm_sq(g_idata[i + blockSize].data[1]) +
norm_sq(g_idata[i + blockSize].data[2]);
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = mySum;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if (tid < 32) {
/* fetch final intermediate sum from 2nd warp */
if (blockSize >= 64)
mySum += sdata[tid + 32];
/* reduce final warp using shuffle */
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(FULL_MASK, mySum, offset);
}
}
#else
/* fully unroll reduction within a single warp */
if ((blockSize >= 64) && (tid < 32)) {
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_voxel_max_energy_kernel(float2 *d_idata_global, float2 *d_idata_n,
Mat4f *d_idata_reg, float2 *d_o_data, float w_reg,
unsigned int n) {
float2 *sdata = SharedMemory<float2>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float2 local_max;
local_max.x = 0.f;
local_max.y = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
     * gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
float temp = (d_idata_global[i].x - d_idata_n[i].x) * (d_idata_global[i].x - d_idata_n[i].x) +
w_reg * (norm_sq(d_idata_reg[i].data[0]) + norm_sq(d_idata_reg[i].data[1]) +
norm_sq(d_idata_reg[i].data[2]));
if (temp > local_max.x) {
local_max.x = temp;
local_max.y = (float) i;
}
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
temp = (d_idata_global[i + blockSize].x - d_idata_n[i + blockSize].x) *
(d_idata_global[i + blockSize].x - d_idata_n[i + blockSize].x) +
w_reg * (norm_sq(d_idata_reg[i + blockSize].data[0]) + norm_sq(d_idata_reg[i + blockSize].data[1]) +
norm_sq(d_idata_reg[i + blockSize].data[2]));
if (temp > local_max.x) {
local_max.x = temp;
local_max.y = (float) i + blockSize;
}
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = local_max;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
if (sdata[tid + 256].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 256];
}
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (sdata[tid + 128].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 128];
}
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (sdata[tid + 64].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 64];
}
}
__syncthreads();
if ((blockSize >= 64) && (tid < 32)) {
if (sdata[tid + 32].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 32];
}
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (sdata[tid + 16].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 16];
}
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (sdata[tid + 8].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 8];
}
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (sdata[tid + 4].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 4];
}
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (sdata[tid + 2].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 2];
}
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
if (sdata[tid + 1].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 1];
}
}
__syncthreads();
/* write result for this block to global mem */
if (tid == 0) {
d_o_data[blockIdx.x] = local_max;
}
}
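/* Added note: as with the other reductions in this file, the kernel above only writes
 * one (max energy, flat index) pair per block to d_o_data, so the caller still has to
 * take the maximum over the per-block results.  Storing the index in the .y float also
 * means indices are only exact up to 2^24, which may matter for very large volumes. */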
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_max_kernel(float4 *updates, float2 *g_o_max_data, unsigned int n) {
float2 *sdata = SharedMemory<float2>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float2 local_max;
local_max.x = 0.f;
local_max.y = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
     * gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
if (norm(updates[i]) > local_max.x) {
local_max.x = norm(updates[i]);
local_max.y = (float) i;
}
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
if (norm(updates[i + blockSize]) > local_max.x) {
local_max.x = norm(updates[i + blockSize]);
local_max.y = (float) i + blockSize;
}
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = local_max;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
if (sdata[tid + 256].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 256];
}
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (sdata[tid + 128].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 128];
}
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (sdata[tid + 64].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 64];
}
}
__syncthreads();
if ((blockSize >= 64) && (tid < 32)) {
if (sdata[tid + 32].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 32];
}
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (sdata[tid + 16].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 16];
}
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (sdata[tid + 8].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 8];
}
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (sdata[tid + 4].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 4];
}
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (sdata[tid + 2].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 2];
}
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
if (sdata[tid + 1].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 1];
}
}
__syncthreads();
/* write result for this block to global mem */
if (tid == 0) {
g_o_max_data[blockIdx.x] = local_max;
}
}
/* wrapper function for kernel launch */
void sobfu::device::reduce_data(int size, int threads, int blocks, float2 *d_idata_global, float2 *d_idata_n,
float *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_data_kernel<512, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 256:
reduce_data_kernel<256, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 128:
reduce_data_kernel<128, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 64:
reduce_data_kernel<64, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 32:
reduce_data_kernel<32, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 16:
reduce_data_kernel<16, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 8:
reduce_data_kernel<8, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 4:
reduce_data_kernel<4, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 2:
reduce_data_kernel<2, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 1:
reduce_data_kernel<1, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_data_kernel<512, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 256:
reduce_data_kernel<256, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 128:
reduce_data_kernel<128, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 64:
reduce_data_kernel<64, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 32:
reduce_data_kernel<32, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 16:
reduce_data_kernel<16, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 8:
reduce_data_kernel<8, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 4:
reduce_data_kernel<4, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 2:
reduce_data_kernel<2, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 1:
reduce_data_kernel<1, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
}
}
}
void sobfu::device::reduce_reg_sobolev(int size, int threads, int blocks, Mat4f *d_idata, float *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_reg_sobolev_kernel<512, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce_reg_sobolev_kernel<256, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce_reg_sobolev_kernel<128, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce_reg_sobolev_kernel<64, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce_reg_sobolev_kernel<32, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce_reg_sobolev_kernel<16, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce_reg_sobolev_kernel<8, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce_reg_sobolev_kernel<4, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce_reg_sobolev_kernel<2, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce_reg_sobolev_kernel<1, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_reg_sobolev_kernel<512, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce_reg_sobolev_kernel<256, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce_reg_sobolev_kernel<128, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce_reg_sobolev_kernel<64, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce_reg_sobolev_kernel<32, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce_reg_sobolev_kernel<16, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce_reg_sobolev_kernel<8, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce_reg_sobolev_kernel<4, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce_reg_sobolev_kernel<2, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce_reg_sobolev_kernel<1, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
}
}
void sobfu::device::reduce_voxel_max_energy(int size, int threads, int blocks, float2 *d_idata_global,
float2 *d_idata_n, Mat4f *d_idata_reg, float w_reg, float2 *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float2) : threads * sizeof(float2);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_voxel_max_energy_kernel<512, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 256:
reduce_voxel_max_energy_kernel<256, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 128:
reduce_voxel_max_energy_kernel<128, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 64:
reduce_voxel_max_energy_kernel<64, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 32:
reduce_voxel_max_energy_kernel<32, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 16:
reduce_voxel_max_energy_kernel<16, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 8:
reduce_voxel_max_energy_kernel<8, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 4:
reduce_voxel_max_energy_kernel<4, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 2:
reduce_voxel_max_energy_kernel<2, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 1:
reduce_voxel_max_energy_kernel<1, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_voxel_max_energy_kernel<512, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 256:
reduce_voxel_max_energy_kernel<256, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 128:
reduce_voxel_max_energy_kernel<128, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 64:
reduce_voxel_max_energy_kernel<64, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 32:
reduce_voxel_max_energy_kernel<32, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 16:
reduce_voxel_max_energy_kernel<16, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 8:
reduce_voxel_max_energy_kernel<8, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 4:
reduce_voxel_max_energy_kernel<4, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 2:
reduce_voxel_max_energy_kernel<2, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 1:
reduce_voxel_max_energy_kernel<1, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
}
}
}
void sobfu::device::reduce_max(int size, int threads, int blocks, float4 *updates, float2 *d_o_max_data) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float2) : threads * sizeof(float2);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_max_kernel<512, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 256:
reduce_max_kernel<256, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 128:
reduce_max_kernel<128, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 64:
reduce_max_kernel<64, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 32:
reduce_max_kernel<32, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 16:
reduce_max_kernel<16, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 8:
reduce_max_kernel<8, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 4:
reduce_max_kernel<4, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 2:
reduce_max_kernel<2, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 1:
reduce_max_kernel<1, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_max_kernel<512, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 256:
reduce_max_kernel<256, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 128:
reduce_max_kernel<128, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 64:
reduce_max_kernel<64, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 32:
reduce_max_kernel<32, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 16:
reduce_max_kernel<16, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 8:
reduce_max_kernel<8, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 4:
reduce_max_kernel<4, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 2:
reduce_max_kernel<2, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 1:
reduce_max_kernel<1, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
}
}
}
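/* Added usage sketch: one way a caller might size and invoke these wrappers.  The
 * 256-thread cap and the nextPow2 helper follow the classic CUDA reduction sample and
 * are assumptions here, not something prescribed by sobfu itself. */
#if 0
int n       = dim_x * dim_y * dim_z;                 /* hypothetical element count */
int threads = (n < 512) ? nextPow2((n + 1) / 2) : 256;
int blocks  = (n + threads * 2 - 1) / (threads * 2); /* each thread reduces >= 2 elements */
sobfu::device::reduce_data(n, threads, blocks, d_phi_global, d_phi_n, d_partial_sums);
/* d_partial_sums now holds one value per block; sum those to obtain the data energy. */
#endif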
|
the_stack
|
#define IDX2(X, n1, n2, i1, i2) (X[(i2)*(n1) + (i1)])
#define IDX3(X, n1, n2, n3, i1, i2, i3) (X[(i3)*((n1)*(n2)) + (i2)*(n1) + (i1)])
#define IDX4(X, n1, n2, n3, n4, i1, i2, i3, i4) (X[(i4)*((n1)*(n2)*(n3)) + (i3)*((n1)*(n2)) + (i2)*(n1) + (i1)])
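/* Added note: these macros index MATLAB-style column-major storage, i.e.
 * IDX3(X, n1, n2, n3, i1, i2, i3) is X[i3*n1*n2 + i2*n1 + i1].  For example, with
 * n1 = 2 and n2 = 3, IDX3(X, 2, 3, n3, 1, 2, 0) expands to X[0*6 + 2*2 + 1] = X[5]. */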
/* nnline_ker: evaluates xvar through a piecewise-linear (non-linear) mapping defined by the control points (xlab, ylab), one mapping per channel */
void __global__ nnline_ker(
const double *xlab, const double *ylab, const double *xvar,
double *yvar, int M, int N, int D, int P)
{
int k, l, p, q;
int total_number = M * N * D;
int n = blockDim.x * blockIdx.x + threadIdx.x;
int total_threads = gridDim.x * blockDim.x;
double margin = xlab[1] - xlab[0];
double margin_inv = 1 / margin;
for(; n<total_number; n += total_threads)
{
// n = idz*MN + idy*M + idx;
int idx = n % M;
int idy = (n-idx) % (M*N) / M;
int idz = (n-idy*M-idx) / (M*N);
k = floor((xvar[n] - xlab[0]) * margin_inv);
if(k < 0)
{
yvar[n] = xvar[n]- xlab[0] + IDX2(ylab, P, D, 0, idz);
}
else if(k >= P-1)
{
yvar[n] = xvar[n]- xlab[P-1] + IDX2(ylab, P, D, P-1, idz);
}
else
{
yvar[n] = (IDX2(ylab, P, D, k+1, idz) - IDX2(ylab, P, D, k, idz)) * (xvar[n] - xlab[k]) * margin_inv + IDX2(ylab, P, D, k, idz);
}
}
}
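/* Added reference (not in the original MEX file): a host-side version of the same
 * piecewise-linear rule, handy for checking one channel of nnline_ker on the CPU.
 * Assumes uniformly spaced control points xlab[0..P-1], per-channel values
 * ylab_d[0..P-1], and <math.h> for floor(). */
#if 0
static double nnline_ref(const double *xlab, const double *ylab_d, int P, double x)
{
    double margin = xlab[1] - xlab[0];
    int k = (int)floor((x - xlab[0]) / margin);
    if (k < 0)      return x - xlab[0]     + ylab_d[0];      /* slope-1 extrapolation below */
    if (k >= P - 1) return x - xlab[P - 1] + ylab_d[P - 1];  /* slope-1 extrapolation above */
    return ylab_d[k] + (ylab_d[k + 1] - ylab_d[k]) * (x - xlab[k]) / margin;
}
#endif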
/**/
void __global__ nngetp_ker(
const double *xlab,
const double *xvar, double *pind,
int M, int N, int D)
{
int total_number = M*N*D;
int n = blockDim.x * blockIdx.x + threadIdx.x;
int total_threads = gridDim.x * blockDim.x;
int idx, idy, idz;
double margin = xlab[1] - xlab[0];
double margin_inv = 1 / margin;
for(; n<total_number; n += total_threads)
{
idx = n % M;
idy = (n-idx) % (M*N) / M;
idz = (n-idy*M-idx) / (M*N);
IDX3(pind, M, N, D, idx, idy, idz) = floor((xvar[n] - xlab[0]) * margin_inv);
}
}
/* nnbackx_ker: back-propagation, computes the gradient with respect to xvar (nnbackw_ker below handles the gradient with respect to the control values ylab) */
void __global__ nnbackx_ker(
const double *xlab, const double *ylab,
const double *xvar, const double *yvar,
double *grad, int M, int N, int D, int P)
{
int k, l, p, q;
int total_number = M * N * D;
int n = blockDim.x * blockIdx.x + threadIdx.x;
int total_threads = gridDim.x * blockDim.x;
double margin = xlab[1] - xlab[0];
double margin_inv = 1 / margin;
for(; n<total_number; n += total_threads)
{
// n = idz*MN + idy*M + idx;
int idx = n % M;
int idy = (n-idx) % (M*N) / M;
int idz = (n-idy*M-idx) / (M*N);
k = floor((xvar[n] - xlab[0]) / margin);
if(k<0 || k>=P-1)
{
grad[n] = 1 * yvar[n];
}
else
{
grad[n] = ((IDX2(ylab, P, D, k+1, idz) - IDX2(ylab, P, D, k, idz)) * margin_inv) * yvar[n];
}
}
}
void __global__ nnbackw_ker(
const double *xlab, const double *ylab,
const double *xvar, const double *yvar, const double *pind,
double *grad, int M, int N, int D, int P)
{
// __shared__ double INDP[128][128];
// __shared__ double L[41];
// __shared__ double Y[128][128];
// __shared__ double X[128][128];
int m, n, p, q;
int total_number = D * P;
int k = blockDim.x * blockIdx.x + threadIdx.x;
int total_threads = gridDim.x * blockDim.x;
double margin = xlab[1] - xlab[0];
double margin_inv = 1 / (margin);
// load global memory to shared memory
// do computation
for(; k<total_number; k+=total_threads)
{
int idp = k % P;
int idd = (k-idp) / P;
double sum = 0;
for(m=0; m<M; m++)
{
for(n=0; n<N; n++)
{
//double temp = ;
p = (IDX3(pind, M, N, D, m, n, idd));//floor((temp - xlab[0]) / margin);
//if(p>=0 && p<P-1)
//{
if(p == idp-1 && p>=0 ) //&& p<P-1
{
// IDX2(grad, P, D, idp, idk) += (1-(IDX3(xvar, M, N, D, m, n, k) - xlab[p]) / margin) * IDX3(yvar, M, N, D, m, n, k);
// IDX2(grad, P, D, idp+1, idk) += (IDX3(xvar, M, N, D, m, n, k) - xlab[p]) / margin * IDX3(yvar, M, N, D, m, n, k);
sum += (IDX3(xvar, M, N, D, m, n, idd)- xlab[p]) * margin_inv * IDX3(yvar, M, N, D, m, n, idd);
}
else if(p == idp && p<P-1)
{
sum += (1 - (IDX3(xvar, M, N, D, m, n, idd) - xlab[p]) * margin_inv) * IDX3(yvar, M, N, D, m, n, idd);
}
//}
}
}
IDX2(grad, P, D, idp, idd) = sum;
}
}
void __global__ nnbackw_ker2(
const double *xlab, const double *ylab,
const double *xvar, const double *yvar, const double *pind,
double *grad, int M, int N, int D, int P)
{
//__shared__ double INDP[128*128];
__shared__ double L[41];
//__shared__ double Y[128*128];
//__shared__ double X[128*128];
int m, n, p, q;
int total_number = D * P;
int k = blockDim.x * blockIdx.x + threadIdx.x;
int total_threads = gridDim.x * blockDim.x;
double margin = xlab[1] - xlab[0];
double margin_inv = 1 / (margin);
// load global memory to shared memory
int idd = blockIdx.x; // t-th channel
/*for(int t = threadIdx.x; t < M * N; t += blockDim.x)
{
INDP[t] = pind[idd * M * N + t];
}
__syncthreads();*/
for(int t = threadIdx.x; t < P; t += blockDim.x)
{
L[t] = 0;
}
__syncthreads();
// do computation
for(int t = threadIdx.x; t < M * N; t += blockDim.x)
{
m = t % M;
n = (t - m) / M;
p = pind[idd * M * N + t];
if(p>=0 && p<P-1)
{
double t1 = IDX3(xvar, M, N, D, m, n, idd);
double t2 = IDX3(yvar, M, N, D, m, n, idd);
            /* NOTE: the real weighted contributions are commented out and stubbed with 1,
               and these += on shared L[] are not synchronized between threads of the block,
               so this kernel (never called from mexFunction) reads as an unfinished
               experiment rather than a working code path. */
            L[p] += 1; //(1 - (t1 - xlab[p]) * margin_inv) * t2;
            L[p+1] += 1; //(t1 - xlab[p]) * margin_inv * t2;
}
}
__syncthreads();
for(int t = threadIdx.x; t < P; t += blockDim.x)
{
IDX2(grad, P, D, t, idd) = L[t];
}
/*
for(; k<total_number; k+=total_threads)
{
int idp = k % P;
int idd = (k-idp) / P;
double sum = 0;
for(m=0; m<M; m++)
{
for(n=0; n<N; n++)
{
//double temp = ;
p = INDP; //(IDX3(pind, M, N, D, m, n, idd));//floor((temp - xlab[0]) / margin);
//if(p>=0 && p<P-1)
//{
if(p == idp-1 && p>=0 ) //&& p<P-1
{
// IDX2(grad, P, D, idp, idk) += (1-(IDX3(xvar, M, N, D, m, n, k) - xlab[p]) / margin) * IDX3(yvar, M, N, D, m, n, k);
// IDX2(grad, P, D, idp+1, idk) += (IDX3(xvar, M, N, D, m, n, k) - xlab[p]) / margin * IDX3(yvar, M, N, D, m, n, k);
sum += (IDX3(xvar, M, N, D, m, n, idd)- xlab[p]) * margin_inv * IDX3(yvar, M, N, D, m, n, idd);
}
else if(p == idp && p<P-1)
{
sum += (1 - (IDX3(xvar, M, N, D, m, n, idd) - xlab[p]) * margin_inv) * IDX3(yvar, M, N, D, m, n, idd);
}
//}
}
}
IDX2(grad, P, D, idp, idd) = sum;
} */
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
/**/
mxGPUArray const *xlab;
mxGPUArray const *ylab;
mxGPUArray const *xvar;
double const *d_xlab;
double const *d_ylab;
double const *d_xvar;
int M, N, D, P;
double margin;
int i, j, k, l, m, n;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/**/
int const threadsPerBlock = 256;
int blocksPerGrid;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/**/
xlab = mxGPUCreateFromMxArray(prhs[0]);
ylab = mxGPUCreateFromMxArray(prhs[1]);
xvar = mxGPUCreateFromMxArray(prhs[2]);
if(mxGPUGetClassID(xlab) != mxDOUBLE_CLASS || mxGPUGetClassID(ylab) != mxDOUBLE_CLASS || mxGPUGetClassID(xvar) != mxDOUBLE_CLASS)
{
mexErrMsgIdAndTxt(errId, errMsg);
}
d_xlab = (const double *)(mxGPUGetDataReadOnly(xlab));
d_ylab = (const double *)(mxGPUGetDataReadOnly(ylab));
d_xvar = (const double *)(mxGPUGetDataReadOnly(xvar));
/* get dimensions */
const mwSize *xlabdim = mxGPUGetDimensions(xlab);
const mwSize *ylabdim = mxGPUGetDimensions(ylab);
const mwSize *xvardim = mxGPUGetDimensions(xvar);
M = xvardim[0];
N = xvardim[1];
D = xvardim[2];
P = ylabdim[0];
if(nrhs == 3 && mxIsGPUArray(prhs[0]))
{
mxGPUArray *yvar;
double *d_yvar;
yvar = mxGPUCreateGPUArray(3, xvardim, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
d_yvar = (double *)(mxGPUGetData(yvar));
/**/
        /* cover all M*N*D elements (the kernel grid-strides, so a smaller launch would
           still be correct, it would just leave most of the work to a few blocks) */
        blocksPerGrid = (M * N * D + threadsPerBlock - 1) / threadsPerBlock;
nnline_ker<<<blocksPerGrid, threadsPerBlock>>>(d_xlab, d_ylab, d_xvar, d_yvar, M, N, D, P);
plhs[0] = mxGPUCreateMxArrayOnGPU(yvar);
mxGPUDestroyGPUArray(yvar);
}
else if(nrhs ==4 && mxIsGPUArray(prhs[0]))
{
mxGPUArray const *yvar;
double const *d_yvar;
mxGPUArray *xgra;
mxGPUArray *ygra;
double *d_xgra;
double *d_ygra;
yvar = mxGPUCreateFromMxArray(prhs[3]);
d_yvar = (const double *)(mxGPUGetDataReadOnly(yvar));
xgra = mxGPUCreateGPUArray(3, xvardim, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
ygra = mxGPUCreateGPUArray(2, ylabdim, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
d_xgra = (double *)(mxGPUGetData(xgra));
d_ygra = (double *)(mxGPUGetData(ygra));
/**/
blocksPerGrid = (N * M * D + threadsPerBlock - 1) / threadsPerBlock;
nnbackx_ker<<<blocksPerGrid, threadsPerBlock>>>(d_xlab, d_ylab, d_xvar, d_yvar, d_xgra, M, N, D, P);
mxGPUArray *pind;
pind = mxGPUCreateGPUArray(3, xvardim, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
double *d_pind;
d_pind = (double *)(mxGPUGetData(pind));
nngetp_ker<<<blocksPerGrid, threadsPerBlock>>>(d_xlab, d_xvar, d_pind, M, N, D);
int threadsPerBlock2 = threadsPerBlock;
blocksPerGrid = (D * P + threadsPerBlock2 - 1) / threadsPerBlock2;
nnbackw_ker<<<blocksPerGrid, threadsPerBlock2>>>(d_xlab, d_ylab, d_xvar, d_yvar, d_pind, d_ygra, M, N, D, P);
plhs[0] = mxGPUCreateMxArrayOnGPU(xgra);
plhs[1] = mxGPUCreateMxArrayOnGPU(ygra);
mxGPUDestroyGPUArray(xgra);
mxGPUDestroyGPUArray(ygra);
mxGPUDestroyGPUArray(yvar);
mxGPUDestroyGPUArray(pind);
}
else
{
mexErrMsgIdAndTxt(errId, errMsg);
}
mxGPUDestroyGPUArray(xlab);
mxGPUDestroyGPUArray(ylab);
mxGPUDestroyGPUArray(xvar);
}
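/* Added usage note: given the argument checks above, this MEX file is invoked from
 * MATLAB with either three gpuArray inputs (forward pass: xlab, ylab, xvar -> yvar) or
 * four (backward pass: xlab, ylab, xvar, yvar -> [xgra, ygra]).  The entry-point name
 * seen from MATLAB depends on the compiled file name (e.g. via mexcuda), which is not
 * shown here. */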
|
the_stack
|
namespace cuml {
namespace genetic {
class GeneticProgramTest : public ::testing::Test {
public:
GeneticProgramTest()
: d_data(0, cudaStream_t(0)),
d_y(0, cudaStream_t(0)),
d_lYpred(0, cudaStream_t(0)),
d_lY(0, cudaStream_t(0)),
d_lunitW(0, cudaStream_t(0)),
d_lW(0, cudaStream_t(0)),
dx2(0, cudaStream_t(0)),
dy2(0, cudaStream_t(0)),
dw2(0, cudaStream_t(0)),
dyp2(0, cudaStream_t(0)),
stream(handle.get_stream())
{
}
protected:
void SetUp() override
{
// Params
hyper_params.population_size = 2;
hyper_params.random_state = 123;
hyper_params.num_features = 3;
// X[0] * X[1] + X[2] + 0.5
h_nodes1.push_back(node(node::type::add));
h_nodes1.push_back(node(node::type::add));
h_nodes1.push_back(node(node::type::mul));
h_nodes1.push_back(node(0));
h_nodes1.push_back(node(1));
h_nodes1.push_back(node(2));
h_nodes1.push_back(node(0.5f));
// 0.5*X[1] - 0.4*X[2]
h_nodes2.push_back(node(node::type::sub));
h_nodes2.push_back(node(node::type::mul));
h_nodes2.push_back(node(0.5f));
h_nodes2.push_back(node(1));
h_nodes2.push_back(node(node::type::mul));
h_nodes2.push_back(node(0.4f));
h_nodes2.push_back(node(2));
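    // Added note: the node vectors above are in prefix (pre-order) form; e.g. the
    // second program {sub, mul, 0.5, X1, mul, 0.4, X2} decodes as
    // sub(mul(0.5, X[1]), mul(0.4, X[2])) = 0.5*X[1] - 0.4*X[2], matching the comment
    // above it.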
// Programs
h_progs.resize(2);
h_progs[0].len = h_nodes1.size();
h_progs[0].nodes = new node[h_progs[0].len];
std::copy(h_nodes1.data(), h_nodes1.data() + h_nodes1.size(), h_progs[0].nodes);
h_progs[1].len = h_nodes2.size();
h_progs[1].nodes = new node[h_progs[1].len];
std::copy(h_nodes2.data(), h_nodes2.data() + h_nodes2.size(), h_progs[1].nodes);
// Loss weights
h_lunitW.resize(250, 1.0f);
// Smaller input
hw2.resize(5, 1.0f);
// Device memory
d_data.resize(75, stream);
d_y.resize(25, stream);
d_lYpred.resize(500, stream);
d_lY.resize(250, stream);
d_lunitW.resize(250, stream);
d_lW.resize(250, stream);
d_nodes1 = (node*)rmm::mr::get_current_device_resource()->allocate(7 * sizeof(node), stream);
d_nodes2 = (node*)rmm::mr::get_current_device_resource()->allocate(7 * sizeof(node), stream);
d_progs =
(program_t)rmm::mr::get_current_device_resource()->allocate(2 * sizeof(program), stream);
CUDA_CHECK(cudaMemcpyAsync(
d_lYpred.data(), h_lYpred.data(), 500 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(
d_lY.data(), h_lY.data(), 250 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(
d_lunitW.data(), h_lunitW.data(), 250 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(
d_lW.data(), h_lW.data(), 250 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(
d_data.data(), h_data.data(), 75 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(d_y.data(), h_y.data(), 25 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(d_nodes1, h_nodes1.data(), 7 * sizeof(node), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(d_nodes2, h_nodes2.data(), 7 * sizeof(node), cudaMemcpyHostToDevice, stream));
program tmp(h_progs[0]);
delete[] tmp.nodes;
tmp.nodes = d_nodes1;
CUDA_CHECK(cudaMemcpyAsync(&d_progs[0], &tmp, sizeof(program), cudaMemcpyHostToDevice, stream));
tmp.nodes = nullptr;
tmp = program(h_progs[1]);
delete[] tmp.nodes;
tmp.nodes = d_nodes2;
CUDA_CHECK(cudaMemcpyAsync(&d_progs[1], &tmp, sizeof(program), cudaMemcpyHostToDevice, stream));
tmp.nodes = nullptr;
// Small input
dx2.resize(15, stream);
dy2.resize(5, stream);
dw2.resize(5, stream);
dyp2.resize(10, stream);
CUDA_CHECK(
cudaMemcpyAsync(dx2.data(), hx2.data(), 15 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(dy2.data(), hy2.data(), 5 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(
cudaMemcpyAsync(dw2.data(), hw2.data(), 5 * sizeof(float), cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(
dyp2.data(), hyp2.data(), 10 * sizeof(float), cudaMemcpyHostToDevice, stream));
}
void TearDown() override
{
rmm::mr::get_current_device_resource()->deallocate(d_nodes1, 7 * sizeof(node), stream);
rmm::mr::get_current_device_resource()->deallocate(d_nodes2, 7 * sizeof(node), stream);
rmm::mr::get_current_device_resource()->deallocate(d_progs, 2 * sizeof(program), stream);
}
raft::handle_t handle;
cudaStream_t stream;
const int n_cols = 3;
const int n_progs = 2;
const int n_samples = 25;
const int n_samples2 = 5;
  const float tolerance = 0.025f;  // assuming up to 2.5% tolerance for results (for now)
// 25*3 datapoints generated using numpy
// y = X[0] * X[1] + X[2] + 0.5
std::vector<float> h_data{
-0.50446586, -2.06014071, 0.88514116, -2.3015387, 0.83898341, 1.65980218, -0.87785842,
0.31563495, 0.3190391, 0.53035547, 0.30017032, -0.12289023, -1.10061918, -0.0126646,
2.10025514, 1.13376944, -0.88762896, 0.05080775, -0.34934272, 2.18557541, 0.50249434,
-0.07557171, -0.52817175, -0.6871727, 0.51292982,
-1.44411381, 1.46210794, 0.28558733, 0.86540763, 0.58662319, 0.2344157, -0.17242821,
0.87616892, -0.7612069, -0.26788808, 0.61720311, -0.68372786, 0.58281521, -0.67124613,
0.19091548, -0.38405435, -0.19183555, 1.6924546, -1.1425182, 1.51981682, 0.90159072,
0.48851815, -0.61175641, -0.39675353, 1.25286816,
-1.39649634, -0.24937038, 0.93110208, -1.07296862, -0.20889423, -1.11731035, -1.09989127,
0.16003707, 1.74481176, -0.93576943, 0.12015895, 0.90085595, 0.04221375, -0.84520564,
-0.63699565, -0.3224172, 0.74204416, -0.74715829, -0.35224985, 1.13162939, 1.14472371,
-0.29809284, 1.62434536, -0.69166075, -0.75439794};
std::vector<float> h_y{-0.16799022, -2.76151846, 1.68388718, -2.56473777, 0.78327289,
-0.22822666, -0.44852371, 0.9365866, 2.001957, -0.57784534,
0.80542501, 1.48487942, -0.09924385, -0.33670458, 0.26397558,
-0.2578463, 1.41232295, -0.16116848, 0.54688057, 4.95330364,
2.09776794, 0.16498901, 2.44745782, 0.08097744, 0.3882355};
// Values for loss function tests (250 values each)
std::vector<float> h_lYpred{
0.06298f, 0.81894f, 0.12176f, 0.17104f, 0.12851f, 0.28721f, 0.85043f, 0.68120f, 0.57074f,
0.21796f, 0.96626f, 0.32337f, 0.21887f, 0.80867f, 0.96438f, 0.20052f, 0.28668f, 0.86931f,
0.71421f, 0.85405f, 0.13916f, 0.00316f, 0.59440f, 0.86299f, 0.67019f, 0.54309f, 0.82629f,
0.94563f, 0.01481f, 0.13665f, 0.77081f, 0.58024f, 0.02538f, 0.36610f, 0.13948f, 0.75034f,
0.80435f, 0.27488f, 0.74165f, 0.02921f, 0.51479f, 0.66415f, 0.27380f, 0.85304f, 0.95767f,
0.22758f, 0.38602f, 0.41555f, 0.53783f, 0.48663f, 0.11103f, 0.69397f, 0.21749f, 0.71930f,
0.28976f, 0.50971f, 0.68532f, 0.97518f, 0.71299f, 0.37629f, 0.56444f, 0.42280f, 0.51921f,
0.84366f, 0.30778f, 0.39493f, 0.74007f, 0.18280f, 0.22621f, 0.63083f, 0.46085f, 0.47259f,
0.65442f, 0.25453f, 0.23058f, 0.17460f, 0.30702f, 0.22421f, 0.37237f, 0.36660f, 0.29702f,
0.65276f, 0.30222f, 0.63844f, 0.99909f, 0.55084f, 0.05066f, 0.18914f, 0.36652f, 0.36765f,
0.93901f, 0.13575f, 0.72582f, 0.20223f, 0.06375f, 0.52581f, 0.77119f, 0.12127f, 0.27800f,
0.04008f, 0.01752f, 0.00394f, 0.68973f, 0.91931f, 0.48011f, 0.48363f, 0.09770f, 0.84381f,
0.80244f, 0.42710f, 0.82164f, 0.63239f, 0.08117f, 0.46195f, 0.49832f, 0.05717f, 0.16886f,
0.22311f, 0.45326f, 0.50748f, 0.19089f, 0.78211f, 0.34272f, 0.38456f, 0.64874f, 0.18216f,
0.64757f, 0.26900f, 0.20780f, 0.87067f, 0.16903f, 0.77285f, 0.70580f, 0.54404f, 0.97395f,
0.52550f, 0.81364f, 0.30085f, 0.36754f, 0.42492f, 0.79470f, 0.31590f, 0.26322f, 0.68332f,
0.96523f, 0.31110f, 0.97029f, 0.80217f, 0.77125f, 0.36302f, 0.13444f, 0.28420f, 0.20442f,
0.89692f, 0.50515f, 0.61952f, 0.48237f, 0.35080f, 0.75606f, 0.85438f, 0.70647f, 0.91793f,
0.24037f, 0.72867f, 0.84713f, 0.39838f, 0.49553f, 0.32876f, 0.22610f, 0.86573f, 0.99232f,
0.71321f, 0.30179f, 0.01941f, 0.84838f, 0.58587f, 0.43339f, 0.29490f, 0.07191f, 0.88531f,
0.26896f, 0.36085f, 0.96043f, 0.70679f, 0.39593f, 0.37642f, 0.76078f, 0.63827f, 0.36346f,
0.12755f, 0.07074f, 0.67744f, 0.35042f, 0.30773f, 0.15577f, 0.64096f, 0.05035f, 0.32882f,
0.33640f, 0.54106f, 0.76279f, 0.00414f, 0.17373f, 0.83551f, 0.18176f, 0.91190f, 0.03559f,
0.31992f, 0.86311f, 0.04054f, 0.49714f, 0.53551f, 0.65316f, 0.15681f, 0.80268f, 0.44978f,
0.26365f, 0.37162f, 0.97630f, 0.82863f, 0.73267f, 0.93207f, 0.47129f, 0.70817f, 0.57300f,
0.34240f, 0.89749f, 0.79844f, 0.67992f, 0.72523f, 0.43319f, 0.07310f, 0.61074f, 0.93830f,
0.90822f, 0.08077f, 0.28048f, 0.04549f, 0.44870f, 0.10337f, 0.93911f, 0.13464f, 0.16080f,
0.94620f, 0.15276f, 0.56239f, 0.38684f, 0.12437f, 0.98149f, 0.80650f, 0.44040f, 0.59698f,
0.82197f, 0.91634f, 0.89667f, 0.96333f, 0.21204f, 0.47457f, 0.95737f, 0.08697f, 0.50921f,
0.58647f, 0.71985f, 0.39455f, 0.73240f, 0.04227f, 0.74879f, 0.34403f, 0.94240f, 0.45158f,
0.83860f, 0.51819f, 0.87374f, 0.70416f, 0.52987f, 0.72727f, 0.53649f, 0.74878f, 0.13247f,
0.91358f, 0.61871f, 0.50048f, 0.04681f, 0.56370f, 0.68393f, 0.51947f, 0.85044f, 0.24416f,
0.39354f, 0.33526f, 0.66574f, 0.65638f, 0.15506f, 0.84167f, 0.84663f, 0.92094f, 0.14140f,
0.69364f, 0.40575f, 0.63543f, 0.35074f, 0.68887f, 0.70662f, 0.90424f, 0.09042f, 0.57486f,
0.52239f, 0.40711f, 0.82103f, 0.08674f, 0.14005f, 0.44922f, 0.81244f, 0.99037f, 0.26577f,
0.64744f, 0.25391f, 0.47913f, 0.09676f, 0.26023f, 0.86098f, 0.24472f, 0.15364f, 0.38980f,
0.02943f, 0.59390f, 0.25683f, 0.38976f, 0.90195f, 0.27418f, 0.45255f, 0.74992f, 0.07155f,
0.95425f, 0.77560f, 0.41618f, 0.27963f, 0.32602f, 0.75690f, 0.09356f, 0.73795f, 0.59604f,
0.97534f, 0.27677f, 0.06770f, 0.59517f, 0.64286f, 0.36224f, 0.22017f, 0.83546f, 0.21461f,
0.24793f, 0.08248f, 0.16668f, 0.74429f, 0.66674f, 0.68034f, 0.34710f, 0.82358f, 0.47555f,
0.50109f, 0.09328f, 0.98566f, 0.99481f, 0.41391f, 0.86833f, 0.38645f, 0.49203f, 0.44547f,
0.55391f, 0.87598f, 0.85542f, 0.56283f, 0.61385f, 0.70564f, 0.29067f, 0.91150f, 0.64787f,
0.18255f, 0.03792f, 0.69633f, 0.29029f, 0.31412f, 0.49111f, 0.34615f, 0.43144f, 0.31616f,
0.15405f, 0.44915f, 0.12777f, 0.09491f, 0.26003f, 0.71537f, 0.19450f, 0.91570f, 0.28420f,
0.77892f, 0.53199f, 0.66034f, 0.01978f, 0.35415f, 0.03664f, 0.42675f, 0.41304f, 0.33804f,
0.11290f, 0.89985f, 0.75959f, 0.59417f, 0.53113f, 0.38898f, 0.76259f, 0.83973f, 0.75809f,
0.65900f, 0.55141f, 0.14175f, 0.44740f, 0.95823f, 0.77612f, 0.48749f, 0.74491f, 0.57491f,
0.59119f, 0.26665f, 0.48599f, 0.85947f, 0.46245f, 0.08129f, 0.00825f, 0.29669f, 0.43499f,
0.47998f, 0.60173f, 0.26611f, 0.01223f, 0.81734f, 0.77892f, 0.79022f, 0.01394f, 0.45596f,
0.45259f, 0.32536f, 0.84229f, 0.43612f, 0.30531f, 0.10670f, 0.57758f, 0.65956f, 0.42007f,
0.32166f, 0.10552f, 0.63558f, 0.17990f, 0.50732f, 0.34599f, 0.16603f, 0.26309f, 0.04098f,
0.15997f, 0.79728f, 0.00528f, 0.35510f, 0.24344f, 0.07018f, 0.22062f, 0.92927f, 0.13373f,
0.50955f, 0.11199f, 0.75728f, 0.62117f, 0.18153f, 0.84993f, 0.04677f, 0.13013f, 0.92211f,
0.95474f, 0.88898f, 0.55561f, 0.22625f, 0.78700f, 0.73659f, 0.97613f, 0.02299f, 0.07724f,
0.78942f, 0.02193f, 0.05320f, 0.92053f, 0.35103f, 0.39305f, 0.24208f, 0.08225f, 0.78460f,
0.52144f, 0.32927f, 0.84725f, 0.36106f, 0.80349f};
std::vector<float> h_lY{
0.60960f, 0.61090f, 0.41418f, 0.90827f, 0.76181f, 0.31777f, 0.04096f, 0.27290f, 0.56879f,
0.75461f, 0.73555f, 0.41598f, 0.59506f, 0.08768f, 0.99554f, 0.20613f, 0.13546f, 0.32044f,
0.41057f, 0.38501f, 0.27894f, 0.24027f, 0.91171f, 0.26811f, 0.55595f, 0.71153f, 0.69739f,
0.53411f, 0.78365f, 0.60914f, 0.41856f, 0.61688f, 0.28741f, 0.28708f, 0.37029f, 0.47945f,
0.40612f, 0.75762f, 0.91728f, 0.70406f, 0.26717f, 0.71175f, 0.39243f, 0.35904f, 0.38469f,
0.08664f, 0.38611f, 0.35606f, 0.52801f, 0.96986f, 0.84780f, 0.56942f, 0.41712f, 0.17005f,
0.79105f, 0.74347f, 0.83473f, 0.06303f, 0.37864f, 0.66666f, 0.78153f, 0.11061f, 0.33880f,
0.82412f, 0.47141f, 0.53043f, 0.51184f, 0.34172f, 0.57087f, 0.88349f, 0.32870f, 0.11501f,
0.35460f, 0.23630f, 0.37728f, 0.96120f, 0.19871f, 0.78119f, 0.23860f, 0.70615f, 0.46745f,
0.43392f, 0.49967f, 0.39721f, 0.53185f, 0.27827f, 0.14435f, 0.82008f, 0.43275f, 0.82113f,
0.06428f, 0.53528f, 0.21594f, 0.86172f, 0.41172f, 0.96051f, 0.54487f, 0.01971f, 0.71222f,
0.04258f, 0.36715f, 0.24844f, 0.12494f, 0.34132f, 0.87059f, 0.70216f, 0.33533f, 0.10020f,
0.79337f, 0.26059f, 0.81314f, 0.54342f, 0.79115f, 0.71730f, 0.70860f, 0.00998f, 0.64761f,
0.01206f, 0.53463f, 0.94436f, 0.19639f, 0.23296f, 0.55945f, 0.14070f, 0.57765f, 0.50908f,
0.95720f, 0.95611f, 0.12311f, 0.95382f, 0.23116f, 0.36939f, 0.66395f, 0.76282f, 0.16314f,
0.00186f, 0.77662f, 0.58799f, 0.18155f, 0.10355f, 0.45982f, 0.34359f, 0.59476f, 0.72759f,
0.77310f, 0.50736f, 0.43720f, 0.63624f, 0.84569f, 0.73073f, 0.04179f, 0.64806f, 0.19924f,
0.96082f, 0.06270f, 0.27744f, 0.59384f, 0.07317f, 0.10979f, 0.47857f, 0.60274f, 0.54937f,
0.58563f, 0.45247f, 0.84396f, 0.43945f, 0.47719f, 0.40808f, 0.81152f, 0.48558f, 0.21577f,
0.93935f, 0.08222f, 0.43114f, 0.68239f, 0.78870f, 0.24300f, 0.84829f, 0.44764f, 0.57347f,
0.78353f, 0.30614f, 0.39493f, 0.40320f, 0.72849f, 0.39406f, 0.89363f, 0.33323f, 0.38395f,
0.94783f, 0.46082f, 0.30498f, 0.17110f, 0.14083f, 0.48474f, 0.45024f, 0.92586f, 0.77450f,
0.43503f, 0.45188f, 0.80866f, 0.24937f, 0.34205f, 0.35942f, 0.79689f, 0.77224f, 0.14354f,
0.54387f, 0.50787f, 0.31753f, 0.98414f, 0.03261f, 0.89748f, 0.82350f, 0.60235f, 0.00041f,
0.99696f, 0.39894f, 0.52078f, 0.54421f, 0.33405f, 0.81143f, 0.49764f, 0.44993f, 0.37257f,
0.16238f, 0.81337f, 0.51335f, 0.96118f, 0.98901f, 0.95259f, 0.36557f, 0.24654f, 0.99554f,
0.33408f, 0.01734f, 0.85852f, 0.41286f, 0.67371f, 0.93781f, 0.04977f, 0.17298f, 0.91502f,
0.70144f, 0.97356f, 0.12571f, 0.64375f, 0.10033f, 0.36798f, 0.90001f};
// Unitary weights
std::vector<float> h_lunitW;
// Non-unitary weights
std::vector<float> h_lW{
0.38674f, 0.59870f, 0.36761f, 0.59731f, 0.99057f, 0.24131f, 0.29727f, 0.94112f, 0.78962f,
0.71998f, 0.10983f, 0.33620f, 0.37988f, 0.14344f, 0.37377f, 0.06403f, 0.22877f, 0.21993f,
0.11340f, 0.28554f, 0.45453f, 0.14344f, 0.11715f, 0.23184f, 0.08622f, 0.26746f, 0.49058f,
0.06981f, 0.41885f, 0.04422f, 0.99925f, 0.71709f, 0.11910f, 0.49944f, 0.98116f, 0.66316f,
0.11646f, 0.25202f, 0.93223f, 0.81414f, 0.20446f, 0.23813f, 0.45380f, 0.83618f, 0.95958f,
0.72684f, 0.86808f, 0.96348f, 0.76092f, 0.86071f, 0.44155f, 0.85212f, 0.76185f, 0.51460f,
0.65627f, 0.38269f, 0.08251f, 0.07506f, 0.22281f, 0.05325f, 0.71190f, 0.62834f, 0.19348f,
0.44271f, 0.23677f, 0.81817f, 0.73055f, 0.48816f, 0.57524f, 0.45278f, 0.27998f, 0.35699f,
0.26875f, 0.63546f, 0.50990f, 0.21046f, 0.76892f, 0.74433f, 0.39302f, 0.55071f, 0.24554f,
0.56793f, 0.67852f, 0.43290f, 0.97266f, 0.52475f, 0.88402f, 0.79439f, 0.01496f, 0.46426f,
0.15537f, 0.35364f, 0.42962f, 0.47999f, 0.06357f, 0.78531f, 0.62165f, 0.45226f, 0.84973f,
0.63747f, 0.00593f, 0.31520f, 0.13150f, 0.47776f, 0.56420f, 0.21679f, 0.32107f, 0.62491f,
0.33747f, 0.86599f, 0.82573f, 0.26970f, 0.50087f, 0.86947f, 0.47433f, 0.91848f, 0.19534f,
0.45760f, 0.38407f, 0.18953f, 0.30000f, 0.37964f, 0.42509f, 0.55408f, 0.74500f, 0.44484f,
0.67679f, 0.12214f, 0.68380f, 0.74917f, 0.87429f, 0.04355f, 0.98426f, 0.88845f, 0.88318f,
0.64393f, 0.90849f, 0.87948f, 0.22915f, 0.86887f, 0.58676f, 0.51575f, 0.56549f, 0.41412f,
0.06593f, 0.40484f, 0.72931f, 0.02289f, 0.96391f, 0.61075f, 0.91701f, 0.29698f, 0.37095f,
0.42087f, 0.73251f, 0.93271f, 0.32687f, 0.48981f, 0.01081f, 0.11985f, 0.46962f, 0.02569f,
0.83989f, 0.21767f, 0.82370f, 0.35174f, 0.94939f, 0.46032f, 0.81569f, 0.66635f, 0.07019f,
0.68926f, 0.65628f, 0.19914f, 0.17936f, 0.64540f, 0.09031f, 0.05875f, 0.88790f, 0.83687f,
0.46605f, 0.08537f, 0.49514f, 0.44504f, 0.67687f, 0.28943f, 0.74668f, 0.43207f, 0.70990f,
0.62513f, 0.56137f, 0.94399f, 0.75806f, 0.41840f, 0.38428f, 0.30754f, 0.62633f, 0.23173f,
0.40750f, 0.49968f, 0.05536f, 0.11405f, 0.34185f, 0.36367f, 0.06341f, 0.66834f, 0.42899f,
0.08343f, 0.72266f, 0.33155f, 0.74943f, 0.15387f, 0.02475f, 0.35741f, 0.15806f, 0.35406f,
0.18226f, 0.31042f, 0.36047f, 0.62366f, 0.30036f, 0.66625f, 0.99695f, 0.99472f, 0.06743f,
0.56804f, 0.28185f, 0.77387f, 0.58763f, 0.77824f, 0.03720f, 0.99490f, 0.73720f, 0.93635f,
0.85669f, 0.91634f, 0.26065f, 0.97469f, 0.03867f, 0.52306f, 0.99167f, 0.90332f, 0.88546f,
0.07109f, 0.94168f, 0.10211f, 0.95949f, 0.86314f, 0.59917f, 0.41948f};
// Setup smaller input
std::vector<float> hx2 = {0.06298,
0.96626,
0.13916,
0.77081,
0.51479,
0.81894,
0.32337,
0.00316,
0.58024,
0.66415,
0.12176,
0.21887,
0.59440,
0.02538,
0.27380};
std::vector<float> hy2 = {0.11103, 0.69397, 0.21749, 0.71930, 0.28976};
std::vector<float> hyp2 = {
0.67334, 1.03133, 1.09484, 0.97263, 1.1157, 0.36077, 0.07413, -0.23618, 0.27997, 0.22255};
std::vector<float> hw2;
// Nodes and programs
std::vector<node> h_nodes1;
std::vector<node> h_nodes2;
std::vector<program> h_progs;
// Device ptrs
node* d_nodes1;
node* d_nodes2;
program_t d_progs;
rmm::device_uvector<float> d_data;
rmm::device_uvector<float> d_y;
rmm::device_uvector<float> d_lYpred;
rmm::device_uvector<float> d_lY;
rmm::device_uvector<float> d_lunitW;
rmm::device_uvector<float> d_lW;
rmm::device_uvector<float> dx2;
rmm::device_uvector<float> dy2;
rmm::device_uvector<float> dw2;
rmm::device_uvector<float> dyp2;
param hyper_params;
};
TEST_F(GeneticProgramTest, PearsonCoeff)
{
raft::CompareApproxAbs<float> compApprox(tolerance);
float h_expected_score[2] = {0.09528403f, 0.08269963f};
float h_score[2] = {0.0f, 0.0f};
rmm::device_uvector<float> d_score(2, stream);
hyper_params.metric = metric_t::pearson;
// Unitary weights
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Unitary weights - small
h_expected_score[0] = 0.3247632f;
h_expected_score[1] = 0.0796348f;
compute_metric(
handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Non-unitary weights
h_expected_score[0] = 0.14329584f;
h_expected_score[1] = 0.09064283f;
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
}
TEST_F(GeneticProgramTest, SpearmanCoeff)
{
raft::CompareApproxAbs<float> compApprox(tolerance);
float h_score[2] = {0.0f, 0.0f};
rmm::device_uvector<float> d_score(2, stream);
hyper_params.metric = metric_t::spearman;
// Unitary weights
float h_expected_score[2] = {0.09268333f, 0.07529861f};
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Unitary weights - small
h_expected_score[0] = 0.10000f;
h_expected_score[1] = 0.10000f;
compute_metric(
handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Non-unitary weights
h_expected_score[0] = 0.14072408f;
h_expected_score[1] = 0.08157397f;
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
}
TEST_F(GeneticProgramTest, MeanSquareLoss)
{
raft::CompareApprox<float> compApprox(tolerance);
float h_score[2] = {0.0f, 0.0f};
rmm::device_uvector<float> d_score(2, stream);
hyper_params.metric = metric_t::mse;
// Unitary weights
float h_expected_score[2] = {0.14297023, 0.14242104};
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Unitary weights - small
h_expected_score[0] = 0.3892163f;
h_expected_score[1] = 0.1699830f;
compute_metric(
handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Non-unitary weights
h_expected_score[0] = 0.13842479f;
h_expected_score[1] = 0.14538825f;
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
}
TEST_F(GeneticProgramTest, MeanAbsoluteLoss)
{
raft::CompareApprox<float> compApprox(tolerance);
float h_score[2] = {0.0f, 0.0f};
rmm::device_uvector<float> d_score(2, stream);
hyper_params.metric = metric_t::mae;
// Unitary weights - big
float h_expected_score[2] = {0.30614017, 0.31275677};
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Unitary weights - small
h_expected_score[0] = 0.571255f;
h_expected_score[1] = 0.365957f;
compute_metric(
handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Non-unitary weights -big
h_expected_score[0] = 0.29643119f;
h_expected_score[1] = 0.31756123f;
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
}
TEST_F(GeneticProgramTest, RMSLoss)
{
raft::CompareApprox<float> compApprox(tolerance);
float h_score[2] = {0.0f, 0.0f};
rmm::device_uvector<float> d_score(2, stream);
hyper_params.metric = metric_t::rmse;
// Unitary weights
float h_expected_score[2] = {0.37811404, 0.37738713};
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Unitary weights - small
h_expected_score[0] = 0.6238720f;
h_expected_score[1] = 0.4122899f;
compute_metric(
handle, n_samples2, n_progs, dy2.data(), dyp2.data(), dw2.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Non-unitary weights
h_expected_score[0] = 0.37205482f;
h_expected_score[1] = 0.38129811f;
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
}
TEST_F(GeneticProgramTest, LogLoss)
{
raft::CompareApprox<float> compApprox(tolerance);
float h_score[2] = {0.0f, 0.0f};
rmm::device_uvector<float> d_score(2, stream);
hyper_params.metric = metric_t::logloss;
// Unitary weights
float h_expected_score[2] = {0.72276, 0.724011};
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lunitW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
// Non-unitary weights
h_expected_score[0] = 0.715887f;
h_expected_score[1] = 0.721293f;
compute_metric(
handle, 250, 2, d_lY.data(), d_lYpred.data(), d_lW.data(), d_score.data(), hyper_params);
CUDA_CHECK(
cudaMemcpyAsync(h_score, d_score.data(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream));
std::copy(h_score, h_score + 2, std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(compApprox(h_score[i], h_expected_score[i]));
}
}
TEST_F(GeneticProgramTest, ProgramExecution)
{
raft::CompareApprox<float> compApprox(tolerance);
  // Enable info-level logging
ML::Logger::get().setLevel(CUML_LEVEL_INFO);
// Allocate memory
std::vector<float> h_ypred(n_progs * n_samples, 0.0f);
rmm::device_uvector<float> d_ypred(n_progs * n_samples, stream);
// Execute programs
execute(handle, d_progs, n_samples, n_progs, d_data.data(), d_ypred.data());
CUDA_CHECK(cudaMemcpyAsync(h_ypred.data(),
d_ypred.data(),
n_progs * n_samples * sizeof(float),
cudaMemcpyDeviceToHost,
stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
// Check results
for (int i = 0; i < n_samples; ++i) {
ASSERT_TRUE(compApprox(h_ypred[i], h_y[i]));
}
for (int i = 0; i < n_samples; ++i) {
ASSERT_TRUE(compApprox(h_ypred[n_samples + i],
0.5 * h_data[n_samples + i] - 0.4 * h_data[2 * n_samples + i]));
}
}
TEST_F(GeneticProgramTest, ProgramFitnessScore)
{
raft::CompareApprox<float> compApprox(tolerance);
std::vector<metric_t> all_metrics = {
metric_t::mae, metric_t::mse, metric_t::rmse, metric_t::pearson, metric_t::spearman};
std::vector<float> hexpscores = {
0.57126, 0.36596, 0.38922, 0.16998, 0.62387, 0.41229, 0.32476, 0.07963, 0.10000, 0.10000};
std::vector<float> hactualscores(10);
rmm::device_uvector<float> dactualscores(10, stream);
// Start execution for all metrics
for (int i = 0; i < 5; ++i) {
hyper_params.metric = all_metrics[i];
find_batched_fitness(handle,
n_progs,
d_progs,
dactualscores.data() + 2 * i,
hyper_params,
n_samples2,
dx2.data(),
dy2.data(),
dw2.data());
CUDA_CHECK(cudaStreamSynchronize(stream));
}
CUDA_CHECK(cudaMemcpyAsync(hactualscores.data(),
dactualscores.data(),
10 * sizeof(float),
cudaMemcpyDeviceToHost,
stream));
CUDA_CHECK(cudaStreamSynchronize(stream));  // ensure the copy has completed before reading on the host
std::copy(
hactualscores.begin(), hactualscores.end(), std::ostream_iterator<float>(std::cerr, ";"));
std::cerr << std::endl;
for (int i = 0; i < 10; ++i) {
ASSERT_TRUE(compApprox(std::abs(hactualscores[i]), hexpscores[i]));
}
}
} // namespace genetic
} // namespace cuml
#pragma once
#include <gunrock/util/cta_work_distribution.cuh>
#include <gunrock/util/cta_work_progress.cuh>
#include <gunrock/util/kernel_runtime_stats.cuh>
#include <gunrock/oprtr/edge_map_partitioned/cta.cuh>
#include <gunrock/oprtr/advance/kernel_policy.cuh>
namespace gunrock {
namespace oprtr {
namespace edge_map_partitioned_backward {
// GetRowOffsets
//
// RelaxPartitionedEdges
/**
* Arch dispatch
*/
/**
* Not valid for this arch (default)
* @tparam KernelPolicy Kernel policy type for partitioned edge mapping.
* @tparam ProblemData Problem data type for partitioned edge mapping.
* @tparam Functor Functor type for the specific problem type.
* @tparam VALID
*/
template <typename KernelPolicy, typename ProblemData, typename Functor,
bool VALID =
#ifdef __CUDA_ARCH__
true
#else
false
#endif
>
struct Dispatch {};
template <typename KernelPolicy, typename ProblemData, typename Functor>
struct Dispatch<KernelPolicy, ProblemData, Functor, true> {
typedef typename KernelPolicy::VertexId VertexId;
typedef typename KernelPolicy::SizeT SizeT;
typedef typename KernelPolicy::Value Value;
typedef typename ProblemData::DataSlice DataSlice;
typedef typename Functor::LabelT LabelT;
static __device__ __forceinline__ SizeT GetNeighborListLength(
SizeT *&d_row_offsets, VertexId *&d_column_indices, VertexId &d_vertex_id,
SizeT &max_vertex, SizeT &max_edge,
gunrock::oprtr::advance::TYPE &ADVANCE_TYPE) {
if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
ADVANCE_TYPE == gunrock::oprtr::advance::E2E) {
d_vertex_id = d_column_indices[d_vertex_id];
}
SizeT first =
d_vertex_id >= max_vertex ? max_edge : d_row_offsets[d_vertex_id];
SizeT second = (d_vertex_id + 1) >= max_vertex
? max_edge
: d_row_offsets[d_vertex_id + 1];
return (second > first) ? second - first : 0;
}
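// Reading aid (comment added for clarity, not in the original source): for a
// CSR graph the neighbor-list length of vertex v is row_offsets[v+1] -
// row_offsets[v]. When the advance consumes an edge frontier (E2V/E2E) the
// incoming element is an edge id, so it is first mapped to its destination
// vertex through d_column_indices; vertices at or beyond max_vertex clamp both
// offsets to max_edge, which makes the difference above come out as zero.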
static __device__ __forceinline__ void GetEdgeCounts(
SizeT *&d_row_offsets, VertexId *&d_column_indices, VertexId *&d_queue,
SizeT *&d_scanned_edges, SizeT &num_elements, SizeT &max_vertex,
SizeT &max_edge, gunrock::oprtr::advance::TYPE &ADVANCE_TYPE) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int my_id = bid * blockDim.x + tid;
if (my_id >= num_elements || my_id >= max_edge) return;
VertexId v_id = d_queue[my_id];
SizeT num_edges =
GetNeighborListLength(d_row_offsets, d_column_indices, v_id, max_vertex,
max_edge, ADVANCE_TYPE);
d_scanned_edges[my_id] = num_edges;
}
static __device__ __forceinline__ void RelaxPartitionedEdges(
bool &queue_reset, VertexId &queue_index, int &label,
SizeT *&d_row_offsets, VertexId *&d_column_indices,
VertexId *&d_inverse_column_indices, SizeT *&d_scanned_edges,
unsigned int *&partition_starts, unsigned int &num_partitions,
// volatile int *&d_done,
VertexId *&d_queue, bool *&d_bitmap_in, bool *&d_bitmap_out,
DataSlice *&problem, SizeT &input_queue_len, SizeT *output_queue_len,
SizeT &partition_size, SizeT &max_vertices, SizeT &max_edges,
util::CtaWorkProgress<SizeT> &work_progress,
util::KernelRuntimeStats &kernel_stats,
gunrock::oprtr::advance::TYPE &ADVANCE_TYPE, bool &inverse_graph) {
// if (KernelPolicy::INSTRUMENT && (threadIdx.x == 0 && blockIdx.x == 0)) {
// kernel_stats.MarkStart();
//}
// Reset work progress
// if (queue_reset)
//{
// if (blockIdx.x == 0 && threadIdx.x < util::CtaWorkProgress::COUNTERS)
// {
// Reset all counters
// work_progress.template Reset<SizeT>();
// }
//}
// Determine work decomposition
if (threadIdx.x == 0) {
if (!queue_reset)
input_queue_len = work_progress.LoadQueueLength(queue_index);
if (blockIdx.x == 0) {
// obtain problem size
if (queue_reset) {
work_progress.StoreQueueLength(input_queue_len, queue_index);
}
work_progress.Enqueue(output_queue_len[0], queue_index + 1);
// Reset our next outgoing queue counter to zero
work_progress.StoreQueueLength(0, queue_index + 2);
work_progress.PrepResetSteal(queue_index + 1);
}
}
// Barrier to protect work decomposition
__syncthreads();
int tid = threadIdx.x;
int bid = blockIdx.x;
int my_thread_start, my_thread_end;
my_thread_start = bid * partition_size;
my_thread_end = (bid + 1) * partition_size < output_queue_len[0]
? (bid + 1) * partition_size
: output_queue_len[0];
// printf("tid:%d, bid:%d, m_thread_start:%d, m_thread_end:%d\n",tid, bid,
// my_thread_start, my_thread_end);
if (my_thread_start >= output_queue_len[0]) return;
int my_start_partition = partition_starts[bid];
int my_end_partition = partition_starts[bid + 1] > input_queue_len
? partition_starts[bid + 1]
: input_queue_len;
// if (tid == 0 && bid == 252)
// printf("bid(%d) < num_partitions-1(%d)?,
// partition_starts[bid+1]+1:%d\n", bid, num_partitions-1,
// partition_starts[bid+1]+1);
__shared__ typename KernelPolicy::SmemStorage smem_storage;
// smem_storage.s_edges[NT]
// smem_storage.s_vertices[NT]
unsigned int *s_edges = (unsigned int *)&smem_storage.s_edges[0];
unsigned int *s_vertices = (unsigned int *)&smem_storage.s_vertices[0];
unsigned int *s_edge_ids = (unsigned int *)&smem_storage.s_edge_ids[0];
int my_work_size = my_thread_end - my_thread_start;
int out_offset = bid * partition_size;
int pre_offset =
my_start_partition > 0 ? d_scanned_edges[my_start_partition - 1] : 0;
int e_offset = my_thread_start - pre_offset;
int edges_processed = 0;
while (edges_processed < my_work_size &&
my_start_partition < my_end_partition) {
pre_offset =
my_start_partition > 0 ? d_scanned_edges[my_start_partition - 1] : 0;
__syncthreads();
s_edges[tid] =
(my_start_partition + tid < my_end_partition
? d_scanned_edges[my_start_partition + tid] - pre_offset
: max_edges);
// if (bid == 252 && tid == 2)
// printf("start_partition+tid:%d < my_end_partition:%d ?,
// d_queue[%d]:%d\n", my_start_partition+tid, my_end_partition,
// my_start_partition+tid, d_queue[my_start_partition+tid]);
if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V ||
ADVANCE_TYPE == gunrock::oprtr::advance::V2E) {
s_vertices[tid] = my_start_partition + tid < my_end_partition
? d_queue[my_start_partition + tid]
: -1;
s_edge_ids[tid] = 0;
}
if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
ADVANCE_TYPE == gunrock::oprtr::advance::E2E) {
if (inverse_graph)
s_vertices[tid] =
my_start_partition + tid < my_end_partition
? d_inverse_column_indices[d_queue[my_start_partition + tid]]
: -1;
else
s_vertices[tid] =
my_start_partition + tid < my_end_partition
? d_column_indices[d_queue[my_start_partition + tid]]
: -1;
s_edge_ids[tid] = my_start_partition + tid < my_end_partition
? d_queue[my_start_partition + tid]
: -1;
}
int last = my_start_partition + KernelPolicy::THREADS >= my_end_partition
? my_end_partition - my_start_partition - 1
: KernelPolicy::THREADS - 1;
__syncthreads();
SizeT e_last =
min(s_edges[last] - e_offset, my_work_size - edges_processed);
SizeT v_index =
util::BinarySearch<KernelPolicy::THREADS>(tid + e_offset, s_edges);
VertexId v = s_vertices[v_index];
VertexId e_id = s_edge_ids[v_index];
SizeT end_last =
(v_index < my_end_partition ? s_edges[v_index] : max_edges);
SizeT internal_offset = v_index > 0 ? s_edges[v_index - 1] : 0;
SizeT lookup_offset = d_row_offsets[v];
for (int i = (tid + e_offset); i < e_last + e_offset;
i += KernelPolicy::THREADS) {
if (i >= end_last) {
v_index = util::BinarySearch<KernelPolicy::THREADS>(i, s_edges);
if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V ||
ADVANCE_TYPE == gunrock::oprtr::advance::V2E) {
v = d_queue[v_index];
e_id = 0;
}
if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
ADVANCE_TYPE == gunrock::oprtr::advance::E2E) {
v = inverse_graph ? d_inverse_column_indices[d_queue[v_index]]
: d_column_indices[d_queue[v_index]];
e_id = d_queue[v_index];
}
end_last =
(v_index < KernelPolicy::THREADS ? s_edges[v_index] : max_edges);
internal_offset = v_index > 0 ? s_edges[v_index - 1] : 0;
lookup_offset = d_row_offsets[v];
}
int e = i - internal_offset;
int lookup = lookup_offset + e;
VertexId u = d_column_indices[lookup];
SizeT out_index = out_offset + edges_processed + (i - e_offset);
/*{
if (!ProblemData::MARK_PREDECESSORS) {
if (Functor::CondEdge(label, u, problem, lookup, e_id)) {
Functor::ApplyEdge(label, u, problem, lookup, e_id);
if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V) {
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
u,
d_out + out_index);
} else if (ADVANCE_TYPE == gunrock::oprtr::advance::V2E
||ADVANCE_TYPE == gunrock::oprtr::advance::E2E) {
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
(VertexId)lookup,
d_out + out_index);
}
}
else {
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
-1,
d_out + out_index);
}
} else {
if (Functor::CondEdge(v, u, problem, lookup, e_id)) {
Functor::ApplyEdge(v, u, problem, lookup, e_id);
if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V) {
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
u,
d_out + out_index);
} else if (ADVANCE_TYPE == gunrock::oprtr::advance::V2E
||ADVANCE_TYPE == gunrock::oprtr::advance::E2E) {
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
(VertexId)lookup,
d_out + out_index);
}
}
else {
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
-1,
d_out + out_index);
}
}
}*/
}
edges_processed += e_last;
my_start_partition += KernelPolicy::THREADS;
e_offset = 0;
}
// if (KernelPolicy::INSTRUMENT && (blockIdx.x == 0 && threadIdx.x == 0)) {
// kernel_stats.MarkStop();
// kernel_stats.Flush();
//}
}
static __device__ __forceinline__ void RelaxLightEdges(
bool &queue_reset, VertexId &queue_index, int &label,
SizeT *&d_row_offsets, VertexId *&d_column_indices,
VertexId *&d_inverse_column_indices, SizeT *&d_scanned_edges,
// volatile int *&d_done,
VertexId *&d_queue, bool *&d_bitmap_in, bool *&d_bitmap_out,
DataSlice *&problem, SizeT &input_queue_len, SizeT *output_queue_len,
SizeT &max_vertices, SizeT &max_edges,
util::CtaWorkProgress<SizeT> &work_progress,
util::KernelRuntimeStats &kernel_stats,
gunrock::oprtr::advance::TYPE &ADVANCE_TYPE, bool inverse_graph) {
// if (KernelPolicy::INSTRUMENT && (blockIdx.x == 0 && threadIdx.x == 0)) {
// kernel_stats.MarkStart();
//}
// Reset work progress
// if (queue_reset)
//{
// if (blockIdx.x == 0 && threadIdx.x < util::CtaWorkProgress::COUNTERS)
// {
// Reset all counters
// work_progress.template Reset<SizeT>();
// }
//}
// Determine work decomposition
if (threadIdx.x == 0) {
// obtain problem size
if (!queue_reset)
input_queue_len = work_progress.LoadQueueLength(queue_index);
if (blockIdx.x == 0) {
if (queue_reset) {
work_progress.StoreQueueLength(input_queue_len, queue_index);
}
work_progress.Enqueue(output_queue_len[0], queue_index + 1);
// Reset our next outgoing queue counter to zero
work_progress.StoreQueueLength(0, queue_index + 2);
work_progress.PrepResetSteal(queue_index + 1);
}
}
// Barrier to protect work decomposition
__syncthreads();
unsigned int range = input_queue_len;
int tid = threadIdx.x;
int bid = blockIdx.x;
int my_id = bid * KernelPolicy::THREADS + tid;
__shared__ typename KernelPolicy::SmemStorage smem_storage;
unsigned int *s_edges = (unsigned int *)&smem_storage.s_edges[0];
unsigned int *s_vertices = (unsigned int *)&smem_storage.s_vertices[0];
unsigned int *s_edge_ids = (unsigned int *)&smem_storage.s_edge_ids[0];
int offset = (KernelPolicy::THREADS * bid - 1) > 0
? d_scanned_edges[KernelPolicy::THREADS * bid - 1]
: 0;
int end_id = (KernelPolicy::THREADS * (bid + 1)) >= range
? range - 1
: KernelPolicy::THREADS * (bid + 1) - 1;
end_id = end_id % KernelPolicy::THREADS;
s_edges[tid] =
(my_id < range ? d_scanned_edges[my_id] - offset : max_edges);
if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V) {
s_vertices[tid] = (my_id < range ? d_queue[my_id] : max_vertices);
s_edge_ids[tid] = my_id; // used as input index
}
// E2V and E2E are not yet supported for backward BFS
/*if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V || ADVANCE_TYPE ==
gunrock::oprtr::advance::E2E) { if (inverse_graph) s_vertices[tid] = (my_id
< range ? d_inverse_column_indices[d_queue[my_id]] : max_vertices); else
s_vertices[tid] = (my_id < range ? d_column_indices[d_queue[my_id]]
: max_vertices); s_edge_ids[tid] = (my_id < range ? d_queue[my_id] :
max_vertices);
}*/
__syncthreads();
unsigned int size = s_edges[end_id];
VertexId v, e, v_id;
int v_index = util::BinarySearch<KernelPolicy::THREADS>(tid, s_edges);
v = s_vertices[v_index];
v_id = s_edge_ids[v_index];
int end_last =
(v_index < KernelPolicy::THREADS ? s_edges[v_index] : max_vertices);
bool found_parent = false;
for (int i = tid; i < size; i += KernelPolicy::THREADS) {
if (i >= end_last) {
v_index = util::BinarySearch<KernelPolicy::THREADS>(i, s_edges);
v = s_vertices[v_index];
v_id = s_edge_ids[v_index];
end_last =
(v_index < KernelPolicy::THREADS ? s_edges[v_index] : max_vertices);
found_parent = false;
}
if (found_parent) continue;
int internal_offset = v_index > 0 ? s_edges[v_index - 1] : 0;
e = i - internal_offset;
int lookup = d_row_offsets[v] + e;
VertexId u = d_column_indices[lookup];
bool parent_in_bitmap = d_bitmap_in[u];
if (parent_in_bitmap && !found_parent) {
if (!ProblemData::MARK_PREDECESSORS) {
if (Functor::CondEdge(label, v, problem))
Functor::ApplyEdge(label, v, problem);
} else {
if (Functor::CondEdge(u, v, problem))
Functor::ApplyEdge(u, v, problem);
}
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
true, d_bitmap_out + v);
util::io::ModifiedStore<ProblemData::QUEUE_WRITE_MODIFIER>::St(
(VertexId)-1, d_queue + v_id);
found_parent = true;
}
}
// if (KernelPolicy::INSTRUMENT && (blockIdx.x == 0 && threadIdx.x == 0)) {
// kernel_stats.MarkStop();
// kernel_stats.Flush();
//}
}
};
/**
* @brief Kernel entry for the relax partitioned edges function
*
* @tparam KernelPolicy Kernel policy type for partitioned edge mapping.
* @tparam ProblemData Problem data type for partitioned edge mapping.
* @tparam Functor Functor type for the specific problem type.
*
* @param[in] queue_reset Whether to reset the queue counter
* @param[in] queue_index Current frontier queue counter index
* @param[in] label Label value to use in the functor
* @param[in] d_row_offsets Device pointer of SizeT to the row offsets queue
* @param[in] d_column_indices Device pointer of VertexId to the column indices
* queue
* @param[in] d_inverse_column_indices Device pointer of VertexId to the
* inverse column indices queue
* @param[in] d_scanned_edges Device pointer of scanned neighbor list queue of
* the current frontier
* @param[in] partition_starts Device pointer which marks the starting index of
* each partition
* @param[in] num_partitions Number of partitions
* @param[in] d_done Pointer of volatile int to the flag to set when
* we detect incoming frontier is empty
* @param[in] d_queue Device pointer of VertexId to the incoming
* frontier queue
* @param[out] d_bitmap_in Device pointer of bool to the input frontier
* bitmap
* @param[out] d_bitmap_out Device pointer of bool to the output frontier
* bitmap
* @param[in] problem Device pointer to the problem object
* @param[in] input_queue_len Length of the incoming frontier queue
* @param[in] output_queue_len Length of the outgoing frontier queue
* @param[in] max_vertices Maximum number of elements we can place into the
* incoming frontier
* @param[in] max_edges Maximum number of elements we can place into the
* outgoing frontier
* @param[in] work_progress queueing counters to record work progress
* @param[in] kernel_stats Per-CTA clock timing statistics (used when
* KernelPolicy::INSTRUMENT is set)
* @param[in] ADVANCE_TYPE enumerator which shows the advance type: V2V,
* V2E, E2V, or E2E
* @param[in] inverse_graph Whether this iteration's advance operator is in
* the opposite direction to the previous iteration
*/
template <typename KernelPolicy, typename ProblemData, typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::CTA_OCCUPANCY) __global__
void RelaxPartitionedEdges(
bool queue_reset, typename KernelPolicy::VertexId queue_index,
int label, typename KernelPolicy::SizeT *d_row_offsets,
typename KernelPolicy::VertexId *d_column_indices,
typename KernelPolicy::VertexId *d_inverse_column_indices,
typename KernelPolicy::SizeT *d_scanned_edges,
unsigned int *partition_starts, unsigned int num_partitions,
volatile int *d_done, typename KernelPolicy::VertexId *d_queue,
bool *d_bitmap_in, bool *d_bitmap_out,
typename ProblemData::DataSlice *problem,
typename KernelPolicy::SizeT input_queue_len,
typename KernelPolicy::SizeT *output_queue_len,
typename KernelPolicy::SizeT partition_size,
typename KernelPolicy::SizeT max_vertices,
typename KernelPolicy::SizeT max_edges,
util::CtaWorkProgress<typename KernelPolicy::SizeT> work_progress,
util::KernelRuntimeStats kernel_stats,
gunrock::oprtr::advance::TYPE ADVANCE_TYPE =
gunrock::oprtr::advance::V2V,
bool inverse_graph = false) {
Dispatch<KernelPolicy, ProblemData, Functor>::RelaxPartitionedEdges(
queue_reset, queue_index, label, d_row_offsets, d_column_indices,
d_inverse_column_indices, d_scanned_edges, partition_starts,
num_partitions,
// d_done,
d_queue, d_bitmap_in, d_bitmap_out, problem, input_queue_len,
output_queue_len, partition_size, max_vertices, max_edges, work_progress,
kernel_stats, ADVANCE_TYPE, inverse_graph);
}
/**
* @brief Kernel entry for relax light edge function
*
* @tparam KernelPolicy Kernel policy type for partitioned edge mapping.
* @tparam ProblemData Problem data type for partitioned edge mapping.
* @tparam Functor Functor type for the specific problem type.
*
* @param[in] queue_reset Whether to reset the queue counter
* @param[in] queue_index Current frontier queue counter index
* @param[in] label Label value to use in the functor
* @param[in] d_row_offsets Device pointer of SizeT to the row offsets queue
* @param[in] d_column_indices Device pointer of VertexId to the column indices
* queue
* @param[in] d_inverse_column_indices Device pointer of VertexId to the
* inverse column indices queue
* @param[in] d_scanned_edges Device pointer of scanned neighbor list queue of
* the current frontier
* @param[in] d_done Pointer of volatile int to the flag to set when
* we detect incoming frontier is empty
* @param[in] d_queue Device pointer of VertexId to the incoming
* frontier queue
* @param[out] d_bitmap_in Device pointer of bool to the input frontier
* bitmap
* @param[out] d_bitmap_out Device pointer of bool to the output frontier
* bitmap
* @param[in] problem Device pointer to the problem object
* @param[in] input_queue_len Length of the incoming frontier queue
* @param[in] output_queue_len Length of the outgoing frontier queue
* @param[in] max_vertices Maximum number of elements we can place into the
* incoming frontier
* @param[in] max_edges Maximum number of elements we can place into the
* outgoing frontier
* @param[in] work_progress queueing counters to record work progress
* @param[in] kernel_stats Per-CTA clock timing statistics (used when
* KernelPolicy::INSTRUMENT is set)
* @param[in] ADVANCE_TYPE enumerator which shows the advance type: V2V,
* V2E, E2V, or E2E
* @param[in] inverse_graph Whether this iteration's advance operator is in
* the opposite direction to the previous iteration
*/
template <typename KernelPolicy, typename ProblemData, typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::CTA_OCCUPANCY) __global__
void RelaxLightEdges(
bool queue_reset, typename KernelPolicy::VertexId queue_index,
int label, typename KernelPolicy::SizeT *d_row_offsets,
typename KernelPolicy::VertexId *d_column_indices,
typename KernelPolicy::VertexId *d_inverse_column_indices,
typename KernelPolicy::SizeT *d_scanned_edges,
// volatile int *d_done,
typename KernelPolicy::VertexId *d_queue, bool *d_bitmap_in,
bool *d_bitmap_out, typename ProblemData::DataSlice *problem,
typename KernelPolicy::SizeT input_queue_len,
typename KernelPolicy::SizeT *output_queue_len,
typename KernelPolicy::SizeT max_vertices,
typename KernelPolicy::SizeT max_edges,
util::CtaWorkProgress<typename KernelPolicy::SizeT> work_progress,
util::KernelRuntimeStats kernel_stats,
gunrock::oprtr::advance::TYPE ADVANCE_TYPE =
gunrock::oprtr::advance::V2V,
bool inverse_graph = false) {
Dispatch<KernelPolicy, ProblemData, Functor>::RelaxLightEdges(
queue_reset, queue_index, label, d_row_offsets, d_column_indices,
d_inverse_column_indices, d_scanned_edges,
// d_done,
d_queue, d_bitmap_in, d_bitmap_out, problem, input_queue_len,
output_queue_len, max_vertices, max_edges, work_progress, kernel_stats,
ADVANCE_TYPE, inverse_graph);
}
/**
* @brief Kernel entry for computing neighbor list length for each vertex in the
* current frontier
*
* @tparam KernelPolicy Kernel policy type for partitioned edge mapping.
* @tparam ProblemData Problem data type for partitioned edge mapping.
* @tparam Functor Functor type for the specific problem type.
*
* @param[in] d_row_offsets Device pointer of SizeT to the row offsets queue
* @param[in] d_column_indices Device pointer of VertexId to the column indices
* queue
* @param[in] d_queue Device pointer of VertexId to the incoming
* frontier queue
* @param[out] d_scanned_edges Device pointer of scanned neighbor list queue of
* the current frontier
* @param[in] num_elements Length of the current frontier queue
* @param[in] max_vertices Maximum number of elements we can place into the
* incoming frontier
* @param[in] max_edges Maximum number of elements we can place into the
* outgoing frontier
* @param[in] ADVANCE_TYPE enumerator which shows the advance type: V2V,
* V2E, E2V, or E2E
*/
template <typename KernelPolicy, typename ProblemData, typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::CTA_OCCUPANCY) __global__
void GetEdgeCounts(typename KernelPolicy::SizeT *d_row_offsets,
typename KernelPolicy::VertexId *d_column_indices,
typename KernelPolicy::VertexId *d_queue,
typename KernelPolicy::SizeT *d_scanned_edges,
typename KernelPolicy::SizeT num_elements,
typename KernelPolicy::SizeT max_vertex,
typename KernelPolicy::SizeT max_edge,
gunrock::oprtr::advance::TYPE ADVANCE_TYPE)
{
Dispatch<KernelPolicy, ProblemData, Functor>::GetEdgeCounts(
d_row_offsets, d_column_indices, d_queue, d_scanned_edges, num_elements,
max_vertex, max_edge, ADVANCE_TYPE);
}
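/**
 * A minimal host-side sketch (not part of the original Gunrock API) showing how
 * the GetEdgeCounts kernel above might be launched: one thread per frontier
 * element, KernelPolicy::THREADS threads per CTA. All pointers are device
 * pointers prepared by the caller; the stream parameter is an assumption added
 * here for clarity.
 */
template <typename KernelPolicy, typename ProblemData, typename Functor>
void LaunchGetEdgeCountsExample(typename KernelPolicy::SizeT *d_row_offsets,
                                typename KernelPolicy::VertexId *d_column_indices,
                                typename KernelPolicy::VertexId *d_queue,
                                typename KernelPolicy::SizeT *d_scanned_edges,
                                typename KernelPolicy::SizeT num_elements,
                                typename KernelPolicy::SizeT max_vertex,
                                typename KernelPolicy::SizeT max_edge,
                                gunrock::oprtr::advance::TYPE advance_type,
                                cudaStream_t stream = 0) {
  // Enough CTAs to cover every element of the incoming frontier.
  unsigned int grid_size =
      (num_elements + KernelPolicy::THREADS - 1) / KernelPolicy::THREADS;
  GetEdgeCounts<KernelPolicy, ProblemData, Functor>
      <<<grid_size, KernelPolicy::THREADS, 0, stream>>>(
          d_row_offsets, d_column_indices, d_queue, d_scanned_edges,
          num_elements, max_vertex, max_edge, advance_type);
}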
} // namespace edge_map_partitioned_backward
} // namespace oprtr
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include <stdio.h>
__device__ int isinbox(real4 pos, double4 xlow, double4 xhigh)
{
if((pos.x < xlow.x)||(pos.x > xhigh.x))
return 0;
if((pos.y < xlow.y)||(pos.y > xhigh.y))
return 0;
if((pos.z < xlow.z)||(pos.z > xhigh.z))
return 0;
return 1;
}
extern "C" __global__ void doDomainCheck(int n_bodies,
double4 xlow,
double4 xhigh,
real4 *body_pos,
int *validList //Valid is 1 if particle is outside domain
){
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= n_bodies) return;
real4 pos = body_pos[id];
int valid = isinbox(pos, xlow, xhigh);
valid = !valid;
validList[id] = id | ((valid) << 31);
}
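//A minimal host/device sketch (not part of the original kernels) showing how
//the packed validList entries written above can be decoded: bit 31 flags a
//particle outside the domain, the lower 31 bits hold its index.
__host__ __device__ inline int isOutsideDomainFlag(int packed)
{
  //the valid flag was stored in the sign bit by doDomainCheck
  return (int)(((unsigned int)packed) >> 31);
}
__host__ __device__ inline int packedParticleIndex(int packed)
{
  //mask off the flag bit to recover the particle index
  return packed & 0x7FFFFFFF;
}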
//Checks the domain and computes the key list
//if a particle is outside the domain it gets a special key
//otherwise the normal key is used
extern "C" __global__ void doDomainCheckAdvanced(int n_bodies,
double4 xlow,
double4 xhigh,
real4 *body_pos,
int *validList //Valid is 1 if particle is outside domain
){
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if (id >= n_bodies) return;
real4 pos = body_pos[id];
int valid = isinbox(pos, xlow, xhigh);
valid = !valid;
validList[id] = id | ((valid) << 31);
}
extern "C" __global__ void extractSampleParticles(int n_bodies,
int sample_freq,
real4 *body_pos,
real4 *samplePosition
){
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
int idx = id*sample_freq;
if (idx >= n_bodies) return;
samplePosition[id] = body_pos[idx];
}
extern "C" __global__ void extractOutOfDomainParticlesR4(int n_extract,
int *extractList,
real4 *source,
real4 *destination)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if(id >= n_extract) return;
destination[id] = source[extractList[id]];
}
typedef struct bodyStruct
{
real4 pos;
real4 vel;
real4 acc0;
real4 acc1;
real4 Ppos;
real4 Pvel;
float2 time;
int id;
int temp;
} bodyStruct;
extern "C" __global__ void extractOutOfDomainParticlesAdvanced(int n_extract,
int *extractList,
real4 *Ppos,
real4 *Pvel,
real4 *pos,
real4 *vel,
real4 *acc0,
real4 *acc1,
float2 *time,
int *body_id,
bodyStruct *destination)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if(id >= n_extract) return;
//copy the data from a struct of arrays into an array of structs
destination[id].Ppos = Ppos[extractList[id]];
destination[id].Pvel = Pvel[extractList[id]];
destination[id].pos = pos[extractList[id]];
destination[id].vel = vel[extractList[id]];
destination[id].acc0 = acc0[extractList[id]];
destination[id].acc1 = acc1[extractList[id]];
destination[id].time = time[extractList[id]];
destination[id].id = body_id[extractList[id]];
}
extern "C" __global__ void internalMove(int n_extract,
int n_bodies,
double4 xlow,
double4 xhigh,
int *extractList,
int *indexList,
real4 *Ppos,
real4 *Pvel,
real4 *pos,
real4 *vel,
real4 *acc0,
real4 *acc1,
float2 *time,
int *body_id)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if(id >= n_extract) return;
int srcIdx = (n_bodies-n_extract) + id;
real4 testpos = Ppos[srcIdx];
if(isinbox(testpos, xlow, xhigh))
{
int dstIdx = atomicAdd(indexList, 1);
dstIdx = extractList[dstIdx];
//Move!
Ppos[dstIdx] = Ppos[srcIdx];
Pvel[dstIdx] = Pvel[srcIdx];
pos[dstIdx] = pos[srcIdx];
vel[dstIdx] = vel[srcIdx];
acc0[dstIdx] = acc0[srcIdx];
acc1[dstIdx] = acc1[srcIdx];
time[dstIdx] = time[srcIdx];
body_id[dstIdx] = body_id[srcIdx];
}//if isinbox
}
extern "C" __global__ void insertNewParticles(int n_extract,
int n_insert,
int n_oldbodies,
int offset,
real4 *Ppos,
real4 *Pvel,
real4 *pos,
real4 *vel,
real4 *acc0,
real4 *acc1,
float2 *time,
int *body_id,
bodyStruct *source)
{
uint bid = blockIdx.y * gridDim.x + blockIdx.x;
uint tid = threadIdx.x;
uint id = bid * blockDim.x + tid;
if(id >= n_insert) return;
//The newly added particles are added at the end of the array
int idx = (n_oldbodies-n_extract) + id + offset;
//copy the data from an array of structs back into the struct of arrays
Ppos[idx] = source[id].Ppos;
Pvel[idx] = source[id].Pvel;
pos[idx] = source[id].pos;
vel[idx] = source[id].vel;
acc0[idx] = source[id].acc0;
acc1[idx] = source[id].acc1;
time[idx] = source[id].time;
body_id[idx] = source[id].id;
}
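//Worked example (illustrative numbers only, not taken from this file): with
//n_oldbodies = 1000, n_extract = 50 and offset = 0, incoming particle id = 0
//is written to slot 950 and id = 49 to slot 999, i.e. the tail region vacated
//by internalMove() is filled first; when n_insert > n_extract the remaining
//particles simply extend the arrays past the old n_oldbodies boundary.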
// extern "C" __global__ void insertNewParticles(int n_extract,
// int n_insert,
// int n_oldbodies,
// int *extractList,
// real4 *Ppos,
// real4 *Pvel,
// real4 *pos,
// real4 *vel,
// real4 *acc0,
// real4 *acc1,
// float2 *time,
// int *body_id,
// bodyStruct *source)
// {
// uint bid = blockIdx.y * gridDim.x + blockIdx.x;
// uint tid = threadIdx.x;
// uint id = bid * blockDim.x + tid;
//
// int idx, srcidx = -1;
// /*
//
// //Situations:
// - n_insert > n_extract -> particles must be appended at the end (more added than removed)
// id < n_extract -> idx = extractList[id] ; from source[id]
// id >= n_extract & id < n_insert --> idx = n_oldbodies + (id-n_extract); from source[id]
//
// - n_insert <= n_extract -> particles must move from the end to the front (more removed than added)
// id < n_extract -> idx = extractList[id] ; from source[id]
// id >= n_extract & id < n_insert -> idx = extractList[id] ; from dest[n_bodies-(n_extract-n_insert) + (id - n_insert)]
//
// */
//
// if(n_insert > n_extract)
// {
// if(id < n_extract)
// {
// idx = extractList[id];
// }
// else if(id >= n_extract && id < n_insert)
// {
// //Insert particles at the end of the array
// idx = n_oldbodies + (id-n_extract);
// }
// else
// {
// return;
// }
// }
// else
// {
// //n_insert <= n_extract
//
// if(id < n_insert)
// {
// idx = extractList[id];
// }
// else if(id >= n_insert && id < n_extract)
// {
// //Move particles from the back of the array to the empty spots
// idx = extractList[id];
// srcidx = extractList[n_oldbodies-(n_extract-n_insert) + (id - n_insert)];
// // srcidx = n_oldbodies-(n_extract-n_insert) + (id - n_insert);
// }
// else
// {
// return;
// }
// }
// /*
// This goes wrong when n_insert < n_extract,
// because when moving we may also end up moving something
// from the front to the back if something was removed there;
// we would therefore have to find the last removed particle and make sure nothing ends up after it, or something along those lines.
//
//
//
// */
//
// /*
// if(id < n_extract)
// {
// idx = extractList[id];
// }
// else if(id >= n_extract && id < n_insert)
// {
// if(n_insert > n_extract)
// {
// //Insert particles at the end of the array
// idx = n_oldbodies + (id-n_extract);
// }
// else
// {
// //Move particles from the back of the array to the empty spots
// idx = extractList[id];
// srcidx = n_oldbodies-(n_extract-n_insert) + (id - n_insert);
// }
// }
// else
// {
// //Outside all array ranges
// return;
// }*/
//
//
// if(srcidx < 0)
// {
// //copy the data from a struct of arrays into a array of structs
// Ppos[idx] = source[id].Ppos;
// Pvel[idx] = source[id].Pvel;
// pos[idx] = source[id].pos;
// vel[idx] = source[id].vel;
// acc0[idx] = source[id].acc0;
// acc1[idx] = source[id].acc1;
// time[idx] = source[id].time;
// body_id[idx] = source[id].id;
//
// printf("%d (CMOVE external %d) goes to: %d \n", source[id].id,n_insert, idx);
//
//
// }
// else
// {
// Ppos[idx] = Ppos[srcidx];
// Pvel[idx] = Pvel[srcidx];
// pos[idx] = pos[srcidx];
// vel[idx] = vel[srcidx];
// acc0[idx] = acc0[srcidx];
// acc1[idx] = acc1[srcidx];
// time[idx] = time[srcidx];
// int temp = body_id[idx];
// body_id[idx] = body_id[srcidx];
//
// printf("%d stored at: %d (CMOVE internal %d) goes to: %d overwr: %d \n", body_id[srcidx],srcidx, n_insert, idx, temp);
//
//
// }//if srcidx < 0
//
//
// }
// #define DEBUG_RESULT
namespace lightseq {
namespace cuda {
template <OperationType OpType_>
GptEncoder<OpType_>::GptEncoder(int max_batch_size, const int *p_d_token_id,
float *p_d_ppl, int *p_d_sample_id,
const GptWeight<OpType_> &tw,
cudaStream_t stream, cudaStream_t cache_stream,
cublasHandle_t hd)
: _max_batch_size(max_batch_size),
_p_d_token_id(p_d_token_id),
_p_d_ppl(p_d_ppl),
_p_d_sample_id(p_d_sample_id),
_tw(tw),
_stream(stream),
_cache_stream(cache_stream),
_hd(hd),
_p_d_src_emb_wei(tw.get_src_emb_wei()),
_p_d_enc_wei(tw.get_enc_wei()),
_fone((_DataType)1.f),
_fzero((_DataType)0.f),
_atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)),
_max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size),
_max_thread_per_block(1024),
_h_real_seq_len(max_batch_size, 0),
_h_ppl(max_batch_size, 0.f),
_h_sample_id(max_batch_size * tw._max_step, 0),
_h_unfinished(1) {}
/**
Compute the GPU memory size needed by the GPT encoder.
To see how this memory is used, check init_buffer() for details.
*/
template <OperationType OpType_>
size_t GptEncoder<OpType_>::compute_buffer_bytesize() {
int si = _max_batch_size;
size_t sz0 = (size_t)_max_batch_dim;
sz0 += 2 * (size_t)_max_batch_dim * (size_t)_tw._n_enc_layer;
long long sz1 = (size_t)_max_batch_dim * 6 +
(size_t)_max_batch_size * (size_t)_tw._head_num *
(size_t)_tw._max_step * (size_t)_tw._max_step;
long long sz2 = (size_t)_max_batch_dim + (size_t)_max_batch_size *
(size_t)_tw._max_step *
(size_t)_tw._inner_size;
long long sz3 = (size_t)_max_batch_size * (size_t)_tw._max_step *
(size_t)_tw._src_vocab_size;
return (sz0 + max(max(sz1, sz2), sz3)) * sizeof(_DataType) + si * sizeof(int);
}
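/*
A worked example (illustrative only; the dimensions below are assumptions, not
values taken from this file). For a GPT-2-base-like model with hidden_size=768,
n_enc_layer=12, head_num=12, inner_size=3072, src_vocab_size=50257,
max_step=512 and max_batch_size=8:
  _max_batch_dim = 8*512*768            ~= 3.1M elements
  sz0 = (1 + 2*12) * _max_batch_dim     ~= 78.6M
  sz1 = 6*_max_batch_dim + 8*12*512*512 ~= 44.0M
  sz2 = _max_batch_dim + 8*512*3072     ~= 15.7M
  sz3 = 8*512*50257                     ~= 205.9M   (the logits dominate)
so the buffer holds sz0 + sz3 ~= 284.5M elements, i.e. roughly 1.1 GB in FP32
or about 0.57 GB in FP16, plus 8 ints for the real sequence lengths.
*/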
/**
Initialize the GPU memory pointers that point into
the memory buffer needed by the encoder.
These buffers are used by the custom CUDA kernel functions;
find the corresponding function to see how each buffer is used.
*/
template <OperationType OpType_>
void GptEncoder<OpType_>::init_buffer(void *pbuf) {
// int buffer
int *p_d_int = reinterpret_cast<int *>(pbuf);
_p_d_real_seq_len = p_d_int;
p_d_int += _max_batch_size;
// datatype buffer
_DataType *p_d_datatype = reinterpret_cast<_DataType *>(p_d_int);
_p_d_query = p_d_datatype;
_p_d_k_cache = _p_d_query + _max_batch_dim;
_p_d_v_cache = _p_d_k_cache + _max_batch_dim * _tw._n_enc_layer;
p_d_datatype = _p_d_v_cache + _max_batch_dim * _tw._n_enc_layer;
// reuse 1 ---------------------
_p_d_qkv_projected = p_d_datatype;
_p_d_q = _p_d_qkv_projected + _max_batch_dim * 3;
_p_d_k = _p_d_q + _max_batch_dim;
_p_d_v = _p_d_k + _max_batch_dim;
// _max_batch_size * _tw._head_num *
// _tw._max_step * _tw._max_step
_p_d_c = _p_d_v + _max_batch_dim;
// reuse 2 ---------------------
_p_d_ffn_buf1 = p_d_datatype;
// _max_batch_size * _tw._max_step * _tw._inner_size
_p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim;
// reuse 3 ---------------------
// _max_batch_size * _tw._max_step * _tw._src_vocab_size
_p_d_logit = p_d_datatype;
CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_curandstate,
_max_batch_size * sizeof(curandState)));
CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_sample_id_buf,
_max_batch_size * _tw._max_step * sizeof(int)));
CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_unfinished, sizeof(int)));
ker_curand_setup<<<_max_batch_size, 1, 0, _stream>>>(_p_d_curandstate);
return;
}
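/*
Layout sketch of the externally provided buffer partitioned above (sizes in
elements, derived from the pointer arithmetic in init_buffer):
  [_max_batch_size ints]           -> _p_d_real_seq_len
  [_max_batch_dim]                 -> _p_d_query
  [_max_batch_dim * n_enc_layer]   -> _p_d_k_cache
  [_max_batch_dim * n_enc_layer]   -> _p_d_v_cache
  [largest of the reuse regions]   -> reuse 1: qkv_projected / q / k / v / c
                                      reuse 2: ffn_buf1 / ffn_buf2
                                      reuse 3: logits
The curand states, the sample-id buffer and the "unfinished" flag are not part
of this buffer; they are allocated separately with cudaMalloc above.
*/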
/**
Check some requirements imposed by the custom CUDA kernel functions
*/
template <OperationType OpType_>
std::string GptEncoder<OpType_>::check() {
// if (_max_thread_per_block < _tw._hidden_size) {
// return "violate hidden_size <= max_thread_per_block";
// }
if (_tw._inner_size & 1) {
return "violate inner_size % 2 = 0";
}
if (_tw._dim_per_head & 1) {
return "violate dim_per_head % 2 = 0";
}
if (_p_d_src_emb_wei.size() != 4) {
return "violate p_d_src_emb_wei.size() = 4";
}
if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) {
return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer";
}
return "";
}
template <OperationType OpType_>
void GptEncoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) {
_batch_size = batch_size;
_batch_seq_len = batch_seq_len;
_batch_token_num = batch_size * batch_seq_len;
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(),
sizeof(int) * _batch_size,
cudaMemcpyHostToDevice, _stream));
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_ppl, _h_ppl.data(),
sizeof(float) * _batch_size,
cudaMemcpyHostToDevice, _stream));
#ifdef DEBUG_RESULT
std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len
<< std::endl;
print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len);
#endif
// token embedding, add position embedding and layer_norm
ker_gpt_embedding_launcher<_DataType>(
batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0],
_p_d_src_emb_wei[1], _p_d_token_id, _p_d_query, _p_d_real_seq_len,
_tw._padding_id, 0);
#ifdef DEBUG_RESULT
print_vec(_p_d_query, "input embeddings",
_batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
#endif
for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_enc_layer;
self_attention();
ffn_add_norm();
}
// last layer norm
ker_norm_layer_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_query,
_p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block);
compute_ppl();
return;
}
template <OperationType OpType_>
int GptEncoder<OpType_>::run_one_sample(int batch_size, int batch_seq_len) {
_batch_size = batch_size;
_batch_seq_len = batch_seq_len;
_batch_token_num = batch_size * batch_seq_len;
if (_batch_seq_len >= _tw._max_step) {
return _batch_seq_len;
}
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(),
sizeof(int) * _batch_size,
cudaMemcpyHostToDevice, _stream));
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_ppl, _h_ppl.data(),
sizeof(float) * _batch_size,
cudaMemcpyHostToDevice, _stream));
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id, _p_d_token_id,
sizeof(int) * _batch_size * _tw._max_step,
cudaMemcpyDeviceToDevice, _stream));
#ifdef DEBUG_RESULT
std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len
<< std::endl;
std::cout << "Sample with " << _tw._sampling_method << std::endl;
std::cout << "padding_id: " << _tw._padding_id << std::endl;
std::cout << "vocab_size: " << _tw._src_vocab_size << std::endl;
print_vec(_p_d_sample_id, "batch_token_ids", batch_size * batch_seq_len);
#endif
// token embedding, add position embedding and layer_norm
ker_gpt_embedding_launcher<_DataType>(
_batch_size, _batch_seq_len, _tw._hidden_size, _stream,
_p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_sample_id, _p_d_query,
_p_d_real_seq_len, _tw._padding_id, 0);
#ifdef DEBUG_RESULT
print_vec(_p_d_query, "embedding", _batch_token_num * _tw._hidden_size - 10,
_batch_token_num * _tw._hidden_size);
#endif
for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_enc_layer;
self_attention(true);
ffn_add_norm();
}
// last layer norm
ker_norm_layer_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_query,
_p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block);
if (sample_one_token() == 0 || _batch_seq_len >= _tw._max_step) {
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id,
_batch_token_num * sizeof(int),
cudaMemcpyDeviceToDevice, _stream));
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
return _batch_seq_len;
}
while (1) {
#ifdef DEBUG_RESULT
std::cout << "before sample:batch_size-" << _batch_size << " batch_seq_len-"
<< _batch_seq_len << std::endl;
print_vec(_p_d_sample_id, "batch_token_ids", _batch_token_num);
#endif
// token embedding, add position embedding and layer_norm
ker_gpt_embedding_launcher<_DataType>(
_batch_size, 1, _tw._hidden_size, _stream, _p_d_src_emb_wei[0],
_p_d_src_emb_wei[1], _p_d_last_sample_id, _p_d_query, _p_d_real_seq_len,
_tw._padding_id, _batch_seq_len - 1);
#ifdef DEBUG_RESULT
print_vec(_p_d_query, "embedding", _batch_size * _tw._hidden_size - 10,
_batch_size * _tw._hidden_size);
#endif
for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_enc_layer;
self_attention_with_cache();
ffn_add_norm_with_cache();
}
// last layer norm
ker_norm_layer_launcher<_DataType>(
_batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_src_emb_wei[2],
_p_d_src_emb_wei[3], _max_thread_per_block);
#ifdef DEBUG_RESULT
print_vec(_p_d_query, "_p_d_query before logits",
_batch_size * _tw._hidden_size - 10,
_batch_size * _tw._hidden_size);
if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step)
break;
#else
if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step)
break;
#endif
}
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id,
_batch_token_num * sizeof(int),
cudaMemcpyDeviceToDevice, _stream));
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
return _batch_seq_len;
}
template <OperationType OpType_>
int GptEncoder<OpType_>::sample_one_token() {
/* ---step 1. project hidden states to vocab logits--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_T, CUBLAS_OP_N, _tw._src_vocab_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size,
_p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType,
_tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
print_vec(_p_d_logit, "logits", _batch_token_num * _tw._src_vocab_size - 10,
_batch_token_num * _tw._src_vocab_size);
#endif
CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream));
/* ---step 2. sample new tokens from logits */
if (_tw._sampling_method == "topk") {
#ifdef DEBUG_RESULT
std::cout << "sampling using topk\n";
#endif
ker_topk_sample_launcher<_DataType>(
_batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block,
_stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf,
_p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished,
_p_d_curandstate, _tw._eos_id);
} else {
#ifdef DEBUG_RESULT
std::cout << "sampling using topp\n";
#endif
ker_topp_sample_launcher<_DataType>(
_batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block,
_stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf,
_p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished,
_p_d_curandstate, _tw._eos_id);
}
int *temp = _p_d_sample_id;
_p_d_sample_id = _p_d_sample_id_buf;
_p_d_sample_id_buf = temp;
CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int),
cudaMemcpyDeviceToHost, _stream));
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
_p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num;
_batch_seq_len++;
_batch_token_num += _batch_size;
return _h_unfinished;
}
template <OperationType OpType_>
int GptEncoder<OpType_>::sample_one_token_with_cache() {
/* ---step 1. project hidden states to vocab logits--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_T, CUBLAS_OP_N, _tw._src_vocab_size, _batch_size,
_tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size,
_p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType,
_tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
print_vec(_p_d_logit, "sampling-logits",
_batch_size * _tw._src_vocab_size - 5,
_batch_size * _tw._src_vocab_size);
#endif
CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream));
/* ---step 2. sample new tokens from logits */
if (_tw._sampling_method == "topk") {
#ifdef DEBUG_RESULT
std::cout << "sampling using topk\n";
#endif
ker_topk_sample_launcher<_DataType>(
_batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream,
_p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len,
_tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate,
_tw._eos_id);
} else {
#ifdef DEBUG_RESULT
std::cout << "sampling using topp\n";
#endif
ker_topp_sample_launcher<_DataType>(
_batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream,
_p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len,
_tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate,
_tw._eos_id);
}
int *temp = _p_d_sample_id;
_p_d_sample_id = _p_d_sample_id_buf;
_p_d_sample_id_buf = temp;
CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int),
cudaMemcpyDeviceToHost, _stream));
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
_p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num;
_batch_seq_len++;
_batch_token_num += _batch_size;
return _h_unfinished;
}
template <OperationType OpType_>
void GptEncoder<OpType_>::self_attention(bool cache) {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_q,
_p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1],
_p_d_enc_wei[_weight_offset + 5], _max_thread_per_block);
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_query, "input with bias",
_batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
print_vec(_p_d_q, "first ln output",
_batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
}
#endif
/* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head
* gemm--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType,
_tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero,
_p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
std::cout << "hidden_size: " << _tw._hidden_size << std::endl;
std::cout << "_batch_token_num: " << _batch_token_num << std::endl;
std::cout << "_dim_per_head: " << _tw._dim_per_head << std::endl;
std::cout << "_head_num: " << _tw._head_num << std::endl;
print_vec(_p_d_enc_wei[_weight_offset + 2], "qkv_weight_mat",
_tw._hidden_size * _tw._hidden_size * 3 - 5,
_tw._hidden_size * _tw._hidden_size * 3);
print_vec(_p_d_qkv_projected, "_p_d_qkv_projected",
_batch_token_num * _tw._hidden_size * 3 - 5,
_batch_token_num * _tw._hidden_size * 3);
}
#endif
// get q, k, v by split and reshape qkv
ker_arrange_encself_qkv_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected,
_p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len,
_tw._dim_per_head, _tw._head_num, _max_thread_per_block);
if (cache) {
cudaStream_t stream;
if (_batch_token_num > 360) {
stream = _cache_stream;
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
} else {
stream = _stream;
}
CHECK_GPU_ERROR(
cudaMemcpyAsync(_p_d_k_cache + _layer_id * _max_batch_dim, _p_d_k,
_batch_token_num * _tw._hidden_size * sizeof(_DataType),
cudaMemcpyDeviceToDevice, stream));
CHECK_GPU_ERROR(
cudaMemcpyAsync(_p_d_v_cache + _layer_id * _max_batch_dim, _p_d_v,
_batch_token_num * _tw._hidden_size * sizeof(_DataType),
cudaMemcpyDeviceToDevice, stream));
}
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_q, "_p_d_q", _batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
print_vec(_p_d_k, "_p_d_k", _batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
print_vec(_p_d_v, "_p_d_v", _batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
}
#endif
/* ---step 2. correlation = q * k, perform softmax on correlation--- */
CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
_hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len,
_tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType,
_batch_seq_len, _batch_seq_len * _batch_seq_len,
_batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_c, "q*k",
_batch_token_num * _batch_seq_len * _tw._head_num - 5,
_batch_token_num * _batch_seq_len * _tw._head_num);
}
#endif
ker_correlation_softmax_gpt_launcher<_DataType>(_batch_size, _batch_seq_len,
_tw._head_num, _stream,
_p_d_c, _p_d_real_seq_len);
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_c, "mask weights",
_batch_token_num * _batch_seq_len * _tw._head_num - 5,
_batch_token_num * _batch_seq_len * _tw._head_num);
}
#endif
/* ---step 3. new_q = correlation * v--- */
CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len,
_batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len,
_batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType,
_tw._dim_per_head, _batch_seq_len * _tw._dim_per_head,
_batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_q, "value after attention",
_batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
}
#endif
// use v to store the reshaped q, since they have the same size and v
// will not be used again before the next multi-head attention
ker_arrange_atten_output_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v,
_batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block);
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_v, "reshaped value after attention", 0, 5);
print_vec(_p_d_query, "attention input with output bias", 0, 5);
}
#endif
/* ---step 4. new_q = ori_q + new_q * output_wei--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType,
_tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_query,
_CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_enc_wei[_weight_offset + 4], "attn out kernel", 0, 5);
print_vec(_p_d_query, "attention output", 0, 5);
}
#endif
return;
}
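/*
Reading aid (comment added for clarity): steps 1-4 above implement standard
multi-head scaled dot-product attention, roughly
  Attention(Q, K, V) = softmax(Q K^T / sqrt(dim_per_head)) V
where the 1/sqrt(dim_per_head) factor is _atten_scaler, applied as the alpha of
the first batched GEMM; causal/padding masking is handled by
ker_correlation_softmax_gpt_launcher, and the output projection accumulates
back into _p_d_query (beta = 1), which realizes the residual connection.
*/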
template <OperationType OpType_>
void GptEncoder<OpType_>::self_attention_with_cache() {
_DataType *_p_d_k_cache_cur_layer = _p_d_k_cache + _layer_id * _max_batch_dim;
_DataType *_p_d_v_cache_cur_layer = _p_d_v_cache + _layer_id * _max_batch_dim;
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_k_cache_cur_layer, "_p_d_k_cache_cur_layer",
_batch_size * (_batch_seq_len - 1) * _tw._hidden_size - 5,
_batch_size * (_batch_seq_len - 1) * _tw._hidden_size);
print_vec(_p_d_v_cache_cur_layer, "_p_d_v_cache_cur_layer",
_batch_size * (_batch_seq_len - 1) * _tw._hidden_size - 5,
_batch_size * (_batch_seq_len - 1) * _tw._hidden_size);
}
#endif
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_q,
_p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1],
_p_d_enc_wei[_weight_offset + 5], _max_thread_per_block);
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_query, "input with bias", _batch_size * _tw._hidden_size - 5,
_batch_size * _tw._hidden_size);
print_vec(_p_d_q, "first ln output", _batch_size * _tw._hidden_size - 5,
_batch_size * _tw._hidden_size);
}
#endif
/* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head
* gemm--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_size,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType,
_tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero,
_p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_qkv_projected, "_p_d_qkv_projected",
_batch_size * _tw._hidden_size * 3 - 5,
_batch_size * _tw._hidden_size * 3);
}
#endif
// get q, k, v by split and reshape qkv
ker_arrange_qkv_with_cache_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected,
_p_d_enc_wei[_weight_offset + 3], _p_d_q, _p_d_k, _p_d_k_cache_cur_layer,
_p_d_v, _p_d_v_cache_cur_layer, _max_batch_dim, _batch_seq_len,
_tw._dim_per_head, _tw._head_num);
// copy new k and v to cache
cudaStream_t stream;
if (_batch_token_num > 360) {
stream = _cache_stream;
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
} else {
stream = _stream;
}
CHECK_GPU_ERROR(
cudaMemcpyAsync(_p_d_k_cache_cur_layer, _p_d_k,
_batch_token_num * _tw._hidden_size * sizeof(_DataType),
cudaMemcpyDeviceToDevice, stream));
CHECK_GPU_ERROR(
cudaMemcpyAsync(_p_d_v_cache_cur_layer, _p_d_v,
_batch_token_num * _tw._hidden_size * sizeof(_DataType),
cudaMemcpyDeviceToDevice, stream));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_q, "_p_d_q", _batch_size * _tw._hidden_size - 5,
_batch_size * _tw._hidden_size);
print_vec(_p_d_k, "_p_d_k", _batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
print_vec(_p_d_v, "_p_d_v", _batch_token_num * _tw._hidden_size - 5,
_batch_token_num * _tw._hidden_size);
}
#endif
/* ---step 2. correlation = q * k, perform softmax on correlation
correlation: [batch_size, heads_num, 1, batch_seq_len]--- */
CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
_hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, 1, _tw._dim_per_head,
&_atten_scaler, _p_d_k, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head,
_tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len,
_batch_seq_len, _batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_c, "q*k", _batch_size * _batch_seq_len * _tw._head_num - 5,
_batch_size * _batch_seq_len * _tw._head_num);
}
#endif
ker_attention_mask_weights_launcher<_DataType>(_batch_size, 1, _batch_seq_len,
_tw._head_num, _stream, _p_d_c,
_p_d_real_seq_len);
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_c, "mask weights",
_batch_size * _batch_seq_len * _tw._head_num - 5,
_batch_size * _batch_seq_len * _tw._head_num);
}
#endif
/* ---step 3. new_q = correlation * v--- */
CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, 1, _batch_seq_len,
&_fone, _p_d_v, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len,
_batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head,
_tw._dim_per_head, _batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_q, "value after attention",
_batch_size * _tw._hidden_size - 5,
_batch_size * _tw._hidden_size);
}
#endif
// use v to store the reshaped q, since they have the same size and v
// will not be used again before the next multi-head attention
ker_arrange_atten_output_launcher<_DataType>(
_batch_size, _tw._hidden_size, _stream, _p_d_q, _p_d_v, 1,
_tw._dim_per_head, _tw._head_num, _max_thread_per_block);
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_v, "reshaped value after attention", 0, 5);
print_vec(_p_d_query, "attention input with output bias", 0, 5);
}
#endif
/* ---step 4. new_q = ori_q + new_q * output_wei--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_size,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType,
_tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_query,
_CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
if (_layer_id == 0) {
print_vec(_p_d_enc_wei[_weight_offset + 4], "attn out kernel", 0, 5);
print_vec(_p_d_query, "attention output", 0, 5);
}
#endif
return;
}
template <OperationType OpType_>
void GptEncoder<OpType_>::ffn_add_norm() {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_ffn_buf1,
_p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7],
_p_d_enc_wei[_weight_offset + 11], _max_thread_per_block);
/* ---step 1. first ffn layer--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType,
_tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero,
_p_d_ffn_buf2, _CType, _tw._inner_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
ker_bias_gelu_launcher<_DataType>(
_batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2,
_p_d_enc_wei[_weight_offset + 9], _tw._inner_size);
/* ---step 2. second ffn layer--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num,
_tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType,
_tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone,
_p_d_query, _CType, _tw._hidden_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
return;
}
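/*
Reading aid (comment added for clarity): this is the usual pre-layer-norm
transformer FFN block, roughly
  y = x + GELU(LayerNorm(x) W1 + b1) W2
with the second GEMM accumulating directly into _p_d_query (beta = 1), so the
residual add comes for free; the remaining bias appears to be folded into the
next ker_norm_layer_resual_launcher call, which is why weight [offset + 11] is
passed to the layer-norm launcher above.
*/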
template <OperationType OpType_>
void GptEncoder<OpType_>::ffn_add_norm_with_cache() {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_ffn_buf1,
_p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7],
_p_d_enc_wei[_weight_offset + 11], _max_thread_per_block);
/* ---step 1. first ffn layer--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_size,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType,
_tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero,
_p_d_ffn_buf2, _CType, _tw._inner_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
ker_bias_gelu_launcher<_DataType>(
_batch_size, _max_thread_per_block, _stream, _p_d_ffn_buf2,
_p_d_enc_wei[_weight_offset + 9], _tw._inner_size);
/* ---step 2. second ffn layer--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_size,
_tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType,
_tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone,
_p_d_query, _CType, _tw._hidden_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
return;
}
/**
Compute ppl from encoder output
*/
template <OperationType OpType_>
void GptEncoder<OpType_>::compute_ppl() {
/* ---step 1. project hidden states to vocab logits--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_T, CUBLAS_OP_N, _tw._src_vocab_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size,
_p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType,
_tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
print_vec(_p_d_logit, "logits", _batch_token_num * _tw._src_vocab_size - 5,
_batch_token_num * _tw._src_vocab_size);
#endif
/* ---step 2. compute language model ppl--- */
ker_ppl_launcher<_DataType>(
_batch_size, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit,
_p_d_token_id, _p_d_real_seq_len, _p_d_ppl, _tw._src_vocab_size);
}
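// Hedged sketch (not the actual ker_ppl kernel): one conventional way to turn
// the logits computed above into a per-sequence perplexity, shown for a single
// sequence. It assumes logits of shape [seq_len, vocab_size] and that position
// t - 1 predicts token t; the real kernel may differ in padding handling and
// normalization details.
static float sequence_ppl_reference(const float* logits, const int* token_ids,
                                    int seq_len, int vocab_size) {
  double nll = 0.0;
  int count = 0;
  for (int t = 1; t < seq_len; ++t) {
    const float* row = logits + (t - 1) * vocab_size;
    float max_logit = row[0];
    for (int v = 1; v < vocab_size; ++v) max_logit = fmaxf(max_logit, row[v]);
    double denom = 0.0;
    for (int v = 0; v < vocab_size; ++v) denom += exp((double)(row[v] - max_logit));
    nll -= (double)(row[token_ids[t]] - max_logit) - log(denom);
    ++count;
  }
  return (float)exp(nll / count);
}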
template class GptEncoder<OperationType::FP16>;
template class GptEncoder<OperationType::FP32>;
} // namespace cuda
} // namespace lightseq
// ==================== next source file ====================
#include <cuda.h>
#include "LBM.h"
const int nThreads = 32;
__device__ __forceinline__ size_t gpu_field0_index(unsigned int x, unsigned int y)
{
return NX*y+x;
}
__device__ __forceinline__ size_t gpu_scalar_index(unsigned int x, unsigned int y)
{
return NX*y+x;
}
__device__ __forceinline__ size_t gpu_fieldn_index(unsigned int x, unsigned int y, unsigned int d)
{
return (NX*(NY*(d-1)+y)+x);
}
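// Layout note: f0 holds only the rest population as a plain NX*NY scalar
// field, while f1/f2 store the eight moving populations in structure-of-arrays
// order, one full NX*NY plane per direction d = 1..8:
//   offset(x, y, d) = NX*(NY*(d-1) + y) + x
// so threads with consecutive x hit consecutive addresses within a plane and
// the accesses below coalesce. A host mirror of the same mapping, for
// illustration only:
inline size_t host_fieldn_index(unsigned int x, unsigned int y, unsigned int d)
{
    return (size_t)NX*(NY*(d-1)+y)+x;  // must stay in sync with gpu_fieldn_index
}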
#define checkCudaErrors(err) __checkCudaErrors(err,#err,__FILE__,__LINE__)
#define getLastCudaError(msg) __getLastCudaError(msg,__FILE__,__LINE__)
inline void __checkCudaErrors(cudaError_t err, const char *const func, const char *const file, const int line )
{
if(err != cudaSuccess)
{
fprintf(stderr, "CUDA error at %s(%d)\"%s\": [%d] %s.\n",
file, line, func, (int)err, cudaGetErrorString(err));
exit(-1);
}
}
inline void __getLastCudaError(const char *const errorMessage, const char *const file, const int line )
{
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) {
fprintf(stderr, "CUDA error at %s(%d): [%d] %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(-1);
}
}
// forward declarations of kernels
__global__ void gpu_taylor_green(unsigned int,double*,double*,double*);
__global__ void gpu_init_equilibrium(double*,double*,double*,double*,double*);
__global__ void gpu_stream_collide_save(double*,double*,double*,double*,double*,double*,bool);
__global__ void gpu_compute_flow_properties(unsigned int,double*,double*,double*,double*);
__device__ void taylor_green_eval(unsigned int t, unsigned int x, unsigned int y, double *r, double *u, double *v)
{
double kx = 2.0*M_PI/NX;
double ky = 2.0*M_PI/NY;
double td = 1.0/(nu*(kx*kx+ky*ky));
double X = x+0.5;
double Y = y+0.5;
double ux = -u_max*sqrt(ky/kx)*cos(kx*X)*sin(ky*Y)*exp(-1.0*t/td);
double uy = u_max*sqrt(kx/ky)*sin(kx*X)*cos(ky*Y)*exp(-1.0*t/td);
double P = -0.25*rho0*u_max*u_max*((ky/kx)*cos(2.0*kx*X)+(kx/ky)*cos(2.0*ky*Y))*exp(-2.0*t/td);
double rho = rho0+3.0*P;
*r = rho;
*u = ux;
*v = uy;
}
__host__ void taylor_green(unsigned int t, double *r, double *u, double *v)
{
// blocks in grid
dim3 grid(NX/nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_taylor_green<<< grid, threads >>>(t,r,u,v);
getLastCudaError("gpu_taylor_green kernel error");
}
__global__ void gpu_taylor_green(unsigned int t, double *r, double *u, double *v)
{
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
size_t sidx = gpu_scalar_index(x,y);
taylor_green_eval(t,x,y,&r[sidx],&u[sidx],&v[sidx]);
}
__host__ void init_equilibrium(double *f0, double *f1, double *r, double *u, double *v)
{
// blocks in grid
dim3 grid(NX/nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_init_equilibrium<<< grid, threads >>>(f0,f1,r,u,v);
getLastCudaError("gpu_init_equilibrium kernel error");
}
__global__ void gpu_init_equilibrium(double *f0, double *f1, double *r, double *u, double *v)
{
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
double rho = r[gpu_scalar_index(x,y)];
double ux = u[gpu_scalar_index(x,y)];
double uy = v[gpu_scalar_index(x,y)];
    // compute equilibrium distribution
// feq_i = w_i rho [1 + 3(ci . u) + (9/2) (ci . u)^2 - (3/2) (u.u)]
// feq_i = w_i rho [1 - 3/2 (u.u) + (ci . 3u) + (1/2) (ci . 3u)^2]
// feq_i = w_i rho [1 - 3/2 (u.u) + (ci . 3u){ 1 + (1/2) (ci . 3u) }]
// temporary variables
double w0r = w0*rho;
double wsr = ws*rho;
double wdr = wd*rho;
double omusq = 1.0 - 1.5*(ux*ux+uy*uy);
double tux = 3.0*ux;
double tuy = 3.0*uy;
f0[gpu_field0_index(x,y)] = w0r*(omusq);
double cidot3u = tux;
f1[gpu_fieldn_index(x,y,1)] = wsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tuy;
f1[gpu_fieldn_index(x,y,2)] = wsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = -tux;
f1[gpu_fieldn_index(x,y,3)] = wsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = -tuy;
f1[gpu_fieldn_index(x,y,4)] = wsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tux+tuy;
f1[gpu_fieldn_index(x,y,5)] = wdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tuy-tux;
f1[gpu_fieldn_index(x,y,6)] = wdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = -(tux+tuy);
f1[gpu_fieldn_index(x,y,7)] = wdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tux-tuy;
f1[gpu_fieldn_index(x,y,8)] = wdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
}
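// Hedged host reference (illustration only): the same second-order D2Q9
// equilibrium used by gpu_init_equilibrium, for one node. It assumes the usual
// weights w0 = 4/9, ws = 1/9, wd = 1/36 and the direction numbering shown in
// gpu_stream_collide_save below.
void equilibrium_reference(double rho, double ux, double uy, double feq[9])
{
    const double w[9]  = {4.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/9.0, 1.0/9.0,
                          1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0};
    const int    cx[9] = {0, 1, 0, -1,  0, 1, -1, -1,  1};
    const int    cy[9] = {0, 0, 1,  0, -1, 1,  1, -1, -1};
    double omusq = 1.0 - 1.5*(ux*ux + uy*uy);
    for(int i = 0; i < 9; ++i)
    {
        double cidot3u = 3.0*(cx[i]*ux + cy[i]*uy);
        feq[i] = w[i]*rho*(omusq + cidot3u*(1.0 + 0.5*cidot3u));
    }
}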
__host__ void stream_collide_save(double *f0, double *f1, double *f2, double *r, double *u, double *v, bool save)
{
// blocks in grid
dim3 grid(NX/nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_stream_collide_save<<< grid, threads >>>(f0,f1,f2,r,u,v,save);
getLastCudaError("gpu_stream_collide_save kernel error");
}
__global__ void gpu_stream_collide_save(double *f0, double *f1, double *f2, double *r, double *u, double *v, bool save)
{
// useful constants
const double tauinv = 2.0/(6.0*nu+1.0); // 1/tau
const double omtauinv = 1.0-tauinv; // 1 - 1/tau
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int xp1 = (x+1)%NX;
unsigned int yp1 = (y+1)%NY;
unsigned int xm1 = (NX+x-1)%NX;
unsigned int ym1 = (NY+y-1)%NY;
// direction numbering scheme
// 6 2 5
// 3 0 1
// 7 4 8
double ft0 = f0[gpu_field0_index(x,y)];
// load populations from adjacent nodes
double ft1 = f1[gpu_fieldn_index(xm1,y, 1)];
double ft2 = f1[gpu_fieldn_index(x, ym1,2)];
double ft3 = f1[gpu_fieldn_index(xp1,y, 3)];
double ft4 = f1[gpu_fieldn_index(x, yp1,4)];
double ft5 = f1[gpu_fieldn_index(xm1,ym1,5)];
double ft6 = f1[gpu_fieldn_index(xp1,ym1,6)];
double ft7 = f1[gpu_fieldn_index(xp1,yp1,7)];
double ft8 = f1[gpu_fieldn_index(xm1,yp1,8)];
// compute moments
double rho = ft0+ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8;
double rhoinv = 1.0/rho;
double ux = rhoinv*(ft1+ft5+ft8-(ft3+ft6+ft7));
double uy = rhoinv*(ft2+ft5+ft6-(ft4+ft7+ft8));
// only write to memory when needed
if(save)
{
r[gpu_scalar_index(x,y)] = rho;
u[gpu_scalar_index(x,y)] = ux;
v[gpu_scalar_index(x,y)] = uy;
}
// now compute and relax to equilibrium
// note that
// relax to equilibrium
// feq_i = w_i rho [1 + 3(ci . u) + (9/2) (ci . u)^2 - (3/2) (u.u)]
// feq_i = w_i rho [1 - 3/2 (u.u) + (ci . 3u) + (1/2) (ci . 3u)^2]
// feq_i = w_i rho [1 - 3/2 (u.u) + (ci . 3u){ 1 + (1/2) (ci . 3u) }]
// temporary variables
double tw0r = tauinv*w0*rho; // w[0]*rho/tau
double twsr = tauinv*ws*rho; // w[1-4]*rho/tau
double twdr = tauinv*wd*rho; // w[5-8]*rho/tau
double omusq = 1.0 - 1.5*(ux*ux+uy*uy); // 1-(3/2)u.u
double tux = 3.0*ux;
double tuy = 3.0*uy;
f0[gpu_field0_index(x,y)] = omtauinv*ft0 + tw0r*(omusq);
double cidot3u = tux;
f2[gpu_fieldn_index(x,y,1)] = omtauinv*ft1 + twsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tuy;
f2[gpu_fieldn_index(x,y,2)] = omtauinv*ft2 + twsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = -tux;
f2[gpu_fieldn_index(x,y,3)] = omtauinv*ft3 + twsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = -tuy;
f2[gpu_fieldn_index(x,y,4)] = omtauinv*ft4 + twsr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tux+tuy;
f2[gpu_fieldn_index(x,y,5)] = omtauinv*ft5 + twdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tuy-tux;
f2[gpu_fieldn_index(x,y,6)] = omtauinv*ft6 + twdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = -(tux+tuy);
f2[gpu_fieldn_index(x,y,7)] = omtauinv*ft7 + twdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
cidot3u = tux-tuy;
f2[gpu_fieldn_index(x,y,8)] = omtauinv*ft8 + twdr*(omusq + cidot3u*(1.0+0.5*cidot3u));
}
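// Hedged usage sketch (the real driver lives outside this file): the kernel
// above streams from f1 and writes post-collision populations to f2, with f0
// updated in place, so the caller is expected to ping-pong the two buffers
// between steps. Parameter names here are placeholders.
void run_lbm_steps(double *f0, double *f1, double *f2,
                   double *rho, double *ux, double *uy,
                   unsigned int nsteps, unsigned int save_every)
{
    for(unsigned int t = 0; t < nsteps; ++t)
    {
        bool save = (save_every > 0) && (t % save_every == 0);
        stream_collide_save(f0, f1, f2, rho, ux, uy, save);
        double *temp = f1;  // next step must stream from the freshly
        f1 = f2;            // collided populations, so swap read/write
        f2 = temp;
    }
}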
__host__ void compute_flow_properties(unsigned int t, double *r, double *u, double *v,
double *prop, double *prop_gpu, double *prop_host)
{
// prop must point to space for 4 doubles:
// 0: energy
// 1: L2 error in rho
// 2: L2 error in ux
// 3: L2 error in uy
// blocks in grid
dim3 grid(NX/nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_compute_flow_properties<<< grid, threads, 7*threads.x*sizeof(double) >>>(t,r,u,v,prop_gpu);
getLastCudaError("gpu_compute_flow_properties kernel error");
// transfer block sums to host memory
size_t prop_size_bytes = 7*grid.x*grid.y*sizeof(double);
checkCudaErrors(cudaMemcpy(prop_host,prop_gpu,prop_size_bytes,cudaMemcpyDeviceToHost));
// initialise sums
double E = 0.0; // kinetic energy
double sumrhoe2 = 0.0; // sum of error squared in rho
double sumuxe2 = 0.0; // ux
double sumuye2 = 0.0; // uy
double sumrhoa2 = 0.0; // sum of analytical rho squared
double sumuxa2 = 0.0; // ux
double sumuya2 = 0.0; // uy
// finish summation with CPU
for(unsigned int i = 0; i < grid.x*grid.y; ++i)
{
E += prop_host[7*i];
sumrhoe2 += prop_host[7*i+1];
sumuxe2 += prop_host[7*i+2];
sumuye2 += prop_host[7*i+3];
sumrhoa2 += prop_host[7*i+4];
sumuxa2 += prop_host[7*i+5];
sumuya2 += prop_host[7*i+6];
}
// compute and return final values
prop[0] = E;
prop[1] = sqrt(sumrhoe2/sumrhoa2);
prop[2] = sqrt(sumuxe2/sumuxa2);
prop[3] = sqrt(sumuye2/sumuya2);
}
__global__ void gpu_compute_flow_properties(unsigned int t, double *r, double *u, double *v, double *prop_gpu)
{
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
extern __shared__ double data[];
// set up arrays for each variable
// each array begins after the previous ends
double *E = data;
double *rhoe2 = data + blockDim.x;
double *uxe2 = data + 2*blockDim.x;
double *uye2 = data + 3*blockDim.x;
double *rhoa2 = data + 4*blockDim.x;
double *uxa2 = data + 5*blockDim.x;
double *uya2 = data + 6*blockDim.x;
// load density and velocity
double rho = r[gpu_scalar_index(x,y)];
double ux = u[gpu_scalar_index(x,y)];
double uy = v[gpu_scalar_index(x,y)];
// compute kinetic energy density
E[threadIdx.x] = rho*(ux*ux + uy*uy);
// compute analytical results
double rhoa, uxa, uya;
taylor_green_eval(t,x,y,&rhoa,&uxa,&uya);
// compute terms for L2 error
rhoe2[threadIdx.x] = (rho-rhoa)*(rho-rhoa);
uxe2[threadIdx.x] = (ux-uxa)*(ux-uxa);
uye2[threadIdx.x] = (uy-uya)*(uy-uya);
rhoa2[threadIdx.x] = (rhoa-rho0)*(rhoa-rho0);
uxa2[threadIdx.x] = uxa*uxa;
uya2[threadIdx.x] = uya*uya;
// synchronise data in shared memory
__syncthreads();
// only one thread proceeds
if(threadIdx.x == 0)
{
// compute linear index for this block within grid
size_t idx = 7*(gridDim.x*blockIdx.y+blockIdx.x);
for(int n = 0; n < 7; ++n)
prop_gpu[idx+n] = 0.0;
// sum values for this block from shared memory
for(int i = 0; i < blockDim.x; ++i)
{
prop_gpu[idx ] += E[i];
prop_gpu[idx+1] += rhoe2[i];
prop_gpu[idx+2] += uxe2[i];
prop_gpu[idx+3] += uye2[i];
prop_gpu[idx+4] += rhoa2[i];
prop_gpu[idx+5] += uxa2[i];
prop_gpu[idx+6] += uya2[i];
}
}
}
__host__ void report_flow_properties(unsigned int t, double *rho, double *ux, double *uy,
double *prop_gpu, double *prop_host)
{
double prop[4];
compute_flow_properties(t,rho,ux,uy,prop,prop_gpu,prop_host);
printf("%u,%g,%g,%g,%g\n",t,prop[0],prop[1],prop[2],prop[3]);
}
__host__ void save_scalar(const char* name, double *scalar_gpu, double *scalar_host, unsigned int n)
{
// assume reasonably-sized file names
char filename[128];
char format[16];
// compute maximum number of digits
int ndigits = floor(log10((double)NSTEPS)+1.0);
// generate format string
// file name format is name0000nnn.bin
sprintf(format,"%%s%%0%dd.bin",ndigits);
sprintf(filename,format,name,n);
// transfer memory from GPU to host
checkCudaErrors(cudaMemcpy(scalar_host,scalar_gpu,mem_size_scalar,cudaMemcpyDeviceToHost));
// open file for writing
	FILE *fout = fopen(filename,"wb+");
	if(fout == NULL)
	{
		fprintf(stderr,"Error opening %s\n",filename);
		perror("");
		return;
	}
	// write data and check the stream state before closing it
	fwrite(scalar_host,1,mem_size_scalar,fout);
	if(ferror(fout))
	{
		fprintf(stderr,"Error saving to %s\n",filename);
		perror("");
	}
	else
	{
		if(!quiet)
			printf("Saved to %s\n",filename);
	}
	// close file
	fclose(fout);
}
// ==================== next source file ====================
// Use 512 threads per block (CUDA_NUM_THREADS), which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 512;
const int BLOCK_SIZE_LIMIT = 32768;
// CUDA: number of blocks needed to cover N threads, capped at BLOCK_SIZE_LIMIT.
inline int GET_BLOCKS(const int N)
{
int ret = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
return (ret > BLOCK_SIZE_LIMIT) ? BLOCK_SIZE_LIMIT : ret;
}
__global__
void load_kernel(V_ID my_in_vtxs,
const V_ID* in_vtxs,
Vertex* old_pr_fb,
const Vertex* old_pr_zc)
{
for (V_ID i = blockIdx.x * blockDim.x + threadIdx.x; i < my_in_vtxs;
i+= blockDim.x * gridDim.x)
{
V_ID vtx = in_vtxs[i];
Vertex my_pr = old_pr_zc[vtx];
cub::ThreadStore<cub::STORE_CG>(old_pr_fb + vtx, my_pr);
}
}
__global__
void pr_kernel(V_ID rowLeft,
V_ID rowRight,
E_ID colLeft,
float initRank,
const NodeStruct* row_ptrs,
const EdgeStruct* col_idxs,
Vertex* old_pr_fb,
Vertex* new_pr_fb)
{
typedef cub::BlockScan<E_ID, CUDA_NUM_THREADS> BlockScan;
__shared__ BlockScan::TempStorage temp_storage;
//__shared__ float pr[CUDA_NUM_THREADS];
__shared__ E_ID blkColStart;
for (V_ID blkRowStart = blockIdx.x * blockDim.x + rowLeft; blkRowStart <= rowRight;
blkRowStart += blockDim.x * gridDim.x)
{
E_ID myNumEdges = 0, scratchOffset, totalNumEdges = 0;
V_ID myDegree = 0;
V_ID curVtx = blkRowStart + threadIdx.x;
if (curVtx <= rowRight) {
NodeStruct ns = row_ptrs[curVtx - rowLeft];
E_ID start_col_idx, end_col_idx = ns.index;
myDegree = ns.degree;
if (curVtx == rowLeft)
start_col_idx = colLeft;
else
start_col_idx = row_ptrs[curVtx - rowLeft - 1].index;
myNumEdges = end_col_idx - start_col_idx;
if (threadIdx.x == 0)
blkColStart = start_col_idx;
new_pr_fb[curVtx - rowLeft] = 0;
}
__syncthreads();
BlockScan(temp_storage).ExclusiveSum(myNumEdges, scratchOffset, totalNumEdges);
E_ID done = 0;
while (totalNumEdges > 0) {
if (threadIdx.x < totalNumEdges) {
EdgeStruct es = col_idxs[blkColStart + done + threadIdx.x - colLeft];
float src_pr = old_pr_fb[es.src];
atomicAdd(new_pr_fb + es.dst - rowLeft, src_pr);
}
done += CUDA_NUM_THREADS;
totalNumEdges -= (totalNumEdges > CUDA_NUM_THREADS) ?
CUDA_NUM_THREADS : totalNumEdges;
}
__syncthreads();
    // guard: threads in the last block may have curVtx past rowRight
    if (curVtx <= rowRight) {
      float my_pr = initRank + ALPHA * new_pr_fb[curVtx - rowLeft];
      if (myDegree != 0)
        my_pr = my_pr / myDegree;
      new_pr_fb[curVtx - rowLeft] = my_pr;
    }
}
}
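// Hedged reference kernel (illustration only, not launched by the task below):
// the same pull-style update as pr_kernel, but one vertex per thread and no
// cub::BlockScan edge balancing, which makes the intended math easy to read:
//   new_pr[v] = (initRank + ALPHA * sum(old_pr[src]) over in-edges) / degree.
__global__
void pr_kernel_naive(V_ID rowLeft, V_ID rowRight, E_ID colLeft,
                     float initRank, const NodeStruct* row_ptrs,
                     const EdgeStruct* col_idxs, Vertex* old_pr_fb,
                     Vertex* new_pr_fb)
{
  for (V_ID v = blockIdx.x * blockDim.x + threadIdx.x + rowLeft;
       v <= rowRight; v += blockDim.x * gridDim.x)
  {
    NodeStruct ns = row_ptrs[v - rowLeft];
    E_ID start = (v == rowLeft) ? colLeft : row_ptrs[v - rowLeft - 1].index;
    float sum = 0.0f;
    for (E_ID e = start; e < ns.index; e++)
      sum += old_pr_fb[col_idxs[e - colLeft].src];
    float pr = initRank + ALPHA * sum;
    new_pr_fb[v - rowLeft] = (ns.degree != 0) ? pr / ns.degree : pr;
  }
}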
/*static*/
void pull_app_task_impl(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 5);
assert(task->regions.size() == 5);
const GraphPiece *piece = (GraphPiece*) task->local_args;
const AccessorRO<NodeStruct, 1> acc_row_ptr(regions[0], FID_DATA);
const AccessorRO<V_ID, 1> acc_in_vtx(regions[1], FID_DATA);
const AccessorRO<EdgeStruct, 1> acc_col_idx(regions[2], FID_DATA);
const AccessorRO<Vertex, 1> acc_old_pr(regions[3], FID_DATA);
const AccessorWO<Vertex, 1> acc_new_pr(regions[4], FID_DATA);
Rect<1> rect_row_ptr = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_in_vtx = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<1> rect_col_idx = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Rect<1> rect_old_pr = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
Rect<1> rect_new_pr = runtime->get_index_space_domain(
ctx, task->regions[4].region.get_index_space());
assert(acc_row_ptr.accessor.is_dense_arbitrary(rect_row_ptr));
assert(acc_in_vtx.accessor.is_dense_arbitrary(rect_in_vtx));
assert(acc_col_idx.accessor.is_dense_arbitrary(rect_col_idx));
assert(acc_old_pr.accessor.is_dense_arbitrary(rect_old_pr));
assert(acc_new_pr.accessor.is_dense_arbitrary(rect_new_pr));
const NodeStruct* row_ptrs = acc_row_ptr.ptr(rect_row_ptr);
const V_ID* in_vtxs = acc_in_vtx.ptr(rect_in_vtx);
const EdgeStruct* col_idxs = acc_col_idx.ptr(rect_col_idx);
const Vertex* old_pr = acc_old_pr.ptr(rect_old_pr);
Vertex* new_pr = acc_new_pr.ptr(rect_new_pr);
V_ID rowLeft = rect_row_ptr.lo[0], rowRight = rect_row_ptr.hi[0];
E_ID colLeft = rect_col_idx.lo[0], colRight = rect_col_idx.hi[0];
load_kernel<<<GET_BLOCKS(piece->myInVtxs), CUDA_NUM_THREADS>>>(
piece->myInVtxs, in_vtxs, piece->oldPrFb, old_pr);
pr_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>(
rowLeft, rowRight, colLeft, (1 - ALPHA) / piece->nv,
row_ptrs, col_idxs, piece->oldPrFb, piece->newPrFb);
// Need to copy results back to new_pr
cudaDeviceSynchronize();
checkCUDA(cudaMemcpy(new_pr, piece->newPrFb,
(rowRight - rowLeft + 1) * sizeof(Vertex),
cudaMemcpyDeviceToHost));
}
__global__
void init_kernel(V_ID rowLeft,
V_ID rowRight,
E_ID colLeft,
NodeStruct* row_ptrs,
EdgeStruct* col_idxs,
const E_ID* raw_rows,
const V_ID* degrees,
const V_ID* raw_cols)
{
for (V_ID n = blockIdx.x * blockDim.x + threadIdx.x;
n + rowLeft <= rowRight; n += blockDim.x * gridDim.x)
{
E_ID startColIdx, endColIdx = raw_rows[n];
if (n == 0)
startColIdx = colLeft;
else
startColIdx = raw_rows[n - 1];
row_ptrs[n].index = endColIdx;
if (degrees != NULL)
row_ptrs[n].degree = degrees[n];
for (E_ID e = startColIdx; e < endColIdx; e++)
{
col_idxs[e - colLeft].src = raw_cols[e - colLeft];
col_idxs[e - colLeft].dst = n + rowLeft;
}
}
}
GraphPiece pull_init_task_impl(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
#ifndef VERTEX_DEGREE
assert(false);
#endif
#ifdef EDGE_WEIGHT
assert(false);
#endif
assert(regions.size() == 7);
assert(task->regions.size() == 7);
const Graph *graph = (Graph*) task->args;
const AccessorWO<NodeStruct, 1> acc_row_ptr(regions[0], FID_DATA);
const AccessorWO<V_ID, 1> acc_in_vtx(regions[1], FID_DATA);
const AccessorWO<EdgeStruct, 1> acc_col_idx(regions[2], FID_DATA);
const AccessorWO<Vertex, 1> acc_new_pr(regions[3], FID_DATA);
const AccessorRO<E_ID, 1> acc_raw_rows(regions[4], FID_DATA);
const AccessorRO<V_ID, 1> acc_raw_cols(regions[5], FID_DATA);
Rect<1> rect_row_ptr = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_in_vtx = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<1> rect_col_idx = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Rect<1> rect_new_pr = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
Rect<1> rect_raw_rows = runtime->get_index_space_domain(
ctx, task->regions[4].region.get_index_space());
Rect<1> rect_raw_cols = runtime->get_index_space_domain(
ctx, task->regions[5].region.get_index_space());
assert(acc_row_ptr.accessor.is_dense_arbitrary(rect_row_ptr));
assert(acc_in_vtx.accessor.is_dense_arbitrary(rect_in_vtx));
assert(acc_col_idx.accessor.is_dense_arbitrary(rect_col_idx));
assert(acc_new_pr.accessor.is_dense_arbitrary(rect_new_pr));
assert(acc_raw_rows.accessor.is_dense_arbitrary(rect_raw_rows));
assert(acc_raw_cols.accessor.is_dense_arbitrary(rect_raw_cols));
NodeStruct* row_ptrs = acc_row_ptr.ptr(rect_row_ptr);
V_ID* in_vtxs = acc_in_vtx.ptr(rect_in_vtx);
EdgeStruct* col_idxs = acc_col_idx.ptr(rect_col_idx);
Vertex* new_pr = acc_new_pr.ptr(rect_new_pr);
const E_ID* raw_rows = acc_raw_rows.ptr(rect_raw_rows);
const V_ID* raw_cols = acc_raw_cols.ptr(rect_raw_cols);
V_ID rowLeft = rect_row_ptr.lo[0], rowRight = rect_row_ptr.hi[0];
E_ID colLeft = rect_col_idx.lo[0], colRight = rect_col_idx.hi[0];
std::vector<V_ID> edges(colRight - colLeft + 1);
for (E_ID e = 0; e < colRight - colLeft + 1; e++)
edges[e] = raw_cols[e];
std::sort(edges.begin(), edges.end());
V_ID curVtx = edges[0], myInVtx = 0;
for (E_ID e = 0; e < colRight - colLeft + 1; e++) {
if (curVtx != edges[e]) {
edges[myInVtx++] = curVtx;
curVtx = edges[e];
}
}
edges[myInVtx++] = curVtx;
checkCUDA(cudaMemcpy(in_vtxs, edges.data(), sizeof(V_ID) * myInVtx,
cudaMemcpyHostToDevice));
// Add degree if regions.size() == 7
const V_ID *degrees = NULL;
if (regions.size() == 7) {
const AccessorRO<V_ID, 1> acc_degrees(regions[6], FID_DATA);
Rect<1> rect_degrees = runtime->get_index_space_domain(
ctx, task->regions[6].region.get_index_space());
assert(acc_degrees.accessor.is_dense_arbitrary(rect_degrees));
degrees = acc_degrees.ptr(rect_degrees.lo);
}
init_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>(
rowLeft, rowRight, colLeft, row_ptrs, col_idxs, raw_rows, degrees, raw_cols);
checkCUDA(cudaDeviceSynchronize());
float rank = 1.0f / graph->nv;
assert(sizeof(float) == sizeof(Vertex));
for (V_ID n = 0; n + rowLeft <= rowRight; n++) {
new_pr[n] = degrees[n] == 0 ? rank : rank / degrees[n];
}
GraphPiece piece;
piece.myInVtxs = myInVtx;
piece.nv = graph->nv;
piece.ne = graph->ne;
// Allocate oldPrFb/newPrFb on the same memory as row_ptr
std::set<Memory> memFB;
regions[0].get_memories(memFB);
assert(memFB.size() == 1);
assert(memFB.begin()->kind() == Memory::GPU_FB_MEM);
Realm::MemoryImpl* memImpl =
Realm::get_runtime()->get_memory_impl(*memFB.begin());
Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl;
off_t offset = memFBImpl->alloc_bytes(sizeof(Vertex) * graph->nv);
assert(offset >= 0);
piece.oldPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0);
offset = memFBImpl->alloc_bytes(sizeof(Vertex) * (rowRight - rowLeft + 1));
assert(offset >= 0);
piece.newPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0);
//checkCUDA(cudaMalloc(&(piece.oldPrFb), sizeof(float) * graph->nv));
//checkCUDA(cudaMalloc(&(piece.newPrFb), sizeof(float) * (rowRight-rowLeft+1)));
return piece;
}
// ==================== next source file ====================
#include <gtest/gtest.h>
#include <cstddef>
#include <vector>
#include <claraparabricks/genomeworks/types.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#include <claraparabricks/genomeworks/utils/device_preallocated_allocator.cuh>
namespace claraparabricks
{
namespace genomeworks
{
TEST(TestDevicePreallocatedAllocator, allocations_do_not_overlap)
{
CudaStream cuda_stream = make_cuda_stream();
std::vector<cudaStream_t> cuda_streams;
cuda_streams.push_back(cuda_stream.get());
details::DevicePreallocatedAllocator allocator(2000);
// 0 - 1999: free
cudaError status;
void* pointer_from_0_to_999_actually_to_1023 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_0_to_999_actually_to_1023, 1000, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1023: taken
// 1024 - 1999: free
void* pointer_from_1024_to_1523_actually_to_1535 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1024_to_1523_actually_to_1535, 500, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1535: taken
// 1536 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1024_to_1523_actually_to_1535) - static_cast<gw_byte_t*>(pointer_from_0_to_999_actually_to_1023), 1024);
void* pointer_from_1536_to_1537_actually_to_1791 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1536_to_1537_actually_to_1791, 2, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1791: taken
// 1792 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1536_to_1537_actually_to_1791) - static_cast<gw_byte_t*>(pointer_from_1024_to_1523_actually_to_1535), 512);
void* pointer_from_1792_to_1999_actually_to_1999 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1792_to_1999_actually_to_1999, 208, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1999: taken
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1792_to_1999_actually_to_1999) - static_cast<gw_byte_t*>(pointer_from_1536_to_1537_actually_to_1791), 256);
status = allocator.DeviceFree(pointer_from_1792_to_1999_actually_to_1999);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_1536_to_1537_actually_to_1791);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_1024_to_1523_actually_to_1535);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_0_to_999_actually_to_1023);
ASSERT_EQ(status, cudaSuccess);
}
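// The pointer spacings asserted above (1000 -> 1024 bytes, 500 -> 512, 2 ->
// 256) suggest that allocations are handed out at a 256-byte granularity. A
// hedged sketch of that rounding rule, useful for reading the offsets in these
// tests; the real DevicePreallocatedAllocator may implement it differently.
static constexpr std::size_t round_up_to_granularity(std::size_t bytes,
                                                     std::size_t granularity = 256)
{
    return ((bytes + granularity - 1) / granularity) * granularity;
}
static_assert(round_up_to_granularity(1000) == 1024, "0-999 occupies 0-1023");
static_assert(round_up_to_granularity(500) == 512, "1024-1523 occupies 1024-1535");
static_assert(round_up_to_granularity(2) == 256, "1536-1537 occupies 1536-1791");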
TEST(TestDevicePreallocatedAllocator, memory_correctly_deallocated)
{
CudaStream cuda_stream = make_cuda_stream();
std::vector<cudaStream_t> cuda_streams;
cuda_streams.push_back(cuda_stream.get());
details::DevicePreallocatedAllocator allocator(2000);
// 0 - 1999: free
cudaError status;
void* pointer_from_0_to_999_actually_to_1023 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_0_to_999_actually_to_1023, 1000, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1023: taken
// 1024 - 1999: free
void* pointer_from_1024_to_1523_actually_to_1535 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1024_to_1523_actually_to_1535, 500, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1535: taken
// 1536 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1024_to_1523_actually_to_1535) - static_cast<gw_byte_t*>(pointer_from_0_to_999_actually_to_1023), 1024);
void* pointer_from_1536_to_1537_actually_to_1791 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1536_to_1537_actually_to_1791, 2, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1791: taken
// 1792 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1536_to_1537_actually_to_1791) - static_cast<gw_byte_t*>(pointer_from_1024_to_1523_actually_to_1535), 512);
status = allocator.DeviceFree(pointer_from_1024_to_1523_actually_to_1535);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1023: taken
// 1024 - 1535: free
// 1536 - 1791: taken
// 1792 - 1999: free
void* pointer_from_1024_to_1027_actually_1279 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1024_to_1027_actually_1279, 4, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1279: taken
// 1280 - 1535: free
    // 1536 - 1791: taken
// 1792 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1024_to_1027_actually_1279) - static_cast<gw_byte_t*>(pointer_from_0_to_999_actually_to_1023), 1024);
void* pointer_from_1280_to_1300_actually_1535 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1280_to_1300_actually_1535, 21, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1791: taken
// 1792 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1280_to_1300_actually_1535) - static_cast<gw_byte_t*>(pointer_from_1024_to_1027_actually_1279), 256);
void* pointer_from_1792_to_1800_actually_1999 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1792_to_1800_actually_1999, 9, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1999: taken
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1792_to_1800_actually_1999) - static_cast<gw_byte_t*>(pointer_from_1536_to_1537_actually_to_1791), 256);
status = allocator.DeviceFree(pointer_from_1280_to_1300_actually_1535);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_1024_to_1027_actually_1279);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_1536_to_1537_actually_to_1791);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_0_to_999_actually_to_1023);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1999: free
void* pointer_from_0_to_199_actually_255 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_0_to_199_actually_255, 200, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 255: taken
// 256 - 1999: free
void* pointer_from_256_to_260_actually_511 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_256_to_260_actually_511, 5, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
    // 0 - 511: taken
// 512 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_256_to_260_actually_511) - static_cast<gw_byte_t*>(pointer_from_0_to_199_actually_255), 256);
void* pointer_from_512_to_515_actually_767 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_512_to_515_actually_767, 4, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
    // 0 - 767: taken
// 768 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_512_to_515_actually_767) - static_cast<gw_byte_t*>(pointer_from_256_to_260_actually_511), 256);
status = allocator.DeviceFree(pointer_from_256_to_260_actually_511);
ASSERT_EQ(status, cudaSuccess);
// 0 - 255: taken
// 256 - 511: free
// 512 - 767: taken
// 768 - 1999: free
void* pointer_from_768_to_1067_actually_1279 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_768_to_1067_actually_1279, 300, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 255: taken
// 256 - 511: free
// 512 - 1279: taken
// 1280 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_768_to_1067_actually_1279) - static_cast<gw_byte_t*>(pointer_from_512_to_515_actually_767), 256);
void* pointer_from_256_to_270_actually_511 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_256_to_270_actually_511, 15, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1279: taken
// 1280 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_256_to_270_actually_511) - static_cast<gw_byte_t*>(pointer_from_0_to_199_actually_255), 256);
void* pointer_from_1280_to_1290_actually_1535 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1280_to_1290_actually_1535, 11, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
// 0 - 1535: taken
// 1536 - 1999: free
ASSERT_EQ(static_cast<gw_byte_t*>(pointer_from_1280_to_1290_actually_1535) - static_cast<gw_byte_t*>(pointer_from_768_to_1067_actually_1279), 512);
status = allocator.DeviceFree(pointer_from_1280_to_1290_actually_1535);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_768_to_1067_actually_1279);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_512_to_515_actually_767);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_256_to_270_actually_511);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_0_to_199_actually_255);
ASSERT_EQ(status, cudaSuccess);
}
TEST(TestDevicePreallocatedAllocator, not_enough_memory_left)
{
CudaStream cuda_stream = make_cuda_stream();
std::vector<cudaStream_t> cuda_streams;
cuda_streams.push_back(cuda_stream.get());
details::DevicePreallocatedAllocator allocator(2000);
// 0 - 1999: free
cudaError status;
void* pointer_from_0_to_1499_actually_1535 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_0_to_1499_actually_1535, 1500, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
ASSERT_NE(pointer_from_0_to_1499_actually_1535, nullptr);
// 0 - 1535: taken
// 1536 - 1999: free
void* pointer_from_1536_to_2000_actually_error = pointer_from_0_to_1499_actually_1535; // initially set to some value to make sure allocator.DeviceAllocate() sets it to nullptr if allocation was not successful
status = allocator.DeviceAllocate(&pointer_from_1536_to_2000_actually_error, 465, cuda_streams);
ASSERT_EQ(status, cudaErrorMemoryAllocation);
ASSERT_EQ(pointer_from_1536_to_2000_actually_error, nullptr);
// 0 - 1535: taken
// 1536 - 1999: free
void* pointer_from_1536_to_1999_actually_1999 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1536_to_1999_actually_1999, 464, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
ASSERT_NE(pointer_from_1536_to_1999_actually_1999, nullptr);
// 0 - 1999: taken
status = allocator.DeviceFree(pointer_from_1536_to_1999_actually_1999);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_0_to_1499_actually_1535);
ASSERT_EQ(status, cudaSuccess);
}
TEST(TestDevicePreallocatedAllocator, no_memory_left)
{
CudaStream cuda_stream = make_cuda_stream();
std::vector<cudaStream_t> cuda_streams;
cuda_streams.push_back(cuda_stream.get());
details::DevicePreallocatedAllocator allocator(2000);
// 0 - 1999: free
cudaError status;
void* pointer_from_0_to_1499_actually_1535 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_0_to_1499_actually_1535, 1500, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
ASSERT_NE(pointer_from_0_to_1499_actually_1535, nullptr);
// 0 - 1535: taken
// 1536 - 1999: free
void* pointer_from_1536_to_1999_actually_1999 = nullptr;
status = allocator.DeviceAllocate(&pointer_from_1536_to_1999_actually_1999, 464, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
ASSERT_NE(pointer_from_1536_to_1999_actually_1999, nullptr);
// 0 - 1999: taken
void* pointer_to_unsuccessful_allocation = pointer_from_1536_to_1999_actually_1999; // set it to some value to make sure it gets reset to nullptr
status = allocator.DeviceAllocate(&pointer_to_unsuccessful_allocation, 1, cuda_streams);
ASSERT_EQ(status, cudaErrorMemoryAllocation);
ASSERT_EQ(pointer_to_unsuccessful_allocation, nullptr);
status = allocator.DeviceFree(pointer_from_1536_to_1999_actually_1999);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(pointer_from_0_to_1499_actually_1535);
ASSERT_EQ(status, cudaSuccess);
}
TEST(TestDevicePreallocatedAllocator, deallocating_invalid_pointer)
{
CudaStream cuda_stream = make_cuda_stream();
std::vector<cudaStream_t> cuda_streams;
cuda_streams.push_back(cuda_stream.get());
details::DevicePreallocatedAllocator allocator(2000);
cudaError status;
void* valid_ptr = nullptr;
status = allocator.DeviceAllocate(&valid_ptr, 1500, cuda_streams);
ASSERT_EQ(status, cudaSuccess);
ASSERT_NE(valid_ptr, nullptr);
void* invalid_ptr = static_cast<void*>(static_cast<gw_byte_t*>(valid_ptr) + 10);
status = allocator.DeviceFree(invalid_ptr);
ASSERT_EQ(status, cudaErrorInvalidValue);
void* null_ptr = nullptr;
status = allocator.DeviceFree(null_ptr);
ASSERT_EQ(status, cudaSuccess);
// deallocating nullptr does nothing, but is a success
status = allocator.DeviceFree(nullptr);
ASSERT_EQ(status, cudaSuccess);
status = allocator.DeviceFree(valid_ptr);
ASSERT_EQ(status, cudaSuccess);
// deallocating previously deallocated pointer results in error
status = allocator.DeviceFree(valid_ptr); // pointer not valid anymore
ASSERT_EQ(status, cudaErrorInvalidValue);
}
} // namespace genomeworks
} // namespace claraparabricks
// ==================== next source file ====================
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <helper_functions.h>  // CUDA samples: StopWatchInterface, sdk*Timer
#include <helper_cuda.h>       // CUDA samples: checkCudaErrors
#define BLOCK_DIM 16
#define MAX_FILTER_LENGTH 128
#define RESULT_VERIFICATION 1 // set to 0 to skip verifying the GPU result against the CPU
__global__ void
convolution_kernel_v1(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
    // guard against the partial blocks produced by the ceil-divided grid
    if (idx_x >= num_col || idx_y >= num_row)
        return;
    float result = 0.f;
for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
{
for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
{
// Find the global position to apply the given filter
int image_row = idx_y + filter_row;
int image_col = idx_x + filter_col;
float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ?
d_input[image_row * num_col + image_col] : 0.f;
float filter_value = d_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2];
result += image_value * filter_value;
}
}
d_output[idx_y * num_col + idx_x] = result;
}
__constant__ float c_filter[MAX_FILTER_LENGTH * MAX_FILTER_LENGTH];
__global__ void
convolution_kernel_v2(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
    // guard against the partial blocks produced by the ceil-divided grid
    if (idx_x >= num_col || idx_y >= num_row)
        return;
    float result = 0.f;
for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
{
for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
{
int image_row = idx_y + filter_row;
int image_col = idx_x + filter_col;
float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ?
d_input[image_row * num_col + image_col] : 0.f;
float filter_value = c_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2];
result += image_value * filter_value;
}
}
d_output[idx_y * num_col + idx_x] = result;
}
__global__ void
convolution_kernel_v3(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size)
{
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int pad_size = filter_size / 2;
int tile_size = BLOCK_DIM + 2 * pad_size;
extern __shared__ float s_input[];
for (int row = 0; row <= tile_size / BLOCK_DIM; row++)
{
for (int col = 0; col <= tile_size / BLOCK_DIM; col++)
{
int idx_row = idx_y + BLOCK_DIM * row - pad_size; // input data index row
int idx_col = idx_x + BLOCK_DIM * col - pad_size; // input data index column
int fid_row = threadIdx.y + BLOCK_DIM * row; // filter index row
int fid_col = threadIdx.x + BLOCK_DIM * col; // filter index column
if (fid_row >= tile_size || fid_col >= tile_size) continue;
s_input[tile_size * fid_row + fid_col] = \
(idx_row >= 0 && idx_row < num_row && idx_col >= 0 && idx_col < num_col) ?
d_input[num_col * idx_row + idx_col] : 0.f;
}
}
__syncthreads();
/* Tile Debugging */
// if (idx_x == BLOCK_DIM*1 && idx_y == BLOCK_DIM*1)
// {
// for (int row = 0; row < 2*pad_size + BLOCK_DIM; row++)
// {
// for (int col = 0; col < 2*pad_size + BLOCK_DIM; col++)
// {
// printf("%.0f ", s_input[tile_size * row + col]);
// }
// printf("\n");
// }
// }
float result = 0.f;
for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
{
for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
{
// Find the global position to apply the given filter
int image_row = threadIdx.y + pad_size + filter_row;
int image_col = threadIdx.x + pad_size + filter_col;
float image_value = s_input[tile_size * image_row + image_col];
float filter_value = c_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2];
result += image_value * filter_value;
}
}
    // partial-block threads may fall outside the image; skip their write
    if (idx_x < num_col && idx_y < num_row)
        d_output[idx_y * num_col + idx_x] = result;
}
void convolution_gpu(int version, float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size)
{
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
dim3 dimGrid((num_col + BLOCK_DIM - 1) / BLOCK_DIM, (num_row + BLOCK_DIM - 1) / BLOCK_DIM);
if (version == 1)
convolution_kernel_v1<<<dimGrid, dimBlock>>>(d_output, d_input, d_filter, num_row, num_col, filter_size);
else if (version == 2)
convolution_kernel_v2<<<dimGrid, dimBlock>>>(d_output, d_input, d_filter, num_row, num_col, filter_size);
else // version == 3
{
        // kernel v3 indexes a (BLOCK_DIM + 2*(filter_size/2))^2 float tile
        int tile_size = BLOCK_DIM + 2 * (filter_size / 2);
        int shared_mem_size = tile_size * tile_size * sizeof(float);
convolution_kernel_v3<<<dimGrid, dimBlock, shared_mem_size, 0 >>>(d_output, d_input, d_filter, num_row, num_col, filter_size);
}
checkCudaErrors(cudaGetLastError());
}
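// Hedged helper (illustration only, not called in this file): kernel v3 needs
// a (BLOCK_DIM + 2*(filter_size/2))^2 float tile of dynamic shared memory, so
// it can be worth checking that the tile fits the per-block shared memory of
// the current device before launching. cudaDevAttrMaxSharedMemoryPerBlock is a
// standard runtime attribute; the helper name is ours.
inline bool shared_tile_fits(int filter_size, int device = 0)
{
    int tile_size = BLOCK_DIM + 2 * (filter_size / 2);
    size_t needed = (size_t)tile_size * tile_size * sizeof(float);
    int max_shared_bytes = 0;
    cudaDeviceGetAttribute(&max_shared_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device);
    return needed <= (size_t)max_shared_bytes;
}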
void convolution_host(float *h_output, float *h_input, float *h_filter, int num_row, int num_col, int filter_size)
{
//For every pixel in the image
    #pragma omp parallel for // "parallel" alone would make every thread redundantly run the whole loop
for (int row = 0; row < (int)num_row; ++row)
{
for (int col = 0; col < (int)num_col; ++col)
{
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
{
for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
{
// Find the global image position for this filter position
int image_row = row + filter_row;
int image_col = col + filter_col;
float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ?
h_input[image_row * num_col + image_col] : 0.f;
float filter_value = h_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2];
result += image_value * filter_value;
}
}
h_output[row * num_col + col] = result;
}
}
}
/* Generates Bi-symmetric Gaussian Filter */
void generate_filter(float *h_filter, int filter_size)
{
float blur_kernel_sigma = 2.;
float sum_filter = 0.f; //for normalization
for (int row = -filter_size / 2; row <= filter_size / 2; row++)
{
for (int col = -filter_size / 2; col <= filter_size / 2; col++)
{
float filterValue = expf(-(float)(col * col + row * row) / (2.f * blur_kernel_sigma * blur_kernel_sigma));
h_filter[(row + filter_size / 2) * filter_size + col + filter_size / 2] = filterValue;
sum_filter += filterValue;
}
}
// normalization
float normalizationFactor = 1.f / sum_filter;
for (int row = -filter_size / 2; row <= filter_size / 2; row++)
for (int col = -filter_size / 2; col <= filter_size / 2; col++)
h_filter[(row + filter_size / 2) * filter_size + col + filter_size / 2] *= normalizationFactor;
}
void generate_data(float *h_buffer, int num_row, int num_col)
{
for (int row = 0; row < num_row; row++) {
for (int col = 0; col < num_col; col++) {
// h_buffer[row * num_col + col] = float(rand() & 0xFFFFFF) / RAND_MAX;
h_buffer[row * num_col + col] = 1.f;
}
}
}
bool value_test(float *a, float *b, int length)
{
float epsilon = 0.000001;
bool result = true;
for (int i = 0; i < length; i++)
if (abs(a[i] - b[i]) >= epsilon)
result = false;
return result;
}
int main()
{
int num_row = 2048;
int num_col = 2048;
int filter_size = 9;
int buf_size = num_row * num_col * sizeof(float);
float *h_input, *d_input;
float *h_output_host, *h_output_gpu, *d_output;
float *h_filter, *d_filter;
float elapsed_time_gpu;
// initialize timer
StopWatchInterface *timer_host, *timer_gpu;
sdkCreateTimer(&timer_host);
sdkCreateTimer(&timer_gpu);
srand(2019);
// allocate host memories
h_input = (float *)malloc(buf_size);
h_output_host = (float *)malloc(buf_size);
h_output_gpu = (float *)malloc(buf_size);
h_filter = (float *)malloc(filter_size * filter_size * sizeof(float));
// allocate gpu memories
cudaMalloc((void **)&d_input, buf_size);
cudaMalloc((void **)&d_output, buf_size);
cudaMalloc((void **)&d_filter, filter_size * filter_size * sizeof(float));
// generate data
generate_data(h_input, num_row, num_col);
generate_filter(h_filter, filter_size);
    // copy input data to gpu
cudaMemcpy(d_input, h_input, buf_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_filter, h_filter, filter_size * filter_size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_filter, h_filter, filter_size * filter_size * sizeof(float));
// processing in GPU
sdkStartTimer(&timer_gpu);
cudaProfilerStart();
convolution_gpu(1, d_output, d_input, d_filter, num_row, num_col, filter_size);
cudaDeviceSynchronize();
sdkStopTimer(&timer_gpu);
elapsed_time_gpu = sdkGetTimerValue(&timer_gpu);
printf("Processing Time (1) -> GPU: %.2f ms\n", elapsed_time_gpu);
// processing in GPU
sdkResetTimer(&timer_gpu);
sdkStartTimer(&timer_gpu);
convolution_gpu(2, d_output, d_input, d_filter, num_row, num_col, filter_size);
cudaDeviceSynchronize();
sdkStopTimer(&timer_gpu);
elapsed_time_gpu = sdkGetTimerValue(&timer_gpu);
printf("Processing Time (2) -> GPU: %.2f ms\n", elapsed_time_gpu);
// processing in GPU (3)
sdkResetTimer(&timer_gpu);
sdkStartTimer(&timer_gpu);
convolution_gpu(3, d_output, d_input, d_filter, num_row, num_col, filter_size);
cudaDeviceSynchronize();
sdkStopTimer(&timer_gpu);
cudaProfilerStop();
elapsed_time_gpu = sdkGetTimerValue(&timer_gpu);
printf("Processing Time (3) -> GPU: %.2f ms\n", elapsed_time_gpu);
#if (RESULT_VERIFICATION)
// processing in CPU
sdkStartTimer(&timer_host);
convolution_host(h_output_host, h_input, h_filter, num_row, num_col, filter_size);
sdkStopTimer(&timer_host);
float elapsed_time_host = sdkGetTimerValue(&timer_host);
printf("Processing Time -> Host: %.2f ms\n", elapsed_time_host);
// compare the result
cudaMemcpy(h_output_gpu, d_output, buf_size, cudaMemcpyDeviceToHost);
if (value_test(h_output_host, h_output_gpu, num_row * num_col))
printf("SUCCESS!!\n");
else
printf("Error\n");
#endif
    // finalize
    free(h_input);
    free(h_output_host);
    free(h_output_gpu);
    free(h_filter);
    cudaFree(d_input);
    cudaFree(d_output);
    cudaFree(d_filter);
    sdkDeleteTimer(&timer_host);
    sdkDeleteTimer(&timer_gpu);
    return 0;
}
// ==================== next source file ====================
namespace cgbn {
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ bool dequals(const uint32_t sync, const uint32_t a[limbs], const uint32_t b[limbs]) {
static const uint32_t TPI_ONES=(1ull<<tpi)-1;
uint32_t group_thread=threadIdx.x & tpi-1, warp_thread=threadIdx.x & warpSize-1;
uint32_t lor, mask;
lor=a[0] ^ b[0];
#pragma unroll
for(int32_t index=1;index<limbs;index++)
lor=lor | (a[index] ^ b[index]);
mask=__ballot_sync(sync, lor==0);
if(tpi<warpSize)
mask=mask>>(group_thread ^ warp_thread);
return mask==TPI_ONES;
}
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ int32_t dcompare(const uint32_t sync, const uint32_t a[limbs], const uint32_t b[limbs]) {
static const uint32_t TPI_ONES=(1ull<<tpi)-1;
uint32_t group_thread=threadIdx.x & tpi-1, warp_thread=threadIdx.x & warpSize-1;
uint32_t a_ballot, b_ballot;
if(limbs==1) {
a_ballot=__ballot_sync(sync, a[0]>=b[0]);
b_ballot=__ballot_sync(sync, a[0]<=b[0]);
}
else {
chain_t<> chain1;
#pragma unroll
for(int32_t index=0;index<limbs;index++)
chain1.sub(a[index], b[index]);
a_ballot=chain1.sub(0, 0);
a_ballot=__ballot_sync(sync, a_ballot==0);
chain_t<> chain2;
#pragma unroll
for(int32_t index=0;index<limbs;index++)
chain2.sub(b[index], a[index]);
b_ballot=chain2.sub(0, 0);
b_ballot=__ballot_sync(sync, b_ballot==0);
}
if(tpi<warpSize) {
uint32_t mask=TPI_ONES<<(warp_thread ^ group_thread);
a_ballot=a_ballot & mask;
b_ballot=b_ballot & mask;
}
return ucmp(a_ballot, b_ballot);
}
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ void dmask_set(uint32_t r[limbs], const int32_t numbits) {
int32_t group_thread=threadIdx.x & tpi-1, group_base=group_thread*limbs;
int32_t bits=tpi*limbs*32;
if(numbits>=bits || numbits<=-bits) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=0xFFFFFFFF;
}
else if(numbits>=0) {
int32_t limb=(numbits>>5)-group_base;
int32_t straddle=uleft_wrap(0xFFFFFFFF, 0, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=0;
else if(limb>index)
r[index]=0xFFFFFFFF;
else
r[index]=straddle;
}
}
else {
int32_t limb=(numbits+bits>>5)-group_base;
int32_t straddle=uleft_wrap(0, 0xFFFFFFFF, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=0xFFFFFFFF;
else if(limb>index)
r[index]=0;
else
r[index]=straddle;
}
}
}
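/* Hedged single-thread reference (illustration only, not used by cgbn): the
   mask that dmask_set builds cooperatively across the tpi threads of a group,
   written for one flat array of tpi*limbs words. As read from the code above:
   numbits >= 0 sets the low numbits bits, numbits < 0 sets the high -numbits
   bits, and any magnitude >= the full width sets every bit. */
template<uint32_t tpi, uint32_t limbs>
void dmask_set_reference(uint32_t r[tpi*limbs], const int32_t numbits) {
  const int32_t bits=tpi*limbs*32;
  for(int32_t word=0;word<(int32_t)(tpi*limbs);word++) {
    r[word]=0;
    for(int32_t bit=0;bit<32;bit++) {
      int32_t pos=word*32+bit;
      bool set;
      if(numbits>=bits || numbits<=-bits)
        set=true;
      else if(numbits>=0)
        set=(pos<numbits);
      else
        set=(pos>=bits+numbits);
      if(set)
        r[word]=r[word] | (1u<<bit);
    }
  }
}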
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ void dmask_and(uint32_t r[limbs], const uint32_t a[limbs], const int32_t numbits) {
int32_t group_thread=threadIdx.x & tpi-1, group_base=group_thread*limbs;
int32_t bits=tpi*limbs*32;
if(numbits>=bits || numbits<=-bits) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=a[index];
}
else if(numbits>=0) {
int32_t limb=(numbits>>5)-group_base;
int32_t straddle=uleft_wrap(0xFFFFFFFF, 0, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=0;
else if(limb>index)
r[index]=a[index];
else
r[index]=a[index] & straddle;
}
}
else {
int32_t limb=(numbits+bits>>5)-group_base;
int32_t straddle=uleft_wrap(0, 0xFFFFFFFF, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=a[index];
else if(limb>index)
r[index]=0;
else
r[index]=a[index] & straddle;
}
}
}
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ void dmask_ior(uint32_t r[limbs], const uint32_t a[limbs], const int32_t numbits) {
int32_t group_thread=threadIdx.x & tpi-1, group_base=group_thread*limbs;
int32_t bits=tpi*limbs*32;
if(numbits>=bits || numbits<=-bits) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=0xFFFFFFFF;
}
else if(numbits>=0) {
int32_t limb=(numbits>>5)-group_base;
int32_t straddle=uleft_wrap(0xFFFFFFFF, 0, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=a[index];
else if(limb>index)
r[index]=0xFFFFFFFF;
else
r[index]=a[index] | straddle;
}
}
else {
int32_t limb=(numbits+bits>>5)-group_base;
int32_t straddle=uleft_wrap(0, 0xFFFFFFFF, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=0xFFFFFFFF;
else if(limb>index)
r[index]=a[index];
else
r[index]=a[index] | straddle;
}
}
}
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ void dmask_xor(uint32_t r[limbs], const uint32_t a[limbs], const int32_t numbits) {
int32_t group_thread=threadIdx.x & tpi-1, group_base=group_thread*limbs;
int32_t bits=tpi*limbs*32;
if(numbits>=bits || numbits<=-bits) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=a[index] ^ 0xFFFFFFFF;
}
else if(numbits>=0) {
int32_t limb=(numbits>>5)-group_base;
int32_t straddle=uleft_wrap(0xFFFFFFFF, 0, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=a[index];
else if(limb>index)
r[index]=a[index] ^ 0xFFFFFFFF;
else
r[index]=a[index] ^ straddle;
}
}
else {
int32_t limb=(numbits+bits>>5)-group_base;
int32_t straddle=uleft_wrap(0, 0xFFFFFFFF, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=a[index] ^ 0xFFFFFFFF;
else if(limb>index)
r[index]=a[index];
else
r[index]=a[index] ^ straddle;
}
}
}
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ void dmask_select(uint32_t r[limbs], const uint32_t clear[limbs], const uint32_t set[limbs], int32_t numbits) {
int32_t group_thread=threadIdx.x & tpi-1, group_base=group_thread*limbs;
int32_t bits=tpi*limbs*32;
if(numbits>=bits || numbits<=-bits) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=set[index];
}
else if(numbits>=0) {
int32_t limb=(numbits>>5)-group_base;
int32_t straddle=uleft_wrap(0xFFFFFFFF, 0, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=clear[index];
else if(limb>index)
r[index]=set[index];
else
r[index]=(set[index] & straddle) | (clear[index] & ~straddle);
}
}
else {
int32_t limb=(bits+numbits>>5)-group_base;
int32_t straddle=uleft_wrap(0, 0xFFFFFFFF, numbits);
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
if(limb<index)
r[index]=set[index];
else if(limb>index)
r[index]=clear[index];
else
r[index]=(set[index] & straddle) | (clear[index] & ~straddle);
}
}
}
template<uint32_t tpi, uint32_t limbs, uint32_t max_rotation>
__device__ __forceinline__ void drotate_left(const uint32_t sync, uint32_t r[limbs], const uint32_t x[limbs], const uint32_t numbits) {
uint32_t rotate_bits=numbits & 0x1F, numlimbs=numbits>>5, threads=static_divide_small<limbs>(numlimbs);
numlimbs=numlimbs-threads*limbs;
if(numlimbs==0) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=__shfl_sync(sync, x[index], threadIdx.x-threads, tpi);
}
else {
mprotate_left<limbs, max_rotation>(r, x, numlimbs);
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=__shfl_sync(sync, r[index], threadIdx.x-threads-(index<numlimbs), tpi);
}
if(rotate_bits>0) {
uint32_t fill=__shfl_sync(sync, r[limbs-1], threadIdx.x-1, tpi);
mpleft<limbs>(r, r, rotate_bits, fill);
}
}
template<uint32_t tpi, uint32_t limbs, uint32_t max_rotation>
__device__ __forceinline__ void drotate_right(const uint32_t sync, uint32_t r[limbs], const uint32_t x[limbs], const uint32_t numbits) {
uint32_t rotate_bits=numbits & 0x1F, numlimbs=numbits>>5, threads=static_divide_small<limbs>(numlimbs);
numlimbs=numlimbs-threads*limbs;
if(numlimbs==0) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=__shfl_sync(sync, x[index], threadIdx.x+threads, tpi);
}
else {
mprotate_right<limbs, max_rotation>(r, x, numlimbs);
#pragma unroll
for(int32_t index=0;index<limbs;index++)
r[index]=__shfl_sync(sync, r[index], threadIdx.x+threads+(limbs-index<=numlimbs), tpi);
}
if(rotate_bits>0) {
uint32_t fill=__shfl_sync(sync, r[0], threadIdx.x+1, tpi);
mpright<limbs>(r, r, rotate_bits, fill);
}
}
template<uint32_t tpi, uint32_t limbs, bool zero>
__device__ __forceinline__ void dscatter(const uint32_t sync, uint32_t &dest, const uint32_t source[limbs], const uint32_t source_thread=31) {
uint32_t group_thread=threadIdx.x & tpi-1;
uint32_t t;
if(zero)
dest=0;
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
t=__shfl_sync(sync, source[index], source_thread, tpi);
dest=(group_thread==tpi-limbs+index) ? t : dest;
}
}
template<uint32_t tpi, uint32_t limbs>
__device__ __forceinline__ void dall_gather(const uint32_t sync, uint32_t dest[limbs], const uint32_t source) {
#pragma unroll
for(int32_t index=0;index<limbs;index++)
dest[index]=__shfl_sync(sync, source, tpi-limbs+index, tpi);
}
template<uint32_t tpi, uint32_t limbs, bool zero>
__device__ __forceinline__ void fwgather(const uint32_t sync, uint32_t dest[limbs], const uint32_t source, const uint32_t destination_thread=31) {
uint32_t group_thread=threadIdx.x & warpSize-1;
uint32_t t;
if(zero) {
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
t=__shfl_sync(sync, source, tpi-limbs+index, tpi);
dest[index]=(group_thread==destination_thread) ? t : 0;
}
}
else {
#pragma unroll
for(int32_t index=0;index<limbs;index++) {
t=__shfl_sync(sync, source, tpi-limbs+index, tpi);
dest[index]=(group_thread==destination_thread) ? t : dest[index];
}
}
}
} /* namespace cgbn */
// ==================== next source file ====================
namespace lightseq {
namespace cuda {
/**
@brief: ker_split_multilg_request
the format of request in multilingual:
e.g. <en> <de> <hello> <world> <.>
request shape: [batch_size, src_seq_len + 2]
request = numpy.concatenate((src_lang_id, trg_lang_id, src_token_id), axis=1)
@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS
blockDim.x = MAX_THREADS
@param
req: [batch_size, src_seq_len + 2]
src_lang_id: [batch_size]
trg_lang_id: [batch_size]
src_token_id: [batch_size, src_seq_len]
req_len: src_seq_len + 2
*/
__global__ void ker_split_multilg_request(const int *req, int *src_lang_id,
int *trg_lang_id, int *src_token_id,
int batch_size, int req_len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < batch_size * req_len) {
int value = req[idx];
int seq_id = idx / req_len;
int token_id = idx % req_len;
if (token_id == 0) {
src_lang_id[seq_id] = value;
} else if (token_id == 1) {
trg_lang_id[seq_id] = value;
} else {
int new_idx = flat_2dim(seq_id, token_id - 2, req_len - 2);
src_token_id[new_idx] = value;
}
}
}
void launch_split_multilg_request(const int *req, int *src_lang_id,
int *trg_lang_id, int *src_token_id,
int batch_size, int req_len,
cudaStream_t &stream) {
if (req_len < 3) {
throw std::runtime_error("req_len should be greater than 2");
}
int nele = batch_size * req_len;
int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
ker_split_multilg_request<<<nblock, MAX_THREADS, 0, stream>>>(
req, src_lang_id, trg_lang_id, src_token_id, batch_size, req_len);
}
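/**
Worked example (illustration only): with batch_size = 1, req_len = 5 and a
request row [en_id, de_id, hello_id, world_id, eos_id], the kernel above yields
src_lang_id = [en_id], trg_lang_id = [de_id] and
src_token_id = [hello_id, world_id, eos_id]. A host-side reference of the same
split, assuming the row-major request layout described above:
*/
void split_multilg_request_reference(const int *req, int *src_lang_id,
                                     int *trg_lang_id, int *src_token_id,
                                     int batch_size, int req_len) {
  for (int seq_id = 0; seq_id < batch_size; ++seq_id) {
    const int *row = req + seq_id * req_len;
    src_lang_id[seq_id] = row[0];
    trg_lang_id[seq_id] = row[1];
    for (int t = 2; t < req_len; ++t) {
      src_token_id[seq_id * (req_len - 2) + (t - 2)] = row[t];
    }
  }
}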
/**
@brief: ker_enc_emb
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS
blockDim.x = MAX_THREADS;
@param
token_emb: [vocab_size, hidden_dim]
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, seq_len]
output: result, [batch_size, seq_len, hidden_dim]
pad_mask: record the padding token, [batch_size, seq_len]
pad_id, the padding token id
*/
template <typename T>
__global__ void ker_enc_emb(const T *token_emb, const T *pos_emb,
const int *tokens, T *output, int *pad_mask,
int pad_id, int batch_size, int seq_len,
int hidden_dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * seq_len * hidden_dim) {
return;
}
int batch_idx, seq_idx, dim_idx;
decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
int tokens_idx = batch_idx * seq_len + seq_idx;
int token = tokens[tokens_idx];
float4 value;
if (token == pad_id) {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 1;
}
value.x = 0.f;
value.y = 0.f;
value.z = 0.f;
value.w = 0.f;
} else {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 0;
}
value = ((float4 *)token_emb)[token * hidden_dim + dim_idx];
float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
value.x += pemb.x;
value.y += pemb.y;
value.z += pemb.z;
value.w += pemb.w;
}
((float4 *)output)[idx] = value;
}
template <>
__global__ void ker_enc_emb<__half>(const __half *token_emb,
const __half *pos_emb, const int *tokens,
__half *output, int *pad_mask, int pad_id,
int batch_size, int seq_len,
int hidden_dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * seq_len * hidden_dim) {
return;
}
int batch_idx, seq_idx, dim_idx;
decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
int tokens_idx = batch_idx * seq_len + seq_idx;
int token = tokens[tokens_idx];
float4 value;
if (token == pad_id) {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 1;
}
value.x = 0.f;
value.y = 0.f;
value.z = 0.f;
value.w = 0.f;
} else {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 0;
}
value = ((float4 *)token_emb)[token * hidden_dim + dim_idx];
float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
__half2 *value_h2 = (__half2 *)(&value);
__half2 *pemb_h2 = (__half2 *)(&pemb);
#pragma unroll
for (int i = 0; i < 4; i++) {
float2 value_f2 = __half22float2(value_h2[i]);
float2 pemb_f2 = __half22float2(pemb_h2[i]);
value_f2.x += pemb_f2.x;
value_f2.y += pemb_f2.y;
value_h2[i] = __float22half2_rn(value_f2);
}
}
((float4 *)output)[idx] = value;
}
/**
@brief: ker_enc_emb_multilg_token
for the encoder: look up the token embedding, then add the position embedding and the language embedding
@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS
blockDim.x = MAX_THREADS;
@param
token_emb: [vocab_size, hidden_dim]
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, seq_len]
lang_emb: language embedding, [num_lang, hidden_dim]
lang_id: language index, [batch_size]
output: result, [batch_size, seq_len, hidden_dim]
pad_mask: record the padding token, [batch_size, seq_len]
pad_id: the padding token id
*/
template <typename T>
__global__ void ker_enc_emb_multilg_token(const T *token_emb, const T *pos_emb,
const int *tokens, const T *lang_emb,
const int *lang_id, T *output,
int *pad_mask, int pad_id,
int batch_size, int seq_len,
int hidden_dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * seq_len * hidden_dim) {
return;
}
int batch_idx, seq_idx, dim_idx;
decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
int tokens_idx = batch_idx * seq_len + seq_idx;
int token = tokens[tokens_idx];
float4 value;
if (token == pad_id) {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 1;
}
value.x = 0.f;
value.y = 0.f;
value.z = 0.f;
value.w = 0.f;
} else {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 0;
}
value = ((float4 *)token_emb)[token * hidden_dim + dim_idx];
// add pos emb
float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
value.x += pemb.x;
value.y += pemb.y;
value.z += pemb.z;
value.w += pemb.w;
// add lang emb
pemb = ((float4 *)lang_emb)[lang_id[batch_idx] * hidden_dim + dim_idx];
value.x += pemb.x;
value.y += pemb.y;
value.z += pemb.z;
value.w += pemb.w;
}
((float4 *)output)[idx] = value;
}
template <>
__global__ void ker_enc_emb_multilg_token<__half>(
const __half *token_emb, const __half *pos_emb, const int *tokens,
const __half *lang_emb, const int *lang_id, __half *output, int *pad_mask,
int pad_id, int batch_size, int seq_len, int hidden_dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * seq_len * hidden_dim) {
return;
}
int batch_idx, seq_idx, dim_idx;
decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
int tokens_idx = batch_idx * seq_len + seq_idx;
int token = tokens[tokens_idx];
float4 value;
if (token == pad_id) {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 1;
}
value.x = 0.f;
value.y = 0.f;
value.z = 0.f;
value.w = 0.f;
} else {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 0;
}
value = ((float4 *)token_emb)[token * hidden_dim + dim_idx];
__half2 *value_h2 = (__half2 *)(&value);
float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
__half2 *pemb_h2 = (__half2 *)(&pemb);
float4 lemb =
((float4 *)lang_emb)[lang_id[batch_idx] * hidden_dim + dim_idx];
__half2 *lemb_h2 = (__half2 *)(&lemb);
#pragma unroll
for (int i = 0; i < 4; i++) {
float2 value_f2 = __half22float2(value_h2[i]);
float2 pemb_f2 = __half22float2(pemb_h2[i]);
float2 lemb_f2 = __half22float2(lemb_h2[i]);
value_f2.x += pemb_f2.x + lemb_f2.x;
value_f2.y += pemb_f2.y + lemb_f2.y;
value_h2[i] = __float22half2_rn(value_f2);
}
}
((float4 *)output)[idx] = value;
}
/**
@brief: ker_enc_emb_multilg_sentence
for the encoder: look up the token embedding (the first position uses the language embedding as a sentence-level tag) and add the position embedding
@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS
blockDim.x = MAX_THREADS;
@param
token_emb: [vocab_size, hidden_dim]
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, seq_len]
lang_emb: language embedding, [num_lang, hidden_dim]
lang_id: language index, [batch_size]
output: result, [batch_size, seq_len, hidden_dim]
pad_mask: record the padding token, [batch_size, seq_len]
pad_id: the padding token id
*/
template <typename T>
__global__ void ker_enc_emb_multilg_sentence(
const T *token_emb, const T *pos_emb, const int *tokens, const T *lang_emb,
const int *lang_id, T *output, int *pad_mask, int pad_id, int batch_size,
int seq_len, int hidden_dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * seq_len * hidden_dim) {
return;
}
int batch_idx, seq_idx, dim_idx;
decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
bool is_pad;
int token_emb_idx;
if (seq_idx == 0) {
is_pad = false;
token_emb = lang_emb;
token_emb_idx = lang_id[batch_idx];
} else {
token_emb_idx = tokens[batch_idx * (seq_len - 1) + seq_idx - 1];
is_pad = (token_emb_idx == pad_id);
}
float4 value;
int tokens_idx = batch_idx * seq_len + seq_idx;
if (is_pad) {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 1;
}
value.x = 0.f;
value.y = 0.f;
value.z = 0.f;
value.w = 0.f;
} else {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 0;
}
value = ((float4 *)token_emb)[token_emb_idx * hidden_dim + dim_idx];
float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
value.x += pemb.x;
value.y += pemb.y;
value.z += pemb.z;
value.w += pemb.w;
}
((float4 *)output)[idx] = value;
}
template <>
__global__ void ker_enc_emb_multilg_sentence<__half>(
const __half *token_emb, const __half *pos_emb, const int *tokens,
const __half *lang_emb, const int *lang_id, __half *output, int *pad_mask,
int pad_id, int batch_size, int seq_len, int hidden_dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * seq_len * hidden_dim) {
return;
}
int batch_idx, seq_idx, dim_idx;
decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx);
bool is_pad;
int token_emb_idx;
if (seq_idx == 0) {
is_pad = false;
token_emb = lang_emb;
token_emb_idx = lang_id[batch_idx];
} else {
token_emb_idx = tokens[batch_idx * (seq_len - 1) + seq_idx - 1];
is_pad = (token_emb_idx == pad_id);
}
float4 value;
int tokens_idx = batch_idx * seq_len + seq_idx;
if (is_pad) {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 1;
}
value.x = 0.f;
value.y = 0.f;
value.z = 0.f;
value.w = 0.f;
} else {
if (dim_idx == 0) {
pad_mask[tokens_idx] = 0;
}
value = ((float4 *)token_emb)[token_emb_idx * hidden_dim + dim_idx];
float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx];
__half2 *value_h2 = (__half2 *)(&value);
__half2 *pemb_h2 = (__half2 *)(&pemb);
#pragma unroll
for (int i = 0; i < 4; i++) {
float2 value_f2 = __half22float2(value_h2[i]);
float2 pemb_f2 = __half22float2(pemb_h2[i]);
value_f2.x += pemb_f2.x;
value_f2.y += pemb_f2.y;
value_h2[i] = __float22half2_rn(value_f2);
}
}
((float4 *)output)[idx] = value;
}
template <typename T>
void launch_enc_emb(const T *token_emb, const T *pos_emb, const int *tokens,
T *output, int *pad_mask, int pad_id, int batch_size,
int seq_len, int hidden_dim, cudaStream_t stream,
const T *lang_emb, const int *lang_id, int multilg_type) {
if (hidden_dim % 4 != 0) {
throw std::runtime_error("violate hidden_dim % 4 = 0");
}
hidden_dim >>= 2;
int nele = batch_size * seq_len * hidden_dim;
int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
if (multilg_type == 0) {
ker_enc_emb<T><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, output, pad_mask, pad_id, batch_size,
seq_len, hidden_dim);
} else if (multilg_type == 1) {
ker_enc_emb_multilg_token<T><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id,
batch_size, seq_len, hidden_dim);
} else {
ker_enc_emb_multilg_sentence<T><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id,
batch_size, seq_len, hidden_dim);
}
}
template <>
void launch_enc_emb<__half>(const __half *token_emb, const __half *pos_emb,
const int *tokens, __half *output, int *pad_mask,
int pad_id, int batch_size, int seq_len,
int hidden_dim, cudaStream_t stream,
const __half *lang_emb, const int *lang_id,
int multilg_type) {
if (hidden_dim % 8 != 0) {
throw std::runtime_error("violate hidden_dim % 8 = 0");
}
hidden_dim >>= 3;
int nele = batch_size * seq_len * hidden_dim;
int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
if (multilg_type == 0) {
ker_enc_emb<__half><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, output, pad_mask, pad_id, batch_size,
seq_len, hidden_dim);
} else if (multilg_type == 1) {
ker_enc_emb_multilg_token<__half><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id,
batch_size, seq_len, hidden_dim);
} else {
ker_enc_emb_multilg_sentence<__half><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id,
batch_size, seq_len, hidden_dim);
}
}
template void launch_enc_emb<float>(const float *token_emb,
const float *pos_emb, const int *tokens,
float *output, int *pad_mask, int pad_id,
int batch_size, int seq_len, int hidden_dim,
cudaStream_t stream, const float *lang_emb,
const int *lang_id, int multilg_type);
template void launch_enc_emb<__half>(const __half *token_emb,
const __half *pos_emb, const int *tokens,
__half *output, int *pad_mask, int pad_id,
int batch_size, int seq_len,
int hidden_dim, cudaStream_t stream,
const __half *lang_emb, const int *lang_id,
int multilg_type);
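// Hedged usage sketch (not part of the original file): illustrates the buffer
// shapes the float path of launch_enc_emb expects. All names and sizes below
// are hypothetical. With multilg_type == 0 the lang_emb / lang_id arguments
// are never read by the kernel, so null pointers are passed; error checking
// and cleanup are omitted.
void example_launch_enc_emb(cudaStream_t stream) {
  const int batch_size = 2, seq_len = 4, hidden_dim = 512, vocab_size = 32000,
            max_step = 256, pad_id = 0;
  float *d_token_emb, *d_pos_emb, *d_output;
  int *d_tokens, *d_pad_mask;
  cudaMalloc(&d_token_emb, (size_t)vocab_size * hidden_dim * sizeof(float));
  cudaMalloc(&d_pos_emb, (size_t)max_step * hidden_dim * sizeof(float));
  cudaMalloc(&d_tokens, (size_t)batch_size * seq_len * sizeof(int));
  cudaMalloc(&d_output,
             (size_t)batch_size * seq_len * hidden_dim * sizeof(float));
  cudaMalloc(&d_pad_mask, (size_t)batch_size * seq_len * sizeof(int));
  // hidden_dim must be a multiple of 4 for the float path (8 for __half)
  launch_enc_emb<float>(d_token_emb, d_pos_emb, d_tokens, d_output, d_pad_mask,
                        pad_id, batch_size, seq_len, hidden_dim, stream,
                        /*lang_emb=*/nullptr, /*lang_id=*/nullptr,
                        /*multilg_type=*/0);
}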
/**
@brief: ker_dec_emb
for the decoder: look up the token embedding and add the position embedding
(plus the language embedding in the multilingual modes)
@thread
gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS;
blockDim.x = MAX_THREADS
@param
token_emb: [hidden_dim, vocab_size], note this layout is transposed relative to the encoder's [vocab_size, hidden_dim]
pos_emb: [max_step, hidden_dim]
tokens: input token id, [batch_size, beam_size, max_step]
lang_emb: language embedding, [num_lang, hidden_dim]
lang_id: language index, [batch_size]
output: result, [batch_size, beam_size, hidden_dim]
step: current decoder step
max_step: max decoder steps
multilg_type: 0 for no multilg, 1 for token level multilg,
2 for sentence level multilg
*/
template <typename T>
__global__ void ker_dec_emb(const T *token_emb, const T *pos_emb, int *tokens,
const T *lang_emb, const int *lang_id, T *output,
int batch_size, int beam_size, int hidden_dim,
int vocab_size, int step, int max_step,
int multilg_type) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * beam_size * hidden_dim) {
return;
}
int batch_idx, beam_idx, dim_idx;
decompose_3dim(idx, beam_size, hidden_dim, &batch_idx, &beam_idx, &dim_idx);
T emb;
if (multilg_type == 2 && step == 0) {
// the bos of sentence-level multilingual decoding is the target language id
int lid = lang_id[batch_idx];
emb = lang_emb[flat_2dim(lid, dim_idx, hidden_dim)];
tokens[flat_3dim(batch_idx, beam_idx, 0, beam_size, max_step)] = lid;
} else {
int token =
tokens[flat_3dim(batch_idx, beam_idx, step, beam_size, max_step)];
emb = token_emb[flat_2dim(dim_idx, token, vocab_size)];
}
float value =
float(emb) + float(pos_emb[flat_2dim(step, dim_idx, hidden_dim)]);
if (multilg_type == 1) {
// token level multilg, add lang_emb
value +=
float(lang_emb[flat_2dim(lang_id[batch_idx], dim_idx, hidden_dim)]);
}
output[idx] = T(value);
}
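// Illustrative note (not part of the original file): the decoder embedding
// table is stored transposed as [hidden_dim, vocab_size], so the lookup above
// reads token_emb[flat_2dim(dim_idx, token, vocab_size)]. For example, with a
// hypothetical hidden_dim = 4 and vocab_size = 10, dimension 2 of token 7 sits
// at 2 * 10 + 7 = 27, whereas the encoder layout [vocab_size, hidden_dim]
// would place it at 7 * 4 + 2 = 30.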
template <typename T>
void launch_dec_emb(const T *token_emb, const T *pos_emb, int *tokens,
const T *lang_emb, const int *lang_id, T *output,
int batch_size, int beam_size, int hidden_dim,
int vocab_size, int step, int max_step, int multilg_type,
cudaStream_t stream) {
if (step >= max_step) {
throw std::runtime_error("violate step < max_step");
}
int nele = batch_size * beam_size * hidden_dim;
int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS;
ker_dec_emb<T><<<nblock, MAX_THREADS, 0, stream>>>(
token_emb, pos_emb, tokens, lang_emb, lang_id, output, batch_size,
beam_size, hidden_dim, vocab_size, step, max_step, multilg_type);
}
template void launch_dec_emb<float>(const float *token_emb,
const float *pos_emb, int *tokens,
const float *lang_emb, const int *lang_id,
float *output, int batch_size,
int beam_size, int hidden_dim,
int vocab_size, int step, int max_step,
int multilg_type, cudaStream_t stream);
template void launch_dec_emb<__half>(const __half *token_emb,
const __half *pos_emb, int *tokens,
const __half *lang_emb, const int *lang_id,
__half *output, int batch_size,
int beam_size, int hidden_dim,
int vocab_size, int step, int max_step,
int multilg_type, cudaStream_t stream);
} // namespace cuda
} // namespace lightseq
|
the_stack
|
void process_error(int severity, string err); // this should probably live in a utils header file
bool fh_equal_to(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
bool fh_less(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
bool fh_greater(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
bool fh_greater_equal_to(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
bool fh_less_equal_to(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
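// Hedged example (not part of the original file): demonstrates the tolerant
// float comparisons above, assuming float_type is double and EPSILON is a
// small tolerance (e.g. 1e-8) as defined in the project headers. The function
// name is purely illustrative.
void fh_example()
{
    float_type a = 0.1, b = 0.2;
    // a + b differs from 0.3 by roughly 4e-17 with IEEE doubles, which is far
    // below EPSILON, so the tolerant comparisons treat the values as equal.
    cout << fh_equal_to(a + b, 0.3) << " "           // 1
         << fh_less(a + b, 0.3) << " "               // 0
         << fh_greater_equal_to(a + b, 0.3) << endl; // 1
}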
char host_logical_and(char column1, char column2)
{
//cout << "AND " << column1 << " " << column2 << endl;
if (column1 == 'A' && column2 == 'A')
return 'A';
else
if (column1 == 'N' || column2 == 'N') {
return 'N';
}
else
return 'R';
}
char host_logical_or(char column1, char column2)
{
//cout << "OR " << column1 << " " << column2 << endl;
if (column1 == 'A' && column2 == 'A')
return 'A';
else
if (column1 == 'N' && column2 == 'N')
return 'N';
else
return 'R';
}
char host_compare(int_type s, int_type d, int_type op_type)
{
char res = 'N';
if (op_type == 2 && d>s ) // >
res = 'A';
else
if (op_type == 1 && d<s) // <
res = 'A';
else
if (op_type == 6 && d>=s) // >=
res = 'A';
else
if (op_type == 5 && d<=s) // <=
res = 'A';
else
if (op_type == 4 && d==s)// =
res = 'A';
else // !=
if(d!=s)
res = 'A';
return res;
}
char host_compare(float_type s, float_type d, int_type op_type)
{
char res = 'N';
if (op_type == 2 && (d-s) > EPSILON) // >
res = 'A';
else
if (op_type == 1 && (s-d) > EPSILON) // <
res = 'A';
else
if (op_type == 6 && (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))) // >=
res = 'A';
else
if (op_type == 5 && (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))) // <=
res = 'A';
else
if (op_type == 4 && ((d-s) < EPSILON) && ((d-s) > -EPSILON))// =
res = 'A';
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 'A';
return res;
}
char host_compare(int_type* column1, int_type d, int_type op_type)
{
char res = 'R';
//cout << "CMP " << column1[0] << " " << column1[1] << " " << d << " " << op_type << endl;
if (op_type == 2) { // >
if (column1[1] <= d)
res = 'N';
else
if (column1[0] > d)
res = 'A';
}
else
if (op_type == 1) { // <
if (column1[0] >= d)
res = 'N';
else
if (column1[1] < d)
res = 'A';
}
else
if (op_type == 6) { // >=
if (column1[1] < d)
res = 'N';
else
if (column1[0] >= d)
res = 'A';
}
else
if (op_type == 5) { // <=
if (column1[0] > d)
res = 'N';
else
if (column1[1] <= d)
res = 'A';
}
else
if (op_type == 4 && column1[0] == d && column1[1] == d) { // =
res = 'A';
};
//cout << "res " << res << endl;
return res;
}
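// Hedged example (not part of the original file): column1 carries a segment's
// minimum in column1[0] and maximum in column1[1], so the routine above can
// often decide a predicate for the whole segment without scanning it.
// 'A' means every row qualifies, 'N' means none does, 'R' means the segment
// must be read and checked row by row. The values below are illustrative only.
void host_compare_example()
{
    int_type seg[2] = {10, 50};                               // segment min / max
    char r1 = host_compare(seg, (int_type)5, (int_type)2);    // col > 5  -> 'A'
    char r2 = host_compare(seg, (int_type)60, (int_type)2);   // col > 60 -> 'N'
    char r3 = host_compare(seg, (int_type)30, (int_type)2);   // col > 30 -> 'R'
    cout << r1 << " " << r2 << " " << r3 << endl;
}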
char host_compare(float_type* column1, float_type d, int_type op_type)
{
char res = 'R';
//cout << "CMP " << column1[0] << " " << column1[1] << " with " << d << endl;
if (op_type == 2) { // >
if(fh_less_equal_to(column1[1],d)) {
res = 'N';
}
else
if(fh_greater(column1[0],d)) {
res = 'A';
};
}
else
if (op_type == 1) { // <
if(fh_less(column1[1],d)) {
res = 'A';
}
else
if(fh_greater_equal_to(column1[0],d)) {
res = 'N';
};
}
else
if (op_type == 6) { // >=
if(fh_greater_equal_to(column1[0],d)) {
res = 'A';
}
else
if(fh_less(column1[1],d)) {
res = 'N';
};
}
else
if (op_type == 5) { // <=
if(fh_less_equal_to(column1[1],d)) {
res = 'A';
}
else
if(fh_greater(column1[0],d)) {
res = 'N';
};
}
else
if (op_type == 4 && fh_equal_to(column1[0],d) && fh_equal_to(column1[1],d)) // =
res = 'A';
//cout << "res " << res << endl;
return res;
}
char host_compare(int_type* column1, int_type* column2, int_type op_type)
{
char res = 'R';
if (op_type == 2) { // >
if(column1[0] > column2[1])
res = 'A';
else
if(column1[1] <= column2[0])
res = 'N';
}
else
if (op_type == 1) { // <
if(column1[1] < column2[0])
res = 'A';
else
if(column1[0] >= column2[1])
res = 'N';
}
else
if (op_type == 6) { // >=
if(column1[0] >= column2[1])
res = 'A';
else
if(column1[1] < column2[0])
res = 'N';
}
else
if (op_type == 5) { // <=
if(column1[1] <= column2[0])
res = 'A';
else
if(column1[0] > column2[1])
res = 'N';
}
else
if (op_type == 4 && column1[0] == column2[1] && column1[1] == column2[0]) // =
res = 'A';
return res;
}
char host_compare(float_type* column1, float_type* column2, int_type op_type)
{
char res = 'R';
if (op_type == 2) { // >
if(fh_greater(column1[0],column2[1]))
res = 'A';
else
if(fh_less_equal_to(column1[1],column2[0]))
res = 'N';
}
else
if (op_type == 1) { // <
if(fh_less(column1[1],column2[0]))
res = 'A';
else
if(fh_greater_equal_to(column1[0],column2[1]))
res = 'N';
}
else
if (op_type == 6) { // >=
if(fh_greater_equal_to(column1[0],column2[1]))
res = 'A';
else
if(fh_less(column1[1],column2[0]))
res = 'N';
}
else
if (op_type == 5) { // <=
if(fh_less_equal_to(column1[1],column2[0]))
res = 'A';
else
if(fh_greater(column1[0],column2[1]))
res = 'N';
}
else
if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
res = 'A';
return res;
}
char host_compare(float_type* column1, int_type* column2, int_type op_type)
{
char res = 'R';
if (op_type == 2) { // >
if(fh_greater(column1[0],(float_type)column2[1]))
res = 'A';
else
if(fh_less_equal_to(column1[1],(float_type)column2[0]))
res = 'N';
}
else
if (op_type == 1) { // <
if(fh_less(column1[1],(float_type)column2[0]))
res = 'A';
else
if(fh_greater_equal_to(column1[0],(float_type)column2[1]))
res = 'N';
}
else
if (op_type == 6) { // >=
if(fh_greater_equal_to(column1[0],(float_type)column2[1]))
res = 'A';
else
if(fh_less(column1[1],(float_type)column2[0]))
res = 'N';
}
else
if (op_type == 5) { // <=
if(fh_less_equal_to(column1[1],(float_type)column2[0]))
res = 'A';
else
if(fh_greater(column1[0],(float_type)column2[1]))
res = 'N';
}
else
if (op_type == 4 && fh_equal_to(column1[0],(float_type) column2[1]) && fh_equal_to(column1[1],(float_type)column2[0])) // =
res = 'A';
return res;
}
float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse)
{
float_type* temp = (float_type*)malloc(2*float_size);
temp[0] = (float_type)column1[0];
temp[1] = (float_type)column1[1];
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp[0] = temp[0] * column2[0];
temp[1] = temp[1] * column2[1];
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = temp[0] + column2[0];
temp[1] = temp[1] + column2[1];
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column2[0] - temp[0];
temp[1] = column2[1] - temp[1];
}
else {
temp[0] = column2[0] / temp[0];
temp[1] = column2[1] / temp[1];
}
}
else {
if (op_type.compare("MUL") == 0) {
temp[0] = temp[0] * column2[0];
temp[1] = temp[1] * column2[1];
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = temp[0] + column2[0];
temp[1] = temp[1] + column2[1];
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = temp[0] - column2[0];
temp[1] = temp[1] - column2[1];
}
else {
temp[0] = temp[0] / column2[0];
temp[1] = temp[1] / column2[1];
}
};
return temp;
}
int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse)
{
int_type* temp = (int_type*)malloc(2*int_size);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * column2[0];
temp[1] = column1[1] * column2[1];
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + column2[0];
temp[1] = column1[1] + column2[1];
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column1[0] - column2[0];
temp[1] = column1[1] - column2[1];
}
else {
temp[0] = column1[0] / column2[0];
temp[1] = column1[1] / column2[1];
}
}
else {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * column2[0];
temp[1] = column1[1] * column2[1];
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + column2[0];
temp[1] = column1[1] + column2[1];
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column2[0] - column1[0];
temp[1] = column2[1] - column1[1];
}
else {
temp[0] = column2[0] / column1[0];
temp[1] = column2[1] / column1[1];
}
}
return temp;
}
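// Hedged example (not part of the original file): host_op combines the
// two-element [min, max] descriptors of two segments element by element and
// returns a newly malloc'ed pair that the caller must free. Illustrative
// values only.
void host_op_example()
{
    int_type a[2] = {10, 50};
    int_type b[2] = {1, 4};
    int_type* sum  = host_op(a, b, "ADD", 0);    // -> {11, 54}
    int_type* diff = host_op(a, b, "MINUS", 0);  // -> {9, 46}; reverse == 1 computes b - a instead
    cout << sum[0] << ":" << sum[1] << " " << diff[0] << ":" << diff[1] << endl;
    free(sum);
    free(diff);
}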
float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse)
{
float_type* temp = (float_type*)malloc(2*float_size);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * column2[0];
temp[1] = column1[1] * column2[1];
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + column2[0];
temp[1] = column1[1] + column2[1];
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column1[0] - column2[0];
temp[1] = column1[1] - column2[1];
}
else {
temp[0] = column1[0] / column2[0];
temp[1] = column1[1] / column2[1];
}
}
else {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * column2[0];
temp[1] = column1[1] * column2[1];
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + column2[0];
temp[1] = column1[1] + column2[1];
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column2[0] - column1[0];
temp[1] = column2[1] - column1[1];
}
else {
temp[0] = column2[0] / column1[0];
temp[1] = column2[1] / column1[1];
}
}
return temp;
}
int_type* host_op(int_type* column1, int_type d, string op_type, int reverse)
{
int_type* temp = (int_type*)malloc(2*int_size);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * d;
temp[1] = column1[1] * d;
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + d;
temp[1] = column1[1] + d;
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column1[0] - d;
temp[1] = column1[1] - d;
}
else {
temp[0] = column1[0] / d;
temp[1] = column1[1] / d;
}
}
else {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * d;
temp[1] = column1[1] * d;
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + d;
temp[1] = column1[1] + d;
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = d - column1[0];
temp[1] = d - column1[1];
}
else {
temp[0] = d / column1[0];
temp[1] = d / column1[1];
}
};
return temp;
}
float_type* host_op(int_type* column1, float_type d, string op_type, int reverse)
{
float_type* temp = (float_type*)malloc(2*float_size);
temp[0] = (float_type)column1[0];
temp[1] = (float_type)column1[1];
float_type* temp1 = (float_type*)malloc(2*float_size);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp1[0] = temp[0] * d;
temp1[1] = temp[1] * d;
}
else
if (op_type.compare("ADD") == 0) {
temp1[0] = temp[0] + d;
temp1[1] = temp[1] + d;
}
else
if (op_type.compare("MINUS") == 0) {
temp1[0] = temp[0] - d;
temp1[1] = temp[1] - d;
}
else {
temp1[0] = temp[0] / d;
temp1[1] = temp[1] / d;
}
}
else {
if (op_type.compare("MUL") == 0) {
temp1[0] = temp[0] * d;
temp1[1] = temp[1] * d;
}
else
if (op_type.compare("ADD") == 0) {
temp1[0] = temp[0] + d;
temp1[1] = temp[1] + d;
}
else
if (op_type.compare("MINUS") == 0) {
temp1[0] = d - temp[0];
temp1[1] = d - temp[1];
}
else {
temp1[0] = d / temp[0];
temp1[1] = d / temp[1];
}
};
free(temp);
return temp1;
}
float_type* host_op(float_type* column1, float_type d, string op_type,int reverse)
{
float_type* temp = (float_type*)malloc(2*float_size);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * d;
temp[1] = column1[1] * d;
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + d;
temp[1] = column1[1] + d;
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = column1[0] - d;
temp[1] = column1[1] - d;
}
else {
temp[0] = column1[0] / d;
temp[1] = column1[1] / d;
}
}
else {
if (op_type.compare("MUL") == 0) {
temp[0] = column1[0] * d;
temp[1] = column1[1] * d;
}
else
if (op_type.compare("ADD") == 0) {
temp[0] = column1[0] + d;
temp[1] = column1[1] + d;
}
else
if (op_type.compare("MINUS") == 0) {
temp[0] = d - column1[0];
temp[1] = d - column1[1];
}
else {
temp[0] = d / column1[0];
temp[1] = d / column1[1];
}
};
return temp;
}
unsigned int precision_func(unsigned int& p1, unsigned int& p2, string op) {
if (op.compare("DIV") != 0 ) {
unsigned int res;
if (op.compare("MUL") != 0 ) {
if(p1 > p2) {
res = p1;
p2 = p1-p2;
p1 = 0;
}
else {
res = p2;
p1 = p2-p1;
p2 = 0;
};
return res;
}
else {
//std::swap(p1,p2);
res = p1+p2;
p1 = 0;
p2 = 0;
return res;
};
}
else {
if(p1 == p2) {
p1 = p1+4;
p2 = 0;
return p1;
}
else {
if(p1 > p2) {
p1 = p1 + (p1-p2) + 4;
p2 = 0;
return p1;
}
else {
p2 = p2 + (p2-p1) + 4;
p1 = 0;
return p2;
}
}
};
}
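// Hedged example (not part of the original file): precision_func reconciles
// the decimal scales of two fixed-point operands. It returns the scale of the
// result and rewrites p1/p2 into the number of extra decimal digits each
// operand must be multiplied by before the integer arithmetic runs. The
// values below are illustrative only.
void precision_func_example()
{
    unsigned int p1 = 2, p2 = 0;   // e.g. 12.34 (two decimals) + 5 (no decimals)
    unsigned int res = precision_func(p1, p2, "ADD");
    // res == 2, p1 == 0, p2 == 2: the second operand gets scaled by 10^2 so
    // both sides carry two decimal digits before they are added.
    cout << res << " " << p1 << " " << p2 << endl;
}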
//CudaSet a contains two records holding the minimum and maximum values of the segment.
//We need to determine whether this segment needs to be processed at all;
//the check takes place in host memory.
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a, unsigned int segment)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<float_type*> exe_vectors_f;
stack<int_type> exe_nums;
stack<char> bool_vectors;
stack<unsigned int> exe_precision;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
if(a->not_compressed)
return 'R';
//first we need to set host arrays [0] and [1] to the min and max values read from the corresponding segment files
set<string> uniques;
queue<string> fields(op_value);
CudaSet *t;
FILE* f;
unsigned int cnt;
string f1;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
// copy t min and max values to a only if int, decimal or float
if(t->type[fields.front()] <= 1) {
f1 = t->load_file_name + "." + fields.front() + "." + to_string(segment);
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
fread((char *)&cnt, 4, 1, f);
if (t->type[fields.front()] == 0) {
a->h_columns_int[fields.front()].resize(2);
fread((char *)&a->h_columns_int[fields.front()][0], 8, 1, f);
fread((char *)&a->h_columns_int[fields.front()][1], 8, 1, f);
fseek(f, 8+cnt, SEEK_CUR);
fread((char *)&a->mRecCount, 4, 1, f);
//cout << endl << "ZONE " << a->mRecCount << endl;
fread((char *)&cnt, 4, 1, f);
//cout << "file " << f1 << " " << segment << " " << a->h_columns_int[fields.front()][0] << ":" << a->h_columns_int[fields.front()][1] << endl;
}
else {
long long int t;
a->h_columns_float[fields.front()].resize(2);
fread((char *)&t, 8, 1, f);
a->h_columns_float[fields.front()][0] = (float_type)t/100.0;
fread((char *)&t, 8, 1, f);
a->h_columns_float[fields.front()][1] = (float_type)t/100.0;
//cout << "file " << f1 << " " << segment << " " << a->h_columns_float[a->type_index[colIndex]][0] << ":" << a->h_columns_float[a->type_index[colIndex]][1] << endl;
};
fclose(f);
};
};
uniques.insert(fields.front());
fields.pop();
};
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
//cout << ss << endl;
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0
|| ss.compare("STRING") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
exe_precision.push(op_nums_precision.front());
op_nums_precision.pop();
}
else
if (ss.compare("NAME") == 0) {
if(var_exists(a, op_value.front())) {
exe_value.push(op_value.front());
op_value.pop();
}
else {
process_error(1, "Couldn't find column " + op_value.front());
//cout << "Couldn't find column " << op_value.front() << endl;
//exit(0);
};
}
else
if (ss.compare("STRING") == 0) {
exe_value.push(op_value.front());
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1)
n1 = n1*(unsigned int)pow(10,p1);
if(p2)
n2 = n2*(unsigned int)pow(10,p2);
if (ss.compare("ADD") == 0 )
res = n1+n2;
else
if (ss.compare("MUL") == 0 )
res = n1*n2;
else
if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
exe_type.push("NUMBER");
exe_nums.push(res);
}
else
if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
int_type val;
int_type* t = get_host_vec(a, s1_val, exe_vectors);
exe_type.push("NAME");
exe_value.push("");
exe_precision.push(0);
}
else
if (s2.compare("NAME") == 0 && s1.compare("STRING") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
int_type val;
int_type* t = get_host_vec(a, s2_val, exe_vectors);
//cout << "name " << s2_val << endl;
exe_type.push("NAME");
exe_value.push("");
exe_precision.push(0);
}
else
if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = get_decimals(a, s1_val, exe_precision);
int_type* t = get_host_vec(a, s1_val, exe_vectors);
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,p2);
};
exe_type.push("NAME");
exe_value.push("");
exe_vectors.push(host_op(t,n1,ss,1));
}
else
if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = get_decimals(a, s2_val, exe_precision);
int_type* t = get_host_vec(a, s2_val, exe_vectors);
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,p2);
};
exe_type.push("NAME");
exe_value.push("");
exe_vectors.push(host_op(t,n1,ss,0));
}
else
if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p1 = get_decimals(a, s1_val, exe_precision);
auto p2 = get_decimals(a, s2_val, exe_precision);
int_type* t = get_host_vec(a, s1_val, exe_vectors);
int_type* s3 = get_host_vec(a, s2_val, exe_vectors);
exe_type.push("NAME");
exe_value.push("");
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,p1);
t[1] = t[1]*(unsigned int)pow(10,p1);
};
if(p2) {
s3[0] = s3[0]*(unsigned int)pow(10,p2);
s3[1] = s3[1]*(unsigned int)pow(10,p2);
};
exe_vectors.push(host_op(t,s3,ss,1));
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) == a->columnNames.end())
delete [] t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s2_val) == a->columnNames.end())
delete [] s3;
}
}
else
if (ss.compare("CMP") == 0) {
int_type cmp_type = op_nums.front();
op_nums.pop();
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = std::max(p1, p2);
exe_precision.push(pres);
exe_type.push("NAME");
exe_value.push("");
if(p1)
n1 = n1*(unsigned int)pow(10,pres-p1);
if(p2)
n2 = n2*(unsigned int)pow(10,pres-p2);
bool_vectors.push(host_compare(n1,n2,cmp_type));
}
else
if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) {
time_t tt;
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
int_type val;
int_type* t = get_host_vec(a, s2_val, exe_vectors);
auto pos = s1_val.find("date()");
bool_vectors.push('R');
exe_type.push("NAME");
exe_value.push("");
}
else
if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
s2_val = exe_value.top();
exe_value.pop();
s1_val = exe_value.top();
exe_value.pop();
int_type val;
time_t tt;
int_type* t = get_host_vec(a, s1_val, exe_vectors);
bool_vectors.push('R');
exe_type.push("NAME");
exe_value.push("");
}
else
if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s1_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = get_decimals(a, s1_val, exe_precision);
int_type* t = get_host_vec(a, s1_val, exe_vectors);
auto pres = std::max(p1, p2);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,pres-p1);
t[1] = t[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,pres-p2);
};
exe_type.push("NAME");
exe_value.push("");
bool_vectors.push(host_compare(t,n1,cmp_type));
}
else
if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
cmp_type = reverse_op(cmp_type);
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto p1 = get_decimals(a, s2_val, exe_precision);
int_type* t = get_host_vec(a, s2_val, exe_vectors);
auto pres = std::max(p1, p2);
exe_precision.push(pres);
if(p1) {
t[0] = t[0]*(unsigned int)pow(10,pres-p1);
t[1] = t[1]*(unsigned int)pow(10,pres-p1);
};
if(p2) {
n1 = n1*(unsigned int)pow(10,pres-p2);
};
exe_type.push("NAME");
exe_value.push("");
bool_vectors.push(host_compare(t,n1,cmp_type));
}
else {
return 'R';
}
}
else
if (ss.compare("AND") == 0) {
char s3 = bool_vectors.top();
bool_vectors.pop();
char s2 = bool_vectors.top();
bool_vectors.pop();
exe_type.push("NAME");
bool_vectors.push(host_logical_and(s2,s3));
}
else
if (ss.compare("OR") == 0) {
char s3 = bool_vectors.top();
bool_vectors.pop();
char s2 = bool_vectors.top();
bool_vectors.pop();
exe_type.push("NAME");
bool_vectors.push(host_logical_or(s2,s3));
}
else {
if(ss.compare("JOIN") == 0)
process_error(2, "operation = is not valid");
//cout << "operation = is not valid" << endl;
else
process_error(2, "operation " + string(ss)+ " is not valid");
//cout << "operation " << ss << " is not valid" << endl;
exit(0); // never gets here
}
};
};
return bool_vectors.top();
}
|
the_stack
|
#include <system/op_boilerplate.h>
#include <atomic>
#include <stdio.h>
#include <stdlib.h>
#include "../Workspace.h"
#include <helpers/logger.h>
#include <math/templatemath.h>
#include <cstring>
#include <exceptions/cuda_exception.h>
#include <cuda.h>
#include <cuda_runtime.h>
namespace sd {
namespace memory {
Workspace::Workspace(ExternalWorkspace *external) {
if (external->sizeHost() > 0) {
_ptrHost = (char *) external->pointerHost();
_ptrDevice = (char *) external->pointerDevice();
_initialSize = external->sizeDevice();
_currentSize = external->sizeDevice();
_initialSizeSecondary = external->sizeHost();
_currentSizeSecondary = external->sizeHost();
_offset = 0L;
_offsetSecondary = 0L;
this->_cycleAllocations = 0;
this->_cycleAllocationsSecondary = 0;
this->_spillsSize = 0;
this->_spillsSizeSecondary = 0;
_externalized = true;
}
}
Workspace::Workspace(Nd4jLong primarySize, Nd4jLong secondarySize) {
if (secondarySize > 0) {
auto res = cudaHostAlloc(reinterpret_cast<void **>(&_ptrHost), secondarySize, cudaHostAllocDefault);
if (res != 0)
throw cuda_exception::build("Can't allocate [HOST] memory", res);
cudaMemset(this->_ptrHost, 0, secondarySize);
this->_allocatedHost = true;
} else
this->_allocatedHost = false;
if (primarySize > 0) {
auto res = cudaMalloc(reinterpret_cast<void **>(&_ptrDevice), primarySize);
if (res != 0)
throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
cudaMemset(this->_ptrDevice, 0, primarySize);
this->_allocatedDevice = true;
} else
this->_allocatedDevice = false;
this->_initialSize = primarySize;
this->_initialSizeSecondary = secondarySize;
this->_currentSize = primarySize;
this->_currentSizeSecondary = secondarySize;
this->_offset = 0;
this->_offsetSecondary = 0;
this->_cycleAllocations = 0;
this->_spillsSize = 0;
this->_spillsSizeSecondary = 0;
}
void Workspace::init(Nd4jLong primaryBytes, Nd4jLong secondaryBytes) {
if (this->_currentSize < primaryBytes) {
if (this->_allocatedDevice && !_externalized)
cudaFree((void *)this->_ptrDevice);
auto res = cudaMalloc(reinterpret_cast<void **>(&_ptrDevice), primaryBytes);
if (res != 0)
throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
cudaMemset(this->_ptrDevice, 0, primaryBytes);
this->_currentSize = primaryBytes;
this->_allocatedDevice = true;
}
if (this->_currentSizeSecondary < secondaryBytes) {
if (this->_allocatedHost && !_externalized)
cudaFreeHost((void *)this->_ptrHost);
auto res = cudaHostAlloc(reinterpret_cast<void **>(&_ptrHost), secondaryBytes, cudaHostAllocDefault);
if (res != 0)
throw cuda_exception::build("Can't allocate [HOST] memory", res);
cudaMemset(this->_ptrHost, 0, secondaryBytes);
this->_currentSizeSecondary = secondaryBytes;
this->_allocatedHost = true;
}
}
void Workspace::expandBy(Nd4jLong numBytes, Nd4jLong secondaryBytes) {
this->init(_currentSize + numBytes, _currentSizeSecondary + secondaryBytes);
}
void Workspace::expandTo(Nd4jLong numBytes, Nd4jLong secondaryBytes) {
this->init(numBytes, secondaryBytes);
}
void Workspace::freeSpills() {
_spillsSize = 0;
_spillsSizeSecondary = 0;
for (auto v:_spills)
cudaFree(v);
for (auto v:_spillsSecondary)
cudaFreeHost(v);
_spills.clear();
_spillsSecondary.clear();
}
Workspace::~Workspace() {
if (this->_allocatedHost && !_externalized)
cudaFreeHost((void *)this->_ptrHost);
if (this->_allocatedDevice && !_externalized)
cudaFree((void *)this->_ptrDevice);
freeSpills();
}
Nd4jLong Workspace::getUsedSize() {
return getCurrentOffset();
}
Nd4jLong Workspace::getCurrentSize() {
return _currentSize;
}
Nd4jLong Workspace::getCurrentOffset() {
return _offset.load();
}
void* Workspace::allocateBytes(Nd4jLong numBytes) {
return allocateBytes(sd::memory::MemoryType::HOST, numBytes);
}
Nd4jLong Workspace::getAllocatedSize() {
return getCurrentSize() + getSpilledSize();
}
void Workspace::scopeIn() {
freeSpills();
init(_cycleAllocations.load());
_cycleAllocations = 0;
}
void Workspace::scopeOut() {
_offset = 0;
}
Nd4jLong Workspace::getSpilledSize() {
return _spillsSize.load();
}
void* Workspace::allocateBytes(sd::memory::MemoryType type, Nd4jLong numBytes) {
switch (type) {
case HOST: {
if (numBytes < 1)
throw allocation_exception::build("Number of [HOST] bytes for allocation should be positive", numBytes);
//numBytes += 32;
void* result = nullptr;
this->_cycleAllocationsSecondary += numBytes;
this->_mutexAllocation.lock();
if (_offsetSecondary.load() + numBytes > _currentSizeSecondary) {
nd4j_debug("Allocating %lld [HOST] bytes in spills\n", numBytes);
this->_mutexAllocation.unlock();
Nd4jPointer p;
auto res = cudaHostAlloc(reinterpret_cast<void **>(&p), numBytes, cudaHostAllocDefault);
if (res != 0)
throw cuda_exception::build("Can't allocate [HOST] memory", res);
_mutexSpills.lock();
_spillsSecondary.push_back(p);
_mutexSpills.unlock();
_spillsSizeSecondary += numBytes;
return p;
}
result = (void *)(_ptrHost + _offsetSecondary.load());
_offsetSecondary += numBytes;
//memset(result, 0, (int) numBytes);
nd4j_debug("Allocating %lld bytes from [HOST] workspace; Current PTR: %p; Current offset: %lld\n", numBytes, result, _offset.load());
this->_mutexAllocation.unlock();
return result;
}
break;
case DEVICE: {
if (numBytes < 1)
throw allocation_exception::build("Number of [DEVICE] bytes for allocation should be positive", numBytes);
//numBytes += 32;
void* result = nullptr;
this->_cycleAllocations += numBytes;
this->_mutexAllocation.lock();
if (_offset.load() + numBytes > _currentSize) {
nd4j_debug("Allocating %lld [DEVICE] bytes in spills\n", numBytes);
this->_mutexAllocation.unlock();
Nd4jPointer p;
auto res = cudaMalloc(reinterpret_cast<void **>(&p), numBytes);
if (res != 0)
throw cuda_exception::build("Can't allocate [DEVICE] memory", res);
_mutexSpills.lock();
_spills.push_back(p);
_mutexSpills.unlock();
_spillsSize += numBytes;
return p;
}
result = (void *)(_ptrDevice + _offset.load());
_offset += numBytes;
//memset(result, 0, (int) numBytes);
nd4j_debug("Allocating %lld bytes from [DEVICE] workspace; Current PTR: %p; Current offset: %lld\n", numBytes, result, _offset.load());
this->_mutexAllocation.unlock();
return result;
}
break;
default:
throw std::runtime_error("Unknown MemoryType was passed in");
}
}
Workspace* Workspace::clone() {
// for clone we take whatever is higher: current allocated size, or allocated size of current loop
return new Workspace(sd::math::nd4j_max<Nd4jLong >(this->getCurrentSize(), this->_cycleAllocations.load()));
}
Nd4jLong Workspace::getAllocatedSecondarySize() {
return getCurrentSecondarySize() + getSpilledSecondarySize();
}
Nd4jLong Workspace::getCurrentSecondarySize() {
return _currentSizeSecondary;
}
Nd4jLong Workspace::getCurrentSecondaryOffset() {
return _offsetSecondary.load();
}
Nd4jLong Workspace::getSpilledSecondarySize() {
return _spillsSizeSecondary;
}
Nd4jLong Workspace::getUsedSecondarySize() {
return getCurrentSecondaryOffset();
}
}
}
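// Hedged usage sketch (not part of the original file): a minimal driver for
// the workspace above, assuming only the public API visible in this
// translation unit. Requests that fit the pre-sized regions are O(1) pointer
// bumps; oversized requests spill into cudaMalloc/cudaHostAlloc allocations
// that are released by freeSpills() and the destructor.
void example_workspace_usage() {
    sd::memory::Workspace ws(1024 * 1024, 64 * 1024);  // 1 MB device, 64 KB host
    void* dBuf = ws.allocateBytes(sd::memory::MemoryType::DEVICE, 4096);
    void* hBuf = ws.allocateBytes(sd::memory::MemoryType::HOST, 4096);
    // ... launch kernels against dBuf, stage transfers through hBuf ...
    (void)dBuf;
    (void)hBuf;
}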
|
the_stack
|
#include "dynet/dynet.h"
#include "dynet/virtual-cudnn.h"
#include "tensor-eigen.h"
#include "dynet/cudnn-ops.h"
using namespace cudnn;
namespace dynet {
CudnnConvOp::CudnnConvOp(const std::vector<unsigned>& s, const bool padding_type) {
stride_.resize(s.size());
for (unsigned i = 0; i < stride_.size(); ++i) {
stride_[i] = static_cast<int>(s[i]);
}
is_valid_ = padding_type;
fwd_workspace = NULL;
bwd_filter_workspace = NULL;
bwd_data_workspace = NULL;
workspace_fwd_size_ = 0;
workspace_bwd_data_size_ = 0;
workspace_bwd_filter_size_ = 0;
createTensorDescriptor(&x_desc_);
createTensorDescriptor(&y_desc_);
createTensorDescriptor(&bias_desc_);
createFilterDescriptor(&filter_desc_);
createConvolutionDescriptor(&conv_desc_);
}
CudnnConvOp::~CudnnConvOp() noexcept(false) {
destroyTensorDescriptor(&x_desc_);
destroyTensorDescriptor(&y_desc_);
destroyTensorDescriptor(&bias_desc_);
destroyFilterDescriptor(&filter_desc_);
destroyConvolutionDescriptor(&conv_desc_);
}
void CudnnConvOp::forward_impl(const Device_GPU& dev, const std::vector<const Tensor*>& xs, Tensor& fx) {
AlignedMemoryPool* scratch_allocator = dev.pools[(int)DeviceMempool::SCS];
const Tensor* x = xs[0];
const Tensor* filter = xs[1];
Tensor* y = &fx;
unsigned XN = x->d.bd;
unsigned XC = x->d[2];
unsigned XH = x->d[0];
unsigned XW = x->d[1];
unsigned FYC = filter->d[3];
unsigned FXC = filter->d[2];
unsigned FH = filter->d[0];
unsigned FW = filter->d[1];
unsigned YN = fx.d.bd;
unsigned YC = fx.d[2];
unsigned YH = fx.d[0];
unsigned YW = fx.d[1];
int pad_h = 0, pad_w = 0;
bool h_odd = false, w_odd = false;
// infer pad_h, pad_w
// Total padding on rows and cols is
// pad_h = (YH - 1) * stride[0] + FH - XH
// pad_w = (YW - 1) * stride[1] + FW - XW
// We pad pad_h/2 on the top and pad_h - pad_h/2 on the bottom, and pad_w/2 on
// the left and pad_w - pad_w/2 on the right. When pad_h or pad_w is odd, this
// means we pad more on the bottom and right than on the top and left.
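// Worked example (illustrative, not from the original source): with XH = 5,
// FH = 3, stride_[0] = 1 and YH = 5 (SAME padding), the formula gives
// pad_h = (5 - 1) * 1 + 3 - 5 = 2, i.e. one row of zeros above and one below.
// An odd total such as pad_h = 3 leaves the extra row for the bottom, which
// is what the h_odd / w_odd branch below handles by explicitly padding x.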
if (!is_valid_) {
pad_h = std::max<int>(0, (YH - 1) * stride_[0] + FH - XH);
pad_w = std::max<int>(0, (YW - 1) * stride_[1] + FW - XW);
h_odd = (pad_h % 2 != 0);
w_odd = (pad_w % 2 != 0);
if (h_odd || w_odd) { // then we need to pad one row/col on the bottom/right
unsigned new_XH = XH + h_odd;
unsigned new_XW = XW + w_odd;
void* temp = scratch_allocator->allocate(sizeof(float) * new_XW * new_XH * XC * XN);
Tensor padded_x = Tensor(Dim({new_XH, new_XW, XC}, XN), static_cast<float*>(temp), xs[0]->device, DeviceMempool::FXS);
Eigen::array<std::pair<int, int>, 4> paddings;
paddings[0] = std::make_pair(0, static_cast<int>(h_odd));
paddings[1] = std::make_pair(0, static_cast<int>(w_odd));
paddings[2] = std::make_pair(0, 0);
paddings[3] = std::make_pair(0, 0);
tb<3>(padded_x).device(*dev.edevice) = tb<3>(*xs[0]).pad(paddings);
// re-point x to the padded input
XH = new_XH;
XW = new_XW;
x = &padded_x;
}
}
//set cudnn descriptors
setTensor4dDescriptor(&x_desc_, XN, XC, XW, XH);
setTensor4dDescriptor(&y_desc_, YN, YC, YW, YH);
setFilter4dDescriptor(&filter_desc_, FYC, FXC, FW, FH);
setConvolution2dDescriptor(&conv_desc_, pad_w/2, pad_h/2, stride_[1], stride_[0]);
if (xs.size() == 3) {
setTensor4dDescriptor(&bias_desc_, 1, FYC, 1, 1);
}
// TODO(Hao Zhang): there should be an autotune function to determine
// the best convolution algorithm to use.
// However, as DyNet rebuilds the computation graph for every sample (or every
// iteration), such an autotune function seems unnecessary.
// Note: the following computations are *NON-DETERMINISTIC*
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(dev.cudnnHandle,
x_desc_, filter_desc_, conv_desc_, y_desc_,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, workspace_size_limit_bytes,
&fwd_algo_));
CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(dev.cudnnHandle,
x_desc_, filter_desc_, conv_desc_, y_desc_,
fwd_algo_, &workspace_fwd_size_));
fwd_workspace = scratch_allocator->allocate(workspace_fwd_size_);
float alpha = 1.f, beta = 0.f;
CUDNN_CHECK(cudnnConvolutionForward(dev.cudnnHandle,
&alpha, x_desc_, x->v, filter_desc_, filter->v,
conv_desc_, fwd_algo_, fwd_workspace, workspace_fwd_size_,
&beta, y_desc_, y->v));
if (xs.size() == 3) {
CUDNN_CHECK(cudnnAddTensor(dev.cudnnHandle, &alpha,
bias_desc_, xs[2]->v, &alpha, y_desc_, y->v));
}
}
// We do not assume that backward_impl depends on the execution of forward_impl,
// i.e. backward_impl can be called independently
void CudnnConvOp::backward_impl(const Device_GPU & dev,
const std::vector<const Tensor*>& xs,
const Tensor& fx,
const Tensor& dEdf,
unsigned i,
Tensor& dEdxi) {
AlignedMemoryPool* scratch_allocator = dev.pools[(int)DeviceMempool::SCS];
const Tensor* x = xs[0];
const Tensor* filter = xs[1];
const Tensor* dy = &dEdf;
void* dxi = NULL;
unsigned XN = x->d.bd;
unsigned XC = x->d[2];
unsigned XH = x->d[0];
unsigned XW = x->d[1];
unsigned FYC = filter->d[3];
unsigned FXC = filter->d[2];
unsigned FH = filter->d[0];
unsigned FW = filter->d[1];
unsigned YN = fx.d.bd;
unsigned YC = fx.d[2];
unsigned YH = fx.d[0];
unsigned YW = fx.d[1];
// create padded input if necessary
int pad_h = 0, pad_w = 0;
bool h_odd = false, w_odd = false;
if (!is_valid_) {
pad_h = std::max<int>(0, (YH - 1) * stride_[0] + FH - XH);
pad_w = std::max<int>(0, (YW - 1) * stride_[1] + FW - XW);
h_odd = (pad_h % 2 != 0);
w_odd = (pad_w % 2 != 0);
if (h_odd || w_odd) {
unsigned new_XH = XH + h_odd;
unsigned new_XW = XW + w_odd;
void* temp = scratch_allocator->allocate(sizeof(float) * new_XW * new_XH * XC * XN);
Tensor padded_x = Tensor(Dim({new_XH, new_XW, XC}, XN), static_cast<float*>(temp), xs[0]->device, DeviceMempool::FXS);
Eigen::array<std::pair<int, int>, 4> paddings;
paddings[0] = std::make_pair(0, static_cast<int>(h_odd));
paddings[1] = std::make_pair(0, static_cast<int>(w_odd));
paddings[2] = std::make_pair(0, 0);
paddings[3] = std::make_pair(0, 0);
tb<3>(padded_x).device(*dev.edevice) = tb<3>(*xs[0]).pad(paddings);
// re-point x to the padded input
XH = new_XH;
XW = new_XW;
x = &padded_x;
}
}
setTensor4dDescriptor(&x_desc_, XN, XC, XW, XH);
setTensor4dDescriptor(&y_desc_, YN, YC, YW, YH);
setFilter4dDescriptor(&filter_desc_, FYC, FXC, FW, FH);
setConvolution2dDescriptor(&conv_desc_, pad_w/2, pad_h/2, stride_[1], stride_[0]);
if (i == 2) {
setTensor4dDescriptor(&bias_desc_, 1, FYC, 1, 1);
}
float alpha = 1.f, beta = 0.f;
switch(i) {
case 0: { // grad w.r.t. feature maps
dxi = scratch_allocator->allocate(sizeof(float) * XH * XW * XC * XN);
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(dev.cudnnHandle,
filter_desc_, y_desc_, conv_desc_, x_desc_,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
workspace_size_limit_bytes, &bwd_d_algo_));
CUDNN_CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize(dev.cudnnHandle,
filter_desc_, y_desc_, conv_desc_, x_desc_,
bwd_d_algo_, &workspace_bwd_data_size_));
bwd_data_workspace = scratch_allocator->allocate(workspace_bwd_data_size_);
CUDNN_CHECK(cudnnConvolutionBackwardData(dev.cudnnHandle,
&alpha, filter_desc_, filter->v, y_desc_, dy->v,
conv_desc_, bwd_d_algo_, bwd_data_workspace, workspace_bwd_data_size_,
&beta, x_desc_, dxi));
Tensor padded_dx = Tensor(Dim({XH, XW, XC}, XN), static_cast<float*>(dxi), xs[0]->device, DeviceMempool::FXS);
Eigen::array<int, 4> offsets = {0, 0, 0, 0};
Eigen::array<int, 4> extents = {static_cast<int>(XH - h_odd), static_cast<int>(XW - w_odd), static_cast<int>(XC), static_cast<int>(XN)};
tb<3>(dEdxi).device(*dev.edevice) += tb<3>(padded_dx).slice(offsets, extents);
} break;
case 1: {// grad w.r.t. filters
dxi = scratch_allocator->allocate(sizeof(float) * FYC * FXC * FW * FH);
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(dev.cudnnHandle,
x_desc_, y_desc_, conv_desc_, filter_desc_,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
workspace_size_limit_bytes, &bwd_f_algo_));
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize(dev.cudnnHandle,
x_desc_, y_desc_, conv_desc_, filter_desc_,
bwd_f_algo_, &workspace_bwd_filter_size_));
bwd_filter_workspace = scratch_allocator->allocate(workspace_bwd_filter_size_);
CUDNN_CHECK(cudnnConvolutionBackwardFilter(dev.cudnnHandle,
&alpha, x_desc_, x->v, y_desc_, dy->v,
conv_desc_, bwd_f_algo_, bwd_filter_workspace, workspace_bwd_filter_size_,
&beta, filter_desc_, dxi));
// accumulate the gradient
Tensor dxi_tensor = Tensor(Dim({FH, FW, FXC}, FYC), static_cast<float*>(dxi), xs[1]->device, DeviceMempool::FXS);
t<4>(dEdxi).device(*dev.edevice) += t<4>(dxi_tensor);
} break;
case 2: {// grad w.r.t. bias
dxi = scratch_allocator->allocate(sizeof(float) * FYC);
CUDNN_CHECK(cudnnConvolutionBackwardBias(dev.cudnnHandle,
&alpha, y_desc_, dy->v,
&beta, bias_desc_, dxi));
CUDNN_CHECK(cudnnAddTensor(dev.cudnnHandle, &alpha,
bias_desc_, dxi, &alpha, bias_desc_, dEdxi.v));
} break;
default:
throw std::runtime_error("dynet::CudnnConvOp::backward_impl, conv2d have at most 3 inputs");
}
}
CudnnMaxPooling2DOp::CudnnMaxPooling2DOp(const std::vector<unsigned>& ksize,
const std::vector<unsigned>& stride,
const bool padding_type) {
ksize_.resize(ksize.size());
stride_.resize(stride.size());
for (unsigned i = 0; i < ksize.size(); ++i) {
ksize_[i] = static_cast<int>(ksize[i]);
stride_[i] = static_cast<int>(stride[i]);
}
is_valid_ = padding_type;
createTensorDescriptor(&x_desc_);
createTensorDescriptor(&y_desc_);
createPoolingDescriptor(&pooling_desc_);
}
CudnnMaxPooling2DOp::~CudnnMaxPooling2DOp() noexcept(false) {
destroyTensorDescriptor(&x_desc_);
destroyTensorDescriptor(&y_desc_);
destroyPoolingDescriptor(&pooling_desc_);
}
void CudnnMaxPooling2DOp::forward_impl(const Device_GPU & dev,
const std::vector<const Tensor*>& xs,
Tensor& fx) {
const Tensor* x = xs[0];
Tensor* y = &fx;
AlignedMemoryPool* scratch_allocator = dev.pools[(int)DeviceMempool::SCS];
unsigned XN = x->d.bd;
unsigned XC = x->d[2];
unsigned XH = x->d[0];
unsigned XW = x->d[1];
unsigned YN = fx.d.bd;
unsigned YC = fx.d[2];
unsigned YH = fx.d[0];
unsigned YW = fx.d[1];
// infer pad_h, pad_w
int pad_h = 0, pad_w = 0;
if (!is_valid_) {
pad_h = std::max<int>(0, (YH - 1) * stride_[0] + ksize_[0] - XH) / 2;
pad_w = std::max<int>(0, (YW - 1) * stride_[1] + ksize_[1] - XW) / 2;
}
setTensor4dDescriptor(&x_desc_, XN, XC, XW, XH);
setTensor4dDescriptor(&y_desc_, YN, YC, YW, YH);
setPooling2dDescriptor(&pooling_desc_, CUDNN_POOLING_MAX, ksize_[1], ksize_[0],
pad_w, pad_h, stride_[1], stride_[0]);
float alpha = 1.f, beta = 0.f;
CUDNN_CHECK(cudnnPoolingForward(dev.cudnnHandle, pooling_desc_,
&alpha, x_desc_, x->v,
&beta, y_desc_, y->v));
}
void CudnnMaxPooling2DOp::backward_impl(const Device_GPU & dev,
const std::vector<const Tensor*>& xs,
const Tensor& fx,
const Tensor& dEdf,
unsigned i,
Tensor& dEdxi) {
const Tensor* x = xs[0];
const Tensor* y = &fx;
const Tensor* dy = &dEdf;
void* dxi = NULL;
AlignedMemoryPool* scratch_allocator = dev.pools[(int)DeviceMempool::SCS];
unsigned XN = x->d.bd;
unsigned XC = x->d[2];
unsigned XH = x->d[0];
unsigned XW = x->d[1];
unsigned YN = fx.d.bd;
unsigned YC = fx.d[2];
unsigned YH = fx.d[0];
unsigned YW = fx.d[1];
// infer pad_h, pad_w
int pad_h = 0, pad_w = 0;
if (!is_valid_) {
pad_h = std::max<int>(0, (YH - 1) * stride_[0] + ksize_[0] - XH) / 2;
pad_w = std::max<int>(0, (YW - 1) * stride_[1] + ksize_[1] - XW) / 2;
}
setTensor4dDescriptor(&x_desc_, XN, XC, XW, XH);
setTensor4dDescriptor(&y_desc_, YN, YC, YW, YH);
setPooling2dDescriptor(&pooling_desc_, CUDNN_POOLING_MAX, ksize_[1], ksize_[0],
pad_w, pad_h, stride_[1], stride_[0]);
// here we could reuse the descriptor we created for forward, because
// they share the same size
float alpha = 1.f, beta = 0.f;
dxi = scratch_allocator->allocate(sizeof(float) * XN * XC * XH * XW);
CUDNN_CHECK(cudnnPoolingBackward(dev.cudnnHandle, pooling_desc_,
&alpha, y_desc_, y->v, y_desc_, dy->v,
x_desc_, x->v, &beta, x_desc_, dxi));
CUDNN_CHECK(cudnnAddTensor(dev.cudnnHandle, &alpha,
x_desc_, dxi, &alpha, x_desc_, dEdxi.v));
}
} // namespace dynet
#endif
|
the_stack
|
#include "caffe2/utils/GpuDefs.cuh"
#include "caffe2/utils/GpuScanUtils.cuh"
#include "caffe2/utils/math.h"
#include <cuda_runtime.h>
namespace caffe2 {
// From the cutorch library
template <typename T>
struct AddOp {
__device__ __forceinline__ T operator()(T &lhs, T &rhs) {
return lhs + rhs;
}
};
template <typename T>
struct TopKTypeConfig {};
template <>
struct TopKTypeConfig<float> {
typedef unsigned int RadixType;
// Converts a float to an integer representation with the same
// sorting; i.e., for floats f1, f2:
// if f1 < f2 then convert(f1) < convert(f2)
// We use this to enable radix selection of floating-point values.
// This also gives a relative order for NaNs, but that's ok, as they
// will all be adjacent
static inline __device__ RadixType convert(float v) {
RadixType x = __float_as_int(v);
RadixType mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
return (x ^ mask);
}
static inline __device__ float deconvert(RadixType v) {
RadixType mask = (v & 0x80000000) ? 0x80000000 : 0xffffffff;
return __int_as_float(v ^ mask);
}
};
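// Hedged sketch (not part of the original file): a small kernel exercising the
// order-preserving float <-> RadixType mapping above. Sorting the converted
// keys as unsigned integers gives the same order as sorting the original
// floats, and deconvert(convert(v)) == v for non-NaN inputs. The kernel name
// is hypothetical and exists only for illustration.
__global__ void topk_float_convert_demo(const float* in, unsigned int* keys,
                                        float* roundtrip, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    unsigned int k = TopKTypeConfig<float>::convert(in[i]);
    keys[i] = k;                                         // monotone in in[i]
    roundtrip[i] = TopKTypeConfig<float>::deconvert(k);  // reproduces in[i]
  }
}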
template <>
struct TopKTypeConfig<unsigned char> {
typedef unsigned int RadixType;
static inline __device__ RadixType convert(unsigned char v) {
return v;
}
static inline __device__ unsigned char deconvert(RadixType v) {
return v;
}
};
template <>
struct TopKTypeConfig<char> {
typedef unsigned int RadixType;
static inline __device__ RadixType convert(char v) {
return 128u + v;
}
static inline __device__ char deconvert(RadixType v) {
return v - 128;
}
};
template <>
struct TopKTypeConfig<short> {
typedef unsigned int RadixType;
static inline __device__ RadixType convert(short v) {
assert(sizeof(short) == 2);
return 32768u + v;
}
static inline __device__ short deconvert(RadixType v) {
return v - 32768;
}
};
template <>
struct TopKTypeConfig<int> {
typedef unsigned int RadixType;
static inline __device__ RadixType convert(int v) {
assert(sizeof(int) == 4);
return 2147483648u + v;
}
static inline __device__ int deconvert(RadixType v) {
return v - 2147483648u;
}
};
template <>
struct TopKTypeConfig<long> {
typedef unsigned long long int RadixType;
static inline __device__ RadixType convert(long v) {
assert(sizeof(long) == 8);
return 9223372036854775808ull + v;
}
static inline __device__ long deconvert(RadixType v) {
return v - 9223372036854775808ull;
}
};
template <>
struct TopKTypeConfig<double> {
typedef unsigned long long int RadixType;
static inline __device__ RadixType convert(double v) {
RadixType x = __double_as_longlong(v);
RadixType mask = -((x >> 63)) | 0x8000000000000000;
return (x ^ mask);
}
static inline __device__ double deconvert(RadixType v) {
RadixType mask = ((v >> 63) - 1) | 0x8000000000000000;
return __longlong_as_double(v ^ mask);
}
};
// This function counts the distribution of all input values in a
// slice we are selecting by radix digit at `radixDigitPos`, but only
// those that pass the filter `((v & desiredMask) == desired)`.
// This produces and broadcasts the seen counts for a single block only.
// `smem` must have at least `RadixSize` elements.
template <typename DataType,
typename BitDataType,
typename CountType,
int RadixSize,
int RadixBits>
__device__ void countRadixUsingMask(CountType counts[RadixSize],
CountType* smem,
BitDataType desired,
BitDataType desiredMask,
int radixDigitPos,
int sliceSize,
const DataType* data) {
// Clear out per-thread counts from a previous round
#pragma unroll
for (int i = 0; i < RadixSize; ++i) {
counts[i] = 0;
}
if (threadIdx.x < RadixSize) {
smem[threadIdx.x] = 0;
}
__syncthreads();
// Scan over all the data. Upon a read, the warp will accumulate
// counts per each digit in the radix using warp voting.
for (int i = threadIdx.x; i < sliceSize; i += blockDim.x) {
BitDataType val = TopKTypeConfig<DataType>::convert(data[i]);
bool hasVal = ((val & desiredMask) == desired);
BitDataType digitInRadix = Bitfield<BitDataType>::getBitfield(val, radixDigitPos, RadixBits);
#pragma unroll
for (unsigned int j = 0; j < RadixSize; ++j) {
bool vote = hasVal && (digitInRadix == j);
#if CUDA_VERSION >= 9000
counts[j] += __popc(__ballot_sync(__activemask(), vote));
#else
counts[j] += __popc(__ballot(vote));
#endif
}
}
// Now, for each warp, sum values
if (getLaneId() == 0) {
#pragma unroll
for (unsigned int i = 0; i < RadixSize; ++i) {
atomicAdd(&smem[i], counts[i]);
}
}
__syncthreads();
// For each thread, read in the total counts
#pragma unroll
for (unsigned int i = 0; i < RadixSize; ++i) {
counts[i] = smem[i];
}
__syncthreads();
}
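// Illustrative note (added): inside the scan loop above, every active lane of a
// warp votes on whether its element's current radix digit equals j; __ballot
// packs those votes into a 32-bit mask and __popc counts the set bits, so each
// lane accumulates the whole warp's per-digit counts. Lane 0 of each warp then
// folds its copy into shared memory with atomicAdd, and the reads after the
// final __syncthreads broadcast the block-wide histogram back to every thread.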
// Over what radix we are selecting values
#define RADIX_BITS 2 // digits are base-(2 ^ RADIX_BITS)
#define RADIX_SIZE 4 // 2 ^ RADIX_BITS
#define RADIX_MASK (RADIX_SIZE - 1)
// This finds the unique value `v` that matches the pattern
// ((v & desiredMask) == desired) in our sorted int format
template <typename DataType, typename BitDataType>
__device__ DataType findPattern(DataType* smem,
const DataType* data,
int sliceSize,
BitDataType desired,
BitDataType desiredMask) {
if (threadIdx.x < 32) {
smem[threadIdx.x] = (DataType) 0;
}
__syncthreads();
// All threads participate in the loop, in order to sync on the flag
int numIterations = math::roundUp(sliceSize, (int) blockDim.x);
for (int i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < sliceSize);
DataType v = inRange ? data[i] : (DataType)0;
if (inRange && ((TopKTypeConfig<DataType>::convert(v) & desiredMask) == desired)) {
// There should not be conflicts if we are using findPattern,
// since the result is unique
smem[0] = (DataType)1;
smem[1] = v; // can't use val as the flag, since it could be 0
}
__syncthreads();
DataType found = smem[0];
DataType val = smem[1];
__syncthreads();
// Check to see if a thread found the value
if (found != (DataType)0) {
// all threads return this value
return val;
}
}
// should not get here
assert(false);
return (DataType)0;
}
// Returns the top-Kth element found in the data using radix selection
template <typename DataType, typename BitDataType, bool Order>
__device__ void radixSelect(const DataType* data,
int k,
int sliceSize,
int* smem,
DataType* topK) {
// Per-thread buckets into which we accumulate digit counts in our
// radix
int counts[RADIX_SIZE];
// We only consider elements x such that (x & desiredMask) == desired
// Initially, we consider all elements of the array, so the above
// statement is true regardless of input.
BitDataType desired = 0;
BitDataType desiredMask = 0;
// We are looking for the top kToFind-th element when iterating over
// digits; this count gets reduced by elimination when counting
// successive digits
int kToFind = k;
// We start at the most significant digit in our radix, scanning
// through to the least significant digit
#pragma unroll
for (int digitPos = sizeof(DataType) * 8 - RADIX_BITS;
digitPos >= 0;
digitPos -= RADIX_BITS) {
// Count radix distribution for the current position and reduce
// across all threads
countRadixUsingMask<DataType, BitDataType,
int,
RADIX_SIZE, RADIX_BITS>(
counts, smem,
desired, desiredMask, digitPos,
sliceSize, data);
// All threads participate in the comparisons below to know the
// final result
#define CHECK_RADIX(i) \
int count = counts[i]; \
\
/* All threads have the same value in counts here, so all */ \
/* threads will return from the function. */ \
if (count == 1 && kToFind == 1) { \
/* There is a unique answer. */ \
desired = Bitfield<BitDataType>::setBitfield(desired, i, digitPos, RADIX_BITS); \
desiredMask = \
Bitfield<BitDataType>::setBitfield(desiredMask, RADIX_MASK, digitPos, RADIX_BITS); \
\
/* The answer is now the unique element v such that: */ \
/* (v & desiredMask) == desired */ \
/* However, we do not yet know what the actual element is. We */ \
/* need to perform a search through the data to find the */ \
/* element that matches this pattern. */ \
*topK = findPattern<DataType, BitDataType>( \
(DataType*) smem, data, sliceSize, \
desired, desiredMask); \
return; \
} \
\
if (count >= kToFind) { \
desired = Bitfield<BitDataType>::setBitfield(desired, i, digitPos, RADIX_BITS); \
desiredMask = \
Bitfield<BitDataType>::setBitfield(desiredMask, RADIX_MASK, digitPos, RADIX_BITS); \
\
/* The top-Kth element v must now be one such that: */ \
/* (v & desiredMask == desired) */ \
/* but we haven't narrowed it down; we must check the next */ \
/* least-significant digit */ \
break; \
} \
\
    kToFind -= count;
if (Order) {
// Process in descending order
#pragma unroll
for (int i = RADIX_SIZE - 1; i >= 0; --i) {
CHECK_RADIX(i);
}
} else {
// Process in ascending order
#pragma unroll
for (int i = 0; i < RADIX_SIZE; ++i) {
CHECK_RADIX(i);
}
}
#undef CHECK_RADIX
} // end digitPos for
// There is no unique result, but there is a non-unique result
// matching `desired` exactly
*topK = TopKTypeConfig<DataType>::deconvert(desired);
}
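// Illustrative walk-through (added): with RADIX_BITS = 2 the key is consumed in
// 2-bit digits from most to least significant (digitPos = 30, 28, ..., 0 for
// 32-bit keys). At each position the block histograms the candidates that still
// match (desired, desiredMask); the digit bucket whose count first covers the
// remaining kToFind is locked into desired/desiredMask, buckets skipped on the
// way reduce kToFind, and a bucket that isolates exactly one candidate while
// kToFind == 1 short-circuits into findPattern to fetch the element itself.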
template <typename T, bool Order, typename IndicesType>
__global__ void gatherTopK(const T* inputPtr,
int inputSliceSize,
int outputSliceSize, // aka `k`
int numInputSlices,
T* topKPtr,
IndicesType* indicesPtr) {
__shared__ int smem[32]; // one per each warp, up to warp limit
int slice = blockIdx.x;
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
const T* inputSliceStart = &inputPtr[slice * inputSliceSize];
T* topKSliceStart = &topKPtr[slice * outputSliceSize];
IndicesType* indicesSliceStart = &indicesPtr[slice * outputSliceSize];
// Find the k-th highest element in our input
T topKValue = (T)0;
radixSelect<T, typename TopKTypeConfig<T>::RadixType, Order>(
inputSliceStart, outputSliceSize,
inputSliceSize,
smem, &topKValue);
// Every value that is strictly less/greater than `pattern`
// (depending on sort dir) in sorted int format is in the top-K.
// The top-K value itself might not be unique.
//
// Since there are a variable number of elements that we see that
// are within the top-k, we don't know at what index to write out
// the resulting values.
// In order to get this, we perform an exclusive prefix sum of
// `hasTopK`. This will return the resulting index into which we
// need to write the result, if a thread has a result.
// All threads need to participate in the loop and the prefix sum,
// but not necessarily in the load; hence loop bounds being rounded
// up to a multiple of the block dim.
int numIterations = math::roundUp(inputSliceSize, (int) blockDim.x);
int writeIndexStart = 0;
for (int i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v = inRange ? inputSliceStart[i] : (T)0;
bool hasTopK;
if (Order) {
hasTopK = inRange && (v > topKValue);
} else {
hasTopK = inRange && (v < topKValue);
}
int index;
int carry;
exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK) {
int writeIndex = writeIndexStart + index;
assert(writeIndex < outputSliceSize);
int topKOffset = writeIndex;
int indexOffset = writeIndex;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
writeIndexStart += carry;
}
// We need to fill in the rest with actual == top-K values.
// The number that we need is outputSliceSize -
// writeIndexStart. There might be more than that number available,
// in which case we have to choose the first seen set. We do this
// via a prefix sum to calculate indices for writing results.
assert(outputSliceSize >= writeIndexStart);
int topKRemaining = (outputSliceSize - writeIndexStart);
for (int i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v = inRange ? inputSliceStart[i] : (T)0;
bool hasTopK = inRange && (v == topKValue);
int index;
int carry;
exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK && index < topKRemaining) {
int writeIndex = writeIndexStart + index;
assert(writeIndex < outputSliceSize);
int topKOffset = writeIndex;
int indexOffset = writeIndex;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
if (carry >= topKRemaining) {
break;
}
topKRemaining -= carry;
writeIndexStart += carry;
}
}
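// Minimal launch sketch (added; the block size is an assumption, not taken from
// the original operator): one thread block handles one input slice.
//
//   const int kBlockSize = 256;
//   gatherTopK<float, /*Order=*/true, int><<<numInputSlices, kBlockSize, 0, stream>>>(
//       d_input, inputSliceSize, k, numInputSlices, d_topK, d_indices);
//
// Order = true selects the k largest values per slice; false selects the k smallest.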
#undef RADIX_BITS
#undef RADIX_SIZE
#undef RADIX_MASK
} // namespace caffe2
#endif // CAFFE2_OPERATORS_TOP_K_RADIX_SELECTION_H_
|
the_stack
|
#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <cstring>
#include <sys/time.h>
#include <errno.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>
//=============================================================================
#define ASSERT(condition) \
(void)((condition) || (assert_(#condition, __FILE__, __LINE__), 0))
void assert_(const char* condition_string, const char* file, int line) {
fprintf(stderr, "%s: \"%s\". At file %s, line %i.\n", "Assertion error",
condition_string, file, line);
exit(EXIT_FAILURE);
}
#define SAFE_CALL_MPI(call) \
{int errcode = call; \
ASSERT(MPI_SUCCESS == errcode && "Failure in call: " #call);}
#define SAFE_CALL_HIP(call) \
{hipError_t errcode = call; \
ASSERT(hipSuccess == errcode && "Failure in call: " #call);}
#define SAFE_CALL_HIPBLAS(call) \
{hipblasStatus_t errcode = call; \
ASSERT(HIPBLAS_STATUS_SUCCESS == errcode && "Failure in call: " #call);}
//-----------------------------------------------------------------------------
/// Wallclock timer.
double get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
double result = ((double)tv.tv_sec + (double)tv.tv_usec * 1.e-6);
return result;
}
//-----------------------------------------------------------------------------
/// Choices for tensor core GEMM method.
enum {
TC_METHOD_NONE = 0,
TC_METHOD_FLOAT16 = 1,
TC_METHOD_INT8 = 2,
TC_METHOD_FLOAT32 = 3,
NUM_TC_METHOD = 4
};
//-----------------------------------------------------------------------------
template<typename GemmIn_t> struct TCBufTypes;
template<> struct TCBufTypes<float> {
static __host__ __device__ float zero() {return (float)0;}
static __host__ __device__ float one() {return (float)1;}
static __host__ __device__ float two() {return (float)2;}
};
//-----------------------------------------------------------------------------
template<int TC_METHOD> struct TCSelector;
template<> struct TCSelector<TC_METHOD_FLOAT32> {
enum {TC_METHOD = TC_METHOD_FLOAT32};
// types.
typedef float GemmIn_t;
typedef float GemmOut_t;
};
//-----------------------------------------------------------------------------
/// Matrix class, templated on scalar data type.
template<typename P_>
class Matrix {
enum {ROUNDUP = 8};
public:
typedef P_ P;
//----------
Matrix(size_t num_row, size_t num_col)
: num_row_(num_row)
, num_col_(num_col)
, num_row_up_(((num_row+ROUNDUP-1)/ROUNDUP)*ROUNDUP)
, num_col_up_(((num_col+ROUNDUP-1)/ROUNDUP)*ROUNDUP)
, num_elt_up_(num_row_up_ * num_col_up_)
, sizeP(sizeof(P)) {
SAFE_CALL_HIP(hipHostMalloc((void**)&h_, num_elt_up_ * sizeP));
ASSERT(h_ && "Failure in host memory allocation");
memset((void*)h_, 0, num_elt_up_ * sizeP);
SAFE_CALL_HIP(hipMalloc((void**)&d_, num_elt_up_ * sizeP));
ASSERT(d_ && "Failure in device memory allocation");
//SAFE_CALL_HIP(hipMemset((void*)d_, 0, num_elt_up_ * sizeP));
}
//----------
~Matrix() {
SAFE_CALL_HIP(hipHostFree(h_));
SAFE_CALL_HIP(hipFree(d_));
}
//----------
P* h() const {return h_;}
P* d() const {return d_;}
__host__ __device__ size_t nr() const {return num_row_;}
__host__ __device__ size_t nc() const {return num_col_;}
size_t nru() const {return num_row_up_;}
size_t ncu() const {return num_col_up_;}
//----------
P& elt(size_t i, size_t j) {
return h_[i + num_row_up_ * j];
}
//----------
__device__ P& eltd(size_t i, size_t j) {
return d_[i + num_row_up_ * j];
}
//----------
void to_device(hipStream_t stream) {
SAFE_CALL_HIP(hipMemcpyAsync(d_, h_, num_elt_up_ * sizeP,
hipMemcpyHostToDevice, stream));
}
//----------
void from_device(hipStream_t stream) {
SAFE_CALL_HIP(hipMemcpyAsync(h_, d_, num_elt_up_ * sizeP,
hipMemcpyDeviceToHost, stream));
}
//----------
static __device__ P& eltd(size_t i, size_t j, P* d, size_t num_row_up) {
return d[i + num_row_up * j];
}
//----------
private:
size_t num_row_;
size_t num_col_;
size_t num_row_up_;
size_t num_col_up_;
size_t num_elt_up_;
size_t sizeP;
P* h_;
P* d_;
// Disallowed methods.
Matrix(const Matrix&);
void operator=(const Matrix&);
};
//=============================================================================
/// Greatest common divisor.
size_t gcd(size_t a, size_t b){
if (a == 0)
return b;
return gcd(b % a, a);
}
//-----------------------------------------------------------------------------
/// Least common multiple.
size_t lcm(size_t a, size_t b){
return (a * b) / gcd(a, b);
}
//-----------------------------------------------------------------------------
/// Distance between nonzero elements along a column of the matrix.
__host__ __device__ size_t nonzero_stride(const size_t& i) {
enum {MAX = 499}; // Use prime number to randomize against sizes.
return 1 + i % MAX;
}
//-----------------------------------------------------------------------------
/// HIP kernel for set_input_matrix.
template<class Matrix_t>
__global__ void set_input_matrix_kernel(
size_t nr, size_t nc, size_t nru, typename Matrix_t::P* d,
size_t base_vector_num, typename Matrix_t::P value) {
const size_t index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= nr * nc)
return;
const size_t r = index % nr;
const size_t c = index / nr;
typedef typename Matrix_t::P P;
const P zero = TCBufTypes<P>::zero();
const size_t stride = nonzero_stride(r + base_vector_num);
Matrix_t::eltd(r, c, d, nru) = c % stride ? zero : value;
}
//-----------------------------------------------------------------------------
/// Set a sparse subset of the entries of a matrix.
///
/// All entries of the matrix A are zero, except for a small number of entries
/// along each column set to 1 according to a stride. The number of
/// interactions of elements between two columns is based on the least common
/// multiple of their respective stride values.
template<class Matrix_t>
void set_input_matrix(Matrix_t& a, size_t base_vector_num,
typename Matrix_t::P value, hipStream_t stream) {
const int threadblocksize = 256;
const int num_threadblocks = (a.nr() * a.nc() + threadblocksize - 1)
/ threadblocksize;
hipLaunchKernelGGL(HIP_KERNEL_NAME(set_input_matrix_kernel<Matrix_t>),
dim3(num_threadblocks), dim3(threadblocksize), 0, stream ,
a.nr(), a.nc(), a.nru(), a.d(), base_vector_num, value);
}
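// Illustrative note (added, not in the original source): with the pattern set
// above, row r of a factor matrix has ones exactly at field indices f with
// f % nonzero_stride(r + base_vector_num) == 0. A GEMM entry C(r,c) therefore
// counts the f in [0, k) divisible by both row strides, i.e. by their least
// common multiple l, which gives 1 + (k-1)/l since f = 0 always qualifies. This
// is the expected value used by the spot check in perform_run below.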
//-----------------------------------------------------------------------------
/// A very simplistic hash for a result matrix element, used for validation.
size_t elt_hash(size_t v, size_t r, size_t c) {
return 1 + (v * r * c) % (((size_t)1) << 40);
}
//-----------------------------------------------------------------------------
template<typename TCS, typename GemmIn_t, typename GemmOut_t>
void perform_gemm(hipblasHandle_t accelblas_handle, size_t m, size_t n, size_t k,
Matrix<GemmIn_t>& tc_buf_left, Matrix<GemmIn_t>& tc_buf_right,
Matrix<GemmOut_t>& c_buf) {
const GemmOut_t alpha = TCBufTypes<GemmOut_t>::one();
const GemmOut_t beta = TCBufTypes<GemmOut_t>::zero();
hipblasStatus_t status = hipblasSgemm(
accelblas_handle
, HIPBLAS_OP_N, HIPBLAS_OP_T
, m, n, k
, &alpha
, (float*)tc_buf_left.d(), tc_buf_left.nru()
, (float*)tc_buf_right.d(), tc_buf_right.nru()
, &beta
, (float*)c_buf.d(), c_buf.nru()
);
ASSERT(status == HIPBLAS_STATUS_SUCCESS && "Failure in call to hipblasSgemm.");
/* cblas as a reference
const float alpha = 1;
const float beta = 0;
cblas_sgemm(CblasColMajor, CblasNoTrans, CblasTrans,
m, n, k, alpha, tc_buf_left.h(), tc_buf_left.nru(),
tc_buf_right.h(), tc_buf_right.nru(), beta, c_buf.h(), c_buf.nru());
*/
}
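// Illustrative note (added): in column-major hipBLAS terms the call above
// computes C(m,n) = alpha * A(m,k) * B(n,k)^T + beta * C, with the leading
// dimensions set to the padded (ROUNDUP-aligned) row counts of the buffers;
// the commented-out cblas_sgemm inside the function is the CPU equivalent.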
//-----------------------------------------------------------------------------
template<int TC_METHOD>
void perform_run(size_t num_vector, size_t num_field, int num_iterations) {
SAFE_CALL_MPI(MPI_Barrier(MPI_COMM_WORLD));
const double timetotal1 = get_time();
int num_proc = 0;
int proc_num = 0;
SAFE_CALL_MPI(MPI_Comm_rank(MPI_COMM_WORLD, &proc_num));
SAFE_CALL_MPI(MPI_Comm_size(MPI_COMM_WORLD, &num_proc));
// Compute sizes.
// Because of divisibility issues, each proc may have a different number
// of vectors. However for simplicity the GEMM is computed on a padded-up
// size that is the same on each proc.
const size_t base_vector_num_left = (num_vector * proc_num) / num_proc;
const size_t base_vector_num_leftp1 = (num_vector * (proc_num+1)) / num_proc;
const size_t num_vector_local = base_vector_num_leftp1 - base_vector_num_left;
const size_t num_vector_local_up = (num_vector + num_proc - 1) / num_proc;
const size_t num_field_local = num_field;
if (proc_num == 0) {
printf("num_vector %zu num_field %zu num_iterations %i num_proc %i\n",
num_vector, num_field, num_iterations, num_proc);
}
// HIP initializations.
hipStream_t stream;
SAFE_CALL_HIP(hipStreamCreate(&stream));
hipblasHandle_t accelblas_handle;
SAFE_CALL_HIPBLAS(hipblasCreate(&accelblas_handle));
SAFE_CALL_HIPBLAS(hipblasSetStream(accelblas_handle, stream));
// Matrix setup.
typedef TCSelector<TC_METHOD> TCS;
typedef typename TCS::GemmIn_t GemmIn_t;
typedef typename TCS::GemmOut_t GemmOut_t;
const GemmOut_t zero = TCBufTypes<GemmOut_t>::zero();
const GemmOut_t one = TCBufTypes<GemmOut_t>::one();
const size_t m = 2 * num_vector_local_up; // each vec gets 2 matrix rows.
const size_t n = m;
const size_t k = num_field_local;
Matrix<GemmIn_t> tc_buf_left(m, k);
Matrix<GemmIn_t> tc_buf_right(n, k);
Matrix<GemmOut_t> c_buf(m, n);
set_input_matrix(tc_buf_left, base_vector_num_left, one, stream);
c_buf.to_device(stream);
// Loop over steps.
double timegemm = 0;
double flops_local = 0;
size_t hash_local = 0;
const int num_steps = (num_proc + 2) / 2;
const int num_steps_this_proc = num_proc % 2 == 0 && proc_num >= num_proc/2 ?
num_steps - 1 : num_steps;
for (int iteration = 1; iteration <= num_iterations; ++iteration) {
for (int step = 1; step <= num_steps; ++step) {
SAFE_CALL_HIP(hipStreamSynchronize(stream));
SAFE_CALL_MPI(MPI_Barrier(MPI_COMM_WORLD));
const double timetotal2 = get_time();
const double timetotal = timetotal2 - timetotal1;
const bool do_out = proc_num == 0 && (
!(iteration & (iteration-1)) || iteration % 256 == 0 ||
iteration == num_iterations);
if (do_out) {
printf("Iteration %i of %i, step %i of %i, elapsed sec %.3f: setup...",
iteration, num_iterations, step, num_steps, timetotal);
fflush(stdout);
}
const int proc_num_right = (proc_num + step - 1) % num_proc;
const size_t base_vector_num_right =
(num_vector * proc_num_right) / num_proc;
const size_t base_vector_num_rightp1 =
(num_vector * (proc_num_right+1)) / num_proc;
const size_t num_vector_local_right =
base_vector_num_rightp1 - base_vector_num_right;
const bool is_step_active = step <= num_steps_this_proc;
if (is_step_active) {
set_input_matrix(tc_buf_right, base_vector_num_right, one, stream);
} // if is_step_active
// Perform GEMM.
if (do_out) {
printf(" GEMM...");
fflush(stdout);
}
SAFE_CALL_HIP(hipStreamSynchronize(stream));
SAFE_CALL_MPI(MPI_Barrier(MPI_COMM_WORLD));
const double timegemm1 = get_time();
if (is_step_active) {
perform_gemm<TCS, GemmIn_t, GemmOut_t>(accelblas_handle, m, n, k,
tc_buf_left, tc_buf_right, c_buf);
flops_local += 2. * m * n * k;
} // if is_step_active
SAFE_CALL_HIP(hipStreamSynchronize(stream));
SAFE_CALL_MPI(MPI_Barrier(MPI_COMM_WORLD));
const double timegemm2 = get_time();
timegemm += timegemm2 - timegemm1;
// Check.
if (do_out) {
printf(" check...");
fflush(stdout);
}
if (is_step_active) {
c_buf.from_device(stream);
SAFE_CALL_HIP(hipStreamSynchronize(stream));
const int check_freq1 = 89; // spot check, for speed.
const int check_freq2 = 113;
for (size_t c=0; c<m; c+=check_freq1) {
const size_t stride2 = nonzero_stride(c + base_vector_num_right);
for (size_t r=0; r<m; r+=check_freq2) {
const size_t stride1 = nonzero_stride(r + base_vector_num_left);
// WARNING: lcm can be slow; it is not O(1) complexity.
const size_t l = lcm(stride1, stride2);
const size_t value = c_buf.elt(r,c);
ASSERT(value == 1 + (k-1)/l && "Error in computed result.");
}
}
} // if is_step_active
// Compute hash/checksum.
if (is_step_active) {
for (size_t c=0; c<num_vector_local_right; ++c) {
const size_t c_global = c + base_vector_num_right;
for (size_t r=0; r<num_vector_local; ++r) {
const size_t r_global = r + base_vector_num_left;
const bool not_in_upper = step==1 && r >= c;
if (not_in_upper)
continue;
const size_t value = c_buf.elt(r,c);
hash_local += elt_hash(value, r_global, c_global);
//printf("%zu %zu %zu\n", r_global, c_global, value);
}
}
} // if is_step_active
if (do_out) {
printf("\n");
fflush(stdout);
}
} // step
} // for iteration
// Print final results.
double flops = 0;
SAFE_CALL_MPI(MPI_Allreduce(&flops_local, &flops, 1,
MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD));
size_t hash = 0;
SAFE_CALL_MPI(MPI_Allreduce(&hash_local, &hash, 1,
MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD));
SAFE_CALL_HIP(hipStreamSynchronize(stream));
SAFE_CALL_MPI(MPI_Barrier(MPI_COMM_WORLD));
const double timetotal2 = get_time();
const double timetotal = timetotal2 - timetotal1;
if (proc_num == 0) {
printf("TF %.3f GEMM sec %.3f GEMM TF/sec %.3f total sec %.3f hash %zu\n",
flops/1e12, timegemm, flops*1e-12/timegemm, timetotal, hash);
}
// Finish.
SAFE_CALL_HIPBLAS(hipblasDestroy(accelblas_handle));
SAFE_CALL_HIP(hipStreamDestroy(stream));
}
//-----------------------------------------------------------------------------
int main(int argc, char** argv) {
// Initialize MPI.
SAFE_CALL_MPI(MPI_Init(&argc, &argv));
// Parse command line.
size_t num_vector = 0;
size_t num_field = 0;
int num_iterations = 1;
for (int i = 1 ; i < argc; ++i) {
if (strcmp(argv[i], "--num_vector") == 0) {
++i;
ASSERT(i < argc && "Missing value for num_vector.");
num_vector = strtol(argv[i], NULL, 10);
}
if (strcmp(argv[i], "--num_field") == 0) {
++i;
ASSERT(i < argc && "Missing value for num_field.");
num_field = strtol(argv[i], NULL, 10);
}
if (strcmp(argv[i], "--num_iterations") == 0) {
++i;
ASSERT(i < argc && "Missing value for num_iterations.");
num_iterations = atoi(argv[i]);
}
}
ASSERT(num_vector >= 2);
ASSERT(num_field >= 1);
ASSERT(num_iterations >= 1);
perform_run<TC_METHOD_FLOAT32>(num_vector, num_field, num_iterations);
SAFE_CALL_MPI(MPI_Finalize());
return 0;
}
//=============================================================================
|
the_stack
|
namespace hvvr {
uint32_t pixelFormatSize(PixelFormat pixelFormat) {
switch (pixelFormat) {
case PixelFormat::RGBA8_SRGB:
return 4;
case PixelFormat::RGBA16:
return 8;
case PixelFormat::RGBA32F:
return 16;
default:
assert(false);
return 0;
}
}
void Camera_StreamedData::reset(uint32_t tileCount) {
tileCountOccupied = 0;
tileCountEmpty = 0;
tileIndexRemapEmpty.resizeDestructive(tileCount);
tileIndexRemapOccupied.resizeDestructive(tileCount);
tileTriRanges.resizeDestructive(tileCount);
triIndices.resizeDestructive(MAX_TRI_INDICES_TO_INTERSECT);
tileFrusta3D.resizeDestructive(tileCount);
}
GPUCamera::GPUCamera(const Camera* cameraPtr) : streamedIndexCPU(0), streamedIndexGPU(-1), cameraPtr(cameraPtr) {
cutilSafeCall(cudaStreamCreate(&stream));
for (int n = 0; n < frameBuffering; n++) {
cutilSafeCall(cudaEventCreateWithFlags(&streamed[n].gpuDone, cudaEventBlockingSync | cudaEventDisableTiming));
}
}
// TODO: there's no cleanup code for GPUCamera, yet, and it would be a big pain to clean it up to properly support
// the full set of constructors and assignments (especially move variants) given the number of members...
void GPUCamera::initLookupTables(int _MSAARate) {
// getSubsampleUnitOffset needs a compile-time constant for MSAARate
enum { MSAARate = COLOR_MODE_MSAA_RATE };
if (MSAARate != _MSAARate)
fail("MSAARate for lookup table must match compile-time constant\n");
std::uniform_real_distribution<float> uniformRandomDist(0.0f, 1.0f);
std::mt19937 generator;
auto r = std::bind(uniformRandomDist, std::ref(generator));
// lookup table for random lens position
enum { TileCount = DOF_LENS_POS_LOOKUP_TABLE_TILES };
std::vector<vector2> tileSubsampleLensPosData(TILE_SIZE * TileCount * MSAARate);
for (int tile = 0; tile < TileCount; tile++) {
for (int sample = 0; sample < int(TILE_SIZE); sample++) {
float rotation = r() * Tau;
for (int subsample = 0; subsample < MSAARate; subsample++) {
vector2 pos =
getSubsampleUnitOffset<MSAARate>(vector2(0.0f, 0.0f), (subsample * 7 + 7) % MSAARate, rotation);
// tileSubsampleLensPosData[tile * TILE_SIZE * MSAARate + subsample * TILE_SIZE + sample].x =
// uint32_t(floatToHalf(pos.x)) | (uint32_t(floatToHalf(pos.y)) << 16);
tileSubsampleLensPosData[tile * TILE_SIZE * MSAARate + subsample * TILE_SIZE + sample] = pos;
}
}
}
d_tileSubsampleLensPos.resizeDestructive(TILE_SIZE * TileCount * MSAARate);
d_tileSubsampleLensPos.upload(tileSubsampleLensPosData.data());
}
Camera_StreamedData* GPUCamera::streamedDataLock(uint32_t tileCount) {
Camera_StreamedData* rval = streamed + streamedIndexCPU;
cutilSafeCall(cudaEventSynchronize(rval->gpuDone));
streamedIndexCPU = (streamedIndexCPU + 1) % frameBuffering;
rval->reset(tileCount);
return rval;
}
void GPUCamera::streamedDataUnlock() {
streamedIndexGPU = (streamedIndexGPU + 1) % frameBuffering;
Camera_StreamedData* streamSrc = streamed + streamedIndexGPU;
// some things don't have appropriate access patterns for reasonable PCIe streaming perf, so we copy them
local.tileIndexRemapEmpty.resizeDestructive(streamSrc->tileIndexRemapEmpty.size());
local.tileIndexRemapEmpty.uploadAsync(streamSrc->tileIndexRemapEmpty.data(), stream);
local.tileIndexRemapOccupied.resizeDestructive(streamSrc->tileIndexRemapOccupied.size());
local.tileIndexRemapOccupied.uploadAsync(streamSrc->tileIndexRemapOccupied.data(), stream);
cutilFlush(stream);
local.tileTriRanges.resizeDestructive(streamSrc->tileTriRanges.size());
local.tileTriRanges.uploadAsync(streamSrc->tileTriRanges.data(), stream);
local.tileFrusta3D.resizeDestructive(streamSrc->tileFrusta3D.size());
local.tileFrusta3D.uploadAsync(streamSrc->tileFrusta3D.data(), stream);
cutilFlush(stream);
}
void GPUCamera::streamedDataGpuDone() {
cutilSafeCall(cudaEventRecord(streamed[streamedIndexGPU].gpuDone, stream));
cutilFlush(stream);
}
void GPUCamera::setCameraJitter(vector2 jitter) {
frameJitter = jitter;
}
static int getMSAARate(RaycasterOutputFormat outputMode) {
return (outputMode == RaycasterOutputFormat::COLOR_RGBA8) ? COLOR_MODE_MSAA_RATE : 1;
}
static TextureFormat pixelFormatToTextureFormat(PixelFormat format) {
switch (format) {
case PixelFormat::RGBA8_SRGB:
return TextureFormat::r8g8b8a8_unorm_srgb;
case PixelFormat::RGBA16:
return TextureFormat::r16g16b16a16_unorm;
case PixelFormat::RGBA32F:
return TextureFormat::r32g32b32a32_float;
default:
assert(false);
}
return TextureFormat::none;
}
// TODO(anankervis): merge the different functions that duplicate camera resource creation
void GPUCamera::updateConfig(RaycasterOutputFormat _outputMode,
int32_t* sampleRemap,
DirectionalBeam* directionalSamples,
ThinLens _lens,
uint32_t _sampleCount,
uint32_t imageWidth,
uint32_t imageHeight,
uint32_t imageStride,
uint32_t _splitColorSamples) {
splitColorSamples = _splitColorSamples;
// one sample per output pixel, one sample per pentile subpixel, or one sample per R,G,B channel
assert(splitColorSamples == 1 || splitColorSamples == 2 || splitColorSamples == 3);
validSampleCount = imageWidth * imageHeight * splitColorSamples;
d_sampleRemap = GPUBuffer<int32_t>(sampleRemap, sampleRemap + validSampleCount);
sampleCount = _sampleCount;
d_batchSpaceBeams = GPUBuffer<DirectionalBeam>(directionalSamples, directionalSamples + sampleCount);
outputMode = _outputMode;
int msaaRate = getMSAARate(outputMode);
d_gBuffer = GPUBuffer<RaycasterGBufferSubsample>(sampleCount * msaaRate);
PixelFormat outputFormat = outputModeToPixelFormat(outputMode);
TextureFormat textureFormat = pixelFormatToTextureFormat(outputFormat);
auto createImageSizedTexture = [&]() {
return createEmptyTexture(imageWidth, imageHeight, textureFormat, cudaAddressModeClamp, cudaAddressModeClamp);
};
previousResultTexture = createImageSizedTexture();
resultTexture = createImageSizedTexture();
contrastEnhancementSettings.enable = true;
contrastEnhancementSettings.f_e = 1.0f;
contrastEnhancementBuffers.horizontallyFiltered = createImageSizedTexture();
contrastEnhancementBuffers.fullyFiltered = createImageSizedTexture();
auto pixelFormat = outputModeToPixelFormat(outputMode);
d_sampleResults =
GPUBuffer<uint32_t>((sampleCount * pixelFormatSize(pixelFormat) + sizeof(uint32_t) - 1) / sizeof(uint32_t));
resultImage.update(imageWidth, imageHeight, imageStride, pixelFormat);
lens = _lens;
initLookupTables(msaaRate);
}
void GPUCamera::registerPolarFoveatedSamples(const std::vector<vector2ui>& polarRemapToPixel,
float _maxEccentricityRadians,
const EccentricityMap& eMap,
uint32_t samplesPerRing,
uint32_t paddedSampleCount) {
PixelFormat outputFormat = outputModeToPixelFormat(outputMode);
sampleCount = paddedSampleCount;
d_sampleResults = GPUBuffer<uint32_t>((paddedSampleCount * pixelFormatSize(outputFormat) + sizeof(uint32_t) - 1) /
sizeof(uint32_t));
// For temporal filtering
d_tMaxBuffer = GPUBuffer<float>(paddedSampleCount);
eccentricityMap = eMap;
maxEccentricityRadians = _maxEccentricityRadians;
int msaaRate = getMSAARate(outputMode);
size_t totalSubsampleCount = paddedSampleCount * msaaRate;
// Allow us to launch a complete tile
d_gBuffer = GPUBuffer<RaycasterGBufferSubsample>(totalSubsampleCount);
d_polarRemapToPixel = makeGPUBuffer(polarRemapToPixel);
TextureFormat textureFormat = pixelFormatToTextureFormat(outputFormat);
uint32_t ringCount = uint32_t(polarRemapToPixel.size() / samplesPerRing);
auto createFoveatedImage = [&](TextureFormat format, bool linearFilter = true) {
return createEmptyTexture(samplesPerRing, ringCount, format, cudaAddressModeWrap, cudaAddressModeClamp,
linearFilter);
};
polarTextures.raw = createFoveatedImage(textureFormat);
polarTextures.depth = createFoveatedImage(TextureFormat::r32_float, false);
polarTextures.moment1 = createFoveatedImage(TextureFormat::r16g16b16a16_unorm);
polarTextures.moment2 = createFoveatedImage(TextureFormat::r16g16b16a16_unorm);
initLookupTables(msaaRate);
}
bool GPUCamera::bindTexture(GPUContext& gpuContext, ImageResourceDescriptor texture) {
if (resultsResource) {
gpuContext.interopUnmapResources();
cutilSafeCall(cudaGraphicsUnregisterResource(resultsResource));
resultsResource = nullptr;
}
if (texture.memoryType == ImageResourceDescriptor::MemoryType::DX_TEXTURE) {
#if defined(_WIN32)
// cudaGraphicsRegisterFlagsNone is only valid flag as of 7/22/2016
cutilSafeCall(cudaGraphicsD3D11RegisterResource(&resultsResource, (ID3D11Texture2D*)texture.data,
cudaGraphicsRegisterFlagsNone));
#else
assert(false, "Cannot do DirectX interop on non-windows platforms");
#endif
} else if (texture.memoryType == ImageResourceDescriptor::MemoryType::OPENGL_TEXTURE) {
cutilSafeCall(cudaGraphicsGLRegisterImage(&resultsResource, (GLuint)(uint64_t)texture.data, GL_TEXTURE_2D,
cudaGraphicsMapFlagsWriteDiscard));
}
return true;
}
void GPUCamera::copyImageToBoundTexture() {
cudaArray* cuArray;
cutilSafeCall(cudaGraphicsSubResourceGetMappedArray(&cuArray, resultsResource, 0, 0));
size_t srcStride = resultImage.width() * resultImage.bytesPerPixel(); // tightly packed
cutilSafeCall(cudaMemcpy2DToArrayAsync(cuArray, 0, 0, resultImage.data(), srcStride, srcStride,
resultImage.height(), cudaMemcpyDeviceToDevice, stream));
}
void GPUCamera::copyImageToCPU(ImageResourceDescriptor cpuTarget) {
assert(!cpuTarget.isHardwareRenderTarget());
auto pixFormat = outputModeToPixelFormat(outputMode);
resultImage.update(cpuTarget.width, cpuTarget.height, (uint32_t)cpuTarget.stride, pixFormat);
cutilSafeCall(
cudaMemcpyAsync(cpuTarget.data, resultImage.data(), resultImage.sizeInMemory(), cudaMemcpyDeviceToHost, 0));
}
void GPUCamera::intersectShadeResolve(GPUSceneState& sceneState, const matrix4x4& cameraToWorld) {
Camera_StreamedData& streamedData = streamed[streamedIndexGPU];
// prep the scene
sceneState.update();
cutilSafeCall(cudaStreamWaitEvent(stream, sceneState.updateEvent, 0));
// The intersect and resolve kernels assume every thread will map to a valid work item, with valid input and output
// slots. Sample count should be padded to a minimum of CUDA_GROUP_SIZE. In practice, it is padded to BLOCK_SIZE.
assert(sampleCount % CUDA_GROUP_SIZE == 0);
if (streamedData.tileCountEmpty > 0) {
clearEmpty();
}
CameraBeams cameraBeams(*this);
if (streamedData.tileCountOccupied > 0) {
intersect(sceneState, cameraBeams, cameraToWorld);
shadeAndResolve(sceneState, cameraBeams, cameraToWorld);
}
streamedDataGpuDone();
}
} // namespace hvvr
|
the_stack
|
#include <df/optimization/icp.h>
#include <df/optimization/linearSystems.h>
#include <df/util/cudaHelpers.h>
#include <df/util/debugHelpers.h>
#include <df/util/eigenHelpers.h>
#include <df/util/globalTimer.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <Eigen/Core>
namespace df {
template <typename Scalar,
typename CameraModelT,
int DPred,
typename ... DebugArgsT>
__global__ void icpKernel(internal::JacobianAndResidual<Scalar,1,6> * jacobiansAndResiduals,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > liveVertices,
const DeviceTensor2<Eigen::UnalignedVec<Scalar,DPred> > predictedVertices,
const DeviceTensor2<Eigen::UnalignedVec<Scalar,DPred> > predictedNormals,
const CameraModelT cameraModel,
const Sophus::SE3<Scalar> updatedPose,
const Eigen::Matrix<Scalar,6,1> initialPose,
const Eigen::Matrix<Scalar,2,1> depthRange,
const Scalar maxError,
DebugArgsT ... debugArgs) {
typedef Eigen::Matrix<Scalar,DPred,1,Eigen::DontAlign> VecD;
typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2;
static constexpr Scalar border = Scalar(2); // TODO
static constexpr Scalar rayNormDotThreshold = Scalar(0.1); // TODO
const uint x = threadIdx.x + blockIdx.x * blockDim.x;
const uint y = threadIdx.y + blockIdx.y * blockDim.y;
const uint width = liveVertices.dimensionSize(0);
const uint height = liveVertices.dimensionSize(1);
// TODO: template for guaranteed in-bound blocking
if (x < width && y < height) {
// TODO: take care of this with a memset?
jacobiansAndResiduals[x + width*y].J = Eigen::Matrix<Scalar,1,6>::Zero();
jacobiansAndResiduals[x + width*y].r = 0;
const VecD & predictedVertex = predictedVertices(x,y);
const Scalar predictedDepth = predictedVertex(2);
if ((predictedDepth < depthRange(0)) || predictedDepth > depthRange(1)) {
PixelDebugger<DebugArgsT...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,255,0,255),debugArgs...);
return;
}
const Vec3 updatedPredVertex = updatedPose * predictedVertex.template head<3>();
const Vec2 projectedPredVertex = cameraModel.project(updatedPredVertex);
// const Vec2 projectedPredVertex (updatedPredVertex(0)/updatedPredVertex(2)*cameraModel.params()[0] + cameraModel.params()[2],
// updatedPredVertex(1)/updatedPredVertex(2)*cameraModel.params()[1] + cameraModel.params()[3]);
// if ( x > 200 && x < 220 && y > 200 && y < 220) {
// printf("(%d,%d) -> (%f,%f)\n",x,y,projectedPredVertex(0),projectedPredVertex(1));
// }
// TODO: interpolate?
const int u = projectedPredVertex(0) + Scalar(0.5);
const int v = projectedPredVertex(1) + Scalar(0.5);
if ( (u <= border) || (u >= (width-1-border)) || (v <= border) || (v >= (height-1-border)) ) {
PixelDebugger<DebugArgsT...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,0,255,255),debugArgs...);
return;
}
const Vec3 & liveVertex = liveVertices(u,v);
const Scalar liveDepth = liveVertex(2);
if ((liveDepth < depthRange(0)) || (liveDepth > depthRange(1))) {
PixelDebugger<DebugArgsT...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,0,255,255),debugArgs...);
return;
}
// TODO: double-check validity of this method of getting the ray
const Vec3 ray = updatedPredVertex.normalized();
const VecD & predictedNormal = predictedNormals(x,y);
if (-ray.dot(predictedNormal.template head<3>()) < rayNormDotThreshold) {
PixelDebugger<DebugArgsT...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,0,0,255),debugArgs...);
return;
}
const Scalar error = predictedNormal.template head<3>().dot(liveVertex - updatedPredVertex);
const Scalar absError = fabs(error);
if (absError > maxError) {
PixelDebugger<DebugArgsT...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,255,0,255),debugArgs...);
return;
}
const Scalar weightSqrt = Scalar(1) / (liveDepth);
const Eigen::Matrix<Scalar,1,3> dError_dUpdatedPredictedPoint = predictedNormal.template head<3>().transpose();
Eigen::Matrix<Scalar,3,6> dUpdatedPredictedPoint_dUpdate;
dUpdatedPredictedPoint_dUpdate << 1, 0, 0, 0, updatedPredVertex(2), -updatedPredVertex(1),
0, 1, 0, -updatedPredVertex(2), 0, updatedPredVertex(0),
0, 0, 1, updatedPredVertex(1), -updatedPredVertex(0), 0;
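// Illustrative note (added): with the update parameterized as
// [translation; rotation] in se(3), d(p + w x p + t)/d[t; w] = [ I | -[p]_x ]
// evaluated at p = updatedPredVertex, which is the 3x6 matrix assembled above;
// chaining it with the point-to-plane error gradient n^T yields the 1x6 row
// Jacobian stored below.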
jacobiansAndResiduals[x + width*y].J = weightSqrt * dError_dUpdatedPredictedPoint * dUpdatedPredictedPoint_dUpdate;
jacobiansAndResiduals[x + width*y].r = weightSqrt * error;
const uchar gray = min(Scalar(255),255 * absError / maxError );
PixelDebugger<DebugArgsT...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(gray,gray,gray,255),debugArgs...);
}
}
namespace internal {
template <typename Scalar,
typename CameraModelT,
int DPred,
typename ... DebugArgsT>
LinearSystem<Scalar,6> icpIteration(const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & liveVertices,
const DeviceTensor2<Eigen::UnalignedVec<Scalar,DPred> > & predVertices,
const DeviceTensor2<Eigen::UnalignedVec<Scalar,DPred> > & predNormals,
const CameraModelT & cameraModel,
const Sophus::SE3<Scalar> & predictionPose,
const Eigen::Matrix<Scalar,6,1>& initialPose,
const Eigen::Matrix<Scalar,2,1> & depthRange,
const Scalar maxError,
const dim3 grid,
const dim3 block,
DebugArgsT ... debugArgs) {
// TODO: make efficient
static thrust::device_vector<JacobianAndResidual<Scalar,1,6> > jacobiansAndResiduals(liveVertices.count());
GlobalTimer::tick("icpKernel");
cudaFuncSetCacheConfig(icpKernel<Scalar,CameraModelT,DPred>, cudaFuncCachePreferL1);
icpKernel<Scalar><<<grid,block>>>(thrust::raw_pointer_cast(jacobiansAndResiduals.data()),
liveVertices,predVertices,predNormals,
cameraModel,
predictionPose,
initialPose,
depthRange,
maxError,
debugArgs ...);
cudaDeviceSynchronize();
CheckCudaDieOnError();
GlobalTimer::tock("icpKernel");
// static thrust::device_vector<LinearSystem<Scalar,6> > systems(jacobiansAndResiduals.size());
// std::cout << sizeof(LinearSystem<Scalar,6>) << std::endl;
// std::cout << sizeof(LinearSystem2<Scalar,6>) << std::endl;
// std::cout << sizeof(RawVec<Scalar,6*7/2>) << std::endl;
// std::cout << sizeof(RawVec<Scalar,1>) << std::endl;
// std::cout << sizeof(LinearSystem3<Scalar,6>) << std::endl;
// GlobalTimer::tick("transform");
// thrust::transform(jacobiansAndResiduals.begin(),jacobiansAndResiduals.end(),
// systems.begin(),LinearSystemCreationFunctor<Scalar,1,6>());
// cudaDeviceSynchronize();
// CheckCudaDieOnError();
// GlobalTimer::tock("transform");
// GlobalTimer::tick("reduce");
// LinearSystem<Scalar,6> system = thrust::reduce(systems.begin(),systems.end(),LinearSystem<Scalar,6>::zero(),LinearSystemSumFunctor<Scalar,6>());
// cudaDeviceSynchronize();
// GlobalTimer::tock("reduce");
// CheckCudaDieOnError();
// GlobalTimer::tick("transform_reduce");
// LinearSystem2<Scalar,6> system = thrust::transform_reduce(jacobiansAndResiduals.begin(),
// jacobiansAndResiduals.end(),
// LinearSystemCreationFunctor2<Scalar,1,6>(),
// LinearSystem2<Scalar,6>::zero(),
// LinearSystemSumFunctor2<Scalar,6>());
// cudaDeviceSynchronize();
// CheckCudaDieOnError();
// GlobalTimer::tock("transform_reduce");
GlobalTimer::tick("transform_reduce");
LinearSystem<Scalar,6> system = thrust::transform_reduce(jacobiansAndResiduals.begin(),
jacobiansAndResiduals.end(),
LinearSystemCreationFunctor<Scalar,1,6>(),
LinearSystem<Scalar,6>::zero(),
LinearSystemSumFunctor<Scalar,6>());
/*
static constexpr Scalar huberAlpha = Scalar(0.01);
const Scalar totalResidual = thrust::transform_reduce(jacobiansAndResiduals.begin(),
jacobiansAndResiduals.end(),
ResidualFunctorHuber<Scalar,1,6>(huberAlpha),
Scalar(0),
thrust::plus<Scalar>());
std::cout << "icp residual" << totalResidual << std::endl;
GlobalTimer::tick("transform_reduce");
LinearSystem<Scalar,6> system = thrust::transform_reduce(jacobiansAndResiduals.begin(),
jacobiansAndResiduals.end(),
LinearSystemCreationFunctorHuber<Scalar,1,6>(huberAlpha),
LinearSystem<Scalar,6>::zero(),
LinearSystemSumFunctor<Scalar,6>());
*/
cudaDeviceSynchronize();
CheckCudaDieOnError();
GlobalTimer::tock("transform_reduce");
// std::cout << "size: " << sizeof(LinearSystem<Scalar,6>) << std::endl;
// LinearSystem2<Scalar,6> * sysptr = reinterpret_cast<LinearSystem2<Scalar,6> *>(&system);
// return *sysptr;
return system;
}
template LinearSystem<float,6> icpIteration(const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const Poly3CameraModel<float> &,
const Sophus::SE3f &,
const Eigen::Matrix<float,6,1> &,
const Eigen::Vector2f &,
const float,
const dim3, const dim3);
template LinearSystem<float,6> icpIteration(const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const Poly3CameraModel<float> &,
const Sophus::SE3f &,
const Eigen::Matrix<float,6,1> &,
const Eigen::Vector2f &,
const float,
const dim3, const dim3,
DeviceTensor2<Eigen::UnalignedVec4<uchar> >);
template LinearSystem<float,6> icpIteration(const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const DeviceTensor2<Eigen::UnalignedVec4<float> > &,
const DeviceTensor2<Eigen::UnalignedVec4<float> > &,
const Poly3CameraModel<float> &,
const Sophus::SE3f &,
const Eigen::Matrix<float,6,1> &,
const Eigen::Vector2f &,
const float,
const dim3, const dim3);
template LinearSystem<float,6> icpIteration(const DeviceTensor2<Eigen::UnalignedVec3<float> > &,
const DeviceTensor2<Eigen::UnalignedVec4<float> > &,
const DeviceTensor2<Eigen::UnalignedVec4<float> > &,
const Poly3CameraModel<float> &,
const Sophus::SE3f &,
const Eigen::Matrix<float,6,1> &,
const Eigen::Vector2f &,
const float,
const dim3, const dim3,
DeviceTensor2<Eigen::UnalignedVec4<uchar> >);
} // namespace internal
} // namespace df
|
the_stack
|
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
namespace cudf {
namespace lists {
namespace detail {
namespace {
template <typename Type>
struct has_negative_nans_fn {
column_device_view const d_entries;
bool const has_nulls;
has_negative_nans_fn(column_device_view const d_entries, bool const has_nulls)
: d_entries(d_entries), has_nulls(has_nulls)
{
}
__device__ bool operator()(size_type idx) const noexcept
{
if (has_nulls && d_entries.is_null_nocheck(idx)) { return false; }
auto const val = d_entries.element<Type>(idx);
return std::isnan(val) && std::signbit(val); // std::signbit(x) == true if x is negative
}
};
/**
* @brief A structure to be used along with type_dispatcher to check if a column has any
* negative NaN value.
*
 * This functor is used to check whether negative NaN values need to be replaced. It is necessary
 * because when calling `lists::detail::sort_lists`, the negative NaN and positive NaN values (if
 * both exist) are separated to the two ends of the output column. This happens because
 * `lists::detail::sort_lists` internally calls `cub::DeviceSegmentedRadixSort`, which performs
 * sorting by comparing bits of the input numbers. Since negative and positive NaN have
 * different bit representations, they may not end up adjacent to each other after sorting.
*/
struct has_negative_nans_dispatch {
template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr>
bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept
{
auto const d_entries = column_device_view::create(lists_entries, stream);
return thrust::count_if(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lists_entries.size()),
detail::has_negative_nans_fn<Type>{*d_entries, lists_entries.has_nulls()});
}
template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr>
bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const
{
// Recursively check negative NaN on the children columns.
return std::any_of(
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lists_entries.num_children()),
[structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) {
auto const col = structs_view.get_sliced_child(child_idx);
return type_dispatcher(col.type(), detail::has_negative_nans_dispatch{}, col, stream);
});
}
template <typename Type,
std::enable_if_t<!cuda::std::is_floating_point_v<Type> &&
!std::is_same_v<Type, cudf::struct_view>>* = nullptr>
bool operator()(column_view const&, rmm::cuda_stream_view) const
{
// Columns of non floating-point data will never contain NaN.
return false;
}
};
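// Illustrative note (added): the concern above is concrete at the bit level. For
// float, a canonical quiet NaN is 0x7fc00000 and its negated form is 0xffc00000;
// a radix sort orders keys by their (sign-adjusted) bit patterns, so -NaN lands
// before every finite value and +NaN after, leaving the two NaN flavors at
// opposite ends of each sorted list unless -NaN is first rewritten to +NaN.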
template <typename Type>
struct replace_negative_nans_fn {
__device__ Type operator()(Type val) const noexcept
{
return std::isnan(val) ? std::numeric_limits<Type>::quiet_NaN() : val;
}
};
/**
* @brief A structure to be used along with type_dispatcher to replace -NaN by NaN for all rows
* in a floating-point data column.
*/
struct replace_negative_nans_dispatch {
template <typename Type,
std::enable_if_t<!cuda::std::is_floating_point_v<Type> &&
!std::is_same_v<Type, cudf::struct_view>>* = nullptr>
std::unique_ptr<column> operator()(column_view const& lists_entries,
rmm::cuda_stream_view) const noexcept
{
// For non floating point type and non struct, just return a copy of the input.
return std::make_unique<column>(lists_entries);
}
template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr>
std::unique_ptr<column> operator()(column_view const& lists_entries,
rmm::cuda_stream_view stream) const noexcept
{
auto new_entries = cudf::detail::allocate_like(
lists_entries, lists_entries.size(), cudf::mask_allocation_policy::NEVER, stream);
new_entries->set_null_mask(cudf::detail::copy_bitmask(lists_entries, stream),
lists_entries.null_count());
// Replace all negative NaN values.
thrust::transform(rmm::exec_policy(stream),
lists_entries.template begin<Type>(),
lists_entries.template end<Type>(),
new_entries->mutable_view().template begin<Type>(),
detail::replace_negative_nans_fn<Type>{});
return new_entries;
}
template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr>
std::unique_ptr<column> operator()(column_view const& lists_entries,
rmm::cuda_stream_view stream) const noexcept
{
std::vector<std::unique_ptr<cudf::column>> output_struct_members;
std::transform(
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lists_entries.num_children()),
std::back_inserter(output_struct_members),
[structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) {
auto const col = structs_view.get_sliced_child(child_idx);
return type_dispatcher(col.type(), detail::replace_negative_nans_dispatch{}, col, stream);
});
return cudf::make_structs_column(lists_entries.size(),
std::move(output_struct_members),
lists_entries.null_count(),
cudf::detail::copy_bitmask(lists_entries, stream),
stream);
}
};
/**
* @brief Generate a 0-based offset column for a lists column.
*
* Given a lists_column_view, which may have a non-zero offset, generate a new column containing
* 0-based list offsets. This is done by subtracting each of the input list offset by the first
* offset.
*
* @code{.pseudo}
* Given a list column having offsets = { 3, 7, 9, 13 },
* then output_offsets = { 0, 4, 6, 10 }
* @endcode
*
* @param lists_column The input lists column.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device resource used to allocate memory.
* @return A column containing 0-based list offsets.
*/
std::unique_ptr<column> generate_clean_offsets(lists_column_view const& lists_column,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto output_offsets = make_numeric_column(data_type{type_to_id<offset_type>()},
lists_column.size() + 1,
mask_state::UNALLOCATED,
stream,
mr);
thrust::transform(
rmm::exec_policy(stream),
lists_column.offsets_begin(),
lists_column.offsets_end(),
output_offsets->mutable_view().begin<offset_type>(),
[first = lists_column.offsets_begin()] __device__(auto offset) { return offset - *first; });
return output_offsets;
}
/**
* @brief Transform a given lists column to a new lists column in which all the list entries holding
* -NaN value are replaced by (positive) NaN.
*
 * Replacing -NaN by NaN is necessary before sorting (individual) lists because the sorting API
 * uses radix sort, which compares bits of the numbers and thus may separate -NaN and NaN to the
 * two ends of the result column.
*/
std::unique_ptr<column> replace_negative_nans_entries(column_view const& lists_entries,
lists_column_view const& lists_column,
rmm::cuda_stream_view stream)
{
// We need to copy the offsets column of the input lists_column. Since the input lists_column may
// be sliced, we need to generate clean offsets (i.e., offsets starting from zero).
auto new_offsets =
generate_clean_offsets(lists_column, stream, rmm::mr::get_current_device_resource());
auto new_entries = type_dispatcher(
lists_entries.type(), detail::replace_negative_nans_dispatch{}, lists_entries, stream);
return make_lists_column(
lists_column.size(),
std::move(new_offsets),
std::move(new_entries),
lists_column.null_count(),
cudf::detail::copy_bitmask(
lists_column.parent(), stream, rmm::mr::get_current_device_resource()));
}
/**
* @brief Populate list offsets for all list entries.
*
* Given an `offsets` column_view containing offsets of a lists column and a number of all list
* entries in the column, generate an array that maps from each list entry to the offset of the list
* containing that entry.
*
* @code{.pseudo}
* num_entries = 10, offsets = { 0, 4, 6, 10 }
* output = { 1, 1, 1, 1, 2, 2, 3, 3, 3, 3 }
* @endcode
*
* @param num_entries The number of list entries.
* @param offsets Column view to the list offsets.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return A column containing entry list offsets.
*/
std::unique_ptr<column> generate_entry_list_offsets(size_type num_entries,
column_view const& offsets,
rmm::cuda_stream_view stream)
{
auto entry_list_offsets = make_numeric_column(offsets.type(),
num_entries,
mask_state::UNALLOCATED,
stream,
rmm::mr::get_current_device_resource());
thrust::upper_bound(rmm::exec_policy(stream),
offsets.begin<offset_type>(),
offsets.end<offset_type>(),
thrust::make_counting_iterator<offset_type>(0),
thrust::make_counting_iterator<offset_type>(num_entries),
entry_list_offsets->mutable_view().begin<offset_type>());
return entry_list_offsets;
}
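// Illustrative note (added): thrust::upper_bound looks up each entry index in the
// offsets array and writes the position of the first offset strictly greater than
// it, reproducing the example above: with offsets = {0, 4, 6, 10}, entry 0 maps
// to 1, entry 5 maps to 2, and entry 9 maps to 3.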
/**
* @brief Performs an equality comparison between two entries in a lists column.
*
 * Two entries that are NOT in the same list of the lists column are always considered different.
 * If they are from the same list and their type is not floating point,
* this functor will return the same comparison result as `cudf::element_equality_comparator`.
*
* For floating-point types, entries holding NaN value can be considered as different values or the
* same value depending on the `nans_equal` parameter.
*
* @tparam Type The data type of entries
 * The `nans_equal` constructor argument specifies whether NaN entries should be considered equal
 * (only applicable for floating-point data columns).
*/
template <class Type>
struct column_row_comparator_fn {
offset_type const* const list_offsets;
column_device_view const lhs;
column_device_view const rhs;
null_equality const nulls_equal;
bool const has_nulls;
bool const nans_equal;
__host__ __device__ column_row_comparator_fn(offset_type const* const list_offsets,
column_device_view const& lhs,
column_device_view const& rhs,
null_equality const nulls_equal,
bool const has_nulls,
bool const nans_equal)
: list_offsets(list_offsets),
lhs(lhs),
rhs(rhs),
nulls_equal(nulls_equal),
has_nulls(has_nulls),
nans_equal(nans_equal)
{
}
template <typename T, std::enable_if_t<!cuda::std::is_floating_point_v<T>>* = nullptr>
bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept
{
return lhs_val == rhs_val;
}
template <typename T, std::enable_if_t<cuda::std::is_floating_point_v<T>>* = nullptr>
bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept
{
// If both element(i) and element(j) are NaNs and nans are considered as equal value then this
// comparison will return `true`. This is the desired behavior in Pandas.
if (nans_equal && std::isnan(lhs_val) && std::isnan(rhs_val)) { return true; }
// If nans are considered as NOT equal, even both element(i) and element(j) are NaNs this
// comparison will still return `false`. This is the desired behavior in Apache Spark.
return lhs_val == rhs_val;
}
bool __device__ operator()(size_type i, size_type j) const noexcept
{
// Two entries are not considered for equality if they belong to different lists.
if (list_offsets[i] != list_offsets[j]) { return false; }
if (has_nulls) {
bool const lhs_is_null{lhs.nullable() && lhs.is_null_nocheck(i)};
bool const rhs_is_null{rhs.nullable() && rhs.is_null_nocheck(j)};
if (lhs_is_null && rhs_is_null) {
return nulls_equal == null_equality::EQUAL;
} else if (lhs_is_null != rhs_is_null) {
return false;
}
}
return compare<Type>(lhs.element<Type>(i), rhs.element<Type>(j));
}
};
/**
* @brief Struct used in type_dispatcher for comparing two entries in a lists column.
*/
struct column_row_comparator_dispatch {
offset_type const* const list_offsets;
column_device_view const lhs;
column_device_view const rhs;
null_equality const nulls_equal;
bool const has_nulls;
bool const nans_equal;
__device__ column_row_comparator_dispatch(offset_type const* const list_offsets,
column_device_view const& lhs,
column_device_view const& rhs,
null_equality const nulls_equal,
bool const has_nulls,
bool const nans_equal)
: list_offsets(list_offsets),
lhs(lhs),
rhs(rhs),
nulls_equal(nulls_equal),
has_nulls(has_nulls),
nans_equal(nans_equal)
{
}
template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr>
bool __device__ operator()(size_type i, size_type j) const noexcept
{
return column_row_comparator_fn<Type>{
list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}(i, j);
}
template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>()>* = nullptr>
bool operator()(size_type, size_type) const
{
CUDF_FAIL(
"column_row_comparator_dispatch cannot operate on types that are not equally comparable.");
}
};
/**
* @brief Performs an equality comparison between rows of two tables using `column_row_comparator`
* to compare rows of their corresponding columns.
*/
struct table_row_comparator_fn {
offset_type const* const list_offsets;
table_device_view const lhs;
table_device_view const rhs;
null_equality const nulls_equal;
bool const has_nulls;
bool const nans_equal;
table_row_comparator_fn(offset_type const* const list_offsets,
table_device_view const& lhs,
table_device_view const& rhs,
null_equality const nulls_equal,
bool const has_nulls,
bool const nans_equal)
: list_offsets(list_offsets),
lhs(lhs),
rhs(rhs),
nulls_equal(nulls_equal),
has_nulls(has_nulls),
nans_equal(nans_equal)
{
}
bool __device__ operator()(size_type i, size_type j) const noexcept
{
auto column_comp = [=](column_device_view const& lhs, column_device_view const& rhs) {
return type_dispatcher(
lhs.type(),
column_row_comparator_dispatch{list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal},
i,
j);
};
return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), column_comp);
}
};
/**
* @brief Struct used in type_dispatcher for copying indices of the list entries ignoring
* duplicates.
*/
struct get_unique_entries_dispatch {
template <class Type,
std::enable_if_t<!cudf::is_equality_comparable<Type, Type>() &&
!std::is_same_v<Type, cudf::struct_view>>* = nullptr>
offset_type* operator()(offset_type const*,
column_view const&,
size_type,
offset_type*,
null_equality,
nan_equality,
bool,
rmm::cuda_stream_view) const
{
CUDF_FAIL(
"`get_unique_entries_dispatch` cannot operate on types that are not equality comparable.");
}
template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr>
offset_type* operator()(offset_type const* list_offsets,
column_view const& all_lists_entries,
size_type num_entries,
offset_type* output_begin,
null_equality nulls_equal,
nan_equality nans_equal,
bool has_nulls,
rmm::cuda_stream_view stream) const noexcept
{
auto const d_view = column_device_view::create(all_lists_entries, stream);
auto const comp = column_row_comparator_fn<Type>{list_offsets,
*d_view,
*d_view,
nulls_equal,
has_nulls,
nans_equal == nan_equality::ALL_EQUAL};
return thrust::unique_copy(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_entries),
output_begin,
comp);
}
template <class Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr>
offset_type* operator()(offset_type const* list_offsets,
column_view const& all_lists_entries,
size_type num_entries,
offset_type* output_begin,
null_equality nulls_equal,
nan_equality nans_equal,
bool has_nulls,
rmm::cuda_stream_view stream) const noexcept
{
auto const entries_tview = table_view{{all_lists_entries}};
auto const flatten_nullability = has_nested_nulls(entries_tview)
? structs::detail::column_nullability::FORCE
: structs::detail::column_nullability::MATCH_INCOMING;
auto const entries_flattened = cudf::structs::detail::flatten_nested_columns(
entries_tview, {order::ASCENDING}, {null_order::AFTER}, flatten_nullability);
auto const d_view = table_device_view::create(entries_flattened, stream);
auto const comp = table_row_comparator_fn{list_offsets,
*d_view,
*d_view,
nulls_equal,
has_nulls,
nans_equal == nan_equality::ALL_EQUAL};
return thrust::unique_copy(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_entries),
output_begin,
comp);
}
};
/**
* @brief Copy list entries and entry list offsets ignoring duplicates.
*
* Given an array of all entries flattened from a list column and an array that maps each entry to
* the offset of the list containing that entry, those entries and list offsets are copied into
* new arrays such that the duplicated entries within each list will be ignored.
*
* @param all_lists_entries The input array containing all list entries.
* @param entries_list_offsets A map from list entries to their corresponding list offsets.
* @param nulls_equal Flag to specify whether null entries should be considered equal.
* @param nans_equal Flag to specify whether NaN entries should be considered equal
* (only applicable for floating-point data column).
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device resource used to allocate memory.
* @return A pair of columns, the first one contains unique list entries and the second one
* contains their corresponding list offsets.
*/
std::vector<std::unique_ptr<column>> get_unique_entries_and_list_offsets(
column_view const& all_lists_entries,
column_view const& entries_list_offsets,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_entries = all_lists_entries.size();
// Allocate memory to store the indices of the unique entries.
auto unique_indices = rmm::device_uvector<offset_type>(num_entries, stream);
auto const output_begin = unique_indices.begin();
auto const output_end = type_dispatcher(all_lists_entries.type(),
get_unique_entries_dispatch{},
entries_list_offsets.begin<offset_type>(),
all_lists_entries,
num_entries,
output_begin,
nulls_equal,
nans_equal,
all_lists_entries.has_nulls(),
stream);
auto gather_map = column_view(data_type{type_to_id<offset_type>()},
static_cast<size_type>(thrust::distance(output_begin, output_end)),
unique_indices.data());
// Collect unique entries and entry list offsets.
// The new null_count and bitmask of the unique entries will also be generated
// by the gather function.
return cudf::detail::gather(table_view{{all_lists_entries, entries_list_offsets}},
gather_map,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr)
->release();
}
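// A minimal host-side sketch (illustration only, not libcudf code) of the idea used above:
// because entries are sorted within each list, thrust::unique_copy over entry indices with a
// comparator that requires "same owning list AND equal value" keeps exactly one copy of each
// value per list. The data below is an assumed toy example.
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/unique.h>
#include <cstdio>
#include <vector>
int main()
{
  std::vector<int> list_offsets = {1, 1, 1, 2, 2, 3};  // entry -> owning list (already grouped)
  std::vector<int> values       = {1, 1, 2, 5, 5, 5};  // entries, sorted within each list
  std::vector<int> unique_idx(values.size());
  auto const* off = list_offsets.data();
  auto const* val = values.data();
  auto const end  = thrust::unique_copy(thrust::host,
                                        thrust::make_counting_iterator(0),
                                        thrust::make_counting_iterator((int)values.size()),
                                        unique_idx.begin(),
                                        [off, val](int i, int j) {
                                          return off[i] == off[j] && val[i] == val[j];
                                        });
  for (auto it = unique_idx.begin(); it != end; ++it) std::printf("%d ", val[*it]);
  std::printf("\n");  // prints "1 2 5 5": list 2 and list 3 each keep their own copy of 5
}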
/**
* @brief Generate list offsets from entry offsets.
*
* Generate an array of list offsets for the final result lists column. The list offsets of the
* original lists column are also taken into account to make sure the result lists column will have
* the same empty list rows (if any) as in the original lists column.
*
* @param num_entries The number of unique entries after removing duplicates.
* @param entries_list_offsets The mapping from list entries to their list offsets.
* @param original_offsets The list offsets of the original lists column, which will also be used to
* store the new list offsets.
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
void generate_offsets(size_type num_entries,
column_view const& entries_list_offsets,
mutable_column_view const& original_offsets,
rmm::cuda_stream_view stream)
{
// Firstly, generate temporary list offsets for the unique entries, ignoring empty lists (if any).
// If entries_list_offsets = {1, 1, 1, 1, 2, 3, 3, 3, 4, 4 }, num_entries = 10,
// then new_offsets = { 0, 4, 5, 8, 10 }.
auto const new_offsets = allocate_like(
original_offsets, mask_allocation_policy::NEVER, rmm::mr::get_current_device_resource());
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator<offset_type>(0),
thrust::make_counting_iterator<offset_type>(num_entries + 1),
new_offsets->mutable_view().begin<offset_type>(),
[num_entries, offsets_ptr = entries_list_offsets.begin<offset_type>()] __device__(
auto i) -> bool {
return i == 0 || i == num_entries || offsets_ptr[i] != offsets_ptr[i - 1];
});
// Generate a prefix sum of number of empty lists, storing inplace to the original lists
// offsets.
// If the original list offsets is { 0, 0, 5, 5, 6, 6 } (there are 2 empty lists),
// and new_offsets = { 0, 4, 6 }, then output = { 0, 1, 1, 2, 2, 3}.
auto const iter_trans_begin = cudf::detail::make_counting_transform_iterator(
0, [offsets = original_offsets.begin<offset_type>()] __device__(auto i) {
return (i > 0 && offsets[i] == offsets[i - 1]) ? 1 : 0;
});
thrust::inclusive_scan(rmm::exec_policy(stream),
iter_trans_begin,
iter_trans_begin + original_offsets.size(),
original_offsets.begin<offset_type>());
// Generate the final list offsets.
// If the original list offsets are { 0, 0, 5, 5, 6, 6 }, the new offsets are { 0, 4, 6 },
// and the prefix sums of empty lists are { 0, 1, 1, 2, 2, 3 },
// then output = { 0, 0, 4, 4, 6, 6 }.
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<offset_type>(0),
thrust::make_counting_iterator<offset_type>(original_offsets.size()),
original_offsets.begin<offset_type>(),
[prefix_sum_empty_lists = original_offsets.begin<offset_type>(),
offsets = new_offsets->view().begin<offset_type>()] __device__(auto i) {
return offsets[i - prefix_sum_empty_lists[i]];
});
}
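// A small host-side sketch (illustration only, not libcudf code) reproducing the three steps of
// generate_offsets() with the numbers used in the comments above. The real implementation runs
// the last two steps in place on the original offsets column; separate arrays are used here for
// clarity.
#include <cstdio>
#include <vector>
int main()
{
  // Step 1: entry-to-list offsets of the unique entries -> new offsets (empty lists ignored)
  std::vector<int> entry_list_offsets = {1, 1, 1, 1, 2, 3, 3, 3, 4, 4};
  int const num_entries = (int)entry_list_offsets.size();
  std::vector<int> new_offsets;
  for (int i = 0; i <= num_entries; ++i)
    if (i == 0 || i == num_entries || entry_list_offsets[i] != entry_list_offsets[i - 1])
      new_offsets.push_back(i);                        // -> {0, 4, 5, 8, 10}
  // Step 2: prefix sum of the number of empty lists in the original offsets
  std::vector<int> original = {0, 0, 5, 5, 6, 6};
  std::vector<int> prefix(original.size());
  int run = 0;
  for (size_t i = 0; i < original.size(); ++i) {
    run += (i > 0 && original[i] == original[i - 1]) ? 1 : 0;
    prefix[i] = run;                                   // -> {0, 1, 1, 2, 2, 3}
  }
  // Step 3: final offsets, re-inserting the empty lists (smaller example from the comments)
  std::vector<int> small_new = {0, 4, 6};
  for (size_t i = 0; i < original.size(); ++i)
    std::printf("%d ", small_new[i - prefix[i]]);      // prints "0 0 4 4 6 6"
  std::printf("\n");
}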
} // anonymous namespace
/**
* @copydoc cudf::lists::drop_list_duplicates
*
* @param stream CUDA stream used for device memory operations and kernel launches.
*/
std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (lists_column.is_empty()) return cudf::empty_like(lists_column.parent());
if (auto const child_type = lists_column.child().type();
cudf::is_nested(child_type) && child_type.id() != type_id::STRUCT) {
CUDF_FAIL("Nested types other than STRUCT are not supported in `drop_list_duplicates`.");
}
// Flatten all entries (depth = 1) of the lists column.
auto const lists_entries = lists_column.get_sliced_child(stream);
// sorted_lists will store the results of the original lists after calling segmented_sort.
auto const sorted_lists = [&]() {
// If nans_equal == ALL_EQUAL and the column contains lists of floating-point data type,
// we need to replace -NaN by NaN before sorting.
auto const replace_negative_nan =
nans_equal == nan_equality::ALL_EQUAL &&
type_dispatcher(
lists_entries.type(), detail::has_negative_nans_dispatch{}, lists_entries, stream);
if (replace_negative_nan) {
auto const new_lists_column =
detail::replace_negative_nans_entries(lists_entries, lists_column, stream);
return detail::sort_lists(
lists_column_view(new_lists_column->view()), order::ASCENDING, null_order::AFTER, stream);
} else {
return detail::sort_lists(lists_column, order::ASCENDING, null_order::AFTER, stream);
}
}();
auto const sorted_lists_entries =
lists_column_view(sorted_lists->view()).get_sliced_child(stream);
// Generate a 0-based offset column.
auto lists_offsets = detail::generate_clean_offsets(lists_column, stream, mr);
// Generate a mapping from list entries to offsets of the lists containing those entries.
auto const entries_list_offsets =
detail::generate_entry_list_offsets(sorted_lists_entries.size(), lists_offsets->view(), stream);
// Copy non-duplicated entries (along with their list offsets) to new arrays.
auto unique_entries_and_list_offsets = detail::get_unique_entries_and_list_offsets(
sorted_lists_entries, entries_list_offsets->view(), nulls_equal, nans_equal, stream, mr);
// Generate offsets for the new lists column.
detail::generate_offsets(unique_entries_and_list_offsets.front()->size(),
unique_entries_and_list_offsets.back()->view(),
lists_offsets->mutable_view(),
stream);
// Construct a new lists column without duplicated entries.
// Reuse the null_count and bitmask of the lists_column: those are the null information for
// the list elements (rows).
// For the entries of those lists (rows), their null_count and bitmask were generated separately
// during the step `get_unique_entries_and_list_offsets` above.
return make_lists_column(lists_column.size(),
std::move(lists_offsets),
std::move(unique_entries_and_list_offsets.front()),
lists_column.null_count(),
cudf::detail::copy_bitmask(lists_column.parent(), stream, mr));
}
} // namespace detail
/**
* @copydoc cudf::lists::drop_list_duplicates
*/
std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column,
null_equality nulls_equal,
nan_equality nans_equal,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::drop_list_duplicates(
lists_column, nulls_equal, nans_equal, rmm::cuda_stream_default, mr);
}
} // namespace lists
} // namespace cudf
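// A minimal usage sketch of the public API defined above. Header paths, the default memory
// resource argument, and the cudf::test column wrapper are assumptions taken from a typical
// libcudf build and are shown for illustration only.
#include <cudf/lists/drop_list_duplicates.hpp>
#include <cudf_test/column_wrapper.hpp>
void drop_list_duplicates_example()
{
  using LCW = cudf::test::lists_column_wrapper<int32_t>;
  LCW input{{1, 1, 2, 3, 3}, {5, 5, 5}};  // two lists containing duplicates
  auto const result = cudf::lists::drop_list_duplicates(cudf::lists_column_view{input},
                                                        cudf::null_equality::EQUAL,
                                                        cudf::nan_equality::ALL_EQUAL);
  // result is expected to hold {{1, 2, 3}, {5}}; entry order follows the sorted lists.
}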
|
the_stack
|
int visited[5000];
int count[5000];
int visited_index = 0;
int nstate;
State matchstate = { Match }; /* matching state */
List l1, l2;
static int listid;
void addstate(List*, State*);
void step(List*, int, List*);
/* Compute initial state list */
List*
startlist(State *start, List *l)
{
l->n = 0;
listid++;
addstate(l, start);
return l;
}
/* Check whether state list contains a match. */
int
ismatch(List *l)
{
int i;
for(i=0; i<l->n; i++)
if(l->s[i]->c == Match)
return 1;
return 0;
}
/* Add s to l, following unlabeled arrows. */
void
addstate(List *l, State *s)
{
// lastlist check is present to ensure that if
// multiple states point to this state, then only
// one instance of the state is added to the list
if(s == NULL || s->lastlist == listid)
return;
s->lastlist = listid;
if(s->c == Split){
/* follow unlabeled arrows */
addstate(l, s->out);
addstate(l, s->out1);
return;
}
l->s[l->n++] = s;
}
/*
* Step the NFA from the states in clist
* past the character c,
* to create next NFA state set nlist.
*/
void
step(List *clist, int c, List *nlist)
{
int i;
State *s;
listid++;
nlist->n = 0;
for(i=0; i<clist->n; i++){
s = clist->s[i];
if(s->c == c || s->c == Any)
addstate(nlist, s->out);
}
}
/* Run NFA to determine whether it matches s. */
int
match(State *start, char *s)
{
int c;
List *clist, *nlist, *t;
clist = startlist(start, &l1);
nlist = &l2;
for(; *s; s++){
c = *s & 0xFF;
step(clist, c, nlist);
t = clist; clist = nlist; nlist = t; // swap clist, nlist
// check for a match in the middle of the string
if (ismatch(clist))
return 1;
}
return ismatch(clist);
}
/* Check for a string match at all possible start positions */
int
anyMatch(State *start, char *s) {
int isMatch = match(start, s);
int index = 0;
int len = strlen(s);
while (!isMatch && index <= len) {
isMatch = match(start, s + index);
index ++;
}
return isMatch;
}
/* Allocate and initialize State */
State*
state(int c, State *out, State *out1)
{
State *s;
s = (State *) malloc(sizeof *s);
s->id = ++nstate;
s->lastlist = 0;
s->c = c;
s->out = out;
s->out1 = out1;
// device pointer of itself
// serves no real purpose other than to help transfer the NFA over
s->dev = NULL;
s->free = STATE_INIT;
return s;
}
/* Initialize Frag struct. */
Frag
frag(State *start, Ptrlist *out)
{
Frag n = { start, out };
return n;
}
/* Create singleton list containing just outp. */
Ptrlist*
list1(State **outp)
{
Ptrlist *l;
l = (Ptrlist*)outp;
l->next = NULL;
return l;
}
/* Patch the list of states at out to point to start. */
void
patch(Ptrlist *l, State *s)
{
Ptrlist *next;
for(; l; l=next){
next = l->next;
l->s = s;
}
}
/* Join the two lists l1 and l2, returning the combination. */
Ptrlist*
append(Ptrlist *l1, Ptrlist *l2)
{
Ptrlist *oldl1;
oldl1 = l1;
while(l1->next)
l1 = l1->next;
l1->next = l2;
return oldl1;
}
/*
* Convert postfix regular expression to NFA.
* Return start state.
*/
State*
post2nfa(char *postfix)
{
char *p;
Frag stack[1000], *stackp, e1, e2, e;
State *s;
// fprintf(stderr, "postfix: %s\n", postfix);
if(postfix == NULL)
return NULL;
#define push(s) *stackp++ = s
#define pop() *--stackp
stackp = stack;
for(p=postfix; *p; p++){
switch(*p){
case ANY: /* any (.) */
s = state(Any, NULL, NULL);
push(frag(s, list1(&s->out)));
break;
default:
s = state(*p, NULL, NULL);
push(frag(s, list1(&s->out)));
break;
case CONCATENATE: /* catenate */
e2 = pop();
e1 = pop();
patch(e1.out, e2.start);
push(frag(e1.start, e2.out));
break;
case ALTERNATE: /* alternate (|)*/
e2 = pop();
e1 = pop();
s = state(Split, e1.start, e2.start);
push(frag(s, append(e1.out, e2.out)));
break;
case QUESTION: /* zero or one (?)*/
e = pop();
s = state(Split, e.start, NULL);
push(frag(s, append(e.out, list1(&s->out1))));
break;
case STAR: /* zero or more (*)*/
e = pop();
s = state(Split, e.start, NULL);
patch(e.out, s);
push(frag(s, list1(&s->out1)));
break;
case PLUS: /* one or more (+)*/
e = pop();
s = state(Split, e.start, NULL);
patch(e.out, s);
push(frag(e.start, list1(&s->out1)));
break;
}
}
e = pop();
if(stackp != stack)
return NULL;
patch(e.out, &matchstate);
return e.start;
#undef pop
#undef push
}
/*
* Convert infix regexp re to postfix notation.
* Insert ESC (or 0x1b) as explicit concatenation operator.
* Cheesy parser, return static buffer.
*/
char*
re2post(char *re)
{
int nalt, natom;
static char buf[8000];
char *dst;
struct {
int nalt;
int natom;
} paren[100], *p;
p = paren;
dst = buf;
nalt = 0;
natom = 0;
if(strlen(re) >= sizeof buf/2)
return NULL;
for(; *re; re++){
switch(*re){
case PAREN_OPEN: // (
if(natom > 1){
--natom;
*dst++ = CONCATENATE;
}
if(p >= paren+100)
return NULL;
p->nalt = nalt;
p->natom = natom;
p++;
nalt = 0;
natom = 0;
break;
case ALTERNATE: // |
if(natom == 0)
return NULL;
while(--natom > 0)
*dst++ = CONCATENATE;
nalt++;
break;
case PAREN_CLOSE: // )
if(p == paren)
return NULL;
if(natom == 0)
return NULL;
while(--natom > 0)
*dst++ = CONCATENATE;
for(; nalt > 0; nalt--)
*dst++ = ALTERNATE;
--p;
nalt = p->nalt;
natom = p->natom;
natom++;
break;
case STAR: // *
case PLUS: // +
case QUESTION: // ?
if(natom == 0)
return NULL;
*dst++ = *re;
break;
default:
if(natom > 1){
--natom;
*dst++ = CONCATENATE;
}
*dst++ = *re;
natom++;
break;
}
}
if(p != paren)
return NULL;
while(--natom > 0)
*dst++ = CONCATENATE;
for(; nalt > 0; nalt--)
*dst++ = ALTERNATE;
*dst = 0;
return buf;
}
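/*
 * A minimal usage sketch tying the pieces above together. It assumes the operator byte values
 * (PAREN_OPEN, ALTERNATE, STAR, ..., and the ESC concatenation marker) are defined in a header
 * that is not shown in this excerpt, and that the input regex already uses those byte values.
 */
void
example_match(char *regex, char *text)
{
	char *post = re2post(regex);        /* infix -> postfix, concatenation made explicit */
	if(post == NULL){
		fprintf(stderr, "bad regex\n");
		return;
	}
	State *start = post2nfa(post);      /* postfix -> NFA (Thompson's construction) */
	if(start == NULL){
		fprintf(stderr, "bad postfix expression\n");
		return;
	}
	printf("%s\n", anyMatch(start, text) ? "match" : "no match");
}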
void readFile (char *fileName, char ***lines, int *lineIndex) {
FILE *fp = fopen(fileName, "r");
char *source = NULL;
if (fp != NULL) {
/* Go to the end of the file. */
if (fseek(fp, 0L, SEEK_END) == 0) {
/* Get the size of the file. */
long bufsize = ftell(fp);
if (bufsize == -1) { /* Error */ }
/* Allocate our buffer to that size. */
source = (char *) malloc(sizeof(char) * (bufsize + 1));
/* Go back to the start of the file. */
if (fseek(fp, 0L, SEEK_SET) != 0) { /* Error */ }
/* Read the entire file into memory. */
size_t newLen = fread(source, sizeof(char), bufsize, fp);
if (newLen == 0) {
fputs("Error reading file", stderr);
} else {
source[newLen] = '\0'; /* Just to be safe. */
}
}
fclose(fp);
}
*lines = (char **) malloc (sizeof(char *) * 1);
**lines = source;
*lineIndex = 1;
}
void usage(const char* progname) {
printf("Usage: %s [options] [pattern] \n", progname);
printf("Program Options:\n");
printf(" -v Visualize the NFA then exit\n");
printf(" -p View postfix expression then exit\n");
printf(" -s View simplified expression then exit\n");
printf(" -t Print timing data\n");
printf(" -f <FILE> --file Input file to be matched\n");
printf(" -r <FILE> --regex Input file with regexs\n");
printf(" -? This message\n");
printf("[pattern] required only if -r or --regex is not used\n");
}
void parseCmdLine(int argc, char **argv, int *visualize, int *postfix, int *time, int *simplified, char **fileName, char **regexFile) {
if (argc < 3) {
usage(argv[0]);
exit(EXIT_SUCCESS);
}
int opt;
static struct option long_options[] = {
{"help", no_argument, 0, '?'},
{"postfix", no_argument, 0, 'p'},
{"simplified", no_argument, 0, 's'},
{"visualize", no_argument, 0, 'v'},
{"file", required_argument, 0, 'f'},
{"regex", required_argument, 0, 'r'},
{"time", no_argument, 0, 't'},
{0 ,0, 0, 0}
};
*visualize = 0;
*postfix = 0;
*time = 0;
*simplified = 0;
while ((opt = getopt_long_only(argc, argv, "tvpsf:r:?", long_options, NULL)) != EOF) {
switch (opt) {
case 'v':
*visualize = 1;
break;
case 'p':
*postfix = 1;
break;
case 'f':
*fileName = optarg;
break;
case 'r':
*regexFile = optarg;
break;
case 't':
*time = 1;
break;
case 's':
*simplified = 1;
break;
default:
usage(argv[0]);
exit(EXIT_SUCCESS);
}
}
}
int hasSeen(State * start, int * index) {
int i;
for (i = 0; i < 5000; i++) {
if (visited[i] == start->id) {
*index = i;
return 0;
}
}
return 1;
}
void visualize_nfa_help(State * start) {
int index;
if (start == NULL) {
return;
}
if (hasSeen(start, &index) == 0) {
if (count[index] > 0) {
return;
}
}
count[start->id]++;
visited[start->id] = start->id;
char data[10];
if (start->c == Match) {
strcpy(data, "Match");
}
else if (start->c == Split) {
strcpy(data, "Split");
}
else if (start->c == Any) {
strcpy(data, "Any");
}
else {
sprintf(data, "Char %c", start->c);
}
int outId, outId1;
outId = (start->out == NULL) ? -1 : start->out->id;
outId1 = (start->out1 == NULL) ? -1 : start->out1->id;
printf("{ \"id\": \"%d\", \"data\":\"%s\", \"out\":\"%d\", \"out1\":\"%d\" \n},", start->id, data, outId, outId1);
visualize_nfa_help(start->out);
visualize_nfa_help(start->out1);
}
void visualize_nfa(State * start) {
memset(visited, 0, 5000*(sizeof(int)));
memset(count, 0, 5000*(sizeof(int)));
printf("[");
visualize_nfa_help(start);
printf("]\n");
}
double gettime()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + tv.tv_usec / 1000000.0;
}
|
the_stack
|
#include "CUFLU.h"
#if ( MODEL == HYDRO && defined DUAL_ENERGY )
// internal functions
#if ( DUAL_ENERGY == DE_ENPY && defined __CUDACC__ )
GPU_DEVICE
static real Hydro_DensPres2Entropy( const real Dens, const real Pres, const real Gamma_m1 );
GPU_DEVICE
static real Hydro_DensEntropy2Pres( const real Dens, const real Enpy, const real Gamma_m1,
const bool CheckMinPres, const real MinPres );
#endif
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_DualEnergyFix
// Description : Correct the internal and total energies using the dual-energy formalism
//
// Note : 1. Invoked by Hydro_FullStepUpdate(), InterpolateGhostZon(), ...
// 2. A floor value "MinPres" is applied to the corrected pressure if CheckMinPres is on
// 3. A floor value "TINY_NUMBER" is applied to the input entropy as well
// 4. Call-by-reference for "Etot, Enpy, and DE_Status"
// 5. Fluid variables returned by this function are guaranteed to be consistent with each other
// --> They must satisfy "entropy = pressure / density^(Gamma-1)", where pressure is calculated
// by (Etot - Ekin - Emag)*(Gamma-1.0)
// --> It doesn't matter whether we use entropy to correct Eint or vice versa, and it also holds even when
// the floor value is applied to pressure
//
// Parameter : Dens : Mass density
// MomX/Y/Z : Momentum density
// Etot : Total energy density
// Enpy : Entropy
// DE_Status : Assigned to (DE_UPDATED_BY_ETOT / DE_UPDATED_BY_DUAL / DE_UPDATED_BY_MIN_PRES)
// to indicate whether this cell is updated by the total energy, dual energy variable,
// or pressure floor (MinPres)
// Gamma_m1 : Adiabatic index - 1.0
// _Gamma_m1 : 1.0/Gamma_m1
// CheckMinPres : Return Hydro_CheckMinPres()
// --> In some cases we actually want to check if pressure becomes unphysical,
// for which we don't want to enable this option
// MinPres : Minimum allowed pressure
// DualEnergySwitch : if ( Eint/(Ekin+Emag) < DualEnergySwitch ) ==> correct Eint and Etot
// else ==> correct Enpy
// Emag : Magnetic energy density (0.5*B^2) --> for MHD only
//
// Return : Etot, Enpy, DE_Status
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
void Hydro_DualEnergyFix( const real Dens, const real MomX, const real MomY, const real MomZ,
real &Etot, real &Enpy, char &DE_Status, const real Gamma_m1, const real _Gamma_m1,
const bool CheckMinPres, const real MinPres, const real DualEnergySwitch,
const real Emag )
{
const bool CheckMinPres_No = false;
const bool CheckMinEint_No = false;
// apply entropy floor
Enpy = FMAX( Enpy, TINY_NUMBER );
// calculate energies
// --> note that here Eint can even be negative due to numerical errors
// --> Enth (i.e., non-thermal energy) includes both kinetic and magnetic energies
real Enth, Eint, Pres;
Eint = Hydro_Con2Eint( Dens, MomX, MomY, MomZ, Etot, CheckMinEint_No, NULL_REAL, Emag );
Enth = Etot - Eint;
// determine whether or not to use the dual-energy variable (entropy or internal energy) to correct the total energy density
if ( Eint/Enth < DualEnergySwitch )
{
// correct total energy
// --> we will apply pressure floor later
# if ( DUAL_ENERGY == DE_ENPY )
Pres = Hydro_DensEntropy2Pres( Dens, Enpy, Gamma_m1, CheckMinPres_No, NULL_REAL );
Eint = Pres*_Gamma_m1;
# elif ( DUAL_ENERGY == DE_EINT )
# error : DE_EINT is NOT supported yet !!
# endif
Etot = Enth + Eint;
DE_Status = DE_UPDATED_BY_DUAL;
}
else
{
// correct entropy
Pres = Eint*Gamma_m1;
Enpy = Hydro_DensPres2Entropy( Dens, Pres, Gamma_m1 );
DE_Status = DE_UPDATED_BY_ETOT;
} // if ( Eint/Enth < DualEnergySwitch ) ... else ...
// apply pressure floor
if ( CheckMinPres && Pres < MinPres )
{
Pres = MinPres;
Eint = Pres*_Gamma_m1;
// ensure that both energy and entropy are consistent with the pressure floor
Etot = Enth + Eint;
Enpy = Hydro_DensPres2Entropy( Dens, Pres, Gamma_m1 );
DE_Status = DE_UPDATED_BY_MIN_PRES;
}
} // FUNCTION : Hydro_DualEnergyFix
#if ( DUAL_ENERGY == DE_ENPY )
// Hydro_Con2Entropy() is used by CPU only
#ifndef __CUDACC__
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_Con2Entropy
// Description : Evaluate the gas entropy from the input fluid variables
// --> Here entropy is defined as "pressure / density^(Gamma-1)" (i.e., entropy per volume)
//
// Note : 1. Used by the dual-energy formalism
// 2. Invoked by Hydro_Init_ByFunction_AssignData(), Gra_Close(), Init_ByFile(), ...
// 3. Currently this function does NOT apply pressure floor when calling Hydro_Con2Pres()
// --> However, note that Hydro_DensPres2Entropy() does apply a floor value (TINY_NUMBER) for entropy
//
// Parameter : Dens : Mass density
// MomX/Y/Z : Momentum density
// Engy : Total energy density
// Emag : Magnetic energy density (0.5*B^2) --> for MHD only
// EoS_DensEint2Pres : EoS routine to compute the gas pressure
// EoS_AuxArray_* : Auxiliary arrays for EoS_DensEint2Pres()
// EoS_Table : EoS tables
//
// Return : Enpy
//-------------------------------------------------------------------------------------------------------
real Hydro_Con2Entropy( const real Dens, const real MomX, const real MomY, const real MomZ, const real Engy,
const real Emag, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX] )
{
// currently this function does NOT apply pressure floor when calling Hydro_Con2Pres()
const bool CheckMinPres_No = false;
real Pres, Enpy;
// calculate pressure and convert it to entropy
// --> note that DE_ENPY only works with EOS_GAMMA, which does not involve passive scalars
Pres = Hydro_Con2Pres( Dens, MomX, MomY, MomZ, Engy, NULL, CheckMinPres_No, NULL_REAL, Emag,
EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL );
Enpy = Hydro_DensPres2Entropy( Dens, Pres, EoS_AuxArray_Flt[1] );
return Enpy;
} // FUNCTION : Hydro_Con2Entropy
#endif // ifndef __CUDACC__
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_DensPres2Entropy
// Description : Evaluate the gas entropy from the input density and pressure
// --> Here entropy is defined as "pressure / density^(Gamma-1)" (i.e., entropy per volume)
//
// Note : 1. Used by the dual-energy formalism
// 2. Invoked by Hydro_Con2Entropy() and Hydro_DualEnergyFix()
// --> This function is invoked by both CPU and GPU codes
// 3. A floor value (TINY_NUMBER) is applied to the returned value
//
// Parameter : Dens : Mass density
// Pres : Pressure
// Gamma_m1 : Adiabatic index - 1.0
//
// Return : Enpy
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
real Hydro_DensPres2Entropy( const real Dens, const real Pres, const real Gamma_m1 )
{
real Enpy;
// calculate entropy
Enpy = Pres*POW( Dens, -Gamma_m1 );
// apply a floor value
Enpy = FMAX( Enpy, TINY_NUMBER );
return Enpy;
} // FUNCTION : Hydro_DensPres2Entropy
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_DensEntropy2Pres
// Description : Evaluate the gas pressure from the input density and entropy
// --> Here entropy is defined as "pressure / density^(Gamma-1)" (i.e., entropy per volume)
//
// Note : 1. Used by the dual-energy formalism
// 2. Invoked by Hydro_DualEnergyFix(), Flu_Close(), Hydro_Aux_Check_Negative(), and Flu_FixUp()
// --> This function is invoked by both CPU and GPU codes
// 3. A floor value "MinPres" is applied to the returned pressure if CheckMinPres is on
//
// Parameter : Dens : Mass density
// Enpy : Entropy (per volume)
// Gamma_m1 : Adiabatic index - 1.0
// CheckMinPres : Return Hydro_CheckMinPres()
// --> In some cases we actually want to check if pressure becomes unphysical,
// for which we don't want to enable this option
// MinPres : Minimum allowed pressure
//
// Return : Pres
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
real Hydro_DensEntropy2Pres( const real Dens, const real Enpy, const real Gamma_m1,
const bool CheckMinPres, const real MinPres )
{
real Pres;
// calculate pressure
Pres = Enpy*POW( Dens, Gamma_m1 );
// apply a floor value
if ( CheckMinPres ) Pres = Hydro_CheckMinPres( Pres, MinPres );
return Pres;
} // FUNCTION : Hydro_DensEntropy2Pres
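// A minimal consistency check (illustrative sketch only; it assumes a CPU build where GPU_DEVICE
// expands to nothing so the two converters above can be called from host code). It verifies that
// the round trip entropy -> pressure recovers the input pressure, i.e. s = P/rho^(Gamma-1) and
// P = s*rho^(Gamma-1) are exact inverses up to floating-point round-off, which is the consistency
// requirement stated in note 5 of Hydro_DualEnergyFix()
static void Hydro_DualEnergy_RoundTripCheck()
{
   const real Dens     = (real)2.0;
   const real Pres     = (real)3.0;
   const real Gamma_m1 = (real)( 5.0/3.0 - 1.0 );
   const real Enpy  = Hydro_DensPres2Entropy( Dens, Pres, Gamma_m1 );   // = Pres*Dens^(-Gamma_m1)
   const real Pres2 = Hydro_DensEntropy2Pres( Dens, Enpy, Gamma_m1, false, (real)0.0 );
   // Pres2 equals Pres up to round-off
   (void)Pres2;
} // FUNCTION : Hydro_DualEnergy_RoundTripCheck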
#endif // #if ( DUAL_ENERGY == DE_ENPY )
#endif // #if ( MODEL == HYDRO && defined DUAL_ENERGY )
#endif // #ifndef __CUFLU_DUALENERGY__
|
the_stack
|
__device__ __forceinline__ float atomicMin(float *address, float val)
{
int ret = __float_as_int(*address);
while(val < __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
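// A minimal sketch (assumed standalone test, not part of the kernels below) showing how the
// CAS-based float atomicMin above can be used to reduce an array to its minimum value.
__global__ void array_min_kernel(const float* data, int n, float* result)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // result must be initialized to +INFINITY (or FLT_MAX) by the host before the launch
  if (i < n) atomicMin(result, data[i]);
}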
__device__ void min_dim_1(
float8 cCache[8],
_VOLATILE_ float valSmem[16][128+4],
_VOLATILE_ float idxSmem[16][128+4],
float* values,
ll_t* indices,
int gStartx, int gStarty, int tid, int bid,
int M, int N
){
int vx = tid % 16;
int vy = tid / 16;
#pragma unroll
for (int ni = 0; ni < 8; ni++){
// initialize with first value
float value;
if (likely(gStarty + vy*8 < M)){
value = cCache[0].val[ni];
} else {
value = INFINITY;
}
float index = vy*8;
// Reduce within thread
#pragma unroll
for (int mi = 1; mi < 8; mi++){
int iM = gStarty + vy*8 + mi;
float temp;
if (likely(iM < M)){
temp = cCache[mi].val[ni];
} else {
temp = INFINITY;
}
if (temp < value){
value = temp;
index = vy*8 + mi;
}
}
// Store reduced values and indices in shared memory
valSmem[vy][vx * 8 + ni] = value;
idxSmem[vy][vx * 8 + ni] = index;
}
__syncthreads();
// first 128 threads do block wise reduction
if (tid < 128){
float value = valSmem[0][tid];
float index = idxSmem[0][tid];
#pragma unroll
for (int i=1; i<16; i++){
float temp = valSmem[i][tid];
if (temp < value){
value = temp;
index = idxSmem[i][tid];
}
}
// global reduction
int iN = gStartx + tid;
if (iN < N){
atomicMin(values + (bid) * N + iN, value);
if (value <= values[(bid) * N + iN]){
indices[(bid) * N + iN] = ll_t(index) + gStarty;
}
}
/*
*/
}
}
__device__ void min_dim_2(
float8 cCache[8],
_VOLATILE_ float valSmem[16][128+4],
_VOLATILE_ float idxSmem[16][128+4],
float* values,
ll_t* indices,
int gStartx, int gStarty, int tid, int bid,
int M, int N
){
int vx = tid % 16;
int vy = tid / 16;
#pragma unroll
for (int mi = 0; mi < 8; mi++){
// initialize with first value
float value;
if (likely(gStartx + vx*8 < N)){
value = cCache[mi].val[0];
} else {
value = INFINITY;
}
float index = vx*8;
// Reduce within thread
#pragma unroll
for (int ni = 1; ni < 8; ni++){
int iN = gStartx + vx*8 + ni;
float temp;
if (likely(iN < N)){
temp = cCache[mi].val[ni];
} else {
temp = INFINITY;
}
if (temp < value){
value = temp;
index = vx*8 + ni;
}
}
// Store reduced values and indices in shared memory
valSmem[vx][vy * 8 + mi] = value;
idxSmem[vx][vy * 8 + mi] = index;
}
__syncthreads();
// first 128 threads do block-wise reduction
if (tid < 128){
float value = valSmem[0][tid];
float index = idxSmem[0][tid];
#pragma unroll
for (int i = 1; i < 16; i++){
float temp = valSmem[i][tid];
if (temp < value){
value = temp;
index = idxSmem[i][tid];
}
}
// global reduction
int iM = gStarty + tid;
if (iM < M){
atomicMin(values + (bid) * M + iM, value);
if (value <= values[(bid) * M + iM]){
indices[(bid) * M + iM] = ll_t(index) + gStartx;
}
}
}
}
extern "C"
__global__ void min_bmm_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ values,
ll_t* __restrict__ indices,
int M, int N, int K, int DIM
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
#pragma unroll
load_ab_tn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_tn(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_tn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// Reduce along DIM
if (DIM == 1){
min_dim_1(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
} else if (DIM == 2){
min_dim_2(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
}
}
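// A hedged host-side sketch of a launch configuration consistent with the indexing above
// (_PN_/_PM_ block grouping, 128x128 output tiles, 256 threads per block, each thread
// accumulating an 8x8 sub-tile). The caller that normally launches this kernel is not part of
// this excerpt, so treat the grid shape below as an assumption.
void launch_min_bmm_tn(const float* A, const float* B, float* values, ll_t* indices,
                       int batch, int M, int N, int K, int DIM, cudaStream_t stream)
{
  int const groups_x = (N + 128 * _PN_ - 1) / (128 * _PN_);   // block groups along N
  int const groups_y = (M + 128 * _PM_ - 1) / (128 * _PM_);   // block groups along M
  dim3 const grid(_PN_ * _PM_,              // blockIdx.x indexes blocks within a group
                  groups_x * groups_y,      // blockIdx.y indexes the group
                  batch);                   // blockIdx.z indexes the batch
  dim3 const block(256);
  min_bmm_tn<<<grid, block, 0, stream>>>(A, B, values, indices, M, N, K, DIM);
}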
extern "C"
__global__ void min_bmm_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ values,
ll_t* __restrict__ indices,
int M, int N, int K, int DIM
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
#pragma unroll
load_ab_nt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_nt(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_nt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// Reduce along DIM
if (DIM == 1){
min_dim_1(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
} else if (DIM == 2){
min_dim_2(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
}
}
extern "C"
__global__ void min_bmm_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ values,
ll_t* __restrict__ indices,
int M, int N, int K, int DIM
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
load_ab_nn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_nn(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_nn(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// Reduce along DIM
if (DIM == 1){
min_dim_1(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
} else if (DIM == 2){
min_dim_2(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
}
}
extern "C"
__global__ void min_bmm_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ values,
ll_t* __restrict__ indices,
int M, int N, int K, int DIM
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ _VOLATILE_ float aSmem[16][128+4];
__shared__ _VOLATILE_ float bSmem[16][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
#pragma unroll
load_ab_tt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, 0,
M, N, K
);
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
buffer2smem_16_tt(
aSmem, bSmem,
aBuffer1, aBuffer2,
bBuffer1, bBuffer2
);
if (likely(itr < nIt - 1)){
load_ab_tt(
A, B,
aBuffer1, aBuffer2, bBuffer1, bBuffer2,
bid, gStartx, gStarty, gStartk + 16,
M, N, K
);
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
thread_matmul_16_v3(aSmem, bSmem, cCache, vx, vy);
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// Reduce along DIM
if (DIM == 1){
min_dim_1(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
} else if (DIM == 2){
min_dim_2(
cCache, aSmem, bSmem, values, indices,
gStartx, gStarty, tid, bid, M, N);
}
}
|
the_stack
|
#include <cuda.h>
#include <cuda_runtime.h>
namespace{
__device__ __forceinline__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__float_as_int(fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ __forceinline__ double atomicMin(double* address, double val)
{
unsigned long long int* address_as_i = (unsigned long long int*) address;
unsigned long long int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__double_as_longlong(fmin(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_face_frontside(const scalar_t *face) {
return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]);
}
template <typename scalar_t> struct point
{
public:
scalar_t x;
scalar_t y;
__host__ __device__ scalar_t dot(point<scalar_t> p)
{
return this->x * p.x + this->y * p.y;
};
__host__ __device__ point<scalar_t> operator-(point<scalar_t>& p)
{
point<scalar_t> np;
np.x = this->x - p.x;
np.y = this->y - p.y;
return np;
};
__host__ __device__ point<scalar_t> operator+(point<scalar_t>& p)
{
point<scalar_t> np;
np.x = this->x + p.x;
np.y = this->y + p.y;
return np;
};
__host__ __device__ point<scalar_t> operator*(scalar_t s)
{
point<scalar_t> np;
np.x = s * this->x;
np.y = s * this->y;
return np;
};
};
template <typename scalar_t>
__device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) {
return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0;
}
template <typename scalar_t>
__device__ __forceinline__ void barycentric_weight(scalar_t *w, point<scalar_t> p, point<scalar_t> p0, point<scalar_t> p1, point<scalar_t> p2) {
// vectors
point<scalar_t> v0, v1, v2;
scalar_t s = p.dot(p);
v0 = p2 - p0;
v1 = p1 - p0;
v2 = p - p0;
// dot products
scalar_t dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0)
scalar_t dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1)
scalar_t dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2)
scalar_t dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1)
scalar_t dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2)
// barycentric coordinates
scalar_t inverDeno;
if(dot00*dot11 - dot01*dot01 == 0)
inverDeno = 0;
else
inverDeno = 1/(dot00*dot11 - dot01*dot01);
scalar_t u = (dot11*dot02 - dot01*dot12)*inverDeno;
scalar_t v = (dot00*dot12 - dot01*dot02)*inverDeno;
// weight
w[0] = 1 - u - v;
w[1] = v;
w[2] = u;
}
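// A small illustrative kernel (assumed test harness, not part of the extension) that evaluates
// the barycentric weights of one pixel against one triangle. For a point inside the triangle the
// weights are non-negative and sum to 1, which is exactly what check_pixel_inside() tests.
template <typename scalar_t>
__global__ void barycentric_demo_kernel(scalar_t* w_out)
{
  point<scalar_t> p0, p1, p2, p;
  p0.x = 0; p0.y = 0;
  p1.x = 4; p1.y = 0;
  p2.x = 0; p2.y = 4;
  p.x  = 1; p.y  = 1;                        // interior point
  barycentric_weight(w_out, p, p0, p1, p2);  // expected weights: {0.5, 0.25, 0.25}
}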
template <typename scalar_t>
__global__ void forward_rasterize_cuda_kernel(
const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3]
scalar_t* depth_buffer,
int* triangle_buffer,
scalar_t* baryw_buffer,
int batch_size, int h, int w,
int ntri) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * ntri) {
return;
}
int bn = i/ntri;
const scalar_t* face = &face_vertices[i * 9];
scalar_t bw[3];
point<scalar_t> p0, p1, p2, p;
p0.x = face[0]; p0.y=face[1];
p1.x = face[3]; p1.y=face[4];
p2.x = face[6]; p2.y=face[7];
int x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
int x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
int y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
int y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
for(int y = y_min; y <= y_max; y++) //h
{
for(int x = x_min; x <= x_max; x++) //w
{
p.x = x; p.y = y;
barycentric_weight(bw, p, p0, p1, p2);
if(((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) && check_face_frontside(face))
// if((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0))
{
// const
scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]);
// printf("%f %f %f \n", (float)zp, (float)face[2], (float)bw[2]);
atomicMin(&depth_buffer[bn*h*w + y*w + x], zp);
if(depth_buffer[bn*h*w + y*w + x] == zp)
{
// depth_min = zp;
// atomic long long for two int
// scalar_t tri_ind = i%ntri;
// atomicMin(&depth_buffer[bn*h*w + y*w + x], zp);
triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri);
for(int k=0; k<3; k++){
baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k];
}
// buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth;
}
}
}
}
}
template <typename scalar_t>
__global__ void forward_rasterize_colors_cuda_kernel(
const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3]
const scalar_t* __restrict__ face_colors, //[bz, nf, 3, 3]
scalar_t* depth_buffer,
int* triangle_buffer,
scalar_t* images,
int batch_size, int h, int w,
int ntri) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * ntri) {
return;
}
int bn = i/ntri;
const scalar_t* face = &face_vertices[i * 9];
const scalar_t* color = &face_colors[i * 9];
scalar_t bw[3];
point<scalar_t> p0, p1, p2, p;
p0.x = face[0]; p0.y=face[1];
p1.x = face[3]; p1.y=face[4];
p2.x = face[6]; p2.y=face[7];
scalar_t cl[3][3];
for (int num = 0; num < 3; num++) {
for (int dim = 0; dim < 3; dim++) {
cl[num][dim] = color[3 * num + dim]; //[3p,3rgb]
}
}
int x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
int x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
int y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
int y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
for(int y = y_min; y <= y_max; y++) //h
{
for(int x = x_min; x <= x_max; x++) //w
{
p.x = x; p.y = y;
barycentric_weight(bw, p, p0, p1, p2);
if(((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0)) && check_face_frontside(face))
// if((bw[2] >= 0) && (bw[1] >= 0) && (bw[0]>0))
{
scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]);
atomicMin(&depth_buffer[bn*h*w + y*w + x], zp);
if(depth_buffer[bn*h*w + y*w + x] == zp)
{
// depth_min = zp;
// atomic long long for two int
// scalar_t tri_ind = i%ntri;
// atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp);
// atomicMin(&depth_buffer[bn*h*w + y*w + x], zp);
triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri);
for(int k=0; k<3; k++){
// baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k];
images[bn*h*w*3 + y*w*3 + x*3 + k] = bw[0]*cl[0][k] + bw[1]*cl[1][k] + bw[2]*cl[2][k];
}
// buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth;
}
}
}
}
}
}
std::vector<at::Tensor> forward_rasterize_cuda(
at::Tensor face_vertices,
at::Tensor depth_buffer,
at::Tensor triangle_buffer,
at::Tensor baryw_buffer,
int h,
int w){
const auto batch_size = face_vertices.size(0);
const auto ntri = face_vertices.size(1);
// print(channel_size)
const int threads = 512;
const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda1", ([&] {
forward_rasterize_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
face_vertices.data<scalar_t>(),
depth_buffer.data<scalar_t>(),
triangle_buffer.data<int>(),
baryw_buffer.data<scalar_t>(),
batch_size, h, w,
ntri);
}));
AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda2", ([&] {
forward_rasterize_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
face_vertices.data<scalar_t>(),
depth_buffer.data<scalar_t>(),
triangle_buffer.data<int>(),
baryw_buffer.data<scalar_t>(),
batch_size, h, w,
ntri);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_rasterize_cuda_kernel: %s\n", cudaGetErrorString(err));
return {depth_buffer, triangle_buffer, baryw_buffer};
}
std::vector<at::Tensor> forward_rasterize_colors_cuda(
at::Tensor face_vertices,
at::Tensor face_colors,
at::Tensor depth_buffer,
at::Tensor triangle_buffer,
at::Tensor images,
int h,
int w){
const auto batch_size = face_vertices.size(0);
const auto ntri = face_vertices.size(1);
// print(channel_size)
const int threads = 512;
const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1);
//initial
AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] {
forward_rasterize_colors_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
face_vertices.data<scalar_t>(),
face_colors.data<scalar_t>(),
depth_buffer.data<scalar_t>(),
triangle_buffer.data<int>(),
images.data<scalar_t>(),
batch_size, h, w,
ntri);
}));
AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] {
forward_rasterize_colors_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
face_vertices.data<scalar_t>(),
face_colors.data<scalar_t>(),
depth_buffer.data<scalar_t>(),
triangle_buffer.data<int>(),
images.data<scalar_t>(),
batch_size, h, w,
ntri);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_rasterize_cuda_kernel: %s\n", cudaGetErrorString(err));
return {depth_buffer, triangle_buffer, images};
}
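// A hedged usage sketch (buffer shapes and initial values are assumptions; the wrapper that
// normally prepares these tensors is not part of this excerpt). Because the kernels reduce the
// depth buffer with atomicMin, it must be pre-filled with a large value before the call.
std::vector<at::Tensor> rasterize_example(at::Tensor face_vertices /* [bz, nf, 3, 3] */,
                                          int h, int w)
{
  auto const bz  = face_vertices.size(0);
  auto opts_f    = face_vertices.options();
  auto opts_i    = opts_f.dtype(at::kInt);
  auto depth     = at::full({bz, h, w}, 1e6, opts_f);   // large init value for atomicMin
  auto triangles = at::full({bz, h, w}, -1, opts_i);
  auto baryw     = at::zeros({bz, h, w, 3}, opts_f);
  return forward_rasterize_cuda(face_vertices, depth, triangles, baryw, h, w);
}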
|
the_stack
|
#include "flowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void FlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimensioon of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1);
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimensioon of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
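// A small host-side reference sketch (illustration only, independent of the tensors above) of
// what the two kernels compute for a single-channel flow field: each source pixel splats its
// negated flow onto the four pixels surrounding its forward-warped position while counting the
// hits, then the accumulated flow is divided by the per-pixel count. Output arrays are assumed
// to be zero-initialized.
static void flow_projection_reference(const float* fx, const float* fy,
                                      float* out_fx, float* out_fy, float* count,
                                      int h, int w)
{
    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w; ++x) {
            float const x2 = x + fx[y * w + x];
            float const y2 = y + fy[y * w + x];
            if (x2 < 0.0f || y2 < 0.0f || x2 > (float)(w - 1) || y2 > (float)(h - 1)) continue;
            int const xl = (int)x2, yt = (int)y2;
            int const xr = min(xl + 1, w - 1), yb = min(yt + 1, h - 1);
            int const nb[4] = {yt * w + xl, yt * w + xr, yb * w + xl, yb * w + xr};
            for (int k = 0; k < 4; ++k) {
                out_fx[nb[k]] += -fx[y * w + x];
                out_fy[nb[k]] += -fy[y * w + x];
                count[nb[k]]  += 1.0f;
            }
        }
    for (int i = 0; i < h * w; ++i)
        if (count[i] > 0.0f) { out_fx[i] /= count[i]; out_fy[i] /= count[i]; }
}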
template <typename scalar_t>
__global__ void FlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimensioon of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions,0/90/180/270, until finding at least one
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, found no neighbor vectors available\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
const scalar_t* __restrict__ count,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
}
}
return ;
}
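// Note on the backward pass above: the forward projection splats the negated
// flow into the four cells around (w_i + fx, h_i + fy) and later divides by the
// per-cell count, so the gradient with respect to each flow component gathers
// -gradoutput/count from those same four cells; the dependence of the splat
// position itself on the flow is not differentiated, matching the forward
// discretisation.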
int FlowProjection_gpu_forward_kernel(
cudaStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjection_gpu_forward_kernelfunc", ([&] {
FlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>(
nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in FlowProjection_gpu_forward_kernelfunc: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjectionAveraging_kernelfunc", ([&] {
FlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>(
nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(cudaGetLastError());
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in FlowProjectionAveraging_kernelfunc: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowFillhole_kernelfunc", ([&] {
FlowFillhole_kernelfunc<<<grid,block,0,stream>>>(
nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in FlowFillhole_kernelfunc: %s\n", cudaGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
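// Hypothetical usage sketch (not part of the original sources): how a host
// wrapper might derive the stride arguments for contiguous NCHW tensors and
// call FlowProjection_gpu_forward_kernel. The wrapper name, the tensor shapes,
// and the use of at::zeros / at::zeros_like / getCurrentCUDAStream are
// illustrative assumptions only, so the block is kept disabled.
#if 0
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

int flow_projection_forward_sketch(at::Tensor& input1,  // B x 2 x H x W flow
                                   int fillhole)
{
    at::Tensor count  = at::zeros({input1.size(0), 1, input1.size(2),
                                   input1.size(3)}, input1.options());
    at::Tensor output = at::zeros_like(input1);

    const int batch    = (int)input1.size(0);
    const int channel  = (int)input1.size(1);     // expected to be 2
    const int h        = (int)input1.size(2);
    const int w        = (int)input1.size(3);
    const int nElement = (int)output.numel();

    // Strides of contiguous NCHW tensors, measured in elements.
    const int input1_b_stride = channel * h * w;
    const int input1_c_stride = h * w;
    const int input1_h_stride = w;
    const int input1_w_stride = 1;
    const int count_b_stride  = h * w;            // count has a single channel
    const int count_c_stride  = h * w;
    const int count_h_stride  = w;
    const int count_w_stride  = 1;

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    return FlowProjection_gpu_forward_kernel(
        stream, nElement, w, h, channel, batch, fillhole,
        input1_b_stride, input1_c_stride, input1_h_stride, input1_w_stride,
        count_b_stride, count_c_stride, count_h_stride, count_w_stride,
        input1, count, output);
}
#endif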
int FlowProjection_gpu_backward_kernel(
cudaStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& gradoutput,
at::Tensor& gradinput1
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "FlowProjection_gpu_backward_kernelfunc", ([&] {
FlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>(
nElement, // total number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),
count.data<scalar_t>(),
gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in FlowProjection_gpu_backward_kernelfunc: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
}
// ============================ next source file ============================
///////////////////////////////////////////////////////////////////////////////////
#ifndef CUDA_VECTOR_H
#define CUDA_VECTOR_H
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
#define __LDG_PTR "l"
#else
#define __LDG_PTR "r"
#endif
#include "cuda_helper.h"
//typedef __device_builtin__ struct ulong16 ulong16;
typedef struct __align__(32) uint8
{
unsigned int s0, s1, s2, s3, s4, s5, s6, s7;
} uint8;
typedef struct __align__(64) ulonglong2to8
{
ulonglong2 l0, l1, l2, l3;
} ulonglong2to8;
typedef struct __align__(128) ulonglong8to16
{
ulonglong2to8 lo, hi;
} ulonglong8to16;
typedef struct __align__(256) ulonglong16to32
{
ulonglong8to16 lo, hi;
} ulonglong16to32;
typedef struct __align__(512) ulonglong32to64
{
ulonglong16to32 lo, hi;
} ulonglong32to64;
typedef struct __align__(1024) ulonglonglong
{
ulonglong8to16 s0, s1, s2, s3, s4, s5, s6, s7;
} ulonglonglong;
typedef struct __align__(64) uint16
{
union
{
struct
{
unsigned int s0, s1, s2, s3, s4, s5, s6, s7;
};
uint8 lo;
};
union
{
struct
{
unsigned int s8, s9, sa, sb, sc, sd, se, sf;
};
uint8 hi;
};
} uint16;
typedef struct __align__(128) uint32
{
uint16 lo, hi;
} uint32;
struct __align__(128) ulong8
{
ulonglong4 s0, s1, s2, s3;
};
typedef __device_builtin__ struct ulong8 ulong8;
typedef struct __align__(256) ulonglong16
{
ulonglong2 s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sa, sb, sc, sd, se, sf;
} ulonglong16;
typedef struct __align__(32) uint48
{
uint4 s0, s1;
} uint48;
typedef struct __align__(64) uint816
{
uint48 s0, s1;
} uint816;
typedef struct __align__(128) uint1632
{
uint816 s0, s1;
} uint1632;
typedef struct __align__(256) uintx64
{
uint1632 s0, s1;
} uintx64;
typedef struct __align__(512) uintx128
{
uintx64 s0, s1;
} uintx128;
typedef struct __align__(1024) uintx256
{
uintx128 s0, s1;
} uintx256;
typedef struct __align__(256) uint4x16
{
uint4 s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15;
} uint4x16;
static __inline__ __device__ ulonglong2to8 make_ulonglong2to8(ulonglong2 s0, ulonglong2 s1, ulonglong2 s2, ulonglong2 s3)
{
ulonglong2to8 t; t.l0 = s0; t.l1 = s1; t.l2 = s2; t.l3 = s3;
return t;
}
static __inline__ __device__ ulonglong8to16 make_ulonglong8to16(const ulonglong2to8 &s0, const ulonglong2to8 &s1)
{
ulonglong8to16 t; t.lo = s0; t.hi = s1;
return t;
}
static __inline__ __device__ ulonglong16to32 make_ulonglong16to32(const ulonglong8to16 &s0, const ulonglong8to16 &s1)
{
ulonglong16to32 t; t.lo = s0; t.hi = s1;
return t;
}
static __inline__ __device__ ulonglong32to64 make_ulonglong32to64(const ulonglong16to32 &s0, const ulonglong16to32 &s1)
{
ulonglong32to64 t; t.lo = s0; t.hi = s1;
return t;
}
static __inline__ __host__ __device__ ulonglonglong make_ulonglonglong(
const ulonglong8to16 &s0, const ulonglong8to16 &s1, const ulonglong8to16 &s2, const ulonglong8to16 &s3,
const ulonglong8to16 &s4, const ulonglong8to16 &s5, const ulonglong8to16 &s6, const ulonglong8to16 &s7)
{
ulonglonglong t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3; t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
return t;
}
static __inline__ __device__ uint48 make_uint48(uint4 s0, uint4 s1)
{
uint48 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uint816 make_uint816(const uint48 &s0, const uint48 &s1)
{
uint816 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uint1632 make_uint1632(const uint816 &s0, const uint816 &s1)
{
uint1632 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uintx64 make_uintx64(const uint1632 &s0, const uint1632 &s1)
{
uintx64 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uintx128 make_uintx128(const uintx64 &s0, const uintx64 &s1)
{
uintx128 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uintx256 make_uintx256(const uintx128 &s0, const uintx128 &s1)
{
uintx256 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __device__ uintx256 make_uintx64(const uintx128 &s0, const uintx128 &s1)
{
uintx256 t; t.s0 = s0; t.s1 = s1;
return t;
}
static __inline__ __host__ __device__ uint4x16 make_uint4x16(
uint4 s0, uint4 s1, uint4 s2, uint4 s3, uint4 s4, uint4 s5, uint4 s6, uint4 s7,
uint4 s8, uint4 s9, uint4 sa, uint4 sb, uint4 sc, uint4 sd, uint4 se, uint4 sf)
{
uint4x16 t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3; t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
t.s8 = s8; t.s9 = s9; t.s10 = sa; t.s11 = sb; t.s12 = sc; t.s13 = sd; t.s14 = se; t.s15 = sf;
return t;
}
static __inline__ __host__ __device__ uint16 make_uint16(
unsigned int s0, unsigned int s1, unsigned int s2, unsigned int s3, unsigned int s4, unsigned int s5, unsigned int s6, unsigned int s7,
unsigned int s8, unsigned int s9, unsigned int sa, unsigned int sb, unsigned int sc, unsigned int sd, unsigned int se, unsigned int sf)
{
uint16 t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3; t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
t.s8 = s8; t.s9 = s9; t.sa = sa; t.sb = sb; t.sc = sc; t.sd = sd; t.se = se; t.sf = sf;
return t;
}
static __inline__ __host__ __device__ uint16 make_uint16(const uint8 &a, const uint8 &b)
{
uint16 t; t.lo = a; t.hi = b; return t;
}
static __inline__ __host__ __device__ uint32 make_uint32(const uint16 &a, const uint16 &b)
{
uint32 t; t.lo = a; t.hi = b; return t;
}
static __inline__ __host__ __device__ uint8 make_uint8(
unsigned int s0, unsigned int s1, unsigned int s2, unsigned int s3, unsigned int s4, unsigned int s5, unsigned int s6, unsigned int s7)
{
uint8 t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3; t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
return t;
}
static __inline__ __host__ __device__ ulonglong16 make_ulonglong16(const ulonglong2 &s0, const ulonglong2 &s1,
const ulonglong2 &s2, const ulonglong2 &s3, const ulonglong2 &s4, const ulonglong2 &s5, const ulonglong2 &s6, const ulonglong2 &s7,
const ulonglong2 &s8, const ulonglong2 &s9,
const ulonglong2 &sa, const ulonglong2 &sb, const ulonglong2 &sc, const ulonglong2 &sd, const ulonglong2 &se, const ulonglong2 &sf
)
{
ulonglong16 t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3; t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
t.s8 = s8; t.s9 = s9; t.sa = sa; t.sb = sb; t.sc = sc; t.sd = sd; t.se = se; t.sf = sf;
return t;
}
static __inline__ __host__ __device__ ulong8 make_ulong8(
ulonglong4 s0, ulonglong4 s1, ulonglong4 s2, ulonglong4 s3)
{
ulong8 t; t.s0 = s0; t.s1 = s1; t.s2 = s2; t.s3 = s3;// t.s4 = s4; t.s5 = s5; t.s6 = s6; t.s7 = s7;
return t;
}
/*
static __forceinline__ __device__ uchar4 operator^ (uchar4 a, uchar4 b)
{
return make_uchar4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
static __forceinline__ __device__ uchar4 operator+ (uchar4 a, uchar4 b)
{
return make_uchar4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
static __forceinline__ __device__ uint4 operator^ (uint4 a, uint4 b)
{
return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
static __forceinline__ __device__ uint4 operator+ (uint4 a, uint4 b)
{
return make_uint4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
*/
static __forceinline__ __device__ ulonglong4 operator^ (ulonglong4 a, ulonglong4 b)
{
return make_ulonglong4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
static __forceinline__ __device__ ulonglong4 operator+ (ulonglong4 a, ulonglong4 b)
{
return make_ulonglong4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
static __forceinline__ __device__ ulonglong2 operator^ (ulonglong2 a, ulonglong2 b)
{
return make_ulonglong2(a.x ^ b.x, a.y ^ b.y);
}
static __forceinline__ __device__ ulonglong2 operator+ (ulonglong2 a, ulonglong2 b)
{
return make_ulonglong2(a.x + b.x, a.y + b.y);
}
static __forceinline__ __device__ ulong8 operator^ (const ulong8 &a, const ulong8 &b)
{
return make_ulong8(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3);
} //, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7); }
static __forceinline__ __device__ ulong8 operator+ (const ulong8 &a, const ulong8 &b)
{
return make_ulong8(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3);
} //, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7); }
static __forceinline__ __device__ __host__ uint8 operator^ (const uint8 &a, const uint8 &b)
{
return make_uint8(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7);
}
static __forceinline__ __device__ __host__ uint8 operator+ (const uint8 &a, const uint8 &b)
{
return make_uint8(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7);
}
////////////// mess++ //////
static __forceinline__ __device__ uint48 operator^ (const uint48 &a, const uint48 &b)
{
return make_uint48(a.s0 ^ b.s0, a.s1 ^ b.s1);
}
static __forceinline__ __device__ uint816 operator^ (const uint816 &a, const uint816 &b)
{
return make_uint816(a.s0 ^ b.s0, a.s1 ^ b.s1);
}
static __forceinline__ __device__ uint1632 operator^ (const uint1632 &a, const uint1632 &b)
{
return make_uint1632(a.s0 ^ b.s0, a.s1 ^ b.s1);
}
static __forceinline__ __device__ uintx64 operator^ (const uintx64 &a, const uintx64 &b)
{
return make_uintx64(a.s0 ^ b.s0, a.s1 ^ b.s1);
}
static __forceinline__ __device__ uintx128 operator^ (const uintx128 &a, const uintx128 &b)
{
return make_uintx128(a.s0 ^ b.s0, a.s1 ^ b.s1);
}
static __forceinline__ __device__ uintx256 operator^ (const uintx256 &a, const uintx256 &b)
{
return make_uintx256(a.s0 ^ b.s0, a.s1 ^ b.s1);
}
/////////////////////////
static __forceinline__ __device__ __host__ uint16 operator^ (const uint16 &a, const uint16 &b)
{
return make_uint16(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7,
a.s8 ^ b.s8, a.s9 ^ b.s9, a.sa ^ b.sa, a.sb ^ b.sb, a.sc ^ b.sc, a.sd ^ b.sd, a.se ^ b.se, a.sf ^ b.sf);
}
static __forceinline__ __device__ __host__ uint16 operator+ (const uint16 &a, const uint16 &b)
{
return make_uint16(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7,
a.s8 + b.s8, a.s9 + b.s9, a.sa + b.sa, a.sb + b.sb, a.sc + b.sc, a.sd + b.sd, a.se + b.se, a.sf + b.sf);
}
static __forceinline__ __device__ uint32 operator^ (const uint32 &a, const uint32 &b)
{
return make_uint32(a.lo ^ b.lo, a.hi ^ b.hi);
}
static __forceinline__ __device__ uint32 operator+ (const uint32 &a, const uint32 &b)
{
return make_uint32(a.lo + b.lo, a.hi + b.hi);
}
static __forceinline__ __device__ ulonglong16 operator^ (const ulonglong16 &a, const ulonglong16 &b)
{
return make_ulonglong16(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7,
a.s8 ^ b.s8, a.s9 ^ b.s9, a.sa ^ b.sa, a.sb ^ b.sb, a.sc ^ b.sc, a.sd ^ b.sd, a.se ^ b.se, a.sf ^ b.sf
);
}
static __forceinline__ __device__ ulonglong16 operator+ (const ulonglong16 &a, const ulonglong16 &b)
{
return make_ulonglong16(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7,
a.s8 + b.s8, a.s9 + b.s9, a.sa + b.sa, a.sb + b.sb, a.sc + b.sc, a.sd + b.sd, a.se + b.se, a.sf + b.sf
);
}
static __forceinline__ __device__ void operator^= (ulong8 &a, const ulong8 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uintx64 &a, const uintx64 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uintx128 &a, const uintx128 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uintx256 &a, const uintx256 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uint816 &a, const uint816 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uint48 &a, const uint48 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uint32 &a, const uint32 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator+= (uint32 &a, const uint32 &b)
{
a = a + b;
}
/*
static __forceinline__ __device__ void operator^= (uint4 &a, uint4 b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (uchar4 &a, uchar4 b)
{
a = a ^ b;
}
*/
static __forceinline__ __device__ __host__ void operator^= (uint8 &a, const uint8 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ __host__ void operator^= (uint16 &a, const uint16 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (ulonglong16 &a, const ulonglong16 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (ulonglong4 &a, const ulonglong4 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator^= (ulonglong2 &a, const ulonglong2 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator+= (ulonglong2 &a, const ulonglong2 &b)
{
a = a + b;
}
static __forceinline__ __device__
ulonglong2to8 operator^ (const ulonglong2to8 &a, const ulonglong2to8 &b)
{
return make_ulonglong2to8(a.l0 ^ b.l0, a.l1 ^ b.l1, a.l2 ^ b.l2, a.l3 ^ b.l3);
}
static __forceinline__ __device__
ulonglong2to8 operator+ (const ulonglong2to8 &a, const ulonglong2to8 &b)
{
return make_ulonglong2to8(a.l0 + b.l0, a.l1 + b.l1, a.l2 + b.l2, a.l3 + b.l3);
}
static __forceinline__ __device__
ulonglong8to16 operator^ (const ulonglong8to16 &a, const ulonglong8to16 &b)
{
return make_ulonglong8to16(a.lo ^ b.lo, a.hi ^ b.hi);
}
static __forceinline__ __device__
ulonglong8to16 operator+ (const ulonglong8to16 &a, const ulonglong8to16 &b)
{
return make_ulonglong8to16(a.lo + b.lo, a.hi + b.hi);
}
static __forceinline__ __device__
ulonglong16to32 operator^ (const ulonglong16to32 &a, const ulonglong16to32 &b)
{
return make_ulonglong16to32(a.lo ^ b.lo, a.hi ^ b.hi);
}
static __forceinline__ __device__
ulonglong16to32 operator+ (const ulonglong16to32 &a, const ulonglong16to32 &b)
{
return make_ulonglong16to32(a.lo + b.lo, a.hi + b.hi);
}
static __forceinline__ __device__
ulonglong32to64 operator^ (const ulonglong32to64 &a, const ulonglong32to64 &b)
{
return make_ulonglong32to64(a.lo ^ b.lo, a.hi ^ b.hi);
}
static __forceinline__ __device__
ulonglong32to64 operator+ (const ulonglong32to64 &a, const ulonglong32to64 &b)
{
return make_ulonglong32to64(a.lo + b.lo, a.hi + b.hi);
}
static __forceinline__ __device__ ulonglonglong operator^ (const ulonglonglong &a, const ulonglonglong &b)
{
return make_ulonglonglong(a.s0 ^ b.s0, a.s1 ^ b.s1, a.s2 ^ b.s2, a.s3 ^ b.s3, a.s4 ^ b.s4, a.s5 ^ b.s5, a.s6 ^ b.s6, a.s7 ^ b.s7);
}
static __forceinline__ __device__ ulonglonglong operator+ (const ulonglonglong &a, const ulonglonglong &b)
{
return make_ulonglonglong(a.s0 + b.s0, a.s1 + b.s1, a.s2 + b.s2, a.s3 + b.s3, a.s4 + b.s4, a.s5 + b.s5, a.s6 + b.s6, a.s7 + b.s7);
}
static __forceinline__ __device__ void operator^= (ulonglong2to8 &a, const ulonglong2to8 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator+= (uint4 &a, uint4 b)
{
a = a + b;
}
static __forceinline__ __device__ void operator+= (uchar4 &a, uchar4 b)
{
a = a + b;
}
static __forceinline__ __device__ __host__ void operator+= (uint8 &a, const uint8 &b)
{
a = a + b;
}
static __forceinline__ __device__ __host__ void operator+= (uint16 &a, const uint16 &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator+= (ulong8 &a, const ulong8 &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator+= (ulonglong16 &a, const ulonglong16 &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator+= (ulonglong8to16 &a, const ulonglong8to16 &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator^= (ulonglong8to16 &a, const ulonglong8to16 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator+= (ulonglong16to32 &a, const ulonglong16to32 &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator^= (ulonglong16to32 &a, const ulonglong16to32 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator+= (ulonglong32to64 &a, const ulonglong32to64 &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator^= (ulonglong32to64 &a, const ulonglong32to64 &b)
{
a = a ^ b;
}
static __forceinline__ __device__ void operator+= (ulonglonglong &a, const ulonglonglong &b)
{
a = a + b;
}
static __forceinline__ __device__ void operator^= (ulonglonglong &a, const ulonglonglong &b)
{
a = a ^ b;
}
#if __CUDA_ARCH__ < 320
#define rotateL ROTL32
#define rotateR ROTR32
#else
static __forceinline__ __device__ uint32_t rotateL(uint32_t vec4, uint32_t shift)
{
uint32_t ret;
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(vec4), "r"(vec4), "r"(shift));
return ret;
}
static __forceinline__ __device__ uint32_t rotateR(uint32_t vec4, uint32_t shift)
{
uint32_t ret;
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(vec4), "r"(vec4), "r"(shift));
return ret;
}
#endif
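// Note: shf.l.wrap / shf.r.wrap are funnel shifts with wrap-around, so passing
// the same word for both inputs reduces them to plain 32-bit rotations, i.e.
// rotateL(x, s) == (x << s) | (x >> (32 - s)) for 0 < s < 32, which is exactly
// what the ROTL32/ROTR32 fallback computes on older architectures.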
// same for SM 3.5+, really faster ?
__device__ static void shift256R(uint32_t* ret, const uint8 &vec4, uint32_t shift)
{
uint32_t truc = 0, truc2 = cuda_swab32(vec4.s7), truc3 = 0;
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[8] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s6);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[7] = cuda_swab32(truc);
truc2 = cuda_swab32(vec4.s5);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[6] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s4);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[5] = cuda_swab32(truc);
truc2 = cuda_swab32(vec4.s3);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[4] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s2);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[3] = cuda_swab32(truc);
truc2 = cuda_swab32(vec4.s1);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc3), "r"(truc2), "r"(shift));
ret[2] = cuda_swab32(truc);
truc3 = cuda_swab32(vec4.s0);
asm("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(truc) : "r"(truc2), "r"(truc3), "r"(shift));
ret[1] = cuda_swab32(truc);
asm("shr.b32 %0, %1, %2;" : "=r"(truc) : "r"(truc3), "r"(shift));
ret[0] = cuda_swab32(truc);
}
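// Note: shift256R spreads a 256-bit value (held in the uint8, with per-word
// byte swaps applied on load and store) shifted right by `shift` bits across
// nine 32-bit output words, so the caller must supply at least uint32_t ret[9].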
// complicated way to copy 256 bytes ;)
static __device__ __inline__ uintx64 ldg256(const uint4 *ptr)
{
uintx64 ret;
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.s0.s0.s0.s0.x), "=r"(ret.s0.s0.s0.s0.y), "=r"(ret.s0.s0.s0.s0.z), "=r"(ret.s0.s0.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+16];" : "=r"(ret.s0.s0.s0.s1.x), "=r"(ret.s0.s0.s0.s1.y), "=r"(ret.s0.s0.s0.s1.z), "=r"(ret.s0.s0.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+32];" : "=r"(ret.s0.s0.s1.s0.x), "=r"(ret.s0.s0.s1.s0.y), "=r"(ret.s0.s0.s1.s0.z), "=r"(ret.s0.s0.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+48];" : "=r"(ret.s0.s0.s1.s1.x), "=r"(ret.s0.s0.s1.s1.y), "=r"(ret.s0.s0.s1.s1.z), "=r"(ret.s0.s0.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+64];" : "=r"(ret.s0.s1.s0.s0.x), "=r"(ret.s0.s1.s0.s0.y), "=r"(ret.s0.s1.s0.s0.z), "=r"(ret.s0.s1.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+80];" : "=r"(ret.s0.s1.s0.s1.x), "=r"(ret.s0.s1.s0.s1.y), "=r"(ret.s0.s1.s0.s1.z), "=r"(ret.s0.s1.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+96];" : "=r"(ret.s0.s1.s1.s0.x), "=r"(ret.s0.s1.s1.s0.y), "=r"(ret.s0.s1.s1.s0.z), "=r"(ret.s0.s1.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+112];" : "=r"(ret.s0.s1.s1.s1.x), "=r"(ret.s0.s1.s1.s1.y), "=r"(ret.s0.s1.s1.s1.z), "=r"(ret.s0.s1.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+128];" : "=r"(ret.s1.s0.s0.s0.x), "=r"(ret.s1.s0.s0.s0.y), "=r"(ret.s1.s0.s0.s0.z), "=r"(ret.s1.s0.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+144];" : "=r"(ret.s1.s0.s0.s1.x), "=r"(ret.s1.s0.s0.s1.y), "=r"(ret.s1.s0.s0.s1.z), "=r"(ret.s1.s0.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+160];" : "=r"(ret.s1.s0.s1.s0.x), "=r"(ret.s1.s0.s1.s0.y), "=r"(ret.s1.s0.s1.s0.z), "=r"(ret.s1.s0.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+176];" : "=r"(ret.s1.s0.s1.s1.x), "=r"(ret.s1.s0.s1.s1.y), "=r"(ret.s1.s0.s1.s1.z), "=r"(ret.s1.s0.s1.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+192];" : "=r"(ret.s1.s1.s0.s0.x), "=r"(ret.s1.s1.s0.s0.y), "=r"(ret.s1.s1.s0.s0.z), "=r"(ret.s1.s1.s0.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+208];" : "=r"(ret.s1.s1.s0.s1.x), "=r"(ret.s1.s1.s0.s1.y), "=r"(ret.s1.s1.s0.s1.z), "=r"(ret.s1.s1.s0.s1.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+224];" : "=r"(ret.s1.s1.s1.s0.x), "=r"(ret.s1.s1.s1.s0.y), "=r"(ret.s1.s1.s1.s0.z), "=r"(ret.s1.s1.s1.s0.w) : __LDG_PTR(ptr));
asm("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4+240];" : "=r"(ret.s1.s1.s1.s1.x), "=r"(ret.s1.s1.s1.s1.y), "=r"(ret.s1.s1.s1.s1.z), "=r"(ret.s1.s1.s1.s1.w) : __LDG_PTR(ptr));
return ret;
}
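// Note: ldg256 streams 256 bytes through the read-only data cache with sixteen
// ld.global.nc.v4.u32 loads at 16-byte offsets; `ptr` must be 16-byte aligned
// and point to memory that stays read-only for the lifetime of the kernel. A
// plain-CUDA equivalent would be sixteen __ldg() loads of uint4 at ptr + 0..15.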
#endif // #ifndef CUDA_VECTOR_H
// ============================ next source file ============================
FEMSolver::FEMSolver(
std::string fname, bool isTetMesh, bool verbose) :
verbose_(verbose), // output verbosity
filename_(fname), // mesh file name
maxLevels_(100), // the maximum number of levels
maxIters_(100), // the maximum solve iterations
preInnerIters_(5), // the pre inner iterations for GSINNER
postInnerIters_(5), // the post inner iterations for GSINNER
postRelaxes_(1), // The number of post relax iterations
cycleIters_(1), // The number of CG iterations per outer iteration
dsType_(0), // Data Structure Type
topSize_(256), // the Max size of coarsest level
randMisParameters_(90102), // parameters for the randomized MIS aggregation
partitionMaxSize_(512), // the largest partition size (use getMaxThreads() to determine for your device)
aggregatorType_(0), // aggregator oldMis (0), metis bottom up (1),
// metis top down (2), aggMisGPU (3), aggMisCPU (4), newMisLight (5)
convergeType_(0), // Convergence tolerance algo [ABSOLUTE_CONVERGENCE (0), RELATIVE_CONVERGENCE (1)]
tolerance_(1e-6), // the convergence tolerance
cycleType_(0), // set the cycle algorithm
solverType_(0), // the solving algorithm [AMG_SOLVER (0),PCG_SOLVER (1)]
smootherWeight_(1.0), // the weight parameter used in a smoother
proOmega_(0.67), // the weight parameter used in a prolongator smoother
device_(0), // the device number to run on
blockSize_(256), // maximum size of a block
tetMesh_(NULL), // the tetmesh pointer
triMesh_(NULL) // the trimesh pointer
{
if (isTetMesh) {
this->tetMesh_ = TetMesh::read((this->filename_ + ".node").c_str(),
(this->filename_ + ".ele").c_str(), verbose);
} else {
TriMesh::verbose = verbose;
this->triMesh_ = TriMesh::read(this->filename_.c_str());
}
this->getMatrixFromMesh();
}
FEMSolver::~FEMSolver() {
if (this->tetMesh_ != NULL)
delete this->tetMesh_;
if (this->triMesh_ != NULL)
delete this->triMesh_;
}
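// Hypothetical usage sketch (not part of the original sources): build a solver
// from a tet-mesh basename, assemble a right-hand side, and solve. The mesh
// path, the Vector_h_CG(n, value) constructor, and the main() wrapper are
// assumptions for illustration only, so the block is kept disabled.
#if 0
int main()
{
    FEMSolver solver("meshes/cube", /*isTetMesh=*/true, /*verbose=*/true);
    size_t n = solver.getMatrixRows();
    Vector_h_CG x_h(n, 0.0);   // initial guess
    Vector_h_CG b_h(n, 1.0);   // right-hand side
    solver.solveFEM(&x_h, &b_h);
    solver.writeMatlabArray("x_h.mat", x_h);
    return 0;
}
#endif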
/**
* Sets up the AMG hierarchy for the assembled matrix and solves the system.
*
* @param x_h The host solution vector; updated in place with the result.
* @param b_h The host right-hand-side vector.
*/
void FEMSolver::solveFEM(
Vector_h_CG* x_h, Vector_h_CG* b_h) {
this->checkMatrixForValidContents(&this->A_h_);
clock_t starttime, endtime;
starttime = clock();
Matrix_ell_d_CG A_device(this->A_h_);
//wait for the host-to-device copy of the matrix to complete
cudaThreadSynchronize();
//register configuration parameters
AMG<Matrix_h, Vector_h> amg(this->verbose_, this->convergeType_,
this->cycleType_, this->solverType_, this->tolerance_,
this->cycleIters_, this->maxIters_, this->maxLevels_,
this->topSize_, this->smootherWeight_, this->preInnerIters_,
this->postInnerIters_, this->postRelaxes_, this->dsType_,
this->randMisParameters_, this->partitionMaxSize_, this->proOmega_,
this->aggregatorType_, this->blockSize_,
this->triMesh_, this->tetMesh_);
//setup multi grid for solver
amg.setup(A_device);
//print info
if (this->verbose_)
amg.printGridStatistics();
//copy to device
Vector_d_CG x_d(*x_h);
Vector_d_CG b_d(*b_h);
//run solver
amg.solve(b_d, x_d);
//copy back to host
*x_h = Vector_h_CG(x_d);
*b_h = Vector_h_CG(b_d);
endtime = clock();
double duration = (double)(endtime - starttime) * 1000 / CLOCKS_PER_SEC;
if (this->verbose_)
printf("Computing time : %.10lf ms\n", duration);
}
size_t FEMSolver::getMatrixRows() {
return this->A_h_.num_rows;
}
bool FEMSolver::InitCUDA() {
int count = 0;
bool found = false;
cudaGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for (int i = 0; i < count; i++) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
found = true;
break;
}
}
}
if (!found) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaDeviceProp props;
cudaSafeCall(cudaSetDevice(this->device_));
cudaSafeCall(cudaGetDeviceProperties(&props, this->device_));
if (this->verbose_) {
printf("Device %d: \"%s\" with Compute %d.%d capability\n",
this->device_, props.name, props.major, props.minor);
printf("CUDA initialized.\n");
}
return true;
}
void FEMSolver::checkMatrixForValidContents(Matrix_ell_h* A_h) {
if (A_h->num_rows == 0) {
if (this->verbose_) {
printf("Error no matrix specified\n");
}
std::string error = "Error no matrix specified";
throw std::invalid_argument(error);
}
}
void FEMSolver::getMatrixFromMesh() {
if (this->triMesh_ == NULL && this->tetMesh_ == NULL)
exit(0);
Matrix_ell_d_CG A_device;
//assembly / generate matrix step
if (this->triMesh_ != NULL) {
this->triMesh_->set_verbose(this->verbose_);
this->triMesh_->need_neighbors();
this->triMesh_->need_meshquality();
//generate the unit constant mesh stiffness matrix
trimesh2ell<Matrix_ell_d_CG >(this->triMesh_, A_device);
// 2D fem solving object
FEM2D fem2d;
Vector_d_CG RHS(this->triMesh_->vertices.size(), 0.0);
fem2d.initializeWithTriMesh(this->triMesh_);
fem2d.assemble(this->triMesh_, A_device, RHS);
} else {
this->tetMesh_->set_verbose(this->verbose_);
this->tetMesh_->need_neighbors();
this->tetMesh_->need_meshquality();
//generate the unit constant mesh stiffness matrix
tetmesh2ell<Matrix_ell_d_CG >(this->tetMesh_, A_device, this->verbose_);
// 3D fem solving object
FEM3D fem3d;
Vector_d_CG RHS(this->tetMesh_->vertices.size(), 0.0);
fem3d.initializeWithTetMesh(this->tetMesh_);
fem3d.assemble(this->tetMesh_, A_device, RHS, true);
}
cudaThreadSynchronize();
this->A_h_ = Matrix_ell_h(A_device);
}
bool FEMSolver::compare_sparse_entry(SparseEntry_t a, SparseEntry_t b) {
return ((a.row_ != b.row_) ? (a.row_ < b.row_) : (a.col_ < b.col_));
}
int FEMSolver::readMatlabSparseMatrix(const std::string &filename) {
//read in the description header
std::ifstream in(filename.c_str(), std::ios::binary);
if (!in.is_open()) {
std::cerr << "could not open file: " << filename << std::endl;
return 1;
}
char buffer[256];
in.read(buffer, 128);
int32_t type;
in.read((char*)&type, 4);
if (type == 15) {
std::cerr << "Compression not supported. Save matlab data with '-v6' option." << std::endl;
in.close();
return 1;
} else if (type != 14) {
std::cerr << filename << " is not a matlab matrix." << std::endl;
in.close();
return 1;
}
//read in the array flags
uint32_t data_size;
in.read((char*)&data_size, 4);
in.read((char*)&type, 4);
if (type != 6 && type != 5) {
std::cerr << "Invalid type for sparse matrix. Must be 32bit." << std::endl;
in.close();
return 1;
}
int32_t byte_per_element;
in.read((char*)&byte_per_element, 4);
uint32_t mclass;
in.read((char*)&mclass, 4);
mclass &= 0x000000FF;
if (mclass != 5) {
std::cerr << "This is not a sparse matrix file." << std::endl;
in.close();
return 1;
}
uint32_t nzmax;
in.read((char*)&nzmax, 4);
//read in the dimensions and name
in.read((char*)&type, 4);
in.read((char*)&byte_per_element, 4);
if ((type != 6 && type != 5) || byte_per_element != 8) {
std::cerr << "Matrix of wrong dimension type or # of dimensions." << std::endl;
std::cerr << "Matrix must be 2 dimensions and of 32bit type." << std::endl;
in.close();
return 1;
}
int32_t x_dim, y_dim;
in.read((char*)&x_dim, 4);
in.read((char*)&y_dim, 4);
//Array name
uint32_t arrayName_type = 0;
uint32_t arrayName_length = 0;
uint8_t byteAlignmentForPadding = 4;
in.read((char*)&arrayName_type, 2);
in.read((char*)&arrayName_length, 2);
//If next 16-bits are zero, then MAT file is not using the small data
// element format for storing array name
if (arrayName_length == 0) {
in.read((char*)&arrayName_length, 4);
byteAlignmentForPadding = 8;
}
if (arrayName_type != 1 && arrayName_type != 2) {
std::cerr << "WARNING: Invalid variable type (" << arrayName_type;
std::cerr << ") for array name characters (Must be 8-bit)." << std::endl;
in.close();
return -1;
}
//Account for padding of array name to match the 32-bit or 64-bit requirement,
// depending on the short or normal format for the array name format.
int lenRemainder = arrayName_length % byteAlignmentForPadding;
if (lenRemainder != 0)
arrayName_length = arrayName_length + byteAlignmentForPadding - lenRemainder;
in.read(buffer, arrayName_length); //Read the array name (ignore)
//read in the row indices
in.read((char*)&type, 4);
if (type != 6 && type != 5) {
std::cerr << "Invalid type row index for sparse matrix. Must be 32bit." << std::endl;
in.close();
return 1;
}
in.read((char*)&byte_per_element, 4);
std::vector<int32_t> row_vals(byte_per_element / 4, 0);
in.read(reinterpret_cast<char*>(row_vals.data()), byte_per_element);
//read in remaining bytes
in.read(buffer, byte_per_element % 8);
//read in the column indices
in.read((char*)&type, 4);
if (type != 6 && type != 5) {
std::cerr << "Invalid column index type for sparse matrix. Must be 32bit." << std::endl;
in.close();
return 1;
}
in.read((char*)&byte_per_element, 4);
std::vector<int32_t> col_vals(byte_per_element / 4, 0);
in.read(reinterpret_cast<char*>(col_vals.data()), byte_per_element);
//read in remaining bytes
in.read(buffer, byte_per_element % 8);
//read in the data values
in.read((char*)&type, 4);
if (type != 9) {
std::cerr << "Invalid value for sparse matrix. " <<
"Must be double float." << std::endl;
in.close();
return 1;
}
in.read((char*)&byte_per_element, 4);
std::vector<double> double_vals(byte_per_element / 8, 0);
in.read(reinterpret_cast<char*>(double_vals.data()), byte_per_element);
in.close();
std::vector<SparseEntry_t> sparse_entries;
int32_t num_entries = col_vals[y_dim];
sparse_entries.reserve(num_entries);
std::vector<int32_t> row_max;
row_max.resize(x_dim);
for (size_t i = 0; i < row_max.size(); i++)
row_max[i] = 0;
for (size_t i = 0; i < y_dim; i++) {
int32_t idx = col_vals[i];
int32_t idx_end = col_vals[i + 1] - 1;
int32_t col = static_cast<int32_t>(i);
for (int32_t j = idx; j <= idx_end; j++) {
row_max[row_vals[j]]++;
sparse_entries.push_back(
SparseEntry_t(row_vals[j], col,
static_cast<float>(double_vals[j])));
}
}
//now set up the ell matrix.
//sort the sparse entries
std::sort(sparse_entries.begin(), sparse_entries.end(), compare_sparse_entry);
//determine the max nonzeros per row
int32_t max_row = 0;
for (size_t i = 0; i < row_max.size(); i++)
max_row = max_row > row_max[i] ? max_row : row_max[i];
//set up the matrix
Matrix_ell_h A(x_dim, y_dim, num_entries, max_row);
//iterate through to add values.
// X is used to fill unused entries in the matrix
const int bad_entry = Matrix_ell_h::invalid_index;
int32_t current_row = 0, row_count = 0;
for (size_t i = 0; i < sparse_entries.size(); i++) {
A.column_indices(current_row, row_count) = sparse_entries[i].col_;
A.values(current_row, row_count) = sparse_entries[i].val_;
row_count++;
if (((i + 1 < sparse_entries.size()) && (current_row != sparse_entries[i + 1].row_))
|| (i + 1 == sparse_entries.size())) {
while (row_count < max_row) {
A.column_indices(current_row, row_count) = bad_entry;
A.values(current_row, row_count) = 0.f;
row_count++;
}
if (i + 1 < sparse_entries.size())
current_row = sparse_entries[i + 1].row_;
row_count = 0;
}
}
//zero out current entries.
Matrix_ell_h original(this->A_h_);
for (size_t ii = 0; ii < original.num_rows; ii++) {
size_t jj = 0;
size_t numNeighbor = ((this->triMesh_ == NULL) ?
(this->tetMesh_->neighbors[ii].size()) :
(this->triMesh_->neighbors[ii].size())) + 1;
while (jj < numNeighbor) {
original.values(ii, jj) = 1e-12f;
jj++;
}
}
//add custom entries to zeroed entries.
cusp::add(A, original, this->A_h_);
return 0;
}
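// Note on the layout parsed above (Level 5 MAT-file saved with '-v6'): a
// 128-byte text header is followed by one miMATRIX (type 14) element that
// contains, in order, the array flags, the dimensions, the array name, the
// row indices, the column pointers (length y_dim + 1), and the double-precision
// nonzero values. The column pointers are expanded into (row, col, value)
// triples, sorted row-major, and packed into an ELL matrix padded with
// invalid_index before being merged into A_h_.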
int FEMSolver::readMatlabArray(const std::string &filename, Vector_h_CG* rhs) {
//read in the description header
std::ifstream in(filename.c_str(), std::ios::in | std::ios::binary);
if (!in.is_open()) {
std::cerr << "could not open file: " << filename << std::endl;
return -1;
}
char buffer[256];
in.read(buffer, 128);
int32_t type;
in.read((char*)&type, 4);
if (type == 15) {
std::cerr << "Compression not supported. Save matlab data with '-v6' option." << std::endl;
in.close();
return -1;
} else if (type != 14) {
std::cerr << filename << " is not a matlab matrix." << std::endl;
in.close();
return -1;
}
//read in the array flags
uint32_t data_size;
in.read((char*)&data_size, 4);
in.read((char*)&type, 4);
if (type != 6) {
std::cerr << "Invalid type for normal matrix. Must be double precision." << std::endl;
in.close();
return -1;
}
int32_t byte_per_element;
in.read((char*)&byte_per_element, 4);
uint32_t mclass;
in.read((char*)&mclass, 4);
mclass &= 0x000000FF;
if (mclass == 5) {
std::cerr << "This import routine is not for a sparse matrix file." << std::endl;
in.close();
return -1;
}
uint32_t nzmax;
in.read((char*)&nzmax, 4);
//read in the dimensions and name
in.read((char*)&type, 4);
in.read((char*)&byte_per_element, 4);
if ((type != 6 && type != 5) || byte_per_element != 8) {
std::cerr << "Matrix of wrong dimension type or # of dimensions." << std::endl;
std::cerr << "Matrix must be 2 dimensions and of 32bit type." << std::endl;
in.close();
return -1;
}
int32_t x_dim, y_dim;
in.read((char*)&x_dim, 4);
in.read((char*)&y_dim, 4);
//Array name
uint32_t arrayName_type = 0;
in.read((char*)&arrayName_type, 2);
if (arrayName_type != 1 && arrayName_type != 2) {
std::cerr << "WARNING: Invalid variable type (" << arrayName_type;
std::cerr << ") for array name characters (Must be 8-bit)." << std::endl;
in.close();
return -1;
}
uint32_t arrayName_length = 0;
in.read((char*)&arrayName_length, 2);
//Account for padding of array name to match 32-bit requirement
int lenRemainder = arrayName_length % 4;
if (lenRemainder != 0)
arrayName_length = arrayName_length + 4 - lenRemainder;
in.read(buffer, arrayName_length); //Read the array name (ignore)
//Data type in array field
in.read((char*)&type, 4);
if (type != 9) {
std::cerr << "Matrix data type must be miDOUBLE (type is ";
std::cerr << type << ")." << std::endl;
in.close();
return -1;
}
//Length of array field
uint32_t arrayData_length;
in.read((char*)&arrayData_length, 4);
std::vector<double> double_vals(arrayData_length / 8, 0);
in.read(reinterpret_cast<char*>(double_vals.data()), arrayData_length);
in.close();
rhs->clear();
for (int j = 0; j < double_vals.size(); j++) {
rhs->push_back(double_vals[j]);
}
return 0;
}
int FEMSolver::writeMatlabArray(const std::string &filename, const Vector_h_CG &array) {
//read in the description header
std::ofstream file(filename.c_str(), std::ios::out | std::ios::binary);
//write description
std::string desc = "MATLAB 5.0 MAT-file, Platform: GLNXA64, Created by SCI-Solver_FEM.";
desc.resize(116, ' ');
file.write((char*)desc.c_str(), desc.length());
//write offset
char zeros[32] = { 0 };
file.write(zeros, 8);
int16_t version = 0x0100;
file.write((char*)&version, sizeof(int16_t));
//write endian
char endian[2] = { 'I', 'M' };
file.write(endian, sizeof(int16_t));
//write the matrix header and size.
int32_t type = 14;
file.write((char*)&type, sizeof(int32_t));
int32_t totalSize = 0;
long sizeAddress = (long)file.tellp();
file.write((char*)&totalSize, sizeof(int32_t));
long startAddress = (long)file.tellp();
//write the array flags.
int32_t flagsType = 6;
int32_t flagsSize = 8;
file.write((char*)&flagsType, sizeof(int32_t));
file.write((char*)&flagsSize, sizeof(int32_t));
//write the class
uint32_t mclass = 6;
file.write((char*)&mclass, sizeof(int32_t));
file.write(zeros, 4);
//write dimensions
int32_t dimensionsType = 5;
int32_t dimensionsSize = 8;
int32_t dim_x = (int32_t)array.size();
int32_t dim_y = 1;
file.write((char*)&dimensionsType, sizeof(int32_t));
file.write((char*)&dimensionsSize, sizeof(int32_t));
file.write((char*)&dim_x, sizeof(int32_t));
file.write((char*)&dim_y, sizeof(int32_t));
//write array name
int8_t arrayName[8] = { 'x', '_', 'h', '\0' };
int16_t arrayNameType = 1;
int16_t arrayNameSize = 3;
file.write((char*)&arrayNameType, sizeof(int16_t));
file.write((char*)&arrayNameSize, sizeof(int16_t));
file.write((char*)arrayName, 4 * sizeof(int8_t));
//write the real data header
int32_t arrayType = 9;
int32_t arraySize = dim_x * 8;
file.write((char*)&arrayType, sizeof(int32_t));
file.write((char*)&arraySize, sizeof(int32_t));
//finally write the data.
for (size_t i = 0; i < array.size(); i++) {
double val = static_cast <double> (array[i]);
file.write((char*)&val, sizeof(double));
}
//now write back the size to the main header.
long endAddress = (long)file.tellp();
totalSize = endAddress - startAddress;
file.seekp(sizeAddress);
file.write((char*)&totalSize, sizeof(int32_t));
file.close();
return 0;
}
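// Note: writeMatlabArray emits a minimal little-endian Level 5 MAT-file holding
// a single miDOUBLE column vector named "x_h"; the element size recorded in the
// miMATRIX header is patched in afterwards, once the data length is known.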
void FEMSolver::writeVTK(std::vector <double> values, std::string fname)
{
if (this->tetMesh_ != NULL) {
int nv = this->tetMesh_->vertices.size();
int nt = this->tetMesh_->tets.size();
FILE* vtkfile;
vtkfile = fopen((fname + ".vtk").c_str(), "w+");
fprintf(vtkfile, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET UNSTRUCTURED_GRID\n");
fprintf(vtkfile, "POINTS %d float\n", nv);
for (int i = 0; i < nv; i++) {
fprintf(vtkfile, "%.12f %.12f %.12f\n",
this->tetMesh_->vertices[i][0],
this->tetMesh_->vertices[i][1],
this->tetMesh_->vertices[i][2]);
}
fprintf(vtkfile, "CELLS %d %d\n", nt, nt * 5);
for (int i = 0; i < nt; i++) {
fprintf(vtkfile, "4 %d %d %d %d\n",
this->tetMesh_->tets[i][0],
this->tetMesh_->tets[i][1],
this->tetMesh_->tets[i][2],
this->tetMesh_->tets[i][3]);
}
fprintf(vtkfile, "CELL_TYPES %d\n", nt);
for (int i = 0; i < nt; i++) {
fprintf(vtkfile, "10\n");
}
fprintf(vtkfile, "POINT_DATA %d\nSCALARS traveltime float 1\nLOOKUP_TABLE default\n",
nv);
for (size_t i = 0; i < values.size(); i++) {
fprintf(vtkfile, "%.12f\n", values[i]);
}
fclose(vtkfile);
} else if (this->triMesh_ != NULL) {
size_t nv = this->triMesh_->vertices.size();
size_t nt = this->triMesh_->faces.size();
FILE* vtkfile;
vtkfile = fopen((fname + ".vtk").c_str(), "w+");
fprintf(vtkfile, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET UNSTRUCTURED_GRID\n");
fprintf(vtkfile, "POINTS %d float\n", (int)nv);
for (size_t i = 0; i < nv; i++) {
fprintf(vtkfile, "%.12f %.12f %.12f\n",
this->triMesh_->vertices[i][0],
this->triMesh_->vertices[i][1],
this->triMesh_->vertices[i][2]);
}
fprintf(vtkfile, "CELLS %d %d\n", (int)nt, (int)(nt * 4));
for (size_t i = 0; i < nt; i++) {
fprintf(vtkfile, "3 %d %d %d\n",
this->triMesh_->faces[i][0],
this->triMesh_->faces[i][1],
this->triMesh_->faces[i][2]);
}
fprintf(vtkfile, "CELL_TYPES %d\n", (int)nt);
for (size_t i = 0; i < nt; i++) {
fprintf(vtkfile, "5\n");
}
fprintf(vtkfile, "POINT_DATA %d\nSCALARS traveltime float 1\nLOOKUP_TABLE default\n", (int)nv);
for (size_t i = 0; i < nv; i++) {
fprintf(vtkfile, "%.12f\n", static_cast<float>(values[i]));
}
fclose(vtkfile);
}
}
// ============================ next source file ============================
// SelectShape.cu
// Implements the shape selection algorithm.
#include "SelectShape.h"
#include <iostream>
using namespace std;
// Macro: MAX_LABEL
// Maximum label value in the feature-value array; defaults to 256.
#ifndef MAX_LABEL
#define MAX_LABEL 256
#endif
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Kernel: _selectShapeByIndexKer (copy a shape region selected by index)
// Pixels of the input image whose value (label) equals `label` are copied to
// the output image; all other output positions are cleared to 0.
static __global__ void       // Kernel functions have no return value.
_selectShapeByIndexKer(
    ImageCuda inimg,         // input image
    ImageCuda outimg,        // output image
    int label                // label value to look up
);
// Kernel: _setLabelByValueKer (mark labels whose feature value matches)
// For entries of the rank array whose feature value equals `value`, set the
// corresponding flag to 1; all other flags stay 0.
static __global__ void       // Kernel functions have no return value.
_setLabelByValueKer(
    int *rank,               // feature-value array
    int value,               // feature value to look up
    unsigned char *flaglabel // flag array
);
// Kernel: _selectShapeByValueKer (copy shape regions selected by feature value)
// Regions of the input image whose label is flagged (feature value equal to
// `value`) are copied to the output image; all other output positions are
// cleared to 0.
static __global__ void       // Kernel functions have no return value.
_selectShapeByValueKer(
    ImageCuda inimg,         // input image
    ImageCuda outimg,        // output image
    unsigned char *flaglabel // flag array
);
// Kernel: _shapeClearByLabelKer (clear unmarked regions)
// If the flag of a pixel's label is 0, set that pixel to 0; otherwise leave it
// unchanged.
static __global__ void       // Kernel functions have no return value.
_shapeClearByLabelKer(
    ImageCuda inimg,         // input image
    unsigned char *flaglabel // flag array
);
// Kernel: _setLabelByMinMaxKer (mark labels whose feature value lies in a range)
// For entries of the rank array whose feature value lies within [minvalue,
// maxvalue], set the corresponding flag to 1; all other flags stay 0.
static __global__ void       // Kernel functions have no return value.
_setLabelByMinMaxKer(
    int *rank,               // feature-value array
    int minvalue,            // minimum feature value
    int maxvalue,            // maximum feature value
    unsigned char *flaglabel // flag array
);
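// Overview of how the kernels above are combined by the host methods below:
// selectShapeByIndex launches _selectShapeByIndexKer directly with the label
// looked up from the rank array; selectShapeByValue first runs
// _setLabelByValueKer to build the flag array and then either
// _selectShapeByValueKer (separate output image) or _shapeClearByLabelKer
// (in-place); selectShapeByMinMax builds its flag array with
// _setLabelByMinMaxKer and, by analogy, follows the same copy/clear path.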
// Kernel: _selectShapeByIndexKer (copy a shape region selected by index)
static __global__ void _selectShapeByIndexKer(
    ImageCuda inimg, ImageCuda outimg, int label)
{
    // Compute the output pixel this thread handles, where c and r are the x
    // and y components of the pixel coordinate (c = column, r = row). To
    // reduce the degree of parallelism, each thread processes 4 output pixels
    // located in the same column on 4 adjacent rows, so r is multiplied by 4.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
    // If the first pixel is out of bounds, skip it: this saves compute and
    // avoids crashes caused by out-of-range accesses.
    if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
        return;
    // Index of the first input pixel in the image data array.
    int inidx = r * inimg.pitchBytes + c;
    // Index of the first output pixel in the image data array.
    int outidx = r * outimg.pitchBytes + c;
    // Read the first input pixel value.
    unsigned char intemp;
    intemp = inimg.imgMeta.imgData[inidx];
    // Each thread processes four pixels.
    // If the input pixel value equals label, copy it to the output image;
    // otherwise clear that output position to 0.
    // First pixel handled by this thread.
    outimg.imgMeta.imgData[outidx] = (intemp == label ? intemp : 0);
    // Process the remaining three pixels.
    for (int i = 0; i < 3; i++) {
        // Each of these pixels is one row below the previous one with the same
        // x component, so only the y component needs a bounds check.
        if (++r >= outimg.imgMeta.height)
            return;
        // Advance the indices from the previous pixel: since only y grows by
        // 1, adding one pitch is enough; no multiplication is needed.
        inidx += inimg.pitchBytes;
        outidx += outimg.pitchBytes;
        intemp = inimg.imgMeta.imgData[inidx];
        // If the input pixel value equals label, copy it to the output image;
        // otherwise clear that output position to 0.
        outimg.imgMeta.imgData[outidx] = (intemp == label ? intemp : 0);
    }
}
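// Note: with a DEF_BLOCK_X x DEF_BLOCK_Y = 32 x 8 thread block and four rows
// per thread, one block covers a 32 x 32 pixel tile, which is why the host
// methods below divide the image height by blocksize.y * 4 when sizing the
// grid.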
// Host member method: selectShapeByIndex (select a shape by its index)
__host__ int SelectShape::selectShapeByIndex(Image *inimg, Image *outimg)
{
    // Check whether the input image is NULL.
    if (inimg == NULL)
        return NULL_POINTER;
    // Check whether the rank array is empty.
    if (this->rank == NULL)
        return NULL_POINTER;
    // Preprocessing: allocate Device memory for the input and output images so
    // that they can hold the data.
    int errcode;  // local variable for error codes
    // Copy the input image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Look up the region label corresponding to the requested index.
    int label = this->rank[2 * this->index + 1];
    // If the output image differs from the input image and is not empty.
    if (inimg != outimg && outimg != NULL) {
        // Copy the output image into Device memory.
        errcode = ImageBasicOp::copyToCurrentDevice(outimg);
        if (errcode != NO_ERROR) {
            // If the output image has no data (so the copy above failed),
            // create an image with the same size as the input ROI sub-image.
            errcode = ImageBasicOp::makeAtCurrentDevice(
                    outimg, inimg->roiX2 - inimg->roiX1,
                    inimg->roiY2 - inimg->roiY1);
            // If creating the image also fails, the operation cannot proceed;
            // report the error and return.
            if (errcode != NO_ERROR)
                return errcode;
        }
        // Extract the ROI sub-image of the output image.
        ImageCuda outsubimgCud;
        errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
        if (errcode != NO_ERROR)
            return errcode;
        // Clamp the two sub-images to the smaller width and height so that
        // they cover the same area.
        if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
            insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
        else
            outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
        if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
            insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
        else
            outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
        // Compute the block and grid dimensions for the kernel launch.
        dim3 blocksize, gridsize;
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) /
                     blocksize.x;
        gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
                     (blocksize.y * 4);
        // Launch the kernel that copies the shape region selected by label.
        _selectShapeByIndexKer<<<gridsize, blocksize>>>(
                insubimgCud, outsubimgCud, label);
        // Return an error code if the CUDA call failed.
        if (cudaGetLastError() != cudaSuccess)
            return CUDA_ERROR;
    // If the output image equals the input image, or is empty.
    } else {
        // Compute the block and grid dimensions for the kernel launch.
        dim3 blocksize, gridsize;
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) /
                     blocksize.x;
        gridsize.y = (insubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
                     (blocksize.y * 4);
        // Launch the kernel that copies the shape region selected by label,
        // working in place on the input image.
        _selectShapeByIndexKer<<<gridsize, blocksize>>>(
                insubimgCud, insubimgCud, label);
        // Return an error code if the CUDA call failed.
        if (cudaGetLastError() != cudaSuccess)
            return CUDA_ERROR;
    }
    return NO_ERROR;
}
// Kernel: _setLabelByValueKer (mark labels whose feature value matches)
static __global__ void _setLabelByValueKer(
    int *rank, int value, unsigned char *flaglabel)
{
    // Get the thread index.
    int tid = threadIdx.x;
    // If the feature value equals value, set the flag of its label to 1.
    if (rank[2 * tid] == value)
        flaglabel[rank[2 * tid + 1]] = 1;
}
// Kernel: _selectShapeByValueKer (copy shape regions selected by feature value)
static __global__ void _selectShapeByValueKer(
    ImageCuda inimg, ImageCuda outimg, unsigned char *flaglabel)
{
    // Compute the output pixel this thread handles, where c and r are the x
    // and y components of the pixel coordinate (c = column, r = row). To
    // reduce the degree of parallelism, each thread processes 4 output pixels
    // located in the same column on 4 adjacent rows, so r is multiplied by 4.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
    int tid = blockDim.x * threadIdx.y + threadIdx.x;
    // Keep the flag array in shared memory to speed up the reads.
    __shared__ unsigned char shared[MAX_LABEL];
    // Copy the flag array into shared memory. This is done by every thread of
    // the block, before any bounds check, so that __syncthreads() is reached
    // uniformly.
    shared[tid & ((1 << 8) - 1)] = flaglabel[tid & ((1 << 8) - 1)];
    __syncthreads();
    // If the first pixel is out of bounds, skip it: this saves compute and
    // avoids crashes caused by out-of-range accesses.
    if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
        return;
    // Index of the first input pixel in the image data array.
    int inidx = r * inimg.pitchBytes + c;
    // Index of the first output pixel in the image data array.
    int outidx = r * outimg.pitchBytes + c;
    // Read the first input pixel value.
    unsigned char intemp;
    intemp = inimg.imgMeta.imgData[inidx];
    // Each thread processes four pixels.
    // If the flag of the input pixel value is 1, copy the pixel to the output
    // image; otherwise set the output pixel to 0.
    // First pixel handled by this thread.
    outimg.imgMeta.imgData[outidx] = (shared[intemp] == 1 ? intemp : 0);
    // Process the remaining three pixels.
    for (int i = 0; i < 3; i++) {
        // Each of these pixels is one row below the previous one with the same
        // x component, so only the y component needs a bounds check.
        if (++r >= outimg.imgMeta.height)
            return;
        // Advance the indices from the previous pixel: since only y grows by
        // 1, adding one pitch is enough; no multiplication is needed.
        inidx += inimg.pitchBytes;
        outidx += outimg.pitchBytes;
        intemp = inimg.imgMeta.imgData[inidx];
        // If the flag of the input pixel value is 1, copy the pixel to the
        // output image; otherwise set the output pixel to 0.
        outimg.imgMeta.imgData[outidx] = (shared[intemp] == 1 ? intemp : 0);
    }
}
// Kernel: _shapeClearByLabelKer (clear unmarked regions)
static __global__ void _shapeClearByLabelKer(
    ImageCuda inimg, unsigned char *flaglabel)
{
    // Compute the pixel this thread handles, where c and r are the x and y
    // components of the pixel coordinate (c = column, r = row). To reduce the
    // degree of parallelism, each thread processes 4 pixels located in the
    // same column on 4 adjacent rows, so r is multiplied by 4.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
    // If the first pixel is out of bounds, skip it: this saves compute and
    // avoids crashes caused by out-of-range accesses.
    if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
        return;
    // Index of the first input pixel in the image data array.
    int inidx = r * inimg.pitchBytes + c;
    // Each thread processes four pixels.
    // If the flag of the input pixel value is 0, set the pixel to 0;
    // otherwise keep its original value.
    // First pixel handled by this thread.
    if (flaglabel[inimg.imgMeta.imgData[inidx]] == 0)
        inimg.imgMeta.imgData[inidx] = 0;
    // Process the remaining three pixels.
    for (int i = 0; i < 3; i++) {
        // Each of these pixels is one row below the previous one with the same
        // x component, so only the y component needs a bounds check.
        if (++r >= inimg.imgMeta.height)
            return;
        // Advance the index from the previous pixel: since only y grows by 1,
        // adding one pitch is enough; no multiplication is needed.
        inidx += inimg.pitchBytes;
        // If the flag of the input pixel value is 0, set the pixel to 0;
        // otherwise keep its original value.
        if (flaglabel[inimg.imgMeta.imgData[inidx]] == 0)
            inimg.imgMeta.imgData[inidx] = 0;
    }
}
// Host 成员方法:selectShapeByValue(根据特征值查找形状)
__host__ int SelectShape::selectShapeByValue(Image *inimg, Image *outimg)
{
// 检查图像是否为 NULL。
if (inimg == NULL)
return NULL_POINTER;
// 检查 rank 数组是否为空。
if (this->rank == NULL)
return NULL_POINTER;
// 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为输
// 入和输出图像准备内存空间,以便盛放数据。
int errcode; // 局部变量,错误码
// 将输入图像拷贝到 Device 内存中。
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// 提取输入图像的 ROI 子图像。
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// 在 Device 上分配临时空间。一次申请所有空间,然后通过偏移索引各个数组。
cudaError_t cudaerrcode;
int *alldevicedata;
unsigned char *devflaglabel;
int *devRank;
cudaerrcode = cudaMalloc((void** )&alldevicedata,
(2 * this->pairsnum + MAX_LABEL) * sizeof (int));
if (cudaerrcode != cudaSuccess)
return cudaerrcode;
// 初始化 Device 上的内存空间。
cudaerrcode = cudaMemset(alldevicedata, 0,
(2 * this->pairsnum + MAX_LABEL)
* sizeof (int));
if (cudaerrcode != cudaSuccess)
return cudaerrcode;
// 通过偏移读取 devRank 内存空间。
devRank = alldevicedata;
// 将 Host 上的 rank 拷贝到 Device 上的 devRank 中。
cudaerrcode = cudaMemcpy(devRank, this->rank,
2 * this->pairsnum * sizeof (int),
cudaMemcpyHostToDevice);
if (cudaerrcode != cudaSuccess)
return cudaerrcode;
// 通过偏移读取 devflaglabel 内存空间。
devflaglabel = (unsigned char*)(alldevicedata + 2 * this->pairsnum);
// 调用核函数,在 devRank数组中查询 value 值,并获取其标记值。
_setLabelByValueKer<<<1, this->pairsnum>>>(
devRank, this->value, devflaglabel);
// 若调用 CUDA 出错返回错误代码
if (cudaGetLastError() != cudaSuccess) {
cudaFree(alldevicedata);
return CUDA_ERROR;
}
// 如果输入图像不等于输出图像,并且输出图像不为空。
if (inimg != outimg && outimg != NULL) {
// 将输出图像拷贝到 Device 内存中。
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
// 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入
// 图像的 ROI 子图像尺寸相同的图像。
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->roiX2 - inimg->roiX1,
inimg->roiY2 - inimg->roiY1);
// 如果创建图像也操作失败,则说明操作彻底失败,报错退出。
if (errcode != NO_ERROR)
return errcode;
}
// 提取输出图像的 ROI 子图像。
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一
if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
else
outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
else
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
// Compute the block size and grid size for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) /
blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the kernel that copies the shape regions selected by the feature
// value.
_selectShapeByValueKer<<<gridsize, blocksize>>>(
insubimgCud, outsubimgCud, devflaglabel);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess) {
cudaFree(alldevicedata);
return CUDA_ERROR;
}
// If the input image equals the output image, or the output image is NULL.
} else {
// Compute the block size and grid size for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) /
blocksize.x;
gridsize.y = (insubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the kernel that, based on the feature value, clears the unselected
// regions in place.
_shapeClearByLabelKer<<<gridsize, blocksize>>>(
insubimgCud, devflaglabel);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess) {
cudaFree(alldevicedata);
return CUDA_ERROR;
}
}
// Free the temporary Device buffer alldevicedata.
cudaFree(alldevicedata);
return NO_ERROR;
}
// Kernel: _setLabelByMinMaxKer (set region labels by a min/max feature-value range)
static __global__ void _setLabelByMinMaxKer(
int *rank, int minvalue, int maxvalue,
unsigned char *flaglabel)
{
// Get the thread index.
int tid = threadIdx.x;
// If the feature value lies within [minvalue, maxvalue], set its flag to 1.
if (rank[2 * tid] >= minvalue && rank[2 * tid] <= maxvalue)
flaglabel[rank[2 * tid + 1]] = 1;
}
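// Layout of the rank array consumed by the kernel above: it stores pairsnum
// (feature value, region label) pairs flattened into ints, so rank[2 * i] is
// the feature value of region i and rank[2 * i + 1] is its label. For
// example, the pair (value = 120, label = 3) with 120 inside
// [minvalue, maxvalue] results in flaglabel[3] = 1.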
// Host member method: selectShapeByMinMax (select shapes whose feature value lies in a min/max range)
__host__ int SelectShape::selectShapeByMinMax(Image *inimg, Image *outimg)
{
// Check whether the input image is NULL.
if (inimg == NULL)
return NULL_POINTER;
// Check whether the rank array is NULL.
if (this->rank == NULL)
return NULL_POINTER;
// This block performs image preprocessing: it mainly prepares Device memory
// for the input and output images so they can hold the data.
int errcode; // local variable for the error code
// Copy the input image to Device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Allocate temporary space on the Device. All space is requested in a single
// allocation; the individual arrays are then addressed through offsets.
cudaError_t cudaerrcode;
int *alldevicedata;
unsigned char *devflaglabel;
int *devRank;
cudaerrcode = cudaMalloc((void** )&alldevicedata,
(2 * this->pairsnum + MAX_LABEL)
* sizeof (int));
if (cudaerrcode != cudaSuccess)
return cudaerrcode;
// Initialize the memory on the Device.
cudaerrcode = cudaMemset(alldevicedata, 0,
(2 * this->pairsnum + MAX_LABEL)
* sizeof (int));
if (cudaerrcode != cudaSuccess)
return cudaerrcode;
// Locate the devRank memory space through an offset.
devRank = alldevicedata;
// Copy the Host-side rank array into devRank on the Device.
cudaerrcode = cudaMemcpy(devRank, this->rank,
2 * this->pairsnum * sizeof (int),
cudaMemcpyHostToDevice);
if (cudaerrcode != cudaSuccess)
return cudaerrcode;
// Locate the devflaglabel memory space through an offset.
devflaglabel = (unsigned char*)(alldevicedata + 2 * this->pairsnum);
// Launch the kernel that searches the devRank array for values inside the
// min/max range and records their label flags.
_setLabelByMinMaxKer<<<1, this->pairsnum>>>(
devRank, this->minvalue, this->maxvalue, devflaglabel);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess) {
cudaFree(alldevicedata);
return CUDA_ERROR;
}
// If the input image differs from the output image and the output image is
// not NULL.
if (inimg != outimg && outimg != NULL) {
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
// If the output image has no data (which makes the copy above fail),
// create an image with the same size as the input image's ROI sub-image.
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->roiX2 - inimg->roiX1,
inimg->roiY2 - inimg->roiY1);
// If creating the image also fails, the operation cannot proceed; return
// with an error.
if (errcode != NO_ERROR)
return errcode;
}
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Unify the two sub-images by clamping width and height to the smaller of the two.
if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
else
outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
else
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
// Compute the block size and grid size for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) /
blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the kernel that copies the shape regions selected by the
// feature-value flags.
_selectShapeByValueKer<<<gridsize, blocksize>>>(
insubimgCud, outsubimgCud, devflaglabel);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess) {
cudaFree(alldevicedata);
return CUDA_ERROR;
}
// If the input image equals the output image, or the output image is NULL.
} else {
// Compute the block size and grid size for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) /
blocksize.x;
gridsize.y = (insubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the kernel that clears, in place, the regions not selected by the
// feature-value flags.
_shapeClearByLabelKer<<<gridsize, blocksize>>>(
insubimgCud, devflaglabel);
// Return an error code if the CUDA call failed.
if (cudaGetLastError() != cudaSuccess) {
cudaFree(alldevicedata);
return CUDA_ERROR;
}
}
// Free the temporary Device buffer alldevicedata.
cudaFree(alldevicedata);
return NO_ERROR;
}
#pragma once
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/oprtr/oprtr.cuh>
#include <gunrock/util/reduce_device.cuh>
#include <gunrock/app/hits/hits_problem.cuh>
namespace gunrock {
namespace app {
namespace hits {
/**
* @brief Specifying parameters for hits Enactor
* @param parameters The util::Parameter<...> structure holding all parameter
* info \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(app::UseParameters_enactor(parameters));
return retval;
}
/**
* @brief definition of hits iteration loop
* @tparam EnactorT Type of enactor
*/
template <typename EnactorT>
struct hitsIterationLoop
: public IterationLoopBase<EnactorT, Use_FullQ | Push> {
typedef typename EnactorT::VertexT VertexT;
typedef typename EnactorT::SizeT SizeT;
typedef typename EnactorT::ValueT ValueT;
typedef typename EnactorT::Problem::GraphT::CsrT CsrT;
typedef typename EnactorT::Problem::GraphT::GpT GpT;
typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop;
hitsIterationLoop() : BaseIterationLoop() {}
/**
* @brief Core computation of hits, one iteration
* @param[in] peer_ Which GPU peers to work on, 0 means local
* \return cudaError_t error message(s), if any
*/
cudaError_t Core(int peer_ = 0) {
// --
// Alias variables
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slice =
this->enactor
->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
auto &enactor_stats = enactor_slice.enactor_stats;
auto &graph = data_slice.sub_graph[0];
auto &frontier = enactor_slice.frontier;
auto &oprtr_parameters = enactor_slice.oprtr_parameters;
auto &retval = enactor_stats.retval;
auto &iteration = enactor_stats.iteration;
cudaStream_t stream = enactor_slice.stream;
// HITS-specific data slices
auto &hrank_curr = data_slice.hrank_curr;
auto &arank_curr = data_slice.arank_curr;
auto &hrank_next = data_slice.hrank_next;
auto &arank_next = data_slice.arank_next;
auto &cub_temp_space = data_slice.cub_temp_space;
auto &hrank_mag = data_slice.hrank_mag;
auto &arank_mag = data_slice.arank_mag;
auto &cur_error = data_slice.cur_error;
// Set the frontier to NULL to specify that it should include
// all vertices
util::Array1D<SizeT, VertexT> *null_frontier = NULL;
frontier.queue_length = graph.nodes;
frontier.queue_reset = true;
// Number of times to iterate the HITS algorithm
auto max_iter = data_slice.max_iter;
auto hits_norm = data_slice.hits_norm;
// Normalize the HITS scores every N iterations.
// Provides speedup at the risk of data overflow
auto normalize_n = data_slice.normalize_n;
// Reset next ranks to zero
auto reset_zero_op = [hrank_next, arank_next] __host__ __device__(
VertexT * v_q, const SizeT &pos) {
hrank_next[pos] = (ValueT)0.0;
arank_next[pos] = (ValueT)0.0;
};
GUARD_CU(frontier.V_Q()->ForAll(reset_zero_op, graph.nodes));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
// Advance operation to update all hub and auth scores
auto advance_op =
[hrank_curr, arank_curr, hrank_next, arank_next] __host__ __device__(
const VertexT &src, VertexT &dest, const SizeT &edge_id,
const VertexT &input_item, const SizeT &input_pos,
SizeT &output_pos) -> bool {
// Update the hub and authority scores.
// TODO: Look into NeighborReduce for speed improvements
atomicAdd(&hrank_next[src], arank_curr[dest]);
atomicAdd(&arank_next[dest], hrank_curr[src]);
return true;
};
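// In matrix form, with adjacency matrix A, the advance above computes
// h_next = A * a_curr and a_next = A^T * h_curr: every edge (src, dest)
// adds a_curr[dest] into hrank_next[src] and hrank_curr[src] into
// arank_next[dest].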
// Perform advance operation
GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>(
graph.csr(), null_frontier, null_frontier, oprtr_parameters,
advance_op));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
// After updating the scores, normalize the hub and authority scores
// either after every N iterations (user-specified, default=1),
// or at the last iteration
if (((iteration + 1) % normalize_n == 0) || iteration == (max_iter - 1)) {
if(hits_norm == HITS_NORMALIZATION_METHOD_2) { // The default
// Square each element
auto square_op = [hrank_next, arank_next] __host__ __device__(
VertexT * v_q, const SizeT &pos) {
hrank_next[pos] = hrank_next[pos] * hrank_next[pos];
arank_next[pos] = arank_next[pos] * arank_next[pos];
};
GUARD_CU(frontier.V_Q()->ForAll(square_op, graph.nodes));
GUARD_CU2(cudaStreamSynchronize(stream),
"cudaStreamSynchronize Failed");
// Sum all squared scores in each array
GUARD_CU(util::cubReduce(
cub_temp_space, hrank_next, hrank_mag, graph.nodes,
[] __host__ __device__(const ValueT &a, const ValueT &b) {
return a + b;
},
ValueT(0), stream));
GUARD_CU(util::cubReduce(
cub_temp_space, arank_next, arank_mag, graph.nodes,
[] __host__ __device__(const ValueT &a, const ValueT &b) {
return a + b;
},
ValueT(0), stream));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
auto normalize_divide_square_op =
[hrank_next, arank_next, hrank_mag, arank_mag] __host__ __device__(
VertexT * v_q, const SizeT &pos) {
if (hrank_mag[0] > 0) {
hrank_next[pos] = sqrt(hrank_next[pos]) / sqrt(hrank_mag[0]);
}
if (arank_mag[0] > 0) {
arank_next[pos] = sqrt(arank_next[pos]) / sqrt(arank_mag[0]);
}
};
// Divide all elements by the square root of their squared sums.
// Note: take sqrt of x in denominator because x^2 was done in place.
GUARD_CU(frontier.V_Q()->ForAll(normalize_divide_square_op, graph.nodes));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
}
else if(hits_norm == HITS_NORMALIZATION_METHOD_1) {
// Sum all scores in each array
GUARD_CU(util::cubReduce(
cub_temp_space, hrank_next, hrank_mag, graph.nodes,
[] __host__ __device__(const ValueT &a, const ValueT &b) {
return abs(a) + abs(b);
},
ValueT(0), stream));
GUARD_CU(util::cubReduce(
cub_temp_space, arank_next, arank_mag, graph.nodes,
[] __host__ __device__(const ValueT &a, const ValueT &b) {
return abs(a) + abs(b);
},
ValueT(0), stream));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
auto normalize_divide_op =
[hrank_next, arank_next, hrank_mag, arank_mag] __host__ __device__(
VertexT * v_q, const SizeT &pos) {
if (hrank_mag[0] > 0) {
hrank_next[pos] = hrank_next[pos] / hrank_mag[0];
}
if (arank_mag[0] > 0) {
arank_next[pos] = arank_next[pos] / arank_mag[0];
}
};
// Divide all elements by the sum of their absolute values (L1 normalization).
GUARD_CU(frontier.V_Q()->ForAll(normalize_divide_op, graph.nodes));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
}
else {
assert(false); // TODO: How does gunrock handle error cases?
}
// hrank_curr is now scratch space, since it will be overwritten with 0s on
// the next iteration. Use it as temporary storage for the ForAll that
// computes the per-vertex error.
auto err_op = [hrank_next, hrank_curr] __host__ __device__(
VertexT * v_q, const SizeT &pos) {
hrank_curr[pos] = abs(hrank_next[pos] - hrank_curr[pos]);
};
GUARD_CU(frontier.V_Q()->ForAll(err_op, graph.nodes));
GUARD_CU2(cudaStreamSynchronize(stream),
"cudaStreamSynchronize Failed");
// Now perform the reduction to compute the error
GUARD_CU(util::cubReduce(
cub_temp_space, hrank_curr, cur_error, graph.nodes,
[] __host__ __device__(const ValueT &a, const ValueT &b) {
return abs(a) + abs(b);
},
ValueT(0), stream));
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize Failed");
}
// After normalization, swap the next and current vectors
auto hrank_temp = hrank_curr;
hrank_curr = hrank_next;
hrank_next = hrank_temp;
auto arank_temp = arank_curr;
arank_curr = arank_next;
arank_next = arank_temp;
// Possibly normalize only at the end, or every n iterations
// for potential speed improvements. Additionally, look into
// NeighborReduce for accumulating hub and auth scores
return retval;
}
bool Stop_Condition(int gpu_num = 0) {
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slices = this->enactor->enactor_slices;
auto &iter = enactor_slices[0].enactor_stats.iteration;
auto &user_iter = data_slice.max_iter;
auto &tol = data_slice.hits_tol;
bool quiet = this->enactor->problem->parameters.template Get<bool>("quiet");
// We haven't done any real work yet
if(iter == 0) return false;
// user defined stop condition
data_slice.cur_error.Move(util::DEVICE, util::HOST);
if(data_slice.cur_error[0] < tol) {
return true;
}
if (iter == user_iter) {
return true;
}
return false;
}
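// The iteration loop therefore stops either once the L1 change of the hub
// scores (accumulated into cur_error by Core()) falls below hits_tol, or
// once max_iter iterations have run.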
/**
* @brief Routine to combine received data and local data
* @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each
* transmission item, typed VertexT
* @tparam NUM_VALUE__ASSOCIATES Number of data associated with each
* transmission item, typed ValueT
* @param received_length The number of transmission items received
* @param[in] peer_ which peer GPU the data came from
* \return cudaError_t error message(s), if any
*/
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
cudaError_t ExpandIncoming(SizeT &received_length, int peer_) {
// ================ INCOMPLETE TEMPLATE - MULTIGPU ====================
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slice =
this->enactor
->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
auto expand_op = [] __host__ __device__(
VertexT & key, const SizeT &in_pos,
VertexT *vertex_associate_ins,
ValueT *value__associate_ins) -> bool { return true; };
cudaError_t retval =
BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES,
NUM_VALUE__ASSOCIATES>(
received_length, peer_, expand_op);
return retval;
}
}; // end of hitsIterationLoop
/**
* @brief Template enactor class.
* @tparam _Problem Problem type we process on
* @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor
* @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor
*/
template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE,
unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault>
class Enactor
: public EnactorBase<
typename _Problem::GraphT, typename _Problem::GraphT::VertexT,
typename _Problem::GraphT::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> {
public:
typedef _Problem Problem;
typedef typename Problem::SizeT SizeT;
typedef typename Problem::VertexT VertexT;
typedef typename Problem::GraphT GraphT;
typedef typename GraphT::VertexT LabelT;
typedef typename GraphT::ValueT ValueT;
typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
BaseEnactor;
typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT;
typedef hitsIterationLoop<EnactorT> IterationT;
Problem *problem;
IterationT *iterations;
/**
* @brief hits constructor
*/
Enactor() : BaseEnactor("hits"), problem(NULL) {
this->max_num_vertex_associates = 0;
this->max_num_value__associates = 1;
}
/**
* @brief hits destructor
*/
virtual ~Enactor() { /*Release();*/
}
/*
* @brief Releasing allocated memory space
* @param target The location to release memory from
* \return cudaError_t error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseEnactor::Release(target));
delete[] iterations;
iterations = NULL;
problem = NULL;
return retval;
}
/**
* @brief Initialize the problem.
* @param[in] problem The problem object.
* @param[in] target Target location of data
* \return cudaError_t error message(s), if any
*/
cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
this->problem = &problem;
// Lazy initialization
GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false));
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0];
auto &graph = problem.sub_graphs[gpu];
GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges,
this->queue_factors));
}
iterations = new IterationT[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
GUARD_CU(iterations[gpu].Init(this, gpu));
}
GUARD_CU(this->Init_Threads(
this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>)));
return retval;
}
/**
* @brief one run of hits, to be called within GunrockThread
* @param thread_data Data for the CPU thread
* \return cudaError_t error message(s), if any
*/
cudaError_t Run(ThreadSlice &thread_data) {
gunrock::app::Iteration_Loop<0, 1, IterationT>(
thread_data, iterations[thread_data.thread_num]);
return cudaSuccess;
}
/**
* @brief Reset enactor
* @param[in] nodes Number of nodes in the graph
* @param[in] target Target location of data
* \return cudaError_t error message(s), if any
*/
cudaError_t Reset(typename GraphT::SizeT nodes,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseEnactor::Reset(target));
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
if (this->num_gpus == 1) {
this->thread_slices[gpu].init_size = 1;
for (int peer_ = 0; peer_ < this->num_gpus; peer_++) {
auto &frontier =
this->enactor_slices[gpu * this->num_gpus + peer_].frontier;
frontier.queue_length = (peer_ == 0) ? nodes : 0;
if (peer_ == 0) {
GUARD_CU(frontier.V_Q()->ForAll(
[] __host__ __device__(VertexT * v_q, const SizeT &pos) {
v_q[pos] = pos;
},
nodes, target, 0));
}
}
} else { // Incomplete/untested
this->thread_slices[gpu].init_size = 0;
for (int peer_ = 0; peer_ < this->num_gpus; peer_++) {
this->enactor_slices[gpu * this->num_gpus + peer_]
.frontier.queue_length = 0;
}
}
}
GUARD_CU(BaseEnactor::Sync());
return retval;
}
/**
* @brief Enacts a hits computing on the specified graph.
...
* \return cudaError_t error message(s), if any
*/
cudaError_t Enact() {
cudaError_t retval = cudaSuccess;
GUARD_CU(this->Run_Threads(this));
util::PrintMsg("GPU Template Done.", this->flag & Debug);
return retval;
}
};
} // namespace hits
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
#include "convolution_cuda.h"
static __device__ inline signed char float2int8(float v)
{
int int32 = static_cast<int>(round(v));
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
}
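// Example of the symmetric clamp above: float2int8(3.4f) == 3,
// float2int8(127.6f) == 127 (clamped), float2int8(-300.f) == -127.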
__global__ void gpu_convolution_cuda_forward(const float* a_input, const ncnn::CudaMatInfo a_info,
const float* weight_data, const ncnn::CudaMatInfo weight_info,
const float* bias_data, const float* activation_params,
float* output, const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info product_info,
const int* const gpu_space_offset) {
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ float buffer[];
float* shared_kptr = buffer;
const int k_index = threadIdx.x;
if (k_index < product_info.maxk)
{
const float* kptr = (const float*)weight_data + product_info.maxk * a_info.c * num_output;
for (int input_channel = 0; input_channel < a_info.c; input_channel++)
{
shared_kptr[input_channel * product_info.maxk + k_index] = kptr[input_channel * product_info.maxk + k_index];
}
}
__syncthreads();
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c)
{
return;
}
float sum = 0.f;
if (product_info.bias_term)
{
sum += bias_data[num_output];
}
for (int input_channel = 0; input_channel < a_info.c; input_channel++)
{
const float* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w;
for (int k = 0; k < product_info.maxk; k++)
{
const float val = sptr [gpu_space_offset[k]];
const float w = shared_kptr[input_channel * product_info.maxk + k];
sum += val * w;
}
}
if (product_info.activation_type == 1)
{
sum = max(sum, 0.f);
}
else if (product_info.activation_type == 2)
{
float slope = activation_params[0];
sum = sum > 0.f ? sum : sum * slope;
}
else if (product_info.activation_type == 3)
{
float min = activation_params[0];
float max = activation_params[1];
if (sum < min)
sum = min;
if (sum > max)
sum = max;
}
else if (product_info.activation_type == 4)
{
sum = static_cast<float>(1.f / (1.f + exp(-sum)));
}
else if (product_info.activation_type == 5)
{
sum = static_cast<float>(sum * tanh(log(exp(sum) + 1.f)));
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
output[output_index] = sum;
}
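// gpu_space_offset holds the maxk per-tap offsets into the input plane. A
// host-side sketch of how such a table is commonly built in ncnn-style code
// (assuming kernel_w/kernel_h and dilation_w/dilation_h parameters; the
// actual setup lives outside this file):
//
//     std::vector<int> space_ofs(maxk);
//     int idx = 0, offset = 0;
//     int gap = w * dilation_h - kernel_w * dilation_w; // w = padded input width
//     for (int i = 0; i < kernel_h; i++) {
//         for (int j = 0; j < kernel_w; j++) {
//             space_ofs[idx++] = offset;
//             offset += dilation_w;
//         }
//         offset += gap;
//     }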
__global__ void gpu_convolution_cuda_forward_02(const float* a_input, const ncnn::CudaMatInfo a_info,
const float* weight_data, const ncnn::CudaMatInfo weight_info,
const float* bias_data, const float* activation_params,
float* output, const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info product_info,
const int* const gpu_space_offset) {
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = (blockIdx.z * blockDim.z + threadIdx.z)/a_info.c;
const int input_channel = (blockIdx.z * blockDim.z + threadIdx.z)%a_info.c;
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c || input_channel >= a_info.c)
{
return;
}
float partial_sum = 0.f;
if (input_channel == 0 && product_info.bias_term)
{
partial_sum += bias_data[num_output];
}
const float* kptr = (const float*)weight_data + product_info.maxk * a_info.c * num_output;
const float* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w;
for (int k = 0; k < product_info.maxk; k++)
{
const float val = sptr[gpu_space_offset[k]];
const float w = kptr[input_channel * product_info.maxk + k];
partial_sum += val * w;
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
atomicAdd(static_cast<float*>(output+output_index), partial_sum);
}
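// Note: this variant accumulates per-input-channel partial sums with
// atomicAdd, so it relies on the output blob being zero-filled before the
// launch; gpu_convolution_cuda_forward_02_sum then applies the activation.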
__global__ void gpu_convolution_cuda_forward_02_sum(const float* activation_params,
float* output,
const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info convolution_info)
{
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = (blockIdx.z * blockDim.z + threadIdx.z);
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c)
{
return;
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
float sum = output[output_index];
if (convolution_info.activation_type == 1)
{
sum = max(sum, 0.f);
}
else if (convolution_info.activation_type == 2)
{
float slope = activation_params[0];
sum = sum > 0.f ? sum : sum * slope;
}
else if (convolution_info.activation_type == 3)
{
float min = activation_params[0];
float max = activation_params[1];
if (sum < min)
sum = min;
if (sum > max)
sum = max;
}
else if (convolution_info.activation_type == 4)
{
sum = static_cast<float>(1.f / (1.f + exp(-sum)));
}
else if (convolution_info.activation_type == 5)
{
sum = static_cast<float>(sum * tanh(log(exp(sum) + 1.f)));
}
output[output_index] = sum;
}
__global__ void gpu_convolution_cuda_forward_int8(const signed char* a_input, const ncnn::CudaMatInfo a_info,
const float* weight_data, const ncnn::CudaMatInfo weight_info,
const float* bias_data, const float* activation_params,
signed char* output, const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info product_info,
const int* const gpu_space_offset,
const float *gpu_weight_data_int8_scales) {
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = blockIdx.z * blockDim.z + threadIdx.z;
extern __shared__ signed char buffer_int8[];
signed char* shared_kptr = buffer_int8;
const int k_index = threadIdx.x;
if (k_index < product_info.maxk)
{
const signed char* kptr = (const signed char*)weight_data + product_info.maxk * a_info.c * num_output;
for (int input_channel = 0; input_channel < a_info.c; input_channel++)
{
shared_kptr[input_channel * product_info.maxk + k_index] = kptr[input_channel * product_info.maxk + k_index];
}
}
__syncthreads();
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c)
{
return;
}
int sum = 0;
for (int input_channel = 0; input_channel < a_info.c; input_channel++)
{
const signed char* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w;
for (int k = 0; k < product_info.maxk; k++)
{
const int val = sptr [gpu_space_offset[k]];
const int w = shared_kptr[input_channel * product_info.maxk + k];
sum += val * w;
}
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
if (product_info.use_int8_requantize)
{
// requantize and relu
float scale_in;
if (gpu_weight_data_int8_scales[num_output] == 0)
scale_in = 0;
else
scale_in = 1.f / (*product_info.gpu_bottom_blob_int8_scale * gpu_weight_data_int8_scales[num_output]);
float sumfp32 = sum * scale_in;
if (product_info.bias_term)
sumfp32 += bias_data[num_output];
float scale_out = *product_info.gpu_top_blob_int8_scale;
signed char sums8 = float2int8(sumfp32 * scale_out);
if (product_info.activation_type == 1)
{
sums8 = max(sums8, (signed char)0);
}
output[output_index] = sums8;
}
else
{
// dequantize and relu
float scale_in;
if (gpu_weight_data_int8_scales[num_output] == 0)
scale_in = 0;
else
scale_in = 1.f / (*product_info.gpu_bottom_blob_int8_scale * gpu_weight_data_int8_scales[num_output]);
float sumfp32 = sum * scale_in;
if (product_info.bias_term)
sumfp32 += bias_data[num_output];
if (product_info.activation_type == 1)
{
sumfp32 = max(sumfp32, 0.f);
}
((float*)output)[output_index] = sumfp32;
}
}
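// Scale handling in the int8 kernel above: the int32 accumulator is brought
// back to float with scale_in = 1 / (bottom_blob_int8_scale *
// weight_data_int8_scales[num_output]). With requantization enabled the
// result is further multiplied by top_blob_int8_scale and clamped back to
// int8; otherwise the float value is stored directly.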
__global__ void gpu_convolution_cuda_forward_03(const float* a_input, const ncnn::CudaMatInfo a_info,
const float* weight_data, const ncnn::CudaMatInfo weight_info,
const float* bias_data, const float* activation_params,
float* output, const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info product_info,
const int* const gpu_space_offset)
{
const int input_column = blockIdx.x * blockDim.x + threadIdx.x;
const int input_row = blockIdx.y * blockDim.y + threadIdx.y;
const int input_channel = blockIdx.z * blockDim.z + threadIdx.z;
// const int block_idx = blockIdx.z* gridDim.x*gridDim.y+blockIdx.y*blockDim.x+blockIdx.x;
extern __shared__ float buffer[];
float* shared_input = buffer;
float* shared_kptr = buffer + (blockDim.z * blockDim.x * blockDim.y) * product_info.maxk;
float* shared_partial_sums = buffer + (blockDim.z * blockDim.x * blockDim.y * product_info.maxk) + (blockDim.z * product_info.maxk);
const int buffer_column = threadIdx.x;
const int buffer_row = threadIdx.y;
const int buffer_channel = threadIdx.z;
const int shared_partial_sums_index = buffer_channel * blockDim.x * blockDim.y + buffer_row * blockDim.x + buffer_column;
shared_partial_sums[shared_partial_sums_index] = 0;
if (input_column >= a_info.w || input_row >= a_info.h || input_channel >= a_info.c)
{
return;
}
const int output_row = input_row / product_info.stride_h;
const int output_column = input_column / product_info.stride_w;
for (int k_index=0; k_index<product_info.maxk; ++k_index)
{
const int input_index = input_channel * a_info.cstep + input_row * a_info.w + input_column + gpu_space_offset[k_index];
const int buffer_index = buffer_channel * product_info.maxk * blockDim.x * blockDim.y + buffer_row * blockDim.x * product_info.maxk + buffer_column * product_info.maxk + k_index;
shared_input[buffer_index] = a_input[input_index];
// if ((input_row >=0 && input_row<=1) && input_channel == 0 && (input_column == 3 || input_column == 4)) {
// printf("GPU input: input_channel: %d input_row: %d input_column: %d block_idx: %d output_row: %d output_column: %d buffer_channel: %d buffer_row: %d buffer_column: %d k_index:%d input_index: %d buffer_index: %d gpu_space_offset[k_index]: %d value: %f\n ",
// input_channel, input_row, input_column, block_idx, output_row, output_column, buffer_channel, buffer_row, buffer_column, k_index, input_index, buffer_index, gpu_space_offset[k_index], a_input[input_index]);
// }
}
__syncthreads();
if (output_column >= output_info.w || output_row >= output_info.h)
return;
if ((input_row % product_info.stride_h != 0) || (input_column % product_info.stride_w != 0))
return;
for (int num_output = 0; num_output < product_info.num_output; ++num_output)
{
//load mask
const float* kptr = (const float*)weight_data + product_info.maxk * a_info.c * num_output;
// if (buffer_row == 0 && buffer_column == 0)
{
for (int k2 = 0; k2 < product_info.maxk; k2++)
{
shared_kptr[buffer_channel * product_info.maxk + k2] = kptr[input_channel * product_info.maxk + k2];
// if (num_output == 0 && (input_channel >= 0 && input_channel <= 0))
// printf("GPU KPTR block_idx: %d kernel_w: %d kernel_h: %d input_channel:%d buffer_channel: %d product_info.maxk: %d buffer_row: %d buffer_column:%d value: %f k:%d\n",
// block_idx, product_info.kernel_w, product_info.kernel_h, input_channel, buffer_channel, product_info.maxk, buffer_row, buffer_column, shared_kptr[buffer_channel * product_info.maxk + k2], k2);
}
}
__syncthreads();
float partial_sum = 0.f;
if (buffer_channel == 0 && product_info.bias_term)
{
partial_sum += bias_data[num_output];
}
for (int k = 0; k < product_info.maxk; k++)
{
const float val = shared_input[buffer_channel * product_info.maxk * blockDim.x * blockDim.y + buffer_row * blockDim.x * product_info.maxk + buffer_column * product_info.maxk + k];
const float w = shared_kptr[buffer_channel * product_info.maxk + k];
partial_sum += val * w;
// if (num_output == 0 && output_row == 0 && output_column == 2 && (input_channel >= 0 && input_channel <= 0))
// printf("GPU block_idx: %d stride_w: %d stride_h: %d buffer_channel: %d input channel: %d input_row: %d input_column: %d num_output: %d output_row: %d output_column: %d maxk: %d k: %d buffer index: %d val: %f w: %f partial_sum: %f\n",
// block_idx, product_info.stride_w, product_info.stride_h, buffer_channel, input_channel,
// input_row, input_column, num_output, output_row, output_column, product_info.maxk, k, buffer_channel * product_info.maxk + k, val, w, partial_sum);
}
shared_partial_sums[shared_partial_sums_index] = partial_sum;
__syncthreads();
if (buffer_channel == 0)
{
float num_output_block_sum = 0.f;
const int min_z = a_info.c < blockDim.z ? a_info.c : blockDim.z;
for (int i = 0; i < min_z; ++i)
{
const int current_shared_partial_sums_index = i * blockDim.x * blockDim.y + buffer_row * blockDim.x + buffer_column;
num_output_block_sum += shared_partial_sums[current_shared_partial_sums_index];
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
atomicAdd(static_cast<float*>(output + output_index), num_output_block_sum);
}
__syncthreads();
}
}
__global__ void gpu_convolution_cuda_forward_03_sum(const float* activation_params,
float* output,
const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info convolution_info)
{
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = (blockIdx.z * blockDim.z + threadIdx.z);
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c)
{
return;
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
float sum = output[output_index];
if (convolution_info.activation_type == 1)
{
sum = max(sum, 0.f);
}
else if (convolution_info.activation_type == 2)
{
float slope = activation_params[0];
sum = sum > 0.f ? sum : sum * slope;
}
else if (convolution_info.activation_type == 3)
{
float min = activation_params[0];
float max = activation_params[1];
if (sum < min)
sum = min;
if (sum > max)
sum = max;
}
else if (convolution_info.activation_type == 4)
{
sum = static_cast<float>(1.f / (1.f + exp(-sum)));
}
else if (convolution_info.activation_type == 5)
{
sum = static_cast<float>(sum * tanh(log(exp(sum) + 1.f)));
}
output[output_index] = sum;
}
__global__ void gpu_convolution_cuda_transform(const float* a_input, const ncnn::CudaMatInfo a_info, float* scratchpad_memory)
{
const int input_column = blockIdx.x * blockDim.x + threadIdx.x;
const int input_row = blockIdx.y * blockDim.y + threadIdx.y;
const int input_channel = blockIdx.z * blockDim.z + threadIdx.z;
if (input_column >= a_info.w || input_row >= a_info.h || input_channel >= a_info.c)
{
return;
}
int input_index = input_channel * a_info.cstep + input_row * a_info.w + input_column;
int output_index = input_row*a_info.w*a_info.c + input_column*a_info.c + input_channel;
scratchpad_memory[output_index] = a_input[input_index];
}
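// The transform above repacks the input from the channel-major layout
// (index = c * cstep + y * w + x) into an interleaved layout
// (index = y * w * channels + x * channels + c). For example, with w = 4 and
// channels = 3, the element at (c = 2, y = 1, x = 0) lands at scratchpad
// index 1 * 4 * 3 + 0 * 3 + 2 = 14.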
__global__ void gpu_convolution_cuda_forward_04(const float* a_input, const ncnn::CudaMatInfo a_info,
const float* weight_data, const ncnn::CudaMatInfo weight_info,
const float* bias_data, const float* activation_params,
float* output, const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info convolution_info,
const int* const gpu_space_offset,
float* scratchpad_memory) {
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = (blockIdx.z * blockDim.z + threadIdx.z) % convolution_info.num_output;
const int k_index = (blockIdx.z * blockDim.z + threadIdx.z) / convolution_info.num_output;
// const int block_idx = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * blockDim.x + blockIdx.x;
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c || k_index >= convolution_info.maxk)
{
return;
}
float partial_sum = 0.f;
if (convolution_info.bias_term && k_index == 0)
{
partial_sum += bias_data[num_output];
}
const float* kptr = (const float*)weight_data + convolution_info.maxk * a_info.c * num_output;
const float* sptr = a_input + output_row * convolution_info.stride_h * a_info.w * a_info.c + output_column * convolution_info.stride_w * a_info.c
+ gpu_space_offset[k_index] * a_info.c;
for (int input_channel = 0; input_channel < a_info.c; input_channel++)
{
const float val = sptr[input_channel];
const float w = kptr[input_channel * convolution_info.maxk + k_index];
partial_sum += val * w;
}
const int scratchpad_index = (a_info.c*a_info.w*a_info.h)+(num_output * output_info.w * output_info.h + output_row * output_info.w + output_column)*convolution_info.maxk+ k_index;
scratchpad_memory[scratchpad_index] = partial_sum;
}
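// Scratchpad layout used by the _04 variants (as implied by the index math
// above): the first a_info.c * a_info.w * a_info.h floats hold the
// interleaved input written by gpu_convolution_cuda_transform; after that,
// each output element owns maxk consecutive floats with its per-tap partial
// sums, which gpu_convolution_cuda_forward_04_sum reduces and activates.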
__global__ void gpu_convolution_cuda_forward_04_sum(const float* activation_params,
float* output,
const ncnn::CudaMatInfo input_info,
const ncnn::CudaMatInfo output_info,
const ncnn::Convolution_cuda::Convolution_info convolution_info,
float* scratchpad_memory)
{
const int output_column = blockIdx.x * blockDim.x + threadIdx.x;
const int output_row = blockIdx.y * blockDim.y + threadIdx.y;
const int num_output = (blockIdx.z * blockDim.z + threadIdx.z);
if (output_column >= output_info.w || output_row >= output_info.h || num_output >= output_info.c)
{
return;
}
const int output_index = num_output * output_info.cstep + output_row * output_info.w + output_column;
float sum = 0;
const int scratchpad_index = (input_info.c * input_info.w * input_info.h) + (num_output * output_info.w * output_info.h
+ output_row * output_info.w + output_column) * convolution_info.maxk;
for (int i = 0; i < convolution_info.maxk; ++i)
{
sum += scratchpad_memory[scratchpad_index + i];
}
if (convolution_info.activation_type == 1)
{
sum = max(sum, 0.f);
}
else if (convolution_info.activation_type == 2)
{
float slope = activation_params[0];
sum = sum > 0.f ? sum : sum * slope;
}
else if (convolution_info.activation_type == 3)
{
float min = activation_params[0];
float max = activation_params[1];
if (sum < min)
sum = min;
if (sum > max)
sum = max;
}
else if (convolution_info.activation_type == 4)
{
sum = static_cast<float>(1.f / (1.f + exp(-sum)));
}
else if (convolution_info.activation_type == 5)
{
sum = static_cast<float>(sum * tanh(log(exp(sum) + 1.f)));
}
output[output_index] = sum;
}
namespace ncnn {
int convolution_cuda_forward_04(const CudaMat& bottom_blob, CudaMat& top_blob, const Convolution_cuda::Convolution_info& info,
float* gpu_scratchpad_memory, int gpu_scratchpad_memory_size)
{
//transform input
if ((bottom_blob.total() + top_blob.total()*info.maxk)*sizeof(float) > gpu_scratchpad_memory_size) {
std::cout << "CONVOLUTION current scratchpad memory: " << gpu_scratchpad_memory_size << " required: "
<< (bottom_blob.total() + top_blob.total() * info.maxk) * sizeof(float) << std::endl;
throw std::runtime_error("Not enough scratchpad memory");
}
int thread_per_block_transform_x = ((bottom_blob.w - 1) / 16 + 1) * 16;
if (thread_per_block_transform_x > 16) thread_per_block_transform_x = 16;
int thread_per_block_transform_y = ((bottom_blob.h - 1) / 2 + 1) * 2;
if (thread_per_block_transform_y > 2) thread_per_block_transform_y = 2;
const int thread_per_block_transform_z = 16;
const int total_number_of_columns_transform = bottom_blob.w;
const int total_number_of_rows_transform = bottom_blob.h;
const int total_number_of_channels_transform = bottom_blob.c;
const dim3 block_size_transform(thread_per_block_transform_x, thread_per_block_transform_y, thread_per_block_transform_z);
const dim3 grid_size_transform((total_number_of_columns_transform - 1) / thread_per_block_transform_x + 1,
(total_number_of_rows_transform - 1) / thread_per_block_transform_y + 1,
(total_number_of_channels_transform - 1) / thread_per_block_transform_z + 1);
const ncnn::CudaMatInfo bottom_blob_info{bottom_blob};
const ncnn::CudaMatInfo top_blob_info{top_blob};
const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data};
gpu_convolution_cuda_transform<<<grid_size_transform, block_size_transform>>>(static_cast<const float*>(bottom_blob.get_craw_data()),
bottom_blob_info,
gpu_scratchpad_memory);
const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk;
int thread_per_block_x = ((number_of_threads - 1) / 8 + 1) * 8;
if (thread_per_block_x > 8) thread_per_block_x = 8;
int thread_per_block_y = ((top_blob.h - 1) / 2 + 1) * 2;
if (thread_per_block_y > 2) thread_per_block_y = 2;
int thread_per_block_z = ((top_blob.c - 1) / 32 + 1) * 32;
if (thread_per_block_z > 64) thread_per_block_z = 64;
const int total_number_of_columns = top_blob.w;
const int total_number_of_rows = top_blob.h;
const int total_number_of_channels = top_blob.c * info.maxk;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
gpu_convolution_cuda_forward_04<<<grid_size, block_size>>>(static_cast<const float*>(gpu_scratchpad_memory),
bottom_blob_info,
static_cast<const float*>(info.gpu_weight_data->get_craw_data()),
weight_info,
static_cast<const float*>(info.gpu_bias_data->get_craw_data()),
static_cast<const float*>(info.gpu_activation_params->get_craw_data()),
static_cast<float*>(top_blob.get_raw_data()),
top_blob_info,
info,
static_cast<const int*>(info.gpu_space_ofs),
gpu_scratchpad_memory);
int thread_per_block_sum_x = ((top_blob.w - 1) / 16 + 1) * 16;
if (thread_per_block_sum_x > 16) thread_per_block_sum_x = 16;
int thread_per_block_sum_y = ((bottom_blob.h - 1) / 2 + 1) * 2;
if (thread_per_block_sum_y > 2) thread_per_block_sum_y = 2;
const int thread_per_block_sum_z = 16;
const int total_number_of_columns_sum = top_blob.w;
const int total_number_of_rows_sum = top_blob.h;
const int total_number_of_channels_sum = top_blob.c;
const dim3 block_size_sum(thread_per_block_sum_x, thread_per_block_sum_y, thread_per_block_sum_z);
const dim3 grid_size_sum((total_number_of_columns_sum - 1) / thread_per_block_sum_x + 1,
(total_number_of_rows_sum - 1) / thread_per_block_sum_y + 1,
(total_number_of_channels_sum - 1) / thread_per_block_sum_z + 1);
gpu_convolution_cuda_forward_04_sum<<<grid_size_sum, block_size_sum>>>(static_cast<const float*>(info.gpu_activation_params->get_craw_data()),
static_cast<float*>(top_blob.get_raw_data()),
bottom_blob_info,
top_blob_info,
info,
gpu_scratchpad_memory);
return 0;
}
int convolution_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, const Convolution_cuda::Convolution_info& info)
{
const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk;
int thread_per_block_x = ((number_of_threads - 1) / 32 + 1) * 32;
if (thread_per_block_x > 64) thread_per_block_x = 64;
int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = top_blob.c;
const int total_number_of_columns = top_blob.w;
const int total_number_of_rows = top_blob.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
const ncnn::CudaMatInfo bottom_blob_info{bottom_blob};
const ncnn::CudaMatInfo top_blob_info{top_blob};
const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data};
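// The third launch argument below reserves dynamic shared memory for one
// output channel's weights: bottom_blob.c * maxk floats. For example, 256
// input channels with a 3x3 kernel need 256 * 9 * 4 = 9216 bytes, which has
// to fit within the device's shared-memory-per-block limit.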
gpu_convolution_cuda_forward<<<grid_size, block_size, bottom_blob.c * info.maxk * sizeof(float)>>>(static_cast<const float*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<const float*>(info.gpu_weight_data->get_craw_data()),
weight_info,
static_cast<const float*>(info.gpu_bias_data->get_craw_data()),
static_cast<const float*>(info.gpu_activation_params->get_craw_data()),
static_cast<float*>(top_blob.get_raw_data()),
top_blob_info,
info,
static_cast<const int*>(info.gpu_space_ofs));
return 0;
}
int convolution_cuda_forward_03(const CudaMat& bottom_blob, CudaMat& top_blob, const Convolution_cuda::Convolution_info& info)
{
int temp = 32*1024/(info.maxk*sizeof(float));
int thread_per_block_x = 3;
int thread_per_block_y = 3;
int thread_per_block_z = temp/(thread_per_block_x*thread_per_block_y);
if (thread_per_block_z > 64) thread_per_block_z = 64;
const int total_number_of_channels = bottom_blob.c;
const int total_number_of_columns = bottom_blob.w;
const int total_number_of_rows = bottom_blob.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
const ncnn::CudaMatInfo bottom_blob_info{bottom_blob};
const ncnn::CudaMatInfo top_blob_info{top_blob};
const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data};
const int shared_mem_size = ((thread_per_block_z * thread_per_block_x * thread_per_block_y * (info.maxk)) + (info.maxk*thread_per_block_z)
+ thread_per_block_z * thread_per_block_x * thread_per_block_y) * sizeof(float);
// static int counter = 0;
// std::cout << "COUNTER: " << counter << " shared_mem_size: " << shared_mem_size << " grid_size: x:" << grid_size.x << " y: " << grid_size.y << " z:" << grid_size.z << " block_size: x: " << block_size.x << " y: " << block_size.y <<
// " z:" << block_size.z << std::endl;
// counter++;
// std::cout << "Padding: left: " << info.pad_left << " right: " << info.pad_right << std::endl;
gpu_convolution_cuda_forward_03<<<grid_size, block_size, shared_mem_size>>>
(static_cast<const float*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<const float*>(info.gpu_weight_data->get_craw_data()),
weight_info,
static_cast<const float*>(info.gpu_bias_data->get_craw_data()),
static_cast<const float*>(info.gpu_activation_params->get_craw_data()),
static_cast<float*>(top_blob.get_raw_data()),
top_blob_info,
info,
static_cast<const int*>(info.gpu_space_ofs));
int thread_per_block_x_sum = ((top_blob.w - 1) / 32 + 1) * 32;
if (thread_per_block_x_sum > 32) thread_per_block_x_sum = 32;
int thread_per_block_y_sum = ((top_blob.h - 1) / 8 + 1) * 8;
if (thread_per_block_y_sum > 8) thread_per_block_y_sum = 8;
const int thread_per_block_z_sum = 4;
const int total_number_of_channels_sum = top_blob.c;
const int total_number_of_columns_sum = top_blob.w;
const int total_number_of_rows_sum = top_blob.h;
const dim3 block_size_sum(thread_per_block_x_sum, thread_per_block_y_sum, thread_per_block_z_sum);
const dim3 grid_size_sum((total_number_of_columns_sum - 1) / thread_per_block_x_sum + 1,
(total_number_of_rows_sum - 1) / thread_per_block_y_sum + 1,
(total_number_of_channels_sum - 1) / thread_per_block_z_sum + 1);
// std::cout << "shared_mem_size: " << shared_mem_size << " grid_size_sum: x:" << grid_size_sum.x << " y: " << grid_size_sum.y << " z:" << grid_size_sum.z << " block_size_sum: x: "
// << block_size_sum.x << " y: " << block_size_sum.y << " z:" << block_size_sum.z << std::endl;
gpu_convolution_cuda_forward_03_sum<<<grid_size_sum, block_size_sum>>>(static_cast<const float*>(info.gpu_activation_params->get_craw_data()),
static_cast<float*>(top_blob.get_raw_data()),
top_blob_info,
info);
return 0;
}
int convolution_cuda_forward_int8(const CudaMat& bottom_blob, CudaMat& top_blob, const Convolution_cuda::Convolution_info& info)
{
const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk;
int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = top_blob.c;
const int total_number_of_columns = top_blob.w;
const int total_number_of_rows = top_blob.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
const ncnn::CudaMatInfo bottom_blob_info{bottom_blob};
const ncnn::CudaMatInfo top_blob_info{top_blob};
const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data};
gpu_convolution_cuda_forward_int8<<<grid_size, block_size, bottom_blob.c * info.maxk * sizeof(signed char)>>>(static_cast<const signed char*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<const float*>(info.gpu_weight_data->get_craw_data()),
weight_info,
static_cast<const float*>(info.gpu_bias_data->get_craw_data()),
static_cast<const float*>(info.gpu_activation_params->get_craw_data()),
static_cast<signed char*>(top_blob.get_raw_data()),
top_blob_info,
info,
static_cast<const int*>(info.gpu_space_ofs),
static_cast<const float*>(info.gpu_weight_data_int8_scales->get_craw_data()));
return 0;
}
}
#include <stdio.h>
#include <cutil_inline.h>
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 32
#define BLOCK_ROWS 8
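// With TILE_DIM = 32 and BLOCK_ROWS = 8, each block runs 32x8 = 256 threads
// and every thread handles TILE_DIM/BLOCK_ROWS = 4 elements of its 32x32 tile.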
// modified so it doesn't take forever to run in emulation mode
#if 1
int MATRIX_SIZE_X = 128;
int MATRIX_SIZE_Y = 128;
int MUL_FACTOR = 4;
#else
int MATRIX_SIZE_X = 2048;
int MATRIX_SIZE_Y = 2048;
int MUL_FACTOR = 32;
#endif
// Number of repetitions used for timing. Two sets of repetitions are performed:
// 1) over kernel launches and 2) inside the kernel over just the loads and stores
#define NUM_REPS 100
// -------------------------------------------------------
// Copies
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void copy(float *odata, float* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index+i*width] = idata[index+i*width];
}
}
}
__global__ void copySharedMem(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index+i*width] = tile[threadIdx.y+i][threadIdx.x];
}
}
}
// -------------------------------------------------------
// Transposes
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void transposeNaive(float *odata, float* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
// coalesced transpose (with bank conflicts)
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// Coalesced transpose with no bank conflicts
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// Transpose that effectively reorders execution of thread blocks along diagonals of the
// matrix (also coalesced and has no bank conflicts)
//
// Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as
// corresponding to different diagonals
//
// blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly
// used cartesian coordinates so that the only changes to the code from the coalesced version
// are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and
// blockIdx.y with the subscripted versions in the remaining code
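// Worked example for a square grid with gridDim.x == 4: the block launched
// with blockIdx = (1, 2) gets blockIdx_y = 1 and blockIdx_x = (1 + 2) % 4 = 3,
// so it processes the tile in column 3, row 1 instead of column 1, row 2.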
__global__ void transposeDiagonal(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x*blockIdx.y;
blockIdx_y = bid%gridDim.y;
blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// --------------------------------------------------------------------
// Partial transposes
// NB: the coarse- and fine-grained routines only perform part of a
// transpose and will fail the test against the reference solution
//
// They are used to assess performance characteristics of different
// components of a full transpose
// --------------------------------------------------------------------
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int r=0; r<nreps; r++) {
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
}
}
__global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r<nreps; r++) {
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x];
}
}
}
// ---------------------
// host utility routines
// ---------------------
void computeTransposeGold(float* gold, float* idata,
const int size_x, const int size_y)
{
for( int y = 0; y < size_y; ++y) {
for( int x = 0; x < size_x; ++x) {
gold[(x * size_y) + y] = idata[(y * size_x) + x];
}
}
}
// ----
// main
// ----
int
main( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
int devID;
cudaDeviceProp props;
// get number of SMs on this GPU
cutilSafeCall(cudaGetDevice(&devID));
cutilSafeCall(cudaGetDeviceProperties(&props, devID));
int SMcount = 32 / props.multiProcessorCount;
printf("CUDA device has %d Multi-Processors\n", props.multiProcessorCount);
// set matrix size
int size_x = max(MATRIX_SIZE_X / SMcount, TILE_DIM*MUL_FACTOR);
int size_y = max(MATRIX_SIZE_Y / SMcount, TILE_DIM*MUL_FACTOR);
if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) {
printf("Matrix size must be integral multiple of tile size\n\nTest PASSED\n");
cutilExit(argc, argv);
}
// kernel pointer and descriptor
void (*kernel)(float *, float *, int, int, int);
const char *kernelName;
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
// CUDA events
cudaEvent_t start, stop;
// size of memory required to store the matrix
const int mem_size = sizeof(float) * size_x*size_y;
// allocate host memory
float *h_idata = (float*) malloc(mem_size);
float *h_odata = (float*) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// allocate device memory
float *d_idata, *d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size) );
cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size) );
// initialize host data
for( int i = 0; i < (size_x*size_y); ++i)
h_idata[i] = (float) i;
// copy host data to device
cutilSafeCall( cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
printf("\nMatrix size: %dx%d, tile size: %dx%d, block size: %dx%d\n\n",
size_x, size_y, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
printf("Kernel\t\t\tLoop over kernel\tLoop within kernel\n");
printf("------\t\t\t----------------\t------------------\n");
// initialize events
cutilSafeCall( cudaEventCreate(&start) );
cutilSafeCall( cudaEventCreate(&stop) );
//
// loop over different kernels
//
CUTBoolean success = CUTTrue;
for (int k = 0; k<8; k++) {
// set kernel pointer
switch (k) {
case 0:
kernel = &copy; kernelName = "simple copy "; break;
case 1:
kernel = &copySharedMem; kernelName = "shared memory copy "; break;
case 2:
kernel = &transposeNaive; kernelName = "naive transpose "; break;
case 3:
kernel = &transposeCoalesced; kernelName = "coalesced transpose "; break;
case 4:
kernel = &transposeNoBankConflicts; kernelName = "no bank conflict trans"; break;
case 5:
kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break;
case 6:
kernel = &transposeFineGrained; kernelName = "fine-grained "; break;
case 7:
kernel = &transposeDiagonal; kernelName = "diagonal transpose "; break;
}
// set reference solution
if (kernel == &copy || kernel == &copySharedMem) {
gold = h_idata;
} else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) {
gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check
} else {
gold = transposeGold;
}
// warmup to avoid timing startup
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y, 1);
// take measurements for loop over kernel launches
cutilSafeCall( cudaEventRecord(start, 0) );
for (int i=0; i < NUM_REPS; i++) {
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y, 1);
}
cutilSafeCall( cudaEventRecord(stop, 0) );
cutilSafeCall( cudaEventSynchronize(stop) );
float outerTime;
cutilSafeCall( cudaEventElapsedTime(&outerTime, start, stop) );
cutilSafeCall( cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost) );
CUTBoolean res = cutComparef(gold, h_odata, size_x*size_y);
if (res == CUTFalse) {
printf("*** %s kernel FAILED ***\n", kernelName);
success = CUTFalse;
}
// take measurements for loop inside kernel
cutilSafeCall( cudaEventRecord(start, 0) );
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y, NUM_REPS);
cutilSafeCall( cudaEventRecord(stop, 0) );
cutilSafeCall( cudaEventSynchronize(stop) );
float innerTime;
cutilSafeCall( cudaEventElapsedTime(&innerTime, start, stop) );
cutilSafeCall( cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost) );
res = cutComparef(gold, h_odata, size_x*size_y);
if (res == CUTFalse) {
printf("*** %s kernel FAILED ***\n", kernelName);
success = CUTFalse;
}
// report effective bandwidths
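// effective bandwidth = 2 * bytes moved / time: the factor of 2 counts one global read
// plus one global write of the whole matrix, mem_size/(1024^3) converts bytes to GiB,
// and 1000/(time/NUM_REPS) converts the measured per-launch time from ms to seconds.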
float outerBandwidth = 2.*1000*mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
float innerBandwidth = 2.*1000*mem_size/(1024*1024*1024)/(innerTime/NUM_REPS);
printf("%s\t%5.2f GB/s\t\t%5.2f GB/s\n", kernelName, outerBandwidth, innerBandwidth);
}
printf("\nTest %s\n", (success == CUTTrue) ? "PASSED" : "FAILED");
// cleanup
free(h_idata);
free(h_odata);
free(transposeGold);
cutilSafeCall( cudaFree(d_idata) );
cutilSafeCall( cudaFree(d_odata) );
cutilSafeCall( cudaEventDestroy(start) );
cutilSafeCall( cudaEventDestroy(stop) );
cudaThreadExit();
cutilExit(argc, argv);
return 0;
}
// Enabled to print a bunch of junk during solving
#define DEBUG_PRINT_SOLVER_INFO 0
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N)
{
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
//This command gets the lane ID within the current warp
unsigned int laneid;
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], residual);
}
}
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(cudaDeviceSynchronize());
timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
return residual;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
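// Sketch of how the kernels below realize the PCG recurrences (Wikipedia notation,
// M^-1 = preconditioner, A = J^T J from the Gauss-Newton normal equations):
//   Init:   r_0 = -J^T F,  p_0 = M^-1 r_0,  rz_0 = r_0^T M^-1 r_0             -> PCGInit_Kernel1/2
//   Step 1: denom_k = p_k^T A p_k                                             -> PCGStep_Kernel1 (d_scanAlpha)
//   Step 2: alpha_k = rz_k / denom_k,  delta_{k+1} = delta_k + alpha_k p_k,
//           r_{k+1} = r_k - alpha_k A p_k,  z_{k+1} = M^-1 r_{k+1},
//           rz_{k+1} = r_{k+1}^T z_{k+1}                                      -> PCGStep_Kernel2 (d_scanBeta)
//   Step 3: beta_k = rz_{k+1} / rz_k,  p_{k+1} = z_{k+1} + beta_k p_k         -> PCGStep_Kernel3
// The unknowns are split into the X and A blocks (d_* / d_*A), whose per-variable dot
// products are summed into the same scalars.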
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 residuumA;
const float3 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = -J^T F - A*delta_0 = -J^T F, since delta_0 == 0
state.d_r[x] = residuum; // store for next iteration
state.d_rA[x] = residuumA; // store for next iteration
const float3 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1
state.d_p[x] = p;
const float3 pA = state.d_precondionerA[x] * residuumA; // apply preconditioner M^-1
state.d_pA[x] = pA;
d = dot(residuum, p) + dot(residuumA, pA); // x-th term of the numerator for computing alpha and of the denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(N, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( cudaMemcpy(&temp, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanAlpha (Init): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 tmpA;
const float3 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
state.d_Ap_X[x] = tmp; // store for next kernel call
state.d_Ap_A[x] = tmpA; // store for next kernel call
d = dot(state.d_p[x], tmp) + dot(state.d_pA[x], tmpA); // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // take a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // take a descent step
float3 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float3 rA = state.d_rA[x] - alpha*state.d_Ap_A[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float3 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float3 zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + dot(zA, rA); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_x[x] = state.d_x[x] + state.d_delta[x];
state.d_a[x] = state.d_a[x] + state.d_deltaA[x];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters)
{
CUDATimer timer;
double residual = EvalResidual(input, state, parameters, timer);
printf("%f\n", residual);
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
Initialization(input, state, parameters, timer);
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
ApplyLinearUpdate(input, state, parameters, timer);
residual = EvalResidual(input, state, parameters, timer);
printf("%f\n", residual);
timer.nextIteration();
}
timer.evaluate();
return residual;
}
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
//#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <vector>
#include "voxlib_common.h"
struct RVIP_Params {
int voxel_dims[3];
int voxel_strides[3];
int max_samples;
int img_dims[2];
// Camera parameters
float cam_ori[3];
float cam_fwd[3];
float cam_side[3];
float cam_up[3];
float cam_c[2];
float cam_f;
//unsigned long seed;
};
/*
out_voxel_id: torch CUDA int32 [ img_dims[0], img_dims[1], max_samples, 1]
out_depth: torch CUDA float [2, img_dims[0], img_dims[1], max_samples, 1]
out_raydirs: torch CUDA float [ img_dims[0], img_dims[1], 1, 3]
Image coordinates refer to the center of the pixel
[0, 0, 0] at voxel coordinate is at the corner of the corner block (instead of at the center)
*/
template <int TILE_DIM>
static __global__ void ray_voxel_intersection_perspective_kernel(int32_t* __restrict__ out_voxel_id, float* __restrict__ out_depth, float* __restrict__ out_raydirs,
const int32_t* __restrict__ in_voxel, const RVIP_Params p) {
int img_coords[2];
img_coords[1] = blockIdx.x*TILE_DIM+threadIdx.x;
img_coords[0] = blockIdx.y*TILE_DIM+threadIdx.y;
if (img_coords[0] >= p.img_dims[0] || img_coords[1] >= p.img_dims[1]) {
return;
}
int pix_index = img_coords[0] * p.img_dims[1] + img_coords[1];
// Calculate ray origin and direction
float rayori[3], raydir[3];
rayori[0] = p.cam_ori[0];
rayori[1] = p.cam_ori[1];
rayori[2] = p.cam_ori[2];
// Camera intrinsics
float ndc_imcoords[2];
ndc_imcoords[0] = p.cam_c[0] - (float)img_coords[0]; // Flip height
ndc_imcoords[1] = (float)img_coords[1] - p.cam_c[1];
raydir[0] = p.cam_up[0] * ndc_imcoords[0] + p.cam_side[0] * ndc_imcoords[1] + p.cam_fwd[0] * p.cam_f;
raydir[1] = p.cam_up[1] * ndc_imcoords[0] + p.cam_side[1] * ndc_imcoords[1] + p.cam_fwd[1] * p.cam_f;
raydir[2] = p.cam_up[2] * ndc_imcoords[0] + p.cam_side[2] * ndc_imcoords[1] + p.cam_fwd[2] * p.cam_f;
normalize<float, 3>(raydir);
// Save out_raydirs
out_raydirs[pix_index*3] = raydir[0];
out_raydirs[pix_index*3+1] = raydir[1];
out_raydirs[pix_index*3+2] = raydir[2];
float axis_t[3];
int axis_int[3];
//int axis_intbound[3];
// Current voxel
axis_int[0] = floorf(rayori[0]);
axis_int[1] = floorf(rayori[1]);
axis_int[2] = floorf(rayori[2]);
#pragma unroll
for (int i=0; i<3; i++) {
if (raydir[i] > 0) {
// Initial t value
// Handle boundary case where rayori[i] is a whole number. Always round Up for the next block
//axis_t[i] = (ceilf(nextafterf(rayori[i], HUGE_VALF)) - rayori[i]) / raydir[i];
axis_t[i] = ((float)(axis_int[i]+1) - rayori[i]) / raydir[i];
} else if (raydir[i] < 0) {
axis_t[i] = ((float)axis_int[i] - rayori[i]) / raydir[i];
} else {
axis_t[i] = HUGE_VALF;
}
}
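// At this point axis_t[i] is the ray parameter t at which the ray crosses the next voxel
// boundary along axis i (HUGE_VALF if the ray is parallel to that axis) and axis_int[]
// is the current voxel coordinate. The loop below repeatedly steps along the axis with
// the smallest axis_t, a grid traversal in the style of Amanatides & Woo.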
// Fused raymarching and sampling
bool quit = false;
for (int cur_plane=0; cur_plane < p.max_samples; cur_plane++) { // Last cycle is for calculating p2
float t = nanf("0");
float t2 = nanf("0");
int32_t blk_id = 0;
// Find the next intersection
while (!quit) {
// Find the next smallest t
float tnow;
/*
#pragma unroll
for (int i=0; i<3; i++) {
if (axis_t[i] <= axis_t[(i+1)%3] && axis_t[i] <= axis_t[(i+2)%3]) {
// Update current t
tnow = axis_t[i];
// Update t candidates
if (raydir[i] > 0) {
axis_int[i] += 1;
if (axis_int[i] >= p.voxel_dims[i]) {
quit = true;
}
axis_t[i] = ((float)(axis_int[i]+1) - rayori[i]) / raydir[i];
} else {
axis_int[i] -= 1;
if (axis_int[i] < 0) {
quit = true;
}
axis_t[i] = ((float)axis_int[i] - rayori[i]) / raydir[i];
}
break; // Avoid advancing multiple steps as axis_t is updated
}
}
*/
// Hand unroll
if (axis_t[0] <= axis_t[1] && axis_t[0] <= axis_t[2]) {
// Update current t
tnow = axis_t[0];
// Update t candidates
if (raydir[0] > 0) {
axis_int[0] += 1;
if (axis_int[0] >= p.voxel_dims[0]) {
quit = true;
}
axis_t[0] = ((float)(axis_int[0]+1) - rayori[0]) / raydir[0];
} else {
axis_int[0] -= 1;
if (axis_int[0] < 0) {
quit = true;
}
axis_t[0] = ((float)axis_int[0] - rayori[0]) / raydir[0];
}
} else if (axis_t[1] <= axis_t[2]) {
tnow = axis_t[1];
if (raydir[1] > 0) {
axis_int[1] += 1;
if (axis_int[1] >= p.voxel_dims[1]) {
quit = true;
}
axis_t[1] = ((float)(axis_int[1]+1) - rayori[1]) / raydir[1];
} else {
axis_int[1] -= 1;
if (axis_int[1] < 0) {
quit = true;
}
axis_t[1] = ((float)axis_int[1] - rayori[1]) / raydir[1];
}
} else {
tnow = axis_t[2];
if (raydir[2] > 0) {
axis_int[2] += 1;
if (axis_int[2] >= p.voxel_dims[2]) {
quit = true;
}
axis_t[2] = ((float)(axis_int[2]+1) - rayori[2]) / raydir[2];
} else {
axis_int[2] -= 1;
if (axis_int[2] < 0) {
quit = true;
}
axis_t[2] = ((float)axis_int[2] - rayori[2]) / raydir[2];
}
}
if (quit) {
break;
}
// Skip empty space
// Could there be deadlock if the ray direction is away from the world?
if (axis_int[0] < 0 || axis_int[0] >= p.voxel_dims[0] || axis_int[1] < 0 || axis_int[1] >= p.voxel_dims[1] || axis_int[2] < 0 || axis_int[2] >= p.voxel_dims[2]) {
continue;
}
// Test intersection using voxel grid
blk_id = in_voxel[axis_int[0]*p.voxel_strides[0] + axis_int[1]*p.voxel_strides[1] + axis_int[2]*p.voxel_strides[2]];
if (blk_id == 0) {
continue;
}
// Now that there is an intersection
t = tnow;
// Calculate t2
/*
#pragma unroll
for (int i=0; i<3; i++) {
if (axis_t[i] <= axis_t[(i+1)%3] && axis_t[i] <= axis_t[(i+2)%3]) {
t2 = axis_t[i];
break;
}
}
*/
// Hand unroll
if (axis_t[0] <= axis_t[1] && axis_t[0] <= axis_t[2]) {
t2 = axis_t[0];
} else if (axis_t[1] <= axis_t[2]) {
t2 = axis_t[1];
} else {
t2 = axis_t[2];
}
break;
} // while !quit (ray marching loop)
out_depth[pix_index*p.max_samples+cur_plane] = t;
out_depth[p.img_dims[0]*p.img_dims[1]*p.max_samples + pix_index*p.max_samples+cur_plane] = t2;
out_voxel_id[pix_index*p.max_samples+cur_plane] = blk_id;
} // cur_plane
}
/*
out:
out_voxel_id: torch CUDA int32 [ img_dims[0], img_dims[1], max_samples, 1]
out_depth: torch CUDA float [2, img_dims[0], img_dims[1], max_samples, 1]
out_raydirs: torch CUDA float [ img_dims[0], img_dims[1], 1, 3]
in:
in_voxel: torch CUDA int32 [X, Y, Z] [40, 512, 512]
cam_ori: torch float [3]
cam_dir: torch float [3]
cam_up: torch float [3]
cam_f: float
cam_c: int [2]
img_dims: int [2]
max_samples: int
*/
std::vector<torch::Tensor> ray_voxel_intersection_perspective_cuda(const torch::Tensor& in_voxel, const torch::Tensor& cam_ori, const torch::Tensor& cam_dir, const torch::Tensor& cam_up, float cam_f, const std::vector<float>& cam_c, const std::vector<int>& img_dims, int max_samples) {
CHECK_CUDA(in_voxel);
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
torch::Device device = in_voxel.device();
//assert(in_voxel.dtype() == torch::kU8);
assert(in_voxel.dtype() == torch::kInt32); // Minecraft compatibility
assert(in_voxel.dim() == 3);
assert(cam_ori.dtype() == torch::kFloat32);
assert(cam_ori.numel() == 3);
assert(cam_dir.dtype() == torch::kFloat32);
assert(cam_dir.numel() == 3);
assert(cam_up.dtype() == torch::kFloat32);
assert(cam_up.numel() == 3);
assert(img_dims.size() == 2);
RVIP_Params p;
// Calculate camera rays
const torch::Tensor cam_ori_c = cam_ori.cpu();
const torch::Tensor cam_dir_c = cam_dir.cpu();
const torch::Tensor cam_up_c = cam_up.cpu();
// Get the coordinate frame of camera space in world space
normalize<float, 3>(p.cam_fwd, cam_dir_c.data_ptr<float>());
cross<float>(p.cam_side, p.cam_fwd, cam_up_c.data_ptr<float>());
normalize<float, 3>(p.cam_side);
cross<float>(p.cam_up, p.cam_side, p.cam_fwd);
normalize<float, 3>(p.cam_up); // Not absolutely necessary as both vectors are normalized. But just in case...
copyarr<float, 3>(p.cam_ori, cam_ori_c.data_ptr<float>());
p.cam_f = cam_f;
p.cam_c[0] = cam_c[0];
p.cam_c[1] = cam_c[1];
p.max_samples = max_samples;
//printf("[Renderer] max_dist: %ld\n", max_dist);
p.voxel_dims[0] = in_voxel.size(0);
p.voxel_dims[1] = in_voxel.size(1);
p.voxel_dims[2] = in_voxel.size(2);
p.voxel_strides[0] = in_voxel.stride(0);
p.voxel_strides[1] = in_voxel.stride(1);
p.voxel_strides[2] = in_voxel.stride(2);
//printf("[Renderer] Voxel resolution: %ld, %ld, %ld\n", p.voxel_dims[0], p.voxel_dims[1], p.voxel_dims[2]);
p.img_dims[0] = img_dims[0];
p.img_dims[1] = img_dims[1];
// Create output tensors
// For Minecraft Seg Mask
torch::Tensor out_voxel_id = torch::empty({p.img_dims[0], p.img_dims[1], p.max_samples, 1}, torch::TensorOptions().dtype(torch::kInt32).device(device));
torch::Tensor out_depth;
// Produce two sets of localcoords, one for entry point, the other one for exit point. They share the same corner_ids.
out_depth = torch::empty({2, p.img_dims[0], p.img_dims[1], p.max_samples, 1}, torch::TensorOptions().dtype(torch::kFloat32).device(device));
torch::Tensor out_raydirs = torch::empty({p.img_dims[0], p.img_dims[1], 1, 3}, torch::TensorOptions().dtype(torch::kFloat32).device(device).requires_grad(false));
const int TILE_DIM = 8;
dim3 dimGrid((p.img_dims[1]+TILE_DIM-1)/TILE_DIM, (p.img_dims[0]+TILE_DIM-1)/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
ray_voxel_intersection_perspective_kernel<TILE_DIM><<<dimGrid, dimBlock, 0, stream>>>(
out_voxel_id.data_ptr<int32_t>(), out_depth.data_ptr<float>(), out_raydirs.data_ptr<float>(), in_voxel.data_ptr<int32_t>(), p
);
return {out_voxel_id, out_depth, out_raydirs};
}
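// Minimal host-side usage sketch: the voxel grid, camera pose and image size below are
// illustrative values chosen only to satisfy the shape/dtype asserts above (a zero-filled
// grid just produces empty intersections); example_ray_voxel_intersection is not part of
// the exposed API.
static std::vector<torch::Tensor> example_ray_voxel_intersection() {
    // Dense voxel grid [X, Y, Z]; 0 marks empty space.
    torch::Tensor in_voxel = torch::zeros({40, 512, 512},
        torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA));
    // Camera in front of the volume, looking down the +X axis with +Z as up.
    torch::Tensor cam_ori = torch::tensor({-5.0f, 256.0f, 256.0f});
    torch::Tensor cam_dir = torch::tensor({1.0f, 0.0f, 0.0f});
    torch::Tensor cam_up  = torch::tensor({0.0f, 0.0f, 1.0f});
    const float cam_f = 128.0f;                         // focal length in pixels
    const std::vector<float> cam_c = {128.0f, 128.0f};  // principal point (h, w)
    const std::vector<int> img_dims = {256, 256};       // output image size (h, w)
    const int max_samples = 6;                          // intersections kept per ray
    // Returns {out_voxel_id, out_depth, out_raydirs} with the shapes documented above.
    return ray_voxel_intersection_perspective_cuda(
        in_voxel, cam_ori, cam_dir, cam_up, cam_f, cam_c, img_dims, max_samples);
}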
#include "common.h"
#include "dispatch.h"
#include "so3.h"
#include "rxso3.h"
#include "se3.h"
#include "sim3.h"
#define GPU_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i += blockDim.x * gridDim.x)
#define NUM_THREADS 256
#define NUM_BLOCKS(batch_size) ((batch_size + NUM_THREADS - 1) / NUM_THREADS)
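// Example: with batch_size = 1000 and NUM_THREADS = 256, NUM_BLOCKS gives
// (1000 + 255) / 256 = 4 blocks, and GPU_1D_KERNEL_LOOP strides each thread through
// i, i + 256*4, i + 2*256*4, ... until i >= n (a standard grid-stride loop).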
template <typename Group, typename scalar_t>
__global__ void exp_forward_kernel(const scalar_t* a_ptr, scalar_t* X_ptr, int num_threads) {
// exponential map forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Data>(X_ptr + i*Group::N) = Group::Exp(a).data();
}
}
template <typename Group, typename scalar_t>
__global__ void exp_backward_kernel(const scalar_t* grad, const scalar_t* a_ptr, scalar_t* da, int num_threads) {
// exponential map backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a(a_ptr + i*Group::K);
Grad dX(grad + i*Group::N);
Eigen::Map<Grad>(da + i*Group::K) = dX * Group::left_jacobian(a);
}
}
template <typename Group, typename scalar_t>
__global__ void log_forward_kernel(const scalar_t* X_ptr, scalar_t* a_ptr, int num_threads) {
// logarithm map forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a = Group(X_ptr + i*Group::N).Log();
Eigen::Map<Tangent>(a_ptr + i*Group::K) = a;
}
}
template <typename Group, typename scalar_t>
__global__ void log_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t* dX, int num_threads) {
// logarithm map backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Tangent a = Group(X_ptr + i*Group::N).Log();
Grad da(grad + i*Group::K);
Eigen::Map<Grad>(dX + i*Group::N) = da * Group::left_jacobian_inverse(a);
}
}
template <typename Group, typename scalar_t>
__global__ void inv_forward_kernel(const scalar_t* X_ptr, scalar_t* Y_ptr, int num_threads) {
// group inverse forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Data>(Y_ptr + i*Group::N) = X.inv().data();
}
}
template <typename Group, typename scalar_t>
__global__ void inv_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, scalar_t *dX, int num_threads) {
// group inverse backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group Y = Group(X_ptr + i*Group::N).inv();
Grad dY(grad + i*Group::N);
Eigen::Map<Grad>(dX + i*Group::N) = -dY * Y.Adj();
}
}
template <typename Group, typename scalar_t>
__global__ void mul_forward_kernel(const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* Z_ptr, int num_threads) {
// group multiplication forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group Z = Group(X_ptr + i*Group::N) * Group(Y_ptr + i*Group::N);
Eigen::Map<Data>(Z_ptr + i*Group::N) = Z.data();
}
}
template <class Group, typename scalar_t>
__global__ void mul_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* Y_ptr, scalar_t* dX, scalar_t* dY, int num_threads) {
// group multiplication backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Grad dZ(grad + i*Group::N);
Group X(X_ptr + i*Group::N);
Eigen::Map<Grad>(dX + i*Group::N) = dZ;
Eigen::Map<Grad>(dY + i*Group::N) = dZ * X.Adj();
}
}
template <typename Group, typename scalar_t>
__global__ void adj_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// adjoint forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(b_ptr + i*Group::K) = X.Adj(a);
}
}
template <typename Group, typename scalar_t>
__global__ void adj_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) {
// adjoint backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Grad db(grad + i*Group::K);
Tangent a(a_ptr + i*Group::K);
Tangent b = X.Adj() * a;
Eigen::Map<Grad>(da + i*Group::K) = db * X.Adj();
Eigen::Map<Grad>(dX + i*Group::N) = -db * Group::adj(b);
}
}
template <typename Group, typename scalar_t>
__global__ void adjT_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// adjoint forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(b_ptr + i*Group::K) = X.AdjT(a);
}
}
template <typename Group, typename scalar_t>
__global__ void adjT_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* dX, scalar_t* da, int num_threads) {
// adjoint backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent db(grad + i*Group::K);
Grad a(a_ptr + i*Group::K);
Eigen::Map<Tangent>(da + i*Group::K) = X.Adj(db);
Eigen::Map<Grad>(dX + i*Group::N) = -a * Group::adj(X.Adj(db));
}
}
template <typename Group, typename scalar_t>
__global__ void act_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) {
// action on point forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Point = Eigen::Matrix<scalar_t,3,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*3);
Eigen::Map<Point>(q_ptr + i*3) = X * p;
}
}
template <typename Group, typename scalar_t>
__global__ void act_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) {
// action on point backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Point = Eigen::Matrix<scalar_t,3,1>;
using PointGrad = Eigen::Matrix<scalar_t,1,3>;
using Transformation = Eigen::Matrix<scalar_t,4,4>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*3);
PointGrad dq(grad + i*3);
Eigen::Map<PointGrad>(dp + i*3) = dq * X.Matrix4x4().block<3,3>(0,0);
Eigen::Map<Grad>(dX + i*Group::N) = dq * Group::act_jacobian(X*p);
}
}
template <typename Group, typename scalar_t>
__global__ void act4_forward_kernel(const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* q_ptr, int num_threads) {
// action on point forward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Point = Eigen::Matrix<scalar_t,4,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*4);
Eigen::Map<Point>(q_ptr + i*4) = X.act4(p);
}
}
template <typename Group, typename scalar_t>
__global__ void act4_backward_kernel(const scalar_t* grad, const scalar_t* X_ptr, const scalar_t* p_ptr, scalar_t* dX, scalar_t* dp, int num_threads) {
// action on homogeneous point (act4) backward kernel
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Grad = Eigen::Matrix<scalar_t,1,Group::K>;
using Point = Eigen::Matrix<scalar_t,4,1>;
using PointGrad = Eigen::Matrix<scalar_t,1,4>;
using Transformation = Eigen::Matrix<scalar_t,4,4>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Point p(p_ptr + i*4);
PointGrad dq(grad + i*4);
Eigen::Map<PointGrad>(dp + i*4) = dq * X.Matrix4x4();
const Point q = X.act4(p);
Eigen::Map<Grad>(dX + i*Group::N) = dq * Group::act4_jacobian(q);
}
}
template <typename Group, typename scalar_t>
__global__ void as_matrix_forward_kernel(const scalar_t* X_ptr, scalar_t* T_ptr, int num_threads) {
// convert to 4x4 matrix representation
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
using Matrix4 = Eigen::Matrix<scalar_t,4,4,Eigen::RowMajor>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Matrix4>(T_ptr + i*16) = X.Matrix4x4();
}
}
template <typename Group, typename scalar_t>
__global__ void orthogonal_projector_kernel(const scalar_t* X_ptr, scalar_t* P_ptr, int num_threads) {
// orthogonal projection matrix
using Proj = Eigen::Matrix<scalar_t,Group::N,Group::N,Eigen::RowMajor>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Eigen::Map<Proj>(P_ptr + i*Group::N*Group::N) = X.orthogonal_projector();
}
}
template <typename Group, typename scalar_t>
__global__ void jleft_forward_kernel(const scalar_t* X_ptr, const scalar_t* a_ptr, scalar_t* b_ptr, int num_threads) {
// left jacobian inverse action
using Tangent = Eigen::Matrix<scalar_t,Group::K,1>;
using Data = Eigen::Matrix<scalar_t,Group::N,1>;
GPU_1D_KERNEL_LOOP(i, num_threads) {
Group X(X_ptr + i*Group::N);
Tangent a(a_ptr + i*Group::K);
Tangent b = Group::left_jacobian_inverse(X.Log()) * a;
Eigen::Map<Tangent>(b_ptr + i*Group::K) = b;
}
}
// unary operations
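// The host wrappers below share one pattern: DISPATCH_GROUP_AND_FLOATING_TYPES (from
// dispatch.h) is assumed to bind group_t to the concrete Lie group selected by group_id
// (SO3, RxSO3, SE3 or Sim3, per the includes above) and scalar_t to the tensor's
// floating-point type, after which the matching kernel is launched with roughly one
// thread per batch element.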
torch::Tensor exp_forward_gpu(int group_id, torch::Tensor a) {
int batch_size = a.size(0);
torch::Tensor X;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_forward_kernel", ([&] {
X = torch::zeros({batch_size, group_t::N}, a.options());
exp_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
a.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
batch_size);
}));
return X;
}
std::vector<torch::Tensor> exp_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor a) {
int batch_size = a.size(0);
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, a.type(), "exp_backward_kernel", ([&] {
exp_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {da};
}
torch::Tensor log_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor a;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_forward_kernel", ([&] {
a = torch::zeros({batch_size, group_t::K}, X.options());
log_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
batch_size);
}));
return a;
}
std::vector<torch::Tensor> log_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "log_backward_kernel", ([&] {
log_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
batch_size);
}));
return {dX};
}
torch::Tensor inv_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor Y = torch::zeros_like(X);
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_forward_kernel", ([&] {
inv_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
batch_size);
}));
return Y;
}
std::vector<torch::Tensor> inv_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "inv_backward_kernel", ([&] {
inv_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
batch_size);
}));
return {dX};
}
// binary operations
torch::Tensor mul_forward_gpu(int group_id, torch::Tensor X, torch::Tensor Y) {
int batch_size = X.size(0);
torch::Tensor Z = torch::zeros_like(X);
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_forward_kernel", ([&] {
mul_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
Z.data_ptr<scalar_t>(),
batch_size);
}));
return Z;
}
std::vector<torch::Tensor> mul_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor Y) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dY = torch::zeros(Y.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "mul_backward_kernel", ([&] {
mul_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
Y.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dY.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dY};
}
torch::Tensor adj_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_forward_kernel", ([&] {
adj_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
std::vector<torch::Tensor> adj_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adj_backward_kernel", ([&] {
adj_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, da};
}
torch::Tensor adjT_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_forward_kernel", ([&] {
adjT_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
std::vector<torch::Tensor> adjT_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor da = torch::zeros(a.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "adjT_backward_kernel", ([&] {
adjT_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
da.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, da};
}
torch::Tensor act_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor q = torch::zeros(p.sizes(), p.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_forward_kernel", ([&] {
act_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
q.data_ptr<scalar_t>(),
batch_size);
}));
return q;
}
std::vector<torch::Tensor> act_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dp = torch::zeros(p.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act_backward_kernel", ([&] {
act_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dp.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dp};
}
torch::Tensor act4_forward_gpu(int group_id, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor q = torch::zeros(p.sizes(), p.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_forward_kernel", ([&] {
act4_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
q.data_ptr<scalar_t>(),
batch_size);
}));
return q;
}
std::vector<torch::Tensor> act4_backward_gpu(int group_id, torch::Tensor grad, torch::Tensor X, torch::Tensor p) {
int batch_size = X.size(0);
torch::Tensor dX = torch::zeros(X.sizes(), grad.options());
torch::Tensor dp = torch::zeros(p.sizes(), grad.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "act4_backward_kernel", ([&] {
act4_backward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
grad.data_ptr<scalar_t>(),
X.data_ptr<scalar_t>(),
p.data_ptr<scalar_t>(),
dX.data_ptr<scalar_t>(),
dp.data_ptr<scalar_t>(),
batch_size);
}));
return {dX, dp};
}
torch::Tensor as_matrix_forward_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor T4x4 = torch::zeros({X.size(0), 4, 4}, X.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "as_matrix_forward_kernel", ([&] {
as_matrix_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
T4x4.data_ptr<scalar_t>(),
batch_size);
}));
return T4x4;
}
torch::Tensor orthogonal_projector_gpu(int group_id, torch::Tensor X) {
int batch_size = X.size(0);
torch::Tensor P;
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "orthogonal_projector_kernel", ([&] {
P = torch::zeros({X.size(0), group_t::N, group_t::N}, X.options());
orthogonal_projector_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
P.data_ptr<scalar_t>(),
batch_size);
}));
return P;
}
torch::Tensor jleft_forward_gpu(int group_id, torch::Tensor X, torch::Tensor a) {
int batch_size = X.size(0);
torch::Tensor b = torch::zeros(a.sizes(), a.options());
DISPATCH_GROUP_AND_FLOATING_TYPES(group_id, X.type(), "jleft_forward_kernel", ([&] {
jleft_forward_kernel<group_t, scalar_t><<<NUM_BLOCKS(batch_size), NUM_THREADS>>>(
X.data_ptr<scalar_t>(),
a.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
batch_size);
}));
return b;
}
#include "cudakernel/nn/resize.h"
#include "ppl/common/types.h"
struct half8_ {
half x0;
half y0;
half z0;
half w0;
half x1;
half y1;
half z1;
half w1;
};
static inline __device__ float cudaComputeSourceIndexCubic(
float scale,
int dstIndex)
{
float srcIdx = scale * (dstIndex + 0.5) - 0.5;
return srcIdx;
}
static inline __device__ float cudaComputeSourceIndexNearest(
float scale,
int dstIndex,
int transform_mode)
{
float srcIdx = 0.f;
if (transform_mode == 3) {
srcIdx = scale * dstIndex;
} else {
srcIdx = scale * (dstIndex + 0.5) - 0.5;
}
return (srcIdx < 0) ? 0.f : srcIdx;
}
static inline __device__ float cudaComputeSourceIndexBilinear(
float scale,
int dstIndex)
{
float srcIdx = scale * (dstIndex + 0.5) - 0.5;
return (srcIdx < 0) ? 0.f : srcIdx;
}
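// Example of the half-pixel mapping above: downscaling by 2 (scale = 2.0) sends dst
// index 0 to src 2*(0+0.5)-0.5 = 0.5, i.e. halfway between the first two input pixels;
// when upscaling, slightly negative results (e.g. -0.25 for scale = 0.5, dst = 0) are
// clamped to 0.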
static __device__ __forceinline__ float cubic_convolution1(
float x,
float A)
{
return ((A + 2) * x - (A + 3)) * x * x + 1;
}
static __device__ __forceinline__ float cubic_convolution2(
float x,
float A)
{
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
static __device__ __forceinline__ void get_cubic_resize_coefficients(
float coeffs[4],
float t,
float cubic_coeff)
{
float A = cubic_coeff;
float x1 = t;
coeffs[0] = cubic_convolution2(x1 + 1.0, A);
coeffs[1] = cubic_convolution1(x1, A);
// opposite coefficients
float x2 = 1.0 - t;
coeffs[2] = cubic_convolution1(x2, A);
coeffs[3] = cubic_convolution2(x2 + 1.0, A);
}
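// These are the Keys cubic-convolution weights evaluated at distances t+1, t, 1-t and
// 2-t from the four neighbouring samples; for any t (and any A) the four coefficients
// sum to 1, so constant inputs are reproduced exactly.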
template <typename T>
static __device__ inline T cubic_interplote(float frac0, T data0, float frac1, T data1, float frac2, T data2, float frac3, T data3);
template<typename T>
static __device__ inline T cubic_interplote(float frac0, T data0, float frac1, T data1, float frac2, T data2, float frac3, T data3) {
T res;
res = frac0 * data0 + frac1 * data1 +
frac2 * data2 + frac3 * data3;
return res;
}
__device__ inline float cubic_interplote_float(float frac0, float data0, float frac1, float data1, float frac2, float data2, float frac3, float data3) {
float res;
res = frac0 * data0 + frac1 * data1 +
frac2 * data2 + frac3 * data3;
return res;
}
template <>
__device__ inline half cubic_interplote<half>(float frac0, half data0, float frac1, half data1, float frac2, half data2, float frac3, half data3)
{
half res;
res = frac0 * __half2float(data0) + frac1 * __half2float(data1) +
frac2 * __half2float(data2) + frac3 * __half2float(data3);
return res;
}
// template <>
// __device__ inline half8_ cubic_interplote<half8_>(float frac0, half8_ data0, float frac1, half8_ data1, float frac2, half8_ data2, float frac3, half8_ data3)
// {
// half8_ res;
// res.x0 = frac0 * __half2float(data0.x0) + frac1 * __half2float(data1.x0) +
// frac2 * __half2float(data2.x0) + frac3 * __half2float(data3.x0);
// res.y0 = frac0 * __half2float(data0.y0) + frac1 * __half2float(data1.y0) +
// frac2 * __half2float(data2.y0) + frac3 * __half2float(data3.y0);
// res.z0 = frac0 * __half2float(data0.z0) + frac1 * __half2float(data1.z0) +
// frac2 * __half2float(data2.z0) + frac3 * __half2float(data3.z0);
// res.w0 = frac0 * __half2float(data0.w0) + frac1 * __half2float(data1.w0) +
// frac2 * __half2float(data2.w0) + frac3 * __half2float(data3.w0);
// res.x1 = frac0 * __half2float(data0.x1) + frac1 * __half2float(data1.x1) +
// frac2 * __half2float(data2.x1) + frac3 * __half2float(data3.x1);
// res.y1 = frac0 * __half2float(data0.y1) + frac1 * __half2float(data1.y1) +
// frac2 * __half2float(data2.y1) + frac3 * __half2float(data3.y1);
// res.z1 = frac0 * __half2float(data0.z1) + frac1 * __half2float(data1.z1) +
// frac2 * __half2float(data2.z1) + frac3 * __half2float(data3.z1);
// res.w1 = frac0 * __half2float(data0.w1) + frac1 * __half2float(data1.w1) +
// frac2 * __half2float(data2.w1) + frac3 * __half2float(data3.w1);
// return res;
// }
template <typename T>
static __device__ __forceinline__ T cubic_interp1d(
T x0,
T x1,
T x2,
T x3,
float t,
float cubic_coeff)
{
float coeffs[4];
get_cubic_resize_coefficients(coeffs, t, cubic_coeff);
return cubic_interplote<T>(coeffs[0], x0, coeffs[1], x1, coeffs[2], x2, coeffs[3], x3);
}
static __device__ __forceinline__ float cubic_interp1d_float(
float x0,
float x1,
float x2,
float x3,
float t,
float cubic_coeff)
{
float coeffs[4];
get_cubic_resize_coefficients(coeffs, t, cubic_coeff);
return cubic_interplote_float(coeffs[0], x0, coeffs[1], x1, coeffs[2], x2, coeffs[3], x3);
}
template <typename T>
__device__ __forceinline__ static T resize_get_value_bounded(
const T* data,
int height,
int width,
int access_c,
int y,
int x)
{
int access_y = max(min(y, height - 1), 0);
int access_x = max(min(x, width - 1), 0);
return data[access_c * height * width + access_y * width + access_x];
}
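// Out-of-range coordinates are clamped to the nearest valid row/column, so the cubic
// kernels below effectively use edge replication at the image borders.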
template<typename T>
__device__ inline T bilinear_interplote(float frac_w0, float frac_w1,
float frac_h0, float frac_h1, T data0, T data1, T data2, T data3) {
T res;
res = frac_h0 * (frac_w0 * data0 + frac_w1 * data1) +
frac_h1 * (frac_w0 * data2 + frac_w1 * data3);
return res;
}
template<>
__device__ inline int8_t bilinear_interplote<int8_t>(float frac_w0, float frac_w1,
float frac_h0, float frac_h1, int8_t data0, int8_t data1, int8_t data2, int8_t data3) {
int8_t res;
res = round(frac_h0 * (frac_w0 * data0 + frac_w1 * data1) +
frac_h1 * (frac_w0 * data2 + frac_w1 * data3));
return res;
}
template <>
__device__ inline half bilinear_interplote<half>(float frac_w0, float frac_w1, float frac_h0, float frac_h1, half data0, half data1, half data2, half data3)
{
half res;
res = frac_h0 * (frac_w0 * __half2float(data0) + frac_w1 * __half2float(data1)) +
frac_h1 * (frac_w0 * __half2float(data2) + frac_w1 * __half2float(data3));
return res;
}
template <>
__device__ inline half8_ bilinear_interplote<half8_>(float frac_w0, float frac_w1, float frac_h0, float frac_h1, half8_ data0, half8_ data1, half8_ data2, half8_ data3)
{
half8_ res;
res.x0 = frac_h0 * (frac_w0 * __half2float(data0.x0) + frac_w1 * __half2float(data1.x0)) +
frac_h1 * (frac_w0 * __half2float(data2.x0) + frac_w1 * __half2float(data3.x0));
res.y0 = frac_h0 * (frac_w0 * __half2float(data0.y0) + frac_w1 * __half2float(data1.y0)) +
frac_h1 * (frac_w0 * __half2float(data2.y0) + frac_w1 * __half2float(data3.y0));
res.z0 = frac_h0 * (frac_w0 * __half2float(data0.z0) + frac_w1 * __half2float(data1.z0)) +
frac_h1 * (frac_w0 * __half2float(data2.z0) + frac_w1 * __half2float(data3.z0));
res.w0 = frac_h0 * (frac_w0 * __half2float(data0.w0) + frac_w1 * __half2float(data1.w0)) +
frac_h1 * (frac_w0 * __half2float(data2.w0) + frac_w1 * __half2float(data3.w0));
res.x1 = frac_h0 * (frac_w0 * __half2float(data0.x1) + frac_w1 * __half2float(data1.x1)) +
frac_h1 * (frac_w0 * __half2float(data2.x1) + frac_w1 * __half2float(data3.x1));
res.y1 = frac_h0 * (frac_w0 * __half2float(data0.y1) + frac_w1 * __half2float(data1.y1)) +
frac_h1 * (frac_w0 * __half2float(data2.y1) + frac_w1 * __half2float(data3.y1));
res.z1 = frac_h0 * (frac_w0 * __half2float(data0.z1) + frac_w1 * __half2float(data1.z1)) +
frac_h1 * (frac_w0 * __half2float(data2.z1) + frac_w1 * __half2float(data3.z1));
res.w1 = frac_h0 * (frac_w0 * __half2float(data0.w1) + frac_w1 * __half2float(data1.w1)) +
frac_h1 * (frac_w0 * __half2float(data2.w1) + frac_w1 * __half2float(data3.w1));
return res;
}
template <typename T>
__global__ void ppl_cukernel_resize_bilinear_int8(
int num_threads,
float h_scale,
float w_scale,
int channels,
const T* input,
int in_height,
int in_width,
T* output,
int out_height,
int out_width,
float in_scale,
float out_scale)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num_threads) {
const int w2 = index % out_width; // 0:out_width-1
const int h2 = index / out_width; // 0:out_height-1
// special case: just copy
if (in_height == out_height && in_width == out_width) {
const int h1 = h2;
const int w1 = w2;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
return;
}
//const float h1r = h_scale * h2;
const float h1r = cudaComputeSourceIndexBilinear(h_scale, h2);
const int h1 = h1r;
const int h1p = (h1 < in_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
//const float w1r = w_scale * w2;
const float w1r = cudaComputeSourceIndexBilinear(w_scale, w2);
const int w1 = w1r;
const int w1p = (w1 < in_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) { // neighbors: one pixel to the right and one below
// pos2[0] = h0lambda * (w0lambda * pos1[0] +
// w1lambda * pos1[w1p]) +
// h1lambda * (w0lambda * pos1[h1p * in_width] +
// w1lambda * pos1[h1p * in_width + w1p]);
int32_t temp = bilinear_interplote<T>(w0lambda, w1lambda, h0lambda, h1lambda, pos1[0], pos1[w1p], pos1[h1p * in_width], pos1[h1p * in_width + w1p]);
temp = round(temp * in_scale / out_scale);
if(temp > 127) temp = 127;
if(temp < -128) temp = -128;
pos2[0] = temp;
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
}
}
template <typename T>
__global__ void ppl_cukernel_resize_bilinear(
int num_threads,
float h_scale,
float w_scale,
int channels,
const T* input,
int in_height,
int in_width,
T* output,
int out_height,
int out_width)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num_threads) {
const int w2 = index % out_width; // 0:out_width-1
const int h2 = index / out_width; // 0:out_height-1
// special case: just copy
if (in_height == out_height && in_width == out_width) {
const int h1 = h2;
const int w1 = w2;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
return;
}
//const float h1r = h_scale * h2;
const float h1r = cudaComputeSourceIndexBilinear(h_scale, h2);
const int h1 = h1r;
const int h1p = (h1 < in_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
//const float w1r = w_scale * w2;
const float w1r = cudaComputeSourceIndexBilinear(w_scale, w2);
const int w1 = w1r;
const int w1p = (w1 < in_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) { // neighbors: one pixel to the right and one below
// pos2[0] = h0lambda * (w0lambda * pos1[0] +
// w1lambda * pos1[w1p]) +
// h1lambda * (w0lambda * pos1[h1p * in_width] +
// w1lambda * pos1[h1p * in_width + w1p]);
pos2[0] = bilinear_interplote<T>(w0lambda, w1lambda, h0lambda, h1lambda, pos1[0], pos1[w1p], pos1[h1p * in_width], pos1[h1p * in_width + w1p]);
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
}
}
template <typename T>
__global__ void ppl_cukernel_resize_nearest_int8(
int num_threads,
float h_scale,
float w_scale,
int channels,
const T* input,
int in_height,
int in_width,
T* output,
int out_height,
int out_width,
int transform_mode,
float in_scale,
float out_scale)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num_threads) {
const int w2 = index % out_width; // 0:out_width-1
const int h2 = index / out_width; // 0:out_height-1
// special case: just copy
if (in_height == out_height && in_width == out_width) {
const int h1 = h2;
const int w1 = w2;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
return;
}
//const float h1r = h_scale * h2;
const float h1r = cudaComputeSourceIndexNearest(h_scale, h2, transform_mode);
const int h1 = h1r;
//const float w1r = w_scale * w2;
const float w1r = cudaComputeSourceIndexNearest(w_scale, w2, transform_mode);
const int w1 = w1r;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
int32_t temp = round(pos1[0] * in_scale / out_scale);
if(temp > 127) temp = 127;
if(temp < -128) temp = -128;
pos2[0] = temp;
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
}
}
template <typename T>
__global__ void ppl_cukernel_resize_nearest(
int num_threads,
float h_scale,
float w_scale,
int channels,
const T* input,
int in_height,
int in_width,
T* output,
int out_height,
int out_width,
int transform_mode)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num_threads) {
const int w2 = index % out_width; // 0:out_width-1
const int h2 = index / out_width; // 0:out_height-1
// special case: just copy
if (in_height == out_height && in_width == out_width) {
const int h1 = h2;
const int w1 = w2;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
return;
}
//const float h1r = h_scale * h2;
const float h1r = cudaComputeSourceIndexNearest(h_scale, h2, transform_mode);
const int h1 = h1r;
//const float w1r = w_scale * w2;
const float w1r = cudaComputeSourceIndexNearest(w_scale, w2, transform_mode);
const int w1 = w1r;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
}
}
template <typename T>
__global__ void ppl_cukernel_resize_cubic_int8(
int num_threads,
float h_scale,
float w_scale,
int channels,
const T* input,
int in_height,
int in_width,
T* output,
int out_height,
int out_width,
float cubic_coeff,
float in_scale,
float out_scale)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num_threads) {
const int w2 = index % out_width; // 0:out_width-1
const int h2 = index / out_width; // 0:out_height-1
// special case: just copy
if (in_height == out_height && in_width == out_width) {
const int h1 = h2;
const int w1 = w2;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
return;
}
const float h1r = cudaComputeSourceIndexCubic(h_scale, h2);
const int h1 = floorf(h1r);
const float h1lambda = h1r - h1;
const float w1r = cudaComputeSourceIndexCubic(w_scale, w2);
const int w1 = floorf(w1r);
const float w1lambda = w1r - w1;
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
float coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d_float(
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 - 1),
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 + 0),
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 + 1),
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 + 2),
w1lambda,
cubic_coeff);
}
float temp = cubic_interp1d_float(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
h1lambda,
cubic_coeff);
int32_t res = round(temp * in_scale / out_scale);
if(res > 127) res = 127;
if(res < -128) res = -128;
pos2[0] = res;
pos2 += out_width * out_height;
}
}
}
template <typename T>
__global__ void ppl_cukernel_resize_cubic(
int num_threads,
float h_scale,
float w_scale,
int channels,
const T* input,
int in_height,
int in_width,
T* output,
int out_height,
int out_width,
float cubic_coeff)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num_threads) {
const int w2 = index % out_width; // 0:out_width-1
const int h2 = index / out_width; // 0:out_height-1
// special case: just copy
if (in_height == out_height && in_width == out_width) {
const int h1 = h2;
const int w1 = w2;
const T* pos1 = &input[h1 * in_width + w1];
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
pos2[0] = pos1[0];
pos1 += in_width * in_height;
pos2 += out_width * out_height;
}
return;
}
const float h1r = cudaComputeSourceIndexCubic(h_scale, h2);
const int h1 = floorf(h1r);
const float h1lambda = h1r - h1;
const float w1r = cudaComputeSourceIndexCubic(w_scale, w2);
const int w1 = floorf(w1r);
const float w1lambda = w1r - w1;
T* pos2 = &output[h2 * out_width + w2];
for (int c = 0; c < channels; ++c) {
T coefficients[4];
for (int k = 0; k < 4; k++) {
coefficients[k] = cubic_interp1d<T>(
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 - 1),
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 + 0),
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 + 1),
resize_get_value_bounded(
input, in_height, in_width, c, h1 - 1 + k, w1 + 2),
w1lambda,
cubic_coeff);
}
pos2[0] = cubic_interp1d<T>(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
h1lambda,
cubic_coeff);
pos2 += out_width * out_height;
}
}
}
static inline float hostComputeAreaScale(int input_size, int output_size, int mode)
{
if (output_size > 1 || mode == 0 || mode == 3) {
return float(input_size) / output_size;
} else {
return 0.f;
}
}
// coordinate_transformation_mode definition
// {"half_pixel", 0}, {"pytorch_half_pixel", 1}, {"align_corners", 2},
// {"asymmetric", 3}, {"tf_half_pixel_for_nn", 4}, {"tf_crop_and_resize", 5}
// interpolation mode
// {"nearest", 0}, {"linear", 1}, {"cubic", 2}
template <typename T>
void ppl_resize_forward(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape,
const T* input,
const ppl::nn::TensorShape* output_shape,
T* output,
bool scale_pre_set,
float h_scale_pre,
float w_scale_pre,
int transform_mode,
int inter_mode,
float cubic_coeff)
{
int dim_count = output_shape->GetDimCount();
int out_height = 1, out_width = 1;
int in_height = 1, in_width = 1;
for (int it = 2; it < dim_count - 1; ++it) {
out_height *= output_shape->GetDim(it);
in_height *= input_shape->GetDim(it);
}
out_width = output_shape->GetDim(dim_count - 1);
in_width = input_shape->GetDim(dim_count - 1);
int channels = output_shape->GetDim(0) * output_shape->GetDim(1);
float h_scale = 0.f, w_scale = 0.f;
if (scale_pre_set) {
h_scale = h_scale_pre;
w_scale = w_scale_pre;
} else {
h_scale = hostComputeAreaScale(in_height, out_height, transform_mode);
w_scale = hostComputeAreaScale(in_width, out_width, transform_mode);
}
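// One thread per output spatial location; each kernel loops over `channels`
// (batch and channel dims collapsed) internally.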
int num_threads = out_height * out_width;
int block_size = 256;
int grid = (num_threads + block_size - 1) / block_size;
if (inter_mode == 0) {
ppl_cukernel_resize_nearest<T><<<grid, block_size, 0, stream>>>(
num_threads, h_scale, w_scale, channels, input, in_height, in_width, output, out_height, out_width, transform_mode);
} else if (inter_mode == 1) {
ppl_cukernel_resize_bilinear<T><<<grid, block_size, 0, stream>>>(
num_threads, h_scale, w_scale, channels, input, in_height, in_width, output, out_height, out_width);
} else if (inter_mode == 2) {
ppl_cukernel_resize_cubic<T><<<grid, block_size, 0, stream>>>(
num_threads, h_scale, w_scale, channels, input, in_height, in_width, output, out_height, out_width, cubic_coeff);
}
}
template <typename T>
void ppl_resize_forward_int8(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape,
const T* input,
const ppl::nn::TensorShape* output_shape,
T* output,
bool scale_pre_set,
float h_scale_pre,
float w_scale_pre,
int transform_mode,
int inter_mode,
float cubic_coeff,
float in_scale,
float out_scale)
{
int dim_count = output_shape->GetDimCount();
int out_height = 1, out_width = 1;
int in_height = 1, in_width = 1;
for (int it = 2; it < dim_count - 1; ++it) {
out_height *= output_shape->GetDim(it);
in_height *= input_shape->GetDim(it);
}
out_width = output_shape->GetDim(dim_count - 1);
in_width = input_shape->GetDim(dim_count - 1);
int channels = output_shape->GetDim(0) * output_shape->GetDim(1);
float h_scale = 0.f, w_scale = 0.f;
if (scale_pre_set) {
h_scale = h_scale_pre;
w_scale = w_scale_pre;
} else {
h_scale = hostComputeAreaScale(in_height, out_height, transform_mode);
w_scale = hostComputeAreaScale(in_width, out_width, transform_mode);
}
int num_threads = out_height * out_width;
int block_size = 256;
int grid = (num_threads + block_size - 1) / block_size;
if (inter_mode == 0) {
ppl_cukernel_resize_nearest_int8<T><<<grid, block_size, 0, stream>>>(
num_threads, h_scale, w_scale, channels, input, in_height, in_width, output, out_height, out_width, transform_mode, in_scale, out_scale);
} else if (inter_mode == 1) {
ppl_cukernel_resize_bilinear_int8<T><<<grid, block_size, 0, stream>>>(
num_threads, h_scale, w_scale, channels, input, in_height, in_width, output, out_height, out_width, in_scale, out_scale);
} else if (inter_mode == 2) {
ppl_cukernel_resize_cubic_int8<T><<<grid, block_size, 0, stream>>>(
num_threads, h_scale, w_scale, channels, input, in_height, in_width, output, out_height, out_width, cubic_coeff, in_scale, out_scale);
}
}
ppl::common::RetCode PPLCUDAResizeForwardImp(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output,
bool scale_pre_set,
float h_scale,
float w_scale,
int transform_mode,
int inter_mode,
float cubic_coeff,
float in_scale,
float out_scale)
{
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
ppl_resize_forward<half>(stream, input_shape, (const half*)input, output_shape, (half*)output, scale_pre_set, h_scale, w_scale, transform_mode, inter_mode, cubic_coeff);
return ppl::common::RC_SUCCESS;
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
ppl_resize_forward<float>(stream, input_shape, (const float*)input, output_shape, (float*)output, scale_pre_set, h_scale, w_scale, transform_mode, inter_mode, cubic_coeff);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_INT8) {
ppl_resize_forward_int8<int8_t>(stream, input_shape, (const int8_t*)input, output_shape, (int8_t*)output, scale_pre_set, h_scale, w_scale, transform_mode, inter_mode, cubic_coeff, in_scale, out_scale);
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
}
#include <cmath>
#include <vector>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAApplyUtils.cuh> // at::cuda::getApplyGrid
#include <THC/THC.h>
#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
/********************************
* Forward kernel for approxmatch
*********************************/
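// approxmatch computes a soft, approximately one-to-one matching between the n
// points of xyz1 and the m points of xyz2 in each batch. It runs 10 rounds with
// an annealed temperature (level = -4^j for j = 7..-2, the last round using
// level = 0): each round spreads the remaining mass of every xyz1 point over
// the xyz2 points in proportion to exp(level * squared distance), limits the
// transfer by the remaining capacity of each xyz2 point, and accumulates the
// transported mass into `match`. `temp` must provide (n + m) * 2 scratch values
// per block (remainL, remainR, ratioL, ratioR).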
template<typename scalar_t>
__global__ void approxmatch(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,scalar_t * __restrict__ match,scalar_t * temp){
scalar_t * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
scalar_t multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ scalar_t buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
for (int j=7;j>=-2;j--){
scalar_t level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
scalar_t suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
scalar_t x2=xyz2[i*m*3+l0*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l0*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*4+0];
scalar_t y2=buf[l*4+1];
scalar_t z2=buf[l*4+2];
scalar_t d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
scalar_t w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
scalar_t x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
scalar_t sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
scalar_t x1=buf[k*4+0];
scalar_t y1=buf[k*4+1];
scalar_t z1=buf[k*4+2];
scalar_t w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
scalar_t consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
scalar_t suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
scalar_t rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*4+0];
scalar_t y2=buf[l*4+1];
scalar_t z2=buf[l*4+2];
scalar_t w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
__syncthreads();
}
}
}
//void approxmatchLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,scalar_t * match,scalar_t * temp){
// approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match,temp);
//}
/* ApproxMatch forward interface
Input:
xyz1: (B, N1, 3) # dataset_points
xyz2: (B, N2, 3) # query_points
Output:
match: (B, N2, N1)
*/
at::Tensor ApproxMatchForward(
const at::Tensor xyz1,
const at::Tensor xyz2){
const auto b = xyz1.size(0);
const auto n = xyz1.size(1);
const auto m = xyz2.size(1);
CHECK_EQ(xyz2.size(0), b);
CHECK_EQ(xyz1.size(2), 3);
CHECK_EQ(xyz2.size(2), 3);
CHECK_INPUT(xyz1);
CHECK_INPUT(xyz2);
auto match = at::zeros({b, m, n}, xyz1.type());
auto temp = at::zeros({b, (n+m)*2}, xyz1.type());
AT_DISPATCH_FLOATING_TYPES(xyz1.scalar_type(), "ApproxMatchForward", ([&] {
approxmatch<scalar_t><<<32,512>>>(b, n, m, xyz1.data<scalar_t>(), xyz2.data<scalar_t>(), match.data<scalar_t>(), temp.data<scalar_t>());
}));
THCudaCheck(cudaGetLastError());
return match;
}
/********************************
* Forward kernel for matchcost
*********************************/
template<typename scalar_t>
__global__ void matchcost(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ out){
__shared__ scalar_t allsum[512];
const int Block=1024;
__shared__ scalar_t buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
scalar_t subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*3;l+=blockDim.x)
buf[l]=xyz2[i*m*3+l0*3+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*3+0];
scalar_t y2=buf[l*3+1];
scalar_t z2=buf[l*3+2];
scalar_t d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
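// Block-wide tree reduction over allsum[]: at each stride j, threads whose
// index has bit j clear accumulate the partner entry j positions away.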
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
//void matchcostLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,const scalar_t * match,scalar_t * out){
// matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
//}
/* MatchCost forward interface
Input:
xyz1: (B, N1, 3) # dataset_points
xyz2: (B, N2, 3) # query_points
match: (B, N2, N1)
Output:
cost: (B)
*/
at::Tensor MatchCostForward(
const at::Tensor xyz1,
const at::Tensor xyz2,
const at::Tensor match){
const auto b = xyz1.size(0);
const auto n = xyz1.size(1);
const auto m = xyz2.size(1);
CHECK_EQ(xyz2.size(0), b);
CHECK_EQ(xyz1.size(2), 3);
CHECK_EQ(xyz2.size(2), 3);
CHECK_INPUT(xyz1);
CHECK_INPUT(xyz2);
auto cost = at::zeros({b}, xyz1.type());
AT_DISPATCH_FLOATING_TYPES(xyz1.scalar_type(), "MatchCostForward", ([&] {
matchcost<scalar_t><<<32,512>>>(b, n, m, xyz1.data<scalar_t>(), xyz2.data<scalar_t>(), match.data<scalar_t>(), cost.data<scalar_t>());
}));
THCudaCheck(cudaGetLastError());
return cost;
}
/********************************
* matchcostgrad2 kernel
*********************************/
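// With cost[i] = sum_{k,j} match[i,k,j] * ||xyz1[i,j] - xyz2[i,k]||^2, the
// backward kernels compute
//   d cost[i] / d xyz2[i,k] = sum_j 2 * match[i,k,j] * (xyz2[i,k] - xyz1[i,j])  (matchcostgrad2)
//   d cost[i] / d xyz1[i,j] = sum_k 2 * match[i,k,j] * (xyz1[i,j] - xyz2[i,k])  (matchcostgrad1)
// each scaled by the incoming grad_cost[i].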
template<typename scalar_t>
__global__ void matchcostgrad2(int b,int n,int m,const scalar_t * __restrict__ grad_cost,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ grad2){
__shared__ scalar_t sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
scalar_t x2=xyz2[(i*m+k)*3+0];
scalar_t y2=xyz2[(i*m+k)*3+1];
scalar_t z2=xyz2[(i*m+k)*3+2];
scalar_t subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
scalar_t x1=x2-xyz1[(i*n+j)*3+0];
scalar_t y1=y2-xyz1[(i*n+j)*3+1];
scalar_t z1=z2-xyz1[(i*n+j)*3+2];
scalar_t d=match[i*n*m+k*n+j]*2;
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0]*grad_cost[i];
grad2[(i*m+k)*3+1]=sum_grad[1]*grad_cost[i];
grad2[(i*m+k)*3+2]=sum_grad[2]*grad_cost[i];
}
__syncthreads();
}
}
}
/********************************
* matchcostgrad1 kernel
*********************************/
template<typename scalar_t>
__global__ void matchcostgrad1(int b,int n,int m,const scalar_t * __restrict__ grad_cost,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
scalar_t x1=xyz1[i*n*3+l*3+0];
scalar_t y1=xyz1[i*n*3+l*3+1];
scalar_t z1=xyz1[i*n*3+l*3+2];
scalar_t dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
scalar_t x2=xyz2[i*m*3+k*3+0];
scalar_t y2=xyz2[i*m*3+k*3+1];
scalar_t z2=xyz2[i*m*3+k*3+2];
scalar_t d=match[i*n*m+k*n+l]*2;
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx*grad_cost[i];
grad1[i*n*3+l*3+1]=dy*grad_cost[i];
grad1[i*n*3+l*3+2]=dz*grad_cost[i];
}
}
}
//void matchcostgradLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,const scalar_t * match,scalar_t * grad1,scalar_t * grad2){
// matchcostgrad1<<<32,512>>>(b,n,m,xyz1,xyz2,match,grad1);
// matchcostgrad2<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
//}
/* MatchCost backward interface
Input:
grad_cost: (B) # gradients on cost
xyz1: (B, N1, 3) # dataset_points
xyz2: (B, N2, 3) # query_points
match: (B, N2, N1)
Output:
grad1: (B, N1, 3)
grad2: (B, N2, 3)
*/
std::vector<at::Tensor> MatchCostBackward(
const at::Tensor grad_cost,
const at::Tensor xyz1,
const at::Tensor xyz2,
const at::Tensor match){
const auto b = xyz1.size(0);
const auto n = xyz1.size(1);
const auto m = xyz2.size(1);
CHECK_EQ(xyz2.size(0), b);
CHECK_EQ(xyz1.size(2), 3);
CHECK_EQ(xyz2.size(2), 3);
CHECK_INPUT(xyz1);
CHECK_INPUT(xyz2);
auto grad1 = at::zeros({b, n, 3}, xyz1.type());
auto grad2 = at::zeros({b, m, 3}, xyz1.type());
AT_DISPATCH_FLOATING_TYPES(xyz1.scalar_type(), "MatchCostBackward", ([&] {
matchcostgrad1<scalar_t><<<32,512>>>(b, n, m, grad_cost.data<scalar_t>(), xyz1.data<scalar_t>(), xyz2.data<scalar_t>(), match.data<scalar_t>(), grad1.data<scalar_t>());
matchcostgrad2<scalar_t><<<dim3(32,32),256>>>(b, n, m, grad_cost.data<scalar_t>(), xyz1.data<scalar_t>(), xyz2.data<scalar_t>(), match.data<scalar_t>(), grad2.data<scalar_t>());
}));
THCudaCheck(cudaGetLastError());
return std::vector<at::Tensor>({grad1, grad2});
}
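// A minimal sketch of how these entry points could be exposed to Python as a
// PyTorch C++ extension. This registration is an assumption, not part of the
// original source: the exported names, the torch/extension.h include and the
// TORCH_EXTENSION_NAME macro (defined by PyTorch's extension build) are shown
// only to illustrate typical usage of the three functions above.
#include <torch/extension.h>
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("approxmatch_forward", &ApproxMatchForward, "ApproxMatch forward (CUDA)");
  m.def("matchcost_forward", &MatchCostForward, "MatchCost forward (CUDA)");
  m.def("matchcost_backward", &MatchCostBackward, "MatchCost backward (CUDA)");
}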
#endif
#include "gpuTopo.h"
#include <isce3/core/Basis.h>
#include <isce3/core/DenseMatrix.h>
#include <isce3/core/Ellipsoid.h>
#include <isce3/core/LookSide.h>
#include <isce3/core/Pixel.h>
#include <isce3/error/ErrorCode.h>
#include <isce3/geometry/TopoLayers.h>
// isce3::cuda::core
#include <isce3/cuda/core/Orbit.h>
#include <isce3/cuda/core/OrbitView.h>
#include <isce3/cuda/core/gpuLUT1d.h>
#include <isce3/cuda/except/Error.h>
// isce3::cuda::geometry
#include "gpuDEMInterpolator.h"
#include "gpuGeometry.h"
#include "gpuTopoLayers.h"
using isce3::core::Vec3;
using isce3::core::Mat3;
using isce3::error::ErrorCode;
using isce3::core::LookSide;
#define THRD_PER_BLOCK 96 // Number of threads per block (should always be a multiple of 32)
__device__
bool initAzimuthLine(size_t line,
const isce3::cuda::core::OrbitView& orbit,
double startAzUTCTime,
double prf,
Vec3& pos, Vec3& vel,
isce3::core::Basis& TCNbasis) {
// Get satellite azimuth time
const double tline = startAzUTCTime + line / prf;
// Interpolate orbit (keeping track of validity without interrupting workflow)
ErrorCode status = orbit.interpolate(&pos, &vel, tline);
bool valid = (status == ErrorCode::Success);
// Compute geocentric TCN basis
TCNbasis = isce3::core::Basis(pos, vel);
return valid;
}
__device__
void setOutputTopoLayers(const Vec3& targetLLH,
isce3::cuda::geometry::gpuTopoLayers & layers,
size_t index, LookSide lookSide,
const isce3::core::Pixel & pixel,
const Vec3& pos, const Vec3& vel,
const isce3::core::Basis& TCNbasis,
isce3::cuda::core::ProjectionBase ** projOutput,
const isce3::core::Ellipsoid& ellipsoid,
const isce3::cuda::geometry::gpuDEMInterpolator & demInterp) {
Vec3 targetXYZ, enu;
const double degrees = 180.0 / M_PI;
// Convert lat/lon values to output coordinate system
Vec3 xyzOut;
(*projOutput)->forward(targetLLH, xyzOut);
const double x = xyzOut[0];
const double y = xyzOut[1];
// Set outputs
layers.x(index, x);
layers.y(index, y);
layers.z(index, targetLLH[2]);
// Convert llh->xyz for ground point
ellipsoid.lonLatToXyz(targetLLH, targetXYZ);
// Compute vector from satellite to ground point
const Vec3 satToGround = targetXYZ - pos;
// Compute cross-track range
if (lookSide == LookSide::Left) {
layers.crossTrack(index, -satToGround.dot(TCNbasis.x1()));
} else {
layers.crossTrack(index, satToGround.dot(TCNbasis.x1()));
}
// Computation in ENU coordinates around target
const Mat3 xyz2enu = Mat3::xyzToEnu(targetLLH[1], targetLLH[0]);
enu = xyz2enu.dot(satToGround);
const double cosalpha = std::abs(enu[2]) / enu.norm();
// Incidence angle
layers.inc(index, std::acos(cosalpha) * degrees);
// Heading on a zero-Doppler grid, measured anti-clockwise from East
double heading;
if (lookSide == LookSide::Left) {
heading = (std::atan2(enu[1], enu[0]) - (0.5*M_PI)) * degrees;
} else {
heading = (std::atan2(enu[1], enu[0]) + (0.5*M_PI)) * degrees;
}
if (heading > 180) {
heading -= 360;
} else if (heading < -180) {
heading += 360;
}
layers.hdg(index, heading);
// East-west slope using central difference
double aa = demInterp.interpolateXY(x - demInterp.deltaX(), y);
double bb = demInterp.interpolateXY(x + demInterp.deltaX(), y);
double gamma = targetLLH[1];
double alpha = ((bb - aa) * degrees) / (2.0 * ellipsoid.rEast(gamma) * demInterp.deltaX());
// North-south slope using central difference
aa = demInterp.interpolateXY(x, y - demInterp.deltaY());
bb = demInterp.interpolateXY(x, y + demInterp.deltaY());
double beta = ((bb - aa) * degrees) / (2.0 * ellipsoid.rNorth(gamma) * demInterp.deltaY());
// Compute local incidence angle
enu /= enu.norm();
double costheta = ((enu[0] * alpha) + (enu[1] * beta) - enu[2])
/ std::sqrt(1.0 + (alpha * alpha) + (beta * beta));
layers.localInc(index, std::acos(costheta)*degrees);
// Compute amplitude simulation
double sintheta = std::sqrt(1.0 - (costheta * costheta));
bb = sintheta + 0.1 * costheta;
layers.sim(index, std::log10(std::abs(0.01 * costheta / (bb * bb * bb))));
// Calculate psi angle between image plane and local slope
Vec3 n_img_enu, n_trg_enu;
Vec3 n_imghat = satToGround.cross(vel).normalized();
if (lookSide == LookSide::Left) {
n_imghat *= -1;
}
n_img_enu = xyz2enu.dot(n_imghat);
n_trg_enu[0] = -alpha;
n_trg_enu[1] = -beta;
n_trg_enu[2] = 1.0;
const double cospsi = n_img_enu.dot(n_trg_enu) /
(n_trg_enu.norm() * n_img_enu.norm());
layers.localPsi(index, std::acos(cospsi) * degrees);
}
__global__
void runTopoBlock(isce3::core::Ellipsoid ellipsoid,
isce3::cuda::core::OrbitView orbit,
isce3::cuda::core::gpuLUT1d<double> doppler,
isce3::cuda::geometry::gpuDEMInterpolator demInterp,
isce3::cuda::core::ProjectionBase ** projOutput,
isce3::cuda::geometry::gpuTopoLayers layers,
size_t lineStart,
LookSide lookSide,
double startAzUTCTime,
double wavelength,
double prf,
double startingRange,
double rangePixelSpacing,
double threshold, int numiter, int extraiter,
unsigned int * totalconv) {
// Get the flattened index
size_t index_flat = (blockDim.x * blockIdx.x) + threadIdx.x;
const size_t NPIXELS = layers.length() * layers.width();
// Only process if a valid pixel (while trying to avoid thread divergence)
if (index_flat < NPIXELS) {
// Unravel the flattened pixel index
const size_t line = index_flat / layers.width();
const size_t rbin = index_flat - line * layers.width();
// Interpolate orbit (keeping track of validity without interrupting workflow)
isce3::core::Basis TCNbasis;
Vec3 pos, vel;
bool valid = (initAzimuthLine(line + lineStart, orbit, startAzUTCTime,
prf, pos, vel, TCNbasis) != 0);
// Compute magnitude of satellite velocity
const double satVmag = vel.norm();
// Get current slant range
const double rng = startingRange + rbin * rangePixelSpacing;
// Get current Doppler value and factor
const double dopval = doppler.eval(rng);
const double dopfact = 0.5 * wavelength * (dopval / satVmag) * rng;
// Store slant range bin data in Pixel
isce3::core::Pixel pixel(rng, dopfact, rbin);
// Initialize LLH to middle of input DEM and average height
Vec3 llh = demInterp.midLonLat();
// Perform rdr->geo iterations
int geostat = isce3::cuda::geometry::rdr2geo(
pixel, TCNbasis, pos, vel, ellipsoid, demInterp, llh, lookSide,
threshold, numiter, extraiter);
// Save data in output arrays
setOutputTopoLayers(llh, layers, index_flat, lookSide, pixel, pos, vel, TCNbasis,
projOutput, ellipsoid, demInterp);
// Update convergence count
atomicAdd(totalconv, (unsigned int) geostat);
}
}
// C++ Host code for launching kernel to run topo on current block
void isce3::cuda::geometry::
runGPUTopo(const isce3::core::Ellipsoid & ellipsoid,
const isce3::core::Orbit & orbit,
const isce3::core::LUT1d<double> & doppler,
isce3::geometry::DEMInterpolator & demInterp,
isce3::geometry::TopoLayers & layers,
size_t lineStart,
LookSide lookSide,
int epsgOut,
double startAzUTCTime,
double wavelength,
double prf,
double startingRange,
double rangePixelSpacing,
double threshold, int numiter, int extraiter,
unsigned int & totalconv) {
// Create gpu ISCE objects
isce3::cuda::core::Orbit gpu_orbit(orbit);
isce3::cuda::core::gpuLUT1d<double> gpu_doppler(doppler);
isce3::cuda::geometry::gpuDEMInterpolator gpu_demInterp(demInterp);
isce3::cuda::geometry::gpuTopoLayers gpu_layers(layers);
// Allocate projection pointers on device
isce3::cuda::core::ProjectionBase **projOutput_d;
checkCudaErrors(cudaMalloc(&projOutput_d, sizeof(isce3::cuda::core::ProjectionBase **)));
createProjection<<<1, 1>>>(projOutput_d, epsgOut);
// DEM interpolator initializes its projection and interpolator
gpu_demInterp.initProjInterp();
// Allocate integer for storing convergence results
unsigned int * totalconv_d;
checkCudaErrors(cudaMalloc(&totalconv_d, sizeof(unsigned int)));
checkCudaErrors(cudaMemcpy(totalconv_d, &totalconv, sizeof(unsigned int),
cudaMemcpyHostToDevice));
// Determine grid layout
dim3 block(THRD_PER_BLOCK);
const size_t npixel = layers.length() * layers.width();
const int nBlocks = (int) std::ceil((1.0 * npixel) / THRD_PER_BLOCK);
dim3 grid(nBlocks);
// Launch kernel
runTopoBlock<<<grid, block>>>(ellipsoid, gpu_orbit, gpu_doppler,
gpu_demInterp, projOutput_d, gpu_layers,
lineStart, lookSide,
startAzUTCTime, wavelength, prf, startingRange,
rangePixelSpacing, threshold, numiter, extraiter,
totalconv_d);
// Check for any kernel errors
checkCudaErrors(cudaPeekAtLastError());
// Copy results back to host
gpu_layers.copyToHost(layers);
checkCudaErrors(cudaMemcpy(&totalconv, totalconv_d, sizeof(unsigned int),
cudaMemcpyDeviceToHost));
// Delete projection pointer on device
gpu_demInterp.finalizeProjInterp();
deleteProjection<<<1, 1>>>(projOutput_d);
// Free projection pointer and convergence count
checkCudaErrors(cudaFree(totalconv_d));
checkCudaErrors(cudaFree(projOutput_d));
}
// end of file
#include "operator/jet_vector.h"
#include "operator/jet_vector_math_impl.cuh"
namespace MegBA {
namespace math {
namespace impl {
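// fitGridAndBlock returns {grid, block} for a 1D launch: the block holds
// min(nItem, 256) threads and the grid has just enough blocks to cover nItem.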
inline std::array<dim3, 2> fitGridAndBlock(const unsigned int nItem) {
std::array<dim3, 2> gridAndDim;
if (nItem < 256) {
gridAndDim[1] = dim3(nItem);
gridAndDim[0] = dim3(1);
} else {
gridAndDim[1] = dim3(256);
gridAndDim[0] = dim3((nItem - 1) / gridAndDim[1].x + 1);
}
return gridAndDim;
}
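// Naming convention used by the kernels below:
//   JetVector (JV): dense gradient of length N stored alongside the residual.
//   Jet_PVector (JPV): the gradient is an implicit unit vector identified by
//     getGradPosition() (-1 for non-JPV), so only that column is touched.
//   Scalar_Vector: residual only, no gradient.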
template <typename T>
__global__ void JetVector_add_JetVector_Kernel(const unsigned int N,
const unsigned int nItem,
const T *f_res, const T *f_grad,
const T *g_res, const T *g_grad,
T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
g_grad[tid + i * nItem] + f_grad[tid + i * nItem];
out_res[tid] = f_res[tid] + g_res[tid];
}
template <typename T>
__global__ void Jet_PVector_add_JetVector_Kernel(const unsigned int nItem,
const T *f_res,
const int f_grad_position,
const T *g_res, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + f_grad_position * nItem] += 1;
out_res[tid] = f_res[tid] + g_res[tid];
}
template <typename T>
__global__ void Jet_PVector_add_Jet_PVector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, const int g_grad_position, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + f_grad_position * nItem] = 1;
out_grad[tid + g_grad_position * nItem] += 1;
out_res[tid] = f_res[tid] + g_res[tid];
}
template <typename T>
void JetVector_add_JetVector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
if (g.getGradPosition() != -1) {
// f is JPV, g is JPV
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
f.getGradShape() * nItem * sizeof(T));
Jet_PVector_add_Jet_PVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], g.getGradPosition(), out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
// f is JPV, g is not JPV
cudaMemcpyAsync(out->getCUDAGradPtr()[i], g.getCUDAGradPtr()[i],
out->getGradShape() * nItem * sizeof(T),
cudaMemcpyDeviceToDevice);
Jet_PVector_add_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
}
} else {
// f is not JPV, g is JPV
if (g.getGradPosition() != -1) {
cudaMemcpyAsync(out->getCUDAGradPtr()[i], f.getCUDAGradPtr()[i],
out->getGradShape() * nItem * sizeof(T),
cudaMemcpyDeviceToDevice);
Jet_PVector_add_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, g.getCUDAResPtr()[i], g.getGradPosition(),
f.getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
JetVector_add_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
}
template <typename T>
__global__ void Jet_PVector_add_Scalar_Vector_Kernel(const unsigned int nItem,
const T *f_res,
const int f_grad_position,
const T *g_res, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + f_grad_position * nItem] = 1;
out_res[tid] = f_res[tid] + g_res[tid];
}
template <typename T>
__global__ void JetVector_add_Scalar_Vector_Kernel(const unsigned int nItem,
const T *f_res,
const T *g_res, T *out_res) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_res[tid] = f_res[tid] + g_res[tid];
}
template <typename T>
void JetVector_add_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
const auto nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
// f is JPV
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_add_Scalar_Vector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
// f is not JPV
cudaMemcpyAsync(out->getCUDAGradPtr()[i], f.getCUDAGradPtr()[i],
f.getGradShape() * nItem * sizeof(T),
cudaMemcpyDeviceToDevice);
JetVector_add_Scalar_Vector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i]);
}
}
}
template <typename T>
__global__ void Scalar_Vector_add_Scalar_Vector_Kernel(const unsigned int nItem,
const T *f_res,
const T *g_res,
T *out_res) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_res[tid] = f_res[tid] + g_res[tid];
}
template <typename T>
void Scalar_Vector_add_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
const auto nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
Scalar_Vector_add_Scalar_Vector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i]);
}
}
template <typename T>
void vectorAddVectorCUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
if (f.getGradShape() != 0) {
if (g.getGradShape() != 0) {
JetVector_add_JetVector_CUDA(f, g, out);
} else {
JetVector_add_Scalar_Vector_CUDA(f, g, out);
}
} else {
if (g.getGradShape() != 0) {
JetVector_add_Scalar_Vector_CUDA(g, f, out);
} else {
Scalar_Vector_add_Scalar_Vector_CUDA(f, g, out);
}
}
}
template void vectorAddVectorCUDA<double>(const MegBA::JetVector<double> &f,
const MegBA::JetVector<double> &g,
MegBA::JetVector<double> *out);
template void vectorAddVectorCUDA<float>(const MegBA::JetVector<float> &f,
const MegBA::JetVector<float> &g,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void Jet_PVector_minus_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const int f_grad_position, const T *g_res, const T *g_grad, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = -g_grad[tid + i * nItem];
out_grad[tid + f_grad_position * nItem] += 1;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
__global__ void Jet_PVector_minus_Jet_PVector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, const int g_grad_position, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + f_grad_position * nItem] = 1;
out_grad[tid + g_grad_position * nItem] -= 1;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
__global__ void JetVector_minus_Jet_PVector_Kernel(const unsigned int nItem,
const T *f_res,
const T *g_res,
const int g_grad_position,
T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + g_grad_position * nItem] -= 1;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
__global__ void JetVector_minus_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *f_grad, const T *g_res, const T *g_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
f_grad[tid + i * nItem] - g_grad[tid + i * nItem];
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
void JetVector_minus_JetVector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
if (g.getGradPosition() != -1) {
// f is JPV, g is JPV
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_minus_Jet_PVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], g.getGradPosition(),
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
} else {
// f is JPV, g is not JPV
Jet_PVector_minus_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getGradPosition(), g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
} else {
if (g.getGradPosition() != -1) {
// f is not JPV, g is JPV
cudaMemcpyAsync(out->getCUDAGradPtr()[i], f.getCUDAGradPtr()[i],
f.getGradShape() * nItem * sizeof(T),
cudaMemcpyDeviceToDevice);
JetVector_minus_Jet_PVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], g.getCUDAResPtr()[i],
g.getGradPosition(), out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
// f is not JPV, g is not JPV
JetVector_minus_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
}
template <typename T>
__global__ void Jet_PVector_minus_Scalar_Vector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + f_grad_position * nItem] = 1;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
__global__ void JetVector_minus_Scalar_Vector_Kernel(const unsigned int nItem,
const T *f_res,
const T *g_res,
T *out_res) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
void JetVector_minus_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_minus_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
cudaMemcpyAsync(out->getCUDAGradPtr()[i], f.getCUDAGradPtr()[i],
out->getGradShape() * nItem * sizeof(T),
cudaMemcpyDeviceToDevice);
JetVector_minus_Scalar_Vector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i]);
}
}
}
template <typename T>
__global__ void Scalar_Vector_minus_PJetVector_Kernel(const unsigned int nItem,
const T *f_res,
const T *g_res,
const int f_grad_position,
T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_grad[tid + f_grad_position * nItem] = -1;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
__global__ void Scalar_Vector_minus_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *g_res, const T *g_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = -g_grad[tid + i * nItem];
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
void Scalar_Vector_minus_JetVector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (g.getGradPosition() != -1) {
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Scalar_Vector_minus_PJetVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], g.getCUDAResPtr()[i],
g.getGradPosition(), out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
Scalar_Vector_minus_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
}
}
}
template <typename T>
__global__ void Scalar_Vector_minus_Scalar_Vector_Kernel(
const unsigned int nItem, const T *f_res, const T *g_res, T *out_res) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_res[tid] = f_res[tid] - g_res[tid];
}
template <typename T>
void Scalar_Vector_minus_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
Scalar_Vector_minus_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(nItem, f.getCUDAResPtr()[i],
g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i]);
}
}
template <typename T>
void vectorSubVectorCUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
if (f.getGradShape() != 0) {
if (g.getGradShape() != 0) {
JetVector_minus_JetVector_CUDA(f, g, out);
} else {
JetVector_minus_Scalar_Vector_CUDA(f, g, out);
}
} else {
if (g.getGradShape() != 0) {
Scalar_Vector_minus_JetVector_CUDA(f, g, out);
} else {
Scalar_Vector_minus_Scalar_Vector_CUDA(f, g, out);
}
}
}
template void vectorSubVectorCUDA<double>(const MegBA::JetVector<double> &f,
const MegBA::JetVector<double> &g,
MegBA::JetVector<double> *out);
template void vectorSubVectorCUDA<float>(const MegBA::JetVector<float> &f,
const MegBA::JetVector<float> &g,
MegBA::JetVector<float> *out);
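// Multiplication follows the jet product rule:
//   (f * g).res  = f.res * g.res
//   (f * g).grad = f.res * g.grad + g.res * f.grad
// The kernels below specialise this for operands whose gradient is an implicit
// unit vector (JPV) or absent (Scalar_Vector).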
template <typename T>
__global__ void JetVector_multiplies_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *f_grad, const T *g_res, const T *g_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_res_local * g_grad[tid + i * nItem] +
g_res_local * f_grad[tid + i * nItem];
out_res[tid] = f_res_local * g_res_local;
}
template <typename T>
__global__ void Jet_PVector_multiplies_Jet_PVector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, const int g_grad_position, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
out_grad[tid + f_grad_position * nItem] = g_res_local;
out_grad[tid + g_grad_position * nItem] += f_res_local;
out_res[tid] = f_res_local * g_res_local;
}
template <typename T>
__global__ void Jet_PVector_multiplies_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const int f_grad_position, const T *g_res, const T *g_grad, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_res_local * g_grad[tid + i * nItem];
out_grad[tid + f_grad_position * nItem] += g_res_local;
out_res[tid] = f_res_local * g_res_local;
}
template <typename T>
void JetVector_multiplies_JetVector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
if (g.getGradPosition() != -1) {
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_multiplies_Jet_PVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], g.getGradPosition(),
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
} else {
Jet_PVector_multiplies_JetVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getGradPosition(), g.getCUDAResPtr()[i],
g.getCUDAGradPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
}
} else {
if (g.getGradPosition() != -1) {
Jet_PVector_multiplies_JetVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, g.getCUDAResPtr()[i],
g.getGradPosition(), f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
JetVector_multiplies_JetVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i],
g.getCUDAGradPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
}
}
}
}
template <typename T>
__global__ void Jet_PVector_multiplies_Scalar_Vector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
out_grad[tid + f_grad_position * nItem] = g_res_local;
out_res[tid] = f_res_local * g_res_local;
}
template <typename T>
__global__ void JetVector_multiplies_Scalar_Vector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *f_grad, const T *g_res, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = g_res_local * f_grad[tid + i * nItem];
out_res[tid] = f_res_local * g_res_local;
}
template <typename T>
void JetVector_multiplies_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_multiplies_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
JetVector_multiplies_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
template <typename T>
__global__ void Scalar_Vector_multiplies_Scalar_Vector_Kernel(
const unsigned int nItem, const T *f_res, const T *g_res, T *out_res) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_res[tid] = f_res[tid] * g_res[tid];
}
template <typename T>
void Scalar_Vector_multiplies_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
Scalar_Vector_multiplies_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(nItem, f.getCUDAResPtr()[i],
g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i]);
}
}
template <typename T>
void vectorMulVectorCUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
if (f.getGradShape() != 0) {
if (g.getGradShape() != 0) {
JetVector_multiplies_JetVector_CUDA(f, g, out);
} else {
JetVector_multiplies_Scalar_Vector_CUDA(f, g, out);
}
} else {
if (g.getGradShape() != 0) {
JetVector_multiplies_Scalar_Vector_CUDA(g, f, out);
} else {
Scalar_Vector_multiplies_Scalar_Vector_CUDA(f, g, out);
}
}
}
template void vectorMulVectorCUDA<double>(const MegBA::JetVector<double> &f,
const MegBA::JetVector<double> &g,
MegBA::JetVector<double> *out);
template void vectorMulVectorCUDA<float>(const MegBA::JetVector<float> &f,
const MegBA::JetVector<float> &g,
MegBA::JetVector<float> *out);
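// Division follows the jet quotient rule. With h = f / g:
//   h.res  = f.res / g.res
//   h.grad = (f.grad - h.res * g.grad) / g.res
// The kernels below precompute 1 / g.res and h.res, then specialise for JPV and
// Scalar_Vector operands.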
template <typename T>
__global__ void Jet_PVector_divides_Jet_PVector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, const int g_grad_position, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
T g_res_inv_local = T(1) / g_res_local;
T f_res_div_g_res_local = f_res_local * g_res_inv_local;
bool same_position = f_grad_position == g_grad_position;
out_grad[tid + f_grad_position * nItem] =
(1 - f_res_div_g_res_local * same_position) * g_res_inv_local;
out_grad[tid + g_grad_position * nItem] +=
(same_position - f_res_div_g_res_local) * g_res_inv_local;
out_res[tid] = f_res_div_g_res_local;
}
template <typename T>
__global__ void Jet_PVector_divides_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const int f_grad_position, const T *g_res, const T *g_grad, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
T g_res_inv_local = T(1) / g_res_local;
T f_res_div_g_res_local = f_res_local * g_res_inv_local;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
-f_res_div_g_res_local * g_grad[tid + i * nItem] * g_res_inv_local;
out_grad[tid + f_grad_position * nItem] += g_res_inv_local;
out_res[tid] = f_res_div_g_res_local;
}
template <typename T>
__global__ void JetVector_divides_Jet_PVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *f_grad, const T *g_res, const int g_grad_position, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
T g_res_inv_local = T(1) / g_res_local;
T f_res_div_g_res_local = f_res_local * g_res_inv_local;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_grad[tid + i * nItem] * g_res_inv_local;
out_grad[tid + g_grad_position * nItem] +=
-f_res_div_g_res_local * g_res_inv_local;
out_res[tid] = f_res_div_g_res_local;
}
template <typename T>
__global__ void JetVector_divides_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *f_grad, const T *g_res, const T *g_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
T g_res_inv_local = T(1) / g_res_local;
T f_res_div_g_res_local = f_res_local * g_res_inv_local;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
(f_grad[tid + i * nItem] -
f_res_div_g_res_local * g_grad[tid + i * nItem]) *
g_res_inv_local;
out_res[tid] = f_res_div_g_res_local;
}
template <typename T>
void JetVector_divides_JetVector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) {
if (g.getGradPosition() != -1) {
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_divides_Jet_PVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], g.getGradPosition(),
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
} else {
Jet_PVector_divides_JetVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getGradPosition(), g.getCUDAResPtr()[i],
g.getCUDAGradPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
}
} else {
if (g.getGradPosition() != -1) {
JetVector_divides_Jet_PVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i],
g.getGradPosition(), out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
JetVector_divides_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
}
template <typename T>
__global__ void Jet_PVector_divides_Scalar_Vector_Kernel(
const unsigned int nItem, const T *f_res, const int f_grad_position,
const T *g_res, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T g_res_inv_local = T(1) / g_res[tid];
out_grad[tid + f_grad_position * nItem] = g_res_inv_local;
out_res[tid] = f_res[tid] * g_res_inv_local;
}
template <typename T>
__global__ void JetVector_divides_Scalar_Vector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *f_grad, const T *g_res, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T g_res_inv_local = T(1) / g_res[tid];
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_grad[tid + i * nItem] * g_res_inv_local;
out_res[tid] = f_res[tid] * g_res_inv_local;
}
template <typename T>
void JetVector_divides_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (f.getGradPosition() != -1) { // f is JPV; -1 means no single gradient position
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Jet_PVector_divides_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], f.getGradPosition(),
g.getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
JetVector_divides_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
f.getCUDAGradPtr()[i], g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
template <typename T>
__global__ void Scalar_Vector_divides_Scalar_Vector_Kernel(
const unsigned int nItem, const T *f_res, const T *g_res, T *out_res) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
out_res[tid] = f_res[tid] / g_res[tid];
}
template <typename T>
void Scalar_Vector_divides_Scalar_Vector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
Scalar_Vector_divides_Scalar_Vector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(nItem, f.getCUDAResPtr()[i],
g.getCUDAResPtr()[i],
out->getCUDAResPtr()[i]);
}
}
template <typename T>
__global__ void Scalar_Vector_divides_Jet_PVector_Kernel(
const unsigned int nItem, const T *f_res, const T *g_res,
const int g_grad_position, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
T g_res_inv_local = T(1) / g_res_local;
T f_res_div_g_res_local = f_res_local * g_res_inv_local;
out_grad[tid + g_grad_position * nItem] =
-f_res_div_g_res_local * g_res_inv_local;
out_res[tid] = f_res_div_g_res_local;
}
template <typename T>
__global__ void Scalar_Vector_divides_JetVector_Kernel(
const unsigned int N, const unsigned int nItem, const T *f_res,
const T *g_res, const T *g_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
T g_res_local = g_res[tid];
T g_res_inv_local = T(1) / g_res_local;
T f_res_div_g_res_local = f_res_local * g_res_inv_local;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
-f_res_div_g_res_local * g_grad[tid + i * nItem] * g_res_inv_local;
out_res[tid] = f_res_div_g_res_local;
}
template <typename T>
void Scalar_Vector_divides_JetVector_CUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
if (g.getGradPosition() != 0) {
cudaMemsetAsync(out->getCUDAGradPtr()[i], 0,
out->getGradShape() * nItem * sizeof(T));
Scalar_Vector_divides_Jet_PVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
nItem, f.getCUDAResPtr()[i], g.getCUDAResPtr()[i],
g.getGradPosition(), out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
Scalar_Vector_divides_JetVector_Kernel<T>
<<<gridAndDim[0], gridAndDim[1]>>>(
out->getGradShape(), nItem, f.getCUDAResPtr()[i],
g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
template <typename T>
void vectorDivVectorCUDA(const MegBA::JetVector<T> &f,
const MegBA::JetVector<T> &g,
MegBA::JetVector<T> *out) {
if (f.getGradShape() != 0) {
if (g.getGradShape() != 0) {
JetVector_divides_JetVector_CUDA(f, g, out);
} else {
JetVector_divides_Scalar_Vector_CUDA(f, g, out);
}
} else {
if (g.getGradShape() != 0) {
Scalar_Vector_divides_JetVector_CUDA(f, g, out);
} else {
Scalar_Vector_divides_Scalar_Vector_CUDA(f, g, out);
}
}
}
template void vectorDivVectorCUDA<double>(const MegBA::JetVector<double> &f,
const MegBA::JetVector<double> &g,
MegBA::JetVector<double> *out);
template void vectorDivVectorCUDA<float>(const MegBA::JetVector<float> &f,
const MegBA::JetVector<float> &g,
MegBA::JetVector<float> *out);
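// Illustrative host-side sketch (not part of the original implementation; the function
// name and flat gradient layout are made up). The four division kernels dispatched by
// vectorDivVectorCUDA all follow the dual-number quotient rule for out = f / g:
//   out.res     = f.res / g.res
//   out.grad[i] = (f.grad[i] - out.res * g.grad[i]) / g.res
// which equals (f.grad[i] * g.res - f.res * g.grad[i]) / g.res^2 but reuses 1 / g.res.
template <typename T>
void referenceJetDivide(unsigned int N, T f_res, const T *f_grad, T g_res,
                        const T *g_grad, T *out_res, T *out_grad) {
  const T g_inv = T(1) / g_res;  // shared reciprocal, as in the kernels above
  const T res = f_res * g_inv;
  for (unsigned int i = 0; i < N; ++i)
    out_grad[i] = (f_grad[i] - res * g_grad[i]) * g_inv;
  *out_res = res;
}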
template <typename T>
__global__ void JetVector_add_Scalar_Kernel(const unsigned int N,
const unsigned int nItem,
const T *f_res, const T *f_grad,
const T g, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_grad[tid + i * nItem];
out_res[tid] = f_res[tid] + g;
}
template <typename T>
void jetVectorAddScalarCUDA(const MegBA::JetVector<T> &f, T g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
JetVector_add_Scalar_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i], g,
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void jetVectorAddScalarCUDA<double>(const MegBA::JetVector<double> &f,
double g,
MegBA::JetVector<double> *out);
template void jetVectorAddScalarCUDA<float>(const MegBA::JetVector<float> &f,
float g,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void JetVector_minus_Scalar_Kernel(const unsigned int N,
const unsigned int nItem,
const T *f_res, const T *f_grad,
const T g, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_grad[tid + i * nItem];
out_res[tid] = f_res[tid] - g;
}
template <typename T>
void jetVectorSubScalarCUDA(const MegBA::JetVector<T> &f, T g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
JetVector_minus_Scalar_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i], g,
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void jetVectorSubScalarCUDA<double>(const MegBA::JetVector<double> &f,
double g,
MegBA::JetVector<double> *out);
template void jetVectorSubScalarCUDA<float>(const MegBA::JetVector<float> &f,
float g,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void JetVector_multiplies_Scalar_Kernel(const unsigned int N,
const unsigned int nItem,
const T *f_res,
const T *f_grad, const T g,
T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_grad[tid + i * nItem] * g;
out_res[tid] = f_res[tid] * g;
}
template <typename T>
void jetVectorMulScalarCUDA(const MegBA::JetVector<T> &f, T g,
MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
JetVector_multiplies_Scalar_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i], g,
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void jetVectorMulScalarCUDA<double>(const MegBA::JetVector<double> &f,
double g,
MegBA::JetVector<double> *out);
template void jetVectorMulScalarCUDA<float>(const MegBA::JetVector<float> &f,
float g,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void Scalar_minus_JetVector_Kernel(const unsigned int N,
const unsigned int nItem,
const T f, const T *g_res,
const T *g_grad, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = -g_grad[tid + i * nItem];
out_res[tid] = f - g_res[tid];
}
template <typename T>
void scalarSubJetVectorCUDA(T f, const JetVector<T> &g, JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
Scalar_minus_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
g.getGradShape(), nItem, f, g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void scalarSubJetVectorCUDA<double>(double f,
const MegBA::JetVector<double> &g,
MegBA::JetVector<double> *out);
template void scalarSubJetVectorCUDA<float>(float f,
const MegBA::JetVector<float> &g,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void Scalar_divides_JetVector_Kernel(const unsigned int N,
const unsigned int nItem,
const T f, const T *g_res,
const T *g_grad, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T g_res_inv_local = T(1) / g_res[tid];
T g_res_inv_times_f_local = f * g_res_inv_local;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
-g_grad[tid + i * nItem] * g_res_inv_local * g_res_inv_times_f_local;
out_res[tid] = g_res_inv_times_f_local;
}
template <typename T>
void scalarDivJetVectorCUDA(T f, const JetVector<T> &g, JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
Scalar_divides_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
g.getGradShape(), nItem, f, g.getCUDAResPtr()[i], g.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void scalarDivJetVectorCUDA<double>(double f,
const MegBA::JetVector<double> &g,
MegBA::JetVector<double> *out);
template void scalarDivJetVectorCUDA<float>(float f,
const MegBA::JetVector<float> &g,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void abs_JetVector_Kernel(const unsigned int N,
const unsigned int nItem, const T *f_res,
const T *f_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
int mask_local = static_cast<int>(f_res_local > 0) * 2 - 1;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = mask_local * f_grad[tid + i * nItem];
out_res[tid] = mask_local * f_res_local;
}
template <typename T>
void absJetVectorCUDA(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
abs_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void absJetVectorCUDA<double>(const MegBA::JetVector<double> &f,
MegBA::JetVector<double> *out);
template void absJetVectorCUDA<float>(const MegBA::JetVector<float> &f,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void cos_JetVector_Kernel(const unsigned int N,
const unsigned int nItem, const T *f_res,
const T *f_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
-f_grad[tid + i * nItem] * std::sin(f_res_local);
out_res[tid] = std::cos(f_res_local);
}
template <typename T>
void cosJetVectorCUDA(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
cos_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void cosJetVectorCUDA<double>(const MegBA::JetVector<double> &f,
MegBA::JetVector<double> *out);
template void cosJetVectorCUDA<float>(const MegBA::JetVector<float> &f,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void sin_JetVector_Kernel(const unsigned int N,
const unsigned int nItem, const T *f_res,
const T *f_grad, T *out_res, T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_local = f_res[tid];
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] = f_grad[tid + i * nItem] * std::cos(f_res_local);
out_res[tid] = std::sin(f_res_local);
}
template <typename T>
void sinJetVectorCUDA(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
sin_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void sinJetVectorCUDA<double>(const MegBA::JetVector<double> &f,
MegBA::JetVector<double> *out);
template void sinJetVectorCUDA<float>(const MegBA::JetVector<float> &f,
MegBA::JetVector<float> *out);
template <typename T>
__global__ void sqrt_JetVector_Kernel(const unsigned int N,
const unsigned int nItem, const T *f_res,
const T *f_grad, T *out_res,
T *out_grad) {
/*
* 1D block and grid
*/
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f_res_sqrt_local = std::sqrt(f_res[tid]);
T f_res_sqrt_half_inv_local = T(0.5) / f_res_sqrt_local;
for (unsigned int i = 0; i < N; ++i)
out_grad[tid + i * nItem] =
f_grad[tid + i * nItem] * f_res_sqrt_half_inv_local;
out_res[tid] = f_res_sqrt_local;
}
template <typename T>
void sqrtJetVectorCUDA(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
unsigned int nItem = out->getItemNum(i);
std::array<dim3, 2> gridAndDim = fitGridAndBlock(nItem);
sqrt_JetVector_Kernel<T><<<gridAndDim[0], gridAndDim[1]>>>(
f.getGradShape(), nItem, f.getCUDAResPtr()[i], f.getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
template void sqrtJetVectorCUDA<double>(const MegBA::JetVector<double> &f,
MegBA::JetVector<double> *out);
template void sqrtJetVectorCUDA<float>(const MegBA::JetVector<float> &f,
MegBA::JetVector<float> *out);
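// Reference sketch (illustration only; the function name is made up). The unary kernels
// above all apply the scalar chain rule out.grad[i] = u'(f.res) * f.grad[i] with
//   abs : u'(x) = sign(x)        cos : u'(x) = -sin(x)
//   sin : u'(x) =  cos(x)        sqrt: u'(x) = 1 / (2 * sqrt(x))
// A single-item host reference for the sqrt case:
template <typename T>
void referenceJetSqrt(unsigned int N, T f_res, const T *f_grad, T *out_res,
                      T *out_grad) {
  const T s = std::sqrt(f_res);
  for (unsigned int i = 0; i < N; ++i)
    out_grad[i] = f_grad[i] * (T(0.5) / s);  // same 0.5 / sqrt(f) factor as the kernel
  *out_res = s;
}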
} // namespace impl
} // namespace math
} // namespace MegBA
|
the_stack
|
#include "util.cuh"
#include "embedding.h"
// Memory & time efficient implementation of embedding score functions
// Much of the code is adapted from GraphVite
// https://github.com/DeepGraphLearning/graphvite
namespace at {
template <class scalar_t>
__global__
void transe_forward_out_cuda(const scalar_t *entity, const scalar_t *relation, const int64_t *h_index,
const int64_t *t_index, const int64_t *r_index, scalar_t *score,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
const scalar_t *h = entity + h_index[sample_id] * embedding_dim;
const scalar_t *r = relation + r_index[sample_id] * embedding_dim;
const scalar_t *t = entity + t_index[sample_id] * embedding_dim;
scalar_t x = 0;
for (int64_t i = lane_id; i < embedding_dim; i += warpSize)
x += ::abs(h[i] + r[i] - t[i]);
x = warp_broadcast(warp_reduce(x), 0);
if (lane_id == 0)
score[sample_id] = x;
}
}
template <class scalar_t>
__global__
void transe_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
int64_t h_sample = h_index[sample_id];
int64_t r_sample = r_index[sample_id];
int64_t t_sample = t_index[sample_id];
const scalar_t *h = entity + h_sample * embedding_dim;
const scalar_t *r = relation + r_sample * embedding_dim;
const scalar_t *t = entity + t_sample * embedding_dim;
scalar_t *h_grad = entity_grad + h_sample * embedding_dim;
scalar_t *r_grad = relation_grad + r_sample * embedding_dim;
scalar_t *t_grad = entity_grad + t_sample * embedding_dim;
scalar_t grad = score_grad[sample_id];
for (int64_t i = lane_id; i < embedding_dim; i += warpSize) {
scalar_t s = h[i] + r[i] - t[i] > 0 ? 1 : -1;
atomicAdd(&h_grad[i], grad * s);
atomicAdd(&r_grad[i], grad * s);
atomicAdd(&t_grad[i], -grad * s);
}
}
}
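// Reference sketch (assumption, not code from the original project; the helper name is
// made up). The two kernels above implement the L1 TransE score and its gradient:
//   score(h, r, t)  = sum_i |h_i + r_i - t_i|
//   d score / d h_i =  sign(h_i + r_i - t_i)
//   d score / d r_i =  sign(h_i + r_i - t_i)
//   d score / d t_i = -sign(h_i + r_i - t_i)
// A single-sample host version for cross-checking:
template <class scalar_t>
scalar_t transe_score_reference(const scalar_t *h, const scalar_t *r, const scalar_t *t,
                                int64_t embedding_dim) {
    scalar_t x = 0;
    for (int64_t i = 0; i < embedding_dim; i++) {
        scalar_t d = h[i] + r[i] - t[i];
        x += d > 0 ? d : -d;
    }
    return x;
}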
template <class scalar_t>
__global__
void distmult_forward_out_cuda(const scalar_t *entity, const scalar_t *relation, const int64_t *h_index,
const int64_t *t_index, const int64_t *r_index, scalar_t *score,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
const scalar_t *h = entity + h_index[sample_id] * embedding_dim;
const scalar_t *r = relation + r_index[sample_id] * embedding_dim;
const scalar_t *t = entity + t_index[sample_id] * embedding_dim;
scalar_t x = 0;
for (int64_t i = lane_id; i < embedding_dim; i += warpSize)
x += h[i] * r[i] * t[i];
x = warp_broadcast(warp_reduce(x), 0);
if (lane_id == 0)
score[sample_id] = x;
}
}
template <class scalar_t>
__global__
void distmult_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
int64_t h_sample = h_index[sample_id];
int64_t r_sample = r_index[sample_id];
int64_t t_sample = t_index[sample_id];
const scalar_t *h = entity + h_sample * embedding_dim;
const scalar_t *r = relation + r_sample * embedding_dim;
const scalar_t *t = entity + t_sample * embedding_dim;
scalar_t *h_grad = entity_grad + h_sample * embedding_dim;
scalar_t *r_grad = relation_grad + r_sample * embedding_dim;
scalar_t *t_grad = entity_grad + t_sample * embedding_dim;
scalar_t grad = score_grad[sample_id];
for (int64_t i = lane_id; i < embedding_dim; i += warpSize) {
scalar_t h_i = h[i], r_i = r[i], t_i = t[i];
atomicAdd(&h_grad[i], grad * r_i * t_i);
atomicAdd(&r_grad[i], grad * h_i * t_i);
atomicAdd(&t_grad[i], grad * h_i * r_i);
}
}
}
template <class scalar_t>
__global__
void complex_forward_out_cuda(const scalar_t *entity, const scalar_t *relation, const int64_t *h_index,
const int64_t *t_index, const int64_t *r_index, scalar_t *score,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
const scalar_t *h = entity + h_index[sample_id] * embedding_dim;
const scalar_t *r = relation + r_index[sample_id] * embedding_dim;
const scalar_t *t = entity + t_index[sample_id] * embedding_dim;
scalar_t x = 0;
for (int64_t i = lane_id; i < embedding_dim / 2; i += warpSize) {
scalar_t h_re = h[i], h_im = h[i + embedding_dim / 2];
scalar_t r_re = r[i], r_im = r[i + embedding_dim / 2];
scalar_t t_re = t[i], t_im = t[i + embedding_dim / 2];
scalar_t product_re = h_re * r_re - h_im * r_im;
scalar_t product_im = h_re * r_im + h_im * r_re;
x += product_re * t_re + product_im * t_im;
}
x = warp_broadcast(warp_reduce(x), 0);
if (lane_id == 0)
score[sample_id] = x;
}
}
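// Reference sketch (assumption; the helper name is made up). With each embedding split
// into [real | imaginary] halves of length embedding_dim / 2, the kernel above computes
// the ComplEx score
//   score(h, r, t) = sum_k Re( h_k * r_k * conj(t_k) )
// where h_k = h[k] + i * h[k + embedding_dim / 2], and similarly for r and t.
// One complex term expanded:
template <class scalar_t>
scalar_t complex_term_reference(scalar_t h_re, scalar_t h_im, scalar_t r_re,
                                scalar_t r_im, scalar_t t_re, scalar_t t_im) {
    scalar_t product_re = h_re * r_re - h_im * r_im;  // Re(h * r)
    scalar_t product_im = h_re * r_im + h_im * r_re;  // Im(h * r)
    return product_re * t_re + product_im * t_im;     // Re((h * r) * conj(t))
}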
template <class scalar_t>
__global__
void complex_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
int64_t h_sample = h_index[sample_id];
int64_t r_sample = r_index[sample_id];
int64_t t_sample = t_index[sample_id];
const scalar_t *h = entity + h_sample * embedding_dim;
const scalar_t *r = relation + r_sample * embedding_dim;
const scalar_t *t = entity + t_sample * embedding_dim;
scalar_t *h_grad = entity_grad + h_sample * embedding_dim;
scalar_t *r_grad = relation_grad + r_sample * embedding_dim;
scalar_t *t_grad = entity_grad + t_sample * embedding_dim;
scalar_t grad = score_grad[sample_id];
for (int64_t i = lane_id; i < embedding_dim / 2; i += warpSize) {
scalar_t h_re = h[i], h_im = h[i + embedding_dim / 2];
scalar_t r_re = r[i], r_im = r[i + embedding_dim / 2];
scalar_t t_re = t[i], t_im = t[i + embedding_dim / 2];
atomicAdd(&h_grad[i], grad * (r_re * t_re + r_im * t_im));
atomicAdd(&h_grad[i + embedding_dim / 2], grad * (-r_im * t_re + r_re * t_im));
atomicAdd(&r_grad[i], grad * (h_re * t_re + h_im * t_im));
atomicAdd(&r_grad[i + embedding_dim / 2], grad * (-h_im * t_re + h_re * t_im));
atomicAdd(&t_grad[i], grad * (h_re * r_re - h_im * r_im));
atomicAdd(&t_grad[i + embedding_dim / 2], grad * (h_re * r_im + h_im * r_re));
}
}
}
template <class scalar_t>
__global__
void rotate_forward_out_cuda(const scalar_t *entity, const scalar_t *relation, const int64_t *h_index,
const int64_t *t_index, const int64_t *r_index, scalar_t *score,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
const scalar_t *h = entity + h_index[sample_id] * embedding_dim;
const scalar_t *r = relation + r_index[sample_id] * embedding_dim / 2;
const scalar_t *t = entity + t_index[sample_id] * embedding_dim;
scalar_t x = 0;
for (int64_t i = lane_id; i < embedding_dim / 2; i += warpSize) {
scalar_t h_re = h[i], h_im = h[i + embedding_dim / 2];
scalar_t r_re = ::cos(r[i]), r_im = ::sin(r[i]);
scalar_t t_re = t[i], t_im = t[i + embedding_dim / 2];
scalar_t distance_re = h_re * r_re - h_im * r_im - t_re;
scalar_t distance_im = h_re * r_im + h_im * r_re - t_im;
x += ::sqrt(distance_re * distance_re + distance_im * distance_im);
}
x = warp_broadcast(warp_reduce(x), 0);
if (lane_id == 0)
score[sample_id] = x;
}
}
template <class scalar_t>
__global__
void rotate_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const float kEpsilon = 1e-15; // 1e-15 from GraphVite
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
int64_t h_sample = h_index[sample_id];
int64_t r_sample = r_index[sample_id];
int64_t t_sample = t_index[sample_id];
const scalar_t *h = entity + h_sample * embedding_dim;
const scalar_t *r = relation + r_sample * embedding_dim / 2;
const scalar_t *t = entity + t_sample * embedding_dim;
scalar_t *h_grad = entity_grad + h_sample * embedding_dim;
scalar_t *r_grad = relation_grad + r_sample * embedding_dim / 2;
scalar_t *t_grad = entity_grad + t_sample * embedding_dim;
scalar_t grad = score_grad[sample_id];
for (int64_t i = lane_id; i < embedding_dim / 2; i += warpSize) {
scalar_t h_re = h[i], h_im = h[i + embedding_dim / 2];
scalar_t r_re = ::cos(r[i]), r_im = ::sin(r[i]);
scalar_t t_re = t[i], t_im = t[i + embedding_dim / 2];
scalar_t distance_re = h_re * r_re - h_im * r_im - t_re;
scalar_t distance_im = h_re * r_im + h_im * r_re - t_im;
scalar_t g = grad / (::sqrt(distance_re * distance_re + distance_im * distance_im) + kEpsilon);
atomicAdd(&h_grad[i], g * (distance_re * r_re + distance_im * r_im));
atomicAdd(&h_grad[i + embedding_dim / 2], g * (-distance_re * r_im + distance_im * r_re));
atomicAdd(&r_grad[i], g * (-distance_re * (h_re * r_im + h_im * r_re)
+ distance_im * (h_re * r_re - h_im * r_im)));
atomicAdd(&t_grad[i], -g * distance_re);
atomicAdd(&t_grad[i + embedding_dim / 2], -g * distance_im);
}
}
}
template <class scalar_t>
__global__
void simple_forward_out_cuda(const scalar_t *entity, const scalar_t *relation, const int64_t *h_index,
const int64_t *t_index, const int64_t *r_index, scalar_t *score,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
const scalar_t *h = entity + h_index[sample_id] * embedding_dim;
const scalar_t *r = relation + r_index[sample_id] * embedding_dim;
const scalar_t *t = entity + t_index[sample_id] * embedding_dim;
scalar_t x = 0;
for (int64_t i = lane_id; i < embedding_dim; i += warpSize) {
int64_t j = (i + embedding_dim / 2) % embedding_dim;
x += h[i] * r[i] * t[j];
}
x = warp_broadcast(warp_reduce(x), 0);
if (lane_id == 0)
score[sample_id] = x;
}
}
template <class scalar_t>
__global__
void simple_backward_out_cuda(const scalar_t *entity, const scalar_t *relation,
const int64_t *h_index, const int64_t *t_index, const int64_t *r_index,
const scalar_t *score_grad, scalar_t *entity_grad, scalar_t *relation_grad,
int64_t num_entity, int64_t num_relation, int64_t embedding_dim, int64_t num_sample) {
const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
const int lane_id = thread_id % warpSize;
const int num_thread = gridDim.x * blockDim.x;
for (int64_t sample_id = thread_id / warpSize; sample_id < num_sample; sample_id += num_thread / warpSize) {
int64_t h_sample = h_index[sample_id];
int64_t r_sample = r_index[sample_id];
int64_t t_sample = t_index[sample_id];
const scalar_t *h = entity + h_sample * embedding_dim;
const scalar_t *r = relation + r_sample * embedding_dim;
const scalar_t *t = entity + t_sample * embedding_dim;
scalar_t *h_grad = entity_grad + h_sample * embedding_dim;
scalar_t *r_grad = relation_grad + r_sample * embedding_dim;
scalar_t *t_grad = entity_grad + t_sample * embedding_dim;
scalar_t grad = score_grad[sample_id];
for (int64_t i = lane_id; i < embedding_dim; i += warpSize) {
int64_t j = (i + embedding_dim / 2) % embedding_dim;
scalar_t h_i = h[i], r_i = r[i], t_j = t[j];
atomicAdd(&h_grad[i], grad * r_i * t_j);
atomicAdd(&r_grad[i], grad * h_i * t_j);
atomicAdd(&t_grad[j], grad * h_i * r_i);
}
}
}
// If this were written with templates, the partial instantiation of template template
// parameters could not be resolved, so we opt for a macro implementation instead.
#define DECLARE_FORWARD_IMPL(NAME) \
Tensor NAME##_forward_cuda(const Tensor &entity_, const Tensor &relation_, const Tensor &h_index_, \
const Tensor &t_index_, const Tensor &r_index_) { \
constexpr const char *fn_name = #NAME"_forward_cuda"; \
TensorArg entity_arg(entity_, "entity", 1), relation_arg(relation_, "relation", 2), \
h_index_arg(h_index_, "h_index", 3), r_index_arg(r_index_, "r_index", 4), \
t_index_arg(t_index_, "t_index", 5); \
\
embedding_forward_check(fn_name, entity_arg, relation_arg, h_index_arg, r_index_arg, t_index_arg); \
checkAllSameGPU(fn_name, {entity_arg, relation_arg, h_index_arg, r_index_arg, t_index_arg}); \
\
const Tensor entity = entity_.contiguous(); \
const Tensor relation = relation_.contiguous(); \
const Tensor h_index = h_index_.contiguous(); \
const Tensor r_index = r_index_.contiguous(); \
const Tensor t_index = t_index_.contiguous(); \
\
int64_t num_entity = entity.size(0); \
int64_t num_relation = relation.size(0); \
int64_t embedding_dim = entity.size(-1); \
int64_t num_sample = h_index.numel(); \
\
Tensor score = at::empty(h_index.sizes(), entity.options()); \
\
cudaSetDevice(entity.get_device()); \
auto stream = at::cuda::getCurrentCUDAStream(); \
\
AT_DISPATCH_FLOATING_TYPES(entity.scalar_type(), fn_name, [&] { \
NAME##_forward_out_cuda<scalar_t><<<4096, 512, 0, stream>>>( \
entity.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), \
h_index.data_ptr<int64_t>(), t_index.data_ptr<int64_t>(), r_index.data_ptr<int64_t>(), \
score.data_ptr<scalar_t>(), \
num_entity, num_relation, embedding_dim, num_sample \
); \
}); \
\
return score; \
}
#define DECLARE_BACKWARD_IMPL(NAME) \
std::tuple<Tensor, Tensor> NAME##_backward_cuda( \
const Tensor &entity_, const Tensor &relation_, const Tensor &h_index_, \
const Tensor &t_index_, const Tensor &r_index_, const Tensor &score_grad_) { \
constexpr const char *fn_name = #NAME"_backward_cuda"; \
TensorArg entity_arg(entity_, "entity", 1), relation_arg(relation_, "relation", 2), \
h_index_arg(h_index_, "h_index", 3), r_index_arg(r_index_, "r_index", 4), \
t_index_arg(t_index_, "t_index", 5), score_grad_arg(score_grad_, "score_grad", 6); \
\
embedding_backward_check(fn_name, entity_arg, relation_arg, h_index_arg, r_index_arg, t_index_arg, \
score_grad_arg); \
checkAllSameGPU(fn_name, {entity_arg, relation_arg, h_index_arg, r_index_arg, t_index_arg, score_grad_arg}); \
\
const Tensor entity = entity_.contiguous(); \
const Tensor relation = relation_.contiguous(); \
const Tensor h_index = h_index_.contiguous(); \
const Tensor r_index = r_index_.contiguous(); \
const Tensor t_index = t_index_.contiguous(); \
const Tensor score_grad = score_grad_.contiguous(); \
\
int64_t num_entity = entity.size(0); \
int64_t num_relation = relation.size(0); \
int64_t embedding_dim = entity.size(-1); \
int64_t num_sample = h_index.numel(); \
\
Tensor entity_grad = at::zeros_like(entity); \
Tensor relation_grad = at::zeros_like(relation); \
\
cudaSetDevice(entity.get_device()); \
auto stream = at::cuda::getCurrentCUDAStream(); \
\
AT_DISPATCH_FLOATING_TYPES(entity.scalar_type(), fn_name, [&] { \
NAME##_backward_out_cuda<scalar_t><<<4096, 512, 0, stream>>>( \
entity.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), \
h_index.data_ptr<int64_t>(), t_index.data_ptr<int64_t>(), r_index.data_ptr<int64_t>(), \
score_grad.data_ptr<scalar_t>(), \
entity_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(), \
num_entity, num_relation, embedding_dim, num_sample \
); \
}); \
\
return std::make_tuple(entity_grad, relation_grad); \
}
DECLARE_FORWARD_IMPL(transe)
DECLARE_BACKWARD_IMPL(transe)
DECLARE_FORWARD_IMPL(distmult)
DECLARE_BACKWARD_IMPL(distmult)
DECLARE_FORWARD_IMPL(complex)
DECLARE_BACKWARD_IMPL(complex)
DECLARE_FORWARD_IMPL(rotate)
DECLARE_BACKWARD_IMPL(rotate)
DECLARE_FORWARD_IMPL(simple)
DECLARE_BACKWARD_IMPL(simple)
} // namespace at
|
the_stack
|
#ifndef INTEGRATION_KERNEL_H
#define INTEGRATION_KERNEL_H
#include <stdio.h>
#include <math.h>
#include <curand.h>
#include <thrust/sort.h>
#include "helper_math.h"
#include "math_constants.h"
#include "kernel.cuh"
#include "shared_variables.cuh"
//#define X_BOUNDARY 7.f
//#define X_BOUNDARY 50.f
//#define Z_BOUNDARY 50.f
//#define
#define EPS 0.001f
////////////// fluid constants /////////////
#define MAX_FLUID_NEIGHBORS 500
#define H 2.f // kernel radius
#define H2 4.f // H^2
#define H6 64.f // H^6
#define H9 512.f // H^9
#define POLY6_COEFF 0.00305992474f // 315 / (64 * pi * H9)
#define SPIKEY_COEFF 0.22381163872f // 45 / (pi * H6)
#define FLUID_RELAXATION .01f // epsilon used when calculating lambda
#define K_P .1f // scales artificial pressure
#define E_P 4.f // exponent to art. pressure
#define DQ_P .2f // between .1 and .3 (for art pressure)
/////////////////// friction ///////////////
#define S_FRICTION .005f
#define K_FRICTION .0002f
//#define S_FRICTION .15f
//#define K_FRICTION .003f
// textures for sorted particle positions, inverse masses, phases, and grid cell data
texture<float4, 1, cudaReadModeElementType> oldPosTex;
texture<float, 1, cudaReadModeElementType> invMassTex;
texture<int, 1, cudaReadModeElementType> oldPhaseTex;
texture<uint, 1, cudaReadModeElementType> gridParticleHashTex;
texture<uint, 1, cudaReadModeElementType> cellStartTex;
texture<uint, 1, cudaReadModeElementType> cellEndTex;
// simulation parameters in constant memory
__constant__ SimParams params;
struct collide_world_functor
{
float *rands;
int3 minBounds;
int3 maxBounds;
__host__ __device__
collide_world_functor(float *_rands, int3 _minBounds, int3 _maxBounds)
: rands(_rands), minBounds(_minBounds), maxBounds(_maxBounds) {}
template <typename Tuple>
__device__
void operator()(Tuple t)
{
float4 posData = thrust::get<0>(t);
float4 Xstar = thrust::get<1>(t);
int phase = thrust::get<2>(t);
float3 epos = make_float3(posData.x, posData.y, posData.z);
float3 pos = make_float3(Xstar.x, Xstar.y, Xstar.z);
float3 n = make_float3(0.f);
float d = params.particleRadius;
float eps = d * 0.f;
if (phase < SOLID)
eps = d * 0.01f;
if (epos.y < minBounds.y + params.particleRadius)
{
epos.y = minBounds.y + params.particleRadius + rands[5] * eps;
n += make_float3(0,1,0);
}
eps = d * 0.01f;
if (epos.x > maxBounds.x - params.particleRadius)
{
epos.x = maxBounds.x - (params.particleRadius + rands[0] * eps);
n += make_float3(-1,0,0);
}
if (epos.x < minBounds.x + params.particleRadius)
{
epos.x = minBounds.x + (params.particleRadius + rands[1] * eps);
n += make_float3(1,0,0);
}
if (epos.y > maxBounds.y - params.particleRadius)
{
epos.y = maxBounds.y - (params.particleRadius + rands[2] * eps);
n += make_float3(0,-1,0);
}
#ifndef TWOD
if (epos.z > maxBounds.z - params.particleRadius)
{
epos.z = maxBounds.z - (params.particleRadius + rands[3] * eps);
n += make_float3(0,0,-1);
}
if (epos.z < minBounds.z + params.particleRadius)
{
epos.z = minBounds.z + (params.particleRadius + rands[4] * eps);
n += make_float3(0,0,1);
}
#endif
#ifdef TWOD
epos.z = ZPOS; // 2D
pos.z = ZPOS;
#endif
if (length(n) < EPS || phase < CLOTH)
{
thrust::get<0>(t) = make_float4(epos, posData.w);
return;
}
float3 dp = (epos - pos);
float3 dpt = dp - dot(dp, n) * n;
float ldpt = length(dpt);
if (ldpt < EPS)
{
thrust::get<0>(t) = make_float4(epos, posData.w);
return;
}
if (ldpt < sqrt(S_FRICTION) * d)
epos -= dpt;
else
epos -= dpt * min(sqrt(K_FRICTION) * d / ldpt, 1.f);
// store the corrected position
thrust::get<0>(t) = make_float4(epos, posData.w);
}
};
struct integrate_functor
{
float deltaTime;
__host__ __device__
integrate_functor(float delta_time)
: deltaTime(delta_time) {}
template <typename Tuple>
__device__
void operator()(Tuple t)
{
volatile float4 posData = thrust::get<0>(t);
volatile float4 velData = thrust::get<1>(t);
float3 pos = make_float3(posData.x, posData.y, posData.z);
float3 vel = make_float3(velData.x, velData.y, velData.z);
vel += params.gravity * deltaTime;
// new position = old position + velocity * deltaTime
pos += vel * deltaTime;
// store new position
thrust::get<0>(t) = make_float4(pos, posData.w);
}
};
// calculate position in uniform grid
__device__ int3 calcGridPos(float3 p)
{
int3 gridPos;
gridPos.x = floor((p.x - params.worldOrigin.x) / params.cellSize.x);
gridPos.y = floor((p.y - params.worldOrigin.y) / params.cellSize.y);
gridPos.z = floor((p.z - params.worldOrigin.z) / params.cellSize.z);
return gridPos;
}
// calculate hash (cell address) from grid position, wrapping at the edges
__device__ uint calcGridHash(int3 gridPos)
{
gridPos.x = gridPos.x & (params.gridSize.x-1); // wrap grid, assumes size is power of 2
gridPos.y = gridPos.y & (params.gridSize.y-1);
gridPos.z = gridPos.z & (params.gridSize.z-1);
return __umul24(__umul24(gridPos.z, params.gridSize.y), params.gridSize.x) + __umul24(gridPos.y, params.gridSize.x) + gridPos.x;
}
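// Note (illustration only; the helper name is made up). Because the grid sizes are
// assumed to be powers of two, the bitwise AND above is equivalent to a non-negative
// modulo, so out-of-range cell coordinates from the -1..+1 neighbour search wrap around
// the grid instead of indexing out of bounds. Host-side sanity check:
inline bool wrapsLikeModulo(int x, int powerOfTwoSize)
{
    int wrapped = x & (powerOfTwoSize - 1);
    int modulo = ((x % powerOfTwoSize) + powerOfTwoSize) % powerOfTwoSize;
    return wrapped == modulo;  // holds for two's-complement ints and power-of-two sizes
}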
// calculate grid hash value for each particle
__global__
void calcHashD(uint *gridParticleHash, // output
uint *gridParticleIndex, // output
float4 *pos, // input: positions
uint numParticles)
{
uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
volatile float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z));
uint hash = calcGridHash(gridPos);
// store grid hash and particle index
gridParticleHash[index] = hash;
gridParticleIndex[index] = index;
}
// rearrange particle data into sorted order, and find the start of each cell
// in the sorted hash array
__global__
void reorderDataAndFindCellStartD(uint *cellStart, // output: cell start index
uint *cellEnd, // output: cell end index
float4 *sortedPos, // output: sorted positions
float *sortedW, // output: sorted inverse masses
int *sortedPhase, // output: sorted phase values
uint *gridParticleHash, // input: sorted grid hashes
uint *gridParticleIndex,// input: sorted particle indices
float4 *oldPos, // input: position array
float *W,
int *phase,
uint numParticles)
{
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint hash;
// handle case when no. of particles not multiple of block size
if (index < numParticles)
{
hash = gridParticleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two hash values per thread
sharedHash[threadIdx.x+1] = hash;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = gridParticleHash[index-1];
}
}
__syncthreads();
if (index < numParticles)
{
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
// Now use the sorted index to reorder the pos and vel data
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex); // macro does either global read or texture fetch
float w = FETCH(invMass, sortedIndex); // macro does either global read or texture fetch
int phase = FETCH(oldPhase, sortedIndex); // macro does either global read or texture fetch
sortedPos[index] = pos;
sortedW[index] = w;
sortedPhase[index] = phase;
}
}
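// Host-side usage sketch (an assumption, not code from the original project; numCells
// and all pointer names are illustrative). The kernel above expects the (hash, index)
// pairs to be sorted by hash, cellStart to be reset to the empty marker 0xffffffff, and
// a dynamic shared-memory allocation of (blockSize + 1) uints for the neighbouring-hash
// lookup.
#include <thrust/device_ptr.h>  // included here only for the sketch below
inline void buildCellLists(uint *dGridParticleHash, uint *dGridParticleIndex,
                           uint *dCellStart, uint *dCellEnd,
                           float4 *dSortedPos, float *dSortedW, int *dSortedPhase,
                           float4 *dOldPos, float *dW, int *dPhase,
                           uint numParticles, uint numCells)
{
    thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
                        thrust::device_ptr<uint>(dGridParticleHash + numParticles),
                        thrust::device_ptr<uint>(dGridParticleIndex));
    cudaMemset(dCellStart, 0xffffffff, numCells * sizeof(uint));
    uint numThreads = 256;
    uint numBlocks = (numParticles + numThreads - 1) / numThreads;
    uint smemSize = sizeof(uint) * (numThreads + 1);
    reorderDataAndFindCellStartD<<<numBlocks, numThreads, smemSize>>>(
        dCellStart, dCellEnd, dSortedPos, dSortedW, dSortedPhase,
        dGridParticleHash, dGridParticleIndex, dOldPos, dW, dPhase, numParticles);
}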
// collide a particle against all other particles in a given cell
__device__
void collideCell(int3 gridPos,
uint index,
float3 pos,
int phase,
float4 *oldPos,
uint *cellStart,
uint *cellEnd,
uint *neighbors,
uint *numNeighbors)
{
uint gridHash = calcGridHash(gridPos);
// get start of bucket for this cell
uint startIndex = FETCH(cellStart, gridHash);
float collideDist = params.particleRadius * 2.001f; // slightly bigger radius
float collideDist2 = collideDist * collideDist;
// float3 delta = make_float3(0.0f);
if (startIndex != 0xffffffff) // cell is not empty
{
// iterate over particles in this cell
uint endIndex = FETCH(cellEnd, gridHash);
for (uint j=startIndex; j<endIndex; j++)
{
if (j != index) // check not colliding with self
{
float3 pos2 = make_float3(FETCH(oldPos, j));
int phase2 = FETCH(oldPhase, j);
if (phase > SOLID && phase == phase2)
continue;
// collide two spheres
float3 diff = pos - pos2;
float mag2 = dot(diff, diff);
if (mag2 < collideDist2 && numNeighbors[index] < MAX_FLUID_NEIGHBORS)
{
// record j as a contact neighbor of this particle
neighbors[index * MAX_FLUID_NEIGHBORS + numNeighbors[index]] = j;
numNeighbors[index] += 1;
// delta += diff * (sqrt(mag2) - collideDist) * -.5f;
}
}
}
}
}
__global__
void collideD(float4 *newPos, // output: new pos
float4 *prevPositions,
float4 *sortedPos, // input: sorted positions
float *sortedW,
int *sortedPhase,
uint *gridParticleIndex, // input: sorted particle indices
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint *neighbors,
uint *numNeighbors)
{
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
int phase = FETCH(oldPhase, index);
if (phase < CLOTH) return;
// read particle data from sorted arrays
float3 pos = make_float3(FETCH(oldPos, index));
// get address in grid
int3 gridPos = calcGridPos(pos);
// examine neighbouring cells
float3 delta = make_float3(0.f);
numNeighbors[index] = 0;
for (int z=-1; z<=1; z++)
{
for (int y=-1; y<=1; y++)
{
for (int x=-1; x<=1; x++)
{
int3 neighbourPos = gridPos + make_int3(x, y, z);
collideCell(neighbourPos, index, pos, phase, sortedPos, cellStart, cellEnd, neighbors, numNeighbors);
}
}
}
float collideDist = params.particleRadius * 2.001f;
float w = FETCH(invMass, index);
float sW = (w != 0.f ? (1.f / ((1.f / w) * exp(-pos.y))) : w);
uint originalIndex = gridParticleIndex[index];
// float3 currPos = make_float3(newPos[originalIndex]);
float3 prevPos = make_float3(prevPositions[originalIndex]);
for (uint i = 0; i < numNeighbors[index]; i++)
{
float3 pos2 = make_float3(FETCH(oldPos, neighbors[index * MAX_FLUID_NEIGHBORS + i]));
float w2 = FETCH(invMass, neighbors[index * MAX_FLUID_NEIGHBORS + i]);
int phase2 = FETCH(oldPhase, neighbors[index * MAX_FLUID_NEIGHBORS + i]);
float3 diff = pos - pos2;
float dist = length(diff);
float mag = dist - collideDist;
float colW = w;
float colW2 = w2;
if (phase >= SOLID && phase2 >= SOLID)
{
colW = sW;
colW2 = (w2 != 0.f ? (1.f / ((1.f / w2) * exp(-pos.y))) : w2);
}
float scale = mag / (colW + colW2);
float3 dp = diff * (scale / dist);
float3 dp1 = -colW * dp / numNeighbors[index];
float3 dp2 = colW2 * dp / numNeighbors[index];
delta += dp1;
////////////////////// friction //////////////////
if (phase < SOLID || phase2 < SOLID)
continue;
uint neighborIndex = gridParticleIndex[neighbors[index * MAX_FLUID_NEIGHBORS + i]];
float3 prevPos2 = make_float3(prevPositions[neighborIndex]);
// float3 currPos2 = make_float3(newPos[neighbors[index * MAX_FLUID_NEIGHBORS + i]]);
float3 nf = normalize(diff);
float3 dpRel = (pos + dp1 - prevPos) - (prevPos + dp2 - prevPos2);
float3 dpt = dpRel - dot(dpRel, nf) * nf;
float ldpt = length(dpt);
if (ldpt < EPS)
continue;
if (ldpt < (S_FRICTION) * dist)
delta -= dpt * colW / (colW + colW2);
else
delta -= dpt * min((K_FRICTION) * dist / ldpt, 1.f);
}
// write new position back to original unsorted location
newPos[originalIndex] = make_float4(pos + delta, 1.0f);
}
struct subtract_functor
{
const float time;
subtract_functor(float _time) : time(_time) {}
__device__
float4 operator()(const float4& orig, const float4& solved) const {
return (solved - orig) / -time;
}
};
// collide a particle against all other particles in a given cell
__device__
void collideCellRadius(int3 gridPos,
uint index,
float3 pos,
uint *cellStart,
uint *cellEnd,
uint *neighbors,
uint *numNeighbors)
{
uint gridHash = calcGridHash(gridPos);
// get start of bucket for this cell
uint startIndex = FETCH(cellStart, gridHash);
if (startIndex != 0xffffffff) // cell is not empty
{
// iterate over particles in this cell
uint endIndex = FETCH(cellEnd, gridHash);
for (uint j=startIndex; j<endIndex; j++)
{
if (j != index) // check not colliding with self
{
float3 pos2 = make_float3(FETCH(oldPos, j));
float3 relPos = pos - pos2;
float dist2 = dot(relPos, relPos);
if (dist2 < H2 && numNeighbors[index] < MAX_FLUID_NEIGHBORS)
{
// record j as a fluid neighbor of this particle
neighbors[index * MAX_FLUID_NEIGHBORS + numNeighbors[index]] = j;
numNeighbors[index] += 1;
}
}
}
}
}
__global__
void findLambdasD(float *lambda, // output: density constraint scale (lambda) per particle
uint *gridParticleIndex, // input: sorted particle indices
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint *neighbors,
uint *numNeighbors,
float *ros)
{
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
int phase = FETCH(oldPhase, index);
if (phase != FLUID) return;
// read particle data from sorted arrays
float3 pos = make_float3(FETCH(oldPos, index));
// get address in grid
int3 gridPos = calcGridPos(pos);
// examine neighbouring cells
int rad = (int)ceil(H / params.cellSize.x);
numNeighbors[index] = 0;
for (int z=-rad; z<=rad; z++)
{
for (int y=-rad; y<=rad; y++)
{
for (int x=-rad; x<=rad; x++)
{
int3 neighbourPos = gridPos + make_int3(x, y, z);
collideCellRadius(neighbourPos, index, pos, cellStart, cellEnd, neighbors, numNeighbors);
}
}
}
float w = FETCH(invMass, index);
float ro = 0.f;
float denom = 0.f;
float3 grad = make_float3(0.f);
for (uint i = 0; i < numNeighbors[index]; i++)
{
uint ni = neighbors[index * MAX_FLUID_NEIGHBORS + i];
float3 pos2 = make_float3(FETCH(oldPos, ni));
// float w2 = FETCH(invMass, ni);
float3 r = pos - pos2;
float rlen2 = dot(r, r);
float rlen = sqrt(rlen2);
float hMinus2 = H2 - rlen2;
float hMinus = H - rlen;
// accumulate density contribution, weighted by mass (1 / w) for fluid-solid coupling
ro += (POLY6_COEFF * hMinus2*hMinus2*hMinus2 ) / w;
float3 spikeyGrad;
if (rlen < 0.0001f)
spikeyGrad = make_float3(0.f); // coincident particles: direction undefined, use zero gradient (could be randomized)
else
spikeyGrad = (r / rlen) * -SPIKEY_COEFF * hMinus*hMinus;
spikeyGrad /= ros[gridParticleIndex[index]];
grad += -spikeyGrad;
denom += dot(spikeyGrad, spikeyGrad);
}
ro += (POLY6_COEFF * H6 ) / w;
denom += dot(grad, grad);
lambda[index] = - ((ro / ros[gridParticleIndex[index]]) - 1) / (denom + FLUID_RELAXATION);
}
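// Reference sketch (assumption; helper names are made up). findLambdasD implements the
// constraint scaling factor from Position Based Fluids:
//   C_i = rho_i / rho_0 - 1,   lambda_i = -C_i / (sum_k |grad_k C_i|^2 + epsilon)
// where rho_i is accumulated with the poly6 kernel and the gradients use the spiky
// kernel gradient, both hard-coded above for H = 2. Host-side versions of the two
// smoothing kernels, using the same constants:
inline float poly6Reference(float rlen)
{
    float hMinus2 = H2 - rlen * rlen;
    return (rlen < H) ? POLY6_COEFF * hMinus2 * hMinus2 * hMinus2 : 0.f;
}
inline float spikyGradMagnitudeReference(float rlen)
{
    float hMinus = H - rlen;
    return (rlen < H) ? SPIKEY_COEFF * hMinus * hMinus : 0.f;
}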
__global__
void solveFluidsD(float *lambda, // input: lambda computed by findLambdasD
uint *gridParticleIndex, // input: sorted particle indices
float4 *particles,
uint numParticles,
uint *neighbors,
uint *numNeighbors,
float *ros)
{
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
int phase = FETCH(oldPhase, index);
if (phase != FLUID) return;
float4 pos = FETCH(oldPos, index);
float4 delta = make_float4(0.f);
for (uint i = 0; i < numNeighbors[index]; i++)
{
float4 pos2 = FETCH(oldPos, neighbors[index * MAX_FLUID_NEIGHBORS + i]);
float4 r = pos - pos2;
float rlen2 = dot(r, r);
float rlen = sqrt(rlen2);
float hMinus2 = H2 - rlen2;
float hMinus = H - rlen;
float4 spikeyGrad;
if (rlen < 0.0001f)
spikeyGrad = make_float4(0,EPS,0,0) * -SPIKEY_COEFF * hMinus*hMinus;
else
spikeyGrad = (r / rlen) * -SPIKEY_COEFF * hMinus*hMinus;
float term2 = H2 - (DQ_P * DQ_P * H2);
float numer = (POLY6_COEFF * hMinus2*hMinus2*hMinus2 ) ;
float denom = (POLY6_COEFF * term2*term2*term2 );
float lambdaCorr = -K_P * pow(numer / denom, E_P);
delta += (lambda[index] + lambda[neighbors[index * MAX_FLUID_NEIGHBORS + i]] + lambdaCorr) * spikeyGrad;
}
uint origIndex = gridParticleIndex[index];
particles[origIndex] += delta / (ros[gridParticleIndex[index]] + numNeighbors[index]);
}
#endif // INTEGRATION_KERNEL_H
|
the_stack
|
#include <type_traits> //std::remove_cv
namespace xlib {
#define Load_MACRO(CACHE_MOD, ptx_modifier) \
\
template<> \
__device__ __forceinline__ \
ulonglong2 LoadSupport<CACHE_MOD, ulonglong2>(ulonglong2* pointer) { \
ulonglong2 ret; \
asm volatile("ld."#ptx_modifier".v2.u64 {%0, %1}, [%2];" \
: "=l"(ret.x), "=l"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
uint4 LoadSupport<CACHE_MOD, uint4>(uint4* pointer) { \
uint4 ret; \
asm volatile("ld."#ptx_modifier".v4.u32 {%0, %1, %2, %3}, [%4];" \
: "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) \
: "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
uint2 LoadSupport<CACHE_MOD, uint2>(uint2* pointer) { \
uint2 ret; \
asm volatile("ld."#ptx_modifier".v2.u32 {%0, %1}, [%2];" \
: "=r"(ret.x), "=r"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
ushort4 LoadSupport<CACHE_MOD, ushort4>(ushort4* pointer) { \
ushort4 ret; \
asm volatile("ld."#ptx_modifier".v4.u16 {%0, %1, %2, %3}, [%4];" \
: "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) \
: "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
ushort2 LoadSupport<CACHE_MOD, ushort2>(ushort2* pointer) { \
ushort2 ret; \
asm volatile("ld."#ptx_modifier".v2.u16 {%0, %1}, [%2];" \
: "=h"(ret.x), "=h"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
longlong2 LoadSupport<CACHE_MOD, longlong2>(longlong2* pointer) { \
longlong2 ret; \
asm volatile("ld."#ptx_modifier".v2.s64 {%0, %1}, [%2];" \
: "=l"(ret.x), "=l"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
int4 LoadSupport<CACHE_MOD, int4>(int4* pointer) { \
int4 ret; \
asm volatile("ld."#ptx_modifier".v4.s32 {%0, %1, %2, %3}, [%4];" \
: "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) \
: "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
int2 LoadSupport<CACHE_MOD, int2>(int2* pointer) { \
int2 ret; \
asm volatile("ld."#ptx_modifier".v2.s32 {%0, %1}, [%2];" \
: "=r"(ret.x), "=r"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
short4 LoadSupport<CACHE_MOD, short4>(short4* pointer) { \
short4 ret; \
asm volatile("ld."#ptx_modifier".v4.s16 {%0, %1, %2, %3}, [%4];" \
: "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) \
: "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
short2 LoadSupport<CACHE_MOD, short2>(short2* pointer) { \
short2 ret; \
asm volatile("ld."#ptx_modifier".v2.s16 {%0, %1}, [%2];" \
: "=h"(ret.x), "=h"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
long long unsigned LoadSupport<CACHE_MOD, long long unsigned> \
(long long unsigned* pointer) { \
\
long long unsigned ret; \
asm volatile("ld."#ptx_modifier".u64 %0, [%1];" \
: "=l"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
unsigned LoadSupport<CACHE_MOD, unsigned>(unsigned* pointer) { \
unsigned ret; \
asm volatile("ld."#ptx_modifier".u32 %0, [%1];" \
: "=r"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
unsigned short LoadSupport<CACHE_MOD, unsigned short> \
(unsigned short* pointer) { \
\
unsigned short ret; \
asm volatile("ld."#ptx_modifier".u16 %0, [%1];" \
: "=h"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
unsigned char LoadSupport<CACHE_MOD, unsigned char>(unsigned char* pointer) { \
unsigned short ret; \
asm volatile("ld."#ptx_modifier".u8 %0, [%1];" \
:"=h"(ret) : "l"(pointer)); \
return static_cast<unsigned char>(ret); \
} \
\
template<> \
__device__ __forceinline__ \
uchar2 LoadSupport<CACHE_MOD, uchar2>(uchar2* pointer) { \
unsigned short ret = LoadSupport<CACHE_MOD>( \
reinterpret_cast<unsigned short*>(pointer)); \
return reinterpret_cast<uchar2&>(ret); \
} \
\
template<> \
__device__ __forceinline__ \
uchar4 LoadSupport<CACHE_MOD, uchar4>(uchar4* pointer) { \
unsigned ret = LoadSupport<CACHE_MOD>( \
reinterpret_cast<unsigned*>(pointer)); \
return reinterpret_cast<uchar4&>(ret); \
} \
\
template<> \
__device__ __forceinline__ \
long long int LoadSupport<CACHE_MOD, long long int>(long long int* pointer) { \
long long int ret; \
asm volatile("ld."#ptx_modifier".s64 %0, [%1];" \
: "=l"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
int LoadSupport<CACHE_MOD, int>(int* pointer) { \
int ret; \
asm volatile("ld."#ptx_modifier".s32 %0, [%1];" \
: "=r"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
short LoadSupport<CACHE_MOD, short>(short* pointer) { \
short ret; \
asm volatile("ld."#ptx_modifier".s16 %0, [%1];" \
: "=h"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
char LoadSupport<CACHE_MOD, char>(char* pointer) { \
short ret; \
asm volatile("ld."#ptx_modifier".s8 %0, [%1];" \
: "=h"(ret) : "l"(pointer)); \
return static_cast<char>(ret); \
} \
\
template<> \
__device__ __forceinline__ \
char2 LoadSupport<CACHE_MOD, char2>(char2* pointer) { \
short ret = LoadSupport<CACHE_MOD>(reinterpret_cast<short*>(pointer)); \
return reinterpret_cast<char2&>(ret); \
} \
\
template<> \
__device__ __forceinline__ \
char4 LoadSupport<CACHE_MOD, char4>(char4* pointer) { \
int ret = LoadSupport<CACHE_MOD>(reinterpret_cast<int*>(pointer)); \
return reinterpret_cast<char4&>(ret); \
} \
\
template<> \
__device__ __forceinline__ \
double2 LoadSupport<CACHE_MOD, double2>(double2* pointer) { \
double2 ret; \
asm volatile("ld."#ptx_modifier".v2.f64 {%0, %1}, [%2];" \
: "=d"(ret.x), "=d"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
float4 LoadSupport<CACHE_MOD, float4>(float4* pointer) { \
float4 ret; \
asm volatile("ld."#ptx_modifier".v4.f32 {%0, %1, %2, %3}, [%4];" \
: "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) \
: "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
float2 LoadSupport<CACHE_MOD, float2>(float2* pointer) { \
float2 ret; \
asm volatile("ld."#ptx_modifier".v2.f32 {%0, %1}, [%2];" \
: "=f"(ret.x), "=f"(ret.y) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
float LoadSupport<CACHE_MOD, float>(float* pointer) { \
float ret; \
asm volatile("ld."#ptx_modifier".f32 %0, [%1];" \
: "=f"(ret) : "l"(pointer)); \
return ret; \
} \
\
template<> \
__device__ __forceinline__ \
double LoadSupport<CACHE_MOD, double>(double* pointer) { \
double ret; \
asm volatile("ld."#ptx_modifier".f64 %0, [%1];" \
: "=d"(ret) : "l"(pointer)); \
return ret; \
}
//==============================================================================
//==============================================================================
template<CacheModifier MODIFIER = DF>
struct ThreadLoad;
template<CacheModifier MODIFIER>
struct ThreadLoad {
template<typename T>
static __device__ __forceinline__ T op(T* pointer) {
static_assert(sizeof(T) != sizeof(T), "NOT IMPLEMENTED");
return *pointer;
}
};
template<>
struct ThreadLoad<DF> {
template<typename T>
__device__ __forceinline__
static T op(T* pointer) {
return *pointer;
}
};
template<>
struct ThreadLoad<NC> {
template<typename T>
__device__ __forceinline__
static T op(T* pointer) {
return __ldg(pointer);
}
};
//==============================================================================
template<CacheModifier M, typename T>
__device__ __forceinline__ T LoadSupport(T* pointer);
#define LoadStruct_MACRO(CACHE_MOD) \
\
template<> \
struct ThreadLoad<CACHE_MOD> { \
template<typename T> \
__device__ __forceinline__ \
static T op(T* pointer) { \
return LoadSupport<CACHE_MOD>( \
const_cast<typename std::remove_cv<T>::type*>(pointer)); \
} \
};
LoadStruct_MACRO(CA)
LoadStruct_MACRO(CG)
LoadStruct_MACRO(CS)
LoadStruct_MACRO(CV)
LoadStruct_MACRO(NC_CA)
LoadStruct_MACRO(NC_CG)
LoadStruct_MACRO(NC_CS)
Load_MACRO(CA, global.ca)
Load_MACRO(CG, global.cg)
Load_MACRO(CS, global.cs)
Load_MACRO(CV, global.volatile)
Load_MACRO(NC_CA, global.ca.nc)
Load_MACRO(NC_CG, global.cg.nc)
Load_MACRO(NC_CS, global.cs.nc)
#undef LoadStruct_MACRO
#undef Load_MACRO
//==============================================================================
//==============================================================================
template<CacheModifier MODIFIER, typename T>
__device__ __forceinline__
T Load(T* pointer) {
return ThreadLoad<MODIFIER>::op(pointer);
}
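// Usage sketch (not part of the original header; the kernel name is made up and the
// modifier choice is arbitrary). Load<MODIFIER> dispatches to the inline-PTX
// specializations generated by Load_MACRO above, e.g. CG issues an ld.global.cg load,
// DF is a plain dereference, and NC goes through __ldg. const-qualified element types
// are handled by the remove_cv in ThreadLoad.
template <CacheModifier MODIFIER = CG>
__global__ void loadExampleKernel(const int* in, int* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = Load<MODIFIER>(in + i);
}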
} // namespace xlib
|
the_stack
|
// Warp-level reduction helper: reduces N groups of data into N numbers, where N = warpSize / width.
// width should be a power of 2 and no larger than warpSize.
template <typename scalar_t>
__device__ __forceinline__ scalar_t warpReduce(scalar_t x, int width=C10_WARP_SIZE){
for (unsigned offset = width/2; offset > 0; offset /= 2){
x += __shfl_down_sync(0xffffffff, x, offset, width);
}
return x;
}
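// Example sketch (not part of the original file; the kernel name and group width are
// illustrative). With width = 8, each group of 8 consecutive lanes is reduced
// independently and the group sum ends up in the group's first lane (lanes 0, 8, 16,
// 24), i.e. N = warpSize / width partial sums per warp as described above.
template <typename scalar_t>
__global__ void groupSumExample(const scalar_t *in, scalar_t *out, int n) {
    const int width = 8;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    scalar_t x = (i < n) ? in[i] : scalar_t(0);
    x = warpReduce(x, width);            // sums groups of `width` lanes
    if (i % width == 0 && i < n)
        out[i / width] = x;              // one result per group
}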
inline int largestPowerOfTwo(int x){
int y = 1;
while (y <= x)
y <<= 1;
return y >> 1;
}
// Helper class to calculate pointer offset that can be shared by different flavors of kernels.
// For fwd, batch offset and stride are different for packing and non-packing mode.
struct OffsetCalFwd{
__device__ __forceinline__ OffsetCalFwd(
int64_t batch,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t gLen,
int64_t hiddenSize,
bool packOutput) :
batch(batch),
batchOffset(batchOffset),
maxFLen(maxFLen),
maxGLen(maxGLen),
gLen(gLen),
hiddenSize(hiddenSize),
packOutput(packOutput)
{}
int64_t batch;
const int64_t *batchOffset;
int64_t maxFLen;
int64_t maxGLen;
int64_t gLen;
int64_t hiddenSize;
bool packOutput;
__device__ __forceinline__ int64_t getBatchOffset(){
return packOutput ? ((batch==0) ? 0 : batchOffset[batch-1])*hiddenSize
: batch*maxFLen*maxGLen*hiddenSize;
}
__device__ __forceinline__ int64_t getStrideF(){
return packOutput ? gLen*hiddenSize : maxGLen*hiddenSize;
}
};
// Helper class to calculate pointer offset that can be shared by different flavors of kernels
// For bwd, batch offset and stride are different for packing and non-packing mode.
// The reduction is done for two input tensors. Therefore, generating two sets of offsets
// according to bwdFasterDim can lead to a unified implementation in the actual kernel.
struct OffsetCalBwd{
__device__ __forceinline__ OffsetCalBwd(
int64_t batch,
const int64_t *batchOffset,
const int *fLen,
const int *gLen,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
bool packOutput,
bool bwdFasterDim) :
batch(batch),
batchOffset(batchOffset),
maxFLen(maxFLen),
maxGLen(maxGLen),
fLen(fLen),
gLen(gLen),
hiddenSize(hiddenSize),
packOutput(packOutput),
bwdFasterDim(bwdFasterDim)
{}
int64_t batch;
const int64_t *batchOffset;
const int *fLen;
const int *gLen;
int64_t maxFLen;
int64_t maxGLen;
int64_t hiddenSize;
bool packOutput;
bool bwdFasterDim; // whether doing bwd on the faster moving dimension
__device__ __forceinline__ int64_t getBatchOffset(){
return packOutput ? ((batch==0) ? 0 : batchOffset[batch-1])*hiddenSize
: batch*maxFLen*maxGLen*hiddenSize;
}
__device__ __forceinline__ int64_t getMaxXLen(){
return bwdFasterDim ? maxGLen : maxFLen;
}
__device__ __forceinline__ auto getMyXLen() -> decltype(gLen[batch]){
return bwdFasterDim ? gLen[batch] : fLen[batch];
}
__device__ __forceinline__ auto getMyYLen() -> decltype(gLen[batch]){
return bwdFasterDim ? fLen[batch] : gLen[batch];
}
__device__ __forceinline__ int64_t getStrideX(){
return bwdFasterDim ? hiddenSize : ((packOutput ? gLen[batch] : maxGLen) * hiddenSize);
}
__device__ __forceinline__ int64_t getStrideY(){
return bwdFasterDim ? ((packOutput ? gLen[batch] : maxGLen) * hiddenSize) : hiddenSize;
}
};
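// Concrete instance (editor note): with maxGLen = 3, hiddenSize = 8 and packOutput = false,
//   fGrad pass (bwdFasterDim == false): strideX = 3*8 = 24, strideY = 8
//   gGrad pass (bwdFasterDim == true) : strideX = 8,        strideY = 3*8 = 24
// so the same kernel walks either dimension simply by swapping the two strides.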
// Vanilla transducer joint forward kernel
// Detail of this joint function can be found in:
// [1] Sequence Transduction with Recurrent Neural Networks.
// f is a tensor of shape [batch, T, H]
// g is a tensor of shape [batch, U, H]
// the transducer joint does
// sum = f.unsqueeze(dim=2) + g.unsqueeze(dim=1)
// The resultant tensor is of shape [batch, T, U, H]
// Each thread block is working on one "batch" of data in the output tensor, [batch, t, u, :]
// This joint function can optionally pack the output where the output tensor with a shape of
// [B, T, U, H] is packed into [B_packed, H].
// Don't-care region (t > fLen) or (u > gLen) is removed.
// To enable packing, the starting offset for each batch needs to be specified with batchOffset.
template <typename scalar_t, class OffsetCal>
__global__ void transducer_joint_forward(
const scalar_t *f,
const scalar_t *g,
const int *fLen,
const int *gLen,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
bool packOutput,
scalar_t *sum) {
const int batch = blockIdx.z;
const int t = blockIdx.y;
const int u = blockIdx.x;
const auto myFLen = fLen[batch];
const auto myGLen = gLen[batch];
OffsetCal offsetCal(batch, batchOffset, maxFLen, maxGLen, myGLen, hiddenSize, packOutput);
const auto myBatchOffset = offsetCal.getBatchOffset();
const auto strideF = offsetCal.getStrideF();
scalar_t const *myF = f + batch*maxFLen*hiddenSize + t*hiddenSize;
scalar_t const *myG = g + batch*maxGLen*hiddenSize + u*hiddenSize;
scalar_t *mySum = sum + myBatchOffset + t*strideF + u * hiddenSize;
if (t < myFLen and u < myGLen){
#pragma unroll
for (int h = threadIdx.x; h < hiddenSize; h += blockDim.x){
if (h < hiddenSize){
mySum[h] = myF[h] + myG[h];
}
}
}
else if (packOutput == false and t < maxFLen and u < maxGLen){
// Need to write finite data to the don't-care region because we instantiate the result
// tensor with torch::empty for performance reasons. Even though it is a don't-care region,
// the contents need to be finite, otherwise they could lead to NaN in WGRAD.
// In packing mode, this write is no longer necessary as we remove the don't-care region
// from the output.
// Picking -1 (over 0) here for ease of testing.
#pragma unroll
for (int h = threadIdx.x; h < hiddenSize; h += blockDim.x){
if (h < hiddenSize){
mySum[h] = -1;
}
}
}
}
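// Reference semantics (editor sketch, pseudo-code only):
//   for (b, t, u, h) in [batchSize] x [fLen[b]] x [gLen[b]] x [hiddenSize]:
//     sum[b][t][u][h] = f[b][t][h] + g[b][u][h]
// The kernel above assigns one (b, t, u) triple per thread block (blockIdx.z/y/x) and strides
// over h with the blockDim.x threads.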
// Tiled version of the joint forward kernel
// Detail of this joint function can be found in:
// [1] Sequence Transduction with Recurrent Neural Networks.
// f is a tensor of shape [batch, T, H]
// g is a tensor of shape [batch, U, H]
// the transducer joint does
// sum = f.unsqueeze(dim=2) + g.unsqueeze(dim=1)
// The resultant tensor is of shape [batch, T, U, H]
// Each thread works on a tile of shape tileF x tileG in the result tensor.
// The inputs for the tile are first loaded into registers and reused tileG and tileF times, respectively.
// This joint function can optionally pack the output where the output tensor with a shape of
// [B, T, U, H] is packed into [B_packed, H].
// Don't-care region (t > fLen) or (u > gLen) is removed.
// To enable packing, the starting offset for each batch needs to be specified with batchOffset.
template <typename scalar_t, int tileF, int tileG, class OffsetCal>
__global__ void transducer_joint_tiled_forward(
const scalar_t *f,
const scalar_t *g,
const int *fLen,
const int *gLen,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
int64_t hiddenPerBlock,
bool packOutput,
scalar_t *sum) {
const int batch = blockIdx.z;
const int t = blockIdx.y * tileF;
const int hiddenBlock = (hiddenSize + hiddenPerBlock - 1) / hiddenPerBlock;
const int u = blockIdx.x / hiddenBlock * tileG;
const int hOffset = (blockIdx.x % hiddenBlock) * hiddenPerBlock;
const int h = threadIdx.x;
const auto myFLen = fLen[batch];
const auto myGLen = gLen[batch];
OffsetCal offsetCal(batch, batchOffset, maxFLen, maxGLen, myGLen, hiddenSize, packOutput);
const auto myBatchOffset = offsetCal.getBatchOffset();
const auto strideF = offsetCal.getStrideF();
scalar_t const *myF = f + batch*maxFLen*hiddenSize + t*hiddenSize + hOffset;
scalar_t const *myG = g + batch*maxGLen*hiddenSize + u*hiddenSize + hOffset;
scalar_t *mySum = sum + myBatchOffset + t*strideF + u*hiddenSize + hOffset;
if (t < myFLen and u < myGLen and hOffset+h < hiddenSize){
// register buffers for tiled input reuse
scalar_t fBuffer[tileF], gBuffer[tileG];
for (int i = 0; i < tileF; ++i){
if (t + i < myFLen)
fBuffer[i] = myF[i*hiddenSize + h];
}
for (int j = 0; j < tileG; ++j){
if (u + j < myGLen)
gBuffer[j] = myG[j*hiddenSize + h];
}
#pragma unroll
for (int i = 0; i < tileF; ++i){
if (t + i < myFLen){
#pragma unroll
for (int j = 0; j < tileG; ++j){
if (u + j < myGLen)
mySum[i*strideF + j*hiddenSize + h] = fBuffer[i] + gBuffer[j];
else if (packOutput == false and u + j < maxGLen)
mySum[i*strideF + j*hiddenSize + h] = -1;
}
}
else if (packOutput == false and t + i < maxFLen){
// Again need to write finite data to don't-care region
#pragma unroll
for (int j = 0; j < tileG; ++j){
if (u + j < maxGLen)
mySum[i*strideF + j*hiddenSize + h] = -1;
}
}
}
}
else if (packOutput == false and t < maxFLen and u < maxGLen and hOffset+h < hiddenSize){
// Only need to ensure finite values in the non-packing mode
#pragma unroll
for (int i = 0; i < tileF; ++i){
if (t + i < maxFLen){
#pragma unroll
for (int j = 0; j < tileG; ++j){
if (u + j < maxGLen)
mySum[i*strideF + j*hiddenSize + h] = -1;
}
}
}
}
}
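// Launch-mapping example (editor sketch, hypothetical sizes): tileF = tileG = 4,
// hiddenSize = 512, hiddenPerBlock = 128 -> hiddenBlock = 4, so
//   gridDim.x = ceil(maxGLen/4) * 4   (u-tile index and hidden chunk packed together)
//   gridDim.y = ceil(maxFLen/4)
//   gridDim.z = batchSize
// and each thread produces up to tileF*tileG = 16 outputs for its h, reusing the 4 + 4 values
// it loaded into fBuffer/gBuffer.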
// Bwd operation (reduction) on one input tensor. Since the operations performed for the two
// input tensors are exactly the same, only one kernel is needed, and the different indexing offsets
// and strides are handled by OffsetCalBwd.
// When packing is enabled in the fwd op, unpacking is needed to restore the gradients in a
// non-packed form.
template <typename scalar_t, typename acc_t, class OffsetCal>
__device__ void transducer_joint_single_backward(
const scalar_t *grad,
const int *fLen,
const int *gLen,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
bool packOutput,
bool bwdFasterDim, // whether bwd on the faster moving dimension (u)
scalar_t *inGrad,
int yBlockOffset=0) {
const int batch = blockIdx.z;
// For the second input tensor, this offset needs to be subtracted because the first yBlockOffset
// sets of thread blocks are for the first input tensor.
const int x = blockIdx.y-yBlockOffset;
const int hOffset = blockIdx.x*C10_WARP_SIZE;
const int wid = threadIdx.y;
const int lid = threadIdx.x;
const int numWarp = blockDim.y;
extern __shared__ char smem8[];
auto smem = reinterpret_cast<acc_t*>(smem8);
OffsetCal offsetCal(batch, batchOffset, fLen, gLen, maxFLen, maxGLen, hiddenSize, packOutput,
bwdFasterDim);
const auto maxXLen = offsetCal.getMaxXLen();
const auto myXLen = offsetCal.getMyXLen();
const auto myYLen = offsetCal.getMyYLen();
scalar_t *myInGrad = inGrad + batch*maxXLen*hiddenSize + x*hiddenSize + hOffset;
if (x < myXLen){
const auto myBatchOffset = offsetCal.getBatchOffset();
const auto strideX = offsetCal.getStrideX();
const auto strideY = offsetCal.getStrideY();
scalar_t const *myGrad = grad + myBatchOffset + x*strideX + hOffset;
// Each warp reduces numYPerWarp "y" first
acc_t warpSum = 0;
auto numYPerWarp = (myYLen+numWarp-1)/numWarp;
for (int warpY = 0; warpY < numYPerWarp; ++warpY){
auto y = wid*numYPerWarp + warpY;
if (y < myYLen and (hOffset+lid) < hiddenSize)
warpSum += myGrad[y*strideY + lid];
}
// transpose partial sum in SMEM and reduce further using warpReduce
smem[lid*numWarp + wid] = warpSum;
__syncthreads();
auto sum = smem[wid*C10_WARP_SIZE + lid];
sum = warpReduce(sum, numWarp);
// a a b b c c d d
// a a b b c c d d
// a a b b c c d d
// a a b b c c d d
// example of 4 warps (a, b, c, d) with 8 threads per warp
// Each warp needs 8 / 4 = 2 threads to write the results.
if (hOffset+wid*C10_WARP_SIZE/numWarp+lid/numWarp < hiddenSize){
if (lid % numWarp == 0){
myInGrad[wid*C10_WARP_SIZE/numWarp + lid/numWarp] = sum;
}
}
}
else if (wid == 0 and hOffset + lid < hiddenSize){
// Need to ensure the grad is zero for don't care region
myInGrad[lid] = 0;
}
}
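// Indexing example for the SMEM transpose above (editor note, using the toy 4-warp/8-lane
// layout from the a/b/c/d diagram): warp w, lane l writes its partial at smem[l*4 + w]; the
// same thread then reads smem[w*8 + l], so each group of numWarp = 4 consecutive lanes holds
// the 4 partials of one hidden element and warpReduce(sum, 4) leaves the total in the lane
// with lid % 4 == 0, which writes myInGrad[w*2 + l/4].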
// The actual bwd (reduction) kernel that gets launched.
// Call transducer_joint_single_backward twice on two input tensors.
// The two bwd ops are launched together, the first op uses blockIdx.y < maxFLen, and the second op
// uses the rest.
template <typename scalar_t, typename acc_t, class OffsetCal>
__global__ void transducer_joint_combined_backward(
const scalar_t *grad,
const int *fLen,
const int *gLen,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
bool packOutput,
scalar_t *fGrad,
scalar_t *gGrad) {
if (blockIdx.y < maxFLen){
transducer_joint_single_backward<scalar_t, acc_t, OffsetCal>(
grad,
fLen,
gLen,
batchOffset,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
false,
fGrad);
}
else{
transducer_joint_single_backward<scalar_t, acc_t, OffsetCal>(
grad,
fLen,
gLen,
batchOffset,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
true,
gGrad,
maxFLen);
}
}
// Vectorized version of transducer_joint_single_backward
// Doing exact same operation as transducer_joint_single_backward except the load and store are
// vectorized.
// When packing is enabled in the fwd op, unpacking is needed to restore the gradients in a
// non-packed form.
template <typename scalar_t, typename acc_t, typename vec_t, int V, class OffsetCal>
__device__ void transducer_joint_single_vec_backward(
const scalar_t *grad,
const int *fLen,
const int *gLen,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
bool packOutput,
bool bwdFasterDim,
scalar_t *inGrad,
int yBlockOffset=0){
const int batch = blockIdx.z;
const int x = blockIdx.y - yBlockOffset;
const int hOffset = blockIdx.x*C10_WARP_SIZE*V;
const int wid = threadIdx.y;
const int lid = threadIdx.x;
const int numWarp = blockDim.y;
OffsetCal offsetCal(batch, batchOffset, fLen, gLen, maxFLen, maxGLen, hiddenSize, packOutput,
bwdFasterDim);
const auto maxXLen = offsetCal.getMaxXLen();
const auto myXLen = offsetCal.getMyXLen();
const auto myYLen = offsetCal.getMyYLen();
scalar_t *myInGrad = inGrad + batch*maxXLen*hiddenSize + x*hiddenSize + hOffset;
extern __shared__ char smem8[];
auto smem = reinterpret_cast<acc_t*>(smem8);
acc_t warpSum[V];
scalar_t inBuffer[V];
scalar_t outBuffer[V];
auto myInGradVec = reinterpret_cast<vec_t*>(myInGrad);
auto outBufferVec = reinterpret_cast<vec_t*>(outBuffer);
if (x < myXLen){
const auto myBatchOffset = offsetCal.getBatchOffset();
const auto strideX = offsetCal.getStrideX();
const auto strideY = offsetCal.getStrideY();
const scalar_t *myGrad = grad + myBatchOffset + x*strideX + hOffset;
for (int i = 0; i < V; ++i)
warpSum[i] = 0;
// Each warp reduces numYPerWarp "y" first
auto numYPerWarp = (myYLen+numWarp-1)/numWarp;
for (int warpY = 0; warpY < numYPerWarp; ++warpY){
auto y = wid*numYPerWarp + warpY;
auto myGradVec = reinterpret_cast<vec_t const *>(myGrad + y*strideY);
auto inBufferVec = reinterpret_cast<vec_t*>(inBuffer);
if (hOffset + lid*V < hiddenSize and y < myYLen){
*inBufferVec = myGradVec[lid]; // vectorized load
#pragma unroll
for (int i = 0; i < V; ++i){
warpSum[i] += inBuffer[i];
}
}
}
// transpose partial sum in SMEM and reduce further using warpReduce
for (int i = 0; i < V; ++i){
smem[lid*numWarp + wid] = warpSum[i];
__syncthreads();
auto sum = smem[wid*C10_WARP_SIZE + lid];
if (hOffset+(wid*C10_WARP_SIZE/numWarp)*V < hiddenSize){
sum = warpReduce(sum, numWarp);
if (lid % numWarp == 0){
outBuffer[i] = sum;
}
}
__syncthreads();
}
// a a b b c c d d
// a a b b c c d d
// a a b b c c d d
// a a b b c c d d
// example of 4 warps (a, b, c, d) with 8 threads per warp
// Each warp needs 8 / 4 = 2 threads to write the results.
if (lid % numWarp == 0 and hOffset+(wid*C10_WARP_SIZE/numWarp + lid/numWarp)*V < hiddenSize)
myInGradVec[wid*C10_WARP_SIZE/numWarp + lid/numWarp] = *outBufferVec;
}
else if (wid == 0 and hOffset + lid*V < hiddenSize){
// Need to ensure the grad is zero for don't care region
myInGradVec[lid] = 0;
}
}
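// Vectorization note (editor addition): vec_t is uint64_t, so V = sizeof(vec_t)/sizeof(scalar_t)
// is 4 for fp16 and 2 for fp32, i.e. each lane loads/stores V contiguous hidden elements per
// access and hOffset advances by C10_WARP_SIZE*V per blockIdx.x. The host side only selects
// this kernel when hiddenSize is a multiple of V and the pointers are 8-byte aligned.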
// Vectorized version of transducer_joint_combined_backward
// Call transducer_joint_single_vec_backward twice on two input tensors.
// The two bwd ops are launched together, the first op uses blockIdx.y < maxFLen, and the second op
// uses the rest.
template <typename scalar_t, typename acc_t, typename vec_t, int V, class OffsetCal>
__global__ void transducer_joint_combined_vec_backward(
const scalar_t *grad,
const int *fLen,
const int *gLen,
const int64_t *batchOffset,
int64_t maxFLen,
int64_t maxGLen,
int64_t hiddenSize,
bool packOutput,
scalar_t *fGrad,
scalar_t *gGrad) {
if (blockIdx.y < maxFLen){
transducer_joint_single_vec_backward<scalar_t, acc_t, vec_t, V, OffsetCal>(
grad,
fLen,
gLen,
batchOffset,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
false,
fGrad);
}
else{
transducer_joint_single_vec_backward<scalar_t, acc_t, vec_t, V, OffsetCal>(
grad,
fLen,
gLen,
batchOffset,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
true,
gGrad,
maxFLen);
}
}
torch::Tensor transducer_joint_cuda_forward(
torch::Tensor f,
torch::Tensor g,
torch::Tensor fLen,
torch::Tensor gLen,
torch::Tensor batchOffset,
int64_t packedBatch,
int opt,
bool packOutput,
int tileSize){
auto tensorOpt = f.options();
auto dtype = f.scalar_type();
const auto batchSize = f.size(0);
const auto maxFLen = f.size(1);
const auto maxGLen = g.size(1);
const auto hiddenSize = f.size(2);
int64_t *batchOffsetPtr = nullptr;
torch::Tensor sum;
if (!packOutput){
sum = torch::empty({batchSize, maxFLen, maxGLen, hiddenSize}, tensorOpt);
batchOffsetPtr = nullptr;
}
else{
sum = torch::empty({packedBatch, hiddenSize}, tensorOpt);
batchOffsetPtr = batchOffset.data_ptr<int64_t>();
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(opt == 0 or opt == 1, "Got an invalid optimization level ", opt);
// Simple heuristics
const int numThread = std::min(128, (static_cast<int>(hiddenSize)+C10_WARP_SIZE-1)
/ C10_WARP_SIZE * C10_WARP_SIZE);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_joint_forward", ([&] {
if (opt == 0){
// vanilla kernel
const int threads = numThread;
const dim3 blocks(maxGLen, maxFLen, batchSize);
transducer_joint_forward<scalar_t, OffsetCalFwd>
<<<blocks, threads, 0, stream>>>(
f.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
fLen.data_ptr<int>(),
gLen.data_ptr<int>(),
batchOffsetPtr,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
sum.data_ptr<scalar_t>());
}
if (opt == 1){
// tiled version. For simplicity, assume tileF == tileG, even though the kernel can
// support more general cases.
const int threads = numThread;
const int hiddenPerBlock = numThread;
const int hiddenBlock = (hiddenSize + hiddenPerBlock - 1) / hiddenPerBlock;
const dim3 blocks( (maxGLen+tileSize-1)/tileSize * hiddenBlock,
(maxFLen+tileSize-1)/tileSize,
batchSize);
TORCH_CHECK(tileSize == 1 or tileSize == 2 or tileSize == 4,
"Expected tileSize to be in [1, 2, 4], but got ", tileSize);
switch (tileSize) {
#define LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(tile) case tile:\
transducer_joint_tiled_forward<scalar_t, tile, tile, OffsetCalFwd>\
<<<blocks, threads, 0, stream>>>(\
f.data_ptr<scalar_t>(),\
g.data_ptr<scalar_t>(),\
fLen.data_ptr<int>(),\
gLen.data_ptr<int>(),\
batchOffsetPtr,\
maxFLen,\
maxGLen,\
hiddenSize,\
hiddenPerBlock,\
packOutput,\
sum.data_ptr<scalar_t>());\
break;
LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(1);
LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(2);
LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(4);
}
}
}));
THCudaCheck(cudaGetLastError());
return sum;
}
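// Usage sketch (editor addition; variable names are hypothetical):
//   // f: [B, T, H], g: [B, U, H], fLen/gLen: int32 [B]
//   // batchOffset: int64 [B], cumulative fLen[b]*gLen[b]; packedBatch == batchOffset[B-1]
//   auto sum = transducer_joint_cuda_forward(f, g, fLen, gLen, batchOffset, packedBatch,
//                                            /*opt=*/1, /*packOutput=*/true, /*tileSize=*/4);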
std::vector<torch::Tensor> transducer_joint_cuda_backward(
torch::Tensor grad,
torch::Tensor fLen,
torch::Tensor gLen,
torch::Tensor batchOffset,
int maxFLen,
int maxGLen,
bool packOutput){
auto tensorOpt = grad.options();
auto dtype = grad.scalar_type();
const int batchSize = fLen.size(0);
const int hiddenSize = grad.size(-1);
const auto deviceProperties = at::cuda::getCurrentDeviceProperties();
const int maxNumWarp = deviceProperties->maxThreadsPerBlock / C10_WARP_SIZE;
torch::Tensor fGrad = torch::empty({batchSize, maxFLen, hiddenSize}, tensorOpt);
torch::Tensor gGrad = torch::empty({batchSize, maxGLen, hiddenSize}, tensorOpt);
int64_t *batchOffsetPtr = (!packOutput) ? nullptr : batchOffset.data_ptr<int64_t>();
// The number "y" I would like each thread to work on
const int workPerThread = 32;
// Since the bwd ops for f and g share the same thread block size, we need to use the max of the two.
int numWarp = largestPowerOfTwo((std::max(maxFLen, maxGLen) + workPerThread-1) / workPerThread);
// Would like to have at least 2 warps
numWarp = std::max(2, numWarp);
// cap on the maximum number of warps allowed
numWarp = std::min(maxNumWarp, numWarp);
// Need smem for transposing the partial sum. The partial sum is in a matrix of the shape
// numWarp x warpSize
const int smemSize = numWarp * C10_WARP_SIZE;
const dim3 threads(C10_WARP_SIZE, numWarp, 1);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_joint_cuda_backward_kernel", ([&] {
auto gradPtr = grad.data_ptr<scalar_t>();
auto fLenPtr = fLen.data_ptr<int>();
auto gLenPtr = gLen.data_ptr<int>();
auto fGradPtr = fGrad.data_ptr<scalar_t>();
auto gGradPtr = gGrad.data_ptr<scalar_t>();
// resolve the acc_t type
using acc_t = at::acc_type<scalar_t, true>;
using vec_t = uint64_t;
constexpr int vectFactor = sizeof(vec_t) / sizeof(scalar_t);
constexpr int vecAlignment = std::alignment_of<vec_t>::value;
// if all input and output tensors meet the alignment requirement
bool memAlign = (reinterpret_cast<uint64_t>(gradPtr) % vecAlignment == 0)
and (reinterpret_cast<uint64_t>(fGradPtr) % vecAlignment == 0)
and (reinterpret_cast<uint64_t>(gGradPtr) % vecAlignment == 0);
if (vectFactor > 1 and hiddenSize%vectFactor == 0 and memAlign){
// If vectorization helps and the alignment requirement is met, use the vectorized
// kernel. For simplicity, hiddenSize needs to be a multiple of vectFactor.
const dim3 blocks( (hiddenSize+C10_WARP_SIZE*vectFactor-1)/(C10_WARP_SIZE*vectFactor),
maxFLen+maxGLen,
batchSize);
transducer_joint_combined_vec_backward
<scalar_t, acc_t, vec_t, vectFactor, OffsetCalBwd>
<<<blocks, threads, smemSize*sizeof(acc_t)>>>(
gradPtr,
fLenPtr,
gLenPtr,
batchOffsetPtr,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
fGradPtr,
gGradPtr);
}
else{
const dim3 blocks((hiddenSize+C10_WARP_SIZE-1)/C10_WARP_SIZE,
maxFLen + maxGLen, batchSize);
transducer_joint_combined_backward<scalar_t, acc_t, OffsetCalBwd>
<<<blocks, threads, smemSize*sizeof(acc_t)>>>(
gradPtr,
fLenPtr,
gLenPtr,
batchOffsetPtr,
maxFLen,
maxGLen,
hiddenSize,
packOutput,
fGradPtr,
gGradPtr);
}
}));
return {fGrad, gGrad};
}
namespace arboretum_test {
TEST(SingleNodeHistSumFloatBin16, Naive) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<float> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, float>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
for (unsigned i = 0; i < size; ++i) {
ASSERT_EQ(count[i], 1);
ASSERT_FLOAT_EQ(grad[i], sum[i]);
}
}
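// What this checks (editor note): HistSumSingleNode appears to build, for the single node
// covering rows [node_size[0], node_size[1]), a per-bin histogram where count[b] is the number
// of rows with bin == b and sum[b] accumulates their gradients. With bin[i] = i every row lands
// in its own bin, so each count is 1 and sum[i] == grad[i].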
TEST(SingleNodeHistSumFloatBin8, Naive) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<float> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned char> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i;
}
arboretum::core::HistTreeGrower<unsigned, unsigned char, float, float>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
for (unsigned i = 0; i < size; ++i) {
ASSERT_EQ(count[i], 1);
ASSERT_FLOAT_EQ(grad[i], sum[i]);
}
}
TEST(SingleNodeHistSumFloatBin16, SingleSegment) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<float> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, float>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
float true_sum = size * (size - 1) / 2;
ASSERT_FLOAT_EQ(sum[0], float(true_sum));
// }
}
TEST(SingleNodeHistSumFloatBin8, SingleSegment) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<float> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned char> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0;
}
arboretum::core::HistTreeGrower<unsigned, unsigned char, float, float>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
float true_sum = size * (size - 1) / 2;
ASSERT_FLOAT_EQ(sum[0], float(true_sum));
// }
}
TEST(SingleNodeHistSumFloatBin16, SingleSegmentFullSize) {
const size_t size = HIST_SUM_BLOCK_DIM * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<float> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0 % 1024;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, float>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 11, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
float true_sum = size * (size - 1) / 2;
ASSERT_FLOAT_EQ(sum[0], float(true_sum));
// }
}
TEST(SingleNodeHistSumFloatBin8, SingleSegmentFullSize) {
const size_t size = HIST_SUM_BLOCK_DIM * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<float> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned char> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0 % 1024;
}
arboretum::core::HistTreeGrower<unsigned, unsigned char, float, float>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 11, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
float true_sum = size * (size - 1) / 2;
ASSERT_FLOAT_EQ(sum[0], float(true_sum));
// }
}
TEST(SingleNodeHistSumDoubleBin16, Naive) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
for (unsigned i = 0; i < size; ++i) {
ASSERT_EQ(count[i], 1);
ASSERT_DOUBLE_EQ(grad[i], sum[i]);
}
}
TEST(SingleNodeHistSumDoubleBin8, Naive) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned char> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i;
}
arboretum::core::HistTreeGrower<unsigned, unsigned char, float, double>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
for (unsigned i = 0; i < size; ++i) {
ASSERT_EQ(count[i], 1);
ASSERT_DOUBLE_EQ(grad[i], sum[i]);
}
}
TEST(SingleNodeHistSumDoubleBin16, SingleSegment) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
double true_sum = size * (size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0], true_sum);
// }
}
TEST(SingleNodeHistSumDoubleBin8, SingleSegment) {
const size_t size = 1 << 5;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned char> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0;
}
arboretum::core::HistTreeGrower<unsigned, unsigned char, float, double>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 6, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
double true_sum = size * (size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0], true_sum);
// }
}
TEST(SingleNodeHistSumDoubleBin16, SingleSegmentFullSize) {
const size_t size = HIST_SUM_BLOCK_DIM * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0 % 1024;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 11, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
double true_sum = size * (size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0], true_sum);
// }
}
TEST(SingleNodeHistSumDoubleBin8, SingleSegmentFullSize) {
const size_t size = HIST_SUM_BLOCK_DIM * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(size, 0.0);
thrust::device_vector<unsigned> count(size, 0);
thrust::device_vector<unsigned char> bin(size);
thrust::device_vector<unsigned> node_size(2);
node_size[0] = 0;
node_size[1] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0 % 1024;
}
arboretum::core::HistTreeGrower<unsigned, unsigned char, float, double>::
HistSumSingleNode(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 8, size);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size);
// sum of 0 + 1 + .. + size-1
double true_sum = size * (size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0], true_sum);
// }
}
TEST(MultiNodeHistSumDouble, _2_NodesNoTrick) {
const unsigned hist_size = 4;
const size_t size = HIST_SUM_BLOCK_DIM * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(hist_size * 2, 0.0);
thrust::device_vector<unsigned> count(hist_size * 2, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(3);
node_size[0] = 0;
node_size[1] = size / 2;
node_size[2] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i % hist_size;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSum(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()), NULL, NULL,
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 10, hist_size, 2, false);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
for (int i = 0; i < hist_size * 2; ++i) {
ASSERT_EQ(count[i], size / 8) << i;
}
// sum of 0 + 1 + .. + size / 2 -1
double true_sum = (size / 2) * (size / 2 - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0] + sum[1] + sum[2] + sum[3], true_sum);
// sum of size / 2 + ... + size -1
true_sum = (size / 2) * (size / 2 + size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[hist_size] + sum[hist_size + 1] + sum[hist_size + 2] +
sum[hist_size + 3],
true_sum);
// }
}
TEST(MultiNodeHistSumDouble, _2_NodesAsymmetricNoTrick) {
const unsigned hist_size = 4;
const size_t size = 128 * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(hist_size * 2, 0.0);
thrust::device_vector<unsigned> count(hist_size * 2, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(3);
node_size[0] = 0;
node_size[1] = 10;
node_size[2] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i % hist_size;
}
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSum(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()), NULL, NULL,
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 10, hist_size, 2, false);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], 3);
ASSERT_EQ(count[1], 3);
ASSERT_EQ(count[2], 2);
ASSERT_EQ(count[3], 2);
ASSERT_EQ(count[4], 317);
ASSERT_EQ(count[5], 317);
ASSERT_EQ(count[6], 318);
ASSERT_EQ(count[7], 318);
ASSERT_DOUBLE_EQ(sum[0], 12.0);
ASSERT_DOUBLE_EQ(sum[1], 15.0);
ASSERT_DOUBLE_EQ(sum[2], 8.0);
ASSERT_DOUBLE_EQ(sum[3], 10.0);
ASSERT_DOUBLE_EQ(sum[4], 204148.0);
ASSERT_DOUBLE_EQ(sum[5], 204465.0);
ASSERT_DOUBLE_EQ(sum[6], 204792.0);
ASSERT_DOUBLE_EQ(sum[7], 205110.0);
double true_sum = (size) * (size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0] + sum[1] + sum[2] + sum[3] + sum[hist_size] +
sum[hist_size + 1] + sum[hist_size + 2] +
sum[hist_size + 3],
true_sum);
}
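// How the expected values above arise (editor note): node 0 holds rows 0..9 with bin = i % 4,
// so bins {0,1,2,3} receive {3,3,2,2} rows with gradient sums 0+4+8 = 12, 1+5+9 = 15,
// 2+6 = 8 and 3+7 = 10. Node 1 holds rows 10..1279; each bin would get 1280/4 = 320 rows
// overall, minus what node 0 took, giving counts {317,317,318,318}, and e.g. its bin-0 sum is
// 12+16+...+1276 = 317*(12+1276)/2 = 204148.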
TEST(MultiNodeHistSumDouble, _2_NodesAsymmetricWithTrick) {
const unsigned hist_size = 4;
const size_t size = 128 * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(hist_size * 2, 0.0);
thrust::device_vector<unsigned> count(hist_size * 2, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(3);
node_size[0] = 0;
node_size[1] = 10;
node_size[2] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i % hist_size;
}
thrust::device_vector<unsigned> parent_count(hist_size, 0);
thrust::device_vector<double> parent_sum(hist_size, 0);
parent_count[0] = parent_count[1] = parent_count[2] = parent_count[3] = 320;
// per-bin counts/sums over the full parent segment (both child nodes combined)
parent_sum[0] = 204148.0 + 12.0;
parent_sum[1] = 204465.0 + 15.0;
parent_sum[2] = 204792.0 + 8.0;
parent_sum[3] = 205110.0 + 10.0;
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSum(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(parent_sum.data()),
thrust::raw_pointer_cast(parent_count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 10, hist_size, 2, true);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], 3);
ASSERT_EQ(count[1], 3);
ASSERT_EQ(count[2], 2);
ASSERT_EQ(count[3], 2);
ASSERT_EQ(count[4], 317);
ASSERT_EQ(count[5], 317);
ASSERT_EQ(count[6], 318);
ASSERT_EQ(count[7], 318);
ASSERT_DOUBLE_EQ(sum[0], 12.0);
ASSERT_DOUBLE_EQ(sum[1], 15.0);
ASSERT_DOUBLE_EQ(sum[2], 8.0);
ASSERT_DOUBLE_EQ(sum[3], 10.0);
ASSERT_DOUBLE_EQ(sum[4], 204148.0);
ASSERT_DOUBLE_EQ(sum[5], 204465.0);
ASSERT_DOUBLE_EQ(sum[6], 204792.0);
ASSERT_DOUBLE_EQ(sum[7], 205110.0);
}
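// The "trick" exercised here (editor note): given the parent histogram, the grower presumably
// accumulates only the smaller child (the 10-row node) directly and derives the sibling by
// subtraction, e.g. count[4] = 320 - 3 = 317 and sum[4] = parent_sum[0] - sum[0]
// = 204160 - 12 = 204148, which is exactly what the assertions check.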
TEST(MultiNodeHistSumDouble, _2_NodesAsymmetricWithTrick2) {
const unsigned hist_size = 4;
const size_t size = 128 * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(hist_size * 2, 0.0);
thrust::device_vector<unsigned> count(hist_size * 2, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(3);
node_size[0] = 0;
node_size[1] = size - 10;
node_size[2] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = i % hist_size;
}
thrust::device_vector<unsigned> parent_count(hist_size, 0);
thrust::device_vector<double> parent_sum(hist_size, 0);
parent_count[0] = parent_count[1] = parent_count[2] = parent_count[3] = 320;
// per-bin counts/sums over the full parent segment (both child nodes combined)
parent_sum[0] = 204148.0 + 12.0;
parent_sum[1] = 204465.0 + 15.0;
parent_sum[2] = 204792.0 + 8.0;
parent_sum[3] = 205110.0 + 10.0;
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSum(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(parent_sum.data()),
thrust::raw_pointer_cast(parent_count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 10, hist_size, 2, true);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], 318);
ASSERT_EQ(count[1], 318);
ASSERT_EQ(count[2], 317);
ASSERT_EQ(count[3], 317);
ASSERT_EQ(count[4], 2);
ASSERT_EQ(count[5], 2);
ASSERT_EQ(count[6], 3);
ASSERT_EQ(count[7], 3);
ASSERT_DOUBLE_EQ(sum[0], 201612.0);
ASSERT_DOUBLE_EQ(sum[1], 201930.0);
ASSERT_DOUBLE_EQ(sum[2], 200978.0);
ASSERT_DOUBLE_EQ(sum[3], 201295.0);
ASSERT_DOUBLE_EQ(sum[4], 2548.0);
ASSERT_DOUBLE_EQ(sum[5], 2550.0);
ASSERT_DOUBLE_EQ(sum[6], 3822.0);
ASSERT_DOUBLE_EQ(sum[7], 3825.0);
}
TEST(MultiNodeHistSumDouble, SingleSegmentWithTrick) {
const unsigned hist_size = 4;
const size_t size = HIST_SUM_BLOCK_DIM * 10;
thrust::device_vector<float> grad(size);
thrust::device_vector<double> sum(hist_size * 2, 0.0);
thrust::device_vector<unsigned> count(hist_size * 2, 0);
thrust::device_vector<unsigned short> bin(size);
thrust::device_vector<unsigned> node_size(3);
node_size[0] = 0;
node_size[1] = size / 2;
node_size[2] = size;
for (unsigned i = 0; i < size; ++i) {
grad[i] = float(i);
bin[i] = 0;
}
thrust::device_vector<unsigned> parent_count(hist_size, 0);
thrust::device_vector<double> parent_sum(hist_size, 0);
parent_count[0] = size;
// sum of 0 + 1 + .. + size-1 over the whole parent segment
double true_sum = (size) * (size - 1) / 2;
parent_sum[0] = true_sum;
arboretum::core::HistTreeGrower<unsigned, unsigned short, float, double>::
HistSum(thrust::raw_pointer_cast(sum.data()),
thrust::raw_pointer_cast(count.data()),
thrust::raw_pointer_cast(parent_sum.data()),
thrust::raw_pointer_cast(parent_count.data()),
thrust::raw_pointer_cast(grad.data()),
thrust::raw_pointer_cast(node_size.data()),
thrust::raw_pointer_cast(bin.data()), 10, hist_size, 2, true);
TEST_OK(cudaDeviceSynchronize());
TEST_OK(cudaGetLastError());
ASSERT_EQ(count[0], size / 2);
ASSERT_EQ(count[hist_size], size / 2);
// sum of 0 + 1 + .. + size / 2 -1
true_sum = (size / 2) * (size / 2 - 1) / 2;
ASSERT_DOUBLE_EQ(sum[0], true_sum);
// sum of size / 2 + ... + size -1
true_sum = (size / 2) * (size / 2 + size - 1) / 2;
ASSERT_DOUBLE_EQ(sum[hist_size], true_sum);
// }
}
} // namespace arboretum_test
namespace sd {
namespace ops {
namespace helpers {
static SD_INLINE SD_HOST_DEVICE sd::LongType boundsAmp(sd::LongType const low, sd::LongType const high,
sd::LongType const value) {
if (high < value) return high;
if (value < low) return low;
return value;
}
template <typename TKernelFunc>
static SD_KERNEL void computeSpansKernel(TKernelFunc* kernel, int* startsVec, float* weightsVector,
sd::LongType outSize, sd::LongType inSize, float kernelScale, int spanSize,
float const invScale, float const invTranslate, float invKernelScale,
float* tempWeightsBuf) {
// return value if within bounds or bounds otherwise
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
__shared__ int maxSpanSize;
if (threadIdx.x == 0 && blockIdx.x == 0) {
maxSpanSize = 0;
}
__syncthreads();
for (auto x = tid; x < outSize; x += step) {
const float columnFloat = x + 0.5f;
const float sampleFloat = columnFloat * invScale + invTranslate;
// Don't sample when the sampling location is outside the source image.
if (sampleFloat < 0 || sampleFloat > inSize) {
// Add an empty span.
startsVec[x] = 0;
continue;
}
sd::LongType spanStart = math::sd_ceil<float, float>(sampleFloat - kernel->radius() * kernelScale - 0.5f);
sd::LongType spanEnd = math::sd_floor<float, float>(sampleFloat + kernel->radius() * kernelScale - 0.5f);
spanStart = boundsAmp(0LL, inSize - 1, spanStart);
spanEnd = boundsAmp(0LL, inSize - 1, spanEnd) + 1;
// actual span for this output column; must not exceed the preallocated span width (spanSize)
int const curSpanSize = spanEnd - spanStart;
if (curSpanSize > spanSize) {
return;  // span is too large for the preallocated weight buffer
}
float totalWeightSum = 0.f;
// each output column x owns its own spanSize-wide slice of the temporary weight buffer
auto tempWeights = &tempWeightsBuf[x * spanSize];
auto actualWeights = 0;
for (int source = spanStart; source < spanEnd; ++source) {
float kernelPos = static_cast<float>(source) + 0.5f - sampleFloat;
float weight = (*kernel)(kernelPos * invKernelScale);
totalWeightSum += weight;
tempWeights[actualWeights++] = weight;
}
maxSpanSize = math::sd_max(maxSpanSize, curSpanSize);
if (math::sd_abs(totalWeightSum) >= 1000.f * DataTypeUtils::min<float>()) { //
auto totalWeightSumInverted = 1.0f / totalWeightSum;
auto outIndex = spanSize * x;
for (auto weightIndex = 0; weightIndex < actualWeights; ++weightIndex) {
weightsVector[outIndex] = tempWeights[weightIndex] * totalWeightSumInverted;
++outIndex;
}
}
startsVec[x] = spanStart;
}
}
template <typename TKernelFunc>
static sd::Status computeSpans(LaunchContext* context, TKernelFunc& kernel, sd::LongType const outSize,
sd::LongType const inSize, float const scale, float const translate,
bool const antialias, Spans& spans) {
// When sampling, we need the inverse scale and translation, to map from an
// output to an input pixel.
float const invScale = 1.f / scale;
float const invTranslate = -invScale * translate;
// When downsampling the kernel should be scaled since we want to low pass
// filter and interpolate, but when upsampling it should not be since we only
// want to interpolate.
float const kernelScale = antialias ? math::sd_max(invScale, 1.f) : 1.f;
spans._spanSize =
math::sd_min(2 * static_cast<int>(std::ceil(kernel.radius() * kernelScale)) + 1, static_cast<int>(inSize));
spans._starts = NDArrayFactory::create<int>('c', {outSize});
spans._starts.syncToHost();
spans._weights = NDArrayFactory::create<float>('c', {outSize, spans._spanSize});
spans._weights.syncToHost();
auto startsVec = reinterpret_cast<int*>(spans._starts.buffer());
auto weightsVector = reinterpret_cast<float*>(spans._weights.buffer());
spans._weights.nullify();
const float invKernelScale = 1.f / kernelScale;
// NDArray tempWeights = NDArrayFactory::create<float>('c', {outSize, spans._spanSize});
// auto tempWeightsBuf = reinterpret_cast<float*>(tempWeights.specialBuffer());
// PointersManager mg(context, "ops::helpers::computeSpans");
// auto specialKernel = reinterpret_cast<TKernelFunc*>(mg.replicatePointer(&kernel, sizeof(TKernelFunc)));
auto stream = context->getCudaStream();
// computeSpansKernel<TKernelFunc><<<1, 1, 128, *stream>>>(specialKernel, startsVec, weightsVector, outSize, inSize,
// kernelScale, spans._spanSize, invScale, invTranslate, invKernelScale, tempWeightsBuf);
auto maxSpanSize = 0;
std::vector<float> tempWeights;
for (auto x = 0; x < outSize; x++) {
const float columnFloat = x + 0.5f;
const float sampleFloat = columnFloat * invScale + invTranslate;
// Don't sample when the sampling location is outside the source image.
if (sampleFloat < 0 || sampleFloat > inSize) {
// Add an empty span.
startsVec[x] = 0;
continue;
}
sd::LongType spanStart = math::sd_ceil<float, float>(sampleFloat - kernel.radius() * kernelScale - 0.5f);
sd::LongType spanEnd = math::sd_floor<float, float>(sampleFloat + kernel.radius() * kernelScale - 0.5f);
spanStart = boundsAmp(0LL, inSize - 1, spanStart);
spanEnd = boundsAmp(0LL, inSize - 1, spanEnd) + 1;
int const spanSize = spanEnd - spanStart;
if (spanSize > spans._spanSize) {
return Logger::logStatusMsg(
Status::BAD_INPUT,
"Span is too large: "); // + spanSize + " vs " + spans._spanSize);//, spanSize, spans._spanSize));
}
float totalWeightSum = 0.f;
tempWeights.clear();
for (int source = spanStart; source < spanEnd; ++source) {
float kernelPos = static_cast<float>(source) + 0.5f - sampleFloat;
float weight = kernel(kernelPos * invKernelScale);
totalWeightSum += weight;
tempWeights.push_back(weight);
}
maxSpanSize = math::sd_max(maxSpanSize, spanSize);
if (math::sd_abs(totalWeightSum) >= 1000.f * DataTypeUtils::min<float>()) { //
auto totalWeightSumInverted = 1.0f / totalWeightSum;
auto outIndex = spans._spanSize * x;
for (auto weightIndex = 0; weightIndex < tempWeights.size(); ++weightIndex) {
weightsVector[outIndex++] = tempWeights[weightIndex] * totalWeightSumInverted;
// ++outIndex;
}
}
startsVec[x] = spanStart;
}
spans._starts.tickWriteHost();
spans._weights.tickWriteHost();
spans._starts.syncToDevice();
spans._weights.syncToDevice();
// cudaStreamSynchronize(*stream);
return sd::Status::OK;
}
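// Worked example (editor sketch, hypothetical sizes): downscaling a row of inSize = 8 to
// outSize = 4 with the triangle (bilinear) kernel and antialias enabled gives scale = 0.5,
// invScale = 2, kernelScale = 2 and spanSize = 2*ceil(1*2)+1 = 5. For output column x = 1:
// sampleFloat = 1.5*2 = 3, spanStart = ceil(3-2-0.5) = 1, spanEnd = floor(3+2-0.5) = 4, so
// sources 1..4 contribute triangle weights {0.25, 0.75, 0.75, 0.25} that are normalized to
// {0.125, 0.375, 0.375, 0.125} and written at weightsVector[x*spanSize ..].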
// template int computeSpans(LaunchContext* context, TriangleKernelFunc& kernel, sd::LongType const outSize,
// sd::LongType const inSize, float const scale, float const translate, bool const antialias, Spans& spans);
template <typename X, typename Z>
static SD_KERNEL void batchedGatherSpan(sd::LongType outputWidth, sd::LongType outputHeight, int rowSpanSize,
int const* rowStartsBuf, Z const* rowWeightBuf, int columnSpanSize,
int const* columnStartsBuf, Z const* columnWeightBuf, X const* pImages,
const sd::LongType* imageSpecialShapeInfo, Z* pIntermediate, Z* pOutput,
sd::LongType outputPixPerBatch) {
auto batchSize = shape::sizeAt(imageSpecialShapeInfo, 0);
auto inputHeight = shape::sizeAt(imageSpecialShapeInfo, 1);
auto inputWidth = shape::sizeAt(imageSpecialShapeInfo, 2);
auto channels = shape::sizeAt(imageSpecialShapeInfo, 3);
bool inputEws1 = shape::elementWiseStride(imageSpecialShapeInfo) == 1;
auto inputPixPerBatch = shape::strideAt(imageSpecialShapeInfo, 0);
auto inRowStride = shape::strideAt(imageSpecialShapeInfo, 1);
auto wStride = shape::strideAt(imageSpecialShapeInfo, 2);
auto cStride = shape::strideAt(imageSpecialShapeInfo, 3);
auto intermediatePixPerBatch = inputWidth * outputHeight * channels;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int b = tid; b < batchSize; b += step) {
auto imagePtr = pImages + b * inputPixPerBatch;
auto intermediatePtr = pIntermediate + b * intermediatePixPerBatch;
auto outputPtr = pOutput + b * outputPixPerBatch;
gatherRows<X, Z>(rowSpanSize, rowStartsBuf, rowWeightBuf, imagePtr, inputHeight, inputWidth, outputHeight,
inputWidth, channels, intermediatePtr, inputEws1, inRowStride, wStride, cStride);
gatherColumns<Z>(columnSpanSize, columnStartsBuf, columnWeightBuf, intermediatePtr, outputHeight, inputWidth,
outputHeight, outputWidth, channels, outputPtr);
}
}
template <typename X, typename Z>
static void gatherSpans(LaunchContext* context, int const rowSpanSize, NDArray const& rowStarts,
NDArray const& rowWeights, int const colSpanSize, NDArray const& columnStarts,
NDArray const& columnWeights, NDArray const* images, NDArray& intermediate, NDArray* output) {
const auto imageSpecialShapeInfo = images->specialShapeInfo();
auto outputHeight = output->sizeAt(1);
auto outputWidth = output->sizeAt(2);
auto channels = images->sizeAt(3);
auto outputPixPerBatch = outputWidth * outputHeight * channels;
auto intermediatePtr = reinterpret_cast<Z*>(intermediate.specialBuffer());
auto imagePtr = reinterpret_cast<X const*>(images->specialBuffer());
auto outputPtr = reinterpret_cast<Z*>(output->specialBuffer());
auto stream = context->getCudaStream();
auto rowStartsBuf = reinterpret_cast<int const*>(rowStarts.specialBuffer());
auto rowWeightBuf = reinterpret_cast<Z const*>(rowWeights.specialBuffer());
auto columnStartsBuf = reinterpret_cast<int const*>(columnStarts.specialBuffer());
auto columnWeightBuf = reinterpret_cast<Z const*>(columnWeights.specialBuffer());
batchedGatherSpan<X, Z><<<128, 128, 256, *stream>>>(
outputWidth, outputHeight, rowSpanSize, rowStartsBuf, rowWeightBuf, colSpanSize, columnStartsBuf, columnWeightBuf,
imagePtr, imageSpecialShapeInfo, intermediatePtr, outputPtr, outputPixPerBatch);
}
template <typename X, typename Z>
static sd::Status resizeKernel(LaunchContext* context, ImageResizeMethods method, NDArray const* input,
sd::LongType outWidth, sd::LongType outHeight, bool antialias, double coefficient,
NDArray* output) {
sd::LongType const batchSize = input->sizeAt(0);
sd::LongType const inputHeight = input->sizeAt(1);
sd::LongType const inputWidth = input->sizeAt(2);
sd::LongType const channels = input->sizeAt(3);
NDArray::prepareSpecialUse({output}, {input});
Z rowScale = Z(outHeight) / Z(inputHeight);
Z columnScale = Z(outWidth) / Z(inputWidth);
// Return if the output is empty.
if (output->lengthOf() == 0) return sd::Status::OK;
Spans colSpans;
Spans rowSpans;
auto res = sd::Status::OK;
switch (method) {
case kResizeBilinear: {
TriangleKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != sd::Status::OK) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeBicubic: {
KeysCubicKernelFunc<float> kernel(static_cast<float>(coefficient));
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != sd::Status::OK) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeLanczos3: {
LanczosKernelFunc kernel(3.f);
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != sd::Status::OK) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeLanczos5: {
LanczosKernelFunc kernel(5.f);
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != sd::Status::OK) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeGaussian: {
GaussianKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != sd::Status::OK) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeMitchellcubic: {
MitchellCubicKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != sd::Status::OK) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
};
NDArray intermediate = NDArrayFactory::create<Z>('c', {batchSize, outHeight, inputWidth, channels});
// const functor::Spans& const_row_spans = row_spans;
// typename TTypes<int32, 1>::ConstTensor row_starts(
// const_row_spans.starts.tensor<int32, 1>());
auto& rowStarts = rowSpans._starts;        // shape {outHeight}
auto& rowWeights = rowSpans._weights;      // shape {outHeight, spanSize}
auto& columnStarts = colSpans._starts;     // shape {outWidth}
auto& columnWeights = colSpans._weights;   // shape {outWidth, spanSize}
gatherSpans<X, Z>(context, rowSpans._spanSize, rowStarts, rowWeights, colSpans._spanSize, columnStarts, columnWeights,
input, intermediate, output);
NDArray::registerSpecialUse({output}, {input});
return res;
}
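// Two-pass (separable) resize used above (editor note): gatherRows first resamples the height
// dimension into `intermediate` of shape {batchSize, outHeight, inputWidth, channels}, then
// gatherColumns resamples the width dimension into the final {batchSize, outHeight, outWidth,
// channels} output, so each pass only applies a one-dimensional span of weights.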
#if defined(HAS_FLOAT32)
#define SD_FLOAT_TYPES_FLOAT32 SKIP_FIRST_COMMA(TTYPE_FLOAT32)
static sd::Status resizeTriangle(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const antialias, NDArray* output) {
// std::unique_ptr<IKernelFunc> kernel(new TriangleKernelFunc);
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(context, kResizeBilinear, image, width, height, antialias, 0, output), SD_NUMERIC_TYPES,
SD_FLOAT_TYPES_FLOAT32);
return Logger::logStatusMsg(Status::VALIDATION,
"helpers::resizeTriangle: This resize method is avaliable in future versions");
}
static sd::Status resizeLanczos3(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const antialias, NDArray* output) {
// std::unique_ptr<IKernelFunc> kernel(new LanczosKernelFunc(3.f));
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(context, kResizeLanczos3, image, width, height, antialias, 0, output), SD_NUMERIC_TYPES,
SD_FLOAT_TYPES_FLOAT32);
return Logger::logStatusMsg(Status::VALIDATION,
"helpers::resizeLanczos3: This resize method is avaliable in future versions");
}
static sd::Status resizeLanczos5(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const antialias, NDArray* output) {
// std::unique_ptr<IKernelFunc> kernel(new LanczosKernelFunc(5.f));
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(context, kResizeLanczos5, image, width, height, antialias, 0, output), SD_NUMERIC_TYPES,
SD_FLOAT_TYPES_FLOAT32);
return Logger::logStatusMsg(Status::VALIDATION,
"helpers::resizeLanczos5: This resize method is avaliable in future versions");
}
static sd::Status resizeGaussian(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const antialias, NDArray* output) {
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(context, kResizeGaussian, image, width, height, antialias, 0, output), SD_NUMERIC_TYPES,
SD_FLOAT_TYPES_FLOAT32);
return Logger::logStatusMsg(Status::VALIDATION,
"helpers::resizeGaussian: This resize method is avaliable in future versions");
}
static sd::Status resizeMitchellcubic(sd::LaunchContext* context, NDArray const* image, int const width,
int const height, bool const antialias, NDArray* output) {
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(context, kResizeMitchellcubic, image, width, height, antialias, 0, output), SD_NUMERIC_TYPES,
SD_FLOAT_TYPES_FLOAT32);
return Logger::logStatusMsg(Status::VALIDATION,
"helpers::ResizeMitchellcubic: This resize method is avaliable in future versions");
}
static sd::Status resizeBicubicA(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
CoordinateTransformationMode coorMode, bool exclude_outside, double coefficient,
NDArray* output) {
constexpr bool alignCorners = false;
return resizeBicubicFunctorA(context, image, width, height, alignCorners, coorMode, exclude_outside, coefficient,
output);
}
static sd::Status resizeBicubicAntialias(sd::LaunchContext* context, NDArray const* image, int const width,
int const height, bool const antialias, double coefficient, NDArray* output) {
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(context, kResizeBicubic, image, width, height, antialias, coefficient, output),
SD_NUMERIC_TYPES, SD_FLOAT_TYPES_FLOAT32);
return Logger::logStatusMsg(Status::VALIDATION,
"helpers::ResizeMitchellcubic: This resize method is avaliable in future versions");
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
sd::Status resizeFunctor(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
ImageResizeMethods method, CoordinateTransformationMode coorMode, bool exclude_outside,
NearestMode nearestMode, double coefficient, bool antialias, NDArray* output) {
switch (method) {
case kResizeNearest:
return resizeNeighborFunctor(context, image, width, height, coorMode, nearestMode, false, output);
case kResizeArea:
return resizeAreaFunctor(context, image, width, height, false, output);
#if defined(HAS_FLOAT32)
case kResizeBilinear:
return resizeTriangle(context, image, width, height, antialias, output);
case kResizeLanczos3:
return resizeLanczos3(context, image, width, height, antialias, output);
case kResizeLanczos5:
return resizeLanczos5(context, image, width, height, antialias, output);
case kResizeGaussian:
return resizeGaussian(context, image, width, height, antialias, output);
case kResizeMitchellcubic:
return resizeMitchellcubic(context, image, width, height, antialias, output);
case kResizeBicubic: {
// if antialias then coorMode is HALF_PIXEL and exclude_outside is true
if (antialias) {
return resizeBicubicAntialias(context, image, width, height, antialias, coefficient, output);
} else {
// use modified v1
return resizeBicubicA(context, image, width, height, coorMode, exclude_outside, coefficient, output);
}
}
#else
case kResizeBilinear:
case kResizeLanczos3:
case kResizeLanczos5:
case kResizeGaussian:
case kResizeMitchellcubic:
case kResizeBicubic: {
sd_printf("helper::resizeFunctor: only float type is supported by this resize method %i\n", (int)method);
return Logger::logStatusMsg(Status::BAD_INPUT, "helper::resizeFunctor: only float type supported");
}
#endif
default:
sd_printf("helper::resizeFunctor: Wrong resize method %i\n", (int)method);
throw std::runtime_error("helper::resizeFunctor: Wrong resize method.");
}
return sd::Status::OK;
}
} // namespace helpers
} // namespace ops
} // namespace sd
namespace cupoch {
namespace geometry {
namespace {
std::pair<float, float> TangentMinMax(float min_angle, float max_angle) {
float min_angle_tan = tan(min_angle);
float max_angle_tan = tan(max_angle);
// Correct sign of tan around singularity points
if (min_angle_tan < 0.0) min_angle_tan = -min_angle_tan;
if (max_angle_tan > 0.0) max_angle_tan = -max_angle_tan;
return std::make_pair(min_angle_tan, max_angle_tan);
}
__device__ bool IsShadow(float r1,
float r2,
float included_angle,
float min_angle_tan,
float max_angle_tan) {
const float perpendicular_y = r2 * sin(included_angle);
const float perpendicular_x = r1 - r2 * cos(included_angle);
const float perpendicular_tan = fabs(perpendicular_y) / perpendicular_x;
if (perpendicular_tan > 0) {
if (perpendicular_tan < min_angle_tan) return true;
} else {
if (perpendicular_tan > max_angle_tan) return true;
}
return false;
}
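// IsShadow flags a pair of returns as a shadow edge when the segment joining them
// is nearly parallel to the viewing ray: with the first ray taken as the x axis,
// perpendicular_x / perpendicular_y are the components of the offset between the
// two hits, and perpendicular_tan = |y| / x is compared against the sign-adjusted
// tangents produced by TangentMinMax.  This matches the test used by the ROS
// laser_filters ScanShadowsFilter.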
struct apply_scan_shadow_filter_functor {
apply_scan_shadow_filter_functor(const float* ranges,
float min_angle_tan,
float max_angle_tan,
float angle_increment,
int num_steps,
int window,
int neighbors,
bool remove_shadow_start_point,
float* out)
: ranges_(ranges),
min_angle_tan_(min_angle_tan),
max_angle_tan_(max_angle_tan),
angle_increment_(angle_increment),
num_steps_(num_steps),
window_(window),
neighbors_(neighbors),
remove_shadow_start_point_(remove_shadow_start_point),
out_(out){};
const float* ranges_;
const float min_angle_tan_;
const float max_angle_tan_;
const float angle_increment_;
const int num_steps_;
const int window_;
const int neighbors_;
const bool remove_shadow_start_point_;
float* out_;
__device__ void operator()(size_t idx) {
int n = idx / num_steps_;
int i = idx % num_steps_;
for (int y = -window_; y < window_ + 1; y++) {
int j = i + y;
if (j < 0 || j >= num_steps_ || i == j) continue;
if (IsShadow(ranges_[n * num_steps_ + i],
ranges_[n * num_steps_ + j], y * angle_increment_,
min_angle_tan_, max_angle_tan_)) {
for (int index = max(i - neighbors_, 0);
index <= min(i + neighbors_, num_steps_ - 1); index++) {
                    // index into the current scan n, consistent with the writes below
                    if (ranges_[n * num_steps_ + i] < ranges_[n * num_steps_ + index]) {
out_[n * num_steps_ + index] =
std::numeric_limits<float>::quiet_NaN();
}
}
if (remove_shadow_start_point_) {
out_[n * num_steps_ + i] =
std::numeric_limits<float>::quiet_NaN();
}
}
}
}
};
} // namespace
LaserScanBuffer::LaserScanBuffer(int num_steps,
int num_max_scans,
float min_angle,
float max_angle)
: GeometryBase3D(Geometry::GeometryType::LaserScanBuffer),
num_steps_(num_steps),
num_max_scans_(num_max_scans),
min_angle_(min_angle),
max_angle_(max_angle) {}
LaserScanBuffer::~LaserScanBuffer(){};
LaserScanBuffer::LaserScanBuffer(const LaserScanBuffer& other)
: GeometryBase3D(Geometry::GeometryType::LaserScanBuffer),
ranges_(other.ranges_),
intensities_(other.intensities_),
top_(other.top_),
bottom_(other.bottom_),
num_steps_(other.num_steps_),
num_max_scans_(other.num_max_scans_),
min_angle_(other.min_angle_),
max_angle_(other.max_angle_),
origins_(other.origins_) {}
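// Scan storage is a ring buffer: top_ and bottom_ are monotonically increasing scan
// counters, and (counter % num_max_scans_) gives the physical slot in ranges_ /
// intensities_ (num_steps_ floats per scan).  GetRanges / GetIntensities therefore
// copy either one contiguous block (start < end) or two blocks that wrap around the
// end of the buffer.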
thrust::host_vector<float> LaserScanBuffer::GetRanges() const {
thrust::host_vector<float> ranges;
if (top_ == bottom_) {
return ranges;
}
int start = top_ % num_max_scans_;
int end = bottom_ % num_max_scans_;
if (start < end) {
int n = end - start;
ranges.resize(n * num_steps_);
thrust::copy_n(ranges_.begin() + start * num_steps_, n * num_steps_,
ranges.begin());
return ranges;
} else {
ranges.resize(num_max_scans_ * num_steps_);
int offset = (num_max_scans_ - start) * num_steps_;
thrust::copy_n(ranges_.begin() + start * num_steps_, offset,
ranges.begin());
thrust::copy_n(ranges_.begin(), end * num_steps_,
ranges.begin() + offset);
return ranges;
}
}
thrust::host_vector<float> LaserScanBuffer::GetIntensities() const {
thrust::host_vector<float> intensities;
if (top_ == bottom_) {
return intensities;
}
int start = top_ % num_max_scans_;
int end = bottom_ % num_max_scans_;
if (start < end) {
        int n = end - start;
intensities.resize(n * num_steps_);
thrust::copy_n(intensities_.begin() + start * num_steps_,
n * num_steps_, intensities.begin());
return intensities;
} else {
intensities.resize(num_max_scans_ * num_steps_);
int offset = (num_max_scans_ - start) * num_steps_;
thrust::copy_n(intensities_.begin() + start * num_steps_, offset,
intensities.begin());
thrust::copy_n(intensities_.begin(), end * num_steps_,
intensities.begin() + offset);
return intensities;
}
}
LaserScanBuffer& LaserScanBuffer::Clear() {
top_ = 0;
bottom_ = 0;
ranges_.clear();
intensities_.clear();
return *this;
}
bool LaserScanBuffer::IsEmpty() const { return ranges_.empty(); }
Eigen::Vector3f LaserScanBuffer::GetMinBound() const {
utility::LogError("LaserScanBuffer::GetMinBound is not supported");
return Eigen::Vector3f::Zero();
}
Eigen::Vector3f LaserScanBuffer::GetMaxBound() const {
utility::LogError("LaserScanBuffer::GetMaxBound is not supported");
return Eigen::Vector3f::Zero();
}
Eigen::Vector3f LaserScanBuffer::GetCenter() const {
utility::LogError("LaserScanBuffer::GetCenter is not supported");
return Eigen::Vector3f::Zero();
}
AxisAlignedBoundingBox<3> LaserScanBuffer::GetAxisAlignedBoundingBox() const {
utility::LogError(
"LaserScanBuffer::GetAxisAlignedBoundingBox is not supported");
return AxisAlignedBoundingBox<3>();
}
LaserScanBuffer& LaserScanBuffer::Transform(
const Eigen::Matrix4f& transformation) {
thrust::for_each(origins_.begin(), origins_.end(),
[transformation] __device__(Eigen::Matrix4f_u & trans) {
trans = trans * transformation;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::Translate(const Eigen::Vector3f& translation,
bool relative) {
thrust::for_each(origins_.begin(), origins_.end(),
[translation] __device__(Eigen::Matrix4f_u & trans) {
trans.block<3, 1>(0, 3) =
trans.block<3, 1>(0, 3) + translation;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::Scale(const float scale, bool center) {
thrust::for_each(ranges_.begin(), ranges_.end(),
[scale] __device__(float& r) { r *= scale; });
return *this;
}
LaserScanBuffer& LaserScanBuffer::Rotate(const Eigen::Matrix3f& R,
bool center) {
thrust::for_each(origins_.begin(), origins_.end(),
[R] __device__(Eigen::Matrix4f_u & trans) {
trans.block<3, 3>(0, 0) = trans.block<3, 3>(0, 0) * R;
});
return *this;
}
LaserScanBuffer& LaserScanBuffer::AddRanges(
const utility::device_vector<float>& ranges,
const Eigen::Matrix4f& transformation,
const utility::device_vector<float>& intensities) {
if (ranges.size() != num_steps_) {
utility::LogError("[AddRanges] Invalid size of input ranges.");
return *this;
}
if (HasIntensities() && ranges.size() != intensities.size()) {
utility::LogError("[AddRanges] Invalid size of intensities.");
return *this;
}
bool add_intensities =
!intensities.empty() && ranges.size() == intensities.size();
int end = bottom_ % num_max_scans_;
if (bottom_ + 1 <= num_max_scans_) {
ranges_.insert(ranges_.end(), ranges.begin(), ranges.end());
if (add_intensities)
intensities_.insert(intensities_.end(), intensities.begin(),
intensities.end());
origins_.push_back(transformation);
bottom_++;
} else {
thrust::copy_n(ranges.begin(), num_steps_,
ranges_.begin() + end * num_steps_);
if (add_intensities)
thrust::copy_n(intensities.begin(), num_steps_,
intensities_.begin() + end * num_steps_);
origins_[end] = transformation;
top_++;
bottom_++;
}
return *this;
}
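// AddRanges appends scans until the buffer holds num_max_scans_ of them; after that
// it overwrites the slot at bottom_ % num_max_scans_ and advances both top_ and
// bottom_, so the buffer always exposes the most recent num_max_scans_ scans.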
LaserScanBuffer& LaserScanBuffer::AddRanges(
const utility::pinned_host_vector<float>& ranges,
const Eigen::Matrix4f& transformation,
const utility::pinned_host_vector<float>& intensities) {
utility::device_vector<float> d_ranges(ranges.size());
cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(d_ranges.data()),
ranges.data(), ranges.size() * sizeof(float),
cudaMemcpyHostToDevice));
utility::device_vector<float> d_intensities(intensities.size());
cudaSafeCall(cudaMemcpy(
thrust::raw_pointer_cast(d_intensities.data()), intensities.data(),
intensities.size() * sizeof(float), cudaMemcpyHostToDevice));
return AddRanges(d_ranges, transformation, d_intensities);
}
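// Minimal host-side usage sketch (hypothetical sizes; assumes the cupoch headers
// declaring LaserScanBuffer and utility::pinned_host_vector are included):
//
//   geometry::LaserScanBuffer buffer(/*num_steps=*/360, /*num_max_scans=*/10,
//                                    /*min_angle=*/-M_PI, /*max_angle=*/M_PI);
//   utility::pinned_host_vector<float> scan(360, 1.0f);
//   utility::pinned_host_vector<float> no_intensities;
//   buffer.AddRanges(scan, Eigen::Matrix4f::Identity(), no_intensities);
//   auto filtered = buffer.RangeFilter(0.1f, 30.0f);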
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::RangeFilter(
float min_range, float max_range) const {
auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_,
min_angle_, max_angle_);
if (max_range <= min_range) {
utility::LogError(
"[RangeFilter] Invalid parameter with min_range greater than "
"max_range.");
}
out->ranges_.resize(ranges_.size());
out->top_ = top_;
out->bottom_ = bottom_;
thrust::transform(
ranges_.begin(), ranges_.end(), out->ranges_.begin(),
[min_range, max_range] __device__(float r) {
return (r < min_range || r > max_range)
? std::numeric_limits<float>::quiet_NaN()
: r;
});
return out;
}
std::shared_ptr<LaserScanBuffer> LaserScanBuffer::ScanShadowsFilter(
float min_angle,
float max_angle,
int window,
int neighbors,
bool remove_shadow_start_point) const {
auto out = std::make_shared<LaserScanBuffer>(*this);
auto minmax_tan = TangentMinMax(min_angle, max_angle);
apply_scan_shadow_filter_functor func(
thrust::raw_pointer_cast(ranges_.data()), minmax_tan.first,
minmax_tan.second, GetAngleIncrement(), num_steps_, window,
neighbors, remove_shadow_start_point,
thrust::raw_pointer_cast(out->ranges_.data()));
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(ranges_.size()), func);
return out;
}
} // namespace geometry
} // namespace cupoch
/******************************************************************************
 * Test of iterator utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <iterator>
#include <stdio.h>
#include <typeinfo>
#include <cub/iterator/arg_index_input_iterator.cuh>
#include <cub/iterator/cache_modified_input_iterator.cuh>
#include <cub/iterator/cache_modified_output_iterator.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/tex_obj_input_iterator.cuh>
#include <cub/iterator/tex_ref_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_type.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
template <typename T>
struct TransformOp
{
// Increment transform
__host__ __device__ __forceinline__ T operator()(T input) const
{
T addend;
InitValue(INTEGER_SEED, addend, 1);
return input + addend;
}
};
struct SelectOp
{
template <typename T>
__host__ __device__ __forceinline__ bool operator()(T input)
{
return true;
}
};
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Test random access input iterator
*/
template <
typename InputIteratorT,
typename T>
__global__ void Kernel(
InputIteratorT d_in,
T *d_out,
InputIteratorT *d_itrs)
{
d_out[0] = *d_in; // Value at offset 0
d_out[1] = d_in[100]; // Value at offset 100
d_out[2] = *(d_in + 1000); // Value at offset 1000
d_out[3] = *(d_in + 10000); // Value at offset 10000
d_in++;
d_out[4] = d_in[0]; // Value at offset 1
d_in += 20;
d_out[5] = d_in[0]; // Value at offset 21
d_itrs[0] = d_in; // Iterator at offset 21
d_in -= 10;
d_out[6] = d_in[0]; // Value at offset 11;
d_in -= 11;
d_out[7] = d_in[0]; // Value at offset 0
d_itrs[1] = d_in; // Iterator at offset 0
}
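// The kernel above probes the iterator at offsets 0, 100, 1000, 10000, 1, 21, 11
// and 0 (exercising dereference, subscript, +, ++, += and -=) and also stores the
// iterator itself after net offsets of +21 and 0; Test() below compares the eight
// values against h_reference and the two stored iterators against d_in + 21 and
// d_in.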
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Run iterator test on device
*/
template <
typename InputIteratorT,
typename T,
int TEST_VALUES>
void Test(
InputIteratorT d_in,
T (&h_reference)[TEST_VALUES])
{
// Allocate device arrays
T *d_out = NULL;
InputIteratorT *d_itrs = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * TEST_VALUES));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_itrs, sizeof(InputIteratorT) * 2));
int compare;
// Run unguarded kernel
Kernel<<<1, 1>>>(d_in, d_out, d_itrs);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Check results
compare = CompareDeviceResults(h_reference, d_out, TEST_VALUES, g_verbose, g_verbose);
printf("\tValues: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check iterator at offset 21
InputIteratorT h_itr = d_in + 21;
compare = CompareDeviceResults(&h_itr, d_itrs, 1, g_verbose, g_verbose);
printf("\tIterators: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check iterator at offset 0
compare = CompareDeviceResults(&d_in, d_itrs + 1, 1, g_verbose, g_verbose);
printf("\tIterators: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_itrs) CubDebugExit(g_allocator.DeviceFree(d_itrs));
}
/**
* Test constant iterator
*/
template <typename T>
void TestConstant(T base)
{
printf("\nTesting constant iterator on type %s (base: %lld)\n", typeid(T).name(), (unsigned long long) (base)); fflush(stdout);
//
// Test iterator manipulation in kernel
//
T h_reference[8] = {base, base, base, base, base, base, base, base};
ConstantInputIterator<T> d_itr(base);
Test(d_itr, h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
int copy_items = 100;
T *h_copy = new T[copy_items];
T *d_copy = NULL;
for (int i = 0; i < copy_items; ++i)
h_copy[i] = d_itr[i];
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * copy_items));
thrust::device_ptr<T> d_copy_wrapper(d_copy);
thrust::copy_if(d_itr, d_itr + copy_items, d_copy_wrapper, SelectOp());
int compare = CompareDeviceResults(h_copy, d_copy, copy_items, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
if (h_copy) delete[] h_copy;
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
}
/**
* Test counting iterator
*/
template <typename T>
void TestCounting(T base)
{
printf("\nTesting counting iterator on type %s (base: %d) \n", typeid(T).name(), int(base)); fflush(stdout);
//
// Test iterator manipulation in kernel
//
// Initialize reference data
T h_reference[8];
h_reference[0] = base + 0; // Value at offset 0
h_reference[1] = base + 100; // Value at offset 100
h_reference[2] = base + 1000; // Value at offset 1000
h_reference[3] = base + 10000; // Value at offset 10000
h_reference[4] = base + 1; // Value at offset 1
h_reference[5] = base + 21; // Value at offset 21
h_reference[6] = base + 11; // Value at offset 11
h_reference[7] = base + 0; // Value at offset 0;
CountingInputIterator<T> d_itr(base);
Test(d_itr, h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
unsigned long long max_items = ((1ull << ((sizeof(T) * 8) - 1)) - 1);
size_t copy_items = (size_t) CUB_MIN(max_items - base, 100); // potential issue with differencing overflows when T is a smaller type than can handle the offset
T *h_copy = new T[copy_items];
T *d_copy = NULL;
for (unsigned long long i = 0; i < copy_items; ++i)
h_copy[i] = d_itr[i];
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * copy_items));
thrust::device_ptr<T> d_copy_wrapper(d_copy);
thrust::copy_if(d_itr, d_itr + copy_items, d_copy_wrapper, SelectOp());
int compare = CompareDeviceResults(h_copy, d_copy, copy_items, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
if (h_copy) delete[] h_copy;
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
}
/**
* Test modified iterator
*/
template <typename T, typename CastT>
void TestModified()
{
printf("\nTesting cache-modified iterator on type %s\n", typeid(T).name()); fflush(stdout);
//
// Test iterator manipulation in kernel
//
const unsigned int TEST_VALUES = 11000;
T *h_data = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
{
RandomBits(h_data[i]);
}
// Allocate device arrays
T *d_data = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES));
CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice));
// Initialize reference data
T h_reference[8];
h_reference[0] = h_data[0]; // Value at offset 0
h_reference[1] = h_data[100]; // Value at offset 100
h_reference[2] = h_data[1000]; // Value at offset 1000
h_reference[3] = h_data[10000]; // Value at offset 10000
h_reference[4] = h_data[1]; // Value at offset 1
h_reference[5] = h_data[21]; // Value at offset 21
h_reference[6] = h_data[11]; // Value at offset 11
h_reference[7] = h_data[0]; // Value at offset 0;
Test(CacheModifiedInputIterator<LOAD_DEFAULT, T>((CastT*) d_data), h_reference);
Test(CacheModifiedInputIterator<LOAD_CA, T>((CastT*) d_data), h_reference);
Test(CacheModifiedInputIterator<LOAD_CG, T>((CastT*) d_data), h_reference);
Test(CacheModifiedInputIterator<LOAD_CS, T>((CastT*) d_data), h_reference);
Test(CacheModifiedInputIterator<LOAD_CV, T>((CastT*) d_data), h_reference);
Test(CacheModifiedInputIterator<LOAD_LDG, T>((CastT*) d_data), h_reference);
Test(CacheModifiedInputIterator<LOAD_VOLATILE, T>((CastT*) d_data), h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
T *d_copy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES));
CacheModifiedInputIterator<LOAD_CG, T> d_in_itr((CastT*) d_data);
CacheModifiedOutputIterator<STORE_CG, T> d_out_itr((CastT*) d_copy);
thrust::copy_if(d_in_itr, d_in_itr + TEST_VALUES, d_out_itr, SelectOp());
int compare = CompareDeviceResults(h_data, d_copy, TEST_VALUES, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
if (h_data) delete[] h_data;
if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data));
}
/**
* Test transform iterator
*/
template <typename T, typename CastT>
void TestTransform()
{
printf("\nTesting transform iterator on type %s\n", typeid(T).name()); fflush(stdout);
//
// Test iterator manipulation in kernel
//
const unsigned int TEST_VALUES = 11000;
T *h_data = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
{
InitValue(INTEGER_SEED, h_data[i], i);
}
// Allocate device arrays
T *d_data = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES));
CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice));
TransformOp<T> op;
// Initialize reference data
T h_reference[8];
h_reference[0] = op(h_data[0]); // Value at offset 0
h_reference[1] = op(h_data[100]); // Value at offset 100
h_reference[2] = op(h_data[1000]); // Value at offset 1000
h_reference[3] = op(h_data[10000]); // Value at offset 10000
h_reference[4] = op(h_data[1]); // Value at offset 1
h_reference[5] = op(h_data[21]); // Value at offset 21
h_reference[6] = op(h_data[11]); // Value at offset 11
h_reference[7] = op(h_data[0]); // Value at offset 0;
TransformInputIterator<T, TransformOp<T>, CastT*> d_itr((CastT*) d_data, op);
Test(d_itr, h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
T *h_copy = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
h_copy[i] = op(h_data[i]);
T *d_copy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES));
thrust::device_ptr<T> d_copy_wrapper(d_copy);
thrust::copy_if(d_itr, d_itr + TEST_VALUES, d_copy_wrapper, SelectOp());
int compare = CompareDeviceResults(h_copy, d_copy, TEST_VALUES, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_copy) delete[] h_copy;
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
if (h_data) delete[] h_data;
if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data));
}
/**
* Test tex-obj texture iterator
*/
template <typename T, typename CastT>
void TestTexObj()
{
printf("\nTesting tex-obj iterator on type %s\n", typeid(T).name()); fflush(stdout);
//
// Test iterator manipulation in kernel
//
const unsigned int TEST_VALUES = 11000;
const unsigned int DUMMY_OFFSET = 500;
const unsigned int DUMMY_TEST_VALUES = TEST_VALUES - DUMMY_OFFSET;
T *h_data = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
{
RandomBits(h_data[i]);
}
// Allocate device arrays
T *d_data = NULL;
T *d_dummy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES));
CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_dummy, sizeof(T) * DUMMY_TEST_VALUES));
CubDebugExit(cudaMemcpy(d_dummy, h_data + DUMMY_OFFSET, sizeof(T) * DUMMY_TEST_VALUES, cudaMemcpyHostToDevice));
// Initialize reference data
T h_reference[8];
h_reference[0] = h_data[0]; // Value at offset 0
h_reference[1] = h_data[100]; // Value at offset 100
h_reference[2] = h_data[1000]; // Value at offset 1000
h_reference[3] = h_data[10000]; // Value at offset 10000
h_reference[4] = h_data[1]; // Value at offset 1
h_reference[5] = h_data[21]; // Value at offset 21
h_reference[6] = h_data[11]; // Value at offset 11
h_reference[7] = h_data[0]; // Value at offset 0;
// Create and bind obj-based test iterator
TexObjInputIterator<T> d_obj_itr;
CubDebugExit(d_obj_itr.BindTexture((CastT*) d_data, sizeof(T) * TEST_VALUES));
Test(d_obj_itr, h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
T *d_copy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES));
thrust::device_ptr<T> d_copy_wrapper(d_copy);
CubDebugExit(cudaMemset(d_copy, 0, sizeof(T) * TEST_VALUES));
thrust::copy_if(d_obj_itr, d_obj_itr + TEST_VALUES, d_copy_wrapper, SelectOp());
int compare = CompareDeviceResults(h_data, d_copy, TEST_VALUES, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
CubDebugExit(d_obj_itr.UnbindTexture());
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
if (h_data) delete[] h_data;
if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data));
if (d_dummy) CubDebugExit(g_allocator.DeviceFree(d_dummy));
}
#if CUDA_VERSION >= 5050
/**
* Test tex-ref texture iterator
*/
template <typename T, typename CastT>
void TestTexRef()
{
printf("\nTesting tex-ref iterator on type %s\n", typeid(T).name()); fflush(stdout);
//
// Test iterator manipulation in kernel
//
const unsigned int TEST_VALUES = 11000;
const unsigned int DUMMY_OFFSET = 500;
const unsigned int DUMMY_TEST_VALUES = TEST_VALUES - DUMMY_OFFSET;
T *h_data = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
{
RandomBits(h_data[i]);
}
// Allocate device arrays
T *d_data = NULL;
T *d_dummy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES));
CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_dummy, sizeof(T) * DUMMY_TEST_VALUES));
CubDebugExit(cudaMemcpy(d_dummy, h_data + DUMMY_OFFSET, sizeof(T) * DUMMY_TEST_VALUES, cudaMemcpyHostToDevice));
// Initialize reference data
T h_reference[8];
h_reference[0] = h_data[0]; // Value at offset 0
h_reference[1] = h_data[100]; // Value at offset 100
h_reference[2] = h_data[1000]; // Value at offset 1000
h_reference[3] = h_data[10000]; // Value at offset 10000
h_reference[4] = h_data[1]; // Value at offset 1
h_reference[5] = h_data[21]; // Value at offset 21
h_reference[6] = h_data[11]; // Value at offset 11
h_reference[7] = h_data[0]; // Value at offset 0;
// Create and bind ref-based test iterator
TexRefInputIterator<T, __LINE__> d_ref_itr;
CubDebugExit(d_ref_itr.BindTexture((CastT*) d_data, sizeof(T) * TEST_VALUES));
    // Create and bind a dummy iterator of the same type to check for interference
TexRefInputIterator<T, __LINE__> d_ref_itr2;
CubDebugExit(d_ref_itr2.BindTexture((CastT*) d_dummy, sizeof(T) * DUMMY_TEST_VALUES));
Test(d_ref_itr, h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
T *d_copy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES));
thrust::device_ptr<T> d_copy_wrapper(d_copy);
CubDebugExit(cudaMemset(d_copy, 0, sizeof(T) * TEST_VALUES));
thrust::copy_if(d_ref_itr, d_ref_itr + TEST_VALUES, d_copy_wrapper, SelectOp());
int compare = CompareDeviceResults(h_data, d_copy, TEST_VALUES, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
CubDebugExit(d_ref_itr.UnbindTexture());
CubDebugExit(d_ref_itr2.UnbindTexture());
if (h_data) delete[] h_data;
if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data));
if (d_dummy) CubDebugExit(g_allocator.DeviceFree(d_dummy));
}
/**
* Test texture transform iterator
*/
template <typename T, typename CastT>
void TestTexTransform()
{
printf("\nTesting tex-transform iterator on type %s\n", typeid(T).name()); fflush(stdout);
//
// Test iterator manipulation in kernel
//
const unsigned int TEST_VALUES = 11000;
T *h_data = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
{
InitValue(INTEGER_SEED, h_data[i], i);
}
// Allocate device arrays
T *d_data = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES));
CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice));
TransformOp<T> op;
// Initialize reference data
T h_reference[8];
h_reference[0] = op(h_data[0]); // Value at offset 0
h_reference[1] = op(h_data[100]); // Value at offset 100
h_reference[2] = op(h_data[1000]); // Value at offset 1000
h_reference[3] = op(h_data[10000]); // Value at offset 10000
h_reference[4] = op(h_data[1]); // Value at offset 1
h_reference[5] = op(h_data[21]); // Value at offset 21
h_reference[6] = op(h_data[11]); // Value at offset 11
h_reference[7] = op(h_data[0]); // Value at offset 0;
// Create and bind texture iterator
typedef TexRefInputIterator<T, __LINE__> TextureIterator;
TextureIterator d_tex_itr;
CubDebugExit(d_tex_itr.BindTexture((CastT*) d_data, sizeof(T) * TEST_VALUES));
// Create transform iterator
TransformInputIterator<T, TransformOp<T>, TextureIterator> xform_itr(d_tex_itr, op);
Test(xform_itr, h_reference);
#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer
//
// Test with thrust::copy_if()
//
T *h_copy = new T[TEST_VALUES];
for (int i = 0; i < TEST_VALUES; ++i)
h_copy[i] = op(h_data[i]);
T *d_copy = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES));
thrust::device_ptr<T> d_copy_wrapper(d_copy);
thrust::copy_if(xform_itr, xform_itr + TEST_VALUES, d_copy_wrapper, SelectOp());
int compare = CompareDeviceResults(h_copy, d_copy, TEST_VALUES, g_verbose, g_verbose);
printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_copy) delete[] h_copy;
if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy));
#endif // THRUST_VERSION
CubDebugExit(d_tex_itr.UnbindTexture());
if (h_data) delete[] h_data;
if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data));
}
#endif // CUDA_VERSION
/**
* Run non-integer tests
*/
template <typename T, typename CastT>
void Test(Int2Type<false> is_integer)
{
TestModified<T, CastT>();
TestTransform<T, CastT>();
#if CUB_CDP
// Test tex-obj iterators if CUDA dynamic parallelism enabled
    TestTexObj<T, CastT>();
#endif // CUB_CDP
#if CUDA_VERSION >= 5050
// Test tex-ref iterators for CUDA 5.5
TestTexRef<T, CastT>();
TestTexTransform<T, CastT>();
#endif // CUDA_VERSION
}
/**
* Run integer tests
*/
template <typename T, typename CastT>
void Test(Int2Type<true> is_integer)
{
TestConstant<T>(0);
TestConstant<T>(99);
TestCounting<T>(0);
TestCounting<T>(99);
// Run non-integer tests
Test<T, CastT>(Int2Type<false>());
}
/**
* Run tests
*/
template <typename T>
void Test()
{
enum {
IS_INTEGER = (Traits<T>::CATEGORY == SIGNED_INTEGER) || (Traits<T>::CATEGORY == UNSIGNED_INTEGER)
};
// Test non-const type
Test<T, T>(Int2Type<IS_INTEGER>());
    // Test const type
Test<T, const T>(Int2Type<IS_INTEGER>());
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
// Evaluate different data types
Test<char>();
Test<short>();
Test<int>();
Test<long>();
Test<long long>();
Test<float>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double>();
Test<char2>();
Test<short2>();
Test<int2>();
Test<long2>();
Test<longlong2>();
Test<float2>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double2>();
Test<char3>();
Test<short3>();
Test<int3>();
Test<long3>();
Test<longlong3>();
Test<float3>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double3>();
Test<char4>();
Test<short4>();
Test<int4>();
Test<long4>();
Test<longlong4>();
Test<float4>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double4>();
Test<TestFoo>();
Test<TestBar>();
printf("\nTest complete\n"); fflush(stdout);
return 0;
}
#define AS_U32(addr) *((uint32_t*)(addr))
#define AS_UINT2(addr) *((uint2*)(addr))
#define AS_UINT4(addr) *((uint4*)(addr))
#define AS_UL2(addr) *((ulonglong2*)(addr))
#define t_fn0(x) (sharedMemory[x])
#define t_fn1(x) (sharedMemory[0x100U | (x)])
#define t_fn2(x) (sharedMemory[0x200U | (x)])
#define t_fn3(x) (sharedMemory[0x300U | (x)])
#define round(shared, out, x, k) \
out[0] = (k)[0] ^ (t_fn0(x[0] & 0xff) ^ t_fn1((x[1] >> 8) & 0xff) ^ t_fn2((x[2] >> 16) & 0xff) ^ t_fn3((x[3] >> 24) & 0xff)); \
out[1] = (k)[1] ^ (t_fn0(x[1] & 0xff) ^ t_fn1((x[2] >> 8) & 0xff) ^ t_fn2((x[3] >> 16) & 0xff) ^ t_fn3((x[0] >> 24) & 0xff)); \
out[2] = (k)[2] ^ (t_fn0(x[2] & 0xff) ^ t_fn1((x[3] >> 8) & 0xff) ^ t_fn2((x[0] >> 16) & 0xff) ^ t_fn3((x[1] >> 24) & 0xff)); \
out[3] = (k)[3] ^ (t_fn0(x[3] & 0xff) ^ t_fn1((x[0] >> 8) & 0xff) ^ t_fn2((x[1] >> 16) & 0xff) ^ t_fn3((x[2] >> 24) & 0xff));
#define round_u4(shared, out, in, k) \
((uint32_t*)out)[0] = (k)[0] ^ t_fn0(in[0].x) ^ t_fn1(in[1].y) ^ t_fn2(in[2].z) ^ t_fn3(in[3].w); \
((uint32_t*)out)[1] = (k)[1] ^ t_fn0(in[1].x) ^ t_fn1(in[2].y) ^ t_fn2(in[3].z) ^ t_fn3(in[0].w); \
((uint32_t*)out)[2] = (k)[2] ^ t_fn0(in[2].x) ^ t_fn1(in[3].y) ^ t_fn2(in[0].z) ^ t_fn3(in[1].w); \
((uint32_t*)out)[3] = (k)[3] ^ t_fn0(in[3].x) ^ t_fn1(in[0].y) ^ t_fn2(in[1].z) ^ t_fn3(in[2].w);
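// round()/round_u4() perform one T-table AES encryption round: each 32-bit output
// word XORs a round-key word with four table lookups (tables 0..3 at shared-memory
// offsets 0x000/0x100/0x200/0x300), indexed by the state bytes taken in the usual
// ShiftRows pattern.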
#ifdef __INTELLISENSE__
#define __byte_perm(a,b,c) a
#endif
#define OFF32_0(x) (x & 0xFFu)
#define OFF32_1(x) __byte_perm(x, 0x01, 0x5541)
#define OFF32_2(x) __byte_perm(x, 0x02, 0x5542)
#define OFF32_3(x) __byte_perm(x, 0x03, 0x5543)
#define SHARED_0(x) sharedMemory[OFF32_0(x)]
#define SHARED_1(x) sharedMemory[OFF32_1(x)]
#define SHARED_2(x) sharedMemory[OFF32_2(x)]
#define SHARED_3(x) sharedMemory[OFF32_3(x)]
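// OFF32_k(x) computes "table k offset | byte k of x" in a single __byte_perm: e.g.
// selector 0x5541 places byte 1 of x in the low result byte and the constant 0x01
// in the next byte (upper bytes zeroed), so OFF32_1(x) == 0x100 | ((x >> 8) & 0xff)
// and SHARED_1(x) is the same lookup as t_fn1((x >> 8) & 0xff) above, without the
// explicit shift and mask.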
__device__ __forceinline__
void cn_aes_single_round(uint32_t * const sharedMemory, uint32_t * const in, uint32_t * out, uint32_t* expandedKey)
{
asm("// aes_single_round");
out[0] = expandedKey[0] ^ SHARED_0(in[0]) ^ SHARED_1(in[1]) ^ SHARED_2(in[2]) ^ SHARED_3(in[3]);
out[1] = expandedKey[1] ^ SHARED_0(in[1]) ^ SHARED_1(in[2]) ^ SHARED_2(in[3]) ^ SHARED_3(in[0]);
out[2] = expandedKey[2] ^ SHARED_0(in[2]) ^ SHARED_1(in[3]) ^ SHARED_2(in[0]) ^ SHARED_3(in[1]);
out[3] = expandedKey[3] ^ SHARED_0(in[3]) ^ SHARED_1(in[0]) ^ SHARED_2(in[1]) ^ SHARED_3(in[2]);
}
//
#ifdef _WIN64
/* do a mul.wide.u32 to prevent a shl + cvt 32 to 64 on ld.shared [ptr] */
#define OFF8_0(x) (x & 0xFFu) * sizeof(uint32_t)
#define OFF8_1(x) __byte_perm(x, 0x01, 0x5541) * sizeof(uint32_t)
#define OFF8_2(x) __byte_perm(x, 0x02, 0x5542) * sizeof(uint32_t)
#define OFF8_3(x) __byte_perm(x, 0x03, 0x5543) * sizeof(uint32_t)
#else
#define OFF8_0(x) (x & 0xFFu) << 2
#define OFF8_1(x) __byte_perm(x, 0x01, 0x5541) << 2
#define OFF8_2(x) __byte_perm(x, 0x02, 0x5542) << 2
#define OFF8_3(x) __byte_perm(x, 0x03, 0x5543) << 2
#endif
#define SHAR8_0(x) AS_U32(&sharedMemory[OFF8_0(x)])
#define SHAR8_1(x) AS_U32(&sharedMemory[OFF8_1(x)])
#define SHAR8_2(x) AS_U32(&sharedMemory[OFF8_2(x)])
#define SHAR8_3(x) AS_U32(&sharedMemory[OFF8_3(x)])
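// Byte-pointer variant: here sharedMemory is a uint8_t*, so OFF8_k(x) yields a byte
// offset (word index * 4) rather than a word index; on _WIN64 the multiply by
// sizeof(uint32_t) keeps the mul.wide.u32 form noted above, elsewhere a shift by 2
// is used.  SHAR8_k(x) then reinterprets the addressed bytes as a uint32_t.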
__device__ __forceinline__
void cn_aes_single_round_b(uint8_t * const sharedMemory, void * const long_state, const uint4 key, uint4 *res)
{
asm("// aes_single_round_b");
uint4 in = AS_UINT4(long_state);
*res = key;
res->x ^= SHAR8_0(in.x) ^ SHAR8_1(in.y) ^ SHAR8_2(in.z) ^ SHAR8_3(in.w);
res->y ^= SHAR8_0(in.y) ^ SHAR8_1(in.z) ^ SHAR8_2(in.w) ^ SHAR8_3(in.x);
res->z ^= SHAR8_0(in.z) ^ SHAR8_1(in.w) ^ SHAR8_2(in.x) ^ SHAR8_3(in.y);
res->w ^= SHAR8_0(in.w) ^ SHAR8_1(in.x) ^ SHAR8_2(in.y) ^ SHAR8_3(in.z);
}
#define round_perm(shared, out, in, k) \
out[0] = (k)[0] ^ SHARED_0(in[0]) ^ SHARED_1(in[1]) ^ SHARED_2(in[2]) ^ SHARED_3(in[3]); \
out[1] = (k)[1] ^ SHARED_0(in[1]) ^ SHARED_1(in[2]) ^ SHARED_2(in[3]) ^ SHARED_3(in[0]); \
out[2] = (k)[2] ^ SHARED_0(in[2]) ^ SHARED_1(in[3]) ^ SHARED_2(in[0]) ^ SHARED_3(in[1]); \
out[3] = (k)[3] ^ SHARED_0(in[3]) ^ SHARED_1(in[0]) ^ SHARED_2(in[1]) ^ SHARED_3(in[2]);
__device__ __forceinline__
void cn_aes_pseudo_round_mut(const uint32_t * sharedMemory, uint32_t * val, uint32_t const * expandedKey)
{
asm("// aes_pseudo_round_mut");
uint32_t b[4];
round_perm(sharedMemory, b, val, expandedKey);
round_perm(sharedMemory, val, b, expandedKey + (1 * N_COLS));
round_perm(sharedMemory, b, val, expandedKey + (2 * N_COLS));
round_perm(sharedMemory, val, b, expandedKey + (3 * N_COLS));
round_perm(sharedMemory, b, val, expandedKey + (4 * N_COLS));
round_perm(sharedMemory, val, b, expandedKey + (5 * N_COLS));
round_perm(sharedMemory, b, val, expandedKey + (6 * N_COLS));
round_perm(sharedMemory, val, b, expandedKey + (7 * N_COLS));
round_perm(sharedMemory, b, val, expandedKey + (8 * N_COLS));
round_perm(sharedMemory, val, b, expandedKey + (9 * N_COLS));
}
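// Ten key-whitened T-table rounds applied in place: round_perm ping-pongs the state
// between b and val, so the result of the final round lands back in val.
// expandedKey is expected to hold ten round keys of N_COLS (presumably 4) 32-bit
// words each.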
static __forceinline__ __device__ uint4 operator ^ (const uint4 &a, const uint4 &b) {
return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
#define round_perm4(in, k) {\
uint4 tmp; \
tmp.x = SHARED_0(in.x) ^ SHARED_1(in.y) ^ SHARED_2(in.z) ^ SHARED_3(in.w); \
tmp.y = SHARED_0(in.y) ^ SHARED_1(in.z) ^ SHARED_2(in.w) ^ SHARED_3(in.x); \
tmp.z = SHARED_0(in.z) ^ SHARED_1(in.w) ^ SHARED_2(in.x) ^ SHARED_3(in.y); \
tmp.w = SHARED_0(in.w) ^ SHARED_1(in.x) ^ SHARED_2(in.y) ^ SHARED_3(in.z); \
val = tmp ^ key[k]; \
}
__device__ __forceinline__
void cn_aes_pseudo_round_mut_uint4(uint32_t * const sharedMemory, uint4 &val, uint4 const key[10])
{
asm("// aes_pseudo_round_mut_uint4");
round_perm4(val, 0);
round_perm4(val, 1);
round_perm4(val, 2);
round_perm4(val, 3);
round_perm4(val, 4);
round_perm4(val, 5);
round_perm4(val, 6);
round_perm4(val, 7);
round_perm4(val, 8);
round_perm4(val, 9);
}
/*
__device__ __forceinline__
void cn_aes_gpu_init2(uint32_t* sharedMemory)
{
#if 0
if(blockDim.x >= 64)
{
if(threadIdx.x < 64) {
#define thrX (threadIdx.x << 2U) // ensure offsets aligned (16) to vector
#pragma unroll 4
for (uint32_t i = 0; i < 1024U; i += 256U) // 32x32 = 1024, 4 * 256 also
AS_UINT4(&sharedMemory[i + thrX]) = AS_UINT4(&d_t_fn[i + thrX]);
}
} else
#endif
if(blockDim.x >= 32) {
if(threadIdx.x < 32) {
#if 0
#pragma unroll 32
for(uint32_t i = 0; i < 1024; i += 32)
sharedMemory[threadIdx.x + i] = d_t_fn[threadIdx.x + i];
#else
#define thrX (threadIdx.x << 2U) // ensure offsets aligned (16) to vector
#pragma unroll 8
for (uint32_t i = 0; i < 1024; i += 128U) // 32x32 = 1024, 8 * 128 also
AS_UINT4(&sharedMemory[i + thrX]) = AS_UINT4(&d_t_fn[i + thrX]);
#endif
}
} else {
if(threadIdx.x < 4) {
#if 0
for (uint32_t i = 0; i < 1024; i += 4)
sharedMemory[threadIdx.x + i] = d_t_fn[threadIdx.x + i];
#else
#define thrX (threadIdx.x << 2U) // ensure offsets aligned (16) to vector
#pragma unroll 64
for (uint32_t i = 0; i < 1024; i += 16U)
AS_UINT4(&sharedMemory[i + thrX]) = AS_UINT4(&d_t_fn[i + thrX]);
#endif
}
}
}
*/
__device__ __forceinline__
void cn_aes_gpu_init(uint32_t* sharedMemory)
{
// AES 0
switch (threadIdx.x) {
case 0:
AS_UL2(&sharedMemory[0x000]) = make_ulonglong2(0x847c7cf8a56363c6, 0x8d7b7bf6997777ee);
AS_UL2(&sharedMemory[0x004]) = make_ulonglong2(0xbd6b6bd60df2f2ff, 0x54c5c591b16f6fde);
AS_UL2(&sharedMemory[0x008]) = make_ulonglong2(0x0301010250303060, 0x7d2b2b56a96767ce);
AS_UL2(&sharedMemory[0x00C]) = make_ulonglong2(0x62d7d7b519fefee7, 0x9a7676ece6abab4d);
AS_UL2(&sharedMemory[0x010]) = make_ulonglong2(0x9d82821f45caca8f, 0x877d7dfa40c9c989);
AS_UL2(&sharedMemory[0x014]) = make_ulonglong2(0xeb5959b215fafaef, 0x0bf0f0fbc947478e);
AS_UL2(&sharedMemory[0x018]) = make_ulonglong2(0x67d4d4b3ecadad41, 0xeaafaf45fda2a25f);
AS_UL2(&sharedMemory[0x01C]) = make_ulonglong2(0xf7a4a453bf9c9c23, 0x5bc0c09b967272e4);
break;
case 1:
AS_UL2(&sharedMemory[0x020]) = make_ulonglong2(0x1cfdfde1c2b7b775, 0x6a26264cae93933d);
AS_UL2(&sharedMemory[0x024]) = make_ulonglong2(0x413f3f7e5a36366c, 0x4fcccc8302f7f7f5);
AS_UL2(&sharedMemory[0x028]) = make_ulonglong2(0xf4a5a5515c343468, 0x08f1f1f934e5e5d1);
AS_UL2(&sharedMemory[0x02C]) = make_ulonglong2(0x73d8d8ab937171e2, 0x3f15152a53313162);
AS_UL2(&sharedMemory[0x030]) = make_ulonglong2(0x52c7c7950c040408, 0x5ec3c39d65232346);
AS_UL2(&sharedMemory[0x034]) = make_ulonglong2(0xa196963728181830, 0xb59a9a2f0f05050a);
AS_UL2(&sharedMemory[0x038]) = make_ulonglong2(0x361212240907070e, 0x3de2e2df9b80801b);
AS_UL2(&sharedMemory[0x03C]) = make_ulonglong2(0x6927274e26ebebcd, 0x9f7575eacdb2b27f);
break;
case 2:
AS_UL2(&sharedMemory[0x040]) = make_ulonglong2(0x9e83831d1b090912, 0x2e1a1a34742c2c58);
AS_UL2(&sharedMemory[0x044]) = make_ulonglong2(0xb26e6edc2d1b1b36, 0xfba0a05bee5a5ab4);
AS_UL2(&sharedMemory[0x048]) = make_ulonglong2(0x4d3b3b76f65252a4, 0xceb3b37d61d6d6b7);
AS_UL2(&sharedMemory[0x04C]) = make_ulonglong2(0x3ee3e3dd7b292952, 0x97848413712f2f5e);
AS_UL2(&sharedMemory[0x050]) = make_ulonglong2(0x68d1d1b9f55353a6, 0x2cededc100000000);
AS_UL2(&sharedMemory[0x054]) = make_ulonglong2(0x1ffcfce360202040, 0xed5b5bb6c8b1b179);
AS_UL2(&sharedMemory[0x058]) = make_ulonglong2(0x46cbcb8dbe6a6ad4, 0x4b393972d9bebe67);
AS_UL2(&sharedMemory[0x05C]) = make_ulonglong2(0xd44c4c98de4a4a94, 0x4acfcf85e85858b0);
break;
case 3:
AS_UL2(&sharedMemory[0x060]) = make_ulonglong2(0x2aefefc56bd0d0bb, 0x16fbfbede5aaaa4f);
AS_UL2(&sharedMemory[0x064]) = make_ulonglong2(0xd74d4d9ac5434386, 0x9485851155333366);
AS_UL2(&sharedMemory[0x068]) = make_ulonglong2(0x10f9f9e9cf45458a, 0x817f7ffe06020204);
AS_UL2(&sharedMemory[0x06C]) = make_ulonglong2(0x443c3c78f05050a0, 0xe3a8a84bba9f9f25);
AS_UL2(&sharedMemory[0x070]) = make_ulonglong2(0xfea3a35df35151a2, 0x8a8f8f05c0404080);
AS_UL2(&sharedMemory[0x074]) = make_ulonglong2(0xbc9d9d21ad92923f, 0x04f5f5f148383870);
AS_UL2(&sharedMemory[0x078]) = make_ulonglong2(0xc1b6b677dfbcbc63, 0x6321214275dadaaf);
AS_UL2(&sharedMemory[0x07C]) = make_ulonglong2(0x1affffe530101020, 0x6dd2d2bf0ef3f3fd);
break;
case 4:
AS_UL2(&sharedMemory[0x080]) = make_ulonglong2(0x140c0c184ccdcd81, 0x2fececc335131326);
AS_UL2(&sharedMemory[0x084]) = make_ulonglong2(0xa2979735e15f5fbe, 0x3917172ecc444488);
AS_UL2(&sharedMemory[0x088]) = make_ulonglong2(0xf2a7a75557c4c493, 0x473d3d7a827e7efc);
AS_UL2(&sharedMemory[0x08C]) = make_ulonglong2(0xe75d5dbaac6464c8, 0x957373e62b191932);
AS_UL2(&sharedMemory[0x090]) = make_ulonglong2(0x98818119a06060c0, 0x7fdcdca3d14f4f9e);
AS_UL2(&sharedMemory[0x094]) = make_ulonglong2(0x7e2a2a5466222244, 0x8388880bab90903b);
AS_UL2(&sharedMemory[0x098]) = make_ulonglong2(0x29eeeec7ca46468c, 0x3c141428d3b8b86b);
AS_UL2(&sharedMemory[0x09C]) = make_ulonglong2(0xe25e5ebc79dedea7, 0x76dbdbad1d0b0b16);
break;
case 5:
AS_UL2(&sharedMemory[0x0A0]) = make_ulonglong2(0x563232643be0e0db, 0x1e0a0a144e3a3a74);
AS_UL2(&sharedMemory[0x0A4]) = make_ulonglong2(0x0a06060cdb494992, 0xe45c5cb86c242448);
AS_UL2(&sharedMemory[0x0A8]) = make_ulonglong2(0x6ed3d3bd5dc2c29f, 0xa66262c4efacac43);
AS_UL2(&sharedMemory[0x0AC]) = make_ulonglong2(0xa4959531a8919139, 0x8b7979f237e4e4d3);
AS_UL2(&sharedMemory[0x0B0]) = make_ulonglong2(0x43c8c88b32e7e7d5, 0xb76d6dda5937376e);
AS_UL2(&sharedMemory[0x0B4]) = make_ulonglong2(0x64d5d5b18c8d8d01, 0xe0a9a949d24e4e9c);
AS_UL2(&sharedMemory[0x0B8]) = make_ulonglong2(0xfa5656acb46c6cd8, 0x25eaeacf07f4f4f3);
AS_UL2(&sharedMemory[0x0BC]) = make_ulonglong2(0x8e7a7af4af6565ca, 0x18080810e9aeae47);
break;
case 6:
AS_UL2(&sharedMemory[0x0C0]) = make_ulonglong2(0x887878f0d5baba6f, 0x722e2e5c6f25254a);
AS_UL2(&sharedMemory[0x0C4]) = make_ulonglong2(0xf1a6a657241c1c38, 0x51c6c697c7b4b473);
AS_UL2(&sharedMemory[0x0C8]) = make_ulonglong2(0x7cdddda123e8e8cb, 0x211f1f3e9c7474e8);
AS_UL2(&sharedMemory[0x0CC]) = make_ulonglong2(0xdcbdbd61dd4b4b96, 0x858a8a0f868b8b0d);
AS_UL2(&sharedMemory[0x0D0]) = make_ulonglong2(0x423e3e7c907070e0, 0xaa6666ccc4b5b571);
AS_UL2(&sharedMemory[0x0D4]) = make_ulonglong2(0x05030306d8484890, 0x120e0e1c01f6f6f7);
AS_UL2(&sharedMemory[0x0D8]) = make_ulonglong2(0x5f35356aa36161c2, 0xd0b9b969f95757ae);
AS_UL2(&sharedMemory[0x0DC]) = make_ulonglong2(0x58c1c19991868617, 0xb99e9e27271d1d3a);
break;
case 7:
AS_UL2(&sharedMemory[0x0E0]) = make_ulonglong2(0x13f8f8eb38e1e1d9, 0x33111122b398982b);
AS_UL2(&sharedMemory[0x0E4]) = make_ulonglong2(0x70d9d9a9bb6969d2, 0xa7949433898e8e07);
AS_UL2(&sharedMemory[0x0E8]) = make_ulonglong2(0x221e1e3cb69b9b2d, 0x20e9e9c992878715);
AS_UL2(&sharedMemory[0x0EC]) = make_ulonglong2(0xff5555aa49cece87, 0x7adfdfa578282850);
AS_UL2(&sharedMemory[0x0F0]) = make_ulonglong2(0xf8a1a1598f8c8c03, 0x170d0d1a80898909);
AS_UL2(&sharedMemory[0x0F4]) = make_ulonglong2(0x31e6e6d7dabfbf65, 0xb86868d0c6424284);
AS_UL2(&sharedMemory[0x0F8]) = make_ulonglong2(0xb0999929c3414182, 0x110f0f1e772d2d5a);
AS_UL2(&sharedMemory[0x0FC]) = make_ulonglong2(0xfc5454a8cbb0b07b, 0x3a16162cd6bbbb6d);
break;
}
// AES 1
switch (threadIdx.x) {
case 0:
AS_UL2(&sharedMemory[0x100]) = make_ulonglong2(0x7c7cf8846363c6a5, 0x7b7bf68d7777ee99);
AS_UL2(&sharedMemory[0x104]) = make_ulonglong2(0x6b6bd6bdf2f2ff0d, 0xc5c591546f6fdeb1);
AS_UL2(&sharedMemory[0x108]) = make_ulonglong2(0x0101020330306050, 0x2b2b567d6767cea9);
AS_UL2(&sharedMemory[0x10C]) = make_ulonglong2(0xd7d7b562fefee719, 0x7676ec9aabab4de6);
AS_UL2(&sharedMemory[0x110]) = make_ulonglong2(0x82821f9dcaca8f45, 0x7d7dfa87c9c98940);
AS_UL2(&sharedMemory[0x114]) = make_ulonglong2(0x5959b2ebfafaef15, 0xf0f0fb0b47478ec9);
AS_UL2(&sharedMemory[0x118]) = make_ulonglong2(0xd4d4b367adad41ec, 0xafaf45eaa2a25ffd);
AS_UL2(&sharedMemory[0x11C]) = make_ulonglong2(0xa4a453f79c9c23bf, 0xc0c09b5b7272e496);
break;
case 1:
AS_UL2(&sharedMemory[0x120]) = make_ulonglong2(0xfdfde11cb7b775c2, 0x26264c6a93933dae);
AS_UL2(&sharedMemory[0x124]) = make_ulonglong2(0x3f3f7e4136366c5a, 0xcccc834ff7f7f502);
AS_UL2(&sharedMemory[0x128]) = make_ulonglong2(0xa5a551f43434685c, 0xf1f1f908e5e5d134);
AS_UL2(&sharedMemory[0x12C]) = make_ulonglong2(0xd8d8ab737171e293, 0x15152a3f31316253);
AS_UL2(&sharedMemory[0x130]) = make_ulonglong2(0xc7c795520404080c, 0xc3c39d5e23234665);
AS_UL2(&sharedMemory[0x134]) = make_ulonglong2(0x969637a118183028, 0x9a9a2fb505050a0f);
AS_UL2(&sharedMemory[0x138]) = make_ulonglong2(0x1212243607070e09, 0xe2e2df3d80801b9b);
AS_UL2(&sharedMemory[0x13C]) = make_ulonglong2(0x27274e69ebebcd26, 0x7575ea9fb2b27fcd);
break;
case 2:
AS_UL2(&sharedMemory[0x140]) = make_ulonglong2(0x83831d9e0909121b, 0x1a1a342e2c2c5874);
AS_UL2(&sharedMemory[0x144]) = make_ulonglong2(0x6e6edcb21b1b362d, 0xa0a05bfb5a5ab4ee);
AS_UL2(&sharedMemory[0x148]) = make_ulonglong2(0x3b3b764d5252a4f6, 0xb3b37dced6d6b761);
AS_UL2(&sharedMemory[0x14C]) = make_ulonglong2(0xe3e3dd3e2929527b, 0x848413972f2f5e71);
AS_UL2(&sharedMemory[0x150]) = make_ulonglong2(0xd1d1b9685353a6f5, 0xededc12c00000000);
AS_UL2(&sharedMemory[0x154]) = make_ulonglong2(0xfcfce31f20204060, 0x5b5bb6edb1b179c8);
AS_UL2(&sharedMemory[0x158]) = make_ulonglong2(0xcbcb8d466a6ad4be, 0x3939724bbebe67d9);
AS_UL2(&sharedMemory[0x15C]) = make_ulonglong2(0x4c4c98d44a4a94de, 0xcfcf854a5858b0e8);
break;
case 3:
AS_UL2(&sharedMemory[0x160]) = make_ulonglong2(0xefefc52ad0d0bb6b, 0xfbfbed16aaaa4fe5);
AS_UL2(&sharedMemory[0x164]) = make_ulonglong2(0x4d4d9ad7434386c5, 0x8585119433336655);
AS_UL2(&sharedMemory[0x168]) = make_ulonglong2(0xf9f9e91045458acf, 0x7f7ffe8102020406);
AS_UL2(&sharedMemory[0x16C]) = make_ulonglong2(0x3c3c78445050a0f0, 0xa8a84be39f9f25ba);
AS_UL2(&sharedMemory[0x170]) = make_ulonglong2(0xa3a35dfe5151a2f3, 0x8f8f058a404080c0);
AS_UL2(&sharedMemory[0x174]) = make_ulonglong2(0x9d9d21bc92923fad, 0xf5f5f10438387048);
AS_UL2(&sharedMemory[0x178]) = make_ulonglong2(0xb6b677c1bcbc63df, 0x21214263dadaaf75);
AS_UL2(&sharedMemory[0x17C]) = make_ulonglong2(0xffffe51a10102030, 0xd2d2bf6df3f3fd0e);
break;
case 4:
AS_UL2(&sharedMemory[0x180]) = make_ulonglong2(0x0c0c1814cdcd814c, 0xececc32f13132635);
AS_UL2(&sharedMemory[0x184]) = make_ulonglong2(0x979735a25f5fbee1, 0x17172e39444488cc);
AS_UL2(&sharedMemory[0x188]) = make_ulonglong2(0xa7a755f2c4c49357, 0x3d3d7a477e7efc82);
AS_UL2(&sharedMemory[0x18C]) = make_ulonglong2(0x5d5dbae76464c8ac, 0x7373e6951919322b);
AS_UL2(&sharedMemory[0x190]) = make_ulonglong2(0x818119986060c0a0, 0xdcdca37f4f4f9ed1);
AS_UL2(&sharedMemory[0x194]) = make_ulonglong2(0x2a2a547e22224466, 0x88880b8390903bab);
AS_UL2(&sharedMemory[0x198]) = make_ulonglong2(0xeeeec72946468cca, 0x1414283cb8b86bd3);
AS_UL2(&sharedMemory[0x19C]) = make_ulonglong2(0x5e5ebce2dedea779, 0xdbdbad760b0b161d);
break;
case 5:
AS_UL2(&sharedMemory[0x1A0]) = make_ulonglong2(0x32326456e0e0db3b, 0x0a0a141e3a3a744e);
AS_UL2(&sharedMemory[0x1A4]) = make_ulonglong2(0x06060c0a494992db, 0x5c5cb8e42424486c);
AS_UL2(&sharedMemory[0x1A8]) = make_ulonglong2(0xd3d3bd6ec2c29f5d, 0x6262c4a6acac43ef);
AS_UL2(&sharedMemory[0x1AC]) = make_ulonglong2(0x959531a4919139a8, 0x7979f28be4e4d337);
AS_UL2(&sharedMemory[0x1B0]) = make_ulonglong2(0xc8c88b43e7e7d532, 0x6d6ddab737376e59);
AS_UL2(&sharedMemory[0x1B4]) = make_ulonglong2(0xd5d5b1648d8d018c, 0xa9a949e04e4e9cd2);
AS_UL2(&sharedMemory[0x1B8]) = make_ulonglong2(0x5656acfa6c6cd8b4, 0xeaeacf25f4f4f307);
AS_UL2(&sharedMemory[0x1BC]) = make_ulonglong2(0x7a7af48e6565caaf, 0x08081018aeae47e9);
break;
case 6:
AS_UL2(&sharedMemory[0x1C0]) = make_ulonglong2(0x7878f088baba6fd5, 0x2e2e5c7225254a6f);
AS_UL2(&sharedMemory[0x1C4]) = make_ulonglong2(0xa6a657f11c1c3824, 0xc6c69751b4b473c7);
AS_UL2(&sharedMemory[0x1C8]) = make_ulonglong2(0xdddda17ce8e8cb23, 0x1f1f3e217474e89c);
AS_UL2(&sharedMemory[0x1CC]) = make_ulonglong2(0xbdbd61dc4b4b96dd, 0x8a8a0f858b8b0d86);
AS_UL2(&sharedMemory[0x1D0]) = make_ulonglong2(0x3e3e7c427070e090, 0x6666ccaab5b571c4);
AS_UL2(&sharedMemory[0x1D4]) = make_ulonglong2(0x03030605484890d8, 0x0e0e1c12f6f6f701);
AS_UL2(&sharedMemory[0x1D8]) = make_ulonglong2(0x35356a5f6161c2a3, 0xb9b969d05757aef9);
AS_UL2(&sharedMemory[0x1DC]) = make_ulonglong2(0xc1c1995886861791, 0x9e9e27b91d1d3a27);
break;
case 7:
AS_UL2(&sharedMemory[0x1E0]) = make_ulonglong2(0xf8f8eb13e1e1d938, 0x1111223398982bb3);
AS_UL2(&sharedMemory[0x1E4]) = make_ulonglong2(0xd9d9a9706969d2bb, 0x949433a78e8e0789);
AS_UL2(&sharedMemory[0x1E8]) = make_ulonglong2(0x1e1e3c229b9b2db6, 0xe9e9c92087871592);
AS_UL2(&sharedMemory[0x1EC]) = make_ulonglong2(0x5555aaffcece8749, 0xdfdfa57a28285078);
AS_UL2(&sharedMemory[0x1F0]) = make_ulonglong2(0xa1a159f88c8c038f, 0x0d0d1a1789890980);
AS_UL2(&sharedMemory[0x1F4]) = make_ulonglong2(0xe6e6d731bfbf65da, 0x6868d0b8424284c6);
AS_UL2(&sharedMemory[0x1F8]) = make_ulonglong2(0x999929b0414182c3, 0x0f0f1e112d2d5a77);
AS_UL2(&sharedMemory[0x1FC]) = make_ulonglong2(0x5454a8fcb0b07bcb, 0x16162c3abbbb6dd6);
break;
}
// AES 2
switch (threadIdx.x) {
case 0:
AS_UL2(&sharedMemory[0x200]) = make_ulonglong2(0x7cf8847c63c6a563, 0x7bf68d7b77ee9977);
AS_UL2(&sharedMemory[0x204]) = make_ulonglong2(0x6bd6bd6bf2ff0df2, 0xc59154c56fdeb16f);
AS_UL2(&sharedMemory[0x208]) = make_ulonglong2(0x0102030130605030, 0x2b567d2b67cea967);
AS_UL2(&sharedMemory[0x20C]) = make_ulonglong2(0xd7b562d7fee719fe, 0x76ec9a76ab4de6ab);
AS_UL2(&sharedMemory[0x210]) = make_ulonglong2(0x821f9d82ca8f45ca, 0x7dfa877dc98940c9);
AS_UL2(&sharedMemory[0x214]) = make_ulonglong2(0x59b2eb59faef15fa, 0xf0fb0bf0478ec947);
AS_UL2(&sharedMemory[0x218]) = make_ulonglong2(0xd4b367d4ad41ecad, 0xaf45eaafa25ffda2);
AS_UL2(&sharedMemory[0x21C]) = make_ulonglong2(0xa453f7a49c23bf9c, 0xc09b5bc072e49672);
break;
case 1:
AS_UL2(&sharedMemory[0x220]) = make_ulonglong2(0xfde11cfdb775c2b7, 0x264c6a26933dae93);
AS_UL2(&sharedMemory[0x224]) = make_ulonglong2(0x3f7e413f366c5a36, 0xcc834fccf7f502f7);
AS_UL2(&sharedMemory[0x228]) = make_ulonglong2(0xa551f4a534685c34, 0xf1f908f1e5d134e5);
AS_UL2(&sharedMemory[0x22C]) = make_ulonglong2(0xd8ab73d871e29371, 0x152a3f1531625331);
AS_UL2(&sharedMemory[0x230]) = make_ulonglong2(0xc79552c704080c04, 0xc39d5ec323466523);
AS_UL2(&sharedMemory[0x234]) = make_ulonglong2(0x9637a19618302818, 0x9a2fb59a050a0f05);
AS_UL2(&sharedMemory[0x238]) = make_ulonglong2(0x12243612070e0907, 0xe2df3de2801b9b80);
AS_UL2(&sharedMemory[0x23C]) = make_ulonglong2(0x274e6927ebcd26eb, 0x75ea9f75b27fcdb2);
break;
case 2:
AS_UL2(&sharedMemory[0x240]) = make_ulonglong2(0x831d9e8309121b09, 0x1a342e1a2c58742c);
AS_UL2(&sharedMemory[0x244]) = make_ulonglong2(0x6edcb26e1b362d1b, 0xa05bfba05ab4ee5a);
AS_UL2(&sharedMemory[0x248]) = make_ulonglong2(0x3b764d3b52a4f652, 0xb37dceb3d6b761d6);
AS_UL2(&sharedMemory[0x24C]) = make_ulonglong2(0xe3dd3ee329527b29, 0x841397842f5e712f);
AS_UL2(&sharedMemory[0x250]) = make_ulonglong2(0xd1b968d153a6f553, 0xedc12ced00000000);
AS_UL2(&sharedMemory[0x254]) = make_ulonglong2(0xfce31ffc20406020, 0x5bb6ed5bb179c8b1);
AS_UL2(&sharedMemory[0x258]) = make_ulonglong2(0xcb8d46cb6ad4be6a, 0x39724b39be67d9be);
AS_UL2(&sharedMemory[0x25C]) = make_ulonglong2(0x4c98d44c4a94de4a, 0xcf854acf58b0e858);
break;
case 3:
AS_UL2(&sharedMemory[0x260]) = make_ulonglong2(0xefc52aefd0bb6bd0, 0xfbed16fbaa4fe5aa);
AS_UL2(&sharedMemory[0x264]) = make_ulonglong2(0x4d9ad74d4386c543, 0x8511948533665533);
AS_UL2(&sharedMemory[0x268]) = make_ulonglong2(0xf9e910f9458acf45, 0x7ffe817f02040602);
AS_UL2(&sharedMemory[0x26C]) = make_ulonglong2(0x3c78443c50a0f050, 0xa84be3a89f25ba9f);
AS_UL2(&sharedMemory[0x270]) = make_ulonglong2(0xa35dfea351a2f351, 0x8f058a8f4080c040);
AS_UL2(&sharedMemory[0x274]) = make_ulonglong2(0x9d21bc9d923fad92, 0xf5f104f538704838);
AS_UL2(&sharedMemory[0x278]) = make_ulonglong2(0xb677c1b6bc63dfbc, 0x21426321daaf75da);
AS_UL2(&sharedMemory[0x27C]) = make_ulonglong2(0xffe51aff10203010, 0xd2bf6dd2f3fd0ef3);
break;
case 4:
AS_UL2(&sharedMemory[0x280]) = make_ulonglong2(0x0c18140ccd814ccd, 0xecc32fec13263513);
AS_UL2(&sharedMemory[0x284]) = make_ulonglong2(0x9735a2975fbee15f, 0x172e39174488cc44);
AS_UL2(&sharedMemory[0x288]) = make_ulonglong2(0xa755f2a7c49357c4, 0x3d7a473d7efc827e);
AS_UL2(&sharedMemory[0x28C]) = make_ulonglong2(0x5dbae75d64c8ac64, 0x73e6957319322b19);
AS_UL2(&sharedMemory[0x290]) = make_ulonglong2(0x8119988160c0a060, 0xdca37fdc4f9ed14f);
AS_UL2(&sharedMemory[0x294]) = make_ulonglong2(0x2a547e2a22446622, 0x880b8388903bab90);
AS_UL2(&sharedMemory[0x298]) = make_ulonglong2(0xeec729ee468cca46, 0x14283c14b86bd3b8);
AS_UL2(&sharedMemory[0x29C]) = make_ulonglong2(0x5ebce25edea779de, 0xdbad76db0b161d0b);
break;
case 5:
AS_UL2(&sharedMemory[0x2A0]) = make_ulonglong2(0x32645632e0db3be0, 0x0a141e0a3a744e3a);
AS_UL2(&sharedMemory[0x2A4]) = make_ulonglong2(0x060c0a064992db49, 0x5cb8e45c24486c24);
AS_UL2(&sharedMemory[0x2A8]) = make_ulonglong2(0xd3bd6ed3c29f5dc2, 0x62c4a662ac43efac);
AS_UL2(&sharedMemory[0x2AC]) = make_ulonglong2(0x9531a4959139a891, 0x79f28b79e4d337e4);
AS_UL2(&sharedMemory[0x2B0]) = make_ulonglong2(0xc88b43c8e7d532e7, 0x6ddab76d376e5937);
AS_UL2(&sharedMemory[0x2B4]) = make_ulonglong2(0xd5b164d58d018c8d, 0xa949e0a94e9cd24e);
AS_UL2(&sharedMemory[0x2B8]) = make_ulonglong2(0x56acfa566cd8b46c, 0xeacf25eaf4f307f4);
AS_UL2(&sharedMemory[0x2BC]) = make_ulonglong2(0x7af48e7a65caaf65, 0x08101808ae47e9ae);
break;
case 6:
AS_UL2(&sharedMemory[0x2C0]) = make_ulonglong2(0x78f08878ba6fd5ba, 0x2e5c722e254a6f25);
AS_UL2(&sharedMemory[0x2C4]) = make_ulonglong2(0xa657f1a61c38241c, 0xc69751c6b473c7b4);
AS_UL2(&sharedMemory[0x2C8]) = make_ulonglong2(0xdda17cdde8cb23e8, 0x1f3e211f74e89c74);
AS_UL2(&sharedMemory[0x2CC]) = make_ulonglong2(0xbd61dcbd4b96dd4b, 0x8a0f858a8b0d868b);
AS_UL2(&sharedMemory[0x2D0]) = make_ulonglong2(0x3e7c423e70e09070, 0x66ccaa66b571c4b5);
AS_UL2(&sharedMemory[0x2D4]) = make_ulonglong2(0x030605034890d848, 0x0e1c120ef6f701f6);
AS_UL2(&sharedMemory[0x2D8]) = make_ulonglong2(0x356a5f3561c2a361, 0xb969d0b957aef957);
AS_UL2(&sharedMemory[0x2DC]) = make_ulonglong2(0xc19958c186179186, 0x9e27b99e1d3a271d);
break;
case 7:
AS_UL2(&sharedMemory[0x2E0]) = make_ulonglong2(0xf8eb13f8e1d938e1, 0x11223311982bb398);
AS_UL2(&sharedMemory[0x2E4]) = make_ulonglong2(0xd9a970d969d2bb69, 0x9433a7948e07898e);
AS_UL2(&sharedMemory[0x2E8]) = make_ulonglong2(0x1e3c221e9b2db69b, 0xe9c920e987159287);
AS_UL2(&sharedMemory[0x2EC]) = make_ulonglong2(0x55aaff55ce8749ce, 0xdfa57adf28507828);
AS_UL2(&sharedMemory[0x2F0]) = make_ulonglong2(0xa159f8a18c038f8c, 0x0d1a170d89098089);
AS_UL2(&sharedMemory[0x2F4]) = make_ulonglong2(0xe6d731e6bf65dabf, 0x68d0b8684284c642);
AS_UL2(&sharedMemory[0x2F8]) = make_ulonglong2(0x9929b0994182c341, 0x0f1e110f2d5a772d);
AS_UL2(&sharedMemory[0x2FC]) = make_ulonglong2(0x54a8fc54b07bcbb0, 0x162c3a16bb6dd6bb);
break;
}
// AES 3
switch (threadIdx.x) {
case 0:
AS_UL2(&sharedMemory[0x300]) = make_ulonglong2(0xf8847c7cc6a56363, 0xf68d7b7bee997777);
AS_UL2(&sharedMemory[0x304]) = make_ulonglong2(0xd6bd6b6bff0df2f2, 0x9154c5c5deb16f6f);
AS_UL2(&sharedMemory[0x308]) = make_ulonglong2(0x0203010160503030, 0x567d2b2bcea96767);
AS_UL2(&sharedMemory[0x30C]) = make_ulonglong2(0xb562d7d7e719fefe, 0xec9a76764de6abab);
AS_UL2(&sharedMemory[0x310]) = make_ulonglong2(0x1f9d82828f45caca, 0xfa877d7d8940c9c9);
AS_UL2(&sharedMemory[0x314]) = make_ulonglong2(0xb2eb5959ef15fafa, 0xfb0bf0f08ec94747);
AS_UL2(&sharedMemory[0x318]) = make_ulonglong2(0xb367d4d441ecadad, 0x45eaafaf5ffda2a2);
AS_UL2(&sharedMemory[0x31C]) = make_ulonglong2(0x53f7a4a423bf9c9c, 0x9b5bc0c0e4967272);
break;
case 1:
AS_UL2(&sharedMemory[0x320]) = make_ulonglong2(0xe11cfdfd75c2b7b7, 0x4c6a26263dae9393);
AS_UL2(&sharedMemory[0x324]) = make_ulonglong2(0x7e413f3f6c5a3636, 0x834fccccf502f7f7);
AS_UL2(&sharedMemory[0x328]) = make_ulonglong2(0x51f4a5a5685c3434, 0xf908f1f1d134e5e5);
AS_UL2(&sharedMemory[0x32C]) = make_ulonglong2(0xab73d8d8e2937171, 0x2a3f151562533131);
AS_UL2(&sharedMemory[0x330]) = make_ulonglong2(0x9552c7c7080c0404, 0x9d5ec3c346652323);
AS_UL2(&sharedMemory[0x334]) = make_ulonglong2(0x37a1969630281818, 0x2fb59a9a0a0f0505);
AS_UL2(&sharedMemory[0x338]) = make_ulonglong2(0x243612120e090707, 0xdf3de2e21b9b8080);
AS_UL2(&sharedMemory[0x33C]) = make_ulonglong2(0x4e692727cd26ebeb, 0xea9f75757fcdb2b2);
break;
case 2:
AS_UL2(&sharedMemory[0x340]) = make_ulonglong2(0x1d9e8383121b0909, 0x342e1a1a58742c2c);
AS_UL2(&sharedMemory[0x344]) = make_ulonglong2(0xdcb26e6e362d1b1b, 0x5bfba0a0b4ee5a5a);
AS_UL2(&sharedMemory[0x348]) = make_ulonglong2(0x764d3b3ba4f65252, 0x7dceb3b3b761d6d6);
AS_UL2(&sharedMemory[0x34C]) = make_ulonglong2(0xdd3ee3e3527b2929, 0x139784845e712f2f);
AS_UL2(&sharedMemory[0x350]) = make_ulonglong2(0xb968d1d1a6f55353, 0xc12ceded00000000);
AS_UL2(&sharedMemory[0x354]) = make_ulonglong2(0xe31ffcfc40602020, 0xb6ed5b5b79c8b1b1);
AS_UL2(&sharedMemory[0x358]) = make_ulonglong2(0x8d46cbcbd4be6a6a, 0x724b393967d9bebe);
AS_UL2(&sharedMemory[0x35C]) = make_ulonglong2(0x98d44c4c94de4a4a, 0x854acfcfb0e85858);
break;
case 3:
AS_UL2(&sharedMemory[0x360]) = make_ulonglong2(0xc52aefefbb6bd0d0, 0xed16fbfb4fe5aaaa);
AS_UL2(&sharedMemory[0x364]) = make_ulonglong2(0x9ad74d4d86c54343, 0x1194858566553333);
AS_UL2(&sharedMemory[0x368]) = make_ulonglong2(0xe910f9f98acf4545, 0xfe817f7f04060202);
AS_UL2(&sharedMemory[0x36C]) = make_ulonglong2(0x78443c3ca0f05050, 0x4be3a8a825ba9f9f);
AS_UL2(&sharedMemory[0x370]) = make_ulonglong2(0x5dfea3a3a2f35151, 0x058a8f8f80c04040);
AS_UL2(&sharedMemory[0x374]) = make_ulonglong2(0x21bc9d9d3fad9292, 0xf104f5f570483838);
AS_UL2(&sharedMemory[0x378]) = make_ulonglong2(0x77c1b6b663dfbcbc, 0x42632121af75dada);
AS_UL2(&sharedMemory[0x37C]) = make_ulonglong2(0xe51affff20301010, 0xbf6dd2d2fd0ef3f3);
break;
case 4:
AS_UL2(&sharedMemory[0x380]) = make_ulonglong2(0x18140c0c814ccdcd, 0xc32fecec26351313);
AS_UL2(&sharedMemory[0x384]) = make_ulonglong2(0x35a29797bee15f5f, 0x2e39171788cc4444);
AS_UL2(&sharedMemory[0x388]) = make_ulonglong2(0x55f2a7a79357c4c4, 0x7a473d3dfc827e7e);
AS_UL2(&sharedMemory[0x38C]) = make_ulonglong2(0xbae75d5dc8ac6464, 0xe6957373322b1919);
AS_UL2(&sharedMemory[0x390]) = make_ulonglong2(0x19988181c0a06060, 0xa37fdcdc9ed14f4f);
AS_UL2(&sharedMemory[0x394]) = make_ulonglong2(0x547e2a2a44662222, 0x0b8388883bab9090);
AS_UL2(&sharedMemory[0x398]) = make_ulonglong2(0xc729eeee8cca4646, 0x283c14146bd3b8b8);
AS_UL2(&sharedMemory[0x39C]) = make_ulonglong2(0xbce25e5ea779dede, 0xad76dbdb161d0b0b);
break;
case 5:
AS_UL2(&sharedMemory[0x3A0]) = make_ulonglong2(0x64563232db3be0e0, 0x141e0a0a744e3a3a);
AS_UL2(&sharedMemory[0x3A4]) = make_ulonglong2(0x0c0a060692db4949, 0xb8e45c5c486c2424);
AS_UL2(&sharedMemory[0x3A8]) = make_ulonglong2(0xbd6ed3d39f5dc2c2, 0xc4a6626243efacac);
AS_UL2(&sharedMemory[0x3AC]) = make_ulonglong2(0x31a4959539a89191, 0xf28b7979d337e4e4);
AS_UL2(&sharedMemory[0x3B0]) = make_ulonglong2(0x8b43c8c8d532e7e7, 0xdab76d6d6e593737);
AS_UL2(&sharedMemory[0x3B4]) = make_ulonglong2(0xb164d5d5018c8d8d, 0x49e0a9a99cd24e4e);
AS_UL2(&sharedMemory[0x3B8]) = make_ulonglong2(0xacfa5656d8b46c6c, 0xcf25eaeaf307f4f4);
AS_UL2(&sharedMemory[0x3BC]) = make_ulonglong2(0xf48e7a7acaaf6565, 0x1018080847e9aeae);
break;
case 6:
AS_UL2(&sharedMemory[0x3C0]) = make_ulonglong2(0xf08878786fd5baba, 0x5c722e2e4a6f2525);
AS_UL2(&sharedMemory[0x3C4]) = make_ulonglong2(0x57f1a6a638241c1c, 0x9751c6c673c7b4b4);
AS_UL2(&sharedMemory[0x3C8]) = make_ulonglong2(0xa17cddddcb23e8e8, 0x3e211f1fe89c7474);
AS_UL2(&sharedMemory[0x3CC]) = make_ulonglong2(0x61dcbdbd96dd4b4b, 0x0f858a8a0d868b8b);
AS_UL2(&sharedMemory[0x3D0]) = make_ulonglong2(0x7c423e3ee0907070, 0xccaa666671c4b5b5);
AS_UL2(&sharedMemory[0x3D4]) = make_ulonglong2(0x0605030390d84848, 0x1c120e0ef701f6f6);
AS_UL2(&sharedMemory[0x3D8]) = make_ulonglong2(0x6a5f3535c2a36161, 0x69d0b9b9aef95757);
AS_UL2(&sharedMemory[0x3DC]) = make_ulonglong2(0x9958c1c117918686, 0x27b99e9e3a271d1d);
break;
case 7:
AS_UL2(&sharedMemory[0x3E0]) = make_ulonglong2(0xeb13f8f8d938e1e1, 0x223311112bb39898);
AS_UL2(&sharedMemory[0x3E4]) = make_ulonglong2(0xa970d9d9d2bb6969, 0x33a7949407898e8e);
AS_UL2(&sharedMemory[0x3E8]) = make_ulonglong2(0x3c221e1e2db69b9b, 0xc920e9e915928787);
AS_UL2(&sharedMemory[0x3EC]) = make_ulonglong2(0xaaff55558749cece, 0xa57adfdf50782828);
AS_UL2(&sharedMemory[0x3F0]) = make_ulonglong2(0x59f8a1a1038f8c8c, 0x1a170d0d09808989);
AS_UL2(&sharedMemory[0x3F4]) = make_ulonglong2(0xd731e6e665dabfbf, 0xd0b8686884c64242);
AS_UL2(&sharedMemory[0x3F8]) = make_ulonglong2(0x29b0999982c34141, 0x1e110f0f5a772d2d);
AS_UL2(&sharedMemory[0x3FC]) = make_ulonglong2(0xa8fc54547bcbb0b0, 0x2c3a16166dd6bbbb);
break;
}
}
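// Loads the four AES T-tables (byte rotations of one another) into shared memory words
// 0x000-0x3FF using 128-bit uint4 stores. Only threads 0-7 of a block participate; each of
// them writes eight uint4 vectors (32 words) per table, so callers must __syncthreads()
// before any thread reads the tables.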
__device__ __forceinline__
void cn_aes_gpu_init_u4(uint32_t* sharedMemory)
{
// AES 0
switch (threadIdx.x) {
case 0:
AS_UINT4(&sharedMemory[0x000]) = make_uint4(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
AS_UINT4(&sharedMemory[0x004]) = make_uint4(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
AS_UINT4(&sharedMemory[0x008]) = make_uint4(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
AS_UINT4(&sharedMemory[0x00C]) = make_uint4(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
AS_UINT4(&sharedMemory[0x010]) = make_uint4(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
AS_UINT4(&sharedMemory[0x014]) = make_uint4(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
AS_UINT4(&sharedMemory[0x018]) = make_uint4(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
AS_UINT4(&sharedMemory[0x01C]) = make_uint4(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
break;
case 1:
AS_UINT4(&sharedMemory[0x020]) = make_uint4(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
AS_UINT4(&sharedMemory[0x024]) = make_uint4(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
AS_UINT4(&sharedMemory[0x028]) = make_uint4(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
AS_UINT4(&sharedMemory[0x02C]) = make_uint4(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
AS_UINT4(&sharedMemory[0x030]) = make_uint4(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
AS_UINT4(&sharedMemory[0x034]) = make_uint4(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
AS_UINT4(&sharedMemory[0x038]) = make_uint4(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
AS_UINT4(&sharedMemory[0x03C]) = make_uint4(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
break;
case 2:
AS_UINT4(&sharedMemory[0x040]) = make_uint4(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
AS_UINT4(&sharedMemory[0x044]) = make_uint4(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
AS_UINT4(&sharedMemory[0x048]) = make_uint4(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
AS_UINT4(&sharedMemory[0x04C]) = make_uint4(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
AS_UINT4(&sharedMemory[0x050]) = make_uint4(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
AS_UINT4(&sharedMemory[0x054]) = make_uint4(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
AS_UINT4(&sharedMemory[0x058]) = make_uint4(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
AS_UINT4(&sharedMemory[0x05C]) = make_uint4(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
break;
case 3:
AS_UINT4(&sharedMemory[0x060]) = make_uint4(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
AS_UINT4(&sharedMemory[0x064]) = make_uint4(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
AS_UINT4(&sharedMemory[0x068]) = make_uint4(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
AS_UINT4(&sharedMemory[0x06C]) = make_uint4(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
AS_UINT4(&sharedMemory[0x070]) = make_uint4(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
AS_UINT4(&sharedMemory[0x074]) = make_uint4(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
AS_UINT4(&sharedMemory[0x078]) = make_uint4(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
AS_UINT4(&sharedMemory[0x07C]) = make_uint4(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
break;
case 4:
AS_UINT4(&sharedMemory[0x080]) = make_uint4(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
AS_UINT4(&sharedMemory[0x084]) = make_uint4(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
AS_UINT4(&sharedMemory[0x088]) = make_uint4(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
AS_UINT4(&sharedMemory[0x08C]) = make_uint4(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
AS_UINT4(&sharedMemory[0x090]) = make_uint4(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
AS_UINT4(&sharedMemory[0x094]) = make_uint4(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
AS_UINT4(&sharedMemory[0x098]) = make_uint4(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
AS_UINT4(&sharedMemory[0x09C]) = make_uint4(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
break;
case 5:
AS_UINT4(&sharedMemory[0x0A0]) = make_uint4(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
AS_UINT4(&sharedMemory[0x0A4]) = make_uint4(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
AS_UINT4(&sharedMemory[0x0A8]) = make_uint4(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
AS_UINT4(&sharedMemory[0x0AC]) = make_uint4(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
AS_UINT4(&sharedMemory[0x0B0]) = make_uint4(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
AS_UINT4(&sharedMemory[0x0B4]) = make_uint4(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
AS_UINT4(&sharedMemory[0x0B8]) = make_uint4(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
AS_UINT4(&sharedMemory[0x0BC]) = make_uint4(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
break;
case 6:
AS_UINT4(&sharedMemory[0x0C0]) = make_uint4(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
AS_UINT4(&sharedMemory[0x0C4]) = make_uint4(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
AS_UINT4(&sharedMemory[0x0C8]) = make_uint4(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
AS_UINT4(&sharedMemory[0x0CC]) = make_uint4(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
AS_UINT4(&sharedMemory[0x0D0]) = make_uint4(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
AS_UINT4(&sharedMemory[0x0D4]) = make_uint4(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
AS_UINT4(&sharedMemory[0x0D8]) = make_uint4(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
AS_UINT4(&sharedMemory[0x0DC]) = make_uint4(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
break;
case 7:
AS_UINT4(&sharedMemory[0x0E0]) = make_uint4(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
AS_UINT4(&sharedMemory[0x0E4]) = make_uint4(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
AS_UINT4(&sharedMemory[0x0E8]) = make_uint4(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
AS_UINT4(&sharedMemory[0x0EC]) = make_uint4(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
AS_UINT4(&sharedMemory[0x0F0]) = make_uint4(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
AS_UINT4(&sharedMemory[0x0F4]) = make_uint4(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
AS_UINT4(&sharedMemory[0x0F8]) = make_uint4(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
AS_UINT4(&sharedMemory[0x0FC]) = make_uint4(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
break;
}
// AES 1
switch (threadIdx.x) {
case 0:
AS_UINT4(&sharedMemory[0x100]) = make_uint4(0x6363c6a5, 0x7c7cf884, 0x7777ee99, 0x7b7bf68d);
AS_UINT4(&sharedMemory[0x104]) = make_uint4(0xf2f2ff0d, 0x6b6bd6bd, 0x6f6fdeb1, 0xc5c59154);
AS_UINT4(&sharedMemory[0x108]) = make_uint4(0x30306050, 0x01010203, 0x6767cea9, 0x2b2b567d);
AS_UINT4(&sharedMemory[0x10C]) = make_uint4(0xfefee719, 0xd7d7b562, 0xabab4de6, 0x7676ec9a);
AS_UINT4(&sharedMemory[0x110]) = make_uint4(0xcaca8f45, 0x82821f9d, 0xc9c98940, 0x7d7dfa87);
AS_UINT4(&sharedMemory[0x114]) = make_uint4(0xfafaef15, 0x5959b2eb, 0x47478ec9, 0xf0f0fb0b);
AS_UINT4(&sharedMemory[0x118]) = make_uint4(0xadad41ec, 0xd4d4b367, 0xa2a25ffd, 0xafaf45ea);
AS_UINT4(&sharedMemory[0x11C]) = make_uint4(0x9c9c23bf, 0xa4a453f7, 0x7272e496, 0xc0c09b5b);
break;
case 1:
AS_UINT4(&sharedMemory[0x120]) = make_uint4(0xb7b775c2, 0xfdfde11c, 0x93933dae, 0x26264c6a);
AS_UINT4(&sharedMemory[0x124]) = make_uint4(0x36366c5a, 0x3f3f7e41, 0xf7f7f502, 0xcccc834f);
AS_UINT4(&sharedMemory[0x128]) = make_uint4(0x3434685c, 0xa5a551f4, 0xe5e5d134, 0xf1f1f908);
AS_UINT4(&sharedMemory[0x12C]) = make_uint4(0x7171e293, 0xd8d8ab73, 0x31316253, 0x15152a3f);
AS_UINT4(&sharedMemory[0x130]) = make_uint4(0x0404080c, 0xc7c79552, 0x23234665, 0xc3c39d5e);
AS_UINT4(&sharedMemory[0x134]) = make_uint4(0x18183028, 0x969637a1, 0x05050a0f, 0x9a9a2fb5);
AS_UINT4(&sharedMemory[0x138]) = make_uint4(0x07070e09, 0x12122436, 0x80801b9b, 0xe2e2df3d);
AS_UINT4(&sharedMemory[0x13C]) = make_uint4(0xebebcd26, 0x27274e69, 0xb2b27fcd, 0x7575ea9f);
break;
case 2:
AS_UINT4(&sharedMemory[0x140]) = make_uint4(0x0909121b, 0x83831d9e, 0x2c2c5874, 0x1a1a342e);
AS_UINT4(&sharedMemory[0x144]) = make_uint4(0x1b1b362d, 0x6e6edcb2, 0x5a5ab4ee, 0xa0a05bfb);
AS_UINT4(&sharedMemory[0x148]) = make_uint4(0x5252a4f6, 0x3b3b764d, 0xd6d6b761, 0xb3b37dce);
AS_UINT4(&sharedMemory[0x14C]) = make_uint4(0x2929527b, 0xe3e3dd3e, 0x2f2f5e71, 0x84841397);
AS_UINT4(&sharedMemory[0x150]) = make_uint4(0x5353a6f5, 0xd1d1b968, 0x00000000, 0xededc12c);
AS_UINT4(&sharedMemory[0x154]) = make_uint4(0x20204060, 0xfcfce31f, 0xb1b179c8, 0x5b5bb6ed);
AS_UINT4(&sharedMemory[0x158]) = make_uint4(0x6a6ad4be, 0xcbcb8d46, 0xbebe67d9, 0x3939724b);
AS_UINT4(&sharedMemory[0x15C]) = make_uint4(0x4a4a94de, 0x4c4c98d4, 0x5858b0e8, 0xcfcf854a);
break;
case 3:
AS_UINT4(&sharedMemory[0x160]) = make_uint4(0xd0d0bb6b, 0xefefc52a, 0xaaaa4fe5, 0xfbfbed16);
AS_UINT4(&sharedMemory[0x164]) = make_uint4(0x434386c5, 0x4d4d9ad7, 0x33336655, 0x85851194);
AS_UINT4(&sharedMemory[0x168]) = make_uint4(0x45458acf, 0xf9f9e910, 0x02020406, 0x7f7ffe81);
AS_UINT4(&sharedMemory[0x16C]) = make_uint4(0x5050a0f0, 0x3c3c7844, 0x9f9f25ba, 0xa8a84be3);
AS_UINT4(&sharedMemory[0x170]) = make_uint4(0x5151a2f3, 0xa3a35dfe, 0x404080c0, 0x8f8f058a);
AS_UINT4(&sharedMemory[0x174]) = make_uint4(0x92923fad, 0x9d9d21bc, 0x38387048, 0xf5f5f104);
AS_UINT4(&sharedMemory[0x178]) = make_uint4(0xbcbc63df, 0xb6b677c1, 0xdadaaf75, 0x21214263);
AS_UINT4(&sharedMemory[0x17C]) = make_uint4(0x10102030, 0xffffe51a, 0xf3f3fd0e, 0xd2d2bf6d);
break;
case 4:
AS_UINT4(&sharedMemory[0x180]) = make_uint4(0xcdcd814c, 0x0c0c1814, 0x13132635, 0xececc32f);
AS_UINT4(&sharedMemory[0x184]) = make_uint4(0x5f5fbee1, 0x979735a2, 0x444488cc, 0x17172e39);
AS_UINT4(&sharedMemory[0x188]) = make_uint4(0xc4c49357, 0xa7a755f2, 0x7e7efc82, 0x3d3d7a47);
AS_UINT4(&sharedMemory[0x18C]) = make_uint4(0x6464c8ac, 0x5d5dbae7, 0x1919322b, 0x7373e695);
AS_UINT4(&sharedMemory[0x190]) = make_uint4(0x6060c0a0, 0x81811998, 0x4f4f9ed1, 0xdcdca37f);
AS_UINT4(&sharedMemory[0x194]) = make_uint4(0x22224466, 0x2a2a547e, 0x90903bab, 0x88880b83);
AS_UINT4(&sharedMemory[0x198]) = make_uint4(0x46468cca, 0xeeeec729, 0xb8b86bd3, 0x1414283c);
AS_UINT4(&sharedMemory[0x19C]) = make_uint4(0xdedea779, 0x5e5ebce2, 0x0b0b161d, 0xdbdbad76);
break;
case 5:
AS_UINT4(&sharedMemory[0x1A0]) = make_uint4(0xe0e0db3b, 0x32326456, 0x3a3a744e, 0x0a0a141e);
AS_UINT4(&sharedMemory[0x1A4]) = make_uint4(0x494992db, 0x06060c0a, 0x2424486c, 0x5c5cb8e4);
AS_UINT4(&sharedMemory[0x1A8]) = make_uint4(0xc2c29f5d, 0xd3d3bd6e, 0xacac43ef, 0x6262c4a6);
AS_UINT4(&sharedMemory[0x1AC]) = make_uint4(0x919139a8, 0x959531a4, 0xe4e4d337, 0x7979f28b);
AS_UINT4(&sharedMemory[0x1B0]) = make_uint4(0xe7e7d532, 0xc8c88b43, 0x37376e59, 0x6d6ddab7);
AS_UINT4(&sharedMemory[0x1B4]) = make_uint4(0x8d8d018c, 0xd5d5b164, 0x4e4e9cd2, 0xa9a949e0);
AS_UINT4(&sharedMemory[0x1B8]) = make_uint4(0x6c6cd8b4, 0x5656acfa, 0xf4f4f307, 0xeaeacf25);
AS_UINT4(&sharedMemory[0x1BC]) = make_uint4(0x6565caaf, 0x7a7af48e, 0xaeae47e9, 0x08081018);
break;
case 6:
AS_UINT4(&sharedMemory[0x1C0]) = make_uint4(0xbaba6fd5, 0x7878f088, 0x25254a6f, 0x2e2e5c72);
AS_UINT4(&sharedMemory[0x1C4]) = make_uint4(0x1c1c3824, 0xa6a657f1, 0xb4b473c7, 0xc6c69751);
AS_UINT4(&sharedMemory[0x1C8]) = make_uint4(0xe8e8cb23, 0xdddda17c, 0x7474e89c, 0x1f1f3e21);
AS_UINT4(&sharedMemory[0x1CC]) = make_uint4(0x4b4b96dd, 0xbdbd61dc, 0x8b8b0d86, 0x8a8a0f85);
AS_UINT4(&sharedMemory[0x1D0]) = make_uint4(0x7070e090, 0x3e3e7c42, 0xb5b571c4, 0x6666ccaa);
AS_UINT4(&sharedMemory[0x1D4]) = make_uint4(0x484890d8, 0x03030605, 0xf6f6f701, 0x0e0e1c12);
AS_UINT4(&sharedMemory[0x1D8]) = make_uint4(0x6161c2a3, 0x35356a5f, 0x5757aef9, 0xb9b969d0);
AS_UINT4(&sharedMemory[0x1DC]) = make_uint4(0x86861791, 0xc1c19958, 0x1d1d3a27, 0x9e9e27b9);
break;
case 7:
AS_UINT4(&sharedMemory[0x1E0]) = make_uint4(0xe1e1d938, 0xf8f8eb13, 0x98982bb3, 0x11112233);
AS_UINT4(&sharedMemory[0x1E4]) = make_uint4(0x6969d2bb, 0xd9d9a970, 0x8e8e0789, 0x949433a7);
AS_UINT4(&sharedMemory[0x1E8]) = make_uint4(0x9b9b2db6, 0x1e1e3c22, 0x87871592, 0xe9e9c920);
AS_UINT4(&sharedMemory[0x1EC]) = make_uint4(0xcece8749, 0x5555aaff, 0x28285078, 0xdfdfa57a);
AS_UINT4(&sharedMemory[0x1F0]) = make_uint4(0x8c8c038f, 0xa1a159f8, 0x89890980, 0x0d0d1a17);
AS_UINT4(&sharedMemory[0x1F4]) = make_uint4(0xbfbf65da, 0xe6e6d731, 0x424284c6, 0x6868d0b8);
AS_UINT4(&sharedMemory[0x1F8]) = make_uint4(0x414182c3, 0x999929b0, 0x2d2d5a77, 0x0f0f1e11);
AS_UINT4(&sharedMemory[0x1FC]) = make_uint4(0xb0b07bcb, 0x5454a8fc, 0xbbbb6dd6, 0x16162c3a);
break;
}
// AES 2
switch (threadIdx.x) {
case 0:
AS_UINT4(&sharedMemory[0x200]) = make_uint4(0x63c6a563, 0x7cf8847c, 0x77ee9977, 0x7bf68d7b);
AS_UINT4(&sharedMemory[0x204]) = make_uint4(0xf2ff0df2, 0x6bd6bd6b, 0x6fdeb16f, 0xc59154c5);
AS_UINT4(&sharedMemory[0x208]) = make_uint4(0x30605030, 0x01020301, 0x67cea967, 0x2b567d2b);
AS_UINT4(&sharedMemory[0x20C]) = make_uint4(0xfee719fe, 0xd7b562d7, 0xab4de6ab, 0x76ec9a76);
AS_UINT4(&sharedMemory[0x210]) = make_uint4(0xca8f45ca, 0x821f9d82, 0xc98940c9, 0x7dfa877d);
AS_UINT4(&sharedMemory[0x214]) = make_uint4(0xfaef15fa, 0x59b2eb59, 0x478ec947, 0xf0fb0bf0);
AS_UINT4(&sharedMemory[0x218]) = make_uint4(0xad41ecad, 0xd4b367d4, 0xa25ffda2, 0xaf45eaaf);
AS_UINT4(&sharedMemory[0x21C]) = make_uint4(0x9c23bf9c, 0xa453f7a4, 0x72e49672, 0xc09b5bc0);
break;
case 1:
AS_UINT4(&sharedMemory[0x220]) = make_uint4(0xb775c2b7, 0xfde11cfd, 0x933dae93, 0x264c6a26);
AS_UINT4(&sharedMemory[0x224]) = make_uint4(0x366c5a36, 0x3f7e413f, 0xf7f502f7, 0xcc834fcc);
AS_UINT4(&sharedMemory[0x228]) = make_uint4(0x34685c34, 0xa551f4a5, 0xe5d134e5, 0xf1f908f1);
AS_UINT4(&sharedMemory[0x22C]) = make_uint4(0x71e29371, 0xd8ab73d8, 0x31625331, 0x152a3f15);
AS_UINT4(&sharedMemory[0x230]) = make_uint4(0x04080c04, 0xc79552c7, 0x23466523, 0xc39d5ec3);
AS_UINT4(&sharedMemory[0x234]) = make_uint4(0x18302818, 0x9637a196, 0x050a0f05, 0x9a2fb59a);
AS_UINT4(&sharedMemory[0x238]) = make_uint4(0x070e0907, 0x12243612, 0x801b9b80, 0xe2df3de2);
AS_UINT4(&sharedMemory[0x23C]) = make_uint4(0xebcd26eb, 0x274e6927, 0xb27fcdb2, 0x75ea9f75);
break;
case 2:
AS_UINT4(&sharedMemory[0x240]) = make_uint4(0x09121b09, 0x831d9e83, 0x2c58742c, 0x1a342e1a);
AS_UINT4(&sharedMemory[0x244]) = make_uint4(0x1b362d1b, 0x6edcb26e, 0x5ab4ee5a, 0xa05bfba0);
AS_UINT4(&sharedMemory[0x248]) = make_uint4(0x52a4f652, 0x3b764d3b, 0xd6b761d6, 0xb37dceb3);
AS_UINT4(&sharedMemory[0x24C]) = make_uint4(0x29527b29, 0xe3dd3ee3, 0x2f5e712f, 0x84139784);
AS_UINT4(&sharedMemory[0x250]) = make_uint4(0x53a6f553, 0xd1b968d1, 0x00000000, 0xedc12ced);
AS_UINT4(&sharedMemory[0x254]) = make_uint4(0x20406020, 0xfce31ffc, 0xb179c8b1, 0x5bb6ed5b);
AS_UINT4(&sharedMemory[0x258]) = make_uint4(0x6ad4be6a, 0xcb8d46cb, 0xbe67d9be, 0x39724b39);
AS_UINT4(&sharedMemory[0x25C]) = make_uint4(0x4a94de4a, 0x4c98d44c, 0x58b0e858, 0xcf854acf);
break;
case 3:
AS_UINT4(&sharedMemory[0x260]) = make_uint4(0xd0bb6bd0, 0xefc52aef, 0xaa4fe5aa, 0xfbed16fb);
AS_UINT4(&sharedMemory[0x264]) = make_uint4(0x4386c543, 0x4d9ad74d, 0x33665533, 0x85119485);
AS_UINT4(&sharedMemory[0x268]) = make_uint4(0x458acf45, 0xf9e910f9, 0x02040602, 0x7ffe817f);
AS_UINT4(&sharedMemory[0x26C]) = make_uint4(0x50a0f050, 0x3c78443c, 0x9f25ba9f, 0xa84be3a8);
AS_UINT4(&sharedMemory[0x270]) = make_uint4(0x51a2f351, 0xa35dfea3, 0x4080c040, 0x8f058a8f);
AS_UINT4(&sharedMemory[0x274]) = make_uint4(0x923fad92, 0x9d21bc9d, 0x38704838, 0xf5f104f5);
AS_UINT4(&sharedMemory[0x278]) = make_uint4(0xbc63dfbc, 0xb677c1b6, 0xdaaf75da, 0x21426321);
AS_UINT4(&sharedMemory[0x27C]) = make_uint4(0x10203010, 0xffe51aff, 0xf3fd0ef3, 0xd2bf6dd2);
break;
case 4:
AS_UINT4(&sharedMemory[0x280]) = make_uint4(0xcd814ccd, 0x0c18140c, 0x13263513, 0xecc32fec);
AS_UINT4(&sharedMemory[0x284]) = make_uint4(0x5fbee15f, 0x9735a297, 0x4488cc44, 0x172e3917);
AS_UINT4(&sharedMemory[0x288]) = make_uint4(0xc49357c4, 0xa755f2a7, 0x7efc827e, 0x3d7a473d);
AS_UINT4(&sharedMemory[0x28C]) = make_uint4(0x64c8ac64, 0x5dbae75d, 0x19322b19, 0x73e69573);
AS_UINT4(&sharedMemory[0x290]) = make_uint4(0x60c0a060, 0x81199881, 0x4f9ed14f, 0xdca37fdc);
AS_UINT4(&sharedMemory[0x294]) = make_uint4(0x22446622, 0x2a547e2a, 0x903bab90, 0x880b8388);
AS_UINT4(&sharedMemory[0x298]) = make_uint4(0x468cca46, 0xeec729ee, 0xb86bd3b8, 0x14283c14);
AS_UINT4(&sharedMemory[0x29C]) = make_uint4(0xdea779de, 0x5ebce25e, 0x0b161d0b, 0xdbad76db);
break;
case 5:
AS_UINT4(&sharedMemory[0x2A0]) = make_uint4(0xe0db3be0, 0x32645632, 0x3a744e3a, 0x0a141e0a);
AS_UINT4(&sharedMemory[0x2A4]) = make_uint4(0x4992db49, 0x060c0a06, 0x24486c24, 0x5cb8e45c);
AS_UINT4(&sharedMemory[0x2A8]) = make_uint4(0xc29f5dc2, 0xd3bd6ed3, 0xac43efac, 0x62c4a662);
AS_UINT4(&sharedMemory[0x2AC]) = make_uint4(0x9139a891, 0x9531a495, 0xe4d337e4, 0x79f28b79);
AS_UINT4(&sharedMemory[0x2B0]) = make_uint4(0xe7d532e7, 0xc88b43c8, 0x376e5937, 0x6ddab76d);
AS_UINT4(&sharedMemory[0x2B4]) = make_uint4(0x8d018c8d, 0xd5b164d5, 0x4e9cd24e, 0xa949e0a9);
AS_UINT4(&sharedMemory[0x2B8]) = make_uint4(0x6cd8b46c, 0x56acfa56, 0xf4f307f4, 0xeacf25ea);
AS_UINT4(&sharedMemory[0x2BC]) = make_uint4(0x65caaf65, 0x7af48e7a, 0xae47e9ae, 0x08101808);
break;
case 6:
AS_UINT4(&sharedMemory[0x2C0]) = make_uint4(0xba6fd5ba, 0x78f08878, 0x254a6f25, 0x2e5c722e);
AS_UINT4(&sharedMemory[0x2C4]) = make_uint4(0x1c38241c, 0xa657f1a6, 0xb473c7b4, 0xc69751c6);
AS_UINT4(&sharedMemory[0x2C8]) = make_uint4(0xe8cb23e8, 0xdda17cdd, 0x74e89c74, 0x1f3e211f);
AS_UINT4(&sharedMemory[0x2CC]) = make_uint4(0x4b96dd4b, 0xbd61dcbd, 0x8b0d868b, 0x8a0f858a);
AS_UINT4(&sharedMemory[0x2D0]) = make_uint4(0x70e09070, 0x3e7c423e, 0xb571c4b5, 0x66ccaa66);
AS_UINT4(&sharedMemory[0x2D4]) = make_uint4(0x4890d848, 0x03060503, 0xf6f701f6, 0x0e1c120e);
AS_UINT4(&sharedMemory[0x2D8]) = make_uint4(0x61c2a361, 0x356a5f35, 0x57aef957, 0xb969d0b9);
AS_UINT4(&sharedMemory[0x2DC]) = make_uint4(0x86179186, 0xc19958c1, 0x1d3a271d, 0x9e27b99e);
break;
case 7:
AS_UINT4(&sharedMemory[0x2E0]) = make_uint4(0xe1d938e1, 0xf8eb13f8, 0x982bb398, 0x11223311);
AS_UINT4(&sharedMemory[0x2E4]) = make_uint4(0x69d2bb69, 0xd9a970d9, 0x8e07898e, 0x9433a794);
AS_UINT4(&sharedMemory[0x2E8]) = make_uint4(0x9b2db69b, 0x1e3c221e, 0x87159287, 0xe9c920e9);
AS_UINT4(&sharedMemory[0x2EC]) = make_uint4(0xce8749ce, 0x55aaff55, 0x28507828, 0xdfa57adf);
AS_UINT4(&sharedMemory[0x2F0]) = make_uint4(0x8c038f8c, 0xa159f8a1, 0x89098089, 0x0d1a170d);
AS_UINT4(&sharedMemory[0x2F4]) = make_uint4(0xbf65dabf, 0xe6d731e6, 0x4284c642, 0x68d0b868);
AS_UINT4(&sharedMemory[0x2F8]) = make_uint4(0x4182c341, 0x9929b099, 0x2d5a772d, 0x0f1e110f);
AS_UINT4(&sharedMemory[0x2FC]) = make_uint4(0xb07bcbb0, 0x54a8fc54, 0xbb6dd6bb, 0x162c3a16);
break;
}
// AES 3
switch (threadIdx.x) {
case 0:
AS_UINT4(&sharedMemory[0x300]) = make_uint4(0xc6a56363, 0xf8847c7c, 0xee997777, 0xf68d7b7b);
AS_UINT4(&sharedMemory[0x304]) = make_uint4(0xff0df2f2, 0xd6bd6b6b, 0xdeb16f6f, 0x9154c5c5);
AS_UINT4(&sharedMemory[0x308]) = make_uint4(0x60503030, 0x02030101, 0xcea96767, 0x567d2b2b);
AS_UINT4(&sharedMemory[0x30C]) = make_uint4(0xe719fefe, 0xb562d7d7, 0x4de6abab, 0xec9a7676);
AS_UINT4(&sharedMemory[0x310]) = make_uint4(0x8f45caca, 0x1f9d8282, 0x8940c9c9, 0xfa877d7d);
AS_UINT4(&sharedMemory[0x314]) = make_uint4(0xef15fafa, 0xb2eb5959, 0x8ec94747, 0xfb0bf0f0);
AS_UINT4(&sharedMemory[0x318]) = make_uint4(0x41ecadad, 0xb367d4d4, 0x5ffda2a2, 0x45eaafaf);
AS_UINT4(&sharedMemory[0x31C]) = make_uint4(0x23bf9c9c, 0x53f7a4a4, 0xe4967272, 0x9b5bc0c0);
break;
case 1:
AS_UINT4(&sharedMemory[0x320]) = make_uint4(0x75c2b7b7, 0xe11cfdfd, 0x3dae9393, 0x4c6a2626);
AS_UINT4(&sharedMemory[0x324]) = make_uint4(0x6c5a3636, 0x7e413f3f, 0xf502f7f7, 0x834fcccc);
AS_UINT4(&sharedMemory[0x328]) = make_uint4(0x685c3434, 0x51f4a5a5, 0xd134e5e5, 0xf908f1f1);
AS_UINT4(&sharedMemory[0x32C]) = make_uint4(0xe2937171, 0xab73d8d8, 0x62533131, 0x2a3f1515);
AS_UINT4(&sharedMemory[0x330]) = make_uint4(0x080c0404, 0x9552c7c7, 0x46652323, 0x9d5ec3c3);
AS_UINT4(&sharedMemory[0x334]) = make_uint4(0x30281818, 0x37a19696, 0x0a0f0505, 0x2fb59a9a);
AS_UINT4(&sharedMemory[0x338]) = make_uint4(0x0e090707, 0x24361212, 0x1b9b8080, 0xdf3de2e2);
AS_UINT4(&sharedMemory[0x33C]) = make_uint4(0xcd26ebeb, 0x4e692727, 0x7fcdb2b2, 0xea9f7575);
break;
case 2:
AS_UINT4(&sharedMemory[0x340]) = make_uint4(0x121b0909, 0x1d9e8383, 0x58742c2c, 0x342e1a1a);
AS_UINT4(&sharedMemory[0x344]) = make_uint4(0x362d1b1b, 0xdcb26e6e, 0xb4ee5a5a, 0x5bfba0a0);
AS_UINT4(&sharedMemory[0x348]) = make_uint4(0xa4f65252, 0x764d3b3b, 0xb761d6d6, 0x7dceb3b3);
AS_UINT4(&sharedMemory[0x34C]) = make_uint4(0x527b2929, 0xdd3ee3e3, 0x5e712f2f, 0x13978484);
AS_UINT4(&sharedMemory[0x350]) = make_uint4(0xa6f55353, 0xb968d1d1, 0x00000000, 0xc12ceded);
AS_UINT4(&sharedMemory[0x354]) = make_uint4(0x40602020, 0xe31ffcfc, 0x79c8b1b1, 0xb6ed5b5b);
AS_UINT4(&sharedMemory[0x358]) = make_uint4(0xd4be6a6a, 0x8d46cbcb, 0x67d9bebe, 0x724b3939);
AS_UINT4(&sharedMemory[0x35C]) = make_uint4(0x94de4a4a, 0x98d44c4c, 0xb0e85858, 0x854acfcf);
break;
case 3:
AS_UINT4(&sharedMemory[0x360]) = make_uint4(0xbb6bd0d0, 0xc52aefef, 0x4fe5aaaa, 0xed16fbfb);
AS_UINT4(&sharedMemory[0x364]) = make_uint4(0x86c54343, 0x9ad74d4d, 0x66553333, 0x11948585);
AS_UINT4(&sharedMemory[0x368]) = make_uint4(0x8acf4545, 0xe910f9f9, 0x04060202, 0xfe817f7f);
AS_UINT4(&sharedMemory[0x36C]) = make_uint4(0xa0f05050, 0x78443c3c, 0x25ba9f9f, 0x4be3a8a8);
AS_UINT4(&sharedMemory[0x370]) = make_uint4(0xa2f35151, 0x5dfea3a3, 0x80c04040, 0x058a8f8f);
AS_UINT4(&sharedMemory[0x374]) = make_uint4(0x3fad9292, 0x21bc9d9d, 0x70483838, 0xf104f5f5);
AS_UINT4(&sharedMemory[0x378]) = make_uint4(0x63dfbcbc, 0x77c1b6b6, 0xaf75dada, 0x42632121);
AS_UINT4(&sharedMemory[0x37C]) = make_uint4(0x20301010, 0xe51affff, 0xfd0ef3f3, 0xbf6dd2d2);
break;
case 4:
AS_UINT4(&sharedMemory[0x380]) = make_uint4(0x814ccdcd, 0x18140c0c, 0x26351313, 0xc32fecec);
AS_UINT4(&sharedMemory[0x384]) = make_uint4(0xbee15f5f, 0x35a29797, 0x88cc4444, 0x2e391717);
AS_UINT4(&sharedMemory[0x388]) = make_uint4(0x9357c4c4, 0x55f2a7a7, 0xfc827e7e, 0x7a473d3d);
AS_UINT4(&sharedMemory[0x38C]) = make_uint4(0xc8ac6464, 0xbae75d5d, 0x322b1919, 0xe6957373);
AS_UINT4(&sharedMemory[0x390]) = make_uint4(0xc0a06060, 0x19988181, 0x9ed14f4f, 0xa37fdcdc);
AS_UINT4(&sharedMemory[0x394]) = make_uint4(0x44662222, 0x547e2a2a, 0x3bab9090, 0x0b838888);
AS_UINT4(&sharedMemory[0x398]) = make_uint4(0x8cca4646, 0xc729eeee, 0x6bd3b8b8, 0x283c1414);
AS_UINT4(&sharedMemory[0x39C]) = make_uint4(0xa779dede, 0xbce25e5e, 0x161d0b0b, 0xad76dbdb);
break;
case 5:
AS_UINT4(&sharedMemory[0x3A0]) = make_uint4(0xdb3be0e0, 0x64563232, 0x744e3a3a, 0x141e0a0a);
AS_UINT4(&sharedMemory[0x3A4]) = make_uint4(0x92db4949, 0x0c0a0606, 0x486c2424, 0xb8e45c5c);
AS_UINT4(&sharedMemory[0x3A8]) = make_uint4(0x9f5dc2c2, 0xbd6ed3d3, 0x43efacac, 0xc4a66262);
AS_UINT4(&sharedMemory[0x3AC]) = make_uint4(0x39a89191, 0x31a49595, 0xd337e4e4, 0xf28b7979);
AS_UINT4(&sharedMemory[0x3B0]) = make_uint4(0xd532e7e7, 0x8b43c8c8, 0x6e593737, 0xdab76d6d);
AS_UINT4(&sharedMemory[0x3B4]) = make_uint4(0x018c8d8d, 0xb164d5d5, 0x9cd24e4e, 0x49e0a9a9);
AS_UINT4(&sharedMemory[0x3B8]) = make_uint4(0xd8b46c6c, 0xacfa5656, 0xf307f4f4, 0xcf25eaea);
AS_UINT4(&sharedMemory[0x3BC]) = make_uint4(0xcaaf6565, 0xf48e7a7a, 0x47e9aeae, 0x10180808);
break;
case 6:
AS_UINT4(&sharedMemory[0x3C0]) = make_uint4(0x6fd5baba, 0xf0887878, 0x4a6f2525, 0x5c722e2e);
AS_UINT4(&sharedMemory[0x3C4]) = make_uint4(0x38241c1c, 0x57f1a6a6, 0x73c7b4b4, 0x9751c6c6);
AS_UINT4(&sharedMemory[0x3C8]) = make_uint4(0xcb23e8e8, 0xa17cdddd, 0xe89c7474, 0x3e211f1f);
AS_UINT4(&sharedMemory[0x3CC]) = make_uint4(0x96dd4b4b, 0x61dcbdbd, 0x0d868b8b, 0x0f858a8a);
AS_UINT4(&sharedMemory[0x3D0]) = make_uint4(0xe0907070, 0x7c423e3e, 0x71c4b5b5, 0xccaa6666);
AS_UINT4(&sharedMemory[0x3D4]) = make_uint4(0x90d84848, 0x06050303, 0xf701f6f6, 0x1c120e0e);
AS_UINT4(&sharedMemory[0x3D8]) = make_uint4(0xc2a36161, 0x6a5f3535, 0xaef95757, 0x69d0b9b9);
AS_UINT4(&sharedMemory[0x3DC]) = make_uint4(0x17918686, 0x9958c1c1, 0x3a271d1d, 0x27b99e9e);
break;
case 7:
AS_UINT4(&sharedMemory[0x3E0]) = make_uint4(0xd938e1e1, 0xeb13f8f8, 0x2bb39898, 0x22331111);
AS_UINT4(&sharedMemory[0x3E4]) = make_uint4(0xd2bb6969, 0xa970d9d9, 0x07898e8e, 0x33a79494);
AS_UINT4(&sharedMemory[0x3E8]) = make_uint4(0x2db69b9b, 0x3c221e1e, 0x15928787, 0xc920e9e9);
AS_UINT4(&sharedMemory[0x3EC]) = make_uint4(0x8749cece, 0xaaff5555, 0x50782828, 0xa57adfdf);
AS_UINT4(&sharedMemory[0x3F0]) = make_uint4(0x038f8c8c, 0x59f8a1a1, 0x09808989, 0x1a170d0d);
AS_UINT4(&sharedMemory[0x3F4]) = make_uint4(0x65dabfbf, 0xd731e6e6, 0x84c64242, 0xd0b86868);
AS_UINT4(&sharedMemory[0x3F8]) = make_uint4(0x82c34141, 0x29b09999, 0x5a772d2d, 0x1e110f0f);
AS_UINT4(&sharedMemory[0x3FC]) = make_uint4(0x7bcbb0b0, 0xa8fc5454, 0x6dd6bbbb, 0x2c3a1616);
break;
}
}
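// Illustrative usage sketch only -- the kernel below is not part of this file, and its name
// and the placeholder table lookup are assumptions. It shows the expected calling pattern:
// each block cooperatively fills the four 256-entry AES T-tables (4 KiB total) and then
// synchronizes before any thread reads them.
__global__ void example_usage_cn_aes_tables(uint32_t *out)
{
  __shared__ uint32_t sharedMemory[1024];   // 4 tables x 256 x uint32_t
  cn_aes_gpu_init_u4(sharedMemory);         // only threads 0..7 of the block write
  __syncthreads();                          // make the tables visible to every thread
  // A real kernel would run AES rounds here; as a placeholder, read one entry from each
  // table so the layout (table t starts at word t * 256) is explicit.
  uint32_t v = 0;
  for (int t = 0; t < 4; t++)
    v ^= sharedMemory[t * 256 + (threadIdx.x & 0xFF)];
  if (out) out[blockIdx.x * blockDim.x + threadIdx.x] = v;
}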
namespace SCAMP {
/////////////////////////////////////////////////////////////////////////////////////
// THESE HEADERS DEFINE COMPUTE STRATEGIES USED TO COMPUTE VARIOUS
// PROFILE TYPES
///////////////////////////////////////////////////////////////////////////////////
#include "kernels_compute.h"
#include "kernels_smem.h"
// Computes the matrix profile given the sliding dot products for the first
// query and the precomputed data statistics
template <typename DATA_TYPE, typename VEC2_DATA_TYPE, typename VEC4_DATA_TYPE,
typename ACCUM_TYPE, typename PROFILE_OUTPUT_TYPE,
typename PROFILE_DATA_TYPE, typename DISTANCE_TYPE, bool COMPUTE_ROWS,
bool COMPUTE_COLS, SCAMPProfileType PROFILE_TYPE, int blocks_per_sm,
int tile_height, int BLOCKSZ>
__global__ void __launch_bounds__(BLOCKSZ, blocks_per_sm)
do_tile(SCAMPKernelInputArgs<double> args, PROFILE_OUTPUT_TYPE *profile_A,
PROFILE_OUTPUT_TYPE *profile_B) {
constexpr int tile_width = tile_height + BLOCKSZ * DIAGS_PER_THREAD;
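// For intuition (hypothetical numbers -- the real TILE_HEIGHT_* / BLOCKSZ_* constants are
// defined elsewhere): with tile_height = 200, BLOCKSZ = 256 and DIAGS_PER_THREAD = 4,
// tile_width = 200 + 1024 = 1224, wide enough for every thread's diagonals to stay inside
// the tile while it sweeps tile_height rows.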
SCAMPThreadInfo<ACCUM_TYPE> thread_info;
extern __shared__ char smem_raw[];
// Wrap the shared memory in a struct which handles shared memory
// accesses
SCAMPSmem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_TYPE> smem(
smem_raw, COMPUTE_ROWS, COMPUTE_COLS, tile_width, tile_height,
args.opt.num_extra_operands);
// Find the starting diagonal of the distance matrix
const unsigned int start_diag = args.exclusion_lower +
(threadIdx.x * DIAGS_PER_THREAD) +
blockIdx.x * (blockDim.x * DIAGS_PER_THREAD);
// This is the index of the meta-diagonal that this thread block will work on
const unsigned int meta_diagonal_idx = blockIdx.x;
// The first diagonals constitute a trivial match between the same
// subsequence, so we must exclude these from the calculation according to
// args.exclusion_lower
uint32_t tile_start_col =
meta_diagonal_idx * (BLOCKSZ * DIAGS_PER_THREAD) + args.exclusion_lower;
uint32_t tile_start_row = 0;
// Initialize the column and row position of the current thread
thread_info.global_col = tile_start_col + threadIdx.x * DIAGS_PER_THREAD;
thread_info.global_row = 0;
// num_diags is the number of diagonals in the distance matrix, less any
// diagonals at the end we are not computing
const unsigned int num_diags = args.n_x - args.exclusion_upper + 1;
// Load the first dot product values
if (thread_info.global_col < args.n_x) {
thread_info.cov1 = args.cov[thread_info.global_col];
}
if (thread_info.global_col + 1 < args.n_x) {
thread_info.cov2 = args.cov[thread_info.global_col + 1];
}
if (thread_info.global_col + 2 < args.n_x) {
thread_info.cov3 = args.cov[thread_info.global_col + 2];
}
if (thread_info.global_col + 3 < args.n_x) {
thread_info.cov4 = args.cov[thread_info.global_col + 3];
}
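// Note: the four explicit covariance registers above imply this kernel is written for
// DIAGS_PER_THREAD == 4; each register seeds one of the diagonals owned by this thread.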
/////////////////////////////////////
// Main loop
/////////////////////////////////////
// Each threadblock finds all the distances on a 'metadiagonal'.
// We use a tiled approach for each thread block.
// The tiles are horizontal slices of the diagonal; think of a parallelogram
// cut from a diagonal slice of the distance matrix. Each thread starts on the
// first row and works its way down-right towards the right side of the
// distance matrix.
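// Concretely, each iteration of the loop below processes one such parallelogram: the tile
// origin advances by tile_height in both row and column at the end of the iteration, so a
// block's successive tiles form a staircase that follows its meta-diagonal toward the
// bottom-right corner of the distance matrix.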
while (tile_start_col < args.n_x && tile_start_row < args.n_y) {
// Initialize the next tile's shared memory
init_smem<DATA_TYPE, PROFILE_DATA_TYPE, PROFILE_OUTPUT_TYPE, COMPUTE_ROWS,
COMPUTE_COLS, tile_width, tile_height, BLOCKSZ>(
args, smem, profile_A, profile_B, tile_start_col, tile_start_row);
thread_info.local_col = threadIdx.x * DIAGS_PER_THREAD;
thread_info.local_row = 0;
// Start of a new tile: sync so we don't have data races with shared memory
// initialization
__syncthreads();
// There are 2 pathways here: most of the time we take the fast path (top);
// the last tile in every thread block will take the slower path (bottom).
if (tile_start_col + tile_width < args.n_x &&
tile_start_row + tile_height < args.n_y &&
start_diag + DIAGS_PER_THREAD <= num_diags) {
// Fast Path
while (thread_info.local_row < tile_height) {
do_iteration_fast<DATA_TYPE, VEC2_DATA_TYPE, VEC4_DATA_TYPE, ACCUM_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS,
COMPUTE_COLS, PROFILE_TYPE>(thread_info, smem,
args.opt);
}
} else if (start_diag < num_diags) {
// Slow Path
while (thread_info.global_col < args.n_x &&
thread_info.global_row < args.n_y &&
thread_info.local_row < tile_height) {
do_row_edge<DATA_TYPE, PROFILE_DATA_TYPE, ACCUM_TYPE, DISTANCE_TYPE,
PROFILE_TYPE, COMPUTE_ROWS, COMPUTE_COLS>(
thread_info, smem, args.n_x, start_diag, num_diags, args.opt);
++thread_info.global_col;
++thread_info.global_row;
++thread_info.local_col;
++thread_info.local_row;
}
}
// After this sync, the caches will be updated with the best-so-far values
// for this tile
__syncthreads();
// Write back our best-so-far computed for this tile to global memory
write_back<DATA_TYPE, PROFILE_OUTPUT_TYPE, PROFILE_DATA_TYPE, COMPUTE_COLS,
COMPUTE_ROWS, tile_width, tile_height, BLOCKSZ>(
args, smem, tile_start_col, tile_start_row, args.n_x, args.n_y,
profile_A, profile_B);
// Update the tile position
tile_start_col += tile_height;
tile_start_row += tile_height;
// Make sure our updates were committed before we pull in the next tile
__threadfence_block();
if (NeedsCheckIfDone(PROFILE_TYPE)) {
// Copy the latest value of the profile length to shared memory
if (threadIdx.x == 0) {
*smem.profile_a_length = *args.profile_a_length;
*smem.profile_b_length = *args.profile_b_length;
}
// Sync so that the write to shared memory is visible to all other threads
__syncthreads();
// If we have too many results, break this thread block out of the kernel
// as more computation is pointless. We need to break the entire thread
// block out at once; otherwise this is undefined behavior.
if (*smem.profile_a_length > args.max_matches_per_tile ||
*smem.profile_b_length > args.max_matches_per_tile) {
break;
}
}
}
}
template <typename PROFILE_OUTPUT_TYPE, typename PROFILE_DATA_TYPE,
typename DISTANCE_TYPE, SCAMPProfileType PROFILE_TYPE,
int BLOCKSPERSM>
SCAMPError_t LaunchDoTile(SCAMPKernelInputArgs<double> args,
PROFILE_OUTPUT_TYPE *profile_A,
PROFILE_OUTPUT_TYPE *profile_B,
SCAMPPrecisionType fp_type, bool computing_rows,
bool computing_cols, uint64_t blocksz,
uint64_t num_blocks, uint64_t smem, cudaStream_t s) {
dim3 block(blocksz, 1, 1);
dim3 grid(num_blocks, 1, 1);
if (computing_rows && computing_cols) {
constexpr bool COMPUTE_COLS = true;
constexpr bool COMPUTE_ROWS = true;
switch (fp_type) {
case PRECISION_ULTRA:
case PRECISION_DOUBLE: {
do_tile<double, double2, double4, double, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
do_tile<float, float2, float4, double, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
do_tile<float, float2, float4, float, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
return SCAMP_NO_ERROR;
} else if (computing_cols) {
constexpr bool COMPUTE_COLS = true;
constexpr bool COMPUTE_ROWS = false;
switch (fp_type) {
case PRECISION_ULTRA:
case PRECISION_DOUBLE: {
do_tile<double, double2, double4, double, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
do_tile<float, float2, float4, double, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
do_tile<float, float2, float4, float, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
} else if (computing_rows) {
constexpr bool COMPUTE_COLS = false;
constexpr bool COMPUTE_ROWS = true;
switch (fp_type) {
case PRECISION_ULTRA:
case PRECISION_DOUBLE: {
do_tile<double, double2, double4, double, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_DP, BLOCKSZ_DP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_MIXED: {
do_tile<float, float2, float4, double, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
case PRECISION_SINGLE: {
do_tile<float, float2, float4, float, PROFILE_OUTPUT_TYPE,
PROFILE_DATA_TYPE, DISTANCE_TYPE, COMPUTE_ROWS, COMPUTE_COLS,
PROFILE_TYPE, BLOCKSPERSM, TILE_HEIGHT_SP, BLOCKSZ_SP>
<<<grid, block, smem, s>>>(args, profile_A, profile_B);
break;
}
default:
return SCAMP_CUDA_ERROR;
}
}
gpuErrchk(cudaPeekAtLastError());
return SCAMP_NO_ERROR;
}
SCAMPError_t compute_gpu_resources_and_launch(SCAMPKernelInputArgs<double> args,
Tile *t, void *profile_a,
void *profile_b, bool do_rows,
bool do_cols) {
int exclusion_total = args.exclusion_lower + args.exclusion_upper;
uint64_t blocksz = get_blocksz(t);
uint64_t num_workers = ceil((args.n_x - exclusion_total) /
static_cast<double>(DIAGS_PER_THREAD));
uint64_t num_blocks = ceil(num_workers / static_cast<double>(blocksz));
uint64_t smem = get_smem(t->info(), blocksz);
if (!t->info()->silent_mode) {
std::cout << "Launching " << num_blocks << " thread blocks of size "
<< blocksz << " with a total of " << smem
<< " bytes of shared memory per block." << std::endl;
}
if (exclusion_total < args.n_x) {
switch (t->info()->profile_type) {
case PROFILE_TYPE_SUM_THRESH:
return LaunchDoTile<double, double, double, PROFILE_TYPE_SUM_THRESH,
BLOCKSPERSM>(
args, reinterpret_cast<double *>(profile_a),
reinterpret_cast<double *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_1NN_INDEX:
return LaunchDoTile<uint64_t, uint64_t, float, PROFILE_TYPE_1NN_INDEX,
BLOCKSPERSM>(
args, reinterpret_cast<uint64_t *>(profile_a),
reinterpret_cast<uint64_t *>(profile_b), t->info()->fp_type,
do_rows, do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_1NN:
return LaunchDoTile<float, float, float, PROFILE_TYPE_1NN, BLOCKSPERSM>(
args, reinterpret_cast<float *>(profile_a),
reinterpret_cast<float *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_APPROX_ALL_NEIGHBORS:
return LaunchDoTile<SCAMPmatch, uint64_t, float,
PROFILE_TYPE_APPROX_ALL_NEIGHBORS, BLOCKSPERSM>(
args, reinterpret_cast<SCAMPmatch *>(profile_a),
reinterpret_cast<SCAMPmatch *>(profile_b), t->info()->fp_type,
do_rows, do_cols, blocksz, num_blocks, smem, t->get_stream());
case PROFILE_TYPE_MATRIX_SUMMARY:
return LaunchDoTile<float, uint64_t, float, PROFILE_TYPE_MATRIX_SUMMARY,
BLOCKSPERSM>(
args, reinterpret_cast<float *>(profile_a),
reinterpret_cast<float *>(profile_b), t->info()->fp_type, do_rows,
do_cols, blocksz, num_blocks, smem, t->get_stream());
default:
return SCAMP_FUNCTIONALITY_UNIMPLEMENTED;
}
}
return SCAMP_NO_ERROR;
}
SCAMPError_t gpu_kernel_self_join_upper(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, false, false);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_a(), t->profile_b(), t->info()->computing_rows,
t->info()->computing_cols);
}
SCAMPError_t gpu_kernel_self_join_lower(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, true, false);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_b(), t->profile_a(), t->info()->computing_cols,
t->info()->computing_rows);
}
SCAMPError_t gpu_kernel_ab_join_upper(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, false, true);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_a(), t->profile_b(), t->info()->computing_rows,
t->info()->computing_cols);
}
SCAMPError_t gpu_kernel_ab_join_lower(Tile *t) {
SCAMPKernelInputArgs<double> tile_args(t, true, true);
return compute_gpu_resources_and_launch(
tile_args, t, t->profile_b(), t->profile_a(), t->info()->computing_cols,
t->info()->computing_rows);
}
void match_gpu_sort(SCAMPmatch *matches, int64_t len, cudaStream_t stream) {
thrust::device_ptr<SCAMPmatch> ptr = thrust::device_pointer_cast(matches);
thrust::sort(thrust::cuda::par.on(stream), ptr, ptr + len);
}
} // namespace SCAMP
#include "unit_test.h"
#include "matrix.h"
#include "csr_multiply.h"
namespace amgx
{
DECLARE_UNITTEST_BEGIN(CsrSparsityTests_Base);
std::string base_keywords()
{
return "csr";
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Index_vector >
void
count_non_zeroes( const Index_vector &A_rows, const Index_vector &A_cols,
const Index_vector &B_rows, const Index_vector &B_cols,
Index_vector &C_rows )
{
typedef typename Index_vector::value_type Index_type;
int nRows = static_cast<int>( A_rows.size( ) - 1 );
#pragma omp parallel for shared(nRows)
for ( int aRowId = 0 ; aRowId < nRows ; ++aRowId )
{
#ifdef USE_CPP_TR1
std::tr1::unordered_set<Index_type> cCols;
#else
std::set<Index_type> cCols;
#endif
for ( Index_type aColIt = A_rows[aRowId], aColEnd = A_rows[aRowId + 1] ; aColIt < aColEnd ; ++aColIt )
{
Index_type bRowId = A_cols[aColIt];
for ( Index_type bColIt = B_rows[bRowId], bColEnd = B_rows[bRowId + 1] ; bColIt < bColEnd ; ++bColIt )
{
cCols.insert( B_cols[bColIt] );
}
}
C_rows[aRowId] = static_cast<Index_type>( cCols.size( ) );
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Index_vector >
void
compute_sparsity( const Index_vector &A_rows, const Index_vector &A_cols,
const Index_vector &B_rows, const Index_vector &B_cols,
const Index_vector &C_rows, Index_vector &C_cols )
{
typedef typename Index_vector::value_type Index_type;
#ifdef USE_CPP_TR1
typedef std::tr1::unordered_set<Index_type> Set;
#else
typedef std::set<Index_type> Set;
#endif
int nRows = static_cast<int>( A_rows.size( ) - 1 );
#pragma omp parallel for shared(nRows)
for ( int aRowId = 0 ; aRowId < nRows ; ++aRowId )
{
Set cCols;
for ( Index_type aColIt = A_rows[aRowId], aColEnd = A_rows[aRowId + 1] ; aColIt < aColEnd ; ++aColIt )
{
Index_type bRowId = A_cols[aColIt];
for ( Index_type bColIt = B_rows[bRowId], bColEnd = B_rows[bRowId + 1] ; bColIt < bColEnd ; ++bColIt )
{
cCols.insert( B_cols[bColIt] );
}
}
Index_type cRowIt = C_rows[aRowId];
for ( typename Set::const_iterator it = cCols.begin() ; it != cCols.end() ; ++it, ++cRowIt )
{
assert( cRowIt < C_rows[aRowId + 1] );
C_cols[cRowIt] = *it;
}
}
}
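///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only -- this helper is not part of the original test suite. It shows how
// the two reference routines above are normally chained (count -> exclusive scan -> fill) to
// build the full host-side sparsity pattern of C = A*B, mirroring what check_csr_sparsity does
// below.
template< typename Index_vector >
void
build_reference_sparsity( const Index_vector &A_rows, const Index_vector &A_cols,
                          const Index_vector &B_rows, const Index_vector &B_cols,
                          Index_vector &C_rows, Index_vector &C_cols )
{
  // One offset per row of C, plus one slot for the total count.
  C_rows.resize( A_rows.size( ) );
  // Per-row non-zero counts of C.
  count_non_zeroes( A_rows, A_cols, B_rows, B_cols, C_rows );
  // Turn counts into row offsets; the last entry becomes the total number of non-zeroes.
  thrust::exclusive_scan( C_rows.begin( ), C_rows.end( ), C_rows.begin( ) );
  C_cols.resize( C_rows[C_rows.size( ) - 1] );
  // Fill the column indices of each row of C.
  compute_sparsity( A_rows, A_cols, B_rows, B_cols, C_rows, C_cols );
}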
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Config >
void
compare_matrices( Matrix<Config> &A, Matrix<Config> &B )
{
A.sortByRowAndColumn();
B.sortByRowAndColumn();
UNITTEST_ASSERT_EQUAL_DESC( "Rows", A.row_offsets, B.row_offsets );
UNITTEST_ASSERT_EQUAL_DESC( "Cols", A.col_indices, B.col_indices );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision >
void
check_csr_sparsity( const Matrix<TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> > &A_h, void *wk )
{
typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h;
typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d;
typedef Matrix<Config_h> Matrix_h;
typedef Matrix<Config_d> Matrix_d;
Matrix_h B_h( A_h ), C_h;
C_h.set_num_rows( A_h.get_num_rows() );
C_h.set_num_cols( B_h.get_num_rows() );
C_h.row_offsets.resize( A_h.get_num_rows() + 1 );
std::ostringstream buffer;
{
count_non_zeroes( A_h.row_offsets, A_h.col_indices, B_h.row_offsets, B_h.col_indices, C_h.row_offsets );
thrust::exclusive_scan( C_h.row_offsets.begin( ), C_h.row_offsets.end( ), C_h.row_offsets.begin( ) );
cudaCheckError();
int nVals = C_h.row_offsets[A_h.get_num_rows()];
C_h.col_indices.resize( nVals );
C_h.values.resize( nVals );
C_h.set_num_nz( nVals );
compute_sparsity( A_h.row_offsets, A_h.col_indices, B_h.row_offsets, B_h.col_indices, C_h.row_offsets, C_h.col_indices );
}
Matrix_d C_d;
{
Matrix_d A_d( A_h ), B_d( B_h );
CSR_Multiply<Config_d>::csr_sparsity( A_d, B_d, C_d, wk );
}
Matrix_d C_d_ref( C_h );
compare_matrices( C_d, C_d_ref );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision >
void
check_csr_sparsity_square( const Matrix<TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> > &A_h, void *wk )
{
typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h;
typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d;
typedef Matrix<Config_h> Matrix_h;
typedef Matrix<Config_d> Matrix_d;
Matrix_h C_h;
C_h.set_num_rows( A_h.get_num_rows() );
C_h.set_num_cols( A_h.get_num_rows() );
C_h.row_offsets.resize( A_h.get_num_rows() + 1 );
std::ostringstream buffer;
{
count_non_zeroes( A_h.row_offsets, A_h.col_indices, A_h.row_offsets, A_h.col_indices, C_h.row_offsets );
thrust::exclusive_scan( C_h.row_offsets.begin( ), C_h.row_offsets.end( ), C_h.row_offsets.begin( ) );
cudaCheckError();
int nVals = C_h.row_offsets[A_h.get_num_rows()];
C_h.col_indices.resize( nVals );
C_h.values.resize( nVals );
C_h.set_num_nz( nVals );
compute_sparsity( A_h.row_offsets, A_h.col_indices, A_h.row_offsets, A_h.col_indices, C_h.row_offsets, C_h.col_indices );
}
Matrix_d C_d;
{
Matrix_d A_d( A_h );
CSR_Multiply<Config_d>::csr_sparsity( A_d, C_d, wk );
}
Matrix_d C_d_ref( C_h );
compare_matrices( C_d, C_d_ref );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision >
void
check_csr_sparsity_poisson( int points, int nx, int ny, int nz, AMG_Config &cfg )
{
typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h;
typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d;
typedef Matrix<Config_h> Matrix_h;
Matrix_h A_h;
A_h.set_initialized(0);
A_h.addProps(CSR);
MatrixCusp<Config_h, cusp::csr_format> wA(&A_h);
switch (points)
{
case 5:
cusp::gallery::poisson5pt(wA, nx, ny);
break;
case 7:
cusp::gallery::poisson7pt(wA, nx, ny, nz);
break;
case 9:
cusp::gallery::poisson9pt(wA, nx, ny);
break;
case 27:
cusp::gallery::poisson27pt(wA, nx, ny, nz);
break;
default:
printf("Error invalid number of poisson points specified, valid numbers are 5, 7, 9, 27\n");
}
A_h.set_initialized(1);
void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" );
check_csr_sparsity( A_h, wk );
CSR_Multiply<Config_d>::csr_workspace_delete( wk );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision >
void
check_csr_sparsity_square_poisson( int points, int nx, int ny, int nz, AMG_Config &cfg )
{
typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h;
typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d;
typedef Matrix<Config_h> Matrix_h;
Matrix_h A_h;
A_h.set_initialized(0);
switch (points)
{
case 5:
case 7:
case 9:
case 27:
generatePoissonForTest(A_h, 1, 0, points, nx, ny, nz);
break;
default:
printf("Error invalid number of poisson points specified, valid numbers are 5, 7, 9, 27\n");
}
A_h.set_initialized(1);
void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" );
check_csr_sparsity_square( A_h, wk );
CSR_Multiply<Config_d>::csr_workspace_delete( wk );
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision >
void
check_csr_sparsity_square_file( const std::string &filename, AMG_Config &cfg )
{
typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h;
typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d;
typedef Matrix<Config_h> Matrix_h;
typedef Vector<Config_h> Vector_h;
Matrix_h A_h;
Vector_h x_h, b_h;
A_h.set_initialized(0);
A_h.addProps(CSR);
UNITTEST_ASSERT_TRUE(MatrixIO<Config_h>::readSystem( filename.c_str(), A_h, b_h, x_h ) == AMGX_OK);
A_h.set_initialized(1);
void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" );
check_csr_sparsity_square( A_h, wk );
CSR_Multiply<Config_d>::csr_workspace_delete( wk );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Base);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson5_10_10, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson5_10_10)
CsrSparsityTests_Poisson5_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson5_10_10_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson5_100_100, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson5_100_100)
CsrSparsityTests_Poisson5_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson5_100_100_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson7_10_10, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson7_10_10)
CsrSparsityTests_Poisson7_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson7_10_10_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson7_100_100, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson7_100_100)
CsrSparsityTests_Poisson7_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson7_100_100_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson9_10_10, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson9_10_10)
CsrSparsityTests_Poisson9_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson9_10_10_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson9_100_100, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson9_100_100)
CsrSparsityTests_Poisson9_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson9_100_100_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson27_10_10, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson27_10_10)
CsrSparsityTests_Poisson27_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson27_10_10_dDDI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityTests_Poisson27_100_100, CsrSparsityTests_Base<T_Config>);
void run()
{
AMG_Config cfg;
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
CsrSparsityTests_Base<T_Config>::template check_csr_sparsity_square_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityTests_Poisson27_100_100)
CsrSparsityTests_Poisson27_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityTests_Poisson27_100_100_dDDI;
} // namespace amgx
#pragma once
#include <gunrock/app/problem_base.cuh>
#include <gunrock/app/louvain/louvain_test.cuh>
namespace gunrock {
namespace app {
namespace louvain {
/**
* @brief Specifying parameters for the Louvain Problem
* @param parameters The util::Parameters<...> structure holding all parameter info
* \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
return retval;
}
/**
* @brief Louvain Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, typename _ValueT = typename _GraphT::ValueT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename util::PreDefinedValues<VertexT>::PromoteType EdgePairT;
static const int BITS_VERTEXT = sizeof(VertexT) * 8;
static const EdgePairT VERTEXT_MASK =
(EdgePairT)util::PreDefinedValues<VertexT>::AllOnes;
typedef _ValueT ValueT;
typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// Converters
static __host__ __device__ __forceinline__ EdgePairT
MakePair(const VertexT &first, const VertexT &second) {
return (((EdgePairT)first) << BITS_VERTEXT) + second;
}
static __host__ __device__ __forceinline__ VertexT
GetFirst(const EdgePairT &pair) {
return pair >> BITS_VERTEXT;
}
static __host__ __device__ __forceinline__ VertexT
GetSecond(const EdgePairT &pair) {
return pair & VERTEXT_MASK;
}
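// Worked example (assuming a 32-bit VertexT, so BITS_VERTEXT == 32 and EdgePairT is 64-bit):
// MakePair(3, 7) == 0x0000000300000007,
// GetFirst(0x0000000300000007) == 3, GetSecond(0x0000000300000007) == 7.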
// Helper structures
/**
* @brief Data structure containing Louvain-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
// communities the vertices are currently in
util::Array1D<SizeT, VertexT> current_communities;
// communities to move to
util::Array1D<SizeT, VertexT> next_communities;
// size of communities
util::Array1D<SizeT, VertexT> community_sizes;
// sum of edge weights from vertices
util::Array1D<SizeT, ValueT> w_v2;
// sum of edge weights from vertices to self
util::Array1D<SizeT, ValueT> w_v2self;
// sum of edge weights from communities
util::Array1D<SizeT, ValueT> w_c2;
// communities each edge belongs to
// util::Array1D<SizeT, VertexT> edge_comms0;
// util::Array1D<SizeT, VertexT> edge_comms1;
// weights of edges
util::Array1D<SizeT, ValueT> edge_weights0;
util::Array1D<SizeT, ValueT> edge_weights1;
// segment offsets
util::Array1D<SizeT, SizeT> seg_offsets0;
util::Array1D<SizeT, SizeT> seg_offsets1;
// edge pairs for sorting
util::Array1D<SizeT, EdgePairT> edge_pairs0;
util::Array1D<SizeT, EdgePairT> edge_pairs1;
// temp space for cub
util::Array1D<uint64_t, char> cub_temp_space;
// number of neighbor communities
util::Array1D<SizeT, SizeT> num_neighbor_comms;
// Number of new communities
util::Array1D<SizeT, SizeT> num_new_comms;
// Number of new edges
util::Array1D<SizeT, SizeT> num_new_edges;
// base of modularity gain
util::Array1D<SizeT, ValueT> gain_bases;
// gain of each move
util::Array1D<SizeT, ValueT> max_gains;
// gain from current iteration
util::Array1D<SizeT, ValueT> iter_gain;
// gain from current pass
ValueT pass_gain;
// sum of edge weights
ValueT m2;
// modularity
ValueT q;
// Contracted graph
GraphT new_graphs[2];
// std::vector<util::Array1D<SizeT, VertexT>*> pass_communities;
util::Array1D<SizeT, VertexT> *pass_communities;
int num_pass, max_iters;
// Whether to use cub RadixSort instead of cub SegmentedRadixSort
// bool unify_segments;
/*
* @brief Default constructor
*/
DataSlice()
: BaseDataSlice()
// new_graph (NULL),
// unify_segments(false)
{
current_communities.SetName("current_communities");
next_communities.SetName("next_communities");
community_sizes.SetName("community_sizes");
w_v2.SetName("w_v2");
w_v2self.SetName("w_v2self");
w_c2.SetName("w_c2");
// edge_comms0 .SetName("edge_comms0" );
// edge_comms1 .SetName("edge_comms1" );
edge_weights0.SetName("edge_weights0");
edge_weights1.SetName("edge_weights1");
seg_offsets0.SetName("seg_offsets0");
seg_offsets1.SetName("seg_offsets1");
edge_pairs0.SetName("edge_pairs0");
edge_pairs1.SetName("edge_pairs1");
cub_temp_space.SetName("cub_temp_space");
num_neighbor_comms.SetName("num_neighbor_comms");
num_new_comms.SetName("num_new_comms");
num_new_edges.SetName("num_new_edges");
gain_bases.SetName("gain_bases");
max_gains.SetName("max_gains");
iter_gain.SetName("iter_gain");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(current_communities.Release(target));
GUARD_CU(next_communities.Release(target));
GUARD_CU(community_sizes.Release(target));
GUARD_CU(w_v2.Release(target));
GUARD_CU(w_v2self.Release(target));
GUARD_CU(w_c2.Release(target));
// GUARD_CU(edge_comms0 .Release(target));
// GUARD_CU(edge_comms1 .Release(target));
GUARD_CU(edge_weights0.Release(target));
GUARD_CU(edge_weights1.Release(target));
GUARD_CU(seg_offsets0.Release(target));
GUARD_CU(seg_offsets1.Release(target));
GUARD_CU(edge_pairs0.Release(target));
GUARD_CU(edge_pairs1.Release(target));
GUARD_CU(cub_temp_space.Release(target));
GUARD_CU(num_neighbor_comms.Release(target));
GUARD_CU(num_new_comms.Release(target));
GUARD_CU(num_new_edges.Release(target));
GUARD_CU(gain_bases.Release(target));
GUARD_CU(max_gains.Release(target));
GUARD_CU(iter_gain.Release(target));
// if (new_graph != NULL)
{
GUARD_CU(new_graphs[0].Release(target));
GUARD_CU(new_graphs[1].Release(target));
// delete new_graph; new_graph = NULL;
}
// for (auto &pass_comm : pass_communities)
if (pass_communities != NULL)
for (int i = 0; i < max_iters; i++) {
auto &pass_comm = pass_communities[i];
// if (pass_comm == NULL)
// continue;
GUARD_CU(pass_comm.Release(target));
// delete pass_comm; pass_comm = NULL;
}
/// pass_communities.clear();
delete[] pass_communities;
pass_communities = NULL;
GUARD_CU(BaseDataSlice ::Release(target));
return retval;
}
/**
* @brief initializing Louvain-specific data on each gpu
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
* @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0,
util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
GUARD_CU(current_communities.Allocate(sub_graph.nodes, target));
GUARD_CU(next_communities.Allocate(sub_graph.nodes, target));
GUARD_CU(community_sizes.Allocate(sub_graph.nodes, target));
GUARD_CU(w_v2.Allocate(sub_graph.nodes, target));
GUARD_CU(w_v2self.Allocate(sub_graph.nodes, target));
GUARD_CU(w_c2.Allocate(sub_graph.nodes, target));
// GUARD_CU(edge_comms0 .Allocate(sub_graph.edges+1, target));
// GUARD_CU(edge_comms1 .Allocate(sub_graph.edges+1, target));
GUARD_CU(edge_weights0.Allocate(sub_graph.edges + 1, target));
GUARD_CU(edge_weights1.Allocate(sub_graph.edges + 1, target));
GUARD_CU(seg_offsets0.Allocate(sub_graph.edges + 1, target));
GUARD_CU(seg_offsets1.Allocate(sub_graph.edges + 1, target));
GUARD_CU(edge_pairs0.Allocate(sub_graph.edges + 1, target));
GUARD_CU(edge_pairs1.Allocate(sub_graph.edges + 1, target));
GUARD_CU(num_neighbor_comms.Allocate(1, target | util::HOST));
GUARD_CU(num_new_comms.Allocate(1, target | util::HOST));
GUARD_CU(num_new_edges.Allocate(1, target | util::HOST));
GUARD_CU(cub_temp_space.Allocate(1, target));
GUARD_CU(gain_bases.Allocate(sub_graph.nodes, target));
GUARD_CU(max_gains.Allocate(sub_graph.nodes, target));
GUARD_CU(iter_gain.Allocate(1, target | util::HOST));
if (target & util::DEVICE) {
GUARD_CU(sub_graph.Move(util::HOST, target, this->stream));
}
pass_communities = new util::Array1D<SizeT, VertexT>[max_iters + 1];
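// Extract() reads mappings 0 .. num_pass inclusive, hence max_iters + 1 slots.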
return retval;
} // Init
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
// if (new_graph != NULL)
{
// GUARD_CU(new_graphs[0].Release(target));
// GUARD_CU(new_graphs[1].Release(target));
// delete new_graph; new_graph = NULL;
}
pass_gain = 0;
// for (auto &pass_comm : pass_communities)
// for (int i = 0; i < max_iters; i++)
//{
// auto &pass_comm = pass_communities[i];
// if (pass_comm == NULL)
// continue;
// GUARD_CU(pass_comm -> Release(target));
// delete pass_comm; pass_comm = NULL;
//}
num_pass = 0;
// pass_communities.clear();
return retval;
}
}; // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
// Methods
/**
* @brief LouvainProblem default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {}
/**
* @brief LouvainProblem default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Copy result communities computed on GPUs back to host-side arrays.
* @param[out] h_communities Host array to store the computed community
* assignment of each vertex. \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(
VertexT *h_communities,
std::vector<std::vector<VertexT> *> *pass_communities = NULL,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->org_graph->nodes;
bool has_pass_communities = false;
if (pass_communities != NULL)
has_pass_communities = true;
else
pass_communities = new std::vector<std::vector<VertexT> *>;
if (this->num_gpus == 1) {
for (VertexT v = 0; v < nodes; v++) h_communities[v] = v;
auto &data_slice = data_slices[0][0];
// for (auto &pass_comm : data_slice.pass_communities)
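// Each pass i produced a vertex-to-community mapping pass_communities[i] on the graph
// of that pass; composing the mappings in order gives the final assignment on the
// original graph. For example, if pass 0 maps vertex 7 to community 3 and pass 1 maps
// community 3 to community 1, then after this loop h_communities[7] == 1.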
for (int i = 0; i <= data_slice.num_pass; i++) {
auto &v2c = data_slice.pass_communities[i];
for (VertexT v = 0; v < nodes; v++)
h_communities[v] = v2c[h_communities[v]];
}
} else { // num_gpus != 1
// TODO: extract the results from multiple GPUs, e.g.:
// util::Array1D<SizeT, ValueT *> th_distances;
// th_distances.SetName("bfs::Problem::Extract::th_distances");
// GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST));
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
auto &data_slice = data_slices[gpu][0];
if (target == util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
// GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST));
}
// th_distances[gpu] = data_slice.distances.GetPointer(util::HOST);
} // end for(gpu)
} // end if
// Clean-up
if (!has_pass_communities) {
for (auto it = pass_communities->begin(); it != pass_communities->end();
it++) {
(*it)->clear();
delete *it;
}
pass_communities->clear();
delete pass_communities;
pass_communities = NULL;
}
return retval;
}
/**
* @brief initialization function.
* @param graph The graph that Louvain processes on
* @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
ValueT m2 = 0;
ValueT q = Get_Modularity<GraphT, ValueT>(graph);
for (SizeT e = 0; e < graph.edges; e++) m2 += graph.CsrT::edge_values[e];
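// For an undirected graph stored in CSR form each edge is typically stored twice
// (once per direction), so this sum corresponds to 2m, the normalization used in
// the modularity Q = (1/(2m)) * sum_ij (A_ij - k_i*k_j/(2m)) * delta(c_i, c_j).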
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
// data_slice.unify_segments
// = this -> parameters.template Get<bool>("unify-segments");
data_slice.max_iters = this->parameters.template Get<int>("max-iters");
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
this->gpu_idx[gpu], target, this->flag));
data_slice.m2 = m2;
data_slice.q = q;
} // end for (gpu)
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
// Set device
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu]->Reset(target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
/** @} */
};
} // namespace louvain
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include "cudapoa_structs.cuh"
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#include <claraparabricks/genomeworks/utils/limits.cuh>
#include <stdio.h>
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
template <typename ScoreT>
__device__ __forceinline__
ScoreT4<ScoreT>
make_ScoreT4(ScoreT s0)
{
ScoreT4<ScoreT> t;
t.s0 = s0;
t.s1 = s0;
t.s2 = s0;
t.s3 = s0;
return t;
}
template <typename SeqT,
typename ScoreT,
typename SizeT>
__device__ __forceinline__
ScoreT4<ScoreT>
computeScore(int32_t rIdx,
SeqT4<SeqT> read4,
int32_t gIdx,
SeqT graph_base,
uint16_t pred_count,
int32_t pred_idx,
SizeT* node_id_to_pos,
SizeT* incoming_edges,
ScoreT* scores,
int32_t scores_width,
int32_t gap_score,
int32_t match_score,
int32_t mismatch_score)
{
ScoreT4<ScoreT> char_profile;
char_profile.s0 = (graph_base == read4.r0 ? match_score : mismatch_score);
char_profile.s1 = (graph_base == read4.r1 ? match_score : mismatch_score);
char_profile.s2 = (graph_base == read4.r2 ? match_score : mismatch_score);
char_profile.s3 = (graph_base == read4.r3 ? match_score : mismatch_score);
// The load instructions typically load data in 4B or 8B chunks.
// If data is 16b (2B), then a 4B load chunk is loaded into a register
// and the necessary bits are extracted before returning. This wastes cycles,
// as each read of 16b issues a separate load command.
// Instead it is better to load a 4B or 8B chunk into a register
// using a single load instruction, and then extract the necessary part
// of the data using bit arithmetic. This also reduces the register count.
int64_t score_index = static_cast<int64_t>(pred_idx) * static_cast<int64_t>(scores_width);
ScoreT4<ScoreT>* pred_scores = (ScoreT4<ScoreT>*)&scores[score_index];
// loads 8 consecutive bytes (4 shorts)
ScoreT4<ScoreT> score4 = pred_scores[rIdx];
// need to load the next chunk of memory as well
ScoreT4<ScoreT> score4_next = pred_scores[rIdx + 1];
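// score4 holds the predecessor-row cells for columns 4*rIdx .. 4*rIdx+3; the vertical
// term for score.s3 (column 4*rIdx+4) falls into the next 4-cell chunk, which is why
// score4_next.s0 appears in the last max() below.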
ScoreT4<ScoreT> score;
score.s0 = max(score4.s0 + char_profile.s0,
score4.s1 + gap_score);
score.s1 = max(score4.s1 + char_profile.s1,
score4.s2 + gap_score);
score.s2 = max(score4.s2 + char_profile.s2,
score4.s3 + gap_score);
score.s3 = max(score4.s3 + char_profile.s3,
score4_next.s0 + gap_score);
// Perform same score updates as above, but for rest of predecessors.
for (int32_t p = 1; p < pred_count; p++)
{
int32_t pred_idx = node_id_to_pos[incoming_edges[gIdx * CUDAPOA_MAX_NODE_EDGES + p]] + 1;
score_index = static_cast<int64_t>(pred_idx) * static_cast<int64_t>(scores_width);
ScoreT4<ScoreT>* pred_scores = (ScoreT4<ScoreT>*)&scores[score_index];
// Reasoning for 8B preload same as above.
ScoreT4<ScoreT> score4 = pred_scores[rIdx];
ScoreT4<ScoreT> score4_next = pred_scores[rIdx + 1];
score.s0 = max(score4.s0 + char_profile.s0,
max(score.s0, score4.s1 + gap_score));
score.s1 = max(score4.s1 + char_profile.s1,
max(score.s1, score4.s2 + gap_score));
score.s2 = max(score4.s2 + char_profile.s2,
max(score.s2, score4.s3 + gap_score));
score.s3 = max(score4.s3 + char_profile.s3,
max(score.s3, score4_next.s0 + gap_score));
}
return score;
}
/**
* @brief Device function for running Needleman-Wunsch dynamic programming loop.
*
* @param[in] nodes Device buffer with unique nodes in graph
* @param[in] graph Device buffer with sorted graph
* @param[in] node_id_to_pos Device scratch space for mapping node ID to position in graph
* @param[in] incoming_edge_count Device buffer with number of incoming edges per node
* @param[in] incoming_edges Device buffer with incoming edges per node
* @param[in] outgoing_edge_count Device buffer with number of outgoing edges per node
* @param[in] read Device buffer with sequence (read) to align
* @param[in] read_length Number of bases in read
* @param[out] scores Device scratch space that stores the alignment matrix scores
* @param[out] alignment_graph Device scratch space for traceback alignment of graph
* @param[out] alignment_read Device scratch space for traceback alignment of sequence
* @param[in] gap_score Score for inserting gap into alignment
* @param[in] mismatch_score Score for finding a mismatch in alignment
* @param[in] match_score Score for finding a match in alignment
*
* @return Number of nodes in final alignment.
*/
template <typename SeqT,
typename ScoreT,
typename SizeT,
int32_t CPT = 4>
__device__ __forceinline__
int32_t
needlemanWunsch(SeqT* nodes,
SizeT* graph,
SizeT* node_id_to_pos,
int32_t graph_count,
uint16_t* incoming_edge_count,
SizeT* incoming_edges,
uint16_t* outgoing_edge_count,
SeqT* read,
int32_t read_length,
ScoreT* scores,
int32_t scores_width,
SizeT* alignment_graph,
SizeT* alignment_read,
int32_t gap_score,
int32_t mismatch_score,
int32_t match_score)
{
static_assert(CPT == 4, "implementation currently supports only 4 cells per thread");
GW_CONSTEXPR ScoreT score_type_min_limit = numeric_limits<ScoreT>::min();
int16_t lane_idx = threadIdx.x % WARP_SIZE;
int64_t score_index;
// Init horizontal boundary conditions (read).
for (int32_t j = lane_idx; j < read_length + 1; j += WARP_SIZE)
{
scores[j] = j * gap_score;
}
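// e.g. with gap_score == -8 this boundary row becomes 0, -8, -16, ..., i.e. the cost
// of aligning a read prefix of length j against an empty graph.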
if (lane_idx == 0)
{
#ifdef NW_VERBOSE_PRINT
printf("graph %d, read %d\n", graph_count, read_length);
#endif
// Init vertical boundary (graph).
for (int32_t graph_pos = 0; graph_pos < graph_count; graph_pos++)
{
int32_t node_id = graph[graph_pos];
int32_t i = graph_pos + 1;
uint16_t pred_count = incoming_edge_count[node_id];
if (pred_count == 0)
{
score_index = static_cast<int64_t>(i) * static_cast<int64_t>(scores_width);
scores[score_index] = gap_score;
}
else
{
int32_t penalty = score_type_min_limit;
for (int32_t p = 0; p < pred_count; p++)
{
int32_t pred_node_id = incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p];
int32_t pred_node_graph_pos = node_id_to_pos[pred_node_id] + 1;
score_index = static_cast<int64_t>(pred_node_graph_pos) * static_cast<int64_t>(scores_width);
penalty = max(penalty, scores[score_index]);
}
score_index = static_cast<int64_t>(i) * static_cast<int64_t>(scores_width);
scores[score_index] = penalty + gap_score;
}
}
}
__syncwarp();
// readpos_bound is the smallest multiple of (WARP_SIZE * CPT) that is >= read_length.
int32_t readpos_bound = (((read_length - 1) / (WARP_SIZE * CPT)) + 1) * (WARP_SIZE * CPT);
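// e.g. with WARP_SIZE == 32 and CPT == 4 the chunk size is 128, so read_length == 100
// gives readpos_bound == 128 and read_length == 130 gives readpos_bound == 256.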
SeqT4<SeqT>* d_read4 = (SeqT4<SeqT>*)read;
// Run DP loop for calculating scores. Process each row at a time, and
// compute vertical and diagonal values in parallel.
for (int32_t graph_pos = 0; graph_pos < graph_count; graph_pos++)
{
int32_t node_id = graph[graph_pos]; // node id for the graph node
int32_t score_gIdx = graph_pos + 1; // score matrix index for this graph node
score_index = static_cast<int64_t>(score_gIdx) * static_cast<int64_t>(scores_width);
int32_t first_element_prev_score = scores[score_index];
uint16_t pred_count = incoming_edge_count[node_id];
int32_t pred_idx = (pred_count == 0 ? 0 : node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES]] + 1);
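// +1 because row 0 of the score matrix is the boundary row; graph position p is
// stored in score row p + 1.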
SeqT graph_base = nodes[node_id];
// readpos_bound is the first thread-block boundary multiple at or beyond read_length.
// This is done so that all threads in the block enter the loop. The loop contains a
// syncwarp, so if any of the threads don't enter it, the warp would deadlock.
for (int32_t read_pos = lane_idx * CPT; read_pos < readpos_bound; read_pos += WARP_SIZE * CPT)
{
int32_t rIdx = read_pos / CPT;
// To avoid doing extra work, we clip the extra warps that go beyond the read count.
// Warp clipping hasn't been shown to help much yet, but it might if we increase the
// thread-block size in the future.
SeqT4<SeqT> read4 = d_read4[rIdx];
ScoreT4<ScoreT> score = make_ScoreT4(ScoreT{SHRT_MAX});
if (read_pos < read_length)
{
score = computeScore(rIdx, read4,
node_id, graph_base,
pred_count, pred_idx,
node_id_to_pos, incoming_edges,
scores, scores_width,
gap_score, match_score, mismatch_score);
}
// While there are changes to the horizontal score values, keep updating the matrix.
// So the loop only runs as many times as there are corrections in the matrix.
// The any_sync warp primitive lets us easily check whether any of the threads had an update.
bool loop = true;
while (__any_sync(FULL_MASK, loop))
{
loop = false;
// Note: the computation of s3 depends on s2, s2 depends on s1, and s1 on s0.
// Reversing the order of computation in this loop from s3 to s0 would increase
// ILP. However, in longer reads where indels are more frequent, the reversed computation
// results in a larger number of iterations, since if s0 changes, the values of s1, s2 and s3,
// which have already been computed in parallel, need to be updated again.
// The shfl_up lets us grab a value from the lane below.
int32_t last_score = __shfl_up_sync(FULL_MASK, score.s3, 1);
if (lane_idx == 0)
{
last_score = first_element_prev_score;
}
score.s0 = max(last_score + gap_score, score.s0);
score.s1 = max(score.s0 + gap_score, score.s1);
score.s2 = max(score.s1 + gap_score, score.s2);
int32_t tscore = max(score.s2 + gap_score, score.s3);
if (tscore > score.s3)
{
score.s3 = tscore;
loop = true;
}
}
// Copy over the last element score of the last lane into a register of first lane
// which can be used to compute the first cell of the next warp.
first_element_prev_score = __shfl_sync(FULL_MASK, score.s3, WARP_SIZE - 1);
// Index into score matrix.
score_index = static_cast<int64_t>(score_gIdx) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(read_pos);
if (read_pos < read_length)
{
scores[score_index + 1L] = score.s0;
scores[score_index + 2L] = score.s1;
scores[score_index + 3L] = score.s2;
scores[score_index + 4L] = score.s3;
}
__syncwarp();
}
}
int32_t aligned_nodes = 0;
if (lane_idx == 0)
{
// Find location of the maximum score in the matrix.
int32_t i = 0;
int32_t j = read_length;
int32_t mscore = score_type_min_limit;
for (int32_t idx = 1; idx <= graph_count; idx++)
{
if (outgoing_edge_count[graph[idx - 1]] == 0)
{
score_index = static_cast<int64_t>(idx) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j);
int32_t s = scores[score_index];
if (mscore < s)
{
mscore = s;
i = idx;
}
}
}
// Fill in traceback
int32_t prev_i = 0;
int32_t prev_j = 0;
// Backtrack from the maximum score position to generate the alignment.
// Backtracking is done by re-calculating the score at each cell
// along the path to see which preceding cell the move could have
// come from. This seems computationally more expensive, but doesn't
// require storing any traceback buffer during alignment.
int32_t loop_count = 0;
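// A valid traceback path decreases i and/or j on every step, so it can visit at most
// about read_length + graph_count cells; exceeding the bound below means no consistent
// predecessor was found and backtracking has failed.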
while (!(i == 0 && j == 0) && loop_count < static_cast<int32_t>(read_length + graph_count + 2))
{
loop_count++;
score_index = static_cast<int64_t>(i) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j);
int32_t scores_ij = scores[score_index];
bool pred_found = false;
// Check if move is diagonal.
if (i != 0 && j != 0)
{
int32_t node_id = graph[i - 1];
int32_t match_cost = (nodes[node_id] == read[j - 1] ? match_score : mismatch_score);
uint16_t pred_count = incoming_edge_count[node_id];
int32_t pred_i = (pred_count == 0 ? 0 : (node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES]] + 1));
score_index = static_cast<int64_t>(pred_i) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j - 1);
if (scores_ij == (scores[score_index] + match_cost))
{
prev_i = pred_i;
prev_j = j - 1;
pred_found = true;
}
if (!pred_found)
{
for (int32_t p = 1; p < pred_count; p++)
{
pred_i = (node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]] + 1);
score_index = static_cast<int64_t>(pred_i) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j - 1);
if (scores_ij == (scores[score_index] + match_cost))
{
prev_i = pred_i;
prev_j = j - 1;
pred_found = true;
break;
}
}
}
}
// Check if move is vertical.
if (!pred_found && i != 0)
{
int32_t node_id = graph[i - 1];
uint16_t pred_count = incoming_edge_count[node_id];
int32_t pred_i = (pred_count == 0 ? 0 : node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES]] + 1);
score_index = static_cast<int64_t>(pred_i) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j);
if (scores_ij == scores[score_index] + gap_score)
{
prev_i = pred_i;
prev_j = j;
pred_found = true;
}
if (!pred_found)
{
for (int32_t p = 1; p < pred_count; p++)
{
pred_i = node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]] + 1;
score_index = static_cast<int64_t>(pred_i) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j);
if (scores_ij == scores[score_index] + gap_score)
{
prev_i = pred_i;
prev_j = j;
pred_found = true;
break;
}
}
}
}
// Check if move is horizontal.
score_index = static_cast<int64_t>(i) * static_cast<int64_t>(scores_width) + static_cast<int64_t>(j - 1);
if (!pred_found && scores_ij == scores[score_index] + gap_score)
{
prev_i = i;
prev_j = j - 1;
pred_found = true;
}
alignment_graph[aligned_nodes] = (i == prev_i ? -1 : graph[i - 1]);
alignment_read[aligned_nodes] = (j == prev_j ? -1 : j - 1);
aligned_nodes++;
i = prev_i;
j = prev_j;
} // end of while
if (loop_count >= (read_length + graph_count + 2))
{
aligned_nodes = CUDAPOA_KERNEL_NW_BACKTRACKING_LOOP_FAILED;
}
#ifdef NW_VERBOSE_PRINT
printf("aligned nodes %d\n", aligned_nodes);
#endif
}
aligned_nodes = __shfl_sync(0xffffffff, aligned_nodes, 0);
return aligned_nodes;
}
// global kernel used in testing, hence uses int16_t for SizeT and ScoreT,
// may need to change if test inputs change to long reads
template <typename SizeT>
__global__ void runNeedlemanWunschKernel(uint8_t* nodes,
SizeT* graph,
SizeT* node_id_to_pos,
int32_t graph_count,
uint16_t* incoming_edge_count,
SizeT* incoming_edges,
uint16_t* outgoing_edge_count,
uint8_t* read,
int32_t read_length,
int16_t* scores,
int32_t scores_width,
SizeT* alignment_graph,
SizeT* alignment_read,
int32_t gap_score,
int32_t mismatch_score,
int32_t match_score,
SizeT* aligned_nodes)
{
static_assert(std::is_same<SizeT, int16_t>::value, "This function only accepts int16_t as SizeT.");
*aligned_nodes = needlemanWunsch<uint8_t, int16_t, int16_t>(nodes,
graph,
node_id_to_pos,
graph_count,
incoming_edge_count,
incoming_edges,
outgoing_edge_count,
read,
read_length,
scores,
scores_width,
alignment_graph,
alignment_read,
gap_score,
mismatch_score,
match_score);
}
// Host function that calls the kernel
template <typename SizeT>
void runNW(uint8_t* nodes,
SizeT* graph,
SizeT* node_id_to_pos,
int32_t graph_count,
uint16_t* incoming_edge_count,
SizeT* incoming_edges,
uint16_t* outgoing_edge_count,
uint8_t* read,
int32_t read_length,
int16_t* scores,
int32_t scores_width,
SizeT* alignment_graph,
SizeT* alignment_read,
int32_t gap_score,
int32_t mismatch_score,
int32_t match_score,
SizeT* aligned_nodes)
{
runNeedlemanWunschKernel<<<1, CUDAPOA_THREADS_PER_BLOCK>>>(nodes,
graph,
node_id_to_pos,
graph_count,
incoming_edge_count,
incoming_edges,
outgoing_edge_count,
read,
read_length,
scores,
scores_width,
alignment_graph,
alignment_read,
gap_score,
mismatch_score,
match_score,
aligned_nodes);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
}
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks