#include "GReduceStream.h" #include "ReduceStreamKernel.cu" #include <pycaUtils.h> #include <mem.h> #include <gmem.h> #include <Vec2D.h> // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { GReduceStream::GReduceStream(){ int *d_buf; dmemAlloc(d_buf, REDUCE_STREAM_SIZE * MAX_NUMBER_OF_REDUCE_STREAM); mdBuf = d_buf; } GReduceStream::~GReduceStream() { if (mdBuf != NULL) { dmemFree(mdBuf); mdBuf = NULL; } } //////////////////////////////////////////////////////////////////////////////// // Single operator single output function // d_o = reduce(oper, d_i); //////////////////////////////////////////////////////////////////////////////// template<typename T, MATH_OPS op> void GReduceStream::reduce(T* d_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce_L1_kernel<T, MOpers<T, op> > <<<M, N, 0, stream>>>((T*)mdBuf, d_i, n); if (update) { reduce_L2_kernel<T, MOpers<T, op>, true><<<1, N, 0, stream>>>(d_o, (T*) mdBuf); } else { reduce_L2_kernel<T, MOpers<T, op>, false><<<1, N, 0, stream>>>(d_o, (T*) mdBuf); } } //////////////////////////////////////////////////////////////////////////////// // Double operator single input single output function // d_o = reduce(oper, oper1(d_i)) //////////////////////////////////////////////////////////////////////////////// template <typename T, MATH_OPS op, MATH_OPS op1> void GReduceStream::compReduce(T* d_o, const T* d_i, size_t n, bool update, StreamT stream) { compReduce_L1_kernel<T, MOpers<T, op>, MOpers<T, op1> > <<<M, N, 0, stream>>>((T*)mdBuf, d_i, n); if (update) reduce_L2_kernel<T, MOpers<T, op>, true><<<1, N, 0, stream>>>(d_o, (T*)mdBuf); else reduce_L2_kernel<T, MOpers<T, op>, false><<<1, N, 0, stream>>>(d_o, (T*)mdBuf); } //////////////////////////////////////////////////////////////////////////////// // Double operator Double output single input function // d_o[0] = reduce(op, d_i) // d_o[1] = reduce(op1, d_i) //////////////////////////////////////////////////////////////////////////////// template <typename T, MATH_OPS op, MATH_OPS op1> void GReduceStream::bireduce(T* d_o, const T* d_i, size_t n, bool update, StreamT stream) { PRECONDITION(d_i!= NULL, "null pointer"); T* d_buf = (T*) mdBuf; T* d_buf1 = d_buf + REDUCE_STREAM_SIZE; bireduce_L1_kernel<T, MOpers<T,op>, MOpers<T,op1> > <<<M, N, 0, stream>>>(d_buf, d_buf1, d_i, n); if (update) reduce_ip2_op2_L2_kernel<T, MOpers<T,op>, MOpers<T,op1>, true> <<<1, N, 0, stream>>>(d_o, d_buf, d_buf1); else reduce_ip2_op2_L2_kernel<T, MOpers<T,op>, MOpers<T,op1>, false> <<<1, N, 0, stream>>>(d_o, d_buf, d_buf1); } //////////////////////////////////////////////////////////////////////////////// // Double operator single output double input function // d_o[0] = reduce(op, op1(d_i, d_i1)) //////////////////////////////////////////////////////////////////////////////// template <typename T, MATH_OPS op, MATH_OPS op1> void GReduceStream::product(T* d_o, const T* d_i0, const T* d_i1, size_t n, bool update, StreamT stream) { PRECONDITION((d_i0!= NULL) && (d_i1!= NULL), "null pointer"); product_L1_kernel<T, MOpers<T,op>, MOpers<T,op1> > <<<M, N, 0, stream>>>( (T*)mdBuf, d_i0, d_i1, n); if (update) reduce_L2_kernel<T, MOpers<T,op>, true> <<<1, N, 0, stream>>>(d_o, (T*)mdBuf); else reduce_L2_kernel<T, MOpers<T,op>, false> <<<1, N, 0, stream>>>(d_o, (T*)mdBuf); } //////////////////////////////////////////////////////////////////////////////// // Instantiate for implementation 
//////////////////////////////////////////////////////////////////////////////// template<typename T> void GReduceStream::Max(T& d_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce<T, MATH_Max>((T*)&d_o, d_i, n, update, stream); } template void GReduceStream::Max(float& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Max(int& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Max(uint& d_o, const uint* d_i, size_t n, bool update, StreamT stream); //////////////////////////////////////////////////////////////////////////////// template<typename T> void GReduceStream::Min(T& d_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce<T, MATH_Min>((T*)&d_o, d_i, n, update, stream); } template void GReduceStream::Min(float& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Min(int& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Min(uint& d_o, const uint* d_i, size_t n, bool update, StreamT stream); //////////////////////////////////////////////////////////////////////////////// template<typename T> void GReduceStream::Sum(T& d_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce<T, MATH_Add>((T*)&d_o, d_i, n, update, stream); } template void GReduceStream::Sum(float& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Sum(int& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Sum(uint& d_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduceStream::LInf(T& d_o, const T* d_i, size_t n, bool update, StreamT stream){ compReduce<T, MATH_Max, MATH_Abs>((T*)&d_o, d_i, n, update, stream); }; template void GReduceStream::LInf(float& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::LInf(int& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::LInf(uint& d_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduceStream::L1(T& d_o, const T* d_i, size_t n, bool update, StreamT stream){ compReduce<T, MATH_Add, MATH_Abs>((T*)&d_o, d_i, n, update, stream); }; template void GReduceStream::L1(float& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::L1(int& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::L1(uint& d_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduceStream::Sum2(T& d_o, const T* d_i, size_t n, bool update, StreamT stream){ compReduce<T, MATH_Add, MATH_Sqr>((T*)&d_o, d_i, n, update, stream); }; template void GReduceStream::Sum2(float& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Sum2(int& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::Sum2(uint& d_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduceStream::MaxMin(Vec2D<T>&d_o, const T* d_i, size_t n, bool update, StreamT stream){ bireduce<T, MATH_Max, MATH_Min>((T*)&d_o.x, d_i, n, update, stream); } template void GReduceStream::MaxMin(Vec2D<float>& d_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::MaxMin(Vec2D<int>& d_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduceStream::MaxMin(Vec2D<uint>& 
d_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduceStream::Dot(T& d_o, const T* d_i, const T* d_i1, size_t n, bool update, StreamT stream){ product<T, MATH_Add, MATH_Mul>((T*)&d_o, d_i, d_i1, n, update, stream); } template void GReduceStream::Dot(float& d_o, const float* d_i, const float* d_i1, size_t n, bool update, StreamT stream); template void GReduceStream::Dot(int& d_o, const int* d_i, const int* d_i1, size_t n, bool update, StreamT stream); template void GReduceStream::Dot(uint& d_o, const uint* d_i, const uint* d_i1, size_t n, bool update, StreamT stream); bool GReduceStream::selfTest(size_t n){ int test = true; int* h_i = new int [n]; int* h_i1 = new int [n]; for (size_t j=0; j< n; ++j) h_i[j] = (rand() & 0xff); for (size_t j=0; j< n; ++j) h_i1[j] = (rand() & 0xff); int *d_i; dmemAlloc(d_i, n); cpyArrayH2D(d_i, h_i, n); int *d_i1; dmemAlloc(d_i1,n); cpyArrayH2D(d_i1,h_i1,n); int *d_o; dmemAlloc(d_o, 256); int h_max = -INT_MAX, h_min = INT_MAX; int h_LInf = 0; int h_sum2 = 0; int h_sum = 0; int h_L1 = 0; int h_dot = 0; for (size_t i=0; i< n; ++i) { h_max = fmaxf(h_max, h_i[i]); h_min = fminf(h_min, h_i[i]); h_sum += h_i[i]; h_LInf = fmaxf(h_LInf, h_i[i]); h_L1 += fabsf(h_i[i]); h_sum2+= h_i[i]*h_i[i]; h_dot += h_i1[i] * h_i[i]; } int d_max = -INT_MAX, d_min = INT_MAX; int d_LInf= 0; int d_L1= 0; int d_sum2= 0; int d_sum = 0; int d_dot = 0; Vec2D<int> d_pair; Vec2D<int>* d_maxmin = createGObj(Vec2Di(0,0)); this->Sum(d_o[0], d_i, n);cpyArrayD2H(&d_sum, d_o, 1); this->Max(d_o[0], d_i, n);cpyArrayD2H(&d_max, d_o, 1); this->Min(d_o[0], d_i, n);cpyArrayD2H(&d_min, d_o, 1); this->LInf(d_o[0],d_i, n);cpyArrayD2H(&d_LInf, d_o, 1); this->L1(d_o[0], d_i, n);cpyArrayD2H(&d_L1, d_o, 1); this->Sum2(d_o[0],d_i, n);cpyArrayD2H(&d_sum2, d_o, 1); this->Dot(d_o[0],d_i,d_i1, n);cpyArrayD2H(&d_dot, d_o, 1); this->MaxMin(*d_maxmin, d_i, n);d_pair = getGObj(d_maxmin); fprintf(stderr, "Maximum value from CPU %d from GPU %d\n",h_max, d_max); fprintf(stderr, "Minumum value from CPU %d from GPU %d\n",h_min, d_min); fprintf(stderr, "Total value from CPU %d from GPU %d\n",h_sum, d_sum); fprintf(stderr, "Maximum abosulte value from CPU %d from GPU %d\n",h_LInf, d_LInf); fprintf(stderr, "Total square value from CPU %d from GPU %d\n",h_sum2, d_sum2); fprintf(stderr, "Dot product value from CPU %d from GPU %d\n",h_dot, d_dot); fprintf(stderr, "Max Min value from CPU %d %d from GPU %d %d\n",h_max, h_min, d_pair.x, d_pair.y); //Extensive test h_max = -INT_MAX, h_min = INT_MAX; h_LInf = 0; h_sum2 = 0; h_sum = 0; h_dot = 0; h_L1 = 0; for (int l=1; l < 10001;++l){ h_max = fmaxf(h_max, h_i[l-1]); h_LInf = fmaxf(h_LInf, h_i[l-1]); h_min = fminf(h_min, h_i[l-1]); h_sum += h_i[l-1]; h_sum2 += h_i[l-1]*h_i[l-1]; h_dot += h_i1[l-1] * h_i[l-1]; h_L1 += fabsf(h_i[l-1]); this->Sum(d_o[0], d_i, l);cpyArrayD2H(&d_sum, d_o, 1); this->Max(d_o[0], d_i, l);cpyArrayD2H(&d_max, d_o, 1); this->Min(d_o[0], d_i, l);cpyArrayD2H(&d_min, d_o, 1); this->LInf(d_o[0],d_i, l);cpyArrayD2H(&d_LInf, d_o, 1); this->L1(d_o[0], d_i, l);cpyArrayD2H(&d_L1, d_o, 1); this->Sum2(d_o[0],d_i, l);cpyArrayD2H(&d_sum2, d_o, 1); this->Dot(d_o[0],d_i,d_i1, l);cpyArrayD2H(&d_dot, d_o, 1); this->MaxMin(*d_maxmin, d_i, l);d_pair = getGObj(d_maxmin); if (d_max != h_max){ fprintf(stderr, "Max Test FAILED at %d GPU %d CPU %d\n", l, d_max, h_max ); test = false; } if (d_min != h_min){ fprintf(stderr, "Min Test FAILED at %d GPU %d CPU %d\n", l, d_min, h_min ); test = false; } if (d_LInf!= h_LInf){ fprintf(stderr, "MaxAbs Test 
FAILED at %d GPU %d CPU %d\n", l, d_LInf, h_LInf ); test = false; } if (d_sum!= h_sum){ fprintf(stderr, "Sum Test FAILED at %d GPU %d CPU %d\n", l, d_sum, h_sum ); test = false; } if (d_sum2!= h_sum2){ fprintf(stderr, "Sum SQR Test FAILED at %d GPU %d CPU %d\n", l, d_sum2, h_sum2 ); test = false; } if (d_dot!= h_dot){ fprintf(stderr, "Dot Test FAILED at %d GPU %d CPU %d\n", l, d_dot, h_dot ); test = false; } if ( d_pair.x != h_max || d_pair.y != h_min){ fprintf(stderr, "Max Min Test FAILED at %d GPU %d %d CPU %d %d\n", l, d_pair.x, d_pair.y, h_max, h_min); test = false; } if (test == false) break; } if (test) fprintf(stderr, "Test PASSED \n"); delete []h_i1; delete []h_i; cudaFree(d_i); cudaFree(d_i1); return test; } } // end namespace PyCA
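// ---------------------------------------------------------------------------
// Hedged sketch of the two-pass reduction pattern GReduceStream uses above:
// an L1 kernel writes one partial result per block into a device buffer
// (mdBuf), and a single-block L2 kernel folds those partials into the final
// scalar, optionally accumulating into the previous output when `update` is
// set. The kernel names, M, and N below are illustrative stand-ins, not the
// contents of ReduceStreamKernel.cu.
#include <cuda_runtime.h>
#include <cstddef>

constexpr int M = 64;   // hypothetical number of L1 blocks
constexpr int N = 256;  // hypothetical threads per block (power of two)

// L1 pass: grid-stride accumulation, then a shared-memory tree reduction;
// launch as sumL1<<<M, N>>>(partial, in, n).
__global__ void sumL1(float* partial, const float* in, size_t n)
{
    __shared__ float s[N];
    float acc = 0.f;
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += (size_t)gridDim.x * blockDim.x)
        acc += in[i];
    s[threadIdx.x] = acc;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) s[threadIdx.x] += s[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) partial[blockIdx.x] = s[0];
}

// L2 pass: one block reduces the M partials; launch as sumL2<<<1, M>>>(...).
// The `update` flag mirrors the templated update parameter above.
__global__ void sumL2(float* out, const float* partial, bool update)
{
    __shared__ float s[M];
    s[threadIdx.x] = partial[threadIdx.x];
    __syncthreads();
    for (int stride = M / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) s[threadIdx.x] += s[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) *out = update ? (*out + s[0]) : s[0];
}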
namespace k2 { template <int32_t NUM_KEY_BITS> void TestHashConstruct() { for (auto &c : {GetCpuContext(), GetCudaContext()}) { for (int32_t size : { 128, 1024, 2048, 65536, 1048576}) { Hash hash(c, size, NUM_KEY_BITS); // obviously we're not going to fill it completely... this hash is not // resizable. int32_t num_elems = size / 2; // Some keys may be identical. int32_t key_bound = num_elems * 2; Array1<uint32_t> keys = RandUniformArray1<uint32_t>(c, num_elems, 0, key_bound - 1), values = RandUniformArray1<uint32_t>(c, num_elems, 0, 10000), success(c, num_elems, 0); Array1<int32_t> count_per_key = GetCounts(reinterpret_cast<Array1<int32_t> &>(keys), key_bound); if (size <= 2048) { K2_LOG(INFO) << "keys = " << keys << ", values = " << values << ", counts = " << count_per_key; } uint32_t *keys_data = keys.Data(), *values_data = values.Data(), *success_data = success.Data(); int32_t *counts_data = count_per_key.Data(); Hash::Accessor<NUM_KEY_BITS> acc = hash.GetAccessor<Hash::Accessor<NUM_KEY_BITS>>(); K2_EVAL(c, num_elems, lambda_insert_pairs, (int32_t i) -> void { uint32_t key = keys_data[i], value = values_data[i], success; int32_t count = counts_data[key]; uint64_t *key_value_location; if (acc.Insert(key, value, nullptr, &key_value_location)) { success = 1; } else { success = 0; K2_CHECK(count > 1) << ", key = " << key << ", i = " << i; } uint64_t keyval = *key_value_location; if (success) { acc.SetValue(key_value_location, key, value); K2_DCHECK_EQ(keyval, *key_value_location); } success_data[i] = success; }); K2_EVAL(c, num_elems, lambda_check_find, (int32_t i) -> void { uint32_t key = keys_data[i], value = values_data[i], success = success_data[i]; uint64_t val = 0; uint64_t *key_val_addr = nullptr; bool ans = acc.Find(key, &val, &key_val_addr), ans2 = acc.Find(key + key_bound, &val, &key_val_addr); K2_CHECK(ans); // key should be present. K2_CHECK(!ans2); // key == key + key_bound should not be present. if (success) { // if this was the key that won the data race, its value should be // present. K2_CHECK_EQ(val, value); K2_CHECK_EQ(*key_val_addr, ((uint64_t(key) | ((uint64_t)value << NUM_KEY_BITS)))); } }); K2_EVAL(c, num_elems, lambda_check_delete, (int32_t i) -> void { uint32_t key = (uint32_t)keys_data[i]; uint32_t success = success_data[i]; if (success) acc.Delete(key); }); } } } void TestHashConstructGeneric(int32_t num_key_bits) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { for (int32_t size : {128, 1024, 2048, 65536, 1048576}) { Hash hash(c, size, num_key_bits); // obviously we're not going to fill it completely... this hash is not // resizable. int32_t num_elems = size / 2; // Some keys may be identical. 
int32_t key_bound = num_elems * 2; Array1<uint32_t> keys = RandUniformArray1<uint32_t>(c, num_elems, 0, key_bound - 1), values = RandUniformArray1<uint32_t>(c, num_elems, 0, 10000), success(c, num_elems, 0); Array1<int32_t> count_per_key = GetCounts(reinterpret_cast<Array1<int32_t> &>(keys), key_bound); if (size <= 2048) { K2_LOG(INFO) << "keys = " << keys << ", values = " << values << ", counts = " << count_per_key; } uint32_t *keys_data = keys.Data(), *values_data = values.Data(), *success_data = success.Data(); int32_t *counts_data = count_per_key.Data(); Hash::GenericAccessor acc = hash.GetAccessor<Hash::GenericAccessor>(); K2_EVAL(c, num_elems, lambda_insert_pairs, (int32_t i) -> void { uint32_t key = keys_data[i], value = values_data[i], success; int32_t count = counts_data[key]; uint64_t *key_value_location; if (acc.Insert(key, value, nullptr, &key_value_location)) { success = 1; } else { success = 0; K2_CHECK(count > 1) << ", key = " << key << ", i = " << i; } uint64_t keyval = *key_value_location; if (success) { acc.SetValue(key_value_location, key, value); K2_DCHECK_EQ(keyval, *key_value_location); } success_data[i] = success; }); hash.Resize(hash.NumBuckets() * 2, num_key_bits); acc = hash.GetAccessor<Hash::GenericAccessor>(); K2_EVAL(c, num_elems, lambda_check_find, (int32_t i) -> void { uint32_t key = keys_data[i], value = values_data[i], success = success_data[i]; uint64_t val = 0; uint64_t *key_val_addr = nullptr; bool ans = acc.Find(key, &val, &key_val_addr), ans2 = acc.Find(key + key_bound, &val, &key_val_addr); K2_CHECK(ans); // key should be present. K2_CHECK(!ans2); // key == key + key_bound should not be present. if (success) { // if this was the key that won the data race, its value should be // present. K2_CHECK_EQ(val, value); K2_CHECK_EQ(*key_val_addr, ((uint64_t(value) << num_key_bits) | (uint64_t)key)); } }); K2_EVAL(c, num_elems, lambda_check_delete, (int32_t i) -> void { uint32_t key = (uint32_t)keys_data[i]; uint32_t success = success_data[i]; if (success) acc.Delete(key); }); } } } void TestHashConstructPacked(int32_t num_key_bits, int32_t num_value_bits) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { for (int32_t size : { 2048, 65536, 1048576}) { Hash hash(c, size, num_key_bits, num_value_bits); // obviously we're not going to fill it completely... this hash is not // resizable. int32_t num_elems = size / 2; // Some keys may be identical. 
int32_t key_bound = num_elems * 2; Array1<uint32_t> keys = RandUniformArray1<uint32_t>(c, num_elems, 0, key_bound - 1), values = RandUniformArray1<uint32_t>(c, num_elems, 0, 10000), success(c, num_elems, 0); Array1<int32_t> count_per_key = GetCounts(reinterpret_cast<Array1<int32_t> &>(keys), key_bound); if (size <= 2048) { K2_LOG(INFO) << "keys = " << keys << ", values = " << values << ", counts = " << count_per_key; } uint32_t *keys_data = keys.Data(), *values_data = values.Data(), *success_data = success.Data(); int32_t *counts_data = count_per_key.Data(); Hash::PackedAccessor acc = hash.GetAccessor<Hash::PackedAccessor>(); K2_EVAL(c, num_elems, lambda_insert_pairs, (int32_t i) -> void { uint32_t key = keys_data[i], value = values_data[i], success; int32_t count = counts_data[key]; uint64_t *key_value_location; if (acc.Insert(key, value, nullptr, &key_value_location)) { success = 1; } else { success = 0; K2_CHECK(count > 1) << ", key = " << key << ", i = " << i; } uint64_t keyval = *key_value_location; if (success) { acc.SetValue(key_value_location, key, value); K2_DCHECK_EQ(keyval, *key_value_location); } success_data[i] = success; }); if (size != 65535) // just for some variety.. num_value_bits += 1; // Try changing the number of value bits, so we // can test Resize() with changes in that. hash.Resize(hash.NumBuckets() * 2, num_key_bits, num_value_bits); acc = hash.GetAccessor<Hash::PackedAccessor>(); const uint64_t *hash_data = hash.Data(); K2_EVAL(c, num_elems, lambda_check_find, (int32_t i) -> void { uint32_t key = keys_data[i], value = values_data[i], success = success_data[i]; int32_t num_implicit_key_bits = num_key_bits + num_value_bits - 64, num_kept_key_bits = num_key_bits - num_implicit_key_bits; uint64_t implicit_key_bits_mask = (uint64_t(1) << num_implicit_key_bits) - 1; uint64_t val = 0; uint64_t *key_val_addr = nullptr; bool ans = acc.Find(key, &val, &key_val_addr), ans2 = acc.Find(key + key_bound, &val, &key_val_addr); K2_CHECK(ans); // key should be present. K2_CHECK(!ans2); // key == key + key_bound should not be present. if (success) { // if this was the key that won the data race, its value should be // present. K2_CHECK_EQ(val, value); K2_CHECK_EQ(*key_val_addr, ((uint64_t(value) << num_kept_key_bits) | (((uint64_t)key) >> num_implicit_key_bits))); K2_CHECK_EQ(key & implicit_key_bits_mask, (key_val_addr - hash_data) & implicit_key_bits_mask); } }); K2_EVAL(c, num_elems, lambda_check_delete, (int32_t i) -> void { uint32_t key = (uint32_t)keys_data[i]; uint32_t success = success_data[i]; if (success) acc.Delete(key); }); } } } TEST(Hash, Construct) { // This indirection gets around a limitation of the CUDA compiler. TestHashConstruct<32>(); TestHashConstruct<40>(); TestHashConstruct<28>(); for (int32_t key_bits = 28; key_bits <= 40; key_bits += 4) TestHashConstructGeneric(key_bits); for (int32_t key_bits = 30; key_bits <= 40; key_bits += 4) { for (int32_t value_bits = (64 - key_bits) + 1; value_bits < (64 - key_bits) + 4; ++value_bits) TestHashConstructPacked(key_bits, value_bits); } } } // namespace k2
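// ---------------------------------------------------------------------------
// Hedged sketch of the atomicCAS-based insert that lock-free GPU hash tables
// of this kind rely on; it illustrates the packed (key | value << NUM_KEY_BITS)
// bucket layout checked by lambda_check_find above, but it is not k2's actual
// Hash implementation. Assumes NUM_KEY_BITS < 64, keys strictly below
// 2^NUM_KEY_BITS, and an all-ones 64-bit word reserved as the empty marker.
#include <cstdint>

template <int NUM_KEY_BITS>
__device__ bool TryInsert(uint64_t* buckets, uint32_t num_buckets,
                          uint64_t key, uint64_t value)
{
    const uint64_t kEmpty = ~uint64_t(0);           // reserved empty marker
    const uint64_t key_mask = (uint64_t(1) << NUM_KEY_BITS) - 1;
    uint64_t packed = key | (value << NUM_KEY_BITS);
    uint64_t slot = key % num_buckets;              // trivial hash for the sketch
    for (uint32_t probe = 0; probe < num_buckets; ++probe) {
        uint64_t old = atomicCAS(
            reinterpret_cast<unsigned long long*>(&buckets[slot]),
            (unsigned long long)kEmpty, (unsigned long long)packed);
        if (old == kEmpty) return true;             // claimed an empty bucket
        if ((old & key_mask) == key) return false;  // key already present
        slot = (slot + 1) % num_buckets;            // linear probing
    }
    return false;                                   // table full
}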
#include "Normalization.h" // 宏:用来定义使用 online 算法求平均值和方差 //#define USE_ONLINE // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 #ifdef USE_ONLINE // Kernel 函数:_nomalizeKer(实现对输入图像的每个点进行正规化) // 对输入图像的每一个点,以该点为中心的邻域,求出该邻域内的平均值和总体方差, // 然后将该点的像素与平均值作差,再除以总体方差,得到的值作为输出 static __global__ void // Kernel 无返回值 _nomalizeKer( ImageCuda inimg, // 输入图像 Template tpl, // 模版,用来指定邻域范围 float *res, // 输出的计算的结果 size_t pitch // res 的 pitch 值 ); #endif // Host 函数:_creatTemplate(创建模版) // 创建指定大小的方形模版,模版必须为空模版 static __host__ int // 返回值:函数是否正确指向,若函数正确指向,返回 // NO_ERROR _creatTemplate( int k, // 指定要创建的方形模版的边长 Template *tpl // 模版指针,模版必须为空模版 ); // Host 函数:_creatTemplate(创建模版) static __host__ int _creatTemplate(int k, Template *tpl) { int errcode; // 局部变量,错误码 // 判断 tpl 是否为空 if (tpl == NULL) return NULL_POINTER; // 判断 k 是否合法 if (k <= 0) return INVALID_DATA; // 计算模版中点的数量 int count = k * k; // 计算中心点 int center = k / 2; // 为模版构建数据 errcode = TemplateBasicOp::makeAtHost(tpl, count); if (errcode != NO_ERROR) return errcode; // 构造方形模版中的点 for (int i = 0; i < count; i++) { tpl->tplData[2 * i] = i % k - center; tpl->tplData[2 * i + 1] = i / k - center; } // 计算完毕,返回 return NO_ERROR; } #ifdef USE_ONLINE // Kernel 函数:_nomalizeKer(实现对输入图像的每个点进行正规化) static __global__ void _nomalizeKer(ImageCuda inimg, Template tpl, float *res, size_t pitch) { // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 // c 表示 column, r 表示 row)。由于采用并行度缩减策略 ,令一个线程 // 处理 4 个输出像素,这四个像素位于统一列的相邻 4 行上,因此,对于 // dstr 需要进行乘 4 的计算 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 另一方面防止由于段错误导致系统崩溃 if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; // 用来保存临时像素点的坐标的 x 和 y 分量 int dx, dy; // 用来记录当前模版所在位置的指针 int *curtplptr = tpl.tplData; // 用来记录当前输入图像所在位置的指针 unsigned char *curinptr; // 计数器,用来记录某点在模版范围内拥有的点的个数 int count[4] = { 0 , 0, 0, 0 }; // 迭代求平均值和总体方差使用的中间值 float m[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // 计算得到的平均值 float mean[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; // 计算得到的总体方差 float variance[4] = { 0.0f, 0.0f, 0.0f, 0.0f }; int pix; // 局部变量,临时存储像素值 // 扫描模版范围内的每个输入图像的像素点 for (int i = 0; i < tpl.count; i++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的 // 数组表示一个点,所以使用当前模版位置的指针加一操作 dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); float temp; // 局部变量,在进行迭代时的中间变量 // 先判断当前像素的 x 分量是否越界,如果越界,则跳过,扫描下一个模版点, // 如果没有越界,则分别处理当前列的相邻的 4 个像素 if (dx >= 0 && dx < inimg.imgMeta.width) { // 根据 dx 和 dy 获取第一个像素的指针 curinptr = inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes; // 检测此像素点的 y 分量是否越界 if (dy >= 0 && dy < inimg.imgMeta.height) { // 对第一个点利用 on-line 算法进行迭代 pix = *(curinptr); count[0]++; temp = pix - mean[0]; mean[0] += temp / count[0]; m[0] += temp * (pix - mean[0]); } // 分别处理剩下三个像素点 for (int j = 1; j < 4; j++) { // 获取下一个像素点的指针 curinptr = curinptr + inimg.pitchBytes; dy++; // 检测第二个像素点的 y 分量是否越界 if (dy >= 0 && dy < inimg.imgMeta.height) { // 对第二个点利用 on-line 算法进行迭代 pix = *(curinptr); count[j]++; temp = pix - mean[j]; mean[j] += temp / count[j]; m[j] += temp * (pix - mean[j]); } } } } // 计算 4 个像素点中每个的正规化值 // 计算第一个像素点的正规化 // 定义并计算指向第一个像素在输出数组中的指针 float *outptr =(float *)((char *)res + dstr * pitch) + dstc; // 第一个点的像素值 curinptr = inimg.imgMeta.imgData + dstc + dstr * inimg.pitchBytes; pix = *(curinptr); // 判断 m 值是否为 0,如果为 0,则将对应的正规化值设置为 0 if (m[0] <= 0.000001f && m[0] >= -0.000001f) { *outptr = 0.0f; } else { // 计算第一个像素点的总体方差 variance[0] = sqrtf(m[0] / count[0]); // 计算第一个像素点的正规化画像值 *outptr = (pix - mean[0]) / variance[0]; } // 分别计算剩下三个点的像素值 for (int i = 
1; i < 4; i++) { // 判断该点的 y 分量是否越界,如果越界,则可以确定后面的点也越界,直接 // 返回 if (++dstr >= inimg.imgMeta.height) return; // 计算该点在输出数组中的指针 outptr = (float *)((char *)outptr + pitch); // 该点的像素值 curinptr = curinptr + inimg.pitchBytes; pix = *(curinptr); // 判断 m 值是否为 0,如果为 0,则将对应的正规化值设置为 0 if (m[i] <= 0.000001f && m[i] >= -0.000001f) { *outptr = 0.0f; } else { // 计算该像素点的标准方差 variance[i] = sqrtf(m[i] / count[i]); // 计算该像素点的正规化画像值 *outptr = (pix - mean[i]) / variance[i]; } } } #endif // Kernel 函数:_nomalizeKer(使用常规方法求平均值和方差) static __global__ void _nomalizenorKer(ImageCuda inimg, Template tpl, float *res, size_t pitch) { int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; int *curtplptr = tpl.tplData; int dx, dy; int count = 0; int sum = 0; float mean; float variance = 0.0f; for (int i = 0; i < tpl.count; i++) { dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); if (dx < 0 || dx >= inimg.imgMeta.width || dy < 0 || dy >= inimg.imgMeta.height) { continue; } count++; sum += *(inimg.imgMeta.imgData + dy * inimg.pitchBytes + dx); } mean = (float)sum / count; curtplptr = tpl.tplData; for (int i = 0; i < tpl.count; i++) { dx = dstc + *(curtplptr++); dy = dstr + *(curtplptr++); if (dx < 0 || dx >= inimg.imgMeta.width || dy < 0 || dy >= inimg.imgMeta.height) { continue; } int pix = *(inimg.imgMeta.imgData + dy * inimg.pitchBytes + dx); variance += (mean - pix) * (mean - pix); } float *outptr = (float *)((char *)res + dstr * pitch) + dstc; if (variance < 0.00001f) *outptr = 0.0f; else { int pix = *(inimg.imgMeta.imgData + dstr * inimg.pitchBytes + dstc); *outptr = (mean - pix) / sqrtf(variance); } } // Host 成员方法:normalize(对输入图像进行正规化) __host__ int Normalization::normalize(Image *inimg, float *out, size_t pitch, int width, int height, bool ishost) { int errcode; // 局部变量,错误码 cudaError_t cudaerr; // 局部变量,CUDA 调用返回的错误码 dim3 gridsize; dim3 blocksize; // 判断 inimg 和 out 是否是为空 if (inimg == NULL || out == NULL) return NULL_POINTER; // 判断 width 和 height 参数的合法性 if (width <= 0 || height <= 0) return INVALID_DATA; // 判断 pitch 的合法性 if (pitch < width * sizeof (float)) return INVALID_DATA; // 对输入图像申请 Device 存储空间 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图 ImageCuda inimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &inimgCud); if (errcode != NO_ERROR) return errcode; // 调整输入图像和输出数组的长和宽 if (inimgCud.imgMeta.width > width) inimgCud.imgMeta.width = width; if (inimgCud.imgMeta.height > height) inimgCud.imgMeta.height = height; // 计算线程块的数量 // blocksize 使用默认线程块 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; // 使用最普通的方法划分 Grid gridsize.x = (inimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (inimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; // 创建模版 Template *tpl; errcode = TemplateBasicOp::newTemplate(&tpl); if (errcode != NO_ERROR) return errcode; // 设置模版形状 errcode = _creatTemplate(k, tpl); if (errcode != NO_ERROR) return errcode; errcode = TemplateBasicOp::copyToCurrentDevice(tpl); if (errcode != NO_ERROR) return errcode; float *resCud; // 指向 Device 内存,用来存储正规化得到的结果 size_t respitch; // resCud 的 pitch // 判断 out 是否指向 host,如果是,需要在创建 Device 中创建空间 if (ishost) { // 为 resCud 申请内存空间 cudaerr = cudaMallocPitch((void **)&resCud, &respitch, width * sizeof (float), height); if (cudaerr != cudaSuccess) return CUDA_ERROR; } else { resCud = out; respitch = pitch; } dim3 blocksize1; dim3 gridsize1; // blocksize 使用默认线程块 
blocksize1.x = DEF_BLOCK_X; blocksize1.y = DEF_BLOCK_Y; // 使用最普通的方法划分 Grid gridsize1.x = (inimgCud.imgMeta.width + blocksize1.x - 1) / blocksize1.x; gridsize1.y = (inimgCud.imgMeta.height + blocksize1.y - 1) / blocksize1.y; #ifdef USE_ONLINE _nomalizeKer<<<gridsize, blocksize>>>(inimgCud, *tpl, resCud, respitch); #else _nomalizenorKer<<<gridsize1, blocksize1>>>(inimgCud, *tpl, resCud, respitch); #endif // 调用 Kernel 函数进行正规化操作 // _nomalizeKer<<<gridsize, blocksize>>>(inimgCud, *tpl, resCud, respitch); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 如果 out 是指向 host 内存的,则需要将 resCud 的内容拷贝到 out 中, // 并且释放 resCud 指向的内存空间 if (ishost) { // 将正规化得到的结果从 Device 内存中拷贝到 Host 内存 cudaerr = cudaMemcpy2D(out, width * sizeof (float), resCud, respitch, width * sizeof (float), height, cudaMemcpyDeviceToHost); if (cudaerr != cudaSuccess) errcode = CUDA_ERROR; else errcode = NO_ERROR; // 释放 resCud 指向的内存空间 cudaFree(resCud); } // 释放模版空间 TemplateBasicOp::deleteTemplate(tpl); // 处理完毕,返回 return errcode; }
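// ---------------------------------------------------------------------------
// Hedged sketch of the launch geometry for the USE_ONLINE path, where each
// thread of _nomalizeKer covers 4 consecutive rows. Under C++ precedence the
// expression used for gridsize.y in normalize() above,
//     (height + blocksize.y * 4 - 1) / blocksize.y * 4,
// divides by blocksize.y and then multiplies by 4, launching roughly four
// times more blocks in y than needed (the kernel's bounds check absorbs the
// extras). The conventional ceiling division by (block.y * 4) is shown below;
// the helper name is illustrative.
#include <cuda_runtime.h>

static dim3 gridFor4RowKernel(int width, int height, dim3 block)
{
    dim3 grid;
    grid.x = (width + block.x - 1) / block.x;
    grid.y = (height + block.y * 4 - 1) / (block.y * 4);
    return grid;
}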
#include <exception> #include <memory> #include <string> #include <unordered_map> #include <claraparabricks/genomeworks/cudamapper/types.hpp> #include <claraparabricks/genomeworks/utils/allocator.hpp> #include <claraparabricks/genomeworks/cudamapper/index.hpp> namespace claraparabricks { namespace genomeworks { namespace io { class FastaParser; } // namespace io namespace cudamapper { class IndexHostCopyBase; class HostIndexCache; /// CacheType - Specifies if query or target cache enum class CacheType { query_cache, target_cache }; /// DeviceIndexCache - Owns copies of indices on device /// /// These object are created by HostIndexCache::start_copying_indices_to_device() class DeviceIndexCache { public: /// \brief Constructor /// \param cache_type /// \param host_cache HostIndexCache that created this object DeviceIndexCache(CacheType cache_type, HostIndexCache* host_cache); DeviceIndexCache(const DeviceIndexCache&) = delete; DeviceIndexCache(DeviceIndexCache&&) = delete; DeviceIndexCache& operator=(const DeviceIndexCache&) = delete; DeviceIndexCache& operator=(DeviceIndexCache&&) = delete; /// \brief Destructor ~DeviceIndexCache(); /// \brief Returns requested index for which it is guarateed that it is ready, i.e. is has been fully copied from host memory is needed /// Calling this function before wait_for_data_to_be_ready() results in an exception /// \param index_descriptor /// \throw IndexNotFoundException if requested index is not cached /// \throw DeviceCacheNotReadyException is cache is not ready, i.e. wait_for_data_to_be_ready() has not been called yet /// \return requested index std::shared_ptr<Index> get_index(IndexDescriptor index_descriptor) const; /// \brief Returns whether given index is present in cache /// \param index_descriptor /// \return is given index present in cache bool has_index(IndexDescriptor index_descriptor) const; /// \brief Waits for indices to be copied from host memory. Must be called before get_index() void wait_for_data_to_be_ready(); /// \brief Returns whether indices have been copied to device and get_index() can be called, i.e. whether wait_for_data_to_be_ready() has already been called /// \return whether indices have been copied to device bool is_ready() const; private: friend HostIndexCache; using device_cache_t = std::unordered_map<IndexDescriptor, std::shared_ptr<Index>, IndexDescriptorHash>; /// \brief Adds index to cache /// To be called by HostIndexCache::start_copying_indices_to_device() /// \param index_descriptor /// \param device_index void add_index(IndexDescriptor index_descriptor, std::shared_ptr<Index> device_index); /// \brief Returns requested index, returned index might not be ready and has to be synchronized directly /// \param index_descriptor /// \throw IndexNotFoundException if requested index is not cached /// \return requested index std::shared_ptr<Index> get_index_no_check_if_ready(IndexDescriptor index_descriptor) const; device_cache_t cache_; CacheType cache_type_; // HostIndexCache which created this DeviceIndexCache HostIndexCache* host_cache_; // wait_for_data_to_be_ready bool is_ready_; }; /// HostIndexCache - Creates indices, stores them in host memory and on demands moves them to device memory /// /// Class contains separate caches for query and target. The user chooses between query and target by specifying CacheType in function calls. /// The user generates indices and stores them in host memory using generate_content(). 
The user then copies some of those indices /// to device memory using start_copying_indices_to_device() and the function returns a pointer to DeviceIndexCache. To wait for indices to be /// fully copied one should call DeviceIndexCache::wait_for_data_to_be_ready(). /// It is user's responsibility to make sure that indices requested by start_copying_indices_to_device() were generated by generate_content(). /// Memory copy to device is done asynchronously, the user should make sure that every call to start_copying_indices_to_device() is /// accompanied by a call DeviceIndexCache::wait_for_data_to_be_ready(). /// The class tries to minimize the number of index creation and movements, e.g. by reusing already existing indices, but not guarantees are given. class HostIndexCache { public: /// \brief Constructor only initializes cache, no index is generated at this point, generate_content() does that /// /// \param same_query_and_target true means that both query and target files are the same, meaning that if some index exists in query cache it can also be used by target cache directly /// \param allocator allocator to use for device arrays /// \param query_parser /// \param target_parser /// \param kmer_size see Index /// \param window_size see Index /// \param hash_representations see Index /// \param filtering_parameter see Index /// \param cuda_stream_generation index generation is done one this stream, device memory in resulting device copies of index will only we freed once all previously scheduled work on this stream has finished /// \param cuda_stream_copy D2H and H2D copies of indices will be done on this stream, device memory in resulting device copies of index will only we freed once all previously scheduled work on this stream has finished HostIndexCache(bool same_query_and_target, genomeworks::DefaultDeviceAllocator allocator, std::shared_ptr<genomeworks::io::FastaParser> query_parser, std::shared_ptr<genomeworks::io::FastaParser> target_parser, std::uint64_t kmer_size, std::uint64_t window_size, bool hash_representations = true, double filtering_parameter = 1.0, cudaStream_t cuda_stream_generation = 0, cudaStream_t cuda_stream_copy = 0); HostIndexCache(const HostIndexCache&) = delete; HostIndexCache(HostIndexCache&&) = delete; HostIndexCache& operator=(const HostIndexCache&) = delete; HostIndexCache& operator=(HostIndexCache&&) = delete; ~HostIndexCache() = default; /// \brief Generates indices on device and copies them to host memory /// /// If index already exists on host is may be reused. /// Indices from descriptors_of_indices_to_keep_on_device will be kept on device in addition to being to host. This is useful if the same indices /// are going to be requested by start_copying_indices_to_device() immediately after this call /// If skip_copy_to_host is true indices are going to be kept on device and not copied to host. 
In that case descriptors_of_indices_to_cache must /// be equal to descriptors_of_indices_to_keep_on_device and there must be only one call to start_copying_indices_to_device() with exactly these indices /// Calling this function invalidates any previously cached data for the same cache type /// /// \param cache_type /// \param descriptors_of_indices_to_cache /// \param descriptors_of_indices_to_keep_on_device /// \param skip_copy_to_host void generate_content(CacheType cache_type, const std::vector<IndexDescriptor>& descriptors_of_indices_to_cache, const std::vector<IndexDescriptor>& descriptors_of_indices_to_keep_on_device = {}, bool skip_copy_to_host = false); /// \brief Begins copying indices to device /// /// If index already exists on device it may be reused. /// This copy is done asynchronously. Function returns a DeviceIndexCache object which should be used to access the indices. /// Copy to device is finised by calling DeviceIndexCache::wait_for_data_to_be_ready(). /// The user should make sure that every call to start_copying_indices_to_device() is accompanied by a call to DeviceIndexCache::wait_for_data_to_be_ready() /// /// \param cache_type /// \param descriptors_of_indices_to_cache /// \throw IndexNotFoundException if an index that is not cached by call to generate_content() is requested /// \return DeviceIndexCache object std::shared_ptr<DeviceIndexCache> start_copying_indices_to_device(CacheType cache_type, const std::vector<IndexDescriptor>& descriptors_of_indices_to_cache); private: friend DeviceIndexCache; using host_cache_t = std::unordered_map<IndexDescriptor, std::shared_ptr<const IndexHostCopyBase>, IndexDescriptorHash>; using device_cache_t = std::unordered_map<IndexDescriptor, std::shared_ptr<Index>, IndexDescriptorHash>; /// \brief Registers DeviceIndexCache object /// To be called by the constructor of DeviceIndexCache /// \param cache_type /// \param index_cache void register_device_cache(CacheType cache_type, DeviceIndexCache* index_cache); /// \brief Deregisters DeviceIndexCache object /// To be called by the destructor of DeviceIndexCache /// \param cache_type /// \param index_cache void deregister_device_cache(CacheType cache_type, DeviceIndexCache* index_cache); // Indices kept on host host_cache_t query_host_cache_; host_cache_t target_host_cache_; // Indices kept of device because of descriptors_of_indices_to_keep_on_device device_cache_t query_indices_kept_on_device_; device_cache_t target_indices_kept_on_device_; // Currently existing DeviceIndexCaches created by this HostIndexCache std::vector<DeviceIndexCache*> query_device_caches_; std::vector<DeviceIndexCache*> target_device_caches_; const bool same_query_and_target_; genomeworks::DefaultDeviceAllocator allocator_; std::shared_ptr<genomeworks::io::FastaParser> query_parser_; std::shared_ptr<genomeworks::io::FastaParser> target_parser_; const std::uint64_t kmer_size_; const std::uint64_t window_size_; const bool hash_representations_; const double filtering_parameter_; const cudaStream_t cuda_stream_generation_; const cudaStream_t cuda_stream_copy_; }; /// IndexNotFoundException - Exception to be thrown if Index is reuqsted, but not found class IndexNotFoundException : public std::exception { public: /// IndexLocation - Was the Index requested from host or device cache enum class IndexLocation { host_cache, device_cache }; /// \brief constructor /// \param cache_type was Index requested from host or device cache /// \param index_location /// \param index_descriptor IndexNotFoundException(CacheType 
cache_type, IndexLocation index_location, IndexDescriptor index_descriptor); /// \brief Returns the error message of the exception const char* what() const noexcept override; private: const std::string message_; }; /// DeviceCacheNotReadyException - Exception ot be thrown when an index is requested before it has been copied completely class DeviceCacheNotReadyException : public std::exception { public: /// \brief constructor /// \param cache_type /// \param index_descriptor DeviceCacheNotReadyException(CacheType cache_type, IndexDescriptor index_descriptor); /// \brief Returns the error message of the exception const char* what() const noexcept override; private: const std::string message_; }; } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks
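// ---------------------------------------------------------------------------
// Hedged usage sketch of the call sequence the comments above prescribe:
// generate_content() builds and caches the indices on the host, then each
// start_copying_indices_to_device() is paired with a
// wait_for_data_to_be_ready() before get_index() is called. Only the
// HostIndexCache / DeviceIndexCache calls come from the header itself; the
// function name and the way the descriptors are obtained are placeholders,
// and the cache header above is assumed to be included.
#include <memory>
#include <vector>

using namespace claraparabricks::genomeworks::cudamapper;

void use_query_indices(HostIndexCache& host_cache,
                       const std::vector<IndexDescriptor>& descriptors)
{
    // 1. Generate indices and copy them into the host cache.
    host_cache.generate_content(CacheType::query_cache, descriptors);

    // 2. Begin the asynchronous copy back to device memory.
    std::shared_ptr<DeviceIndexCache> device_cache =
        host_cache.start_copying_indices_to_device(CacheType::query_cache,
                                                   descriptors);

    // 3. Wait for the copy; calling get_index() before this point throws
    //    DeviceCacheNotReadyException.
    device_cache->wait_for_data_to_be_ready();

    // 4. Fetch and use the device indices.
    for (const IndexDescriptor& descriptor : descriptors) {
        std::shared_ptr<Index> index = device_cache->get_index(descriptor);
        (void)index; // hand the index to the matcher, etc.
    }
}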
#define MAX_K 4 #define MAX_BATCH_SIZE 32 #define MAX_N 12 Tensor FFModel::aggregate(const Tensor* inputs, /* gate_preds, gate_assign, full_gate_pred, n * exp_pred */ int n, float lambda_bal, const char* name) { Aggregate* aggr = new Aggregate(*this, inputs, n, lambda_bal, name); layers.push_back(aggr); return aggr->outputs[0]; } Aggregate::Aggregate(FFModel& model, const Tensor* _inputs, int _n, float _lambda_bal, const char* name) : Op(model, OP_AGGREGATE, name, _n+4, _inputs), n(_n), lambda_bal(_lambda_bal), profiling(model.config.profiling) { // FIXME: For now, set upper limits Better: Do as follows, but memory is // assigned per block, so requires to check that // https://stackoverflow.com/questions/5531247/allocating-shared-memory/5531640#5531640 assert(n <= MAX_N && "Increase MAX_N in #define"); assert(inputs[0].adim[0] <= MAX_K && "Increase MAX_K in #define"); assert(inputs[0].adim[1] <= MAX_BATCH_SIZE && "Increase MAX_BATCH_SIZE in #define"); assert(n+4 == numInputs); assert(n > 0); assert(inputs[0].numDim == 2); assert(inputs[1].numDim == 2); assert(inputs[2].numDim == 2); assert(inputs[3].numDim == 2); for(int i = 0; i < inputs[0].numDim; i++) { assert(inputs[0].adim[i] == inputs[1].adim[i]); assert(inputs[0].adim[i] == inputs[2].adim[i]); } assert(inputs[0].adim[1] == inputs[3].adim[1]); assert(inputs[3].adim[0] == n); // expert inputs int num_dim = inputs[4].numDim; int out_dim = inputs[4].adim[0]; for(int i = 1; i < n; i++) { assert(inputs[i+4].numDim == num_dim); assert(inputs[i+4].adim[0] == out_dim); } // output outputs[0].numDim = num_dim; for(int i = 0; i < num_dim-1; i++) outputs[0].adim[i] = inputs[4].adim[i]; outputs[0].adim[num_dim-1] = inputs[0].adim[num_dim-1]; numWeights = 0; } void Aggregate::create_weights(FFModel& model) { // Do nothing } void Aggregate::create_output_and_partition(FFModel& model) { // Retrieve the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); // Can only partition over the sample dim assert(part_rect.hi[0] == part_rect.lo[0]); int num_dim = inputs[4].numDim; int dims[num_dim]; dims[0] = inputs[0].adim[1]; for (int i = 1; i < num_dim; i++) dims[i] = inputs[4].adim[num_dim-1-i]; outputs[0] = model.create_tensor<2>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; // Compute partition bound for input for(int i = 0; i < n+4; i++) { Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { model.create_disjoint_partition<2>( inputs[i], (IndexSpaceT<2>)task_is, input_lps[i], input_grad_lps[i]); } } } OpMeta* Aggregate::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { Aggregate* agg = (Aggregate*) task->args; FFHandler handle = *((FFHandler*)task->local_args); AggregateMeta* m = new AggregateMeta(handle, agg->n); m->profiling = agg->profiling; return m; } void Aggregate::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ 
ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGGREGATE_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Aggregate)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } __global__ void agg_forward_kernel(float** exp_preds, const int* exp_assign, const float* gate_net_preds, float* output, int n, const int k, // num chosen experts int exp_samples, // max samples per expert const int batch_size, int out_dim) { __shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE]; // Get pred pointers, single thread pre block if(threadIdx.x == 0) { int expert_idx[MAX_N] = {0}; for(int i = 0; i < batch_size; i++) { for(int j = 0; j < k; j++) { // Get pointer to chosen expert predictions int expert = exp_assign[i*k+j]; if(expert_idx[expert] >= exp_samples) { // dropped sample chosen_exp_preds[i*k+j] = 0; continue; } chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_idx[expert]*out_dim; expert_idx[expert]++; } } } // set output tensor to 0 CUDA_KERNEL_LOOP(i, batch_size*out_dim) { output[i] = 0.0f; } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k*out_dim*batch_size) { if(chosen_exp_preds[i/out_dim] != 0) { float res = gate_net_preds[i/out_dim] * chosen_exp_preds[i/out_dim][i%(out_dim)]; int out_id = (i/(k*out_dim))*out_dim + (i%out_dim); atomicAdd(output+out_id, res); } } } __device__ void agg_backward_kernel_gate(const float* output_grad, float* full_gate_grads, float** exp_preds, const int* expert_assign, const bool* cache_corr, int* expert_bal, float lambda_bal, int batch_size, int k, int n, int out_dim) { // gate gradient CUDA_KERNEL_LOOP(i, batch_size*k*out_dim) { if (exp_preds[i/out_dim] != 0 && cache_corr[i/(k*out_dim)]) { int out_id = (i/(k*out_dim))*out_dim + (i%out_dim); float res = output_grad[out_id] * exp_preds[i/out_dim][i%out_dim]; float* gate_grad_idx = full_gate_grads + (i/(out_dim*k))*n + expert_assign[(i/(out_dim*k))*k+(i/out_dim)%k]; atomicAdd(gate_grad_idx, res); } } // balance term CUDA_KERNEL_LOOP(i, n*batch_size) { atomicAdd(full_gate_grads+i, lambda_bal*expert_bal[i%n]); } __syncthreads(); // make 0 mean CUDA_KERNEL_LOOP(i, batch_size*n) { int start = (i/n)*n; float sub = -full_gate_grads[i]/n; for(int j = 0; j < n; j++) { atomicAdd(full_gate_grads+start+j, sub); } } } __device__ void agg_backward_kernel_exp(const float* output_grad, const float* gate_preds, float** exp_grads, int batch_size, int k, int out_dim) { // compute expert gradients CUDA_KERNEL_LOOP(i, k*out_dim*batch_size) { if (exp_grads[i/out_dim] != 0) { int out_id = (i/(k*out_dim))*out_dim + (i%out_dim); exp_grads[i/out_dim][i%out_dim] += gate_preds[i/out_dim] * output_grad[out_id]; } } } __global__ void agg_backward_kernel(float** exp_preds, float** exp_grads, const int* exp_assign, const int* true_exp_assign, const float* gating_net_preds, float* full_gating_grads, const float* 
output_grads, int n, // num experts int k, // num chosen experts int exp_samples, // max samples per expert float lambda_bal, int batch_size, int out_dim) { __shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE]; __shared__ float* chosen_exp_grads[MAX_K*MAX_BATCH_SIZE]; __shared__ int expert_bal[MAX_N]; __shared__ bool cache_corr[MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if(threadIdx.x == 0) { // init arrays for(int i = 0; i < n; i++) expert_bal[i] = 0; for(int i = 0; i < batch_size; i++) cache_corr[i] = true; // Get pointer to chosen expert predictions and expert counts for(int i = 0; i < batch_size; i++) { for(int j = 0; j < k; j++) { int expert = true_exp_assign[k*i + j]; if(expert != exp_assign[k*i + j]) cache_corr[i] = false; if(expert_bal[expert] >= exp_samples) { // dropped sample chosen_exp_preds[i*k+j] = 0; chosen_exp_grads[i*k+j] = 0; expert_bal[expert]++; continue; } chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_bal[expert]*out_dim; chosen_exp_grads[i*k+j] = exp_grads[expert] + expert_bal[expert]*out_dim; expert_bal[expert]++; } } } __syncthreads(); // FIXME: These 2 functions could execute independently in parallel // get expert gradients agg_backward_kernel_exp(output_grads, gating_net_preds, chosen_exp_grads, batch_size, k, out_dim); // get gating net gradients agg_backward_kernel_gate(output_grads, full_gating_grads, chosen_exp_preds, exp_assign, cache_corr, expert_bal, (lambda_bal*n)/batch_size, batch_size, k, n, out_dim); } void Aggregate::forward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { int n = ((Aggregate*)task->args)->n; assert((int)regions.size() == n+3); assert((int)task->regions.size() == n+3); const AggregateMeta* m = *((AggregateMeta**)task->local_args); // get gate_pred, gate_assign, output const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA); const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA); const AccessorWO<float, 2> acc_output(regions[n+2], FID_DATA); Rect<2> rect_gate_pred = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<2> rect_gate_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<2> rect_output = runtime->get_index_space_domain( ctx, task->regions[n+2].region.get_index_space()); coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1; assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1); assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] == rect_gate_assign.hi[0] - rect_gate_assign.lo[0]); assert(batch_size == rect_output.hi[1] - rect_output.lo[1] + 1); coord_t out_dim = rect_output.hi[0] - rect_output.lo[0] + 1; // get exp_preds float* exp_preds[n]; // get first exp_pred and row and out_dim Domain exp_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); exp_preds[0] = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1; assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); for(int i = 1; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[i+2].region.get_index_space()); exp_preds[i] = helperGetTensorPointerWO<float>( regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } int k = (int)(rect_gate_assign.hi[0] - rect_gate_assign.lo[0] 
+ 1); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); // call forward_kernel cudaMemcpy(m->dev_exp_preds, exp_preds, n*sizeof(float*), cudaMemcpyHostToDevice); agg_forward_kernel<<<GET_BLOCKS(batch_size*k*out_dim), min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim)), 0, stream>>>( m->dev_exp_preds, acc_gate_assign.ptr(rect_gate_assign), acc_gate_pred.ptr(rect_gate_pred), acc_output.ptr(rect_output), n, k, rows, batch_size, out_dim); } void Aggregate::backward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { const AggregateMeta* m = *((AggregateMeta**)task->local_args); int n = ((Aggregate*)task->args)->n; float lambda_bal = ((Aggregate*)task->args)->lambda_bal; assert((int)regions.size() == 2*n+5); assert((int)task->regions.size() == 2*n+5); // get gate_pred, gate_grad, gate_assign, output_grad const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA); const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA); const AccessorRO<int, 2> acc_true_gate_assign(regions[2], FID_DATA); const AccessorWO<float, 2> full_acc_gate_grad(regions[3], FID_DATA); const AccessorRO<float, 2> acc_output_grad(regions[2*n+4], FID_DATA); Rect<2> rect_gate_pred = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<2> rect_gate_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<2> rect_true_gate_assign = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<2> rect_full_gate_grad = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<2> rect_out_grad = runtime->get_index_space_domain( ctx, task->regions[2*n+4].region.get_index_space()); coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1; assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1); assert(rect_gate_assign == rect_true_gate_assign); assert(batch_size == rect_out_grad.hi[1] - rect_out_grad.lo[1] + 1); assert(batch_size == rect_full_gate_grad.hi[1] - rect_full_gate_grad.lo[1] + 1); coord_t k = rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1; assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1 == k); coord_t out_dim = rect_out_grad.hi[0] - rect_out_grad.lo[0] + 1; assert(n == rect_full_gate_grad.hi[0] - rect_full_gate_grad.lo[0] + 1); // get exp_preds float* exp_preds[n]; // get first exp_pred and row Domain exp_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); exp_preds[0] = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1; assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); for(int i = 1; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[i+4].region.get_index_space()); exp_preds[i] = helperGetTensorPointerRW<float>( regions[i+4], task->regions[i+4], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } // get chosen_exp_grads float* exp_grads[n]; for(int i = 0; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[n+i+4].region.get_index_space()); exp_grads[i] = helperGetTensorPointerRW<float>( regions[n+i+4], task->regions[n+i+4], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - 
exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); // call backward kernel cudaMemcpy(m->dev_exp_preds, exp_preds, n*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(m->dev_exp_grads, exp_grads, n*sizeof(float*), cudaMemcpyHostToDevice); agg_backward_kernel<<<GET_BLOCKS(batch_size*k*out_dim), min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim)), 0, stream>>>( m->dev_exp_preds, m->dev_exp_grads, acc_gate_assign.ptr(rect_gate_assign), acc_true_gate_assign.ptr(rect_true_gate_assign), acc_gate_pred.ptr(rect_gate_pred), full_acc_gate_grad.ptr(rect_full_gate_grad), acc_output_grad.ptr(rect_out_grad), n, k, rows, lambda_bal, batch_size, out_dim); } void Aggregate::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGGREGATE_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Aggregate)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // gate_preds launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); // exp_preds for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region)); launcher.add_field(i+2, FID_DATA); } // output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(n+2, FID_DATA); runtime->execute_index_space(ctx, launcher); } void Aggregate::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGGREGATE_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Aggregate)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // gate_preds launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); // true gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[2], 0/*projection 
id*/, READ_ONLY, EXCLUSIVE, inputs[2].region)); launcher.add_field(2, FID_DATA); // full_gate gradients launcher.add_region_requirement( RegionRequirement(input_grad_lps[3], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[3].region_grad)); launcher.add_field(3, FID_DATA); // exp_preds for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region)); launcher.add_field(i+4, FID_DATA); } // exp_preds gradients for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region_grad)); launcher.add_field(i+n+4, FID_DATA); } // output launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(2*n+4, FID_DATA); runtime->execute_index_space(ctx, launcher); } AggregateMeta::AggregateMeta(FFHandler handler, int n) : OpMeta(handler) { checkCUDA(cudaMalloc(&dev_exp_preds, n*sizeof(float*))); checkCUDA(cudaMalloc(&dev_exp_grads, n*sizeof(float*))); } AggregateMeta::~AggregateMeta(void) { checkCUDA(cudaFree(&dev_exp_preds)); checkCUDA(cudaFree(&dev_exp_grads)); } bool Aggregate::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { //TODO: implement cost_metrics.forward_time = 0.0f; cost_metrics.backward_time = 0.0f; cost_metrics.memory_requirement = 0; return false; }
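// ---------------------------------------------------------------------------
// Hedged sketch of the core gate-weighted combination that agg_forward_kernel
// implements, stripped of the Legion plumbing and the per-expert row
// bookkeeping done by thread 0. chosen[i*k + j] points at the out_dim-wide
// prediction that expert j produced for sample i (nullptr if the sample was
// dropped), gate holds the matching gating weights, and output must be
// zero-initialized. All names below are illustrative.
#include <cuda_runtime.h>

__global__ void aggregate_forward(float* const* chosen,  // [batch*k] row pointers
                                  const float* gate,     // [batch*k] gate weights
                                  float* output,         // [batch*out_dim], zeroed
                                  int batch, int k, int out_dim)
{
    int total = batch * k * out_dim;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total;
         i += gridDim.x * blockDim.x) {
        int pair = i / out_dim;                 // (sample, chosen-expert) pair
        int col = i % out_dim;                  // output feature index
        const float* expert_row = chosen[pair];
        if (expert_row == nullptr) continue;    // dropped sample
        int sample = pair / k;
        // Several chosen experts contribute to the same output element, so
        // accumulate atomically, as the original kernel does.
        atomicAdd(output + sample * out_dim + col, gate[pair] * expert_row[col]);
    }
}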
#include <isce3/core/DateTime.h> #include <isce3/core/Orbit.h> #include <isce3/core/StateVector.h> #include <isce3/core/TimeDelta.h> #include <isce3/core/Vector.h> #include <isce3/except/Error.h> #include <isce3/cuda/core/Orbit.h> using isce3::core::DateTime; using isce3::core::OrbitInterpBorderMode; using isce3::core::OrbitInterpMethod; using isce3::core::StateVector; using isce3::core::TimeDelta; using isce3::core::Vec3; using HostOrbit = isce3::core::Orbit; using DeviceOrbit = isce3::cuda::core::Orbit; namespace isce3 { namespace core { /** Serialize DateTime to ostream */ std::ostream & operator<<(std::ostream & os, const DateTime & dt) { return os << dt.isoformat(); } /** Serialize Vector to ostream */ std::ostream & operator<<(std::ostream & os, const Vec3 & v) { return os << "{ " << v[0] << ", " << v[1] << ", " << v[2] << " }"; } }} /** Check if two DateTimes are equivalent to within errtol seconds */ bool compareDatetimes(const DateTime & lhs, const DateTime & rhs, double errtol) { return lhs.isClose(rhs, TimeDelta(errtol)); } /** Check if two Vectors are pointwise equivalent to within errtol */ bool compareVecs(const Vec3 & lhs, const Vec3 & rhs, double errtol) { return std::abs(lhs[0] - rhs[0]) < errtol && std::abs(lhs[1] - rhs[1]) < errtol && std::abs(lhs[2] - rhs[2]) < errtol; } /** Analytical linear orbit with constant velocity */ class LinearOrbit { public: LinearOrbit() = default; LinearOrbit(const Vec3 & initial_position, const Vec3 & velocity) : _initial_position(initial_position), _velocity(velocity) {} /** Get position at time t */ Vec3 position(double t) const { return _initial_position + _velocity * t; } /** Get velocity at time t */ Vec3 velocity(double /*t*/) const { return _velocity; } private: Vec3 _initial_position; Vec3 _velocity; }; /** Analytical orbit defined by a polynomial */ class PolynomialOrbit { public: PolynomialOrbit() = default; PolynomialOrbit(const std::vector<Vec3> & coeffs) : _coeffs(coeffs), _order(int(coeffs.size())) {} /** Get position at time t */ Vec3 position(double t) const { Vec3 position(0., 0., 0.); double tt = 1.; for (int i = 0; i < _order; ++i) { position += tt * _coeffs[i]; tt *= t; } return position; } /** Get velocity at time t */ Vec3 velocity(double t) const { Vec3 velocity(0., 0., 0.); double tt = 1.; for (int i = 1; i < _order; ++i) { velocity += i * tt * _coeffs[i]; tt *= t; } return velocity; } private: std::vector<Vec3> _coeffs; int _order; }; /** Analytical circular orbit with constant angular velocity */ class CircularOrbit { public: CircularOrbit() = default; CircularOrbit(double theta0, double phi0, double dtheta, double dphi, double r) : _theta0(theta0), _phi0(phi0), _dtheta(dtheta), _dphi(dphi), _r(r) {} /** Get position at time t */ Vec3 position(double t) const { double theta = _theta0 + t * _dtheta; double phi = _phi0 + t * _dphi; double x = _r * std::cos(theta); double y = _r * (std::sin(theta) + std::cos(phi)); double z = _r * std::sin(phi); return {x, y, z}; } /** Get velocity at time t */ Vec3 velocity(double t) const { double theta = _theta0 + t * _dtheta; double phi = _phi0 + t * _dphi; double vx = -1. 
* _r * _dtheta * std::sin(theta); double vy = _r * ((_dtheta * std::cos(theta)) - (_dphi * std::sin(phi))); double vz = _r * _dphi * std::cos(phi); return {vx, vy, vz}; } private: double _theta0, _phi0, _dtheta, _dphi, _r; }; struct OrbitTest : public testing::Test { std::vector<StateVector> statevecs; void SetUp() override { DateTime starttime(2000, 1, 1); double spacing = 10.; int size = 11; Vec3 initial_position = {0., 0., 0.}; Vec3 velocity = {4000., -1000., 4500.}; LinearOrbit reforbit(initial_position, velocity); statevecs.resize(size); for (int i = 0; i < size; ++i) { double t = i * spacing; statevecs[i].datetime = starttime + TimeDelta(t); statevecs[i].position = reforbit.position(t); statevecs[i].velocity = reforbit.velocity(t); } } }; TEST_F(OrbitTest, FromHostOrbit) { HostOrbit h_orbit(statevecs); DeviceOrbit d_orbit(h_orbit); EXPECT_EQ( d_orbit.referenceEpoch(), h_orbit.referenceEpoch() ); EXPECT_EQ( d_orbit.interpMethod(), h_orbit.interpMethod() ); EXPECT_EQ( d_orbit.time(), h_orbit.time() ); for (int i = 0; i < h_orbit.size(); ++i) { EXPECT_EQ( d_orbit.position(i), h_orbit.position(i) ); EXPECT_EQ( d_orbit.velocity(i), h_orbit.velocity(i) ); } } TEST_F(OrbitTest, Constructor) { DeviceOrbit orbit(statevecs); // reference epoch defaults to time of first state vector DateTime refepoch = statevecs[0].datetime; double dt = (statevecs[1].datetime - statevecs[0].datetime).getTotalSeconds(); int size = statevecs.size(); EXPECT_EQ( orbit.referenceEpoch(), refepoch ); EXPECT_DOUBLE_EQ( orbit.spacing(), dt ); EXPECT_EQ( orbit.size(), size ); for (int i = 0; i < size; ++i) { double t = (statevecs[i].datetime - refepoch).getTotalSeconds(); EXPECT_DOUBLE_EQ( orbit.time(i), t ); EXPECT_EQ( orbit.position(i), statevecs[i].position ); EXPECT_EQ( orbit.velocity(i), statevecs[i].velocity ); } } TEST_F(OrbitTest, GetStateVectors) { DeviceOrbit orbit(statevecs); std::vector<StateVector> orbit_statevecs = orbit.getStateVectors(); EXPECT_EQ( orbit_statevecs.size(), statevecs.size() ); int size = statevecs.size(); double errtol = 1e-13; for (int i = 0; i < size; ++i) { DateTime t1 = orbit_statevecs[i].datetime; DateTime t2 = statevecs[i].datetime; EXPECT_PRED3( compareDatetimes, t1, t2, errtol ); EXPECT_EQ( orbit_statevecs[i].position, statevecs[i].position ); EXPECT_EQ( orbit_statevecs[i].velocity, statevecs[i].velocity ); } } TEST_F(OrbitTest, SetStateVectors) { DateTime refepoch = statevecs[0].datetime; double dt = (statevecs[1].datetime - statevecs[0].datetime).getTotalSeconds(); int size = statevecs.size(); DeviceOrbit orbit; orbit.referenceEpoch(refepoch); orbit.setStateVectors(statevecs); EXPECT_EQ( orbit.referenceEpoch(), refepoch ); EXPECT_DOUBLE_EQ( orbit.spacing(), dt ); EXPECT_EQ( orbit.size(), size ); for (int i = 0; i < size; ++i) { double t = (statevecs[i].datetime - refepoch).getTotalSeconds(); EXPECT_DOUBLE_EQ( orbit.time(i), t ); EXPECT_EQ( orbit.position(i), statevecs[i].position ); EXPECT_EQ( orbit.velocity(i), statevecs[i].velocity ); } } TEST_F(OrbitTest, InvalidStateVectors) { DeviceOrbit orbit(statevecs); // two or more state vectors are required { std::vector<StateVector> new_statevecs(1); EXPECT_THROW( orbit.setStateVectors(new_statevecs), std::invalid_argument ); } // state vectors must be uniformly sampled { std::vector<StateVector> new_statevecs(3); new_statevecs[0].datetime = DateTime(); new_statevecs[1].datetime = DateTime() + TimeDelta(1.); new_statevecs[2].datetime = DateTime() + TimeDelta(10.); EXPECT_THROW( orbit.setStateVectors(new_statevecs), 
std::invalid_argument ); } } TEST_F(OrbitTest, ReferenceEpoch) { DeviceOrbit orbit(statevecs); DateTime new_refepoch = statevecs[1].datetime; orbit.referenceEpoch(new_refepoch); EXPECT_EQ( orbit.referenceEpoch(), new_refepoch ); double errtol = 1e-13; for (int i = 0; i < orbit.size(); ++i) { DateTime t1 = statevecs[i].datetime; DateTime t2 = orbit.referenceEpoch() + TimeDelta(orbit.time(i)); EXPECT_PRED3( compareDatetimes, t1, t2, errtol ); } } TEST_F(OrbitTest, InterpMethod) { DeviceOrbit orbit(statevecs); OrbitInterpMethod new_method = OrbitInterpMethod::Legendre; orbit.interpMethod(new_method); EXPECT_EQ( orbit.interpMethod(), new_method ); } TEST_F(OrbitTest, StartMidEndTime) { // Orbit with two state vectors separated by 1 second { std::vector<StateVector> statevecs(2); statevecs[0].datetime = DateTime(2000, 1, 1, 0, 0, 0); statevecs[1].datetime = DateTime(2000, 1, 1, 0, 0, 1); DeviceOrbit orbit(statevecs); EXPECT_DOUBLE_EQ( orbit.startTime(), 0. ); EXPECT_DOUBLE_EQ( orbit.midTime(), 0.5 ); EXPECT_DOUBLE_EQ( orbit.endTime(), 1. ); } // Orbit with three state vectors with 1 second spacing { std::vector<StateVector> statevecs(3); statevecs[0].datetime = DateTime(2000, 1, 1, 0, 0, 0); statevecs[1].datetime = DateTime(2000, 1, 1, 0, 0, 1); statevecs[2].datetime = DateTime(2000, 1, 1, 0, 0, 2); DeviceOrbit orbit(statevecs); EXPECT_DOUBLE_EQ( orbit.startTime(), 0. ); EXPECT_DOUBLE_EQ( orbit.midTime(), 1. ); EXPECT_DOUBLE_EQ( orbit.endTime(), 2. ); } } TEST_F(OrbitTest, StartMidEndDateTime) { double errtol = 1e-13; // Orbit with two state vectors separated by 1 second { std::vector<StateVector> statevecs(2); statevecs[0].datetime = DateTime(2000, 1, 1, 0, 0, 0); statevecs[1].datetime = DateTime(2000, 1, 1, 0, 0, 1); DeviceOrbit orbit(statevecs); EXPECT_PRED3( compareDatetimes, orbit.startDateTime(), DateTime(2000, 1, 1, 0, 0, 0), errtol ); EXPECT_PRED3( compareDatetimes, orbit.midDateTime(), DateTime(2000, 1, 1, 0, 0, 0.5), errtol ); EXPECT_PRED3( compareDatetimes, orbit.endDateTime(), DateTime(2000, 1, 1, 0, 0, 1), errtol ); } // Orbit with three state vectors with 1 second spacing { std::vector<StateVector> statevecs(3); statevecs[0].datetime = DateTime(2000, 1, 1, 0, 0, 0); statevecs[1].datetime = DateTime(2000, 1, 1, 0, 0, 1); statevecs[2].datetime = DateTime(2000, 1, 1, 0, 0, 2); DeviceOrbit orbit(statevecs); EXPECT_PRED3( compareDatetimes, orbit.startDateTime(), DateTime(2000, 1, 1, 0, 0, 0), errtol ); EXPECT_PRED3( compareDatetimes, orbit.midDateTime(), DateTime(2000, 1, 1, 0, 0, 1), errtol ); EXPECT_PRED3( compareDatetimes, orbit.endDateTime(), DateTime(2000, 1, 1, 0, 0, 2), errtol ); } } TEST_F(OrbitTest, Comparison) { DeviceOrbit orbit1(statevecs); DeviceOrbit orbit2(statevecs); DeviceOrbit orbit3; EXPECT_TRUE( orbit1 == orbit2 ); EXPECT_TRUE( orbit1 != orbit3 ); } TEST_F(OrbitTest, OrbitInterpBorderMode) { DeviceOrbit orbit(statevecs); // throw exception on attempt to interpolate outside orbit domain { OrbitInterpBorderMode border_mode = OrbitInterpBorderMode::Error; double t = orbit.endTime() + 1.; Vec3 pos, vel; EXPECT_THROW( orbit.interpolate(&pos, &vel, t, border_mode), isce3::except::OutOfRange ); } // output NaN on attempt to interpolate outside orbit domain { OrbitInterpBorderMode border_mode = OrbitInterpBorderMode::FillNaN; double t = orbit.endTime() + 1.; Vec3 pos, vel; orbit.interpolate(&pos, &vel, t, border_mode); EXPECT_TRUE( std::isnan(pos[0]) && std::isnan(pos[1]) && std::isnan(pos[2]) ); EXPECT_TRUE( std::isnan(vel[0]) && std::isnan(vel[1]) && std::isnan(vel[2]) ); 
} } struct LinearOrbitInterpTest : public testing::Test { LinearOrbit reforbit; std::vector<StateVector> statevecs; std::vector<double> interp_times; double errtol; void SetUp() override { DateTime starttime(2000, 1, 1); double spacing = 10.; int size = 11; Vec3 initial_position = {0., 0., 0.}; Vec3 velocity = {4000., -1000., 4500.}; reforbit = LinearOrbit(initial_position, velocity); statevecs.resize(size); for (int i = 0; i < size; ++i) { double t = i * spacing; statevecs[i].datetime = starttime + TimeDelta(t); statevecs[i].position = reforbit.position(t); statevecs[i].velocity = reforbit.velocity(t); } interp_times = { 23.3, 36.7, 54.5, 89.3 }; errtol = 1e-8; } }; TEST_F(LinearOrbitInterpTest, Hermite) { DeviceOrbit orbit(statevecs, OrbitInterpMethod::Hermite); for (auto t : interp_times) { Vec3 pos, vel; orbit.interpolate(&pos, &vel, t); EXPECT_PRED3( compareVecs, pos, reforbit.position(t), errtol ); EXPECT_PRED3( compareVecs, vel, reforbit.velocity(t), errtol ); } } TEST_F(LinearOrbitInterpTest, Legendre) { DeviceOrbit orbit(statevecs, OrbitInterpMethod::Legendre); for (auto t : interp_times) { Vec3 pos, vel; orbit.interpolate(&pos, &vel, t); EXPECT_PRED3( compareVecs, pos, reforbit.position(t), errtol ); EXPECT_PRED3( compareVecs, vel, reforbit.velocity(t), errtol ); } } struct PolynomialOrbitInterpTest : public testing::Test { PolynomialOrbit reforbit; std::vector<StateVector> statevecs; std::vector<double> interp_times; double errtol; void SetUp() override { DateTime starttime(2000, 1, 1); double spacing = 10.; int size = 11; std::vector<Vec3> coeffs = {{ -7000000.0, 5400000.0 , 0.0}, { 5435.0, -4257.0 , 7000.0}, { -45.0, 23.0 , 11.0}, { 7.3, 3.9 , 0.0}, { 0.0, 0.01, 0.0}}; reforbit = PolynomialOrbit(coeffs); statevecs.resize(size); for (int i = 0; i < size; ++i) { double t = i * spacing; statevecs[i].datetime = starttime + TimeDelta(t); statevecs[i].position = reforbit.position(t); statevecs[i].velocity = reforbit.velocity(t); } interp_times = { 23.3, 36.7, 54.5, 89.3 }; errtol = 1e-8; } }; TEST_F(PolynomialOrbitInterpTest, Hermite) { DeviceOrbit orbit(statevecs, OrbitInterpMethod::Hermite); for (auto t : interp_times) { Vec3 pos, vel; orbit.interpolate(&pos, &vel, t); EXPECT_PRED3( compareVecs, pos, reforbit.position(t), errtol ); EXPECT_PRED3( compareVecs, vel, reforbit.velocity(t), errtol ); } } TEST_F(PolynomialOrbitInterpTest, Legendre) { DeviceOrbit orbit(statevecs, OrbitInterpMethod::Legendre); for (auto t : interp_times) { Vec3 pos, vel; orbit.interpolate(&pos, &vel, t); EXPECT_PRED3( compareVecs, pos, reforbit.position(t), errtol ); EXPECT_PRED3( compareVecs, vel, reforbit.velocity(t), errtol ); } } struct CircularOrbitInterpTest : public testing::Test { CircularOrbit reforbit; std::vector<StateVector> statevecs; std::vector<double> interp_times; double errtol; void SetUp() override { DateTime starttime(2000, 1, 1); double spacing = 5.; int size = 11; double theta0 = 2. * M_PI / 8.; double phi0 = 2. * M_PI / 12.; double dtheta = 2. * M_PI / 7000.; double dphi = 2. 
* M_PI / 4000.; double r = 8000000.; reforbit = CircularOrbit(theta0, phi0, dtheta, dphi, r); statevecs.resize(size); for (int i = 0; i < size; ++i) { double t = i * spacing; statevecs[i].datetime = starttime + TimeDelta(t); statevecs[i].position = reforbit.position(t); statevecs[i].velocity = reforbit.velocity(t); } interp_times = { 11.65, 18.35, 27.25, 44.65 }; errtol = 1e-8; } }; TEST_F(CircularOrbitInterpTest, Hermite) { DeviceOrbit orbit(statevecs, OrbitInterpMethod::Hermite); for (auto t : interp_times) { Vec3 pos, vel; orbit.interpolate(&pos, &vel, t); EXPECT_PRED3( compareVecs, pos, reforbit.position(t), errtol ); EXPECT_PRED3( compareVecs, vel, reforbit.velocity(t), errtol ); } } TEST_F(CircularOrbitInterpTest, Legendre) { DeviceOrbit orbit(statevecs, OrbitInterpMethod::Legendre); for (auto t : interp_times) { Vec3 pos, vel; orbit.interpolate(&pos, &vel, t); EXPECT_PRED3( compareVecs, pos, reforbit.position(t), errtol ); EXPECT_PRED3( compareVecs, vel, reforbit.velocity(t), errtol ); } } int main(int argc, char * argv[]) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
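// ----------------------------------------------------------------------------
// Each fixture above repeats the same SetUp() loop: sample an analytical
// reference orbit at a uniform spacing into a vector of StateVectors. The
// helper sketched below could factor that out; sampleOrbit is a hypothetical
// name and is not part of isce3. It assumes only the types already brought in
// at the top of this file (StateVector, DateTime, TimeDelta, std::vector).
template<class AnalyticalOrbit>
std::vector<StateVector> sampleOrbit(const AnalyticalOrbit & reforbit,
                                     const DateTime & starttime,
                                     double spacing, int size)
{
    std::vector<StateVector> statevecs(size);
    for (int i = 0; i < size; ++i) {
        double t = i * spacing;  // seconds past starttime
        statevecs[i].datetime = starttime + TimeDelta(t);
        statevecs[i].position = reforbit.position(t);
        statevecs[i].velocity = reforbit.velocity(t);
    }
    return statevecs;
}

// Example usage (equivalent to LinearOrbitInterpTest::SetUp):
//   LinearOrbit reforbit({0., 0., 0.}, {4000., -1000., 4500.});
//   auto statevecs = sampleOrbit(reforbit, DateTime(2000, 1, 1), 10., 11);
//   DeviceOrbit orbit(statevecs, OrbitInterpMethod::Hermite);
// ----------------------------------------------------------------------------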
#define DEFINE_EXTTYPE1(T, NAME) \ struct exttype_##T##_##1 : NAME##1 { \ DACE_HDFI exttype_##T##_##1 operator*(const exttype_##T##_##1 &other) const { \ exttype_##T##_##1 result; \ result.x = other.x * x; \ return result; \ } \ DACE_HDFI exttype_##T##_##1 operator+(const exttype_##T##_##1 &other) const { \ exttype_##T##_##1 result; \ result.x = other.x + x; \ return result; \ } \ DACE_HDFI exttype_##T##_##1 operator-(const exttype_##T##_##1 &other) const { \ exttype_##T##_##1 result; \ result.x = x - other.x; \ return result; \ } \ DACE_HDFI exttype_##T##_##1 operator/(const exttype_##T##_##1 &other) const { \ exttype_##T##_##1 result; \ result.x = x / other.x; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##1 operator*(const U &other) const { \ exttype_##T##_##1 result; \ result.x = other * x; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##1 operator+(const U &other) const { \ exttype_##T##_##1 result; \ result.x = other + x; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##1 operator-(const U &other) const { \ exttype_##T##_##1 result; \ result.x = x - other; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##1 operator/(const U &other) const { \ exttype_##T##_##1 result; \ result.x = x / other; \ return result; \ } \ template <typename U> \ DACE_HDFI T operator[](const U &index) const { \ return x; \ } \ }; #define DEFINE_EXTTYPE2(T, NAME) \ struct exttype_##T##_##2 : NAME##2 { \ DACE_HDFI exttype_##T##_##2 operator*(const exttype_##T##_##2 &other) const { \ exttype_##T##_##2 result; \ result.x = other.x * x; \ result.y = other.y * y; \ return result; \ } \ DACE_HDFI exttype_##T##_##2 operator+(const exttype_##T##_##2 &other) const { \ exttype_##T##_##2 result; \ result.x = other.x + x; \ result.y = other.y + y; \ return result; \ } \ DACE_HDFI exttype_##T##_##2 operator-(const exttype_##T##_##2 &other) const { \ exttype_##T##_##2 result; \ result.x = x - other.x; \ result.y = y - other.y; \ return result; \ } \ DACE_HDFI exttype_##T##_##2 operator/(const exttype_##T##_##2 &other) const { \ exttype_##T##_##2 result; \ result.x = x / other.x; \ result.y = y / other.y; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##2 operator*(const U &other) const { \ exttype_##T##_##2 result; \ result.x = other * x; \ result.y = other * y; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##2 operator+(const U &other) const { \ exttype_##T##_##2 result; \ result.x = other + x; \ result.y = other + y; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##2 operator-(const U &other) const { \ exttype_##T##_##2 result; \ result.x = x - other; \ result.y = y - other; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##2 operator/(const U &other) const { \ exttype_##T##_##2 result; \ result.x = x / other; \ result.y = y / other; \ return result; \ } \ template <typename U> \ DACE_HDFI T operator[](const U &index) const { \ if (index == U(0)) return x; \ return y; \ } \ };\ template <typename U> \ static DACE_HDFI exttype_##T##_##2 operator+(const U &a, const exttype_##T##_##2& b) { \ exttype_##T##_##2 result; \ result.x = a + b.x; \ result.y = a + b.y; \ return result; \ }\ template <typename U> \ static DACE_HDFI exttype_##T##_##2 operator-(const U &a, const exttype_##T##_##2& b) { \ exttype_##T##_##2 result; \ result.x = a - b.x; \ result.y = a - b.y; \ return result; \ }\ template <typename U> \ static DACE_HDFI 
exttype_##T##_##2 operator*(const U &a, const exttype_##T##_##2& b) { \ exttype_##T##_##2 result; \ result.x = a * b.x; \ result.y = a * b.y; \ return result; \ } #define DEFINE_EXTTYPE3(T, NAME) \ struct exttype_##T##_##3 : NAME##3 { \ DACE_HDFI exttype_##T##_##3 operator*(const exttype_##T##_##3 &other) const { \ exttype_##T##_##3 result; \ result.x = other.x * x; \ result.y = other.y * y; \ result.z = other.z * z; \ return result; \ } \ DACE_HDFI exttype_##T##_##3 operator+(const exttype_##T##_##3 &other) const { \ exttype_##T##_##3 result; \ result.x = other.x + x; \ result.y = other.y + y; \ result.z = other.z + z; \ return result; \ } \ DACE_HDFI exttype_##T##_##3 operator-(const exttype_##T##_##3 &other) const { \ exttype_##T##_##3 result; \ result.x = x - other.x; \ result.y = y - other.y; \ result.z = z - other.z; \ return result; \ } \ DACE_HDFI exttype_##T##_##3 operator/(const exttype_##T##_##3 &other) const { \ exttype_##T##_##3 result; \ result.x = x / other.x; \ result.y = y / other.y; \ result.z = z / other.z; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##3 operator*(const U &other) const { \ exttype_##T##_##3 result; \ result.x = other * x; \ result.y = other * y; \ result.z = other * z; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##3 operator+(const U &other) const { \ exttype_##T##_##3 result; \ result.x = other + x; \ result.y = other + y; \ result.z = other + z; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##3 operator-(const U &other) const { \ exttype_##T##_##3 result; \ result.x = x - other; \ result.y = y - other; \ result.z = z - other; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##3 operator/(const U &other) const { \ exttype_##T##_##3 result; \ result.x = x / other; \ result.y = y / other; \ result.z = z / other; \ return result; \ } \ template <typename U> \ DACE_HDFI T operator[](const U &index) const { \ if (index == U(0)) return x; \ else if (index == U(1)) return y; \ return z; \ } \ }; #define DEFINE_EXTTYPE4(T, NAME) \ struct exttype_##T##_##4 : NAME##4 { \ DACE_HDFI exttype_##T##_##4 operator*(const exttype_##T##_##4 &other) const { \ exttype_##T##_##4 result; \ result.x = other.x * x; \ result.y = other.y * y; \ result.z = other.z * z; \ result.w = other.w * w; \ return result; \ } \ DACE_HDFI exttype_##T##_##4 operator+(const exttype_##T##_##4 &other) const { \ exttype_##T##_##4 result; \ result.x = other.x + x; \ result.y = other.y + y; \ result.z = other.z + z; \ result.w = other.w + w; \ return result; \ } \ DACE_HDFI exttype_##T##_##4 operator-(const exttype_##T##_##4 &other) const { \ exttype_##T##_##4 result; \ result.x = x - other.x; \ result.y = y - other.y; \ result.z = z - other.z; \ result.w = w - other.w; \ return result; \ } \ DACE_HDFI exttype_##T##_##4 operator/(const exttype_##T##_##4 &other) const { \ exttype_##T##_##4 result; \ result.x = x / other.x; \ result.y = y / other.y; \ result.z = z / other.z; \ result.w = w / other.w; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##4 operator*(const U &other) const { \ exttype_##T##_##4 result; \ result.x = other * x; \ result.y = other * y; \ result.z = other * z; \ result.w = other * w; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##4 operator+(const U &other) const { \ exttype_##T##_##4 result; \ result.x = other + x; \ result.y = other + y; \ result.z = other + z; \ result.w = other + w; \ return result; \ } \ template <typename U> 
\ DACE_HDFI exttype_##T##_##4 operator-(const U &other) const { \ exttype_##T##_##4 result; \ result.x = x - other; \ result.y = y - other; \ result.z = z - other; \ result.w = w - other; \ return result; \ } \ template <typename U> \ DACE_HDFI exttype_##T##_##4 operator/(const U &other) const { \ exttype_##T##_##4 result; \ result.x = x / other; \ result.y = y / other; \ result.z = z / other; \ result.w = w / other; \ return result; \ } \ template <typename U> \ DACE_HDFI T operator[](const U &index) const { \ if (index == U(0)) return x; \ else if (index == U(1)) return y; \ else if (index == U(2)) return z; \ return w; \ } \ }; #define DEFINE_ALL_EXT_TYPES(T, NAME) \ DEFINE_EXTTYPE1(T, NAME); \ DEFINE_EXTTYPE2(T, NAME); \ DEFINE_EXTTYPE3(T, NAME); \ DEFINE_EXTTYPE4(T, NAME); #define DEFINE_TWO_EXT_TYPES(T, NAME) \ DEFINE_EXTTYPE1(T, NAME); \ DEFINE_EXTTYPE2(T, NAME); DEFINE_ALL_EXT_TYPES(int8, char); DEFINE_ALL_EXT_TYPES(uint8, uchar); DEFINE_ALL_EXT_TYPES(int16, short); DEFINE_ALL_EXT_TYPES(uint16, ushort); DEFINE_ALL_EXT_TYPES(int32, int); DEFINE_ALL_EXT_TYPES(uint32, uint); DEFINE_ALL_EXT_TYPES(int64, longlong); DEFINE_TWO_EXT_TYPES(uint64, ulonglong); DEFINE_ALL_EXT_TYPES(float32,float); DEFINE_ALL_EXT_TYPES(float64,double); ///////////////////////////////////////////////////////////////////////////// #define DEFINE_VECTYPE(T, N) \ template<> \ struct _vtype<T, N> \ { \ typedef exttype_##T##_##N aligned; \ typedef aligned unaligned; \ }; #define DEFINE_ARRVECTYPE(T, N) \ template<> \ struct _vtype<T, N> \ { \ typedef T aligned[N]; \ typedef aligned unaligned; \ }; DEFINE_VECTYPE(int8, 2); DEFINE_VECTYPE(int8, 3); DEFINE_VECTYPE(int8, 4); DEFINE_VECTYPE(uint8, 2); DEFINE_VECTYPE(uint8, 3); DEFINE_VECTYPE(uint8, 4); DEFINE_VECTYPE(int16, 2); DEFINE_VECTYPE(int16, 3); DEFINE_VECTYPE(int16, 4); DEFINE_VECTYPE(uint16, 2); DEFINE_VECTYPE(uint16, 3); DEFINE_VECTYPE(uint16, 4); DEFINE_VECTYPE(int32, 2); DEFINE_VECTYPE(int32, 3); DEFINE_VECTYPE(int32, 4); DEFINE_ARRVECTYPE(int32, 8); DEFINE_VECTYPE(uint32, 2); DEFINE_VECTYPE(uint32, 3); DEFINE_VECTYPE(uint32, 4); DEFINE_ARRVECTYPE(uint32, 8); DEFINE_VECTYPE(int64, 2); DEFINE_VECTYPE(uint64, 2); DEFINE_VECTYPE(float32, 2); DEFINE_VECTYPE(float32, 3); DEFINE_VECTYPE(float32, 4); DEFINE_VECTYPE(float64, 2); // Special case for half-precision template<> struct _vtype<half, 2> { typedef half2 aligned; typedef aligned unaligned; }; template<> struct _vtype<half, 4> { typedef half4 aligned; typedef aligned unaligned; }; template<> struct _vtype<half, 8> { typedef half8 aligned; typedef aligned unaligned; };
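// ----------------------------------------------------------------------------
// Illustrative sketch of how the generated extension types behave: after
// DEFINE_ALL_EXT_TYPES(float32, float), exttype_float32_2 derives from CUDA's
// float2 and gains componentwise arithmetic plus operator[], and
// _vtype<float32, 2>::aligned resolves to it. This assumes the _vtype primary
// template, the fixed-width aliases (float32, ...) and DACE_HDFI are declared
// earlier in this header; vec2_demo is not part of the DaCe runtime.
DACE_HDFI float32 vec2_demo()
{
    using vec2 = _vtype<float32, 2>::aligned;  // exttype_float32_2

    vec2 a; a.x = 1.0f; a.y = 2.0f;
    vec2 b; b.x = 3.0f; b.y = 4.0f;

    vec2 c = a * b + 0.5f;   // componentwise: {3.5f, 8.5f}
    vec2 d = 2.0f - c;       // scalar-on-left overload from DEFINE_EXTTYPE2
    return c[0] + d[1];      // operator[] gives component access
}
// ----------------------------------------------------------------------------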
template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::computePermutation(TetMesh* meshPtr, IdxVector_h &permutation, IdxVector_h &ipermutation, IdxVector_h &aggregateIdx, IdxVector_h &partitionIdx, int* partitionlabel, int* nnout, int* &xadjout, int* &adjncyout, int metissize) { // Getting the neighbors for the mesh meshPtr->need_neighbors(); // Vertex count: int nn = meshPtr->vertices.size(); // Counting up edges for adjacency: int edgeCount = 0; for (int vIt = 0; vIt < nn; vIt++) { edgeCount += meshPtr->neighbors[vIt].size(); } //Allocating storage for array values of adjacency int* xadj = new int[nn + 1]; int* adjncy = new int[edgeCount]; // filling the arrays: xadj[0] = 0; int idx = 0; // Populating the arrays: for (int i = 1; i < nn + 1; i++) { xadj[i] = xadj[i - 1] + meshPtr->neighbors[i - 1].size(); for (int j = 0; j < meshPtr->neighbors[i - 1].size(); j++) { adjncy[idx++] = meshPtr->neighbors[i - 1][j]; } } // Calling the other override to finish: computePermutation(nn, xadj, adjncy, permutation, ipermutation, aggregateIdx, partitionIdx, partitionlabel, nnout, xadjout, adjncyout, metissize); // Freeing up memories: delete[] xadj; delete[] adjncy; } template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::computePermutation(TriMesh* meshPtr, IdxVector_h &permutation, IdxVector_h &ipermutation, IdxVector_h &aggregateIdx, IdxVector_h &partitionIdx, int* partitionlabel, int* nnout, int* &xadjout, int* &adjncyout, int metissize) { // Getting the neighbors for the mesh meshPtr->need_neighbors(); // Vertex count: int nn = meshPtr->vertices.size(); // Counting up edges for adjacency: int edgeCount = 0; for (int vIt = 0; vIt < nn; vIt++) { edgeCount += meshPtr->neighbors[vIt].size(); } //Allocating storage for array values of adjacency int* xadj = new int[nn + 1]; int* adjncy = new int[edgeCount]; // filling the arrays: xadj[0] = 0; int idx = 0; // Populating the arrays: for (int i = 1; i < nn + 1; i++) { xadj[i] = xadj[i - 1] + meshPtr->neighbors[i - 1].size(); for (int j = 0; j < meshPtr->neighbors[i - 1].size(); j++) { adjncy[idx++] = meshPtr->neighbors[i - 1][j]; } } // Calling the other override to finish: computePermutation(nn, xadj, adjncy, permutation, ipermutation, aggregateIdx, partitionIdx, partitionlabel, nnout, xadjout, adjncyout, metissize); // Freeing up memories: delete[] xadj; delete[] adjncy; } template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::computePermutation(int nn, int* xadj, int* adjncy, IdxVector_h &permutation, IdxVector_h &ipermutation, IdxVector_h &aggregateIdx, IdxVector_h &partitionIdx, int* partitionlabel, int* nnout, int* &xadjout, int* &adjncyout, int metissize) { // Starting off by finding a fine aggregation of the mesh int *fineAggregate = new int[nn]; // get the initial depth from parameter int depth = metissize / 10; int finePartCount; extendedMIS(nn, depth, xadj, adjncy, fineAggregate, &finePartCount); // Building the next level graph int notNeighbor = finePartCount + 1; int *permutedFullAdjacency = new int[xadj[nn]]; int *initialPermutationOldToNew = new int[nn]; int *initialPermutationNewToOld = new int[nn]; int *permutationCheck = new int[nn]; int *nextAggregateOffset = new int[finePartCount + 1]; int *tempAggregateOffset = new int[finePartCount + 1]; int *aggregateVertexCounts = new int[finePartCount]; int *vertexNeighborCounts = new int[nn]; int *aggregateNeighborCounts = new int[finePartCount]; int *vertexAdjacencyOffsets = new int[nn + 1]; int *aggregateAdjacency = new 
int[finePartCount]; int *newAdjacencyIndexes = new int[finePartCount + 1]; int *newAdjacency; // Clearing aggregate counts for (int i = 0; i < finePartCount; i++) { aggregateVertexCounts[i] = 0; aggregateNeighborCounts[i] = 0; } // Counting vertices in each aggregate, and total neighbors for (int vIt = 0; vIt < nn; vIt++) { aggregateVertexCounts[fineAggregate[vIt]]++; aggregateNeighborCounts[fineAggregate[vIt]] += xadj[vIt + 1] - xadj[vIt]; } // Finding min/max aggregates: int min = aggregateVertexCounts[0]; int max = aggregateVertexCounts[0]; for (int i = 0; i < finePartCount; i++) { if (aggregateVertexCounts[i] > max) max = aggregateVertexCounts[i]; if (aggregateVertexCounts[i] < min) min = aggregateVertexCounts[i]; } //printf("There are: %d aggregates, minimum size: %d maximum size: %d\n", finePartCount, min, max); // Calculating the new offsets of each aggregate nextAggregateOffset[0] = 0; tempAggregateOffset[0] = 0; for (int aIt = 1; aIt < finePartCount + 1; aIt++) { // Doing a prefix sum: nextAggregateOffset[aIt] = nextAggregateOffset[aIt - 1] + aggregateVertexCounts[aIt - 1]; tempAggregateOffset[aIt] = nextAggregateOffset[aIt - 1] + aggregateVertexCounts[aIt - 1]; } // Filling in the initialPermutation array: for (int vIt = 0; vIt < nn; vIt++) { int aggID = fineAggregate[vIt]; initialPermutationOldToNew[vIt] = tempAggregateOffset[aggID]; initialPermutationNewToOld[tempAggregateOffset[aggID]] = vIt; tempAggregateOffset[aggID]++; } // For testing check the permutation array for consistency for (int vIt = 0; vIt < nn; vIt++) { permutationCheck[vIt] = initialPermutationOldToNew[initialPermutationNewToOld[vIt]]; } // Counting neighbors of each (permuted) node for (int vIt = 0; vIt < nn; vIt++) { int oldNodeID = initialPermutationNewToOld[vIt]; vertexNeighborCounts[vIt] = xadj[oldNodeID + 1] - xadj[oldNodeID]; } // Calculating the new vertex offsets: vertexAdjacencyOffsets[0] = 0; for (int vIt = 1; vIt < nn + 1; vIt++) { vertexAdjacencyOffsets[vIt] = vertexAdjacencyOffsets[vIt - 1] + vertexNeighborCounts[vIt - 1]; } // Filling in the permutedFullAdjacency for (int vIt = 0; vIt < nn; vIt++) { int permIdx = initialPermutationOldToNew[vIt]; int currentPart = fineAggregate[vIt]; int newOffset = vertexAdjacencyOffsets[permIdx]; int oldOffset = xadj[vIt]; for (int nIt = 0; nIt < vertexNeighborCounts[permIdx]; nIt++) { int partID = fineAggregate[adjncy[oldOffset + nIt]]; if (partID == currentPart) permutedFullAdjacency[newOffset + nIt] = notNeighbor; else permutedFullAdjacency[newOffset + nIt] = partID; //permutedFullAdjacency[newOffset + nIt] = adjacency[oldOffset + nIt]; } } // Sorting each aggregates neighbors (with duplicates) for (int aIt = 0; aIt < finePartCount; aIt++) { int beginAddr = vertexAdjacencyOffsets[nextAggregateOffset[aIt]]; int endAddr = vertexAdjacencyOffsets[nextAggregateOffset[aIt + 1]]; std::sort(permutedFullAdjacency + beginAddr, permutedFullAdjacency + endAddr); } // Setting counts to zero for (int aIt = 0; aIt < finePartCount; aIt++) { aggregateAdjacency[aIt] = 1; } // Counting the distinct neighbors of each aggregate: for (int aIt = 0; aIt < finePartCount; aIt++) { int begin = vertexAdjacencyOffsets[nextAggregateOffset[aIt]]; int end = vertexAdjacencyOffsets[nextAggregateOffset[aIt + 1]]; for (int i = begin + 1; i < end; i++) { if (permutedFullAdjacency[i] < notNeighbor && permutedFullAdjacency[i - 1] != permutedFullAdjacency[i]) { permutedFullAdjacency[begin + aggregateAdjacency[aIt]] = permutedFullAdjacency[i]; aggregateAdjacency[aIt]++; } } } // Finding the offsets 
for the aggregate adjacency newAdjacencyIndexes[0] = 0; for (int aIt = 1; aIt < finePartCount + 1; aIt++) { newAdjacencyIndexes[aIt] = newAdjacencyIndexes[aIt - 1] + aggregateAdjacency[aIt - 1]; } // Allocating the adjacency array newAdjacency = new int[newAdjacencyIndexes[finePartCount]]; // Writing the new adjacency to the list: for (int aIt = 0; aIt < finePartCount; aIt++) { int oldOffset = vertexAdjacencyOffsets[nextAggregateOffset[aIt]]; int newOffset = newAdjacencyIndexes[aIt]; for (int i = 0; i < aggregateAdjacency[aIt]; i++) { newAdjacency[newOffset + i] = permutedFullAdjacency[oldOffset + i]; } } // Allocating an array for the block partition: int *blockPartition = new int[finePartCount]; int blockCount; // Setting the depth from parameter: depth = metissize % 10; // Calling extendedMIS to get the block partition: extendedMIS(finePartCount, depth, newAdjacencyIndexes, newAdjacency, blockPartition, &blockCount); // Allocating block level arrays: int *blockAggregateCounts = new int[blockCount]; int *blockNeighborCounts = new int[blockCount]; int *nextBlockOffset = new int[blockCount + 1]; int *tempBlockOffset = new int[blockCount + 1]; int *blockPermutationOldToNew = new int[finePartCount]; int *blockPermutationNewToOld = new int[finePartCount]; int *permutedBlockAdjacency = new int[newAdjacencyIndexes[finePartCount]]; int *newAggregateNeighborCounts = new int[finePartCount]; int *aggregateAdjacencyOffsets = new int[finePartCount + 1]; partitionIdx.resize(blockCount + 1); // Clearing block counts for (int i = 0; i < blockCount; i++) { blockAggregateCounts[i] = 0; blockNeighborCounts[i] = 0; } // Counting aggregates in each block, and total neighbors for (int aIt = 0; aIt < finePartCount; aIt++) { blockAggregateCounts[blockPartition[aIt]]++; blockNeighborCounts[blockPartition[aIt]] += newAdjacencyIndexes[aIt + 1] - newAdjacencyIndexes[aIt]; } // Calculating the new offsets of each block nextBlockOffset[0] = 0; tempBlockOffset[0] = 0; for (int bIt = 1; bIt < blockCount + 1; bIt++) { // Doing a prefix sum: nextBlockOffset[bIt] = nextBlockOffset[bIt - 1] + blockAggregateCounts[bIt - 1]; tempBlockOffset[bIt] = nextBlockOffset[bIt - 1] + blockAggregateCounts[bIt - 1]; partitionIdx[bIt] = nextBlockOffset[bIt - 1] + blockAggregateCounts[bIt - 1]; } // Filling in the blockPermutation array: for (int aIt = 0; aIt < finePartCount; aIt++) { int blockID = blockPartition[aIt]; blockPermutationOldToNew[aIt] = tempBlockOffset[blockID]; blockPermutationNewToOld[tempBlockOffset[blockID]] = aIt; tempBlockOffset[blockID]++; } // Counting neighbors of each (permuted) aggregate for (int aIt = 0; aIt < finePartCount; aIt++) { int oldNodeID = blockPermutationNewToOld[aIt]; newAggregateNeighborCounts[aIt] = newAdjacencyIndexes[oldNodeID + 1] - newAdjacencyIndexes[oldNodeID]; } // Calculating the new aggregate offsets: aggregateAdjacencyOffsets[0] = 0; for (int aIt = 1; aIt < finePartCount + 1; aIt++) { aggregateAdjacencyOffsets[aIt] = aggregateAdjacencyOffsets[aIt - 1] + newAggregateNeighborCounts[aIt - 1]; } // Filling in the permutedBlockAdjacency for (int aIt = 0; aIt < finePartCount; aIt++) { int permIdx = blockPermutationOldToNew[aIt]; int newOffset = aggregateAdjacencyOffsets[permIdx]; int oldOffset = newAdjacencyIndexes[aIt]; for (int nIt = 0; nIt < newAggregateNeighborCounts[permIdx]; nIt++) { // Permute the neighbor's index and write to new location in array. 
permutedBlockAdjacency[newOffset + nIt] = blockPermutationOldToNew[newAdjacency[oldOffset + nIt]]; } } // Finding the new permutation on the original vertices // Reusing the original arrays as they are no longer needed. permutation.resize(nn); ipermutation.resize(nn); int *permutedFineAggregate = new int[nn]; aggregateIdx.resize(finePartCount + 1); // filling the permutedFineAggregate array: for (int vIt = 0; vIt < nn; vIt++) { permutedFineAggregate[vIt] = blockPermutationOldToNew[fineAggregate[vIt]]; } // Clearing aggregate counts for (int i = 0; i < finePartCount; i++) { aggregateVertexCounts[i] = 0; aggregateNeighborCounts[i] = 0; } // Counting vertices in each aggregate, and total neighbors for (int vIt = 0; vIt < nn; vIt++) { aggregateVertexCounts[permutedFineAggregate[vIt]]++; } // Calculating the new offsets of each aggregate tempAggregateOffset[0] = 0; aggregateIdx[0] = 0; for (int aIt = 1; aIt < finePartCount + 1; aIt++) { // Doing a prefix sum: tempAggregateOffset[aIt] = tempAggregateOffset[aIt - 1] + aggregateVertexCounts[aIt - 1]; aggregateIdx[aIt] = tempAggregateOffset[aIt - 1] + aggregateVertexCounts[aIt - 1]; } // Filling in the finalPermutation array: for (int vIt = 0; vIt < nn; vIt++) { int aggID = permutedFineAggregate[vIt]; permutation[vIt] = tempAggregateOffset[aggID]; ipermutation[tempAggregateOffset[aggID]] = vIt; tempAggregateOffset[aggID]++; } // Setting values for return: *nnout = finePartCount; xadjout = aggregateAdjacencyOffsets; adjncyout = permutedBlockAdjacency; // Setting the partitionlabel: for (int i = 0; i < blockCount; i++) { int startAt = aggregateIdx[partitionIdx[i]]; int nextBlockAt = aggregateIdx[partitionIdx[i + 1]]; for (int j = startAt; j < nextBlockAt; j++) partitionlabel[j] = i; } // Deleting the temporary arrays: delete[] fineAggregate; delete[] permutedFineAggregate; delete[] blockAggregateCounts; delete[] blockNeighborCounts; delete[] nextBlockOffset; delete[] tempBlockOffset; delete[] blockPermutationOldToNew; delete[] blockPermutationNewToOld; delete[] newAggregateNeighborCounts; delete[] permutedFullAdjacency; delete[] initialPermutationOldToNew; delete[] initialPermutationNewToOld; delete[] permutationCheck; delete[] nextAggregateOffset; delete[] tempAggregateOffset; delete[] aggregateVertexCounts; delete[] vertexNeighborCounts; delete[] aggregateNeighborCounts; delete[] vertexAdjacencyOffsets; delete[] aggregateAdjacency; delete[] newAdjacencyIndexes; delete[] newAdjacency; // And Done. 
return; } template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::computePermutation_d(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int agg_type, int parameters, int part_max_size, bool verbose) { if (agg_type == 0) { if (verbose) printf("Calling Old MIS Aggregation method.\n"); misHelpers::CP::OldMIS(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, parameters, part_max_size, verbose); } else if (agg_type == 1) { if (verbose) printf("Calling Metis bottom up method\n"); misHelpers::CP::MetisBottomUp(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, parameters, part_max_size, verbose); } else if (agg_type == 2) { if (verbose) printf("Calling Metis top down method\n"); misHelpers::CP::MetisTopDown(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, parameters, part_max_size, verbose); } else if (agg_type == 3) { if (verbose) printf("Calling AggMIS GPU method\n"); misHelpers::CP::NewMIS(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, parameters, part_max_size, verbose); } else if (agg_type == 4) { if (verbose) printf("Calling AggMIS CPU method\n"); misHelpers::CP::NewMIS_CPU(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, parameters, part_max_size, verbose); } else if (agg_type == 5) { if (verbose) printf("Calling AggMIS Light CPU method\n"); misHelpers::CP::LightMIS_CPU(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, parameters, part_max_size, verbose); } else if (verbose) printf("Aggregation method %d not recognized!\n", agg_type); if (verbose) std::cout << "Finished with RandMIS_Aggregator::computePermutation_d" << std::endl; } template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::computePermutation_d(TriMesh *meshPtr, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int aggregation_type, int parameters, int part_max_size, bool verbose) { IdxVector_d adjIndexesIn, adjacencyIn; misHelpers::getAdjacency(meshPtr, adjIndexesIn, adjacencyIn); computePermutation_d(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, adjacencyOut, aggregation_type, parameters, part_max_size, verbose); } template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::computePermutation_d(TetMesh *meshPtr, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int aggregation_type, int parameters, int part_max_size, bool verbose) { IdxVector_d adjIndexesIn, adjacencyIn; misHelpers::getAdjacency(meshPtr, adjIndexesIn, adjacencyIn); computePermutation_d(adjIndexesIn, adjacencyIn, permutation, ipermutation, aggregateIdx, partitionIdx, partitionLabel, adjIndexesOut, 
adjacencyOut, aggregation_type, parameters, part_max_size, verbose); } template <class Matrix, class Vector> void RandMIS_Aggregator<Matrix, Vector>::extendedMIS(int n, int partSize, int *adjIndexes, int *adjacency, int *partition, int *partCount, bool verbose) { // If the input is too small just return a single partition if (n < 32) { for (int i = 0; i < n; i++) partition[i] = 0; *partCount = 1; return; } clock_t starttime, endtime; starttime = clock(); if (verbose) printf("Beginning extended MIS call %d nodes\n", n); // Creating a graph with edges for every distinct path less than k std::vector< std::vector <int> > inducedAdj(n); std::vector<int> clusterCounts(n); // Every vertex for (int vIt = 0; vIt < n; vIt++) { std::vector< std::vector <int> > nodeRings(partSize); // Add neighbors to nodeRing 0 for (int nIt = adjIndexes[vIt]; nIt < adjIndexes[vIt + 1]; nIt++) { nodeRings[0].push_back(adjacency[nIt]); } // For every level of nodeRings for (int level = 1; level < nodeRings.size(); level++) { // Every node at the previous level: for (int lowerNode = 0; lowerNode < nodeRings[level - 1].size(); lowerNode++) { // Every neighbor of lower nodes int currentNode = nodeRings[level - 1][lowerNode]; for (int nIt = adjIndexes[currentNode]; nIt < adjIndexes[currentNode + 1]; nIt++) { int candidate = adjacency[nIt]; // Checking the candidate is not the root... if (candidate != vIt) { // If the node is not present in nodeRings add to current level for (int i = 0; i <= level && candidate != -1; i++) { if (nodeRings[i].size() == 0) nodeRings[i].push_back(candidate); for (int j = 0; j < nodeRings[i].size(); j++) if (nodeRings[i][j] == candidate) candidate = -1; } if (candidate != -1) { nodeRings[level].push_back(candidate); } } } } } // Now that nodeRings are populated add edges to all nodes in upper level (k-path's) int clusterCount = 1; for (int i = 0; i < nodeRings.size(); i++) { for (int j = 0; j < nodeRings[i].size(); j++) { inducedAdj[vIt].push_back(nodeRings[i][j]); clusterCount++; } } clusterCounts[vIt] = clusterCount; } if (verbose) printf("Finished generating induced graph.\n"); // Calculating average cluster count to determine random weighting int totalClusterCount = 0; int maxDegree = clusterCounts[0]; for (int i = 0; i < clusterCounts.size(); i++) { totalClusterCount += clusterCounts[i]; if (maxDegree < clusterCounts[i]) maxDegree = clusterCounts[i]; } double averageClusterSize = (double)totalClusterCount / (double)clusterCounts.size(); double probPositive = (1.0 / (averageClusterSize + 1.0)); if (verbose) printf("ProbPositive = %f\n", probPositive); //printf("Random Weight is: %f\n", randWeight); // Clearing partitions: for (int i = 0; i < n; i++) { partition[i] = -1; } // Finding a maximal independent set randomly: std::vector<int> MIS(n, -1); std::vector<double> RandValues(n); std::vector<double> randThreshold(n); std::vector<int> rootDistance(n, -1); // Setting probability thresholds for each node based on degree for (int vIt = 0; vIt < n; vIt++) { // The degreeFactor is the percent difference between this degree and average // double degreeFactor = clusterCounts[vIt] - averageClusterSize; // if (degreeFactor < 0) // degreeFactor *= - 1; double degreeFactor = (double)clusterCounts[vIt] / (double)maxDegree; degreeFactor /= averageClusterSize; degreeFactor = 1 - degreeFactor; if (degreeFactor < .1) printf("Low degreeFactor: %f degree: %d average degree: %f\n", degreeFactor, clusterCounts[vIt], averageClusterSize); // The threshold value is the probPositive times the degreeFactor 
randThreshold[vIt] = degreeFactor * probPositive; if (randThreshold[vIt] > 1 || randThreshold[vIt] < 0) printf("Random threshold out of range: %f degreeFactor = %f probPositive = %f!\n", randThreshold[vIt], degreeFactor, probPositive); } if (verbose) printf("Finished generation of random thresholds.\n"); bool incomplete = true; // srand(time(NULL)); srand(0); int iterations = 0; while (incomplete) { iterations++; if (iterations > 10000) { printf("Something seems to be going wrong with the random assignments!\n"); for (int i = 0; i < n; i++) partition[i] = 0; *partCount = 1; return; } // Maybe we are done? incomplete = false; // Independent for loop for (int i = 0; i < n; i++) { if (MIS[i] == -1) { // This should assign to a random value between 0 and 1 double randValue = (double)rand() / (double)(RAND_MAX); // If the value is below the randThreshold than 1 else -1 if (randValue < randThreshold[i]) RandValues[i] = 1; else RandValues[i] = -1; // There is still work to do incomplete = true; } else if (MIS[i] == 1) { RandValues[i] = 1; } else { RandValues[i] = -1; } } // Independent for loop for (int i = 0; i < n && incomplete; i++) { if (RandValues[i] > 0) { bool negativeNeighbors = true; for (int j = 0; j < inducedAdj[i].size(); j++) { if (RandValues[inducedAdj[i][j]] > 0) negativeNeighbors = false; } if (negativeNeighbors) { // Mark the node as in MIS MIS[i] = 1; rootDistance[i] = 0; // Mark all neighbors as out for (int j = 0; j < inducedAdj[i].size(); j++) { MIS[inducedAdj[i][j]] = 0; //rootDistance[i] = 1; } } } } } if (verbose) printf("Found a MIS of the graph in %d iterations.\n", iterations); // Setting each member of the independent set to be the root of a partition std::vector<int> rootNodes; int curPart = 0; for (int i = 0; i < n; i++) { if (MIS[i] == 1) { partition[i] = curPart; rootNodes.push_back(i); curPart++; } } // Setting the partCount *partCount = curPart; // An array to hold partition assignments to apply int *newPartition = new int[n]; for (int i = 0; i < n; i++) newPartition[i] = partition[i]; std::vector<int> partSizes(curPart, 1); // Adding unpartitioned nodes to best partition for them: incomplete = true; int its = 0; // new rootDistance array int *newRootDist = new int[n]; for (int i = 0; i < n; i++) newRootDist[i] = rootDistance[i]; while (incomplete) { incomplete = false; its++; // If this has been going on too long: if (its > 2 * n) { printf("There was an error in the node allocation section: Too many iterations!\n"); for (int i = 0; i < n; i++) partition[i] = 0; *partCount = 1; return; } for (int i = 0; i < n; i++) { if (partition[i] == -1) { int adjSize = adjIndexes[i + 1] - adjIndexes[i]; // printf("adjSize is: %d\n", adjSize); int *adjParts = new int[adjSize]; int *adjRootDist = new int[adjSize]; int *adjSizes = new int[adjSize]; int *adjCounts = new int[adjSize]; int *adjScore = new int[adjSize]; // Getting adjacent partitions: for (int j = 0; j < adjSize; j++) { int adjacentNodePart = partition[adjacency[adjIndexes[i] + j]]; adjParts[j] = adjacentNodePart; // Getting the size of the aggregate adjSizes[j] = partSizes[adjacentNodePart]; // Getting the distance of the adjacent node to the root of its partition: if (adjacentNodePart == -1) { adjRootDist[j] = 1000; } else { adjRootDist[j] = rootDistance[adjacency[adjIndexes[i] + j]]; } } // Finding the smallest partition distance: int smallestDistance = 1000; int largestDistance = 0; int largestSize = adjSizes[0]; int smallestSize = adjSizes[0]; for (int j = 0; j < adjSize; j++) adjCounts[j] = 1; for (int j = 0; 
j < adjSize; j++) { if (adjParts[j] != -1) { if (smallestDistance > adjRootDist[j]) { smallestDistance = adjRootDist[j]; } if (smallestSize > adjSizes[j]) { smallestSize = adjSizes[j]; } if (adjRootDist[j] < 1000 && largestDistance < adjRootDist[j]) { largestDistance = adjRootDist[j]; } if (largestSize > adjSizes[j]) { largestSize = adjSizes[j]; } for (int jj = j + 1; jj < adjSize; jj++) { if (adjParts[j] == adjParts[jj] && adjCounts[j] < 3) { adjCounts[j]++; adjCounts[jj]++; } } } } // Calculating score factor for each entry: double highestScore = -1; int scoringPart = -1; for (int j = 0; j < adjSize; j++) { if (adjParts[j] != -1) { double sizeScore = 1.0 / (adjSizes[j] - smallestSize + 1); double distScore = ((double)smallestDistance + 1) / (adjRootDist[j] + 1); double adjScore = std::pow(0.75, 4. - static_cast<double>(adjCounts[j])); double totalScore = (sizeScore + distScore) * adjScore; if (totalScore > highestScore) { highestScore = totalScore; scoringPart = adjParts[j]; } } } // Adding the node to best part found: newPartition[i] = scoringPart; if (scoringPart >= 0) { partSizes[scoringPart]++; newRootDist[i] = smallestDistance + 1; } else incomplete = true; delete[] adjParts; delete[] adjRootDist; delete[] adjSizes; delete[] adjCounts; delete[] adjScore; } } // Write changes to partition: for (int i = 0; i < n; i++) { partition[i] = newPartition[i]; rootDistance[i] = newRootDist[i]; } if (!incomplete) { // To store the parts that are too small: std::vector<int> partsToRemove; // Check for too small partitions for (int i = 0; i < partSizes.size(); i++) { if (partSizes[i] < 6) { partsToRemove.push_back(i); incomplete = true; } } if (partsToRemove.size() != partSizes.size()) { if (verbose) printf("Starting removal of runty parts.\n"); int originalPartCount = *partCount; // Removing runty aggregates: for (int i = partsToRemove.size() - 1; i > -1; i--) { // Unmark the partition label and rootnode dist for (int j = 0; j < n; j++) { if (partition[j] == partsToRemove[i]) { partition[j] = -1; newPartition[j] = -1; rootDistance[j] = -1; newRootDist[j] = -1; } if (partition[j] > partsToRemove[i]) { partition[j] = partition[j] - 1; newPartition[j] = newPartition[j] - 1; } } // Remove the entry from the partSizes array *partCount = *partCount - 1; partSizes.erase(partSizes.begin() + partsToRemove[i]); } if (verbose) printf("Removed %lu undersized aggregates out of %d total. Leaving %d\n", partsToRemove.size(), originalPartCount, *partCount); } } if (!incomplete && verbose) { endtime = clock(); double duration = (double)(endtime - starttime) * 1000 / CLOCKS_PER_SEC; printf("Finished with a call to extendedMIS in %f ms\n", duration); printf("\t%d nodes aggregated to depth: %d \n\n", n, partSize); } } delete[] newRootDist; if (verbose) printf("GoodBye.\n"); } /**************************************** * Explicit instantiations ***************************************/ template class RandMIS_Aggregator<Matrix_h, Vector_h>; template class RandMIS_Aggregator<Matrix_d, Vector_d>;
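// ----------------------------------------------------------------------------
// Both mesh overloads of computePermutation above build the same CSR-style
// adjacency (xadj / adjncy) from per-vertex neighbor lists before delegating to
// the array-based overload. The standalone sketch below shows that conversion
// on plain std::vector input; buildCSR is a hypothetical helper and is not part
// of this aggregator.
#include <vector>

static void buildCSR(const std::vector<std::vector<int> > &neighbors,
                     std::vector<int> &xadj, std::vector<int> &adjncy)
{
  int nn = static_cast<int>(neighbors.size());
  xadj.assign(nn + 1, 0);
  // Prefix sum of the neighbor counts gives the row offsets.
  for (int v = 0; v < nn; v++)
    xadj[v + 1] = xadj[v] + static_cast<int>(neighbors[v].size());
  adjncy.resize(xadj[nn]);
  // Flatten the neighbor lists into the column index array.
  int idx = 0;
  for (int v = 0; v < nn; v++)
    for (size_t j = 0; j < neighbors[v].size(); j++)
      adjncy[idx++] = neighbors[v][j];
}
// ----------------------------------------------------------------------------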
__constant__ float kEpsilon = 1.0e-10; // used to prevent division by 0 extern "C" { // CUDA version of the scenario_step() in // "ai_economist.foundation.scenarios.covid19_env.py" // CUDA version of the sir_step() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ void cuda_sir_step( float* susceptible, float* infected, float* recovered, float* vaccinated, float* deaths, int* num_vaccines_available_t, const int* kRealWorldStringencyPolicyHistory, const float kStatePopulation, const int kNumAgents, const int kBetaDelay, const float kBetaSlope, const float kbetaIntercept, int* stringency_level, float* beta, const float kGamma, const float kDeathRate, const int kEnvId, const int kAgentId, int timestep, const int kEpisodeLength, const int kArrayIdxCurrentTime, const int kArrayIdxPrevTime, const int kTimeIndependentArrayIdx ) { float susceptible_fraction_vaccinated = min( 1.0, num_vaccines_available_t[kTimeIndependentArrayIdx] / (susceptible[kArrayIdxPrevTime] + kEpsilon)); float vaccinated_t = min( static_cast<float>(num_vaccines_available_t[ kTimeIndependentArrayIdx]), susceptible[kArrayIdxPrevTime]); // (S/N) * I in place of (S*I) / N to prevent overflow float neighborhood_SI_over_N = susceptible[kArrayIdxPrevTime] / kStatePopulation * infected[kArrayIdxPrevTime]; int stringency_level_tmk; if (timestep < kBetaDelay) { stringency_level_tmk = kRealWorldStringencyPolicyHistory[ (timestep - 1) * (kNumAgents - 1) + kAgentId]; } else { stringency_level_tmk = stringency_level[kEnvId * ( kEpisodeLength + 1) * (kNumAgents - 1) + (timestep - kBetaDelay) * (kNumAgents - 1) + kAgentId]; } beta[kTimeIndependentArrayIdx] = stringency_level_tmk * kBetaSlope + kbetaIntercept; float dS_t = -(neighborhood_SI_over_N * beta[ kTimeIndependentArrayIdx] * (1 - susceptible_fraction_vaccinated) + vaccinated_t); float dR_t = kGamma * infected[kArrayIdxPrevTime] + vaccinated_t; float dI_t = - dS_t - dR_t; susceptible[kArrayIdxCurrentTime] = max( 0.0, susceptible[kArrayIdxPrevTime] + dS_t); infected[kArrayIdxCurrentTime] = max( 0.0, infected[kArrayIdxPrevTime] + dI_t); recovered[kArrayIdxCurrentTime] = max( 0.0, recovered[kArrayIdxPrevTime] + dR_t); vaccinated[kArrayIdxCurrentTime] = vaccinated_t + vaccinated[kArrayIdxPrevTime]; float recovered_but_not_vaccinated = recovered[kArrayIdxCurrentTime] - vaccinated[kArrayIdxCurrentTime]; deaths[kArrayIdxCurrentTime] = recovered_but_not_vaccinated * kDeathRate; } // CUDA version of the softplus() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ float softplus(float x) { const float kBeta = 1.0; const float kThreshold = 20.0; if (kBeta * x < kThreshold) { return 1.0 / kBeta * log(1.0 + exp(kBeta * x)); } else { return x; } } __device__ float signal2unemployment( const int kEnvId, const int kAgentId, float* signal, const float* kUnemploymentConvolutionalFilters, const float kUnemploymentBias, const int kNumAgents, const int kFilterLen, const int kNumFilters ) { float unemployment = 0.0; const int kArrayIndexOffset = kEnvId * (kNumAgents - 1) * kNumFilters * kFilterLen + kAgentId * kNumFilters * kFilterLen; for (int index = 0; index < (kFilterLen * kNumFilters); index ++) { unemployment += signal[kArrayIndexOffset + index] * kUnemploymentConvolutionalFilters[index]; } return softplus(unemployment) + kUnemploymentBias; } // CUDA version of the unemployment_step() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ void cuda_unemployment_step( float* unemployed, int* stringency_level, int* delta_stringency_level, const float* 
kGroupedConvolutionalFilterWeights, const float* kUnemploymentConvolutionalFilters, const float* kUnemploymentBias, float* convolved_signal, const int kFilterLen, const int kNumFilters, const float kStatePopulation, const int kNumAgents, const int kEnvId, const int kAgentId, int timestep, const int kArrayIdxCurrentTime, const int kArrayIdxPrevTime ) { // Shift array by kNumAgents - 1 for (int idx = 0; idx < kFilterLen - 1; idx ++) { delta_stringency_level[ kEnvId * kFilterLen * (kNumAgents - 1) + idx * (kNumAgents - 1) + kAgentId ] = delta_stringency_level[ kEnvId * kFilterLen * (kNumAgents - 1) + (idx + 1) * (kNumAgents - 1) + kAgentId ]; } delta_stringency_level[ kEnvId * kFilterLen * (kNumAgents - 1) + (kFilterLen - 1) * (kNumAgents - 1) + kAgentId ] = stringency_level[kArrayIdxCurrentTime] - stringency_level[kArrayIdxPrevTime]; // convolved_signal refers to the convolution between the filter weights // and the delta stringency levels for (int filter_idx = 0; filter_idx < kNumFilters; filter_idx ++) { for (int idx = 0; idx < kFilterLen; idx ++) { convolved_signal[ kEnvId * (kNumAgents - 1) * kNumFilters * kFilterLen + kAgentId * kNumFilters * kFilterLen + filter_idx * kFilterLen + idx ] = delta_stringency_level[kEnvId * kFilterLen * (kNumAgents - 1) + idx * (kNumAgents - 1) + kAgentId] * kGroupedConvolutionalFilterWeights[kAgentId * kNumFilters + filter_idx]; } } float unemployment_rate = signal2unemployment( kEnvId, kAgentId, convolved_signal, kUnemploymentConvolutionalFilters, kUnemploymentBias[kAgentId], kNumAgents, kFilterLen, kNumFilters); unemployed[kArrayIdxCurrentTime] = unemployment_rate * kStatePopulation / 100.0; } // CUDA version of the economy_step() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ void cuda_economy_step( float* infected, float* deaths, float* unemployed, float* incapacitated, float* cant_work, float* num_people_that_can_work, const float kStatePopulation, const float kInfectionTooSickToWorkRate, const float kPopulationBetweenAge18And65, const float kDailyProductionPerWorker, float* productivity, float* subsidy, float* postsubsidy_productivity, int timestep, const int kArrayIdxCurrentTime, int kTimeIndependentArrayIdx ) { incapacitated[kTimeIndependentArrayIdx] = kInfectionTooSickToWorkRate * infected[kArrayIdxCurrentTime] + deaths[kArrayIdxCurrentTime]; cant_work[kTimeIndependentArrayIdx] = incapacitated[kTimeIndependentArrayIdx] * kPopulationBetweenAge18And65 + unemployed[kArrayIdxCurrentTime]; int num_workers = static_cast<int>(kStatePopulation) * kPopulationBetweenAge18And65; num_people_that_can_work[kTimeIndependentArrayIdx] = max( 0.0, num_workers - cant_work[kTimeIndependentArrayIdx]); productivity[kArrayIdxCurrentTime] = num_people_that_can_work[kTimeIndependentArrayIdx] * kDailyProductionPerWorker; postsubsidy_productivity[kArrayIdxCurrentTime] = productivity[kArrayIdxCurrentTime] + subsidy[kArrayIdxCurrentTime]; } // CUDA version of crra_nonlinearity() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ float crra_nonlinearity( float x, const float kEta, const int kNumDaysInAnYear ) { float annual_x = kNumDaysInAnYear * x; float annual_x_clipped = annual_x; if (annual_x < 0.1) { annual_x_clipped = 0.1; } else if (annual_x > 3.0) { annual_x_clipped = 3.0; } float annual_crra = 1 + (pow(annual_x_clipped, (1 - kEta)) - 1) / (1 - kEta); float daily_crra = annual_crra / kNumDaysInAnYear; return daily_crra; } // CUDA version of min_max_normalization() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ 
float min_max_normalization( float x, const float kMinX, const float kMaxX ) { return (x - kMinX) / (kMaxX - kMinX + kEpsilon); } // CUDA version of get_rew() in // "ai_economist.foundation.scenarios.covid19_env.py" __device__ float get_rew( const float kHealthIndexWeightage, float health_index, const float kEconomicIndexWeightage, float economic_index ) { return ( kHealthIndexWeightage * health_index + kEconomicIndexWeightage * economic_index) / (kHealthIndexWeightage + kEconomicIndexWeightage); } // CUDA version of scenario_step() in // "ai_economist.foundation.scenarios.covid19_env.py" __global__ void CudaCovidAndEconomySimulationStep( float* susceptible, float* infected, float* recovered, float* deaths, float* vaccinated, float* unemployed, float* subsidy, float* productivity, int* stringency_level, const int kNumStringencyLevels, float* postsubsidy_productivity, int* num_vaccines_available_t, const int* kRealWorldStringencyPolicyHistory, const int kBetaDelay, const float* kBetaSlopes, const float* kbetaIntercepts, float* beta, const float kGamma, const float kDeathRate, float* incapacitated, float* cant_work, float* num_people_that_can_work, const int* us_kStatePopulation, const float kInfectionTooSickToWorkRate, const float kPopulationBetweenAge18And65, const int kFilterLen, const int kNumFilters, int* delta_stringency_level, const float* kGroupedConvolutionalFilterWeights, const float* kUnemploymentConvolutionalFilters, const float* kUnemploymentBias, float* signal, const float kDailyProductionPerWorker, const float* maximum_productivity, float* obs_a_world_agent_state, float* obs_a_world_agent_postsubsidy_productivity, float* obs_a_world_lagged_stringency_level, float* obs_a_time, float* obs_p_world_agent_state, float* obs_p_world_agent_postsubsidy_productivity, float* obs_p_world_lagged_stringency_level, float* obs_p_time, int * env_timestep_arr, const int kNumAgents, const int kEpisodeLength ) { const int kEnvId = blockIdx.x; const int kAgentId = threadIdx.x; assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= kEpisodeLength); assert (kAgentId <= kNumAgents - 1); const int kNumFeatures = 6; if (kAgentId < (kNumAgents - 1)) { // Indices for time-dependent and time-independent arrays // Time dependent arrays have shapes (num_envs, // kEpisodeLength + 1, kNumAgents - 1) // Time independent arrays have shapes (num_envs, kNumAgents - 1) const int kArrayIndexOffset = kEnvId * (kEpisodeLength + 1) * (kNumAgents - 1); int kArrayIdxCurrentTime = kArrayIndexOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + kAgentId; int kArrayIdxPrevTime = kArrayIndexOffset + (env_timestep_arr[kEnvId] - 1) * (kNumAgents - 1) + kAgentId; const int kTimeIndependentArrayIdx = kEnvId * (kNumAgents - 1) + kAgentId; const float kStatePopulation = static_cast<float>(us_kStatePopulation[kAgentId]); cuda_sir_step( susceptible, infected, recovered, vaccinated, deaths, num_vaccines_available_t, kRealWorldStringencyPolicyHistory, kStatePopulation, kNumAgents, kBetaDelay, kBetaSlopes[kAgentId], kbetaIntercepts[kAgentId], stringency_level, beta, kGamma, kDeathRate, kEnvId, kAgentId, env_timestep_arr[kEnvId], kEpisodeLength, kArrayIdxCurrentTime, kArrayIdxPrevTime, kTimeIndependentArrayIdx); cuda_unemployment_step( unemployed, stringency_level, delta_stringency_level, kGroupedConvolutionalFilterWeights, kUnemploymentConvolutionalFilters, kUnemploymentBias, signal, kFilterLen, kNumFilters, kStatePopulation, kNumAgents, kEnvId, kAgentId, env_timestep_arr[kEnvId], kArrayIdxCurrentTime, 
kArrayIdxPrevTime); cuda_economy_step( infected, deaths, unemployed, incapacitated, cant_work, num_people_that_can_work, kStatePopulation, kInfectionTooSickToWorkRate, kPopulationBetweenAge18And65, kDailyProductionPerWorker, productivity, subsidy, postsubsidy_productivity, env_timestep_arr[kEnvId], kArrayIdxCurrentTime, kTimeIndependentArrayIdx); // CUDA version of generate observations // Agents' observations int kFeatureArrayIndexOffset = kEnvId * kNumFeatures * (kNumAgents - 1) + kAgentId; obs_a_world_agent_state[ kFeatureArrayIndexOffset + 0 * (kNumAgents - 1) ] = susceptible[kArrayIdxCurrentTime] / kStatePopulation; obs_a_world_agent_state[ kFeatureArrayIndexOffset + 1 * (kNumAgents - 1) ] = infected[kArrayIdxCurrentTime] / kStatePopulation; obs_a_world_agent_state[ kFeatureArrayIndexOffset + 2 * (kNumAgents - 1) ] = recovered[kArrayIdxCurrentTime] / kStatePopulation; obs_a_world_agent_state[ kFeatureArrayIndexOffset + 3 * (kNumAgents - 1) ] = deaths[kArrayIdxCurrentTime] / kStatePopulation; obs_a_world_agent_state[ kFeatureArrayIndexOffset + 4 * (kNumAgents - 1) ] = vaccinated[kArrayIdxCurrentTime] / kStatePopulation; obs_a_world_agent_state[ kFeatureArrayIndexOffset + 5 * (kNumAgents - 1) ] = unemployed[kArrayIdxCurrentTime] / kStatePopulation; for (int feature_id = 0; feature_id < kNumFeatures; feature_id ++) { const int kIndex = feature_id * (kNumAgents - 1); obs_p_world_agent_state[kFeatureArrayIndexOffset + kIndex ] = obs_a_world_agent_state[kFeatureArrayIndexOffset + kIndex]; } obs_a_world_agent_postsubsidy_productivity[ kTimeIndependentArrayIdx ] = postsubsidy_productivity[kArrayIdxCurrentTime] / maximum_productivity[kAgentId]; obs_p_world_agent_postsubsidy_productivity[ kTimeIndependentArrayIdx ] = obs_a_world_agent_postsubsidy_productivity[ kTimeIndependentArrayIdx ]; int t_beta = env_timestep_arr[kEnvId] - kBetaDelay + 1; if (t_beta < 0) { obs_a_world_lagged_stringency_level[ kTimeIndependentArrayIdx ] = kRealWorldStringencyPolicyHistory[ env_timestep_arr[kEnvId] * (kNumAgents - 1) + kAgentId ] / static_cast<float>(kNumStringencyLevels); } else { obs_a_world_lagged_stringency_level[ kTimeIndependentArrayIdx ] = stringency_level[ kArrayIndexOffset + t_beta * (kNumAgents - 1) + kAgentId ] / static_cast<float>(kNumStringencyLevels); } obs_p_world_lagged_stringency_level[ kTimeIndependentArrayIdx ] = obs_a_world_lagged_stringency_level[ kTimeIndependentArrayIdx]; // Below, we assume observation scaling = True // (otherwise, 'obs_a_time[kTimeIndependentArrayIdx] = // static_cast<float>(env_timestep_arr[kEnvId]) obs_a_time[kTimeIndependentArrayIdx] = env_timestep_arr[kEnvId] / static_cast<float>(kEpisodeLength); } else if (kAgentId == kNumAgents - 1) { obs_p_time[kEnvId] = env_timestep_arr[kEnvId] / static_cast<float>(kEpisodeLength); } } // CUDA version of the compute_reward() in // "ai_economist.foundation.scenarios.covid19_env.py" __global__ void CudaComputeReward( float* rewards_a, float* rewards_p, const int kNumDaysInAnYear, const int kValueOfLife, const float kRiskFreeInterestRate, const float kEconomicRewardCrraEta, const float* kMinMarginalAgentHealthIndex, const float* kMaxMarginalAgentHealthIndex, const float* kMinMarginalAgentEconomicIndex, const float* kMaxMarginalAgentEconomicIndex, const float kMinMarginalPlannerHealthIndex, const float kMaxMarginalPlannerHealthIndex, const float kMinMarginalPlannerEconomicIndex, const float kMaxMarginalPlannerEconomicIndex, const float* kWeightageOnMarginalAgentHealthIndex, const float* kWeightageOnMarginalPlannerHealthIndex, 
const float kWeightageOnMarginalAgentEconomicIndex, const float kWeightageOnMarginalPlannerEconomicIndex, const float* kAgentsHealthNorm, const float* kAgentsEconomicNorm, const float kPlannerHealthNorm, const float kPlannerEconomicNorm, float* deaths, float* subsidy, float* postsubsidy_productivity, int* env_done_arr, int* env_timestep_arr, const int kNumAgents, const int kEpisodeLength ) { const int kEnvId = blockIdx.x; const int kAgentId = threadIdx.x; assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= kEpisodeLength); assert (kAgentId <= kNumAgents - 1); const int kArrayIndexOffset = kEnvId * (kEpisodeLength + 1) * (kNumAgents - 1); if (kAgentId < (kNumAgents - 1)) { // Agents' rewards // Indices for time-dependent and time-independent arrays // Time dependent arrays have shapes (num_envs, // kEpisodeLength + 1, kNumAgents - 1) // Time independent arrays have shapes (num_envs, kNumAgents - 1) int kArrayIdxCurrentTime = kArrayIndexOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + kAgentId; int kArrayIdxPrevTime = kArrayIndexOffset + (env_timestep_arr[kEnvId] - 1) * (kNumAgents - 1) + kAgentId; const int kTimeIndependentArrayIdx = kEnvId * (kNumAgents - 1) + kAgentId; float marginal_deaths = deaths[kArrayIdxCurrentTime] - deaths[kArrayIdxPrevTime]; // Note: changing the order of operations to prevent overflow float marginal_agent_health_index = - marginal_deaths / (kAgentsHealthNorm[kAgentId] / static_cast<float>(kValueOfLife)); float marginal_agent_economic_index = crra_nonlinearity( postsubsidy_productivity[kArrayIdxCurrentTime] / kAgentsEconomicNorm[kAgentId], kEconomicRewardCrraEta, kNumDaysInAnYear); marginal_agent_health_index = min_max_normalization( marginal_agent_health_index, kMinMarginalAgentHealthIndex[kAgentId], kMaxMarginalAgentHealthIndex[kAgentId]); marginal_agent_economic_index = min_max_normalization( marginal_agent_economic_index, kMinMarginalAgentEconomicIndex[kAgentId], kMaxMarginalAgentEconomicIndex[kAgentId]); rewards_a[kTimeIndependentArrayIdx] = get_rew( kWeightageOnMarginalAgentHealthIndex[kAgentId], marginal_agent_health_index, kWeightageOnMarginalPlannerHealthIndex[kAgentId], marginal_agent_economic_index); } else if (kAgentId == kNumAgents - 1) { // Planner's rewards float total_marginal_deaths = 0; for (int ag_id = 0; ag_id < (kNumAgents - 1); ag_id ++) { total_marginal_deaths += ( deaths[kArrayIndexOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + ag_id] - deaths[kArrayIndexOffset + (env_timestep_arr[kEnvId] - 1) * (kNumAgents - 1) + ag_id]); } // Note: changing the order of operations to prevent overflow float marginal_planner_health_index = -total_marginal_deaths / (kPlannerHealthNorm / static_cast<float>(kValueOfLife)); float total_subsidy = 0.0; float total_postsubsidy_productivity = 0.0; for (int ag_id = 0; ag_id < (kNumAgents - 1); ag_id ++) { total_subsidy += subsidy[kArrayIndexOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + ag_id]; total_postsubsidy_productivity += postsubsidy_productivity[kArrayIndexOffset + env_timestep_arr[kEnvId] * (kNumAgents - 1) + ag_id]; } float cost_of_subsidy = (1 + kRiskFreeInterestRate) * total_subsidy; float marginal_planner_economic_index = crra_nonlinearity( (total_postsubsidy_productivity - cost_of_subsidy) / kPlannerEconomicNorm, kEconomicRewardCrraEta, kNumDaysInAnYear); marginal_planner_health_index = min_max_normalization( marginal_planner_health_index, kMinMarginalPlannerHealthIndex, kMaxMarginalPlannerHealthIndex); marginal_planner_economic_index = min_max_normalization( 
marginal_planner_economic_index, kMinMarginalPlannerEconomicIndex, kMaxMarginalPlannerEconomicIndex); rewards_p[kEnvId] = get_rew( kWeightageOnMarginalAgentEconomicIndex, marginal_planner_health_index, kWeightageOnMarginalPlannerEconomicIndex, marginal_planner_economic_index); } // Wait here for all agents to finish computing rewards __syncthreads(); // Use only agent 0's thread to set done_arr if (kAgentId == 0) { if (env_timestep_arr[kEnvId] == kEpisodeLength) { env_timestep_arr[kEnvId] = 0; env_done_arr[kEnvId] = 1; } } } }
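// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original kernels): it mirrors
// the crra_nonlinearity / min_max_normalization / get_rew device helpers above
// so the reward math can be sanity-checked on the CPU.  The namespace, the
// epsilon default and the sample constants in the demo are assumptions chosen
// for this example only; they do not come from the covid19 environment code.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstdio>

namespace reward_sketch {

// Host mirror of crra_nonlinearity(): annualize, clip to [0.1, 3.0], apply the
// CRRA utility 1 + (x^(1-eta) - 1) / (1 - eta), then rescale back to daily.
// Assumes eta != 1, exactly like the device version.
inline float crra_nonlinearity(float x, float eta, int num_days_in_a_year) {
    float annual_x = num_days_in_a_year * x;
    float clipped = std::min(std::max(annual_x, 0.1f), 3.0f);
    float annual_crra = 1.0f + (std::pow(clipped, 1.0f - eta) - 1.0f) / (1.0f - eta);
    return annual_crra / num_days_in_a_year;
}

// Host mirror of min_max_normalization(); the epsilon guards a zero range
// (its value here is an assumption standing in for kEpsilon).
inline float min_max_normalization(float x, float min_x, float max_x,
                                   float epsilon = 1e-10f) {
    return (x - min_x) / (max_x - min_x + epsilon);
}

// Host mirror of get_rew(): weighted average of the health and economic indices.
inline float get_rew(float w_health, float health_index,
                     float w_econ, float econ_index) {
    return (w_health * health_index + w_econ * econ_index) / (w_health + w_econ);
}

// Example with made-up numbers (365 days/year, eta = 2).
inline void reward_sketch_demo() {
    float econ_index = crra_nonlinearity(0.004f, 2.0f, 365);
    float health_index = min_max_normalization(-0.3f, -1.0f, 0.0f);
    std::printf("blended reward: %f\n",
                get_rew(1.0f, health_index, 0.5f, econ_index));
}

}  // namespace reward_sketch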
#include "ValueQuantizer.h" #include "ColumnQuantizer.h" #include "QuantizedMatrix.h" namespace Microsoft { namespace MSR { namespace CNTK { // ======================================================================= // thread layout helpers // ======================================================================= // --- distribute array elements naively over threads __host__ static void ParallelizeOverRangeDim(size_t size, dim3& griddim, dim3& blockdim, const size_t warpsize = 64) { // <<< griddim, blockdim, sharedmemsize, stream >>> griddim = (unsigned int) ((size + warpsize - 1) / warpsize); // 'warpsize' threads on each block (-> threadIdx.x) blockdim = (unsigned int) warpsize; // -> blockIdx.x } // get the array index for the current thread __device__ __inline__ static size_t ParallelizeOverRangeIndex() { return threadIdx.x + (blockIdx.x * blockDim.x); } // ======================================================================= // quantization // ======================================================================= // helper to reduce all T across all threads of a block template <typename T, int BLOCKSIZE> __device__ void allreduce(T& var) { __shared__ T buf[BLOCKSIZE]; volatile T* vBuf = buf; buf[threadIdx.x] = var; __syncthreads(); // We assume BLOCKSIZE is a power of 2 if (BLOCKSIZE >= 1024) { if (threadIdx.x < 512) { var = var + buf[threadIdx.x + 512]; buf[threadIdx.x] = var; } __syncthreads(); } if (BLOCKSIZE >= 512) { if (threadIdx.x < 256) { var = var + buf[threadIdx.x + 256]; buf[threadIdx.x] = var; } __syncthreads(); } if (BLOCKSIZE >= 256) { if (threadIdx.x < 128) { var = var + buf[threadIdx.x + 128]; buf[threadIdx.x] = var; } __syncthreads(); } if (BLOCKSIZE >= 128) { if (threadIdx.x < 64) { var = var + buf[threadIdx.x + 64]; buf[threadIdx.x] = var; } __syncthreads(); } // Intra warp reduce if ((BLOCKSIZE >= 64) && (threadIdx.x < 32)) { var = var + vBuf[threadIdx.x + 32]; vBuf[threadIdx.x] = var; } if ((BLOCKSIZE >= 32) && (threadIdx.x < 16)) { var = var + vBuf[threadIdx.x + 16]; vBuf[threadIdx.x] = var; } if ((BLOCKSIZE >= 16) && (threadIdx.x < 8)) { var = var + vBuf[threadIdx.x + 8]; vBuf[threadIdx.x] = var; } if ((BLOCKSIZE >= 8) && (threadIdx.x < 4)) { var = var + vBuf[threadIdx.x + 4]; vBuf[threadIdx.x] = var; } if ((BLOCKSIZE >= 4) && (threadIdx.x < 2)) { var = var + vBuf[threadIdx.x + 2]; vBuf[threadIdx.x] = var; } if ((BLOCKSIZE >= 2) && (threadIdx.x == 0)) { var = var + vBuf[1]; vBuf[0] = var; } __syncthreads(); var = buf[0]; } #define REDUCTION_BLOCK_SIZE 128 // 256 is much worse; 64 is somewhat worse // version optimized for collated memory access template <class ElemType, bool ZeroThresholdFor1Bit> __global__ void _ComputeQuantiStatParj(const ElemType* us, const ElemType* inResidual, long M, long N, size_t ldNbits, char* qpackage) { size_t subset = threadIdx.x; // first thread computes 0, 64, 128; second thread 1, 65, 129 etc. 
size_t j = blockIdx.x; // we process one column per *block*, j=column index; note: j is never out of range size_t rows = M; // we compute from 0..rows-1 size_t bits = 1 << ldNbits; const size_t colSizeByte = Microsoft::MSR::CNTK::QuantizedColumn<ElemType>::QuantizedColumnSize(bits, rows); auto& qcol = *(Microsoft::MSR::CNTK::QuantizedColumn<ElemType>*) &qpackage[colSizeByte * j]; Microsoft::MSR::CNTK::ColumnQuantizer<ElemType>::ComputeRangeStatColjSubset<ZeroThresholdFor1Bit>(us, inResidual, M, j, bits, qcol.lower, qcol.upper, subset, REDUCTION_BLOCK_SIZE, allreduce<ElemType, REDUCTION_BLOCK_SIZE>, allreduce<unsigned int, REDUCTION_BLOCK_SIZE>); } //caller: griddim and blockdim should be both 1d //total thread number is: totalNumQWordsAlMatrix = numCols() * numQWordsPerCol //called to quantize a GPU matrix template <class ElemType, bool ZeroThresholdFor1Bit> __global__ void _QuantizeStripjOneQWord( const ElemType* us, ElemType* curResidual, long M, long N, char* qMat, size_t qColSize, size_t numQWordsPerCol, size_t ldNbits, ElemType* newResidual) { // map our thread index into a linear index const size_t linindex = ParallelizeOverRangeIndex(); // map to (QWord index, column index) const size_t j = linindex / numQWordsPerCol; if (j >= N) // out of col range return; const size_t iQWord = linindex % numQWordsPerCol; // get data pointers to the quantized column auto& qCol = *(Microsoft::MSR::CNTK::QuantizedColumn<ElemType>*) &qMat[qColSize * j]; // and quantizer const Microsoft::MSR::CNTK::ColumnQuantizer<ElemType> q(ldNbits, qCol.lower, qCol.upper); // quantize one QWord to qCol[iQWord] qCol.bits[iQWord] = q.QuantizeOneQWord<ZeroThresholdFor1Bit>(us, curResidual, M, iQWord, M, numQWordsPerCol, j, newResidual); } template <class ElemType> __global__ void UnquantizeStripejOneQWord(ElemType* us, const long M, const long N, const char* qpackage, size_t colsize, size_t numQWordsPerCol, size_t ldNbits, bool add) { // this follows the same as quantizestripej() // map our thread index into a linear index const size_t linindex = ParallelizeOverRangeIndex(); // map to (QWord index, column index) const size_t j = linindex / numQWordsPerCol; if (j >= N) // out of col range return; const size_t iQWord = linindex % numQWordsPerCol; // get data pointers and quantizer const auto& qcol = *(const Microsoft::MSR::CNTK::QuantizedColumn<ElemType>*) &qpackage[colsize * j]; const ElemType lower = qcol.lower; const ElemType upper = qcol.upper; Microsoft::MSR::CNTK::ColumnQuantizer<ElemType> q(ldNbits, lower, upper); // unquantize from this one QWord q.UnquantizeOneQWord(us, M, iQWord, M, numQWordsPerCol, j, qcol.bits[iQWord], add); } //maybe should move out into another class? 
template <class ElemType> void _QuantizeMatrix( const ElemType* us, ElemType* curResidual, long M, long N, char* qPackage, size_t Nbits, cudaStream_t stream, ElemType* newResidual, bool zeroThresholdFor1Bit) { /* verify buffer allocation size if (msra::math::matrixquantizer::buffersize(bits, rows(), cols()) != gpubuffer.size()) LogicError("quantizestripe: dimension of patch to be quantized does not match allocated buffer size for quantized data"); if (rows() != curresidual.rows() || cols() != curresidual.cols() || rows() != newresidual.rows() || cols() != newresidual.cols()) LogicError("quantizestripe: dimension of patch to be quantized does not match residual buffer"); if (gpubuffer.size() == 0) // empty buffer: empty matrix, we are done (explicit test needed since launch will fail with 0 threads) return;*/ // determine mean and variance -> value range (stored in quant package) --for 1 bit, refine it in a second pass const size_t ldNbits = ValueQuantizer<ElemType>::ld(Nbits); size_t nRow = M; size_t nCol = N; dim3 mvgriddim, mvblockdim; // using specialized CUDA code (not shared with CPU) for collated memory access // each thread column computes 'warpsize' elements mvgriddim = (unsigned int) nCol; // column number mvblockdim = REDUCTION_BLOCK_SIZE; if (zeroThresholdFor1Bit) { _ComputeQuantiStatParj<ElemType, true><<<mvgriddim, mvblockdim, 0, stream>>>(us, curResidual, M, N, ldNbits, qPackage); } else { _ComputeQuantiStatParj<ElemType, false><<<mvgriddim, mvblockdim, 0, stream>>>(us, curResidual, M, N, ldNbits, qPackage); } // quantize data (also computing the residual at once) // optimizing for collated memory access: // - each 32-bit word represents an interleaved (not consecutive) set of floats -> parallel threads can do collated accesses // example: // - total number of 32-bit words(1-bit quant): 1100 * 2048 / 32 = 70k // - thread x dimension: index into 32-bit word (e.g. 1100/32 = 35 threads) // - thread y dimension and thread position: column (e.g. 2048) // - using 128 threads on one proc -> 70k/128 = 550 blocks // - threads are indexed by a global index into quantized 32-bit words in increasing order; each thread must // - re-linearize block index and thread index // - map to (i,j) coordinate (start of the set of floats) const size_t numQWordsPerCol = Microsoft::MSR::CNTK::ColumnQuantizer<ElemType>::QWordsPerCol(nRow, Nbits); const size_t totalQWords = nCol * numQWordsPerCol; const size_t colsizebyte = Microsoft::MSR::CNTK::QuantizedColumn<ElemType>::QuantizedColumnSize(Nbits, nRow); dim3 griddim, blockdim; ParallelizeOverRangeDim(totalQWords, griddim, blockdim, 256); if (zeroThresholdFor1Bit) { _QuantizeStripjOneQWord<ElemType, true><<<griddim, blockdim, 0, stream>>>(us, curResidual, M, N, qPackage, colsizebyte, numQWordsPerCol, ldNbits, newResidual); } else { _QuantizeStripjOneQWord<ElemType, false><<<griddim, blockdim, 0, stream>>>(us, curResidual, M, N, qPackage, colsizebyte, numQWordsPerCol, ldNbits, newResidual); } } // unquantize // Process the quantization package to recover (unquantize) the matrix patch. 
template <class ElemType> void _UnquantizeMatrix(const char* gpuBuffer, size_t gpuBufferSize, ElemType* us, long M, long N, size_t nBits, bool add, cudaStream_t stream) { // verify buffer allocation size /*if (msra::math::matrixquantizer::buffersize(bits, rows(), cols()) != gpubuffer.size()) LogicError("unquantizestripe: dimension of patch to be unquantized does not match size of quantized data"); if (gpubuffer.size() == 0) // empty buffer: empty matrix, we are done (explicit test needed since launch will fail with 0 threads) return; */ size_t qSize = QuantizedColumn<ElemType>::QuantizedColumnSize(nBits, M) * N; if (qSize != gpuBufferSize) LogicError("unquantizestripe: dimension of patch to be unquantized does not match size of quantized data"); if (gpuBufferSize == 0) // empty buffer: empty matrix, we are done (explicit test needed since launch will fail with 0 threads) return; // #bits must be a power of two; we operate on shift values const size_t ldNbits = ValueQuantizer<ElemType>::ld(nBits); // unquantize in the same thread layout as quantize(), see there const size_t numQWordsPerCol = ColumnQuantizer<ElemType>::QWordsPerCol(M, nBits); const size_t totalQWords = N * numQWordsPerCol; const size_t colsize = QuantizedColumn<ElemType>::QuantizedColumnSize(nBits, M); dim3 griddim, blockdim; ParallelizeOverRangeDim(totalQWords, griddim, blockdim, 256); UnquantizeStripejOneQWord<<<griddim, blockdim, 0, stream>>>(us, M, N, gpuBuffer, colsize, numQWordsPerCol, ldNbits, add); } } } } #endif
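// ---------------------------------------------------------------------------
// Minimal standalone sketch (not part of CNTK): the same power-of-two
// shared-memory tree reduction that allreduce<T, BLOCKSIZE> performs above,
// stripped down to a per-block sum kernel.  The kernel name and the launch
// comment below are assumptions for this example; for simplicity it syncs on
// every step instead of using the volatile warp-level tail.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

template <int BLOCKSIZE>
__global__ void block_sum_sketch(const float* in, float* out, int n) {
    __shared__ float buf[BLOCKSIZE];
    // each thread loads one element (0 if out of range)
    int i = blockIdx.x * BLOCKSIZE + threadIdx.x;
    buf[threadIdx.x] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    // halve the number of active threads each step, mirroring the
    // BLOCKSIZE >= 1024 ... BLOCKSIZE >= 2 cascade in allreduce()
    for (int stride = BLOCKSIZE / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) {
            buf[threadIdx.x] += buf[threadIdx.x + stride];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        out[blockIdx.x] = buf[0];  // one partial sum per block
    }
}

// Example launch (assumes d_in, d_partials and n already exist):
//   block_sum_sketch<256><<<num_blocks, 256, 0, stream>>>(d_in, d_partials, n);
//   // a second pass over d_partials (or a host-side sum) finishes the reduction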
* Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * -------------------------------------------------------------------------- * * This file has been modified by Megvii ("Megvii Modifications"). * * All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights * reserved. * -------------------------------------------------------------------------- */ #include "filter_act_templates.cuh" namespace megdnn { namespace cuda { /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * * imgsPerThread images. threadIdx.x determines image threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgSizeY, imgSizeX, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModulesY, numModulesX, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * no restrictions on pixelCache * The imgSize here is the size of the actual image without the padding. * As always, try to make B_X * imgsPerThread == B_Y * filtersPerThread for maximum * efficiency. 
* */ template < int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse2( float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters [colorCache] [B_Y * filtersPerThread]; // pre-load 1 pixel from B_Y*filtersPerThread filters __shared__ float shImages[colorCache] [B_X * imgsPerThread]; // pre-load 1 pixel from B_X*imgsPerThread images fill_shared_mem<float>((float*)shFilters, sizeof(shFilters) / sizeof(float), 0); fill_shared_mem<float>((float*)shImages, sizeof(shImages) / sizeof(float), 0); __syncthreads(); const int imgPixels = imgSizeY * imgSizeX; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = DIVUP(numFilters, (B_Y * filtersPerThread)); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesY; const int blockColorIdx = numFilterColors * blockGroupIdx; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += (blockColorIdx + threadIdx.y) * imgPixels * imgStride + myImgIdx; filters += blockFilterIdx + shFilterLoadX + shFilterLoadY * numFilters * filterPixels; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } bool active_thread_y = (blockFilterIdx + shFilterLoadX) < numFilters; targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y) * numImages * numModules + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } const int imgStartX = MAX(0, imgLoadModPosX); const int imgStartY = MAX(0, imgLoadModPosY); const int imgEndX = MIN(imgLoadModPosX + filterSize, imgSizeX); const int imgEndY = MIN(imgLoadModPosY + filterSize, imgSizeY); // __shared__ int imgPos[] for (int imgY = imgStartY; imgY < imgEndY; ++imgY) { const int filterPxY = imgY - imgLoadModPosY; for (int imgX = imgStartX; imgX < imgEndX; ++imgX) { const int filterPxX = imgX - imgLoadModPosX; const int p = filterPxY * filterSize + filterPxX; for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) /* * Load a pixel from B_Y*filtersPerThread filters * This condition covers the case when B_X is not divisible by filtersPerThread. * In this case, not all of the threads will participate in the loading operation. * This ensures that in each loop iteration, an integer number of rows of shFilters * are filled, which makes indexing simple. 
* nvcc is behaving in a completely insane way: removing this condition under * template parameters that guarantee it to be true actually slows down * the computation. * */ if (/*B_X % filtersPerThread == 0 ||*/ shFilterLoadY < B_X / filtersPerThread) { #pragma unroll for (int c = 0; c < colorCache; c += B_X / filtersPerThread) { if (colorCache % (B_X / filtersPerThread) == 0 || c + shFilterLoadY < colorCache) { if (active_thread_y) { shFilters[c + shFilterLoadY][shFilterLoadX] = filters[((oc + c) * filterPixels + p) * numFilters]; } else { shFilters[c + shFilterLoadY][shFilterLoadX] = 0; } } } } /* * Load a pixel from B_X*imgsPerThread images. */ const int pixIdx = imgY * imgSizeX + imgX; // Pixel index in img float* m = &images[imgStride * (oc * imgPixels + pixIdx)]; #pragma unroll for (int c = 0; c < colorCache; c += B_Y) { if (colorCache % B_Y == 0 || threadIdx.y + c < colorCache) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X]; } else { shImages[c + threadIdx.y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); for (int c = 0; c < colorCache; c++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][g] += shImages[c][g * B_X + threadIdx.x] * shFilters[c][threadIdx.y + f * B_Y]; } } } __syncthreads(); } } } int filtersThisThread = filtersPerThread; // if(checkFilterBounds) { int filtersThisBlock = numFilters - (blockIdx.y % blocksPerModule) * (B_Y * filtersPerThread); if (filtersThisBlock < (B_Y * filtersPerThread)) { filtersThisThread = (filtersThisBlock - threadIdx.y + filtersPerThread - 1) / filtersPerThread; } //} if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersThisThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { // Note: reversing order of these loops saves 2 registers, but costs time #pragma unroll for (int f = 0; f < filtersThisThread; f++) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } #define FILTER_SPARSE2_HEAD template __global__ void filterActs_YxX_sparse2 // <B_Y, B_X, imgsPerThread, filtersPerThread, colorCache, scale, checkImgBounds> #define FILTER_SPARSE2(scale, ckImg) \ FILTER_SPARSE2_HEAD<4, 32, 4, 8, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 4, 4, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ \ FILTER_SPARSE2_HEAD<8, 32, 2, 16, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 2, 16, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 2, 8, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 2, 4, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ \ FILTER_SPARSE2_HEAD<8, 32, 1, 16, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 1, 16, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 1, 8, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 1, 4, 8, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ \ FILTER_SPARSE2_HEAD<4, 32, 4, 16, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 4, 8, 4, 
scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 4, 4, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ \ FILTER_SPARSE2_HEAD<4, 32, 2, 16, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 2, 8, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 2, 4, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ \ FILTER_SPARSE2_HEAD<4, 32, 1, 16, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 1, 8, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); \ FILTER_SPARSE2_HEAD<4, 32, 1, 4, 4, scale, ckImg>(FILTER_SPARSE2_PARAMS); } // namespace cuda } // namespace megdnn
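// ---------------------------------------------------------------------------
// Host-side sketch (an assumption, not the megdnn dispatcher): how a caller
// could derive the launch geometry that filterActs_YxX_sparse2 documents in
// its header comment -- blockIdx.x walks image batches of B_X*imgsPerThread,
// blockIdx.y walks (module, filter-batch) pairs of B_Y*filtersPerThread, and
// each block is B_X x B_Y threads.  All names here are illustrative only.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

namespace filter_acts_sketch {

inline int divup(int a, int b) { return (a + b - 1) / b; }

struct LaunchGeometry {
    dim3 grid;
    dim3 block;
    bool check_img_bounds;  // true when numImages is not a multiple of B_X*imgsPerThread
};

// Mirrors the kernel's own indexing: blocksPerModule = DIVUP(numFilters,
// B_Y*filtersPerThread) and gridDim.y = numModules * blocksPerModule.
inline LaunchGeometry make_geometry(int num_images, int num_filters,
                                    int num_modules_y, int num_modules_x,
                                    int B_Y, int B_X,
                                    int imgs_per_thread, int filters_per_thread) {
    LaunchGeometry g;
    int blocks_per_module = divup(num_filters, B_Y * filters_per_thread);
    g.grid = dim3(divup(num_images, B_X * imgs_per_thread),
                  num_modules_y * num_modules_x * blocks_per_module);
    g.block = dim3(B_X, B_Y);
    g.check_img_bounds = (num_images % (B_X * imgs_per_thread)) != 0;
    return g;
}

}  // namespace filter_acts_sketch

// Example: for the <4, 32, 4, 8, 8, ...> instantiation above, a caller would use
// make_geometry(numImages, numFilters, numModulesY, numModulesX, 4, 32, 4, 8)
// and select the checkImgBounds template argument from g.check_img_bounds.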
static inline void THNN_(TemporalRowConvolution_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *weight, THCTensor *bias, int kW, int dW, int padW) { THArgCheck(kW > 0, 5, "kernel size should be greater than zero, but got kW: %d", kW); THArgCheck(dW > 0, 6, "stride should be greater than zero, but got dW: %d", dW); THCUNN_argCheck(state, weight->nDimension == 2 || weight->nDimension == 3, 3, weight, "2D or 3D weight tensor expected, but got: %s"); if (bias != NULL) { THCUNN_check_dim_size(state, bias, 1, 0, weight->size[0]); } int ndim = input->nDimension; int dimF = 0; // feature dimension int dimS = 1; // sequence dimension if (ndim == 3) { ++dimF; ++dimS; } THCUNN_argCheck(state, ndim == 2 || ndim == 3, 1, input, "2D or 3D (batch mode) input tensor expected, but got :%s"); long inputFrameSize = weight->size[0]; long nInputFrame = input->size[dimS]; long nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; if (nOutputFrame < 1) { THError("Given input size: (%d x %d). " "Calculated output size: (%d x %d). Output size is too small", inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame); } THCUNN_check_dim_size(state, input, ndim, dimF, inputFrameSize); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimF, inputFrameSize); THCUNN_check_dim_size(state, gradOutput, ndim, dimS, nOutputFrame); } } void THNN_(TemporalRowConvolution_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias, THCTensor *finput, THCTensor *fgradInput, int kW, int dW, int padW, bool featFirst) { // aliases THCTensor *columns = finput; THCTensor *ones = fgradInput; // assert same GPU THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones); if (bias != NULL) { THCUNN_assertSameGPU(state, 2, weight, bias); } THArgCheck(THCTensor_(isContiguous)(state, weight), 4, "weight must be contiguous"); THArgCheck(!bias || THCTensor_(isContiguous)(state, bias), 5, "bias must be contiguous"); // reshape weight if necessary int ndim = input->nDimension; THCTensor *tinput; if (!featFirst) { tinput = THCTensor_(newTranspose)(state, input, ndim - 1, ndim - 2); input = THCTensor_(newContiguous)(state, tinput); } else { input = THCTensor_(newContiguous)(state, input); } THNN_(TemporalRowConvolution_shapeCheck) (state, input, NULL, weight, bias, kW, dW, padW); int batch = 1; if (ndim == 2) { // Force batch batch = 0; THCTensor_(resize3d)(state, input, 1, input->size[0], input->size[1]); } // Params: long inputFrameSize = weight->size[0]; long nInputFrame = input->size[2]; long nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; // Batch size long batchSize = input->size[0]; // Resize output THCTensor_(resize3d)(state, output, batchSize, inputFrameSize, nOutputFrame); // Augment the input THCTensor_(resize3d)(state, columns, inputFrameSize, kW, nOutputFrame); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever // gets increased and always contains ones. if (ones->nDimension != 2 || ones->size[0] * ones->size[1] < nOutputFrame) { // Resize plane and fill with ones... 
THCTensor_(resize2d)(state, ones, 1, nOutputFrame); THCTensor_(fill)(state, ones, ScalarConvert<int, real>::to(1)); } // Helpers THCTensor *input_n = THCTensor_(new)(state); THCTensor *output_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; ++elt) { // Matrix multiply per output: THCTensor_(select)(state, input_n, input, 0, elt); THCTensor_(select)(state, output_n, output, 0, elt); // Do bias first: // m_, n_, k_ are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) long m_ = inputFrameSize; long n_ = nOutputFrame; long k_ = 1; // Do GEMM (note: this is a bit confusing because gemm asummes // column-major matrices) if (bias != NULL) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemm( #elif defined(THC_REAL_IS_HALF) THCudaBlas_Hgemm( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm( #endif state, 't', 'n', n_, m_, k_, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, ones), k_, THCTensor_(data)(state, bias), k_, ScalarConvert<int, real>::to(0), THCTensor_(data)(state, output_n), n_); } else { THCTensor_(zero)(state, output_n); } // Extract columns: row2col(THCState_getCurrentStream(state), THCTensor_(data)(state, input_n), inputFrameSize, nInputFrame, kW, padW, dW, 1, THCTensor_(data)(state, columns)); THCTensor *output3d = THCTensor_(newWithStorage3d)( state, output_n->storage, output_n->storageOffset, inputFrameSize, -1, 1, -1, nOutputFrame, -1); // weight: inputFrameSize x 1 x kW // columns: inputFrameSize x kW x nOutputFrame THCTensor_(baddbmm)(state, output3d, ScalarConvert<int, real>::to(1), output3d, ScalarConvert<int, real>::to(1), weight, columns); // output3d: inputFrameSize x 1 x nOutputFrame THCTensor_(free)(state, output3d); } // Free THCTensor_(free)(state, input_n); THCTensor_(free)(state, output_n); // Resize output if (batch == 0) { THCTensor_(resize2d)(state, output, inputFrameSize, nOutputFrame); THCTensor_(resize2d)(state, input, inputFrameSize, nInputFrame); } if (!featFirst) { THCTensor_(transpose)(state, output, output, ndim - 1, ndim - 2); THCTensor_(free)(state, tinput); } THCTensor_(free)(state, input); } void THNN_(TemporalRowConvolution_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, THCTensor *finput, THCTensor *fgradInput, int kW, int dW, int padW, bool featFirst) { // aliases THCTensor *gradColumns = finput; THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, gradColumns, gradInput); THArgCheck(THCTensor_(isContiguous)(state, weight), 4, "weight must be contiguous"); int ndim = input->nDimension; THCTensor *tinput, *tgradOutput; if (!featFirst) { tinput = THCTensor_(newTranspose)(state, input, ndim - 1, ndim - 2); tgradOutput = THCTensor_(newTranspose)(state, gradOutput, ndim - 1, ndim - 2); input = THCTensor_(newContiguous)(state, tinput); gradOutput = THCTensor_(newContiguous)(state, tgradOutput); } else { input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); } THNN_(TemporalRowConvolution_shapeCheck) (state, input, gradOutput, weight, NULL, kW, dW, padW); int batch = 1; if (ndim == 2) { // Force batch batch = 0; THCTensor_(resize3d)(state, input, 1, input->size[0], input->size[1]); THCTensor_(resize3d)(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1]); } // Params: long inputFrameSize = weight->size[0]; long nInputFrame = input->size[2]; long nOutputFrame = gradOutput->size[2]; // Batch size long batchSize = input->size[0]; // Resize 
output THCTensor_(resize3d)(state, gradInput, batchSize, inputFrameSize, nInputFrame); // Resize temporary columns THCTensor_(resize3d)(state, gradColumns, inputFrameSize, kW, nOutputFrame); // Helpers THCTensor *gradInput_n = THCTensor_(new)(state); THCTensor *gradOutput_n = THCTensor_(new)(state); THCTensor *tweight = THCTensor_(new)(state); THCTensor_(transpose)(state, tweight, weight, 1, 2); for (int elt = 0; elt < batchSize; ++elt) { // Matrix multiply per sample: THCTensor_(select)(state, gradInput_n, gradInput, 0, elt); THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); THCTensor *gradOutput3d = THCTensor_(newWithStorage3d)( state, gradOutput_n->storage, gradOutput_n->storageOffset, inputFrameSize, -1, 1, -1, nOutputFrame, -1); // weight: inputFrameSize x kW x 1 // gradOutput3d: inputFrameSize x 1 x nOutputFrame THCTensor_(baddbmm)(state, gradColumns, ScalarConvert<int, real>::to(0), gradColumns, ScalarConvert<int, real>::to(1), tweight, gradOutput3d); // gradColumns: inputFrameSize x kW x nOutputFrame // Unpack columns back into input: col2row<real, accreal>(THCState_getCurrentStream(state), THCTensor_(data)(state, gradColumns), inputFrameSize, nInputFrame, kW, padW, dW, 1, THCTensor_(data)(state, gradInput_n)); THCTensor_(free)(state, gradOutput3d); } // Free THCTensor_(free)(state, gradInput_n); THCTensor_(free)(state, gradOutput_n); // Resize output if (batch == 0) { THCTensor_(resize2d)(state, gradOutput, inputFrameSize, nOutputFrame); THCTensor_(resize2d)(state, input, inputFrameSize, nInputFrame); THCTensor_(resize2d)(state, gradInput, inputFrameSize, nInputFrame); } THCTensor_(free)(state, tweight); if (!featFirst) { THCTensor_(transpose)(state, gradInput, gradInput, ndim - 1, ndim - 2); THCTensor_(free)(state, tinput); THCTensor_(free)(state, tgradOutput); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } void THNN_(TemporalRowConvolution_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *finput, THCTensor *fgradInput, int kW, int dW, int padW, bool featFirst, accreal scale_) { real scale = ScalarConvert<accreal, real>::to(scale_); // Aliases THCTensor *columns = finput; THCTensor *ones = fgradInput; THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, columns, ones); if (gradBias != NULL) { THCUNN_assertSameGPU(state, 2, gradWeight, gradBias); } int ndim = input->nDimension; THCTensor *tinput, *tgradOutput; if (!featFirst) { tinput = THCTensor_(newTranspose)(state, input, ndim - 1, ndim - 2); tgradOutput = THCTensor_(newTranspose)(state, gradOutput, ndim - 1, ndim - 2); input = THCTensor_(newContiguous)(state, tinput); gradOutput = THCTensor_(newContiguous)(state, tgradOutput); } else { input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); } THNN_(TemporalRowConvolution_shapeCheck) (state, input, gradOutput, gradWeight, gradBias, kW, dW, padW); int batch = 1; if (ndim == 2) { // Force batch batch = 0; THCTensor_(resize3d)(state, input, 1, input->size[0], input->size[1]); THCTensor_(resize3d)(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1]); } // Params: long inputFrameSize = gradWeight->size[0]; long nInputFrame = input->size[2]; long nOutputFrame = gradOutput->size[2]; // Batch size long batchSize = input->size[0]; // Define a buffer of ones, for bias accumulation if (ones->nDimension != 2 || ones->size[0] * ones->size[1] < nOutputFrame) { // Resize plane and fill with ones... 
THCTensor_(resize2d)(state, ones, 1, nOutputFrame); THCTensor_(fill)(state, ones, ScalarConvert<int, real>::to(1)); } // // Resize temporary columns THCTensor_(resize3d)(state, columns, inputFrameSize, kW, nOutputFrame); // Helpers THCTensor *input_n = THCTensor_(new)(state); THCTensor *gradOutput_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; ++elt) { // Matrix multiply per output THCTensor_(select)(state, input_n, input, 0, elt); THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); THCTensor *gradOutput3d = THCTensor_(newWithStorage3d)( state, gradOutput_n->storage, gradOutput_n->storageOffset, inputFrameSize, -1, 1, -1, nOutputFrame, -1); // Extract columns row2col(THCState_getCurrentStream(state), THCTensor_(data)(state, input_n), inputFrameSize, nInputFrame, kW, padW, dW, 1, THCTensor_(data)(state, columns)); THCTensor *tcolumns = THCTensor_(new)(state); THCTensor_(transpose)(state, tcolumns, columns, 1, 2); // gradOutput3d: inputFrameSize x 1 x nOutputFrame // columns: inputFrameSize x nOutputFrame x kW THCTensor_(baddbmm)(state, gradWeight, ScalarConvert<int, real>::to(1), gradWeight, scale, gradOutput3d, tcolumns); // gradWeight: inputFrameSize x 1 x kW THCTensor_(free)(state, tcolumns); THCTensor_(free)(state, gradOutput3d); if (gradBias != NULL) { long m_ = inputFrameSize; long k_ = nOutputFrame; #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv( #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv( #endif state, 't', k_, m_, scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), 1, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, gradBias), 1); #endif #ifdef THC_REAL_IS_HALF // half not supported due to baddbmm THCudaBlas_Hgemm(state, 't', 'n', m_, 1, k_, scale, THCTensor_(data)(state, gradOutput_n), k_, THCTensor_(data)(state, ones), k_, ScalarConvert<int, real>::to(1), THCTensor_(data)(state, gradBias), m_); #endif } } // Free THCTensor_(free)(state, input_n); THCTensor_(free)(state, gradOutput_n); // Resize if (batch == 0) { THCTensor_(resize2d)(state, gradOutput, inputFrameSize, nOutputFrame); THCTensor_(resize2d)(state, input, inputFrameSize, nInputFrame); } if (!featFirst) { THCTensor_(free)(state, tinput); THCTensor_(free)(state, tgradOutput); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } #endif
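// ---------------------------------------------------------------------------
// Naive CPU reference (not part of THCUNN, added here only to make the
// row2col + baddbmm formulation above concrete): every input row (feature)
// is convolved with its own 1-D kernel of width kW at stride dW with zero
// padding padW, so nOutputFrame = (nInputFrame + 2*padW - kW) / dW + 1.
// The function name and the std::vector layout are assumptions for the sketch.
// ---------------------------------------------------------------------------
#include <vector>

// input : [inputFrameSize][nInputFrame]   (feature-first layout, featFirst = true)
// weight: [inputFrameSize][kW]
// bias  : [inputFrameSize] or empty
// returns output[inputFrameSize][nOutputFrame]
inline std::vector<std::vector<float> > row_conv_reference(
        const std::vector<std::vector<float> >& input,
        const std::vector<std::vector<float> >& weight,
        const std::vector<float>& bias,
        int kW, int dW, int padW) {
    const int inputFrameSize = (int)input.size();
    const int nInputFrame = (int)input[0].size();  // assumes a non-empty input
    const int nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;
    std::vector<std::vector<float> > output(
        inputFrameSize, std::vector<float>(nOutputFrame, 0.0f));
    for (int r = 0; r < inputFrameSize; ++r) {
        for (int t = 0; t < nOutputFrame; ++t) {
            float acc = bias.empty() ? 0.0f : bias[r];
            for (int k = 0; k < kW; ++k) {
                int src = t * dW - padW + k;          // position in the (padded) row
                if (src >= 0 && src < nInputFrame) {  // zeros outside the row
                    acc += weight[r][k] * input[r][src];
                }
            }
            output[r][t] = acc;
        }
    }
    return output;
}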
using namespace std; /* ========================= * IReducerSegment * ========================= */ // Null mat --> reducer on host IReduceSegment::IReduceSegment(IEightGPUReducer& parent, int deviceID, Queue<int>* finishQueue) : _deviceID(deviceID), _next(NULL), _finishQueue(finishQueue), Thread(true, getDeviceCPUs(parent.getTgtDeviceID())) { } IReduceSegment::~IReduceSegment() { } NVMatrix& IReduceSegment::getChunk(const NVMatrix& mat, int chunkSize, int chunkIdx) { NVMatrix& line = mat.reshaped(1, mat.getNumElements()); int start = chunkIdx * chunkSize; int end = min((chunkIdx+1) * chunkSize, mat.getNumElements()); // _mat->printShape("_mat"); NVMatrix& chunk = line.sliceCols(start, end); delete &line; // chunk.printShape("chunk"); return chunk; } void* IReduceSegment::run() { bool exit = false; while (!exit) { ReduceMessage& msg = *_queue.dequeue(); if (msg.getType() == EXIT) { exit = true; } else { bool term = processMessage(msg); if (term) { assert(_finishQueue); _finishQueue->enqueue(1); } } delete &msg; } return NULL; } inline NVMatrix& IReduceSegment::getMatrix(ReduceMessage& msg) { return msg.getMatrix(getDeviceID()); } Queue<ReduceMessage*>& IReduceSegment::getQueue() { return _queue; } inline int IReduceSegment::getDeviceID() const { return _deviceID; } void IReduceSegment::addPrev(IReduceSegment& c) { _prev.push_back(&c); } void IReduceSegment::addNext(ReducePeer& c) { assert(_next == NULL); _next = &c; c.addPrev(*this); } bool IReduceSegment::isTerminal() const { return _next == NULL; } /* ========================= * ReducerSource * ========================= */ ReducerSource::ReducerSource(IEightGPUReducer& parent, int deviceID) : IReduceSegment(parent, deviceID, NULL) { } bool ReducerSource::processMessage(ReduceMessage& msg) { assert(msg.getType() == REDUCE_START); int numChunks = min(getMatrix(msg).getNumElements(), max(REDUCE_MIN_CHUNKS, min(REDUCE_MAX_CHUNKS, DIVUP(getMatrix(msg).getNumElements(), REDUCE_MIN_CHUNK_SIZE)))); int chunkSize = DIVUP(getMatrix(msg).getNumElements(), numChunks); //printf("num chunks: %d\n", numChunks); for (int c = 0; c <= numChunks; ++c) { _next->getQueue().enqueue(new ReduceChunkMessage(*this, c, chunkSize, numChunks, msg.getScaleIntermediates(), msg.getScaleTarget(), msg.getMatrices())); } return false; } /* ========================= * ReducerPeer * ========================= */ ReducePeer::ReducePeer(IEightGPUReducer& parent,int deviceID, Queue<int>* finishQueue) : IReduceSegment(parent, deviceID, finishQueue), _numInputsFinished(0) { _add = deviceID != DEVICE_HOST; } ReducePeer::ReducePeer(IEightGPUReducer& parent) : IReduceSegment(parent, DEVICE_HOST, NULL), _numInputsFinished(0), _add(false) { } ReducePeer::~ReducePeer() { for(std::map<int,cudaStream_t>::iterator it = _streams.begin(); it != _streams.end(); ++it) { checkCudaErrors(cudaStreamDestroy(it->second)); } _streams.clear(); } inline cudaStream_t ReducePeer::getStream(int deviceID) { if (deviceID < 0) { return NULL; } if (_streams.count(deviceID) == 0) { NVMatrix::setDeviceID(deviceID); checkCudaErrors(cudaStreamCreateWithFlags(&_streams[deviceID], cudaStreamNonBlocking)); } return _streams[deviceID]; } bool ReducePeer::processMessage(ReduceMessage& msg) { assert(msg.getType() == REDUCE_CHUNK); ReduceChunkMessage& cmsg = *static_cast<ReduceChunkMessage*>(&msg); // if (_numInputsReceived.count(cmsg.getChunkIdx()) == 0) { // _numInputsReceived[cmsg.getChunkIdx()] = 0; // } int& inputsRcvd = ++_numInputsReceived[cmsg.getChunkIdx()]; // printf("reducer on device %d got msg chunk idx %d 
of %d, inputs rcvd for this chunk idx: %d/%d\n", // getDeviceID(), cmsg.getChunkIdx(), cmsg.getNumChunks(),_numInputsReceived[cmsg.getChunkIdx()], _prev.size()); if (cmsg.getChunkIdx() < cmsg.getNumChunks()) { IReduceSegment& src = cmsg.getSource(); float scalePrev = isTerminal() ? cmsg.getScaleIntermediates() : 1; float scaleSelf = inputsRcvd == 1 ? _add * (isTerminal() ? cmsg.getScaleTarget() : 1): 1; if (scaleSelf == 0 || isTerminal()) { if (getDeviceID() >= 0) { NVMatrix::setDeviceID(getDeviceID()); } getMatrix(msg).resize(src.getMatrix(msg)); } assert(getMatrix(msg).isSameDims(src.getMatrix(msg))); NVMatrix& prevChunk = getChunk(src.getMatrix(msg), cmsg.getChunkSize(), cmsg.getChunkIdx()); NVMatrix& myChunk = getChunk(getMatrix(msg), cmsg.getChunkSize(), cmsg.getChunkIdx()); int execDeviceID = getDeviceID() >= 0 ? getDeviceID() : src.getDeviceID(); if (execDeviceID >= 0) { NVMatrix::setDeviceID(execDeviceID); prevChunk.add(myChunk, scalePrev, scaleSelf, myChunk, getStream(execDeviceID)); NVMatrix::syncStream(getStream(execDeviceID)); } else { assert(!isTerminal()); hostAdd(prevChunk.getDevData(), myChunk.getDevData(), prevChunk.getNumElements(), scaleSelf); } delete &prevChunk; delete &myChunk; } else { _numInputsFinished++; } if (!isTerminal() && inputsRcvd == _prev.size()) { // printf(" device %d enqueueing msg for next on device %d\n", getDeviceID(), _next->getDeviceID()); _next->getQueue().enqueue( new ReduceChunkMessage(*this, cmsg.getChunkIdx(), cmsg.getChunkSize(), cmsg.getNumChunks(), cmsg.getScaleIntermediates(), cmsg.getScaleTarget(), cmsg.getMatrices())); } bool finished = _numInputsFinished == _prev.size(); if (finished) { _numInputsFinished = 0; _numInputsReceived.clear(); } return finished && isTerminal(); } void ReducePeer::hostAdd(const float* src, float* tgt, const int n, const float scaleTgt) { if (scaleTgt != 0) { for (int i = 0; i < n; ++i) { tgt[i] = scaleTgt * tgt[i] + src[i]; } } else { for (int i = 0; i < n; ++i) { tgt[i] = src[i]; } } } inline NVMatrix& ReducePeer::getMatrix(ReduceMessage& msg) { if (getDeviceID() != DEVICE_HOST) { return IReduceSegment::getMatrix(msg); } return _mat; } /* ========================= * EightGPUReducer * ========================= */ IEightGPUReducer::IEightGPUReducer(int tgtDeviceID) : _tgtDeviceID(tgtDeviceID) { } IEightGPUReducer::~IEightGPUReducer() { vector<IReduceSegment*> v; v.insert(v.end(), _sources.begin(), _sources.end()); v.insert(v.end(), _peers.begin(), _peers.end()); for (vector<IReduceSegment*>::iterator it = v.begin(); it != v.end(); ++it) { (*it)->getQueue().enqueue(new ReduceMessage(EXIT)); (*it)->join(); delete *it; } } IEightGPUReducer& IEightGPUReducer::construct() { vector<int> same, other; for (int i = 0; i < 8; ++i) { if (i != _tgtDeviceID) { if (NVMatrix::canAccessPeer(_tgtDeviceID, i)) { same.insert(same.begin() + rand() % (1 + same.size()), i); } else { other.insert(other.begin() + rand() % (1 + other.size()), i); } } } assert(same.size() == 3); assert(other.size() == 4); makeConnections(same, other); for (vector<ReducerSource*>::const_iterator it = _sources.begin(); it != _sources.end(); ++it) { (*it)->start(); } for (vector<ReducePeer*>::const_iterator it = _peers.begin(); it != _peers.end(); ++it) { (*it)->start(); } return *this; } void IEightGPUReducer::reduce(std::map<int, NVMatrix*>& mats, float scaleIntermediates, float scaleTarget) { assert(mats.size() == 8); // Check if source matrices are 0-sized bool zero = true; for (map<int,NVMatrix*>::const_iterator it = mats.begin(); it != mats.end(); 
++it) { if (it->first != _tgtDeviceID && it->second->getNumElements() != 0) { zero = false; break; } } if (zero) { mats[_tgtDeviceID]->resize(*mats[(_tgtDeviceID + 1) % 8]); } else { for (vector<ReducerSource*>::const_iterator it = _sources.begin(); it != _sources.end(); ++it) { (*it)->getQueue().enqueue(new ReduceStartMessage(scaleIntermediates, scaleTarget, mats)); } _finishQueue.dequeue(); } assert(_finishQueue.getNumElements() == 0); } void IEightGPUReducer::reduce(std::map<int, NVMatrix*>& mats, float scaleIntermediates) { reduce(mats, scaleIntermediates, 1); } void IEightGPUReducer::reduce(std::map<int, NVMatrix*>& mats) { reduce(mats, 1, 1); } int IEightGPUReducer::getTgtDeviceID() const { return _tgtDeviceID; } /* ========================= * EightGPUReducer1 * ========================= */ EightGPUReducer1::EightGPUReducer1(int tgtDeviceID) : IEightGPUReducer(tgtDeviceID) { } void EightGPUReducer1::makeConnections(vector<int>& same, vector<int>&other) { // Setup segments on same truck _peers.push_back(new ReducePeer(*this, _tgtDeviceID, &_finishQueue)); // peers[0] = tgt _peers.push_back(new ReducePeer(*this,same[0], &_finishQueue)); // peers[1] = same truck 1 _peers.push_back(new ReducePeer(*this,same[1], &_finishQueue)); // peers[2] = same truck 2 _sources.push_back(new ReducerSource(*this,same[2])); // sources[0] = same truck 3 _sources[0]->addNext(*_peers[2]); _peers[2]->addNext(*_peers[1]); _peers[1]->addNext(*_peers[0]); // Setup segments on other truck _sources.push_back(new ReducerSource(*this,other[0])); // sources[1] = other truck 1 _peers.push_back(new ReducePeer(*this,other[1], &_finishQueue)); // peers[3] = other truck 2 _peers.push_back(new ReducePeer(*this,other[2], &_finishQueue)); // peers[4] = other truck 3 _sources.push_back(new ReducerSource(*this,other[3])); // sources[2] = other truck 4 _peers.push_back(new ReducePeer(*this)); // peers[5] = host 1 _peers.push_back(new ReducePeer(*this)); // peers[6] = host 2 _peers.push_back(new ReducePeer(*this)); // peers[7] = host 3 _sources[1]->addNext(*_peers[3]); _peers[3]->addNext(*_peers[5]); _peers[5]->addNext(*_peers[7]); _peers[7]->addNext(*_peers[0]); _peers[4]->addNext(*_peers[6]); _peers[6]->addNext(*_peers[7]); _sources[2]->addNext(*_peers[4]); } /* ========================= * EightGPUReducer2 * ========================= */ EightGPUReducer2::EightGPUReducer2(int tgtDeviceID) : IEightGPUReducer(tgtDeviceID) { } void EightGPUReducer2::makeConnections(vector<int>& same, vector<int>&other) { // Setup segments on same truck _peers.push_back(new ReducePeer(*this,_tgtDeviceID, &_finishQueue)); // peers[0] = tgt _peers.push_back(new ReducePeer(*this,same[0], &_finishQueue)); // peers[1] = same truck 1 _peers.push_back(new ReducePeer(*this,same[1], &_finishQueue)); // peers[2] = same truck 2 _sources.push_back(new ReducerSource(*this,same[2])); // sources[0] = same truck 3 _sources[0]->addNext(*_peers[2]); _peers[2]->addNext(*_peers[1]); _peers[1]->addNext(*_peers[0]); // Setup segments on other truck _sources.push_back(new ReducerSource(*this,other[0])); // sources[1] = other truck 1 _peers.push_back(new ReducePeer(*this,other[1], &_finishQueue)); // peers[3] = other truck 2 _peers.push_back(new ReducePeer(*this,other[2], &_finishQueue)); // peers[4] = other truck 3 _peers.push_back(new ReducePeer(*this,other[3], &_finishQueue)); // peers[5] = other truck 4 _peers.push_back(new ReducePeer(*this)); // peers[6] = host 1 _sources[1]->addNext(*_peers[3]); _peers[3]->addNext(*_peers[4]); _peers[4]->addNext(*_peers[5]); 
_peers[5]->addNext(*_peers[6]); _peers[6]->addNext(*_peers[0]); }
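// ---------------------------------------------------------------------------
// Standalone sketch (not part of the reducer above): the chunking rule that
// ReducerSource::processMessage() and IReduceSegment::getChunk() implement,
// pulled out so it can be checked on the host.  The three constants below are
// placeholder assumptions standing in for REDUCE_MIN_CHUNKS, REDUCE_MAX_CHUNKS
// and REDUCE_MIN_CHUNK_SIZE, whose real values live in the project headers.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <utility>

namespace reducer_sketch {

const int kMinChunks = 16;          // assumption, stands in for REDUCE_MIN_CHUNKS
const int kMaxChunks = 64;          // assumption, stands in for REDUCE_MAX_CHUNKS
const int kMinChunkSize = 1 << 16;  // assumption, stands in for REDUCE_MIN_CHUNK_SIZE

inline int divup(int a, int b) { return (a + b - 1) / b; }

// Mirrors ReducerSource::processMessage():
//   numChunks = min(numElements, max(MIN, min(MAX, DIVUP(numElements, MIN_SIZE))))
// Assumes num_elements > 0 (the reducer skips empty matrices before this point).
inline int num_chunks(int num_elements) {
    return std::min(num_elements,
                    std::max(kMinChunks,
                             std::min(kMaxChunks,
                                      divup(num_elements, kMinChunkSize))));
}

// Mirrors IReduceSegment::getChunk(): chunk c covers
//   [c * chunkSize, min((c + 1) * chunkSize, numElements))
inline std::pair<int, int> chunk_bounds(int num_elements, int chunk_idx) {
    int chunk_size = divup(num_elements, num_chunks(num_elements));
    int start = chunk_idx * chunk_size;
    int end = std::min((chunk_idx + 1) * chunk_size, num_elements);
    return std::make_pair(start, end);
}

}  // namespace reducer_sketch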
#include "node_specs.h" #include <stdio.h> typedef unsigned int uint; typedef float real; typedef float4 real4; /* int3 gives problems with memory copies therefor im using int4 Wrong type for attribute nocapture void (i64, i8*, <unrecognized-type>, i32)* @llvm.memcpy.i64 Argument value does not match function argument type! void %2 <unrecognized-type>Broken module found, compilation aborted! Aborted typedef struct int3 { int x,y,z; } int3; */ __device__ int undilate3(uint2 key) { int x, value = 0; key.x = key.x & 0x09249249; key.y = key.y & 0x09249249; // undilate first 10 bits x = key.y & 0x3FFFF; x = ((x << 4) + (x << 2) + x) & 0x0E070381; x = ((x << 12) + (x << 6) + x) & 0x0FF80001; x = ((x << 18) + x) & 0x0FFC0000; value = value | (x >> 18); x = (key.y >> 18) & 0x3FFFF; x = ((x << 4) + (x << 2) + x) & 0x0E070381; x = ((x << 12) + (x << 6) + x) & 0x0FF80001; x = ((x << 18) + x) & 0x0FFC0000; value = value | (x >> 12); // undilate second 10 bits x = key.x & 0x3FFFF; x = ((x << 4) + (x << 2) + x) & 0x0E070381; x = ((x << 12) + (x << 6) + x) & 0x0FF80001; x = ((x << 18) + x) & 0x0FFC0000; value = value | ((x >> 18) << 10); x = (key.x >> 18) & 0x3FFFF; x = ((x << 4) + (x << 2) + x) & 0x0E070381; x = ((x << 12) + (x << 6) + x) & 0x0FF80001; x = ((x << 18) + x) & 0x0FFC0000; value = value | ((x >> 12) << 10); return value; } __device__ uint2 dilate3(int value) { unsigned int x; uint2 key; // dilate first 10 bits x = value & 0x03FF; x = ((x << 16) + x) & 0xFF0000FF; x = ((x << 8) + x) & 0x0F00F00F; x = ((x << 4) + x) & 0xC30C30C3; x = ((x << 2) + x) & 0x49249249; key.y = x; // dilate second 10 bits x = (value >> 10) & 0x03FF; x = ((x << 16) + x) & 0xFF0000FF; x = ((x << 8) + x) & 0x0F00F00F; x = ((x << 4) + x) & 0xC30C30C3; x = ((x << 2) + x) & 0x49249249; key.x = x; return key; } #if 0 __device__ uint2 get_key(int4 crd) { uint2 key, key1; key = dilate3(crd.x); key1 = dilate3(crd.y); key.x = key.x | (key1.x << 1); key.y = key.y | (key1.y << 1); key1 = dilate3(crd.z); key.x = key.x | (key1.x << 2); key.y = key.y | (key1.y << 2); return key; } #else #if 0 __device__ uint4 get_key(int4 crd) { const int bits = 20; //20 to make it same number as morton order int i,xi, yi, zi; int mask; int key; //0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100 //000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7 const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5}; int temp; mask = 1 << (bits - 1); key = 0; uint4 key_new; for(i = 0; i < bits; i++, mask >>= 1) { xi = (crd.x & mask) ? 1 : 0; yi = (crd.y & mask) ? 1 : 0; zi = (crd.z & mask) ? 
1 : 0;
    int index = (xi << 2) + (yi << 1) + zi;

    if(index == 0)                    { temp = crd.z; crd.z = crd.y; crd.y = temp; }
    else if(index == 1 || index == 5) { temp = crd.x; crd.x = crd.y; crd.y = temp; }
    else if(index == 4 || index == 6) { crd.x = (crd.x) ^ (-1); crd.z = (crd.z) ^ (-1); }
    else if(index == 7 || index == 3) { temp = (crd.x) ^ (-1); crd.x = (crd.y) ^ (-1); crd.y = temp; }
    else                              { temp = (crd.z) ^ (-1); crd.z = (crd.y) ^ (-1); crd.y = temp; }

    key = (key << 3) + C[index];

    if(i == 9) { key_new.x = key; key = 0; }
  } //end for

  key_new.y = key;
  return key_new;
}
#else
__device__ uint4 get_key(int4 crd) {
  const int bits = 30;  //20 to make it same number as morton order
  int i,xi, yi, zi;
  int mask;
  int key;

  //0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100
  //000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7
  const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5};
  int temp;

  mask = 1 << (bits - 1);
  key  = 0;
  uint4 key_new;

  for(i = 0; i < bits; i++, mask >>= 1) {
    xi = (crd.x & mask) ? 1 : 0;
    yi = (crd.y & mask) ? 1 : 0;
    zi = (crd.z & mask) ? 1 : 0;

    int index = (xi << 2) + (yi << 1) + zi;

    if(index == 0)                    { temp = crd.z; crd.z = crd.y; crd.y = temp; }
    else if(index == 1 || index == 5) { temp = crd.x; crd.x = crd.y; crd.y = temp; }
    else if(index == 4 || index == 6) { crd.x = (crd.x) ^ (-1); crd.z = (crd.z) ^ (-1); }
    else if(index == 7 || index == 3) { temp = (crd.x) ^ (-1); crd.x = (crd.y) ^ (-1); crd.y = temp; }
    else                              { temp = (crd.z) ^ (-1); crd.z = (crd.y) ^ (-1); crd.y = temp; }

    key = (key << 3) + C[index];

    // Left off here: make sure the right bits end up in the right place
    if(i == 19) { key_new.y = key; key = 0; }
    if(i == 9)  { key_new.x = key; key = 0; }
  } //end for

  key_new.z = key;
  return key_new;
}
#endif

#if 0
__device__ uint2 get_key(int4 crd) {
  const int bits = 20;  //20 to make it same number as morton order
  int i,xi, yi, zi;
  int mask;
  long key;

  //0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100
  //000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7
  const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5};
  int temp;

  mask = 1 << (bits - 1);
  key  = 0;

  for(i = 0; i < bits; i++, mask >>= 1) {
    xi = (crd.x & mask) ? 1 : 0;
    yi = (crd.y & mask) ? 1 : 0;
    zi = (crd.z & mask) ?
1 : 0; int index = (xi << 2) + (yi << 1) + zi; if(index == 0) { temp = crd.z; crd.z = crd.y; crd.y = temp; } else if(index == 1 || index == 5) { temp = crd.x; crd.x = crd.y; crd.y = temp; } else if(index == 4 || index == 6) { crd.x = (crd.x) ^ (-1); crd.z = (crd.z) ^ (-1); } else if(index == 7 || index == 3) { temp = (crd.x) ^ (-1); crd.x = (crd.y) ^ (-1); crd.y = temp; } else { temp = (crd.z) ^ (-1); crd.z = (crd.y) ^ (-1); crd.y = temp; } key = (key << 3) + C[index]; } uint2 key_new; // key_new.x = key & 0xFFFFFFFF; // key_new.y = (key >> 32) & 0xFFFFFFFF; key_new.y = key & 0xFFFFFFFF; key_new.x = (key >> 32) & 0xFFFFFFFF; return key_new; } #endif #endif /* __device__ uint2 get_mask(int level) { int mask_levels = 3*max(MAXLEVELS - level, 0); uint2 mask = {0x3FFFFFFF, 0xFFFFFFFF}; if (mask_levels > 30) { mask.y = 0; mask.x = (mask.x >> (mask_levels - 30)) << (mask_levels - 30); } else { mask.y = (mask.y >> mask_levels) << mask_levels; } return mask; }*/ __device__ uint4 get_mask(int level) { int mask_levels = 3*max(MAXLEVELS - level, 0); uint4 mask = {0x3FFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,0xFFFFFFFF}; if (mask_levels > 60) { mask.z = 0; mask.y = 0; mask.x = (mask.x >> (mask_levels - 60)) << (mask_levels - 60); } else if (mask_levels > 30) { mask.z = 0; mask.y = (mask.y >> (mask_levels - 30)) << (mask_levels - 30); } else { mask.z = (mask.z >> mask_levels) << mask_levels; } // if(threadIdx.x == 0 && blockIdx.x == 0) // { // printf("ON DEV TEST: lvl: %d mlvl: %d x: %d y: %d z: %d \n", level, mask_levels, mask.x, mask.y, mask.z); // } // return mask; } /* __device__ uint2 get_imask(uint2 mask) { return (uint2){0x3FFFFFFF ^ mask.x, 0xFFFFFFFF ^ mask.y}; }*/ __device__ uint4 get_imask(uint4 mask) { return (uint4){0x3FFFFFFF ^ mask.x, 0xFFFFFFFF ^ mask.y, 0xFFFFFFFF ^ mask.z, 0}; } __device__ int4 get_crd(uint2 key) { int4 crd; crd.x = undilate3(key); crd.y = undilate3((uint2){key.x >> 1, key.y >> 1}); crd.z = undilate3((uint2){key.x >> 2, key.y >> 2}); return crd; } __device__ int cmp_uint2(uint2 a, uint2 b) { if (a.x < b.x) return -1; else if (a.x > b.x) return +1; else { if (a.y < b.y) return -1; else if (a.y > b.y) return +1; return 0; } } __device__ int cmp_uint4(uint4 a, uint4 b) { if (a.x < b.x) return -1; else if (a.x > b.x) return +1; else { if (a.y < b.y) return -1; else if (a.y > b.y) return +1; else { if (a.z < b.z) return -1; else if (a.z > b.z) return +1; return 0; } //end z } //end y } //end x, function #if 0 //Binary search of the key within certain bounds (cij.x, cij.y) __device__ int find_key(uint2 key, uint2 cij, uint2 *keys) { int l = cij.x; int r = cij.y - 1; while (r - l > 1) { int m = (r + l) >> 1; int cmp = cmp_uint2(keys[m], key); if (cmp == -1) { l = m; } else { r = m; } } if (cmp_uint2(keys[l], key) >= 0) return l; return r; } #endif //Binary search of the key within certain bounds (cij.x, cij.y) __device__ int find_key(uint4 key, uint2 cij, uint4 *keys) { int l = cij.x; int r = cij.y - 1; while (r - l > 1) { int m = (r + l) >> 1; int cmp = cmp_uint4(keys[m], key); if (cmp == -1) { l = m; } else { r = m; } } if (cmp_uint4(keys[l], key) >= 0) return l; return r; } __device__ float2 ds_accumulate(float2 a, float b){ float tmp = a.x + b; float del = (tmp - a.x) - b; a.x = tmp; a.y -= del; return a; } __device__ float2 ds_regularise(float2 a){ float tmp = a.x + a.y; a.y -= (tmp - a.x); a.x = tmp; return a; } // __device__ void sh_MinMax(int i, int j, volatile float3 *sh_rmin, volatile float3 *sh_rmax) // { // sh_rmin[i].x = fminf(sh_rmin[i].x, sh_rmin[j].x); // sh_rmin[i].y 
= fminf(sh_rmin[i].y, sh_rmin[j].y); // sh_rmin[i].z = fminf(sh_rmin[i].z, sh_rmin[j].z); // sh_rmax[i].x = fmaxf(sh_rmax[i].x, sh_rmax[j].x); // sh_rmax[i].y = fmaxf(sh_rmax[i].y, sh_rmax[j].y); // sh_rmax[i].z = fmaxf(sh_rmax[i].z, sh_rmax[j].z); // } __device__ void sh_MinMax(int i, int j, float3 *r_min, float3 *r_max, volatile float3 *sh_rmin, volatile float3 *sh_rmax) { sh_rmin[i].x = (*r_min).x = fminf((*r_min).x, sh_rmin[j].x); sh_rmin[i].y = (*r_min).y = fminf((*r_min).y, sh_rmin[j].y); sh_rmin[i].z = (*r_min).z = fminf((*r_min).z, sh_rmin[j].z); sh_rmax[i].x = (*r_max).x = fmaxf((*r_max).x, sh_rmax[j].x); sh_rmax[i].y = (*r_max).y = fmaxf((*r_max).y, sh_rmax[j].y); sh_rmax[i].z = (*r_max).z = fmaxf((*r_max).z, sh_rmax[j].z); } __device__ void MinMaxPos(float4 pos, float4 &rmax, float4 &rmin) { rmin.x = fminf(pos.x, rmin.x); rmin.y = fminf(pos.y, rmin.y); rmin.z = fminf(pos.z, rmin.z); rmax.x = fmaxf(pos.x, rmax.x); rmax.y = fmaxf(pos.y, rmax.y); rmax.z = fmaxf(pos.z, rmax.z); } __device__ real4 get_pos(uint2 key, float size, float4 corner) { real4 pos; pos.w = size; int4 crd = get_crd(key); float domain_fac = corner.w; pos.x = crd.x*domain_fac + corner.x; pos.y = crd.y*domain_fac + corner.y; pos.z = crd.z*domain_fac + corner.z; return pos; } /*** **** --> prefix calculation via Horn(2005) data-parallel algoritm ***/ #define BTEST(x) (-(int)(x)) template<int DIM2> __device__ int calc_prefix(int N, int* prefix_in, int tid) { int x, y = 0; const int DIM = 1 << DIM2; for (int p = 0; p < N; p += DIM) { int *prefix = &prefix_in[p]; x = prefix[tid - 1]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 1); __syncthreads(); x = prefix[tid - 2]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 2); __syncthreads(); x = prefix[tid - 4]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 4); __syncthreads(); x = prefix[tid - 8]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 8); __syncthreads(); x = prefix[tid - 16]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 16); __syncthreads(); if (DIM2 >= 6) {x = prefix[tid - 32]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 32); __syncthreads();} if (DIM2 >= 7) {x = prefix[tid - 64]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 64); __syncthreads();} if (DIM2 >= 8) {x = prefix[tid -128]; __syncthreads(); prefix[tid] += x & BTEST(tid >=128); __syncthreads();} prefix[tid] += y; __syncthreads(); y = prefix[DIM-1]; __syncthreads(); } return y; } template<int DIM2> __device__ int calc_prefix(int* prefix, int tid, int value) { int x; const int DIM = 1 << DIM2; prefix[tid] = value; __syncthreads(); #if 1 x = prefix[tid - 1]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 1); __syncthreads(); x = prefix[tid - 2]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 2); __syncthreads(); x = prefix[tid - 4]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 4); __syncthreads(); x = prefix[tid - 8]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 8); __syncthreads(); x = prefix[tid - 16]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 16); __syncthreads(); if (DIM2 >= 6) {x = prefix[tid - 32]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 32); __syncthreads();} if (DIM2 >= 7) {x = prefix[tid - 64]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 64); __syncthreads();} if (DIM2 >= 8) {x = prefix[tid -128]; __syncthreads(); prefix[tid] += x & BTEST(tid >=128); __syncthreads();} x = prefix[DIM - 1]; __syncthreads(); return x; #else int offset = 0; int tid2 = tid << 1; #pragma unroll for (int d = DIM >> 1; d > 0; d >>= 1) { __syncthreads(); int iflag = 
BTEST(tid < d); int ai = (((tid2 + 1) << offset) - 1) & iflag; int bi = (((tid2 + 2) << offset) - 1) & iflag; prefix[bi] += prefix[ai] & iflag; offset++; } // clear the last element if (tid == 0) prefix[DIM - 1] = 0; // traverse down the tree building the scan in place #pragma unroll for (int d = 1; d < DIM; d <<= 1) { offset--; __syncthreads(); int iflag = BTEST(tid < d); int ai = (((tid2 + 1) << offset) - 1) & iflag; int bi = (((tid2 + 2) << offset) - 1) & iflag; int t = prefix[ai]; if (tid < d) { prefix[ai] = (prefix[bi] & iflag) + (t & BTEST(tid >= d)); prefix[bi] += t & iflag; } } __syncthreads(); prefix[tid] += value; __syncthreads(); x = prefix[DIM - 1]; __syncthreads(); return x; #endif }
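// ---------------------------------------------------------------------------
// Illustrative aside (not part of the file above): dilate3()/undilate3()
// spread a 10-bit coordinate so that bit i lands at bit 3*i (which is why
// undilate3() first masks the key with 0x09249249), allowing three
// coordinates to be interleaved into one Morton-style key.  The loop-based
// host reference below shows the same bit layout and checks the round trip;
// dilate3_ref()/undilate3_ref() are illustration-only names, not drop-in
// replacements for the magic-mask device versions.
// ---------------------------------------------------------------------------
#include <assert.h>

static unsigned int dilate3_ref(unsigned int v) {
    unsigned int r = 0;
    for (int i = 0; i < 10; i++)            // low 10 bits only
        r |= ((v >> i) & 1u) << (3 * i);    // bit i -> bit 3*i
    return r;
}

static unsigned int undilate3_ref(unsigned int r) {
    unsigned int v = 0;
    for (int i = 0; i < 10; i++)
        v |= ((r >> (3 * i)) & 1u) << i;    // bit 3*i -> bit i
    return v;
}

int main() {
    for (unsigned int v = 0; v < 1024u; v++) {
        unsigned int d = dilate3_ref(v);
        assert((d & ~0x09249249u) == 0u);   // only every third bit may be set
        assert(undilate3_ref(d) == v);      // the round trip recovers the input
    }
    return 0;
}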
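// ---------------------------------------------------------------------------
// The file below is a clang test that calls the raw __hmma_*/__imma_*/
// __bmma_* builtins directly and checks both the generated
// @llvm.nvvm.wmma.* intrinsics and the "needs target feature" diagnostics.
// Ordinary CUDA code reaches the same tensor-core instructions through the
// nvcuda::wmma API in <mma.h>; the kernel below is a minimal companion
// sketch of that API for a single 16x16x16 f16 -> f32 tile (assumes sm_70 or
// newer and dense 16-element leading dimensions) and is not part of the test
// itself.
// ---------------------------------------------------------------------------
#include <cuda_fp16.h>
#include <mma.h>
using namespace nvcuda;

// One warp computes C(16x16) = A(16x16) * B(16x16) with half inputs and a
// float accumulator; the layout and satf integer arguments of the builtins
// exercised in the test correspond to the fragment layouts and (absent)
// saturation chosen here.
__global__ void wmma_tile_f16_f32(const half *a, const half *b, float *c) {
    wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> fa;
    wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> fb;
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> fc;

    wmma::fill_fragment(fc, 0.0f);
    wmma::load_matrix_sync(fa, a, 16);
    wmma::load_matrix_sync(fb, b, 16);
    wmma::mma_sync(fc, fa, fb, fc);
    wmma::store_matrix_sync(c, fc, 16, wmma::mem_row_major);
}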
#if !defined(CUDA_VERSION) #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) #define __shared__ __attribute__((shared)) #define __constant__ __attribute__((constant)) typedef unsigned long long uint64_t; #endif // CHECK-LABEL: test_wmma_buitins __device__ void test_wmma_buitins(int *src, int *dst, float *fsrc, float *fdst, double *dsrc, double *ddst, int ldm) { #if (PTX >= 60) && (SM >= 70) // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target 
feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX60_SM70: call {{.*}} 
@llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target 
feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX60_SM70: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); #endif // (PTX >= 60) && (SM >= 70) #if (PTX >= 61) && (SM >= 70) // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 0); // 
CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite // 
expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // 
CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature 
(sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite 
// expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32 // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX61_SM70: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite // expected-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature (sm_70{{.*}},(ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); 
#endif // (PTX >= 61) && (SM >= 70) #if (PTX >= 63) && (SM >= 72) // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m16n16k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m16n16k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8 // expected-error-re@+1 
{{'__imma_m32n8k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8 // expected-error-re@+1 {{'__imma_m32n8k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m32n8k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_a_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_a_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_s8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_s8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8 // expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_u8(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8 // 
expected-error-re@+1 {{'__imma_m8n32k16_ld_b_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_b_u8(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_ld_c' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n32k16_st_c_i32' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, 
src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m16n16k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m16n16k16_mma_u8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8 // expected-error-re@+1 
{{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m32n8k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m32n8k16_mma_u8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 3, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_s8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_s8(dst, src, src, src, 0, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 3, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 3, 1); // 
CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 2, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 2, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 1, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 1, 1); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8 // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 0, 0); // CHECK_PTX63_SM72: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite // expected-error-re@+1 {{'__imma_m8n32k16_mma_u8' needs target feature (sm_72{{.*}},(ptx63{{.*}}}} __imma_m8n32k16_mma_u8(dst, src, src, src, 0, 1); #endif // (PTX >= 63) && (SM >= 72) #if (PTX >= 63) && (SM >= 75) // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_a_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_a_b1(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_b_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_b_b1(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32 // expected-error-re@+1 {{'__bmma_m8n8k128_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_a_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_a_s4(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_a_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_a_u4(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4 // expected-error-re@+1 {{'__imma_m8n8k32_ld_b_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_b_s4(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4 // expected-error-re@+1 
{{'__imma_m8n8k32_ld_b_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_b_u4(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_c(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_ld_c' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_ld_c(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_st_c_i32(dst, src, ldm, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32 // expected-error-re@+1 {{'__imma_m8n8k32_st_c_i32' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_st_c_i32(dst, src, ldm, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.xor.popc.row.col.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_mma_xor_popc_b1' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __bmma_m8n8k128_mma_xor_popc_b1(dst, src, src, src, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4 // expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_s4(dst, src, src, src, 1, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite // expected-error-re@+1 {{'__imma_m8n8k32_mma_s4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_s4(dst, src, src, src, 1, 1); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4 // expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_u4(dst, src, src, src, 1, 0); // CHECK_PTX63_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite // expected-error-re@+1 {{'__imma_m8n8k32_mma_u4' needs target feature (sm_75{{.*}},(ptx63{{.*}}}} __imma_m8n8k32_mma_u4(dst, src, src, src, 1, 1); #endif // (PTX >= 63) && (SM >= 75) #if (PTX >= 70) && (SM >= 80) // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.a.col.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} 
__mma_tf32_m16n16k8_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.b.col.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.b.row.stride.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.c.col.stride.f32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_c(fdst, fsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.load.c.row.stride.f32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_ld_c(fdst, fsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.store.d.col.stride.f32 // expected-error-re@+1 {{'__mma_m16n16k8_st_c_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_m16n16k8_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.store.d.row.stride.f32 // expected-error-re@+1 {{'__mma_m16n16k8_st_c_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_m16n16k8_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_a(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_a(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_b(dst, src, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.a.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_a(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.a.row.stride.f64 // expected-error-re@+1 
{{'__dmma_m8n8k4_ld_a' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_a(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.b.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_b(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.b.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_b' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_b(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.c.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_c(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.load.c.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_ld_c' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_ld_c(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.store.d.col.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_st_c_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_st_c_f64(ddst, dsrc, ldm, 1); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_st_c_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_st_c_f64(ddst, dsrc, ldm, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m16n16k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m16n16k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.col.row.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.row.col.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32 // expected-error-re@+1 {{'__mma_tf32_m16n16k8_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_tf32_m16n16k8_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 3, 
0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m32n8k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m32n8k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16 // expected-error-re@+1 {{'__mma_bf16_m8n32k16_mma_f32' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __mma_bf16_m8n32k16_mma_f32(fdst, src, src, fsrc, 0, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.col.col.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 3, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.col.row.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 2, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.row.col.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 1, 0); // CHECK_PTX70_SM80: call {{.*}} @llvm.nvvm.wmma.m8n8k4.mma.row.row.f64 // expected-error-re@+1 {{'__dmma_m8n8k4_mma_f64' needs target feature (sm_80{{.*}},(ptx70{{.*}}}} __dmma_m8n8k4_mma_f64(ddst, dsrc, dsrc, dsrc, 0, 0); #endif // (PTX >= 70) && (SM >= 80) #if (PTX >= 71) && (SM >= 75) // CHECK_PTX71_SM75: call {{.*}} @llvm.nvvm.wmma.m8n8k128.mma.and.popc.row.col.b1 // expected-error-re@+1 {{'__bmma_m8n8k128_mma_and_popc_b1' needs target feature (sm_75{{.*}},(ptx71{{.*}}}} __bmma_m8n8k128_mma_and_popc_b1(dst, src, src, src, 1); #endif // (PTX >= 71) && (SM >= 75) }
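// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the test above): the builtins exercised in the
// (PTX >= 70) && (SM >= 80) block correspond to the double-precision m8n8k4 tensor-core
// tiles that are normally written through the higher-level nvcuda::wmma API. A minimal
// kernel using that API might look like the following; the fragment shapes mirror the
// 8x8x4 f64 intrinsics above, while the leading dimensions assume tightly packed
// 8x4 / 4x8 / 8x8 tiles and are only an assumption here.
#include <mma.h>
__global__ void dmma_8x8x4_example(const double *a, const double *b, double *c) {
    using namespace nvcuda;
    wmma::fragment<wmma::matrix_a, 8, 8, 4, double, wmma::row_major> fa;
    wmma::fragment<wmma::matrix_b, 8, 8, 4, double, wmma::col_major> fb;
    wmma::fragment<wmma::accumulator, 8, 8, 4, double> fc;
    wmma::fill_fragment(fc, 0.0);            // C = 0
    wmma::load_matrix_sync(fa, a, 4);        // 8x4 tile of A, row major
    wmma::load_matrix_sync(fb, b, 4);        // 4x8 tile of B, column major
    wmma::mma_sync(fc, fa, fb, fc);          // C += A * B
    wmma::store_matrix_sync(c, fc, 8, wmma::mem_row_major);
}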
namespace cgbn { typedef enum { dlimbs_algs_common, dlimbs_algs_half, dlimbs_algs_full, dlimbs_algs_multi } dlimbs_algs_t; template<class core, dlimbs_algs_t implementation> class dispatch_dlimbs_t; template<class core> class dispatch_dlimbs_t<core, dlimbs_algs_common> { public: static const uint32_t TPI=core::TPI; static const uint32_t LIMBS=core::LIMBS; static const uint32_t DLIMBS=core::DLIMBS; static const uint32_t LIMB_OFFSET=DLIMBS*TPI-LIMBS; __device__ __forceinline__ static void dlimbs_scatter(uint32_t r[DLIMBS], const uint32_t x[LIMBS], const uint32_t source_thread) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t; mpzero<DLIMBS>(r); #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_sync(sync, x[index], source_thread, TPI); r[(index+LIMB_OFFSET)%DLIMBS]=(group_thread==(index+LIMB_OFFSET)/DLIMBS) ? t : r[(index+LIMB_OFFSET)%DLIMBS]; } } __device__ __forceinline__ static void dlimbs_gather(uint32_t r[LIMBS], const uint32_t x[DLIMBS], const uint32_t destination_thread) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_sync(sync, x[(index+LIMB_OFFSET)%DLIMBS], (index+LIMB_OFFSET)/DLIMBS, TPI); r[index]=(group_thread==destination_thread) ? t : r[index]; } } __device__ __forceinline__ static void dlimbs_all_gather(uint32_t r[LIMBS], const uint32_t x[DLIMBS]) { uint32_t sync=core::sync_mask(); #pragma unroll for(int32_t index=0;index<LIMBS;index++) r[index]=__shfl_sync(sync, x[(index+LIMB_OFFSET)%DLIMBS], (index+LIMB_OFFSET)/DLIMBS, TPI); } }; template<class core> class dispatch_dlimbs_t<core, dlimbs_algs_half> { public: static const uint32_t TPI=core::TPI; static const uint32_t LIMBS=core::LIMBS; static const uint32_t DLIMBS=core::DLIMBS; static const uint32_t LIMB_OFFSET=DLIMBS*TPI-LIMBS; // these algorithms require that LIMBS<=TPI/2 __device__ __forceinline__ static void dlimbs_approximate(uint32_t approx[DLIMBS], const uint32_t denom[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t x, d0, d1, x0, x1, x2, est, a, h, l; int32_t c, top; // computes (beta^2 - 1) / denom - beta, where beta=1<<32*LIMBS x=0xFFFFFFFF-denom[0]; d1=__shfl_sync(sync, denom[0], TPI-1, TPI); d0=__shfl_sync(sync, denom[0], TPI-2, TPI); approx[0]=0; a=uapprox(d1); #pragma nounroll for(int32_t thread=LIMBS-1;thread>=0;thread--) { x0=__shfl_sync(sync, x, TPI-3, TPI); x1=__shfl_sync(sync, x, TPI-2, TPI); x2=__shfl_sync(sync, x, TPI-1, TPI); est=udiv(x0, x1, x2, d0, d1, a); l=madlo_cc(est, denom[0], 0); h=madhic(est, denom[0], 0); x=sub_cc(x, h); c=subc(0, 0); // thread TPI-1 is zero top=__shfl_sync(sync, x, TPI-1, TPI); x=__shfl_sync(sync, x, threadIdx.x-1, TPI); c=__shfl_sync(sync, c, threadIdx.x-1, TPI); x=sub_cc(x, l); c=subc(c, 0); if(top+core::resolve_sub(c, x)<0) { // means a correction is required, should be very rare x=add_cc(x, denom[0]); c=addc(0, 0); core::fast_propagate_add(c, x); est--; } approx[0]=(group_thread==thread+TPI-LIMBS) ? 
est : approx[0]; } } __device__ __forceinline__ static uint32_t dlimbs_sqrt_rem_wide(uint32_t s[DLIMBS], uint32_t r[DLIMBS], const uint32_t lo[DLIMBS], const uint32_t hi[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t x, x0, x1, t0, t1, divisor, approx, p, q, c; // computes s=sqrt(x), r=x-s^2, where x=(hi<<32*LIMBS) + lo t0=__shfl_sync(sync, lo[0], threadIdx.x+LIMBS, TPI); x=hi[0] | t0; x0=__shfl_sync(sync, x, TPI-2, TPI); x1=__shfl_sync(sync, x, TPI-1, TPI); divisor=usqrt(x0, x1); approx=uapprox(divisor); t0=madlo(divisor, divisor, 0); t1=madhi(divisor, divisor, 0); x0=sub_cc(x0, t0); x1=subc(x1, t1); x=__shfl_up_sync(sync, x, 1, TPI); x=(group_thread==TPI-1) ? x0 : x; s[0]=(group_thread==TPI-1) ? divisor+divisor : 0; // silent 1 at the top of s #pragma nounroll for(int32_t index=TPI-2;index>=(int32_t)(TPI-LIMBS);index--) { x0=__shfl_sync(sync, x, TPI-1, TPI); q=usqrt_div(x0, x1, divisor, approx); s[0]=(group_thread==index) ? q : s[0]; p=madhi(q, s[0], 0); x=sub_cc(x, p); c=subc(0, 0); core::fast_propagate_sub(c, x); x1=__shfl_sync(sync, x, TPI-1, TPI)-q; // we subtract q because of the silent 1 at the top of s x=__shfl_up_sync(sync, x, 1, TPI); p=madlo(q, s[0], 0); x=sub_cc(x, p); c=subc(0, 0); x1-=core::fast_propagate_sub(c, x); while(0>(int32_t)x1) { x1++; q--; // correction step: add q and s x=add_cc(x, (group_thread==index) ? q : 0); c=addc(0, 0); x=add_cc(x, s[0]); c=addc(c, 0); x1+=core::resolve_add(c, x); // update s s[0]=(group_thread==index) ? q : s[0]; } s[0]=(group_thread==index+1) ? s[0]+(q>>31) : s[0]; s[0]=(group_thread==index) ? q+q : s[0]; } t0=__shfl_down_sync(sync, s[0], 1, TPI); t0=(group_thread==TPI-1) ? 1 : t0; s[0]=uright_wrap(s[0], t0, 1); r[0]=x; return x1; } __device__ __forceinline__ static void dlimbs_div_estimate(uint32_t q[DLIMBS], const uint32_t x[DLIMBS], const uint32_t approx[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t, c; uint64_t w; // computes q=(x*approx>>32*LIMBS) + x + 3 // q=min(q, (1<<32*LIMBS)-1); // // Notes: leaves junk in lower words of q w=0; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_sync(sync, x[0], TPI-LIMBS+index, TPI); w=mad_wide(t, approx[0], w); t=__shfl_sync(sync, ulow(w), threadIdx.x+1, TPI); // half size: take advantage of zero wrapping w=(w>>32)+t; } // increase the estimate by 3 t=(group_thread==TPI-LIMBS) ? 3 : 0; w=w + t + x[0]; q[0]=ulow(w); c=uhigh(w); if(core::resolve_add(c, q[0])!=0) q[0]=0xFFFFFFFF; } __device__ __forceinline__ static void dlimbs_sqrt_estimate(uint32_t q[DLIMBS], uint32_t top, const uint32_t x[DLIMBS], const uint32_t approx[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t, high, low; uint64_t w; // computes: // 1. num=((top<<32*LIMBS) + x) / 2 // 2. q=(num*approx>>32*LIMBS) + num + 4 // 3. q=min(q, (1<<32*LIMBS)-1); // // Note: Leaves junk in lower words of q // shift x right by 1 bit. Fill high bit with top. t=__shfl_down_sync(sync, x[0], 1, TPI); t=(group_thread==TPI-1) ? top : t; low=uright_wrap(x[0], t, 1); // if we're exactly half the size, need to clear out low limb if(TPI==2*LIMBS) low=(group_thread>=LIMBS) ? 
low : 0; // estimate is in low w=0; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_sync(sync, low, TPI-LIMBS+index, TPI); w=mad_wide(t, approx[0], w); t=__shfl_sync(sync, ulow(w), threadIdx.x+1, TPI); // half size: take advantage of zero wrapping w=(w>>32)+t; } // increase the estimate by 4 -- because we might have cleared low bit, estimate can be off by 4 t=(group_thread==TPI-LIMBS) ? 4 : 0; w=w + t + low; low=ulow(w); high=uhigh(w); if(core::resolve_add(high, low)!=0) low=0xFFFFFFFF; q[0]=low; } }; template<class core> class dispatch_dlimbs_t<core, dlimbs_algs_full> { public: static const uint32_t TPI=core::TPI; static const uint32_t LIMBS=core::LIMBS; static const uint32_t DLIMBS=core::DLIMBS; static const uint32_t LIMB_OFFSET=DLIMBS*TPI-LIMBS; // These algorithms are used then LIMBS<=TPI. Almost the same as the half size ones, few tweaks here and there. __device__ __forceinline__ static void dlimbs_approximate(uint32_t approx[DLIMBS], const uint32_t denom[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t x, d0, d1, x0, x1, x2, est, a, h, l; int32_t c, top; // computes (beta^2 - 1) / denom - beta, where beta=1<<32*LIMBS x=0xFFFFFFFF-denom[0]; d1=__shfl_sync(sync, denom[0], TPI-1, TPI); d0=__shfl_sync(sync, denom[0], TPI-2, TPI); approx[0]=0; a=uapprox(d1); #pragma nounroll for(int32_t thread=LIMBS-1;thread>=0;thread--) { x0=__shfl_sync(sync, x, TPI-3, TPI); x1=__shfl_sync(sync, x, TPI-2, TPI); x2=__shfl_sync(sync, x, TPI-1, TPI); est=udiv(x0, x1, x2, d0, d1, a); l=madlo_cc(est, denom[0], 0); h=madhic(est, denom[0], 0); x=sub_cc(x, h); c=subc(0, 0); // thread TPI-1 is zero top=__shfl_sync(sync, x, TPI-1, TPI); x=__shfl_up_sync(sync, x, 1, TPI); c=__shfl_sync(sync, c, threadIdx.x-1, TPI); x=(group_thread==0) ? 0xFFFFFFFF : x; x=sub_cc(x, l); c=subc(c, 0); if(top+core::resolve_sub(c, x)<0) { // means a correction is required, should be very rare x=add_cc(x, denom[0]); c=addc(0, 0); core::fast_propagate_add(c, x); est--; } approx[0]=(group_thread==thread+TPI-LIMBS) ? est : approx[0]; } } __device__ __forceinline__ static uint32_t dlimbs_sqrt_rem_wide(uint32_t s[DLIMBS], uint32_t r[DLIMBS], const uint32_t lo[DLIMBS], const uint32_t hi[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t x, x0, x1, t0, t1, divisor, approx, p, q, c, low; // computes s=sqrt(x), r=x-s^2, where x=(hi<<32*LIMBS) + lo low=lo[0]; x=hi[0]; if(TPI!=LIMBS) { low=__shfl_sync(sync, low, threadIdx.x-TPI+LIMBS, TPI); x=((int32_t)group_thread>=(int32_t)(TPI-LIMBS)) ? x : low; // use casts to silence warning } x0=__shfl_sync(sync, x, TPI-2, TPI); x1=__shfl_sync(sync, x, TPI-1, TPI); divisor=usqrt(x0, x1); approx=uapprox(divisor); t0=madlo_cc(divisor, divisor, 0); t1=madhic(divisor, divisor, 0); x0=sub_cc(x0, t0); x1=subc(x1, t1); x=(group_thread==TPI-1) ? low : x; x=__shfl_sync(sync, x, threadIdx.x-1, TPI); x=(group_thread==TPI-1) ? x0 : x; s[0]=(group_thread==TPI-1) ? divisor+divisor : 0; // silent 1 at the top of s #pragma nounroll for(int32_t index=TPI-2;index>=(int32_t)(TPI-LIMBS);index--) { x0=__shfl_sync(sync, x, TPI-1, TPI); q=usqrt_div(x0, x1, divisor, approx); s[0]=(group_thread==index) ? q : s[0]; p=madhi(q, s[0], 0); x=sub_cc(x, p); c=subc(0, 0); core::fast_propagate_sub(c, x); x1=__shfl_sync(sync, x, TPI-1, TPI)-q; // we subtract q because of the silent 1 at the top of s t0=__shfl_sync(sync, low, index, TPI); x=__shfl_up_sync(sync, x, 1, TPI); x=(group_thread==0) ? 
t0 : x; p=madlo(q, s[0], 0); x=sub_cc(x, p); c=subc(0, 0); x1-=core::fast_propagate_sub(c, x); while(0>(int32_t)x1) { x1++; q--; // correction step: add q and s x=add_cc(x, (group_thread==index) ? q : 0); c=addc(0, 0); x=add_cc(x, s[0]); c=addc(c, 0); x1+=core::resolve_add(c, x); // update s s[0]=(group_thread==index) ? q : s[0]; } s[0]=(group_thread==index+1) ? s[0]+(q>>31) : s[0]; s[0]=(group_thread==index) ? q+q : s[0]; } t0=__shfl_down_sync(sync, s[0], 1, TPI); t0=(group_thread==TPI-1) ? 1 : t0; s[0]=uright_wrap(s[0], t0, 1); r[0]=x; return x1; } __device__ __forceinline__ static void dlimbs_div_estimate(uint32_t q[DLIMBS], const uint32_t x[DLIMBS], const uint32_t approx[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t, c; uint64_t w; // computes q=(x*approx>>32*LIMBS) + x + 3 // q=min(q, (1<<32*LIMBS)-1); // // Notes: leaves junk in lower words of q w=0; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_sync(sync, x[0], TPI-LIMBS+index, TPI); w=mad_wide(t, approx[0], w); t=__shfl_sync(sync, ulow(w), threadIdx.x+1, TPI); t=(group_thread==TPI-1) ? 0 : t; w=(w>>32)+t; } // increase the estimate by 3 t=(group_thread==TPI-LIMBS) ? 3 : 0; w=w + t + x[0]; q[0]=ulow(w); c=uhigh(w); if(core::resolve_add(c, q[0])!=0) q[0]=0xFFFFFFFF; } __device__ __forceinline__ static void dlimbs_sqrt_estimate(uint32_t q[DLIMBS], uint32_t top, const uint32_t x[DLIMBS], const uint32_t approx[DLIMBS]) { uint32_t sync=core::sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t t, high, low; uint64_t w; // computes: // 1. num=((top<<32*LIMBS) + x) / 2 // 2. q=(num*approx>>32*LIMBS) + num + 4 // 3. q=min(q, (1<<32*LIMBS)-1); // // Note: Leaves junk in lower words of q // shift x right by 1 bit. Fill high bit with top. t=__shfl_down_sync(sync, x[0], 1, TPI); t=(group_thread==TPI-1) ? top : t; low=uright_wrap(x[0], t, 1); // estimate is in low w=0; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_sync(sync, low, TPI-LIMBS+index, TPI); w=mad_wide(t, approx[0], w); t=__shfl_down_sync(sync, ulow(w), 1, TPI); t=(group_thread==TPI-1) ? 0 : t; w=(w>>32)+t; } // increase the estimate by 4 -- because we might have cleared low bit, estimate can be off by 4 t=(group_thread==TPI-LIMBS) ? 4 : 0; w=w + t + low; low=ulow(w); high=uhigh(w); if(core::resolve_add(high, low)!=0) low=0xFFFFFFFF; q[0]=low; } }; } /* namespace cgbn */
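// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of cgbn): the dlimbs_approximate() routines
// above compute floor((beta^2 - 1) / denom) - beta with beta = 2^(32*LIMBS), spread
// across the threads of a group. For a single 32-bit limb with its top bit set the same
// quantity can be written directly; approx_reciprocal32 below is a hypothetical helper,
// assuming a host compiler with unsigned __int128 support (GCC/Clang).
#include <cstdint>
static inline uint32_t approx_reciprocal32(uint32_t d) {
    // requires d >= 2^31 so that the result fits in 32 bits
    const unsigned __int128 beta = (unsigned __int128)1 << 32;
    const unsigned __int128 num  = beta * beta - 1;    // beta^2 - 1
    return (uint32_t)(num / d - beta);                 // floor((beta^2 - 1) / d) - beta
}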
namespace rxmesh { namespace patcher { Patcher::Patcher(uint32_t patch_size, const std::vector<uint32_t>& ff_offset, const std::vector<uint32_t>& ff_values, const std::vector<std::vector<uint32_t>>& fv, const std::unordered_map<std::pair<uint32_t, uint32_t>, uint32_t, detail::edge_key_hash> edges_map, const uint32_t num_vertices, const uint32_t num_edges, const bool quite) : m_patch_size(patch_size), m_num_patches(0), m_num_vertices(num_vertices), m_num_edges(num_edges), m_num_faces(fv.size()), m_num_seeds(0), m_max_num_patches(0), m_num_components(0), m_num_lloyd_run(0), m_d_face_patch(nullptr), m_d_vertex_patch(nullptr), m_d_edge_patch(nullptr), m_d_patches_offset(nullptr), m_d_patches_size(nullptr), m_d_patches_val(nullptr), m_patching_time_ms(0.0), m_d_seeds(nullptr), m_d_ff_values(nullptr), m_d_ff_offset(nullptr), m_d_queue(nullptr), m_d_queue_ptr(nullptr), m_d_new_num_patches(nullptr), m_d_max_patch_size(nullptr), m_d_cub_temp_storage_scan(nullptr), m_d_cub_temp_storage_max(nullptr), m_cub_scan_bytes(0), m_cub_max_bytes(0) { m_num_patches = m_num_faces / m_patch_size + ((m_num_faces % m_patch_size) ? 1 : 0); m_max_num_patches = 5 * m_num_patches; m_num_seeds = m_num_patches; allocate_memory(); // degenerate cases if (m_num_patches <= 1) { m_patches_offset[0] = m_num_faces; m_num_seeds = 1; m_num_components = 1; m_num_lloyd_run = 0; for (uint32_t i = 0; i < m_num_faces; ++i) { m_face_patch[i] = 0; m_patches_val[i] = i; } allocate_device_memory(ff_offset, ff_values); assign_patch(fv, edges_map); } else { initialize_random_seeds(ff_offset, ff_values); allocate_device_memory(ff_offset, ff_values); run_lloyd(); postprocess(fv, ff_offset, ff_values); assign_patch(fv, edges_map); } if (!quite) { print_statistics(); } } Patcher::~Patcher() { GPU_FREE(m_d_face_patch); GPU_FREE(m_d_vertex_patch); GPU_FREE(m_d_edge_patch); } void Patcher::allocate_memory() { m_seeds.reserve(m_num_seeds); // patches assigned to each face, vertex, and edge m_face_patch.resize(m_num_faces); std::fill(m_face_patch.begin(), m_face_patch.end(), INVALID32); m_vertex_patch.resize(m_num_vertices); std::fill(m_vertex_patch.begin(), m_vertex_patch.end(), INVALID32); m_edge_patch.resize(m_num_edges); std::fill(m_edge_patch.begin(), m_edge_patch.end(), INVALID32); // explicit patches in compressed format m_patches_val.resize(m_num_faces); // we allow up to double the number of faces due to patch bisecting m_patches_offset.resize(m_max_num_patches); // external ribbon. 
it assumes first that all faces will be in there and // then shrink to fit after the construction is done m_ribbon_ext_offset.resize(m_max_num_patches, 0); m_ribbon_ext_val.resize(m_num_faces); } void Patcher::allocate_device_memory(const std::vector<uint32_t>& ff_offset, const std::vector<uint32_t>& ff_values) { // ff CUDA_ERROR(cudaMalloc((void**)&m_d_ff_values, ff_values.size() * sizeof(uint32_t))); CUDA_ERROR(cudaMalloc((void**)&m_d_ff_offset, ff_offset.size() * sizeof(uint32_t))); CUDA_ERROR(cudaMemcpy((void**)m_d_ff_values, ff_values.data(), ff_values.size() * sizeof(uint32_t), cudaMemcpyHostToDevice)); CUDA_ERROR(cudaMemcpy((void**)m_d_ff_offset, ff_offset.data(), ff_offset.size() * sizeof(uint32_t), cudaMemcpyHostToDevice)); // face/vertex/edge patch CUDA_ERROR( cudaMalloc((void**)&m_d_face_patch, m_num_faces * sizeof(uint32_t))); CUDA_ERROR(cudaMalloc((void**)&m_d_vertex_patch, m_num_vertices * sizeof(uint32_t))); CUDA_ERROR( cudaMalloc((void**)&m_d_edge_patch, m_num_edges * sizeof(uint32_t))); // seeds CUDA_ERROR( cudaMalloc((void**)&m_d_seeds, m_max_num_patches * sizeof(uint32_t))); CUDA_ERROR(cudaMemcpy((void**)m_d_seeds, m_seeds.data(), m_num_patches * sizeof(uint32_t), cudaMemcpyHostToDevice)); // utility // 0 -> queue start // 1-> queue end // 2-> next queue end std::vector<uint32_t> h_queue_ptr{0, m_num_patches, m_num_patches}; CUDA_ERROR(cudaMalloc((void**)&m_d_queue, m_num_faces * sizeof(uint32_t))); CUDA_ERROR(cudaMalloc((void**)&m_d_queue_ptr, 3 * sizeof(uint32_t))); CUDA_ERROR(cudaMemcpy(m_d_queue_ptr, h_queue_ptr.data(), 3 * sizeof(uint32_t), cudaMemcpyHostToDevice)); // patch offset/size/value and max patch size CUDA_ERROR(cudaMalloc((void**)&m_d_patches_offset, m_max_num_patches * sizeof(uint32_t))); CUDA_ERROR(cudaMalloc((void**)&m_d_patches_size, m_max_num_patches * sizeof(uint32_t))); CUDA_ERROR( cudaMalloc((void**)&m_d_patches_val, m_num_faces * sizeof(uint32_t))); CUDA_ERROR(cudaMalloc((void**)&m_d_max_patch_size, sizeof(uint32_t))); CUDA_ERROR(cudaMalloc((void**)&m_d_new_num_patches, sizeof(uint32_t))); CUDA_ERROR(cudaMemcpy((void**)m_d_new_num_patches, &m_num_patches, sizeof(uint32_t), cudaMemcpyHostToDevice)); // CUB temp memory m_d_cub_temp_storage_scan = nullptr; m_d_cub_temp_storage_max = nullptr; m_cub_scan_bytes = 0; m_cub_max_bytes = 0; ::cub::DeviceScan::InclusiveSum(m_d_cub_temp_storage_scan, m_cub_scan_bytes, m_d_patches_size, m_d_patches_offset, m_max_num_patches); ::cub::DeviceReduce::Max(m_d_cub_temp_storage_max, m_cub_max_bytes, m_d_patches_size, m_d_max_patch_size, m_max_num_patches); CUDA_ERROR( cudaMalloc((void**)&m_d_cub_temp_storage_scan, m_cub_scan_bytes)); CUDA_ERROR(cudaMalloc((void**)&m_d_cub_temp_storage_max, m_cub_max_bytes)); } void Patcher::print_statistics() { RXMESH_TRACE("Patcher: num_patches = {}", m_num_patches); RXMESH_TRACE("Patcher: patches_size = {}", m_patch_size); RXMESH_TRACE("Patcher: num_components = {}", m_num_components); // patching time RXMESH_TRACE("Patcher: Num lloyd run = {}", m_num_lloyd_run); RXMESH_TRACE( "Patcher: Parallel patches construction time = {} (ms) and {} " "(ms/lloyd_run)", m_patching_time_ms, m_patching_time_ms / float(m_num_lloyd_run)); // max-min patch size uint32_t max_patch_size(0), min_patch_size(m_num_faces), avg_patch_size(0); get_max_min_avg_patch_size(min_patch_size, max_patch_size, avg_patch_size); RXMESH_TRACE( "Patcher: max_patch_size= {}, min_patch_size= {}, avg_patch_size= {}", max_patch_size, min_patch_size, avg_patch_size); RXMESH_TRACE("Patcher: number external ribbon faces = {} 
({:02.2f}%)", get_num_ext_ribbon_faces(), get_ribbon_overhead()); } void Patcher::initialize_random_seeds(const std::vector<uint32_t>& ff_offset, const std::vector<uint32_t>& ff_values) { // 1) Identify the components i.e., for each component list the faces // that belong to that it // 2) Generate number of (random) seeds in each component // proportional to the number of faces it contain std::vector<std::vector<uint32_t>> components; get_multi_components(components, ff_offset, ff_values); m_num_components = components.size(); if (m_num_components == 1) { initialize_random_seeds_single_component(); } else { if (m_num_seeds <= m_num_components) { // we have too many components so we increase the number of // seeds. this case should not be encountered frequently // since we generate only one seed per component m_num_seeds = m_num_components; for (auto& comp : components) { generate_random_seed_from_component(comp, 1); } } else { // if we have more seeds to give than the number of components, // then first secure that we have at least one seed per // component then we calculate the number of extra/remaining // seeds that will need be added. Every component then will have // a weight proportional to its size that tells how many of // these remaining seeds it can take uint32_t num_remaining_seeds = m_num_seeds - m_num_components; uint32_t num_extra_seeds_inserted = 0; // sort the order of the component to be processed by their size std::vector<size_t> component_order(components.size()); fill_with_sequential_numbers(component_order.data(), component_order.size()); std::sort(component_order.begin(), component_order.end(), [&components](const size_t& a, const size_t& b) { return components[a].size() > components[b].size(); }); // process components in descending order with respect to their // size for (size_t c = 0; c < component_order.size(); ++c) { std::vector<uint32_t>& comp = components[component_order[c]]; uint32_t size = comp.size(); // this weight tells how many extra faces this component // have from num_remaining_seeds float weight = static_cast<float>(size) / static_cast<float>(m_num_faces); uint32_t component_num_seeds = static_cast<uint32_t>(std::ceil( weight * static_cast<float>(num_remaining_seeds))); num_extra_seeds_inserted += component_num_seeds; if (num_extra_seeds_inserted > num_remaining_seeds) { if (num_extra_seeds_inserted - num_remaining_seeds > component_num_seeds) { component_num_seeds = 0; } else { component_num_seeds -= (num_extra_seeds_inserted - num_remaining_seeds); } } component_num_seeds += 1; generate_random_seed_from_component(comp, component_num_seeds); } } } assert(m_num_patches == m_seeds.size()); } void Patcher::initialize_random_seeds_single_component() { // if not multi-component, just generate random number std::vector<uint32_t> rand_num(m_num_faces); fill_with_sequential_numbers(rand_num.data(), rand_num.size()); random_shuffle(rand_num.data(), rand_num.size()); m_seeds.resize(m_num_seeds); std::memcpy( m_seeds.data(), rand_num.data(), m_num_seeds * sizeof(uint32_t)); } void Patcher::generate_random_seed_from_component( std::vector<uint32_t>& component, const uint32_t num_seeds) { // generate seeds from faces in component. 
// num_seeds is the number of seeds that will be generated uint32_t num_seeds_before = m_seeds.size(); if (num_seeds < 1) { RXMESH_ERROR( "Patcher::generate_random_seed_in_component() num_seeds should be " "larger than 1"); } random_shuffle(component.data(), component.size()); m_seeds.resize(num_seeds_before + num_seeds); std::memcpy(m_seeds.data() + num_seeds_before, component.data(), num_seeds * sizeof(uint32_t)); } void Patcher::get_multi_components( std::vector<std::vector<uint32_t>>& components, const std::vector<uint32_t>& ff_offset, const std::vector<uint32_t>& ff_values) { std::vector<bool> visited(m_num_faces, false); for (uint32_t f = 0; f < m_num_faces; ++f) { if (!visited[f]) { std::vector<uint32_t> current_component; // just a guess current_component.reserve( static_cast<uint32_t>(static_cast<double>(m_num_faces) / 10.0)); std::queue<uint32_t> face_queue; face_queue.push(f); while (!face_queue.empty()) { uint32_t face = face_queue.front(); face_queue.pop(); uint32_t start = (face == 0) ? 0 : ff_offset[face - 1]; uint32_t end = ff_offset[face]; for (uint32_t f = start; f < end; ++f) { uint32_t n_face = ff_values[f]; if (!visited[n_face]) { current_component.push_back(n_face); face_queue.push(n_face); visited[n_face] = true; } } } components.push_back(current_component); } } } void Patcher::postprocess(const std::vector<std::vector<uint32_t>>& fv, const std::vector<uint32_t>& ff_offset, const std::vector<uint32_t>& ff_values) { // Post process the patches by extracting the ribbons // // For patch P, we start first by identifying boundary faces; faces that has // an edge on P's boundary. These faces are captured by querying the // adjacent faces for each face in P. If any of these adjacent faces are not // in the same patch, then this face is a boundary face. From these boundary // faces we can extract boundary vertices. We also now know which patch is // neighbor to P. Then we can use the boundary vertices to find the faces // that are incident to these vertices on the neighbor patches std::vector<uint32_t> frontier; frontier.reserve(m_num_faces); std::vector<uint32_t> bd_vertices; bd_vertices.reserve(m_patch_size); // build vertex incident faces std::vector<std::vector<uint32_t>> vertex_incident_faces( m_num_vertices, std::vector<uint32_t>(10)); for (uint32_t i = 0; i < vertex_incident_faces.size(); ++i) { vertex_incident_faces[i].clear(); } for (uint32_t face = 0; face < m_num_faces; ++face) { for (uint32_t v = 0; v < fv[face].size(); ++v) { vertex_incident_faces[fv[face][v]].push_back(face); } } for (uint32_t cur_p = 0; cur_p < m_num_patches; ++cur_p) { uint32_t p_start = (cur_p == 0) ? 0 : m_patches_offset[cur_p - 1]; uint32_t p_end = m_patches_offset[cur_p]; bd_vertices.clear(); frontier.clear(); //***** Pass One // 1) build a frontier of the boundary faces by loop over all faces and // add those that has an edge on the patch boundary for (uint32_t fb = p_start; fb < p_end; ++fb) { uint32_t face = m_patches_val[fb]; bool added = false; uint32_t start = (face == 0) ? 
0 : ff_offset[face - 1]; uint32_t end = ff_offset[face]; for (uint32_t g = start; g < end; ++g) { uint32_t n = ff_values[g]; uint32_t n_patch = get_face_patch_id(n); // n is boundary face if its patch is not the current patch we // are processing if (n_patch != cur_p) { if (!added) { frontier.push_back(face); added = true; } // find/add the boundary vertices; these are the vertices // that are shared between face and n // add the common vertices in fv[face] and fv[n] for (uint32_t i = 0; i < fv[face].size(); ++i) { auto it_vf = std::find(fv[n].begin(), fv[n].end(), fv[face][i]); if (it_vf != fv[n].end()) { bd_vertices.push_back(fv[face][i]); } } // we don't break out of this loop because we want to get // all the boundary vertices // break; } } } // Sort boundary vertices so we can use binary_search std::sort(bd_vertices.begin(), bd_vertices.end()); // remove duplicated vertices inplace_remove_duplicates_sorted(bd_vertices); //***** Pass Two // 3) for every vertex on the patch boundary, we add all the faces // that are incident to it and not in the current patch m_ribbon_ext_offset[cur_p] = (cur_p == 0) ? 0 : m_ribbon_ext_offset[cur_p - 1]; uint32_t r_start = m_ribbon_ext_offset[cur_p]; for (uint32_t v = 0; v < bd_vertices.size(); ++v) { uint32_t vert = bd_vertices[v]; for (uint32_t f = 0; f < vertex_incident_faces[vert].size(); ++f) { uint32_t face = vertex_incident_faces[vert][f]; if (get_face_patch_id(face) != cur_p) { // make sure we have not added face before bool added = false; uint32_t r_end = m_ribbon_ext_offset[cur_p]; for (uint32_t r = r_start; r < r_end; ++r) { if (m_ribbon_ext_val[r] == face) { added = true; break; } } if (!added) { m_ribbon_ext_val[m_ribbon_ext_offset[cur_p]] = face; m_ribbon_ext_offset[cur_p]++; if (m_ribbon_ext_offset[cur_p] == m_num_faces) { // need to expand m_ribbon_ext_val. This occurs // mostly for small meshes with small patch size // such that the amount overlap between exterior // ribbon of different patches is larger than // m_num_faces uint32_t new_size = m_ribbon_ext_val.size() * 2; m_ribbon_ext_val.resize(new_size); } assert(m_ribbon_ext_offset[cur_p] <= m_ribbon_ext_val.size()); } } } } } m_ribbon_ext_val.resize(m_ribbon_ext_offset[m_num_patches - 1]); } void Patcher::assign_patch( const std::vector<std::vector<uint32_t>>& fv, const std::unordered_map<std::pair<uint32_t, uint32_t>, uint32_t, ::rxmesh::detail::edge_key_hash> edges_map) { // For every patch p, for every face in the patch, find the three edges // that bound that face, and assign them to the patch. For boundary vertices // and edges assign them to one patch (TODO smallest face count). For now, // we assign it to the first patch for (uint32_t cur_p = 0; cur_p < m_num_patches; ++cur_p) { uint32_t p_start = (cur_p == 0) ? 
0 : m_patches_offset[cur_p - 1]; uint32_t p_end = m_patches_offset[cur_p]; for (uint32_t f = p_start; f < p_end; ++f) { uint32_t face = m_patches_val[f]; uint32_t v1 = fv[face].back(); for (uint32_t v = 0; v < fv[face].size(); ++v) { uint32_t v0 = fv[face][v]; std::pair<uint32_t, uint32_t> key = ::rxmesh::detail::edge_key(v0, v1); uint32_t edge_id = edges_map.at(key); if (m_vertex_patch[v0] == INVALID32) { m_vertex_patch[v0] = cur_p; } if (m_edge_patch[edge_id] == INVALID32) { m_edge_patch[edge_id] = cur_p; } v1 = v0; } } } CUDA_ERROR(cudaMemcpy(m_d_edge_patch, m_edge_patch.data(), sizeof(uint32_t) * (m_num_edges), cudaMemcpyHostToDevice)); CUDA_ERROR(cudaMemcpy(m_d_vertex_patch, m_vertex_patch.data(), sizeof(uint32_t) * (m_num_vertices), cudaMemcpyHostToDevice)); } void Patcher::run_lloyd() { std::vector<uint32_t> h_queue_ptr{0, m_num_patches, m_num_patches}; //CUDA_ERROR(cudaProfilerStart()); GPUTimer timer; timer.start(); m_num_lloyd_run = 0; while (true) { ++m_num_lloyd_run; const uint32_t threads_s = 256; const uint32_t blocks_s = DIVIDE_UP(m_num_patches, threads_s); const uint32_t threads_f = 256; const uint32_t blocks_f = DIVIDE_UP(m_num_faces, threads_f); // add more seeds if needed if (m_num_lloyd_run % 5 == 0 && m_num_lloyd_run > 0) { uint32_t threshold = m_patch_size; CUDA_ERROR(cudaMemcpy(m_d_new_num_patches, &m_num_patches, sizeof(uint32_t), cudaMemcpyHostToDevice)); add_more_seeds<<<m_num_patches, 1>>>(m_num_patches, m_d_new_num_patches, m_d_seeds, m_d_patches_offset, m_d_patches_val, threshold); CUDA_ERROR(cudaMemcpy(&m_num_patches, m_d_new_num_patches, sizeof(uint32_t), cudaMemcpyDeviceToHost)); if (m_num_patches >= m_max_num_patches) { RXMESH_ERROR( "Patcher::run_lloyd() m_num_patches exceeds " "m_max_num_patches"); } } h_queue_ptr[0] = 0; h_queue_ptr[1] = m_num_patches; h_queue_ptr[2] = m_num_patches; CUDA_ERROR(cudaMemcpy(m_d_queue_ptr, h_queue_ptr.data(), 3 * sizeof(uint32_t), cudaMemcpyHostToDevice)); rxmesh::memset<<<blocks_f, threads_f>>>( m_d_face_patch, INVALID32, m_num_faces); rxmesh::memcpy<<<blocks_s, threads_s>>>( m_d_queue, m_d_seeds, m_num_patches); rxmesh::memset<<<blocks_s, threads_s>>>( m_d_patches_size, 0u, m_num_patches); write_initial_face_patch<<<blocks_s, threads_s>>>( m_num_patches, m_d_face_patch, m_d_seeds, m_d_patches_size); // Cluster seed propagation while (true) { // Launch enough threads to cover all the faces. 
However, only // subset will do actual work depending on the queue size cluster_seed_propagation<<<blocks_f, threads_f>>>(m_num_faces, m_num_patches, m_d_queue_ptr, m_d_queue, m_d_face_patch, m_d_patches_size, m_d_ff_offset, m_d_ff_values); reset_queue_ptr<<<1, 1>>>(m_d_queue_ptr); CUDA_ERROR(cudaMemcpy(h_queue_ptr.data(), m_d_queue_ptr, sizeof(uint32_t), cudaMemcpyDeviceToHost)); if (h_queue_ptr[0] >= m_num_faces) { break; } } uint32_t max_patch_size = construct_patches_compressed_format(); // Interior uint32_t threads_i = 512; uint32_t shmem_bytes = max_patch_size * (sizeof(uint32_t)); rxmesh::memset<<<blocks_f, threads_f>>>( m_d_queue, INVALID32, m_num_faces); interior<<<m_num_patches, threads_i, shmem_bytes>>>(m_num_patches, m_d_patches_offset, m_d_patches_val, m_d_face_patch, m_d_seeds, m_d_ff_offset, m_d_ff_values, m_d_queue); if (max_patch_size < m_patch_size) { shift<<<blocks_f, threads_f>>>( m_num_faces, m_d_face_patch, m_d_patches_val); break; } } timer.stop(); CUDA_ERROR(cudaDeviceSynchronize()); CUDA_ERROR(cudaGetLastError()); m_patching_time_ms = timer.elapsed_millis(); //CUDA_ERROR(cudaProfilerStop()); // move data to host m_num_seeds = m_num_patches; m_seeds.resize(m_num_seeds); CUDA_ERROR(cudaMemcpy(m_seeds.data(), m_d_seeds, m_num_seeds * sizeof(uint32_t), cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(m_face_patch.data(), m_d_face_patch, sizeof(uint32_t) * m_num_faces, cudaMemcpyDeviceToHost)); m_patches_offset.resize(m_num_patches); CUDA_ERROR(cudaMemcpy(m_patches_offset.data(), m_d_patches_offset, sizeof(uint32_t) * m_num_patches, cudaMemcpyDeviceToHost)); CUDA_ERROR(cudaMemcpy(m_patches_val.data(), m_d_patches_val, sizeof(uint32_t) * m_num_faces, cudaMemcpyDeviceToHost)); GPU_FREE(m_d_ff_values); GPU_FREE(m_d_ff_offset); GPU_FREE(m_d_new_num_patches); GPU_FREE(m_d_max_patch_size); GPU_FREE(m_d_cub_temp_storage_scan); GPU_FREE(m_d_cub_temp_storage_max); m_cub_max_bytes = 0; m_cub_scan_bytes = 0; GPU_FREE(m_d_seeds); GPU_FREE(m_d_queue); GPU_FREE(m_d_queue_ptr); GPU_FREE(m_d_patches_offset); GPU_FREE(m_d_patches_size); GPU_FREE(m_d_patches_val); } uint32_t Patcher::construct_patches_compressed_format() { uint32_t max_patch_size = 0; const uint32_t threads_s = 256; const uint32_t blocks_s = DIVIDE_UP(m_num_patches, threads_s); const uint32_t threads_f = 256; const uint32_t blocks_f = DIVIDE_UP(m_num_faces, threads_f); // Compute max patch size max_patch_size = 0; ::cub::DeviceReduce::Max(m_d_cub_temp_storage_max, m_cub_max_bytes, m_d_patches_size, m_d_max_patch_size, m_num_patches); CUDA_ERROR(cudaMemcpy(&max_patch_size, m_d_max_patch_size, sizeof(uint32_t), cudaMemcpyDeviceToHost)); // Construct compressed patches ::cub::DeviceScan::InclusiveSum(m_d_cub_temp_storage_scan, m_cub_scan_bytes, m_d_patches_size, m_d_patches_offset, m_num_patches); rxmesh::memset<<<blocks_s, threads_s>>>( m_d_patches_size, 0u, m_num_patches); construct_patches_compressed<<<blocks_f, threads_f>>>(m_num_faces, m_d_face_patch, m_num_patches, m_d_patches_offset, m_d_patches_size, m_d_patches_val); return max_patch_size; } } // namespace patcher } // namespace rxmesh
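// ---------------------------------------------------------------------------
// Illustrative sketch (not part of rxmesh): allocate_device_memory() and
// construct_patches_compressed_format() above use CUB's two-phase calling convention:
// a first call with a null temp-storage pointer only reports the scratch size, the
// buffer is allocated once, and the second call does the actual work. A minimal
// standalone version of that pattern, with hypothetical names d_in/d_out/n:
#include <cub/cub.cuh>
inline void inclusive_sum_u32(const uint32_t* d_in, uint32_t* d_out, int n) {
    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;
    ::cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n); // size query only
    cudaMalloc(&d_temp, temp_bytes);
    ::cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n); // real scan
    cudaFree(d_temp);
}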
#include "include/common.h" #define BLOCK_SIZE_X 32 #define BLOCK_SIZE_Y 32 using Matf31da = Eigen::Matrix<float, 3, 1, Eigen::DontAlign>; namespace kinectfusion { namespace internal { namespace cuda { // 卧槽, nvcc 支持模板函数啊, 牛逼 // 设备端的函数, 用于执行归约累加的操作 template<int SIZE> static __device__ __forceinline__ // volatile 关键字禁止了 nvcc 编译器优化掉这个变量, 确保每次都要读值, 避免了潜在的使用上次用剩下的指针的可能 void reduce(volatile double* buffer) { // step 0 获取当前线程id , 每个线程对应其中的一个 const int thread_id = threadIdx.y * blockDim.x + threadIdx.x; double value = buffer[thread_id]; // step 1 归约过程开始, 之所以这样做是为了充分利用 GPU 的并行特性 if (SIZE >= 1024) { if (thread_id < 512) buffer[thread_id] = value = value + buffer[thread_id + 512]; // 一定要同步! 因为如果block规模很大的话, 其中的线程是分批次执行的, 这里就会得到错误的结果 __syncthreads(); } if (SIZE >= 512) { if (thread_id < 256) buffer[thread_id] = value = value + buffer[thread_id + 256]; __syncthreads(); } if (SIZE >= 256) { if (thread_id < 128) buffer[thread_id] = value = value + buffer[thread_id + 128]; __syncthreads(); } if (SIZE >= 128) { if (thread_id < 64) buffer[thread_id] = value = value + buffer[thread_id + 64]; __syncthreads(); } // step 2 随着归约过程的进行, 当最后剩下的几个线程都在一个warp中时, 就不用考虑线程间同步的问题了, 这样操作可以更快 // 因为在 128 折半之后, 有64个数据等待加和, 此时需要使用的线程数目不会超过32个. // 而一个warp,正好是32个线程, 所以如果我们使用这32个线程(或者更少的话)就不会遇到线程间同步的问题了(单指令多数据模式, 这32个线程会共享一套取指令单元, 一定是同时完成工作的) // 只激活低32个线程, CUDA 中底层的这32个线程一定是在一个warp上进行的. if (thread_id < 32) { if (SIZE >= 64) buffer[thread_id] = value = value + buffer[thread_id + 32]; if (SIZE >= 32) buffer[thread_id] = value = value + buffer[thread_id + 16]; if (SIZE >= 16) buffer[thread_id] = value = value + buffer[thread_id + 8]; if (SIZE >= 8) buffer[thread_id] = value = value + buffer[thread_id + 4]; if (SIZE >= 4) buffer[thread_id] = value = value + buffer[thread_id + 2]; if (SIZE >= 2) buffer[thread_id] = value = value + buffer[thread_id + 1]; } // 判断当前需要激活的线程是否少于32个 } __global__ void estimate_kernel( const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation_current, // 上次迭代得到的旋转 Rwc const Matf31da translation_current, // 上次迭代得到的平移 twc const PtrStep<float3> vertex_map_current, // 当前帧对应图层的顶点图 const PtrStep<float3> normal_map_current, // 当前帧对应图层的法向图 const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation_previous_inv, // 上一帧相机的旋转, Rcw const Matf31da translation_previous, // 上一帧相机的平移, twc const CameraParameters cam_params, // 当前图层的相机内参 const PtrStep<float3> vertex_map_previous, // 上一帧相机位姿推理得到的表面顶点图 const PtrStep<float3> normal_map_previous, // 上一帧相机位姿推理得到的表面法向图 const float distance_threshold, // ICP 中关联匹配的最大距离阈值 const float angle_threshold, // ICP 中关联匹配的最大角度阈值 const int cols, // 当前图层的图像列数 const int rows, // 当前图层的图像行数 PtrStep<double> global_buffer) // 数据缓冲区, 暂存每个 block 中的累加和结果 { // step 0 数据准备 // 获取当前线程处理的像素坐标 const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; Matf31da n, // 目标点的法向, KinectFusion中为上一帧的点云对应的法向量 d, // 目标点, KinectFusion中为上一帧的点云 s; // 源点, KinectFusion中为当前帧的点云 // 匹配点的状态, 表示是否匹配, 初始值为 false bool correspondence_found = false; // step 1 当处理的像素位置合法时进行 // ? 
-- 进行投影数据关联 if (x < cols && y < rows) { // step 1.1 获取当前帧点云法向量的x坐标, 判断其法向是否存在 Matf31da normal_current; normal_current.x() = normal_map_current.ptr(y)[x].x; // 如果是个非数, 就认为这个法向是不存在的 if (!isnan(normal_current.x())) { // step 1.2 获取的点云法向量确实存在, // 获取当前帧的顶点 Matf31da vertex_current; vertex_current.x() = vertex_map_current.ptr(y)[x].x; vertex_current.y() = vertex_map_current.ptr(y)[x].y; vertex_current.z() = vertex_map_current.ptr(y)[x].z; // 将当前帧的顶点坐标转换到世界坐标系下 Pw = Rwc * Pc + twc Matf31da vertex_current_global = rotation_current * vertex_current + translation_current; // 这个顶点在上一帧相机坐标系下的坐标 Pc(k-1) = Rcw(k-1) * (Pw - twc(k-1)) // ! 这里就是为什么要对旋转求逆的原因了 Matf31da vertex_current_camera = rotation_previous_inv * (vertex_current_global - translation_previous); // 接着将该空间点投影到上一帧的图像中坐标系中 Eigen::Vector2i point; // __float2int_rd 向下舍入, +0.5 是为了实现"四舍五入"的效果 point.x() = __float2int_rd( vertex_current_camera.x() * cam_params.focal_x / vertex_current_camera.z() + cam_params.principal_x + 0.5f); point.y() = __float2int_rd( vertex_current_camera.y() * cam_params.focal_y / vertex_current_camera.z() + cam_params.principal_y + 0.5f); // 检查投影点是否在图像中 if (point.x() >= 0 && point.y() >= 0 && point.x() < cols && point.y() < rows && vertex_current_camera.z() >= 0) { // 如果在的话, 说明数据关联有戏. 但是还需要检查两个地方 // 我们先获取上一帧的疑似关联点的法向 Matf31da normal_previous_global; normal_previous_global.x() = normal_map_previous.ptr(point.y())[point.x()].x; // 如果它确认存在 if (!isnan(normal_previous_global.x())) { // 获取对应顶点 Matf31da vertex_previous_global; vertex_previous_global.x() = vertex_map_previous.ptr(point.y())[point.x()].x; vertex_previous_global.y() = vertex_map_previous.ptr(point.y())[point.x()].y; vertex_previous_global.z() = vertex_map_previous.ptr(point.y())[point.x()].z; // 距离检查, 如果顶点距离相差太多则认为不是正确的点 const float distance = (vertex_previous_global - vertex_current_global).norm(); if (distance <= distance_threshold) { // 获取完整的当前帧该顶点的法向, 获取的过程移动到这里的主要目的也是为了避免不必要的计算 normal_current.y() = normal_map_current.ptr(y)[x].y; normal_current.z() = normal_map_current.ptr(y)[x].z; // 上面获取的法向是在当前帧相机坐标系下表示的, 这里需要转换到世界坐标系下的表示 Matf31da normal_current_global = rotation_current * normal_current; // 同样获取完整的, 在上一帧中对应顶点的法向. 注意在平面推理阶段得到的法向就是在世界坐标系下的表示 // TODO 确认一下 normal_previous_global.y() = normal_map_previous.ptr(point.y())[point.x()].y; normal_previous_global.z() = normal_map_previous.ptr(point.y())[point.x()].z; // 通过计算叉乘得到两个向量夹角的正弦值. 由于 |axb|=|a||b|sin \alpha, 所以叉乘计算得到的向量的模就是 sin \alpha const float sine = normal_current_global.cross(normal_previous_global).norm(); // ? 应该是夹角越大, sine 越大啊, 为什么这里是大于等于??? if (sine >= angle_threshold) { // 认为通过检查, 保存关联结果和产生的数据 n = normal_previous_global; d = vertex_previous_global; s = vertex_current_global; correspondence_found = true; }// 通过关联的角度检查 }// 通过关联的距离检查 }// 上一帧中的关联点有法向 }// 当前帧的顶点对应的空间点的对上一帧的重投影点在图像中 }// 当前帧的顶点的法向量存在 }// 当前线程处理的像素位置在图像范围中 // 保存计算结果. 根据推导, 对于每个点, 对矩阵A贡献有6个元素, 对向量b贡献有一个元素 float row[7]; // 只有对成功匹配的点才会进行的操作. 这个判断也会滤除那些线程坐标不在图像中的线程, 这样做可以减少程序中的分支数目 if (correspondence_found) { // 前面的强制类型转换符号, 目测是为了转换成为 Eigen 中表示矩阵中浮点数元素的类型, 可以将计算结果直接一次写入到 row[0] row[1] row[2] // 矩阵A中的两个主要元素 *(Matf31da*) &row[0] = s.cross(n); *(Matf31da*) &row[3] = n; // 矩阵b中当前点贡献的部分 row[6] = n.dot(d - s); } else // 如果没有找到匹配的点, 或者说是当前线程的id不在图像区域中, 就全都给0 // 这样反映在最后的结果中, 就是图像中的这个区域对最后的误差项没有任何贡献, 相当于不存在一样 // 貌似这样计算量是多了,但是相比之下GPU更不适合在计算总矩阵A的时候进行多种分支的处理 row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; // 存放在 shared_memory. 
每一个 block 中的线程共享一个区域的shared_memory // smem = Shared MEMory __shared__ double smem[BLOCK_SIZE_X * BLOCK_SIZE_Y]; // 计算当前线程的一维索引 const int tid = threadIdx.y * blockDim.x + threadIdx.x; int shift = 0; for (int i = 0; i < 6; ++i) { // Rows for (int j = i; j < 7; ++j) { // Columns and B // 同步当前线程块中的所有线程执行到这里, 避免出现竞争的情况 __syncthreads(); // 如果把向量中的每个元素都拆分出来的话, 可以发现本质上是对这27个元素累加, 如果我们拿到了最后这27项的累加和, 我们就可以构造矩阵A和向量b了 // 这里就是在计算其中的一项, 当前线程, 或者说当前的这个像素给的贡献 smem[tid] = row[i] * row[j]; // 再同步一次, 确保所有的线程都完成了写入操作, 避免出现"某个线程还在写数据,但是出现了另外的线程还在读数据"的情况 __syncthreads(); // Block 内对该元素归约 // 调用这个函数的时候使用当前线程自己的线程id // 因为我们最终的目的是对于这一项, 要将所有线程的贡献累加; 累加的过程分为两个阶段, 一个是每个block 内相加,而是对于所有的Block的和,再进行相加. // 这里进行的是每个 block 中相加的一步 reduce<BLOCK_SIZE_X * BLOCK_SIZE_Y>(smem); // 当前 block 中的线程#0 负责将归约之后的结果保存到 global_buffer 中. // shift 其实就是对应着"当前累加的和是哪一项"这一点; 当前block的结果先放在指定位置, 等全部完事之后再在每个block中的累加和已知的基础上,进行归约求和 if (tid == 0) global_buffer.ptr(shift++)[gridDim.x * blockIdx.y + blockIdx.x] = smem[0]; } }// 归约累加 } // 在每个 Block 已经完成累加的基础上, 进行全局的归约累加 __global__ void reduction_kernel(PtrStep<double> global_buffer, const int length, PtrStep<double> output) { double sum = 0.0; // 每个线程对应一个 block 的某项求和的结果, 获取之 // 但是 blocks 可能很多, 这里是以512为一批进行获取, 加和处理的. 640x480只用到300个blocks. for (int t = threadIdx.x; t < length; t += 512) sum += *(global_buffer.ptr(blockIdx.x) + t); // 对于 GTX 1070, 每个 block 的 shared_memory 最大大小是 48KB, 足够使用了, 这里相当于只用了 1/12 // 前面设置线程个数为这些, 也是为了避免每个 block 中的 shared memory 超标, 又能够尽可能地使用所有的 shared memory __shared__ double smem[512]; // 注意超过范围的线程也能够执行到这里, 上面的循环不会执行, sum=0, 因此保存到 smem 对后面的归约过程没有影响 smem[threadIdx.x] = sum; // 同时运行512个, 一个warp装不下,保险处理就是进行同步 __syncthreads(); // 512个线程都归约计算 reduce<512>(smem); // 第0线程负责将每一项的最终求和结果进行转存 if (threadIdx.x == 0) output.ptr(blockIdx.x)[0] = smem[0]; }; // 使用GPU并行计算矩阵A和向量b void estimate_step( const Eigen::Matrix3f& rotation_current, // 上次迭代得到的旋转 Rwc const Matf31da& translation_current, // 上次迭代得到的平移 twc const cv::cuda::GpuMat& vertex_map_current, // 当前帧对应图层的的顶点图 const cv::cuda::GpuMat& normal_map_current, // 当前帧对应图层的的法向图 const Eigen::Matrix3f& rotation_previous_inv, // 上一帧相机外参中的旋转的逆, Rcw const Matf31da& translation_previous, // 上一帧相机的平移 twc const CameraParameters& cam_params, // 当前图层的相机内参 const cv::cuda::GpuMat& vertex_map_previous, // 对应图层的推理得到的平面顶点图 const cv::cuda::GpuMat& normal_map_previous, // 对应图层的推理得到的平面法向图 float distance_threshold, // ICP迭代过程中视为外点的距离阈值 float angle_threshold, // ICP迭代过程中视为外点的角度阈值(角度变正弦值) Eigen::Matrix<double, 6, 6, Eigen::RowMajor>& A, // 计算得到的矩阵 A, 行优先 Eigen::Matrix<double, 6, 1>& b) // 计算得到的向量 b { // step 0 计算需要的线程规模, 每个线程处理当前图像中的一个像素 const int cols = vertex_map_current.cols; const int rows = vertex_map_current.rows; // 32 x 32, 但是这里相当于设置的 threads dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y); // 这里还开了多个 Grid -- 但是这里相当于设置的 blocks dim3 grid(1, 1); grid.x = static_cast<unsigned int>(std::ceil(cols / block.x)); grid.y = static_cast<unsigned int>(std::ceil(rows / block.y)); // step 1 创建缓冲区 // 首先需要解释一下为什么是27项. 这个部分需要根据 estimate_kernel 函数中的 row[7] 得出. 
    // The contribution of one matched point pair to matrix A is:
    //   Ai = | pi x ni | * | (pi x ni)^T, ni^T |
    //        |   ni    |
    // row[0]~row[2] hold pi x ni
    // row[3]~row[5] hold ni
    // The contribution of one matched point pair to vector b is:
    //   bi = | (pi x ni) * ((di - si) . ni) |
    //        |     ni    * ((di - si) . ni) |
    // so row[6] holds (di - si) . ni
    //
    // Looking only at the row[] indices, the expansion is:
    //                                      | 0x0 0x1 0x2 |
    //   Ci(0,0) = (pi x ni)(pi x ni)^T  =  | 0x1 1x1 1x2 |,  6 distinct terms: 0x0 0x1 0x2 1x1 1x2 2x2
    //                                      | 0x2 1x2 2x2 |   buffer page ids:  0   1   2   7   8   13
    //
    //   Ci(0,1) = Ci(1,0)^T                | 0x3 0x4 0x5 |
    //           = (pi x ni) ni^T        =  | 1x3 1x4 1x5 |,  9 terms: 0x3 0x4 0x5 1x3 1x4 1x5 2x3 2x4 2x5
    //                                      | 2x3 2x4 2x5 |   pages:   3   4   5   9   10  11  14  15  16
    //
    //                                      | 3x3 3x4 3x5 |
    //   Ci(1,1) = ni ni^T               =  | 3x4 4x4 4x5 |,  6 distinct terms: 3x3 3x4 3x5 4x4 4x5 5x5
    //                                      | 3x5 4x5 5x5 |   pages:            18  19  20  22  23  25
    //
    //                                      | 0x6 |
    //   bi(0)  = (pi x ni) * row[6]     =  | 1x6 |,          3 terms: 0x6 1x6 2x6
    //                                      | 2x6 |           pages:   6   12  17
    //
    //                                      | 3x6 |
    //   bi(1)  = ni * row[6]            =  | 4x6 |,          3 terms: 3x6 4x6 5x6
    //                                      | 5x6 |           pages:   21  24  26
    //
    // So the contribution of each point pair to the final A and b decomposes into these 27 terms. If we sum the
    // contributions of all matched pairs for each term separately, we can assemble the final matrix A and vector b.
    // The summation runs in two stages. In the first stage each block accumulates its own partial sums into
    // global_buffer, which has one page per term, each page holding as many entries as there are blocks
    cv::cuda::GpuMat global_buffer { cv::cuda::createContinuous(27, grid.x * grid.y, CV_64FC1) };
    // In the second stage the per-block sums are added up to obtain the final 27 sums, stored in sum_buffer
    // (one final value per term, 27 in total)
    cv::cuda::GpuMat sum_buffer { cv::cuda::createContinuous(27, 1, CV_64FC1) };

    // step 2.1 Launch the kernel. For every pixel it performs the data association, computes the error contribution,
    // and accumulates the per-block sums of those contributions.
    // Note that the block/grid variables here play the role of the threads/blocks used earlier.
    estimate_kernel<<<grid, block>>>(
        rotation_current,       // rotation from the previous iteration, Rwc
        translation_current,    // translation from the previous iteration, twc
        vertex_map_current,     // vertex map of the current frame at this level
        normal_map_current,     // normal map of the current frame at this level
        rotation_previous_inv,  // rotation of the previous camera pose, Rcw
        translation_previous,   // translation of the previous camera pose, twc
        cam_params,             // camera intrinsics of this level
        vertex_map_previous,    // predicted surface vertex map at the previous pose
        normal_map_previous,    // predicted surface normal map at the previous pose
        distance_threshold,     // maximum distance for an ICP correspondence
        angle_threshold,        // maximum angle (as a sine) for an ICP correspondence
        cols,                   // image columns at this level
        rows,                   // image rows at this level
        global_buffer);         // buffer holding the per-block partial sums

    // step 2.2 Global reduction on top of the per-block partial sums
    reduction_kernel<<<27, 512>>>(  // 27 = number of terms; 512 = threads per block, i.e. how many per-block sums are
                                    // fetched per batch. If there are more blocks than that, the loop inside the
                                    // kernel keeps accumulating until every per-block sum has been consumed
        global_buffer,              // per-block partial sums
        grid.x * grid.y,            // size of each of the 27 pages of global_buffer (one entry per block)
        sum_buffer);                // output: the total contribution of all matched pairs to the 27 terms

    // step 3 Download the values computed on the GPU and assemble matrix A and vector b on the CPU
    // Download
    cv::Mat host_data { 27, 1, CV_64FC1 };
    sum_buffer.download(host_data);

    // Assemble
    // Following the derivation above, the final A and b expressed with the row[] indices are:
    //       | 0x0 0x1 0x2 0x3 0x4 0x5 |                      | 00 01 02 03 04 05 |
    //       | 0x1 1x1 1x2 1x3 1x4 1x5 |                      | 01 07 08 09 10 11 |
    //       | 0x2 1x2 2x2 2x3 2x4 2x5 |   by buffer index    | 02 08 13 14 15 16 |
    //   A = | 0x3 1x3 2x3 3x3 3x4 3x5 |  ================    | 03 09 14 18 19 20 |  => symmetric matrix: build the
    //       | 0x4 1x4 2x4 3x4 4x4 4x5 |                      | 04 10 15 19 22 23 |     upper triangle and mirror it
    //       | 0x5 1x5 2x5 3x5 4x5 5x5 |                      | 05 11 16 20 23 25 |     into the lower triangle
    //
    //       | 0x6 |                      | 06 |
    //       | 1x6 |                      | 12 |
    //       | 2x6 |   by buffer index    | 17 |
    //   b = | 3x6 |  ================    | 21 |  => every j == 6 entry belongs to b
    //       | 4x6 |                      | 24 |
    //       | 5x6 |                      | 26 |
    int shift = 0;
    for (int i = 0; i < 6; ++i) {       // Rows
        for (int j = i; j < 7; ++j) {   // Columns and B
            // host_data has a single column, so only index [0] is valid here
            double value = host_data.ptr<double>(shift++)[0];
            // j == 6 always belongs to b
            if (j == 6)
                b.data()[i] = value;
            else
                A.data()[j * 6 + i] = A.data()[i * 6 + j]   // symmetric assignment
                                    = value;
        }
    }
}
}
}
}
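// ----------------------------------------------------------------------------
// Editor's sketch (not part of the original file): a minimal, assumed caller
// showing how the A and b produced by estimate_step are typically consumed.
// The linearized point-to-plane energy  E = sum_i ( (R s_i + t - d_i) . n_i )^2
// has per-correspondence Jacobian row [ s_i x n_i, n_i ] and residual
// n_i . (d_i - s_i), which is exactly the row[0..6] layout above.  Solving
// A x = b gives a small rotation/translation increment x = (w, dt).
// The helper name and the convention of left-multiplying the increment onto the
// current pose are assumptions for illustration, not the original project's API.
// ----------------------------------------------------------------------------
#include <Eigen/Dense>

static void apply_icp_update(const Eigen::Matrix<double, 6, 6, Eigen::RowMajor>& A,
                             const Eigen::Matrix<double, 6, 1>& b,
                             Eigen::Matrix3f& rotation_current,     // updated in place
                             Eigen::Vector3f& translation_current)  // updated in place
{
    // A is symmetric positive semi-definite, so an LDLT factorization is appropriate
    const Eigen::Matrix<double, 6, 1> x = A.ldlt().solve(b);

    // x(0..2) is the small-angle rotation (matching the s x n block), x(3..5) the translation (matching the n block)
    const float alpha = static_cast<float>(x(0));
    const float beta  = static_cast<float>(x(1));
    const float gamma = static_cast<float>(x(2));
    Eigen::Matrix3f dR;
    dR = Eigen::AngleAxisf(gamma, Eigen::Vector3f::UnitZ()) *
         Eigen::AngleAxisf(beta,  Eigen::Vector3f::UnitY()) *
         Eigen::AngleAxisf(alpha, Eigen::Vector3f::UnitX());
    const Eigen::Vector3f dt = x.tail<3>().cast<float>();

    // Apply the increment to the current estimate
    rotation_current    = dR * rotation_current;
    translation_current = dR * translation_current + dt;
}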
#include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // CONVOLVE // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_convolve( const T *__restrict__ inp, const int inpW, const T *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, T *__restrict__ out, const int outW ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < outW; tid += stride ) { T temp {}; if ( mode == 0 ) { // Valid if ( tid >= 0 && tid < inpW ) { for ( int j = 0; j < kerW; j++ ) { temp += inp[tid + j] * kernel[( kerW - 1 ) - j]; } } } else if ( mode == 1 ) { // Same const int P1 { kerW / 2 }; int start {}; if ( !swapped_inputs ) { start = 0 - P1 + tid; } else { start = ( ( inpW - 1 ) / 2 ) - ( kerW - 1 ) + tid; } for ( int j = 0; j < kerW; j++ ) { if ( ( start + j >= 0 ) && ( start + j < inpW ) ) { temp += inp[start + j] * kernel[( kerW - 1 ) - j]; } } } else { // Full const int P1 { kerW - 1 }; const int start { 0 - P1 + tid }; for ( int j = 0; j < kerW; j++ ) { if ( ( start + j >= 0 ) && ( start + j < inpW ) ) { temp += inp[start + j] * kernel[( kerW - 1 ) - j]; } } } out[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_int32( const int *__restrict__ inp, const int inpW, const int *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, int *__restrict__ out, const int outW ) { _cupy_convolve<int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_int64( const long int *__restrict__ inp, const int inpW, const long int *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, long int *__restrict__ out, const int outW ) { _cupy_convolve<long int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_float32( const float *__restrict__ inp, const int inpW, const float *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, float *__restrict__ out, const int outW ) { _cupy_convolve<float>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_float64( const double *__restrict__ inp, const int inpW, const double *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, double *__restrict__ out, const int outW ) { _cupy_convolve<double>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_complex64( thrust::complex<float> *__restrict__ inp, const int inpW, thrust::complex<float> *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, thrust::complex<float> *__restrict__ out, const int outW ) { _cupy_convolve<thrust::complex<float>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve_complex128( const thrust::complex<double> *__restrict__ inp, const int inpW, const thrust::complex<double> *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, thrust::complex<double> *__restrict__ out, const int outW ) { _cupy_convolve<thrust::complex<double>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } 
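// ----------------------------------------------------------------------------
// Editor's note (not part of the original kernels): the `mode` argument follows
// the usual 1D convolution semantics -- 0 = "valid", 1 = "same", anything else
// (2 in practice) = "full".  A small host-side sketch of the corresponding
// output lengths, under the assumption inpW >= kerW and swapped_inputs == false;
// the helper name is made up for illustration:
// ----------------------------------------------------------------------------
static inline int conv1d_output_width(int inpW, int kerW, int mode) {
    if (mode == 0) return inpW - kerW + 1;   // valid: only fully overlapping positions
    if (mode == 1) return inpW;              // same: aligned with the (longer) input
    return inpW + kerW - 1;                  // full: every partial overlap
}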
/////////////////////////////////////////////////////////////////////////////// // CORRELATE // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_correlate( const T *__restrict__ inp, const int inpW, const T *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, T *__restrict__ out, const int outW ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < outW; tid += stride ) { T temp {}; if ( mode == 0 ) { // Valid if ( tid >= 0 && tid < inpW ) { for ( int j = 0; j < kerW; j++ ) { temp += inp[tid + j] * kernel[j]; } } } else if ( mode == 1 ) { // Same const int P1 { kerW / 2 }; int start {}; if ( !swapped_inputs ) { start = 0 - P1 + tid; } else { start = ( ( inpW - 1 ) / 2 ) - ( kerW - 1 ) + tid + 1; } for ( int j = 0; j < kerW; j++ ) { if ( ( start + j >= 0 ) && ( start + j < inpW ) ) { temp += inp[start + j] * kernel[j]; } } } else { // Full const int P1 { kerW - 1 }; const int start { 0 - P1 + tid }; for ( int j = 0; j < kerW; j++ ) { if ( ( start + j >= 0 ) && ( start + j < inpW ) ) { temp += inp[start + j] * kernel[j]; } } } if ( swapped_inputs ) { out[outW - tid - 1] = temp; // TODO: Move to shared memory } else { out[tid] = temp; } } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_int32( const int *__restrict__ inp, const int inpW, const int *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, int *__restrict__ out, const int outW ) { _cupy_correlate<int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_int64( const long int *__restrict__ inp, const int inpW, const long int *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, long int *__restrict__ out, const int outW ) { _cupy_correlate<long int>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_float32( const float *__restrict__ inp, const int inpW, const float *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, float *__restrict__ out, const int outW ) { _cupy_correlate<float>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_float64( const double *__restrict__ inp, const int inpW, const double *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, double *__restrict__ out, const int outW ) { _cupy_correlate<double>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_complex64( thrust::complex<float> *__restrict__ inp, const int inpW, thrust::complex<float> *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, thrust::complex<float> *__restrict__ out, const int outW ) { _cupy_correlate<thrust::complex<float>>( inp, inpW, kernel, kerW, mode, swapped_inputs, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_correlate_complex128( const thrust::complex<double> *__restrict__ inp, const int inpW, const thrust::complex<double> *__restrict__ kernel, const int kerW, const int mode, const bool swapped_inputs, thrust::complex<double> *__restrict__ out, const int outW ) { _cupy_correlate<thrust::complex<double>>( inp, inpW, kernel, 
kerW, mode, swapped_inputs, out, outW ); } /////////////////////////////////////////////////////////////////////////////// // CONVOLVE 2D // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_convolve2D( const T *__restrict__ inp, const int inpW, const int inpH, const T *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, T *__restrict__ out, const int outW, const int outH, const int pick ) { const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) }; int i {}; if ( pick != 3 ) { i = tx + S0; } else { i = tx + S1; } int j { ty + S0 }; int2 oPixelPos { tx, ty }; if ( ( tx < outH ) && ( ty < outW ) ) { T temp {}; // Odd kernel if ( pick == 1 ) { for ( int k = -S0; k < ( S0 + 1 ); k++ ) { for ( int l = -S0; l < ( S0 + 1 ); l++ ) { int2 iPixelPos { ( i + k ), ( j + l ) }; int2 coefPos { ( -k + S0 ), ( -l + S0 ) }; temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y]; } } // Even kernel } else if ( pick == 2 ) { for ( int k = -S0; k < S0; k++ ) { for ( int l = -S0; l < S0; l++ ) { int2 iPixelPos { ( i + k ), ( j + l ) }; int2 coefPos { ( -k + S0 - 1 ), ( -l + S0 - 1 ) }; temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y]; } } // Non-squares kernel } else { for ( int k = 0; k < S0; k++ ) { for ( int l = 0; l < S1; l++ ) { int2 iPixelPos { ( i + k - S1 ), ( j + l - S0 ) }; int2 coefPos { ( -k + S0 - 1 ), ( -l + S1 - 1 ) }; temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerH + coefPos.y]; } } } out[oPixelPos.x * outW + oPixelPos.y] = temp; } } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_int32( const int *__restrict__ inp, const int inpW, const int inpH, const int *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, int *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_convolve2D<int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_int64( const long int *__restrict__ inp, const int inpW, const int inpH, const long int *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, long int *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_convolve2D<long int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_float32( const float *__restrict__ inp, const int inpW, const int inpH, const float *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, float *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_convolve2D<float>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_float64( const double *__restrict__ inp, const int inpW, const int inpH, const double *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, double *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_convolve2D<double>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_complex64( const thrust::complex<float> *__restrict__ inp, const int inpW, const int inpH, const 
thrust::complex<float> *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, thrust::complex<float> *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_convolve2D<thrust::complex<float>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_convolve2D_complex128( const thrust::complex<double> *__restrict__ inp, const int inpW, const int inpH, const thrust::complex<double> *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, thrust::complex<double> *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_convolve2D<thrust::complex<double>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } /////////////////////////////////////////////////////////////////////////////// // CORRELATE 2D // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_correlate2D( const T *__restrict__ inp, const int inpW, const int inpH, const T *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, T *__restrict__ out, const int outW, const int outH, const int pick ) { const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) }; int i {}; if ( pick != 3 ) { i = tx + S0; } else { i = tx + S1; } int j { ty + S0 }; int2 oPixelPos { tx, ty }; if ( ( tx < outH ) && ( ty < outW ) ) { T temp {}; // Odd if ( pick == 1 ) { for ( int k = -S0; k < ( S0 + 1 ); k++ ) { for ( int l = -S0; l < ( S0 + 1 ); l++ ) { int2 iPixelPos { ( i + k ), ( j + l ) }; int2 coefPos { ( k + S0 ), ( l + S0 ) }; temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y]; } } // Even } else if ( pick == 2 ) { for ( int k = -S0; k < S0; k++ ) { for ( int l = -S0; l < S0; l++ ) { int2 iPixelPos { ( i + k ), ( j + l ) }; // iPixelPos[1], [0] int2 coefPos { ( k + S0 ), ( l + S0 ) }; temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerW + coefPos.y]; } } // Non-squares } else { for ( int k = 0; k < S0; k++ ) { for ( int l = 0; l < S1; l++ ) { int2 iPixelPos { ( i + k - S1 ), ( j + l - S0 ) }; int2 coefPos { k, l }; temp += inp[iPixelPos.x * inpW + iPixelPos.y] * kernel[coefPos.x * kerH + coefPos.y]; } } } out[oPixelPos.x * outW + oPixelPos.y] = temp; } } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_int32( const int *__restrict__ inp, const int inpW, const int inpH, const int *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, int *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_correlate2D<int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_int64( const long int *__restrict__ inp, const int inpW, const int inpH, const long int *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, long int *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_correlate2D<long int>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_float32( const float *__restrict__ inp, const int inpW, const int inpH, const float *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, float *__restrict__ out, const int outW, 
const int outH, const int pick ) { _cupy_correlate2D<float>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__(256 ) _cupy_correlate2D_float64( const double *__restrict__ inp, const int inpW, const int inpH, const double *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, double *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_correlate2D<double>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__(256 ) _cupy_correlate2D_complex64( const thrust::complex<float> *__restrict__ inp, const int inpW, const int inpH, const thrust::complex<float> *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, thrust::complex<float> *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_correlate2D<thrust::complex<float>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_correlate2D_complex128( const thrust::complex<double> *__restrict__ inp, const int inpW, const int inpH, const thrust::complex<double> *__restrict__ kernel, const int kerW, const int kerH, const int S0, const int S1, thrust::complex<double> *__restrict__ out, const int outW, const int outH, const int pick ) { _cupy_correlate2D<thrust::complex<double>>( inp, inpW, inpH, kernel, kerW, kerH, S0, S1, out, outW, outH, pick ); } /////////////////////////////////////////////////////////////////////////////// // CONVOLVE 1D2O // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_convolve1D2O( const T *__restrict__ inp, const int inpW, const T *__restrict__ kernel, const int kerW, const int kerH, const int mode, T *__restrict__ out, const int outW ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < outW; tid += stride ) { T temp {}; if ( mode == 0 ) { // Valid if ( tid >= 0 && tid < inpW ) { for ( int i = 0; i < kerW; i++ ) { for ( int j = 0; j < kerH; j++ ) { temp += inp[tid + kerW - i - 1] * inp[tid + kerH - j - 1] * kernel[ kerW * i + j]; } } } } out[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_int32( const int *__restrict__ inp, const int inpW, const int *__restrict__ kernel, const int kerW, const int kerH, const int mode, int *__restrict__ out, const int outW ) { _cupy_convolve1D2O<int>( inp, inpW, kernel, kerW, kerH, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_int64( const long int *__restrict__ inp, const int inpW, const long int *__restrict__ kernel, const int kerW, const int kerH, const int mode, long int *__restrict__ out, const int outW ) { _cupy_convolve1D2O<long int>( inp, inpW, kernel, kerW, kerH, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_float32( const float *__restrict__ inp, const int inpW, const float *__restrict__ kernel, const int kerW, const int kerH, const int mode, float *__restrict__ out, const int outW ) { _cupy_convolve1D2O<float>( inp, inpW, kernel, kerW, kerH, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_float64( const double *__restrict__ inp, const int inpW, const double *__restrict__ kernel, const int kerW, const int kerH, const int 
mode, double *__restrict__ out, const int outW ) { _cupy_convolve1D2O<double>( inp, inpW, kernel, kerW, kerH, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_complex64( thrust::complex<float> *__restrict__ inp, const int inpW, thrust::complex<float> *__restrict__ kernel, const int kerW, const int kerH, const int mode, thrust::complex<float> *__restrict__ out, const int outW ) { _cupy_convolve1D2O<thrust::complex<float>>( inp, inpW, kernel, kerW, kerH, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D2O_complex128( const thrust::complex<double> *__restrict__ inp, const int inpW, const thrust::complex<double> *__restrict__ kernel, const int kerW, const int kerH, const int mode, thrust::complex<double> *__restrict__ out, const int outW ) { _cupy_convolve1D2O<thrust::complex<double>>( inp, inpW, kernel, kerW, kerH, mode, out, outW ); } /////////////////////////////////////////////////////////////////////////////// // CONVOLVE 1D3O // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_convolve1D3O( const T *__restrict__ inp, const int inpW, const T *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, T *__restrict__ out, const int outW ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < outW; tid += stride ) { T temp {}; if ( mode == 0 ) { // Valid if ( tid >= 0 && tid < inpW ) { for ( int i = 0; i < kerW; i++ ) { for ( int j = 0; j < kerH; j++ ) { for ( int k = 0; k < kerD; k++ ) { temp += inp[tid + kerW - i - 1] * inp[tid + kerH - j - 1] * inp[tid + kerD - k - 1] * kernel[ (kerW * i + j) * kerH + k ]; } } } } } out[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_int32( const int *__restrict__ inp, const int inpW, const int *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, int *__restrict__ out, const int outW ) { _cupy_convolve1D3O<int>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_int64( const long int *__restrict__ inp, const int inpW, const long int *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, long int *__restrict__ out, const int outW ) { _cupy_convolve1D3O<long int>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_float32( const float *__restrict__ inp, const int inpW, const float *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, float *__restrict__ out, const int outW ) { _cupy_convolve1D3O<float>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_float64( const double *__restrict__ inp, const int inpW, const double *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, double *__restrict__ out, const int outW ) { _cupy_convolve1D3O<double>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_complex64( thrust::complex<float> *__restrict__ inp, const int inpW, thrust::complex<float> *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, thrust::complex<float> 
*__restrict__ out, const int outW ) { _cupy_convolve1D3O<thrust::complex<float>>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_convolve1D3O_complex128( const thrust::complex<double> *__restrict__ inp, const int inpW, const thrust::complex<double> *__restrict__ kernel, const int kerW, const int kerH, const int kerD, const int mode, thrust::complex<double> *__restrict__ out, const int outW ) { _cupy_convolve1D3O<thrust::complex<double>>( inp, inpW, kernel, kerW, kerH, kerD, mode, out, outW ); }
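// ----------------------------------------------------------------------------
// Editor's sketch (not part of the original file): these kernels are normally
// compiled and launched from Python through CuPy, but they are plain CUDA C and
// can also be driven directly.  A minimal host-side launch of
// _cupy_convolve_float32 in "full" mode (mode == 2); the 128-thread block and
// buffer handling are illustrative choices, not values taken from the library.
// ----------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <vector>

void run_full_convolution(const std::vector<float>& signal, const std::vector<float>& taps,
                          std::vector<float>& result) {
    const int inpW = static_cast<int>(signal.size());
    const int kerW = static_cast<int>(taps.size());
    const int outW = inpW + kerW - 1;            // "full" output length
    result.resize(outW);

    float *d_inp, *d_ker, *d_out;
    cudaMalloc(&d_inp, inpW * sizeof(float));
    cudaMalloc(&d_ker, kerW * sizeof(float));
    cudaMalloc(&d_out, outW * sizeof(float));
    cudaMemcpy(d_inp, signal.data(), inpW * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ker, taps.data(),   kerW * sizeof(float), cudaMemcpyHostToDevice);

    // The kernel uses a grid-stride loop, so any grid size works;
    // __launch_bounds__(512) only caps the block size at 512 threads.
    const int threads = 128;
    const int blocks  = (outW + threads - 1) / threads;
    _cupy_convolve_float32<<<blocks, threads>>>(d_inp, inpW, d_ker, kerW,
                                                /*mode=*/2, /*swapped_inputs=*/false,
                                                d_out, outW);

    cudaMemcpy(result.data(), d_out, outW * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_inp); cudaFree(d_ker); cudaFree(d_out);
}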
#include "nnbnorm_cudnn.hpp" #include "cudnnhelper.hpp" #include "../datacu.hpp" #include "copy.hpp" #include <assert.h> #define CHECK(x) \ { \ cudnnError = x ; \ if (cudnnError != CUDNN_STATUS_SUCCESS) { \ error = context.setError(context.getCudaHelper().catchCudnnError(cudnnError, \ STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \ goto done ; \ } } template<typename T> __global__ void var_to_std(T * var, unsigned int num, T scale, T epsilon) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { var[idx] = sqrt(scale * var[idx] + epsilon) ; } } template<typename T> __global__ void inverse(T * ivar, unsigned int num) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { ivar[idx] = ((T)1) / ivar[idx] ; } } template<vl::DataType dataType> vl::ErrorCode vl::impl::nnbnorm_cudnn<dataType>::forward(vl::Context& context, vl::Tensor output, vl::Tensor moments, // can be null vl::Tensor data, vl::Tensor multipliers, vl::Tensor biases, double epsilon) { assert(output) ; assert(data) ; assert(multipliers) ; assert(biases) ; typedef typename DataTypeTraits<dataType>::type type ; cudnnTensorDescriptor_t dataDesc, momentDesc ; bool dataDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::id ; vl::DataType dynDataType = output.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(dataDesc, CUDNN_TENSOR_NCHW, cudnnDataType, data.getSize(), data.getDepth(), data.getWidth(), data.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, data.getDepth(), 1, 1)) ; // Run CuDNN batch normalization implementation. { type alpha = 1.0f ; type beta = 0.0f ; type * meanMemory = NULL ; type * varMemory = NULL ; if (moments) { meanMemory = (type*)moments.getMemory() ; varMemory = meanMemory + data.getDepth() ; vl::impl::operations<vl::VLDT_GPU,type>::fill (meanMemory, 2 * data.getDepth() * sizeof(type), 0) ; } CHECK(cudnnBatchNormalizationForwardTraining (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, dataDesc, data.getMemory(), dataDesc, output.getMemory(), momentDesc, multipliers.getMemory(), biases.getMemory(), 0, NULL, NULL, epsilon, meanMemory, varMemory)) ; if (varMemory) { // CuDNN computes the variance without epsilon, whereas MCN // returns the standard deviation after adding epsilon. // Also, CuDNN returns the unbiased variance estimate, but it is // debatable that this is appropriate. // // We pick instead the caches, which are closer to the values we compute. // Also they do not need to be pre-initialized with zeros. size_t const blockSize = VL_CUDA_NUM_THREADS ; inverse<type> <<<divideAndRoundUp(data.getDepth(),blockSize),blockSize>>> (varMemory, data.getDepth()) ; } } // Cleanup. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } return context.passError(error, "nnbnorm_cudnn::forward") ; } template<typename T> __global__ void std_to_var(T * var, T const * std, unsigned int num, T epsilon) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { var[idx] = std[idx]*std[idx] - epsilon ; } } template<vl::DataType dataType> vl::ErrorCode vl::impl::nnbnorm_cudnn<dataType>::forward_given_moments(vl::Context& context, vl::Tensor output, vl::Tensor moments, vl::Tensor data, vl::Tensor multipliers, vl::Tensor biases) { assert(output) ; assert(data) ; assert(moments) ; assert(multipliers) ; assert(biases) ; typedef typename DataTypeTraits<dataType>::type type ; size_t workspaceSize ; type * workspace ; cudnnTensorDescriptor_t dataDesc, momentDesc ; bool dataDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::id ; vl::DataType dynDataType = output.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(dataDesc, CUDNN_TENSOR_NCHW, cudnnDataType, data.getSize(), data.getDepth(), data.getWidth(), data.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, data.getDepth(), 1, 1)) ; // Allocate workspace. workspaceSize = data.getDepth() ; workspace = (type*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ; // Run CuDNN batch normalization implementation. { type alpha = 1.0f ; type beta = 0.0f ; type * meanMemory = moments ? (type*)moments.getMemory() : workspace ; type * stdMemory = meanMemory + data.getDepth() ; type * varMemory = workspace ; size_t const blockSize = VL_CUDA_NUM_THREADS ; std_to_var<type> <<<divideAndRoundUp(data.getDepth(),blockSize),blockSize>>> (varMemory, stdMemory, data.getDepth(), CUDNN_BN_MIN_EPSILON) ; CHECK(cudnnBatchNormalizationForwardInference (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, dataDesc, data.getMemory(), dataDesc, output.getMemory(), momentDesc, multipliers.getMemory(), biases.getMemory(), meanMemory, varMemory, CUDNN_BN_MIN_EPSILON)) ; } // Cleanup. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } return context.passError(error, "nnbnorm_cudnn::forward") ; } template<vl::DataType dataType> vl::ErrorCode vl::impl::nnbnorm_cudnn<dataType>::backward(Context& context, vl::Tensor derData, vl::Tensor derMultipliers, vl::Tensor derBiases, vl::Tensor moments, vl::Tensor data, vl::Tensor multipliers, vl::Tensor biases, vl::Tensor derOutput, double epsilon) { assert(derData) ; assert(derMultipliers) ; assert(derBiases) ; assert(moments) ; assert(data) ; assert(multipliers) ; assert(biases) ; assert(derOutput) ; typedef typename DataTypeTraits<dataType>::type type ; size_t workspaceSize ; type * workspace ; size_t volume ; cudnnTensorDescriptor_t derOutputDesc, momentDesc ; bool derOutputDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::id ; vl::DataType dynDataType = derOutput.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(derOutputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, derOutput.getSize(), // sizes derOutput.getDepth(), derOutput.getWidth(), derOutput.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; momentDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, data.getDepth(), 1, 1)) ; // Compute moments using CuDNN. Unfortunately CuDNN does not expose // the values of the moments in the backward pass, so we need to run // the forward code to get them. volume = derData.getNumElements() ; workspaceSize = (moments ? 0 : 2 * derData.getDepth()) + volume ; workspace = (type*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ; { type alpha = 1.0f ; type beta = 0.0f ; type * outMemory = workspace ; type * meanMemory = moments ? (type*)moments.getMemory() : workspace + volume ; type * varMemory = meanMemory + data.getDepth() ; CHECK(cudnnBatchNormalizationForwardTraining (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, derOutputDesc, data.getMemory(), derOutputDesc, outMemory, // will be discarded momentDesc, multipliers.getMemory(), biases.getMemory(), 1.0, // cumulative factor for moments NULL, NULL, epsilon, meanMemory, varMemory)) ; CHECK(cudnnBatchNormalizationBackward (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, // data &alpha, &beta, // params derOutputDesc, data.getMemory(), // input derOutputDesc, derOutput.getMemory(), // input derOutputDesc, derData.getMemory(), // output momentDesc, multipliers.getMemory(), // input derMultipliers.getMemory(), // output derBiases.getMemory(), // output epsilon, meanMemory, varMemory)) ; // Note: the CuDNN manual describes the varMemory output above // as inverse variance, but it is the inverse standard deviation instead. size_t const blockSize = VL_CUDA_NUM_THREADS ; inverse<type> <<<divideAndRoundUp(data.getDepth(),blockSize),blockSize>>> (varMemory, data.getDepth()) ; } // Cleanup. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } return context.passError(error, "nnbnorm_cudnn::backward") ; } template<typename T> __global__ void inverse(T * out, T * in, unsigned int num) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { out[idx] = ((T)1) / in[idx] ; } } template<vl::DataType dataType> vl::ErrorCode vl::impl::nnbnorm_cudnn<dataType>::backward_given_moments(Context& context, vl::Tensor derData, vl::Tensor derMultipliers, vl::Tensor derBiases, vl::Tensor moments, vl::Tensor data, vl::Tensor multipliers, vl::Tensor biases, vl::Tensor derOutput, double epsilon) { assert(derData) ; assert(derMultipliers) ; assert(derBiases) ; assert(moments) ; assert(data) ; assert(multipliers) ; assert(biases) ; assert(derOutput) ; typedef typename DataTypeTraits<dataType>::type type ; size_t workspaceSize ; type * workspace ; cudnnTensorDescriptor_t derOutputDesc, dataDesc, momentDesc ; bool derOutputDescInitialized = false ; bool dataDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::id ; vl::DataType dynDataType = derOutput.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(derOutputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, derOutput.getSize(), // sizes derOutput.getDepth(), derOutput.getWidth(), derOutput.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(dataDesc, CUDNN_TENSOR_NCHW, cudnnDataType, data.getSize(), data.getDepth(), data.getWidth(), data.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, data.getDepth(), 1, 1)) ; // Compute moments using CuDNN. workspaceSize = derData.getDepth() ; workspace = (type*)context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ; { type alpha = 1.0f ; type beta = 0.0f ; type * meanMemory = (type*)moments.getMemory() ; type * stdMemory = meanMemory + data.getDepth() ; type * istdMemory = workspace ; // Note: the CuDNN manual describes the varMemory output above // as inverse variance, but it is the inverse standard deviation instead. size_t const blockSize = VL_CUDA_NUM_THREADS ; inverse<type> <<<divideAndRoundUp(data.getDepth(),blockSize),blockSize>>> (istdMemory, stdMemory, data.getDepth()) ; CHECK(cudnnBatchNormalizationBackward (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, // data &alpha, &beta, // params dataDesc, data.getMemory(), // input derOutputDesc, derOutput.getMemory(), // input dataDesc, derData.getMemory(), // output momentDesc, multipliers.getMemory(), // input derMultipliers.getMemory(), // output derBiases.getMemory(), // output epsilon, meanMemory, istdMemory)) ; } // Cleanup. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } return context.passError(error, "nnbnorm_cudnn::backward_given_moments") ; } template struct vl::impl::nnbnorm_cudnn<vl::VLDT_Float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::nnbnorm_cudnn<vl::VLDT_Double> ; #endif
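// ----------------------------------------------------------------------------
// Editor's note (not part of the original file): the three small kernels in this
// file only translate between moment conventions (CuDNN caches mean and inverse
// standard deviation; this wrapper stores mean and standard deviation including
// epsilon).  A host-side restatement of those conversions, with illustrative
// helper names:
// ----------------------------------------------------------------------------
#include <cmath>
static inline float std_from_var(float var, float scale, float eps) { return std::sqrt(scale * var + eps); } // mirrors var_to_std
static inline float var_from_std(float stdv, float eps)             { return stdv * stdv - eps; }            // mirrors std_to_var
static inline float inv_of(float x)                                 { return 1.0f / x; }                     // mirrors inverse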
#include <string> #include <vector> #include "optimization/kernels/kernel_common.h" #include "model/host_only_model.h" #include "model/mirrored_model.h" #include "util/dart_io.h" #include "util/dart_types.h" #include "util/mirrored_memory.h" #include "mesh/assimp_mesh_reader.h" namespace { __global__ void getErrorJacobianOfSingleModelPoint(float * J, const float4 * point_m, const int frame, const float3 * errorGrad3D_m, const int dims, const int * dependencies, const dart::JointType * jointTypes, const float3 * jointAxes, const dart::SE3 * T_fms, const dart::SE3 * T_mfs) { dart::getErrorJacobianOfModelPoint(J,*point_m,frame,*errorGrad3D_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); } TEST(TestModelJacobianGPU,TestModelArticulationJacobianGPU) { const float dPose = 1e-3; const float magTolerance = 1e-4; const int nPoses = 20; std::vector<std::string> testModels; testModels.push_back("../models/leftHand/leftHand.xml"); dart::Model::initializeRenderer(new dart::AssimpMeshReader()); dim3 block(1,1,1); dim3 grid(1,1,1); for (int m=0; m<testModels.size(); ++m) { cudaError_t err = cudaGetLastError(); ASSERT_EQ(err,cudaSuccess); dart::HostOnlyModel hostModel; dart::readModelXML(testModels[m].c_str(),hostModel); hostModel.computeStructure(); hostModel.voxelize(0.1,0.0); dart::MirroredModel model(hostModel,make_uint3(64,64,64),1); const int dims = model.getPoseDimensionality(); std::vector<float4> testPoints_f; testPoints_f.push_back(make_float4(0,0,0,1)); testPoints_f.push_back(make_float4(0.1,0.1,0.1,1)); dart::MirroredVector<float> Jx(dims); dart::MirroredVector<float> Jy(dims); dart::MirroredVector<float> Jz(dims); dart::MirroredVector<float3> X(3); X[0] = make_float3(1,0,0); X[1] = make_float3(0,1,0); X[2] = make_float3(0,0,1); X.syncHostToDevice(); for (int n=0; n<nPoses; ++n) { float pose[dims]; for (int i=6; i<dims; ++i) { const int joint = i-6; const float jointMin = model.getJointMin(joint) + dPose; const float jointMax = model.getJointMax(joint) - dPose; pose[i] = jointMin + (jointMax - jointMin) * rand() / (float(RAND_MAX)); // TODO: proper randomness } float tmpPose[dims]; for (int frame=0; frame < model.getNumFrames(); ++frame) { for (int p=0; p<testPoints_f.size(); ++p) { float4 framePoint = testPoints_f[p]; model.setArticulation(pose); dart::MirroredVector<float4> modelPoint(1); modelPoint[0] = model.getTransformFrameToModel(frame)*framePoint; modelPoint.syncHostToDevice(); // TODO: parallelize!!! 
getErrorJacobianOfSingleModelPoint<<<grid,block>>>(Jx.devicePtr(),modelPoint.devicePtr(),frame,X.devicePtr(),dims, model.getDeviceDependencies(),model.getDeviceJointTypes(), model.getDeviceJointAxes(),model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel()); Jx.syncDeviceToHost(); getErrorJacobianOfSingleModelPoint<<<grid,block>>>(Jy.devicePtr(),modelPoint.devicePtr(),frame,X.devicePtr()+1,dims, model.getDeviceDependencies(),model.getDeviceJointTypes(), model.getDeviceJointAxes(),model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel()); Jy.syncDeviceToHost(); getErrorJacobianOfSingleModelPoint<<<grid,block>>>(Jz.devicePtr(),modelPoint.devicePtr(),frame,X.devicePtr()+2,dims, model.getDeviceDependencies(),model.getDeviceJointTypes(), model.getDeviceJointAxes(),model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel()); Jz.syncDeviceToHost(); for (int joint=0; joint<model.getNumJoints(); ++joint) { int i = joint+6; memcpy(tmpPose,pose,dims*sizeof(float)); tmpPose[i] = pose[i] - dPose; model.setArticulation(tmpPose); float4 neg = model.getTransformFrameToModel(frame)*framePoint; tmpPose[i] = pose[i] + dPose; model.setArticulation(tmpPose); float4 pos = model.getTransformFrameToModel(frame)*framePoint; float3 J3Dnumeric = make_float3((1/(2*dPose))*(pos-neg)); float3 J3Danalytic = make_float3(Jx[i],Jy[i],Jz[i]); EXPECT_NEAR(J3Dnumeric.x,J3Danalytic.x,magTolerance); EXPECT_NEAR(J3Dnumeric.y,J3Danalytic.y,magTolerance); EXPECT_NEAR(J3Dnumeric.z,J3Danalytic.z,magTolerance); // float magNumeric = length(J3Dnumeric); // float magAnalytic = length(J3Danalytic); // EXPECT_NEAR(magAnalytic,magNumeric,magTolerance); } } err = cudaGetLastError(); ASSERT_EQ(err,cudaSuccess); } } } dart::Model::shutdownRenderer(); } TEST(TestModelJacobianGPU,TestModel6DoFJacobianGPU) { const float dPose = 1e-3; const float magTolerance = 1e-4; const int nPoses = 20; std::vector<std::string> testModels; testModels.push_back("../models/leftHand/leftHand.xml"); dart::Model::initializeRenderer(new dart::AssimpMeshReader()); dim3 block(1,1,1); dim3 grid(1,1,1); for (int m=0; m<testModels.size(); ++m) { dart::HostOnlyModel hostModel; dart::readModelXML(testModels[m].c_str(),hostModel); hostModel.computeStructure(); hostModel.voxelize(0.1,0.0); dart::MirroredModel model(hostModel,make_uint3(64,64,64),1); const int dims = model.getPoseDimensionality(); std::vector<float4> testPoints_f; testPoints_f.push_back(make_float4(0,0,0,1)); testPoints_f.push_back(make_float4(0.1,0.1,0.1,1)); dart::MirroredVector<float> Jx(dims); dart::MirroredVector<float> Jy(dims); dart::MirroredVector<float> Jz(dims); dart::MirroredVector<float3> X(3); X[0] = make_float3(1,0,0); X[1] = make_float3(0,1,0); X[2] = make_float3(0,0,1); std::vector<float> mins(dims); std::vector<float> maxs(dims); std::vector<std::string> names(dims); dart::Pose pose(new dart::NullReduction(dims,mins.data(),maxs.data(),names.data())); for (int n=0; n<nPoses; ++n) { // float pose[dims]; for (int joint=0; joint<dims-6; ++joint) { const float jointMin = model.getJointMin(joint) + dPose; const float jointMax = model.getJointMax(joint) - dPose; pose.getArticulation()[joint] = jointMin + (jointMax - jointMin) * ( rand() / ((float)(RAND_MAX)) ); // TODO: proper randomness } dart::se3 t_mc; for (int i=0; i<3; ++i) { t_mc.p[i] = -0.5 + rand() / (float)(RAND_MAX); t_mc.p[i+3] = -M_PI + 2*M_PI*( rand() / ((float)(RAND_MAX)) ); } dart::SE3 T_mc = dart::SE3Fromse3(t_mc); pose.setTransformCameraToModel(T_mc); for (int 
frame=0; frame < model.getNumFrames(); ++frame) { for (int p=0; p<testPoints_f.size(); ++p) { float4 framePoint = testPoints_f[p]; model.setPose(pose); dart::MirroredVector<float4> modelPoint(1); modelPoint[0] = model.getTransformFrameToModel(frame)*framePoint; modelPoint.syncHostToDevice(); // TODO: parallelize!!! getErrorJacobianOfSingleModelPoint<<<grid,block>>>(Jx.devicePtr(),modelPoint.devicePtr(),frame,X.devicePtr(),dims, model.getDeviceDependencies(),model.getDeviceJointTypes(), model.getDeviceJointAxes(),model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel()); Jx.syncDeviceToHost(); getErrorJacobianOfSingleModelPoint<<<grid,block>>>(Jy.devicePtr(),modelPoint.devicePtr(),frame,X.devicePtr()+1,dims, model.getDeviceDependencies(),model.getDeviceJointTypes(), model.getDeviceJointAxes(),model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel()); Jy.syncDeviceToHost(); getErrorJacobianOfSingleModelPoint<<<grid,block>>>(Jz.devicePtr(),modelPoint.devicePtr(),frame,X.devicePtr()+2,dims, model.getDeviceDependencies(),model.getDeviceJointTypes(), model.getDeviceJointAxes(),model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel()); Jz.syncDeviceToHost(); dart::se3 dt_mc; for (int i=0; i<6; ++i) { memset(dt_mc.p,0,6*sizeof(float)); dt_mc.p[i] = -dPose; pose.setTransformCameraToModel(dart::SE3Fromse3(dt_mc)*T_mc); model.setPose(pose); float4 neg = model.getTransformFrameToCamera(frame)*framePoint; dt_mc.p[i] = dPose; pose.setTransformCameraToModel(dart::SE3Fromse3(dt_mc)*T_mc); model.setPose(pose); float4 pos = model.getTransformFrameToCamera(frame)*framePoint; float3 J3Dnumeric_c = make_float3((1/(2*dPose))*(pos-neg)); float3 J3Dnumeric_m = dart::SE3Rotate(T_mc,J3Dnumeric_c); float3 J3Danalytic = make_float3(Jx[i],Jy[i],Jz[i]); EXPECT_NEAR(J3Dnumeric_m.x,J3Danalytic.x,magTolerance); EXPECT_NEAR(J3Dnumeric_m.y,J3Danalytic.y,magTolerance); EXPECT_NEAR(J3Dnumeric_m.z,J3Danalytic.z,magTolerance); // float magNumeric = length(J3Dnumeric_m); // float magAnalytic = length(J3Danalytic[i]); // EXPECT_NEAR(magAnalytic,magNumeric,magTolerance); } } } } } dart::Model::shutdownRenderer(); } } // namespace
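// ----------------------------------------------------------------------------
// Editor's sketch (not part of the original tests): both tests rely on the same
// pattern, a central finite difference per pose dimension,
//     J_numeric[:, i] ~= ( f(theta + h * e_i) - f(theta - h * e_i) ) / (2 * h),
// compared column by column against the analytic Jacobian computed on the GPU.
// A generic helper in that spirit (std::function-based, illustrative only):
// ----------------------------------------------------------------------------
#include <functional>
#include <vector>

// Numeric derivative of every output component with respect to parameter i.
static std::vector<float> centralDifferenceColumn(
        const std::function<std::vector<float>(const std::vector<float>&)>& f,
        std::vector<float> params, int i, float h) {
    params[i] += h;
    const std::vector<float> pos = f(params);
    params[i] -= 2 * h;
    const std::vector<float> neg = f(params);
    std::vector<float> col(pos.size());
    for (size_t k = 0; k < pos.size(); ++k) col[k] = (pos[k] - neg[k]) / (2.0f * h);
    return col;
}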
cv::Mat surfelwarp::downloadDepthImage(const DeviceArray2D<unsigned short>& image_gpu) { const auto num_rows = image_gpu.rows(); const auto num_cols = image_gpu.cols(); cv::Mat depth_cpu(num_rows, num_cols, CV_16UC1); image_gpu.download(depth_cpu.data, sizeof(unsigned short) * num_cols); return depth_cpu; } cv::Mat surfelwarp::downloadDepthImage(cudaTextureObject_t image_gpu) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(image_gpu, width, height); DeviceArray2D<unsigned short> map; map.create(height, width); //Transfer and download textureToMap2D<unsigned short>(image_gpu, map); return downloadDepthImage(map); } cv::Mat surfelwarp::downloadRGBImage( const DeviceArray<uchar3>& image_gpu, const unsigned rows, const unsigned cols ) { assert(rows * cols == image_gpu.size()); cv::Mat rgb_cpu(rows, cols, CV_8UC3); image_gpu.download((uchar3*) (rgb_cpu.data)); return rgb_cpu; } cv::Mat surfelwarp::downloadNormalizeRGBImage(const DeviceArray2D<float4>& rgb_img) { cv::Mat rgb_cpu(rgb_img.rows(), rgb_img.cols(), CV_32FC4); rgb_img.download(rgb_cpu.data, sizeof(float4) * rgb_img.cols()); return rgb_cpu; } cv::Mat surfelwarp::downloadNormalizeRGBImage(cudaTextureObject_t rgb_img) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(rgb_img, width, height); DeviceArray2D<float4> map; map.create(height, width); //Transfer and download textureToMap2D<float4>(rgb_img, map); return downloadNormalizeRGBImage(map); } cv::Mat surfelwarp::rgbImageFromColorTimeMap(cudaTextureObject_t color_time_map) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(color_time_map, width, height); //First download to device array DeviceArray2D<float4> map; map.create(height, width); textureToMap2D<float4>(color_time_map, map); //Donwload to host std::vector<float4> color_time_host; int cols = width; map.download(color_time_host, cols); cv::Mat rgb_cpu(height, width, CV_8UC3); for (auto i = 0; i < width; i++) { for (auto j = 0; j < height; j++) { const auto flatten_idx = i + j * width; const float4 color_time_value = color_time_host[flatten_idx]; uchar3 rgb_value; float_decode_rgb(color_time_value.x, rgb_value); rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 0) = rgb_value.x; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 1) = rgb_value.y; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 2) = rgb_value.z; } } return rgb_cpu; } cv::Mat surfelwarp::normalMapForVisualize(cudaTextureObject_t normal_map) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(normal_map, width, height); //First download to device array DeviceArray2D<float4> map; map.create(height, width); textureToMap2D<float4>(normal_map, map); //Donwload to host std::vector<float4> normal_map_host; int cols = width; map.download(normal_map_host, cols); cv::Mat rgb_cpu(height, width, CV_8UC3); for (auto i = 0; i < width; i++) { for (auto j = 0; j < height; j++) { const auto flatten_idx = i + j * width; const float4 normal_value = normal_map_host[flatten_idx]; uchar3 rgb_value; rgb_value.x = (unsigned char) ((normal_value.x + 1) * 120.0f); rgb_value.y = (unsigned char) ((normal_value.y + 1) * 120.0f); rgb_value.z = (unsigned char) ((normal_value.z + 1) * 120.0f); rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 0) = rgb_value.x; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 1) = rgb_value.y; rgb_cpu.at<unsigned char>(j, sizeof(uchar3) * i + 2) = rgb_value.z; } } return rgb_cpu; } void 
surfelwarp::downloadSegmentationMask(cudaTextureObject_t mask, std::vector<unsigned char>& h_mask) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(mask, width, height); //Download it to device DeviceArray2D<unsigned char> d_mask; d_mask.create(height, width); textureToMap2D<unsigned char>(mask, d_mask); //Download it to host int h_cols; d_mask.download(h_mask, h_cols); } cv::Mat surfelwarp::downloadRawSegmentationMask(cudaTextureObject_t mask) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(mask, width, height); //Download it to device DeviceArray2D<unsigned char> d_mask; d_mask.create(height, width); textureToMap2D<unsigned char>(mask, d_mask); //Download it to host std::vector<unsigned char> h_mask_vec; int h_cols; d_mask.download(h_mask_vec, h_cols); cv::Mat raw_mask(height, width, CV_8UC1); for (auto row = 0; row < height; row++) { for (auto col = 0; col < width; col++) { const auto offset = col + row * width; raw_mask.at<unsigned char>(row, col) = h_mask_vec[offset]; } } return raw_mask; } void surfelwarp::downloadGrayScaleImage(cudaTextureObject_t image, cv::Mat& h_image, float scale) { //Query the size of texture unsigned width = 0, height = 0; query2DTextureExtent(image, width, height); //Download it to device DeviceArray2D<float> d_meanfield; d_meanfield.create(height, width); textureToMap2D<float>(image, d_meanfield); //To host cv::Mat h_meanfield_prob = cv::Mat(height, width, CV_32FC1); d_meanfield.download(h_meanfield_prob.data, sizeof(float) * width); //Transfer it h_meanfield_prob.convertTo(h_image, CV_8UC1, scale * 255.f); } void surfelwarp::downloadTransferBinaryMeanfield(cudaTextureObject_t meanfield_q, cv::Mat& h_meanfield_uchar) { downloadGrayScaleImage(meanfield_q, h_meanfield_uchar); } /* The point cloud downloading method */ PointCloud3f_Pointer surfelwarp::downloadPointCloud(const surfelwarp::DeviceArray<float4>& vertex) { PointCloud3f_Pointer point_cloud(new PointCloud3f); std::vector<float4> h_vertex; vertex.download(h_vertex); setPointCloudSize(point_cloud, vertex.size()); for (auto idx = 0; idx < vertex.size(); idx++) { setPoint(h_vertex[idx].x, h_vertex[idx].y, h_vertex[idx].z, point_cloud, idx); } return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud(const DeviceArray2D<float4>& vertex_map) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); size_t valid_count = 0; setPointCloudSize(point_cloud, total_size); for (int idx = 0; idx < total_size; idx += 1) { float x = host_ptr[idx].x; float y = host_ptr[idx].y; float z = host_ptr[idx].z; if (std::abs(x > 1e-3) || std::abs(y > 1e-3) || std::abs(z > 1e-3)) { valid_count++; } setPoint(x, y, z, point_cloud, idx); } //LOG(INFO) << "The number of valid point cloud is " << valid_count << std::endl; delete[] host_ptr; return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud( const DeviceArray2D<float4>& vertex_map, DeviceArrayView<unsigned int> indicator) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); std::vector<unsigned> h_indicator; 
indicator.Download(h_indicator); #ifdef WITH_CILANTRO int valid_point_count = 0; for (int idx = 0; idx < total_size; idx += 1) { if (h_indicator[idx]) valid_point_count++; } setPointCloudSize(point_cloud, valid_point_count); #endif for (int idx = 0; idx < total_size; idx += 1) { if (h_indicator[idx]) { setPoint(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z, point_cloud, idx); } } //LOG(INFO) << "The number of valid point cloud is " << valid_count << std::endl; delete[] host_ptr; return point_cloud; } PointCloud3f_Pointer surfelwarp::downloadPointCloud( const DeviceArray2D<float4>& vertex_map, DeviceArrayView<ushort2> pixel ) { PointCloud3f_Pointer point_cloud(new PointCloud3f); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); std::vector<ushort2> h_pixels; pixel.Download(h_pixels); setPointCloudSize(point_cloud, h_pixels.size()); for (auto i = 0; i < h_pixels.size(); i++) { const auto idx = h_pixels[i].x + h_pixels[i].y * vertex_map.cols(); setPoint(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z, point_cloud, i); } delete[] host_ptr; return point_cloud; } void surfelwarp::downloadPointCloud(const DeviceArray2D<float4>& vertex_map, std::vector<float4>& point_cloud) { point_cloud.clear(); const auto num_rows = vertex_map.rows(); const auto num_cols = vertex_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; vertex_map.download(host_ptr, num_cols * sizeof(float4)); for (int idx = 0; idx < total_size; idx += 1) { float4 point; point.x = host_ptr[idx].x; point.y = host_ptr[idx].y; point.z = host_ptr[idx].z; if (std::abs(point.x > 1e-3) || std::abs(point.y > 1e-3) || std::abs(point.z > 1e-3)) point_cloud.push_back(point); } delete[] host_ptr; } PointCloud3f_Pointer surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array); } PointCloud3f_Pointer surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map, DeviceArrayView<unsigned int> indicator) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array, indicator); } PointCloud3f_Pointer surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map, DeviceArrayView<ushort2> pixel) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); return downloadPointCloud(vertex_map_array, pixel); } void surfelwarp::downloadPointCloud(cudaTextureObject_t vertex_map, std::vector<float4>& point_cloud) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array; vertex_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); downloadPointCloud(vertex_map_array, point_cloud); } #ifdef WITH_PCL PointCloudNormal_Pointer surfelwarp::downloadNormalCloud(const DeviceArray<float4>& d_normal) { std::vector<float4> h_normal; d_normal.download(h_normal); PointCloudNormal_Pointer normal_cloud(new 
PointCloudNormal); for (auto idx = 0; idx < d_normal.size(); idx++) { setNormal(h_normal[idx].x, h_normal[idx].y, h_normal[idx].z, normal_cloud, idx); } return normal_cloud; } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(const DeviceArray<float4>& d_normal, PointCloudNormal_Pointer& point_cloud) { std::vector<float4> h_normal; d_normal.download(h_normal); setNormalCloudSize(point_cloud, d_normal.size()); for (auto idx = 0; idx < d_normal.size(); idx++) { setNormal(h_normal[idx].x, h_normal[idx].y, h_normal[idx].z, point_cloud, idx); } } #endif #ifdef WITH_PCL PointCloudNormal_Pointer surfelwarp::downloadNormalCloud(const DeviceArray2D<float4>& normal_map) { PointCloudNormal_Pointer normal_cloud(new PointCloudNormal); const auto num_rows = normal_map.rows(); const auto num_cols = normal_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; normal_map.download(host_ptr, num_cols * sizeof(float4)); int valid_count = 0; for (int idx = 0; idx < total_size; idx += 1) { float4 normal_dev = host_ptr[idx]; SURFELWARP_CHECK(!isnan(normal_dev.x)); SURFELWARP_CHECK(!isnan(normal_dev.y)); SURFELWARP_CHECK(!isnan(normal_dev.z)); if (norm(make_float3(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z)) > 1e-4) { valid_count++; } setNormal(normal_dev.x, normal_dev.y, normal_dev.z, normal_cloud, idx); } //LOG(INFO) << "The number of valid normals is " << valid_count; delete[] host_ptr; return normal_cloud; } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(const DeviceArray2D<float4>& normal_map, PointCloudNormal_Pointer& point_cloud) { const auto num_rows = normal_map.rows(); const auto num_cols = normal_map.cols(); const auto total_size = num_cols * num_rows; float4* host_ptr = new float4[total_size]; normal_map.download(host_ptr, num_cols * sizeof(float4)); int valid_count = 0; setNormalCloudSize(point_cloud, total_size); for (int idx = 0; idx < total_size; idx += 1) { float4 normal_dev = host_ptr[idx]; SURFELWARP_CHECK(!isnan(normal_dev.x)); SURFELWARP_CHECK(!isnan(normal_dev.y)); SURFELWARP_CHECK(!isnan(normal_dev.z)); if (norm(make_float3(host_ptr[idx].x, host_ptr[idx].y, host_ptr[idx].z)) > 1e-4) { valid_count++; } setNormal(normal_dev.x, normal_dev.y, normal_dev.z, point_cloud, idx); } //LOG(INFO) << "The number of valid normals is " << valid_count; delete[] host_ptr; } #endif #ifdef WITH_PCL pcl::PointCloud<pcl::Normal>::Ptr surfelwarp::downloadNormalCloud(cudaTextureObject_t normal_map) { unsigned rows, cols; query2DTextureExtent(normal_map, cols, rows); DeviceArray2D<float4> normal_map_array; normal_map_array.create(rows, cols); textureToMap2D<float4>(normal_map, normal_map_array); return downloadNormalCloud(normal_map_array); } #elif defined(WITH_CILANTRO) void surfelwarp::downloadNormalCloud(cudaTextureObject_t normal_map, PointCloudNormal_Pointer& point_cloud) { unsigned rows, cols; query2DTextureExtent(normal_map, cols, rows); DeviceArray2D<float4> normal_map_array; normal_map_array.create(rows, cols); textureToMap2D<float4>(normal_map, normal_map_array); downloadNormalCloud(normal_map_array, point_cloud); } #endif void surfelwarp::downloadPointNormalCloud( const surfelwarp::DeviceArray<DepthSurfel>& surfel_array, PointCloud3f_Pointer& point_cloud, #ifdef WITH_PCL PointCloudNormal_Pointer& normal_cloud, #endif const float point_scale ) { //Prepare the data point_cloud = PointCloud3f_Pointer(new PointCloud3f); #ifdef WITH_PCL normal_cloud = PointCloudNormal_Pointer(new PointCloudNormal); #elif 
defined(WITH_CILANTRO) // in cilantro, the normals are a field within the point cloud, we don't need a separate cloud auto& normal_cloud = point_cloud; #endif //Download it std::vector<DepthSurfel> surfel_array_host; surfel_array.download(surfel_array_host); setPointCloudSize(point_cloud, surfel_array_host.size()); setNormalCloudSize(normal_cloud, surfel_array_host.size()); //Construct the output for (auto i = 0; i < surfel_array_host.size(); i++) { DepthSurfel surfel = surfel_array_host[i]; setPoint(surfel.vertex_confid.x, surfel.vertex_confid.y, surfel.vertex_confid.z, point_cloud, i, point_scale); setNormal(surfel.normal_radius.x, surfel.normal_radius.y, surfel.normal_radius.z, normal_cloud, i); } } void surfelwarp::separateDownloadPointCloud(const surfelwarp::DeviceArrayView<float4>& point_cloud, const surfelwarp::DeviceArrayView<unsigned int>& indicator, PointCloud3f_Pointer& fused_cloud, PointCloud3f_Pointer& unfused_cloud) { std::vector<float4> h_surfels; std::vector<unsigned> h_indicator; point_cloud.Download(h_surfels); indicator.Download(h_indicator); SURFELWARP_CHECK(h_indicator.size() == h_surfels.size()); #ifdef WITH_CILANTRO int fused_cloud_size = 0; int unfused_cloud_size = 0; for (auto i = 0; i < h_surfels.size(); i++) { const auto indicator = h_indicator[i]; if (indicator > 0) { fused_cloud_size++; } else { unfused_cloud_size++; } } setPointCloudSize(fused_cloud, fused_cloud_size); setPointCloudSize(unfused_cloud, unfused_cloud_size); #endif int i_fused = 0; int i_unfused = 0; for (auto i = 0; i < h_surfels.size(); i++) { const auto indicator = h_indicator[i]; const auto flat_point = h_surfels[i]; if (indicator > 0) { setPoint(flat_point.x, flat_point.y, flat_point.z, fused_cloud, i_fused); i_fused++; } else { setPoint(flat_point.x, flat_point.y, flat_point.z, unfused_cloud, i_unfused); i_unfused++; } } } void surfelwarp::separateDownloadPointCloud( const surfelwarp::DeviceArrayView<float4>& point_cloud, unsigned num_remaining_surfels, PointCloud3f_Pointer& remaining_cloud, PointCloud3f_Pointer& appended_cloud ) { //Clear the existing point cloud #ifdef WITH_PCL remaining_cloud->points.clear(); appended_cloud->points.clear(); #endif setPointCloudSize(remaining_cloud, num_remaining_surfels); setPointCloudSize(appended_cloud, point_cloud.Size() - num_remaining_surfels); std::vector<float4> h_surfels; point_cloud.Download(h_surfels); int i_appended = 0; for (auto i = 0; i < point_cloud.Size(); i++) { const auto flat_point = h_surfels[i]; if (i < num_remaining_surfels) { setPoint(flat_point.x, flat_point.y, flat_point.z, remaining_cloud, i); } else { setPoint(flat_point.x, flat_point.y, flat_point.z, appended_cloud, i_appended); i_appended++; } } } /* The download function for colored point cloud */ PointCloud3fRGB_Pointer surfelwarp::downloadColoredPointCloud( const surfelwarp::DeviceArray<float4>& vertex_confid, const surfelwarp::DeviceArray<float4>& color_time ) { PointCloud3fRGB_Pointer point_cloud(new PointCloud3fRGB()); std::vector<float4> h_vertex, h_color_time; vertex_confid.download(h_vertex); color_time.download(h_color_time); SURFELWARP_CHECK_EQ(h_vertex.size(), h_color_time.size()); setPointCloudRGBSize(point_cloud, h_vertex.size()); for (auto idx = 0; idx < h_vertex.size(); idx++) { float encoded_rgb = h_color_time[idx].x; uchar3 rgb; float_decode_rgb(encoded_rgb, rgb); setPointRGB(h_vertex[idx].x, h_vertex[idx].y, h_vertex[idx].z, rgb.x, rgb.y, rgb.z, point_cloud, idx); } return point_cloud; } PointCloud3fRGB_Pointer surfelwarp::downloadColoredPointCloud( 
cudaTextureObject_t vertex_map, cudaTextureObject_t color_time_map, bool flip_color ) { unsigned rows, cols; query2DTextureExtent(vertex_map, cols, rows); DeviceArray2D<float4> vertex_map_array, color_map_array; vertex_map_array.create(rows, cols); color_map_array.create(rows, cols); textureToMap2D<float4>(vertex_map, vertex_map_array); textureToMap2D<float4>(color_time_map, color_map_array); //Download it float4* h_vertex = new float4[rows * cols]; float4* h_color_time = new float4[rows * cols]; vertex_map_array.download(h_vertex, cols * sizeof(float4)); color_map_array.download(h_color_time, cols * sizeof(float4)); //Construct the point cloud PointCloud3fRGB_Pointer point_cloud(new PointCloud3fRGB()); setPointCloudRGBSize(point_cloud, rows * cols); for (auto i = 0; i < rows * cols; i++) { float encoded_rgb = h_color_time[i].x; uchar3 rgb; float_decode_rgb(encoded_rgb, rgb); if (flip_color) { setPointRGB(h_vertex[i].x, h_vertex[i].y, h_vertex[i].z, rgb.z, rgb.y, rgb.x, point_cloud, i); } else { setPointRGB(h_vertex[i].x, h_vertex[i].y, h_vertex[i].z, rgb.x, rgb.y, rgb.z, point_cloud, i); } } delete[] h_vertex; delete[] h_color_time; return point_cloud; } //The method to add color to point cloud PointCloud3fRGB_Pointer surfelwarp::addColorToPointCloud( const PointCloud3f_Pointer& point_cloud, uchar4 rgba ) { PointCloud3fRGB_Pointer color_cloud(new PointCloud3fRGB()); setPointCloudRGBSize(color_cloud, point_cloud->size()); for (auto i = 0; i < point_cloud->size(); i++) { #ifdef WITH_PCL const auto& point_xyz = point_cloud->points[i]; float x = point_xyz.x; float y = point_xyz.y; float z = point_xyz.z; #elif defined(WITH_CILANTRO) const auto& point_xyz = point_cloud->points.col(i); float x = point_xyz.x(); float y = point_xyz.y(); float z = point_xyz.z(); #endif setPointRGB(x, y, z, rgba.x, rgba.y, rgba.z, color_cloud, i,1.0f); } return color_cloud; } /* The index map query methods */ namespace surfelwarp { namespace device { __global__ void queryIndexMapFromPixelKernel( cudaTextureObject_t index_map, const DeviceArrayView<ushort4> pixel_array, unsigned* index_array ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < pixel_array.Size()) { const auto x = pixel_array[idx].x; const auto y = pixel_array[idx].y; const auto index = tex2D<unsigned>(index_map, x, y); index_array[idx] = index; } } } // namespace device } // namespace surfelwarp void surfelwarp::queryIndexMapFromPixels( cudaTextureObject_t index_map, const DeviceArrayView<ushort4>& pixel_array, DeviceArray<unsigned>& index_array ) { //Simple sanity check SURFELWARP_CHECK_EQ(pixel_array.Size(), index_array.size()); //Invoke the kernel dim3 blk(256); dim3 grid(pixel_array.Size(), blk.x); device::queryIndexMapFromPixelKernel << < grid, blk >> > (index_map, pixel_array, index_array); }
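// A minimal, illustrative sketch (not part of the surfelwarp API) of the conventional
// one-dimensional launch configuration for a kernel like device::queryIndexMapFromPixelKernel
// above. queryIndexMapFromPixels passes dim3 grid(pixel_array.Size(), blk.x); because the
// kernel only reads blockIdx.x and guards on idx < pixel_array.Size(), the result is still
// correct, but each output element is written blk.x times by redundant blocks along grid.y.
// The ceil-division form below launches just enough blocks. The function name is hypothetical.
static void queryIndexMapFromPixelsCeilDivSketch(
    cudaTextureObject_t index_map,
    const surfelwarp::DeviceArrayView<ushort4>& pixel_array,
    surfelwarp::DeviceArray<unsigned>& index_array
) {
    const unsigned threads = 256;
    // Round the block count up so that threads * blocks >= pixel_array.Size()
    const unsigned blocks = (static_cast<unsigned>(pixel_array.Size()) + threads - 1u) / threads;
    dim3 blk(threads);
    dim3 grid(blocks);
    surfelwarp::device::queryIndexMapFromPixelKernel<<<grid, blk>>>(index_map, pixel_array, index_array);
}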
#include "aggregation/coarseAgenerators/coarse_A_generator.h" #include "aggregation/selectors/agg_selector.h" #include "matrix_coloring/matrix_coloring.h" #include "matrix_coloring/min_max.h" #include "solvers/solver.h" #include "classical/selectors/selector.h" #include "classical/interpolators/interpolator.h" #include "classical/strength/strength.h" #include <cusp/print.h> #include <cusp/gallery/poisson.h> #ifdef AMGX_WITH_MPI #include <mpi.h> #endif namespace amgx { // generates matrix without diagonal property (diagonal inside values[]) with few diagonal values that are not stored in the values array (implicit zeros) ///////////////////////////////////////////// template<class TConfig> struct generateMatrixRandomStructCustom { static void generate(Matrix<TConfig> &A, int max_rows, int bsize, bool symmetric, int max_nnz_per_row = 10); static void generateExact(Matrix<TConfig> &A, int num_rows, int bsize, bool symmetric, int max_nnz_per_row = 10); }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> struct generateMatrixRandomStructCustom<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > { typedef Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > Matrix_h; typedef typename Matrix_h::index_type IndexType; static void generate (Matrix_h &A, int max_rows, int bsize, bool symmetric, int max_nnz_per_row = 10) { int new_rows = max((int)(((float)rand() / RAND_MAX) * max_rows), 1); generateExact(A, new_rows, bsize, symmetric, max_nnz_per_row); } static void generateExact (Matrix_h &A, int new_rows, int bsize, bool symmetric, int max_nnz_per_row = 10) { Matrix_h newA; newA.set_initialized(0); int props = CSR; int bsize_sq = bsize * bsize; int num_zero_diag = 5; newA.addProps(props); newA.resize(new_rows, new_rows, new_rows, bsize, bsize, 1); newA.values.resize(0); newA.col_indices.resize(0); std::vector<IndexType> row_col_idx; int cur_nnz = 0; IndexType cur_ro = 0; for (int i = 0; i < newA.get_num_rows(); i++) { row_col_idx.clear(); newA.row_offsets[i] = cur_ro; if ((num_zero_diag > 0) && (rand() % 5) ) { num_zero_diag--; } else { row_col_idx.push_back(i); } if (row_col_idx.size() < max_nnz_per_row) { int add_nnz = min (new_rows, max( 1, (int)( ((float)rand() / RAND_MAX) * (max_nnz_per_row - row_col_idx.size()) )) ); while (add_nnz > 0) { IndexType new_col = rand() % new_rows; if ( row_col_idx.end() == std::find(row_col_idx.begin(), row_col_idx.end(), new_col) ) { row_col_idx.push_back(new_col); } --add_nnz; } } std::sort(row_col_idx.begin(), row_col_idx.end()); for (unsigned int j = 0; j < row_col_idx.size(); j++) { for (int k = 0; k < bsize_sq; k++) { newA.values.push_back(1.0); } newA.col_indices.push_back(row_col_idx[j]); cur_nnz++; } cur_ro = cur_nnz; } newA.row_offsets[new_rows] = cur_ro; newA.set_num_nz(cur_nnz); int new_vals = (newA.get_num_nz() + 1) * bsize_sq; newA.values.resize(new_vals); thrust::fill(newA.values.begin() + (newA.get_num_nz()*newA.get_block_size()), newA.values.end(), 0.0); newA.computeDiagonal(); A = newA; A.set_initialized(1); } }; template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> struct generateMatrixRandomStructCustom<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > { typedef Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > Matrix_h; typedef Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > Matrix_d; static void generate (Matrix_d &A, int max_rows, int bsize, bool symmetric, int max_nnz_per_row 
= 10) { Matrix_h tA; generateMatrixRandomStructCustom< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::generate(tA, max_rows, bsize, symmetric, max_nnz_per_row); A = tA; } static void generateExact (Matrix_d &A, int num_rows, int bsize, bool symmetric, int max_nnz_per_row = 10) { Matrix_h tA; generateMatrixRandomStructCustom< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::generateExact(tA, num_rows, bsize, symmetric, max_nnz_per_row); A = tA; } }; DECLARE_UNITTEST_BEGIN(ImplicitZeroInDiagonal); typedef typename TConfig_h::template setVecPrec<(AMGX_VecPrecision)AMGX_GET_MODE_VAL(AMGX_MatPrecision, TConfig::mode)>::Type vvec_h; typedef typename TConfig::template setVecPrec<AMGX_vecInt>::Type ivec; typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_h; // setup restriction on HOST void fillRowOffsetsAndColIndices(const int num_aggregates, Vector<ivec_h> aggregates, const int R_num_cols, Vector<ivec_h> &R_row_offsets, Vector<ivec_h> &R_col_indices) { for (int i = 0; i < num_aggregates + 1; i++) { R_row_offsets[i] = 0; } // Count number of neighbors for each row for (int i = 0; i < R_num_cols; i++) { int I = aggregates[i]; R_row_offsets[I]++; } R_row_offsets[num_aggregates] = R_num_cols; for (int i = num_aggregates - 1; i >= 0; i--) { R_row_offsets[i] = R_row_offsets[i + 1] - R_row_offsets[i]; } /* Set column indices. */ for (int i = 0; i < R_num_cols; i++) { int I = aggregates[i]; int Ip = R_row_offsets[I]++; R_col_indices[Ip] = i; } /* Reset r[i] to start of row memory. */ for (int i = num_aggregates - 1; i > 0; i--) { R_row_offsets[i] = R_row_offsets[i - 1]; } R_row_offsets[0] = 0; } void test_coarsers(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope) { Matrix<T_Config> Ac; int num_aggregates = A.get_num_rows(); Vector<ivec_h> h_aggregates; h_aggregates.resize( A.get_num_rows() ); for ( int i = 0; i < h_aggregates.size(); i++ ) { h_aggregates[i] = i; } Vector<ivec_h> h_R_row_offsets; Vector<ivec_h> h_R_col_indices; h_R_row_offsets.resize( num_aggregates + 1 ); h_R_col_indices.resize( A.get_num_rows() ); fillRowOffsetsAndColIndices( num_aggregates, h_aggregates, A.get_num_rows(), h_R_row_offsets, h_R_col_indices ); Vector<ivec> aggregates = h_aggregates; Vector<ivec> R_row_offsets = h_R_row_offsets; Vector<ivec> R_col_indices = h_R_col_indices; cudaCheckError(); typename aggregation::CoarseAGeneratorFactory<T_Config>::Iterator iter = aggregation::CoarseAGeneratorFactory<T_Config>::getIterator(); aggregation::CoarseAGenerator<TConfig> *generator; while (!aggregation::CoarseAGeneratorFactory<T_Config>::isIteratorLast(iter)) { generator = NULL; generator = iter->second->create(cfg, cfg_scope); PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); UNITTEST_ASSERT_TRUE_DESC("Generator is not created\n", generator != NULL); UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 
1 : 0)); generator->computeAOperator(A, Ac, aggregates, R_row_offsets, R_col_indices, num_aggregates); UNITTEST_ASSERT_TRUE_DESC("Coarser matrix contains nans\n", !containsNan<ValueTypeA>(Ac.values.raw(), Ac.values.size())); UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (generator != NULL) { delete generator; } ++iter; UNITTEST_ASSERT_TRUE(true); } } void test_selectors(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope) { typename aggregation::SelectorFactory<T_Config>::Iterator iter = aggregation::SelectorFactory<T_Config>::getIterator(); aggregation::Selector<TConfig> *selector; IVector vec, vec1; int num; while (!aggregation::SelectorFactory<T_Config>::isIteratorLast(iter)) { string m_name = iter->first.c_str(); if ((m_name.compare("GEO") == 0) || (m_name.compare("GEO_ONE_PHASE_HANDSHAKING") == 0) || (m_name.compare("PARALLEL_GREEDY_SELECTOR") == 0)) { ++iter; continue; } selector = NULL; PrintOnFail("processing: %s\n", iter->first.c_str()); selector = iter->second->create(cfg, "default"); PrintOnFail("Selector creation\n"); UNITTEST_ASSERT_TRUE(selector != NULL); UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); selector->setAggregates(A, vec, vec1, num); UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (selector != NULL) { delete selector; } ++iter; UNITTEST_ASSERT_TRUE(true); } } void test_matrix_coloring(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope) { MatrixColoring<TConfig> *color; typename MatrixColoringFactory<T_Config>::Iterator iter = MatrixColoringFactory<T_Config>::getIterator(); while (!MatrixColoringFactory<T_Config>::isIteratorLast(iter)) { color = NULL; UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 
1 : 0)); color = iter->second->create(cfg, cfg_scope); UNITTEST_ASSERT_TRUE(color != NULL); A.set_initialized(0); A.colorMatrix(cfg, cfg_scope); A.set_initialized(1); int num_colors = A.getMatrixColoring().getNumColors(); UNITTEST_ASSERT_TRUE(num_colors != 0); UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (color != NULL) { delete color; } ++iter; UNITTEST_ASSERT_TRUE(true); } } template<class TConfig> bool check_solver_mode_pair(string solver) { return (solver != "FIXCOLOR_GS" && solver != "AMG" && solver != "DENSE_LU_SOLVER" && solver != "MULTICOLOR_ILU" && solver != "MULTICOLOR_GS" && // solver != "KACZMARZ" && solver != "IDR" && solver != "IDRMSYNC"); } void test_solvers(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope) { #ifdef AMGX_WITH_MPI int mpiFlag; MPI_Initialized(&mpiFlag); if ( !mpiFlag ) { int argc = 1; char **argv = NULL; MPI_Init( &argc, &argv); } #endif Vector<T_Config> b (A.get_num_rows()*A.get_block_dimy()), x (A.get_num_rows()*A.get_block_dimy()); cusp::blas::fill(b, 1); b.set_block_dimx(1); b.set_block_dimy(A.get_block_dimy()); x.set_block_dimx(1); x.set_block_dimy(A.get_block_dimx()); Vector_h hx; Solver<TConfig> *solver; typename SolverFactory<T_Config>::Iterator iter = SolverFactory<T_Config>::getIterator(); while (!SolverFactory<T_Config>::isIteratorLast(iter)) { //std::cout << "solver=" << iter->first << std::endl; solver = NULL; thrust::fill(x.begin(), x.end(), static_cast<ValueTypeB>(1.0)); UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); solver = iter->second->create(cfg, cfg_scope); // its known that jacobi_l1 implementation for 4x4 fails because of block inverse in setup // its known that fixcolor_gs fails on solve phase because of bad values during setup if (solver != NULL && check_solver_mode_pair<TConfig>(iter->first)) { solver->setup(A, false); solver->set_max_iters(1); if (TConfig::matPrec != AMGX_matFloat) { solver->solve(b, x, false); } hx = x; cudaDeviceSynchronize(); cudaCheckError(); // NaNs are expected since there are zero elements // UNITTEST_ASSERT_TRUE_DESC("Smoother result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size())); } UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (solver != NULL) { delete solver; solver = NULL; } ++iter; UNITTEST_ASSERT_TRUE(true); } } void generatePoissonForTest(Matrix<TConfig > &Aout, int block_size, bool diag_prop, int points, int x, int y, int z = 1) { Matrix<TConfig_h > Ac; { Matrix<TConfig_h > A; A.set_initialized(0); A.addProps(CSR); MatrixCusp<TConfig_h, cusp::csr_format> wA(&A); switch (points) { case 5: cusp::gallery::poisson5pt(wA, x, y); break; case 7: cusp::gallery::poisson7pt(wA, x, y, z); break; case 9: cusp::gallery::poisson9pt(wA, x, y); break; case 27: cusp::gallery::poisson27pt(wA, x, y, z); break; } A.set_initialized(1); Ac.convert( A, ( diag_prop ? DIAG : 0 ) | CSR, block_size, block_size ); Ac.set_initialized(1); } Aout = Ac; } void test_levels(Resources *res, Matrix<T_Config> &A) { Vector<T_Config> b (A.get_num_rows()*A.get_block_dimy()), x (A.get_num_rows()*A.get_block_dimy()); cusp::blas::fill(b, 1); cusp::blas::fill(x, 1); int bsize = A.get_block_dimy(); b.set_block_dimx(1); b.set_block_dimy(bsize); x.set_block_dimy(1); x.set_block_dimx(bsize); if (bsize > 1) { return; } //early out for non-support block sizes, classical will fail on these. 
AMGX_STATUS solve_status; { AMG_Configuration cfg; AMGX_ERROR err = AMGX_OK; UNITTEST_ASSERT_TRUE( cfg.parseParameterString("config_version=2, algorithm=CLASSICAL, smoother=MULTICOLOR_DILU, presweeps=1, postsweeps=1, matrix_coloring_scheme=MIN_MAX, determinism_flag=1, max_levels=2, max_iters=1, norm=L1, coloring_level=1") == AMGX_OK); AMG_Solver<TConfig> amg(res, cfg); err = amg.setup(A); if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED) { PrintOnFail("Classical algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); UNITTEST_ASSERT_EQUAL(err, AMGX_OK); err = amg.solve( b, x, solve_status, true); if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED) { PrintOnFail("Classical algorithm: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); UNITTEST_ASSERT_EQUAL(err, AMGX_OK); // UNITTEST_ASSERT_TRUE_DESC("Level solve result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size())); } } } cusp::blas::fill(x, 1); { AMG_Configuration cfg; AMGX_ERROR err = AMGX_OK; UNITTEST_ASSERT_TRUE( cfg.parseParameterString("config_version=2, algorithm=AGGREGATION, smoother(pcg)=PCG, pcg:preconditioner=BLOCK_JACOBI, presweeps=1, postsweeps=1, selector=SIZE_2, coarseAgenerator=LOW_DEG, matrix_coloring_scheme=MIN_MAX, determinism_flag=1, max_levels=2, max_iters=1, norm=L1, coloring_level=1") == AMGX_OK); AMG_Solver<TConfig> amg(res, cfg); err = amg.setup(A); if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED) { PrintOnFail("Aggregation algorithm setup: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); UNITTEST_ASSERT_EQUAL(err, AMGX_OK); cudaCheckError(); err = amg.solve( b, x, solve_status, true); if (err != AMGX_ERR_NOT_SUPPORTED_TARGET && err != AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE && err != AMGX_ERR_NOT_IMPLEMENTED) { PrintOnFail("Aggregation algorithm solve: Matrix properties: blocksize = %d, diag_prop = %d\n", A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0)); UNITTEST_ASSERT_EQUAL(err, AMGX_OK); // UNITTEST_ASSERT_TRUE_DESC("Level solve result contains nans\n", !containsNan<ValueTypeB>(x.raw(), x.size())); } } } } void test_strength(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> **good ) { //allocate necessary memory typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector; FVector weights(A.get_num_rows(), 0.0); BVector s_con(A.get_num_nz(), false); IVector cf_map(A.get_num_rows(), 0); IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows //compute strong connections and weights double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope); Strength<T_Config> *strength; typename StrengthFactory<T_Config>::Iterator iter = StrengthFactory<T_Config>::getIterator(); while (!StrengthFactory<T_Config>::isIteratorLast(iter)) { strength = NULL; UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 
1 : 0)); strength = iter->second->create(cfg, cfg_scope); UNITTEST_ASSERT_TRUE(strength != NULL); if (strength != NULL) { strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum); UNITTEST_ASSERT_TRUE_DESC("Strength result contains nans\n", !containsNan<float>(weights.raw(), weights.size())); *good = iter->second; } UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (strength != NULL) { delete strength; } ++iter; UNITTEST_ASSERT_TRUE(true); } } void test_selectors(Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> *strengthf, classical::SelectorFactory<TConfig> **good ) { //allocate necessary memory typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector; FVector weights(A.get_num_rows(), 0.0); BVector s_con(A.get_num_nz(), false); IVector cf_map(A.get_num_rows(), 0); IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows //compute strong connections and weights double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope); Strength<T_Config> *strength = strengthf->create(cfg, cfg_scope); strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum); classical::Selector<T_Config> *selector; typename classical::SelectorFactory<T_Config>::Iterator iter = classical::SelectorFactory<T_Config>::getIterator(); while (!classical::SelectorFactory<T_Config>::isIteratorLast(iter)) { string m_name = iter->first.c_str(); if ((m_name.compare("GEO") == 0) || (m_name.compare("GEO_ONE_PHASE_HANDSHAKING") == 0)) { ++iter; continue; } selector = NULL; UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 
1 : 0)); selector = iter->second->create(); UNITTEST_ASSERT_TRUE(strength != NULL); if (selector != NULL) { selector->markCoarseFinePoints(A, weights, s_con, cf_map, scratch); for (int i = 0; i < A.get_num_rows(); i++) { UNITTEST_ASSERT_TRUE(cf_map[i] != UNASSIGNED); } *good = iter->second; } UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (selector != NULL) { delete selector; } ++iter; UNITTEST_ASSERT_TRUE(true); } } void test_interpolators(Resources *res, Matrix<T_Config> &A, AMG_Config &cfg, const std::string &cfg_scope, StrengthFactory<TConfig> *strengthf, classical::SelectorFactory<TConfig> *selectorf ) { //allocate necessary memory typedef Vector<typename T_Config::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename T_Config::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename T_Config::template setVecPrec<AMGX_vecFloat>::Type> FVector; Matrix<TConfig> P; FVector weights(A.get_num_rows(), 0.0); BVector s_con(A.get_num_nz(), false); IVector cf_map(A.get_num_rows(), 0); IVector scratch(A.get_num_rows(), 0); //scratch memory of size num_rows //compute strong connections and weights double max_row_sum = cfg.getParameter<double>("max_row_sum", cfg_scope); Strength<T_Config> *strength = strengthf->create(cfg, cfg_scope); classical::Selector<T_Config> *selector = selectorf->create(); strength->computeStrongConnectionsAndWeights(A, s_con, weights, max_row_sum); selector->markCoarseFinePoints(A, weights, s_con, cf_map, scratch); Interpolator<T_Config> *interpolator; typename InterpolatorFactory<T_Config>::Iterator iter = InterpolatorFactory<T_Config>::getIterator(); AMG_Configuration scfg; AMG_Solver<TConfig> amg(res, scfg); while (!InterpolatorFactory<T_Config>::isIteratorLast(iter)) { interpolator = NULL; //printf("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 1 : 0));fflush(stdout); UNITTEST_ASSERT_EXCEPTION_START; PrintOnFail("%s : Matrix properties: blocksize = %d, diag_prop = %d\n", iter->first.c_str(), A.get_block_dimy(), (A.hasProps(DIAG) ? 
1 : 0)); interpolator = iter->second->create(cfg, cfg_scope); UNITTEST_ASSERT_TRUE(strength != NULL); if (interpolator != NULL) { interpolator->generateInterpolationMatrix(A, cf_map, s_con, scratch, P, &amg); } UNITTEST_ASSERT_EXCEPTION_END_NOT_IMPLEMENTED; if (interpolator != NULL) { delete interpolator; } ++iter; UNITTEST_ASSERT_TRUE(true); } } void run() { randomize( 32 ); set_forge_ahead(true); int nrows = 6000; for (int bsize = 1; bsize < 6; ++bsize) { AMG_Config cfg; cfg.parseParameterString("config_version=2, determinism_flag=1, coloring_level=1, reorder_cols_by_color=1, insert_diag_while_reordering=1, preconditioner=BLOCK_JACOBI, min_block_rows=2,max_iters=50"); const std::string cfg_scope = "default"; Resources res; // default resources { MatrixA A; VVector tb; generateMatrixRandomStructCustom<TConfig>::generateExact(A, nrows, bsize, false); //generateMatrixRandomStruct<TConfig>::generateExact(A, nrows, false, bsize, false); random_fill(A); ////////////////////////// //MatrixIO<TConfig>::writeSystemMatrixMarket("test.mtx", A, tb); //////////////////////////// // aggregation test_coarsers(A, cfg, cfg_scope); test_selectors(A, cfg, cfg_scope); test_matrix_coloring(A, cfg, cfg_scope); A.set_initialized(0); A.colorMatrix(cfg, cfg_scope); A.set_initialized(1); test_solvers(A, cfg, cfg_scope); // classical //TODO: if strength cannot process matrix if (bsize == 1) { StrengthFactory<TConfig> *good_strength = NULL; test_strength(A, cfg, cfg_scope, &good_strength); if (good_strength != NULL) { classical::SelectorFactory<TConfig> *good_selector = NULL; test_selectors(A, cfg, cfg_scope, good_strength, &good_selector); if (good_selector != NULL) { //test_interpolators(&res, A, cfg, cfg_scope, good_strength, good_selector ); } } } // levels test_levels(&res, A); // delete color; } } } DECLARE_UNITTEST_END(ImplicitZeroInDiagonal); #define AMGX_CASE_LINE(CASE) ImplicitZeroInDiagonal <TemplateMode<CASE>::Type> ImplicitZeroInDiagonal_##CASE; AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } //namespace amgx
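// A standalone, illustrative sketch of the counting-then-scatter pattern that
// fillRowOffsetsAndColIndices above uses to build the restriction operator R in CSR form
// (one row per aggregate, one column entry per fine point). This is not the AMGX
// implementation: it produces the same row_offsets/col_indices, but uses an explicit
// exclusive prefix sum and a cursor array instead of the in-place shift. All names here
// are illustrative.
#include <vector>

static void buildRestrictionCSR(const std::vector<int>& aggregates, // fine point -> aggregate id
                                int num_aggregates,
                                std::vector<int>& row_offsets,      // size num_aggregates + 1
                                std::vector<int>& col_indices)      // size aggregates.size()
{
    const int num_fine = static_cast<int>(aggregates.size());
    row_offsets.assign(num_aggregates + 1, 0);
    col_indices.assign(num_fine, 0);

    // 1) Count how many fine points fall into each aggregate.
    for (int i = 0; i < num_fine; ++i)
    {
        row_offsets[aggregates[i] + 1]++;
    }

    // 2) Exclusive prefix sum turns the per-row counts into row start offsets.
    for (int r = 0; r < num_aggregates; ++r)
    {
        row_offsets[r + 1] += row_offsets[r];
    }

    // 3) Scatter each fine point index into its row, bumping a per-row cursor.
    std::vector<int> cursor(row_offsets.begin(), row_offsets.end() - 1);
    for (int i = 0; i < num_fine; ++i)
    {
        col_indices[cursor[aggregates[i]]++] = i;
    }
}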
// Avoid warnings in includes with CUDA compiler #pragma GCC diagnostic ignored "-Wattributes" #pragma diag_suppress code_is_unreachable #include "libvis/cuda/patch_match_stereo.cuh" #include <math_constants.h> #include "libvis/cuda/cuda_auto_tuner.h" #include "libvis/cuda/cuda_unprojection_lookup.cuh" #include "libvis/cuda/cuda_util.h" namespace vis { constexpr float kMinInvDepth = 1e-5f; // TODO: Make parameter __forceinline__ __device__ float SampleAtProjectedPosition( const float x, const float y, const float z, const PixelCornerProjector_& projector, const CUDAMatrix3x4& stereo_tr_reference, cudaTextureObject_t stereo_texture) { float3 pnxy = stereo_tr_reference * make_float3(x, y, z); if (pnxy.z <= 0.f) { return CUDART_NAN_F; } const float2 pxy = projector.Project(pnxy); if (pxy.x < 0.5f || pxy.y < 0.5f || pxy.x >= projector.width - 0.5f || pxy.y >= projector.height - 0.5f) { return CUDART_NAN_F; } else { return 255.0f * tex2D<float>(stereo_texture, pxy.x, pxy.y); } } __forceinline__ __device__ float CalculatePlaneDepth2( float d, const float2& normal_xy, float normal_z, float query_x, float query_y) { return d / (query_x * normal_xy.x + query_y * normal_xy.y + normal_z); } __forceinline__ __device__ float CalculatePlaneInvDepth2( float d, const float2& normal_xy, float normal_z, float query_x, float query_y) { return (query_x * normal_xy.x + query_y * normal_xy.y + normal_z) / d; } template <int kContextRadius> __forceinline__ __device__ float ComputeCostsSSD( int x, int y, const float2& normal_xy, const float inv_depth, const CUDAUnprojectionLookup2D_& unprojector, const CUDABuffer_<u8>& reference_image, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& projector, cudaTextureObject_t stereo_image) { if (inv_depth < kMinInvDepth) { return CUDART_NAN_F; } const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float2 center_nxy = unprojector.UnprojectPoint(x, y); const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; float cost = 0; #pragma unroll for (int dy = -kContextRadius; dy <= kContextRadius; ++ dy) { #pragma unroll for (int dx = -kContextRadius; dx <= kContextRadius; ++ dx) { float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy); float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); nxy.x *= plane_depth; nxy.y *= plane_depth; float sample = SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth, projector, stereo_tr_reference, stereo_image); const float diff = sample - reference_image(y + dy, x + dx); cost += diff * diff; } } return cost; } // Computes 0.5f * (1 - ZNCC), so that the result can be used // as a cost value with range [0; 1]. __forceinline__ __device__ float ComputeZNCCBasedCost( const int num_samples, const float sum_a, const float squared_sum_a, const float sum_b, const float squared_sum_b, const float product_sum) { const float normalizer = 1.0f / num_samples; const float numerator = product_sum - normalizer * (sum_a * sum_b); const float denominator_reference = squared_sum_a - normalizer * sum_a * sum_a; const float denominator_other = squared_sum_b - normalizer * sum_b * sum_b; // NOTE: Using a threshold on homogeneous patches is required here since // otherwise the optimum might be a noisy value in a homogeneous area. 
constexpr float kHomogeneousThreshold = 0.1f; if (denominator_reference < kHomogeneousThreshold || denominator_other < kHomogeneousThreshold) { return 1.0f; } else { return 0.5f * (1.0f - numerator * rsqrtf(denominator_reference * denominator_other)); } } constexpr int kNumSamples = 81; __constant__ float kSamplesCUDA[kNumSamples][2]; // // Sphere: // constexpr float kSamples[kNumSamples][2] = { // {-0.352334470334, -0.698301652151}, // {0.30186894608, -0.855127426665}, // {0.0717640086134, -0.268622166175}, // {-0.884002150451, 0.0148714663788}, // {-0.925008683116, -0.132708632675}, // {-0.150961621715, 0.653704249344}, // {-0.752396077701, -0.553522070786}, // {0.254866444811, 0.895417884914}, // {0.154205897235, -0.206639050698}, // {0.716936918097, -0.420781427337}, // {-0.383036351796, 0.63225271824}, // {-0.638547240152, 0.163200327325}, // {0.277826937852, -0.255204914549}, // {0.0954889314191, -0.874422050053}, // {0.360799946364, -0.144815388661}, // {-0.371705659246, 0.171123727015}, // {-0.0936312472584, -0.400466006273}, // {0.588758963045, 0.397988867459}, // {-0.511806978556, 0.148847420517}, // {0.0503930076229, 0.750274991147}, // {0.458890578878, -0.42412447022}, // {-0.16375435643, 0.51428185913}, // {-0.696030930679, -0.0220737990484}, // {-0.921585485905, 0.336431713069}, // {0.529141732426, 0.146051880555}, // {0.750955623662, -0.372504974304}, // {0.390590732547, 0.18873975421}, // {0.159790408565, -0.0875893373972}, // {-0.0518033251607, 0.328304410949}, // {-0.878661144806, 0.402984042609}, // {0.643849573219, -0.430808935812}, // {-0.228417115107, 0.337305431768}, // {-0.954874143889, -0.0766094274005}, // {-0.741319555963, -0.504770332606}, // {-0.218100593734, 0.742843948253}, // {-0.8388373976, -0.101625198101}, // {0.0988798182881, 0.766767652883}, // {0.638559675671, 0.727968939397}, // {-0.443157870972, -0.169406965577}, // {-0.282457669337, 0.768385654396}, // {-0.647564543019, -0.536086266361}, // {-0.533327832638, -0.0300745393173}, // {0.178247007465, -0.474506761403}, // {-0.261492854211, 0.132682447413}, // {0.90619585105, 0.380987314272}, // {0.0309828661416, 0.235185498818}, // {0.352400164899, -0.892014213552}, // {0.799066020116, 0.559938981412}, // {0.749026368269, 0.595746242393}, // {-0.215242186217, -0.202042335359}, // {-0.792925812579, 0.268579131371}, // {-0.582473629108, -0.675393624456}, // {-0.319892695535, -0.894848792219}, // {-0.797071263955, -0.272780155931}, // {0.228137975577, -0.702899029338}, // {-0.495484486886, -0.305220907893}, // {-0.271673120943, -0.754315538476}, // {-0.0680210816801, -0.0323306871675}, // {-0.314728323514, -0.470486216566}, // {0.657710756243, -0.677122778947}, // {0.0565147900842, -0.706794922202}, // {0.0863448517642, -0.945915017156}, // {0.0562188818766, 0.957002485438}, // {0.726650060579, 0.392393571816}, // {-0.477769605541, -0.266600416478}, // {-0.665915930931, 0.543875816804}, // {0.0651847949858, 0.558109782676}, // {-0.340670009904, -0.553916653794}, // {0.705257597493, 0.612157169571}, // {0.636665886651, 0.479746040751}, // {-0.546521019937, 0.035277448487}, // {-0.28887491329, -0.942039698517}, // {-0.481651273464, 0.3850438834}, // {0.913030152683, -0.105544644467}, // {0.910001262643, -0.270728229276}, // {-0.559075354008, -0.546308346539}, // {-0.606587673161, -0.591253273448}, // {0.248132794876, 0.800616675768}, // {0.680871054559, -0.0410531474769}, // {0.305956085682, 0.599287489699}, // {-0.830443027099, 0.32117130041} // }; // Square: constexpr float kSamples[kNumSamples][2] = { 
{0.912068543778f, 0.895654974119f}, {-0.886897264546f, -0.830256009682f}, {0.670997756259f, 0.471939978137f}, {0.33946080288f, -0.383727084822f}, {0.211888331357f, 0.213603467282f}, {0.162408034224f, -0.68323425949f}, {-0.138660719417f, -0.212936359589f}, {0.446024162475f, 0.989639125899f}, {0.898790946186f, 0.0883540948586f}, {-0.110291622548f, -0.463518516701f}, {-0.928151341214f, -0.945110285818f}, {-0.0702122758054f, -0.363069744293f}, {-0.239970156199f, 0.783578915657f}, {0.0515055382921f, 0.121020722053f}, {-0.52775318577f, -0.952283841718f}, {-0.349714142478f, -0.726605214027f}, {0.0204476916744f, 0.997367136385f}, {0.348959394692f, -0.636313006354f}, {0.787143073166f, 0.593519842843f}, {0.468803383788f, 0.813187299795f}, {0.525770967666f, 0.579495274924f}, {-0.292426044317f, 0.961953146144f}, {0.923801875796f, -0.677630693392f}, {0.508008143304f, 0.430301796475f}, {-0.077186604516f, 0.0607114322469f}, {-0.0199721562996f, 0.849664144189f}, {0.00168212526131f, 0.663048979584f}, {-0.292151590263f, 0.765701837163f}, {0.799401177513f, -0.0779756702367f}, {0.13541014084f, 0.840660878384f}, {0.447545907744f, -0.0267828902768f}, {-0.556377978018f, -0.350665512462f}, {0.39914327614f, -0.667860629012f}, {0.815880993252f, -0.4637249742f}, {0.822755671736f, -0.380873750101f}, {0.914723423112f, 0.412411612735f}, {0.00849763396664f, 0.0354955122971f}, {0.302828797934f, 0.175889423589f}, {-0.376311350898f, -0.584363050924f}, {0.0237833167106f, 0.868308718268f}, {0.246530173452f, -0.849249261852f}, {0.640799989424f, 0.451898574955f}, {0.815307241903f, -0.617194533392f}, {0.489565448555f, -0.882482207203f}, {0.305819854869f, -0.453800535326f}, {-0.54676694151f, 0.750982342896f}, {-0.787468034709f, 0.0447253307179f}, {0.70788601437f, -0.510336044062f}, {-0.579042122609f, 0.761163518733f}, {-0.154164703221f, 0.43392219781f}, {-0.936253859747f, -0.275286177394f}, {-0.656238015749f, 0.345530882827f}, {-0.834193645191f, 0.909124330691f}, {-0.949310570346f, 0.458847014884f}, {-0.957710260554f, -0.488619891885f}, {0.626708774805f, -0.685763422626f}, {-0.632522381498f, 0.382990852027f}, {-0.228868237295f, -0.913678008405f}, {0.980003092406f, -0.697159782487f}, {-0.927462011513f, -0.311597988926f}, {0.23047896665f, 0.484919246252f}, {-0.773770193964f, -0.325572453607f}, {-0.938378284747f, -0.102693475015f}, {0.531939873176f, 0.479893327441f}, {0.8040403167f, 0.51132430735f}, {0.724891552651f, 0.410690280102f}, {-0.0544409758034f, -0.548944859304f}, {0.321656997314f, -0.367388146543f}, {-0.795901789901f, -0.104356277162f}, {0.749526082696f, -0.744927070762f}, {0.169911396221f, -0.21409489979f}, {0.0296053935284f, -0.712341072046f}, {0.919462372974f, -0.481807153496f}, {0.212155878107f, -0.160488908689f}, {-0.96393356175f, 0.115900247678f}, {-0.718861242077f, -0.886438008354f}, {-0.932887507503f, -0.677669969638f}, {-0.808256112734f, 0.270151395077f}, {0.0165183680713f, 0.966932188095f}, {0.868260637394f, 0.989050466519f}, {-0.535052318664f, -0.110605089761f} }; template <int kContextRadius> __forceinline__ __device__ float ComputeCostsZNCC( int x, int y, const float2& normal_xy, const float inv_depth, const CUDAUnprojectionLookup2D_& unprojector, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& projector, cudaTextureObject_t stereo_image) { if (inv_depth < kMinInvDepth) { return CUDART_NAN_F; } const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f 
/ inv_depth; const float2 center_nxy = unprojector.UnprojectPoint(x, y); const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; float sum_a = 0; float squared_sum_a = 0; float sum_b = 0; float squared_sum_b = 0; float product_sum = 0; for (int sample = 0; sample < kNumSamples; ++ sample) { float dx = 1.25f * kContextRadius * kSamplesCUDA[sample][0]; // TODO: magic constant factor float dy = 1.25f * kContextRadius * kSamplesCUDA[sample][1]; // TODO: magic constant factor float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers). float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); nxy.x *= plane_depth; nxy.y *= plane_depth; float stereo_value = SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth, projector, stereo_tr_reference, stereo_image); sum_a += stereo_value; squared_sum_a += stereo_value * stereo_value; float reference_value = 255.f * tex2D<float>(reference_texture, x + dx + 0.5f, y + dy + 0.5f); sum_b += reference_value; squared_sum_b += reference_value * reference_value; product_sum += stereo_value * reference_value; } // #pragma unroll // for (int dy = -kContextRadius; dy <= kContextRadius; ++ dy) { // #pragma unroll // for (int dx = -kContextRadius; dx <= kContextRadius; ++ dx) { // float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy); // float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); // nxy.x *= plane_depth; // nxy.y *= plane_depth; // // float stereo_value = // SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth, // projector, // stereo_tr_reference, // stereo_image); // // sum_a += stereo_value; // squared_sum_a += stereo_value * stereo_value; // // float reference_value = reference_image(y + dy, x + dx); // // sum_b += reference_value; // squared_sum_b += reference_value * reference_value; // // product_sum += stereo_value * reference_value; // } // } return ComputeZNCCBasedCost( kNumSamples, sum_a, squared_sum_a, sum_b, squared_sum_b, product_sum); } template <int kContextRadius> __forceinline__ __device__ float ComputeCostsCensus( int x, int y, const float2& normal_xy, const float inv_depth, const CUDAUnprojectionLookup2D_& unprojector, const CUDABuffer_<u8>& reference_image, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& projector, cudaTextureObject_t stereo_image) { if (inv_depth < kMinInvDepth) { return CUDART_NAN_F; } const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float2 center_nxy = unprojector.UnprojectPoint(x, y); const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; float stereo_center_value = SampleAtProjectedPosition(center_nxy.x * depth, center_nxy.y * depth, depth, projector, stereo_tr_reference, stereo_image); u8 reference_center_value = reference_image(y, x); float cost = 0; constexpr int kSpreadFactor = 2; // TODO: Make parameter #pragma unroll for (int dy = -kSpreadFactor * kContextRadius; dy <= kSpreadFactor * kContextRadius; dy += kSpreadFactor) { #pragma unroll for (int dx = -kSpreadFactor * kContextRadius; dx <= kSpreadFactor * kContextRadius; dx += kSpreadFactor) { if (dx == 0 && dy == 0) { continue; } if (x + dx < 0 || y + dy < 0 || x + dx >= reference_image.width() || y + dy >= reference_image.height()) { continue; } float2 nxy = 
unprojector.UnprojectPoint(x + dx, y + dy); float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); nxy.x *= plane_depth; nxy.y *= plane_depth; float stereo_value = SampleAtProjectedPosition(nxy.x, nxy.y, plane_depth, projector, stereo_tr_reference, stereo_image); if (::isnan(stereo_value)) { return CUDART_NAN_F; } int stereo_bit = stereo_value > stereo_center_value; u8 reference_value = reference_image(y + dy, x + dx); int reference_bit = reference_value > reference_center_value; cost += stereo_bit != reference_bit; } } return cost; } template <int kContextRadius> __forceinline__ __device__ float ComputeCosts( int x, int y, const float2& normal_xy, const float inv_depth, const CUDAUnprojectionLookup2D_& unprojector, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& projector, cudaTextureObject_t stereo_image, int match_metric, float second_best_min_distance_factor, CUDABuffer_<float> best_inv_depth_map) { if (second_best_min_distance_factor > 0) { // Reject estimates which are too close to the best inv depth. float best_inv_depth = best_inv_depth_map(y, x); float factor = best_inv_depth / inv_depth; if (factor < 1) { factor = 1 / factor; } if (factor < second_best_min_distance_factor) { return CUDART_NAN_F; } } // TODO: Commented out for higher compile speed (and since only ZNCC is consistent with outlier filtering etc.) // if (match_metric == kPatchMatchStereo_MatchMetric_SSD) { // return ComputeCostsSSD<kContextRadius>( // x, y, normal_xy, inv_depth, unprojector, reference_image, // stereo_tr_reference, projector, stereo_image); // } else if (match_metric == kPatchMatchStereo_MatchMetric_ZNCC) { return ComputeCostsZNCC<kContextRadius>( x, y, normal_xy, inv_depth, unprojector, reference_image, reference_texture, stereo_tr_reference, projector, stereo_image); // } else if (match_metric == kPatchMatchStereo_MatchMetric_Census) { // return ComputeCostsCensus<kContextRadius>( // x, y, normal_xy, inv_depth, unprojector, reference_image, // stereo_tr_reference, projector, stereo_image); // } // This should never be reached since all metrics should be handled above. return 0; } template <int kContextRadius> __global__ void InitPatchMatchCUDAKernel( int match_metric, float max_normal_2d_length, CUDAUnprojectionLookup2D_ unprojector, CUDABuffer_<u8> reference_image, cudaTextureObject_t reference_texture, CUDAMatrix3x4 stereo_tr_reference, PixelCornerProjector_ projector, cudaTextureObject_t stereo_image, float inv_min_depth, float inv_max_depth, CUDABuffer_<float> inv_depth_map, CUDABuffer_<char2> normals, CUDABuffer_<float> costs, CUDABuffer_<curandState> random_states, CUDABuffer_<float> lambda, float second_best_min_distance_factor, CUDABuffer_<float> best_inv_depth_map) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= kContextRadius && y >= kContextRadius && x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { // Initialize random states // TODO: Would it be better to do this only once for each PatchMatchStereo object? 
int id = x + inv_depth_map.width() * y; curand_init(id, 0, 0, &random_states(y, x)); // Initialize random initial normals constexpr float kNormalRange = 1.0f; float2 normal_xy; normal_xy.x = kNormalRange * (curand_uniform(&random_states(y, x)) - 0.5f); normal_xy.y = kNormalRange * (curand_uniform(&random_states(y, x)) - 0.5f); float length = sqrtf(normal_xy.x * normal_xy.x + normal_xy.y * normal_xy.y); if (length > max_normal_2d_length) { normal_xy.x *= max_normal_2d_length / length; normal_xy.y *= max_normal_2d_length / length; } normals(y, x) = make_char2(normal_xy.x * 127.f, normal_xy.y * 127.f); // Initialize random initial depths const float inv_depth = inv_max_depth + (inv_min_depth - inv_max_depth) * curand_uniform(&random_states(y, x)); inv_depth_map(y, x) = inv_depth; // Initialize lambda lambda(y, x) = 1.02f; // TODO: tune // Compute initial costs costs(y, x) = ComputeCosts<kContextRadius>( x, y, normal_xy, inv_depth, unprojector, reference_image, reference_texture, stereo_tr_reference, projector, stereo_image, match_metric, second_best_min_distance_factor, best_inv_depth_map); } } void InitPatchMatchCUDA( cudaStream_t stream, int match_metric, int context_radius, float max_normal_2d_length, cudaTextureObject_t reference_unprojection_lookup, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& stereo_camera, const cudaTextureObject_t stereo_image, float inv_min_depth, float inv_max_depth, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<char2>* normals, CUDABuffer_<float>* costs, CUDABuffer_<curandState>* random_states, CUDABuffer_<float>* lambda, float second_best_min_distance_factor, CUDABuffer_<float>* best_inv_depth_map) { // TODO: Do this separately static bool initialized = false; if (!initialized) { cudaMemcpyToSymbol(kSamplesCUDA, kSamples, kNumSamples * 2 * sizeof(float)); initialized = true; } CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D( InitPatchMatchCUDAKernel<_context_radius>, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, /* kernel parameters */ match_metric, max_normal_2d_length, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, stereo_camera, stereo_image, inv_min_depth, inv_max_depth, *inv_depth_map, *normals, *costs, *random_states, *lambda, second_best_min_distance_factor, best_inv_depth_map ? 
*best_inv_depth_map : CUDABuffer_<float>())); CHECK_CUDA_NO_ERROR(); } template <int kContextRadius, bool mutate_depth, bool mutate_normal> __global__ void PatchMatchMutationStepCUDAKernel( int match_metric, float max_normal_2d_length, CUDAUnprojectionLookup2D_ unprojector, CUDABuffer_<u8> reference_image, cudaTextureObject_t reference_texture, CUDAMatrix3x4 stereo_tr_reference, PixelCornerProjector_ projector, cudaTextureObject_t stereo_image, float step_range, CUDABuffer_<float> inv_depth_map, CUDABuffer_<char2> normals, CUDABuffer_<float> costs, CUDABuffer_<curandState> random_states, float second_best_min_distance_factor, CUDABuffer_<float> best_inv_depth_map) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= kContextRadius && y >= kContextRadius && x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { float proposed_inv_depth = inv_depth_map(y, x); if (mutate_depth) { proposed_inv_depth = max(kMinInvDepth, fabsf(proposed_inv_depth + step_range * (curand_uniform(&random_states(y, x)) - 0.5f))); } constexpr float kRandomNormalRange = 1.0f; const char2 proposed_normal_char = normals(y, x); float2 proposed_normal = make_float2( proposed_normal_char.x * (1 / 127.f), proposed_normal_char.y * (1 / 127.f)); if (mutate_normal) { proposed_normal.x += kRandomNormalRange * (curand_uniform(&random_states(y, x)) - 0.5f); proposed_normal.y += kRandomNormalRange * (curand_uniform(&random_states(y, x)) - 0.5f); float length = sqrtf(proposed_normal.x * proposed_normal.x + proposed_normal.y * proposed_normal.y); if (length > max_normal_2d_length) { proposed_normal.x *= max_normal_2d_length / length; proposed_normal.y *= max_normal_2d_length / length; } } // Test whether to accept the proposal float proposal_costs = ComputeCosts<kContextRadius>( x, y, proposed_normal, proposed_inv_depth, unprojector, reference_image, reference_texture, stereo_tr_reference, projector, stereo_image, match_metric, second_best_min_distance_factor, best_inv_depth_map); if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { costs(y, x) = proposal_costs; normals(y, x) = make_char2(proposed_normal.x * 127.f, proposed_normal.y * 127.f); inv_depth_map(y, x) = proposed_inv_depth; } } } void PatchMatchMutationStepCUDA( cudaStream_t stream, int match_metric, int context_radius, float max_normal_2d_length, cudaTextureObject_t reference_unprojection_lookup, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& stereo_camera, const cudaTextureObject_t stereo_image, float step_range, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<char2>* normals, CUDABuffer_<float>* costs, CUDABuffer_<curandState>* random_states, float second_best_min_distance_factor, CUDABuffer_<float>* best_inv_depth_map) { CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D_TEMPLATED( PatchMatchMutationStepCUDAKernel, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, TEMPLATE_ARGUMENTS(_context_radius, true, true), /* kernel parameters */ match_metric, max_normal_2d_length, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, stereo_camera, stereo_image, step_range, *inv_depth_map, *normals, *costs, *random_states, second_best_min_distance_factor, best_inv_depth_map ? 
*best_inv_depth_map : CUDABuffer_<float>())); CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D_TEMPLATED( PatchMatchMutationStepCUDAKernel, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, TEMPLATE_ARGUMENTS(_context_radius, true, false), /* kernel parameters */ match_metric, max_normal_2d_length, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, stereo_camera, stereo_image, step_range, *inv_depth_map, *normals, *costs, *random_states, second_best_min_distance_factor, best_inv_depth_map ? *best_inv_depth_map : CUDABuffer_<float>())); CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D_TEMPLATED( PatchMatchMutationStepCUDAKernel, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, TEMPLATE_ARGUMENTS(_context_radius, false, true), /* kernel parameters */ match_metric, max_normal_2d_length, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, stereo_camera, stereo_image, step_range, *inv_depth_map, *normals, *costs, *random_states, second_best_min_distance_factor, best_inv_depth_map ? *best_inv_depth_map : CUDABuffer_<float>())); CHECK_CUDA_NO_ERROR(); } // (Mostly) auto-generated function. typedef float Scalar; // opcount = 243 __forceinline__ __device__ void ComputeResidualAndJacobian( Scalar cx, Scalar cy, Scalar fx, Scalar fy, Scalar inv_depth, Scalar n_x, Scalar n_y, Scalar nx, Scalar ny, Scalar other_nx, Scalar other_ny, Scalar ref_intensity, Scalar str_0_0, Scalar str_0_1, Scalar str_0_2, Scalar str_0_3, Scalar str_1_0, Scalar str_1_1, Scalar str_1_2, Scalar str_1_3, Scalar str_2_0, Scalar str_2_1, Scalar str_2_2, Scalar str_2_3, cudaTextureObject_t stereo_texture, Scalar* residuals, Scalar* jacobian) { const Scalar term0 = sqrt(-n_x*n_x - n_y*n_y + 1); const Scalar term1 = n_x*other_nx + n_y*other_ny - term0; const Scalar term2 = 1.0f/term1; const Scalar term3 = str_1_2*term2; const Scalar term4 = 1.0f/inv_depth; const Scalar term5 = n_x*nx; const Scalar term6 = n_y*ny; const Scalar term7 = -term0*term4 + term4*term5 + term4*term6; const Scalar term8 = other_nx*str_1_0*term2; const Scalar term9 = other_ny*str_1_1*term2; const Scalar term10 = str_1_3 + term3*term7 + term7*term8 + term7*term9; const Scalar term11 = str_2_2*term2; const Scalar term12 = other_nx*str_2_0*term2; const Scalar term13 = other_ny*str_2_1*term2; const Scalar term14 = str_2_3 + term11*term7 + term12*term7 + term13*term7; const Scalar term15 = 1.0f/term14; const Scalar term16 = fy*term15; float py = cy + term10*term16; int iy = static_cast<int>(py); const Scalar term17 = py - iy; const Scalar term18 = str_0_2*term2; const Scalar term19 = other_nx*str_0_0*term2; const Scalar term20 = other_ny*str_0_1*term2; const Scalar term21 = str_0_3 + term18*term7 + term19*term7 + term20*term7; const Scalar term22 = fx*term15; float px = cx + term21*term22; int ix = static_cast<int>(px); const Scalar term23 = px - ix; Scalar top_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 0.5f); Scalar top_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 0.5f); Scalar bottom_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 1.5f); Scalar bottom_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 1.5f); const Scalar term24 = -term23 + 1; const Scalar term25 = bottom_left*term24 + bottom_right*term23; const Scalar term26 = -term17 + 1; const Scalar term27 = 
term23*top_right; const Scalar term28 = term24*top_left; const Scalar term29 = -term17*(bottom_left - bottom_right) - term26*(top_left - top_right); const Scalar term30 = term4 * term4; const Scalar term31 = term0 - term5 - term6; const Scalar term32 = term30*term31; const Scalar term33 = term15 * term15; const Scalar term34 = term30*term31*term33*(term11 + term12 + term13); const Scalar term35 = term25 - term27 - term28; const Scalar term36 = 1.0f/term0; const Scalar term37 = n_x*term36; const Scalar term38 = nx*term4 + term37*term4; const Scalar term39 = -other_nx - term37; const Scalar term40 = term2 * term2; const Scalar term40Xterm7 = term40*term7; const Scalar term41 = str_0_2*term40Xterm7; const Scalar term42 = other_nx*str_0_0*term40Xterm7; const Scalar term43 = other_ny*str_0_1*term40Xterm7; const Scalar term44 = fx*term21*term33; const Scalar term45 = str_2_2*term40Xterm7; const Scalar term46 = other_nx*str_2_0*term40Xterm7; const Scalar term47 = other_ny*str_2_1*term40Xterm7; const Scalar term48 = -term11*term38 - term12*term38 - term13*term38 - term39*term45 - term39*term46 - term39*term47; const Scalar term49 = str_1_2*term40Xterm7; const Scalar term50 = other_nx*str_1_0*term40Xterm7; const Scalar term51 = other_ny*str_1_1*term40Xterm7; const Scalar term52 = fy*term10*term33; const Scalar term53 = n_y*term36; const Scalar term54 = ny*term4 + term4*term53; const Scalar term55 = -other_ny - term53; const Scalar term56 = -term11*term54 - term12*term54 - term13*term54 - term45*term55 - term46*term55 - term47*term55; *residuals = -ref_intensity + term17*term25 + term26*(term27 + term28); jacobian[0] = term29*(-fx*term21*term34 + term22*(term18*term32 + term19*term32 + term20*term32)) + term35*(-fy*term10*term34 + term16*(term3*term32 + term32*term8 + term32*term9)); jacobian[1] = term29*(term22*(term18*term38 + term19*term38 + term20*term38 + term39*term41 + term39*term42 + term39*term43) + term44*term48) + term35*(term16*(term3*term38 + term38*term8 + term38*term9 + term39*term49 + term39*term50 + term39*term51) + term48*term52); jacobian[2] = term29*(term22*(term18*term54 + term19*term54 + term20*term54 + term41*term55 + term42*term55 + term43*term55) + term44*term56) + term35*(term16*(term3*term54 + term49*term55 + term50*term55 + term51*term55 + term54*term8 + term54*term9) + term52*term56); } // template <int kContextRadius> // __global__ void PatchMatchOptimizationStepCUDAKernel( // int match_metric, // float max_normal_2d_length, // CUDAUnprojectionLookup2D_ unprojector, // CUDABuffer_<u8> reference_image, // cudaTextureObject_t reference_texture, // CUDAMatrix3x4 stereo_tr_reference, // PixelCornerProjector projector, // cudaTextureObject_t stereo_image, // CUDABuffer_<float> inv_depth_map, // CUDABuffer_<char2> normals, // CUDABuffer_<float> costs, // CUDABuffer_<curandState> random_states, // CUDABuffer_<float> lambda) { // unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; // unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // // if (x >= kContextRadius && y >= kContextRadius && // x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { // float inv_depth = inv_depth_map(y, x); // char2 normal_xy_char = normals(y, x); // float2 normal_xy = make_float2( // normal_xy_char.x * (1 / 127.f), normal_xy_char.y * (1 / 127.f)); // float2 nxy = unprojector.UnprojectPoint(x, y); // // // Gauss-Newton update equation coefficients. 
// float H[3 + 2 + 1] = {0, 0, 0, 0, 0, 0}; // float b[3] = {0, 0, 0}; // // #pragma unroll // for (int dy = -kContextRadius; dy <= kContextRadius; ++ dy) { // #pragma unroll // for (int dx = -kContextRadius; dx <= kContextRadius; ++ dx) { // float raw_residual; // float jacobian[3]; // // float2 other_nxy = unprojector.UnprojectPoint(x + dx, y + dy); // // ComputeResidualAndJacobian( // projector.cx - 0.5f, projector.cy - 0.5f, projector.fx, projector.fy, // inv_depth, normal_xy.x, normal_xy.y, // nxy.x, nxy.y, // other_nxy.x, other_nxy.y, // reference_image(y + dy, x + dx), // stereo_tr_reference.row0.x, stereo_tr_reference.row0.y, stereo_tr_reference.row0.z, stereo_tr_reference.row0.w, // stereo_tr_reference.row1.x, stereo_tr_reference.row1.y, stereo_tr_reference.row1.z, stereo_tr_reference.row1.w, // stereo_tr_reference.row2.x, stereo_tr_reference.row2.y, stereo_tr_reference.row2.z, stereo_tr_reference.row2.w, // stereo_image, // &raw_residual, jacobian); // // // Accumulate // b[0] += raw_residual * jacobian[0]; // b[1] += raw_residual * jacobian[1]; // b[2] += raw_residual * jacobian[2]; // // H[0] += jacobian[0] * jacobian[0]; // H[1] += jacobian[0] * jacobian[1]; // H[2] += jacobian[0] * jacobian[2]; // // H[3] += jacobian[1] * jacobian[1]; // H[4] += jacobian[1] * jacobian[2]; // // H[5] += jacobian[2] * jacobian[2]; // } // } // // /*// TEST: Optimize inv_depth only // b[0] = b[0] / H[0]; // inv_depth -= b[0];*/ // // // Levenberg-Marquardt // const float kDiagLambda = lambda(y, x); // H[0] *= kDiagLambda; // H[3] *= kDiagLambda; // H[5] *= kDiagLambda; // // // Solve for the update using Cholesky decomposition // // (H[0] ) (H[0] H[1] H[2]) (x[0]) (b[0]) // // (H[1] H[3] ) * ( H[3] H[4]) * (x[1]) = (b[1]) // // (H[2] H[4] H[5]) ( H[5]) (x[2]) (b[2]) // H[0] = sqrtf(H[0]); // // H[1] = 1.f / H[0] * H[1]; // H[3] = sqrtf(H[3] - H[1] * H[1]); // // H[2] = 1.f / H[0] * H[2]; // H[4] = 1.f / H[3] * (H[4] - H[1] * H[2]); // H[5] = sqrtf(H[5] - H[2] * H[2] - H[4] * H[4]); // // // Re-use b for the intermediate vector // b[0] = (b[0] / H[0]); // b[1] = (b[1] - H[1] * b[0]) / H[3]; // b[2] = (b[2] - H[2] * b[0] - H[4] * b[1]) / H[5]; // // // Re-use b for the delta vector // b[2] = (b[2] / H[5]); // b[1] = (b[1] - H[4] * b[2]) / H[3]; // b[0] = (b[0] - H[1] * b[1] - H[2] * b[2]) / H[0]; // // // Apply the update, sanitize normal if necessary // inv_depth -= b[0]; // normal_xy.x -= b[1]; // normal_xy.y -= b[2]; // // float length = sqrtf(normal_xy.x * normal_xy.x + normal_xy.y * normal_xy.y); // if (length > max_normal_2d_length) { // normal_xy.x *= max_normal_2d_length / length; // normal_xy.y *= max_normal_2d_length / length; // } // // // Test whether the update lowers the cost // float proposal_costs = ComputeCosts<kContextRadius>( // x, y, // normal_xy, // inv_depth, // unprojector, // reference_image, // reference_texture, // stereo_tr_reference, // projector, // stereo_image, // match_metric, // 0, // TODO: Update if using this function again // CUDABuffer_<float>()); // TODO: Update if using this function again // // if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { // costs(y, x) = proposal_costs; // normals(y, x) = make_char2(normal_xy.x * 127.f, normal_xy.y * 127.f); // TODO: in this and similar places: rounding? 
// inv_depth_map(y, x) = inv_depth; // // lambda(y, x) *= 0.5f; // } else { // lambda(y, x) *= 2.f; // } // } // } // // void PatchMatchOptimizationStepCUDA( // cudaStream_t stream, // int match_metric, // int context_radius, // float max_normal_2d_length, // cudaTextureObject_t reference_unprojection_lookup, // const CUDABuffer_<u8>& reference_image, // cudaTextureObject_t reference_texture, // const CUDAMatrix3x4& stereo_tr_reference, // const PixelCornerProjector_& stereo_camera, // const cudaTextureObject_t stereo_image, // CUDABuffer_<float>* inv_depth_map, // CUDABuffer_<char2>* normals, // CUDABuffer_<float>* costs, // CUDABuffer_<curandState>* random_states, // CUDABuffer_<float>* lambda) { // CHECK_CUDA_NO_ERROR(); // COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D( // PatchMatchOptimizationStepCUDAKernel<_context_radius>, // 16, 16, // inv_depth_map->width(), inv_depth_map->height(), // 0, stream, // /* kernel parameters */ // match_metric, // max_normal_2d_length, // CUDAUnprojectionLookup2D_(reference_unprojection_lookup), // reference_image, // reference_texture, // stereo_tr_reference, // stereo_camera, // stereo_image, // stereo_camera.width(), // stereo_camera.height(), // *inv_depth_map, // *normals, // *costs, // *random_states, // *lambda)); // cudaDeviceSynchronize(); // CHECK_CUDA_NO_ERROR(); // } template <int kContextRadius> __global__ void PatchMatchPropagationStepCUDAKernel( int match_metric, CUDAUnprojectionLookup2D_ unprojector, CUDABuffer_<u8> reference_image, cudaTextureObject_t reference_texture, CUDAMatrix3x4 stereo_tr_reference, PixelCornerProjector_ projector, cudaTextureObject_t stereo_image, CUDABuffer_<float> inv_depth_map, CUDABuffer_<char2> normals, CUDABuffer_<float> costs, CUDABuffer_<curandState> random_states, float second_best_min_distance_factor, CUDABuffer_<float> best_inv_depth_map) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= kContextRadius && y >= kContextRadius && x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { // "Pulling" the values inwards. float2 nxy = unprojector.UnprojectPoint(x, y); #pragma unroll for (int dy = -1; dy <= 1; ++ dy) { #pragma unroll for (int dx = -1; dx <= 1; ++ dx) { if ((dx == 0 && dy == 0) || (dx != 0 && dy != 0)) { continue; } // Compute inv_depth for propagating the pixel at (x + dx, y + dy) to the center pixel. 
float2 other_nxy = unprojector.UnprojectPoint(x + dx, y + dy); float other_inv_depth = inv_depth_map(y + dy, x + dx); float other_depth = 1.f / other_inv_depth; char2 other_normal_xy_char = normals(y + dy, x + dx); const float2 other_normal_xy = make_float2( other_normal_xy_char.x * (1 / 127.f), other_normal_xy_char.y * (1 / 127.f)); float other_normal_z = -sqrtf(1.f - other_normal_xy.x * other_normal_xy.x - other_normal_xy.y * other_normal_xy.y); float plane_d = (other_nxy.x * other_depth) * other_normal_xy.x + (other_nxy.y * other_depth) * other_normal_xy.y + other_depth * other_normal_z; float inv_depth = CalculatePlaneInvDepth2(plane_d, other_normal_xy, other_normal_z, nxy.x, nxy.y); // Test whether to propagate float proposal_costs = ComputeCosts<kContextRadius>( x, y, other_normal_xy, inv_depth, unprojector, reference_image, reference_texture, stereo_tr_reference, projector, stereo_image, match_metric, second_best_min_distance_factor, best_inv_depth_map); if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { costs(y, x) = proposal_costs; // NOTE: Other threads could read these values while they are written, // but it should not be very severe if that happens. // Could use ping-pong buffers to avoid that. normals(y, x) = make_char2(other_normal_xy.x * 127.f, other_normal_xy.y * 127.f); inv_depth_map(y, x) = inv_depth; } } // loop over dx } // loop over dy } } void PatchMatchPropagationStepCUDA( cudaStream_t stream, int match_metric, int context_radius, cudaTextureObject_t reference_unprojection_lookup, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& stereo_camera, const cudaTextureObject_t stereo_image, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<char2>* normals, CUDABuffer_<float>* costs, CUDABuffer_<curandState>* random_states, float second_best_min_distance_factor, CUDABuffer_<float>* best_inv_depth_map) { CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D( PatchMatchPropagationStepCUDAKernel<_context_radius>, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, /* kernel parameters */ match_metric, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, stereo_camera, stereo_image, *inv_depth_map, *normals, *costs, *random_states, second_best_min_distance_factor, best_inv_depth_map ? 
*best_inv_depth_map : CUDABuffer_<float>())); CHECK_CUDA_NO_ERROR(); } template <int kContextRadius> __global__ void PatchMatchDiscreteRefinementStepCUDAKernel( int match_metric, CUDAUnprojectionLookup2D_ unprojector, CUDABuffer_<u8> reference_image, cudaTextureObject_t reference_texture, CUDAMatrix3x4 stereo_tr_reference, PixelCornerProjector_ projector, cudaTextureObject_t stereo_image, int num_steps, float range_factor, CUDABuffer_<float> inv_depth_map, CUDABuffer_<char2> normals, CUDABuffer_<float> costs) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= kContextRadius && y >= kContextRadius && x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { float original_inv_depth = inv_depth_map(y, x); const char2 normal_char = normals(y, x); float2 normal = make_float2( normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f)); for (int step = 0; step < num_steps; ++ step) { float proposed_inv_depth = (1 + range_factor * 2 * ((step / (num_steps - 1.f)) - 0.5f)) * original_inv_depth; // Test whether to accept the proposal float proposal_costs = ComputeCosts<kContextRadius>( x, y, normal, proposed_inv_depth, unprojector, reference_image, reference_texture, stereo_tr_reference, projector, stereo_image, match_metric, 0, // TODO: Update if using this function within the second best cost step inv_depth_map); // TODO: Update if using this function within the second best cost step if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { costs(y, x) = proposal_costs; inv_depth_map(y, x) = proposed_inv_depth; } } } } void PatchMatchDiscreteRefinementStepCUDA( cudaStream_t stream, int match_metric, int context_radius, cudaTextureObject_t reference_unprojection_lookup, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& stereo_camera, const cudaTextureObject_t stereo_image, int num_steps, float range_factor, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<char2>* normals, CUDABuffer_<float>* costs) { CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D_TEMPLATED( PatchMatchDiscreteRefinementStepCUDAKernel, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, TEMPLATE_ARGUMENTS(_context_radius), /* kernel parameters */ match_metric, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, stereo_camera, stereo_image, num_steps, range_factor, *inv_depth_map, *normals, *costs)); CHECK_CUDA_NO_ERROR(); } template <int kContextRadius> __global__ void PatchMatchLeftRightConsistencyCheckCUDAKernel( float lr_consistency_factor_threshold, CUDAUnprojectionLookup2D_ unprojector, CUDAMatrix3x4 stereo_tr_reference, PixelCornerProjector_ projector, CUDABuffer_<float> lr_consistency_inv_depth, CUDABuffer_<float> inv_depth_map, CUDABuffer_<float> inv_depth_map_out) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= kContextRadius && y >= kContextRadius && x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - kContextRadius) { float inv_depth = inv_depth_map(y, x); float depth = 1 / inv_depth; float2 center_nxy = unprojector.UnprojectPoint(x, y); float3 reference_point = make_float3(depth * center_nxy.x, depth * center_nxy.y, depth); float3 pnxy = stereo_tr_reference * 
reference_point; if (pnxy.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmin_pxy = projector.Project(pnxy); if (rmin_pxy.x < kContextRadius || rmin_pxy.y < kContextRadius || rmin_pxy.x >= projector.width - 1 - kContextRadius || rmin_pxy.y >= projector.height - 1 - kContextRadius) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } float lr_check_inv_depth = lr_consistency_inv_depth(rmin_pxy.y, rmin_pxy.x); if (lr_check_inv_depth == kInvalidInvDepth) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } float factor = pnxy.z * lr_check_inv_depth; if (factor < 1) { factor = 1 / factor; } if (factor > lr_consistency_factor_threshold) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { inv_depth_map_out(y, x) = inv_depth; } } else if (x < inv_depth_map.width() && y < inv_depth_map.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; } } void PatchMatchLeftRightConsistencyCheckCUDA( cudaStream_t stream, int context_radius, float lr_consistency_factor_threshold, cudaTextureObject_t reference_unprojection_lookup, const CUDAMatrix3x4& stereo_tr_reference, const PixelCornerProjector_& stereo_camera, const CUDABuffer_<float>& lr_consistency_inv_depth, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<float>* inv_depth_map_out) { CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D( PatchMatchLeftRightConsistencyCheckCUDAKernel<_context_radius>, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, /* kernel parameters */ lr_consistency_factor_threshold, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), stereo_tr_reference, stereo_camera, lr_consistency_inv_depth, *inv_depth_map, *inv_depth_map_out)); CHECK_CUDA_NO_ERROR(); } // TODO: move to better place __forceinline__ __device__ void CrossProduct(const float3& a, const float3& b, float3* result) { *result = make_float3(a.y * b.z - b.y * a.z, b.x * a.z - a.x * b.z, a.x * b.y - b.x * a.y); } // TODO: move to better place __forceinline__ __device__ float Dot(const float3& a, const float3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; } // TODO: move to better place __forceinline__ __device__ float Norm(const float3& vec) { return sqrtf(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z); } // TODO: move to better place __forceinline__ __device__ float3 operator-(const float3& a, const float3& b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } // TODO: move to better place __forceinline__ __device__ float SquaredLength(const float3& vec) { return vec.x * vec.x + vec.y * vec.y + vec.z * vec.z; } template <int kContextRadius> __global__ void PatchMatchFilterOutliersCUDAKernel( float min_inv_depth, float required_range_min_depth, float required_range_max_depth, CUDAUnprojectionLookup2D_ unprojector, CUDABuffer_<u8> reference_image, cudaTextureObject_t reference_texture, CUDAMatrix3x4 stereo_tr_reference, CUDAMatrix3x4 reference_tr_stereo, PixelCornerProjector_ projector, cudaTextureObject_t stereo_image, CUDABuffer_<float> inv_depth_map, CUDABuffer_<float> inv_depth_map_out, CUDABuffer_<char2> normals, CUDABuffer_<float> costs, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float> second_best_costs, float second_best_min_cost_factor) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= kContextRadius && y >= kContextRadius && x < inv_depth_map.width() - kContextRadius && y < inv_depth_map.height() - 
kContextRadius) { if (!(costs(y, x) <= cost_threshold) || // includes NaNs !(inv_depth_map(y, x) >= min_inv_depth)) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { // If there is another depth value with similar cost, reject the depth // estimate as ambiguous. if (second_best_min_cost_factor > 1) { if (!(second_best_costs(y, x) >= second_best_min_cost_factor * costs(y, x))) { // includes NaNs inv_depth_map_out(y, x) = kInvalidInvDepth; return; } } // If at the maximum or minimum depth for this pixel the stereo frame // would not observe that point, discard the pixel (i.e., enforce that // this depth range is observed by both frames). // This is to protect against mistakes that often happen when the frames // overlap in only a small depth range and the actual depth is not within // that range. float2 center_nxy = unprojector.UnprojectPoint(x, y); float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth); float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth); float3 rmin_pnxy = stereo_tr_reference * range_min_point; if (rmin_pnxy.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmin_pxy = projector.Project(rmin_pnxy); if (rmin_pxy.x < kContextRadius || rmin_pxy.y < kContextRadius || rmin_pxy.x >= projector.width - 1 - kContextRadius || rmin_pxy.y >= projector.height - 1 - kContextRadius) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } float3 rmax_pnxy = stereo_tr_reference * range_max_point; if (rmax_pnxy.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmax_pxy = projector.Project(rmax_pnxy); if (rmax_pxy.x < kContextRadius || rmax_pxy.y < kContextRadius || rmax_pxy.x >= projector.width - 1 - kContextRadius || rmax_pxy.y >= projector.height - 1 - kContextRadius) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching. // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census! float inv_depth = inv_depth_map(y, x); const char2 normal_char = normals(y, x); float2 normal_xy = make_float2( normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f)); const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; float total_gradient_magnitude = 0; for (int sample = 0; sample < kNumSamples; ++ sample) { float dx = 1.25f * kContextRadius * kSamplesCUDA[sample][0]; // TODO: magic constant factor float dy = 1.25f * kContextRadius * kSamplesCUDA[sample][1]; // TODO: magic constant factor float2 nxy = unprojector.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers). 
float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth); float3 original_stereo_point = stereo_tr_reference * original_reference_point; constexpr float kShiftZ = 0.01f; float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ); float3 shifted_reference_point = reference_tr_stereo * shifted_stereo_point; const float2 shifted_projection = projector.Project(shifted_reference_point); float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx), shifted_projection.y - 0.5f - (y + dy)); float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y); epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length); // Normalize to length of 1 pixel float reference_value = 255.f * tex2D<float>(reference_texture, x + dx + 0.5f, y + dy + 0.5f); float shifted_reference_value = 255.f * tex2D<float>(reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y); total_gradient_magnitude += fabs(shifted_reference_value - reference_value); } if (total_gradient_magnitude < epipolar_gradient_threshold) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Angle filtering. // Estimate the surface normal from the depth map. float center_depth = 1.f / inv_depth_map(y, x); float right_depth = 1.f / inv_depth_map(y, x + 1); float left_depth = 1.f / inv_depth_map(y, x - 1); float bottom_depth = 1.f / inv_depth_map(y + 1, x); float top_depth = 1.f / inv_depth_map(y - 1, x); float2 left_nxy = unprojector.UnprojectPoint(x - 1, y); float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth); float2 right_nxy = unprojector.UnprojectPoint(x + 1, y); float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth); float2 top_nxy = unprojector.UnprojectPoint(x, y - 1); float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth); float2 bottom_nxy = unprojector.UnprojectPoint(x, y + 1); float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth); float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth); constexpr float kRatioThreshold = 2.f; constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold; float left_dist_squared = SquaredLength(left_point - center_point); float right_dist_squared = SquaredLength(right_point - center_point); float left_right_ratio = left_dist_squared / right_dist_squared; float3 left_to_right; if (left_right_ratio < kRatioThresholdSquared && left_right_ratio > 1.f / kRatioThresholdSquared) { left_to_right = right_point - left_point; } else if (left_dist_squared < right_dist_squared) { left_to_right = center_point - left_point; } else { // left_dist_squared >= right_dist_squared left_to_right = right_point - center_point; } float bottom_dist_squared = SquaredLength(bottom_point - center_point); float top_dist_squared = SquaredLength(top_point - center_point); float bottom_top_ratio = bottom_dist_squared / top_dist_squared; float3 bottom_to_top; if (bottom_top_ratio < kRatioThresholdSquared && bottom_top_ratio > 1.f / kRatioThresholdSquared) { bottom_to_top = top_point - bottom_point; } else if (bottom_dist_squared < top_dist_squared) { bottom_to_top = 
center_point - bottom_point; } else { // bottom_dist_squared >= top_dist_squared bottom_to_top = top_point - center_point; } float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); // Apply angle threshold. const float normal_length = Norm(normal); const float point_distance = Norm(center_point); const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance); if (view_cos_angle > min_cos_angle) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { inv_depth_map_out(y, x) = inv_depth_map(y, x); } } } else if (x < inv_depth_map.width() && y < inv_depth_map.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; } } void PatchMatchFilterOutliersCUDA( cudaStream_t stream, int context_radius, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, cudaTextureObject_t reference_unprojection_lookup, const CUDABuffer_<u8>& reference_image, cudaTextureObject_t reference_texture, const CUDAMatrix3x4& stereo_tr_reference, const CUDAMatrix3x4& reference_tr_stereo, const PixelCornerProjector_& stereo_camera, const cudaTextureObject_t stereo_image, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<float>* inv_depth_map_out, CUDABuffer_<char2>* normals, CUDABuffer_<float>* costs, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float>* second_best_costs, float second_best_min_cost_factor) { CHECK_CUDA_NO_ERROR(); COMPILE_INT_4_OPTIONS(context_radius, 1, 2, 4, 5, CUDA_AUTO_TUNE_2D( PatchMatchFilterOutliersCUDAKernel<_context_radius>, 16, 16, inv_depth_map->width(), inv_depth_map->height(), 0, stream, /* kernel parameters */ min_inv_depth, required_range_min_depth, required_range_max_depth, CUDAUnprojectionLookup2D_(reference_unprojection_lookup), reference_image, reference_texture, stereo_tr_reference, reference_tr_stereo, stereo_camera, stereo_image, *inv_depth_map, *inv_depth_map_out, *normals, *costs, cost_threshold, epipolar_gradient_threshold, min_cos_angle, *second_best_costs, second_best_min_cost_factor)); CHECK_CUDA_NO_ERROR(); } __global__ void MedianFilterDepthMap3x3CUDAKernel( int context_radius, CUDABuffer_<float> inv_depth_map, CUDABuffer_<float> inv_depth_map_out, CUDABuffer_<float> costs, CUDABuffer_<float> costs_out, CUDABuffer_<float> second_best_costs, CUDABuffer_<float> second_best_costs_out) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; // TODO: De-duplicate with above if (x >= context_radius && y >= context_radius && x < inv_depth_map.width() - context_radius && y < inv_depth_map.height() - context_radius) { // Collect valid depth values of 3x3 neighborhood int count = 1; float inv_depths[9]; float cost[9]; float second_best_cost[9]; inv_depths[0] = inv_depth_map(y, x); if (inv_depths[0] == kInvalidInvDepth) { inv_depth_map_out(y, x) = kInvalidInvDepth; costs_out(y, x) = CUDART_NAN_F; second_best_costs_out(y, x) = CUDART_NAN_F; return; } cost[0] = costs(y, x); second_best_cost[0] = second_best_costs(y, x); #pragma unroll for (int dy = -1; dy <= 1; ++ dy) { if (y + dy < context_radius || y + dy >= inv_depth_map.height() - context_radius) { continue; } #pragma unroll for (int dx = -1; dx <= 1; ++ dx) { if (dy == 0 && dx == 0) { continue; } if (x + dx < context_radius || x + dx >= inv_depth_map.width() - context_radius) { continue; } float inv_depth = inv_depth_map(y + dy, x + dx); if (inv_depth != kInvalidInvDepth) { inv_depths[count] = inv_depth; cost[count] = costs(y + dy, x + 
dx); second_best_cost[count] = second_best_costs(y + dy, x + dx); ++ count; } } } // Sort depth values up to the middle of the maximum count for (int i = 0; i <= 4; ++ i) { for (int k = i + 1; k < 9; ++ k) { if (k < count && inv_depths[i] > inv_depths[k]) { // Swap. float temp = inv_depths[i]; inv_depths[i] = inv_depths[k]; inv_depths[k] = temp; temp = cost[i]; cost[i] = cost[k]; cost[k] = temp; temp = second_best_cost[i]; second_best_cost[i] = second_best_cost[k]; second_best_cost[k] = temp; } } } // Assign the median if (count % 2 == 1) { inv_depth_map_out(y, x) = inv_depths[count / 2]; costs_out(y, x) = cost[count / 2]; second_best_costs_out(y, x) = second_best_cost[count / 2]; } else { // For disambiguation in the even-count case, use the value which is // closer to the average of the two middle values. float average = 0.5f * (inv_depths[count / 2 - 1] + inv_depths[count / 2]); if (fabs(average - inv_depths[count / 2 - 1]) < fabs(average - inv_depths[count / 2])) { inv_depth_map_out(y, x) = inv_depths[count / 2 - 1]; costs_out(y, x) = cost[count / 2 - 1]; second_best_costs_out(y, x) = second_best_cost[count / 2 - 1]; } else { inv_depth_map_out(y, x) = inv_depths[count / 2]; costs_out(y, x) = cost[count / 2]; second_best_costs_out(y, x) = second_best_cost[count / 2]; } } } else if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; costs_out(y, x) = CUDART_NAN_F; second_best_costs_out(y, x) = CUDART_NAN_F; } } void MedianFilterDepthMap3x3CUDA( cudaStream_t stream, int context_radius, CUDABuffer_<float>* inv_depth_map, CUDABuffer_<float>* inv_depth_map_out, CUDABuffer_<float>* costs, CUDABuffer_<float>* costs_out, CUDABuffer_<float>* second_best_costs, CUDABuffer_<float>* second_best_costs_out) { CHECK_CUDA_NO_ERROR(); CUDA_AUTO_TUNE_2D( MedianFilterDepthMap3x3CUDAKernel, 32, 32, inv_depth_map->width(), inv_depth_map->height(), 0, stream, /* kernel parameters */ context_radius, *inv_depth_map, *inv_depth_map_out, *costs, *costs_out, *second_best_costs, *second_best_costs_out); CHECK_CUDA_NO_ERROR(); } __global__ void BilateralFilterCUDAKernel( float denom_xy, float denom_value, int radius, int radius_squared, CUDABuffer_<float> inv_depth_map, CUDABuffer_<float> inv_depth_map_out) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; // TODO: De-duplicate with above if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) { const float center_value = inv_depth_map(y, x); if (center_value == kInvalidInvDepth) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Bilateral filtering. 
float sum = 0; float weight = 0; const int min_y = max(static_cast<int>(0), static_cast<int>(y - radius)); const int max_y = min(static_cast<int>(inv_depth_map_out.height() - 1), static_cast<int>(y + radius)); for (int sample_y = min_y; sample_y <= max_y; ++ sample_y) { const int dy = sample_y - y; const int min_x = max(static_cast<int>(0), static_cast<int>(x - radius)); const int max_x = min(static_cast<int>(inv_depth_map_out.width() - 1), static_cast<int>(x + radius)); for (int sample_x = min_x; sample_x <= max_x; ++ sample_x) { const int dx = sample_x - x; const int grid_distance_squared = dx * dx + dy * dy; if (grid_distance_squared > radius_squared) { continue; } const float sample = inv_depth_map(sample_y, sample_x); if (sample == kInvalidInvDepth) { continue; } float value_distance_squared = center_value - sample; value_distance_squared *= value_distance_squared; float w = exp(-grid_distance_squared / denom_xy + -value_distance_squared / denom_value); sum += w * sample; weight += w; } } inv_depth_map_out(y, x) = (weight == 0) ? kInvalidInvDepth : (sum / weight); } } void BilateralFilterCUDA( cudaStream_t stream, float sigma_xy, float sigma_value, float radius_factor, const CUDABuffer_<float>& inv_depth_map, CUDABuffer_<float>* inv_depth_map_out) { CHECK_CUDA_NO_ERROR(); int radius = radius_factor * sigma_xy + 0.5f; CUDA_AUTO_TUNE_2D( BilateralFilterCUDAKernel, 32, 32, inv_depth_map_out->width(), inv_depth_map_out->height(), 0, stream, /* kernel parameters */ 2.0f * sigma_xy * sigma_xy, 2.0f * sigma_value * sigma_value, radius, radius * radius, inv_depth_map, *inv_depth_map_out); CHECK_CUDA_NO_ERROR(); } __global__ void FillHolesCUDAKernel( CUDABuffer_<float> inv_depth_map, CUDABuffer_<float> inv_depth_map_out) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; // TODO: De-duplicate with above if (x < inv_depth_map_out.width() && y < inv_depth_map_out.height()) { const float center_inv_depth = inv_depth_map(y, x); if (center_inv_depth != kInvalidInvDepth || x < 1 || y < 1 || x >= inv_depth_map.width() - 1 || y >= inv_depth_map.height() - 1) { inv_depth_map_out(y, x) = center_inv_depth; return; } // Get the average depth of the neighbor pixels. float sum = 0; int count = 0; #pragma unroll for (int dy = -1; dy <= 1; ++ dy) { #pragma unroll for (int dx = -1; dx <= 1; ++ dx) { if (dx == 0 && dy == 0) { continue; } float inv_depth = inv_depth_map(y + dy, x + dx); if (inv_depth != kInvalidInvDepth) { sum += inv_depth; ++ count; } } } float avg_inv_depth = sum / count; // Fill in this pixel if there are at least a minimum number of valid // neighbor pixels nearby which have similar depth. constexpr float kSimilarDepthFactorThreshold = 1.01f; // TODO: Make parameter constexpr int kMinSimilarPixelsForFillIn = 6; // TODO: Make parameter sum = 0; count = 0; #pragma unroll for (int dy = -1; dy <= 1; ++ dy) { #pragma unroll for (int dx = -1; dx <= 1; ++ dx) { if (dx == 0 && dy == 0) { continue; } float inv_depth = inv_depth_map(y + dy, x + dx); if (inv_depth != kInvalidInvDepth) { float factor = inv_depth / avg_inv_depth; if (factor < 1) { factor = 1 / factor; } if (factor <= kSimilarDepthFactorThreshold) { sum += inv_depth; ++ count; } } } } inv_depth_map_out(y, x) = (count >= kMinSimilarPixelsForFillIn) ? 
(sum / count) : kInvalidInvDepth; } } void FillHolesCUDA( cudaStream_t stream, const CUDABuffer_<float>& inv_depth_map, CUDABuffer_<float>* inv_depth_map_out) { CHECK_CUDA_NO_ERROR(); CUDA_AUTO_TUNE_2D( FillHolesCUDAKernel, 32, 32, inv_depth_map_out->width(), inv_depth_map_out->height(), 0, stream, /* kernel parameters */ inv_depth_map, *inv_depth_map_out); CHECK_CUDA_NO_ERROR(); } }
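// -----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the propagation kernel
// earlier in this file converts a neighbor's (inverse depth, normal) estimate
// into a plane n . p = plane_d and then evaluates that plane along the center
// pixel's ray direction (nx, ny, 1). Assuming that convention, the induced
// inverse depth is (n_x*nx + n_y*ny + n_z) / plane_d. The helper below is only
// a hypothetical stand-in to make the geometry explicit; the library's
// CalculatePlaneInvDepth2 is presumed, but not guaranteed, to use this formula.
__forceinline__ __device__ float PlaneInvDepthSketch(
    float plane_d, float2 normal_xy, float normal_z, float nx, float ny) {
  // A point on the ray at depth z is p = (nx*z, ny*z, z). Plugging into
  // n . p = plane_d gives z * (n . dir) = plane_d, so 1/z = (n . dir) / plane_d.
  return (normal_xy.x * nx + normal_xy.y * ny + normal_z) / plane_d;
}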
/*
Implementing Breadth first search on CUDA using algorithm given in DAC'10 paper "An Effective GPU Implementation of Breadth-First Search" Copyright (c) 2010 University of Illinois at Urbana-Champaign. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Author: Lijiuan Luo (lluo3@uiuc.edu) */ #ifndef _KERNEL_H_ #define _KERNEL_H_ /* Define colors for BFS 1) the definition of White, gray and black comes from the text book "Introduction to Algorithms" 2) For path search problems, people may choose to use different colors to record the found paths. Therefore we reserve numbers (0-16677216) for this purpose. Only nodes with colors bigger than UP_LIMIT are free to visit 3) We define two gray shades to differentiate between the new frontier nodes and the old frontier nodes that have not been marked BLACK */ #define UP_LIMIT 16677216//2^24 #define WHITE 16677217 #define GRAY 16677218 #define GRAY0 16677219 #define GRAY1 16677220 #define BLACK 16677221 //Distribute computation //tries to distribute the computation among all the SMs. However, it does not seem to help improve the //performance. //#define DIS_COMP /*** The maximum size of each w-queue (row-major order) FIXME This should be chosen more carefully to avoid bank conflict. A better implementation will be to arrange w-queues in column-major order, but the program will be less readable. ****/ #define LOCAL_MEM 400 texture<Node> g_graph_node_ref; texture<Edge> g_graph_edge_ref; volatile __device__ int count = 0; volatile __device__ int no_of_nodes_vol = 0; volatile __device__ int stay_vol = 0; //GPU synchronization. //implementing the algorithm proposed in //S. Xiao and W. Feng, "Inter-block GPU communication via fast barrier //synchronization," Technical Report TR-09-19, Dept. of Computer Science, VT //NOTE the algorithm originally given in the report is inaccurate __device__ void start_global_barrier(int fold){ //This synchronization is missing in the report __syncthreads(); if(threadIdx.x == 0){ atomicAdd((int*)&count, 1); while( count < NUM_SM*fold){ ; } } __syncthreads(); } //------------------------------------------------- //This is the version for one-block situation. The propagation idea is basically the same as //BFS_kernel. 
//The major differences are: // 1) This kernel can propagate though multiple BFS levels (while loop) using __synchThreads() between levels // 2) the intermediate queues are stored in shared memory (next_wf) //\param q1: the current frontier queue when the kernel is launched //\param q2: the new frontier queue when the kernel returns //-------------------------------------------------- __global__ void BFS_in_GPU_kernel( int * q1, int * q2, Node* g_graph_nodes, Edge* g_graph_edges, int* g_color, int * g_cost, int no_of_nodes, int * tail, int gray_shade, int k ) { __shared__ int local_q_tail[NUM_BIN]; __shared__ int local_q[NUM_BIN][LOCAL_MEM]; __shared__ int prefix_q[NUM_BIN]; __shared__ int thread_n_q[NUM_BIN]; //next/new wave front __shared__ int next_wf[MAX_THREADS_PER_BLOCK]; __shared__ int tot_sum; if(threadIdx.x == 0) tot_sum = 0;//total number of new frontier nodes while(1){//propage through multiple BFS levels until the wavfront overgrows one-block limit if(threadIdx.x < NUM_BIN){ local_q_tail[threadIdx.x] = 0; thread_n_q[threadIdx.x] = blockDim.x>>EXP; if((blockDim.x&MOD_OP) > threadIdx.x){ thread_n_q[threadIdx.x]++; } } __syncthreads(); int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; if( tid<no_of_nodes) { int pid; if(tot_sum == 0)//this is the first BFS level of current kernel call pid = q1[tid]; else pid = next_wf[tid];//read the current frontier info from last level's propagation g_color[pid] = BLACK; int cur_cost = g_cost[pid]; int q_i = threadIdx.x&MOD_OP; Node cur_node = tex1Dfetch(g_graph_node_ref,pid); for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++) { Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i); int id = cur_edge.x; int cost = cur_edge.y; cost += cur_cost; int orig_cost = atomicMin(&g_cost[id],cost); if(orig_cost > cost){ int old_color = atomicExch(&g_color[id],gray_shade); if(old_color != gray_shade) { //push to the queue int index = atomicAdd(&local_q_tail[q_i],1); local_q[q_i][index] = id; } } } } __syncthreads(); if(threadIdx.x == 0){ prefix_q[0] = 0; for(int i = 1; i < NUM_BIN; i++){ prefix_q[i] = prefix_q[i-1]+local_q_tail[i-1]; } tot_sum = prefix_q[NUM_BIN-1] + local_q_tail[NUM_BIN-1]; *tail = tot_sum; } __syncthreads(); int q_i = threadIdx.x&MOD_OP; int local_shift = threadIdx.x>>EXP; if(tot_sum == 0)//the new frontier becomes empty; BFS is over return; if(tot_sum <= MAX_THREADS_PER_BLOCK){//the new frontier is still within one-block limit; //stay in current kernel while (local_shift < local_q_tail[q_i]){ next_wf[prefix_q[q_i]+local_shift] = local_q[q_i][local_shift]; local_shift += thread_n_q[q_i]; } __syncthreads(); no_of_nodes = tot_sum; if(threadIdx.x == 0){ if(gray_shade == GRAY0) gray_shade = GRAY1; else gray_shade = GRAY0; } } else{//the new frontier outgrows one-block limit; terminate current kernel while(local_shift < local_q_tail[q_i]){ q2[prefix_q[q_i]+local_shift] = local_q[q_i][local_shift]; local_shift += thread_n_q[q_i]; } return; } }//while } //---------------------------------------------------------------- //This BFS kernel propagates through multiple levels using global synchronization //The basic propagation idea is the same as "BFS_kernel" //The major differences are: // 1) propagate through multiple levels by using GPU global sync ("start_global_barrier") // 2) use q1 and q2 alternately for the intermediate queues //\param q1: the current frontier when the kernel is called //\param q2: possibly the new frontier when the kernel returns depending on how many levels of propagation // has been done in current kernel; the new 
frontier could also be stored in q1 //\param switch_k: whether or not to adjust the "k" value on the host side // Normally on the host side, when "k" is even, q1 is the current frontier; when "k" is // odd, q2 is the current frontier; since this kernel can propagate through multiple levels, // the k value may need to be adjusted when this kernel returns. //\param max_nodes_per_block: the maximum frontier node assigned to a block. It is only useful when "DIS_COMP" // is enabled //\param global_kt: the total number of global synchronizations, // or the number of times to call "start_global_barrier" //-------------------------------------------------------------- __global__ void BFS_kernel_multi_blk_inGPU( int * q1, int * q2, Node* g_graph_nodes, Edge* g_graph_edges, int* g_color, int * g_cost, int *no_of_nodes, int * tail, int gray_shade, int k, int * switch_k, int * max_nodes_per_block, int * global_kt ) { __shared__ int local_q_tail[NUM_BIN]; __shared__ int local_q[NUM_BIN][LOCAL_MEM]; __shared__ int prefix_q[NUM_BIN]; __shared__ int thread_n_q[NUM_BIN]; __shared__ int shift; __shared__ int no_of_nodes_sm; __shared__ int odd_time;// the odd level of propagation within current kernel if(threadIdx.x == 0){ odd_time = 1;//true; if(blockIdx.x == 0) no_of_nodes_vol = *no_of_nodes; } int kt = *global_kt;// the total count of GPU global synchronization while (1){//propagate through multiple levels if(threadIdx.x < NUM_BIN){ local_q_tail[threadIdx.x] = 0; thread_n_q[threadIdx.x] = blockDim.x>>EXP; if((blockDim.x&MOD_OP) > threadIdx.x){ thread_n_q[threadIdx.x]++; } } if(threadIdx.x == 0) no_of_nodes_sm = no_of_nodes_vol; __syncthreads(); #ifdef DIS_COMP int tid = blockIdx.x*(*max_nodes_per_block) + threadIdx.x; #else int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; #endif if( tid<no_of_nodes_sm) { int pid; if(odd_time == 1) pid = atomicOr((int*)&q1[tid], 0); else pid = atomicOr((int*)&q2[tid], 0); g_color[pid] = BLACK; int cur_cost = atomicOr((int*)&g_cost[pid], 0); int q_i = threadIdx.x&MOD_OP; Node cur_node = tex1Dfetch(g_graph_node_ref,pid); for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++) { Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i); int id = cur_edge.x; int cost = cur_edge.y; cost += cur_cost; int orig_cost = atomicMin(&g_cost[id],cost); if(orig_cost > cost){ if(g_color[id] > UP_LIMIT){ int old_color = atomicExch(&g_color[id],gray_shade); if(old_color != gray_shade) { //push to the queue int index = atomicAdd(&local_q_tail[q_i],1); local_q[q_i][index] = id; } } } } } __syncthreads(); if(threadIdx.x == 0){ prefix_q[0] = 0; for(int i = 1; i < NUM_BIN; i++){ prefix_q[i] = prefix_q[i-1] + local_q_tail[i-1]; } int tot_sum = prefix_q[NUM_BIN-1] + local_q_tail[NUM_BIN-1]; shift = atomicAdd(tail,tot_sum); } __syncthreads(); int q_i = threadIdx.x&MOD_OP; int local_shift = threadIdx.x>>EXP; while (local_shift < local_q_tail[q_i]){ if(odd_time) q2[shift+prefix_q[q_i]+local_shift] = local_q[q_i][local_shift]; else q1[shift+prefix_q[q_i]+local_shift] = local_q[q_i][local_shift]; local_shift += thread_n_q[q_i]; } if(threadIdx.x == 0){ odd_time = (odd_time+1)%2; if(gray_shade == GRAY0) gray_shade = GRAY1; else gray_shade = GRAY0; } //synchronize among all the blks start_global_barrier(kt+1); if(blockIdx.x == 0 && threadIdx.x == 0){ stay_vol = 0; if(*tail< NUM_SM*MAX_THREADS_PER_BLOCK && *tail > MAX_THREADS_PER_BLOCK){ stay_vol = 1; no_of_nodes_vol = *tail; //this is only useful when DIS_COMP is enabled *max_nodes_per_block = ceil(float(*no_of_nodes)/NUM_SM); *tail = 0; } } 
start_global_barrier(kt+2); kt+= 2; if(stay_vol == 0) { if(blockIdx.x == 0 && threadIdx.x == 0) { *global_kt = kt; *switch_k = (odd_time+1)%2; *no_of_nodes = no_of_nodes_vol; } return; } } } /***************************************************************************** This is the most general version of BFS kernel, i.e. no assumption about #block in the grid \param q1: the array to hold the current frontier \param q2: the array to hold the new frontier \param g_graph_nodes: the nodes in the input graph \param g_graph_edges: the edges i nthe input graph \param g_color: the colors of nodes \param g_cost: the costs of nodes \param no_of_nodes: the number of nodes in the current frontier \param tail: pointer to the location of the tail of the new frontier. *tail is the size of the new frontier \param gray_shade: the shade of the gray in current BFS propagation. See GRAY0, GRAY1 macro definitions for more details \param k: the level of current propagation in the BFS tree. k= 0 for the first propagation. ***********************************************************************/ __global__ void BFS_kernel( int * q1, int * q2, Node* g_graph_nodes, Edge* g_graph_edges, int* g_color, int * g_cost, int no_of_nodes, int * tail, int gray_shade, int k ) { __shared__ int local_q_tail[NUM_BIN];//the tails of each local warp-level queue __shared__ int local_q[NUM_BIN][LOCAL_MEM];//the local warp-level queues __shared__ int prefix_q[NUM_BIN];//the number of elementss in the w-queues ahead of //current w-queue, a.k.a prefix sum __shared__ int thread_n_q[NUM_BIN];//#thread which writes into the current w-queue __shared__ int shift; if(threadIdx.x < NUM_BIN){ local_q_tail[threadIdx.x] = 0;//initialize the tail of w-queue thread_n_q[threadIdx.x] = blockDim.x>>EXP;//#thread/NUM_BIN if((blockDim.x&MOD_OP) > threadIdx.x){//#thread%NUM_BIN > threadIdx.x thread_n_q[threadIdx.x]++; } } __syncthreads(); //first, propagate and add the new frontier elements into w-queues int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; if( tid<no_of_nodes) { int pid = q1[tid]; //the current frontier node, or the parent node of the new frontier nodes g_color[pid] = BLACK; int cur_cost = g_cost[pid]; int q_i = threadIdx.x&MOD_OP; //the id of the queue which new frontier nodes will be pushed //into Node cur_node = tex1Dfetch(g_graph_node_ref,pid); for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++)//visit each neighbor of the //current frontier node. 
{ Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i); int id = cur_edge.x; int cost = cur_edge.y; cost += cur_cost; int orig_cost = atomicMin(&g_cost[id],cost); if(orig_cost > cost){//the node should be visited if(g_color[id] > UP_LIMIT){ int old_color = atomicExch(&g_color[id],gray_shade); //this guarantees that only one thread will push this node //into a queue if(old_color != gray_shade) { //atomic operation guarantees the correctness //even if multiple warps are executing simultaneously int index = atomicAdd(&local_q_tail[q_i],1); local_q[q_i][index] = id; } } } } } __syncthreads(); if(threadIdx.x == 0){ //now calculate the prefix sum prefix_q[0] = 0; for(int i = 1; i < NUM_BIN; i++){ //the prefix sum of one queue is equal to the prefix sum of its predecessor queue //plus the number of elements in the predecessor queue prefix_q[i] = prefix_q[i-1]+local_q_tail[i-1]; } //the total number of elements in the block-level queue is the prefix sum of the last w-queue //plus the number of elements in the last w-queue int tot_sum = prefix_q[NUM_BIN-1] + local_q_tail[NUM_BIN-1]; //the offset or "shift" of the block-level queue within the grid-level queue //is determined by atomic operation shift = atomicAdd(tail,tot_sum); } __syncthreads(); //now copy the elements from w-queues into grid-level queues. //Note that we have bypassed the copy to/from block-level queues for efficiency reason int q_i = threadIdx.x&MOD_OP;//w-queue index int local_shift = threadIdx.x>>EXP;//shift within a w-queue //loop unrolling was originally used for better performance, but removed for better readability while(local_shift < local_q_tail[q_i]){ q2[shift+prefix_q[q_i]+local_shift] = local_q[q_i][local_shift]; local_shift+= thread_n_q[q_i];//multiple threads are copying elements at the same time, //so we shift by multiple elements for next iteration } //FIXME, the above implementation has bad coalescing. Better implementation should make //consecutive threads write into consecutive locations } #endif
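// -----------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original benchmark): the three
// kernels above are intended to be dispatched by frontier size: a one-block
// kernel while the frontier fits in a single block, the multi-block kernel with
// the software global barrier while it fits on all SMs, and the general
// BFS_kernel otherwise. The driver below only outlines the general case and the
// q1/q2 ping-pong; names such as d_q1, d_q2 and d_tail are assumptions, and
// MAX_THREADS_PER_BLOCK is expected to be defined by the including file, just
// as the kernels above already assume.
inline void BFSDriverSketch(int* d_q1, int* d_q2, Node* d_nodes, Edge* d_edges,
                            int* d_color, int* d_cost, int* d_tail,
                            int frontier_size) {
  int k = 0;
  int gray_shade = GRAY0;
  while (frontier_size > 0) {
    // The new frontier is appended via atomicAdd on *tail, so reset it first.
    cudaMemset(d_tail, 0, sizeof(int));
    int* cur  = (k % 2 == 0) ? d_q1 : d_q2;  // current frontier
    int* next = (k % 2 == 0) ? d_q2 : d_q1;  // new frontier written by the kernel
    int num_blocks =
        (frontier_size + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK;
    BFS_kernel<<<num_blocks, MAX_THREADS_PER_BLOCK>>>(
        cur, next, d_nodes, d_edges, d_color, d_cost,
        frontier_size, d_tail, gray_shade, k);
    // Default-stream copy waits for the kernel; *tail is the new frontier size.
    cudaMemcpy(&frontier_size, d_tail, sizeof(int), cudaMemcpyDeviceToHost);
    // Alternate the gray shade so the next level's frontier stays distinguishable.
    gray_shade = (gray_shade == GRAY0) ? GRAY1 : GRAY0;
    ++k;
  }
}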
#include <iostream> using namespace std; namespace cufhe { using BootstrappingKeyNTT = TGSWSampleArray_T<FFP>; BootstrappingKeyNTT* bk_ntt = nullptr; MemoryDeleter bk_ntt_deleter = nullptr; KeySwitchingKey* ksk_dev = nullptr; MemoryDeleter ksk_dev_deleter = nullptr; CuNTTHandler<>* ntt_handler = nullptr; __global__ void __BootstrappingKeyToNTT__(BootstrappingKeyNTT bk_ntt, BootstrappingKey bk, CuNTTHandler<> ntt) { __shared__ FFP sh_temp[1024]; TGSWSample tgsw; bk.ExtractTGSWSample(&tgsw, blockIdx.z); TLWESample tlwe; tgsw.ExtractTLWESample(&tlwe, blockIdx.y); Torus* poly_in = tlwe.ExtractPoly(blockIdx.x); TGSWSample_T<FFP> tgsw_ntt; bk_ntt.ExtractTGSWSample(&tgsw_ntt, blockIdx.z); TLWESample_T<FFP> tlwe_ntt; tgsw_ntt.ExtractTLWESample(&tlwe_ntt, blockIdx.y); FFP* poly_out = tlwe_ntt.ExtractPoly(blockIdx.x); ntt.NTT<Torus>(poly_out, poly_in, sh_temp, 0); } void BootstrappingKeyToNTT(const BootstrappingKey* bk) { BootstrappingKey* d_bk; d_bk = new BootstrappingKey(bk->n(), bk->k(), bk->l(), bk->w(), bk->t()); std::pair<void*, MemoryDeleter> pair; pair = AllocatorGPU::New(d_bk->SizeMalloc()); d_bk->set_data((BootstrappingKey::PointerType)pair.first); MemoryDeleter d_bk_deleter = pair.second; CuSafeCall(cudaMemcpy(d_bk->data(), bk->data(), d_bk->SizeMalloc(), cudaMemcpyHostToDevice)); Assert(bk_ntt == nullptr); bk_ntt = new BootstrappingKeyNTT(bk->n(), bk->k(), bk->l(), bk->w(), bk->t()); pair = AllocatorGPU::New(bk_ntt->SizeMalloc()); bk_ntt->set_data((BootstrappingKeyNTT::PointerType)pair.first); bk_ntt_deleter = pair.second; Assert(ntt_handler == nullptr); ntt_handler = new CuNTTHandler<>(); ntt_handler->Create(); ntt_handler->CreateConstant(); cudaDeviceSynchronize(); CuCheckError(); dim3 grid(bk->k() + 1, (bk->k() + 1) * bk->l(), bk->t()); dim3 block(128); __BootstrappingKeyToNTT__<<<grid, block>>>(*bk_ntt, *d_bk, *ntt_handler); cudaDeviceSynchronize(); CuCheckError(); d_bk_deleter(d_bk->data()); delete d_bk; } void DeleteBootstrappingKeyNTT() { bk_ntt_deleter(bk_ntt->data()); delete bk_ntt; bk_ntt = nullptr; ntt_handler->Destroy(); delete ntt_handler; } void KeySwitchingKeyToDevice(const KeySwitchingKey* ksk) { Assert(ksk_dev == nullptr); ksk_dev = new KeySwitchingKey(ksk->n(), ksk->l(), ksk->w(), ksk->m()); std::pair<void*, MemoryDeleter> pair; pair = AllocatorGPU::New(ksk_dev->SizeMalloc()); ksk_dev->set_data((KeySwitchingKey::PointerType)pair.first); ksk_dev_deleter = pair.second; CuSafeCall(cudaMemcpy(ksk_dev->data(), ksk->data(), ksk->SizeMalloc(), cudaMemcpyHostToDevice)); } void DeleteKeySwitchingKey() { ksk_dev_deleter(ksk_dev->data()); delete ksk_dev; ksk_dev = nullptr; } __device__ inline uint32_t ModSwitch2048(uint32_t a) { return (((uint64_t)a << 32) + (0x1UL << 52)) >> 53; } template <uint32_t lwe_n = 500, uint32_t tlwe_n = 1024, uint32_t decomp_bits = 2, uint32_t decomp_size = 8> __device__ inline void KeySwitch(Torus* lwe, Torus* tlwe, Torus* ksk) { static const Torus decomp_mask = (1u << decomp_bits) - 1; static const Torus decomp_offset = 1u << (31 - decomp_size * decomp_bits); uint32_t tid = ThisThreadRankInBlock(); uint32_t bdim = ThisBlockSize(); Torus tmp; Torus res = 0; Torus val = 0; #pragma unroll 0 for (int i = tid; i <= lwe_n; i += bdim) { if (i == lwe_n) res = tlwe[tlwe_n]; #pragma unroll 0 for (int j = 0; j < tlwe_n; j ++) { if (j == 0) tmp = tlwe[0]; else tmp = -tlwe[1024 - j]; tmp += decomp_offset; for (int k = 0; k < decomp_size; k ++) { val = (tmp >> (32 - (k + 1) * decomp_bits)) & decomp_mask; if (val != 0) res -= ksk[(j << 14) | (k << 11) | (val << 9) | 
i]; } } lwe[i] = res; } } __device__ void Accumulate(Torus* tlwe, FFP* sh_acc_ntt, FFP* sh_res_ntt, uint32_t a_bar, FFP* tgsw_ntt, CuNTTHandler<> ntt) { static const uint32_t decomp_bits = 10; static const uint32_t decomp_mask = (1 << decomp_bits) - 1; static const int32_t decomp_half = 1 << (decomp_bits - 1); static const uint32_t decomp_offset = (0x1u << 31) + (0x1u << (31 - decomp_bits)); uint32_t tid = ThisThreadRankInBlock(); uint32_t bdim = ThisBlockSize(); // temp[2] = sh_acc[2] * (x^exp - 1) // sh_acc_ntt[0, 1] = Decomp(temp[0]) // sh_acc_ntt[2, 3] = Decomp(temp[1]) // This algorithm is tested in cpp. Torus temp; #pragma unroll for (int i = tid; i < 1024; i += bdim) { uint32_t cmp = (uint32_t)(i < (a_bar & 1023)); uint32_t neg = -(cmp ^ (a_bar >> 10)); uint32_t pos = -((1 - cmp) ^ (a_bar >> 10)); #pragma unroll for (int j = 0; j < 2; j ++) { temp = tlwe[(j << 10) | ((i - a_bar) & 1023)]; temp = (temp & pos) + ((-temp) & neg); temp -= tlwe[(j << 10) | i]; // decomp temp temp += decomp_offset; sh_acc_ntt[(2*j)*1024+i] = FFP(Torus( ((temp >> (32 - decomp_bits)) & decomp_mask) - decomp_half )); sh_acc_ntt[(2*j+1)*1024+i] = FFP(Torus( ((temp >> (32 - 2 * decomp_bits)) & decomp_mask) - decomp_half )); } } __syncthreads(); // must // 4 NTTs with 512 threads. // Input/output/buffer use the same shared memory location. if (tid < 512) { FFP* tar = &sh_acc_ntt[tid >> 7 << 10]; ntt.NTT<FFP>(tar, tar, tar, tid >> 7 << 7); } else { // must meet 4 sync made by NTTInv __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); } __syncthreads(); // Multiply with bootstrapping key in global memory. #pragma unroll for (int i = tid; i < 1024; i += bdim) { sh_res_ntt[4096+i] = 0; #pragma unroll for (int j = 0; j < 4; j ++) sh_res_ntt[4096+i] += sh_acc_ntt[j*1024+i] * tgsw_ntt[((2 * j + 1) << 10) + i]; } __syncthreads(); // new #pragma unroll for (int i = tid; i < 1024; i += bdim) { FFP temp = 0; #pragma unroll for (int j = 0; j < 4; j ++) temp += sh_acc_ntt[j*1024+i] * tgsw_ntt[((2 * j) << 10) + i]; sh_res_ntt[i] = temp; } __syncthreads(); // must // 2 NTTInvs and add acc with 256 threads. 
if (tid < 256) { FFP* src = &sh_res_ntt[tid >> 7 << 12]; ntt.NTTInvAdd<Torus>(&tlwe[tid >> 7 << 10], src, src, tid >> 7 << 7); } else { // must meet 4 sync made by NTTInv __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); } __syncthreads(); // must } __global__ void __Bootstrap__(Torus* out, Torus* in, Torus mu, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { // Assert(bk.k() == 1); // Assert(bk.l() == 2); // Assert(bk.n() == 1024); __shared__ FFP sh[6 * 1024]; // FFP* sh_acc_ntt[4] = { sh, sh + 1024, sh + 2048, sh + 3072 }; // FFP* sh_res_ntt[2] = { sh, sh + 4096 }; Torus* tlwe = (Torus*)&sh[5120]; // test vector // acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(in[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(in[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } static const uint32_t lwe_n = 500; static const uint32_t tlwe_n = 1024; static const uint32_t ks_bits = 2; static const uint32_t ks_size = 8; KeySwitch<lwe_n, tlwe_n, ks_bits, ks_size>(out, tlwe, ksk); } __global__ void __NandBootstrap__(Torus* out, Torus* in0, Torus* in1, Torus mu, Torus fix, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { __shared__ FFP sh[6 * 1024]; Torus* tlwe = (Torus*)&sh[5120]; // test vector: acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(fix - in0[500] - in1[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(0 - in0[i] - in1[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } KeySwitch<500, 1024, 2, 8>(out, tlwe, ksk); } __global__ void __OrBootstrap__(Torus* out, Torus* in0, Torus* in1, Torus mu, Torus fix, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { __shared__ FFP sh[6 * 1024]; Torus* tlwe = (Torus*)&sh[5120]; // test vector: acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(fix + in0[500] + in1[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(0 + in0[i] + in1[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } KeySwitch<500, 1024, 2, 8>(out, tlwe, ksk); } __global__ void __AndBootstrap__(Torus* out, Torus* in0, Torus* in1, Torus mu, 
Torus fix, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { __shared__ FFP sh[6 * 1024]; Torus* tlwe = (Torus*)&sh[5120]; // test vector: acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(fix + in0[500] + in1[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(0 + in0[i] + in1[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } KeySwitch<500, 1024, 2, 8>(out, tlwe, ksk); } __global__ void __NorBootstrap__(Torus* out, Torus* in0, Torus* in1, Torus mu, Torus fix, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { __shared__ FFP sh[6 * 1024]; Torus* tlwe = (Torus*)&sh[5120]; // test vector: acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(fix - in0[500] - in1[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(0 - in0[i] - in1[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } KeySwitch<500, 1024, 2, 8>(out, tlwe, ksk); } __global__ void __XorBootstrap__(Torus* out, Torus* in0, Torus* in1, Torus mu, Torus fix, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { __shared__ FFP sh[6 * 1024]; Torus* tlwe = (Torus*)&sh[5120]; // test vector: acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(fix + 2*in0[500] + 2*in1[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(0 + 2*in0[i] + 2*in1[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } KeySwitch<500, 1024, 2, 8>(out, tlwe, ksk); } __global__ void __XnorBootstrap__(Torus* out, Torus* in0, Torus* in1, Torus mu, Torus fix, FFP* bk, Torus* ksk, CuNTTHandler<> ntt) { __shared__ FFP sh[6 * 1024]; Torus* tlwe = (Torus*)&sh[5120]; // test vector: acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048) register int32_t bar = 2048 - ModSwitch2048(fix - 2*in0[500] - 2*in1[500]); register uint32_t tid = ThisThreadRankInBlock(); register uint32_t bdim = ThisBlockSize(); register uint32_t cmp, neg, pos; #pragma unroll for (int i = tid; i < 1024; i += bdim) { tlwe[i] = 0; // part a if (bar == 2048) tlwe[i + 1024] = mu; else { cmp = (uint32_t)(i < (bar & 1023)); neg = -(cmp ^ (bar >> 10)); pos = -((1 - cmp) ^ (bar >> 10)); tlwe[i + 1024] = (mu & 
pos) + ((-mu) & neg); // part b } } __syncthreads(); // accumulate #pragma unroll for (int i = 0; i < 500; i ++) { // 500 iterations bar = ModSwitch2048(0 - 2*in0[i] - 2*in1[i]); Accumulate(tlwe, sh, sh, bar, bk + (i << 13), ntt); } KeySwitch<500, 1024, 2, 8>(out, tlwe, ksk); } void Bootstrap(LWESample* out, LWESample* in, Torus mu, cudaStream_t st) { dim3 grid(1); dim3 block(512); __Bootstrap__<<<grid, block, 0, st>>>(out->data(), in->data(), mu, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } void NandBootstrap(LWESample* out, LWESample* in0, LWESample* in1, Torus mu, Torus fix, cudaStream_t st) { __NandBootstrap__<<<1, 512, 0, st>>>(out->data(), in0->data(), in1->data(), mu, fix, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } void OrBootstrap(LWESample* out, LWESample* in0, LWESample* in1, Torus mu, Torus fix, cudaStream_t st) { __OrBootstrap__<<<1, 512, 0, st>>>(out->data(), in0->data(), in1->data(), mu, fix, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } void AndBootstrap(LWESample* out, LWESample* in0, LWESample* in1, Torus mu, Torus fix, cudaStream_t st) { __AndBootstrap__<<<1, 512, 0, st>>>(out->data(), in0->data(), in1->data(), mu, fix, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } void NorBootstrap(LWESample* out, LWESample* in0, LWESample* in1, Torus mu, Torus fix, cudaStream_t st) { __NorBootstrap__<<<1, 512, 0, st>>>(out->data(), in0->data(), in1->data(), mu, fix, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } void XorBootstrap(LWESample* out, LWESample* in0, LWESample* in1, Torus mu, Torus fix, cudaStream_t st) { __XorBootstrap__<<<1, 512, 0, st>>>(out->data(), in0->data(), in1->data(), mu, fix, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } void XnorBootstrap(LWESample* out, LWESample* in0, LWESample* in1, Torus mu, Torus fix, cudaStream_t st) { __XnorBootstrap__<<<1, 512, 0, st>>>(out->data(), in0->data(), in1->data(), mu, fix, bk_ntt->data(), ksk_dev->data(), *ntt_handler); CuCheckError(); } } // namespace cufhe
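// ----------------------------------------------------------------------------
// A minimal host-side sketch of the rotation logic shared by the gate kernels
// above: each gate only changes the linear combination of ciphertexts fed to
// ModSwitch2048 (plus a gate-specific fix constant), then fills the
// accumulator's b-part with +mu or -mu via the same branchless cmp/neg/pos
// selection. mod_switch_2048 below is a simplified stand-in (NOT cuFHE's
// ModSwitch2048), and mu/phase are made-up example values.
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified placeholder: map a 32-bit phase to [0, 2048) by rounding.
static uint32_t mod_switch_2048(uint32_t phase) {
    return (uint32_t)(((uint64_t)phase + (1u << 19)) >> 20) & 0x7FF;
}

int main() {
    const int32_t  mu    = 1 << 29;       // example message encoding
    const uint32_t phase = 0x12345678u;   // stand-in for fix + in0[500] + in1[500]
    const int32_t  bar   = 2048 - (int32_t)mod_switch_2048(phase);

    std::vector<int32_t> part_b(1024);
    for (int i = 0; i < 1024; ++i) {
        if (bar == 2048) { part_b[i] = mu; continue; }
        // Branchless select, as in the kernels: the sign of mu flips once the
        // negacyclic rotation by bar crosses the boundary at index 1024.
        const uint32_t cmp = (uint32_t)(i < (bar & 1023));
        const uint32_t neg = -(cmp ^ (uint32_t)(bar >> 10));
        const uint32_t pos = -((1u - cmp) ^ (uint32_t)(bar >> 10));
        part_b[i] = (int32_t)(((uint32_t)mu & pos) + ((uint32_t)(-mu) & neg));
    }
    printf("bar=%d first=%d last=%d\n", bar, part_b[0], part_b[1023]);
    return 0;
}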
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* a, double b) { return b; } #endif #include <algorithm> #include <cfloat> #include <vector> #include "deformable_psroi_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename DType> __device__ DType bilinear_interp( const DType* data, const DType x, const DType y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); DType dist_x = static_cast<DType>(x - x1); DType dist_y = static_cast<DType>(y - y1); DType value11 = data[y1*width + x1]; DType value12 = data[y2*width + x1]; DType value21 = data[y1*width + x2]; DType value22 = data[y2*width + x2]; DType value = (1 - dist_x)*(1 - dist_y)*value11 + (1 - dist_x)*dist_y*value12 + dist_x*(1 - dist_y)*value21 + dist_x*dist_y*value22; return value; } template <typename DType> __global__ void DeformablePSROIPoolForwardKernel( const int count, const DType* bottom_data, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const DType* bottom_rois, const DType* bottom_trans, const bool no_trans, const DType trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, DType* top_data, DType* top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part); DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part); int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size); int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size); int class_id = ctop / channels_each_class; DType trans_x = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h)*part_size + part_w] * trans_std; DType trans_y = no_trans ? 
static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h)*part_size + part_w] * trans_std; DType wstart = static_cast<DType>(pw)* bin_size_w + roi_start_w; wstart += trans_x * roi_width; DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; DType sum = 0; int count = 0; int gw = floor(static_cast<DType>(pw) * group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { DType w = wstart + iw*sub_bin_size_w; DType h = hstart + ih*sub_bin_size_h; // bilinear interpolation if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop*group_size + gh)*group_size + gw; DType val = bilinear_interp(offset_bottom_data + c*height*width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count; top_count[index] = count; } } template <typename Dtype> void DeformablePSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype *bottom_trans = no_trans_ ? NULL : bottom[2]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); const int num_classes = no_trans_ ? 1 : bottom[2]->channels()/ 2; const int channels_each_class = no_trans_ ? 
output_dim_ : output_dim_ / num_classes; caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, Dtype(0), mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) /* PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); */ DeformablePSROIPoolForwardKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>> >( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, bottom_trans, no_trans_, trans_std_, sample_per_part_, output_dim_, group_size_, part_size_, num_classes, channels_each_class, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename DType> __global__ void DeformablePSROIPoolBackwardAccKernel( const int count, const DType* top_diff, const DType* top_count, const int num_rois, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, DType* bottom_data_diff, DType* bottom_trans_diff, const DType* bottom_data, const DType* bottom_rois, const DType* bottom_trans, const bool no_trans, const DType trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part); DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part); int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size); int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size); int class_id = ctop / channels_each_class; DType trans_x = no_trans ? static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h)*part_size + part_w] * trans_std; DType trans_y = no_trans ? 
static_cast<DType>(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h)*part_size + part_w] * trans_std; DType wstart = static_cast<DType>(pw)* bin_size_w + roi_start_w; wstart += trans_x * roi_width; DType hstart = static_cast<DType>(ph) * bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } DType diff_val = top_diff[index] / top_count[index]; const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { DType w = wstart + iw*sub_bin_size_w; DType h = hstart + ih*sub_bin_size_h; // bilinear interpolation if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop*group_size + gh)*group_size + gw; // backward on feature int x0 = floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); DType dist_x = w - x0, dist_y = h - y0; DType q00 = (1 - dist_x)*(1 - dist_y); DType q01 = (1 - dist_x)*dist_y; DType q10 = dist_x*(1 - dist_y); DType q11 = dist_x*dist_y; int bottom_index_base = c * height *width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x0, q00*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x0, q01*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x1, q10*diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x1, q11*diff_val); if (no_trans) { continue; } DType U00 = offset_bottom_data[bottom_index_base + y0*width + x0]; DType U01 = offset_bottom_data[bottom_index_base + y1*width + x0]; DType U10 = offset_bottom_data[bottom_index_base + y0*width + x1]; DType U11 = offset_bottom_data[bottom_index_base + y1*width + x1]; DType diff_x = (U11*dist_y + U10*(1 - dist_y) - U01*dist_y - U00*(1 - dist_y)) *trans_std*diff_val; diff_x *= roi_width; DType diff_y = (U11*dist_x + U01*(1 - dist_x) - U10*dist_x - U00*(1 - dist_x)) *trans_std*diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h)*part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1)*part_size + part_h)*part_size + part_w, diff_y); } } } } template <typename Dtype> void DeformablePSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype *bottom_trans = no_trans_ ? NULL : bottom[2]->gpu_data(); Dtype* bottom_data_diff = bottom[0]->mutable_gpu_diff(); Dtype *bottom_trans_diff = no_trans_ ? NULL : bottom[2]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const Dtype* mapping_channel_ptr = mapping_channel_.gpu_data(); const int num_classes = no_trans_ ? 1 : bottom[2]->channels()/ 2; const int channels_each_class = no_trans_ ? 
output_dim_ : output_dim_ / num_classes; caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); if(!no_trans_) caffe_gpu_set(bottom[2]->count(), Dtype(0), bottom[2]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_data_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) DeformablePSROIPoolBackwardAccKernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>> >( count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_data_diff, bottom_trans_diff, bottom_data, bottom_rois, bottom_trans, no_trans_, trans_std_, sample_per_part_, group_size_, part_size_, num_classes, channels_each_class); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(DeformablePSROIPoolingLayer); } // namespace caffe
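// ----------------------------------------------------------------------------
// A minimal host-side sketch (plain C++, no Caffe dependencies) of the same
// bilinear interpolation formula used by bilinear_interp above, evaluated on a
// tiny 2x2 grid so the weighting of the four neighbours is easy to verify.
#include <cmath>
#include <cstdio>

static float bilinear(const float* data, float x, float y, int width, int height) {
    (void)height;  // bounds checks are omitted in this sketch
    const int   x1 = (int)std::floor(x), x2 = (int)std::ceil(x);
    const int   y1 = (int)std::floor(y), y2 = (int)std::ceil(y);
    const float dx = x - x1, dy = y - y1;
    const float v11 = data[y1 * width + x1];   // top-left
    const float v12 = data[y2 * width + x1];   // bottom-left
    const float v21 = data[y1 * width + x2];   // top-right
    const float v22 = data[y2 * width + x2];   // bottom-right
    return (1 - dx) * (1 - dy) * v11 + (1 - dx) * dy * v12 +
           dx * (1 - dy) * v21 + dx * dy * v22;
}

int main() {
    const float grid[4] = { 0.f, 1.f,    // row 0
                            2.f, 3.f };  // row 1
    // Sampling the exact centre of the 2x2 cell averages all four values: 1.5.
    printf("center = %f\n", bilinear(grid, 0.5f, 0.5f, 2, 2));
    return 0;
}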
#include <cuda_helper.h> #include <cuda_vectors.h> #include <miner.h> __constant__ static uint32_t _ALIGN(16) c_midstate112[8]; __constant__ static uint32_t _ALIGN(16) c_midbuffer112[8]; __constant__ static uint32_t _ALIGN(16) c_dataEnd112[12]; __constant__ static const uint32_t _ALIGN(8) c_K[64] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; #ifdef __INTELLISENSE__ #define atomicExch(p,y) y #endif // ------------------------------------------------------------------------------------------------ static const uint32_t cpu_H256[8] = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; static const uint32_t cpu_K[64] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; __host__ static void sha256_step1_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t in, const uint32_t Kshared){ uint32_t vxandx = (((f) ^ (g)) & (e)) ^ (g); // xandx(e, f, g); uint32_t bsg21 = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25); // bsg2_1(e); uint32_t bsg20 = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22); //bsg2_0(a); uint32_t andorv = ((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c); uint32_t t1 = h + bsg21 + vxandx + Kshared + in; uint32_t t2 = bsg20 + andorv; d = d + t1; h = t1 + t2; } __host__ static void sha256_step2_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t* in, uint32_t pc, const uint32_t Kshared){ uint32_t pcidx1 = (pc-2) & 0xF; uint32_t pcidx2 = (pc-7) & 0xF; uint32_t pcidx3 = (pc-15) & 0xF; uint32_t inx0 = in[pc]; uint32_t inx1 = in[pcidx1]; uint32_t inx2 = in[pcidx2]; uint32_t inx3 = in[pcidx3]; uint32_t ssg21 = ROTR32(inx1, 17) ^ ROTR32(inx1, 19) ^ SPH_T32((inx1) >> 10); //ssg2_1(inx1); uint32_t ssg20 = ROTR32(inx3, 7) ^ ROTR32(inx3, 18) ^ SPH_T32((inx3) >> 3); //ssg2_0(inx3); uint32_t vxandx = (((f) ^ (g)) & (e)) ^ (g); // xandx(e, f, g); uint32_t bsg21 = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25); // bsg2_1(e); uint32_t bsg20 = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22); //bsg2_0(a); uint32_t andorv = 
((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c); uint32_t t1,t2; in[pc] = ssg21 + inx2 + ssg20 + inx0; t1 = h + bsg21 + vxandx + Kshared + in[pc]; t2 = bsg20 + andorv; d = d + t1; h = t1 + t2; } __host__ static void sha256_round_body_host(uint32_t* in, uint32_t* state, const uint32_t* Kshared){ uint32_t a = state[0]; uint32_t b = state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha256_step1_host(a,b,c,d,e,f,g,h,in[0], Kshared[0]); sha256_step1_host(h,a,b,c,d,e,f,g,in[1], Kshared[1]); sha256_step1_host(g,h,a,b,c,d,e,f,in[2], Kshared[2]); sha256_step1_host(f,g,h,a,b,c,d,e,in[3], Kshared[3]); sha256_step1_host(e,f,g,h,a,b,c,d,in[4], Kshared[4]); sha256_step1_host(d,e,f,g,h,a,b,c,in[5], Kshared[5]); sha256_step1_host(c,d,e,f,g,h,a,b,in[6], Kshared[6]); sha256_step1_host(b,c,d,e,f,g,h,a,in[7], Kshared[7]); sha256_step1_host(a,b,c,d,e,f,g,h,in[8], Kshared[8]); sha256_step1_host(h,a,b,c,d,e,f,g,in[9], Kshared[9]); sha256_step1_host(g,h,a,b,c,d,e,f,in[10],Kshared[10]); sha256_step1_host(f,g,h,a,b,c,d,e,in[11],Kshared[11]); sha256_step1_host(e,f,g,h,a,b,c,d,in[12],Kshared[12]); sha256_step1_host(d,e,f,g,h,a,b,c,in[13],Kshared[13]); sha256_step1_host(c,d,e,f,g,h,a,b,in[14],Kshared[14]); sha256_step1_host(b,c,d,e,f,g,h,a,in[15],Kshared[15]); for (uint32_t i=0; i<3; i++) { sha256_step2_host(a,b,c,d,e,f,g,h,in,0, Kshared[16+16*i]); sha256_step2_host(h,a,b,c,d,e,f,g,in,1, Kshared[17+16*i]); sha256_step2_host(g,h,a,b,c,d,e,f,in,2, Kshared[18+16*i]); sha256_step2_host(f,g,h,a,b,c,d,e,in,3, Kshared[19+16*i]); sha256_step2_host(e,f,g,h,a,b,c,d,in,4, Kshared[20+16*i]); sha256_step2_host(d,e,f,g,h,a,b,c,in,5, Kshared[21+16*i]); sha256_step2_host(c,d,e,f,g,h,a,b,in,6, Kshared[22+16*i]); sha256_step2_host(b,c,d,e,f,g,h,a,in,7, Kshared[23+16*i]); sha256_step2_host(a,b,c,d,e,f,g,h,in,8, Kshared[24+16*i]); sha256_step2_host(h,a,b,c,d,e,f,g,in,9, Kshared[25+16*i]); sha256_step2_host(g,h,a,b,c,d,e,f,in,10,Kshared[26+16*i]); sha256_step2_host(f,g,h,a,b,c,d,e,in,11,Kshared[27+16*i]); sha256_step2_host(e,f,g,h,a,b,c,d,in,12,Kshared[28+16*i]); sha256_step2_host(d,e,f,g,h,a,b,c,in,13,Kshared[29+16*i]); sha256_step2_host(c,d,e,f,g,h,a,b,in,14,Kshared[30+16*i]); sha256_step2_host(b,c,d,e,f,g,h,a,in,15,Kshared[31+16*i]); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } __host__ void lbry_sha256_setBlock_112_merged(uint32_t *pdata){ uint32_t in[16], buf[8], end[16]; for (uint32_t i=0;i<16;i++) in[i] = cuda_swab32(pdata[i]); for (uint32_t i=0; i<8;i++) buf[i] = cpu_H256[i]; for (uint32_t i=0;i<11;i++) end[i] = cuda_swab32(pdata[16+i]); sha256_round_body_host(in, buf, cpu_K); cudaMemcpyToSymbol(c_midstate112, buf, 32, 0, cudaMemcpyHostToDevice); uint32_t a = buf[0]; uint32_t b = buf[1]; uint32_t c = buf[2]; uint32_t d = buf[3]; uint32_t e = buf[4]; uint32_t f = buf[5]; uint32_t g = buf[6]; uint32_t h = buf[7]; sha256_step1_host(a,b,c,d,e,f,g,h,end[0], cpu_K[0]); sha256_step1_host(h,a,b,c,d,e,f,g,end[1], cpu_K[1]); sha256_step1_host(g,h,a,b,c,d,e,f,end[2], cpu_K[2]); sha256_step1_host(f,g,h,a,b,c,d,e,end[3], cpu_K[3]); sha256_step1_host(e,f,g,h,a,b,c,d,end[4], cpu_K[4]); sha256_step1_host(d,e,f,g,h,a,b,c,end[5], cpu_K[5]); sha256_step1_host(c,d,e,f,g,h,a,b,end[6], cpu_K[6]); sha256_step1_host(b,c,d,e,f,g,h,a,end[7], cpu_K[7]); sha256_step1_host(a,b,c,d,e,f,g,h,end[8], cpu_K[8]); sha256_step1_host(h,a,b,c,d,e,f,g,end[9], cpu_K[9]); 
sha256_step1_host(g,h,a,b,c,d,e,f,end[10],cpu_K[10]); sha256_step1_host(f, g, h, a, b, c, d, e, 0, cpu_K[11]); buf[0] = a; buf[1] = b; buf[2] = c; buf[3] = d; buf[4] = e; buf[5] = f; buf[6] = g; buf[7] = h; cudaMemcpyToSymbol(c_midbuffer112, buf, 32, 0, cudaMemcpyHostToDevice); end[12] = 0x80000000; end[13] = 0; end[14] = 0; end[15] = 0x380; uint32_t x2_0,x2_1; x2_0 = ROTR32(end[1], 7) ^ ROTR32(end[1], 18) ^ SPH_T32(end[1] >> 3); //ssg2_0(inx3);//ssg2_0(end[1]); // x2_1 = ROTR32(end[14], 17) ^ ROTR32(end[14], 19) ^ SPH_T32(end[14] >> 10) + x2_0; //ssg2_1(inx1); ssg2_1(end[14]) + x2_0; end[0] = end[0] + end[9] + x2_0; x2_0 = ROTR32(end[2], 7) ^ ROTR32(end[2], 18) ^ SPH_T32(end[2] >> 3); x2_1 = (ROTR32(end[15], 17) ^ ROTR32(end[15], 19) ^ SPH_T32(end[15] >> 10)) + x2_0; end[1] = end[1] + end[10] + x2_1; x2_0 = ROTR32(end[3], 7) ^ ROTR32(end[3], 18) ^ SPH_T32(end[3] >> 3);//ssg2_0(end[3]); x2_1 = (ROTR32(end[0], 17) ^ ROTR32(end[0], 19) ^ SPH_T32(end[0] >> 10)) + x2_0; end[2]+= x2_1; x2_0 = ROTR32(end[4], 7) ^ ROTR32(end[4], 18) ^ SPH_T32(end[4] >> 3);//ssg2_0(end[4]); x2_1 = (ROTR32(end[1], 17) ^ ROTR32(end[1], 19) ^ SPH_T32(end[1] >> 10)) + x2_0; end[3] = end[3] + end[12] + x2_1; x2_0 = ROTR32(end[5], 7) ^ ROTR32(end[5], 18) ^ SPH_T32(end[5] >> 3);//ssg2_0(end[4]); end[4] = end[4] + end[13] + x2_0; x2_0 = ROTR32(end[6], 7) ^ ROTR32(end[6], 18) ^ SPH_T32(end[6] >> 3);//ssg2_0(end[6]); x2_1 = (ROTR32(end[3], 17) ^ ROTR32(end[3], 19) ^ SPH_T32(end[3] >> 10)) + x2_0; end[5] = end[5] + end[14] + x2_1; x2_0 = ROTR32(end[7], 7) ^ ROTR32(end[7], 18) ^ SPH_T32(end[7] >> 3);//ssg2_0(end[7]); end[6] = end[6] + end[15] + x2_0; x2_0 = ROTR32(end[8], 7) ^ ROTR32(end[8], 18) ^ SPH_T32(end[8] >> 3);//ssg2_0(end[8]); x2_1 = (ROTR32(end[5], 17) ^ ROTR32(end[5], 19) ^ SPH_T32(end[5] >> 10)) + x2_0; end[7] = end[7] + end[0] + x2_1; x2_0 = ROTR32(end[9], 7) ^ ROTR32(end[9], 18) ^ SPH_T32(end[9] >> 3);//ssg2_0(end[9]); end[8] = end[8] + end[1] + x2_0; x2_0 = ROTR32(end[10], 7) ^ ROTR32(end[10], 18) ^ SPH_T32(end[10] >> 3);//ssg2_0(end[10]); x2_1 = (ROTR32(end[7], 17) ^ ROTR32(end[7], 19) ^ SPH_T32(end[7] >> 10)) + x2_0; end[9] = end[9] + x2_1; cudaMemcpyToSymbol(c_dataEnd112, end, sizeof(end), 0, cudaMemcpyHostToDevice); } //END OF HOST FUNCTIONS ------------------------------------------------------------------- //SHA256 MACROS --------------------------------------------------------------------------- #define xor3b(a,b,c) (a ^ b ^ c) __device__ __forceinline__ uint32_t bsg2_0(const uint32_t x) { return xor3b(ROTR32(x,2),ROTR32(x,13),ROTR32(x,22)); } __device__ __forceinline__ uint32_t bsg2_1(const uint32_t x) { return xor3b(ROTR32(x,6),ROTR32(x,11),ROTR32(x,25)); } __device__ __forceinline__ uint32_t ssg2_0(const uint32_t x){ return xor3b(ROTR32(x,7),ROTR32(x,18),(x>>3)); } __device__ __forceinline__ uint32_t ssg2_1(const uint32_t x){ return xor3b(ROTR32(x,17),ROTR32(x,19),(x>>10)); } __device__ __forceinline__ uint32_t ssg2_11(const uint32_t x){ return xor3b(ROTR32(x,17),ROTR32(x,19),shr_u32(x,10)); } #define Maj(x, y, z) ((x & (y | z)) | (y & z)) #define Ch(a, b, c) (((b^c) & a) ^ c) __device__ __forceinline__ uint64_t vectorizeswap(const uint64_t v){ uint2 result; asm volatile ("mov.b64 {%0,%1},%2;" : "=r"(result.y), "=r"(result.x) : "l"(v)); return devectorize(result); } __device__ __forceinline__ static void sha2_step(const uint32_t a,const uint32_t b,const uint32_t c, uint32_t &d,const uint32_t e,const uint32_t f,const uint32_t g, uint32_t &h,const uint32_t in, const uint32_t Kshared) { uint32_t bsg2_1 = 
ROTR32(e,6) ^ ROTR32(e,11) ^ ROTR32(e,25); uint32_t bsg2_0 = ROTR32(a,2) ^ ROTR32(a,13) ^ ROTR32(a,22); uint32_t Ch = ((f ^ g) & e) ^ g; uint32_t Maj= (b & c) | ((b | c) & a); const uint32_t t1 = h + bsg2_1 + Ch + Kshared + in; h = t1 + Maj + bsg2_0; d+= t1; } __device__ __forceinline__ static void sha256_round_first(uint32_t *in,uint32_t *buf,const uint32_t *state) { uint32_t a = buf[0] + in[11]; uint32_t b = buf[1]; uint32_t c = buf[2]; uint32_t d = buf[3]; uint32_t e = buf[4] + in[11]; uint32_t f = buf[5]; uint32_t g = buf[6]; uint32_t h = buf[7]; // 10 first steps made on host //sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[11]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[12]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[13]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[14]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[15]); //in is partially precomputed on host in[2]+= in[11]; in[4]+= ssg2_11(in[2]); in[6]+= ssg2_11(in[4]); in[8]+= ssg2_11(in[6]); in[9]+= in[ 2]; sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[16]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[17]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[18]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[19]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[20]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[21]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[22]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[23]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[24]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[25]); #pragma unroll 6 for (uint32_t j = 10; j < 16; j++){ in[j]+= ssg2_11(in[(j + 14) & 15]) + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]); } sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[26]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[27]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[28]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[29]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[30]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[31]); #pragma unroll 16 for (uint32_t j = 0; j < 16; j++){ in[j]+= ssg2_11(in[(j + 14) & 15]) + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]); } sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[16+16]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[17+16]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[18+16]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[19+16]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[20+16]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[21+16]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[22+16]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[23+16]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[24+16]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[25+16]); sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[26+16]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[27+16]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[28+16]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[29+16]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[30+16]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[31+16]); #pragma unroll 16 for (uint32_t j = 0; j < 16; j++){ in[j]+= ssg2_11(in[(j + 14) & 15]) + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]); } sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[16+16*2]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[17+16*2]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[18+16*2]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[19+16*2]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[20+16*2]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[21+16*2]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[22+16*2]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[23+16*2]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[24+16*2]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[25+16*2]); sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[26+16*2]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[27+16*2]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[28+16*2]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[29+16*2]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[30+16*2]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[31+16*2]); buf[ 
0] = state[0] + a; buf[ 1] = state[1] + b; buf[ 2] = state[2] + c; buf[ 3] = state[3] + d; buf[ 4] = state[4] + e; buf[ 5] = state[5] + f; buf[ 6] = state[6] + g; buf[ 7] = state[7] + h; } //END OF SHA256 MACROS -------------------------------------------------------------------- //SHA512 MACROS --------------------------------------------------------------------------- static __constant__ const uint64_t K_512[80] = { 0x428A2F98D728AE22, 0x7137449123EF65CD, 0xB5C0FBCFEC4D3B2F, 0xE9B5DBA58189DBBC, 0x3956C25BF348B538, 0x59F111F1B605D019, 0x923F82A4AF194F9B, 0xAB1C5ED5DA6D8118, 0xD807AA98A3030242, 0x12835B0145706FBE, 0x243185BE4EE4B28C, 0x550C7DC3D5FFB4E2, 0x72BE5D74F27B896F, 0x80DEB1FE3B1696B1, 0x9BDC06A725C71235, 0xC19BF174CF692694, 0xE49B69C19EF14AD2, 0xEFBE4786384F25E3, 0x0FC19DC68B8CD5B5, 0x240CA1CC77AC9C65, 0x2DE92C6F592B0275, 0x4A7484AA6EA6E483, 0x5CB0A9DCBD41FBD4, 0x76F988DA831153B5, 0x983E5152EE66DFAB, 0xA831C66D2DB43210, 0xB00327C898FB213F, 0xBF597FC7BEEF0EE4, 0xC6E00BF33DA88FC2, 0xD5A79147930AA725, 0x06CA6351E003826F, 0x142929670A0E6E70, 0x27B70A8546D22FFC, 0x2E1B21385C26C926, 0x4D2C6DFC5AC42AED, 0x53380D139D95B3DF, 0x650A73548BAF63DE, 0x766A0ABB3C77B2A8, 0x81C2C92E47EDAEE6, 0x92722C851482353B, 0xA2BFE8A14CF10364, 0xA81A664BBC423001, 0xC24B8B70D0F89791, 0xC76C51A30654BE30, 0xD192E819D6EF5218, 0xD69906245565A910, 0xF40E35855771202A, 0x106AA07032BBD1B8, 0x19A4C116B8D2D0C8, 0x1E376C085141AB53, 0x2748774CDF8EEB99, 0x34B0BCB5E19B48A8, 0x391C0CB3C5C95A63, 0x4ED8AA4AE3418ACB, 0x5B9CCA4F7763E373, 0x682E6FF3D6B2B8A3, 0x748F82EE5DEFB2FC, 0x78A5636F43172F60, 0x84C87814A1F0AB72, 0x8CC702081A6439EC, 0x90BEFFFA23631E28, 0xA4506CEBDE82BDE9, 0xBEF9A3F7B2C67915, 0xC67178F2E372532B, 0xCA273ECEEA26619C, 0xD186B8C721C0C207, 0xEADA7DD6CDE0EB1E, 0xF57D4F7FEE6ED178, 0x06F067AA72176FBA, 0x0A637DC5A2C898A6, 0x113F9804BEF90DAE, 0x1B710B35131C471B, 0x28DB77F523047D84, 0x32CAAB7B40C72493, 0x3C9EBE0A15C9BEBC, 0x431D67C49C100D4C, 0x4CC5D4BECB3E42B6, 0x597F299CFC657E2A, 0x5FCB6FAB3AD6FAEC, 0x6C44198C4A475817 }; #define bsg5_0(x) (ROTR64(x,28) ^ ROTR64(x,34) ^ ROTR64(x,39)) #define bsg5_1(x) (ROTR64(x,14) ^ ROTR64(x,18) ^ ROTR64(x,41)) #define ssg5_0(x) (ROTR64(x, 1) ^ ROTR64(x, 8) ^ shr_u64(x,7)) #define ssg5_1(x) (ROTR64(x,19) ^ ROTR64(x,61) ^ shr_u64(x,6)) #define andor64(a,b,c) ((a & (b | c)) | (b & c)) #define xandx64(e,f,g) (g ^ (e & (g ^ f))) // RIPEMD MACROS----------------------------------------------------------------------------- /* * Round constants for RIPEMD-160. 
*/ static __constant__ const uint32_t c_IV[5] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0}; static __constant__ const uint32_t KL[5] = {0x00000000, 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xA953FD4E}; static __constant__ const uint32_t KR[5] = {0x50A28BE6, 0x5C4DD124, 0x6D703EF3, 0x7A6D76E9, 0x00000000}; /* Left line */ static __constant__ const uint32_t RL[5][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, /* Round 1: id */ { 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8 }, /* Round 2: rho */ { 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12 }, /* Round 3: rho^2 */ { 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2 }, /* Round 4: rho^3 */ { 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13 } /* Round 5: rho^4 */ }; /* Right line */ static __constant__ const uint32_t RR[5][16] = { { 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12 }, /* Round 1: pi */ { 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2 }, /* Round 2: rho pi */ { 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13 }, /* Round 3: rho^2 pi */ { 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14 }, /* Round 4: rho^3 pi */ { 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11 } /* Round 5: rho^4 pi */ }; /* Shifts, left line */ static __constant__ const uint32_t SL[5][16] = { { 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8 }, /* Round 1 */ { 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12 }, /* Round 2 */ { 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5 }, /* Round 3 */ { 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12 }, /* Round 4 */ { 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6 } /* Round 5 */ }; /* Shifts, right line */ static __constant__ const uint32_t SR[5][16] = { { 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6 }, /* Round 1 */ { 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11 }, /* Round 2 */ { 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5 }, /* Round 3 */ { 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8 }, /* Round 4 */ { 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11 } /* Round 5 */ }; __device__ __forceinline__ static uint32_t ROTATE(const uint32_t x,const uint32_t r){ if(r==8) return __byte_perm(x, 0, 0x2103); else return ROTL32(x,r); } /* * Round functions for RIPEMD-160. 
*/ //#define F1(x, y, z) xor3x(x, y, z) __device__ __forceinline__ uint32_t F1(const uint32_t a,const uint32_t b,const uint32_t c){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r"(result) : "r"(a), "r"(b),"r"(c)); #else result = a^b^c; #endif return result; } //#define F2(x, y, z) ((x & (y ^ z)) ^ z) __device__ __forceinline__ uint32_t F2(const uint32_t a,const uint32_t b,const uint32_t c){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0xCA;" : "=r"(result) : "r"(a), "r"(b),"r"(c)); //0xCA=((F0∧(CC⊻AA))⊻AA) #else result = ((a & (b ^ c)) ^ c); #endif return result; } //#define F3(x, y, z) ((x | ~y) ^ z) __device__ __forceinline__ uint32_t F3(const uint32_t x,const uint32_t y,const uint32_t z){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r"(result) : "r"(x), "r"(y),"r"(z)); //0x59=((F0∨(¬CC))⊻AA) #else result = ((x | ~y) ^ z); #endif return result; } //#define F4(x, y, z) (y ^ ((x ^ y) & z)) __device__ __forceinline__ uint32_t F4(const uint32_t x,const uint32_t y,const uint32_t z){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0xE4;" : "=r"(result) : "r"(x), "r"(y),"r"(z)); //0xE4=(CC⊻((F0⊻CC)∧AA)) #else result = (y ^ ((x ^ y) & z)); #endif return result; } //#define F5(x, y, z) (x ^ (y | ~z)) __device__ __forceinline__ uint32_t F5(const uint32_t x,const uint32_t y,const uint32_t z){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0x2D;" : "=r"(result) : "r"(x), "r"(y),"r"(z)); //0x2D=(F0⊻(CC∨(¬AA))) #else result = (x ^ (y | ~z)); #endif return result; } __device__ __forceinline__ static void RIPEMD160_ROUND_BODY(const uint32_t *in, uint32_t *h){ uint32_t T; uint32_t AL, BL, CL, DL, EL; /* left line */ uint32_t AR, BR, CR, DR, ER; /* right line */ AL = AR = h[0]; BL = BR = h[1]; CL = CR = h[2]; DL = DR = h[3]; EL = ER = h[4]; /* Round 1 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F1(BL, CL, DL) + in[RL[0][w]] + KL[0], SL[0][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F5(BR, CR, DR) + in[RR[0][w]] + KR[0], SR[0][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 2 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F2(BL, CL, DL) + in[RL[1][w]] + KL[1], SL[1][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F4(BR, CR, DR) + in[RR[1][w]] + KR[1], SR[1][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 3 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F3(BL, CL, DL) + in[RL[2][w]] + KL[2], SL[2][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F3(BR, CR, DR) + in[RR[2][w]] + KR[2], SR[2][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 4 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F4(BL, CL, DL) + in[RL[3][w]] + KL[3], SL[3][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F2(BR, CR, DR) + in[RR[3][w]] + KR[3], SR[3][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 5 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F5(BL, CL, DL) + in[RL[4][w]] + KL[4], SL[4][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F1(BR, CR, DR) + 
in[RR[4][w]] + KR[4], SR[4][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } T = h[1] + CL + DR; h[1] = h[2] + DL + ER; h[2] = h[3] + EL + AR; h[3] = h[4] + AL + BR; h[4] = h[0] + BL + CR; h[0] = T; } // END OF RIPEMD MACROS---------------------------------------------------------------------- #define NPT 4 __global__ __launch_bounds__(768,1) void gpu_lbry_merged(const uint32_t threads,const uint32_t startNounce, uint32_t *resNonces,const uint64_t target64){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint32_t buf[8], state[8]; const uint32_t c_H256[8] = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; const uint64_t IV512[8] = { 0x6A09E667F3BCC908, 0xBB67AE8584CAA73B, 0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1, 0x510E527FADE682D1, 0x9B05688C2B3E6C1F, 0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179 }; uint64_t r[8]; uint64_t W[16]; uint32_t dat[16]; uint32_t h[5]; const uint64_t step = gridDim.x * blockDim.x; const uint64_t maxNonce = startNounce + threads; for(uint64_t nounce = startNounce + thread; nounce<maxNonce;nounce+=step){ *(uint2x4*)&dat[0] = *(uint2x4*)&c_dataEnd112[0]; *(uint2*)&dat[ 8] = *(uint2*)&c_dataEnd112[ 8]; dat[10] = c_dataEnd112[10]; dat[11] = _LODWORD(nounce); dat[12] = 0x80000000; dat[13] = 0; dat[14] = 0; dat[15] = 0x380; *(uint2x4*)&state[0] = *(uint2x4*)&c_midstate112[0]; *(uint2x4*)&buf[0] = *(uint2x4*)&c_midbuffer112[0]; sha256_round_first(dat, buf, state); // second sha256 #pragma unroll 8 for(uint32_t i=0;i<8;i++){ dat[ i] = buf[ i]; } dat[8] = 0x80000000; #pragma unroll 6 for (int i=9; i<15; i++) dat[i] = 0; dat[15] = 0x100; #pragma unroll 8 for(uint32_t i=0;i<8;i++) buf[i] = c_H256[ i]; #pragma unroll for (int i = 0; i < 16; i++) { const uint32_t t1 = buf[7] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]) + c_K[i] + dat[i]; #pragma unroll 7 for (int l = 6; l >= 0; l--) buf[l + 1] = buf[l]; buf[0] = t1 + Maj(buf[1], buf[2], buf[3]) + bsg2_0(buf[1]); buf[4]+= t1; } #pragma unroll for (int i = 16; i < 64; i++) { dat[i & 15]+= ssg2_1(dat[(i - 2) & 15]) + dat[(i - 7) & 15] + ssg2_0(dat[(i - 15) & 15]); const uint32_t t1 = dat[i & 15] + buf[7] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]) + c_K[i]; #pragma unroll 7 for (int l = 6; l >= 0; l--) buf[l + 1] = buf[l]; buf[0] = t1 + Maj(buf[1], buf[2], buf[3]) + bsg2_0(buf[1]); buf[4]+= t1; } #pragma unroll 8 for(uint32_t i=0;i<8;i++) buf[ i] += c_H256[ i]; //SHA512------------------------------------------------------------------------------------- #pragma unroll 8 for(uint32_t i=0;i<8;i++) r[ i] = IV512[ i]; W[0] = vectorizeswap(((uint64_t*)buf)[0]); W[1] = vectorizeswap(((uint64_t*)buf)[1]); W[2] = vectorizeswap(((uint64_t*)buf)[2]); W[3] = vectorizeswap(((uint64_t*)buf)[3]); W[4] = 0x8000000000000000; // end tag #pragma unroll 10 for (int i = 5; i < 15; i++) W[i] = 0; W[15] = 0x100; // 256 bits uint64_t t1; #pragma unroll 16 for (int i = 0; i < 16; i++) { t1 = W[i] + r[ 7] + bsg5_1(r[ 4]) + xandx64(r[ 4], r[ 5], r[ 6]) + K_512[i]; #pragma unroll for (int l = 6; l >= 0; l--) r[l + 1] = r[l]; r[0] = t1 + andor64(r[ 1], r[ 2], r[ 3]) + bsg5_0(r[ 1]); r[4]+= t1; } #pragma unroll for (int i = 16; i < 80; i+=16) { #pragma unroll for(uint32_t j=0;j<16;j++){ W[j] = ssg5_1(W[(j + 14) & 15]) + ssg5_0(W[(j + 1) & 15]) + W[j] + W[(j + 9) & 15]; } #pragma unroll for(uint32_t j=0;j<16;j++){ t1 = r[ 7] + W[j] + bsg5_1(r[ 4]) + xandx64(r[ 4], r[ 5], r[ 6]) + K_512[i+j]; // t1 = r[ 7] + W[j] + K_512[i+j] + xandx64(r[ 4], r[ 5], r[ 6]) + bsg5_1(r[ 4]); 
#pragma unroll for (int l = 6; l >= 0; l--) r[l + 1] = r[l]; r[0] = t1 + andor64(r[ 1], r[ 2], r[ 3]) + bsg5_0(r[ 1]); r[4]+= t1; } } //END OF SHA512------------------------------------------------------------------------------ *(uint2x4*)&dat[ 0] = *(uint2x4*)&r[ 0] + *(uint2x4*)&IV512[ 0]; // #pragma unroll 4 for (int i = 0; i < 4; i++) *(uint64_t*)&dat[i<<1] = cuda_swab64(*(uint64_t*)&dat[i<<1]); dat[8] = 0x80; #pragma unroll 7 for (int i=9;i<16;i++) dat[i] = 0; dat[14] = 0x100; // size in bits #pragma unroll 5 for (int i=0; i<5; i++) h[i] = c_IV[i]; RIPEMD160_ROUND_BODY(dat, h); #pragma unroll 5 for (int i=0; i<5; i++) buf[i] = h[i]; // second 32 bytes block hash *(uint2x4*)&dat[ 0] = *(uint2x4*)&r[ 4] + *(uint2x4*)&IV512[ 4]; // #pragma unroll 4 for (int i = 0; i < 4; i++) *(uint64_t*)&dat[i<<1] = cuda_swab64(*(uint64_t*)&dat[i<<1]); dat[8] = 0x80; #pragma unroll 7 for (int i=9;i<16;i++) dat[i] = 0; dat[14] = 0x100; // size in bits #pragma unroll 5 for (int i=0; i<5; i++) h[i] = c_IV[i]; RIPEMD160_ROUND_BODY(dat, h); // first final sha256 #pragma unroll 5 for (int i=0;i<5;i++) dat[i] = cuda_swab32(buf[i]); #pragma unroll 5 for (int i=0;i<5;i++) dat[i+5] = cuda_swab32(h[i]); dat[10] = 0x80000000; #pragma unroll 4 for (int i=11; i<15; i++) dat[i] = 0; dat[15] = 0x140; #pragma unroll 8 for(uint32_t i=0;i<8;i++){ buf[ i] = c_H256[ i]; } #pragma unroll for (int i = 0; i < 16; i++) { const uint32_t t1 = buf[7] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]) + c_K[i] + dat[i]; #pragma unroll 7 for (int l = 6; l >= 0; l--) buf[l + 1] = buf[l]; buf[0] = t1 + Maj(buf[1], buf[2], buf[3]) + bsg2_0(buf[1]); buf[4]+= t1; } #pragma unroll for (int i = 16; i < 64; i+=16) { #pragma unroll for(uint32_t j=0;j<16;j++) dat[j] = ssg2_11(dat[(j + 14) & 15]) + dat[j] + dat[(j + 9) & 15] + ssg2_0(dat[(j + 1) & 15]); #pragma unroll for(uint32_t j=0;j<16;j++){ const uint32_t t1 = dat[j] + buf[7] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]) + c_K[i+j]; #pragma unroll 7 for (int l = 6; l >= 0; l--) buf[l + 1] = buf[l]; buf[0] = t1 + Maj(buf[1], buf[2], buf[3]) + bsg2_0(buf[1]); buf[4]+= t1; } } // second sha256 #pragma unroll 8 for(uint32_t i=0;i<8;i++) dat[i] = buf[i] + c_H256[i]; dat[8] = 0x80000000; #pragma unroll 6 for (int i=9; i<15; i++) dat[i] = 0; dat[15] = 0x100; #pragma unroll 8 for(uint32_t i=0;i<8;i++){ buf[ i] = c_H256[ i]; } // sha256_round_body_final(dat, buf); #pragma unroll for (int i = 0; i < 16; i++) { const uint32_t t1 = buf[7] + dat[i] + c_K[i] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]); #pragma unroll 7 for (int l = 6; l >= 0; l--) buf[l + 1] = buf[l]; buf[0] = t1 + Maj(buf[1], buf[2], buf[3]) + bsg2_0(buf[1]); buf[4]+= t1; } #pragma unroll for (int i = 16; i < 58; i++) { dat[i & 15]+= ssg2_1(dat[(i - 2) & 15]) + dat[(i - 7) & 15] + ssg2_0(dat[(i - 15) & 15]); const uint32_t t1 = buf[7] + dat[i & 15] + c_K[i] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]); #pragma unroll 7 for (int l = 6; l >= 0; l--) buf[l + 1] = buf[l]; buf[0] = t1 + Maj(buf[1], buf[2], buf[3]) + bsg2_0(buf[1]); buf[4]+= t1; } dat[10]+= ssg2_1(dat[8]) + dat[3] + ssg2_0(dat[11]); buf[3]+= buf[7] + dat[10] + c_K[58] + bsg2_1(buf[4]) + Ch(buf[4], buf[5], buf[6]); dat[11]+= ssg2_1(dat[9]) + dat[4] + ssg2_0(dat[12]); buf[2]+= buf[6] + dat[11] + c_K[59] + bsg2_1(buf[3]) + Ch(buf[3], buf[4], buf[5]); dat[12]+= ssg2_1(dat[10]) + dat[5] + ssg2_0(dat[13]); buf[1]+= buf[5] + dat[12] + c_K[60] + bsg2_1(buf[2]) + Ch(buf[2], buf[3], buf[4]); buf[6] = cuda_swab32(c_H256[6] + buf[0] + buf[4] + dat[13]+ ssg2_1(dat[11]) + dat[6] + 
ssg2_0(dat[14]) + c_K[61] + bsg2_1(buf[1]) + Ch(buf[1], buf[2], buf[3])); buf[7] = cuda_swab32(c_H256[7] + buf[1]); // valid nonces if (*(uint64_t*)&buf[ 6] <= target64){ uint32_t tmp = atomicExch(&resNonces[0], nounce-startNounce); //we actually return "thread" if (tmp != UINT32_MAX) resNonces[1] = tmp; return; } } } __host__ void lbry_merged(int thr_id,uint32_t startNonce, uint32_t threads, uint32_t *d_resNonce, const uint64_t target64){ uint32_t threadsperblock = 768; dim3 grid((threads + (NPT*threadsperblock) - 1) / (NPT*threadsperblock)); dim3 block(threadsperblock); gpu_lbry_merged <<<grid, block>>> (threads,startNonce, d_resNonce, target64); }
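// ----------------------------------------------------------------------------
// A minimal host-side sketch of the SHA-256 message-schedule sigma functions
// (ssg2_0 / ssg2_1 above) and one expansion step of the recurrence
// W[t] = ssg2_1(W[t-2]) + W[t-7] + ssg2_0(W[t-15]) + W[t-16], which is what the
// unrolled "+= ssg2_11(...) + ... + ssg2_0(...)" lines in the kernel compute.
#include <cstdint>
#include <cstdio>

static uint32_t rotr32(uint32_t x, uint32_t n) { return (x >> n) | (x << (32 - n)); }
static uint32_t sigma0(uint32_t x) { return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3); }
static uint32_t sigma1(uint32_t x) { return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10); }

int main() {
    // First 16 schedule words of the padded one-block message "abc".
    uint32_t W[17] = { 0x61626380, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0x00000018 };
    W[16] = sigma1(W[14]) + W[9] + sigma0(W[1]) + W[0];
    printf("W[16] = %08x\n", W[16]);   // 61626380 for this message
    return 0;
}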
#include <iostream> #include <chrono> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" // ------------------------------------------------- // set // ------------------------------------------------- __global__ void kernal_fp32_Vector_set ( float* dst, float a, int size ) { int index = threadIdx.x; while ( index < size ) { dst[index] = a; index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_set( float* dev_dst, float a, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_set<<<1, 1024, 0, streamId>>> ( dev_dst, a, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // add_ex // ------------------------------------------------- __global__ void kernal_fp32_Vector_add_ex ( float* dst, const float* src0, const float* src1, float a, float b, float c, int size ) { int index = threadIdx.x; while ( index < size ) { dst[index] = a * src0[index] + b * src1[index] + c; index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_add_ex( float* dev_dst, const float* dev_src0, const float* dev_src1, float a, float b, float c, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_add_ex<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src0, dev_src1, a, b, c, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // mul_ex // ------------------------------------------------- __global__ void kernal_fp32_Vector_mul_ex( float* dst, const float* src0, const float* src1, float a, float b, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = a * src0[index] * src1[index] + b; index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_mul_ex ( float* dev_dst, const float* dev_src0, const float* dev_src1, float a, float b, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_mul_ex<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src0, dev_src1, a, b, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // div_ex // ------------------------------------------------- __global__ void kernal_fp32_Vector_div_ex( float* dst, const float* src0, const float* src1, float a, float b, float c, float d, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = (a * src0[index] + b) / (c * src1[index] + d); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_div_ex( float *dev_dst, float const *dev_src0, float const *dev_src1, float a, float b, float c, float d, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_div_ex<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src0, dev_src1, a, b, c, d, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // sqrt // ------------------------------------------------- __global__ void kernal_fp32_Vector_sqrt( float* dst, const float* src, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = sqrt(src[index]); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_sqrt( float *dev_dst, float const *dev_src, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_sqrt<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // exp // 
------------------------------------------------- __global__ void kernal_fp32_Vector_exp( float* dst, const float* src, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = exp(src[index]); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_exp( float *dev_dst, float const *dev_src, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_exp<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // min // ------------------------------------------------- // vector-vector version __global__ void kernal_fp32_Vector_min( float* dst, const float* src0, const float* src1, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = min(src0[index], src1[index]); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_min( float *dev_dst, float const *dev_src0, float const *dev_src1, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_min<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src0, dev_src1, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // scalar-coefficient version __global__ void kernal_fp32_Vector_min_v( float* dst, const float* src, float a, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = min(a, src[index]); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_min_v( float *dev_dst, float const *dev_src, float a, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_min_v<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src, a, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // max // ------------------------------------------------- // vector-vector version __global__ void kernal_fp32_Vector_max( float* dst, const float* src0, const float* src1, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = max(src0[index], src1[index]); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_max( float *dev_dst, float const *dev_src0, float const *dev_src1, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_max<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src0, dev_src1, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // scalar-coefficient version __global__ void kernal_fp32_Vector_max_v( float* dst, const float* src, float a, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = max(a, src[index]); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_max_v( float *dev_dst, float const *dev_src, float a, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_max_v<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src, a, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // ------------------------------------------------- // clamp // ------------------------------------------------- __global__ void kernal_fp32_Vector_clamp( float* dst, const float* src, float lo, float hi, int size) { int index = threadIdx.x; while ( index < size ) { dst[index] = max(lo, min(hi, src[index])); index += blockDim.x; } } BBCU_DLL_EXPORT int bbcu_fp32_Vector_clamp( float *dev_dst, float const *dev_src, float lo, float hi, int size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); kernal_fp32_Vector_clamp<<<1, 1024, 0, streamId>>> ( dev_dst, dev_src, lo, hi, size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
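// ----------------------------------------------------------------------------
// A minimal sketch (not part of bbcu) contrasting the single-block stride loop
// used by the kernels above (<<<1, 1024>>> with index += blockDim.x) with the
// usual grid-stride form, which covers the same element range when several
// blocks are launched. Kernel name and launch shape here are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_grid_stride(float* dst, const float* src, float a, int size)
{
    // Each thread starts at its global index and hops by the total thread count.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
         i += gridDim.x * blockDim.x) {
        dst[i] = a * src[i];
    }
}

int main()
{
    const int n = 1 << 20;
    float *src = nullptr, *dst = nullptr;
    cudaMallocManaged(&src, n * sizeof(float));
    cudaMallocManaged(&dst, n * sizeof(float));
    for (int i = 0; i < n; ++i) src[i] = 1.0f;
    scale_grid_stride<<<64, 256>>>(dst, src, 2.0f, n);   // arbitrary launch shape
    cudaDeviceSynchronize();
    printf("dst[0]=%f dst[%d]=%f\n", dst[0], n - 1, dst[n - 1]);
    cudaFree(src);
    cudaFree(dst);
    return 0;
}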
// These are loosely adapted from libc++'s tests. In general, we don't care a // ton about verifying the return types or results we get, on the assumption // that our standard library is correct. But we care deeply about calling every // overload of every function (so that we verify that everything compiles). // // We do care about the results of complex multiplication / division, since // these use code we've written. #include <stdio.h> // These tests are pretty annoying to write without C++11, so we require that. // // In addition, these tests don't work in C++14 mode with pre-C++14 versions of // libstdc++ (compile errors in <complex>). #if __cplusplus >= 201103L && (__cplusplus < 201402L || STDLIB_VERSION >= 2014) // Support for non-fp std::complex is unspecified: // http://eel.is/c++draft/complex.numbers.general#2.sentence-1 #if defined(__GLIBCXX__) && _GLIBCXX_RELEASE >= 9 // newer versions of libstdc++ do not support implicit conversion from such // types. #undef TEST_NONFLOAT_COMPLEX #else // libc++ and the older versions of libstdc++ have better support for non-float // complex, so we can still test them. #define TEST_NONFLOAT_COMPLEX 1 #endif #include <assert.h> #include <complex> #include <type_traits> template <class T> __device__ double promote( T, typename std::enable_if<std::is_integral<T>::value>::type* = 0); __device__ float promote(float); __device__ double promote(double); __device__ void is_about(float x, float y) { assert(std::abs((x - y) / (x + y)) < 1.e-6); } __device__ void is_about(double x, double y) { assert(std::abs((x - y) / (x + y)) < 1.e-14); } template <class T> __device__ void test_promotion_impl(T x) { assert(std::imag(x) == 0); assert(std::real(x) == x); using Promoted = decltype(promote(x)); assert(std::arg(x) == arg(std::complex<Promoted>(x, 0))); assert(std::conj(x) == conj(std::complex<Promoted>(x, 0))); assert(std::norm(x) == norm(std::complex<Promoted>(x, 0))); #ifndef __GLIBCXX__ // libstdc++'s implementation of proj is completely broken, see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61761. 
assert(std::proj(x) == proj(std::complex<Promoted>(x, 0))); #endif } __device__ void test_promotion() { int vals[] = {0, 1, 10}; for (int i : vals) { test_promotion_impl<float>(i); test_promotion_impl<double>(i); test_promotion_impl<int>(i); test_promotion_impl<unsigned>(i); test_promotion_impl<long long>(i); } } __device__ void test_literals() { #if __cplusplus >= 201402L && STDLIB_VERSION >= 2014 using namespace std::literals::complex_literals; { std::complex<double> c1 = 3.0i; assert(c1 == std::complex<double>(0, 3.0)); auto c2 = 3i; assert(c1 == c2); } { std::complex<float> c1 = 3.0if; assert(c1 == std::complex<float>(0, 3.0)); auto c2 = 3if; assert(c1 == c2); } #endif } template <class T> __device__ void test_assignment_real() { std::complex<T> c; c = 1.5; assert(c.real() == 1.5); assert(c.imag() == 0); } template <class T, class U> __device__ void test_assignment_complex() { std::complex<T> c; std::complex<T> c2(1.5, 2.5); c = c2; assert(c.real() == 1.5); assert(c.imag() == 2.5); } template <class T> __device__ void test_plus_equals() { { std::complex<T> c; c += 1.5; assert(c.real() == 1.5); assert(c.imag() == 0); } { std::complex<T> c; const std::complex<T> c2(1.5, 2.5); c += c2; c += c2; assert(c.real() == 3); assert(c.imag() == 5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic(1, 1); c3 += ic; assert(c3.real() == 4); assert(c3.imag() == 6); #endif c3 = c; std::complex<float> fc(1, 1); c3 += fc; assert(c3.real() == 4); assert(c3.imag() == 6); } } template <class T> __device__ void test_minus_equals() { { std::complex<T> c; c -= 1.5; assert(c.real() == -1.5); assert(c.imag() == 0); } { std::complex<T> c; const std::complex<T> c2(1.5, 2.5); assert(c.real() == 0); assert(c.imag() == 0); c -= c2; assert(c.real() == -1.5); assert(c.imag() == -2.5); c -= c2; assert(c.real() == -3); assert(c.imag() == -5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic (1,1); c3 -= ic; assert(c3.real() == -4); assert(c3.imag() == -6); #endif c3 = c; std::complex<float> fc (1,1); c3 -= fc; assert(c3.real() == -4); assert(c3.imag() == -6); } } template <class T> __device__ void test_times_equals() { { std::complex<T> c(1); c *= 1.5; c *= 1.5; c *= -1.5; c.imag(2); c *= 1.5; assert(c.real() == -5.0625); assert(c.imag() == 3); } { std::complex<T> c(1); const std::complex<T> c2(1.5, 2.5); c *= c2; c *= c2; assert(c.real() == -4); assert(c.imag() == 7.5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic (1,1); c3 *= ic; assert(c3.real() == -11.5); assert(c3.imag() == 3.5); #endif c3 = c; std::complex<float> fc (1,1); c3 *= fc; assert(c3.real() == -11.5); assert(c3.imag() == 3.5); } } template <class T> __device__ void test_divide_equals() { { std::complex<T> c(1); c /= 0.5; c /= 0.5; c /= -0.5; c.imag(2); c /= 0.5; assert(c.real() == -16); assert(c.imag() == 4); } { std::complex<T> c(-4, 7.5); const std::complex<T> c2(1.5, 2.5); assert(c.real() == -4); assert(c.imag() == 7.5); c /= c2; assert(c.real() == 1.5); assert(c.imag() == 2.5); c /= c2; assert(c.real() == 1); assert(c.imag() == 0); std::complex<T> c3; c3 = c; #if TEST_NONFLOAT_COMPLEX std::complex<int> ic (1,1); c3 /= ic; assert(c3.real() == 0.5); assert(c3.imag() == -0.5); #endif c3 = c; std::complex<float> fc (1,1); c3 /= fc; assert(c3.real() == 0.5); assert(c3.imag() == -0.5); } } template <class T> __device__ void test_construct() { { const std::complex<T> c; assert(c.real() == 0); assert(c.imag() == 0); } { const std::complex<T> c = 7.5; assert(c.real() == 7.5); 
assert(c.imag() == 0); } { const std::complex<T> c(8.5); assert(c.real() == 8.5); assert(c.imag() == 0); } { const std::complex<T> c(10.5, -9.5); assert(c.real() == 10.5); assert(c.imag() == -9.5); } #if __cplusplus >= 201103L { constexpr std::complex<T> c; static_assert(c.real() == 0, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c = 7.5; static_assert(c.real() == 7.5, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c(8.5); static_assert(c.real() == 8.5, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c(10.5, -9.5); static_assert(c.real() == 10.5, ""); static_assert(c.imag() == -9.5, ""); } #endif } template <class T> __device__ void test_construct_integral() { #if __cplusplus >= 201402L constexpr std::complex<T> c1; static_assert(c1.real() == 0, ""); static_assert(c1.imag() == 0, ""); constexpr std::complex<T> c2(3); static_assert(c2.real() == 3, ""); static_assert(c2.imag() == 0, ""); constexpr std::complex<T> c3(3, 4); static_assert(c3.real() == 3, ""); static_assert(c3.imag() == 4, ""); #endif } template <class T> __device__ void test_set_real_imag() { std::complex<T> c; c.real(3.5); assert(c.real() == 3.5); assert(c.imag() == 0); c.imag(4.5); assert(c.real() == 3.5); assert(c.imag() == 4.5); } template <class T> __device__ void test_transcendentals_etc() { assert(sin(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(sinh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(asin(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(asinh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(cos(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); assert(cosh(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); { std::complex<T> c = acos(std::complex<T>(0, 0)); is_about(real(c), T(M_PI_2)); assert(std::abs(imag(c)) < 1.e-6); } { std::complex<T> c = acosh(std::complex<T>(0, 0)); assert(std::abs(real(c)) < 1.e-6); is_about(imag(c), T(M_PI_2)); } assert(tan(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(tanh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(atan(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(atanh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(exp(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); assert(log10(std::complex<T>(0, 0)) == std::complex<T>(-INFINITY, 0)); assert(log(std::complex<T>(0, 0)) == std::complex<T>(-INFINITY, 0)); { std::complex<T> c = pow(std::complex<T>(2, 3), std::complex<T>(2, 0)); is_about(real(c), -5); is_about(imag(c), 12); } { std::complex<T> c = pow(std::complex<T>(2, 3), T(2)); is_about(real(c), -5); is_about(imag(c), 12); } { std::complex<T> c = pow(T(2), std::complex<T>(2)); is_about(real(c), 4); assert(std::abs(imag(c)) < 1.e-6); } { std::complex<T> c = sqrt(std::complex<T>(64, 0)); is_about(real(c), 8); assert(std::abs(imag(c)) < 1.e-6); } // "etc." assert(abs(std::complex<T>(3, 4)) == 5); assert(norm(std::complex<T>(3, 4)) == 25); assert(arg(std::complex<T>(1, 0)) == 0); assert(conj(std::complex<T>(1, 2)) == std::complex<T>(1, -2)); assert(std::polar(T(0)) == std::complex<T>(0, 0)); assert(std::polar(T(1)) == std::complex<T>(1, 0)); assert(std::polar(T(100)) == std::complex<T>(100, 0)); assert(std::polar(T(0), T(0)) == std::complex<T>(0, 0)); assert(std::polar(T(1), T(0)) == std::complex<T>(1, 0)); assert(std::polar(T(100), T(0)) == std::complex<T>(100, 0)); #ifndef __GLIBCXX__ // libstdc++'s implementation of proj is completely broken, see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61761. 
assert(std::proj(std::complex<T>(1, 2)) == std::complex<T>(1, 2)); assert(std::proj(std::complex<T>(-1, 2)) == std::complex<T>(-1, 2)); assert(std::proj(std::complex<T>(1, -2)) == std::complex<T>(1, -2)); assert(std::proj(std::complex<T>(-1, -2)) == std::complex<T>(-1, -2)); #endif } __global__ void tests() { test_promotion(); test_literals(); test_assignment_real<float>(); test_assignment_real<double>(); test_assignment_complex<float, float>(); test_assignment_complex<float, double>(); test_assignment_complex<double, float>(); test_assignment_complex<double, double>(); test_plus_equals<float>(); test_plus_equals<double>(); test_minus_equals<float>(); test_minus_equals<double>(); test_times_equals<float>(); test_times_equals<double>(); test_divide_equals<float>(); test_divide_equals<double>(); test_construct<float>(); test_construct<double>(); test_construct_integral<int>(); test_set_real_imag<float>(); test_set_real_imag<double>(); test_transcendentals_etc<float>(); test_transcendentals_etc<double>(); } #else __global__ void tests() {} #endif int main() { tests<<<1, 1>>>(); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { printf("CUDA error %d\n", (int)err); return 1; } printf("Success!\n"); return 0; }
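// A minimal host-side sanity sketch (not part of the original test suite):
// the constants asserted in test_times_equals() above follow directly from
// the textbook product formula (a+bi)(c+di) = (ac-bd) + (ad+bc)i, which is
// the code path we care most about verifying on the device. Every value here
// is exactly representable in double, so exact comparison is safe.
#include <assert.h>

inline void host_complex_product_check() {
  const double a = 1.5, b = 2.5;    // c2 = 1.5 + 2.5i in the tests above
  const double re = a * a - b * b;  // real part of c2*c2 -> -4
  const double im = a * b + b * a;  // imag part of c2*c2 ->  7.5
  assert(re == -4.0 && im == 7.5);  // matches test_times_equals()
}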
///////////////////////////////////////////////////////////////////// Headers // #include "GDelKernels.h" #include "Geometry.h" #include "GDelPredDevice.h" ///////////////////////////////////////////////////////////////////// Kernels // template < bool doFast > __forceinline__ __device__ void makeInitialCone ( PredicateInfo curPredInfo, KerPointData pointData, KerStarData starData, KerBeneathData beneathData, KerInsertData insertData, int star ) { // For pentachoron of the orientation 0123v, the following are // the 4 link-triangles orientation as seen from v. // Opposite triangle indices are also the same! const int LinkTri[4][3] = { { 1, 2, 3 }, { 0, 3, 2 }, { 0, 1, 3 }, { 0, 2, 1 }, }; const int OppVi[4][3] = { { 0, 0, 0 }, { 0, 2, 1 }, { 1, 2, 1 }, { 2, 2, 1 }, }; //// // Initialize star-triangle map and other data //// const int vertBeg = insertData._starVertMap[ star ]; const int nextVertBeg = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; const int vertNum = nextVertBeg - vertBeg; CudaAssert( ( vertNum >= 4 ) && "Working set too small to create a cone!" ); const int triIdxBeg = get2SphereTriangleNum( star, vertBeg ); if ( doFast ) { starData._starTriMap[0][ star ] = triIdxBeg; starData._starTriMap[1][ star ] = 0; starData._maxSizeArr[ star ] = 4; // 4 triangles in beginning } //// // Read 4 points to form cone //// int linkVert[4]; for ( int pi = 0; pi < 4; ++pi ) { linkVert[ pi ] = insertData._vertArr[ vertBeg + pi ]; if ( doFast ) { insertData._vertStarArr[ vertBeg + pi ] = flipToNeg( star ); // Mark insertion as successful } } //// // Form 4-simplex with 4 points and star point //// // Orientation const Orient ord = doFast ? orientation4Fast( curPredInfo, pointData, linkVert[0], linkVert[1], linkVert[2], linkVert[3], star ) : orientation4SoS( curPredInfo, pointData, linkVert[0], linkVert[1], linkVert[2], linkVert[3], star ); if ( doFast && ( OrientZero == ord ) ) { // Need exact check const int exactListIdx = atomicAdd( &beneathData._flagArr[ ExactTriCount ], 1 ); beneathData._exactTriPosArr[ exactListIdx ] = star; // Meant for triPos, but we use it for storing star during initial cone creation return; // Get out! } CudaAssert( ( OrientZero != ord ) && "Orientation is zero!" ); // Swap for -ve order if ( OrientNeg == ord ) { cuSwap( linkVert[0], linkVert[1] ); } //// // Write 4 triangles of 4-simplex //// for ( int ti = 0; ti < 4; ++ti ) { Triangle tri; TriangleOpp triOpp; for ( int vi = 0; vi < 3; ++vi ) { tri._v[ vi ] = linkVert[ LinkTri[ ti ][ vi ] ]; triOpp.setOpp( vi, LinkTri[ ti ][ vi ], OppVi[ ti ][ vi ] ); } CudaAssert( ( star != tri._v[ 0 ] ) && ( star != tri._v[ 1 ] ) && ( star != tri._v[ 2 ] ) && "Star vertex same as one of its cone vertices!" 
); const TriPositionEx triPosEx = makeTriPosEx( 0, triIdxBeg + ti ); starData.triangleAt( triPosEx ) = tri; starData.triOppAt( triPosEx ) = triOpp; starData.triStarAt( triPosEx ) = star; starData.triStatusAt( triPosEx ) = ValidAndUnchecked; } return; } __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) kerMakeInitialConeFast ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerBeneathData beneathData, KerInsertData insertData ) { const PredicateInfo curPredInfo = getCurThreadPredInfo( predInfo ); // Iterate through stars for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() ) { makeInitialCone< true >( curPredInfo, pointData, starData, beneathData, insertData, star ); } return; } __global__ void __launch_bounds__( MAX_PRED_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) kerMakeInitialConeExact ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerBeneathData beneathData, KerInsertData insertData ) { //// // Check if *any* exact check needed //// const int exactVertNum = beneathData._flagArr[ ExactTriCount ]; if ( 0 == exactVertNum ) { return; // No exact checks needed } //// // Do exact check //// const PredicateInfo curPredInfo = getCurThreadPredInfo( predInfo ); // Iterate stars needing exact check for ( int idx = getCurThreadIdx(); idx < exactVertNum; idx += getThreadNum() ) { const int star = beneathData._exactTriPosArr[ idx ]; // Stored by fast check makeInitialCone< false >( curPredInfo, pointData, starData, beneathData, insertData, star ); } return; } __global__ void __launch_bounds__( MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) kerMarkBeneathTrianglesFast ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerBeneathData beneathData, KerInsertData insertData, KerIntArray activeTriPosArr, KerShortArray activeTriInsNumArr, int insIdx ) { CudaAssert( ( insIdx >= 0 ) && "Invalid insertion index!" ); const PredicateInfo curThreadPredInfo = getCurThreadPredInfo( predInfo ); // Iterate active triangles for ( int idx = getCurThreadIdx(); idx < activeTriPosArr._num; idx += getThreadNum() ) { // Check if any insertion for triangle if ( activeTriInsNumArr._arr[ idx ] <= insIdx ) { continue; } // Read triangle position and status const TriPosition triPos = activeTriPosArr._arr[ idx ]; const TriPositionEx triPosEx = triPosToEx( triPos ); TriangleStatus& triStatus = starData.triStatusAt( triPosEx ); // Ignore free triangle if ( Free == triStatus ) { continue; } CudaAssert( ( ( Valid == triStatus ) || ( ValidAndUnchecked == triStatus ) || ( NewValidAndUnchecked == triStatus ) ) && "Invalid triangle status for fast-exact check!" ); //// // Get insertion point //// const int star = starData.triStarAt( triPosEx ); const int insBeg = insertData._starVertMap[ star ]; const int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; const int insLoc = insBeg + insIdx; CudaAssert( insLoc < insEnd ); //// // Check if triangle beneath point //// const Triangle tri = starData.triangleAt( triPosEx ); const int insVert = insertData._vertArr[ insLoc ]; const Orient ord = orientation4Fast( curThreadPredInfo, pointData, tri._v[0], tri._v[1], tri._v[2], star, insVert ); // Needs exact predicate if ( OrientZero == ord ) { // Embed original status in exact status triStatus = ( Valid == triStatus ) ? 
DoExactOnValid : DoExactOnUnchecked; //// // Store triangle position for later exact predicate // Note: Done only if (< ExactTriangleMax) triangles requiring exact check //// const int exactListIdx = atomicAdd( &beneathData._flagArr[ ExactTriCount ], 1 ); if ( exactListIdx < ExactTriangleMax ) { beneathData._exactTriPosArr[ exactListIdx ] = exToTriPos( triPosEx ); } } // Triangle is beneath insertion point else if ( OrientNeg == ord ) { beneathData._beneathTriPosArr[ star ] = exToTriPos( triPosEx ); // Store beneath triangle position triStatus = Free; } // Triangle is beyond, but created during recent insertion else if ( NewValidAndUnchecked == triStatus ) { triStatus = ValidAndUnchecked; // Set it to normal triangle } } return; } __global__ void __launch_bounds__( MAX_PRED_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) kerMarkBeneathTrianglesExact ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerBeneathData beneathData, KerInsertData insertData, KerIntArray activeTriPosArr, KerShortArray activeTriInsNumArr, int insIdx ) { CudaAssert( ( insIdx >= 0 ) && "Invalid insertion index!" ); // Check if NO exact check needed int exactTriCount = beneathData._flagArr[ ExactTriCount ]; if ( 0 == exactTriCount ) { return; } // Check if few OR all triangles need exact check const PredicateInfo curThreadPredInfo = getCurThreadPredInfo( predInfo ); const bool exactCheckAll = ( exactTriCount >= ExactTriangleMax ); if ( exactCheckAll ) { exactTriCount = activeTriPosArr._num; } // Iterate triangles for ( int idx = getCurThreadIdx(); idx < exactTriCount; idx += getThreadNum() ) { //// // Check if any insertion for triangle //// if ( exactCheckAll && ( insIdx >= activeTriInsNumArr._arr[ idx ] ) ) { continue; } //// // Check if triangle needs exact check //// const TriPosition triPos = exactCheckAll ? activeTriPosArr._arr[ idx ] : beneathData._exactTriPosArr[ idx ]; const TriPositionEx triPosEx = triPosToEx( triPos ); TriangleStatus& triStatus = starData.triStatusAt( triPosEx ); // Ignore triangle not needing exact check if ( !triNeedsExactCheck( triStatus ) ) { continue; } //// // Read insertion point //// const int star = starData.triStarAt( triPosEx ); const int insBeg = insertData._starVertMap[ star ]; const int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum; const int insLoc = insBeg + insIdx; CudaAssert( insLoc < insEnd ); //// // Check if triangle beneath point //// const int insVert = insertData._vertArr[ insLoc ]; const Triangle tri = starData.triangleAt( triPosEx ); const Orient ord = orientation4SoS( curThreadPredInfo, pointData, tri._v[0], tri._v[1], tri._v[2], star, insVert ); // Triangle beneath insertion point if ( OrientNeg == ord ) { beneathData._beneathTriPosArr[ star ] = exToTriPos( triPosEx ); // Store beneath triangle position triStatus = Free; } else { triStatus = ( DoExactOnValid == triStatus ) ? Valid : ValidAndUnchecked; // Set back to old triStatus } } return; } //// // The containment proof is star plus 4 points from link of star that encloses input point. // Returns true if exact check is needed. 
//// template< bool doExact > __device__ bool findStarContainmentProof ( PredicateInfo curPredInfo, KerPointData pointData, KerStarData starData, int star, // Star that encloses input point int inVert, // Input point that lies inside star int* insStarArr, int* insVertArr, int insBeg ) { const StarInfo starInfo = starData.getStarInfo( star ); //// // Pick one triangle as facet intersected by plane //// int locTriIdx = 0; for ( ; locTriIdx < starInfo._locTriNum; ++locTriIdx ) { const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx ); const TriangleStatus status = starData.triStatusAt( triPosEx ); // Ignore free triangles if ( Free != status ) break; } // Pick this valid triangle! const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx ); const Triangle& firstTri = starData.triangleAt( triPosEx ); const int exVert = firstTri._v[ 0 ]; // First proof point CudaAssert( ( locTriIdx < starInfo._locTriNum ) && "No valid triangle found!" ); //// // Iterate through triangles to find another triangle // intersected by plane of (star, inVert, exVert) //// for ( ; locTriIdx < starInfo._locTriNum; ++locTriIdx ) { // Ignore free triangles const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx ); const TriangleStatus status = starData.triStatusAt( triPosEx ); if ( Free == status ) continue; // Ignore triangle if it has exVert const Triangle tri = starData.triangleAt( triPosEx ); if ( tri.hasVertex( exVert ) ) continue; Orient ord[3]; int vi = 0; // Iterate through vertices in order for ( ; vi < 3; ++vi ) { const int planeVert = tri._v[ vi ]; const int testVert = tri._v[ ( vi + 1 ) % 3 ]; // Get order of testVert against the plane formed by (inVert, starVert, exVert, planeVert) Orient order = orientation4Fast( curPredInfo, pointData, star, inVert, exVert, planeVert, testVert ); if ( OrientZero == order ) { if ( doExact ) order = orientation4SoS( curPredInfo, pointData, star, inVert, exVert, planeVert, testVert ); else return true; } ord[ vi ] = order; // Check if orders match, they do if plane intersects facet if ( ( vi > 0 ) && ( ord[ vi - 1 ] != ord[ vi ] ) ) break; } // All the orders match, we got our proof if ( vi >= 3 ) break; } CudaAssert( ( locTriIdx < starInfo._locTriNum ) && "Could not find proof in star!" ); //// // Write proof vert insertions //// const TriPositionEx proofTriPosEx = starInfo.locToTriPosEx( locTriIdx ); const Triangle proofTri = starData.triangleAt( proofTriPosEx ); // First proof point insStarArr[ insBeg ] = ( inVert < exVert ) ? inVert : exVert; insVertArr[ insBeg ] = ( inVert < exVert ) ? exVert : inVert; // Next 3 proof points: write at i+drownedNum, i+2-drownedNum, ... for ( int vi = 0; vi < 3; ++vi ) { const int triVert = proofTri._v[ vi ]; const int insIdx = insBeg + vi + 1; insStarArr[ insIdx ] = ( inVert < triVert ) ? inVert : triVert; insVertArr[ insIdx ] = ( inVert < triVert ) ? 
triVert : inVert; } return false; } __global__ void kerGetProofFast ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerIntArray drownedStarArr, int* drownedVertArr, int* proofStarArr, int* proofVertArr ) { const PredicateInfo curThreadPredInfo = getCurThreadPredInfo( predInfo ); // Iterate drowned items for ( int idx = getCurThreadIdx(); idx < drownedStarArr._num; idx += getThreadNum() ) { const int star = drownedStarArr._arr[ idx ]; // Killer const int vert = drownedVertArr[ idx ]; // Killed //// // Go ahead and write the destination star of proof insertions // (so no need to write this in exact check) //// int toIdx = idx * ProofPointsPerStar; // Find proof insertions using fast check const bool needExact = findStarContainmentProof< false >( curThreadPredInfo, pointData, starData, star, vert, proofStarArr, proofVertArr, toIdx ); if ( needExact ) { // These will be picked up by exact check drownedStarArr._arr[ idx ] = flipToNeg( star ); } } return; } __global__ void __launch_bounds__( MAX_PRED_THREADS_PER_BLOCK, MIN_BLOCKS_PER_MP ) kerGetProofExact ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerIntArray drownedStarArr, int* drownedVertArr, int* proofStarArr, int* proofVertArr ) { const PredicateInfo curThreadPredInfo = getCurThreadPredInfo( predInfo ); // Iterate drowned items for ( int idx = getCurThreadIdx(); idx < drownedStarArr._num; idx += getThreadNum() ) { // Ignore items whose proof is already found by fast check const int negStar = drownedStarArr._arr[ idx ]; if ( negStar >= 0 ) continue; const int star = flipToPos( negStar ); const int vert = drownedVertArr[ idx ]; const int toIdx = ProofPointsPerStar * idx; // Write proof vertices findStarContainmentProof< true >( curThreadPredInfo, pointData, starData, star, vert, proofStarArr, proofVertArr, toIdx ); } return; } __global__ void kerMarkLowerHullTetra ( PredicateInfo predInfo, KerPointData pointData, KerStarData starData, KerIntArray tetraTriMap ) { const int tetraNum = tetraTriMap._num; // Iterate both lower- and upper-hull tetra for ( int tetIdx = getCurThreadIdx(); tetIdx < tetraNum; tetIdx += getThreadNum() ) { // Owner triangle of tetrahedron const int triIdx = tetraTriMap._arr[ tetIdx ]; const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx ); const Triangle tri = starData.triangleAt( triPosEx ); //// // Orientation of tetra // Note: Non-SoS check is enough since flat and -ve tetra are removed //// const int curStar = starData.triStarAt( triPosEx ); const Point3* ptArr = pointData._pointArr; const Point3* p[] = { &( ptArr[ tri._v[0] ] ), &( ptArr[ tri._v[1] ] ), &( ptArr[ tri._v[2] ] ), &( ptArr[ curStar ] ) }; Orient ord = shewchukOrient3D( predInfo._consts, p[0]->_p, p[1]->_p, p[2]->_p, p[3]->_p ); ord = flipOrient( ord ); // Invalidate upper-hull tetra if ( OrientPos != ord ) { tetraTriMap._arr[ tetIdx ] = -1; } } return; } __global__ void kerMakeCloneFacets ( KerStarData starData, KerIntArray tetraTriMap, int* triTetraMap, int* facetStarArr, int* facetTriArr ) { const int tetraNum = tetraTriMap._num; // Iterate tetra for ( int tetIdx = getCurThreadIdx(); tetIdx < tetraNum; tetIdx += getThreadNum() ) { // Owner triangle of tetrahedron const int triIdx = tetraTriMap._arr[ tetIdx ]; const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx ); const Triangle& tri = starData.triangleAt( triPosEx ); triTetraMap[ triIdx ] = tetIdx; // Map owner triangle to its tetra facetStarArr[ tetIdx ] = tri._v[ 0 ]; // Set facet info facetTriArr[ tetIdx ] = triIdx; // ... 
} return; } ////////////////////////////////////////////////////////////////////////////////
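///////////////////////////////////////////////////////////////////// Example //
// Illustrative sketch only (not part of GDelKernels): every kernel above walks
// its work items with the grid-stride idiom via getCurThreadIdx() and
// getThreadNum(). Those helpers are defined in the headers; they are assumed
// to look roughly like the example* functions below. Each thread starts at its
// global index and advances by the total number of launched threads, so any
// grid configuration covers any item count.
__forceinline__ __device__ int exampleCurThreadIdx()
{
    return ( blockIdx.x * blockDim.x ) + threadIdx.x;
}

__forceinline__ __device__ int exampleThreadNum()
{
    return gridDim.x * blockDim.x;
}

__global__ void kerExampleGridStride( int* itemArr, int itemNum )
{
    // Iterate items exactly like kerMakeInitialConeFast iterates stars
    for ( int idx = exampleCurThreadIdx(); idx < itemNum; idx += exampleThreadNum() )
    {
        itemArr[ idx ] += 1;
    }

    return;
}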
#include <cuda.h> #include <cuda_runtime.h> #include "PermutohedralLatticeGPU.cuh" #include "DeviceMemoryAllocator.h" #include <vector> // for kernels that are actually only implemented in single-precision // (here because of needing atomicMinf) #define AT_DISPATCH_SINGLE_FLOAT(TYPE, NAME, ...) \ [&] { \ const at::Type& the_type = TYPE; \ switch (the_type.scalarType()) { \ AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \ default: \ AT_ERROR(#NAME, " not implemented for '", the_type.toString(), "'"); \ } \ }() template <typename scalar_t> __inline__ __device__ scalar_t TOME_get_point_depth(scalar_t* __restrict__ camera, scalar_t* __restrict__ win) { return camera[8]*win[0] + camera[9]*win[1] + camera[10]*win[2]+ camera[11]; } template <typename scalar_t> __inline__ __device__ bool TOME_project_point(scalar_t* __restrict__ camera, scalar_t* __restrict__ win, int *out, int input_width, int input_height) { scalar_t cx = camera[0]*win[0] + camera[1]*win[1] + camera[2]*win[2] + camera[3]; scalar_t cy = camera[4]*win[0] + camera[5]*win[1] + camera[6]*win[2] + camera[7]; scalar_t cz = TOME_get_point_depth(camera, win); out[0] = int(cx / cz + 0.5f); out[1] = int(cy / cz + 0.5f); return (out[0] >= 0) && (out[1] >= 0) && (out[0]<input_width) && (out[1]<input_height); } template <typename scalar_t> __inline__ __device__ bool TOME_project_pointf(scalar_t* __restrict__ camera, scalar_t* __restrict__ win, scalar_t* __restrict__ out, int input_width, int input_height) { scalar_t cx = camera[0]*win[0] + camera[1]*win[1] + camera[2]*win[2] + camera[3]; scalar_t cy = camera[4]*win[0] + camera[5]*win[1] + camera[6]*win[2] + camera[7]; scalar_t cz = TOME_get_point_depth(camera, win); out[0] = cx / cz; out[1] = cy / cz; return (out[0] >= 0) && (out[1] >= 0) && (out[0]<=input_width-1.0f) && (out[1]<=input_height-1.0f); } template <typename scalar_t> __inline__ __device__ void TOME_unproject_point(scalar_t* __restrict__ camloc, scalar_t* __restrict__ invKR, int u, int v, scalar_t z, scalar_t* __restrict__ out) { out[0] = camloc[0] + (invKR[0] * u + invKR[1] * v + invKR[2]) * z; out[1] = camloc[1] + (invKR[3] * u + invKR[4] * v + invKR[5]) * z; out[2] = camloc[2] + (invKR[6] * u + invKR[7] * v + invKR[8]) * z; } __device__ static float TOME_atomicMinf(float* addr, float val) { float old; old = (val >= 0) ? 
__int_as_float(atomicMin((int *)addr, __float_as_int(val))) : __uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(val))); return old; } // input depth: BxHxW depth tensor // output depth: BxKxHxW depth tensor // cameras: BxKx3x4 tensor (receiving cameras) // invKRs: Bx3x3 tensor (central camera) // camlocs: Bx3x1 tensor (central camera) template <typename scalar_t> __global__ void depth_reprojection_cuda_kernel( scalar_t* __restrict__ input, scalar_t* __restrict__ output, scalar_t* __restrict__ cameras, scalar_t* __restrict__ invKRs, scalar_t* __restrict__ camlocs, int B, int K, int inH, int inW, int outH, int outW) { int proj[2]; scalar_t wloc[3]; for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) { scalar_t* camloc = camlocs + b * 3; scalar_t* invKR = invKRs + b * 9; for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) { for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) { // cast this point into space scalar_t depth = input[b * inH * inW + h * inW + w]; if(depth > 0) { for (int k = 0; k < K; k++) { scalar_t* camera = cameras + b * K * 12 + k * 12; TOME_unproject_point(camloc, invKR, w, h, depth, wloc); // project it onto the first camera again if(TOME_project_point(camera, wloc, proj, outW, outH)) { TOME_atomicMinf( output + b * K * outH * outW + k * outH * outW + proj[1] * outW + proj[0], TOME_get_point_depth(camera, wloc) ); } } } } } } } at::Tensor depth_reprojection_cuda( at::Tensor input_depth, at::Tensor cameras, at::Tensor invKR, at::Tensor camloc, int outH, int outW) { auto blkdim = 16; const auto B = cameras.size(0); const auto K = cameras.size(1); const auto inH = input_depth.size(1); const auto inW = input_depth.size(2); const dim3 block = dim3(1, blkdim, blkdim); const dim3 grid = dim3(1, 8, 8); auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type()); auto sentinel = 1e9; output_depth.fill_(sentinel); if(input_depth.type().scalarType() == at::ScalarType::Float) { depth_reprojection_cuda_kernel<float><<<grid, block>>>( input_depth.data<float>(), output_depth.data<float>(), cameras.data<float>(), invKR.data<float>(), camloc.data<float>(), B, K, inH, inW, outH, outW); } else{ AT_ERROR("depth_reprojection_cuda not implemented for '", input_depth.type().toString(), "'"); } output_depth.fmod_(sentinel); return output_depth; } // input depth: BxinHxinW depth tensor // output depth: BxKxoutHxoutW depth tensor // cameras: Bx3x4 tensor (central camera) // invKRs: BxKx3x3 tensor (receiving cameras) // camlocs: BxKx3x1 tensor (receiving cameras) template <typename scalar_t> __global__ void depth_reprojection_bound_cuda_kernel( scalar_t *input, scalar_t *output, scalar_t *cameras, scalar_t *invKRs, scalar_t *camlocs, int B, int K, int inH, int inW, int outH, int outW, scalar_t dmin, scalar_t dmax, scalar_t dstep) { int proj[2]; scalar_t wloc[3]; for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) { for (int k = 0; k < K; k++) { scalar_t* camloc = camlocs + b * K * 3 + k * 3; scalar_t* invKR = invKRs + b * K * 9 + k * 9; scalar_t *camera = cameras + b * 12; for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < outH; h += blockDim.y * gridDim.y) { for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < outW; w += blockDim.z * gridDim.z) { // cast this point into space at increasingly large depths (from camera 0) // the first depth at which it is invisible in view n (i.e. 
lies behind its depth map) // that is the lowest permissible depth for this pixel according to that view // for very sharp depth edges, this results in an interpolation of the depth map // for aliased reprojections, this results in a filling of the holes // bool projected_in = false; scalar_t dhyp = dmin; for (; dhyp <= dmax; dhyp += dstep) { TOME_unproject_point(camloc, invKR, w, h, dhyp, wloc); // project it onto the first camera again if(TOME_project_point(camera, wloc, proj, inW, inH)) { // projected_in = true; scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc); scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]]; if(dhyp_depth_n > depth_n && depth_n > 0) { break; } } // else if (projected_in) { // // just give up -- no value here is acceptable // // dhyp = dmax; // break; // } } if(dhyp < dmax) { // refine the estimate scalar_t ndhyp = dhyp; for (; ndhyp >= dhyp - dstep; ndhyp -= dstep/10) { TOME_unproject_point(camloc, invKR, w, h, ndhyp, wloc); // project it onto the first camera again if(TOME_project_point(camera, wloc, proj, inW, inH)) { scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc); scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]]; if(dhyp_depth_n < depth_n) { break; } } else { break; } } dhyp = ndhyp; for (; ndhyp < dhyp + dstep/10; ndhyp += dstep/50) { TOME_unproject_point(camloc, invKR, w, h, ndhyp, wloc); // project it onto the first camera again if(TOME_project_point(camera, wloc, proj, inW, inH)) { scalar_t dhyp_depth_n = TOME_get_point_depth(camera, wloc); scalar_t depth_n = input[b * inH * inW + proj[1] * inW + proj[0]]; if(dhyp_depth_n > depth_n && depth_n > 0) { break; } } else { break; } } dhyp = ndhyp; } else { dhyp = 0.0f; } output[b * K * outH * outW + k * outH * outW + h * outW + w] = dhyp; } } } } } at::Tensor depth_reprojection_bound_cuda( at::Tensor input_depth, at::Tensor cameras, at::Tensor invKR, at::Tensor camloc, int outH, int outW, float dmin, float dmax, float dstep) { auto blkdim = 16; const auto B = invKR.size(0); const auto K = invKR.size(1); const auto inH = input_depth.size(1); const auto inW = input_depth.size(2); const dim3 block = dim3(1, blkdim, blkdim); const dim3 grid = dim3(1, 8, 8); auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type()); auto sentinel = 1e9; output_depth.fill_(sentinel); if(input_depth.type().scalarType() == at::ScalarType::Float) { depth_reprojection_bound_cuda_kernel<float><<<grid, block>>>( input_depth.data<float>(), output_depth.data<float>(), cameras.data<float>(), invKR.data<float>(), camloc.data<float>(), B, K, inH, inW, outH, outW, dmin, dmax, dstep); } else{ AT_ERROR("depth_reprojection_bound_cuda_kernel not implemented for '", input_depth.type().toString(), "'"); } output_depth.fmod_(sentinel); return output_depth; } // input depth: BxHxW depth tensor // output depth: BxKxHxW depth tensor // cameras: BxKx3x4 tensor (receiving cameras) // invKRs: Bx3x3 tensor (central camera) // camlocs: Bx3x1 tensor (central camera) template <typename scalar_t> __global__ void depth_reprojection_splat_cuda_kernel( scalar_t* __restrict__ input, scalar_t* __restrict__ output_depth, scalar_t* __restrict__ output_weights, scalar_t* __restrict__ cameras, scalar_t* __restrict__ invKRs, scalar_t* __restrict__ camlocs, scalar_t radius, scalar_t depth_scale, int B, int K, int inH, int inW, int outH, int outW) { scalar_t proj[2]; scalar_t wloc[3]; // twice the stddev: 95% of the mass int iradius = int(ceil(2*radius)); scalar_t expdiv = radius>0?2*radius*radius:1.0; for (int b 
= blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) { scalar_t* camloc = camlocs + b * 3; scalar_t* invKR = invKRs + b * 9; for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) { for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) { // cast this point into space scalar_t depth = input[b * inH * inW + h * inW + w]; if(depth > 0) { TOME_unproject_point(camloc, invKR, w, h, depth, wloc); for (int k = 0; k < K; k++) { scalar_t* camera = cameras + b * K * 12 + k * 12; TOME_project_pointf(camera, wloc, proj, outW, outH); scalar_t depth_k = TOME_get_point_depth(camera, wloc); int px = int(floor(proj[0]+0.5f)); int py = int(floor(proj[1]+0.5f)); for(int xk = max(0, px - iradius); xk <= min(px + iradius, outW-1); xk++) { for(int yk = max(0, py - iradius); yk <= min(py + iradius, outH-1); yk++) { scalar_t dist_k = (xk-proj[0])*(xk-proj[0]) + (yk-proj[1])*(yk-proj[1]); // mass: what fraction of the blob in this pixel scalar_t mass_k = exp(-dist_k / expdiv); // weight: softmaxing depth in this pixel scalar_t weight_k = exp(-depth_k / depth_scale); atomicAdd( output_depth + b * K * outH * outW + k * outH * outW + yk * outW + xk, depth_k * mass_k * weight_k ); atomicAdd( output_weights + b * K * outH * outW + k * outH * outW + yk * outW + xk, mass_k * weight_k ); } } } } } } } } template <typename scalar_t> __global__ void depth_reprojection_splat_visibilities_cuda_kernel( scalar_t* __restrict__ input, scalar_t* __restrict__ output_depth, scalar_t* __restrict__ output_visibilities, scalar_t* __restrict__ cameras, scalar_t* __restrict__ invKRs, scalar_t* __restrict__ camlocs, scalar_t radius, scalar_t depth_scale, int B, int K, int inH, int inW, int outH, int outW) { scalar_t proj[2]; scalar_t wloc[3]; // twice the stddev: 95% of the mass int iradius = int(ceil(2*radius)); scalar_t expdiv = radius>0?2*radius*radius:1.0; for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < B; b += blockDim.x * gridDim.x) { scalar_t* camloc = camlocs + b * 3; scalar_t* invKR = invKRs + b * 9; for (int h = blockIdx.y * blockDim.y + threadIdx.y; h < inH; h += blockDim.y * gridDim.y) { for (int w = blockIdx.z * blockDim.z + threadIdx.z; w < inW; w += blockDim.z * gridDim.z) { // cast this point into space scalar_t depth = input[b * inH * inW + h * inW + w]; if(depth > 0) { TOME_unproject_point(camloc, invKR, w, h, depth, wloc); for (int k = 0; k < K; k++) { scalar_t* camera = cameras + b * K * 12 + k * 12; TOME_project_pointf(camera, wloc, proj, outW, outH); scalar_t depth_k = TOME_get_point_depth(camera, wloc); scalar_t visiblemass_sum = 0; scalar_t mass_sum = 0; int px = int(floor(proj[0]+0.5f)); int py = int(floor(proj[1]+0.5f)); for(int xk = max(0, px - iradius); xk <= min(px + iradius, outW-1); xk++) { for(int yk = max(0, py - iradius); yk <= min(py + iradius, outH-1); yk++) { scalar_t dist_k = (xk-proj[0])*(xk-proj[0]) + (yk-proj[1])*(yk-proj[1]); // mass: what fraction of the blob in this pixel scalar_t mass_k = exp(-dist_k / expdiv); scalar_t zbuffer_k = output_depth[b * K * outH * outW + k * outH * outW + yk * outW + xk]; // weight: softmaxing depth in this pixel scalar_t visibility_k = exp((zbuffer_k - depth_k) / depth_scale); visibility_k = min(visibility_k, 1.0); visiblemass_sum += mass_k * visibility_k; mass_sum += mass_k; } } if(mass_sum > 0) { output_visibilities[ b * K * inH * inW + k * inH * inW + h * inW + w ] = visiblemass_sum / mass_sum; } } } } } } } std::vector<at::Tensor> depth_reprojection_splat_cuda( 
at::Tensor input_depth, at::Tensor cameras, at::Tensor invKR, at::Tensor camloc, float radius, float zbuffer_scale, float visibility_scale, int outH, int outW) { auto blkdim = 16; const auto B = cameras.size(0); const auto K = cameras.size(1); const auto inH = input_depth.size(1); const auto inW = input_depth.size(2); const dim3 block = dim3(1, blkdim, blkdim); const dim3 grid = dim3(1, 8, 8); auto output_depth = at::zeros({B, K, outH, outW}, input_depth.type()); auto output_weights = at::zeros({B, K, outH, outW}, input_depth.type()); auto output_visibilities = at::zeros({B, K, inH, inW}, input_depth.type()); if(input_depth.type().scalarType() == at::ScalarType::Float) { depth_reprojection_splat_cuda_kernel<float><<<grid, block>>>( input_depth.data<float>(), output_depth.data<float>(), output_weights.data<float>(), cameras.data<float>(), invKR.data<float>(), camloc.data<float>(), radius, zbuffer_scale, B, K, inH, inW, outH, outW); output_depth.div_(output_weights); depth_reprojection_splat_visibilities_cuda_kernel<float><<<grid, block>>>( input_depth.data<float>(), output_depth.data<float>(), output_visibilities.data<float>(), cameras.data<float>(), invKR.data<float>(), camloc.data<float>(), radius, visibility_scale, B, K, inH, inW, outH, outW); } else{ AT_ERROR("depth_reprojection_splat_cuda not implemented for '", input_depth.type().toString(), "'"); } return {output_depth, output_weights, output_visibilities}; } at::Tensor permutohedral_filter_cuda( at::Tensor input, at::Tensor positions, at::Tensor weights, bool reverse ) { auto blkdim = 16; const auto H = input.size(0); const auto W = input.size(1); const auto num_pixels = H*W; const dim3 block = dim3(1, blkdim, blkdim); const dim3 grid = dim3(1, 8, 8); const auto pd = positions.size(2); const auto id = input.size(2); auto output = at::zeros({H, W, id}, input.type()); auto allocator = DeviceMemoryAllocator(); if(input.type().scalarType() == at::ScalarType::Float) { if(pd == 5 && id == 3) { auto lattice = PermutohedralLatticeGPU<float, 5, 4>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 2 && id == 3) { auto lattice = PermutohedralLatticeGPU<float, 2, 4>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 2 && id == 2) { auto lattice = PermutohedralLatticeGPU<float, 2, 3>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 2 && id == 1) { auto lattice = PermutohedralLatticeGPU<float, 2, 2>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 3 && id == 1) { auto lattice = PermutohedralLatticeGPU<float, 3, 2>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 3 && id == 2) { auto lattice = PermutohedralLatticeGPU<float, 3, 3>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 3 && id == 3) { auto lattice = PermutohedralLatticeGPU<float, 3, 4>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 
&& id == 2) { auto lattice = PermutohedralLatticeGPU<float, 6, 3>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 && id == 3) { auto lattice = PermutohedralLatticeGPU<float, 6, 4>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 && id == 4) { auto lattice = PermutohedralLatticeGPU<float, 6, 5>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 && id == 5) { auto lattice = PermutohedralLatticeGPU<float, 6, 6>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 && id == 6) { auto lattice = PermutohedralLatticeGPU<float, 6, 7>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 && id == 7) { auto lattice = PermutohedralLatticeGPU<float, 6, 8>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else if(pd == 6 && id == 8) { auto lattice = PermutohedralLatticeGPU<float, 6, 9>(num_pixels, &allocator); lattice.filter( output.data<float>(), input.data<float>(), positions.data<float>(), weights.data<float>(), reverse ); } else{ AT_ASSERTM(false, "permutohedral filter: this (pd,id) is not present in the compiled binary"); } } else{ AT_ERROR("permutohedral_filter_cuda not implemented for '", input.type().toString(), "'"); } return output; }
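// Illustrative sketch only: entry points like depth_reprojection_cuda and
// permutohedral_filter_cuda are usually exposed to Python through a small
// pybind11 / torch-extension module. The module layout below is an assumption
// (the real project may bind them elsewhere, typically from a separate .cpp
// behind host wrappers that check tensor device, dtype and contiguity first).
#include <torch/extension.h>

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("depth_reprojection", &depth_reprojection_cuda,
          "Reproject a depth map into K receiving cameras (CUDA)");
    m.def("depth_reprojection_bound", &depth_reprojection_bound_cuda,
          "Per-pixel lower depth bound via a depth-hypothesis sweep (CUDA)");
    m.def("depth_reprojection_splat", &depth_reprojection_splat_cuda,
          "Splatted depth reprojection with a soft z-buffer (CUDA)");
    m.def("permutohedral_filter", &permutohedral_filter_cuda,
          "High-dimensional filtering on a permutohedral lattice (CUDA)");
}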
struct SoftmaxDPInitParams { DnnHandle handle; int batchSize; bool profiling; }; Tensor RnnModel::add_softmaxDP_node(Tensor logit, Tensor label, ParallelConfig pc) { assert(logit.numDim == 3); assert(logit.adim[2] == LSTM_PER_NODE_LENGTH); assert(logit.pdim[2] == LSTM_PER_NODE_LENGTH); SoftmaxDP* node = new SoftmaxDP(config, logit, label, pc); layers.push_back(node); return node->outputs[0]; } SoftmaxDP::SoftmaxDP(RnnConfig config, Tensor logit, Tensor _label, ParallelConfig pc) : RnnOp(logit, pc, SharedVariable::NO_VARIABLE), label(_label) { Context ctx = config.lg_ctx; Runtime* runtime = config.lg_hlr; assert(pc.nDims == 1); int num_par_n = pc.dim[0]; { Rect<1> rect(Point<1>(0), Point<1>(num_par_n-1)); part_rect = rect; } IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect); int batch_size = logit.adim[1]; int output_size = logit.adim[0]; FieldSpace fs = config.field_space; Rect<3, coord_t> y_rect(Point<3>(0, 0, 0), Point<3>(output_size-1, batch_size-1, LSTM_PER_NODE_LENGTH-1)); IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect); LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs); LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs); assert(batch_size % num_par_n == 0); int extent_n = batch_size / num_par_n; Rect<3, coord_t> extent(Point<3>(0, 0, 0), Point<3>(output_size-1, extent_n-1, LSTM_PER_NODE_LENGTH-1)); Transform<3, 1, coord_t> trans; trans[0][0] = 0; trans[1][0] = extent_n; trans[2][0] = 0; IndexPartition y_ip = runtime->create_partition_by_restriction(ctx, y_is, part_is, trans, extent); assert(runtime->is_index_partition_disjoint(ctx, y_ip)); assert(runtime->is_index_partition_complete(ctx, y_ip)); LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip); LogicalPartition y_grad_lp = runtime->get_logical_partition(ctx, y_grad_lr, y_ip); outputs[0].numDim = 3; outputs[0].adim[0] = output_size; outputs[0].adim[1] = batch_size; outputs[0].adim[2] = LSTM_PER_NODE_LENGTH; outputs[0].pdim[0] = output_size; outputs[0].pdim[1] = extent_n; outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH; outputs[0].region = y_lr; outputs[0].partition = y_lp; outputs[0].region_grad = y_grad_lr; outputs[0].partition_grad = y_grad_lp; // Every partition reads all input_channels // Use the same partitioning as outputs //if (inputs[0].pdim[0] == outputs[0].pdim[0] // && inputs[0].pdim[1] == outputs[0].pdim[1]) { // logit_lp = inputs[0].partition; // logit_grad_lp = inputs[0].partition_grad; //} else { IndexSpaceT<3> logit_is(inputs[0].region.get_index_space()); IndexPartition logit_ip = runtime->create_partition_by_restriction(ctx, logit_is, part_is, trans, extent); logit_lp = runtime->get_logical_partition(ctx, inputs[0].region, logit_ip); logit_grad_lp = runtime->get_logical_partition(ctx, inputs[0].region_grad, logit_ip); //} } /* regions[0](I): x regions[1](O): y */ OpMeta* SoftmaxDP::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); const SoftmaxDPInitParams* softmaxDP = (SoftmaxDPInitParams*) task->args; const AccessorRO<float, 3> acc_x(regions[0], FID_DATA); const AccessorWO<float, 3> acc_y(regions[1], FID_DATA); Rect<3> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_x.accessor.is_dense_arbitrary(rect_x)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); 
SoftmaxDPMeta* m = new SoftmaxDPMeta(softmaxDP->handle); m->profiling_runtime = softmaxDP->profiling; m->batchSize = softmaxDP->batchSize; #ifndef DISABLE_COMPUTATION checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor)); assert(rect_x == rect_y); int input_c = rect_x.hi[0] - rect_x.lo[0] + 1; int input_n = (rect_x.hi[1] - rect_x.lo[1] + 1) * LSTM_PER_NODE_LENGTH; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, 1, 1)); #endif return m; } void SoftmaxDP::init(const RnnModel& model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) { SoftmaxDPInitParams initParams; initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]]; initParams.batchSize = model.config.batchSize; initParams.profiling = false; TaskLauncher launcher(RNN_SOFTMAXDP_INIT_TASK_ID, TaskArgument(&initParams, sizeof(initParams)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); { LogicalRegion x = runtime->get_logical_subregion_by_color(logit_lp, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); } Future f = runtime->execute_task(ctx, launcher); meta[idx] = f.get_result<OpMeta*>(); } } /* regions[0](I): x regions[1](O): y */ void SoftmaxDP::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 2); assert(task->regions.size() == 2); float alpha = 1.0f, beta = 0.0f; const SoftmaxDPMeta* m = *((SoftmaxDPMeta**) task->args); const AccessorRO<float, 3> acc_x(regions[0], FID_DATA); const AccessorWO<float, 3> acc_y(regions[1], FID_DATA); Rect<3> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_x.accessor.is_dense_arbitrary(rect_x)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); const float *x_ptr = acc_x.ptr(rect_x.lo); float *y_ptr = acc_y.ptr(rect_y.lo); cudaEvent_t t_start, t_end; if (m->profiling_runtime) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); checkCUDNN(cudnnSoftmaxForward(m->handle.dnn, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, m->inputTensor, x_ptr, &beta, m->inputTensor, y_ptr)); if (m->profiling_runtime) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("SoftmaxDP forward time = %.2fms\n", elapsed); } #ifdef PRINT_INTERMEDIATE_RESULT print_tensor<3, float>(y_ptr, rect_y, "softmax"); #endif #endif } void SoftmaxDP::forward(const RnnModel& model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(RNN_SOFTMAXDP_FWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 
0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); { LogicalRegion x = runtime->get_logical_subregion_by_color(logit_lp, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); } runtime->execute_task(ctx, launcher); } } __global__ void SoftmaxLossBackprop(float *input, const int *label, int vocab_size, int batch_size) { CUDA_KERNEL_LOOP(i, batch_size) { int label_idx = label[i]; input[i * vocab_size + label_idx] -= 1.0f; } } /* regions[0](O): x_grad regions[1](I): y regions[2](I): labels */ void SoftmaxDP::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 3); assert(task->regions.size() == 3); const SoftmaxDPMeta* m = *((SoftmaxDPMeta**) task->args); const AccessorWO<float, 3> acc_x_grad(regions[0], FID_DATA); const AccessorRO<float, 3> acc_y(regions[1], FID_DATA); const AccessorRO<int, 2> acc_label(regions[2], FID_DATA); Rect<3> rect_x_grad = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<2> rect_label = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); assert(acc_x_grad.accessor.is_dense_arbitrary(rect_x_grad)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); assert(acc_label.accessor.is_dense_arbitrary(rect_label)); float *x_grad_ptr = acc_x_grad.ptr(rect_x_grad.lo); const float *y_ptr = acc_y.ptr(rect_y.lo); const int *label_ptr = acc_label.ptr(rect_label.lo); assert(rect_x_grad == rect_y); assert(rect_y.hi[1] - rect_y.lo[1] == rect_label.hi[0] - rect_label.lo[0]); assert(rect_y.hi[2] - rect_y.lo[2] == rect_label.hi[1] - rect_label.lo[1]); int num_labels = rect_label.volume(); int vocab_size = rect_y.hi[0] - rect_y.lo[0] + 1; cudaEvent_t t_start, t_end; if (m->profiling_runtime) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } checkCUDA(cudaMemcpyAsync(x_grad_ptr, y_ptr, rect_x_grad.volume() * sizeof(float), cudaMemcpyDeviceToDevice)); SoftmaxLossBackprop<<<GET_BLOCKS(num_labels), CUDA_NUM_THREADS>>>( x_grad_ptr, label_ptr, vocab_size, num_labels); // Accouting for batch size in SGD float scalVal = 1.0f / static_cast<float>(m->batchSize); scale_kernel<<<GET_BLOCKS(rect_x_grad.volume()), CUDA_NUM_THREADS>>>( x_grad_ptr, rect_x_grad.volume(), 0.0f, scalVal); //checkCUDA(cublasSscal(m->handle.blas, rect_x_grad.volume(), // &scalVal, x_grad_ptr, 1)); if (m->profiling_runtime) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Softmax backward time = %.2fms\n", elapsed); } #ifdef PRINT_INTERMEDIATE_RESULT print_tensor<3, float>(x_grad_ptr, rect_x_grad, "softmax bwd:x_grad"); float* host_ptr; checkCUDA(cudaHostAlloc(&host_ptr, sizeof(float) * rect_x_grad.volume(), cudaHostAllocPortable | cudaHostAllocMapped)); checkCUDA(cudaMemcpy(host_ptr, x_grad_ptr, sizeof(float) * rect_x_grad.volume(), cudaMemcpyDeviceToHost)); int idx = 0; float loss = 0.0f; for (PointInRectIterator<3> it(rect_x_grad); it(); it++, 
idx++) { if (host_ptr[idx] < 0) loss += -std::log(host_ptr[idx]+1); } printf("lost = %.4lf\n", loss); checkCUDA(cudaFreeHost(host_ptr)); #endif #endif } void SoftmaxDP::backward(const RnnModel& model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(RNN_SOFTMAXDP_BWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); { LogicalRegion x = runtime->get_logical_subregion_by_color(logit_grad_lp, dp); launcher.add_region_requirement( RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); } { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); } { LogicalRegion l = runtime->get_logical_subregion_by_color(label.partition, dp); launcher.add_region_requirement( RegionRequirement(l, READ_ONLY, EXCLUSIVE, label.region)); launcher.add_field(2, FID_DATA); } runtime->execute_task(ctx, launcher); } } void SoftmaxDP::update(const RnnModel& model) {}
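// Illustrative CPU reference (not part of the Legion task code): backward_task
// above copies the softmax output into x_grad, subtracts 1 at each label
// position (SoftmaxLossBackprop), and scales everything by 1/batchSize, i.e.
// the standard softmax-cross-entropy gradient dL/dz[j] = (p[j] - [j==label]) / batchSize.
// A sequential sketch like this can be used to spot-check the device result
// on a small tensor.
void softmax_xent_grad_reference(const float* probs,  // [num_rows, vocab_size] softmax outputs
                                 const int* labels,   // [num_rows]
                                 float* grad,         // [num_rows, vocab_size] output
                                 int num_rows, int vocab_size, int batch_size)
{
  for (int r = 0; r < num_rows; r++) {
    for (int v = 0; v < vocab_size; v++)
      grad[r * vocab_size + v] = probs[r * vocab_size + v];
    grad[r * vocab_size + labels[r]] -= 1.0f;
    for (int v = 0; v < vocab_size; v++)
      grad[r * vocab_size + v] /= static_cast<float>(batch_size);
  }
}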
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-25 */ #include "../../XTensor.h" #include "../../XDevice.h" #include "../../XUtility.h" #include "Spread.cuh" #include "CopyValues.h" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* This is core assignment for spread function. >> sData - the data pointer of the source tensor >> cData - the data pointer of collection tensor >> blockNum - the number of data blocks >> blockSizeSrc - the size of source data block >> blockSizeColl - the size of source data block >> stride - the stride of a data block */ __global__ void KernelSpread(DTYPE * sData, DTYPE * cData, int blockNum, int blockSizeSrc, int blockSizeColl, int stride) { /* block id */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* offset in each block */ int j = blockDim.y * blockIdx.y + threadIdx.y; if(i >= blockNum || j >= stride) return; DTYPE * s = sData + blockSizeSrc * i; DTYPE * c = cData + blockSizeColl * i; s[j] = c[j]; } /* This is core assignment for spread function. >> sData - the data pointer of the source tensor >> cData - the data pointer of collection tensor >> blockNum - number of data blocks >> blockSizeSrc - size of source data block >> blockSizeColl - size of source data block >> stride - stride of a data block >> subtensorNum - number of sub-tensors >> srcIndex - index of the source sub-tensor >> colIndex - index of the sub-tensor in the collection tensor */ __global__ void KernelSpreadFuzed(DTYPE * sData, DTYPE * cData, int blockNum, int blockSizeSrc, int blockSizeColl, int stride, int subtensorNum, int * srcIndex, int * colIndex) { __shared__ DTYPE * sp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE * cp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* block id */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* offset in each block */ int offset = blockDim.y * blockIdx.y + threadIdx.y; int blockId = i % blockNum; int subtensorId = i / blockNum; if(subtensorId >= subtensorNum || offset >= stride) return; if(threadIdx.y == 0){ sp[threadIdx.x] = sData + srcIndex[subtensorId] * stride; cp[threadIdx.x] = cData + colIndex[subtensorId] * stride; } __syncthreads(); DTYPE * s = sp[threadIdx.x] + blockSizeSrc * blockId; DTYPE * c = cp[threadIdx.x] + blockSizeColl * blockId; s[offset] = c[offset]; } /* spread a collection tensor to source tensor (cuda version). This is a inverse operation compared to gather. 
>> source - the source tensor whose data would be modified >> collection - the collection whose data would be spread to source tensor >> dim - the leading dimension to define "sub-tensors" e.g., for a tensor of size (3, 2, 4) and dim = 2, we have 4 sub-tensors of size (3, 2) >> srcIndex - index of the source sub-tensors >> indexSize - length of srcIndex (and collIndex) >> collIndex - index of the gathered sub-tensors */ void _CudaSpread(XTensor * source, XTensor * collection, int dim, int * srcIndex, int indexSize, int * collIndex) { int order = source->order; CheckNTErrors(source->dataType == DEFAULT_DTYPE, "TODO!"); CheckNTErrors(dim >= 0 && dim < order, "Illegal dimension!"); int blockSizeSrc = 1; int blockSizeColl = 1; int blockNum = 1; int stride = 1; for (int i = dim + 1; i < order; i++) { stride *= source->GetDim(i); } blockSizeSrc = stride * source->GetDim(dim); blockSizeColl = stride * collection->GetDim(dim); blockNum = source->unitNum / blockSizeSrc; int cudaGrids[3]; int cudaBlocks[3]; GDevs.GetCudaThread2D(source->devID, blockNum, stride, MAX_INT, cudaGrids, cudaBlocks); dim3 blocks(cudaGrids[0], cudaGrids[1]); dim3 threads(cudaBlocks[0], cudaBlocks[1]); int devIDBackup; ProtectCudaDev(source->devID, devIDBackup); if(indexSize < 4){ GDevs.GetCudaThread2D(source->devID, blockNum, stride, MAX_INT, cudaGrids, cudaBlocks); dim3 blocks(cudaGrids[0], cudaGrids[1]); dim3 threads(cudaBlocks[0], cudaBlocks[1]); DTYPE * sData = (DTYPE*)source->data; DTYPE * cData = (DTYPE*)collection->data; for(int i = 0; i < indexSize; i++) { int src = srcIndex[i]; int tgt = collIndex[i]; DTYPE * s = sData + src * stride; DTYPE * c = cData + tgt * stride; KernelSpread<<<blocks, threads >>>(s, c, blockNum, blockSizeSrc, blockSizeColl, stride); } } else{ GDevs.GetCudaThread2D(source->devID, blockNum * indexSize, stride, MAX_INT, cudaGrids, cudaBlocks); dim3 blocks(cudaGrids[0], cudaGrids[1]); dim3 threads(cudaBlocks[0], cudaBlocks[1]); DTYPE * s = (DTYPE*)source->data; DTYPE * c = (DTYPE*)collection->data; XMem * mem = source->mem; int * si = mem != NULL ? (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize * 2) : (int*)XMemAlloc(mem->devID, sizeof(int) * indexSize * 2); int * ci = si + indexSize; XMemCopy(si, mem->devID, srcIndex, -1, sizeof(int) * indexSize); XMemCopy(ci, mem->devID, collIndex, -1, sizeof(int) * indexSize); KernelSpreadFuzed<<<blocks, threads >>>(s, c, blockNum, blockSizeSrc, blockSizeColl, stride, indexSize, si, ci); if(mem != NULL) mem->ReleaseBuf(mem->devID, sizeof(int) * indexSize * 2); else XMemFree(mem->devID, si); } BacktoCudaDev(source->devID, devIDBackup); } /* spread a collection tensor to source tensor (kernel version). And this is a special spread function for backward computation of CopyIndexed function. 
>> sData - the data pointer of the source tensor >> cData - the data pointer of collection tensor >> sIndex - index of the source sub-tensor >> cIndex - index of the sub-tensor in the collection tensor >> blockNum - number of data blocks >> blockSizeSrc - size of source data block >> blockSizeColl - size of source data block >> stride - stride of a data block >> indexSize - number of indexs >> copyNum - number of the sub-tensors we copy for each source index */ __global__ void KernelSpreadForCopyIndexed(DTYPE * sData, DTYPE * cData, int * sIndex, int * cIndex, int blockNum, int blockSizeSrc, int blockSizeColl, int stride, int indexSize, int copyNum) { __shared__ DTYPE * sp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE * cp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* block id */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* offset in each block */ int offset = blockDim.y * blockIdx.y + threadIdx.y; int realIndexSize = indexSize * copyNum; int realBlockNum = i / realIndexSize; int tmp = i % realIndexSize; int realIndex = tmp / copyNum; int realCopyNum = tmp % copyNum; if (realBlockNum >= blockNum || offset >= stride || realIndex >= indexSize || realCopyNum >= copyNum) return; //if(i >= blockNum * indexSize * copyNum || offset >= stride) // return; int realSrcIndex = sIndex[realIndex] + realCopyNum; int realCollIndex = cIndex[realIndex] + realCopyNum; //int realSrcIndex = sIndex[realIndex / copyNum] + realIndex % copyNum; //int realCollIndex = cIndex[realIndex / copyNum] + realIndex % copyNum; if(threadIdx.y == 0){ sp[threadIdx.x] = sData + realBlockNum * blockSizeSrc + realSrcIndex * stride; cp[threadIdx.x] = cData + realBlockNum * blockSizeColl + realCollIndex * stride; } __syncthreads(); DTYPE * s = sp[threadIdx.x]; DTYPE * c = cp[threadIdx.x]; atomicAdd(s + offset, c[offset]); } /* spread a collection tensor to source tensor. And this is a special spread function for backward computation of CopyIndexed function. 
>> s - the source tensor whose data would be modified >> c - the collection whose data would be spread to source tensor >> dim - the leading dimension to define "sub-tensors" e.g., for a tensor of size (3, 2, 4) and dim = 2, we have 4 sub-tensors of size (3, 2) >> srcIndex - the tensor to save the index of the source sub-tensors >> collIndex - the tensor to save the index of the collection sub-tensors >> copyNum - number of the sub-tensors we copy for each source index, e.g., for srcIndex = [1,4] and copyNum = 2, we actually copy the source sub-tensors 1, 2, 4, 5 */ void _CudaSpreadForCopyIndexed(XTensor * s, XTensor * c, int dim, XTensor * srcIndex, XTensor * collIndex, int copyNum) { int devID = s->devID; int order = s->order; int indexSize = srcIndex->unitNum; int blockNum = 1; int stride = 1; int blockSizeSrc = 1; int blockSizeTgt = 1; for (int i = 0; i < dim; i++) blockNum *= s->GetDim(i); for (int i = dim + 1; i < order; i++) stride *= s->GetDim(i); blockSizeSrc = stride * s->GetDim(dim); blockSizeTgt = stride * c->GetDim(dim); int cudaGrids[3]; int cudaBlocks[3]; int devIDBackup; ProtectCudaDev(devID, devIDBackup); GDevs.GetCudaThread2D(devID, blockNum * indexSize * copyNum, stride, MAX_INT, cudaGrids, cudaBlocks); dim3 blocks(cudaGrids[0], cudaGrids[1]); dim3 threads(cudaBlocks[0], cudaBlocks[1]); DTYPE * sData = (DTYPE*)s->data; DTYPE * cData = (DTYPE*)c->data; int * sIndex = (int *)srcIndex->data; int * cIndex = (int *)collIndex->data; KernelSpreadForCopyIndexed<<<blocks, threads >>>(sData, cData, sIndex, cIndex, blockNum, blockSizeSrc, blockSizeTgt, stride, indexSize, copyNum); BacktoCudaDev(devID, devIDBackup); } /* This is core assignment for backward computation of gather function. Care of the operator "+=" instead of "=". >> sData - the data pointer of the source tensor >> cData - the data pointer of collection tensor >> srcIndex - index of the source sub-tensor >> indexSize - the number of index >> stride - stride of a data block */ template <class T> __global__ void KernelSpreadForGather(T * sData, T * cData, int * srcIndex, int indexSize, int stride) { __shared__ T * sp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ T * cp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* block id */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* offset in each block */ int offset = blockDim.y * blockIdx.y + threadIdx.y; if(i >= indexSize || offset >= stride) return; if (threadIdx.y == 0) { sp[threadIdx.x] = sData + srcIndex[i] * stride; cp[threadIdx.x] = cData + i * stride; } __syncthreads(); T * s = sp[threadIdx.x]; T * c = cp[threadIdx.x]; //DTYPE * s = sData + srcIndex[i] * stride; //DTYPE * c = cData + i * stride; atomicAdd(s + offset, c[offset]); } /* spread a collection tensor to source tensor (cuda version). And this is a special spread function for backward computation of gather function. 
>> source - the source tensor whose data would be modified >> collection - the collection whose data would be spread to source tensor >> srcIndex - index of the source sub-tensors */ void _CudaSpreadForGather(XTensor * source, XTensor * collection, XTensor * srcIndex) { int devID = source->devID; XMem * mem = source->mem; int stride = source->GetDim(1); int indexSize = srcIndex->unitNum; int cudaGrids[3]; int cudaBlocks[3]; int devIDBackup; ProtectCudaDev(source->devID, devIDBackup); int * sIndex = NULL; GDevs.GetCudaThread2D(devID, indexSize, stride, MAX_INT, cudaGrids, cudaBlocks); dim3 blocks(cudaGrids[0], cudaGrids[1]); dim3 threads(cudaBlocks[0], cudaBlocks[1]); if (srcIndex->devID < 0) { sIndex = mem != NULL ? (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) : (int*)XMemAlloc(devID, sizeof(int) * indexSize); XMemCopy(sIndex, devID, srcIndex->data, -1, sizeof(int) * indexSize); } else sIndex = (int *)srcIndex->data; if (source->dataType == DEFAULT_DTYPE && collection->dataType == DEFAULT_DTYPE) { DTYPE * sData = (DTYPE*)source->data; DTYPE * cData = (DTYPE*)collection->data; KernelSpreadForGather<DTYPE> << <blocks, threads >> >(sData, cData, sIndex, indexSize, stride); } else if (source->dataType == X_FLOAT16 && collection->dataType == X_FLOAT16) { #ifdef HALF_PRECISION __half2 * sData = (__half2*)source->data; __half2 * cData = (__half2*)collection->data; KernelSpreadForGather<__half2> << <blocks, threads >> >(sData, cData, sIndex, indexSize, stride); #else ShowNTErrors("Recompile the code with HALF_PRECISION!"); #endif } else { ShowNTErrors("Unsupported dataType!"); } if (srcIndex->devID < 0) { if(mem != NULL) mem->ReleaseBuf(mem->devID, sizeof(int) * indexSize); else XMemFree(devID, sIndex); } BacktoCudaDev(source->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
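/*
 * Illustrative note (not part of the original NiuTrans.Tensor sources): every
 * spread kernel above reduces to a scatter-add along the leading dimension,
 * i.e. source[srcIndex[i]] += collection[i], with atomicAdd resolving
 * duplicate indices. The standalone kernel below shows only that core pattern
 * in plain CUDA; the name spreadAddSketch and its 2-D launch shape are
 * hypothetical and are not part of the library API.
 */
__global__ void spreadAddSketch(float * source, const float * collection,
                                const int * srcIndex, int indexSize, int stride)
{
    /* one (i, offset) pair per thread: i selects an index entry,
       offset selects a position inside the corresponding row of length `stride` */
    int i      = blockDim.x * blockIdx.x + threadIdx.x;
    int offset = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= indexSize || offset >= stride)
        return;

    /* srcIndex may contain repeated entries, so the accumulation must be atomic */
    atomicAdd(source + srcIndex[i] * stride + offset,
              collection[i * stride + offset]);
}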
#pragma once #include "cuda/Complex.cuh" #include "cuda/ComputeCapabilities.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/MemoryAccess.cuh" #include "cuda/fbfft/Twiddles.cuh" #include <cuda_runtime.h> #include <glog/logging.h> using namespace facebook::cuda; namespace facebook { namespace cuda { namespace fbfft { template <typename T> __device__ __host__ T numHermitian(T commonCols) { return commonCols / 2 + 1; } namespace detail { __device__ inline unsigned int reverse(unsigned int x, unsigned int nbits) { return __brev(x) >> (WARP_SIZE - nbits); } // This adjustment modulo FFTSize is used as a stepping stone to cram multiple // FFTs of size < WARP_SIZE into a single warp. // The invariant is: // assert(FFTPerWarp * FFTSize == blockDim.x || FFTPerWarp == 1); // This has no effect if FFTSize >= WARP_SIZE or FFTPerWarp == 1. // This is for the cases 2, 4, 8 and 16 and buys us additional perf. template <int FFTSize> __device__ inline int adjustedThreadIdxX() { if (FFTSize < WARP_SIZE) { return (threadIdx.x & (FFTSize - 1)); } else { return threadIdx.x; } } template <int FFTSize> __device__ inline int adjustedThreadIdxY() { if (FFTSize < WARP_SIZE) { return (threadIdx.y & (FFTSize - 1)); } else { return threadIdx.y; } } // Computes the batch number based on the fact that batches are divided by: // - blockIdx.x, each block computes a chunk of bacthes, // - threadIdx.z, each z dimensions computes a subchcunk of batches to // increase occupancy, // - exactly FFTPerWarp FFTs are processed by one warp // These 3 subdivisions interact to compute the actual batch size. template <int FFTSize, int FFTPerWarp> __device__ inline int adjustedBatch() { if (FFTSize < WARP_SIZE) { int LogFFTSize = getMSB(FFTSize); int LogFFTPerWarp = getMSB(FFTPerWarp); return (threadIdx.x >> LogFFTSize) + (blockIdx.x << LogFFTPerWarp) + ((threadIdx.z * gridDim.x) << LogFFTPerWarp); } else { return blockIdx.x + threadIdx.z * gridDim.x; } } // Computes the batch number based on the fact that batches are divided by: // - blockIdx.x, each block computes a chunk of batches, // - threadIdx.z, each z dimensions computes a subchunk of batches to // increase occupancy, // - exactly FFTPerWarp FFTs are processed by one warp // These 3 subdivisions interact to compute the actual batch size. 
// In the R2C case, we additionally compute 2 real FFTs as a single complex FFT template <int FFTSize, int FFTPerWarp, bool ForwardFFT> __device__ inline int adjustedBatchR2C() { if (FFTSize < WARP_SIZE) { int LogFFTSize = getMSB(FFTSize); int LogFFTPerWarp = getMSB(FFTPerWarp); return 2 * ((threadIdx.x >> LogFFTSize) + (blockIdx.x << LogFFTPerWarp) + ((threadIdx.z * gridDim.x) << LogFFTPerWarp)); } else { return 2 * (blockIdx.x + threadIdx.z * gridDim.x); } } template <int FFTSize> struct FFT1DCoeffs { enum { ColumnsPerWarp = (FFTSize + WARP_SIZE - 1) / WARP_SIZE }; __device__ inline Complex& operator[](int i) { return coeff[i]; } __device__ inline Complex operator[](int i) const { return coeff[i]; } Complex coeff[ColumnsPerWarp]; }; __device__ inline Complex ldg(const Complex* p) { return Complex(__ldg(&(p->re())), __ldg(&(p->im())) ); } template <int FFTSize> struct FFT1DRoots : public FFT1DCoeffs<FFTSize> { template <bool ForwardFFT> __device__ inline void twiddles() { twiddlesFromMemory<ForwardFFT>(); } template <bool ForwardFFT> __device__ inline void twiddlesFromMemory() { #pragma unroll for (int index = 0; index < ceil((int)this->ColumnsPerWarp, 2); ++index) { int x = threadIdx.x % FFTSize + index * WARP_SIZE; (*this)[index] = (ForwardFFT) ? ldg(&((Complex*)twiddleFactors)[ 2 * x * (kNumTwiddles / FFTSize)]).conjugate() : ldg(&((Complex*)twiddleFactors)[ 2 * x * (kNumTwiddles / FFTSize)]); } } }; // Given an FFT of size FFTSize and given the threadIdx.x of the current // thread, what are the successive twiddles that the thread needs to apply. // Hoisting this computation out allows trading off a log number of registers // for a linear number of shuffle operations. template <int FFTSize> struct FFT1DRegisterTwiddles { static const int LogFFTSize = getMSB(FFTSize); __device__ __forceinline__ FFT1DRegisterTwiddles(bool forward) { // logStep starts at 1 #pragma unroll for (int logStep = 1; logStep <= LogFFTSize; ++logStep) { // Every thread with the bit (FFTSize >> logStep) set has a non trivial // twiddle. bool twiddling = (threadIdx.x & (FFTSize >> logStep)); if (!twiddling) { roots[logStep - 1] = Complex(1.0f); } else { int subPosition = threadIdx.x & ((FFTSize >> logStep) - 1); // For instance FFT size of 16, logStep 2, thread 13 -> // exp(+/- 2 * 2 pi / 16) int twiddleFactor = subPosition << (logStep - 1); // Increment accounts for the fact that the twiddleFactors array has // kNumTwiddles entries and we only want the ones for FFTSize. // Also we are always talking in multiples of 2 pi constexpr int increment = 2 * (kNumTwiddles / FFTSize); roots[logStep - 1] = (forward) ? ldg(((Complex*)twiddleFactors) + twiddleFactor * increment).conjugate() : ldg(((Complex*)twiddleFactors) + twiddleFactor * increment); } } } __device__ __forceinline__ const Complex& operator[](int logStep) const { return roots[logStep - 1]; } Complex roots[LogFFTSize]; }; template <int FFTSize> struct FFT1DBitReversal { enum { ColumnsPerWarp = (FFTSize + WARP_SIZE - 1) / WARP_SIZE }; __device__ inline int& operator[](int i) { return bitReversed[i]; } __device__ inline int operator[](int i) const { return bitReversed[i]; } __device__ inline void computeBitReversal(const int index) { int LogFFTSize = cuda::getMSB(FFTSize); int x = adjustedThreadIdxX<FFTSize>() + index * blockDim.x; bitReversed[index] = reverse(x, LogFFTSize); } int bitReversed[ColumnsPerWarp]; }; // Pure within a warp reversal for FFT sizes <= WARP_SIZE. 
// For sizes >= 64 this is trickier since we need a cross-register, // cross-warp bit reversal. // Can be done inefficiently with a loop or local memory. // Q: How can we make sure it will always unroll statically ? // A: Just use shared memory for the bit reversal portion, it will only // consume 2 * FFTSize floats per block. template <int FFTSize, int FFTPerWarp> __device__ inline void bitReverse1DWarp(FFT1DCoeffs<FFTSize>& coeffs, const FFT1DBitReversal<FFTSize>& bits, const int index) { assert(coeffs.ColumnsPerWarp == 1); assert(index == 0); assert(FFTSize <= WARP_SIZE); // Only reverse and permute within blockDim.x boundary which allows to cram // multiple FFTs smaller than WARP_SIZE into a single warp int LogFFTPerWarp = cuda::getMSB(FFTPerWarp); coeffs[index] = shfl(coeffs[index], bits[index], blockDim.x >> LogFFTPerWarp); } // Helper function useful for maintaining the twiddle factor distribution // invariant. Assuming registers r1 and r2, distributed across warps, // we write r1[0, ... 31] and r2[0, ... 31]. // This concatenates r1 | r2 and keeps only the entries from the even warps. // r1 and r2 both contain these values on exit. // This is useful for simplifying the distribution of twiddle factors. // // Consider the case FFT-128, by construction: // r1[0, .. 31] == r3[0, .. 31] = [w^0 , .. w^31] // r2[0, .. 31] == r4[0, .. 31] = [w^32, .. w^63] // // After selectEvenWarpDistributed, all registers are equal and we have: // r1[0, .. 31] == ... == r4[0, .. 31] == [w^0, w^2, .. w^62] // // This occurs one more time to obtain: // r1[0, .. 31] == ... == r4[0, .. 31] == [w^0, w^4, .. w^60, 16 x garbage] // // The garbage is never read in decimateInFrequency1DWarp. // // Formally: // r1[k] <- concat(r1, r2) [2k] for k \in [0 .. WARP_SIZE - 1] // r2 <- r1 // __device__ inline void selectEvenWarpDistributed(Complex& r1, Complex& r2) { // E.g. stating from: // r1[w^0, w^1, ... w^31] and r2[w^32, w^33, ...w^63] // // Set // r1[w^0 , w^2 , ... w^30 | 16 x garbage] // r2[16 x garbage | w^32, w^34, ... w^62] // // And merge into: // r1[w^0 , w^2 , ... w^30 | w^32, w^34, ... w^62] // // Dark compiler magic: trying to reduce this down to Complex loses 10% // perf. This seems related to instruction mix, divergence and the compiler // not able to reorder instructions past divergent points (which is // reasonable). r1.re() = shfl(r1.re(), 2 * getLaneId()); r2.re() = shfl(r2.re(), 2 * getLaneId() - WARP_SIZE); if (threadIdx.x >= HALF_WARP_SIZE) { r1.re() = r2.re(); } r1.im() = shfl(r1.im(), 2 * getLaneId()); r2.im() = shfl(r2.im(), 2 * getLaneId() - WARP_SIZE); if (threadIdx.x >= HALF_WARP_SIZE) { r1.im() = r2.im(); } r2 = r1; } template <int FFTSize, bool ForwardFFT> __device__ inline void load1D(const DeviceTensor<float, 2>& real, const DeviceTensor<float, 3>& complex, FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int index, const int padL) { int LogFFTSize = getMSB(FFTSize); // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + index * blockDim.x; // Support zero padding without a need to copy the input data to a larger // array. // TODO: center the kernel wrt to zeros. // TODO: support reflection padding: pass the kernel size to fill with // reflection and then zero after that to pad till the FFT size. // TODO: support complex input (just read the imaginary part) // TODO: try to do something with float4 and shuffles if (ForwardFFT) { coeffs[index] = Complex((0 <= x - padL && x < real.getSize(1)) ? 
real[batch][x - padL].ldg() : 0.0f, 0.0f); } else { coeffs[index] = (x < complex.getSize(1)) ? ldg(complex[batch][x].dataAs<Complex>()) : ldg(complex[batch][2 * (complex.getSize(1) - 1) - x]. dataAs<Complex>()).conjugate(); } } template <int FFTSize, bool ForwardFFT, bool EvenDivideBatches> __device__ inline void load1DR2C(const DeviceTensor<float, 2>& real, const DeviceTensor<float, 3>& complex, FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int index, const int padL) { int LogFFTSize = getMSB(FFTSize); // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + index * blockDim.x; // Support zero padding without a need to copy the input data to a larger // array. // TODO: center the kernel wrt to zeros. // TODO: support reflection padding: pass the kernel size to fill with // reflection and then zero after that to pad till the FFT size. if (ForwardFFT) { // R2C coeffs[index] = (0 <= x - padL && x < real.getSize(1)) ? // y = x1 + i. x2 Complex(real[batch][x - padL], (EvenDivideBatches || batch + 1 < complex.getSize(0)) ? real[batch + 1][x - padL] : 0.0f) : Complex(0.0f); } else { // C2R Complex tmp1 = (x < complex.getSize(1)) ? *(complex[batch][x].dataAs<Complex>()) : complex[batch][2 * (complex.getSize(1) - 1) - x]. dataAs<Complex>()->conjugate(); Complex tmp2 = (EvenDivideBatches || batch + 1 < complex.getSize(0)) ? ((x < complex.getSize(1)) ? *(complex[batch + 1][x].dataAs<Complex>()) : complex[batch + 1][2 * (complex.getSize(1) - 1) - x]. dataAs<Complex>()->conjugate()) : Complex(0.0f); // y = x1 + i. x2 coeffs[index] = Complex(tmp1.re() - tmp2.im(), tmp1.im() + tmp2.re()); } } template <int FFTSize, bool ForwardFFT> __device__ inline void store1D(DeviceTensor<float, 2>& real, DeviceTensor<float, 3>& complex, const FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int index, const int padL) { // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + index * blockDim.x; if (ForwardFFT && x < complex.getSize(1)) { // TODO: try to do something with float4 and shuffles complex[batch][x][0].as<Complex>() = coeffs[index]; } else if (0 <= x - padL && x - padL < real.getSize(1)) { // TODO: try to do something with float4 and shuffles real[batch][x - padL] = coeffs[index].re(); } } template <int FFTSize> __device__ inline const Complex HermitianModuloCoefficient( const FFT1DCoeffs<FFTSize>& coeffs, int index) { assert(FFTSize > 32); // This monstrosisty below is unfortunately necessary to recover the // proper index from (N - m) % N. // As is, it results in local memory spilling. // return ((FFTSize - (index * WARP_SIZE + threadIdx.x)) % FFTSize) / // WARP_SIZE; // After unrolling by hand, it turns out it can be expressed as follows. return (threadIdx.x == 0) ? coeffs[(coeffs.ColumnsPerWarp - index) % coeffs.ColumnsPerWarp] : coeffs[coeffs.ColumnsPerWarp - index - 1]; } template <int FFTSize, bool ForwardFFT, bool EvenDivideBatches> __device__ inline void store1DR2C(DeviceTensor<float, 2>& real, DeviceTensor<float, 3>& complex, const FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int index, const int padL) { // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + index * blockDim.x; // Express x[batch] = coeffs[0]{N - m}.re() + coeffs[0]{m}.re() + // i . 
(coeffs[0]{m}.im() - coeffs[0]{N - m}.im()) // Shfl each if (ForwardFFT) { // This is coeffs[0]{m}, other is coeffs[0]{N - m} Complex tmp = (FFTSize <= WARP_SIZE) ? coeffs[index] : HermitianModuloCoefficient<FFTSize>(coeffs, index); Complex other = (FFTSize <= WARP_SIZE) ? shfl(tmp, FFTSize - adjustedThreadIdxX<FFTSize>(), FFTSize) : shfl(tmp, (WARP_SIZE - threadIdx.x) % WARP_SIZE, WARP_SIZE); // Need conditional below shfl for threads participating in shfl reasons if (x < complex.getSize(1)) { Complex c1 = Complex(0.5f * (coeffs[index].re() + other.re()), 0.5f * (coeffs[index].im() - other.im())); complex[batch][x][0].as<Complex>() = c1; } if (EvenDivideBatches || batch + 1 < complex.getSize(0)) { // Need conditional below shfl for threads participating in shfl reasons if (x < complex.getSize(1)) { Complex c2 = Complex( 0.5f * ( coeffs[index].im() + other.im()), 0.5f * (-coeffs[index].re() + other.re())); complex[batch + 1][x][0].as<Complex>() = c2; } } } else if (0 <= x - padL && x - padL < real.getSize(1)) { real[batch][x] = coeffs[index].re(); if (EvenDivideBatches || batch + 1 < complex.getSize(0)) { real[batch + 1][x - padL] = coeffs[index].im(); } } } template <int FFTSize> __device__ inline void decimateInFrequency1DWarp(Complex& coeff, Complex& root) { // Cannot be static due to upstream mix of function calls assert(FFTSize <= WARP_SIZE); int LogFFTSize = getMSB(FFTSize); #pragma unroll for (int logStep = 1; logStep <= LogFFTSize; ++logStep) { // Illustration for 1-D FFT of size 8, radix-2, decimation in frequency // Step 1 amongst 2, // Step 2 amongst 4, // Step 4 amongst 8, // ... Complex otherCoeff = shfl_xor(coeff, FFTSize >> logStep, FFTSize >> (logStep - 1)); // Illustration for 1-D FFT of size 8, radix-2, decimation in frequency // Vals {1} U {3} U {5} U {7} amongst 2, // Vals [2, 3] U [6, 7] amongst 4, // Vals [4, 7] amongst 8, // ... otherCoeff = (threadIdx.x & (FFTSize >> logStep)) ? otherCoeff - coeff : coeff + otherCoeff; if (logStep < LogFFTSize) { // Illustration for 1-D FFT of size 8, radix-2, decimation in frequency // Twiddles [w^0, [w^0], w^0, [w^0], w^0, [w^0], w^0, [w^0]] amongst 2, // Twiddles [w^0, w^0, [w^0, w^2], w^0, w^0, [w^0, w^2]] amongst 4, // Twiddles [w^0, w^0, w^0, w^0, [w^0, w^1, w^2, w^3]] amongst 8, // ... int twiddleDee = (!(threadIdx.x & (FFTSize >> logStep))) ? 0 : ((threadIdx.x & ((FFTSize >> logStep) - 1)) << (logStep - 1)); Complex otherRoot = shfl(root, twiddleDee); coeff = otherCoeff * otherRoot; } else { // Last step just does radix-2 + / - which is what otherCoeff contains coeff = otherCoeff; } } } template <int FFTSize> __device__ inline void decimateInFrequency1DWarp( Complex& coeff, const FFT1DRegisterTwiddles<FFTSize>& roots) { // Cannot be static due to upstream mix of function calls assert(FFTSize <= WARP_SIZE); int LogFFTSize = getMSB(FFTSize); #pragma unroll for (int logStep = 1; logStep <= LogFFTSize; ++logStep) { Complex otherCoeff = shfl_xor(coeff, FFTSize >> logStep, FFTSize >> (logStep - 1)); otherCoeff = (threadIdx.x & (FFTSize >> logStep)) ? 
otherCoeff - coeff : coeff + otherCoeff; coeff = otherCoeff * roots[logStep]; } } template <int FFTSize> struct TwiddleRebalancer { static __device__ inline void rebalance(FFT1DRoots<FFTSize>&, int); }; template <> struct TwiddleRebalancer<64> { static __device__ inline void rebalance(FFT1DRoots<64>& roots, int) { selectEvenWarpDistributed(roots[0], roots[1]); } }; template <> struct TwiddleRebalancer<128> { static __device__ inline void rebalance(FFT1DRoots<128>& roots, int logStep) { if (logStep == 1) { selectEvenWarpDistributed(roots[0], roots[1]); selectEvenWarpDistributed(roots[2], roots[3]); roots[1] = roots[2]; roots[2] = roots[0]; } else { assert(logStep == 2); selectEvenWarpDistributed(roots[0], roots[1]); roots[2] = roots[0]; roots[3] = roots[0]; } } }; template <> struct TwiddleRebalancer<256> { static __device__ inline void rebalance(FFT1DRoots<256>& roots, int logStep) { if (logStep == 1) { selectEvenWarpDistributed(roots[0], roots[1]); selectEvenWarpDistributed(roots[2], roots[3]); selectEvenWarpDistributed(roots[4], roots[5]); selectEvenWarpDistributed(roots[6], roots[7]); roots[1] = roots[2]; roots[2] = roots[4]; roots[3] = roots[6]; roots[4] = roots[0]; roots[5] = roots[1]; roots[6] = roots[2]; roots[7] = roots[3]; } else if (logStep == 2) { assert(logStep == 2); selectEvenWarpDistributed(roots[0], roots[1]); selectEvenWarpDistributed(roots[2], roots[3]); roots[1] = roots[2]; roots[2] = roots[0]; roots[3] = roots[1]; roots[4] = roots[0]; roots[5] = roots[1]; roots[6] = roots[0]; roots[7] = roots[1]; } else { assert(logStep == 3); selectEvenWarpDistributed(roots[0], roots[1]); roots[1] = roots[0]; roots[2] = roots[0]; roots[3] = roots[0]; roots[4] = roots[0]; roots[5] = roots[0]; roots[6] = roots[0]; roots[7] = roots[0]; } } }; // The following ASCII shows the breakdown of a 1-D FFT-256 into // the size 128 and 64-steps. // Each 64 step is followed by 2 32-steps. // A 32 step is the granularity of distributed storage (each warp holding 1 // value per 32-step). // At this granularity, communication is exclusively across registers. // Twiddle factors are continuously readjusted at each step. // |-------| |-------| // | Reg0 | | Reg0 | // | | |-------| // |-------| | Reg1 | // | Reg1 | |-------| // |-------| |-------| w^0 // | Reg2 | | Reg2 | . // |-------| |-------| . // | Reg3 | | Reg3 | . // |-------| |-------| w^126 (increment 2) // // |-------| w^0 |-------| // | Reg4 | | Reg4 | // | | |-------| // |-------| | Reg5 | // | Reg5 | . |-------| // |-------| . |-------| w^0 // | Reg6 | . | Reg6 | . // |-------| |-------| . // | Reg7 | | Reg7 | . // |-------| w^127 (+= 1) |-------| w^126 (increment 2) // // E.g. for FFTSize = 256, we have 3 logSteps: // the first with 8 registers: // registers {{0, 4}, {1, 5}, {2, 6}, {3, 7}} communicate // the second with 4 registers: // registers {{0, 2}, {1, 3}, {4, 6}, {5, 7}} communicate // the third with 2 register // registers {{0, 1}, {2, 3}, {4, 5}, {6, 7}} communicate // // Note that everything is properly aligned modulo 32 and we don't need warp // shuffles at all. The only exception may be the bit reversal phase which // is currently implemented fully in shared memory since it would require // fully unrolled, cross-register twiddles. 
// template <int FFTSize, int BatchUnroll, int RowsPerWarp, int RowBegin, int RowEnd> __device__ inline void decimateInFrequency1D(FFT1DCoeffs<FFTSize> coeffsArray[RowsPerWarp], FFT1DRoots<FFTSize>& roots, const int batch) { int LogFFTSize = getMSB(FFTSize); const int kDeltaLog = LogFFTSize - LOG_WARP_SIZE; { // Computation is all within the same warp across registers. // Unlike shuffles, things do not update in parallel so we do have // WAR (a.k.a false) dependences -> need a swap temporary storage ! // Make swap registers local to this scope FFT1DCoeffs<FFTSize> swap; #pragma unroll for (int logStep = 1; logStep <= kDeltaLog; ++logStep) { #pragma unroll for (int row = RowBegin; row < RowEnd; ++row) { FFT1DCoeffs<FFTSize>& coeffs = coeffsArray[row]; assert(coeffs.ColumnsPerWarp == 1 << (LogFFTSize - LOG_WARP_SIZE)); // Always need to process all the registers, this is not a function of // the logStep but only of the coeffs.ColumnsPerWarp. // The spacing between registers that communicate is however a function // of logStep. #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { // By how many registers are we stepping ? // e.g. LogFFTSize == 8, LOG_WARP_SIZE == 5, logStep == 1 -> // kDeltaLog == 3, kDeltaStep = 4 const int kDeltaStep = (1 << (kDeltaLog - logStep)); assert(kDeltaStep >= 0); assert(kDeltaStep < coeffs.ColumnsPerWarp); // If bit kDeltaStep is step then sub else add int reg2 = (reg & kDeltaStep) ? reg - kDeltaStep : reg + kDeltaStep; // Sanity check assert(reg != reg2); Complex otherCoeff = coeffs[reg2]; otherCoeff = (reg > reg2) ? otherCoeff - coeffs[reg] : coeffs[reg] + otherCoeff; // Only second half requires twiddling if (reg > reg2) { // Enforce this invariant: // the register is exactly reg2 and no shuffle necessary until <= // WARP_SIZE Complex otherRoot = roots[reg2]; // Here we could write directly to vals and not swap but performance // is higher writing swap, likely due to same register writing // across branches and predicated code generated by the compiler. swap.coeff[reg] = otherCoeff * otherRoot; } else { swap.coeff[reg] = otherCoeff; } } // Recover values from swap #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { coeffs[reg] = swap.coeff[reg]; } } // This piece of code serves the purpose of rebalancing the twiddle // factors across registers within a warp by merging 2 consecutive // registers and selecting the odd entries (effectively keeping: // w^0, w^2 ... w^2*(N/2) out of w^0, w^1, ... w^N). // Once this is done, we have something like: // w^0 .. w^62 | garbage | w^64 .. w^128 | garbage // That needs to be copied into: // w^0 .. w^62 | w^64 .. w^128 | w^0 .. w^62 | w^64 .. w^128 // // In the general case, this has a recursive behavior with log-style RAW // / WAR dependencies. // It requires full unrolling or perf will die. // This is what limits the FFT size to 256 atm. 
// Cannot be static due to upstream mix of function calls assert(WARP_SIZE <= FFTSize && FFTSize <= 256); // TODO: Figure out how to replace the monstruosity within TwiddleRebalancer<FFTSize>::rebalance(roots, logStep); } } // At this point we reached the FFT of WARP_SIZE, do them all in sequence #pragma unroll for (int i = 0; i < (1 << kDeltaLog); ++i) { #pragma unroll for (int row = RowBegin; row < RowEnd; ++row) { FFT1DCoeffs<FFTSize>& coeffs = coeffsArray[row]; decimateInFrequency1DWarp<WARP_SIZE>(coeffs[i], roots[i]); } } } template <int FFTSize, int BatchUnroll, bool ForwardFFT, bool EvenDivideBatches> __device__ inline void decimateInFrequency1D(DeviceTensor<float, 2>& real, DeviceTensor<float, 3>& complex, FFT1DCoeffs<FFTSize> (&coeffsArray)[1], const int batch, const int padL) { // Cannot be static due to upstream mix of function calls assert(FFTSize >= WARP_SIZE); assert(blockDim.x == WARP_SIZE); FFT1DCoeffs<FFTSize>& coeffs = coeffsArray[0]; FFT1DBitReversal<FFTSize> bits; #pragma unroll for (int i = 0; i < coeffs.ColumnsPerWarp; ++i) { load1DR2C<FFTSize, ForwardFFT, EvenDivideBatches>( real, complex, coeffs, batch, i, padL); bits.computeBitReversal(i); } FFT1DRoots<FFTSize> roots; roots.template twiddles<ForwardFFT>(); decimateInFrequency1D<FFTSize, BatchUnroll, 1, 0, 1>( coeffsArray, roots, batch); { // Bit reversal through shared memory because double indirection is not // easily unrolled. // TODO: see if we can use float4 // TODO: purely in registers, starting at 256 smem already gnaws at // occupancy. // No need to sync, dependences within a single warp __shared__ Complex buffer[BatchUnroll][FFTSize]; assert(blockDim.z == BatchUnroll); #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { int x = getLaneId() + reg * WARP_SIZE; buffer[threadIdx.z][x] = coeffs[reg]; } // No need to sync, dependences within a single warp #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { coeffs[reg] = buffer[threadIdx.z][bits[reg]]; } // No need to sync, dependences within a single warp #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { store1DR2C<FFTSize, ForwardFFT, EvenDivideBatches>( real, complex, coeffs, batch, reg, padL); } } } template <int FFTSize, int BatchUnroll, int FFTPerWarp, bool ForwardFFT, bool EvenDivideBatches> __device__ void decimateInFrequency1DKernel(DeviceTensor<float, 2> real, DeviceTensor<float, 3> complex, int batch, const int padL) { int LogFFTSize = getMSB(FFTSize); int LogFFTPerWarp = getMSB(FFTPerWarp); if (FFTSize <= WARP_SIZE) { FFT1DCoeffs<FFTSize> coeffs; load1DR2C<FFTSize, ForwardFFT, EvenDivideBatches>( real, complex, coeffs, batch, 0, padL); FFT1DBitReversal<FFTSize> bits; bits.computeBitReversal(0); FFT1DRoots<FFTSize> roots; roots.template twiddles<ForwardFFT>(); decimateInFrequency1DWarp<FFTSize>(coeffs[0], roots[0]); bitReverse1DWarp<FFTSize, FFTPerWarp>(coeffs, bits, 0); store1DR2C<FFTSize, ForwardFFT, EvenDivideBatches>( real, complex, coeffs, batch, 0, padL); } else { FFT1DCoeffs<FFTSize> coeffs[1]; decimateInFrequency1D<FFTSize, BatchUnroll, ForwardFFT, EvenDivideBatches>( real, complex, coeffs, batch, padL); } } template <int FFTSize, int BatchUnroll, int FFTPerWarp, bool ForwardFFT> __global__ void decimateInFrequency1DKernel(DeviceTensor<float, 2> real, DeviceTensor<float, 3> complex, const int padL) { // Ensure proper usage of the BatchUnroll template parameter which controls // static shared memory allocation for bit reversals of FFTs >= 64 // TODO: default template parameter cuda-7 
cuda_static_assert((FFTSize > WARP_SIZE && BatchUnroll >= 1) || (FFTSize <= WARP_SIZE && BatchUnroll == 1)); cuda_static_assert(!(FFTPerWarp & (FFTPerWarp - 1))); cuda_static_assert(FFTPerWarp * FFTSize <= WARP_SIZE || FFTPerWarp == 1); assert(FFTPerWarp * FFTSize == blockDim.x || FFTPerWarp == 1); // Enforce that the number of FFTs we perform is divisible by the number of // FFTs per warp, otherwise weird divergence will occur and possibly bugs. assert(real.getSize(0) % FFTPerWarp == 0); const int batch = adjustedBatchR2C<FFTSize, FFTPerWarp, ForwardFFT>(); if (batch >= real.getSize(0)) { return; } if ((FFTSize != 32 && FFTSize != 64 ) || // Ad-hoc but nvcc likes it batch + 1 >= real.getSize(0) ) { decimateInFrequency1DKernel< FFTSize, BatchUnroll, FFTPerWarp, ForwardFFT, false> ( real, complex, batch, padL); } else { decimateInFrequency1DKernel< FFTSize, BatchUnroll, FFTPerWarp, ForwardFFT, true> ( real, complex, batch, padL); } } // Performs cross warp transpose of the data in registers, synchronously for // each register at a time and takes advantage of Hermitian symmetry. // // Invariants are: // - not synchronized on entry of the loop // - synchronized at each step of the loop // - synchronized on exit template <int FFTSize, int SMemRows, int RowsPerWarp> __device__ inline void transpose2D( FFT1DCoeffs<FFTSize>& coeffs, Complex(*buffer)[SMemRows][SMemRows + 1]) { #pragma unroll for (int row = 0; row < RowsPerWarp; ++row) { #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { buffer[threadIdx.z][threadIdx.y][threadIdx.x] = coeffs.coeff[reg]; __syncthreads(); coeffs.coeff[reg] = buffer[threadIdx.z][threadIdx.x][threadIdx.y]; __syncthreads(); } } } // Performs cross warp transpose of the data in registers, synchronously for // each register at a time and takes advantage of Hermitian symmetry. // // Supports multiple FFTs per warp. // // Invariants are: // - not synchronized on entry of the loop // - synchronized at each step of the loop // - synchronized on exit template <int FFTSize, int SMemRows, int RowsPerWarp, int FFTPerWarp> __device__ inline void transpose2DMultiple( FFT1DCoeffs<FFTSize>& coeffs, Complex(*buffer)[SMemRows][SMemRows + 1]) { const int LogFFTSize = getMSB(FFTSize); const int thx0 = (threadIdx.x >> LogFFTSize) << LogFFTSize; #pragma unroll for (int row = 0; row < RowsPerWarp; ++row) { #pragma unroll for (int reg = 0; reg < coeffs.ColumnsPerWarp; ++reg) { buffer[threadIdx.z][threadIdx.y][threadIdx.x] = coeffs.coeff[reg]; __syncthreads(); coeffs.coeff[reg] = buffer [threadIdx.z] [adjustedThreadIdxX<FFTSize>()] [thx0 + threadIdx.y]; __syncthreads(); } } } } // namespace }}} // namespace #include "cuda/fbfft/FBFFT-inl.cuh" #include "cuda/fbfft/FBFFT2D-inl.cuh" #include "cuda/fbfft/FBIFFT2D-inl.cuh"
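/*
 * Illustrative note (not part of the original fbfft headers): load1DR2C and
 * store1DR2C above pack two real FFTs of the same size into one complex FFT,
 * y = x1 + i * x2. With Y = FFT(y), the two real spectra are recovered bin by
 * bin through the Hermitian-symmetry identities
 *
 *   X1[m] = (Y[m] + conj(Y[N - m])) / 2
 *   X2[m] = (Y[m] - conj(Y[N - m])) / (2i)
 *
 * which is exactly the c1 / c2 arithmetic in store1DR2C. The small host-side
 * helper below spells this out with std::complex for a single bin; it is a
 * hypothetical reference sketch, not part of this header's API.
 */
#include <complex>
#include <utility>

inline std::pair<std::complex<float>, std::complex<float> >
unpackHermitianPair(std::complex<float> Ym,    // Y[m]
                    std::complex<float> YNm) { // Y[(N - m) % N]
  // Spectrum of the first real signal at bin m
  std::complex<float> X1 = 0.5f * (Ym + std::conj(YNm));
  // Spectrum of the second real signal at bin m (dividing by 2i == multiplying by -i/2)
  std::complex<float> X2 = std::complex<float>(0.0f, -0.5f) * (Ym - std::conj(YNm));
  return std::make_pair(X1, X2);
}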
#include "ssids/gpu/kernels/datatypes.h" #include "cuda/cuda_check.h" #define min(x,y) ((x) < (y) ? (x) : (y)) #define max(x,y) ((x) > (y) ? (x) : (y)) #define MAX_CUDA_BLOCKS 65535 //#define SM_3X (__CUDA_ARCH__ == 300 || __CUDA_ARCH__ == 350 || __CUDA_ARCH__ == 370) //FIXME: Verify if the code for Keplers (sm_3x) is still correct for the later GPUs. #define SM_3X (__CUDA_ARCH__ >= 300) using namespace spral::ssids::gpu; namespace /* anon */ { template< int WIDTH > inline __device__ void loadDevToSmem_generic( volatile double *const __restrict__ as, volatile double *const __restrict__ bs, const double* __restrict__ a, const double* __restrict__ b, int bx, int by, int offa, int lda, int ldb, int n, int i, int k) { switch (WIDTH) { case 4: if ( i + 3 < k ) { if ( threadIdx.y < 4 ) { int x = threadIdx.x + (threadIdx.y + bx*4)*8; if ( x < n ) { as[threadIdx.x + threadIdx.y*8 ] = a[offa + x + i*lda]; as[threadIdx.x + threadIdx.y*8 + 32] = a[offa + x + (i + 1)*lda]; as[threadIdx.x + threadIdx.y*8 + 64] = a[offa + x + (i + 2)*lda]; as[threadIdx.x + threadIdx.y*8 + 96] = a[offa + x + (i + 3)*lda]; } } else { int x = threadIdx.x + (threadIdx.y - 4 + by*4)*8; if ( x < n ) { bs[threadIdx.x + (threadIdx.y - 4)*8 ] = b[offa + x + i*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 32] = b[offa + x + (i + 1)*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 64] = b[offa + x + (i + 2)*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 96] = b[offa + x + (i + 3)*ldb]; } } } else if ( i + 2 < k ) { if ( threadIdx.y < 4 ) { int x = threadIdx.x + (threadIdx.y + bx*4)*8; if ( x < n ) { as[threadIdx.x + threadIdx.y*8 ] = a[offa + x + i*lda]; as[threadIdx.x + threadIdx.y*8 + 32] = a[offa + x + (i + 1)*lda]; as[threadIdx.x + threadIdx.y*8 + 64] = a[offa + x + (i + 2)*lda]; as[threadIdx.x + threadIdx.y*8 + 96] = 0.0; } } else { int x = threadIdx.x + (threadIdx.y - 4 + by*4)*8; if ( x < n ) { bs[threadIdx.x + (threadIdx.y - 4)*8 ] = b[offa + x + i*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 32] = b[offa + x + (i + 1)*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 64] = b[offa + x + (i + 2)*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 96] = 0.0; } } } else if ( i + 1 < k ) { if ( threadIdx.y < 4 ) { int x = threadIdx.x + (threadIdx.y + bx*4)*8; if ( x < n ) { as[threadIdx.x + threadIdx.y*8 ] = a[offa + x + i*lda]; as[threadIdx.x + threadIdx.y*8 + 32] = a[offa + x + (i + 1)*lda]; as[threadIdx.x + threadIdx.y*8 + 64] = 0.0; as[threadIdx.x + threadIdx.y*8 + 96] = 0.0; } } else { int x = threadIdx.x + (threadIdx.y - 4 + by*4)*8; if ( x < n ) { bs[threadIdx.x + (threadIdx.y - 4)*8 ] = b[offa + x + i*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 32] = b[offa + x + (i + 1)*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 64] = 0.0; bs[threadIdx.x + (threadIdx.y - 4)*8 + 96] = 0.0; } } } else { if ( threadIdx.y < 4 ) { int x = threadIdx.x + (threadIdx.y + bx*4)*8; if ( x < n ) { as[threadIdx.x + threadIdx.y*8 ] = a[offa + x + i*lda]; as[threadIdx.x + threadIdx.y*8 + 32] = 0.0; as[threadIdx.x + threadIdx.y*8 + 64] = 0.0; as[threadIdx.x + threadIdx.y*8 + 96] = 0.0; } } else { int x = threadIdx.x + (threadIdx.y - 4 + by*4)*8; if ( x < n ) { bs[threadIdx.x + (threadIdx.y - 4)*8 ] = b[offa + x + i*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 32] = 0.0; bs[threadIdx.x + (threadIdx.y - 4)*8 + 64] = 0.0; bs[threadIdx.x + (threadIdx.y - 4)*8 + 96] = 0.0; } } } break; case 2: if ( i + 1 < k ) { if ( threadIdx.y < 4 ) { int x = threadIdx.x + (threadIdx.y + bx*4)*8; if ( x < n ) { as[threadIdx.x + threadIdx.y*8 ] = a[offa + x + i*lda]; as[threadIdx.x + 
threadIdx.y*8 + 32] = a[offa + x + (i + 1)*lda]; } } else { int x = threadIdx.x + (threadIdx.y - 4 + by*4)*8; if ( x < n ) { bs[threadIdx.x + (threadIdx.y - 4)*8 ] = b[offa + x + i*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 32] = b[offa + x + (i + 1)*ldb]; } } } else { if ( threadIdx.y < 4 ) { int x = threadIdx.x + (threadIdx.y + bx*4)*8; if ( x < n ) { as[threadIdx.x + threadIdx.y*8 ] = a[offa + x + i*lda]; as[threadIdx.x + threadIdx.y*8 + 32] = 0.0; } } else { int x = threadIdx.x + (threadIdx.y - 4 + by*4)*8; if ( x < n ) { bs[threadIdx.x + (threadIdx.y - 4)*8 ] = b[offa + x + i*ldb]; bs[threadIdx.x + (threadIdx.y - 4)*8 + 32] = 0.0; } } } break; default: printf("Invalid SYRK width\n"); } } struct multisyrk_type { int first; double *lval; double *ldval; long offc; int n; int k; int lda; int ldb; }; // multisyrk kernels below compute the low trangular part of a*b^T // (stored columnwise) using 8x8 cuda blocks template< typename ELEMENT_TYPE > #if SM_3X __launch_bounds__(64, 14) #endif __global__ void cu_multisyrk_lc_r4x4( const struct multisyrk_type* msdata, int off, ELEMENT_TYPE* c ){ // The number of elements we want in each shared memory buffer depends on the shared memory:register ratio // SM 3.0+ has double the number of registers per shared memory, so need half the shared memory here. #if SM_3X #define SYRK_WIDTH 4 #define DOUBLE_BUFFERED 0 #define USE_DOUBLE2 1 #else #define SYRK_WIDTH 4 #define DOUBLE_BUFFERED 1 #define USE_DOUBLE2 0 #endif #if (USE_DOUBLE2) __shared__ volatile double2 as[32 * SYRK_WIDTH]; __shared__ volatile ELEMENT_TYPE bs[32 * SYRK_WIDTH]; #if (DOUBLE_BUFFERED) __shared__ volatile double2 as2[32 * SYRK_WIDTH]; __shared__ volatile ELEMENT_TYPE bs2[32 * SYRK_WIDTH]; #endif #else __shared__ volatile ELEMENT_TYPE as[32 * SYRK_WIDTH], bs[32 * SYRK_WIDTH]; #if (DOUBLE_BUFFERED) __shared__ volatile ELEMENT_TYPE as2[32 * SYRK_WIDTH], bs2[32 * SYRK_WIDTH]; #endif #endif msdata += blockIdx.x; int first = msdata->first; const ELEMENT_TYPE * __restrict__ a = msdata->lval; const ELEMENT_TYPE * __restrict__ b = msdata->ldval; int offc = msdata->offc; int n = msdata->n; int k = msdata->k; int lda = msdata->lda; int ldb = msdata->ldb; if ( n < 1 ) return; int bx, by; { int nb = (n - 1)/32 + 1; for ( bx = 0, by = 0; by < nb; by++ ) { if ( off + blockIdx.x - first - bx < nb - by ) { bx = off + blockIdx.x - first - bx + by; break; } bx += nb - by; } } #if (USE_DOUBLE2) double2 s[8]; for ( int i = 0; i < 8; i++ ) { s[i].x = 0.0; s[i].y = 0.0; } #else ELEMENT_TYPE s[16]; for ( int i = 0; i < 16; i++ ) s[i] = 0.0; #endif #if (SYRK_WIDTH <= 2 && DOUBLE_BUFFERED) loadDevToSmem_generic<SYRK_WIDTH>( (volatile double*)as, bs, a, b, bx, by, 0, lda, ldb, n, 0, k ); #endif for ( int i = 0; i < k; i += SYRK_WIDTH ) { // We want to get these in flight as early as possible so we can hide their // latency. We would also want to get the other set of loads in flight in a // similar manner, but this degrades performance (and makes the code more // complicated). I suspect it adds register pressure as it was quite a // challenge to get it working without spilling. 
#if (DOUBLE_BUFFERED) if ( i + SYRK_WIDTH < k ) { loadDevToSmem_generic<SYRK_WIDTH>( (volatile double*)as2, bs2, a, b, bx, by, 0, lda, ldb, n, i + SYRK_WIDTH, k ); } #endif // (DOUBLE_BUFFERED) #if (SYRK_WIDTH > 2 || DOUBLE_BUFFERED) loadDevToSmem_generic<SYRK_WIDTH>( (volatile double*)as, bs, a, b, bx, by, 0, lda, ldb, n, i, k ); #endif __syncthreads(); #pragma unroll for ( int ix = 0; ix < SYRK_WIDTH; ix++) { for ( int iy = 0; iy < 4; iy++ ) { #if (USE_DOUBLE2) s[iy*2 ].x += as[threadIdx.x + ix * 16 ].x*bs[threadIdx.y + 8*iy + ix * 32]; s[iy*2 ].y += as[threadIdx.x + ix * 16 ].y*bs[threadIdx.y + 8*iy + ix * 32]; s[iy*2 + 1].x += as[threadIdx.x + ix * 16 + 8].x*bs[threadIdx.y + 8*iy + ix * 32]; s[iy*2 + 1].y += as[threadIdx.x + ix * 16 + 8].y*bs[threadIdx.y + 8*iy + ix * 32]; #else s[iy*4] += as[threadIdx.x + 32 * ix ]*bs[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 1] += as[threadIdx.x + 32 * ix + 8 ]*bs[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 2] += as[threadIdx.x + 32 * ix + 16]*bs[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 3] += as[threadIdx.x + 32 * ix + 24]*bs[threadIdx.y + 8*iy + 32 * ix]; #endif } } #if (DOUBLE_BUFFERED) i += SYRK_WIDTH; if ( i >= k ) break; __syncthreads(); if ( i + SYRK_WIDTH < k ) { #if (SYRK_WIDTH <= 2) loadDevToSmem_generic<SYRK_WIDTH>( (volatile double*)as, bs, a, b, bx, by, 0, lda, ldb, n, i + SYRK_WIDTH, k ); #endif } #pragma unroll for ( int ix = 0; ix < SYRK_WIDTH; ix++) { for ( int iy = 0; iy < 4; iy++ ) { #if (USE_DOUBLE2) s[iy*2 ].x += as2[threadIdx.x + ix * 16 ].x*bs2[threadIdx.y + 8*iy + ix * 32]; s[iy*2 ].y += as2[threadIdx.x + ix * 16 ].y*bs2[threadIdx.y + 8*iy + ix * 32]; s[iy*2 + 1].x += as2[threadIdx.x + ix * 16 + 8].x*bs2[threadIdx.y + 8*iy + ix * 32]; s[iy*2 + 1].y += as2[threadIdx.x + ix * 16 + 8].y*bs2[threadIdx.y + 8*iy + ix * 32]; #else s[iy*4] += as2[threadIdx.x + 32 * ix ]*bs2[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 1] += as2[threadIdx.x + 32 * ix + 8 ]*bs2[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 2] += as2[threadIdx.x + 32 * ix + 16]*bs2[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 3] += as2[threadIdx.x + 32 * ix + 24]*bs2[threadIdx.y + 8*iy + 32 * ix]; #endif } } #endif // DOUBLE_BUFFERED __syncthreads(); } #if (USE_DOUBLE2) for ( int iy = 0; iy < 4; iy++ ) { for ( int ix = 0; ix < 2; ix++ ) { int x = threadIdx.x * 2 + ix*16 + bx*32; int y = threadIdx.y + iy*8 + by*32; if ( x < n && y < n && y <= x ) { c[offc + x + y*n] = -s[ix + iy*2].x; } x += 1; if ( x < n && y < n && y <= x ) { c[offc + x + y*n] = -s[ix + iy*2].y; } } } #else int xMaxBase = (3 + bx*4)*8; int yMaxBase = (3 + by*4)*8; int XNPass = xMaxBase + 8 < n; int YNPass = yMaxBase + 8 < n; int YXPass = yMaxBase + 8 <= xMaxBase; // This is only a small improvement (~1%) if (XNPass && YNPass && YXPass) { for ( int iy = 0; iy < 4; iy++ ) { for ( int ix = 0; ix < 4; ix++ ) { int x = threadIdx.x + (ix + bx*4)*8; int y = threadIdx.y + (iy + by*4)*8; c[offc + x + y*n] = -s[ix + iy*4]; } } } else if (XNPass && YNPass) { for ( int iy = 0; iy < 4; iy++ ) { for ( int ix = 0; ix < 4; ix++ ) { int x = threadIdx.x + (ix + bx*4)*8; int y = threadIdx.y + (iy + by*4)*8; if ( y <= x ) c[offc + x + y*n] = -s[ix + iy*4]; } } } else { for ( int iy = 0; iy < 4; iy++ ) { for ( int ix = 0; ix < 4; ix++ ) { int x = threadIdx.x + (ix + bx*4)*8; int y = threadIdx.y + (iy + by*4)*8; if ( x < n && y < n && y <= x ) c[offc + x + y*n] = -s[ix + iy*4]; } } } #endif // Release function-scope #defines #undef SYRK_WIDTH #undef DOUBLE_BUFFERED #undef USE_DOUBLE2 } struct multielm_data { int node; int offb; }; template< 
typename ELEMENT_TYPE > //#if SM_3X //__launch_bounds__(64, 14) //#endif __global__ void cu_multisyrk_r4x4( bool posdef, int* stat, multielm_data* mdata, int off, struct multinode_fact_type *ndatat ){ int bx, by; int n, m, k; int offa, offc; int lda, ldb; int nb; ELEMENT_TYPE s[16]; #if SM_3X #define SYRK_WIDTH 2 #define DOUBLE_BUFFERED 0 #else #define SYRK_WIDTH 4 #define DOUBLE_BUFFERED 0 #endif __shared__ volatile ELEMENT_TYPE as[32 * SYRK_WIDTH]; __shared__ volatile ELEMENT_TYPE bs[32 * SYRK_WIDTH]; #if (DOUBLE_BUFFERED) __shared__ volatile ELEMENT_TYPE as2[32 * SYRK_WIDTH]; __shared__ volatile ELEMENT_TYPE bs2[32 * SYRK_WIDTH]; #endif mdata += blockIdx.x; bx = mdata->node; ndatat += bx; k = stat[bx]; if ( k < 1 ) return; if ( ndatat->ib > ndatat->jb ) return; n = ndatat->nrows; lda = ndatat->done; m = ndatat->rght; by = lda + k; if ( by >= n || by >= m ) return; const double * __restrict__ a = ndatat->lval; const double * __restrict__ b = posdef ? ndatat->lval : ndatat->ldval; double * __restrict__ c = ndatat->lval; offa = by + lda*n; offc = by + by*n; lda = n; ldb = n; m -= by; n -= by; by = off + blockIdx.x - mdata->offb; if ( by > ((n - 1)/32 + 1)*((m - 1)/32 + 1) ) return; nb = (n - 1)/32 + 1; bx = by%nb; by = by/nb; for ( int i = 0; i < 16; i++ ) { s[i] = 0.0; } #if (DOUBLE_BUFFERED) loadDevToSmem_generic<SYRK_WIDTH>( (volatile double*)as, bs, a, b, bx, by, offa, lda, ldb, n, 0, k ); #endif for ( int i = 0; i < k; i += SYRK_WIDTH ) { #if (!DOUBLE_BUFFERED) loadDevToSmem_generic<SYRK_WIDTH>( (volatile double*)as, bs, a, b, bx, by, offa, lda, ldb, n, i, k ); #endif __syncthreads(); #if (DOUBLE_BUFFERED) if (i + SYRK_WIDTH < k) { loadDevToSmem_generic<SYRK_WIDTH>( as2, bs2, a, b, bx, by, offa, lda, ldb, n, i + SYRK_WIDTH, k ); } #endif #pragma unroll for ( int ix = 0; ix < SYRK_WIDTH; ix++) { for ( int iy = 0; iy < 4; iy++ ) { s[iy*4] += as[threadIdx.x + 32 * ix ]*bs[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 1] += as[threadIdx.x + 32 * ix + 8 ]*bs[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 2] += as[threadIdx.x + 32 * ix + 16]*bs[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 3] += as[threadIdx.x + 32 * ix + 24]*bs[threadIdx.y + 8*iy + 32 * ix]; } } __syncthreads(); #if (DOUBLE_BUFFERED) i += SYRK_WIDTH; if (i >= k) break; if (i + SYRK_WIDTH < k) { loadDevToSmem_generic<SYRK_WIDTH>( as, bs, a, b, bx, by, offa, lda, ldb, n, i + SYRK_WIDTH, k ); } #pragma unroll for ( int ix = 0; ix < SYRK_WIDTH; ix++) { for ( int iy = 0; iy < 4; iy++ ) { s[iy*4] += as2[threadIdx.x + 32 * ix ]*bs2[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 1] += as2[threadIdx.x + 32 * ix + 8 ]*bs2[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 2] += as2[threadIdx.x + 32 * ix + 16]*bs2[threadIdx.y + 8*iy + 32 * ix]; s[iy*4 + 3] += as2[threadIdx.x + 32 * ix + 24]*bs2[threadIdx.y + 8*iy + 32 * ix]; } } #endif } for ( int iy = 0; iy < 4; iy++ ) for ( int ix = 0; ix < 4; ix++ ) { int x = threadIdx.x + (ix + bx*4)*8; int y = threadIdx.y + (iy + by*4)*8; if ( x < n && y < m ) c[offc + x + y*lda] = c[offc + x + y*lda] - s[ix + iy*4]; } } template< typename ELEMENT_TYPE > __global__ void cu_syrk_r4x4( int n, int m, int k, double alpha, const double* a, int lda, const double* b, int ldb, double beta, double* c, int ldc ){ ELEMENT_TYPE s[16]; __shared__ volatile ELEMENT_TYPE as[128], bs[128]; for ( int i = 0; i < 16; i++ ) s[i] = 0; for ( int i = 0; i < k; i += 4 ) { loadDevToSmem_generic< 4 >( as, bs, a, b, blockIdx.x, blockIdx.y, 0, lda, ldb, n, i, k ); __syncthreads(); for ( int iy = 0; iy < 4; iy++ ) { s[iy*4] += as[threadIdx.x ]*bs[threadIdx.y + 
8*iy]; s[iy*4 + 1] += as[threadIdx.x + 8 ]*bs[threadIdx.y + 8*iy]; s[iy*4 + 2] += as[threadIdx.x + 16]*bs[threadIdx.y + 8*iy]; s[iy*4 + 3] += as[threadIdx.x + 24]*bs[threadIdx.y + 8*iy]; } for ( int iy = 0; iy < 4; iy++ ) { s[iy*4] += as[threadIdx.x + 32]*bs[threadIdx.y + 8*iy + 32]; s[iy*4 + 1] += as[threadIdx.x + 40]*bs[threadIdx.y + 8*iy + 32]; s[iy*4 + 2] += as[threadIdx.x + 48]*bs[threadIdx.y + 8*iy + 32]; s[iy*4 + 3] += as[threadIdx.x + 56]*bs[threadIdx.y + 8*iy + 32]; } for ( int iy = 0; iy < 4; iy++ ) { s[iy*4] += as[threadIdx.x + 64]*bs[threadIdx.y + 8*iy + 64]; s[iy*4 + 1] += as[threadIdx.x + 72]*bs[threadIdx.y + 8*iy + 64]; s[iy*4 + 2] += as[threadIdx.x + 80]*bs[threadIdx.y + 8*iy + 64]; s[iy*4 + 3] += as[threadIdx.x + 88]*bs[threadIdx.y + 8*iy + 64]; } for ( int iy = 0; iy < 4; iy++ ) { s[iy*4] += as[threadIdx.x + 96 ]*bs[threadIdx.y + 8*iy + 96]; s[iy*4 + 1] += as[threadIdx.x + 104]*bs[threadIdx.y + 8*iy + 96]; s[iy*4 + 2] += as[threadIdx.x + 112]*bs[threadIdx.y + 8*iy + 96]; s[iy*4 + 3] += as[threadIdx.x + 120]*bs[threadIdx.y + 8*iy + 96]; } __syncthreads(); } if ( beta ) { for ( int iy = 0; iy < 4; iy++ ) for ( int ix = 0; ix < 4; ix++ ) { int x = threadIdx.x + (ix + blockIdx.x*4)*8; int y = threadIdx.y + (iy + blockIdx.y*4)*8; if ( x < n && y < m ) c[x + y*ldc] = beta*c[x + y*ldc] + alpha*s[ix + iy*4]; } } else { for ( int iy = 0; iy < 4; iy++ ) for ( int ix = 0; ix < 4; ix++ ) { int x = threadIdx.x + (ix + blockIdx.x*4)*8; int y = threadIdx.y + (iy + blockIdx.y*4)*8; if ( x < n && y < m ) c[x + y*ldc] = alpha*s[ix + iy*4]; } } } } /* anon namespace */ /******************************************************************************* * Following routines are exported with C binding so can be called from Fortran ******************************************************************************/ extern "C" { void spral_ssids_dsyrk(cudaStream_t *stream, int n, int m, int k, double alpha, const double* a, int lda, const double* b, int ldb, double beta, double* c, int ldc) { int nx, ny; nx = (n - 1)/32 + 1; ny = (m - 1)/32 + 1; dim3 threads(8,8); dim3 grid(nx,ny); cu_syrk_r4x4< double > <<< grid, threads, 0, *stream >>> ( n, m, k, alpha, a, lda, b, ldb, beta, c, ldc ); } void spral_ssids_multidsyrk(cudaStream_t *stream, bool posdef, int nb, int* stat, struct multielm_data* mdata, struct multinode_fact_type *ndata) { dim3 threads(8,8); for ( int i = 0; i < nb; i += MAX_CUDA_BLOCKS ) { int blocks = min(MAX_CUDA_BLOCKS, nb - i); cu_multisyrk_r4x4< double > <<< blocks, threads, 0, *stream >>> ( posdef, stat, mdata + i, i, ndata ); } } void spral_ssids_multidsyrk_low_col(cudaStream_t *stream, int nb, struct multisyrk_type* msdata, double* c) { dim3 threads(8,8); for ( int i = 0; i < nb; i += MAX_CUDA_BLOCKS ) { int blocks = min(MAX_CUDA_BLOCKS, nb - i); cu_multisyrk_lc_r4x4< double > <<< blocks, threads, 0, *stream >>>( msdata + i, i, c ); } } } // end extern "C"
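/*
 * Illustrative host-side usage sketch (not part of the original SPRAL/SSIDS
 * sources): spral_ssids_dsyrk above computes C <- beta*C + alpha*A*B^T for a
 * column-major n x k matrix A (leading dimension lda), an m x k matrix B
 * (ldb) and an n x m result C (ldc), all resident on the device. The caller
 * below is hypothetical; the buffer names, the leading-dimension choices and
 * the alpha = -1, beta = 1 combination (mirroring the subtraction performed
 * by the multisyrk kernels) are assumptions made for illustration only.
 */
void example_spral_dsyrk_update(cudaStream_t stream,
                                const double *d_a, /* n x k, device memory */
                                const double *d_b, /* m x k, device memory */
                                double *d_c,       /* n x m, device memory */
                                int n, int m, int k)
{
   /* queue C := C - A * B^T on the caller's stream */
   spral_ssids_dsyrk(&stream, n, m, k,
                     -1.0, d_a, n,  /* lda = n */
                           d_b, m,  /* ldb = m */
                      1.0, d_c, n); /* ldc = n */
}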
#include <cuda_runtime.h> #include <math.h> #include <stdio.h> #include <sys/time.h> #define max(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a > _b ? _a : _b;}) #define min(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a < _b ? _a : _b;}) #define SPEED_OF_LIGHT 299792458. #define BAD_VALUE -999999. #define THRD_PER_RUN 128 struct InputImageArrs { double *lat; double *lon; double *dem; }; struct OutputImageArrs { double *azt; double *rgm; double *azoff; double *rgoff; }; struct stateVector { double t; double px; double py; double pz; double vx; double vy; double vz; }; struct Orbit { int nVec; struct stateVector *svs; }; struct Ellipsoid { double a; double e2; }; struct Poly1d { int order; double mean; double norm; double *coeffs; }; __constant__ double d_inpts_double[9]; __constant__ int d_inpts_int[3]; // Mem usage: 27 doubles (216 bytes) per call __device__ int interpolateOrbit(struct Orbit *orb, double t, double *xyz, double *vel) { double h[4], hdot[4], f0[4], f1[4], g0[4], g1[4]; double sum = 0.0; int i; int v0 = -1; if ((t < orb->svs[0].t) || (t > orb->svs[orb->nVec-1].t)) return 1; for (i=0; i<orb->nVec; i++) { if ((orb->svs[i].t >= t) && (v0 == -1)) { v0 = min(max((i-2),0),(orb->nVec-4)); } } f1[0] = t - orb->svs[v0].t; f1[1] = t - orb->svs[v0+1].t; f1[2] = t - orb->svs[v0+2].t; f1[3] = t - orb->svs[v0+3].t; sum = (1.0 / (orb->svs[v0].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+3].t)); f0[0] = 1.0 - (2.0 * (t - orb->svs[v0].t) * sum); sum = (1.0 / (orb->svs[v0+1].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+3].t)); f0[1] = 1.0 - (2.0 * (t - orb->svs[v0+1].t) * sum); sum = (1.0 / (orb->svs[v0+2].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+3].t)); f0[2] = 1.0 - (2.0 * (t - orb->svs[v0+2].t) * sum); sum = (1.0 / (orb->svs[v0+3].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+2].t)); f0[3] = 1.0 - (2.0 * (t - orb->svs[v0+3].t) * sum); h[0] = ((t - orb->svs[v0+1].t) / (orb->svs[v0].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0].t - orb->svs[v0+3].t)); h[1] = ((t - orb->svs[v0].t) / (orb->svs[v0+1].t - orb->svs[v0].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+1].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+1].t - orb->svs[v0+3].t)); h[2] = ((t - orb->svs[v0].t) / (orb->svs[v0+2].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+2].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+2].t - orb->svs[v0+3].t)); h[3] = ((t - orb->svs[v0].t) / (orb->svs[v0+3].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+3].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+3].t - orb->svs[v0+2].t)); sum = (((t - orb->svs[v0+2].t) / (orb->svs[v0].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0].t - orb->svs[v0+3].t))) * (1.0 / (orb->svs[v0].t - orb->svs[v0+1].t)); sum += (((t - orb->svs[v0+1].t) / (orb->svs[v0].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0].t - orb->svs[v0+3].t))) * (1.0 / (orb->svs[v0].t - orb->svs[v0+2].t)); sum += (((t - orb->svs[v0+1].t) / (orb->svs[v0].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0].t - orb->svs[v0+2].t))) * (1.0 / (orb->svs[v0].t - orb->svs[v0+3].t)); hdot[0] = sum; sum = (((t 
- orb->svs[v0+2].t) / (orb->svs[v0+1].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+1].t - orb->svs[v0+3].t))) * (1.0 / (orb->svs[v0+1].t - orb->svs[v0].t)); sum += (((t - orb->svs[v0].t) / (orb->svs[v0+1].t - orb->svs[v0].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+1].t - orb->svs[v0+3].t))) * (1.0 / (orb->svs[v0+1].t - orb->svs[v0+2].t)); sum += (((t - orb->svs[v0].t) / (orb->svs[v0+1].t - orb->svs[v0].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+1].t - orb->svs[v0+2].t))) * (1.0 / (orb->svs[v0+1].t - orb->svs[v0+3].t)); hdot[1] = sum; sum = (((t - orb->svs[v0+1].t) / (orb->svs[v0+2].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+2].t - orb->svs[v0+3].t))) * (1.0 / (orb->svs[v0+2].t - orb->svs[v0].t)); sum += (((t - orb->svs[v0].t) / (orb->svs[v0+2].t - orb->svs[v0].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+2].t - orb->svs[v0+3].t))) * (1.0 / (orb->svs[v0+2].t - orb->svs[v0+1].t)); sum += (((t - orb->svs[v0].t) / (orb->svs[v0+2].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+2].t - orb->svs[v0+1].t))) * (1.0 / (orb->svs[v0+2].t - orb->svs[v0+3].t)); hdot[2] = sum; sum = (((t - orb->svs[v0+1].t) / (orb->svs[v0+3].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+3].t - orb->svs[v0+2].t))) * (1.0 / (orb->svs[v0+3].t - orb->svs[v0].t)); sum += (((t - orb->svs[v0].t) / (orb->svs[v0+3].t - orb->svs[v0].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+3].t - orb->svs[v0+2].t))) * (1.0 / (orb->svs[v0+3].t - orb->svs[v0+1].t)); sum += (((t - orb->svs[v0].t) / (orb->svs[v0+3].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+3].t - orb->svs[v0+1].t))) * (1.0 / (orb->svs[v0+3].t - orb->svs[v0+2].t)); hdot[3] = sum; g1[0] = h[0] + (2.0 * (t - orb->svs[v0].t) * hdot[0]); g1[1] = h[1] + (2.0 * (t - orb->svs[v0+1].t) * hdot[1]); g1[2] = h[2] + (2.0 * (t - orb->svs[v0+2].t) * hdot[2]); g1[3] = h[3] + (2.0 * (t - orb->svs[v0+3].t) * hdot[3]); sum = (1.0 / (orb->svs[v0].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+3].t)); g0[0] = 2.0 * ((f0[0] * hdot[0]) - (h[0] * sum)); sum = (1.0 / (orb->svs[v0+1].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+3].t)); g0[1] = 2.0 * ((f0[1] * hdot[1]) - (h[1] * sum)); sum = (1.0 / (orb->svs[v0+2].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+3].t)); g0[2] = 2.0 * ((f0[2] * hdot[2]) - (h[2] * sum)); sum = (1.0 / (orb->svs[v0+3].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+2].t)); g0[3] = 2.0 * ((f0[3] * hdot[3]) - (h[3] * sum)); xyz[0] = (((orb->svs[v0].px * f0[0]) + (orb->svs[v0].vx * f1[0])) * h[0] * h[0]) + (((orb->svs[v0+1].px * f0[1]) + (orb->svs[v0+1].vx * f1[1])) * h[1] * h[1]) + (((orb->svs[v0+2].px * f0[2]) + (orb->svs[v0+2].vx * f1[2])) * h[2] * h[2]) + (((orb->svs[v0+3].px * f0[3]) + (orb->svs[v0+3].vx * f1[3])) * h[3] * h[3]); xyz[1] = (((orb->svs[v0].py * f0[0]) + (orb->svs[v0].vy * f1[0])) * h[0] * h[0]) + (((orb->svs[v0+1].py * f0[1]) + (orb->svs[v0+1].vy * f1[1])) * h[1] * h[1]) + (((orb->svs[v0+2].py * f0[2]) + (orb->svs[v0+2].vy * f1[2])) * h[2] * h[2]) + (((orb->svs[v0+3].py * f0[3]) + (orb->svs[v0+3].vy * f1[3])) * h[3] * h[3]); xyz[2] = (((orb->svs[v0].pz * f0[0]) + (orb->svs[v0].vz * f1[0])) * h[0] * h[0]) + (((orb->svs[v0+1].pz * f0[1]) + (orb->svs[v0+1].vz * f1[1])) * h[1] * h[1]) + (((orb->svs[v0+2].pz * f0[2]) + 
(orb->svs[v0+2].vz * f1[2])) * h[2] * h[2]) + (((orb->svs[v0+3].pz * f0[3]) + (orb->svs[v0+3].vz * f1[3])) * h[3] * h[3]); vel[0] = (((orb->svs[v0].px * g0[0]) + (orb->svs[v0].vx * g1[0])) * h[0]) + (((orb->svs[v0+1].px * g0[1]) + (orb->svs[v0+1].vx * g1[1])) * h[1]) + (((orb->svs[v0+2].px * g0[2]) + (orb->svs[v0+2].vx * g1[2])) * h[2]) + (((orb->svs[v0+3].px * g0[3]) + (orb->svs[v0+3].vx * g1[3])) * h[3]); vel[1] = (((orb->svs[v0].py * g0[0]) + (orb->svs[v0].vy * g1[0])) * h[0]) + (((orb->svs[v0+1].py * g0[1]) + (orb->svs[v0+1].vy * g1[1])) * h[1]) + (((orb->svs[v0+2].py * g0[2]) + (orb->svs[v0+2].vy * g1[2])) * h[2]) + (((orb->svs[v0+3].py * g0[3]) + (orb->svs[v0+3].vy * g1[3])) * h[3]); vel[2] = (((orb->svs[v0].pz * g0[0]) + (orb->svs[v0].vz * g1[0])) * h[0]) + (((orb->svs[v0+1].pz * g0[1]) + (orb->svs[v0+1].vz * g1[1])) * h[1]) + (((orb->svs[v0+2].pz * g0[2]) + (orb->svs[v0+2].vz * g1[2])) * h[2]) + (((orb->svs[v0+3].pz * g0[3]) + (orb->svs[v0+3].vz * g1[3])) * h[3]); return 0; // Successful interpolation } // 8 bytes per call __device__ void llh2xyz(struct Ellipsoid *elp, double *xyz, double *llh) { double re; re = elp->a / sqrt(1.0 - (elp->e2 * pow(sin(llh[0]),2))); xyz[0] = (re + llh[2]) * cos(llh[0]) * cos(llh[1]); xyz[1] = (re + llh[2]) * cos(llh[0]) * sin(llh[1]); xyz[2] = ((re * (1.0 - elp->e2)) + llh[2]) * sin(llh[0]); } // 36 bytes per call __device__ double evalPoly(struct Poly1d *poly, double xin) { double val, xval, scalex; int i; val = 0.; scalex = 1.; xval = (xin - poly->mean) / poly->norm; for (i=0; i<=poly->order; i++,scalex*=xval) val += scalex * poly->coeffs[i]; return val; } // 0 bytes per call __device__ double dot(double *a, double *b) { return (a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2]); } __global__ void runGeo(struct Orbit orb, struct Poly1d fdvsrng, struct Poly1d fddotvsrng, struct OutputImageArrs outImgArrs, struct InputImageArrs inImgArrs, int NPIXELS, int OFFSET_LINE) { int pixel = (blockDim.x * blockIdx.x) + threadIdx.x; if (pixel < NPIXELS) { // The number of pixels in a run changes based on if it's a full run or a partial run /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Input mapping * * int[0] = demLength * int[1] = demWidth * int[2] = bistatic * * double[0] = major * double[1] = eccentricitySquared * double[2] = tstart * double[3] = tend * double[4] = wvl * double[5] = rngstart * double[6] = rngend * double[7] = dmrg * double[8] = dtaz * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ double xyz[3], llh[3], satx[3], satv[3], dr[3]; double rngpix, tline, tprev, fnprime, fdop, fdopder; int stat, i, j; bool isOutside, runIter; struct Ellipsoid elp; elp.a = d_inpts_double[0]; elp.e2 = d_inpts_double[1]; isOutside = false; runIter = true; llh[0] = inImgArrs.lat[pixel] * (M_PI / 180.); llh[1] = inImgArrs.lon[pixel] * (M_PI / 180.); llh[2] = inImgArrs.dem[pixel]; llh2xyz(&elp,xyz,llh); tline = .5 * (d_inpts_double[2] + d_inpts_double[3]); stat = interpolateOrbit(&orb, tline, satx, satv); // Originally we got xyz_mid and vel_mid, then copied into satx/satv, // but since these are all independent here it's fine if (stat != 0) isOutside = true; // Should exit, but this is next-best thing... for (i=0; i<51; i++) { // The whole "51 iterations" thing is messing with my coding OCD... 
if (runIter) { // Instead of breaking the loop tprev = tline; for (j=0; j<3; j++) dr[j] = xyz[j] - satx[j]; rngpix = sqrt(pow(dr[0],2) + pow(dr[1],2) + pow(dr[2],2)); // No need to add the norm function (useless one-line) fdop = .5 * d_inpts_double[4] * evalPoly(&fdvsrng, rngpix); fdopder = .5 * d_inpts_double[4] * evalPoly(&fddotvsrng, rngpix); fnprime = (((fdop / rngpix) + fdopder) * dot(dr,satv)) - dot(satv,satv); tline = tline - ((dot(dr,satv) - (fdop * rngpix)) / fnprime); stat = interpolateOrbit(&orb, tline, satx, satv); if (stat != 0) { tline = BAD_VALUE; rngpix = BAD_VALUE; runIter = false; } if (fabs(tline - tprev) < 5.e-9) runIter = false; } } if ((tline < d_inpts_double[2]) || (tline > d_inpts_double[3])) isOutside = true; rngpix = sqrt(pow((xyz[0]-satx[0]),2) + pow((xyz[1]-satx[1]),2) + pow((xyz[2]-satx[2]),2)); if ((rngpix < d_inpts_double[5]) || (rngpix > d_inpts_double[6])) isOutside = true; if (d_inpts_int[2] == 1) { // Bistatic (won't be true for awhile, not currently implemented) tline = tline + ((2. * rngpix) / SPEED_OF_LIGHT); if ((tline < d_inpts_double[2]) || (tline > d_inpts_double[3])) isOutside = true; stat = interpolateOrbit(&orb, tline, satx, satv); if (stat != 0) isOutside = true; rngpix = sqrt(pow((xyz[0]-satx[0]),2) + pow((xyz[1]-satx[1]),2) + pow((xyz[2]-satx[2]),2)); if ((rngpix < d_inpts_double[5]) || (rngpix > d_inpts_double[6])) isOutside = true; } if (!isOutside) { outImgArrs.rgm[pixel] = rngpix; outImgArrs.azt[pixel] = tline; outImgArrs.rgoff[pixel] = ((rngpix - d_inpts_double[5]) / d_inpts_double[7]) - double(int(pixel%d_inpts_int[1])); outImgArrs.azoff[pixel] = ((tline - d_inpts_double[2]) / d_inpts_double[8]) - double(int(pixel/d_inpts_int[1])+OFFSET_LINE); } else { outImgArrs.rgm[pixel] = BAD_VALUE; outImgArrs.azt[pixel] = BAD_VALUE; outImgArrs.rgoff[pixel] = BAD_VALUE; outImgArrs.azoff[pixel] = BAD_VALUE; } } } double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } int nLinesPossible(int length, int width) { // 332 bytes per runGeo call (let's say 500 bytes for safety) // Device needs 7 * pixPerRun * sizeof(double) bytes malloc'ed // (56 * pixPerRun) - # bytes malloc'd on device // (500 * pixPerRun) - # bytes used by sum of all runGeo calls size_t freeByte, totalByte; int linesPerRun; cudaMemGetInfo(&freeByte, &totalByte); printf("tb %ld\n", totalByte); totalByte = size_t((double(totalByte) / 5.e8) * 5.e8); // Round down to nearest .5 GB printf("tba %ld\n", totalByte); printf("Device has roughly %.4f GB of memory, ", double(totalByte)/1.e9); linesPerRun = totalByte / (556 * width); printf("and can process roughly %d lines (each with %d pixels) per run.\n", linesPerRun, width); return linesPerRun; } void setOrbit(struct Orbit *orb) { orb->svs = (struct stateVector *)malloc(orb->nVec * sizeof(struct stateVector)); } void freeOrbit(struct Orbit *orb) { free(orb->svs); } void setPoly1d(struct Poly1d *poly) { poly->coeffs = (double *)malloc((poly->order+1) * sizeof(double)); } void freePoly1d(struct Poly1d *poly) { free(poly->coeffs); } void runGPUGeo(int iter, int numPix, double *h_inpts_dbl, int *h_inpts_int, double *h_lat, double *h_lon, double *h_dem, int h_orbNvec, double *h_orbSvs, int h_polyOrd, double h_polyMean, double h_polyNorm, double *h_polyCoeffs, double h_polyPRF, double **accArr) { double iStartCpy, iStartRun, iEndRun, iEndCpy; int i; struct stateVector *d_svs; double *d_fdPolyCoeffs, *d_fddotPolyCoeffs, *d_lat, *d_lon, *d_dem, *d_azt, *d_rgm, *d_azoff, *d_rgoff; struct 
InputImageArrs inImgArrs; struct OutputImageArrs outImgArrs; struct Orbit orb; struct Poly1d fdvsrng, fddotvsrng; cudaSetDevice(0); printf(" Allocating memory...\n"); size_t nb_pixels = numPix * sizeof(double); orb.nVec = h_orbNvec; setOrbit(&orb); // Malloc memory for orbit on host (sizeof(stateVector)*nvec doubles) for (i=0; i<h_orbNvec; i++) { orb.svs[i].t = h_orbSvs[7*i]; orb.svs[i].px = h_orbSvs[(7*i)+1]; orb.svs[i].py = h_orbSvs[(7*i)+2]; orb.svs[i].pz = h_orbSvs[(7*i)+3]; orb.svs[i].vx = h_orbSvs[(7*i)+4]; orb.svs[i].vy = h_orbSvs[(7*i)+5]; orb.svs[i].vz = h_orbSvs[(7*i)+6]; } fdvsrng.order = h_polyOrd; fdvsrng.mean = h_polyMean; fdvsrng.norm = h_polyNorm; setPoly1d(&fdvsrng); // Malloc memory for fdvsrng Poly1d on host (order+1 doubles) for (i=0; i<=h_polyOrd; i++) fdvsrng.coeffs[i] = h_polyPRF * h_polyCoeffs[i]; if (h_polyOrd == 0) { fddotvsrng.order = 0; fddotvsrng.mean = 0.; fddotvsrng.norm = 1.; setPoly1d(&fddotvsrng); // Malloc memory for fddotvsrng Poly1d on host fddotvsrng.coeffs[0] = 0.; } else { fddotvsrng.order = h_polyOrd-1; fddotvsrng.mean = fdvsrng.mean; fddotvsrng.norm = fdvsrng.norm; setPoly1d(&fddotvsrng); // As above for (i=1; i<=h_polyOrd; i++) fddotvsrng.coeffs[i-1] = (i * fdvsrng.coeffs[i]) / fdvsrng.norm; } cudaMalloc((void**)&d_svs, (orb.nVec*sizeof(struct stateVector))); cudaMalloc((double**)&d_fdPolyCoeffs, ((fdvsrng.order+1)*sizeof(double))); cudaMalloc((double**)&d_fddotPolyCoeffs, ((fddotvsrng.order+1)*sizeof(double))); cudaMalloc((double**)&d_lat, nb_pixels); cudaMalloc((double**)&d_lon, nb_pixels); cudaMalloc((double**)&d_dem, nb_pixels); cudaMalloc((double**)&d_azt, nb_pixels); cudaMalloc((double**)&d_rgm, nb_pixels); cudaMalloc((double**)&d_azoff, nb_pixels); cudaMalloc((double**)&d_rgoff, nb_pixels); printf(" Done.\n Copying data to GPU...\n"); iStartCpy = cpuSecond(); cudaMemcpy(d_svs, orb.svs, (orb.nVec*sizeof(struct stateVector)), cudaMemcpyHostToDevice); cudaMemcpy(d_fdPolyCoeffs, fdvsrng.coeffs, ((fdvsrng.order+1)*sizeof(double)), cudaMemcpyHostToDevice); cudaMemcpy(d_fddotPolyCoeffs, fddotvsrng.coeffs, ((fddotvsrng.order+1)*sizeof(double)), cudaMemcpyHostToDevice); cudaMemcpy(d_lat, h_lat, nb_pixels, cudaMemcpyHostToDevice); cudaMemcpy(d_lon, h_lon, nb_pixels, cudaMemcpyHostToDevice); cudaMemcpy(d_dem, h_dem, nb_pixels, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_inpts_double, h_inpts_dbl, (9*sizeof(double))); cudaMemcpyToSymbol(d_inpts_int, h_inpts_int, (3*sizeof(int))); freeOrbit(&orb); // Since the data for these is already on the GPU, we need the space again freePoly1d(&fdvsrng); freePoly1d(&fddotvsrng); orb.svs = d_svs; // Magic of the logic - we pass the objects in by value, but the variable fdvsrng.coeffs = d_fdPolyCoeffs; // size components (svs/coeffs) are malloc'ed on the GPU, fddotvsrng.coeffs = d_fddotPolyCoeffs; // so we can just have the objects store device ptrs inImgArrs.lat = d_lat; inImgArrs.lon = d_lon; inImgArrs.dem = d_dem; outImgArrs.azt = d_azt; outImgArrs.rgm = d_rgm; outImgArrs.azoff = d_azoff; outImgArrs.rgoff = d_rgoff; dim3 block(THRD_PER_RUN); dim3 grid((numPix + (THRD_PER_RUN - 1)) / THRD_PER_RUN); if ((grid.x * THRD_PER_RUN) > numPix) printf(" (NOTE: There will be %d 'empty' threads).\n", ((grid.x*THRD_PER_RUN)-numPix)); if (iter > -1) printf(" Starting GPU Geo2rdr for run %d...\n", iter); else printf(" Starting GPU Geo2rdr for remaining lines...\n"); iStartRun = cpuSecond(); if (iter > -1) runGeo <<<grid, block>>>(orb, fdvsrng, fddotvsrng, outImgArrs, inImgArrs, numPix, int((iter*numPix)/h_inpts_int[1])); else 
runGeo <<<grid, block>>>(orb, fdvsrng, fddotvsrng, outImgArrs, inImgArrs, numPix, (-1*iter)); // This time iter is -1*nRuns*linesPerRun (i.e. a final partial block run) cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) { printf("Sync kernel error: %s\n", cudaGetErrorString(errSync)); } if (errAsync != cudaSuccess) { printf("Async kernel error: %s\n", cudaGetErrorString(errAsync)); } iEndRun = cpuSecond(); if (iter > -1) printf(" GPU finished run %d in %f s.\n", iter, (iEndRun-iStartRun)); else printf(" GPU finished remaining lines in %f s.\n", (iEndRun-iStartRun)); printf(" Copying memory back to host...\n"); cudaMemcpy(accArr[0], outImgArrs.rgm, nb_pixels, cudaMemcpyDeviceToHost); cudaMemcpy(accArr[1], outImgArrs.azt, nb_pixels, cudaMemcpyDeviceToHost); cudaMemcpy(accArr[2], outImgArrs.rgoff, nb_pixels, cudaMemcpyDeviceToHost); cudaMemcpy(accArr[3], outImgArrs.azoff, nb_pixels, cudaMemcpyDeviceToHost); iEndCpy = cpuSecond(); if (iter > -1) printf(" GPU finished run %d (with memory copies) in %f s.\n", iter, (iEndCpy-iStartCpy)); else printf(" GPU finished remaining lines (with memory copies) in %f s.\n", (iEndCpy-iStartCpy)); printf(" Cleaning device memory and returning to main Geo2rdr function...\n"); cudaFree(d_svs); cudaFree(d_fdPolyCoeffs); cudaFree(d_fddotPolyCoeffs); cudaFree(d_lat); cudaFree(d_lon); cudaFree(d_dem); cudaFree(d_azt); cudaFree(d_rgm); cudaFree(d_azoff); cudaFree(d_rgoff); cudaDeviceReset(); }
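/*
 * Editor's note: the loop in runGeo above is a Newton iteration on azimuth time.
 * It drives the range-Doppler residual dot(dr, satv) - fdop*rngpix to zero, using
 * fnprime as the analytic derivative and stopping once successive times differ by
 * less than 5e-9 s. The following is a minimal host-side sketch of that update
 * only, with the orbit replaced by a straight line and the Doppler centroid set to
 * zero purely for illustration; the Vec3 helpers and all numeric values are
 * assumptions of the sketch, not part of the kernel above.
 */
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

static double dot3(const Vec3 &a, const Vec3 &b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static Vec3 sub3(const Vec3 &a, const Vec3 &b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }

int main() {
    // Hypothetical target position and a linear "orbit" (position + velocity).
    Vec3 xyz  = {7000e3, 100e3, 50e3};
    Vec3 pos0 = {6900e3, 0.0,   0.0};
    Vec3 vel  = {0.0,    7600.0, 0.0};

    double t = 0.0;                        // initial azimuth-time guess (mid-scene in the kernel)
    for (int i = 0; i < 51; ++i) {         // same fixed iteration cap as runGeo
        Vec3 satx = {pos0.x + vel.x*t, pos0.y + vel.y*t, pos0.z + vel.z*t};
        Vec3 dr   = sub3(xyz, satx);
        double rng = std::sqrt(dot3(dr, dr));
        double fdop = 0.0, fdopder = 0.0;  // zero-Doppler assumption for this sketch
        // f(t)  = dot(dr, v) - fdop*rng          (range-Doppler equation)
        // f'(t) = ((fdop/rng) + fdopder)*dot(dr, v) - dot(v, v)
        double fn      = dot3(dr, vel) - fdop*rng;
        double fnprime = ((fdop/rng) + fdopder)*dot3(dr, vel) - dot3(vel, vel);
        double tprev = t;
        t -= fn / fnprime;
        if (std::fabs(t - tprev) < 5.e-9) break;   // same convergence threshold as the kernel
    }
    printf("converged azimuth time: %.9f s\n", t);
    return 0;
}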
This example requires NVIDIA Ampere GPU or later. */ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS Includes #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" // CUTLASS Utility Includes #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/gemm_complex.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // Define the overal warp-level problem shape int const kM = 27; int const kN = 31; int const kK = 17; /////////////////////////////////////////////////////////////////////////////////////////////////// // Define a warp-level GEMM operator. // // This template could be part of the CUTLASS Template Library or implemented internally. This // wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be // instantiated in device code. namespace cutlass { namespace gemm { namespace warp { template < typename Shape, typename InstructionShape, typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementScalar > class GemmTensorOp { public: using WarpShape = GemmShape< ((Shape::kM + InstructionShape::kM - 1) / InstructionShape::kM) * InstructionShape::kM, ((Shape::kN + InstructionShape::kN - 1) / InstructionShape::kN) * InstructionShape::kN, InstructionShape::kK >; using MmaWarp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor // Layout of C matrix >::Type; // Number of 'K groups' int const kKgroups = (Shape::kK + InstructionShape::kK - 1) / InstructionShape::kK; // Define a 'FragmentIterator' to iterate over slices of accumulators using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename MmaWarp::Shape, InstructionShape, double, typename MmaWarp::Policy::Operator::FragmentC, cutlass::layout::RowMajor >; // Define an epilogue 'Tile Iteterator' to iterate over slices of elements in Shared Memory using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpCanonical< typename MmaWarp::Shape, InstructionShape, double, cutlass::layout::RowMajor >; using TensorRefA = typename MmaWarp::IteratorA::TensorRef; using TensorRefB = typename MmaWarp::IteratorB::TensorRef; using TensorRefC = typename AccumulatorTileIterator::TensorRef; public: CUTLASS_HOST_DEVICE GemmTensorOp() { } CUTLASS_DEVICE void operator()( ElementScalar alpha, TensorRefA ref_A, TensorRefB ref_B, ElementScalar beta, TensorRefC ref_C, TensorRefC ref_D, int lane_id) const { // Instantiate iterators pointing to slices of the A and B matrices in shared memory typename MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id); typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id); // Instantiate and clear accumulator tile holding the C matrix typename MmaWarp::FragmentC accum; accum.clear(); // Instantiate the warp-level matrix multiply 
operator MmaWarp mma_op; // Instantiate fragments holding the slice of the matrix held by each warp typename MmaWarp::FragmentA frag_A[2]; typename MmaWarp::FragmentB frag_B[2]; // Load fragments from shared memory iter_A.load(frag_A[0]); iter_B.load(frag_B[0]); ++iter_A; ++iter_B; // Load fragments from shared memory CUTLASS_PRAGMA_UNROLL for (int k = 0; k < kKgroups; ++k) { // Load fragments from shared memory iter_A.load(frag_A[(k + 1) % 2]); iter_B.load(frag_B[(k + 1) % 2]); ++iter_A; ++iter_B; // Compute the matrix multiply mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum); } // Instantiate iterators FragmentIterator accum_frag_it(accum); AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id); AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id); // Define function objects for linear scaling operation cutlass::multiplies<typename FragmentIterator::Fragment> mul_source; cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator; // Iterate over the epilogue components CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) { // Define storage for slices of the accumulators typename FragmentIterator::Fragment accum_fragment; typename FragmentIterator::Fragment source_fragment; // Select a slice of accumulators from the accumulator tile accum_frag_it.load(accum_fragment); ++accum_frag_it; // Load a corresponding slice from Shared memory source_tile_it.load(source_fragment); ++source_tile_it; // Compute linear scaling - alpha * AB + beta * C source_fragment = mul_source(beta, source_fragment); accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment); // Store the result to shared memory dest_tile_it.store(accum_fragment); ++dest_tile_it; } } }; } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held // in Shared Memory. __global__ void kernel( double *D_gmem, double alpha, double const *A_gmem, double const *B_gmem, double beta, double const *C_gmem) { // Define several matrices in shared memory __shared__ double A[kM][kK]; __shared__ double B[kN][kK]; __shared__ double C[kM][kN]; // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { for (int k = 0; k < kK; ++k) { A[m][k] = A_gmem[m * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { for (int k = 0; k < kK; ++k) { B[n][k] = B_gmem[n * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { C[m][n] = C_gmem[m * kN + n]; } } } __syncthreads(); // // Instantiate a warp-level matrix multiply operator given the fundamental instruction shape (8x8x4), // overall shape, data type of each operand, and layout of each operand. 
// using GemmTensorOp = cutlass::gemm::warp::GemmTensorOp< cutlass::gemm::GemmShape<kM, kN, kK>, cutlass::gemm::GemmShape<8, 8, 4>, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor, // Layout of C matrix double // Scalar type of alpha and beta >; // Instantiate the GEMM operator GemmTensorOp gemm; // Execute the warp-level GEMM operation gemm( alpha, {&A[0][0], kK}, {&B[0][0], kK}, beta, {&C[0][0], kN}, {&C[0][0], kN}, threadIdx.x); __syncthreads(); // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { D_gmem[m * kN + n] = C[m][n]; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to canonical warp-level GEMM operation int main(int argc, const char *arg[]) { bool notSupported = false; // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "This example requires compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Return 0 so tests are considered passing if run on unsupported platforms. return 0; } cutlass::HostTensor<double, cutlass::layout::RowMajor> A({kM, kK}); cutlass::HostTensor<double, cutlass::layout::ColumnMajor> B({kK, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> C({kM, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> D({kM, kN}); uint64_t seed = 2020; double max = 8; double min = -8; cutlass::reference::host::TensorFillRandomUniform( A.host_view(), seed, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( B.host_view(), seed + 17, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( C.host_view(), seed + 31, max, min, 0 ); A.sync_device(); B.sync_device(); C.sync_device(); D.sync_device(); dim3 grid(1,1); dim3 block(32, 1, 1); double alpha = 2.25; double beta = 1.24; kernel<<< grid, block >>>( D.device_data(), alpha, A.device_data(), B.device_data(), beta, C.device_data() ); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Failed to synchronize device after kernel launch." << std::endl; return -1; } D.sync_host(); // Compute reference on host cutlass::HostTensor<double, cutlass::layout::RowMajor> D_ref({kM, kN}, false); cutlass::reference::host::GemmComplex( {kM, kN, kK}, alpha, A.host_ref(), cutlass::ComplexTransform::kNone, B.host_ref(), cutlass::ComplexTransform::kNone, beta, C.host_ref(), D_ref.host_ref(), double() ); // Verify reference matches computed if (!cutlass::reference::host::TensorEquals( D.host_view(), D_ref.host_view())) { std::cerr << "A =\n" << A.host_view() << "\n\nB = \n" << B.host_view() << "\n\nC = " << C.host_view() << "\n\nRef =\n" << D_ref.host_view() << "\n\nD =\n" << D.host_view() << "\n\n"; std::cerr << "Error - device results mismatch host reference." 
<< std::endl; return -1; } std::cout << "Passed" << std::endl; return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////
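/*
 * Editor's note: the warp-level operator in this example computes
 * D = alpha*A*B + beta*C with A row-major (kM x kK), B column-major (kK x kN),
 * and C/D row-major (kM x kN), which is what the GemmComplex host reference
 * verifies. The plain triple-loop reference below restates the same computation
 * with fixed inputs as a sanity check of the layout conventions; the all-ones
 * matrices are used only so the expected value is easy to confirm by hand.
 */
#include <cstdio>
#include <vector>

int main() {
    const int kM = 27, kN = 31, kK = 17;
    const double alpha = 2.25, beta = 1.24;

    std::vector<double> A(kM * kK, 1.0);   // row-major: A[m*kK + k]
    std::vector<double> B(kK * kN, 1.0);   // column-major: B[k + n*kK]
    std::vector<double> C(kM * kN, 1.0);   // row-major: C[m*kN + n]
    std::vector<double> D(kM * kN, 0.0);

    for (int m = 0; m < kM; ++m) {
        for (int n = 0; n < kN; ++n) {
            double acc = 0.0;
            for (int k = 0; k < kK; ++k) {
                acc += A[m * kK + k] * B[k + n * kK];
            }
            D[m * kN + n] = alpha * acc + beta * C[m * kN + n];
        }
    }
    printf("D[0][0] = %f (expected alpha*kK + beta = %f)\n", D[0], alpha * kK + beta);
    return 0;
}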
#include "THCTensorMath.h" #include "THCGeneral.h" #include "THCNumerics.cuh" #include "THCReduce.cuh" #include "THCReduceAll.cuh" #include "THCThrustAllocator.cuh" #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/transform_reduce.h> #include <thrust/inner_product.h> #if CUDA_VERSION >= 7000 #include <thrust/system/cuda/execution_policy.h> #endif // Reduction operators that support `half`, unlike Thrust template <typename InT, typename AccT> struct ReduceAdd { inline __device__ AccT operator()(AccT a, InT b) const { return a + (AccT) b; } }; #ifdef CUDA_HALF_TENSOR template <> struct ReduceAdd<half, half> { inline __device__ half operator()(half a, half b) const { #ifdef CUDA_HALF_INSTRUCTIONS return __hadd(a, b); #else float fa = __half2float(a); float fb = __half2float(b); return __float2half(fa + fb); #endif } }; template <> struct ReduceAdd<half, float> { inline __device__ float operator()(float a, half b) const { return a + __half2float(b); } }; #endif // CUDA_HALF_TENSOR template <typename InT, typename AccT> struct ReduceMultiply { inline __device__ AccT operator()(AccT a, InT b) const { return a * (AccT) b; } }; #ifdef CUDA_HALF_TENSOR template <> struct ReduceMultiply<half, half> { inline __device__ half operator()(half a, half b) const { #ifdef CUDA_HALF_INSTRUCTIONS return __hmul(a, b); #else float fa = __half2float(a); float fb = __half2float(b); return __float2half(fa * fb); #endif } }; template <> struct ReduceMultiply<half, float> { inline __device__ float operator()(float a, half b) const { return a * __half2float(b); } }; #endif // CUDA_HALF_TENSOR template <typename ResT, typename ArgT> struct SquareFunctor { SquareFunctor(ResT mean): mean_(mean) {} inline __device__ ResT operator()(ArgT x) const { return (((ResT) x) - mean_) * (((ResT) x) - mean_); } const ResT mean_; }; #ifdef CUDA_HALF_TENSOR template <typename ResT> struct SquareFunctor<ResT, half> { SquareFunctor(ResT mean): mean_(mean) {} inline __device__ ResT operator()(half x) const { return THCNumerics<ResT>::mul( THCNumerics<ResT>::sub(mean_, ScalarConvert<half, ResT>::to(x)), THCNumerics<ResT>::sub(mean_, ScalarConvert<half, ResT>::to(x)) ); } const ResT mean_; }; #endif // CUDA_HALF_TENSOR template <typename T> struct ReduceMin { inline __device__ T operator()(T a, T b) const { return THCNumerics<T>::lt(a, b) ? a : b; } }; template <typename T> struct ReduceMax { inline __device__ T operator()(T a, T b) const { return THCNumerics<T>::gt(a, b) ? 
a : b; } }; struct LogicalAll { inline __device__ unsigned char operator()(unsigned char x, unsigned char y) const { return (x && y); } }; struct LogicalAny { inline __device__ unsigned char operator()(unsigned char x, unsigned char y) const { return (x || y); } }; template<typename Real> __global__ void THCTensor_kernel_renorm(Real *data, const Real value, const ptrdiff_t size, const Real maxnorm) { __shared__ Real buffer[32]; long tx = threadIdx.x; long bx = blockIdx.x; long step = blockDim.x; Real *row = data + size*bx; buffer[tx] = ScalarConvert<int, Real>::to(0); // get norm of axis for (ptrdiff_t i=tx; i<size; i+=step) { buffer[tx] = THCNumerics<Real>::add( buffer[tx], THCNumerics<Real>::pow( THCNumerics<Real>::abs(row[i]), value) ); } // add (reduce) for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) { __syncthreads(); if (tx < stride) buffer[tx] = THCNumerics<Real>::add(buffer[tx], buffer[tx+stride]); } // clip norms __syncthreads(); Real norm = THCNumerics<Real>::pow(buffer[0], THCNumerics<Real>::cinv(value)); if (THCNumerics<Real>::gt(norm, maxnorm)) { norm = THCNumerics<Real>::div( maxnorm, THCNumerics<Real>::add( norm, ScalarConvert<float, Real>::to(1e-7) ) ); // renormalize for (ptrdiff_t i=tx; i<size; i+=step) { row[i] = THCNumerics<Real>::mul(row[i], norm); } } } template <typename T> struct TensorNonZeroOp { TensorNonZeroOp() {} __host__ __device__ T operator()(T lhs) const { if (THCNumerics<T>::eq(lhs, ScalarConvert<float, T>::to(0.0))) { return ScalarConvert<int, T>::to(0); } else { return ScalarConvert<int, T>::to(1); } } }; template <typename T, int StaticExp> struct TensorNormOp { TensorNormOp(T exp) : exponent(exp) {} __host__ __device__ T operator()(T x) const { if (StaticExp == 1) { return (T) fabsf((float) x); } else if (StaticExp == 2) { return x * x; } else { return (T) powf(fabsf((float) x), (float) exponent); } } const T exponent; }; template <int StaticExp> struct TensorNormOp<double, StaticExp> { TensorNormOp(double exp) : exponent(exp) {} __host__ __device__ double operator()(double x) const { if (StaticExp == 1) { return fabs(x); } else if (StaticExp == 2) { return x * x; } else { return pow(fabs(x), exponent); } } const double exponent; }; #ifdef CUDA_HALF_TENSOR template <int StaticExp> struct TensorNormOp<half, StaticExp> { TensorNormOp(half exp) : exponent(exp) {} __host__ __device__ half operator()(half x) const { if (StaticExp == 1) { return THCNumerics<half>::abs(x); } else if (StaticExp == 2) { return THCNumerics<half>::mul(x, x); } else { return THCNumerics<half>::pow(THCNumerics<half>::abs(x), exponent); } } const half exponent; }; #endif template <typename Tacc, typename T> struct TensorDistOp { TensorDistOp(Tacc exp) : exponent(exp) {} __host__ __device__ Tacc operator()(T x, T y) const { Tacc xr = ScalarConvert<T, Tacc>::to(x); Tacc yr = ScalarConvert<T, Tacc>::to(y); return THCNumerics<Tacc>::pow( THCNumerics<Tacc>::abs(THCNumerics<Tacc>::sub(xr, yr)), exponent ); } const Tacc exponent; }; #include <thrust/functional.h> // Given the sum of values and the sum of squares, compute the variance or standard deviation. 
template<typename Real, bool flag, bool apply_sqrt> __forceinline__ __device__ Real THCTensor_computeVar(Real sum, Real sum2, unsigned row_size) { Real rs2 = ScalarConvert<unsigned, Real>::to(row_size); Real rs2m = ScalarConvert<unsigned, Real>::to(row_size - 1); Real zero = ScalarConvert<int, Real>::to(0); if (flag) { sum = THCNumerics<Real>::div(sum, rs2); sum2 = THCNumerics<Real>::div(sum2, rs2); sum2 = THCNumerics<Real>::sub(sum2, THCNumerics<Real>::mul(sum, sum)); sum2 = (THCNumerics<Real>::lt(sum2, zero) ? zero : sum2); } else { sum = THCNumerics<Real>::div(sum, rs2); sum2 = THCNumerics<Real>::div(sum2, rs2m); sum2 = THCNumerics<Real>::sub(sum2, THCNumerics<Real>::mul( THCNumerics<Real>::div(rs2 ,rs2m), THCNumerics<Real>::mul(sum, sum))); sum2 = (THCNumerics<Real>::lt(sum2, zero) ? zero : sum2); } if (apply_sqrt) return THCNumerics<Real>::sqrt(sum2); else return sum2; } /* Compute the variance (or standard deviation) along an outer dimension of a tensor. * * - num_orows is the size of the flattened outer dimensions; * - num_irows is the size of the flattened inner dimensions; * - row_size is the size of the dimension along which to compute the variance; * - if flag is set, normalize by `row_size` instead of `row_size - 1` * - if apply_sqrt is set, compute the standard deviation instead of variance * * The dimensions to the outside and inside of the specified dimension are considered as flattened. * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened * outer dimensions, which contains several "inner rows"). * Each thread processes a single inner row at a time. */ template<typename Real, bool flag, bool apply_sqrt> __global__ void THCTensor_kernel_varOuterDim(Real *tgt, Real *src_, unsigned num_orows, unsigned num_irows, unsigned row_size) { for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { Real *src = src_ + orow * row_size * num_irows + irow; Real sum = ScalarConvert<int, Real>::to(0), sum2 = ScalarConvert<int, Real>::to(0); for (unsigned col = 0; col < row_size; ++col) { Real val = *src; sum = THCNumerics<Real>::add(sum, val); sum2 = THCNumerics<Real>::add( sum2, THCNumerics<Real>::mul(val, val) ); src += num_irows; } tgt[orow * num_irows + irow] = THCTensor_computeVar<Real, flag, apply_sqrt>(sum, sum2, row_size); } } } template<typename TensorTypeK, typename Real, bool apply_sqrt> __host__ void THCTensor_varOuterDim(THCState *state, TensorTypeK *tgt, TensorTypeK *src, long dimension, int flag) { unsigned ndim = TensorUtils<TensorTypeK>::getDims(state, src); // Treat all outer dimensions (i.e. dim < dimension) as one. unsigned num_orows = 1; for (long dim = 0; dim < dimension; dim++) { num_orows *= TensorUtils<TensorTypeK>::getSize(state, src, dim); } unsigned row_size = TensorUtils<TensorTypeK>::getSize(state, src, dimension); // Treat all inner dimensions (i.e. dim > dimension) as one. 
unsigned num_irows = 1; for (unsigned dim = dimension + 1; dim < ndim; dim++) { num_irows *= TensorUtils<TensorTypeK>::getSize(state, src, dim); } dim3 threads(min(512, num_irows)); unsigned maxGridDim = 1024; dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x))); if (flag) { THCTensor_kernel_varOuterDim<Real, true, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>( TensorUtils<TensorTypeK>::getData(state, tgt), TensorUtils<TensorTypeK>::getData(state, src), num_orows, num_irows, row_size); } else { THCTensor_kernel_varOuterDim<Real, false, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>( TensorUtils<TensorTypeK>::getData(state, tgt), TensorUtils<TensorTypeK>::getData(state, src), num_orows, num_irows, row_size); } cudaError errcode = cudaGetLastError(); if (errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } } /* Compute the variance (or standard deviation) of the innermost dimension of a tensor. * * - num_rows is the size of the flattened outer dimensions; * - row_size is the size of the innermost dimension; * - if flag is set, normalize by `row_size` instead of `row_size - 1` * - if apply_sqrt is set, compute the standard deviation instead of variance * * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is * considered as having 'num_rows' rows of size 'row_size'. * Each thread block processes one or more sets of contiguous rows (processing multiple rows * per thread block is quicker than processing a single row, especially for short rows). */ template<typename Real, bool flag, bool apply_sqrt> __global__ void THCTensor_kernel_varInnermostDim(Real *tgt, Real *src_, unsigned num_rows, unsigned row_size) { __shared__ Real ssum[32][16]; __shared__ Real ssum2[32][16]; for (unsigned block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) { unsigned row = block_row + threadIdx.y; Real sum = ScalarConvert<int, Real>::to(0), sum2 = ScalarConvert<int, Real>::to(0); if (row < num_rows) { Real *src = src_ + row * row_size; // Sequential reduction within a thread. for (unsigned col = threadIdx.x; col < row_size; col += blockDim.x) { Real val = src[col]; sum = THCNumerics<Real>::add(sum, val); sum2 = THCNumerics<Real>::add(sum2, THCNumerics<Real>::mul(val, val)); } } ssum[threadIdx.y][threadIdx.x] = sum; ssum2[threadIdx.y][threadIdx.x] = sum2; __syncthreads(); // Reduce intermediate values to single value. for (unsigned s = 8; s > 1; s >>= 1) { if (row < num_rows && threadIdx.x < s) { ssum[threadIdx.y][threadIdx.x] = THCNumerics<Real>::add(ssum[threadIdx.y][threadIdx.x], ssum[threadIdx.y][threadIdx.x + s]); ssum2[threadIdx.y][threadIdx.x] = THCNumerics<Real>::add(ssum2[threadIdx.y][threadIdx.x], ssum2[threadIdx.y][threadIdx.x + s]); } __syncthreads(); } if (row < num_rows && threadIdx.x == 0) { sum = THCNumerics<Real>::add(ssum[threadIdx.y][0], ssum[threadIdx.y][1]); sum2 = THCNumerics<Real>::add(ssum2[threadIdx.y][0], ssum2[threadIdx.y][1]); tgt[row] = THCTensor_computeVar<Real, flag, apply_sqrt>(sum, sum2, row_size); } __syncthreads(); } } template<typename TensorTypeK, typename Real, bool apply_sqrt> __host__ void THCTensor_varInnermostDim(THCState *state, TensorTypeK *tgt, TensorTypeK *src, int flag) { unsigned ndim = TensorUtils<TensorTypeK>::getDims(state, src); // Treat all outer dimensions as a single dimension. 
unsigned num_rows = 1; for (unsigned dim = 0; dim < ndim - 1; dim++) { num_rows *= TensorUtils<TensorTypeK>::getSize(state, src, dim); } unsigned row_size = TensorUtils<TensorTypeK>::getSize(state, src, ndim - 1); // From limited testing, 16x32 seemed a good compromise for handling both long and short dimensions. dim3 threads(16, 32); dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y))); if (flag) { THCTensor_kernel_varInnermostDim<Real, true, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>( TensorUtils<TensorTypeK>::getData(state, tgt), TensorUtils<TensorTypeK>::getData(state, src), num_rows, row_size); } else { THCTensor_kernel_varInnermostDim<Real, false, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>( TensorUtils<TensorTypeK>::getData(state, tgt), TensorUtils<TensorTypeK>::getData(state, src), num_rows, row_size); } cudaError errcode = cudaGetLastError(); if (errcode != cudaSuccess) { THError(cudaGetErrorString(errcode)); } } /* A set of reduction kernels that take in binary ops on thrust pairs (of value, index). These are useful when you not only have to do a reduction, but you might have to preserve the location of contention (for example min/max operations). The structure of the kernels follows the structure of the reduction kernels. */ template <typename K, typename Index, class BinaryFunction> __global__ void kernelTransformReduceOuterDimIndex(K *tgt1, Index *tgt2, K *src_, unsigned num_orows, unsigned num_irows, unsigned row_size, thrust::pair<K, Index> init, BinaryFunction binary_op) { for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { K *src = src_ + orow * row_size * num_irows + irow; thrust::pair<K, Index> acc = init; for (unsigned col = 0; col < row_size; ++col) { // +1 for Lua index acc = binary_op(thrust::make_pair<K, Index>(*src, col + TH_INDEX_BASE), acc); src += num_irows; } tgt1[orow * num_irows + irow] = acc.first; tgt2[orow * num_irows + irow] = acc.second; } } } template <typename TensorTypeK, typename TensorTypeIndex, typename BinaryFunction> __host__ void THC_transformReduceOuterDimIndex(THCState *state, TensorTypeK *tgt1, TensorTypeIndex *tgt2, TensorTypeK *src, long rdim, const thrust::pair< typename TensorUtils<TensorTypeK>::DataType, typename TensorUtils<TensorTypeIndex>::DataType>& init, BinaryFunction binary_op) { unsigned ndim = TensorUtils<TensorTypeK>::getDims(state, src); unsigned num_orows = 1; for (long dim = 0; dim < rdim; dim++) { num_orows *= TensorUtils<TensorTypeK>::getSize(state, src, dim); } unsigned row_size = TensorUtils<TensorTypeK>::getSize(state, src, rdim); unsigned num_irows = 1; for (unsigned dim = rdim + 1; dim < ndim; dim++) { num_irows *= TensorUtils<TensorTypeK>::getSize(state, src, dim); } dim3 threads(min(512, num_irows)); unsigned maxGridDim = 1024; dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x))); kernelTransformReduceOuterDimIndex <<<grid, threads, 0, THCState_getCurrentStream(state)>>>( TensorUtils<TensorTypeK>::getData(state, tgt1), TensorUtils<TensorTypeIndex>::getData(state, tgt2), TensorUtils<TensorTypeK>::getData(state, src), num_orows, num_irows, row_size, init, binary_op); THCudaCheck(cudaGetLastError()); } /* Reduce the innermost dimension of a tensor (on thrust::pair functors which are (value, index)) * * For an n-d tensor (n <= 4) where the reduction is along the innermost dimension: * * - block.x is the 
innermost dimension, i.e. dimension 0; * - block.y and grid.y make up dimension 1; and * - grid.x and grid z are the remaining two outer dimensions (if any) * * Reduction along other dimensions is handled in a separate kernel. */ template <typename K, typename Index, class BinaryFunction> __global__ void kernelTransformReduceInnermostDimIndex(K *tgt1, Index* tgt2, K *src_, unsigned num_rows, unsigned row_size, thrust::pair<K, Index> init, BinaryFunction binary_op) { __shared__ K sbuf[32][16 + 1]; // avoid bank conflict __shared__ Index ibuf[32][16 + 1]; // avoid bank conflict for (unsigned block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) { unsigned row = block_row + threadIdx.y; thrust::pair<K, Index> acc = init; if (row < num_rows) { K *src = src_ + row * row_size; // Sequential reduction within a thread. for (unsigned col = threadIdx.x; col < row_size; col += blockDim.x) { acc = binary_op(thrust::make_pair<K, Index>(src[col], col + TH_INDEX_BASE), acc); } } sbuf[threadIdx.y][threadIdx.x] = acc.first; ibuf[threadIdx.y][threadIdx.x] = acc.second; __syncthreads(); // Reduce intermediate values to single value. K* sline = &sbuf[threadIdx.y][0]; Index* iline = &ibuf[threadIdx.y][0]; for (unsigned s = 8; s > 0; s >>= 1) { if (row < num_rows && threadIdx.x < s) { thrust::pair<K, Index> arg1 = thrust::make_pair<K, Index>(sline[threadIdx.x], iline[threadIdx.x]); thrust::pair<K, Index> arg2 = thrust::make_pair<K, Index>(sline[threadIdx.x + s], iline[threadIdx.x + s]); thrust::pair<K, Index> res = binary_op(arg1, arg2); sline[threadIdx.x] = res.first; iline[threadIdx.x] = res.second; } __syncthreads(); } if (row < num_rows && threadIdx.x == 0) { tgt1[row] = sline[0]; tgt2[row] = iline[0]; } __syncthreads(); } } template <typename TensorTypeK, typename TensorTypeIndex, typename BinaryFunction> __host__ void THC_transformReduceInnermostDimIndex(THCState *state, TensorTypeK *tgt1, TensorTypeIndex *tgt2, TensorTypeK *src, const thrust::pair< typename TensorUtils<TensorTypeK>::DataType, typename TensorUtils<TensorTypeIndex>::DataType>& init, BinaryFunction binary_op) { unsigned ndim = TensorUtils<TensorTypeK>::getDims(state, src); unsigned num_rows = 1; for (unsigned dim = 0; dim < ndim - 1; dim++) { num_rows *= TensorUtils<TensorTypeK>::getSize(state, src, dim); } unsigned row_size = TensorUtils<TensorTypeK>::getSize(state, src, ndim - 1); dim3 threads(16, 32); dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y))); kernelTransformReduceInnermostDimIndex <<<grid, threads, 0, THCState_getCurrentStream(state)>>>( TensorUtils<TensorTypeK>::getData(state, tgt1), TensorUtils<TensorTypeIndex>::getData(state, tgt2), TensorUtils<TensorTypeK>::getData(state, src), num_rows, row_size, init, binary_op); THCudaCheck(cudaGetLastError()); } template <typename TensorTypeK, typename TensorTypeIndex, typename BinaryFunction> void THC_reduceDimIndex(THCState *state, TensorTypeK *tgt1_, TensorTypeIndex *tgt2_, TensorTypeK *src, long dimension, const thrust::pair< typename TensorUtils<TensorTypeK>::DataType, typename TensorUtils<TensorTypeIndex>::DataType>& init, BinaryFunction binary_op) { THArgCheck(dimension >= 0 && dimension < TensorUtils<TensorTypeK>::getDims(state, src), 3, "dimension out of range"); THLongStorage *dim = TensorUtils<TensorTypeK>::newSizeOf(state, src); THLongStorage_set(dim, dimension, 1); TensorUtils<TensorTypeK>::resize(state, tgt1_, dim, NULL); TensorUtils<TensorTypeIndex>::resize(state, tgt2_, dim, NULL); THLongStorage_free(dim); TensorTypeK *tgt1 = 
TensorUtils<TensorTypeK>::newContiguous(state, tgt1_); TensorTypeIndex *tgt2 = TensorUtils<TensorTypeIndex>::newContiguous(state, tgt2_); src = TensorUtils<TensorTypeK>::newContiguous(state, src); if (dimension == TensorUtils<TensorTypeK>::getDims(state, src) - 1) { THC_transformReduceInnermostDimIndex(state, tgt1, tgt2, src, init, binary_op); } else { THC_transformReduceOuterDimIndex(state, tgt1, tgt2, src, dimension, init, binary_op); } TensorUtils<TensorTypeK>::free(state, src); TensorUtils<TensorTypeK>::freeCopyTo(state, tgt1, tgt1_); TensorUtils<TensorTypeIndex>::freeCopyTo(state, tgt2, tgt2_); } template <typename T, typename Index> struct MaxValuePair { __host__ __device__ thrust::pair<T, Index> operator()(const thrust::pair<T, Index>& a, const thrust::pair<T, Index>& b) { return THCNumerics<T>::ge(a.first, b.first) ? a : b; } }; template <typename T, typename Index> struct MinValuePair { __host__ __device__ thrust::pair<T, Index> operator()(const thrust::pair<T, Index>& a, const thrust::pair<T, Index>& b) { return THCNumerics<T>::le(a.first, b.first) ? a : b; } }; template <typename T> struct AddOp { __device__ __forceinline__ T operator()(T &lhs, T &rhs) { return THCNumerics<T>::add(lhs, rhs); } }; template <typename T> struct MulOp { __device__ __forceinline__ T operator()(T &lhs, T &rhs) { return THCNumerics<T>::mul(lhs, rhs); } }; #endif // THC_TENSORMATH_REDUCE_CUH
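/*
 * Editor's note: THCTensor_computeVar above turns a single-pass sum and sum of
 * squares into a variance (or standard deviation). With `flag` set it uses the
 * biased estimator E[x^2] - E[x]^2; otherwise it applies the n/(n-1) correction,
 * and in both cases it clamps at zero to absorb floating-point cancellation.
 * The host-side sketch below mirrors that arithmetic; the sample row and the
 * compute_var helper are illustrative only, not part of the THC API.
 */
#include <cmath>
#include <cstdio>
#include <vector>

static double compute_var(double sum, double sum2, unsigned n, bool biased, bool apply_sqrt) {
    double mean = sum / n;
    double var;
    if (biased) {
        var = sum2 / n - mean * mean;                                   // E[x^2] - E[x]^2
    } else {
        var = sum2 / (n - 1) - (double(n) / (n - 1)) * mean * mean;     // unbiased estimator
    }
    if (var < 0) var = 0;                                               // guard against cancellation
    return apply_sqrt ? std::sqrt(var) : var;
}

int main() {
    std::vector<double> row = {1.0, 2.0, 3.0, 4.0};
    double sum = 0.0, sum2 = 0.0;
    for (double v : row) { sum += v; sum2 += v * v; }                   // single pass, as in the kernels
    printf("unbiased var = %f, biased std = %f\n",
           compute_var(sum, sum2, row.size(), false, false),
           compute_var(sum, sum2, row.size(), true, true));
    return 0;
}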
#include "amgx_types/util.h" #include "amgx_types/rand.h" #include "amgx_types/io.h" #include <sstream> #include <iomanip> namespace amgx { template <class TConfig> EigenSolver<TConfig>::EigenSolver(AMG_Config &cfg, const std::string &cfg_scope) : m_A(0), m_converged(false), m_curr_iter(0), m_num_iters(0), m_max_iters(0), m_ref_count(1) { m_want_eigenvectors = cfg.getParameter<int>("eig_eigenvector", cfg_scope); m_tolerance = cfg.getParameter<double>("eig_tolerance", cfg_scope); m_shift = types::util<ValueTypeVec>::get_one() * cfg.getParameter<double>("eig_shift", cfg_scope); m_damping_factor = cfg.getParameter<double>("eig_damping_factor", cfg_scope); m_max_iters = cfg.getParameter<int>("eig_max_iters", cfg_scope); m_verbosity_level = cfg.getParameter<int>("verbosity_level", cfg_scope); m_eigenvector_solver_name = cfg.getParameter<std::string>("eig_eigenvector_solver", cfg_scope); m_norm_type = cfg.getParameter<NormType>("norm", cfg_scope); std::string which = cfg.getParameter<std::string>("eig_which", cfg_scope); if (which == "smallest") { m_which = EIG_SMALLEST; } else if (which == "largest") { m_which = EIG_LARGEST; } else if (which == "pagerank") { m_which = EIG_PAGERANK; } else if (which == "shift") { m_which = EIG_SHIFT; } else { FatalError("EigenSolver: invalid target spectrum.", AMGX_ERR_CONFIGURATION); } // Allocate events. cudaEventCreate(&m_setup_start); cudaEventCreate(&m_setup_stop); cudaEventCreate(&m_solve_start); cudaEventCreate(&m_solve_stop); cudaEventCreate(&m_iter_start); cudaEventCreate(&m_iter_stop); m_setup_time = 0.0f; m_solve_time = 0.0f; } template <class TConfig> EigenSolver<TConfig>::~EigenSolver() { m_eigenvalues.clear(); m_eigenvectors.clear(); cudaEventDestroy(m_setup_start); cudaEventDestroy(m_setup_stop); cudaEventDestroy(m_solve_start); cudaEventDestroy(m_solve_stop); cudaEventDestroy(m_iter_start); cudaEventDestroy(m_iter_stop); } template <class TConfig> int EigenSolver<TConfig>::get_num_iters() const { return m_num_iters; } template <class TConfig> void EigenSolver<TConfig>::set_max_iters(int max_iters) { m_max_iters = max_iters; } template <class TConfig> void EigenSolver<TConfig>::set_tolerance(double tol) { m_tolerance = tol; } template <class TConfig> void EigenSolver<TConfig>::set_shift(ValueTypeVec shift) { m_shift = shift; } template <class TConfig> bool EigenSolver<TConfig>::converged() const { return m_converged; } template <class TConfig> void EigenSolver<TConfig>::setup(Operator<TConfig> &A) { m_A = &A; m_converged = false; #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_setup_start); solver_setup(); #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_setup_stop); cudaEventSynchronize(m_setup_stop); cudaEventElapsedTime(&m_setup_time, m_setup_start, m_setup_stop); m_setup_time *= 1e-3f; } template<class TConfig> void EigenSolver<TConfig>::exchangeSolveResultsConsolidation(AMGX_STATUS &status) { std::vector<PODVector_h> m_res_history; PODVector_h res(1); for (int i = 0; i < m_residuals.size(); i++) { res[0] = m_residuals[i]; m_res_history.push_back(res); } this->m_A->getManager()->exchangeSolveResultsConsolidation(m_num_iters, m_res_history, status, true /*looks like we always store residual history*/); } template<class TConfig> AMGX_ERROR EigenSolver<TConfig>::solve_no_throw(VVector &x, AMGX_STATUS &status) { AMGX_ERROR rc = AMGX_OK; try { // Check if fine level is consolidated and not a root partition if ( 
!(this->m_A->getManager() != NULL && this->m_A->getManager()->isFineLevelConsolidated() && !this->m_A->getManager()->isFineLevelRootPartition() )) { // If matrix is consolidated on fine level and not a root partition if (x.tag == -1) { x.tag = 4242 * 100 + 1; } status = this->solve(x); } // Exchange residual history, number of iterations, solve status if fine level consoildation was used if (this->m_A->getManager() != NULL && this->m_A->getManager()->isFineLevelConsolidated()) { this->exchangeSolveResultsConsolidation(status); } } AMGX_CATCHES(rc) return rc; } template <class TConfig> AMGX_STATUS EigenSolver<TConfig>::solve(VVector &x) { // initial vector is empty, initialize it with random values. if (x.empty()) { Operator<TConfig> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int N = A.get_num_cols(); Vector_h h_x(N); for (int i = 0; i < N; ++i) { h_x[i] = types::get_rand<ValueTypeVec>(); } x = h_x; A.setView(oldView); } // This code is needed for MPI implementation of eigensolvers. x.set_block_dimx(1); x.set_block_dimy(m_A->get_block_dimx()); if (x.tag == -1) { x.tag = 1; } x.dirtybit = 1; x.delayed_send = 1; m_eigenvectors.clear(); m_eigenvalues.clear(); #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_solve_start); solve_init(x); bool done = false; for (m_curr_iter = 0; m_curr_iter < m_max_iters && !done; ++m_curr_iter) { done = solve_iteration(x); // solve_iteration did not update the residuals, add an undefined norm. if (m_residuals.size() == m_curr_iter) { m_residuals.push_back( types::util<PODValueB>::get_minus_one()); } if (m_verbosity_level == 3) { print_iter_stats(); } } print_final_stats(); if (done) { m_converged = true; } m_num_iters = m_curr_iter; solve_finalize(); if (m_want_eigenvectors && m_eigenvectors.empty()) { std::string str = "Eigenvectors requested but not provided by solver.\n"; amgx_output(str.c_str(), str.length()); if (m_eigenvector_solver_name.empty()) { FatalError("Eigenvectors requested but no eigenvector solver provided", AMGX_ERR_CONFIGURATION); } EigenVectorSolver<TConfig> *eigenvector_solver = EigenVectorSolverFactory<TConfig>::create(m_eigenvector_solver_name); ValueTypeVec eigenvalue = m_eigenvalues.front(); eigenvector_solver->setup(*m_A); m_eigenvectors.resize(m_eigenvalues.size()); eigenvector_solver->solve(eigenvalue, m_eigenvectors[0]); delete eigenvector_solver; } #ifdef AMGX_WITH_MPI #ifdef MPI_SOLVE_PROFILE MPI_Barrier(MPI_COMM_WORLD); #endif #endif cudaEventRecord(m_solve_stop); cudaEventSynchronize(m_solve_stop); cudaEventElapsedTime(&m_solve_time, m_solve_start, m_solve_stop); m_solve_time *= 1e-3f; if (m_verbosity_level == 3) { std::stringstream ss; if (m_converged) { ss << "Eigensolver converged after " << get_num_iters() << " iterations." << std::endl; std::vector<ValueTypeVec> eigenvalues = get_eigenvalues(); ss << "Eigenvalue: "; for (int i = 0; i < eigenvalues.size(); ++i) { ss << eigenvalues[i] << " "; } ss << std::endl; } else { ss << "Eigensolver did not converge after " << this->get_num_iters() << " iterations." << std::endl; } amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); print_timings(); } return m_converged ? AMGX_ST_CONVERGED : AMGX_ST_NOT_CONVERGED; } template<class TConfig> void EigenSolver<TConfig>::postprocess_eigenpairs() { // If the smallest eigenvalues were computed (with A^-1), // we need to invert the eigenvalue. 
if (m_which == EIG_SMALLEST) { for (int i = 0; i < m_eigenvalues.size(); ++i) { ValueTypeVec eigenvalue = m_eigenvalues[i]; ValueTypeVec inv_eigenvalue = types::util<ValueTypeVec>::get_one() / eigenvalue; m_eigenvalues[i] = inv_eigenvalue + m_shift; } } } template<class TConfig> void EigenSolver<TConfig>::print_timings() { std::stringstream ss; ss << "Total Time: " << m_setup_time + m_solve_time << std::endl; ss << " setup: " << m_setup_time << " s\n"; ss << " solve: " << m_solve_time << " s\n"; ss << " solve(per iteration): " << ((m_num_iters == 0) ? m_num_iters : m_solve_time / m_num_iters) << " s\n"; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <typename TConfig> void EigenSolver<TConfig>::print_iter_stats() { if (m_curr_iter == 0) { std::stringstream ss; ss << std::setw(15) << "iter" << std::setw(20) << " Mem Usage (GB)" << std::setw(15) << "residual"; ss << std::setw(15) << "rate"; ss << std::endl; ss << " --------------------------------------------------------------"; ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } std::stringstream ss; ss << std::setw(15) << m_curr_iter; MemoryInfo::updateMaxMemoryUsage(); ss << std::setw(20) << MemoryInfo::getMaxMemoryUsage(); PODValueB iter_residual = m_residuals[m_curr_iter]; if (iter_residual >= 0) { ss << std::scientific << std::setprecision(6) << std::setw(15) << iter_residual; // Compute convergence rate. if (m_curr_iter > 0) { PODValueB prev_residual = m_residuals[m_curr_iter - 1]; if (prev_residual > 0) { ss << std::setw(15); ss << std::fixed << std::setprecision(4) << iter_residual / prev_residual; } } } ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template <typename TConfig> void EigenSolver<TConfig>::print_final_stats() { std::stringstream ss; ss << " --------------------------------------------------------------"; ss << std::endl; amgx_output(ss.str().c_str(), static_cast<int>(ss.str().length())); } template<class TConfig> typename EigenSolverFactory<TConfig>::EigenSolverFactoryMap & EigenSolverFactory<TConfig>::getFactories() { static EigenSolverFactoryMap factories; return factories; } template<class TConfig> void EigenSolverFactory<TConfig>::registerFactory(const std::string &name, EigenSolverFactory<TConfig> *f) { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::const_iterator it = factories.find(name); if (it != factories.end()) { std::string error = "EigenSolverFactory '" + name + "' has already been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } factories[name] = f; } template<class TConfig> void EigenSolverFactory<TConfig>::unregisterFactory(const std::string &name) { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::iterator it = factories.find(name); if (it == factories.end()) { std::string error = "EigenSolverFactory '" + name + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } EigenSolverFactory<TConfig> *factory = it->second; assert(factory != NULL); delete factory; factories.erase(it); } template<class TConfig> void EigenSolverFactory<TConfig>::unregisterFactories() { EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::iterator it = factories.begin(); for (; it != factories.end();) { EigenSolverFactory<TConfig> *factory = it->second; assert(factory != NULL); it++; delete factory; } factories.clear(); } template<class TConfig> EigenSolver<TConfig> 
*EigenSolverFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &current_scope, const std::string &solverType, ThreadManager *tmng) { std::string solverName, new_scope; cfg.getParameter<std::string>(solverType, solverName, current_scope, new_scope); EigenSolverFactoryMap &factories = getFactories(); typename EigenSolverFactoryMap::const_iterator it = factories.find(solverName); if (it == factories.end()) { std::string error = "EigenSolverFactory '" + solverName + "' has not been registered\n"; FatalError(error.c_str( ), AMGX_ERR_CORE); } EigenSolver<TConfig> *solver = it->second->create(cfg, new_scope, tmng); solver->setName(solverName); return solver; } ; template<class TConfig> EigenSolver<TConfig> *EigenSolverFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &solverType, ThreadManager *tmng) { return EigenSolverFactory<TConfig>::allocate(cfg, "default", solverType, tmng); } // Explicit template instantiation. #define AMGX_CASE_LINE(CASE) template class EigenSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class EigenSolverFactory<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE };
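/*
 * Editor's note: postprocess_eigenpairs above maps eigenvalues back to the
 * original operator when the smallest ones are requested. In that mode the
 * solver appears to iterate on a shifted-and-inverted operator, so an eigenvalue
 * mu of (A - shift*I)^{-1} is converted back with lambda = 1/mu + shift, which is
 * the EIG_SMALLEST branch. The tiny numeric example below only checks that round
 * trip; the eigenvalue and shift values are made up for illustration.
 */
#include <cstdio>

int main() {
    double lambda_true = 2.0;   // an eigenvalue of the (hypothetical) original matrix
    double shift = 1.0;         // eig_shift

    // Corresponding eigenvalue of the shifted-and-inverted operator (A - shift*I)^{-1}.
    double mu = 1.0 / (lambda_true - shift);

    // Recovery step, as in the EIG_SMALLEST branch of postprocess_eigenpairs.
    double lambda_recovered = 1.0 / mu + shift;

    printf("mu = %f, recovered lambda = %f (expected %f)\n",
           mu, lambda_recovered, lambda_true);
    return 0;
}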
namespace mxnet { namespace op { #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) #define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } //#define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ float norm(){ return sqrt(x * x + y * y); } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float inner_product(const Point &A, const Point &B, const Point &C){ // vector:AB * vector:AC Point AB = B - A; Point AC = C - A; return AB.x * AC.x + AB.y * AC.y; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross_3d(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box3d(const float *box, const Point &P){ // const float MARGIN = 1e-5; // 4 points: ABCD Point A(box[0],box[1]); Point B(box[2],box[3]); Point C(box[4],box[5]); Point D(box[6],box[7]); float dot1 = inner_product(B,A,P); if (dot1 < 0) return false; float dot2 = inner_product(B,C,P); if (dot2 < 0) return false; float dot3 = inner_product(D,A,P); if (dot3 < 0) return false; float dot4 = inner_product(D,C,P); if (dot4 < 0) return false; return true; } __device__ inline int check_in_box3d_anotherway(const float *box, const Point &P){ const float MARGIN = -1e-2; // 4 points: ABCD Point A(box[0],box[1]); Point B(box[2],box[3]); Point C(box[4],box[5]); Point D(box[6],box[7]); Point AB_vec = B - A; Point BC_vec = C - B; Point CD_vec = D - C; Point DA_vec = A - D; auto is_clock_wise = cross(AB_vec,BC_vec); #ifdef DEBUG printf("AB_vec: (%f, %f)\n", AB_vec.x, AB_vec.y); printf("BC_vec: (%f, %f)\n", BC_vec.x, BC_vec.y); printf("CD_vec: (%f, %f)\n", CD_vec.x, CD_vec.y); printf("DA_vec: (%f, %f)\n", DA_vec.x, DA_vec.y); printf("is_clock_wise: %f\n", is_clock_wise); #endif Point PA_vec = A - P; float cross1 = cross(PA_vec, AB_vec); if (cross1 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross1: %f, PA.x: %f, PA.y: %f\n", cross1, PA_vec.x, PA_vec.y); #endif return false; } Point PB_vec = B - P; float cross2 = cross(PB_vec, BC_vec); if (cross2 * is_clock_wise < MARGIN){ #ifdef DEBUG printf("cross2: %f, PB.x: %f, PB.y: %f\n", cross2, PB_vec.x, PB_vec.y); #endif return false; } Point PC_vec = C - P; float cross3 = cross(PC_vec, CD_vec); if (cross3 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross3: %f, PC.x: %f, PC.y: %f\n", cross3, PC_vec.x, PC_vec.y); #endif return false; } Point PD_vec = D - P; float cross4 = cross(PD_vec, DA_vec); if (cross4 * is_clock_wise < MARGIN) { #ifdef DEBUG printf("cross4: %f, PD.x: %f, PD.y: %f\n", cross4, PD_vec.x, PD_vec.y); #endif return false; } return true; } __device__ inline int 
intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross_3d(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x; float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float get_area(const float* box){ float x1 = box[0], y1 = box[1], x2 = box[2], y2 = box[3], x3 = box[4], y3 = box[5]; float edge1 = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2); float edge2 = (x3 - x2) * (x3 - x2) + (y3 - y2) * (y3 - y2); return sqrt(edge1 * edge2); } __device__ inline float max4(const float x1, const float x2, const float x3, const float x4){ float max = -1000000; if (x1 > max) max = x1; if (x2 > max) max = x2; if (x3 > max) max = x3; if (x4 > max) max = x4; return max; } __device__ inline float min4(const float x1, const float x2, const float x3, const float x4){ float min = 1000000; if (x1 < min) min = x1; if (x2 < min) min = x2; if (x3 < min) min = x3; if (x4 < min) min = x4; return min; } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // float x_a_min = min4(box_a[0], box_a[2], box_a[4], box_a[6]); // float x_a_max = max4(box_a[0], box_a[2], box_a[4], box_a[6]); // float y_a_min = min4(box_a[1], box_a[3], box_a[5], box_a[7]); // float y_a_max = max4(box_a[1], box_a[3], box_a[5], box_a[7]); // float x_b_min = min4(box_b[0], box_b[2], box_b[4], box_b[6]); // float x_b_max = max4(box_b[0], box_b[2], box_b[4], box_b[6]); // float y_b_min = min4(box_b[1], box_b[3], box_b[5], box_b[7]); // float y_b_max = max4(box_b[1], box_b[3], box_b[5], box_b[7]); // if (x_a_max < x_b_min || x_a_min > x_b_max || y_a_max < y_b_min || y_a_min > y_b_max) return 0; // Point center_a; // center_a.set((box_a[0] + box_a[2] + box_a[4] + box_a[6]) / 4.0, // (box_a[1] + box_a[3] + box_a[5] + box_a[7]) / 4.0); // // printf("center_a:(%f, %f)\n", center_a.x, center_a.y); // Point center_b; // center_b.set((box_b[0] + box_b[2] + box_b[4] + box_b[6]) / 4.0, // (box_b[1] + box_b[3] + box_b[5] + box_b[7]) / 4.0); // // printf("center_b:(%f, %f)\n", center_b.x, center_b.y); // Point two_center_vec = center_a - center_b; // // printf("two_center_vec:(%f, %f)\n", two_center_vec.x, two_center_vec.y); // float center_dist = two_center_vec.norm(); // // printf("center_dist:%f\n", center_dist); // float area_a = get_area(box_a); // float area_b = get_area(box_b); // float min_area = area_a < area_b ? 
area_a : area_b; // if (center_dist < 0.2){ // return min_area; // } // else return 0; Point box_a_corners[5]; box_a_corners[0].set(box_a[0],box_a[1]); box_a_corners[1].set(box_a[2],box_a[3]); box_a_corners[2].set(box_a[4],box_a[5]); box_a_corners[3].set(box_a[6],box_a[7]); Point box_b_corners[5]; box_b_corners[0].set(box_b[0],box_b[1]); box_b_corners[1].set(box_b[2],box_b[3]); box_b_corners[2].set(box_b[4],box_b[5]); box_b_corners[3].set(box_b[6],box_b[7]); box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; #ifdef DEBUG printf("Intersect point (%f, %f)\n", cross_points[cnt].x, cross_points[cnt].y); #endif cnt++; } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box3d_anotherway(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("Point (%f, %f) in box_a\n", box_b_corners[k].x, box_b_corners[k].y); #endif } if (check_in_box3d_anotherway(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("Point (%f, %f) in box_b\n", box_a_corners[k].x, box_a_corners[k].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); auto thread_id = threadIdx.x; for (int i = 0; i < cnt; i++){ printf("thread: %d, All cross point %d: (%.3f, %.3f)\n", thread_id, i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { float height_a = box_a[9] - box_a[8]; float height_b = box_b[9] - box_b[8]; float overlap_height = fminf(box_a[9], box_b[9]) - fmaxf(box_a[8], box_b[8]); if (overlap_height < 0) overlap_height = 0; float area_a = get_area(box_a); float area_b = get_area(box_b); float volume_a = area_a * height_a; float volume_b = area_b * height_b; float overlap_2d = box_overlap(box_a, box_b); float volume_overlap = overlap_2d * overlap_height; float result = volume_overlap / fmaxf(volume_a + volume_b - volume_overlap, EPS); #ifdef DEBUG printf("area_a=%f\n", area_a); printf("area_b=%f\n", area_b); printf("height_a=%f\n", height_a); printf("height_b=%f\n", height_b); printf("overlap_height=%f\n", overlap_height); printf("volume_a=%f\n", volume_a); printf("volume_b=%f\n", volume_b); printf("overlap_2d=%f\n", overlap_2d); printf("volume_overlap=%f\n", volume_overlap); printf("overlap result=%f\n", result); #endif return result; } __device__ inline float iou_normal(float const * const a, float const * const b) { float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 
0.f); float interS = width * height; float Sa = (a[2] - a[0]) * (a[3] - a[1]); float Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_kernel_3d(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask, bool normal_iou) { //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 10]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 10 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 0]; block_boxes[threadIdx.x * 10 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 1]; block_boxes[threadIdx.x * 10 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 2]; block_boxes[threadIdx.x * 10 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 3]; block_boxes[threadIdx.x * 10 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 4]; block_boxes[threadIdx.x * 10 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 5]; block_boxes[threadIdx.x * 10 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 6]; block_boxes[threadIdx.x * 10 + 7] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 7]; block_boxes[threadIdx.x * 10 + 8] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 8]; block_boxes[threadIdx.x * 10 + 9] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 10 + 9]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 10; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { float iou_st; if(normal_iou){ iou_st = iou_normal(cur_box, block_boxes + i * 10); } else{ iou_st = iou_bev(cur_box, block_boxes + i * 10); } if (iou_st > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __global__ void prepare_output_kernel_3d(const int N, const int max_keep, const int col_blocks, unsigned long long *mask, unsigned long long * remv_cpu, int* keep_idx, const float *boxes, float *bbox_after_nms) { // unsigned long long remv_cpu[col_blocks]; // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); int num_to_keep = 0; for (int i = 0; i < N; i++) { if(num_to_keep >= max_keep) {break;} int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; // if (!(remv_cpu[nblock] & (1ULL << inblock))) { // keep_idx[num_to_keep++] = i; // unsigned long long *p = &mask[0] + i * col_blocks; // for (int j = nblock; j < col_blocks; j++) { // remv_cpu[j] |= p[j]; // } // } if (!(remv_cpu[nblock] & (1ULL << inblock))) { for (int k = 0; k < 10; k++){ bbox_after_nms[num_to_keep * 10 + k] = boxes[i * 10 + k]; } keep_idx[num_to_keep++] = i; unsigned long long *p = &mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } } template <> void NMS3DForward<gpu>(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& in_data, const 
std::vector<OpReqType>& req, const std::vector<TBlob>& out_data) { using namespace mshadow; size_t expected_in = 1; size_t expected_out = 2; // input: boxes(B,N,10), which is sorted with score // output: keep_idx(B,num_boxes) CHECK_EQ(in_data.size(), expected_in); CHECK_EQ(out_data.size(), expected_out); CHECK_EQ(in_data[0].shape_[2], 10); CHECK_EQ(out_data[0].shape_[0], in_data[0].shape_[0]); const NMS3DParam param = nnvm::get<NMS3DParam>(attrs.parsed); const int B = in_data[0].size(0); const int N = in_data[0].size(1); const int max_keep = param.max_keep; const float iou_thres = param.iou_thres; const bool normal_iou = param.normal_iou; CHECK_EQ(out_data[0].shape_[1], max_keep); Stream<gpu>* s = ctx.get_stream<gpu>(); auto stream = mshadow::Stream<gpu>::GetStream(s); // assume all the data and gradient have the same type MSHADOW_TYPE_SWITCH(in_data[0].type_flag_, DType, { const float* boxes = in_data[0].dptr<float>(); int* keep_idx = out_data[0].dptr<int>(); float* bbox_after_nms = out_data[1].dptr<float>(); Fill<true>(s, out_data[0], kWriteTo, -1); Fill<true>(s, out_data[1], kWriteTo, 0.0); const int col_blocks = DIVUP(N, THREADS_PER_BLOCK_NMS); unsigned long long *mask_data = NULL; CHECK_ERROR(cudaMalloc((void**)&mask_data, B * N * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(N, THREADS_PER_BLOCK_NMS), DIVUP(N, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); unsigned long long *remv_dev = NULL; CHECK_ERROR(cudaMalloc((void**)&remv_dev, col_blocks * sizeof(unsigned long long))); // iterate through batch for(int b = 0; b < B; b++) { // calculate overlap matrix nms_kernel_3d<<<blocks, threads>>>(N, iou_thres, boxes+b*N*10, mask_data+b*N*col_blocks, normal_iou); CHECK_ERROR(cudaMemset(remv_dev, 0, col_blocks * sizeof(unsigned long long))); prepare_output_kernel_3d<<<1,1,0,stream>>>(N, max_keep, col_blocks, mask_data+b*N*col_blocks, remv_dev, keep_idx + b * max_keep, boxes + b * N * 10, bbox_after_nms + b * max_keep * 10); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { LOG(FATAL) << "CUDA kernel failed : " << cudaGetErrorString(err); exit(-1); } } cudaFree(mask_data); }); } NNVM_REGISTER_OP(_contrib_NMS3D) .set_attr<FCompute>("FCompute<gpu>", NMS3DForward<gpu>); } }
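// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source). box_overlap above
// builds the intersection polygon of two rotated BEV boxes (edge/edge
// intersection points plus corners contained in the other box), sorts the
// vertices by angle around their centroid, and then measures the area with a
// fan of cross products. The helper below, convex_polygon_area (a
// hypothetical name), is a host-side restatement of that last step: for
// vertices v[0..n-1] ordered around the boundary,
// area = |sum_k (v_k - v_0) x (v_{k+1} - v_0)| / 2.
// ---------------------------------------------------------------------------
#include <cmath>

struct XY { float x, y; };  // minimal 2-D point type for this sketch only

inline float convex_polygon_area(const XY* v, int n) {
  float twice_area = 0.f;
  for (int k = 1; k + 1 < n; ++k) {
    float ax = v[k].x - v[0].x,     ay = v[k].y - v[0].y;      // v_k   - v_0
    float bx = v[k + 1].x - v[0].x, by = v[k + 1].y - v[0].y;  // v_k+1 - v_0
    twice_area += ax * by - ay * bx;  // 2 * signed area of triangle (v0, vk, vk+1)
  }
  return std::fabs(twice_area) * 0.5f;
}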
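// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source). nms_kernel_3d packs
// the pairwise "suppresses" relation into 64-bit words: bit b of
// mask[i * col_blocks + j] is set when box (j*64 + b) overlaps box i above the
// IoU threshold. prepare_output_kernel_3d then walks the score-sorted boxes
// once, keeping a box only if no previously kept box has marked it. The
// hypothetical host function nms_keep_cpu below performs the same pass on a
// host copy of the mask, which can be handy for validating the kernel output.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

static std::vector<int> nms_keep_cpu(const std::vector<unsigned long long>& mask,
                                     int num_boxes, int max_keep) {
  const int kBits = 64;  // THREADS_PER_BLOCK_NMS: boxes covered per mask word
  const int col_blocks = (num_boxes + kBits - 1) / kBits;
  std::vector<unsigned long long> remv(col_blocks, 0ULL);  // accumulated suppression bits
  std::vector<int> keep;
  for (int i = 0; i < num_boxes && (int)keep.size() < max_keep; ++i) {
    int nblock = i / kBits, inblock = i % kBits;
    if (remv[nblock] & (1ULL << inblock)) continue;  // suppressed by a kept box
    keep.push_back(i);                               // box i survives
    const unsigned long long* row = &mask[(size_t)i * col_blocks];
    for (int j = nblock; j < col_blocks; ++j)
      remv[j] |= row[j];                             // mark everything box i overlaps
  }
  return keep;
}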
const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4; using namespace megdnn; using namespace cuda; using namespace convolution; using namespace chanwise; namespace { /*! * \brief compute grad w.r.t. filter * * block dim: out_id * kern_id * threads with the same out_id computes grad for corresponding kernel element * \tparam nr_thpf number of threads for one element in the filter; must be * power of 2; */ template <typename T, uint32_t nr_thpf> __global__ void kern_bwd_filter_float( T* flt_grad, const T* src, const T* dst_grad, Param param) { const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h, IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h, FW = param.flt_w, PH = param.pad_h, PW = param.pad_w, SH = param.stride_h, SW = param.stride_w, OH = param.out_h, OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW, DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW, BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf, OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X; uint32_t ic, chl_mul, fh, fw; { uint32_t i = OUT_IDX; i = div_mod(i, FW, fw); i = div_mod(i, FH, fh); i = div_mod(i, CHL_MUL, chl_mul); ic = i; } if (ic >= IC) { return; } src += ic * IH * IW; dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW; const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH, oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH), ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW, ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo, oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL), tid = threadIdx.x % nr_thpf; if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) { if (!tid) flt_grad[OUT_IDX] = 0; return; } T sum(0); for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) { uint32_t n, oh, ow; n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL; oh += oh_lo; ow += ow_lo; uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw, soff = ih * IW + iw + n * SRC_BATCH_STRIDE, doff = oh * OW + ow + n * DST_BATCH_STRIDE; #pragma unroll for (uint32_t i = 0; i < BATCH_UNROLL; ++i) { if (!i || n + i < N) { sum += src[soff] * dst_grad[doff]; } soff += SRC_BATCH_STRIDE; doff += DST_BATCH_STRIDE; } } if (nr_thpf == 1) { flt_grad[OUT_IDX] = sum; } else { // reduce all sums in a block extern __shared__ uint8_t shared_storage[]; volatile T* thread_sum = reinterpret_cast<T*>(shared_storage); thread_sum += THREADID_X * nr_thpf; thread_sum[tid] = sum; #pragma unroll for (uint32_t i = nr_thpf / 2; i; i >>= 1) { bool cond = nr_thpf >= i * 2 && tid < i; if (i >= WARP_SIZE) { __syncthreads(); } else { cub::WARP_SYNC(0xffffffff); } if (cond) { T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i]; thread_sum[tid] = v1; } } if (!tid) { flt_grad[OUT_IDX] = thread_sum[0]; } } } #if CUDA_VERSION >= 9000 template <typename T, uint32_t nr_thpf> __global__ void kern_bwd_filter_hf( __half* flt_grad, const __half* src, const __half* dst_grad, Param param) { const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h, IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h, FW = param.flt_w, PH = param.pad_h, PW = param.pad_w, SH = param.stride_h, SW = param.stride_w, OH = param.out_h, OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW, DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW, BLKDIM_X = (blockDim.x / nr_thpf) * 2, THREADID_X = (threadIdx.x / nr_thpf) * 2, OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X, LAST_IDX = FH * FW * CHL_MUL * IC, tid = threadIdx.x % nr_thpf; __half2 sum2{0.0, 0.0}; if (OUT_IDX % 
FW != FW - 1) { uint32_t ic, chl_mul, fh, fw; { uint32_t i = OUT_IDX; i = div_mod(i, FW, fw); i = div_mod(i, FH, fh); i = div_mod(i, CHL_MUL, chl_mul); ic = i; } if (ic >= IC) { return; } src += ic * IH * IW; dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW; const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH, oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH), ow_lox = max(int32_t(PW - fw + SW - 1), 0) / SW, ow_loy = max(int32_t(PW - fw + SW - 2), 0) / SW, ow_hix = min((IW - 1 + PW - fw) / SW + 1, OW), ow_hiy = min((IW - 2 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo, oblk_wx = ow_hix - ow_lox, oblk_wy = ow_hiy - ow_loy; if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1) { if (!tid) { flt_grad[OUT_IDX] = 0; flt_grad[OUT_IDX + 1] = 0; } return; } if (ow_lox >= ow_hix) { if (!tid) flt_grad[OUT_IDX] = 0; } if (IW + PW < fw + 2 || ow_loy >= ow_hiy) { if (!tid) flt_grad[OUT_IDX + 1] = 0; if (ow_lox >= ow_hix) return; } sum2.x = 0.0; sum2.y = 0.0; __half2 src2{0.0, 0.0}; __half2 dst2{0.0, 0.0}; const uint32_t oblk_w = max(ow_hix, ow_hiy) - min(ow_lox, ow_loy), oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL); for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) { uint32_t n_x, n_y, oh, ow_x, ow_y; n_x = div_mod(div_mod(oblk_idx, oblk_wx, ow_x), oblk_h, oh) * BATCH_UNROLL; n_y = div_mod(div_mod(oblk_idx, oblk_wy, ow_y), oblk_h, oh) * BATCH_UNROLL; oh += oh_lo; ow_x += ow_lox; ow_y += ow_loy; uint32_t ih = oh * SH - PH + fh, iw_x = ow_x * SW - PW + fw, iw_y = ow_y * SW - PW + fw + 1, soff_x = ih * IW + iw_x + n_x * SRC_BATCH_STRIDE, soff_y = ih * IW + iw_y + n_y * SRC_BATCH_STRIDE, doff_x = oh * OW + ow_x + n_x * DST_BATCH_STRIDE, doff_y = oh * OW + ow_y + n_y * DST_BATCH_STRIDE; #pragma unroll for (uint32_t i = 0; i < BATCH_UNROLL; ++i) { if (!i || n_x + i < N || n_y + i < N) { src2.x = 0.0; src2.y = 0.0; dst2.x = 0.0; dst2.y = 0.0; if (n_x + i < N && ow_x < ow_hix) { src2.x = src[soff_x]; dst2.x = dst_grad[doff_x]; } if (n_y + i < N && ow_y < ow_hiy) { src2.y = src[soff_y]; dst2.y = dst_grad[doff_y]; } sum2 = fma2(src2, dst2, sum2); } soff_x += SRC_BATCH_STRIDE; soff_y += SRC_BATCH_STRIDE; doff_x += DST_BATCH_STRIDE; doff_y += DST_BATCH_STRIDE; } } } else { for (size_t offset = 0; offset < 2; ++offset) { uint32_t ic, chl_mul, fh, fw; { uint32_t i = OUT_IDX + offset; i = div_mod(i, FW, fw); i = div_mod(i, FH, fh); i = div_mod(i, CHL_MUL, chl_mul); ic = i; } if (ic >= IC) { if (offset == 0) return; else break; } const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH, oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH), ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW, ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo, oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL); if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) { if (!tid) flt_grad[OUT_IDX + offset] = 0; continue; } __half sum(0.0); for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) { uint32_t n, oh, ow; n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL; oh += oh_lo; ow += ow_lo; uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw, soff = ic * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE, doff = (ic * CHL_MUL + chl_mul) * OH * OW + oh * OW + ow + n * DST_BATCH_STRIDE; #pragma unroll for (uint32_t i = 0; i < BATCH_UNROLL; ++i) { if (!i || n + i < N) { sum = fma(src[soff], dst_grad[doff], sum); } soff += SRC_BATCH_STRIDE; doff += DST_BATCH_STRIDE; } } if 
(!offset) sum2.x = sum; if (offset) sum2.y = sum; } } if (nr_thpf == 1) { flt_grad[OUT_IDX] = sum2.x; if (OUT_IDX != LAST_IDX) flt_grad[OUT_IDX + 1] = sum2.y; } else { extern __shared__ uint8_t shared_storage[]; __half2* thread_sum = reinterpret_cast<__half2*>(shared_storage); thread_sum += THREADID_X * nr_thpf / 2; thread_sum[tid] = sum2; #pragma unroll for (uint32_t i = nr_thpf / 2; i; i >>= 1) { bool cond = nr_thpf >= i * 2 && tid < i; if (i >= WARP_SIZE) { __syncthreads(); } else { cub::WARP_SYNC(0xffffffff); } if (cond) { __half2 one = {1.0, 1.0}; __half2 v0 = thread_sum[tid], v1 = fma2(v0, one, thread_sum[tid + i]); thread_sum[tid] = v1; } } if (!tid) { flt_grad[OUT_IDX] = thread_sum[0].x; if (OUT_IDX != LAST_IDX) flt_grad[OUT_IDX + 1] = thread_sum[0].y; } } } #endif #define GET_KERN(func, type) \ FixFunction<type> f_struct; \ switch (_p) { \ case 1 << 10: \ f_struct.f = func<type, 1 << 10>; \ break; \ case 1 << 9: \ f_struct.f = func<type, 1 << 9>; \ break; \ case 1 << 8: \ f_struct.f = func<type, 1 << 8>; \ break; \ case 1 << 7: \ f_struct.f = func<type, 1 << 7>; \ break; \ case 1 << 6: \ f_struct.f = func<type, 1 << 6>; \ break; \ case 1 << 5: \ f_struct.f = func<type, 1 << 5>; \ break; \ case 1 << 4: \ f_struct.f = func<type, 1 << 4>; \ break; \ case 1 << 3: \ f_struct.f = func<type, 1 << 3>; \ break; \ case 1 << 2: \ f_struct.f = func<type, 1 << 2>; \ break; \ case 1 << 1: \ f_struct.f = func<type, 1 << 1>; \ break; \ case 1 << 0: \ f_struct.f = func<type, 1 << 0>; \ break; \ default: \ megdnn_assert(false, "DO NOT IMP CASE FUNCTION!!"); \ } \ return f_struct; template <typename T> struct FixFunction { void (*f)(T*, const T*, const T*, Param); }; template <typename T> FixFunction<T> get_kern(const uint32_t& _p); template <> FixFunction<float> get_kern<float>(const uint32_t& _p) { GET_KERN(kern_bwd_filter_float, float); } #if CUDA_VERSION >= 9000 template <> FixFunction<__half> get_kern<__half>(const uint32_t& _p) { GET_KERN(kern_bwd_filter_hf, __half); } #endif template <> FixFunction<dt_float16> get_kern<dt_float16>(const uint32_t& _p) { GET_KERN(kern_bwd_filter_float, dt_float16); } #undef GET_KERN } // anonymous namespace namespace megdnn { namespace cuda { namespace convolution { namespace chanwise { template <typename T> void run_bwd_filter( T* filter_grad, const T* src, const T* dst_grad, const Param& param, cudaStream_t stream) { void (*kern)(T*, const T*, const T*, Param) = NULL; uint32_t nr_thread = query_blocksize_for_kernel(get_kern<T>(1024).f), nr_thpf = std::min( nr_thread, std::max<uint32_t>( 1, param.out_h * param.out_w * param.batch / (BATCH_UNROLL * 16))); // find nearest power-of-2 of nr_thpf do { #define CK(_n) \ if (nr_thpf >= _n) { \ kern = get_kern<T>(_n).f; \ nr_thpf = _n; \ break; \ } CK(1 << 10); CK(1 << 9); CK(1 << 8); CK(1 << 7); CK(1 << 6); CK(1 << 5); CK(1 << 4); CK(1 << 3); CK(1 << 2); CK(1 << 1); CK(1 << 0); #undef CK } while (0); megdnn_assert(kern); nr_thread = query_blocksize_for_kernel(kern); uint32_t nr_flt_per_blk = nr_thread / nr_thpf; while (nr_flt_per_blk * nr_thpf % WARP_SIZE) --nr_flt_per_blk; megdnn_assert(nr_flt_per_blk); int nr_block = DIVUP( param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk); nr_thread = nr_flt_per_blk * nr_thpf; uint32_t shared = nr_thread * 2 * sizeof(T); kern<<<nr_block, nr_thread, shared, stream>>>(filter_grad, src, dst_grad, param); after_kernel_launch(); } template void run_bwd_filter( float*, const float*, const float*, const Param&, cudaStream_t); #if CUDA_VERSION >= 9000 template void 
run_bwd_filter(
        __half*, const __half*, const __half*, const Param&, cudaStream_t);
#endif

template void run_bwd_filter(
        dt_float16*, const dt_float16*, const dt_float16*, const Param&, cudaStream_t);

}  // namespace chanwise
}  // namespace convolution
}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cuda.doxygen
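// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source). Each thread group in
// kern_bwd_filter_* reduces, for one filter element (ic, chl_mul, fh, fw), the
// sum over all valid output positions of src * dst_grad with
// ih = oh*SH - PH + fh and iw = ow*SW - PW + fw. The plain CPU reference below
// restates that reduction; ParamRef and chanwise_bwd_filter_ref are
// hypothetical stand-ins for the Param struct and a reference checker, using
// the same NCHW src/dst layouts and (ic, chl_mul, fh, fw) filter layout as the
// kernels above.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>

struct ParamRef {  // minimal stand-in for chanwise::Param
  uint32_t batch, src_chl, src_h, src_w, chl_mul;
  uint32_t flt_h, flt_w, out_h, out_w, pad_h, pad_w, stride_h, stride_w;
};

static void chanwise_bwd_filter_ref(float* flt_grad, const float* src,
                                    const float* dst_grad, const ParamRef& p) {
  for (uint32_t ic = 0; ic < p.src_chl; ++ic)
    for (uint32_t m = 0; m < p.chl_mul; ++m)
      for (uint32_t fh = 0; fh < p.flt_h; ++fh)
        for (uint32_t fw = 0; fw < p.flt_w; ++fw) {
          double sum = 0.0;
          for (uint32_t n = 0; n < p.batch; ++n)
            for (uint32_t oh = 0; oh < p.out_h; ++oh)
              for (uint32_t ow = 0; ow < p.out_w; ++ow) {
                int ih = int(oh * p.stride_h) - int(p.pad_h) + int(fh);
                int iw = int(ow * p.stride_w) - int(p.pad_w) + int(fw);
                if (ih < 0 || iw < 0 || ih >= int(p.src_h) || iw >= int(p.src_w))
                  continue;  // padded region contributes nothing
                size_t soff = ((size_t(n) * p.src_chl + ic) * p.src_h + ih) * p.src_w + iw;
                size_t doff = ((size_t(n) * p.src_chl * p.chl_mul + ic * p.chl_mul + m) *
                               p.out_h + oh) * p.out_w + ow;
                sum += double(src[soff]) * double(dst_grad[doff]);
              }
          size_t foff = ((size_t(ic) * p.chl_mul + m) * p.flt_h + fh) * p.flt_w + fw;
          flt_grad[foff] = float(sum);
        }
}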
#include "utility.hpp" using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define INC(x, l) ((x + 1) >= (l) ? (x) : ((x) + 1)) #define INTER_RESIZE_COEF_BITS 11 #define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS) #define CAST_BITS (INTER_RESIZE_COEF_BITS << 1) template <typename T> __DEVICE__ T bilinearSampleUchar(T values[][2], int x0, int x1, int y0, int y1); template <> __DEVICE__ uchar2 bilinearSampleUchar(uchar2 values[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int2 ret; uchar2 final_ret; ret.x = values[0][0].x * a0 + values[0][1].x * a1 + values[1][0].x * a2 + values[1][1].x * a3; final_ret.x = (ret.x + (1 << (CAST_BITS - 1))) >> CAST_BITS; ret.y = values[0][0].y * a0 + values[0][1].y * a1 + values[1][0].y * a2 + values[1][1].y * a3; final_ret.y = (ret.y + (1 << (CAST_BITS - 1))) >> CAST_BITS; return final_ret; } template <> __DEVICE__ uchar3 bilinearSampleUchar(uchar3 values[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int3 ret; uchar3 final_ret; ret.x = values[0][0].x * a0 + values[0][1].x * a1 + values[1][0].x * a2 + values[1][1].x * a3; final_ret.x = (ret.x + (1 << (CAST_BITS - 1))) >> CAST_BITS; ret.y = values[0][0].y * a0 + values[0][1].y * a1 + values[1][0].y * a2 + values[1][1].y * a3; final_ret.y = (ret.y + (1 << (CAST_BITS - 1))) >> CAST_BITS; ret.z = values[0][0].z * a0 + values[0][1].z * a1 + values[1][0].z * a2 + values[1][1].z * a3; final_ret.z = (ret.z + (1 << (CAST_BITS - 1))) >> CAST_BITS; return final_ret; } template <> __DEVICE__ uchar4 bilinearSampleUchar(uchar4 values[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int4 ret; uchar4 final_ret; ret.x = values[0][0].x * a0 + values[0][1].x * a1 + values[1][0].x * a2 + values[1][1].x * a3; final_ret.x = (ret.x + (1 << (CAST_BITS - 1))) >> CAST_BITS; ret.y = values[0][0].y * a0 + values[0][1].y * a1 + values[1][0].y * a2 + values[1][1].y * a3; final_ret.y = (ret.y + (1 << (CAST_BITS - 1))) >> CAST_BITS; ret.z = values[0][0].z * a0 + values[0][1].z * a1 + values[1][0].z * a2 + values[1][1].z * a3; final_ret.z = (ret.z + (1 << (CAST_BITS - 1))) >> CAST_BITS; ret.w = values[0][0].w * a0 + values[0][1].w * a1 + values[1][0].w * a2 + values[1][1].w * a3; final_ret.w = (ret.w + (1 << (CAST_BITS - 1))) >> CAST_BITS; return final_ret; } __global__ void resizeLinearKernel(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float float_y = ((element_y + 0.5f) * row_scale - 0.5f); float float_x = ((element_x + 0.5f) * col_scale - 0.5f); int int_y0 = floor(float_y); int int_x0 = floor(float_x); float_y -= int_y0; float_x -= int_x0; if (int_y0 < 0) { int_y0 = 0; float_y = 0; } if (int_x0 < 0) { int_x0 = 0; float_x = 0; } if (int_y0 >= src_rows) { int_y0 = src_rows - 1; float_y = 0; } if (int_x0 >= src_cols) { int_x0 = src_cols - 1; float_x = 0; } int int_y1 = INC(int_y0, src_rows); int buf_y[2]; float_y = float_y * INTER_RESIZE_COEF_SCALE; buf_y[0] = rint(INTER_RESIZE_COEF_SCALE - float_y); buf_y[1] = rint(float_y); int int_x1 = 
INC(int_x0, src_cols); int buf_x[2]; float_x = float_x * INTER_RESIZE_COEF_SCALE; buf_x[0] = rint(INTER_RESIZE_COEF_SCALE - rint(float_x)); buf_x[1] = rint(float_x); if (channels == 1) { int src_index0 = int_y0 * src_stride + int_x0; int src_index1 = int_y0 * src_stride + int_x1; int src_index2 = int_y1 * src_stride + int_x0; int src_index3 = int_y1 * src_stride + int_x1; int dst_index = element_y * dst_stride + element_x; int sum = 0; sum = buf_y[0] * buf_x[0] * src[src_index0] + buf_y[0] * buf_x[1] * src[src_index1] + buf_y[1] * buf_x[0] * src[src_index2] + buf_y[1] * buf_x[1] * src[src_index3]; dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS; } else if (channels == 3) { uchar3* input0 = (uchar3*)((uchar*)src + int_y0 * src_stride); uchar3* input1 = (uchar3*)((uchar*)src + int_y1 * src_stride); uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride); uchar3 values[2][2]; values[0][0] = input0[int_x0]; values[0][1] = input0[int_x1]; values[1][0] = input1[int_x0]; values[1][1] = input1[int_x1]; output[element_x] = bilinearSampleUchar(values, buf_x[0], buf_x[1], buf_y[0], buf_y[1]); } else { uchar4* input0 = (uchar4*)((uchar*)src + int_y0 * src_stride); uchar4* input1 = (uchar4*)((uchar*)src + int_y1 * src_stride); uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride); uchar4 values[2][2]; values[0][0] = input0[int_x0]; values[0][1] = input0[int_x1]; values[1][0] = input1[int_x0]; values[1][1] = input1[int_x1]; output[element_x] = bilinearSampleUchar(values, buf_x[0], buf_x[1], buf_y[0], buf_y[1]); } } __global__ void resizeLinearKernel(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, double col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float float_x = ((element_x + 0.5f) * col_scale - 0.5f); float float_y = ((element_y + 0.5f) * row_scale - 0.5f); int int_x0 = floor(float_x); int int_y0 = floor(float_y); float_x -= int_x0; float_y -= int_y0; if (int_y0 < 0) { int_y0 = 0; float_y = 0; } if (int_x0 < 0) { int_x0 = 0; float_x = 0; } if (int_y0 >= src_rows) { int_y0 = src_rows - 1; float_y = 0; } if (int_x0 >= src_cols) { int_x0 = src_cols - 1; float_x = 0; } int int_y1 = INC(int_y0,src_rows); float buf_y[2]; buf_y[0] = 1.f - float_y; buf_y[1] = 1.f - buf_y[0]; int int_x1 = INC(int_x0,src_cols); float buf_x[2]; buf_x[0] = 1.f - float_x; buf_x[1] = 1.f - buf_x[0]; if (channels == 1) { int index = int_y0 * src_stride; float src0 = src[index + int_x0]; float src1 = src[index + int_x1]; float value0 = buf_y[0] * buf_x[0] * src0; float value1 = buf_y[0] * buf_x[1] * src1; float sum = 0.f; sum += value0 + value1; index = int_y1 * src_stride; src0 = src[index + int_x0]; src1 = src[index + int_x1]; value0 = buf_y[1] * buf_x[0] * src0; value1 = buf_y[1] * buf_x[1] * src1; sum += value0 + value1; index = element_y * dst_stride + element_x; dst[index] = sum; } else if (channels == 3) { int index = int_y0 * src_stride; float3 src0 = ((float3*)(src + index))[int_x0]; float3 src1 = ((float3*)(src + index))[int_x1]; float3 value0 = buf_y[0] * buf_x[0] * src0; float3 value1 = buf_y[0] * buf_x[1] * src1; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value0; sum += value1; index = int_y1 * src_stride; src0 = ((float3*)(src + index))[int_x0]; src1 = ((float3*)(src + index))[int_x1]; value0 = buf_y[1] * buf_x[0] * src0; value1 = buf_y[1] * 
buf_x[1] * src1; sum += value0; sum += value1; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { int index = int_y0 * src_stride; float4 src0 = ((float4*)(src + index))[int_x0]; float4 src1 = ((float4*)(src + index))[int_x1]; float4 value0 = buf_y[0] * buf_x[0] * src0; float4 value1 = buf_y[0] * buf_x[1] * src1; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value0; sum += value1; index = int_y1 * src_stride; src0 = ((float4*)(src + index))[int_x0]; src1 = ((float4*)(src + index))[int_x1]; value0 = buf_y[1] * buf_x[0] * src0; value1 = buf_y[1] * buf_x[1] * src1; sum += value0; sum += value1; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } template <typename T, typename Tn> __global__ void resizeNearestPointKernel(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int int_y = element_y * row_scale; int_y = MIN(int_y, src_rows - 1); int int_x = element_x * col_scale; int_x = MIN(int_x, src_cols - 1); Tn* input = (Tn*)(src + int_y* src_stride); Tn* output = (Tn*)(dst + element_y * dst_stride); output[element_x] = input[int_x]; } template <typename T> __global__ void resizeAreaKernel0C1(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int start_x = element_x * col_scale; int start_y = element_y * row_scale; int x_end = start_x + col_scale; int y_end = start_y + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? y_end : src_rows; int area = (x_end - start_x) * (y_end - start_y); float sum = 0.f; T* input; for (int i = start_y; i < y_end; ++i) { input = (T*)(src + i * src_stride); for (int j = start_x; j < x_end; ++j) { sum += input[j]; } } sum /= area; T* output = (T*)(dst + element_y * dst_stride); if (sizeof(T) == 1) { output[element_x] = saturateCast(sum); } else { output[element_x] = sum; } } template <typename T, typename Tn> __global__ void resizeAreaKernel0C3(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int start_x = element_x * col_scale; int start_y = element_y * row_scale; int x_end = start_x + col_scale; int y_end = start_y + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? 
y_end : src_rows; int area = (x_end - start_x) * (y_end - start_y); float3 sum = make_float3(0.f, 0.f, 0.f); Tn* input; for (int i = start_y; i < y_end; ++i) { input = (Tn*)(src + i * src_stride); for (int j = start_x; j < x_end; ++j) { sum += input[j]; } } sum /= area; Tn* output = (Tn*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<Tn, float3>(sum); } template <typename T, typename Tn> __global__ void resizeAreaKernel0C4(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int start_x = element_x * col_scale; int start_y = element_y * row_scale; int x_end = start_x + col_scale; int y_end = start_y + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? y_end : src_rows; int area = (x_end - start_x) * (y_end - start_y); float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); Tn* input; for (int i = start_y; i < y_end; ++i) { input = (Tn*)(src + i * src_stride); for (int j = start_x; j < x_end; ++j) { sum += input[j]; } } sum /= area; Tn* output = (Tn*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<Tn, float4>(sum); } template <typename T> __global__ void resizeAreaKernel1C1(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float float_y0 = element_y * row_scale; float float_y1 = float_y0 + row_scale; int int_y0 = ceilf(float_y0); int int_y1 = floorf(float_y1); float float_x0 = element_x * col_scale; float float_x1 = float_x0 + col_scale; int int_x0 = ceilf(float_x0); int int_x1 = floorf(float_x1); T* input; float sum = 0.f; float area = fminf(col_scale, src_cols - float_x0) * fminf(row_scale, src_rows - float_y0); if (int_y0 - float_y0 > 1e-3) { input = (T*)(src + (int_y0 - 1) * src_stride); if (int_x0 - float_x0 > 1e-3) { sum = sum + input[int_x0 - 1] * (int_y0 - float_y0) * (int_x0 - float_x0); } for (int dx = int_x0; dx < int_x1; ++dx) { sum = sum + input[dx] * (int_y0 - float_y0); } if (float_x1 - int_x1 > 1e-3) { sum = sum + input[int_x1] * (int_y0 - float_y0) * (float_x1 - int_x1); } } input = (T*)(src + int_y0 * src_stride); for (int dy = int_y0; dy < int_y1; ++dy) { if (int_x0 - float_x0 > 1e-3) { sum = sum + input[int_x0 - 1] * ((int_x0 - float_x0)); } for (int dx = int_x0; dx < int_x1; ++dx) { sum = sum + input[dx]; } if (float_x1 - int_x1 > 1e-3) { sum = sum + input[int_x1] * ((float_x1 - int_x1)); } input += src_stride; } if (float_y1 - int_y1 > 1e-3) { if (int_x0 - float_x0 > 1e-3) { sum = sum + input[int_x0 - 1] * (float_y1 - int_y1) * (int_x0 - float_x0); } for (int dx = int_x0; dx < int_x1; ++dx) { sum = sum + input[dx] * (float_y1 - int_y1); } if (float_x1 - int_x1 > 1e-3) { sum = sum + input[int_x1] * (float_y1 - int_y1) * (float_x1 - int_x1); } } sum = sum / area; T* output = (T*)(dst + element_y * dst_stride); if (sizeof(T) == 1) { output[element_x] = saturateCast(sum); } else { output[element_x] = sum; } } template <typename T, typename Tn> __global__ void resizeAreaKernel1C3(const T* src, int src_rows, int src_cols, int channels, int src_stride, 
T* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float float_y0 = element_y * row_scale; float float_y1 = float_y0 + row_scale; int int_y0 = ceilf(float_y0); int int_y1 = floorf(float_y1); float float_x0 = element_x * col_scale; float float_x1 = float_x0 + col_scale; int int_x0 = ceilf(float_x0); int int_x1 = floorf(float_x1); Tn* input; float3 value; float3 sum = make_float3(0.f, 0.f, 0.f); float area = fminf(col_scale, src_cols - float_x0) * fminf(row_scale, src_rows - float_y0); if (int_y0 - float_y0 > 1e-3) { input = (Tn*)(src + (int_y0 - 1) * src_stride); if (int_x0 - float_x0 > 1e-3) { value = (int_y0 - float_y0) * (int_x0 - float_x0) * input[int_x0 - 1]; sum += value; } for (int dx = int_x0; dx < int_x1; ++dx) { value = (int_y0 - float_y0) * input[dx]; sum += value; } if (float_x1 - int_x1 > 1e-3) { value = (int_y0 - float_y0) * (float_x1 - int_x1) * input[int_x1]; sum += value; } } input = (Tn*)(src + int_y0 * src_stride); for (int dy = int_y0; dy < int_y1; ++dy) { if (int_x0 - float_x0 > 1e-3) { value = (int_x0 - float_x0) * input[int_x0 - 1]; sum += value; } for (int dx = int_x0; dx < int_x1; ++dx) { sum += input[dx]; } if (float_x1 - int_x1 > 1e-3) { value = (float_x1 - int_x1) * input[int_x1]; sum += value; } input = (Tn*)((T*)input + src_stride); } if (float_y1 - int_y1 > 1e-3) { if (int_x0 - float_x0 > 1e-3) { value = (float_y1 - int_y1) * (int_x0 - float_x0) * input[int_x0 - 1]; sum += value; } for (int dx = int_x0; dx < int_x1; ++dx) { value = (float_y1 - int_y1) * input[dx]; sum += value; } if (float_x1 - int_x1 > 1e-3) { value = (float_y1 - int_y1) * (float_x1 - int_x1) * input[int_x1]; sum += value; } } sum /= area; Tn* output = (Tn*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<Tn, float3>(sum); } template <typename T, typename Tn> __global__ void resizeAreaKernel1C4(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float float_y0 = element_y * row_scale; float float_y1 = float_y0 + row_scale; int int_y0 = ceilf(float_y0); int int_y1 = floorf(float_y1); float float_x0 = element_x * col_scale; float float_x1 = float_x0 + col_scale; int int_x0 = ceilf(float_x0); int int_x1 = floorf(float_x1); Tn* input; float4 value; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); float area = fminf(col_scale, src_cols - float_x0) * fminf(row_scale, src_rows - float_y0); if (int_y0 - float_y0 > 1e-3) { input = (Tn*)(src + (int_y0 - 1) * src_stride); if (int_x0 - float_x0 > 1e-3) { value = (int_y0 - float_y0) * (int_x0 - float_x0) * input[int_x0 - 1]; sum += value; } for (int dx = int_x0; dx < int_x1; ++dx) { value = (int_y0 - float_y0) * input[dx]; sum += value; } if (float_x1 - int_x1 > 1e-3) { value = (int_y0 - float_y0) * (float_x1 - int_x1) * input[int_x1]; sum += value; } } input = (Tn*)(src + int_y0 * src_stride); for (int dy = int_y0; dy < int_y1; ++dy) { if (int_x0 - float_x0 > 1e-3) { value = (int_x0 - float_x0) * input[int_x0 - 1]; sum += value; } for (int dx = int_x0; dx < int_x1; ++dx) { sum += input[dx]; } if (float_x1 - int_x1 > 1e-3) { value = (float_x1 - int_x1) 
* input[int_x1]; sum += value; } input = (Tn*)((T*)input + src_stride); } if (float_y1 - int_y1 > 1e-3) { if (int_x0 - float_x0 > 1e-3) { value = (float_y1 - int_y1) * (int_x0 - float_x0) * input[int_x0 - 1]; sum += value; } for (int dx = int_x0; dx < int_x1; ++dx) { value = (float_y1 - int_y1) * input[dx]; sum += value; } if (float_x1 - int_x1 > 1e-3) { value = (float_y1 - int_y1) * (float_x1 - int_x1) * input[int_x1]; sum += value; } } sum /= area; Tn* output = (Tn*)(dst + element_y * dst_stride); output[element_x] = saturateCastVector<Tn, float4>(sum); } __global__ void resizeAreaKernel2(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale, float inv_col_scale, float inv_row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int int_y0 = floor(element_y * row_scale); int int_x0 = floor(element_x * col_scale); float float_y = element_y + 1 - (int_y0 + 1) * inv_row_scale; float float_x = element_x + 1 - (int_x0 + 1) * inv_col_scale; float_y = float_y <= 0 ? 0.f : float_y - floor(float_y); float_x = float_x <= 0 ? 0.f : float_x - floor(float_x); if (int_y0 < 0) { int_y0 = 0; float_y = 0; } if (int_x0 < 0) { int_x0 = 0; float_x = 0; } if (int_y0 >= src_rows) { int_y0 = src_rows - 1; float_y = 0; } if (int_x0 >= src_cols) { int_x0 = src_cols - 1; float_x = 0; } int int_y1 = INC(int_y0, src_rows); int buf_y[2]; float_y = float_y * INTER_RESIZE_COEF_SCALE; buf_y[0] = rint(INTER_RESIZE_COEF_SCALE - float_y); buf_y[1] = rint(float_y); int int_x1 = INC(int_x0, src_cols); int buf_x[2]; float_x = float_x * INTER_RESIZE_COEF_SCALE; buf_x[0] = rint(INTER_RESIZE_COEF_SCALE - rint(float_x)); buf_x[1] = rint(float_x); if (channels == 1) { int src_index0 = int_y0 * src_stride + int_x0; int src_index1 = int_y0 * src_stride + int_x1; int src_index2 = int_y1 * src_stride + int_x0; int src_index3 = int_y1 * src_stride + int_x1; int dst_index = element_y * dst_stride + element_x; int sum = 0; sum = buf_y[0] * buf_x[0] * src[src_index0] + buf_y[0] * buf_x[1] * src[src_index1] + buf_y[1] * buf_x[0] * src[src_index2] + buf_y[1] * buf_x[1] * src[src_index3]; dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS; } else if (channels == 3) { uchar3* input0 = (uchar3*)((uchar*)src + int_y0 * src_stride); uchar3* input1 = (uchar3*)((uchar*)src + int_y1 * src_stride); uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride); uchar3 values[2][2]; values[0][0] = input0[int_x0]; values[0][1] = input0[int_x1]; values[1][0] = input1[int_x0]; values[1][1] = input1[int_x1]; output[element_x] = bilinearSampleUchar(values, buf_x[0], buf_x[1], buf_y[0], buf_y[1]); } else { uchar4* input0 = (uchar4*)((uchar*)src + int_y0 * src_stride); uchar4* input1 = (uchar4*)((uchar*)src + int_y1 * src_stride); uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride); uchar4 values[2][2]; values[0][0] = input0[int_x0]; values[0][1] = input0[int_x1]; values[1][0] = input1[int_x0]; values[1][1] = input1[int_x1]; output[element_x] = bilinearSampleUchar(values, buf_x[0], buf_x[1], buf_y[0], buf_y[1]); } } __global__ void resizeAreaKernel2(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, double col_scale, float row_scale, float inv_col_scale, float inv_row_scale) { int element_x = blockIdx.x * blockDim.x 
+ threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int int_y0 = floor(element_y * row_scale); int int_x0 = floor(element_x * col_scale); float float_y = element_y + 1 - (int_y0 + 1) * inv_row_scale; float float_x = element_x + 1 - (int_x0 + 1) * inv_col_scale; float_y = float_y <= 0 ? 0.f : float_y - floor(float_y); float_x = float_x <= 0 ? 0.f : float_x - floor(float_x); if (int_y0 < 0) { int_y0 = 0; float_y = 0; } if (int_x0 < 0) { int_x0 = 0; float_x = 0; } if (int_y0 >= src_rows) { int_y0 = src_rows - 1; float_y = 0; } if (int_x0 >= src_cols) { int_x0 = src_cols - 1; float_x = 0; } int int_y1 = INC(int_y0,src_rows); float buf_y[2]; buf_y[0] = 1.f - float_y; buf_y[1] = 1.f - buf_y[0]; int int_x1 = INC(int_x0,src_cols); float buf_x[2]; buf_x[0] = 1.f - float_x; buf_x[1] = 1.f - buf_x[0]; if (channels == 1) { int index = int_y0 * src_stride; float src0 = src[index + int_x0]; float src1 = src[index + int_x1]; float value0 = buf_y[0] * buf_x[0] * src0; float value1 = buf_y[0] * buf_x[1] * src1; float sum = 0.f; sum += value0 + value1; index = int_y1 * src_stride; src0 = src[index + int_x0]; src1 = src[index + int_x1]; value0 = buf_y[1] * buf_x[0] * src0; value1 = buf_y[1] * buf_x[1] * src1; sum += value0 + value1; index = element_y * dst_stride + element_x; dst[index] = sum; } else if (channels == 3) { int index = int_y0 * src_stride; float3 src0 = ((float3*)(src + index))[int_x0]; float3 src1 = ((float3*)(src + index))[int_x1]; float3 value0 = buf_y[0] * buf_x[0] * src0; float3 value1 = buf_y[0] * buf_x[1] * src1; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value0; sum += value1; index = int_y1 * src_stride; src0 = ((float3*)(src + index))[int_x0]; src1 = ((float3*)(src + index))[int_x1]; value0 = buf_y[1] * buf_x[0] * src0; value1 = buf_y[1] * buf_x[1] * src1; sum += value0; sum += value1; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { int index = int_y0 * src_stride; float4 src0 = ((float4*)(src + index))[int_x0]; float4 src1 = ((float4*)(src + index))[int_x1]; float4 value0 = buf_y[0] * buf_x[0] * src0; float4 value1 = buf_y[0] * buf_x[1] * src1; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value0; sum += value1; index = int_y1 * src_stride; src0 = ((float4*)(src + index))[int_x0]; src1 = ((float4*)(src + index))[int_x1]; value0 = buf_y[1] * buf_x[0] * src0; value1 = buf_y[1] * buf_x[1] * src1; sum += value0; sum += value1; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } RetCode resize(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, InterpolationType interpolation, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows >= 1 && src_cols >= 1); PPL_ASSERT(dst_rows >= 1 && dst_cols >= 1); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); PPL_ASSERT(interpolation == INTERPOLATION_TYPE_LINEAR || interpolation == INTERPOLATION_TYPE_NEAREST_POINT || interpolation == INTERPOLATION_TYPE_AREA); cudaError_t code; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); 
return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } int kBlockX = 32; int kBlockY = 16; if (interpolation == INTERPOLATION_TYPE_NEAREST_POINT) { kBlockY = 4; } dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; float inv_col_scale = 1.0 / col_scale; float inv_row_scale = 1.0 / row_scale; if (interpolation == INTERPOLATION_TYPE_LINEAR) { resizeLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (interpolation == INTERPOLATION_TYPE_NEAREST_POINT) { if (channels == 1) { resizeNearestPointKernel<uchar, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeNearestPointKernel<uchar, uchar3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeNearestPointKernel<uchar, uchar4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else if (interpolation == INTERPOLATION_TYPE_AREA) { if (src_cols > dst_cols && src_rows > dst_rows) { if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) { if (channels == 1) { resizeAreaKernel0C1<uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel0C3<uchar, uchar3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel0C4<uchar, uchar4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else { if (channels == 1) { resizeAreaKernel1C1<uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel1C3<uchar, uchar3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel1C4<uchar, uchar4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } } else { resizeAreaKernel2<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale, inv_col_scale, inv_row_scale); } } else { } code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resize(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, InterpolationType interpolation, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows >= 1 && src_cols >= 1); PPL_ASSERT(dst_rows >= 1 && dst_cols >= 1); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); PPL_ASSERT(interpolation == INTERPOLATION_TYPE_LINEAR || 
interpolation == INTERPOLATION_TYPE_NEAREST_POINT || interpolation == INTERPOLATION_TYPE_AREA); cudaError_t code; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } int kBlockX = 32; int kBlockY = 16; if (interpolation == INTERPOLATION_TYPE_LINEAR || interpolation == INTERPOLATION_TYPE_NEAREST_POINT) { kBlockY = 4; } dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX - 1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; double col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; float inv_col_scale = 1.0 / col_scale; float inv_row_scale = 1.0 / row_scale; if (interpolation == INTERPOLATION_TYPE_LINEAR) { resizeLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (interpolation == INTERPOLATION_TYPE_NEAREST_POINT) { if (channels == 1) { resizeNearestPointKernel<float, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeNearestPointKernel<float, float3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeNearestPointKernel<float, float4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else if (interpolation == INTERPOLATION_TYPE_AREA) { if (src_cols > dst_cols && src_rows > dst_rows) { if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) { if (channels == 1) { resizeAreaKernel0C1<float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel0C3<float, float3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel0C4<float, float4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else { if (channels == 1) { resizeAreaKernel1C1<float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel1C3<float, float3><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel1C4<float, float4><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } } else { resizeAreaKernel2<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale, inv_col_scale, inv_row_scale); } } else { } code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode Resize<uchar, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* 
inData, int outHeight, int outWidth, int outWidthStride, uchar* outData, InterpolationType interpolation) { RetCode code = resize(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, interpolation, stream); return code; } template <> RetCode Resize<uchar, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData, InterpolationType interpolation) { RetCode code = resize(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, interpolation, stream); return code; } template <> RetCode Resize<uchar, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData, InterpolationType interpolation) { RetCode code = resize(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, interpolation, stream); return code; } template <> RetCode Resize<float, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData, InterpolationType interpolation) { RetCode code = resize(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, interpolation, stream); return code; } template <> RetCode Resize<float, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData, InterpolationType interpolation) { RetCode code = resize(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, interpolation, stream); return code; } template <> RetCode Resize<float, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData, InterpolationType interpolation) { RetCode code = resize(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, interpolation, stream); return code; } } // namespace cuda } // namespace cv } // namespace ppl
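// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source). resizeLinearKernel
// blends the four uchar taps with fixed-point weights: each fractional weight
// is scaled by INTER_RESIZE_COEF_SCALE = 2^11, so the weighted sum carries a
// 2^22 factor that is removed with a rounding shift by CAST_BITS = 22. The
// scalar helper below, bilinear_fixed_point (a hypothetical name), restates
// that arithmetic for a single sample with fractional offsets frac_x, frac_y
// toward the right column / bottom row.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdint>

static inline uint8_t bilinear_fixed_point(uint8_t p00, uint8_t p01,
                                           uint8_t p10, uint8_t p11,
                                           float frac_x, float frac_y) {
  const int kBits = 11;                        // INTER_RESIZE_COEF_BITS
  const int kScale = 1 << kBits;               // INTER_RESIZE_COEF_SCALE
  const int kCastBits = kBits << 1;            // CAST_BITS
  int wx1 = (int)std::lrint(frac_x * kScale);  // weight of the right column
  int wx0 = kScale - wx1;                      // weight of the left column
  int wy1 = (int)std::lrint(frac_y * kScale);  // weight of the bottom row
  int wy0 = kScale - wy1;                      // weight of the top row
  int sum = wy0 * wx0 * p00 + wy0 * wx1 * p01 +
            wy1 * wx0 * p10 + wy1 * wx1 * p11;        // fits in 32-bit int
  return (uint8_t)((sum + (1 << (kCastBits - 1))) >> kCastBits);  // round, rescale
}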
using namespace cub; using namespace std; /** * Computes the histogram over the digit values of an array of keys that MUST have a length of an integer multiple of (KPT * blockDim.x). * The padding to the integer multiple can be done by adding 0's at the end and subtracting the number of padded 0's from the final result's 0 bin. * The 2^NUM_BITS possible counts (0..2^NUM_BITSNUM_BITS-1) will be placed in global_histo. * @param keys [IN] The keys for which to compute the histogram * @param digit [IN] * @param global_histo [OUT] The array of element counts, MUST be 256 in size. * @param per_block_histo [OUT] */ template< typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32) int NUM_BITS, // Number of bits being sorted at a time int KPT, // Number of keys per thread int TPB, // Number of threads per block int PRE_SORT_RUNS_LENGTH // For values greater than 1, this causes to sort a thread's keys by runs of a given length to improve run-length encoded updates to shared memory. > __global__ void rdxsrt_histogram(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo) { /*** TYPEDEFs***/ typedef Traits<KeyT> KeyTraits; typedef typename KeyTraits::UnsignedBits UnsignedBits; /*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/ /*** DECLARATIONS ***/ UnsignedBits tloc_keys[KPT]; uint tloc_masked[KPT]; __shared__ uint shared_bins[0x01<<NUM_BITS]; /*** INIT SHARED HISTO ***/ if(threadIdx.x < 32){ #pragma unroll for(int i=0;i<(0x01<<NUM_BITS);i+=32){ shared_bins[i+threadIdx.x] = 0; } } __syncthreads(); /*** GET KEYS & PREPARE KEYS FOR HISTO ***/ // Bucket index used to determine the memory offset of the bucket's global histogram const uint bucket_idx = 0; // This thread block's keys memory offset, pointing to the index of its first key const IndexT block_offset = (blockDim.x * blockIdx.x * KPT); // Load keys // KeyLoader(block_offset, threadIdx.x).template LoadStrided<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys); #pragma unroll for (int i=0; i<KPT; i++) { tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i]; } #if true || USE_RLE_HISTO // Mask #pragma unroll for (int i=0; i<KPT; i++) { tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]); tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); } #if 0 /*** SORT RUNS ***/ if(PRE_SORT_RUNS_LENGTH>1){ SortingNetwork<uint>::sort_runs<PRE_SORT_RUNS_LENGTH>(tloc_masked); } #endif /*** COMPUTE HISTO ***/ uint rle = 1; #pragma unroll for(int i=1; i<KPT; i++){ if(tloc_masked[i] == tloc_masked[i-1]) rle++; else{ atomicAdd(&shared_bins[tloc_masked[i-1]], rle); rle=1; } } atomicAdd(&shared_bins[tloc_masked[KPT-1]], rle); #else #pragma unroll for(int i=0; i<KPT; i++){ tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); atomicAdd(&shared_bins[tloc_masked[i]], 1); } #endif // Make sure we've got the counts from all threads __syncthreads(); /*** Write shared histo to global histo ***/ if(threadIdx.x < 32){ for(int i=0;i<(0x01<<NUM_BITS);i+=32){ atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]); // per_block_histo[blockIdx.x*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x]; } } } template< typename KeyT, // Data type of the keys within device memory. 
Data will be twiddled (if necessary) to unsigned type typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32) int NUM_BITS, // Number of bits being sorted at a time int KPT, // Number of keys per thread int TPB, // Number of threads per block int PRE_SORT_RUNS_LENGTH // For values greater than 1, this causes to sort a thread's keys by runs of a given length to improve run-length encoded updates to shared memory. > __global__ void rdxsrt_histogram_with_guards(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo, const IndexT total_keys, const int block_index_offset) { /*** TYPEDEFs***/ typedef Traits<KeyT> KeyTraits; typedef typename KeyTraits::UnsignedBits UnsignedBits; /*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/ /*** DECLARATIONS ***/ UnsignedBits tloc_keys[KPT]; uint tloc_masked[KPT]; __shared__ uint shared_bins[(0x01<<NUM_BITS) + 1]; /*** INIT SHARED HISTO ***/ if (threadIdx.x < 32) { #pragma unroll for(int i=0;i<(0x01<<NUM_BITS);i+=32){ shared_bins[i+threadIdx.x] = 0; } } __syncthreads(); /*** GET KEYS & PREPARE KEYS FOR HISTO ***/ // Bucket index used to determine the memory offset of the bucket's global histogram const uint bucket_idx = 0; // This thread block's keys memory offset, pointing to the index of its first key const IndexT block_offset = (blockDim.x * (block_index_offset + blockIdx.x) * KPT); // Maximum number of keys the block may fetch const IndexT block_max_num_keys = total_keys - block_offset; // KeyLoader(block_offset, threadIdx.x).template LoadStridedWithGuards<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys, block_max_num_keys); #pragma unroll for (int i=0; i<KPT; i++) { if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) { tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i]; } } #pragma unroll for(int i=0; i<KPT; i++){ // if(KeyLoader(block_offset, threadIdx.x).ThreadIndexInBounds(block_max_num_keys, i)){ if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) { tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]); tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); atomicAdd(&shared_bins[tloc_masked[i]], 1); } } // Make sure we've got the counts from all threads __syncthreads(); /*** Write shared histo to global histo ***/ if(threadIdx.x < 32){ for(int i=0;i<(0x01<<NUM_BITS);i+=32){ atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]); // per_block_histo[(block_index_offset + blockIdx.x)*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x]; } } } /** * Makes a single pass over the input array to find entries whose digit is equal to selected digit value and greater than * digit value. Entries equal to digit value are written to keys_buffer for future processing, entries greater * are written to output array. * @param d_keys_in [IN] The keys for which to compute the histogram * @param digit [IN] Digit index (0 => highest digit, 3 => lowest digit for 32-bit) * @param digit_val [IN] Digit value. * @param num_items [IN] Number of entries. * @param d_keys_buffer [OUT] Entries with x[digit] > digit_val. * @param d_keys_out [OUT] Entries with x[digit] > digit_val. * @param d_index_buffer [OUT] Index into d_keys_buffer. * @param d_index_out [OUT] Index into d_keys_out. */ template< typename KeyT, // Data type of the keys within device memory. 
Data will be twiddled (if necessary) to unsigned type typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32) int NUM_BITS, // Number of bits being sorted at a time int KPT, // Number of keys per thread int TPB // Number of threads per block > __global__ void select_kth_bucket(KeyT* d_keys_in, const uint digit, const uint digit_val, uint num_items, KeyT* d_keys_buffer, KeyT* d_keys_out, uint* d_index_buffer, uint* d_index_out) { typedef Traits<KeyT> KeyTraits; typedef typename KeyTraits::UnsignedBits UnsignedBits; // Specialize BlockLoad for a 1D block of TPB threads owning KPT integer items each typedef cub::BlockLoad<UnsignedBits, TPB, KPT, BLOCK_LOAD_TRANSPOSE> BlockLoadT; // Specialize BlockScan type for our thread block typedef BlockScan<int, TPB, BLOCK_SCAN_RAKING> BlockScanT; const int tile_size = TPB * KPT; int tile_idx = blockIdx.x; // Current tile index int tile_offset = tile_idx * tile_size; // Allocate shared memory for BlockLoad __shared__ union TempStorage { typename BlockLoadT::TempStorage load_items; typename BlockScanT::TempStorage scan; int offset[1]; UnsignedBits raw_exchange[2 * TPB * KPT]; } temp_storage; // Load a segment of consecutive items that are blocked across threads UnsignedBits key_entries[KPT]; /*float payload_entries[KPT];*/ int selection_flags[KPT]; int selection_indices[KPT]; int num_tiles = (num_items + tile_size - 1) / tile_size; int num_tile_items = tile_size; bool is_last_tile = false; if (tile_idx == num_tiles - 1) { num_tile_items = num_items - tile_offset; is_last_tile = true; } // Load keys if (is_last_tile) BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries, num_tile_items); else BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries); #if 0 if (is_last_tile) BlockLoadT(temp_storage.load_items).Load(payload + tile_offset, payload_entries, num_tile_items); else BlockLoadT(temp_storage.load_items).Load(payload + tile_offset, payload_entries); #endif __syncthreads(); /*** Step 1: Find keys with digit value to selected digit value ***/ #pragma unroll for (int ITEM = 0; ITEM < KPT; ++ITEM) { // Out-of-bounds items are selection_flags selection_flags[ITEM] = 0; if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) { UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]); uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); selection_flags[ITEM] = (masked_key > digit_val); } } __syncthreads(); // Compute exclusive prefix sum int num_selected; BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected); __syncthreads(); if (num_selected > 0) { int index_out; if (threadIdx.x == 0) { // Find index into keys_out array index_out = atomicAdd(d_index_out, num_selected); temp_storage.offset[0] = index_out; } __syncthreads(); index_out = temp_storage.offset[0]; __syncthreads(); // Compact and scatter items #pragma unroll for (int ITEM = 0; ITEM < KPT; ++ITEM) { int local_scatter_offset = selection_indices[ITEM]; if (selection_flags[ITEM]) { temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM]; /*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/ } } __syncthreads(); // Write out matched entries to output array for (int item = threadIdx.x; item < num_selected; item += TPB) { reinterpret_cast<UnsignedBits*>(d_keys_out)[index_out + item] = 
temp_storage.raw_exchange[item]; } __syncthreads(); #if 0 for (int item = threadIdx.x; item < num_selected; item += TPB) { payload_out[num_selections_prefix + item] = temp_storage.raw_exchange[tile_size + item]; } #endif } /*** Step 2: Find entries that have digit equal to digit value ***/ #pragma unroll for (int ITEM = 0; ITEM < KPT; ++ITEM) { // Out-of-bounds items are selection_flags selection_flags[ITEM] = 0; if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) { UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]); uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); selection_flags[ITEM] = (masked_key == digit_val); } } __syncthreads(); // Compute exclusive prefix sum BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected); __syncthreads(); if (num_selected > 0) { int index_buffer; if (threadIdx.x == 0) { index_buffer = atomicAdd(d_index_buffer, num_selected); temp_storage.offset[0] = index_buffer; } __syncthreads(); index_buffer = temp_storage.offset[0]; __syncthreads(); // Compact and scatter items #pragma unroll for (int ITEM = 0; ITEM < KPT; ++ITEM) { int local_scatter_offset = selection_indices[ITEM]; if (selection_flags[ITEM]) { temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM]; /*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/ } } __syncthreads(); // Write out output entries for (int item = threadIdx.x; item < num_selected; item += TPB) { reinterpret_cast<UnsignedBits*>(d_keys_buffer)[index_buffer + item] = temp_storage.raw_exchange[item]; } __syncthreads(); } } #define KPT 16 #define TPB 384 #define DIGIT_BITS 8 template<typename KeyT> cudaError_t radixSelectTopK(KeyT *d_keys_in, uint num_items, uint k, KeyT *d_keys_out, CachingDeviceAllocator& g_allocator) { cudaError error = cudaSuccess; DoubleBuffer<KeyT> d_keys; d_keys.d_buffers[0] = d_keys_in; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(KeyT) * num_items)); uint* d_histogram; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram, sizeof(uint) * num_items)); // We allocate two indices, one that maintains index into output array (this goes till K) // second maintains index into the output buffer containing reduced set of top-k candidates. uint* d_index_out; uint* d_index_buffer; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_index_out, sizeof(uint))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_index_buffer, sizeof(uint))); // Set the index into output array to 0. 
cudaMemset(d_index_out, 0, 4); uint* h_histogram = new uint[256]; uint KPB = KPT * TPB; for (uint digit = 0; digit < 4; digit++) { uint num_blocks = num_items / KPB;// Pass-0 rough processing blocks (floor on purpose) uint processed_elements = num_blocks * KPB;// Pass-0 number of rough processed elements uint remaining_elements = num_items - processed_elements;// Do the remaining elements with a check in the inner loop uint remainder_blocks = (KPB-1+remaining_elements) / KPB;// Number of blocks required for remaining elements (typically 0 or 1) // Zero out the histogram cudaMemset(d_histogram, 0, 256 * 4); if (num_blocks > 0) rdxsrt_histogram<KeyT, uint, DIGIT_BITS, KPT, TPB, 9><<<num_blocks, TPB, 0>>>(d_keys.Current(), digit, d_histogram); if (remaining_elements > 0) rdxsrt_histogram_with_guards<KeyT, uint, DIGIT_BITS, KPT, TPB, 9><<<remainder_blocks, TPB, 0>>>(d_keys.Current(), digit, d_histogram, num_items, num_blocks); cudaMemcpy(h_histogram, d_histogram, 256 * sizeof(uint), cudaMemcpyDeviceToHost); // Check for failure to launch CubDebugExit(error = cudaPeekAtLastError()); cudaMemcpy(h_histogram, d_histogram, 256 * sizeof(uint), cudaMemcpyDeviceToHost); uint rolling_sum = 0; uint digit_val; for (int i=255; i>=0; i--) { if ((rolling_sum + h_histogram[i]) > k) { digit_val = i; k -= rolling_sum; break; } rolling_sum += h_histogram[i]; } cudaMemset(d_index_buffer, 0, 4); select_kth_bucket<KeyT, uint, DIGIT_BITS, KPT, TPB><<<num_blocks + remainder_blocks, TPB>>>(d_keys.Current(), digit, digit_val, num_items, d_keys.Alternate(), d_keys_out, d_index_buffer, d_index_out); CubDebugExit(error = cudaPeekAtLastError()); uint h_index_out; uint h_index_buffer; cudaMemcpy(&h_index_out, d_index_out, sizeof(uint), cudaMemcpyDeviceToHost); cudaMemcpy(&h_index_buffer, d_index_buffer, sizeof(uint), cudaMemcpyDeviceToHost); // Update number of items to reflect reduced number of elements. num_items = h_index_buffer; if (k == 0) break; else if (k != 0 && digit == 3) { // We are at last digit and k != 4 implies that kth value has repetition. // Copy any of the repeated values to out array to complete the array. cudaMemcpy(d_keys_out + h_index_out, d_keys.Alternate(), k * sizeof(KeyT), cudaMemcpyDeviceToDevice); k -= k; } // Toggle the buffer index in the double buffer d_keys.selector = d_keys.selector ^ 1; } // Cleanup if (d_keys.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[1])); if (d_histogram) CubDebugExit(g_allocator.DeviceFree(d_histogram)); if (d_index_buffer) CubDebugExit(g_allocator.DeviceFree(d_index_buffer)); if (d_index_out) CubDebugExit(g_allocator.DeviceFree(d_index_out)); return error; }
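// ---------------------------------------------------------------------------
// Added usage note: a minimal sketch of calling radixSelectTopK() as defined
// above. The float key type and the helper name are illustrative; d_keys is
// assumed to already reside in device memory and may be reordered, because the
// routine uses it as one half of its internal double buffer, and d_topk must
// provide room for at least k keys.
static cudaError_t radixSelectTopKSketch(float* d_keys, uint num_items, uint k,
                                         float* d_topk) {
  cub::CachingDeviceAllocator allocator; // scratch allocator consumed by the routine
  cudaError_t err = radixSelectTopK<float>(d_keys, num_items, k, d_topk, allocator);
  if (err == cudaSuccess)
    err = cudaDeviceSynchronize(); // kernels above run on the default stream
  return err;
}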
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR #include <claraparabricks/genomeworks/utils/stringutils.hpp> //array_to_string #include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size #include <numeric> #include "gtest/gtest.h" namespace claraparabricks { namespace genomeworks { namespace cudapoa { class BasicNW { public: const static int16_t gap_score_ = -8; const static int16_t mismatch_score_ = -6; const static int16_t match_score_ = 8; public: BasicNW(std::vector<uint8_t> nodes, std::vector<int16_t> sorted_graph, Int16Vec2D outgoing_edges, std::vector<uint8_t> read) : graph_(nodes, sorted_graph, outgoing_edges) , read_(read) { // do nothing } BasicNW() = delete; void get_graph_buffers(int16_t* incoming_edges, uint16_t* incoming_edge_count, int16_t* outgoing_edges, uint16_t* outgoing_edge_count, uint8_t* nodes, int16_t* node_count, int16_t* graph, int16_t* node_id_to_pos) const { graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count); graph_.get_nodes(nodes, node_count); graph_.get_sorted_graph(graph); graph_.get_node_id_to_pos(node_id_to_pos); } void get_read_buffers(uint8_t* read, uint16_t* read_count) const { for (int i = 0; i < get_size(read_); i++) { read[i] = read_[i]; } *read_count = get_size(read_); } protected: SortedGraph graph_; std::vector<uint8_t> read_; }; typedef std::pair<std::string, std::string> NWAnswer; typedef std::pair<NWAnswer, BasicNW> NWTestPair; // create a vector of test cases std::vector<NWTestPair> getNWTestCases() { std::vector<NWTestPair> test_cases; /* * read: A A T A * graph: A — A — A — A * alignment graph: 0 1 2 3 * alignment read: 0 1 2 3 * T * / \ * final graph A — A A * \ / * A */ NWAnswer ans_1("3,2,1,0", "3,2,1,0"); //alginment_graph & alignment_read are reversed BasicNW nw_1({'A', 'A', 'A', 'A'}, //nodes {0, 1, 2, 3}, //sorted_graph {{1}, {2}, {3}, {}}, //outgoing_edges {'A', 'A', 'T', 'A'}); //read test_cases.emplace_back(std::move(ans_1), std::move(nw_1)); /* * read: A T C G A * graph: A — T — C — G * alignment graph: 0 1 2 3 -1 * alignment read: 0 1 2 3 4 * * final graph A — T — C — G — A * */ NWAnswer ans_2("-1,3,2,1,0", "4,3,2,1,0"); //alginment_graph & alignment_read are reversed BasicNW nw_2({'A', 'T', 'C', 'G'}, //nodes {0, 1, 2, 3}, //sorted_graph {{1}, {2}, {3}, {}}, //outgoing_edges {'A', 'T', 'C', 'G', 'A'}); //read test_cases.emplace_back(std::move(ans_2), std::move(nw_2)); /* * read: A T C G * A * / \ * graph: A — C — C — G * alignment graph: 0 1 2 3 * alignment read: 0 1 2 3 * T * / \ * final graph A — C — C — G * \ / * A */ NWAnswer ans_3("3,2,1,0", "3,2,1,0"); //alginment_graph & alignment_read are reversed BasicNW nw_3({'A', 'A', 'C', 'G', 'C'}, //nodes {0, 4, 1, 2, 3}, //sorted_graph {{1, 4}, {2}, {3}, {}, {2}}, //outgoing_edges {'A', 'T', 'C', 'G'}); //read test_cases.emplace_back(std::move(ans_3), std::move(nw_3)); /* * read: A A * graph: A — T — T — G — A * alignment graph: 0 1 2 3 4 * alignment read: 0 -1 -1 -1 1 * * final graph A — T — T — G — A * \_____________/ * */ NWAnswer ans_4("4,3,2,1,0", "1,-1,-1,-1,0"); //alginment_graph & alignment_read are reversed BasicNW nw_4({'A', 'T', 'T', 'G', 'A'}, //nodes {0, 1, 2, 3, 4}, //sorted_graph {{1}, {2}, {3}, {4}, {}}, //outgoing_edges {'A', 'A'}); //read test_cases.emplace_back(std::move(ans_4), std::move(nw_4)); /* * read: A C T T A * T — G * / \ * graph: A — C — A — T — A * alignment graph: 0 5 6 3 4 * alignment read: 0 1 2 3 4 * T — G * / \ * final graph A — C — A — T — A * \ / * T * */ 
NWAnswer ans_5("4,3,6,5,0", "4,3,2,1,0"); //alignment_graph & alignment_read are reversed BasicNW nw_5({'A', 'T', 'G', 'T', 'A', 'C', 'A'}, //nodes {0, 5, 1, 6, 2, 3, 4}, //sorted_graph {{1, 5}, {2}, {3}, {4}, {}, {6}, {3}}, //outgoing_edges {'A', 'C', 'T', 'T', 'A'}); //read test_cases.emplace_back(std::move(ans_5), std::move(nw_5)); //add more test cases below return test_cases; } // host function for calling the kernel to test full-band NW device function. NWAnswer testNW(const BasicNW& obj) { //declare device buffer uint8_t* nodes = nullptr; int16_t* graph = nullptr; int16_t* node_id_to_pos = nullptr; int16_t graph_count = 0; //local uint16_t* incoming_edge_count = nullptr; int16_t* incoming_edges = nullptr; uint16_t* outgoing_edge_count = nullptr; int16_t* outgoing_edges = nullptr; uint8_t* read = nullptr; uint16_t read_count = 0; //local int16_t* scores = nullptr; int16_t* alignment_graph = nullptr; int16_t* alignment_read = nullptr; int16_t* aligned_nodes = nullptr; //local; to store num of nodes aligned (length of alignment_graph and alignment_read) BatchConfig batch_size; //default max_sequence_size = 1024, max_sequences_per_poa = 100 //allocate unified memory so they can be accessed by both host and device. GW_CU_CHECK_ERR(cudaMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&scores, batch_size.max_nodes_per_graph * batch_size.matrix_sequence_dimension * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_graph, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&read, batch_size.max_sequence_size * sizeof(uint8_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_read, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&aligned_nodes, sizeof(int16_t))); //initialize all 'count' buffers memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(node_id_to_pos, 0, batch_size.max_nodes_per_graph * sizeof(int16_t)); memset(scores, 0, batch_size.max_nodes_per_graph * batch_size.matrix_sequence_dimension * sizeof(int16_t)); //calculate edge counts on host obj.get_graph_buffers(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count, nodes, &graph_count, graph, node_id_to_pos); obj.get_read_buffers(read, &read_count); int32_t gap_score = BasicNW::gap_score_; int32_t mismatch_score = BasicNW::mismatch_score_; int32_t match_score = BasicNW::match_score_; //call the host wrapper of nw kernel runNW(nodes, graph, node_id_to_pos, graph_count, incoming_edge_count, incoming_edges, outgoing_edge_count, read, read_count, scores, batch_size.matrix_sequence_dimension, alignment_graph, alignment_read, gap_score, mismatch_score, match_score, aligned_nodes); 
GW_CU_CHECK_ERR(cudaDeviceSynchronize()); //input and output buffers are the same ones in unified memory, so the results are updated in place //results are stored in alignment_graph and alignment_read; return string representation of those auto res = std::make_pair(genomeworks::stringutils::array_to_string(alignment_graph, *aligned_nodes, ","), genomeworks::stringutils::array_to_string(alignment_read, *aligned_nodes, ",")); GW_CU_CHECK_ERR(cudaFree(nodes)); GW_CU_CHECK_ERR(cudaFree(graph)); GW_CU_CHECK_ERR(cudaFree(node_id_to_pos)); GW_CU_CHECK_ERR(cudaFree(incoming_edges)); GW_CU_CHECK_ERR(cudaFree(incoming_edge_count)); GW_CU_CHECK_ERR(cudaFree(outgoing_edges)); GW_CU_CHECK_ERR(cudaFree(outgoing_edge_count)); GW_CU_CHECK_ERR(cudaFree(scores)); GW_CU_CHECK_ERR(cudaFree(alignment_graph)); GW_CU_CHECK_ERR(cudaFree(read)); GW_CU_CHECK_ERR(cudaFree(alignment_read)); GW_CU_CHECK_ERR(cudaFree(aligned_nodes)); return res; } using ::testing::TestWithParam; using ::testing::ValuesIn; class NWTest : public TestWithParam<NWTestPair> { public: void SetUp() {} NWAnswer runNWTest(const BasicNW& nw) { return testNW(nw); } }; TEST_P(NWTest, TestNWCorrectness) { const auto test_case = GetParam(); EXPECT_EQ(test_case.first, runNWTest(test_case.second)); } INSTANTIATE_TEST_SUITE_P(TestNW, NWTest, ValuesIn(getNWTestCases())); //--------------------------------------------------------------------------------------- // host function for calling the kernels to test static/adaptive-band NW with/without traceback buffer NWAnswer testNWbanded(const BasicNW& obj, bool adaptive, bool traceback = false) { //declare device buffer uint8_t* nodes = nullptr; int16_t* graph = nullptr; int16_t* node_id_to_pos = nullptr; int16_t graph_count = 0; //local uint16_t* incoming_edge_count = nullptr; int16_t* incoming_edges = nullptr; uint16_t* outgoing_edge_count = nullptr; int16_t* outgoing_edges = nullptr; uint8_t* read = nullptr; uint16_t read_count = 0; //local int16_t* scores = nullptr; int16_t* traces = nullptr; int16_t* alignment_graph = nullptr; int16_t* alignment_read = nullptr; int16_t* aligned_nodes = nullptr; //local; to store num of nodes aligned (length of alignment_graph and alignment_read) BandMode band_mode = traceback ? (adaptive ? BandMode::adaptive_band_traceback : BandMode::static_band_traceback) : (adaptive ? BandMode::adaptive_band : BandMode::static_band); BatchConfig batch_size(1024 /*max_sequence_size*/, 2 /*max_sequences_per_poa*/, 128 /*= band_width*/, band_mode); //allocate unified memory so they can be accessed by both host and device. 
GW_CU_CHECK_ERR(cudaMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_graph, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&read, batch_size.max_sequence_size * sizeof(uint8_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&alignment_read, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&aligned_nodes, sizeof(int16_t))); if (traceback) { GW_CU_CHECK_ERR(cudaMallocManaged(&scores, batch_size.max_banded_pred_distance * batch_size.matrix_sequence_dimension * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&traces, batch_size.max_nodes_per_graph * batch_size.matrix_sequence_dimension * sizeof(int16_t))); } else { GW_CU_CHECK_ERR(cudaMallocManaged(&scores, batch_size.max_nodes_per_graph * batch_size.matrix_sequence_dimension * sizeof(int16_t))); } //initialize all 'count' buffers memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(node_id_to_pos, 0, batch_size.max_nodes_per_graph * sizeof(int16_t)); memset(scores, 0, batch_size.max_nodes_per_graph * batch_size.matrix_sequence_dimension * sizeof(int16_t)); //calculate edge counts on host obj.get_graph_buffers(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count, nodes, &graph_count, graph, node_id_to_pos); obj.get_read_buffers(read, &read_count); int32_t gap_score = BasicNW::gap_score_; int32_t mismatch_score = BasicNW::mismatch_score_; int32_t match_score = BasicNW::match_score_; //call the host wrapper of nw kernels if (traceback) { runNWbandedTB(nodes, graph, node_id_to_pos, graph_count, incoming_edge_count, incoming_edges, outgoing_edge_count, read, read_count, scores, traces, batch_size.matrix_sequence_dimension, batch_size.max_nodes_per_graph, alignment_graph, alignment_read, batch_size.alignment_band_width, batch_size.max_banded_pred_distance, gap_score, mismatch_score, match_score, aligned_nodes, adaptive); } else { runNWbanded(nodes, graph, node_id_to_pos, graph_count, incoming_edge_count, incoming_edges, outgoing_edge_count, read, read_count, scores, batch_size.matrix_sequence_dimension, batch_size.max_nodes_per_graph, alignment_graph, alignment_read, batch_size.alignment_band_width, gap_score, mismatch_score, match_score, aligned_nodes, adaptive); } GW_CU_CHECK_ERR(cudaDeviceSynchronize()); //input and output buffers are the same ones in unified memory, so the results are updated in place //results are stored in alignment_graph and alignment_read; return string representation of those auto res = std::make_pair(genomeworks::stringutils::array_to_string(alignment_graph, *aligned_nodes, ","), genomeworks::stringutils::array_to_string(alignment_read, *aligned_nodes, ",")); 
GW_CU_CHECK_ERR(cudaFree(nodes)); GW_CU_CHECK_ERR(cudaFree(graph)); GW_CU_CHECK_ERR(cudaFree(node_id_to_pos)); GW_CU_CHECK_ERR(cudaFree(incoming_edges)); GW_CU_CHECK_ERR(cudaFree(incoming_edge_count)); GW_CU_CHECK_ERR(cudaFree(outgoing_edges)); GW_CU_CHECK_ERR(cudaFree(outgoing_edge_count)); GW_CU_CHECK_ERR(cudaFree(scores)); GW_CU_CHECK_ERR(cudaFree(alignment_graph)); GW_CU_CHECK_ERR(cudaFree(read)); GW_CU_CHECK_ERR(cudaFree(alignment_read)); GW_CU_CHECK_ERR(cudaFree(aligned_nodes)); if (traceback) { GW_CU_CHECK_ERR(cudaFree(traces)); } return res; } class NWbandedTest : public ::testing::Test { public: std::unique_ptr<BasicNW> nw; public: void SetUp() { // initialize nw graph and read with the following data std::string nodes_str = "TTTAACCTAATAAATCAGTGAAGATTTAAAATATGATAATTATTGATTTTGGTGAGAGTGCAAAGAAATTTGTTACCCTCATAAGCTGAGCAGACAGATAAGATAGAAAAACAGAAGATAGAATATTAAAACCATGATAGGTACAGACTGAAAAATTCTTGGATAAATATTAAAATTTAGGCTTTAGTAGTAGATTGATGACTGTGAGGAAAAAGGATGTCCAATTGTTGAGTGACATGTAGAATGCCTTAAAATAATTTTACACGTCACTGAAAGCTATATTTATATTCAGGAAGGATATATCCCAGTCATGATTTTCTTAATAAGTTGCCCCATTTTCCAAGTTTAGCTAATTAACATTTATGTCTTCTATAATCAGGAATAGTCATTAACTGACACAGAAACAATTGGAAGCATATGTAGCCAAAAACATAAAAATTATTGCATCCAAATAATGATAAAGTAAAATATTAAAAAATATAGTCTTCTAAAT"; std::string read_str = "TTTCACCTAGAAAATCAGTGAAGATTTAACAAAAAAAAAAAAAAAAAAAAAAAAATATTGATAATTATTGATTTTGGTGAGAGTGCAAAGCAATTGGCTACCCTCATAAGCTGAGCAGAAGATAAGATAGACAACAGAAGATAGAATAGTTAAACCATGATAGGTACAGACTGCAAAAAAATTCGATAAATATTAAAATTTAGGGCTTTAGTATATATTGATGACTGAGAAAAATCGTGATGTGCAATTGTGCGTGACATGTAGAATTGCCTTAAATAAAATTTAATCTGTCACTGAAGCTATATTTATATTCAGGAAGGATATATCCCAGTCATTGCTTTTCTTAATAAGTGCCCATGTTCCAAGTTTAGCCTAATTAAAAACTTTATGTCTTCTATATCAGAATAGTCATTAATGCACAGAAACAATTTGCGAAGGCATTATGTAGCAAAAACATAAAAAATTATTGCAGCCAAATAATGAATAAAAGTAACACAATCATTTAAAAAAATTATTATGTACTTCTAAAC"; // extract data to build BasicNW std::vector<uint8_t> nodes(nodes_str.begin(), nodes_str.end()); std::vector<int16_t> sorted_graph(nodes.size()); std::iota(sorted_graph.begin(), sorted_graph.end(), 0); Int16Vec2D outgoing_edges(nodes.size()); for (size_t i = 0; i < outgoing_edges.size() - 1; i++) { outgoing_edges[i].push_back(i + 1); } std::vector<uint8_t> read(read_str.begin(), read_str.end()); // setup nw nw = std::make_unique<BasicNW>(nodes, sorted_graph, outgoing_edges, read); } }; TEST_F(NWbandedTest, NWStaticBandvsFull) { auto full_alignment_results = testNW(*nw); auto static_banded_results = testNWbanded(*nw, false); // verify alignment_graph EXPECT_EQ(full_alignment_results.first, static_banded_results.first); // verify alignment_read EXPECT_EQ(full_alignment_results.second, static_banded_results.second); } TEST_F(NWbandedTest, NWAdaptiveBandvsFull) { auto full_alignment_results = testNW(*nw); auto adaptive_banded_results = testNWbanded(*nw, true); // verify alignment_graph EXPECT_EQ(full_alignment_results.first, adaptive_banded_results.first); // verify alignment_read EXPECT_EQ(full_alignment_results.second, adaptive_banded_results.second); } TEST_F(NWbandedTest, NWStaticBandTracebackvsFull) { auto full_alignment_results = testNW(*nw); auto static_banded_tb_results = testNWbanded(*nw, false, true); // verify alignment_graph EXPECT_EQ(full_alignment_results.first, static_banded_tb_results.first); // verify alignment_read EXPECT_EQ(full_alignment_results.second, static_banded_tb_results.second); } TEST_F(NWbandedTest, NWAdaptiveBandTracebackvsFull) { auto full_alignment_results = testNW(*nw); auto adaptive_banded_tb_results = testNWbanded(*nw, true, true); // verify alignment_graph 
EXPECT_EQ(full_alignment_results.first, adaptive_banded_tb_results.first);
    // verify alignment_read
    EXPECT_EQ(full_alignment_results.second, adaptive_banded_tb_results.second);
}

} // namespace cudapoa

} // namespace genomeworks

} // namespace claraparabricks
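// ---------------------------------------------------------------------------
// Added note: the test helpers above share one pattern -- cudaMallocManaged for
// buffers touched by both host and device, a kernel (via runNW/runNWbanded)
// writing results in place, cudaDeviceSynchronize before the host reads them,
// then cudaFree. The sketch below shows that pattern in isolation; the toy
// kernel and buffer size are illustrative and not part of the original tests.
__global__ void fill_iota_sketch(int16_t* buf, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        buf[i] = static_cast<int16_t>(i);
}

static void unified_memory_pattern_sketch()
{
    const int n  = 256;
    int16_t* buf = nullptr;
    GW_CU_CHECK_ERR(cudaMallocManaged(&buf, n * sizeof(int16_t)));
    fill_iota_sketch<<<(n + 127) / 128, 128>>>(buf, n);
    GW_CU_CHECK_ERR(cudaDeviceSynchronize()); // results are now visible on the host
    // buf[i] == i here; the real tests stringify alignment buffers instead.
    GW_CU_CHECK_ERR(cudaFree(buf));
}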
#include "cuda_runtime.h" #include <cuda.h> #include "device_launch_parameters.h" #include <cuda_runtime_api.h> #include "HoughLine.h" #include "Image.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" #include "CoordiSet.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:M_PI // π 值。对于某些操作系统,M_PI 可能没有定义,这里补充定义 M_PI。 #ifndef M_PI #define M_PI 3.14159265359 #endif // ==========================全局函数声明============================== // 根据输入点集的坐标,找到最上、最下、最左、最右的点,从而确定图像的宽和高。 static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset, int *xmin, int *ymin, int *xmax, int *ymax); // ==========================Kernel函数声明============================== static __global__ void // Kernel 函数无返回值 _houghlineImgKer( ImageCuda inimg, // 输入图像。 int *bufhoughdev, // 得票数矩阵。 int numtheta, // theta 的递增次数。 int numrho, // rho 的递增次数。 double detheta, // 每一次的角度增量。 double derho // 每一次的距离增量。 ); // Kernel 函数:_houghlineCorKer(根据输入坐标集计算得票数) // 根据输入坐标集,通过计算角度,距离等参数,计算最终的得票数。 static __global__ void _houghlineCorKer( CoordiSet guidingset, // 输入坐标集。 int *bufhoughdev, // 得票数矩阵。 int numtheta, // theta 的递增次数。 int numrho, // rho 的递增次数。 double detheta, // 每一次的角度增量。 double derho // 每一次的距离增量。 ); // Kernel 函数:_findlocalMaxKer(计算局部最大值) static __global__ void _findlocalMaxKer( int *bufhoughdev, // 得票数矩阵。 int *bufsortdev, // 局部最值矩阵。 int *sumdev, // 存在的直线数。 int numtheta, // theta 的递增次数。 int threshold // 直线的阈值。 ); // Kernel 函数:_houghoutKer(画出已检测到的直线) // 根据计算得到的直线的参数,得到输出图像。所有检测出来的直线, // 在输出图像中用像素值 128 将其画出。 static __global__ void _houghoutKer( ImageCuda outimg, // 输出图像。 LineParam *lineparamdev, // 计算得到的直线参数。 int linenum, // 最大待检测直线数量。 int derho // 每一次的距离增量。 ); // Kernel 函数:_realLineKer(检测给出线段的真实性)在inimg中检测参数给出的线段 // 是真实线段,还是某个线段的延长线, static __global__ void _realLineKer(ImageCuda inimg, int x1, int y1, int x2, int y2, int xmax, int xmin, int ymax, int ymin, int delta, int *pointnumdev); // ==========================Kernel函数定义============================== // Kernel 函数:_houghlineImgKer(根据输入图像计算得票数) static __global__ void _houghlineImgKer(ImageCuda inimg, int *bufhoughdev, int numtheta, int numrho, double detheta, double derho) { // 处理当前线程对应的图像点(c,r),其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量, int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if(c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 定义局部变量。 unsigned char intemp; int k, rho, bufidx; float theta; float irho = 1.0f / derho; float tempr; // 计算输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 intemp = inimg.imgMeta.imgData[inidx]; // 如果当前像素值为 255,即有效像素值,则对该像素点进行直线检测。 if (intemp == 255) { for (k = 0; k < numtheta; k++) { // 计算当前的角度 theta。 theta = k * detheta; // 计算该角度 theta 对应直线另一个参数 rho 的值。 tempr = (int)(c * cos(theta) * irho + r * sin(theta) * irho); // 根据上一步结果进行四舍五入。 rho = (int)(tempr + (tempr >= 0 ? 
0.5f : -0.5f)); rho += (numrho - 1) / 2; // 计算得到当前直线的两个参数 theta 和 rho 对应的累加器 // bufferHough 中的索引。使用原子操作,统计得票数。 bufidx = (rho + 1) * (numtheta + 2) + k + 1; atomicAdd(&bufhoughdev[bufidx], 1); } } } // Kernel 函数:_houghlineCorKer(根据输入坐标集计算得票数) static __global__ void _houghlineCorKer(CoordiSet guidingset, int *bufhoughdev, int numtheta, int numrho, double detheta, double derho) { // 计算计算当前线程的索引。 int idx = blockIdx.x * blockDim.x + threadIdx.x; // 处理coordiset中的点(dx,dy) int dx = guidingset.tplData[2 * idx]; int dy = guidingset.tplData[2 * idx + 1]; // 定义局部变量。 int k, rho, bufidx; float theta; float irho = 1.0f / derho; float tempr; // 计算得票数。 for (k = 0; k < numtheta; k ++) { // 计算当前的角度 theta。 theta = k * detheta; // 计算该角度 theta 对应直线另一个参数 rho 的值。 tempr = (int)(dx * cos(theta) * irho + dy * sin(theta) * irho); rho = (int)(tempr + (tempr >= 0 ? 0.5f : -0.5f)); rho += (numrho - 1) / 2; // 计算得到当前直线的两个参数 theta 和 rho 对应的累加器 // bufferHough 中的索引。使用原子操作,统计得票数。 bufidx = (rho + 1) * (numtheta + 2) + k + 1; atomicAdd(&bufhoughdev[bufidx], 1); } } // Kernel 函数:_findlocalMaxKer(计算局部最大值) static __global__ void _findlocalMaxKer( int *bufhoughdev, int *bufsortdev, int *sumdev, int numtheta, int threshold) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 计算该线程在块内的相对位置。 int inindex = threadIdx.y * blockDim.x + threadIdx.x; // 申请共享内存,存该块内符合条件的局部最大值个数, // 即存在的直线数。 __shared__ int totalsum[1]; // 初始化所有块内的共享内存。 if (inindex == 0) totalsum[0] = 0; // 块内同步。 __syncthreads(); // 计算当前线程在 bufHough 矩阵中的对应索引值。 int index = (r + 1) * (numtheta + 2) + (c + 1); // 当前线程的得票数大于直线阈值,并且大于邻域中的值时, // 认为他是局部最大值,即可能是直线。 if (bufhoughdev[index] > threshold && bufhoughdev[index] > bufhoughdev[index - 1] && bufhoughdev[index] >= bufhoughdev[index + 1] && bufhoughdev[index] > bufhoughdev[index - numtheta - 2] && bufhoughdev[index] >= bufhoughdev[index + numtheta + 2]) { bufsortdev[r * numtheta + c] = index; // 使用原子操作对局部最大值进行统计。 atomicAdd(&totalsum[0], 1); } else { bufsortdev[r * numtheta + c] = 0; } // 块内同步。 __syncthreads(); // (0,0)号线程负责将本块(共32*8个线程)统计出的直线的存在数统计到 sumdev 中。 if (inindex == 0 && totalsum[0] != 0) atomicAdd(&sumdev[0], totalsum[0]); } // Kernel 函数:_houghoutKer(画出已检测到的直线) static __global__ void _houghoutKer(ImageCuda outimg, LineParam *lineparamdev, int linenum, int derho) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省 // 计算资源,一方面防止由于段错误导致的程序崩溃。 if (c >= outimg.imgMeta.width || r >= outimg.imgMeta.height) return; // 计算当前坐标点对应的图像数据数组下标。 unsigned char *outptr; outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes; // 声明局部变量 int i, temp; float theta; float irho = 1.0f / derho; // 对所有已经检测出的直线进行循环,找到输入图像中该点所在的直线, // 并赋值 128。 for (i = 0; i < linenum; i++) { // 得到直线的参数 rho,theta。 theta = lineparamdev[i].angle; temp = (int)(c * cos(theta) * irho + r * sin(theta) * irho); if (temp == lineparamdev[i].distance) {*outptr = 255; break; } } } // Kernel 函数:_realLineKer(检测给出线段的真实性)在inimg中检测参数给出的线段 // 是真实线段,还是某个线段的延长线, static __global__ void _realLineKer(ImageCuda inimg, int x1, int y1, int x2, int y2, int xmax, int xmin, int ymax, int ymin, int delta, int *pointnumdev) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int cx=blockIdx.x * blockDim.x+threadIdx.x; int ry=blockIdx.y * blockDim.y+threadIdx.y; // 
检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if(cx >= inimg.imgMeta.width || ry >= inimg.imgMeta.height) return ; int inidx, temp=0; // 检查当前点是否在线段带容许误差的包围盒范围内,不在的话,直接返回 if( cx <= xmax+delta && cx >= xmin-delta && ry <= ymax+delta && ry >= ymin-delta ) { // 计算输入坐标点对应的图像数据数组下标。 inidx=ry * inimg.pitchBytes+cx; // 读取第一个输入坐标点对应的像素值。 temp=inimg.imgMeta.imgData[inidx]; // 如果当前点在线段上,或者误差小于门限值,计数器加一 if(temp == 255){ float dis=abs( (x2-x1) * (ry-y1)-(cx-x1) * ( y2-y1) ) / sqrt( (x2-x1) * (x2-x1) * 1.0+(y2-y1) * (y2-y1)); if(dis<delta) atomicAdd(pointnumdev, 1); } }// end of if; return ; } // ==========================全局函数定义============================== // 根据输入点集的坐标,找到最上、最下、最左、最右的点,从而确定图像的宽和高。 static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset, int *xmin, int *ymin, int *xmax, int *ymax) { // 声明局部变量。 int i; int errcode; // 在 host 端申请一个新的 CoordiSet 变量。 CoordiSet *tmpcoordiset; errcode = CoordiSetBasicOp::newCoordiSet(&tmpcoordiset); if (errcode != NO_ERROR) return errcode; errcode = CoordiSetBasicOp::makeAtHost(tmpcoordiset, guidingset->count); if (errcode != NO_ERROR) return errcode; // 将坐标集拷贝到 Host 端。 errcode = CoordiSetBasicOp::copyToHost(guidingset, tmpcoordiset); if (errcode != NO_ERROR) return errcode; // 初始化 x 和 y 方向上的最小最大值。 xmin[0] = xmax[0] = tmpcoordiset->tplData[0]; ymin[0] = ymax[0] = tmpcoordiset->tplData[1]; // 循环寻找坐标集最左、最右、最上、最下的坐标。 for (i = 1;i < tmpcoordiset->count;i++) { // 寻找 x 方向上的最小值。 if (xmin[0] > tmpcoordiset->tplData[2 * i]) xmin[0] = tmpcoordiset->tplData[2 * i]; // 寻找 x 方向上的最大值 if (xmax[0] < tmpcoordiset->tplData[2 * i]) xmax[0] = tmpcoordiset->tplData[2 * i]; // 寻找 y 方向上的最小值。 if (ymin[0] > tmpcoordiset->tplData[2 * i + 1]) ymin[0] = tmpcoordiset->tplData[2 * i + 1]; // 寻找 y 方向上的最大值 if (ymax[0] < tmpcoordiset->tplData[2 * i + 1]) ymax[0] = tmpcoordiset->tplData[2 * i + 1]; } // 释放临时坐标集变量。 CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset); return errcode; } // ==========================成员函数定义============================== // 宏:FAIL_HOUGH_LINE_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_HOUGH_LINE_FREE do { \ if (alldatadev != NULL) \ cudaFree(alldatadev); \ if (alldata != NULL) \ delete[] alldata; \ if (linedata != NULL) \ delete[] linedata; \ if (line != NULL) \ delete[] line; \ } while (0) // Host 成员方法:houghlineCor(Hough 变换检测直线) __host__ int HoughLine::houghLineCor(CoordiSet *guidingset, int *linesmax, LineParam *lineparam) { // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 得到输入图像的宽和高。 int width, height; int xmin, ymin, xmax, ymax; if (guidingset != NULL) { // 输入图像为空,则根据输入点集得到最左、 // 最右、最上、最下的坐标值。 errcode = _findMinMaxCoordinates(guidingset, &xmin, &ymin, &xmax, &ymax); if (errcode != NO_ERROR) return errcode; // 计算得票数矩阵的宽和高。 width = xmax-xmin ; height = ymax-ymin; } // 计算rho 和 theta 的递增次数。为减少计算,numrho用了近似值的距离最大值。 int numrho = (int)((width + height) * 2 + 1) / derho; int numtheta = (int)(M_PI / detheta); // 声明需要的指针变量。 int *alldatadev = NULL; int *alldata = NULL; int *linedata = NULL; LineParam *line = NULL; LineParam *lineparamdev = NULL; // 一次性申请 Device 端需要的所有空间。 int *bufhoughdev = NULL, *bufsortdev = NULL, *sumdev = NULL; cudaError_t cudaerrcode; cudaerrcode = cudaMalloc((void **)&alldatadev, (1 + (numtheta + 2) * (numrho + 2) + numtheta * numrho) * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 通过偏移得到各指针的地址。 sumdev = alldatadev; bufhoughdev = alldatadev + 1; bufsortdev = alldatadev + 1 + (numtheta + 2) * (numrho + 2); // 初始化 Hough 变换累加器在 Device 
上的内存空间。 cudaerrcode = cudaMemset(alldatadev, 0, (1 + (numtheta + 2) * (numrho + 2) + numtheta * numrho) * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; // 根据输入坐标集是否为空,分两种情况进行: if (guidingset != NULL) { // 若输入坐标集不为空,则将该点集拷贝入 Device 内存。 errcode = CoordiSetBasicOp::copyToCurrentDevice(guidingset); if (errcode != NO_ERROR) return errcode; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = 16; blocksize.y = 1; gridsize.x = (guidingset->count+15)/16; gridsize.y = 1; // 调用核函数,对输入坐标集 guidingset 计算 Hough 累加矩阵。 _houghlineCorKer<<<gridsize, blocksize>>>(*guidingset, bufhoughdev, numtheta, numrho, detheta, derho); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } } // 在 Host 端一次性申请全部所需的空间。 int *bufHough = NULL, *bufsort = NULL; int sum; alldata = new int [(numtheta + 2) * (numrho + 2) + numtheta * numrho]; if (alldata == NULL) return OUT_OF_MEM; // 通过偏移得到各指针的地址。 bufHough = alldata; bufsort = alldata + (numtheta + 2) * (numrho + 2); // 将 Kernel 函数中计算的得票数矩阵 bufHoughDev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(bufHough, bufhoughdev, (numtheta + 2) * (numrho + 2) * sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 计算调用计算局部最大值的 kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (numtheta + blocksize.x - 1) / blocksize.x; gridsize.y = (numrho + blocksize.y - 1) / blocksize.y; // 调用计算局部最大值的 kernel 函数。 _findlocalMaxKer<<<gridsize, blocksize>>>(bufhoughdev, bufsortdev, sumdev, numtheta, threshold); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } // 将 Kernel 函数中计算的得票数 sumdev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(&sum, sumdev, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 将 Kernel 函数中计算的得票数矩阵 bufsortdev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(bufsort, bufsortdev, numtheta * numrho * sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 在 Host 端申请存放直线得票数和索引的数组。 int *linevote = NULL, *lineindex = NULL; // 根据计算出存在的直线数,一次性申请所需空间。 linedata = new int [sum * 2]; if (linedata == NULL) return OUT_OF_MEM; linevote = linedata; lineindex = linedata + sum; // 局部变量。 int k = 0, temp; // 统计可能存在的直线的得票数和索引值。 for (int j = 0; j < numrho; j++) { for (int i = 0; i < numtheta; i++){ temp = j * numtheta + i; if (bufsort[temp] != 0) { // 将直线的索引值赋值到 lineindex 数组。 lineindex[k] = bufsort[temp]; // 将直线的得票数赋值到 linevote 数组。 linevote[k] = bufHough[bufsort[temp]]; k++; } } } // 使用希尔排序,以得票数递减的顺序,为存在的直线排序。 int i, j, tempvote, tempindex; // 希尔排序的增量。 int gap = sum >> 1; while(gap > 0) { // 对所有相隔 gap 位置的所有元素采用直接插入排序。 for (i = gap; i < sum;i++) { tempvote = linevote[i]; tempindex = lineindex[i]; j = i - gap; // 对相隔 gap 位置的元素进行排序。 while (j >= 0 && tempvote > linevote[j]) { linevote[j + gap] = linevote[j]; lineindex[j + gap] = lineindex[j]; j = j - gap; } linevote[j + gap] = tempvote; lineindex[j + gap] = tempindex; j = j - gap; } // 减小增量。 gap = gap >> 1; } // 申请直线返回参数结构体,保存找到的可能直线。 line = new LineParam[sum]; if (line == NULL) return OUT_OF_MEM; // 计算检测出的直线的参数:rho 以及 theta 的值,并 // 保存在参数结构体中。 float scale; scale = 1.0 / (numtheta + 2); for (int i = 0; i < sum; i++) { int idx = lineindex[i]; int rho = (int)(idx * scale) - 1; // 根据原始计算方法反计算出 theta 的值。 int theta = idx - (rho + 1) * 
(numtheta + 2) - 1; line[i].angle = theta * detheta; // 计算出直线参数 rho 的值。 rho =(int)(rho - ((numrho - 1) / 2) - ((rho >= 0) ? -0.5f : 0.5f)); line[i].distance = rho; // 将得票数保存在直线参数结构体中。 line[i].votes = linevote[i]; } // 统计最终检测的直线的个数。 int linenum = 0; int diffdis,diffdis2; float diffang,diffang2; for (int i = 0; i < sum; i++) { // 若当前直线的参数结构体的得票数为 0,0是认为重复的直线 // 则直接进行下次循环。 if (line[i].votes <= 0) continue; for (int j = i + 1; j < sum; j++) { // 计算两条直线距离和角度的差值。 diffang=abs(line[i].angle-line[j].angle); diffdis=abs(line[i].distance-line[j].distance); // 角度为1度和179度也很相似 diffang2=abs(M_PI-line[i].angle-line[j].angle); // 角度相差180时,dis值异号,相加才相当于他们之差 diffdis2=abs(line[i].distance+line[j].distance); // 若距离和角度的差值均小于设定的阈值, // 则认为这两条直线实质上是一条直线。 if ( (diffdis<thresdis && diffang<this->thresang) || (diffdis2<thresdis && diffang2<this->thresang)){ line[j].angle = 0.0f; line[j].distance = 0; line[j].votes = 0; } } // 检测出的直线数加 1。 linenum++; } // 检测出的最大直线数。 // 检测出的最大直线数是期望检测的最大直线数 linenum 和 // 存在的直线数 linesmax[0] 二者中的较小值。 linesmax[0] = (linenum < linesmax[0]) ? linenum : linesmax[0]; // 将最终检测的直线的参数赋值到需要返回的直线参数结构体中。 int n = 0; for (int i = 0; i < sum; i++) { // 若得票数不为 0,说明是检测出的直线, // 赋值到直线参数返回结构体中。 if (n == linesmax[0]) break; if (line[i].votes > 0) { lineparam[n].angle = line[i].angle; lineparam[n].distance = line[i].distance; lineparam[n].votes = line[i].votes; // 标记加 1。 n++; } } // 释放内存空间。 FAIL_HOUGH_LINE_FREE; cudaFree(lineparamdev); // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:houghline(Hough 变换检测直线) __host__ int HoughLine::houghLine(Image *inimg, CoordiSet *guidingset, int *linesmax, LineParam *lineparam) { // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL && guidingset == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 得到输入图像的宽和高。 int width, height; int xmin, ymin, xmax, ymax; if (guidingset != NULL) { // 输入图像为空,则根据输入点集得到最左、 // 最右、最上、最下的坐标值。 errcode = _findMinMaxCoordinates(guidingset, &xmin, &ymin, &xmax, &ymax); if (errcode != NO_ERROR) return errcode; // 计算得票数矩阵的宽和高。 width = xmax-xmin ; height = ymax-ymin; } else { // 输入图像不为空,则根据输入图像的尺寸得到图像需要处理部分的宽和高。 width = inimg->roiX2-inimg->roiX1; height = inimg->roiY2-inimg->roiY1; } // 计算rho 和 theta 的递增次数。为减少计算,numrho用了近似值的距离最大值。 int numrho = (int)((width + height) * 2 + 1) / derho; int numtheta = (int)(M_PI / detheta); // 声明需要的指针变量。 int *alldatadev = NULL; int *alldata = NULL; int *linedata = NULL; LineParam *line = NULL; LineParam *lineparamdev = NULL; // 一次性申请 Device 端需要的所有空间。 int *bufhoughdev = NULL, *bufsortdev = NULL, *sumdev = NULL; cudaError_t cudaerrcode; cudaerrcode = cudaMalloc((void **)&alldatadev, (1 + (numtheta + 2) * (numrho + 2) + numtheta * numrho) * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 通过偏移得到各指针的地址。 sumdev = alldatadev; bufhoughdev = alldatadev + 1; bufsortdev = alldatadev + 1 + (numtheta + 2) * (numrho + 2); // 初始化 Hough 变换累加器在 Device 上的内存空间。 cudaerrcode = cudaMemset(alldatadev, 0, (1 + (numtheta + 2) * (numrho + 2) + numtheta * numrho) * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; // 根据输入坐标集是否为空,分两种情况进行: if (guidingset != NULL) { // 若输入坐标集不为空,则将该点集拷贝入 Device 内存。 errcode = CoordiSetBasicOp::copyToCurrentDevice(guidingset); if (errcode != NO_ERROR) return errcode; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = 16; blocksize.y = 1; gridsize.x = (guidingset->count+15)/16; gridsize.y = 1; 
// 调用核函数,对输入坐标集 guidingset 计算 Hough 累加矩阵。 _houghlineCorKer<<<gridsize, blocksize>>>(*guidingset, bufhoughdev, numtheta, numrho, detheta, derho); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } } else { // 将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 若输入坐标集guidingset为空 // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 调用核函数,对输入图像计算 Hough 累加矩阵。 _houghlineImgKer<<<gridsize, blocksize>>>(insubimgCud, bufhoughdev, numtheta, numrho, detheta, derho); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } } // 在 Host 端一次性申请全部所需的空间。 int *bufHough = NULL, *bufsort = NULL; int sum; alldata = new int [(numtheta + 2) * (numrho + 2) + numtheta * numrho]; if (alldata == NULL) return OUT_OF_MEM; // 通过偏移得到各指针的地址。 bufHough = alldata; bufsort = alldata + (numtheta + 2) * (numrho + 2); // 将 Kernel 函数中计算的得票数矩阵 bufHoughDev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(bufHough, bufhoughdev, (numtheta + 2) * (numrho + 2) * sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 计算调用计算局部最大值的 kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (numtheta + blocksize.x - 1) / blocksize.x; gridsize.y = (numrho + blocksize.y - 1) / blocksize.y; // 调用计算局部最大值的 kernel 函数。 _findlocalMaxKer<<<gridsize, blocksize>>>(bufhoughdev, bufsortdev, sumdev, numtheta, threshold); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } // 将 Kernel 函数中计算的得票数 sumdev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(&sum, sumdev, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 将 Kernel 函数中计算的得票数矩阵 bufsortdev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(bufsort, bufsortdev, numtheta * numrho * sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 在 Host 端申请存放直线得票数和索引的数组。 int *linevote = NULL, *lineindex = NULL; // 根据计算出存在的直线数,一次性申请所需空间。 linedata = new int [sum * 2]; if (linedata == NULL) return OUT_OF_MEM; linevote = linedata; lineindex = linedata + sum; // 局部变量。 int k = 0, temp; // 统计可能存在的直线的得票数和索引值。 for (int j = 0; j < numrho; j++) { for (int i = 0; i < numtheta; i++){ temp = j * numtheta + i; if (bufsort[temp] != 0) { // 将直线的索引值赋值到 lineindex 数组。 lineindex[k] = bufsort[temp]; // 将直线的得票数赋值到 linevote 数组。 linevote[k] = bufHough[bufsort[temp]]; k++; } } } // 使用希尔排序,以得票数递减的顺序,为存在的直线排序。 int i, j, tempvote, tempindex; // 希尔排序的增量。 int gap = sum >> 1; while(gap > 0) { // 对所有相隔 gap 位置的所有元素采用直接插入排序。 for (i = gap; i < sum;i++) { tempvote = linevote[i]; tempindex = lineindex[i]; j = i - gap; // 对相隔 gap 位置的元素进行排序。 while (j >= 0 && tempvote > linevote[j]) { linevote[j + gap] = linevote[j]; lineindex[j + gap] = lineindex[j]; j = j - gap; } linevote[j + gap] = tempvote; lineindex[j + gap] = tempindex; j = j - gap; } // 减小增量。 gap = gap >> 1; } // 申请直线返回参数结构体,保存找到的可能直线。 line = new LineParam[sum]; if (line == NULL) return OUT_OF_MEM; // 计算检测出的直线的参数:rho 以及 theta 的值,并 // 保存在参数结构体中。 float scale; scale 
= 1.0 / (numtheta + 2); for (int i = 0; i < sum; i++) { int idx = lineindex[i]; int rho = (int)(idx * scale) - 1; // 根据原始计算方法反计算出 theta 的值。 int theta = idx - (rho + 1) * (numtheta + 2) - 1; line[i].angle = theta * detheta; // 计算出直线参数 rho 的值。 rho =(int)(rho - ((numrho - 1) / 2) - ((rho >= 0) ? -0.5f : 0.5f)); line[i].distance = rho; // 将得票数保存在直线参数结构体中。 line[i].votes = linevote[i]; } // 统计最终检测的直线的个数。 int linenum = 0; int diffdis,diffdis2; float diffang,diffang2; for (int i = 0; i < sum; i++) { // 若当前直线的参数结构体的得票数为 0,0是认为重复的直线 // 则直接进行下次循环。 if (line[i].votes <= 0) continue; for (int j = i + 1; j < sum; j++) { // 计算两条直线距离和角度的差值。 diffang=abs(line[i].angle-line[j].angle); diffdis=abs(line[i].distance-line[j].distance); // 角度为1度和179度也很相似 diffang2=abs(M_PI-line[i].angle-line[j].angle); // 角度相差180时,dis值异号,相加才相当于他们之差 diffdis2=abs(line[i].distance+line[j].distance); // 若距离和角度的差值均小于设定的阈值, // 则认为这两条直线实质上是一条直线。 if ( (diffdis<thresdis && diffang<this->thresang) || (diffdis2<thresdis && diffang2<this->thresang)){ line[j].angle = 0.0f; line[j].distance = 0; line[j].votes = 0; } } // 检测出的直线数加 1。 linenum++; } // 检测出的最大直线数。 // 检测出的最大直线数是期望检测的最大直线数 linenum 和 // 存在的直线数 linesmax[0] 二者中的较小值。 linesmax[0] = (linenum < linesmax[0]) ? linenum : linesmax[0]; // 将最终检测的直线的参数赋值到需要返回的直线参数结构体中。 int n = 0; for (int i = 0; i < sum; i++) { // 若得票数不为 0,说明是检测出的直线, // 赋值到直线参数返回结构体中。 if (n == linesmax[0]) break; if (line[i].votes > 0) { lineparam[n].angle = line[i].angle; lineparam[n].distance = line[i].distance; lineparam[n].votes = line[i].votes; // 标记加 1。 n++; } } // 释放内存空间。 FAIL_HOUGH_LINE_FREE; cudaFree(lineparamdev); // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:houghlineimg(Hough 变换检测直线) __host__ int HoughLine::houghLineImg(Image *inimg, CoordiSet *guidingset, Image *outimg, int *linesmax, LineParam *lineparam) { // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL && guidingset == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 得到输入图像的宽和高。 int width, height; int xmin, ymin, xmax, ymax; if (guidingset != NULL) { // 输入图像为空,则根据输入点集得到最左、 // 最右、最上、最下的坐标值。 errcode = _findMinMaxCoordinates(guidingset, &xmin, &ymin, &xmax, &ymax); if (errcode != NO_ERROR) return errcode; // 计算得票数矩阵的宽和高。 width = xmax-xmin ; height = ymax-ymin; } else { // 输入图像不为空,则根据输入图像的尺寸得到图像需要处理部分的宽和高。 width = inimg->roiX2-inimg->roiX1; height = inimg->roiY2-inimg->roiY1; } // 计算rho 和 theta 的递增次数。为减少计算,numrho用了近似值的距离最大值。 int numrho = (int)((width + height) * 2 + 1) / derho; int numtheta = (int)(M_PI / detheta); // 声明需要的指针变量。 int *alldatadev = NULL; int *alldata = NULL; int *linedata = NULL; LineParam *line = NULL; LineParam *lineparamdev = NULL; // 一次性申请 Device 端需要的所有空间。 int *bufhoughdev = NULL, *bufsortdev = NULL, *sumdev = NULL; cudaError_t cudaerrcode; cudaerrcode = cudaMalloc((void **)&alldatadev, (1 + (numtheta + 2) * (numrho + 2) + numtheta * numrho) * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 通过偏移得到各指针的地址。 sumdev = alldatadev; bufhoughdev = alldatadev + 1; bufsortdev = alldatadev + 1 + (numtheta + 2) * (numrho + 2); // 初始化 Hough 变换累加器在 Device 上的内存空间。 cudaerrcode = cudaMemset(alldatadev, 0, (1 + (numtheta + 2) * (numrho + 2) + numtheta * numrho) * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; ImageCuda outsubimgCud; // 根据输入坐标集是否为空,分两种情况进行: if (guidingset != NULL) { // 若输入坐标集不为空,则将该点集拷贝入 Device 内存。 errcode = 
CoordiSetBasicOp::copyToCurrentDevice(guidingset); if (errcode != NO_ERROR) return errcode; outimg->width = xmax; outimg->height = ymax; // 将输出图片拷贝至 Device 端。 ImageBasicOp::copyToCurrentDevice(outimg); // 提取输出图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; outsubimgCud.imgMeta.width = xmax; outsubimgCud.imgMeta.height = ymax; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = 16; blocksize.y = 1; gridsize.x = (guidingset->count+15)/16; gridsize.y = 1; // 调用核函数,对输入坐标集 guidingset 计算 Hough 累加矩阵。 _houghlineCorKer<<<gridsize, blocksize>>>(*guidingset, bufhoughdev, numtheta, numrho, detheta, derho); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } } else { // 将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 若输入坐标集guidingset为空 // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 调用核函数,对输入图像计算 Hough 累加矩阵。 _houghlineImgKer<<<gridsize, blocksize>>>(insubimgCud, bufhoughdev, numtheta, numrho, detheta, derho); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } } // 在 Host 端一次性申请全部所需的空间。 int *bufHough = NULL, *bufsort = NULL; int sum; alldata = new int [(numtheta + 2) * (numrho + 2) + numtheta * numrho]; if (alldata == NULL) return OUT_OF_MEM; // 通过偏移得到各指针的地址。 bufHough = alldata; bufsort = alldata + (numtheta + 2) * (numrho + 2); // 将 Kernel 函数中计算的得票数矩阵 bufHoughDev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(bufHough, bufhoughdev, (numtheta + 2) * (numrho + 2) * sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 计算调用计算局部最大值的 kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (numtheta + blocksize.x - 1) / blocksize.x; gridsize.y = (numrho + blocksize.y - 1) / blocksize.y; // 调用计算局部最大值的 kernel 函数。 _findlocalMaxKer<<<gridsize, blocksize>>>(bufhoughdev, bufsortdev, sumdev, numtheta, threshold); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return CUDA_ERROR; } // 将 Kernel 函数中计算的得票数 sumdev 拷贝至 Host 端。 cudaerrcode = cudaMemcpy(&sum, sumdev, sizeof (int), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_HOUGH_LINE_FREE; return cudaerrcode; } // 将 Kernel 
    cudaerrcode = cudaMemcpy(bufsort, bufsortdev,
                             numtheta * numrho * sizeof (int),
                             cudaMemcpyDeviceToHost);
    if (cudaerrcode != cudaSuccess) {
        // Free memory.
        FAIL_HOUGH_LINE_FREE;
        return cudaerrcode;
    }

    // Allocate Host arrays for the line vote counts and indices.
    int *linevote = NULL, *lineindex = NULL;
    // Allocate all required space in one call, based on the number of
    // candidate lines that were found.
    linedata = new int [sum * 2];
    if (linedata == NULL) return OUT_OF_MEM;
    linevote = linedata;
    lineindex = linedata + sum;

    // Local variables.
    int k = 0, temp;
    // Collect the vote counts and indices of the candidate lines.
    for (int j = 0; j < numrho; j++) {
        for (int i = 0; i < numtheta; i++) {
            temp = j * numtheta + i;
            if (bufsort[temp] != 0) {
                // Store the line index in the lineindex array.
                lineindex[k] = bufsort[temp];
                // Store the line vote count in the linevote array.
                linevote[k] = bufHough[bufsort[temp]];
                k++;
            }
        }
    }

    // Sort the candidate lines by decreasing vote count using Shell sort.
    int i, j, tempvote, tempindex;
    // Shell sort gap.
    int gap = sum >> 1;
    while (gap > 0) {
        // Straight insertion sort over all elements that are gap positions
        // apart.
        for (i = gap; i < sum; i++) {
            tempvote = linevote[i];
            tempindex = lineindex[i];
            j = i - gap;
            // Sort the elements that are gap positions apart.
            while (j >= 0 && tempvote > linevote[j]) {
                linevote[j + gap] = linevote[j];
                lineindex[j + gap] = lineindex[j];
                j = j - gap;
            }
            linevote[j + gap] = tempvote;
            lineindex[j + gap] = tempindex;
            j = j - gap;
        }
        // Shrink the gap.
        gap = gap >> 1;
    }

    // Allocate line parameter structures to hold the candidate lines.
    line = new LineParam[sum];
    if (line == NULL) return OUT_OF_MEM;

    // Compute the detected line parameters rho and theta and store them in
    // the parameter structures.
    float scale;
    scale = 1.0 / (numtheta + 2);
    for (int i = 0; i < sum; i++) {
        int idx = lineindex[i];
        int rho = (int)(idx * scale) - 1;
        // Recover theta by inverting the original index computation.
        int theta = idx - (rho + 1) * (numtheta + 2) - 1;
        line[i].angle = theta * detheta;
        // Compute the line parameter rho.
        rho =(int)(rho - ((numrho - 1) / 2) - ((rho >= 0) ? -0.5f : 0.5f));
        line[i].distance = rho;
        // Store the vote count in the line parameter structure.
        line[i].votes = linevote[i];
    }

    // Count the number of finally detected lines.
    int linenum = 0;
    int diffdis, diffdis2;
    float diffang, diffang2;
    for (int i = 0; i < sum; i++) {
        // A vote count of 0 marks a duplicate line, so skip it and
        // continue with the next one.
        if (line[i].votes <= 0) continue;
        for (int j = i + 1; j < sum; j++) {
            // Compute the distance and angle differences between the two lines.
            diffang = abs(line[i].angle - line[j].angle);
            diffdis = abs(line[i].distance - line[j].distance);
            // Angles of 1 degree and 179 degrees are also very similar.
            diffang2 = abs(M_PI - line[i].angle - line[j].angle);
            // When the angles differ by 180 degrees the distances have opposite
            // signs, so their sum corresponds to their difference.
            diffdis2 = abs(line[i].distance + line[j].distance);
            // If both the distance and the angle differences are below the
            // configured thresholds, the two lines are considered the same line.
            if ((diffdis < thresdis && diffang < this->thresang) ||
                (diffdis2 < thresdis && diffang2 < this->thresang)) {
                line[j].angle = 0.0f;
                line[j].distance = 0;
                line[j].votes = 0;
            }
        }
        // Increment the number of detected lines.
        linenum++;
    }

    // The number of lines to return is the smaller of the number of detected
    // lines (linenum) and the requested maximum (linesmax[0]).
    linesmax[0] = (linenum < linesmax[0]) ? linenum : linesmax[0];
    // Copy the parameters of the finally detected lines into the output
    // line parameter structures.
    int n = 0;
    for (int i = 0; i < sum; i++) {
        // A non-zero vote count means this is a detected line,
        // so copy it into the output structure.
        if (n == linesmax[0]) break;
        if (line[i].votes > 0) {
            lineparam[n].angle = line[i].angle;
            lineparam[n].distance = line[i].distance;
            lineparam[n].votes = line[i].votes;
            // Advance the output index.
            n++;
        }
    }

    // Allocate Device memory for the line return parameters.
    cudaerrcode = cudaMalloc((void **)&lineparamdev,
                             linesmax[0] * sizeof (LineParam));
    if (cudaerrcode != cudaSuccess) {
        // Free memory.
        FAIL_HOUGH_LINE_FREE;
        return cudaerrcode;
    }

    // Copy the computed line parameters from the Host to the Device.
    cudaerrcode = cudaMemcpy(lineparamdev, lineparam,
                             linesmax[0] * sizeof (LineParam),
                             cudaMemcpyHostToDevice);
    if (cudaerrcode != cudaSuccess) {
        // Free memory.
        FAIL_HOUGH_LINE_FREE;
        cudaFree(lineparamdev);
        return cudaerrcode;
    }

    // Compute the block and grid dimensions for the kernel launch.
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;

    // Launch the kernel that produces the final output image.
    _houghoutKer<<<gridsize, blocksize>>>(outsubimgCud, lineparamdev,
                                          linesmax[0], derho);
    if (cudaGetLastError() != cudaSuccess) {
        // Free memory.
        FAIL_HOUGH_LINE_FREE;
        cudaFree(lineparamdev);
        return CUDA_ERROR;
    }

    // Free memory.
    FAIL_HOUGH_LINE_FREE;
    cudaFree(lineparamdev);

    // Done, return.
    return NO_ERROR;
}

// Host member method: getGlobalParam (convert ROI-local line parameters into
// parameters under the global coordinate system)
// Converts the line parameters detected by the Hough transform into
// parameters expressed in global coordinates.
__host__ int                      // Return value: whether the function
                                  // executed correctly.
HoughLine::getGlobalParam(
        Image *inimg,             // input image
        int *linesmax,            // maximum number of detected lines
        LineParam *lineparam      // line parameter structures to convert
){
    int rx = inimg->roiX1;
    int ry = inimg->roiY1;
    if (rx == 0 && ry == 0) return NO_ERROR;
    for (int i = 0; i < *linesmax; i++)
        lineparam[i].distance = lineparam[i].distance +
                                rx * cos(lineparam[i].angle) +
                                ry * sin(lineparam[i].angle);
    return NO_ERROR;
}

// Host member method: realLine (check whether a given line segment is real)
__host__ bool HoughLine::realLine(
        Image *inimg,      // input image
        int x1,            // coordinates of the two endpoints of the
        int y1,            // segment to check
        int x2,
        int y2,
        float threshold,   // tolerance for deciding whether a point lies on
                           // the segment, typically 1-3
        float thresperc    // reality threshold: if the ratio of valid points
                           // on the segment to the number of points it should
                           // theoretically contain exceeds this value, the
                           // segment is considered real
){
    // Order the endpoint x and y coordinates to simplify the range checks.
    int xmax, xmin, ymax, ymin;
    if (x1 > x2) {
        xmax = x1;
        xmin = x2;
    } else {
        xmax = x2;
        xmin = x1;
    }
    if (y1 > y2) {
        ymax = y1;
        ymin = y2;
    } else {
        ymax = y2;
        ymin = y1;
    }

    // Number of valid points a full segment of this length should contain.
    int pointnumfull = sqrt(0.0 + (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));

    // Device memory holding the number of valid points on the segment
    // computed by the kernel.
    int *pointnumdev = NULL;
    int cudaerrcode = cudaMalloc((void **)&pointnumdev, sizeof (int));
    if (cudaerrcode != cudaSuccess) {
        // Free memory.
        cudaFree(pointnumdev);
        return cudaerrcode;
    }
    cudaerrcode = cudaMemset(pointnumdev, 0, sizeof (int));
    if (cudaerrcode != cudaSuccess) {
        // Free memory.
        cudaFree(pointnumdev);
        return cudaerrcode;
    }

    // Copy the input image to Device memory.
    int errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR) return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR) return errcode;

    // Compute the block and grid dimensions for the kernel launch.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;

    // Launch the kernel that counts the valid points on the segment.
    _realLineKer <<< gridsize, blocksize>>>(insubimgCud, x1, y1, x2, y2,
                                            xmax, xmin, ymax, ymin,
                                            threshold, pointnumdev);

    // Copy the result from Device memory to Host memory.
    int pointnum = 0;
    cudaerrcode = cudaMemcpy(&pointnum, pointnumdev, sizeof (int),
                             cudaMemcpyDeviceToHost);
    if (cudaerrcode != cudaSuccess) {
        // Free memory.
        cudaFree(pointnumdev);
        return cudaerrcode;
    }

    // Decide whether the computed number of valid points is plausible, i.e.
    // whether the segment really exists.
#ifdef DEBUG
    cout << endl << "pointnum=" << pointnum
         << ", pointnumfull=" << pointnumfull << endl;
#endif
    if (pointnum > pointnumfull * thresperc)
        return true;
    else
        return false;
}

// Undefine the macro defined above.
#undef FAIL_HOUGH_LINE_FREE
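// ---------------------------------------------------------------------------
// Editor's note (not part of the original source): the theta/rho recovery in
// the detection code above is easier to follow in isolation. The host-only
// sketch below assumes the accumulator stores bin (theta, rho) at linear
// index (rho + 1) * (numtheta + 2) + theta + 1, which is what the decode in
// houghLineImg inverts; all function names here are hypothetical and it uses
// integer division instead of the float "scale" trick.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdio>

// Encode an accumulator bin the same way the decode above assumes.
static inline int encodeBin(int thetaIdx, int rhoIdx, int numtheta)
{
    return (rhoIdx + 1) * (numtheta + 2) + thetaIdx + 1;
}

// Invert the encoding, mirroring the arithmetic in the detection code.
static inline void decodeBin(int idx, int numtheta, int *thetaIdx, int *rhoIdx)
{
    *rhoIdx = idx / (numtheta + 2) - 1;
    *thetaIdx = idx - (*rhoIdx + 1) * (numtheta + 2) - 1;
}

// Tiny round-trip check of the mapping.
static void checkBinMapping(int numtheta, int numrho)
{
    for (int r = 0; r < numrho; r++) {
        for (int t = 0; t < numtheta; t++) {
            int ti, ri;
            decodeBin(encodeBin(t, r, numtheta), numtheta, &ti, &ri);
            assert(ti == t && ri == r);
        }
    }
    printf("bin mapping round-trips for %d x %d bins\n", numtheta, numrho);
}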
#include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <ops/declarable/helpers/top_k.h> namespace sd { namespace ops { namespace helpers { ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_KERNEL static void inTopKCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vy, const sd::LongType* yShapeInfo, void* vz, const sd::LongType* zShapeInfo, const sd::LongType* xTadShapeInfo, const sd::LongType* xTadOffsets, const sd::Unsigned k) { const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<bool*>(vz); __shared__ sd::Unsigned sharedMem[SD_CUDA_BLOCK_SIZE]; __shared__ X elemToCompare; __shared__ const X* xTad; __shared__ sd::LongType idx, xTadLen; if (threadIdx.x == 0) { xTadLen = shape::length(xTadShapeInfo); xTad = reinterpret_cast<const X*>(vx) + xTadOffsets[blockIdx.x]; idx = y[shape::getIndexOffset(blockIdx.x, yShapeInfo)]; // shape::length(yShapeInfo) == numTads elemToCompare = xTad[shape::getIndexOffset(idx, xTadShapeInfo)]; } __syncthreads(); sharedMem[threadIdx.x] = 0; for (sd::LongType i = threadIdx.x; i < xTadLen; i += blockDim.x) if (elemToCompare < xTad[shape::getIndexOffset(i, xTadShapeInfo)]) ++sharedMem[threadIdx.x]; __syncthreads(); // aggregate sum for (sd::Unsigned activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[shape::getIndexOffset(blockIdx.x, zShapeInfo)] = *sharedMem < k; } /////////////////////////////////////////////////////////////////// template <typename X, typename Y> static void inTopKCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, const void* vy, const sd::LongType* yShapeInfo, void* vz, const sd::LongType* zShapeInfo, const sd::LongType* xTadShapeInfo, const sd::LongType* xTadOffsets, const sd::Unsigned k) { inTopKCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, xTadShapeInfo, xTadOffsets, k); } /////////////////////////////////////////////////////////////////// sd::Status inTopKFunctor(sd::LaunchContext* context, const NDArray* predictions, const NDArray* targets, NDArray* output, const sd::Unsigned k) { PointersManager manager(context, "in_top_k"); const auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(predictions->shapeInfo(), {1}); const int threadsPerBlock = SD_CUDA_BLOCK_SIZE; const int blocksPerGrid = static_cast<int>(packX.numberOfTads()); const int sharedMem = 1024; const auto xType = predictions->dataType(); const auto yType = targets->dataType(); NDArray::prepareSpecialUse({output}, {predictions, targets}); BUILD_DOUBLE_SELECTOR( xType, yType, inTopKCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), predictions->specialBuffer(), predictions->specialShapeInfo(), targets->specialBuffer(), targets->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), packX.specialShapeInfo(), packX.specialOffsets(), k), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {predictions, targets}); manager.synchronize(); return sd::Status::OK; } template <typename X, typename Y> static SD_KERNEL void topValuesMover(void const* vx, sd::LongType const* xTadShapeInfo, sd::LongType const* xTadOffsets, void const* vi, 
sd::LongType const* iTadShapeInfo, sd::LongType const* iTadOffsets, void* vz, sd::LongType const* zTadShapeInfo, sd::LongType const* zTadOffsets, sd::LongType tadLength, int numTads, int k) { for (int t = blockIdx.x; t < numTads; t += gridDim.x) { auto x = reinterpret_cast<X const*>(vx) + xTadOffsets[t]; auto i = reinterpret_cast<Y const*>(vi) + iTadOffsets[t]; auto z = reinterpret_cast<X*>(vz) + zTadOffsets[t]; for (int e = threadIdx.x; e < k; e += blockDim.x) { auto idx = i[shape::getIndexOffset(e, iTadShapeInfo)]; z[shape::getIndexOffset(e, zTadShapeInfo)] = x[shape::getIndexOffset(idx, xTadShapeInfo)]; } } } template <typename X, typename Y> static SD_KERNEL void indicesAlongDimension(void const* vx, sd::LongType const* xTadShapeInfo, sd::LongType const* xTadOffsets, void* vi, sd::LongType const* iTadShapeInfo, sd::LongType const* iTadOffsets, void* vz, sd::LongType const* zTadShapeInfo, sd::LongType const* zTadOffsets, sd::LongType tadLength, int numTads, int k, int scanWidth, bool needSort) { extern __shared__ char _shmem[]; X* tempValues = reinterpret_cast<X*>(_shmem) + threadIdx.x * scanWidth; Y* tempIndices = reinterpret_cast<Y*>(reinterpret_cast<X*>(_shmem) + blockDim.x * scanWidth) + threadIdx.x * scanWidth; __shared__ X localMaximum; if (threadIdx.x == 0) localMaximum = -DataTypeUtils::max<X>(); __syncthreads(); for (int t = blockIdx.x; t < numTads; t += gridDim.x) { auto x = reinterpret_cast<X const*>(vx) + xTadOffsets[t]; auto i = reinterpret_cast<Y*>(vi) + iTadOffsets[t]; auto z = reinterpret_cast<X*>(vz) + zTadOffsets[t]; // we'll do multiple reads here for (int p = 0; p < k; p += scanWidth) { // resetting temporary storage for (int p = 0; p < scanWidth; p++) { tempValues[p] = -DataTypeUtils::max<X>(); tempIndices[p] = DataTypeUtils::max<Y>(); } // local max values/indices for (int e = threadIdx.x; e < tadLength; e++) { auto value = x[shape::getIndexOffset(e, xTadShapeInfo)]; // we'll compare this value to current stored ones for (int f = 0; f < scanWidth; f++) { if (value > tempValues[f] && (p == 0 || value < localMaximum)) { tempValues[f] = value; tempIndices[f] = e; } } } __syncthreads(); // at this point we have local part ready for merge and define global maximum for this iteration, and local // maximum for next iteration for (sd::Unsigned activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) { if (tempValues[0] < tempValues[0 + activeThreads * scanWidth]) { tempValues[0] = tempValues[0 + activeThreads * scanWidth]; tempIndices[0] = tempIndices[0 + activeThreads * scanWidth]; } } __syncthreads(); } __syncthreads(); // at this point we know local minimum for next iteration if (threadIdx.x == 0) { localMaximum = tempValues[scanWidth - 1]; z[shape::getIndexOffset(p, zTadShapeInfo)] = tempValues[scanWidth - 1]; i[shape::getIndexOffset(p, iTadShapeInfo)] = tempIndices[scanWidth - 1]; } __syncthreads(); } __syncthreads(); if (!needSort) { // if we don't need sort, we need to return values based on their indices (ascending) for (int m = 0; m < k; m++) { if (m % 2 == 0) { for (int tid = threadIdx.x; tid < k; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < k) { auto t0 = shape::getIndexOffset(top - 1, iTadShapeInfo); auto t1 = shape::getIndexOffset(top, iTadShapeInfo); if (i[t0] > i[t1]) { // swap indices first Y di0 = i[t0]; i[t0] = i[t1]; i[t1] = di0; // swap values next X dz0 = z[t0]; z[t0] = z[t1]; z[t1] = dz0; } } } } else { for (int tid = threadIdx.x; tid < k; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < 
k) { auto t0 = shape::getIndexOffset(top - 1, iTadShapeInfo); auto t1 = shape::getIndexOffset(top, iTadShapeInfo); if (i[t0] > i[t1]) { // swap indices first Y di0 = i[t0]; i[t0] = i[t1]; i[t1] = di0; // swap values next X dz0 = z[t0]; z[t0] = z[t1]; z[t1] = dz0; } } } } __syncthreads(); } } } } template <typename X, typename Y> static sd::Status topKFunctor_(sd::LaunchContext* context, const NDArray* input, NDArray* values, NDArray* indices, const sd::Unsigned k, bool needSort) { auto packX = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 1}); auto packI = ConstantTadHelper::getInstance().tadForDimensions(indices->shapeInfo(), {input->rankOf() - 1}); auto packZ = ConstantTadHelper::getInstance().tadForDimensions(values->shapeInfo(), {input->rankOf() - 1}); auto tadLength = shape::length(packX.primaryShapeInfo()); // we get top K values first if (k == 1) { input->applyIndexReduce(indexreduce::IndexMax, *indices, {input->rankOf() - 1}); // copy values on specified indices topValuesMover<X, Y><<<256, 256, 1024, *context->getCudaStream()>>>( input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), indices->specialBuffer(), packI.platformShapeInfo(), packI.platformOffsets(), values->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, packX.numberOfTads(), k); } else { int scanWidth = 1; int numTreads = 256; int shMemSize = (numTreads * sizeof(X) * scanWidth) + (numTreads * sizeof(Y) * scanWidth) + 512; indicesAlongDimension<X, Y><<<256, numTreads, shMemSize, *context->getCudaStream()>>>( input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), indices->specialBuffer(), packI.platformShapeInfo(), packI.platformOffsets(), values->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, packX.numberOfTads(), k, scanWidth, needSort); } return sd::Status::OK; } sd::Status topKFunctor(sd::LaunchContext* context, const NDArray* input, NDArray* values, NDArray* indices, const sd::Unsigned k, bool needSort) { input->syncToDevice(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), topKFunctor_, (context, input, values, indices, k, needSort), SD_COMMON_TYPES, SD_INDEXING_TYPES); values->tickWriteDevice(); indices->tickWriteDevice(); return sd::Status::OK; } } // namespace helpers } // namespace ops } // namespace sd
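// ---------------------------------------------------------------------------
// Editor's note (not part of the original source): inTopKCuda above relies on
// a standard shared-memory tree reduction to sum the per-thread counts. The
// stand-alone kernel below is a minimal sketch of that pattern only; the
// kernel name and parameters are hypothetical and it is not wired into the
// library.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

template <int BLOCK_SIZE>
__global__ void blockCountGreaterThan(const float* in, int n, float pivot,
                                      int* outPerBlock)
{
    __shared__ int partial[BLOCK_SIZE];

    // Each thread counts elements larger than the pivot in a grid-strided loop.
    int count = 0;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        if (in[i] > pivot)
            ++count;
    partial[threadIdx.x] = count;
    __syncthreads();

    // Tree reduction in shared memory, mirroring the loop in inTopKCuda.
    for (int active = blockDim.x / 2; active > 0; active /= 2) {
        if (threadIdx.x < active)
            partial[threadIdx.x] += partial[threadIdx.x + active];
        __syncthreads();
    }

    // Thread 0 publishes the per-block result.
    if (threadIdx.x == 0)
        outPerBlock[blockIdx.x] = partial[0];
}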
#include "_reg_common_cuda.h" #include "_reg_tools.h" /* ******************************** */ void cudaCommon_computeGridConfiguration(dim3 &r_blocks, dim3 &r_grid, const int targetVoxelNumber) { unsigned int maxThreads = 256; unsigned int maxBlocks = 65365; unsigned int blocks = (targetVoxelNumber % maxThreads) ? (targetVoxelNumber / maxThreads) + 1 : targetVoxelNumber / maxThreads; blocks = (std::min)(blocks, maxBlocks); r_grid = dim3(blocks, 1, 1); r_blocks = dim3(maxThreads, 1, 1); } /* ******************************** */ /* ******************************** */ template <class NIFTI_TYPE> int cudaCommon_transferNiftiToNiftiOnDevice1(nifti_image **image_d, nifti_image *img) { const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(NIFTI_TYPE); int *g_dim; float* g_pixdim; NIFTI_TYPE* g_data; NR_CUDA_SAFE_CALL(cudaMalloc((void**)&g_dim, 8 * sizeof(int))); NR_CUDA_SAFE_CALL(cudaMalloc((void**)&g_pixdim, 8 * sizeof(float))); NR_CUDA_SAFE_CALL(cudaMalloc((void**)&g_data, memSize)); NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>( img->data ); NR_CUDA_SAFE_CALL(cudaMemcpy(( *image_d ), img, sizeof(nifti_image), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy((*image_d)->data, array_h, memSize, cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(( *image_d )->dim, img->dim, 8 * sizeof(int), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(( *image_d )->pixdim, img->pixdim, 8 * sizeof(float), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToNiftiOnDevice1<float>(nifti_image **image_d, nifti_image *img); template int cudaCommon_transferNiftiToNiftiOnDevice1<double>(nifti_image **image_d, nifti_image *img); /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, const nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ const unsigned int memSize = img->nvox*sizeof(DTYPE); const NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, memSize, cudaMemcpyHostToDevice)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, const nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } const float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement 
switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<double>(double **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<int>(int **, const nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **, const nifti_image *); /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ const unsigned int memSize = img->dim[1] * img->dim[2] * img->dim[3] * sizeof(DTYPE); NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[img->dim[1] * img->dim[2] * img->dim[3]]; NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, memSize, cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(*array2_d, array2_h, memSize, cudaMemcpyHostToDevice)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(DTYPE **array_d, DTYPE **array2_d, nifti_image *img) { if(sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); const int voxelNumber = img->nx*img->ny*img->nz; for(int i=0; i<voxelNumber; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<voxelNumber; i++) array2_h[i].w= *niftiImgValues++; } NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, array_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); NR_CUDA_SAFE_CALL(cudaMemcpy(*array2_d, array2_h, img->nx*img->ny*img->nz*sizeof(float4), cudaMemcpyHostToDevice)); free(array_h); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(array_d, array2_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(float **,float **, nifti_image *); 
template int cudaCommon_transferNiftiToArrayOnDevice<double>(double **,double **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(float4 **,float4 **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(cudaArray **cuArray_d, nifti_image *img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = cudaMemcpyHostToDevice; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(cudaArray **cuArray_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; if(img->dim[5]>=2) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; } if(img->dim[5]==3) { for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; } cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; copyParams.kind = cudaMemcpyHostToDevice; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)) free(array_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<int>(cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(cudaArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferNiftiToArrayOnDevice1(cudaArray **cuArray_d, cudaArray **cuArray2_d, nifti_image 
*img) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ NIFTI_TYPE *array_h = static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h = &array_h[img->dim[1]*img->dim[2]*img->dim[3]]; cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = cudaMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); // Second timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferNiftiToArrayOnDevice(cudaArray **cuArray_d, cudaArray **cuArray2_d, nifti_image *img) { if( sizeof(DTYPE)==sizeof(float4) ){ if( (img->datatype!=NIFTI_TYPE_FLOAT32) || (img->dim[5]<2) || (img->dim[4]>1) ) { reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The specified image is not a single precision deformation field image"); return EXIT_FAILURE; } float *niftiImgValues = static_cast<float *>(img->data); float4 *array_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); float4 *array2_h=(float4 *)calloc(img->nx*img->ny*img->nz,sizeof(float4)); for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].x= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].x= *niftiImgValues++; if(img->dim[5]>=2){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].y= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].y= *niftiImgValues++; } if(img->dim[5]>=3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].z= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].z= *niftiImgValues++; } if(img->dim[5]==3){ for(int i=0; i<img->nx*img->ny*img->nz; i++) array_h[i].w= *niftiImgValues++; for(int i=0; i<img->nx*img->ny*img->nz; i++) array2_h[i].w= *niftiImgValues++; } cudaMemcpy3DParms copyParams; memset(&copyParams, 0, sizeof(copyParams)); copyParams.extent = make_cudaExtent(img->dim[1], img->dim[2], img->dim[3]); copyParams.kind = cudaMemcpyHostToDevice; // First timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); free(array_h); // Second timepoint copyParams.srcPtr = make_cudaPitchedPtr((void *) array2_h, copyParams.extent.width*sizeof(DTYPE), copyParams.extent.width, copyParams.extent.height); copyParams.dstArray = *cuArray2_d; NR_CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); free(array2_h); } else{ // All these else could be removed but the nvcc compiler would warn for unreachable statement switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferNiftiToArrayOnDevice1<DTYPE,float>(cuArray_d, cuArray2_d, img); default: reg_print_fct_error("cudaCommon_transferNiftiToArrayOnDevice1"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } 
} return EXIT_SUCCESS; } template int cudaCommon_transferNiftiToArrayOnDevice<float>(cudaArray **, cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<double>(cudaArray **, cudaArray **, nifti_image *); template int cudaCommon_transferNiftiToArrayOnDevice<float4>(cudaArray **, cudaArray **, nifti_image *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(cudaArray **cuArray_d, int *dim) { const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc texDesc = cudaCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray_d, &texDesc, volumeSize)); return EXIT_SUCCESS; }template int cudaCommon_allocateArrayToDevice<float>(cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<double>(cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(cudaArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(cudaArray **cuArray_d, cudaArray **cuArray2_d, int *dim) { const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); cudaChannelFormatDesc texDesc = cudaCreateChannelDesc<DTYPE>(); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray_d, &texDesc, volumeSize)); NR_CUDA_SAFE_CALL(cudaMalloc3DArray(cuArray2_d, &texDesc, volumeSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(cudaArray **,cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<double>(cudaArray **,cudaArray **, int *); template int cudaCommon_allocateArrayToDevice<float4>(cudaArray **,cudaArray **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, int *); template int cudaCommon_allocateArrayToDevice<double>(double **, int *); template int cudaCommon_allocateArrayToDevice<int>(int **, int *); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int *); // for deformation field /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, int vox) { const unsigned int memSize = vox * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, int); template int cudaCommon_allocateArrayToDevice<double>(double **, int); template int cudaCommon_allocateArrayToDevice<int>(int **, int); template int cudaCommon_allocateArrayToDevice<float4>(float4 **, int); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_allocateArrayToDevice(DTYPE **array_d, DTYPE **array2_d, int *dim) { const unsigned int memSize = dim[1] * dim[2] * dim[3] * sizeof(DTYPE); NR_CUDA_SAFE_CALL(cudaMalloc(array_d, memSize)); NR_CUDA_SAFE_CALL(cudaMalloc(array2_d, memSize)); return EXIT_SUCCESS; } template int cudaCommon_allocateArrayToDevice<float>(float **, float **, int *); template int cudaCommon_allocateArrayToDevice<double>(double **, double **, int *); template int 
cudaCommon_allocateArrayToDevice<float4>(float4 **, float4 **, int *); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToCpu(DTYPE *cpuPtr, DTYPE **cuPtr, const unsigned int nElements) { NR_CUDA_SAFE_CALL(cudaMemcpy((void *)cpuPtr, (void *)*cuPtr, nElements*sizeof(DTYPE), cudaMemcpyDeviceToHost)); //NR_CUDA_SAFE_CALL(cudaThreadSynchronize()); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToCpu<float>(float *cpuPtr, float **cuPtr, const unsigned int nElements); template int cudaCommon_transferFromDeviceToCpu<double>(double *cpuPtr, double **cuPtr, const unsigned int nElements); /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else { NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (void *)*array_d, img->nvox*sizeof(DTYPE), cudaMemcpyDeviceToHost)); } return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNifti1<float, float>(nifti_image *img, float **array_d); template int cudaCommon_transferFromDeviceToNifti1<double, double>(nifti_image *img, double **array_d); /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The nifti image is not a 5D volume"); return EXIT_FAILURE; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h; NR_CUDA_SAFE_CALL(cudaMallocHost(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].x; if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].y; } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].z; } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++) *niftiImgValues++ = array_h[i].w; } NR_CUDA_SAFE_CALL(cudaFreeHost(array_h)); return EXIT_SUCCESS; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d); default: reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **); template int cudaCommon_transferFromDeviceToNifti<double>(nifti_image *, double **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ template <class DTYPE, class NIFTI_TYPE> int cudaCommon_transferFromDeviceToNifti1(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)!=sizeof(NIFTI_TYPE)){ 
reg_print_fct_error("cudaCommon_transferFromDeviceToNifti1"); reg_print_msg_error("The host and device arrays are of different types"); return EXIT_FAILURE; } else{ unsigned int voxelNumber=img->nx*img->ny*img->nz; NIFTI_TYPE *array_h=static_cast<NIFTI_TYPE *>(img->data); NIFTI_TYPE *array2_h=&array_h[voxelNumber]; NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (void *)*array_d, voxelNumber*sizeof(DTYPE), cudaMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array2_h, (void *)*array2_d, voxelNumber*sizeof(DTYPE), cudaMemcpyDeviceToHost)); } return EXIT_SUCCESS; } /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNifti(nifti_image *img, DTYPE **array_d, DTYPE **array2_d) { if(sizeof(DTYPE)==sizeof(float4)){ // A nifti 5D volume is expected if(img->dim[0]<5 || img->dim[4]>1 || img->dim[5]<2 || img->datatype!=NIFTI_TYPE_FLOAT32){ reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The nifti image is not a 5D volume"); return EXIT_FAILURE; } const int voxelNumber = img->nx*img->ny*img->nz; float4 *array_h=NULL; float4 *array2_h=NULL; NR_CUDA_SAFE_CALL(cudaMallocHost(&array_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMallocHost(&array2_h, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array_h, (const void *)*array_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); NR_CUDA_SAFE_CALL(cudaMemcpy((void *)array2_h, (const void *)*array2_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToHost)); float *niftiImgValues = static_cast<float *>(img->data); for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].x; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].x; } if(img->dim[5]>=2){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].y; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].y; } } if(img->dim[5]>=3){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].z; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].z; } } if(img->dim[5]>=4){ for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array_h[i].w; } for(int i=0; i<voxelNumber; i++){ *niftiImgValues++ = array2_h[i].w; } } NR_CUDA_SAFE_CALL(cudaFreeHost(array_h)); NR_CUDA_SAFE_CALL(cudaFreeHost(array2_h)); return EXIT_SUCCESS; } else{ switch(img->datatype){ case NIFTI_TYPE_FLOAT32: return cudaCommon_transferFromDeviceToNifti1<DTYPE,float>(img, array_d, array2_d); default: reg_print_fct_error("cudaCommon_transferFromDeviceToNifti"); reg_print_msg_error("The image data type is not supported"); return EXIT_FAILURE; } } } template int cudaCommon_transferFromDeviceToNifti<float>(nifti_image *, float **, float **); template int cudaCommon_transferFromDeviceToNifti<double>(nifti_image *, double **, double **); template int cudaCommon_transferFromDeviceToNifti<float4>(nifti_image *, float4 **, float4 **); // for deformation field /* ******************************** */ /* ******************************** */ void cudaCommon_free(cudaArray **cuArray_d) { NR_CUDA_SAFE_CALL(cudaFreeArray(*cuArray_d)); return; } /* ******************************** */ /* ******************************** */ template <class DTYPE> void cudaCommon_free(DTYPE **array_d) { NR_CUDA_SAFE_CALL(cudaFree(*array_d)); return; } template void cudaCommon_free<int>(int **); template void cudaCommon_free<float>(float **); template void cudaCommon_free<double>(double **); template void cudaCommon_free<float4>(float4 **); /* ******************************** */ /* 
******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNiftiSimple(DTYPE **array_d, nifti_image *img) { NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, img->data, img->nvox * sizeof(DTYPE), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNiftiSimple<int>(int **array_d, nifti_image *img); template int cudaCommon_transferFromDeviceToNiftiSimple<float>(float **array_d, nifti_image *img); template int cudaCommon_transferFromDeviceToNiftiSimple<double>(double **array_d, nifti_image *img); /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferFromDeviceToNiftiSimple1(DTYPE **array_d, DTYPE *img, const unsigned int nvox) { NR_CUDA_SAFE_CALL(cudaMemcpy(*array_d, img, nvox * sizeof(DTYPE), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } template int cudaCommon_transferFromDeviceToNiftiSimple1<int>(int **array_d, int *img, const unsigned); template int cudaCommon_transferFromDeviceToNiftiSimple1<float>(float **array_d, float *img, const unsigned); template int cudaCommon_transferFromDeviceToNiftiSimple1<double>(double **array_d, double *img, const unsigned); /* ******************************** */ /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferArrayFromCpuToDevice(DTYPE *array_d, const DTYPE *array_cpu, const unsigned int nElements) { const unsigned int memSize = nElements * sizeof(DTYPE); //copyData NR_CUDA_SAFE_CALL(cudaMemcpy(array_d, array_cpu, memSize, cudaMemcpyHostToDevice)); // return EXIT_SUCCESS; } template int cudaCommon_transferArrayFromCpuToDevice<int>(int *array_d, const int *array_cpu, const unsigned int nElements); template int cudaCommon_transferArrayFromCpuToDevice<float>(float *array_d, const float *array_cpu, const unsigned int nElements); template int cudaCommon_transferArrayFromCpuToDevice<double>(double *array_d, const double *array_cpu, const unsigned int nElements); /* ******************************** */ /* ******************************** */ /* ******************************** */ /* ******************************** */ template <class DTYPE> int cudaCommon_transferArrayFromDeviceToCpu(DTYPE *array_cpu, DTYPE *array_d, const unsigned int nElements) { const unsigned int memSize = nElements * sizeof(DTYPE); //copyData NR_CUDA_SAFE_CALL(cudaMemcpy(array_cpu, array_d, memSize, cudaMemcpyDeviceToHost)); // return EXIT_SUCCESS; } template int cudaCommon_transferArrayFromDeviceToCpu<int>(int *array_cpu, int *array_d, const unsigned int nElements); template int cudaCommon_transferArrayFromDeviceToCpu<float>(float *array_cpu, float *array_d, const unsigned int nElements); template int cudaCommon_transferArrayFromDeviceToCpu<double>(double *array_cpu, double *array_d, const unsigned int nElements); #endif /* ******************************** */ /* ******************************** */
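// ---------------------------------------------------------------------------
// Editor's note (not part of the original source): several transfer routines
// above repack a planar nifti deformation field (all x components, then all
// y, then z, then w) into an interleaved float4 array before copying to the
// device. The helper below is a minimal host-only sketch of that repacking,
// under the assumption that "planar" holds voxelNumber values per enabled
// component; the function name is hypothetical.
// ---------------------------------------------------------------------------
#include <vector_types.h>   // float4

static void packPlanarToFloat4(const float* planar, float4* interleaved,
                               int voxelNumber, int componentNumber)
{
    for (int i = 0; i < voxelNumber; ++i) {
        float4 v = {0.f, 0.f, 0.f, 0.f};
        v.x = planar[i];
        if (componentNumber >= 2) v.y = planar[voxelNumber + i];
        if (componentNumber >= 3) v.z = planar[2 * voxelNumber + i];
        if (componentNumber >= 4) v.w = planar[3 * voxelNumber + i];
        interleaved[i] = v;
    }
}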
#include <amgx_types/util.h> #include <amgx_types/math.h> using namespace std; namespace amgx { /*************************************** * Source Definitions ***************************************/ template<class TConfig> MatrixColoring<TConfig>::MatrixColoring(AMG_Config &cfg, const std::string &cfg_scope) : m_num_colors(0), m_row_colors(0), m_sorted_rows_by_color(0), m_offsets_rows_per_color(0), m_ref_count(1), m_boundary_coloring(SYNC_COLORS), m_halo_coloring(FIRST) { m_coloring_level = cfg.getParameter<int>("coloring_level", cfg_scope); m_boundary_coloring = cfg.getParameter<ColoringType>("boundary_coloring", cfg_scope); m_halo_coloring = cfg.getParameter<ColoringType>("halo_coloring", cfg_scope); } template<class TConfig> MatrixColoring<TConfig>::~MatrixColoring() { } __global__ void findSeparation(INDEX_TYPE *rows_by_color, INDEX_TYPE *offsets_by_color, INDEX_TYPE *separation, INDEX_TYPE boundary, INDEX_TYPE num_colors, INDEX_TYPE num_rows) { int block_offset = blockIdx.x * (blockDim.x / 32) * 31; //each warp does 31 rows, 1 on the left side is redundant int lane = threadIdx.x % 32; int element = block_offset + (threadIdx.x / 32) * 31 + lane - 1; while (element < num_rows) { int color = 0; int row = -1; if (element != -1) { row = rows_by_color[element]; while ((color < num_colors) && ((element < offsets_by_color[color]) || (element >= offsets_by_color[color + 1]))) { color ++; } if ((element == offsets_by_color[color]) && (row >= boundary)) { separation[color] = element; } //special case when first row of color is immediately a boundary node if ((element == offsets_by_color[color + 1] - 1) && (row < boundary)) { separation[color] = element + 1; } //special case when I am the last, but I am still not a boundary } unsigned int result = utils::ballot(row >= boundary, utils::activemask()); //if (result>0) printf("%x\n", result); if (lane > 0 && row >= boundary && ((result >> (lane - 1)) & 1) == 0) { separation[color] = element; } element += gridDim.x * (blockDim.x / 32) * 31; } } //prints how many edges fail to obey coloring property //if the optional aggregates parameter is specified, it also measures the downwind coloring property: //for each incoming edge (j,i), where i and j share the same aggregate, holds: color(j) < color(i) template <class TConfig> void MatrixColoring<TConfig>::assertColoring( Matrix<TConfig> &A, IVector &aggregates ) { IndexType numRows = A.get_num_rows(); IndexType nnz = A.get_num_nz(); IndexType blocksize = A.get_block_dimx() * A.get_block_dimy(); IVector &coloring = this->m_row_colors; bool check_downwind = aggregates.size() == A.get_num_rows(); //allocate host memory IndexType *ia = new IndexType[numRows + 1]; IndexType *ja = new IndexType[nnz]; ValueType *aa = new ValueType[nnz * blocksize]; IndexType *color = new IndexType[numRows]; //copy to host cudaMemcpy( ia, A.row_offsets.raw(), sizeof(IndexType) * (numRows + 1), cudaMemcpyDeviceToHost ); cudaMemcpy( ja, A.col_indices.raw(), sizeof(IndexType)*nnz, cudaMemcpyDeviceToHost ); cudaMemcpy( aa, A.values.raw(), sizeof(ValueType)*blocksize * nnz, cudaMemcpyDeviceToHost ); cudaMemcpy( color, coloring.raw(), sizeof(IndexType)*numRows, cudaMemcpyDeviceToHost ); IndexType *agg = new IndexType[numRows]; if ( check_downwind ) { cudaMemcpy( agg, aggregates.raw(), sizeof(IndexType)*numRows, cudaMemcpyDeviceToHost ); } //count how many nodes have a color IndexType *color_used = new IndexType[numRows]; for (IndexType i = 0; i < numRows; i++) { color_used[i] = 0; } for (IndexType i = 0; i < numRows; i++) { if ( color[i] 
>= 0 && color[i] < numRows ) { color_used[color[i]]++; } else { std::cout << "color out of range: color[" << i << "] = " << color[i] << std::endl; } } // count violations of these two properties: // 1. locally downwind: for incoming edges (j,i) in same aggregate: color(j) < color(i) // 2. valid coloring: for neighbors j: color(j) != color(i) int violation_1 = 0; int property_1 = 0; int violation_2 = 0; int property_2 = 0; int inner_edges = 0; //note: property 1 cannot be enforeced all the time. Each cycle for example will violate it regardless of the coloring. for (IndexType i = 0; i < numRows; i++) { for (IndexType ii = ia[i]; ii < ia[i + 1]; ii++) { IndexType j = ja[ii]; if ( j == i ) { continue; } //check coloring property if ( color[j] == color[i] ) { violation_2++; } property_2++; if ( check_downwind && agg[j] == agg[i] ) { //look for transpose edge to decide outgoing or not bool outgoing = true; for (IndexType jj = ia[j]; jj < ia[j + 1]; jj++) { //found if ( ja[jj] == i ) { ValueType weight = types::util<ValueType>::get_zero(); for (IndexType iii = ii * blocksize; iii < (ii + 1)*blocksize; iii++) { weight = weight + aa[iii] * aa[iii]; } ValueType counter_weight = types::util<ValueType>::get_zero(); for (IndexType jjj = jj * blocksize; jjj < (jj + 1)*blocksize; jjj++) { counter_weight = counter_weight + aa[jjj] * aa[jjj]; } outgoing = types::util<ValueType>::abs(weight) > types::util<ValueType>::abs(counter_weight); break; } } //outgoing -> check downwind property if ( outgoing ) { if ( color[j] <= color[i] ) { violation_1++; } property_1++; } inner_edges++; } } } //tell results if ( check_downwind ) { std::cout << 200 * property_1 / double(inner_edges) << "% of all edges inside an aggregate are directed" << std::endl; if ( property_1 > 0 ) { std::cout << 100 * violation_1 / double(property_1) << "% of all outgoing edges inside an aggregate are not colored downwind" << std::endl; } } std::cout << 100 * violation_2 / double(property_2) << "% of all edges violated coloring property" << std::endl; std::cout << "number of nodes that use this color:" << std::endl; for (IndexType i = 0; i < numRows; i++) if ( color_used[i] > 0 ) { std::cout << i << ": " << color_used[i] << std::endl; } //free! 
delete [] ia; delete [] ja; delete [] aa; delete [] agg; delete [] color; delete [] color_used; } template<class TConfig> void MatrixColoring<TConfig>::createColorArrays(Matrix<TConfig> &A) { ViewType old = A.currentView(); A.setViewExterior(); int num_rows = A.get_num_rows(); //Disabled since currently we are not doing halo exchanges during colored execution /*typedef TemplateConfig<AMGX_host,AMGX_vecInt,matPrec,indPrec> hvector_type; typedef Vector<hvector_type> HVector; if (!A.is_matrix_singleGPU()) { HVector num_colors(1); std::vector<HVector> partition_num_colors(0); num_colors[0] = m_num_colors; A.manager->getComms()->global_reduce(partition_num_colors, num_colors, A, 6332); int max_partition_colors = 0; for (int i = 0; i < partition_num_colors.size(); i++) max_partition_colors = std::max(partition_num_colors[i][0],max_partition_colors); m_num_colors = max_partition_colors; }*/ if (m_halo_coloring == LAST) { thrust::fill(m_row_colors.begin() + num_rows, m_row_colors.end(), m_num_colors); cudaCheckError(); } IVector offsets_rows_per_color; if (m_offsets_rows_per_color.size() == 0) { // Sort the vertices based o their color m_sorted_rows_by_color.resize(num_rows); // Copy row colors IVector row_colors(m_row_colors); thrust::sequence(m_sorted_rows_by_color.begin(), m_sorted_rows_by_color.end()); thrust::sort_by_key(row_colors.begin(), row_colors.begin() + num_rows, m_sorted_rows_by_color.begin()); cudaCheckError(); // Compute the offset for each color offsets_rows_per_color.resize(m_num_colors + 1); m_offsets_rows_per_color.resize(m_num_colors + 1); // Compute interior-exterior separation for every color m_offsets_rows_per_color_separation.resize(m_num_colors); //m_offsets_rows_per_color_separation_halo.resize(m_num_colors); thrust::lower_bound(row_colors.begin(), row_colors.begin() + num_rows, thrust::counting_iterator<IndexType>(0), thrust::counting_iterator<IndexType>(offsets_rows_per_color.size()), offsets_rows_per_color.begin()); // Copy from device to host m_offsets_rows_per_color = offsets_rows_per_color; cudaCheckError(); } else { m_offsets_rows_per_color_separation.resize(m_num_colors); } cudaCheckError(); if (!A.is_matrix_singleGPU() && (A.getViewExterior() != A.getViewInterior())) { A.setViewInterior(); int separation = A.get_num_rows(); if (TConfig::memSpace == AMGX_host) { for (int i = 0; i < m_num_colors; i++) { m_offsets_rows_per_color_separation[i] = m_offsets_rows_per_color[i] + (thrust::lower_bound(m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i], m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i + 1], separation) - (m_sorted_rows_by_color.begin() + m_offsets_rows_per_color[i])); } cudaCheckError(); } // this is not a proper search, rather we look at every single element. 
But it is still a lot faster than the above (~10*) else { IVector separation_offsets_rows_per_color(m_num_colors); int size = num_rows; int num_blocks = min(4096, (size + 123) / 124); findSeparation <<< num_blocks, 128>>>(m_sorted_rows_by_color.raw(), offsets_rows_per_color.raw(), separation_offsets_rows_per_color.raw(), separation, m_num_colors, num_rows); thrust::copy(separation_offsets_rows_per_color.begin(), separation_offsets_rows_per_color.end(), m_offsets_rows_per_color_separation.begin()); cudaCheckError(); for (int i = 0; i < m_num_colors; i++) { if (this->m_offsets_rows_per_color[i] == this->m_offsets_rows_per_color[i + 1]) { this->m_offsets_rows_per_color_separation[i] = this->m_offsets_rows_per_color[i + 1]; } } } } else { thrust::copy(m_offsets_rows_per_color.begin() + 1, m_offsets_rows_per_color.end(), m_offsets_rows_per_color_separation.begin()); cudaCheckError(); } A.setView(old); } template<class TConfig> std::map<std::string, MatrixColoringFactory<TConfig>*> & MatrixColoringFactory<TConfig>::getFactories( ) { static std::map<std::string, MatrixColoringFactory<TConfig> *> s_factories; return s_factories; } template<class TConfig> void MatrixColoringFactory<TConfig>::registerFactory(string name, MatrixColoringFactory<TConfig> *f) { std::map<std::string, MatrixColoringFactory<TConfig> *> &factories = getFactories( ); typename map<string, MatrixColoringFactory<TConfig> *>::iterator it = factories.find(name); if (it != factories.end()) { string error = "MatrixColoringFactory '" + name + "' has already been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } factories[name] = f; } template<class TConfig> void MatrixColoringFactory<TConfig>::unregisterFactory(std::string name) { std::map<std::string, MatrixColoringFactory<TConfig>*> &factories = getFactories( ); typename std::map<std::string, MatrixColoringFactory<TConfig> *>::iterator it = factories.find(name); if (it == factories.end()) { std::string error = "MatrixColoringFactory '" + name + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } MatrixColoringFactory<TConfig> *factory = it->second; assert( factory != NULL ); delete factory; factories.erase(it); } template<class TConfig> void MatrixColoringFactory<TConfig>::unregisterFactories( ) { std::map<std::string, MatrixColoringFactory<TConfig>*> &factories = getFactories( ); typename map<string, MatrixColoringFactory<TConfig> *>::iterator it = factories.begin( ); for ( ; it != factories.end( ) ; ) { MatrixColoringFactory<TConfig> *factory = it->second; assert( factory != NULL ); it++; delete factory; } factories.clear( ); } template<class TConfig> MatrixColoring<TConfig> *MatrixColoringFactory<TConfig>::allocate(AMG_Config &cfg, const std::string &cfg_scope) { std::map<std::string, MatrixColoringFactory<TConfig> *> &factories = getFactories( ); string matrix_coloring_scheme = cfg.getParameter<string>("matrix_coloring_scheme", cfg_scope); typename map<string, MatrixColoringFactory<TConfig> *>::const_iterator it = factories.find(matrix_coloring_scheme); if (it == factories.end()) { string error = "MatrixColoringFactory '" + matrix_coloring_scheme + "' has not been registered\n"; FatalError(error.c_str(), AMGX_ERR_CORE); } return it->second->create(cfg, cfg_scope); }; /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MatrixColoring<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef 
AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MatrixColoringFactory<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE }
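// ---------------------------------------------------------------------------
// Editor's note (not part of the original source): createColorArrays above
// groups rows by color with a sort_by_key followed by a vectorized
// lower_bound. The host-only sketch below reproduces that idea with the
// standard library so the resulting offset layout is easy to inspect; all
// names are hypothetical.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <numeric>
#include <vector>

// Fills sorted_rows (row indices ordered by color) and offsets, where
// offsets[c] is the first position of color c and offsets[num_colors] is
// the total number of rows.
static void groupRowsByColor(const std::vector<int>& row_colors, int num_colors,
                             std::vector<int>& sorted_rows,
                             std::vector<int>& offsets)
{
    const int num_rows = static_cast<int>(row_colors.size());
    sorted_rows.resize(num_rows);
    std::iota(sorted_rows.begin(), sorted_rows.end(), 0);

    // Stable sort keeps rows of equal color in their original order.
    std::stable_sort(sorted_rows.begin(), sorted_rows.end(),
                     [&](int a, int b) { return row_colors[a] < row_colors[b]; });

    // offsets[c] = index of the first row whose color is >= c.
    offsets.assign(num_colors + 1, 0);
    for (int c = 0; c <= num_colors; ++c) {
        offsets[c] = static_cast<int>(
            std::lower_bound(sorted_rows.begin(), sorted_rows.end(), c,
                             [&](int row, int color) {
                                 return row_colors[row] < color;
                             }) - sorted_rows.begin());
    }
}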
#include <THC/THC.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> template <typename scalar_t> __device__ __forceinline__ int is_include( scalar_t token, const scalar_t* __restrict__ subset, const size_t size) { for (size_t i = 0; i < size; ++i) { if (token == subset[i]) { return true; } } return false; } template <typename scalar_t> __device__ __forceinline__ int tokens_length( const scalar_t* __restrict__ first, const scalar_t* __restrict__ last, const scalar_t* __restrict__ separator, const size_t separator_size, const scalar_t* __restrict__ blank, const size_t blank_size) { int length = 0; bool prev_separator = true; auto iter = (scalar_t*)first; while (iter < last) { if (is_include(*iter, blank, blank_size)) { ++iter; continue; } if (separator_size == 0) { ++length; } else { bool curr_separator = is_include(*iter, separator, separator_size); if (!curr_separator && prev_separator) { ++length; } prev_separator = curr_separator; } ++iter; } return length; } template <typename scalar_t> __device__ __forceinline__ scalar_t* first_token( const scalar_t* __restrict__ first, const scalar_t* __restrict__ last, const scalar_t* __restrict__ separator, const size_t separator_size, const scalar_t* __restrict__ blank, const size_t blank_size) { auto iter = (scalar_t*)first; while (iter < last) { if (is_include(*iter, blank, blank_size)) { ++iter; continue; } if (separator_size == 0) { break; } if (!is_include(*iter, separator, separator_size)) { break; } ++iter; } return iter; } template <typename scalar_t> __device__ __forceinline__ bool compare_tokens( const scalar_t* __restrict__ a, const scalar_t* __restrict__ b, const scalar_t* __restrict__ c, const scalar_t* __restrict__ d, const scalar_t* __restrict__ separator, const size_t separator_size, const scalar_t* __restrict__ blank, const size_t blank_size) { auto iter1 = (scalar_t*)a; auto iter2 = (scalar_t*)b; while (true) { while (iter1 < c && is_include(*iter1, blank, blank_size)) { ++iter1; } while (iter2 < d && is_include(*iter2, blank, blank_size)) { ++iter2; } if (iter1 == c && iter2 == d) { return true; } if (iter1 == c && iter2 < d) { if (separator_size == 0) { return false; } return is_include(*iter2, separator, separator_size); } if (iter2 == d && iter1 < c) { if (separator_size == 0) { return false; } return is_include(*iter1, separator, separator_size); } if (separator_size == 0) { return *iter1 == *iter2; } if (is_include(*iter1, separator, separator_size) && is_include(*iter2, separator, separator_size)) { return true; } if (*iter1 != *iter2) { return false; } ++iter1; ++iter2; } } template <typename scalar_t> __device__ __forceinline__ scalar_t* next_token( const scalar_t* __restrict__ first, const scalar_t* __restrict__ last, const scalar_t* __restrict__ separator, const size_t separator_size, const scalar_t* __restrict__ blank, const size_t blank_size) { auto iter = (scalar_t*)first; while (iter < last) { if (is_include(*iter, blank, blank_size)) { ++iter; continue; } if (separator_size == 0) { return ++iter; } if (is_include(*iter, separator, separator_size)) { break; } ++iter; } return first_token(iter, last, separator, separator_size, blank, blank_size); } template <typename scalar_t> __global__ void levenshtein_distance_kernel( const scalar_t* __restrict__ source, const scalar_t* __restrict__ target, const int* __restrict__ source_length, const int* __restrict__ target_length, const size_t source_size, const size_t target_size, const scalar_t* __restrict__ separator, const size_t 
separator_size, const scalar_t* __restrict__ blank, const size_t blank_size, int* __restrict__ operations) { extern __shared__ short errors[]; const int i = blockIdx.x; auto errors_prev = errors; auto errors_curr = errors + (target_size + 1) * 4; const scalar_t* hyp_begin = source + i * source_size; const scalar_t* ref_begin = target + i * target_size; const scalar_t* hyp_end = hyp_begin + source_length[i]; const scalar_t* ref_end = ref_begin + target_length[i]; int hyp_size = tokens_length(hyp_begin, hyp_end, separator, separator_size, blank, blank_size); int ref_size = tokens_length(ref_begin, ref_end, separator, separator_size, blank, blank_size); for (int r = 0; r <= ref_size; ++r) { errors_prev[r*4+0] = 0; // ins_num errors_prev[r*4+1] = r; // del_num errors_prev[r*4+2] = 0; // sub_num errors_prev[r*4+3] = r; // total_cost } auto hyp = first_token(hyp_begin, hyp_end, separator, separator_size, blank, blank_size); for (int h = 1; h <= hyp_size; ++h) { errors_curr[0] = errors_prev[0] + 1; // ins_num errors_curr[1] = errors_prev[1]; // del_num errors_curr[2] = errors_prev[2]; // sub_num errors_curr[3] = errors_prev[3] + 1; // total_cost auto ref = first_token(ref_begin, ref_end, separator, separator_size, blank, blank_size); for (int r = 1; r <= ref_size; ++r) { int r4 = r * 4; int p4 = r4 - 4; int ins_err = errors_prev[r4+3] + 1; int del_err = errors_curr[p4+3] + 1; int sub_err = errors_prev[p4+3]; int d; if (compare_tokens(hyp, ref, hyp_end, ref_end, separator, separator_size, blank, blank_size)) { d = 0; } else { d = 1; sub_err++; } if (sub_err < ins_err && sub_err < del_err) { errors_curr[r4+0] = errors_prev[p4+0]; // ins_num errors_curr[r4+1] = errors_prev[p4+1]; // del_num errors_curr[r4+2] = errors_prev[p4+2] + d; // sub_num errors_curr[r4+3] = sub_err; // total_cost } else if (del_err < ins_err) { errors_curr[r4+0] = errors_curr[p4+0]; // ins_num errors_curr[r4+1] = errors_curr[p4+1] + 1; // del_num errors_curr[r4+2] = errors_curr[p4+2]; // sub_num errors_curr[r4+3] = del_err; // total_cost } else { errors_curr[r4+0] = errors_prev[r4+0] + 1; // ins_num errors_curr[r4+1] = errors_prev[r4+1]; // del_num errors_curr[r4+2] = errors_prev[r4+2]; // sub_num errors_curr[r4+3] = ins_err; // total_cost } ref = next_token(ref, ref_end, separator, separator_size, blank, blank_size); } // alternate for the next recursion short* temp = errors_prev; errors_prev = errors_curr; errors_curr = temp; hyp = next_token(hyp, hyp_end, separator, separator_size, blank, blank_size); } operations[i*4+0] = errors_prev[ref_size*4+0]; // ins operations[i*4+1] = errors_prev[ref_size*4+1]; // del operations[i*4+2] = errors_prev[ref_size*4+2]; // sub operations[i*4+3] = ref_size; } template <typename scalar_t> __global__ void collapse_repeated_kernel( scalar_t* __restrict__ source, int* __restrict__ length, const size_t size) { const int i = threadIdx.x; if (length[i] <= 0) { return; } const scalar_t* iter = source + i * size; const scalar_t* last = iter + length[i]; auto target = (scalar_t*)iter; int n = 1; ++iter; while (iter < last) { if (*iter == *target) { ++iter; continue; } ++target; *target = *iter; ++iter; ++n; } length[i] = n; } template <typename scalar_t> __global__ void remove_blank_kernel( scalar_t* __restrict__ source, int* __restrict__ length, const size_t size, const scalar_t* __restrict__ blank, const size_t blank_size) { const int i = threadIdx.x; if (length[i] <= 0) { return; } const scalar_t* iter = source + i * size; const scalar_t* last = iter + length[i]; auto target = (scalar_t*)iter; 
int n = 0; while (iter < last) { if (is_include(*iter, blank, blank_size)) { ++iter; continue; } *target = *iter; ++target; ++iter; ++n; } length[i] = n; } template <typename scalar_t> __global__ void strip_separator_kernel( scalar_t* __restrict__ source, int* __restrict__ length, const size_t size, const scalar_t* __restrict__ separator, const size_t separator_size) { const int i = threadIdx.x; if (length[i] <= 0) { return; } const scalar_t* iter = source + i * size; const scalar_t* last = iter + length[i]; auto target = (scalar_t*)iter; int n = 0; int p = 0; while (iter < last) { if (is_include(*iter, separator, separator_size)) { if (n == p) { ++iter; continue; } p = n + 1; } *target = *iter; ++target; ++iter; ++n; } if (n > 0 && n == p) { --n; } length[i] = n; } void CollapseRepeatedCuda( torch::Tensor source, torch::Tensor length) { const auto batch_size = source.size(0); auto stream = at::cuda::getCurrentCUDAStream(source.device().index()); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "collapse_repeated", ([&] { collapse_repeated_kernel<scalar_t><<<1, batch_size, 0, stream>>>( source.data<scalar_t>(), length.data<int>(), source.size(1)); })); } void RemoveBlankCuda( torch::Tensor source, torch::Tensor length, torch::Tensor blank) { const auto batch_size = source.size(0); auto stream = at::cuda::getCurrentCUDAStream(source.device().index()); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "remove_blank", ([&] { remove_blank_kernel<scalar_t><<<1, batch_size, 0, stream>>>( source.data<scalar_t>(), length.data<int>(), source.size(1), blank.data<scalar_t>(), blank.ndimension() * blank.numel()); })); } void StripSeparatorCuda( torch::Tensor source, torch::Tensor length, torch::Tensor separator) { const auto batch_size = source.size(0); auto stream = at::cuda::getCurrentCUDAStream(source.device().index()); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "strip_separator", ([&] { strip_separator_kernel<scalar_t><<<1, batch_size, 0, stream>>>( source.data<scalar_t>(), length.data<int>(), source.size(1), separator.data<scalar_t>(), separator.ndimension() * separator.numel()); })); } torch::Tensor LevenshteinDistanceCuda( torch::Tensor source, torch::Tensor target, torch::Tensor source_length, torch::Tensor target_length, torch::Tensor blank, torch::Tensor separator) { const auto batch_size = source.size(0); const auto shared_size = (target.size(1) + 1) * 4 * 2 * sizeof(short); at::TensorOptions options(source.device()); options = options.dtype(at::ScalarType::Int); auto operations = torch::empty({batch_size, 4}, options); auto stream = at::cuda::getCurrentCUDAStream(source.device().index()); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "levenshtein_distance", ([&] { levenshtein_distance_kernel<scalar_t><<<batch_size, 1, shared_size, stream>>>( source.data<scalar_t>(), target.data<scalar_t>(), source_length.data<int>(), target_length.data<int>(), source.size(1), target.size(1), separator.data<scalar_t>(), separator.ndimension() * separator.numel(), blank.data<scalar_t>(), blank.ndimension() * blank.numel(), operations.data<int>()); })); return operations; }
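// ---------------------------------------------------------------------------
// Reference note (added, not part of the original source): a minimal host-side
// sketch of the token-level edit-distance recurrence implemented by
// levenshtein_distance_kernel above. It assumes the sequences are already
// tokenized into integer IDs (the separator/blank skipping done by
// first_token/next_token is left out) and mirrors the kernel's DP cells
// {ins, del, sub, total} and its tie-breaking order: substitution, then
// deletion, then insertion.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

struct EditCell { int ins, del, sub, total; };

inline std::vector<int> levenshtein_reference(const std::vector<int64_t> &hyp,
                                               const std::vector<int64_t> &ref) {
  std::vector<EditCell> prev(ref.size() + 1), curr(ref.size() + 1);
  // Row 0: matching an empty hypothesis costs r deletions (same init as errors_prev).
  for (size_t r = 0; r <= ref.size(); ++r) prev[r] = {0, (int)r, 0, (int)r};
  for (size_t h = 1; h <= hyp.size(); ++h) {
    // Column 0: h hypothesis tokens against an empty reference -> h insertions.
    curr[0] = {prev[0].ins + 1, prev[0].del, prev[0].sub, prev[0].total + 1};
    for (size_t r = 1; r <= ref.size(); ++r) {
      const int ins_err = prev[r].total + 1;
      const int del_err = curr[r - 1].total + 1;
      const int d = (hyp[h - 1] == ref[r - 1]) ? 0 : 1;
      const int sub_err = prev[r - 1].total + d;
      if (sub_err < ins_err && sub_err < del_err) {
        curr[r] = {prev[r - 1].ins, prev[r - 1].del, prev[r - 1].sub + d, sub_err};
      } else if (del_err < ins_err) {
        curr[r] = {curr[r - 1].ins, curr[r - 1].del + 1, curr[r - 1].sub, del_err};
      } else {
        curr[r] = {prev[r].ins + 1, prev[r].del, prev[r].sub, ins_err};
      }
    }
    prev.swap(curr);  // rolling two-row DP, like the errors_prev/errors_curr swap
  }
  const EditCell &e = prev[ref.size()];
  // Same layout as the kernel's per-row `operations` output: ins, del, sub, ref length.
  return {e.ins, e.del, e.sub, (int)ref.size()};
}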
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>

//#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>

#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/wmma_gemm_traits.h"

namespace {

cublasOperation_t convertTransToCublasOperation(char trans) {
  if (trans == 't')
    return CUBLAS_OP_T;
  else if (trans == 'n')
    return CUBLAS_OP_N;
  else if (trans == 'c')
    return CUBLAS_OP_C;
  else {
    AT_ERROR("trans must be one of: t, n, c");
    return CUBLAS_OP_T;
  }
}

void CublasStridedBatchedGemm(
    char transa, char transb, long m, long n, long k, float alpha,
    const half *a, long lda, long strideA, const half *b, long ldb,
    long strideB, float beta, half *c, long ldc, long strideC,
    long batchCount, cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP) {
  cublasOperation_t opa = convertTransToCublasOperation(transa);
  cublasOperation_t opb = convertTransToCublasOperation(transb);
  cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  cublasSetStream(handle, stream);
  float fAlpha = alpha;
  float fBeta = beta;
  // THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
  TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(
      handle, opa, opb, (int)m, (int)n, (int)k, (void *)&fAlpha, a,
      CUDA_R_16F, (int)lda, strideA, b, CUDA_R_16F, (int)ldb, strideB,
      (void *)&fBeta, c, CUDA_R_16F, (int)ldc, strideC, (int)batchCount,
      CUDA_R_32F, algo));
  // THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
}

} // namespace

template <cutlass::MatrixLayout::Kind A_LAYOUT,
          cutlass::MatrixLayout::Kind B_LAYOUT, int SRC_A, int SRC_B,
          int DST_C>
void CutlassGemm_FP32Accum(cudaStream_t stream, long m, long n, long k,
                           float alpha, const half *a, long lda, long strideA,
                           const half *b, long ldb, long strideB, float beta,
                           half *c, long ldc, long strideC, long batchCount) {
  // printf("CUTLASS-> %c%c M: %ld N: %ld K: %ld %d%d%d LDA: %ld LDB: %ld LDC:
  // %ld strideA: %ld strideB: %ld strideC: %ld Alpha: %f Beta: %f\n",
  // ((int)A_LAYOUT == 0 ? 'T' : 'N'), ((int)B_LAYOUT ==0 ?
'T' : 'N'), m, n, k, // SRC_A,SRC_B,DST_C, lda, ldb, ldc, strideA, strideB, strideC, alpha, beta); typedef cutlass::gemm::WmmaGemmTraits< A_LAYOUT, B_LAYOUT, cutlass::Shape<32, 16, 16>, half, half, half, cutlass::gemm::LinearScaling<float>, float, typename cutlass::gemm::WmmaGemmAccumulatorsPerWarp< typename cutlass::Shape<32, 16, 16>>::Shape, typename cutlass::Shape<16, 16, 16>, SRC_A, // kScalarsPerLdgA_ SRC_B, // kScalarsPerLdgB_ SRC_A, // KScalarsPerLdsA_ SRC_B, // KScalarsPerLdsB_ DST_C, // kScalarsPerLdgCAndStgD_ DST_C / 2, // kScalarsPerStsD_ DST_C / 2 // kScalarsPerLdsD_ > WmmaGemmTraits; typedef cutlass::gemm::Gemm<WmmaGemmTraits> Gemm; typename Gemm::Params params; int result = params.initialize( m, // M dimension for each batch n, // N dimension for each batch k, // K dimension for each batch alpha, // scalar alpha a, lda, strideA, // distance in memory between the first element of neighboring // batch b, ldb, strideB, // distance in memory between the first element of neighboring // batch beta, // scalar beta c, // source matrix C ldc, strideC, // distance in memory between the first element of neighboring // batch c, // destination matrix C (may be different memory than source C matrix) ldc, strideC, // distance in memory between the first element of neighboring // batch batchCount); AT_ASSERTM(result == 0, "Failed to initialize CUTLASS Gemm::Params object."); // batchCount in cutlass batched GEMM kernels maps to gridDim.z, which is // limited to 16 bits. To implement batched GEMM with larger batch size, we // fragment it into smaller batched GEMMs of gridDim.z <= 64k long batchesLeft = batchCount; long iterBatchCount = std::min(batchesLeft, static_cast<long>((1 << 16) - 1)); do { // printf("CUTLASS-> %c%c M: %ld N: %ld K: %ld %d%d%d LDA: %ld LDB: %ld LDC: // %ld strideA: %ld strideB: %ld strideC: %ld Alpha: %f Beta: %f // TotalBatches: %ld iterBatchCount %ld\n", ((int)A_LAYOUT == 0 ? 'T' : 'N'), // ((int)B_LAYOUT ==0 ? 'T' : 'N'), m, n, k, SRC_A,SRC_B,DST_C, lda, ldb, // ldc, strideA, strideB, strideC, alpha, beta, batchesLeft, iterBatchCount); int result = params.initialize(m, // M dimension for each batch n, // N dimension for each batch k, // K dimension for each batch alpha, // scalar alpha a, lda, strideA, // distance in memory between the first // element of neighboring batch b, ldb, strideB, // distance in memory between the first // element of neighboring batch beta, // scalar beta c, // source matrix C ldc, strideC, // distance in memory between the first // element of neighboring batch c, // destination matrix C (may be different memory // than source C matrix) ldc, strideC, // distance in memory between the first // element of neighboring batch iterBatchCount); AT_ASSERTM(result == 0, "Failed to initialize CUTLASS Gemm::Params object."); // Launch the CUTLASS GEMM kernel. 
C10_CUDA_CHECK(Gemm::launch(params, stream)); // Update batched GEMM params based on completed work batchesLeft = batchesLeft - iterBatchCount; a += iterBatchCount * strideA; b += iterBatchCount * strideB; c += iterBatchCount * strideC; ; iterBatchCount = std::min(batchesLeft, static_cast<long>((1 << 16) - 1)); } while (batchesLeft > 0); } namespace { void gemm_switch_fp32accum(char transa, char transb, long m, long n, long k, float alpha, const half *a, long lda, long strideA, const half *b, long ldb, long strideB, float beta, half *c, long ldc, long strideC, long batchCount) { auto stream = c10::cuda::getCurrentCUDAStream(); // printf("GEMM -> %c%c M: %i N: %i K: %i Alpha: %f Beta: %f\n", (transa == // 't' ? 'T' : 'N'), (transb =='t' ? 'T' : 'N'), m, n, k, alpha, beta); if ((transa == 't') && (transb == 'n')) { if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, 
strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 8>( stream, m, n, k, 
alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } else if ((transa == 'n') && (transb == 'n')) { if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, 
strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 8>( 
stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } else if ((transa == 'n') && (transb == 't')) { if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, 
batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, 
strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } else { AT_ASSERTM(false, "TransA and TransB are invalid"); } } void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); // Note: leading dimensions generally are checked that they are > 0 and at // least as big the result requires (even if the value won't be used). if (n <= 1) *ldc = std::max<int64_t>(m, 1); if (transa_) { if (m <= 1) *lda = std::max<int64_t>(k, 1); } else { if (k <= 1) *lda = std::max<int64_t>(m, 1); } if (transb_) { if (k <= 1) *ldb = std::max<int64_t>(n, 1); } else { if (n <= 1) *ldb = std::max<int64_t>(k, 1); } } void HgemmStridedBatched(char transa, char transb, long m, long n, long k, float alpha, const half *a, long lda, long strideA, const half *b, long ldb, long strideB, float beta, half *c, long ldc, long strideC, long batchCount) { if ((m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX)) { AT_ERROR("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, " "batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); gemm_switch_fp32accum(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } // namespace
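// ---------------------------------------------------------------------------
// Note (added, not part of the original source): the long if/else ladder in
// gemm_switch_fp32accum above encodes one simple rule. For each leading
// dimension (lda, ldb, ldc) it picks the widest half-precision vector width
// that divides it -- 8, 4 or 2 halves -- and that triple selects the
// kScalarsPerLdg* template parameters of CutlassGemm_FP32Accum. The fully
// 8/8/8-aligned case is routed to cuBLAS with CUBLAS_GEMM_ALGO0_TENSOR_OP,
// and any odd leading dimension falls through to the plain cuBLAS default
// path. A hypothetical helper expressing that rule:
// ---------------------------------------------------------------------------
static inline int widestHalfVectorWidth(long ld) {
  if ((ld & 0x7) == 0) return 8;  // 8 halves = 16-byte vectors
  if ((ld & 0x3) == 0) return 4;  // 4 halves = 8-byte vectors
  if ((ld & 0x1) == 0) return 2;  // 2 halves = 4-byte vectors
  return 1;                       // odd ld: no CUTLASS branch, cuBLAS default
}
// Example: lda = 1024, ldb = 768, ldc = 514 give widths 8, 8, 2, which in the
// ('t','n') case above maps to
// CutlassGemm_FP32Accum<kRowMajor, kColumnMajor, 8, 8, 2>.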
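// ---------------------------------------------------------------------------
// Note (added, not part of the original source): the regional-fitting kernels
// below (FitTmidNucShiftPerFlow, SingleFlowRegionalLevMarFit) inline the same
// damped 3x3 normal-equation solve via Cramer's rule at every
// Levenberg-Marquardt iteration: the symmetric JTJ entries are the lhs_xy
// terms, the JTr entries are the rhs_x terms, and only the diagonal is scaled
// by (1 + lambda). A standalone sketch of that solve step, using the same
// formulas as the kernels:
// ---------------------------------------------------------------------------
__host__ __device__ inline bool SolveLevMar3x3(
    double lhs_00, double lhs_01, double lhs_02,
    double lhs_11, double lhs_12, double lhs_22,
    double rhs_0, double rhs_1, double rhs_2,
    double lambda, double *delta /* delta[3] */) {
  const double a00 = lhs_00 * (1.0 + lambda);  // damp the diagonal only
  const double a11 = lhs_11 * (1.0 + lambda);
  const double a22 = lhs_22 * (1.0 + lambda);
  double det = a00 * (a11 * a22 - lhs_12 * lhs_12)
             - lhs_01 * (lhs_01 * a22 - lhs_12 * lhs_02)
             + lhs_02 * (lhs_01 * lhs_12 - a11 * lhs_02);
  if (det == 0.0) return false;
  det = 1.0 / det;
  delta[0] = det * (rhs_0 * (a11 * a22 - lhs_12 * lhs_12) +
                    rhs_1 * (lhs_02 * lhs_12 - lhs_01 * a22) +
                    rhs_2 * (lhs_01 * lhs_12 - lhs_02 * a11));
  delta[1] = det * (rhs_0 * (lhs_12 * lhs_02 - lhs_01 * a22) +
                    rhs_1 * (a00 * a22 - lhs_02 * lhs_02) +
                    rhs_2 * (lhs_01 * lhs_02 - a00 * lhs_12));
  delta[2] = det * (rhs_0 * (lhs_01 * lhs_12 - lhs_02 * a11) +
                    rhs_1 * (lhs_01 * lhs_02 - a00 * lhs_12) +
                    rhs_2 * (a00 * a11 - lhs_01 * lhs_01));
  // The kernels reject a step whose components are NaN and retry with a
  // larger lambda; mirror that check here.
  return !(::isnan(delta[0]) || ::isnan(delta[1]) || ::isnan(delta[2]));
}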
#define FIT_COPYDRIFT 0
#define NUM_TIME_VARYING_PARAMS 3
#define DEBUG_REG_FITTING 0
// Finite-difference step sizes used to form the Levenberg-Marquardt partial
// derivatives for the three time-varying regional parameters
// (tmid nuc, ratio drift, copy drift).
#define TMIDNUC_REG_STEP 0.01f
#define RDR_REG_STEP 0.01f
#define PDR_REG_STEP 0.001f
#define REGLEVMARMAT_LHS_LEN (((NUM_TIME_VARYING_PARAMS)*(NUM_TIME_VARYING_PARAMS + 1)) / 2)
#define REGLEVMARMAT_RHS_LEN (NUM_TIME_VARYING_PARAMS)
#define LEVMARITERS 4
#define REG_FIT_SM_ACCUM_BUFFERSIZE 256

template<typename T, const size_t len = 6>
struct OneDimVec {
  T val[len];
  __device__ OneDimVec() {
    for (size_t i=0; i<len; ++i)
      val[i] = 0;
  }
  __device__ __inline__ void clear() {
    for (size_t i=0; i<len; ++i)
      val[i] = 0;
  }
};

template<typename T>
__device__ double CalculateDotProduct(
  T *src1,
  T *src2,
  int len)
{
  double sum = 0;
  for (int i=0; i<len; ++i) {
    sum += src1[i]*src2[i];
  }
  return sum;
}

__device__ float CalculateTmidNucShiftFitResidual(
  float *obsTrace,
  float *modelTrace,
  float *err,
  int num_frames )
{
  float residual = 0;
  for (int i=0; i<num_frames; ++i) {
    float e = obsTrace[i] - modelTrace[i];
    residual += e*e;
    err[i] = e;
  }
  return (residual / (float)(num_frames));
}

// Subtracts the buffered empty-trace background (RC filter with time constant
// tauB) and the dark-matter contribution from a bead's raw trace.
__device__ void BkgCorrectedRawTrace(
  const float *bkgTrace,
  const short *rawTrace,
  const float *beadParamCube,
  const float *regionFrameCube,
  const float *deltaFrames,
  const float darkness,
  const float etbR,
  const float gain,
  const float tauB,
  const int num_frames,
  const int beadFrameStride,
  const int regionFrameStride,
  float *correctedTrace )
{
  float R = etbR - 1.0f;
  float dv = 0.0f;
  float dv_rs = 0.0f;
  float dvn = 0.0f;
  float aval;
  float curSbgVal, deltaFrameVal;
  for (int i=0; i<num_frames; ++i) {
    deltaFrameVal = LDG_ACCESS(deltaFrames, i);
    curSbgVal = LDG_ACCESS(bkgTrace, i);
    aval = deltaFrameVal/(2.0f * tauB);
    dvn = (R*curSbgVal - dv_rs/tauB - dv*aval) / (1.0f + aval);
    dv_rs += (dv+dvn) * deltaFrameVal * 0.5f;
    dv = dvn;
    correctedTrace[i] = (float)(*rawTrace) -
        ((dv+curSbgVal)*gain +
         ApplyDarkMatterToFrame( beadParamCube, regionFrameCube, darkness,
                                 i, num_frames, beadFrameStride, regionFrameStride));
    rawTrace += beadFrameStride;
  }
}

// Pick the emphasis vector matching the (integer) amplitude estimate, capped
// at emphMax.
__device__ const float* setAdaptiveEmphasis(
  float ampl,
  const float *emphasis,
  int emphVecLen,
  int emphMax)
{
  int emAmp = (int)ampl;
  return ((emAmp > emphMax) ?
emphasis + emphVecLen*emphMax : emphasis + emphVecLen*emAmp); } __device__ float CalculateBeadResidualError( float *observedTrace, float *modelTrace, const float *emphasis, int num_frames) { float res = 0; for (int frm=0; frm < num_frames; ++frm) { float frm_res = (observedTrace[frm] - modelTrace[frm])*emphasis[frm]; res += (frm_res * frm_res); } return res; } __device__ float CalculateBeadResidualError( const short *observedTrace, const float *modelTrace, const float *emphasis, const size_t obsTrStride, const size_t num_frames) { float res = 0; for (size_t frm=0; frm < num_frames; ++frm) { float frm_res = ((float)(*observedTrace) - modelTrace[frm])*emphasis[frm]; res += (frm_res * frm_res); observedTrace += obsTrStride; } return res; } __device__ void CalculateYerr( float *pd, const short *obsTrace, const float *oldTrace, const float *emphForFitting, const float stepSize, const size_t stride, const size_t frames) { for (size_t frm=0; frm<frames; ++frm) { pd[frm] = (((float)(*obsTrace) - oldTrace[frm])*emphForFitting[frm]) / stepSize; obsTrace += stride; } } __device__ void CalculatePartialDerivative( float *pd, const float *newTrace, const float *oldTrace, const float *emphForFitting, const float stepSize, const size_t frames) { if (emphForFitting) { for (size_t frm=0; frm<frames; ++frm) pd[frm] = ((newTrace[frm] - oldTrace[frm])*emphForFitting[frm]) / stepSize; } else { for (size_t frm=0; frm<frames; ++frm) pd[frm] = (newTrace[frm] - oldTrace[frm]) / stepSize; } } __device__ void AccumulatePartialDerivatives( const float *pd, float *smReadBuffer, float *smWriteBuffer, const int samples, const int frames) { for (int j=0; j<frames; ++j) { smReadBuffer[threadIdx.x] = pd[j]; // not coalsced..need to write to global mem cubes __syncthreads(); SimplestReductionAndAverage(smReadBuffer, samples, false); if (threadIdx.x == 0) smWriteBuffer[j] = smReadBuffer[0]; // reduce one syncthread here __syncthreads(); } } __device__ void FitTmidNucShiftPerFlow( const int realFnum, const float ampl, const short *obsTrace, const float *beadParamCube, const unsigned short *beadStateCube, const float *emptyTraceAvg, const float *emphasisVec, const PerNucParamsRegion *perNucRegP, const ConstantParamsRegion *constRegP, const float *regionFrameCube, const size_t beadFrameStride, const size_t regionFrameStride, const size_t num_frames, const size_t samples, PerFlowParamsRegion *perFlowRegP) { __shared__ float smAvgCopies[512]; __shared__ float smAvgR[512]; __shared__ int smOnemerCount[512]; float correctedTrace[MAX_COMPRESSED_FRAMES_GPU]; // right now getting bead params in the order they were in bead_params struct const float copies = *(beadParamCube + BpCopies*beadFrameStride); const float R = *(beadParamCube + BpR*beadFrameStride); const float gain = *(beadParamCube + BpGain*beadFrameStride); // calculate empty to bead ratio, buffering and copies const float C = perNucRegP->getC(); const float nuc_flow_span = ConstGlobalP.getNucFlowSpan(); const float sigma = ComputeSigma(perFlowRegP, perNucRegP); float tmidNuc = ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, perNucRegP); float etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), R, copies, realFnum); float tauB = ComputeTauB(constRegP, etbR); // Need shifted background const float* deltaFrames = regionFrameCube + RfDeltaFrames*regionFrameStride; const float* frameNumber = regionFrameCube + RfFrameNumber*regionFrameStride; BkgCorrectedRawTrace( emptyTraceAvg, obsTrace, beadParamCube, regionFrameCube, deltaFrames, perFlowRegP->getDarkness(), 
etbR, gain, tauB, num_frames, beadFrameStride, regionFrameStride, correctedTrace); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("Unaverage corrected trace\n"); for (int i=0; i<num_frames; ++i) { printf(" %f", correctedTrace[i]); } printf("Unaverage raw trace\n"); for (int i=0; i<num_frames; ++i) { printf(" %f", (float)(*(obsTrace + i*beadFrameStride))); } printf("Empty trace\n"); for (int i=0; i<num_frames; ++i) { printf(" %f", emptyTraceAvg[i]); } printf("\n"); } __syncthreads(); #endif // calculate avergae bead params and average coorected trace bool accum = false; if (ampl > 0.5f && ampl < 1.5f) { accum = true; smOnemerCount[threadIdx.x] = 1; smAvgCopies[threadIdx.x] = copies; smAvgR[threadIdx.x] = R; } else { smOnemerCount[threadIdx.x] = 0; smAvgCopies[threadIdx.x] = 0; smAvgR[threadIdx.x] = 0; } __syncthreads(); SimplestReductionAndAverage(smOnemerCount, samples, false); SimplestReductionAndAverage(smAvgCopies, samples, false); SimplestReductionAndAverage(smAvgR, samples, false); int oneMerCount = smOnemerCount[0]; float avgCopies = smAvgCopies[0] / oneMerCount; float avgR = smAvgR[0] / oneMerCount; float *smFrameAvg = &smAvgR[0]; for (size_t i =0; i< num_frames; ++i) { if (accum) smFrameAvg[threadIdx.x] = correctedTrace[i]; else smFrameAvg[threadIdx.x] = 0; SimplestReductionAndAverage(smFrameAvg, samples, false); correctedTrace[i] = smFrameAvg[0] / oneMerCount; } if (threadIdx.x == 0) { #if DEBUG_REG_FITTING printf("===> count:%d, copies:%f, R:%f \n", oneMerCount, avgCopies,avgR); printf("====> Tmid nuc corrected trace\n"); for (size_t i =0; i< num_frames; ++i) printf("%f ", correctedTrace[i]); printf("\n"); #endif // corrected trace is average trace now // perform lev mar on this average 1-mer bead float fineNucRise[ISIG_SUB_STEPS_SINGLE_FLOW * MAX_COMPRESSED_FRAMES_GPU]; float modelTrace[MAX_COMPRESSED_FRAMES_GPU]; float err[MAX_COMPRESSED_FRAMES_GPU]; // using average dmult of 1 const float d = 1.0f * perNucRegP->getD(); // effective diffusion float avgAmpl = 1.0f; float avgKmult = 1.0f; float deltaTmidNuc = 0.0f; // fine nuc trace int nucStart = CalculateNucRise( tmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_SINGLE_FLOW, fineNucRise); etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), avgR, avgCopies, realFnum); tauB = ComputeTauB(constRegP, etbR); float SP = ComputeSP(perFlowRegP->getCopyDrift(), avgCopies, realFnum); #if DEBUG_REG_FITTING printf("====> etbr:%f, taub:%f, SP:%f tmidNuc:%f \n", etbR, tauB, SP, tmidNuc); #endif // calculate model trace (red trace) BkgModelRedTraceCalculation( constRegP, perNucRegP, nucStart, fineNucRise, avgAmpl, avgKmult*perNucRegP->getKrate(), tauB, gain, SP, d, constRegP->getSens()*SENSMULTIPLIER, ISIG_SUB_STEPS_SINGLE_FLOW * nucStart, modelTrace, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); // calculate residual float oldResidual, newResidual; oldResidual = CalculateTmidNucShiftFitResidual( correctedTrace, modelTrace, err, num_frames); // run lev mar iterations const float amplMin = 0.5f; const float amplMax = 1.5f; const float delta_tmid_nuc_min = -3.0f; const float delta_tmid_nuc_max = 3.0f; const float kmultMin = 0.2f; const float kmultMax = 2.0f; const int maxIters = 100; const float lambda_max = 1E+10; float delta0 = 0, delta1 = 0, delta2 = 0; int done = 0; float lambda = 1; float pdA[MAX_COMPRESSED_FRAMES_GPU]; float pdKmult[MAX_COMPRESSED_FRAMES_GPU]; float pdDeltaTmidNuc[MAX_COMPRESSED_FRAMES_GPU]; float newA, newKmult, newDeltaTmidNuc; for (int iter=0; iter<maxIters; ++iter) { if 
(delta0*delta0 < 0.0000025f) done++; else done = 0; if (done >=5) break; // calculate partial derivatives using pertubed parameters newA = avgAmpl + 0.001f; newKmult = avgKmult + 0.001f; newDeltaTmidNuc = deltaTmidNuc + 0.001f; // partial derivative w.r.t A BkgModelRedTraceCalculation( constRegP, perNucRegP, nucStart, fineNucRise, newA, avgKmult*perNucRegP->getKrate(), tauB, gain, SP, d, constRegP->getSens()*SENSMULTIPLIER, ISIG_SUB_STEPS_SINGLE_FLOW * nucStart, pdA, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); CalculatePartialDerivative( pdA, pdA, modelTrace, NULL, 0.001f, num_frames); // partial derivative w.r.t kmult BkgModelRedTraceCalculation( constRegP, perNucRegP, nucStart, fineNucRise, avgAmpl, newKmult*perNucRegP->getKrate(), tauB, gain, SP, d, constRegP->getSens()*SENSMULTIPLIER, ISIG_SUB_STEPS_SINGLE_FLOW * nucStart, pdKmult, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); CalculatePartialDerivative( pdKmult, pdKmult, modelTrace, NULL, 0.001f, num_frames); // partial derivative w.r.t deltaTmidNuc nucStart = CalculateNucRise( tmidNuc + newDeltaTmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_SINGLE_FLOW, fineNucRise); BkgModelRedTraceCalculation( constRegP, perNucRegP, nucStart, fineNucRise, avgAmpl, avgKmult*perNucRegP->getKrate(), tauB, gain, SP, d, constRegP->getSens()*SENSMULTIPLIER, ISIG_SUB_STEPS_SINGLE_FLOW * nucStart, pdDeltaTmidNuc, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); CalculatePartialDerivative( pdDeltaTmidNuc, pdDeltaTmidNuc, modelTrace, NULL, 0.001f, num_frames); // jacobian matrix members float lhs_00=0, lhs_01=0, lhs_02=0, lhs_11=0, lhs_12=0, lhs_22=0; float rhs_0=0, rhs_1=0, rhs_2=0, det; // calculate jtj matrix entries for (int i=0; i<num_frames; ++i) { lhs_00 += pdA[i]*pdA[i]; lhs_01 += pdA[i]*pdKmult[i]; lhs_02 += pdA[i]*pdDeltaTmidNuc[i]; lhs_22 += pdDeltaTmidNuc[i]*pdDeltaTmidNuc[i]; lhs_12 += pdKmult[i]*pdDeltaTmidNuc[i]; lhs_11 += pdKmult[i]*pdKmult[i]; rhs_0 += pdA[i]*err[i]; rhs_1 += pdKmult[i]*err[i]; rhs_2 += pdDeltaTmidNuc[i]*err[i]; } // Solve bool cont_proc = false; while (!cont_proc) { float new_lhs00 = lhs_00 * (1.0f + lambda); float new_lhs11 = lhs_11 * (1.0f + lambda); float new_lhs22 = lhs_22 * (1.0f + lambda); // calculate determinant det = new_lhs00*(new_lhs11*new_lhs22 - lhs_12*lhs_12) - lhs_01*(lhs_01*new_lhs22 - lhs_12*lhs_02) + lhs_02*(lhs_01*lhs_12 - new_lhs11*lhs_02); det = 1.0f/det; //if (bead_ndx == 0) // printf("lhs00:%.2f lhs01: %.2f lhs02:%.2f lhs11:%.2f lhs12:%.2f lhs22:%.2f rhs0:%.2f rhs1:%.2f rhs2:%.2f, det:%.2f\n", lhs_00,lhs_01,lhs_02,lhs_11,lhs_12,lhs_22,rhs_0,rhs_1,rhs_2,det); delta0 = det*(rhs_0*(new_lhs11*new_lhs22 - lhs_12*lhs_12) + rhs_1*(lhs_02*lhs_12 - lhs_01*new_lhs22) + rhs_2*(lhs_01*lhs_12 - lhs_02*new_lhs11)); delta1 = det*(rhs_0*(lhs_12*lhs_02 - lhs_01*new_lhs22) + rhs_1*(new_lhs00*new_lhs22 - lhs_02*lhs_02) + rhs_2*(lhs_01*lhs_02 - new_lhs00*lhs_12)); delta2 = det*(rhs_0*(lhs_01*lhs_12 - lhs_02*new_lhs11) + rhs_1*(lhs_01*lhs_02 - new_lhs00*lhs_12) + rhs_2*(new_lhs00*new_lhs11 - lhs_01*lhs_01)); // NAN check bool nan_detected = true; //if (bead_ndx == 0) // printf("delta0: %.2f delta1: %.2f delta2: %.2f\n", delta0, delta1, delta2); if (!::isnan(delta0) && !::isnan(delta1) && !::isnan(delta2)) { newA = avgAmpl + delta0; newKmult = avgKmult + delta1; newDeltaTmidNuc = deltaTmidNuc + delta2; clampT(newA, amplMin, amplMax); clampT(newKmult, kmultMin, kmultMax); clampT(newDeltaTmidNuc, delta_tmid_nuc_min, delta_tmid_nuc_max); #if DEBUG_REG_FITTING printf("Not NAN 
newA:%f, newKmult:%f, newDeltaTmidNuc:%f, iter:%d, lambda:%f, delta0:%f, delta1:%f, delta2:%f\n",newA, newKmult, newDeltaTmidNuc, iter, lambda, delta0, delta1, delta2); #endif nucStart = CalculateNucRise( tmidNuc + newDeltaTmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_SINGLE_FLOW, fineNucRise); BkgModelRedTraceCalculation( constRegP, perNucRegP, nucStart, fineNucRise, newA, newKmult*perNucRegP->getKrate(), tauB, gain, SP, d, constRegP->getSens()*SENSMULTIPLIER, ISIG_SUB_STEPS_SINGLE_FLOW * nucStart, modelTrace, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); newResidual = CalculateTmidNucShiftFitResidual( correctedTrace, modelTrace, err, num_frames); nan_detected = false; } if (!nan_detected && newResidual < oldResidual) { lambda /= 10.0f; if (lambda < FLT_MIN) lambda = FLT_MIN; avgAmpl = newA; avgKmult = newKmult; deltaTmidNuc = newDeltaTmidNuc; #if DEBUG_REG_FITTING printf("iter:%d, avgAmpl:%f, avgKmult:%f, deltaTmidNuc:%f, oldResidual:%f, newResidual:%f\n",iter, avgAmpl, avgKmult, deltaTmidNuc, oldResidual, newResidual); #endif //if (bead_ndx == 0) // printf("===> iter: %d Tau: %.2f residual: %.2f newresidual: %.2f\n", iter, taub, residual, newresidual); oldResidual = newResidual; cont_proc = true; } else { lambda *= 10.0f; } if (lambda > lambda_max) cont_proc = true; } if (lambda > lambda_max) break; } // update the tmidnuc shift #if DEBUG_REG_FITTING printf("===> Fitted Tmidnuc Shift : %f, avgAmpl: %f, avgKmult: %f\n", deltaTmidNuc); #endif perFlowRegP->setTMidNucShift(deltaTmidNuc); } __syncthreads(); } // TODO Need to apply nonzero emphasis frames optimization // TODO transpose emphasis // TODO dense layout should benefit here with lots of reductions __device__ void SingleFlowRegionalLevMarFit( float *scratchSpace, const short *observedTrace, // NUM_SAMPLES_RF x F const float *BeadParamCube, //Copies, R, dmult, gain, tau_adj, phi, stride == beadFrameStride const unsigned short *BeadStateCube, //key_norm, ppf, ssq const float *emphasisVec, //(MAX_POISSON_TABLE_COL)*F const int *nonZeroEmphFrames, float *nucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F const ConstantParamsRegion *constRegP, PerFlowParamsRegion *perFlowRegP, const PerNucParamsRegion *perNucRegP, const float *RegionFrameCube, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const float *EmptyTraceAvg, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const size_t beadFrameStride, //stride from one CUBE plane to the next for the Per Well Cubes const size_t regionFrameStride, //, //stride in Region Frame Cube to get to next parameter const size_t num_frames, // 4 const size_t samples) { __shared__ double smBuffer[REG_FIT_SM_ACCUM_BUFFERSIZE]; __shared__ float smNucRise[ISIG_SUB_STEPS_MULTI_FLOW*MAX_COMPRESSED_FRAMES_GPU]; __shared__ float tmpNucRise[ISIG_SUB_STEPS_MULTI_FLOW*MAX_COMPRESSED_FRAMES_GPU]; __shared__ double deltas[3]; // NUM_PARAMS=3 __shared__ bool cont_lambda_itr; __shared__ bool nan_detected; __shared__ bool solved; // zero out shared memory for reductions for (size_t i=threadIdx.x; i<REG_FIT_SM_ACCUM_BUFFERSIZE; i+=blockDim.x) { smBuffer[i] = 0; } if (threadIdx.x == 0) { perFlowRegP->setTMidNucShift(0); } __syncthreads(); float correctedTrace[MAX_COMPRESSED_FRAMES_GPU]; float obsTrace[MAX_COMPRESSED_FRAMES_GPU]; // raw traces being written to float tmpTrace[MAX_COMPRESSED_FRAMES_GPU]; // raw traces being written to float purpleTrace[MAX_COMPRESSED_FRAMES_GPU]; float pdTmidNuc[MAX_COMPRESSED_FRAMES_GPU]; float pdRDR[MAX_COMPRESSED_FRAMES_GPU]; 
float pdPDR[MAX_COMPRESSED_FRAMES_GPU]; float yerr[MAX_COMPRESSED_FRAMES_GPU]; // right now getting bead params in the order they were in bead_params struct const float copies = *(BeadParamCube + BpCopies*beadFrameStride); const float R = *(BeadParamCube + BpR*beadFrameStride); const float d = (*(BeadParamCube + BpDmult*beadFrameStride)) * perNucRegP->getD(); // effective diffusion const float gain = *(BeadParamCube + BpGain*beadFrameStride); // calculate empty to bead ratio, buffering and copies const float C = perNucRegP->getC(); const float nuc_flow_span = ConstGlobalP.getNucFlowSpan(); const float sigma = ComputeSigma(perFlowRegP, perNucRegP); float tmidNuc = ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, perNucRegP); float etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), R, copies); float tauB = ComputeTauB(constRegP, etbR); float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies); // Need shifted background const float* bkgTrace = EmptyTraceAvg;//RegionFrameCube + RfBkgTraces*regionFrameStride; const float* deltaFrames = RegionFrameCube + RfDeltaFrames*regionFrameStride; const float* frameNumber = RegionFrameCube + RfFrameNumber*regionFrameStride; // background subtracted trace for amplitude estimation // calculate initial nucRise here if (threadIdx.x == 0) { #if DEBUG_REG_FITTING printf("C: %f sigma: %f, tmidNuc: %f\n", C, sigma, tmidNuc); printf("copies: %f R: %f d: %f gain: %f, etbR: %f tauB: %f\n", copies, R, d, gain, etbR, tauB); #endif smBuffer[0] = CalculateNucRise( tmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, smNucRise); } __syncthreads(); int nucStart = smBuffer[0]; __syncthreads(); // DEBUG #if DEBUG_REG_FITTING if (threadIdx.x == 1) { printf("GPU before fitting...start: %d, tmidnuc: %f rdr: %f pdr: %f\n", nucStart,perFlowRegP->getTMidNuc(), perFlowRegP->getRatioDrift(), perFlowRegP->getCopyDrift()); printf("Nucrise\n"); for (int i=0; i<(ISIG_SUB_STEPS_MULTI_FLOW*num_frames); ++i) { printf("%f ",smNucRise[i]); } printf("\n"); printf("Emphasis\n"); for (int i=0; i<num_frames; ++i) { printf("%f ",emphasisVec[i]); } printf("\n"); printf("Shifted Bkg\n"); for (int i=0; i<num_frames; ++i) { printf("%f ",bkgTrace[i]); } printf("\n"); } __syncthreads(); #endif // END DEBUG // START AMPLITUDE ESTIMATION BkgCorrectedRawTrace( bkgTrace, observedTrace, BeadParamCube, RegionFrameCube, deltaFrames, perFlowRegP->getDarkness(), etbR, gain, tauB, num_frames, beadFrameStride, regionFrameStride, correctedTrace); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("====>tid: %d,", threadIdx.x); for (int i=0; i<num_frames; ++i) { printf("%f ", correctedTrace[i]); } printf("\n"); } __syncthreads(); #endif //Provide emphasis stride to projection search...the transposed layout is // used in the below function because of single flow fit float ampl = ProjectionSearch( constRegP, perFlowRegP, perNucRegP, correctedTrace, emphasisVec, num_frames, smNucRise, deltaFrames, 1.0f, d, tauB, gain, SP, tmpTrace, nucStart, beadFrameStride, 1, // emphasis stride ISIG_SUB_STEPS_MULTI_FLOW ); #if DEBUG_REG_FITTING printf("====> GPU....tid: %d Ampl: %f\n", threadIdx.x, ampl); #endif // END AMPLITUDE ESTIMATION // select emphasis now based on Ampl // TODO check if max emphasis is correct const float *emphForFitting = setAdaptiveEmphasis(ampl, emphasisVec, num_frames, MAX_HPXLEN); // calculate starting regional residual bool goodBead = true; ComputeModelBasedTrace( bkgTrace, deltaFrames, constRegP, perNucRegP, BeadParamCube, RegionFrameCube, 
smNucRise, nucStart, perNucRegP->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, ampl, ISIG_SUB_STEPS_MULTI_FLOW * nucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, purpleTrace); float beadRes = CalculateBeadResidualError( obsTrace, purpleTrace, emphForFitting, num_frames); beadRes = sqrtf(beadRes/num_frames); smBuffer[threadIdx.x] = beadRes; __syncthreads(); // reduce here for average residual value // reduction has slightly wrong logic currently with assuming 256 threads being spawned SimplestReductionAndAverage(smBuffer, samples, false); float curRes = smBuffer[0]; float curAvgRes = curRes / (float)(samples); __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) printf("====> GPU....Before fitting residual: %f\n", curAvgRes); #endif // Lev mar iterations loop float ratioDrift, copyDrift; float new_tmidnuc, new_ratiodrift, new_copydrift; //float lambda = 0.0001f; double lambda = 0.0001; float *oldTrace = purpleTrace; float *newTrace = tmpTrace; for (int iter=0; iter<4; ++iter) { // possibly filter beads at this point // residual not changing or corrupt or ... goodBead = beadRes < 4.0f*curAvgRes; tmidNuc = perFlowRegP->getTMidNuc(); ratioDrift = perFlowRegP->getRatioDrift(); copyDrift = perFlowRegP->getCopyDrift(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("====GPU REG Fitting...iter:%d, tmidNuc:%f, rdr:%f, pdr:%f\n", iter, tmidNuc, ratioDrift, copyDrift); } __syncthreads(); #endif // START YERR CalculatePartialDerivative( yerr, obsTrace, oldTrace, emphForFitting, 1.0f, num_frames); // END YERR // START TMIDNUC PARTIAL DERIVATIVE new_tmidnuc = ComputeMidNucTime( tmidNuc + TMIDNUC_REG_STEP, perFlowRegP, perNucRegP); if (threadIdx.x == 0) { smBuffer[0] = CalculateNucRise( new_tmidnuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, tmpNucRise); } __syncthreads(); int tmp_nucStart = smBuffer[0]; __syncthreads(); ComputeModelBasedTrace( bkgTrace, deltaFrames, constRegP, perNucRegP, BeadParamCube, RegionFrameCube, tmpNucRise, tmp_nucStart, perNucRegP->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, ampl, ISIG_SUB_STEPS_MULTI_FLOW * tmp_nucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); CalculatePartialDerivative( pdTmidNuc, newTrace, oldTrace, emphForFitting, TMIDNUC_REG_STEP, num_frames); // END TMIDNUC PARTIAL DERIVATIVE // START RATODRIFT DERIVATIVE new_ratiodrift = ratioDrift + RDR_REG_STEP; etbR = ComputeETBR(perNucRegP, new_ratiodrift, R, copies); tauB = ComputeTauB(constRegP, etbR); ComputeModelBasedTrace( bkgTrace, deltaFrames, constRegP, perNucRegP, BeadParamCube, RegionFrameCube, smNucRise, nucStart, perNucRegP->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, ampl, ISIG_SUB_STEPS_MULTI_FLOW * nucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); CalculatePartialDerivative( pdRDR, newTrace, oldTrace, emphForFitting, RDR_REG_STEP, num_frames); // END RATODRIFT DERIVATIVE // START COPYDRIFT DERIVATIVE new_copydrift = copyDrift + PDR_REG_STEP; etbR = ComputeETBR(perNucRegP, ratioDrift, R, copies); tauB = ComputeTauB(constRegP, etbR); SP = ComputeSP(new_copydrift, copies); ComputeModelBasedTrace( bkgTrace, deltaFrames, constRegP, perNucRegP, BeadParamCube, RegionFrameCube, smNucRise, nucStart, perNucRegP->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), 
etbR, constRegP->getSens()*SENSMULTIPLIER, ampl, ISIG_SUB_STEPS_MULTI_FLOW * nucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); CalculatePartialDerivative( pdPDR, newTrace, oldTrace, emphForFitting, PDR_REG_STEP, num_frames); // END COPYDRIFT DERIVATIVE #if DEBUG_REG_FITTING // DEBUG if (threadIdx.x == 0) { for (int i=0; i<num_frames; ++i) { printf("%f,",oldTrace[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",pdTmidNuc[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",pdRDR[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",pdPDR[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",yerr[i]); } printf("\n"); } __syncthreads(); #endif // Calculate JTJ matrix entries //float lhs_00=0, lhs_01=0, lhs_02=0, lhs_11=0, lhs_12=0, lhs_22=0; double lhs_00=0, lhs_01=0, lhs_02=0, lhs_11=0, lhs_12=0, lhs_22=0; //float rhs_0=0, rhs_1=0, rhs_2=0; double rhs_0=0, rhs_1=0, rhs_2=0; smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdTmidNuc, pdTmidNuc, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) lhs_00 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdTmidNuc, pdRDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) lhs_01 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdTmidNuc, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) lhs_02 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdRDR, pdRDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) lhs_11 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdRDR, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) lhs_12 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdPDR, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) lhs_22 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( yerr, pdTmidNuc, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) rhs_0 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? 
CalculateDotProduct( yerr, pdRDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) rhs_1 = smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( yerr, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) rhs_2 = smBuffer[0]; __syncthreads(); if (threadIdx.x == 0) { cont_lambda_itr = true; nan_detected = false; solved = false; } __syncthreads(); // multiply jtj matrix by lambda // solve for delta change in parameters // calculate new parameters and clamp them within boundaries // find new residual and compare to current one // if residual decreases this iteration is done else increase/decrease lambda accordingly and // back to lambda multiply step // since solve in three variable, inlining matrix inverse // rather than going for LUT decomposition float new_residual = 0; float newBeadRes = 0; while (cont_lambda_itr) { if (threadIdx.x == 0) { /*float new_lhs00 = lhs_00 * (1.0f + lambda); float new_lhs11 = lhs_11 * (1.0f + lambda); float new_lhs22 = lhs_22 * (1.0f + lambda);*/ #if DEBUG_REG_FITTING printf("jtj: %f,%f,%f,%f,%f,%f,%f,%f,%f\n", lhs_00,lhs_01,lhs_02,lhs_11,lhs_12,lhs_22,rhs_0,rhs_1,rhs_2); #endif double new_lhs00 = lhs_00 * (1.0 + lambda); double new_lhs11 = lhs_11 * (1.0 + lambda); double new_lhs22 = lhs_22 * (1.0 + lambda); // calculate determinant /*float det = new_lhs00*(new_lhs11*new_lhs22 - lhs_12*lhs_12) - lhs_01*(lhs_01*new_lhs22 - lhs_12*lhs_02) + lhs_02*(lhs_01*lhs_12 - new_lhs11*lhs_02); det = 1.0f/det;*/ double det = new_lhs00*(new_lhs11*new_lhs22 - lhs_12*lhs_12) - lhs_01*(lhs_01*new_lhs22 - lhs_12*lhs_02) + lhs_02*(lhs_01*lhs_12 - new_lhs11*lhs_02); det = 1.0/det; deltas[0] = det*(rhs_0*(new_lhs11*new_lhs22 - lhs_12*lhs_12) + rhs_1*(lhs_02*lhs_12 - lhs_01*new_lhs22) + rhs_2*(lhs_01*lhs_12 - lhs_02*new_lhs11)); deltas[1] = det*(rhs_0*(lhs_12*lhs_02 - lhs_01*new_lhs22) + rhs_1*(new_lhs00*new_lhs22 - lhs_02*lhs_02) + rhs_2*(lhs_01*lhs_02 - new_lhs00*lhs_12)); deltas[2] = det*(rhs_0*(lhs_01*lhs_12 - lhs_02*new_lhs11) + rhs_1*(lhs_01*lhs_02 - new_lhs00*lhs_12) + rhs_2*(new_lhs00*new_lhs11 - lhs_01*lhs_01)); if (::isnan(deltas[0]) || ::isnan(deltas[1]) || ::isnan(deltas[2])) nan_detected = true; #if DEBUG_REG_FITTING printf("===GPU REG Params...iter:%d,delta0:%f,delta1:%f,delta2:%f\n", iter, deltas[0], deltas[1], deltas[2]); #endif } __syncthreads(); if (!nan_detected) { new_tmidnuc = tmidNuc + deltas[0]; new_ratiodrift = ratioDrift + deltas[1]; new_copydrift = copyDrift + deltas[2]; // clamp the parameters here clampT(new_tmidnuc, constRegP->getMinTmidNuc(), constRegP->getMaxTmidNuc()); clampT(new_ratiodrift, constRegP->getMinRatioDrift(), constRegP->getMaxRatioDrift()); clampT(new_copydrift, constRegP->getMinCopyDrift(), constRegP->getMaxCopyDrift()); // compute residual if (threadIdx.x == 0) { smBuffer[0] = CalculateNucRise( ComputeMidNucTime(new_tmidnuc, perFlowRegP, perNucRegP), sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, tmpNucRise); } __syncthreads(); tmp_nucStart = smBuffer[0]; __syncthreads(); etbR = ComputeETBR(perNucRegP, new_ratiodrift, R, copies); tauB = ComputeTauB(constRegP, etbR); SP = ComputeSP(new_copydrift, copies); ComputeModelBasedTrace( bkgTrace, 
deltaFrames, constRegP, perNucRegP, BeadParamCube, RegionFrameCube, tmpNucRise, tmp_nucStart, perNucRegP->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, ampl, ISIG_SUB_STEPS_MULTI_FLOW * tmp_nucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); newBeadRes = CalculateBeadResidualError( obsTrace, newTrace, emphForFitting, num_frames); newBeadRes = sqrtf(newBeadRes/num_frames); smBuffer[threadIdx.x] = newBeadRes; __syncthreads(); // reduce here for average residual value SimplestReductionAndAverage(smBuffer, samples, false); //curAvgRes = smBuffer[0] / (float)(samples); new_residual = smBuffer[0]; __syncthreads(); } if (threadIdx.x == 0) { //new_residual = smBuffer[0]; // DEBUG #if DEBUG_REG_FITTING printf("===GPU REG Params...iter:%d,tmidnuc:%f,rdr:%f,pdr:%f,old_residual:%f,new_residual:%f\n", iter, new_tmidnuc, new_ratiodrift, new_copydrift, curRes, new_residual); #endif if (!nan_detected && new_residual < curRes) { solved = true; //curRes = new_residual; lambda /= 30.0; // use correct lambda step from bkgmodel if (lambda < FLT_MIN) lambda = FLT_MIN; // update parameters perFlowRegP->setTMidNuc(new_tmidnuc); perFlowRegP->setRatioDrift(new_ratiodrift); perFlowRegP->setCopyDrift(new_copydrift); // update nucrise smBuffer[0] = tmp_nucStart; for (int i=0; i<num_frames; ++i) { smNucRise[i] = tmpNucRise[i]; } cont_lambda_itr = false; } else { lambda *= 30.0; if (lambda > 1E+9f) { cont_lambda_itr = false; smBuffer[0] = nucStart; solved = false; } } nan_detected = false; } __syncthreads(); } nucStart = smBuffer[0]; if (solved) { float *tmp = oldTrace; oldTrace = newTrace; newTrace = tmp; // update residuals for next iteration beadRes = newBeadRes; curRes = new_residual; curAvgRes = curRes / (float)(samples); } __syncthreads(); } } __device__ void UpdateFineNucRiseForSingleFlowFit( const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, PerFlowParamsRegion * perFlowRegP, const float * RegionFrameCube, const int RegionFrameStride, const int num_frames, float *nucRise) { perFlowRegP->setFineStart(CalculateNucRise( ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, perNucRegP), ComputeSigma(perFlowRegP, perNucRegP), perNucRegP->getC(), ConstGlobalP.getNucFlowSpan(), RegionFrameCube + RfFrameNumber*RegionFrameStride, num_frames, ISIG_SUB_STEPS_SINGLE_FLOW, nucRise)); } __device__ void UpdateCoarseNucRiseForSingleFlowFit( const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, PerFlowParamsRegion * perFlowRegP, const float * RegionFrameCube, const int RegionFrameStride, const int num_frames, float *nucRise) { perFlowRegP->setCoarseStart(CalculateNucRise( ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, perNucRegP), ComputeSigma(perFlowRegP, perNucRegP), perNucRegP->getC(), ConstGlobalP.getNucFlowSpan(), RegionFrameCube + RfFrameNumber*RegionFrameStride, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, nucRise)); } __device__ float ComputeMultiFlowBeadResidual( const float *Ampl, const short **observedTrace, const float **emptyTrace, const PerNucParamsRegion **nucRegParams, const float *frameNumber, const float *deltaFrames, const float *emphasisVec, const ConstantParamsRegion *constRegP, const float *BeadParamCube, const float *RegionFrameCube, const PerFlowParamsRegion *perFlowRegP, const size_t num_frames, const size_t beadFrameStride, const size_t regionFrameStride) { __shared__ float smNucRise[ISIG_SUB_STEPS_MULTI_FLOW*MAX_COMPRESSED_FRAMES_GPU]; 
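    // ComputeMultiFlowBeadResidual (summary of the body below): for each flow in
    // the history window it rebuilds the model ("purple") trace for this bead via
    // ComputeModelBasedTrace, accumulates the emphasis-weighted squared error
    // against the observed trace, and returns the RMS error over all frames and
    // history flows. The nuc-rise is recomputed per history flow by thread 0 and
    // shared across the block through smNucRise/smNucStart.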
__shared__ int smNucStart; float purpleTrace[MAX_COMPRESSED_FRAMES_GPU]; // right now getting bead params in the order they were in bead_params struct const float copies = *(BeadParamCube + BpCopies*beadFrameStride); const float R = *(BeadParamCube + BpR*beadFrameStride); const float gain = *(BeadParamCube + BpGain*beadFrameStride); double beadRes = 0; for(int histFlowIdx = 0; histFlowIdx < ConstHistCol.getNumHistoryFlows(); histFlowIdx++) { const PerNucParamsRegion *histFlowNucParams = nucRegParams[histFlowIdx]; const float d = (*(BeadParamCube + BpDmult*beadFrameStride)) * histFlowNucParams->getD(); // effective diffusion // calculate empty to bead ratio, buffering and copies const float C = histFlowNucParams->getC(); const float nuc_flow_span = ConstGlobalP.getNucFlowSpan(); const float sigma = ComputeSigma(perFlowRegP, histFlowNucParams); float tmidNuc = ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, histFlowNucParams); int realFlowNum = ConstFlowP.getRealFnum() - (ConstHistCol.getNumHistoryFlows() - 1 - histFlowIdx); float etbR = ComputeETBR(histFlowNucParams, perFlowRegP->getRatioDrift(), R, copies, realFlowNum); float tauB = ComputeTauB(constRegP, etbR); float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies, realFlowNum); // Compute nucrise if (threadIdx.x == 0) { smNucStart = CalculateNucRise( tmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, smNucRise); } __syncthreads(); ComputeModelBasedTrace( emptyTrace[histFlowIdx], deltaFrames, constRegP, histFlowNucParams, BeadParamCube, RegionFrameCube, smNucRise, smNucStart, histFlowNucParams->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, Ampl[histFlowIdx], ISIG_SUB_STEPS_MULTI_FLOW * smNucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, purpleTrace); const float *emphForFitting = setAdaptiveEmphasis(Ampl[histFlowIdx], emphasisVec, num_frames, MAX_HPXLEN); beadRes += CalculateBeadResidualError( observedTrace[histFlowIdx], purpleTrace, emphForFitting, beadFrameStride, num_frames); } beadRes = sqrtf(beadRes/(float)(num_frames * ConstHistCol.getNumHistoryFlows())); return beadRes; } __device__ void BuildMatrices( double *smBuffer, // shared memory buffer OneDimVec<double,REGLEVMARMAT_LHS_LEN> *jtj, OneDimVec<double,REGLEVMARMAT_RHS_LEN> *rhs, const bool goodBead, const float *Ampl, const short **observedTrace, const float **emptyTrace, const PerNucParamsRegion **nucRegParams, const float *frameNumber, const float *deltaFrames, const float *emphasisVec, const ConstantParamsRegion *constRegP, const float *BeadParamCube, const float *RegionFrameCube, const PerFlowParamsRegion *perFlowRegP, const size_t samples, const size_t num_frames, const size_t beadFrameStride, const size_t regionFrameStride) { __shared__ float smNucRise[ISIG_SUB_STEPS_MULTI_FLOW*MAX_COMPRESSED_FRAMES_GPU]; __shared__ float smTmpNucRise[ISIG_SUB_STEPS_MULTI_FLOW*MAX_COMPRESSED_FRAMES_GPU]; __shared__ int smNucStart; __shared__ int smTmpNucStart; float pdTmidNuc[MAX_COMPRESSED_FRAMES_GPU]; float pdRDR[MAX_COMPRESSED_FRAMES_GPU]; #if FIT_COPYDRIFT float pdPDR[MAX_COMPRESSED_FRAMES_GPU]; #endif float yerr[MAX_COMPRESSED_FRAMES_GPU]; float oldTrace[MAX_COMPRESSED_FRAMES_GPU]; float newTrace[MAX_COMPRESSED_FRAMES_GPU]; // right now getting bead params in the order they were in bead_params struct const float copies = *(BeadParamCube + BpCopies*beadFrameStride); const float R = *(BeadParamCube + BpR*beadFrameStride); const float gain = 
*(BeadParamCube + BpGain*beadFrameStride); for(int histFlowIdx = 0; histFlowIdx < ConstHistCol.getNumHistoryFlows(); histFlowIdx++) { const PerNucParamsRegion *histFlowNucParams = nucRegParams[histFlowIdx]; const float d = (*(BeadParamCube + BpDmult*beadFrameStride)) * histFlowNucParams->getD(); // effective diffusion // calculate empty to bead ratio, buffering and copies const float C = histFlowNucParams->getC(); const float nuc_flow_span = ConstGlobalP.getNucFlowSpan(); const float sigma = ComputeSigma(perFlowRegP, histFlowNucParams); int realFlowNum = ConstFlowP.getRealFnum() - (ConstHistCol.getNumHistoryFlows() - 1 - histFlowIdx); float tmidNuc = ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, histFlowNucParams); float etbR = ComputeETBR(histFlowNucParams, perFlowRegP->getRatioDrift(), R, copies, realFlowNum); float tauB = ComputeTauB(constRegP, etbR); float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies, realFlowNum); const float *emphForFitting = setAdaptiveEmphasis(Ampl[histFlowIdx], emphasisVec, num_frames, MAX_HPXLEN); // Compute nucrise if (threadIdx.x == 0) { smNucStart = CalculateNucRise( tmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, smNucRise); } __syncthreads(); ComputeModelBasedTrace( emptyTrace[histFlowIdx], deltaFrames, constRegP, histFlowNucParams, BeadParamCube, RegionFrameCube, smNucRise, smNucStart, histFlowNucParams->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, Ampl[histFlowIdx], ISIG_SUB_STEPS_MULTI_FLOW * smNucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, oldTrace); // START YERR CalculateYerr( yerr, observedTrace[histFlowIdx], oldTrace, emphForFitting, 1.0f, beadFrameStride, num_frames); // END YERR // START TMIDNUC PARTIAL DERIVATIVE float new_tmidnuc = ComputeMidNucTime( perFlowRegP->getTMidNuc() + TMIDNUC_REG_STEP, perFlowRegP, histFlowNucParams); if (threadIdx.x == 0) { smTmpNucStart = CalculateNucRise( new_tmidnuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, smTmpNucRise); } __syncthreads(); ComputeModelBasedTrace( emptyTrace[histFlowIdx], deltaFrames, constRegP, histFlowNucParams, BeadParamCube, RegionFrameCube, smTmpNucRise, smTmpNucStart, histFlowNucParams->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, Ampl[histFlowIdx], ISIG_SUB_STEPS_MULTI_FLOW * smTmpNucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); CalculatePartialDerivative( pdTmidNuc, newTrace, oldTrace, emphForFitting, TMIDNUC_REG_STEP, num_frames); // END TMIDNUC PARTIAL DERIVATIVE // START RATODRIFT DERIVATIVE float new_ratiodrift = perFlowRegP->getRatioDrift() + RDR_REG_STEP; etbR = ComputeETBR(histFlowNucParams, new_ratiodrift, R, copies, realFlowNum); tauB = ComputeTauB(constRegP, etbR); ComputeModelBasedTrace( emptyTrace[histFlowIdx], deltaFrames, constRegP, histFlowNucParams, BeadParamCube, RegionFrameCube, smNucRise, smNucStart, histFlowNucParams->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, Ampl[histFlowIdx], ISIG_SUB_STEPS_MULTI_FLOW * smNucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); CalculatePartialDerivative( pdRDR, newTrace, oldTrace, emphForFitting, RDR_REG_STEP, num_frames); // END RATODRIFT DERIVATIVE // START COPYDRIFT DERIVATIVE #if FIT_COPYDRIFT float new_copydrift = perFlowRegP->getCopyDrift() + 
PDR_REG_STEP; etbR = ComputeETBR(histFlowNucParams, perFlowRegP->getRatioDrift(), R, copies, realFlowNum); tauB = ComputeTauB(constRegP, etbR); SP = ComputeSP(new_copydrift, copies, realFlowNum); ComputeModelBasedTrace( emptyTrace[histFlowIdx], deltaFrames, constRegP, histFlowNucParams, BeadParamCube, RegionFrameCube, smNucRise, smNucStart, histFlowNucParams->getKrate(), tauB, gain, SP, d, perFlowRegP->getDarkness(), etbR, constRegP->getSens()*SENSMULTIPLIER, Ampl[histFlowIdx], ISIG_SUB_STEPS_MULTI_FLOW * smNucStart, ISIG_SUB_STEPS_MULTI_FLOW, num_frames, beadFrameStride, regionFrameStride, newTrace); CalculatePartialDerivative( pdPDR, newTrace, oldTrace, emphForFitting, PDR_REG_STEP, num_frames); // END COPYDRIFT DERIVATIVE #endif #if DEBUG_REG_FITTING // DEBUG if (threadIdx.x == 0) { for (int i=0; i<num_frames; ++i) { printf("%f,",oldTrace[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",pdTmidNuc[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",pdRDR[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",pdPDR[i]); } printf("\n"); for (int i=0; i<num_frames; ++i) { printf("%f,",yerr[i]); } printf("\n"); } __syncthreads(); #endif // Calculate JTJ matrix entries smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdTmidNuc, pdTmidNuc, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) jtj->val[0] += smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdTmidNuc, pdRDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) jtj->val[1] += smBuffer[0]; __syncthreads(); #if FIT_COPYDRIFT smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdTmidNuc, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) jtj->val[2] += smBuffer[0]; __syncthreads(); #endif smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdRDR, pdRDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) #if FIT_COPYDRIFT jtj->val[3] += smBuffer[0]; #else jtj->val[2] += smBuffer[0]; #endif __syncthreads(); #if FIT_COPYDRIFT smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdRDR, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) jtj->val[4] += smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( pdPDR, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) jtj->val[5] += smBuffer[0]; __syncthreads(); #endif smBuffer[threadIdx.x] = goodBead ? 
CalculateDotProduct( yerr, pdTmidNuc, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) rhs->val[0] += smBuffer[0]; __syncthreads(); smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( yerr, pdRDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) rhs->val[1] += smBuffer[0]; __syncthreads(); #if FIT_COPYDRIFT smBuffer[threadIdx.x] = goodBead ? CalculateDotProduct( yerr, pdPDR, num_frames) : 0; __syncthreads(); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("sum:%f\n", smBuffer[0]); } __syncthreads(); #endif SimplestReductionAndAverage(smBuffer, samples, false); if (threadIdx.x == 0) rhs->val[2] += smBuffer[0]; __syncthreads(); #endif } } __device__ void MultiFlowRegionalLevMarFit( const float *Ampl, const short **obsTrace, const float **emptyTrace, const PerNucParamsRegion **nucRegParams, const float *BeadParamCube, //Copies, R, dmult, gain, tau_adj, phi, stride == beadFrameStride const unsigned short *BeadStateCube, //key_norm, ppf, ssq const float *emphasisVec, //(MAX_POISSON_TABLE_COL)*F const ConstantParamsRegion *constRegP, const float *RegionFrameCube, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const size_t beadFrameStride, //stride from one CUBE plane to the next for the Per Well Cubes const size_t regionFrameStride, //, //stride in Region Frame Cube to get to next parameter const size_t num_frames, // 4 const size_t samples, float *scratchSpace, float *nucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F PerFlowParamsRegion *perFlowRegP) { __shared__ double smBuffer[REG_FIT_SM_ACCUM_BUFFERSIZE]; __shared__ double deltas[NUM_TIME_VARYING_PARAMS]; // NUM_PARAMS=3 __shared__ bool cont_lambda_itr; __shared__ bool nan_detected; __shared__ bool solved; const float* deltaFrames = RegionFrameCube + RfDeltaFrames*regionFrameStride; const float* frameNumber = RegionFrameCube + RfFrameNumber*regionFrameStride; // Calculate starting residual over block of flows (history of flows here) float beadRes = 0; beadRes = ComputeMultiFlowBeadResidual( Ampl, obsTrace, emptyTrace, nucRegParams, frameNumber, deltaFrames, emphasisVec, constRegP, BeadParamCube, RegionFrameCube, perFlowRegP, num_frames, beadFrameStride, regionFrameStride); smBuffer[threadIdx.x] = beadRes; __syncthreads(); SimplestReductionAndAverage(smBuffer, samples, false); float curAvgRes = smBuffer[0] / (float)(samples); __syncthreads(); // calculate partial derivatives and build matrix // solve for parameters // iterate // Lev mar iterations loop double lambda = 0.0001; bool goodBead = true; OneDimVec<double, REGLEVMARMAT_LHS_LEN> jtj; OneDimVec<double, REGLEVMARMAT_RHS_LEN> rhs; for (int iter=0; iter<(LEVMARITERS); ++iter) { if (iter > 0 && !solved) { if (threadIdx.x == 0) printf("max lambda reached: %f\n", lambda); return; } // zero out shared memory for reductions for (size_t i=threadIdx.x; i<REG_FIT_SM_ACCUM_BUFFERSIZE; i+=blockDim.x) { smBuffer[i] = 0; } __syncthreads(); float tmidNuc = perFlowRegP->getTMidNuc(); float ratioDrift = perFlowRegP->getRatioDrift();; float copyDrift = perFlowRegP->getCopyDrift(); float new_tmidnuc, new_ratiodrift; #if FIT_COPYDRIFT new_copydrift; #endif // possibly filter beads at this point // residual not changing or corrupt or ... 
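    // Bead filter for this Lev-Mar iteration: a bead whose per-bead residual is
    // more than 4x the current region-average residual is treated as an outlier
    // and contributes zeros to the JTJ/RHS accumulation done in BuildMatrices().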
goodBead = beadRes < 4.0f*curAvgRes; //goodBead = true; jtj.clear(); rhs.clear(); BuildMatrices( smBuffer, &jtj, &rhs, goodBead, Ampl, obsTrace, emptyTrace, nucRegParams, frameNumber, deltaFrames, emphasisVec, constRegP, BeadParamCube, RegionFrameCube, perFlowRegP, samples, num_frames, beadFrameStride, regionFrameStride); // Solve // Compute new residual error nad iterate if (threadIdx.x == 0) { cont_lambda_itr = true; nan_detected = false; solved = false; } __syncthreads(); // multiply jtj matrix by lambda // solve for delta change in parameters // calculate new parameters and clamp them within boundaries // find new residual and compare to current one // if residual decreases this iteration is done else increase/decrease lambda accordingly and // back to lambda multiply step // since solve in three variable, inlining matrix inverse // rather than going for LUT decomposition float newBeadRes = 0; float newAvgRes = 0; while (cont_lambda_itr) { if (threadIdx.x == 0) { OneDimVec<double,REGLEVMARMAT_RHS_LEN> newJTJDiag; #if FIT_COPYDRIFT newJTJDiag.val[0] = jtj.val[0] * (1.0 + lambda); newJTJDiag.val[1] = jtj.val[3] * (1.0 + lambda); newJTJDiag.val[2] = jtj.val[5] * (1.0 + lambda); // calculate determinant double det = newJTJDiag.val[0]*(newJTJDiag.val[1]*newJTJDiag.val[2] - jtj.val[4]*jtj.val[4]) - jtj.val[1]*(jtj.val[1]*newJTJDiag.val[2] - jtj.val[4]*jtj.val[2]) + jtj.val[2]*(jtj.val[1]*jtj.val[4] - newJTJDiag.val[1]*jtj.val[2]); det = 1.0/det; deltas[0] = det*(rhs.val[0]*(newJTJDiag.val[1]*newJTJDiag.val[2] - jtj.val[4]*jtj.val[4]) + rhs.val[1]*(jtj.val[2]*jtj.val[4] - jtj.val[1]*newJTJDiag.val[2]) + rhs.val[2]*(jtj.val[1]*jtj.val[4] - jtj.val[2]*newJTJDiag.val[1])); deltas[1] = det*(rhs.val[0]*(jtj.val[4]*jtj.val[2] - jtj.val[1]*newJTJDiag.val[2]) + rhs.val[1]*(newJTJDiag.val[0]*newJTJDiag.val[2] - jtj.val[2]*jtj.val[2]) + rhs.val[2]*(jtj.val[1]*jtj.val[2] - newJTJDiag.val[0]*jtj.val[4])); deltas[2] = det*(rhs.val[0]*(jtj.val[1]*jtj.val[4] - jtj.val[2]*newJTJDiag.val[1]) + rhs.val[1]*(jtj.val[1]*jtj.val[2] - newJTJDiag.val[0]*jtj.val[4]) + rhs.val[2]*(newJTJDiag.val[0]*newJTJDiag.val[1] - jtj.val[1]*jtj.val[1])); if (isnan(deltas[0]) || isnan(deltas[1]) || isnan(deltas[2])) nan_detected = true; #else newJTJDiag.val[0] = jtj.val[0] * (1.0 + lambda); newJTJDiag.val[1] = jtj.val[2] * (1.0 + lambda); // calculate determinant double det = (newJTJDiag.val[0]*newJTJDiag.val[1]) - (jtj.val[1] * jtj.val[1]); det = 1.0/det; deltas[0] = det*(newJTJDiag.val[1]*rhs.val[0] - jtj.val[1]*rhs.val[1]); deltas[1] = det*(-jtj.val[1]*rhs.val[0] + newJTJDiag.val[0]*rhs.val[1]); if (::isnan(deltas[0]) || ::isnan(deltas[1])) nan_detected = true; #endif #if DEBUG_REG_FITTING printf("===GPU REG Params...iter:%d,delta0:%f,delta1:%f,delta2:%f,lambda:%f\n", iter, deltas[0], deltas[1], deltas[2],lambda); #endif } __syncthreads(); if (!nan_detected) { new_tmidnuc = tmidNuc + deltas[0]; clampT(new_tmidnuc, constRegP->getMinTmidNuc(), constRegP->getMaxTmidNuc()); perFlowRegP->setTMidNuc(new_tmidnuc); new_ratiodrift = ratioDrift + deltas[1]; clampT(new_ratiodrift, constRegP->getMinRatioDrift(), constRegP->getMaxRatioDrift()); perFlowRegP->setRatioDrift(new_ratiodrift); #if FIT_COPYDRIFT new_copydrift = copyDrift + deltas[2]; clampT(new_copydrift, constRegP->getMinCopyDrift(), constRegP->getMaxCopyDrift()); perFlowRegP->setCopyDrift(new_copydrift); #endif // Calculate residual newBeadRes = ComputeMultiFlowBeadResidual( Ampl, obsTrace, emptyTrace, nucRegParams, frameNumber, deltaFrames, emphasisVec, constRegP, 
BeadParamCube, RegionFrameCube, perFlowRegP, num_frames, beadFrameStride, regionFrameStride); smBuffer[threadIdx.x] = newBeadRes; __syncthreads(); // reduce here for average residual value SimplestReductionAndAverage(smBuffer, samples, false); newAvgRes = smBuffer[0] / (float)(samples); __syncthreads(); } if (threadIdx.x == 0) { //new_residual = smBuffer[0]; // DEBUG #if DEBUG_REG_FITTING printf("===GPU REG Params...iter:%d,tmidnuc:%f,rdr:%f,pdr:%f,old_residual:%f,new_residual:%f\n", iter, new_tmidnuc, new_ratiodrift, new_copydrift, curAvgRes, newAvgRes); #endif if (!nan_detected && newAvgRes < curAvgRes) { solved = true; lambda /= 30.0; // use correct lambda step from bkgmodel if (lambda < FLT_MIN) lambda = FLT_MIN; cont_lambda_itr = false; } else { lambda *= 30.0; if (lambda > 1E+9f) { cont_lambda_itr = false; solved = false; } perFlowRegP->setTMidNuc(tmidNuc); perFlowRegP->setRatioDrift(ratioDrift); #if FIT_COPYDRIFT perFlowRegP->setCopyDrift(copyDrift); #endif } nan_detected = false; } __syncthreads(); } if (solved) { // update residuals for next iteration beadRes = newBeadRes; curAvgRes = newAvgRes; } __syncthreads(); } } __device__ float EstimateAmplForMultiFlowRegionalLevMarFit( const int realFnum, const short *observedTrace, // NUM_SAMPLES_RF x F const float *BeadParamCube, //Copies, R, dmult, gain, tau_adj, phi, stride == beadFrameStride const float *emphasisVec, //(MAX_POISSON_TABLE_COL)*F float *nucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F const ConstantParamsRegion *constRegP, PerFlowParamsRegion *perFlowRegP, const PerNucParamsRegion *perNucRegP, const float *RegionFrameCube, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const float *EmptyTraceAvg, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const size_t beadFrameStride, //stride from one CUBE plane to the next for the Per Well Cubes const size_t regionFrameStride, //, //stride in Region Frame Cube to get to next parameter const size_t num_frames) { __shared__ float smNucRise[ISIG_SUB_STEPS_MULTI_FLOW*MAX_COMPRESSED_FRAMES_GPU]; __shared__ int tStart; if (threadIdx.x == 0) { perFlowRegP->setTMidNucShift(0); } __syncthreads(); float correctedTrace[MAX_COMPRESSED_FRAMES_GPU]; float obsTrace[MAX_COMPRESSED_FRAMES_GPU]; // raw traces being written to // right now getting bead params in the order they were in bead_params struct const float copies = *(BeadParamCube + BpCopies*beadFrameStride); const float R = *(BeadParamCube + BpR*beadFrameStride); const float d = (*(BeadParamCube + BpDmult*beadFrameStride)) * perNucRegP->getD(); // effective diffusion const float gain = *(BeadParamCube + BpGain*beadFrameStride); // calculate empty to bead ratio, buffering and copies const float C = perNucRegP->getC(); const float nuc_flow_span = ConstGlobalP.getNucFlowSpan(); const float sigma = ComputeSigma(perFlowRegP, perNucRegP); float tmidNuc = ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, perNucRegP); float etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), R, copies, realFnum); float tauB = ComputeTauB(constRegP, etbR); float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies, realFnum); // Need shifted background const float* bkgTrace = EmptyTraceAvg; const float* deltaFrames = RegionFrameCube + RfDeltaFrames*regionFrameStride; const float* frameNumber = RegionFrameCube + RfFrameNumber*regionFrameStride; // background subtracted trace for amplitude estimation // calculate initial nucRise here if (threadIdx.x == 0) { #if DEBUG_REG_FITTING printf("C: %f sigma: %f, tmidNuc: %f\n", C, 
sigma, tmidNuc); printf("copies: %f R: %f d: %f gain: %f, etbR: %f tauB: %f\n", copies, R, d, gain, etbR, tauB); #endif tStart = CalculateNucRise( tmidNuc, sigma, C, nuc_flow_span, frameNumber, num_frames, ISIG_SUB_STEPS_MULTI_FLOW, smNucRise); } __syncthreads(); // DEBUG #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("GPU before fitting...start: %d, tmidnuc: %f rdr: %f pdr: %f\n", tStart,perFlowRegP->getTMidNuc(), perFlowRegP->getRatioDrift(), perFlowRegP->getCopyDrift()); printf("Nucrise\n"); for (int i=0; i<(ISIG_SUB_STEPS_MULTI_FLOW*num_frames); ++i) { printf("%f ",smNucRise[i]); } printf("\n"); printf("Emphasis\n"); for (int i=0; i<num_frames; ++i) { printf("%f ",emphasisVec[i]); } printf("\n"); printf("Shifted Bkg\n"); for (int i=0; i<num_frames; ++i) { printf("%f ",bkgTrace[i]); } printf("\n"); } __syncthreads(); #endif // END DEBUG // START AMPLITUDE ESTIMATION BkgCorrectedRawTrace( bkgTrace, observedTrace, BeadParamCube, RegionFrameCube, deltaFrames, perFlowRegP->getDarkness(), etbR, gain, tauB, num_frames, beadFrameStride, regionFrameStride, correctedTrace); #if DEBUG_REG_FITTING if (threadIdx.x == 0) { printf("====>tid: %d,", threadIdx.x); for (int i=0; i<num_frames; ++i) { printf("%f ", correctedTrace[i]); } printf("\n"); } __syncthreads(); #endif //Provide emphasis stride to projection search...the transposed layout is // used in the below function because of single flow fit float ampl = ProjectionSearch( constRegP, perFlowRegP, perNucRegP, correctedTrace, emphasisVec, num_frames, smNucRise, deltaFrames, 1.0f, d, tauB, gain, SP, obsTrace, tStart, beadFrameStride, 1, // emphasis stride ISIG_SUB_STEPS_MULTI_FLOW ); #if DEBUG_REG_FITTING printf("====> GPU....tid: %d Ampl: %f\n", threadIdx.x, ampl); #endif return ampl; } // Fit time varying parameters on a collection of flows. 
For flow by // flow pipeline it means fitting on a history of flows and recycling that // history as we advance in the flows __global__ void PerformMultiFlowRegionalFitting( const unsigned short * RegionMask, const float *beadParamCube, const unsigned short *beadStateCube, const float *emphasisVec, //(MAX_POISSON_TABLE_COL)*F const int *nonZeroEmphFrames, float *finenucRise, float *coarsenucRise, float *scratchSpace, const size_t *numFramesRegion, const ConstantParamsRegion * constRegP, PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float * RegionFrameCube, const int *NumSamples ) { // each region is fitted by one thread block const size_t regId = blockIdx.x; const size_t beadId = threadIdx.x; if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; if (beadId >= NumSamples[regId]) return; //strides //const size_t BeadFrameStride = ( ImgRegP.getGridParam( NUM_SAMPLES_RF )).getPlaneStride(); const size_t BeadFrameStride = ( ImgRegParams::getGridParam( ImgRegP, NUM_SAMPLES_RF )).getPlaneStride(); const size_t RegionFrameStride = ConstFrmP.getMaxCompFrames() * ImgRegP.getNumRegions(); RegionFrameCube += regId*ConstFrmP.getMaxCompFrames(); //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber //update per region pointers constRegP += regId; perFlowRegP += regId; // update per region pointer depending on nuc id //perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; //nonZeroEmphFrames += regId*MAX_POISSON_TABLE_COL; emphasisVec += regId * MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames(); const size_t numf = numFramesRegion[regId]; ///////////////////////////////////// //Observed Sample traces now come from a Sample collection: // if we have a sample history this needs to be replaced by the following: // n sample buffers starting with the oldest: idx = 0 to the latest: idx= numSampleFlows-1 beadStateCube += NUM_SAMPLES_RF*regId + threadIdx.x; beadParamCube += NUM_SAMPLES_RF*regId + threadIdx.x; float *multiFlowNucRise =coarsenucRise + regId * ISIG_SUB_STEPS_MULTI_FLOW * ConstFrmP.getMaxCompFrames() ; float AmplEst[MAX_NUM_FLOWS_IN_BLOCK_GPU]; const short* obsTracePtr[MAX_NUM_FLOWS_IN_BLOCK_GPU]; const float* emptyTracePtr[MAX_NUM_FLOWS_IN_BLOCK_GPU]; const PerNucParamsRegion* nucRegParamsPtr[MAX_NUM_FLOWS_IN_BLOCK_GPU]; for(int histFlowIdx = 0; histFlowIdx < ConstHistCol.getNumHistoryFlows(); histFlowIdx++) { const short *observedTrace = ConstHistCol.getSampleTraces(histFlowIdx) + NUM_SAMPLES_RF*regId + threadIdx.x; obsTracePtr[histFlowIdx] = observedTrace; const float *emptyTraceAvg = ConstHistCol.getEmptyTraces(histFlowIdx) + regId*ConstFrmP.getUncompFrames(); emptyTracePtr[histFlowIdx] = emptyTraceAvg; const PerNucParamsRegion *histNucRegParams = perNucRegP + ImgRegP.getNumRegions() * ConstHistCol.getNucId(histFlowIdx) + regId; nucRegParamsPtr[histFlowIdx] = histNucRegParams; int realFlowNum = ConstFlowP.getRealFnum() - (ConstHistCol.getNumHistoryFlows() - 1 - histFlowIdx); AmplEst[histFlowIdx] = EstimateAmplForMultiFlowRegionalLevMarFit( realFlowNum, observedTrace, beadParamCube, emphasisVec, multiFlowNucRise, constRegP, perFlowRegP, histNucRegParams, RegionFrameCube, emptyTraceAvg, BeadFrameStride, RegionFrameStride, numf); } MultiFlowRegionalLevMarFit( AmplEst, obsTracePtr, emptyTracePtr, nucRegParamsPtr, beadParamCube, beadStateCube, emphasisVec, constRegP, RegionFrameCube, BeadFrameStride, RegionFrameStride, numf, NumSamples[regId], scratchSpace, multiFlowNucRise, perFlowRegP); __syncthreads(); if (ConfigP.FitTmidNucShift()) 
FitTmidNucShiftPerFlow( ConstFlowP.getRealFnum(), AmplEst[ConstHistCol.getNumHistoryFlows() - 1], obsTracePtr[ConstHistCol.getNumHistoryFlows() - 1], beadParamCube, beadStateCube, emptyTracePtr[ConstHistCol.getNumHistoryFlows() - 1], emphasisVec, nucRegParamsPtr[ConstHistCol.getNumHistoryFlows() - 1], constRegP, RegionFrameCube, BeadFrameStride, RegionFrameStride, numf, NumSamples[regId], perFlowRegP); if (beadId == 0) { UpdateFineNucRiseForSingleFlowFit( constRegP, nucRegParamsPtr[ConstHistCol.getNumHistoryFlows() - 1], perFlowRegP, RegionFrameCube, RegionFrameStride, numf, finenucRise + regId * ISIG_SUB_STEPS_SINGLE_FLOW * ConstFrmP.getMaxCompFrames()); UpdateCoarseNucRiseForSingleFlowFit( constRegP, nucRegParamsPtr[ConstHistCol.getNumHistoryFlows() - 1], perFlowRegP, RegionFrameCube, RegionFrameStride, numf, coarsenucRise + regId * ISIG_SUB_STEPS_MULTI_FLOW * ConstFrmP.getMaxCompFrames()); }; }
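// Implied launch shape for PerformMultiFlowRegionalFitting (illustrative sketch
// only, not taken from this file): the kernel maps one thread block per region
// (regId = blockIdx.x) and one thread per sampled bead (beadId = threadIdx.x),
// returning early for beadId >= NumSamples[regId]. A host-side launch would
// therefore look roughly like
//   PerformMultiFlowRegionalFitting<<<numRegions, NUM_SAMPLES_RF, 0, stream>>>(...);
// where numRegions and stream are host variables assumed for this sketch.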
//#define NVBIO_CUDA_DEBUG #include <cub/cub.cuh> #include <zlib/zlib.h> #include <nvbio/basic/omp.h> #include "bloom_filters.h" #include "input_thread.h" #include "output_thread.h" #include "sample_kmers.h" #include "error_correct.h" #include <nvbio/basic/pipeline.h> #include <nvbio/basic/numbers.h> #include <nvbio/basic/bloom_filter.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/shared_pointer.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/primitives.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/system.h> #include <nvbio/strings/string_set.h> #include <nvbio/io/sequence/sequence.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> using namespace nvbio; void cumulative_binomial_distribution(double F[], uint32 l, double p) { // p is the probability of getting 1 double coef = 1 ; double exp = pow( 1 - p, l ); F[0] = pow( 1 - p, l ); for (uint32 i = 1 ; i <= l ; ++i) { coef = coef / i * (l - i + 1); exp = exp / (1 - p) * p; F[i] = F[i-1] + coef * exp; } } char get_bad_quality(nvbio::io::SequenceDataStream* reads_file) { int i; int histogram1[300], histogram2[300]; // fetch some reads io::SequenceDataHost reads; memset( histogram1, 0, sizeof( histogram1 )) ; memset( histogram2, 0, sizeof( histogram2 ) ) ; int n_reads = 0; for (int batch = 0 ; batch < 100; ++batch) { const int ret = nvbio::io::next( ASCII, &reads, reads_file, 10000, 10000000 ); if (ret == 0) break; const nvbio::io::SequenceDataAccess<ASCII> reads_view( reads ); typedef nvbio::io::SequenceDataAccess<ASCII>::qual_string qual_string; for (uint32 i = 0; i < reads.size(); ++i) { const qual_string quals = reads_view.get_quals(i); ++histogram2[ (int)quals[ quals.length() - 1 ] ]; ++histogram1[ (int)quals[0] ]; } n_reads += reads.size(); } // rewind the file if (reads_file->rewind() == false) { log_error(stderr, " failed rewinding reads file\n"); return 1; } int cnt = 0; for (i = 0 ; i < 300; ++i) { cnt += histogram1[i]; if (cnt > n_reads * 0.05f) break; } const int t1 = i - 1; cnt = 0; for (i = 0; i < 300 ; ++i) { cnt += histogram2[i]; if (cnt > n_reads * 0.05f) break; } const int t2 = i; return (char)nvbio::min( t1, t2 ); } float infer_alpha(nvbio::io::SequenceDataStream* reads_file, const uint64 genome_size) { log_info(stderr, " inferring alpha... started\n" ); nvbio::io::SequenceDataHost reads; uint64 n_reads = 0; uint64 n_bps = 0; float time = 0.0f; while (1) { Timer timer; timer.start(); const int ret = nvbio::io::next( ASCII, &reads, reads_file, 512*1024, 128*1024*1024 ); if (ret == 0) break; timer.stop(); time += timer.seconds(); log_verbose(stderr, "\r input: %llu reads, %.2f%c bps (%.1f Mbps/s) ", n_reads, n_bps >= 1.0e9f ? 'B' : 'M', float(n_bps)*(n_bps >= 1.0e-9f ? 1.0e-9f : 1.0e-6f), 1.0e-6f * float(n_bps)/time ); n_reads += reads.size(); n_bps += reads.bps(); } log_verbose_cont(stderr, "\n"); const float coverage = float( double( n_bps ) / double( genome_size ) ); const float alpha = 7.0f / coverage; log_info(stderr, " inferring alpha... done\n" ); log_stats(stderr, " input: %llu reads, %.2f%c bps, %.3fx coverage\n", n_reads, n_bps >= 1.0e9f ? 'B' : 'M', float(n_bps)*(n_bps >= 1.0e-9f ? 
1.0e-9f : 1.0e-6f), coverage ); log_visible(stderr, " inferred alpha: %f\n", alpha ); // rewind the file if (reads_file->rewind() == false) { log_error(stderr, " failed rewinding reads file\n"); return 1; } return alpha; } int main(int argc, char* argv[]) { if ((argc < 3) || (strcmp( argv[1], "--help" ) == 0)) { log_visible(stderr, "nvLighter - Copyright 2015, NVIDIA Corporation\n"); log_info(stderr, "usage:\n"); log_info(stderr, " nvLighter [options] input_file output_file\n"); log_info(stderr, " options:\n"); log_info(stderr, " -v int (0-6) [5] # verbosity level\n"); log_info(stderr, " -zlib string [1R] # e.g. \"1\", ..., \"9\", \"1R\"\n"); log_info(stderr, " -t int [auto] # number of CPU threads\n"); log_info(stderr, " -d int [0] # add the specified GPU device\n"); log_info(stderr, " -k k-mer genome-size alpha # error correction parameters\n"); log_info(stderr, " -K k-mer genome-size # error correction parameters\n"); log_info(stderr, " -maxcor int [4] # maximum correction factor\n"); log_info(stderr, " -newQual int [disabled] # new quality score value\n"); log_info(stderr, " -no-cpu # disable CPU usage\n"); log_info(stderr, " -no-gpu # disable GPU usage\n"); return 0; } const char* reads_name = argv[argc-2]; const char* output_name = argv[argc-1]; const char* comp_level = "1R"; io::QualityEncoding qencoding = io::Phred; int threads = 0; uint32 k = 11u; uint64 genome_size = 0; float alpha = 0.0; float max_correction = 4.0f; char new_quality = 0; float bf_factor = 1.0f; // original: 1.5 bool cpu = true; bool gpu = true; std::vector<int> devices(0); for (int i = 0; i < argc - 2; ++i) { if ((strcmp( argv[i], "-v" ) == 0) || (strcmp( argv[i], "-verbosity" ) == 0) || (strcmp( argv[i], "--verbosity" ) == 0)) { set_verbosity( Verbosity( atoi( argv[++i] ) ) ); } else if ((strcmp( argv[i], "-c" ) == 0) || (strcmp( argv[i], "--compression" ) == 0) || (strcmp( argv[i], "-zlib" ) == 0)) // setup compression level { comp_level = argv[++i]; } else if ((strcmp( argv[i], "-t" ) == 0) || (strcmp( argv[i], "-threads" ) == 0) || (strcmp( argv[i], "--threads" ) == 0)) // setup number of threads { threads = atoi( argv[++i] ); } else if ((strcmp( argv[i], "-d" ) == 0) || (strcmp( argv[i], "-device" ) == 0) || (strcmp( argv[i], "--device" ) == 0)) // add a device { devices.push_back( atoi( argv[++i] ) ); } else if ((strcmp( argv[i], "-no-cpu" ) == 0) || // remove CPU (strcmp( argv[i], "--no-cpu" ) == 0)) cpu = false; else if ((strcmp( argv[i], "-no-gpu" ) == 0) || // remove GPU (strcmp( argv[i], "--no-gpu" ) == 0)) gpu = false; else if (strcmp( argv[i], "-k" ) == 0) // setup kmer length, genome size and sampling frequency { k = atoi( argv[++i] ); genome_size = atol( argv[++i] ); alpha = atof( argv[++i] ); } else if (strcmp( argv[i], "-K" ) == 0) // setup kmer length, genome size and sampling frequency { k = atoi( argv[++i] ); genome_size = atol( argv[++i] ); alpha = 0.0f; } else if (strcmp( argv[i], "-maxcor" ) == 0) // setup max correction factor max_correction = (float)atoi( argv[++i] ); else if (strcmp( argv[i], "-newQual" ) == 0) // setup new quality new_quality = argv[++i][0]; else if (strcmp( argv[i], "-bf" ) == 0) // Bloom filter expansion factor bf_factor = atof( argv[++i] ); } // if no devices were specified, and the gpu is enabled, pick GPU 0 if (gpu && (devices.size() == 0)) devices.push_back(0); uint32 device_count = uint32( devices.size() ); // check whether the genome size has been specified if (genome_size == 0u) { log_error(stderr, "must specify the k-mer and genome size with the option: -k k 
genome-size alpha\n" ); return 1; } try { log_visible(stderr,"nvLighter... started\n"); // compute the optimal Bloom filter size scaling factors const float sampled_kmers_bf_factor = optimal_bloom_filter_bits_per_key( 0.01f ); const float trusted_kmers_bf_factor = optimal_bloom_filter_bits_per_key( 0.0005f ); log_verbose(stderr, " optimal m(0.01f) = %.2f\n", sampled_kmers_bf_factor ); log_verbose(stderr, " optimal k(0.01f) = %u\n", optimal_bloom_filter_hashes( sampled_kmers_bf_factor ) ); log_verbose(stderr, " optimal m(0.0005f) = %.2f\n", trusted_kmers_bf_factor ); log_verbose(stderr, " optimal k(0.0005f) = %u\n", optimal_bloom_filter_hashes( trusted_kmers_bf_factor ) ); // compute the Bloom filter sizes, in words const uint64 sampled_kmers_bf_words = align<8u>( uint64( float(genome_size) * bf_factor * (sampled_kmers_bf_factor / 32.0f) ) ); const uint64 trusted_kmers_bf_words = align<8u>( uint64( float(genome_size) * bf_factor * (trusted_kmers_bf_factor / 32.0f) ) ); const uint32 bits_per_word = 32u; // now set the number of CPU threads threads = threads > 0 ? threads : omp_get_num_procs(); //omp_set_num_threads( threads ); omp_set_num_threads( omp_get_num_procs() ); // use all threads for the merging steps... omp_set_nested(1); // setup the device Bloom filters BloomFilters<host_tag> h_bloom_filters; BloomFilters<host_tag>* h_bloom_filters_ptr = &h_bloom_filters; BloomFilters<device_tag> d_bloom_filters[16]; for (int i = device_count-1; i >= 0; --i) { if (d_bloom_filters[i].setup( devices[i], sampled_kmers_bf_words, trusted_kmers_bf_words ) == false) devices.erase( devices.begin() + i ); } device_count = uint32( devices.size() ); if (gpu && (device_count == 0)) { log_warning(stderr, " no available GPU devices\n"); // revert to using the CPU even if -no-cpu was specified if (cpu == false) { cpu = true; log_warning(stderr, " switching the CPU on\n"); } } if (cpu && h_bloom_filters.setup( -1, sampled_kmers_bf_words, trusted_kmers_bf_words ) == false) cpu = false; if (cpu == false) { h_bloom_filters_ptr = NULL; if (device_count == 0) { log_error(stderr, " no available CPU or GPU devices\n"); return 1; } } // // open the output file // log_info(stderr, " opening output file \"%s\"... started\n", output_name); SharedPointer<nvbio::io::SequenceDataOutputStream> output_file( nvbio::io::open_output_sequence_file( output_name, comp_level ) ); if (output_file == NULL || output_file->is_ok() == false) { log_error(stderr, " failed opening output \"%s\"\n", output_name); return 1; } log_info(stderr, " opening output file \"%s\"... done\n", output_name); // // open the reads file // uint32 max_block_strings = 512*1024; uint32 max_block_bps = 64*1024*1024; log_info(stderr, " opening read file \"%s\"... started\n", reads_name); SharedPointer<nvbio::io::SequenceDataInputStream> read_data_file( nvbio::io::open_sequence_file( reads_name, qencoding, uint32(-1), uint32(-1), io::FORWARD ) ); if (read_data_file == NULL || read_data_file->is_ok() == false) { log_error(stderr, " failed opening file \"%s\"\n", reads_name); return 1; } log_info(stderr, " opening read file \"%s\"... done\n", reads_name); // infer alpha if necessary if (alpha <= 0.0f) { alpha = infer_alpha( read_data_file.get(), genome_size ); if (alpha >= 1.0f) { log_error(stderr, " alpha cannot be greater than 1, coverage likely too low\n"); exit(1); } } const char bad_quality = get_bad_quality( read_data_file.get() ); log_info(stderr, " bad quality threshold: '%c'\n", bad_quality); log_info(stderr," sample kmers... 
started\n"); { // // The following code implements a parallel nvbio::Pipeline to sample kmers from the input // reads. The pipeline is composed several subpipelines, one per active device, each made // of two stages (which will be run in separate threads): an InputStage, and a // SampleKmersStage doing the actual sampling work. // log_debug(stderr, " assemble pipeline\n"); // build the input stage InputStageData input_stage_data( read_data_file.get(), max_block_strings, max_block_bps ); InputStage input_stage[16]; // build the sink SampleKmersStage sample_stage[16]; SequenceStats sample_stats; // setup the pipeline stages if (cpu) { // host stages input_stage[device_count] = InputStage( &input_stage_data ); sample_stage[device_count] = SampleKmersStage( -threads, k, alpha, sampled_kmers_bf_words * bits_per_word, raw_pointer( h_bloom_filters.sampled_kmers_storage ), &sample_stats ); } for (uint32 i = 0; i < device_count; ++i) { // device stages input_stage[i] = InputStage( &input_stage_data ); sample_stage[i] = SampleKmersStage( devices[i], k, alpha, sampled_kmers_bf_words * bits_per_word, raw_pointer( d_bloom_filters[i].sampled_kmers_storage ), &sample_stats ); } // build the pipeline nvbio::Pipeline pipeline; for (uint32 i = 0; i < device_count + (cpu ? 1 : 0); ++i) { const uint32 in0 = pipeline.append_stage( &input_stage[i], 4u ); const uint32 out = pipeline.append_sink( &sample_stage[i] ); pipeline.add_dependency( in0, out ); } log_debug(stderr, " start pipeline\n"); Timer timer; timer.start(); // and run it! pipeline.run(); log_info_cont(stderr, "\n"); merge( h_bloom_filters_ptr, device_count, d_bloom_filters, SAMPLED_KMERS ); timer.stop(); const float time = timer.seconds(); log_verbose(stderr," total time : %.1fs\n", time); log_verbose(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); } log_info(stderr," sample kmers... done\n"); log_info(stderr," mark trusted kmers... started\n"); { // // The following code implements a parallel nvbio::Pipeline to mark trusted kmers in the input // reads. The pipeline is composed several subpipelines, one per active device, each made // of two stages (which will be run in separate threads): an InputStage, and a // TrustedKmersStage doing the actual marking work. 
// // gather Bloom filter statistics nvbio::vector<host_tag,uint32> threshold( 100, 0u ); { double untrustF[100][100]; // compute the number of bits set float occupancy; float approx_size; float FP; if (device_count) { compute_bloom_filter_stats( d_bloom_filters[0], SAMPLED_KMERS, SAMPLED_KMERS_FILTER_K, occupancy, approx_size, FP ); } else { compute_bloom_filter_stats( h_bloom_filters, SAMPLED_KMERS, SAMPLED_KMERS_FILTER_K, occupancy, approx_size, FP ); } log_stats(stderr, " sampled kmers:\n" ); log_stats(stderr, " occupancy : %f\n", occupancy ); log_stats(stderr, " #kmers (approx) : %.1f\n", approx_size ); log_stats(stderr, " FP rate : %f\n", FP ); // compute the i-th untrustF table for (uint32 i = 1; i <= k; ++i) { int d = (int)(0.1 / alpha * 2); if (d < 2) d = 2; const double p = 1.0 - pow( (1.0 - alpha), d ); cumulative_binomial_distribution( untrustF[i], i, p + FP - p * FP ); } // compute the threshold table for (uint32 i = 1; i <= k; ++i) { for (uint32 j = 0; j <= i; ++j) { if (untrustF[i][j] >= 1 - 0.5 * 1e-2) { threshold[i] = j; break; } } } log_verbose(stderr, " thresholds = {"); for (uint32 i = 1; i <= k; ++i) log_verbose_cont(stderr, " %u,", threshold[i]); log_verbose_cont(stderr, " }\n"); } h_bloom_filters.set_threshold( threshold ); for (uint32 i = 0; i < device_count; ++i) d_bloom_filters[i].set_threshold( threshold ); log_debug(stderr, " rewind reads\n"); if (read_data_file->rewind() == false) { log_error(stderr, " failed rewinding file \"%s\"\n", reads_name); return 1; } log_debug(stderr, " assemble pipeline\n"); // build the input stages InputStageData input_stage_data( read_data_file.get(), max_block_strings, max_block_bps ); InputStage input_stage[16]; // build the trusted-kmer marking stages SequenceStats marking_stats; TrustedKmersStage marking_stage[16]; // setup the pipeline stages if (cpu) { // host stages input_stage[device_count] = InputStage( &input_stage_data ); marking_stage[device_count] = TrustedKmersStage( -threads, k, sampled_kmers_bf_words * bits_per_word, raw_pointer( h_bloom_filters.sampled_kmers_storage ), trusted_kmers_bf_words * bits_per_word, raw_pointer( h_bloom_filters.trusted_kmers_storage ), raw_pointer( h_bloom_filters.threshold ), &marking_stats ); } for (uint32 i = 0; i < device_count; ++i) { // device stages input_stage[i] = InputStage( &input_stage_data ); marking_stage[i] = TrustedKmersStage( devices[i], k, sampled_kmers_bf_words * bits_per_word, raw_pointer( d_bloom_filters[i].sampled_kmers_storage ), trusted_kmers_bf_words * bits_per_word, raw_pointer( d_bloom_filters[i].trusted_kmers_storage ), raw_pointer( d_bloom_filters[i].threshold ), &marking_stats ); } // build the pipeline nvbio::Pipeline pipeline; for (uint32 i = 0; i < device_count + (cpu ? 1 : 0); ++i) { const uint32 in0 = pipeline.append_stage( &input_stage[i], 4u ); const uint32 out = pipeline.append_sink( &marking_stage[i] ); pipeline.add_dependency( in0, out ); } log_debug(stderr, " start pipeline\n"); Timer timer; timer.start(); // and run it! pipeline.run(); log_info_cont(stderr, "\n"); merge( h_bloom_filters_ptr, device_count, d_bloom_filters, TRUSTED_KMERS ); timer.stop(); const float time = timer.seconds(); log_verbose(stderr," total time : %.1fs\n", time); log_verbose(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); } log_info(stderr," mark trusted kmers... done\n"); log_info(stderr," error correction... started\n"); { // // The following code implements a parallel nvbio::Pipeline to error-correct the input // reads. 
The pipeline is composed several subpipelines, one per active device, each made // of three stages (which will be run in separate threads): an InputStage, an // ErrorCorrectStage doing the actual error correction work, and a final OutputStage. // // gather Bloom filter statistics { // compute the number of bits set float occupancy; float approx_size; float FP; if (device_count) { compute_bloom_filter_stats( d_bloom_filters[0], TRUSTED_KMERS, TRUSTED_KMERS_FILTER_K, occupancy, approx_size, FP ); } else { compute_bloom_filter_stats( h_bloom_filters, TRUSTED_KMERS, TRUSTED_KMERS_FILTER_K, occupancy, approx_size, FP ); } log_stats(stderr, " trusted kmers:\n" ); log_stats(stderr, " occupancy : %f\n", occupancy ); log_stats(stderr, " #kmers (approx) : %.1f\n", approx_size ); log_stats(stderr, " FP rate : %f\n", FP ); } log_debug(stderr, " rewind reads\n"); if (read_data_file->rewind() == false) { log_error(stderr, " failed rewinding file \"%s\"\n", reads_name); return 1; } log_debug(stderr, " assemble pipeline\n"); // build the input stages InputStageData input_stage_data( read_data_file.get(), max_block_strings, max_block_bps ); InputStage input_stage[16]; // build the error correction stages SequenceStats ec_stats; ErrorCorrectStage ec_stage[16]; // build the output stages OutputStageData output_stage_data( output_file.get() ); OutputStage output_stage[16]; // setup the pipeline stages if (cpu) { // build the input stage input_stage[device_count] = InputStage( &input_stage_data ); // build the sink ec_stage[device_count] = ErrorCorrectStage( -threads, k, trusted_kmers_bf_words * bits_per_word, raw_pointer( h_bloom_filters.trusted_kmers_storage ), raw_pointer( h_bloom_filters.stats ), max_correction, bad_quality, new_quality, &ec_stats ); // build the input stage output_stage[device_count] = OutputStage( &output_stage_data ); } for (uint32 i = 0; i < device_count; ++i) { // build the input stage input_stage[i] = InputStage( &input_stage_data ); // build the sink ec_stage[i] = ErrorCorrectStage( devices[i], k, trusted_kmers_bf_words * bits_per_word, raw_pointer( d_bloom_filters[i].trusted_kmers_storage ), raw_pointer( d_bloom_filters[i].stats ), max_correction, bad_quality, new_quality, &ec_stats ); // build the input stage output_stage[i] = OutputStage( &output_stage_data ); } log_debug(stderr, " start pipeline\n"); // build the pipeline nvbio::Pipeline pipeline; for (uint32 i = 0; i < device_count + (cpu ? 1 : 0); ++i) { const uint32 in = pipeline.append_stage( &input_stage[i], 4u ); const uint32 ec = pipeline.append_stage( &ec_stage[i] ); const uint32 out = pipeline.append_sink( &output_stage[i] ); pipeline.add_dependency( in, ec ); pipeline.add_dependency( ec, out ); } Timer timer; timer.start(); // and run it! pipeline.run(); timer.stop(); const float time = timer.seconds(); log_info_cont(stderr, "\n"); nvbio::vector<host_tag,uint64> stats; merged_stats( h_bloom_filters_ptr, device_count, d_bloom_filters, stats ); log_info(stderr," stats :\n"); log_info(stderr," error free reads : %llu\n", uint64( stats[ERROR_FREE] )); log_info(stderr," unfixable reads : %llu\n", uint64( stats[UNFIXABLE] )); log_info(stderr," corrected bases : %llu\n", uint64( stats[CORRECTIONS] )); log_verbose(stderr," total time : %.1fs\n", time); log_verbose(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); } log_info(stderr," error correction... done\n"); log_visible(stderr,"nvLighter... 
done\n"); } catch (nvbio::cuda_error e) { log_error(stderr, "caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (nvbio::bad_alloc e) { log_error(stderr, "caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (nvbio::logic_error e) { log_error(stderr, "caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (nvbio::runtime_error e) { log_error(stderr, "caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (thrust::system::system_error e) { log_error(stderr, "caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (std::bad_alloc e) { log_error(stderr, "caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (std::logic_error e) { log_error(stderr, "caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (std::runtime_error e) { log_error(stderr, "caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (...) { log_error(stderr, "caught an unknown exception!\n"); return 1; } return 0; }
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace csr_multiply_sm35 { #include <amgx_types/util.h> #include <sm_utils.inl> #include <hash_containers_sm35.inl> // Included inside the namespace to solve name colisions. /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void flag_halo_rows(int *row_ids, int size, int *flagArray, int neighbor, int global_id) { for (int tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < size; tidx += blockDim.x * gridDim.x) { int row_id = row_ids[tidx]; flagArray[row_id] = tidx; } } __device__ __forceinline__ int get_work( int *queue, int warp_id ) { int offset = -1; if ( utils::lane_id() == 0 ) { offset = atomicAdd( queue, 1 ); } return utils::shfl( offset, 0 ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool COUNT_ONLY > __global__ __launch_bounds__( CTA_SIZE ) void count_non_zeroes_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *B_rows, const int *B_cols, int *C_rows, int *C_cols, int *Aq1, int *Bq1, int *Aq2, int *Bq2, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // The hash keys stored in shared memory. __shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[a_row_id * gmem_size], gmem_size ); // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id = get_work( wk_work_queue, warp_id ) ) { int c_row_id = a_row_id; if (Aq1 != NULL) { a_row_id = Aq1[a_row_id]; } // Make sure we have to proceed. if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // Load the range of the row. int a_col_tmp = -1; if ( lane_id < 2 ) { a_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[a_row_id + lane_id] ); } int a_col_it = utils::shfl( a_col_tmp, 0 ); int a_col_end = utils::shfl( a_col_tmp, 1 ); // Iterate over the columns of A. for ( a_col_it += lane_id ; utils::any(a_col_it < a_col_end) ; a_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = a_col_it < a_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int b_row_id = -1; if ( is_active ) { b_row_id = utils::Ld<utils::LD_NC>::load( &A_cols[a_col_it] ); //b_row_id is actually column of A if (Aq2 != NULL) { b_row_id = Aq2[b_row_id]; } if (Bq1 != NULL) { b_row_id = Bq1[b_row_id]; } } // The number of valid rows. const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; ++k ) { // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_b_row_id = utils::shfl( b_row_id, k ); // Load the range of the row of B. 
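// Lanes 0 and 1 load B_rows[row] and B_rows[row + 1]; shfl then broadcasts the pair to the whole warp as the
// [begin, end) range of that row of B.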
int b_col_tmp = -1; if ( lane_id < 2 ) { b_col_tmp = utils::Ld<utils::LD_NC>::load( &B_rows[uniform_b_row_id + lane_id] ); } int b_col_it = utils::shfl( b_col_tmp, 0 ); int b_col_end = utils::shfl( b_col_tmp, 1 ); // Iterate over the range of columns of B. for ( b_col_it += lane_id ; utils::any(b_col_it < b_col_end) ; b_col_it += WARP_SIZE ) { int b_col_id = -1; if ( b_col_it < b_col_end ) { b_col_id = utils::Ld<utils::LD_NC>::load( &B_cols[b_col_it] ); // b_col_id is actually column of B if (Bq2 != NULL) { b_col_id = Bq2[b_col_id]; } } set.insert( b_col_id, COUNT_ONLY ? wk_status : NULL ); } } } // Store the results. if ( COUNT_ONLY ) { int count = set.compute_size(); if ( lane_id == 0 ) { C_rows[c_row_id] = count; } } else { int c_col_tmp = -1; if ( lane_id < 2 ) { c_col_tmp = utils::Ld<utils::LD_NC>::load( &C_rows[c_row_id + lane_id] ); } int c_col_it = utils::shfl( c_col_tmp, 0 ); int c_col_end = utils::shfl( c_col_tmp, 1 ); // Store the results. int count = c_col_end - c_col_it; if ( count == 0 ) { continue; } set.store( count, &C_cols[c_col_it] ); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool COUNT_ONLY > __global__ __launch_bounds__( CTA_SIZE ) void count_non_zeroes_kernel( const int A_num_rows, const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict B_rows, const int *__restrict B_cols, int *__restrict C_rows, int *__restrict C_cols, int *Aq1, int *Bq1, int *Aq2, int *Bq2, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ /*volatile*/ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[a_row_id * gmem_size], gmem_size ); // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id = get_work( wk_work_queue, warp_id ) ) { int c_row_id = a_row_id; if (Aq1 != NULL) { a_row_id = Aq1[a_row_id]; } // Make sure we have to proceed. if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // Load the range of the row. int a_col_tmp = -1; if ( lane_id < 2 ) { a_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[a_row_id + lane_id] ); } int a_col_it = utils::shfl( a_col_tmp, 0 ); int a_col_end = utils::shfl( a_col_tmp, 1 ); // Iterate over the columns of A. for ( a_col_it += lane_id ; utils::any(a_col_it < a_col_end) ; a_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = a_col_it < a_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. 
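// Aq2 and Bq1, when non-NULL, look like optional permutation tables remapping the column index of A to the
// corresponding row index of B for reordered matrices (an assumption; the code only applies them as index translations).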
int b_row_id = -1; if ( is_active ) { b_row_id = utils::Ld<utils::LD_NC>::load( &A_cols[a_col_it] ); //b_row_id is actually column of A if (Aq2 != NULL) { b_row_id = Aq2[b_row_id]; } if (Bq1 != NULL) { b_row_id = Bq1[b_row_id]; } } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Is it an active thread. bool is_active_k = local_k < num_rows; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_b_row_id = utils::shfl( b_row_id, local_k ); // Load the range of the row of B. int b_col_tmp = -1; if ( is_active_k && lane_id_mod_num_threads < 2 ) { b_col_tmp = utils::Ld<utils::LD_NC>::load( &B_rows[uniform_b_row_id + lane_id_mod_num_threads] ); } int b_col_it = utils::shfl( b_col_tmp, lane_id_div_num_threads * NUM_THREADS_PER_ROW + 0 ); int b_col_end = utils::shfl( b_col_tmp, lane_id_div_num_threads * NUM_THREADS_PER_ROW + 1 ); // Iterate over the range of columns of B. for ( b_col_it += lane_id_mod_num_threads ; utils::any(b_col_it < b_col_end) ; b_col_it += NUM_THREADS_PER_ROW ) { int b_col_id = -1; if ( b_col_it < b_col_end ) { b_col_id = utils::Ld<utils::LD_NC>::load( &B_cols[b_col_it] ); // b_col_id is actually column of B if (Bq2 != NULL) { b_col_id = Bq2[b_col_id]; } } set.insert( b_col_id, COUNT_ONLY ? wk_status : NULL ); } } } // Store the results. if ( COUNT_ONLY ) { int count = set.compute_size_with_duplicates(); if ( lane_id == 0 ) { C_rows[c_row_id] = count; } } else { int c_col_tmp = -1; if ( lane_id < 2 ) { c_col_tmp = utils::Ld<utils::LD_NC>::load( &C_rows[c_row_id + lane_id] ); } int c_col_it = utils::shfl( c_col_tmp, 0 ); int c_col_end = utils::shfl( c_col_tmp, 1 ); // Store the results. int count = c_col_end - c_col_it; if ( count == 0 ) { continue; } set.store( count, &C_cols[c_col_it] ); } } } template <int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE, bool COUNT_ONLY > __device__ __forceinline__ void sparse_add_process_row(int row_id, const int *__restrict__ row_offsets, const int *__restrict__ col_indices, int lane_id, Hash_set<int, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE> &set, int *wk_status, int global_id, bool print_flag) { // Load the range of the row of RAP_int int col_tmp = -1; if ( lane_id < 2 ) { col_tmp = utils::Ld<utils::LD_NC>::load( &row_offsets[row_id + lane_id] ); } int col_it = utils::shfl( col_tmp, 0 ); int col_end = utils::shfl( col_tmp, 1 ); // Iterate over the columns of RAP_int for ( col_it += lane_id ; utils::any(col_it < col_end) ; col_it += WARP_SIZE ) { int col_id = -1; if ( col_it < col_end ) { col_id = utils::Ld<utils::LD_NC>::load( &col_indices[col_it] ); } set.insert( col_id, COUNT_ONLY ? wk_status : NULL ); } } template <typename Value_type, int SMEM_SIZE, int NUM_HASH_FCTS, int WARP_SIZE> __device__ __forceinline__ void sparse_add_process_row_values(int row_id, const int *__restrict__ row_offsets, const int *__restrict__ col_indices, const Value_type *__restrict vals, int lane_id, Hash_map<int, Value_type, SMEM_SIZE, NUM_HASH_FCTS, WARP_SIZE> &map, int *wk_status) { // Load the range of the row. int col_tmp = -1; if ( lane_id < 2 ) { col_tmp = utils::Ld<utils::LD_NC>::load( &row_offsets[row_id + lane_id] ); } int col_it = utils::shfl( col_tmp, 0 ); int col_end = utils::shfl( col_tmp, 1 ); // Iterate over the columns of A. 
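// Every (column, value) pair of the row is pushed into the hash map; insert_with_duplicates accumulates values for
// repeated column indices, which is what sums overlapping entries when RAP_int and RAP_ext rows are merged.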
for ( col_it += lane_id ; utils::any(col_it < col_end) ; col_it += WARP_SIZE ) { const bool is_active = col_it < col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int col_id(-1); Value_type value = amgx::types::util<Value_type>::get_zero(); if ( is_active ) { col_id = utils::Ld<utils::LD_NC>::load( &col_indices[col_it] ); value = utils::Ld<utils::LD_NC>::load( &vals[col_it] ); } map.insert_with_duplicates( col_id, value, wk_status ); } } template<int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool COUNT_ONLY > __global__ __launch_bounds__( CTA_SIZE ) void count_non_zeroes_RAP_ext_kernel( const int RAP_int_num_rows, const int *__restrict RAP_int_rows, const int *__restrict RAP_int_cols, int **RAP_ext_row_ptrs, int **RAP_ext_col_ptrs, int *__restrict RAP_rows, int *__restrict RAP_cols, int **flagArray_ptrs, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status, int num_neighbors, int global_id ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // The hash keys stored in shared memory. __shared__ /*volatile*/ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // First threads load the row IDs of A needed by the CTA... int rap_int_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[rap_int_row_id * gmem_size], gmem_size ); // Loop over rows of RAP_ext for ( ; rap_int_row_id < RAP_int_num_rows; rap_int_row_id = get_work( wk_work_queue, warp_id ) ) { // Make sure we have to proceed. if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // --------------------------------- // First process RAP_int // --------------------------------- bool print_flag = false; if (rap_int_row_id == 3 && global_id == 0) { print_flag = true; } sparse_add_process_row<SMEM_SIZE, 4, WARP_SIZE, COUNT_ONLY>(rap_int_row_id, RAP_int_rows, RAP_int_cols, lane_id, set, wk_status, global_id, print_flag); // --------------------------------- // Then process RAP_ext // --------------------------------- for (int i = 0; i < num_neighbors; i++) { int flag = flagArray_ptrs[i][rap_int_row_id]; if (flag != -1) { int *RAP_ext_rows = RAP_ext_row_ptrs[i]; int *RAP_ext_cols = RAP_ext_col_ptrs[i]; int pos_in_row_ext = flag; sparse_add_process_row<SMEM_SIZE, 4, WARP_SIZE, COUNT_ONLY>(pos_in_row_ext, RAP_ext_rows, RAP_ext_cols, lane_id, set, wk_status, global_id, print_flag); } } // Store the results. if ( COUNT_ONLY ) { int count = set.compute_size_with_duplicates(); if ( lane_id == 0 ) { RAP_rows[rap_int_row_id] = count; } } else { int rap_col_tmp = -1; if ( lane_id < 2 ) { rap_col_tmp = utils::Ld<utils::LD_NC>::load( &RAP_rows[rap_int_row_id + lane_id] ); } int rap_col_it = utils::shfl( rap_col_tmp, 0 ); int rap_col_end = utils::shfl( rap_col_tmp, 1 ); // Store the results. 
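// The counting pass already filled RAP's row offsets, so [rap_col_it, rap_col_end) is exactly the space reserved for
// this row; the set flushes its deduplicated column indices into RAP_cols there.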
int count = rap_col_end - rap_col_it; if ( count == 0 ) { continue; } set.store( count, &RAP_cols[rap_col_it] ); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct Without_external_diag { static __device__ __forceinline__ bool is_active ( int a_col_it, int a_col_end ) { return a_col_it < a_col_end; } static __device__ __forceinline__ bool is_boundary( int a_col_it, int a_col_end ) { return false; } }; // ==================================================================================================================== struct With_external_diag { static __device__ __forceinline__ bool is_active ( int a_col_it, int a_col_end ) { return a_col_it <= a_col_end; } static __device__ __forceinline__ bool is_boundary( int a_col_it, int a_col_end ) { return a_col_it == a_col_end; } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool COUNT_ONLY, typename Diag_traits > __global__ __launch_bounds__( CTA_SIZE ) void count_non_zeroes_ilu1_kernel( const int A_num_rows, const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_coloring, int *__restrict C_rows, int *__restrict C_cols, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Tables to broadcast values. __shared__ volatile int s_b_rows[CTA_SIZE], s_b_colors[CTA_SIZE]; // The hash keys stored in shared memory. __shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[a_row_id * gmem_size], gmem_size ); // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id = get_work( wk_work_queue, warp_id ) ) { // Make sure we have to proceed. if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // The color of the row. int a_row_color = A_coloring[a_row_id]; // Load the range of the row. int a_col_tmp = -1; if ( lane_id < 2 ) { a_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[a_row_id + lane_id] ); } int a_col_it = utils::shfl( a_col_tmp, 0 ); int a_col_end = utils::shfl( a_col_tmp, 1 ); // Iterate over the columns of A. for ( a_col_it += lane_id ; utils::any(Diag_traits::is_active(a_col_it, a_col_end)) ; a_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = a_col_it < a_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int b_row_id = -1; if ( is_active ) { b_row_id = utils::Ld<utils::LD_NC>::load( &A_cols[a_col_it] ); } if ( Diag_traits::is_boundary(a_col_it, a_col_end) ) { b_row_id = a_row_id; } // Push the columns in the set. set.insert( b_row_id, COUNT_ONLY ? wk_status : NULL ); // Skip computation if the color of the row is 0. if ( a_row_color != 0 ) { // Gather the colors of the columns. int b_row_color = -1; if ( is_active ) { b_row_color = A_coloring[b_row_id]; } // The number of valid rows. 
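// Warp-level stream compaction: ballot() collects which lanes hold a neighbour of strictly lower color, and popc of
// the ballot masked to lower lane IDs gives each such lane a dense slot in the staging arrays s_b_rows/s_b_colors.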
int pred = is_active && b_row_color < a_row_color; int vote = utils::ballot( pred ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( pred ) { s_b_rows [warp_id * WARP_SIZE + dest] = b_row_id; s_b_colors[warp_id * WARP_SIZE + dest] = b_row_color; // TODO: store an int2 rather than 2 ints of SM35 with 64bit banks. } const int num_rows = __popc( vote ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; ++k ) { // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_b_row_id = s_b_rows [warp_id * WARP_SIZE + k]; const int uniform_b_color = s_b_colors[warp_id * WARP_SIZE + k]; // Load the range of the row of B. int b_col_tmp = -1; if ( lane_id < 2 ) { b_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[uniform_b_row_id + lane_id] ); } int b_col_it = utils::shfl( b_col_tmp, 0 ); int b_col_end = utils::shfl( b_col_tmp, 1 ); // Iterate over the range of columns of B. for ( b_col_it += lane_id ; utils::any(b_col_it < b_col_end) ; b_col_it += WARP_SIZE ) { int b_col_id = -1, b_col_color = -1; if ( b_col_it < b_col_end ) { b_col_id = utils::Ld<utils::LD_NC>::load( &A_cols[b_col_it] ); b_col_color = utils::Ld<utils::LD_NC>::load( &A_coloring[b_col_id] ); } int item = -1; if ( b_col_color >= uniform_b_color && b_col_color != a_row_color ) { item = b_col_id; } set.insert( item, COUNT_ONLY ? wk_status : NULL ); } } } } // Store the results. if ( COUNT_ONLY ) { int count = set.compute_size(); if ( lane_id == 0 ) { C_rows[a_row_id] = count; } } else { int c_col_tmp = -1; if ( lane_id < 2 ) { c_col_tmp = utils::Ld<utils::LD_NC>::load( &C_rows[a_row_id + lane_id] ); } int c_col_it = utils::shfl( c_col_tmp, 0 ); int c_col_end = utils::shfl( c_col_tmp, 1 ); // Store the results. int count = c_col_end - c_col_it; if ( count == 0 ) { continue; } set.store( count, &C_cols[c_col_it] ); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool COUNT_ONLY, typename Diag_traits > __global__ __launch_bounds__( CTA_SIZE ) void count_non_zeroes_ilu1_kernel( const int A_num_rows, const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_coloring, int *__restrict C_rows, int *__restrict C_cols, const int gmem_size, int *g_keys, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // Tables to broadcast values. __shared__ volatile int s_b_rows[CTA_SIZE], s_b_colors[CTA_SIZE]; // The hash keys stored in shared memory. __shared__ int s_keys[NUM_WARPS * SMEM_SIZE]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[a_row_id * gmem_size], gmem_size ); // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id = get_work( wk_work_queue, warp_id ) ) { // Make sure we have to proceed. 
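// wk_status is a global failure flag raised when a warp's hash set overflows its scratch space; once set, every warp
// of the counting pass exits early so the caller can detect the failure and retry with more per-row global memory.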
if ( COUNT_ONLY ) { volatile int *status = reinterpret_cast<volatile int *>( wk_status ); if ( set.has_failed() || *status != 0 ) { return; } } // Clear the set. set.clear(); // The color of the row. int a_row_color = A_coloring[a_row_id]; // Load the range of the row. int a_col_tmp = -1; if ( lane_id < 2 ) { a_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[a_row_id + lane_id] ); } int a_col_it = utils::shfl( a_col_tmp, 0 ); int a_col_end = utils::shfl( a_col_tmp, 1 ); // Iterate over the columns of A. for ( a_col_it += lane_id ; utils::any(Diag_traits::is_active(a_col_it, a_col_end)) ; a_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = a_col_it < a_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int b_row_id = -1; if ( is_active ) { b_row_id = utils::Ld<utils::LD_NC>::load( &A_cols[a_col_it] ); } if ( Diag_traits::is_boundary(a_col_it, a_col_end) ) { b_row_id = a_row_id; } // Push the columns in the set. set.insert( b_row_id, COUNT_ONLY ? wk_status : NULL ); // Skip computation if the color of the row is 0. if ( a_row_color != 0 ) { // Gather the colors of the columns. int b_row_color = -1; if ( is_active ) { b_row_color = A_coloring[b_row_id]; } // The number of valid rows. int pred = is_active && b_row_color < a_row_color; int vote = utils::ballot( pred ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( pred ) { s_b_rows [warp_id * WARP_SIZE + dest] = b_row_id; s_b_colors[warp_id * WARP_SIZE + dest] = b_row_color; } const int num_rows = __popc( vote ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Is it an active thread. bool is_active_k = local_k < num_rows; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). int uniform_b_row_id = -1, uniform_b_color = -1; if ( is_active_k ) { uniform_b_row_id = s_b_rows [warp_id * WARP_SIZE + local_k]; uniform_b_color = s_b_colors[warp_id * WARP_SIZE + local_k]; } // Load the range of the row of B. int b_col_tmp = -1; if ( is_active_k && lane_id_mod_num_threads < 2 ) { b_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[uniform_b_row_id + lane_id_mod_num_threads] ); } int b_col_it = utils::shfl( b_col_tmp, lane_id_div_num_threads * NUM_THREADS_PER_ROW + 0 ); int b_col_end = utils::shfl( b_col_tmp, lane_id_div_num_threads * NUM_THREADS_PER_ROW + 1 ); // Iterate over the range of columns of B. for ( b_col_it += lane_id_mod_num_threads ; utils::any(b_col_it < b_col_end) ; b_col_it += NUM_THREADS_PER_ROW ) { int b_col_id = -1, b_col_color = -1; if ( b_col_it < b_col_end ) { b_col_id = utils::Ld<utils::LD_NC>::load( &A_cols[b_col_it] ); b_col_color = utils::Ld<utils::LD_NC>::load( &A_coloring[b_col_id] ); } int item = -1; if ( b_col_color >= uniform_b_color && b_col_color != a_row_color ) { item = b_col_id; } set.insert( item, COUNT_ONLY ? wk_status : NULL ); } } } } // Store the results. if ( COUNT_ONLY ) { int count = set.compute_size_with_duplicates(); if ( lane_id == 0 ) { C_rows[a_row_id] = count; } } else { int c_col_tmp = -1; if ( lane_id < 2 ) { c_col_tmp = utils::Ld<utils::LD_NC>::load( &C_rows[a_row_id + lane_id] ); } int c_col_it = utils::shfl( c_col_tmp, 0 ); int c_col_end = utils::shfl( c_col_tmp, 1 ); // Store the results. 
int count = c_col_end - c_col_it; if ( count == 0 ) { continue; } set.store( count, &C_cols[c_col_it] ); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE > __global__ __launch_bounds__( CTA_SIZE, 6 ) void compute_values_kernel( const int A_num_rows, const int *__restrict A_rows, const int *__restrict A_cols, const Value_type *__restrict A_vals, const int *__restrict B_rows, const int *__restrict B_cols, const Value_type *__restrict B_vals, const int *__restrict C_rows, int *__restrict C_cols, Value_type *__restrict C_vals, int *Aq1, int *Bq1, int *Aq2, int *Bq2, const int gmem_size, int *g_keys, Value_type *g_vals, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / 32; // The hash keys stored in shared memory. __shared__ /*volatile*/ int s_keys[NUM_WARPS * SMEM_SIZE]; // The hash values stored in shared memory. __shared__ volatile Word s_vote[NUM_WARPS * SMEM_SIZE / 4]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE], &g_keys[a_row_id * gmem_size], &s_vote[warp_id * SMEM_SIZE / 4], &g_vals[a_row_id * gmem_size], gmem_size ); // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id = get_work( wk_work_queue, warp_id ) ) { int c_row_id = a_row_id; if (Aq1 != NULL) { a_row_id = Aq1[a_row_id]; } // Clear the map. map.clear(); // Load the range of the row. int a_col_tmp = -1; if ( lane_id < 2 ) { a_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[a_row_id + lane_id] ); } int a_col_it = utils::shfl( a_col_tmp, 0 ); int a_col_end = utils::shfl( a_col_tmp, 1 ); // Iterate over the columns of A. for ( a_col_it += lane_id ; utils::any(a_col_it < a_col_end) ; a_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = a_col_it < a_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. int b_row_id = -1; Value_type a_value = amgx::types::util<Value_type>::get_zero(); if ( is_active ) { b_row_id = utils::Ld<utils::LD_NC>::load( &A_cols[a_col_it] ); a_value = utils::Ld<utils::LD_NC>::load( &A_vals[a_col_it] ); //b_row_id is actually column of A if (Aq2 != NULL) { b_row_id = Aq2[b_row_id]; } if (Bq1 != NULL) { b_row_id = Bq1[b_row_id]; } } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; ++k ) { // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_b_row_id = utils::shfl( b_row_id, k ); // The value of A. const Value_type uniform_a_value = utils::shfl( a_value, k ); // Load the range of the row of B. int b_col_tmp = -1; if ( lane_id < 2 ) { b_col_tmp = utils::Ld<utils::LD_NC>::load( &B_rows[uniform_b_row_id + lane_id] ); } int b_col_it = utils::shfl( b_col_tmp, 0 ); int b_col_end = utils::shfl( b_col_tmp, 1 ); // Iterate over the range of columns of B. 
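// Each pair (a_ik, b_kj) contributes a_ik * b_kj to C(i, j); the hash map, keyed by the column index of B,
// accumulates these partial products for the current row of C.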
for ( b_col_it += lane_id ; utils::any(b_col_it < b_col_end) ; b_col_it += WARP_SIZE ) { int b_col_id = -1; Value_type b_value = amgx::types::util<Value_type>::get_zero(); if ( b_col_it < b_col_end ) { b_col_id = utils::Ld<utils::LD_NC>::load( &B_cols[b_col_it] ); b_value = utils::Ld<utils::LD_NC>::load( &B_vals[b_col_it] ); if (Bq2 != NULL) { b_col_id = Bq2[b_col_id]; } } map.insert( b_col_id, uniform_a_value, b_value, wk_status ); } } } // Store the results. int c_col_tmp = -1; if ( lane_id < 2 ) { c_col_tmp = utils::Ld<utils::LD_NC>::load( &C_rows[c_row_id + lane_id] ); } int c_col_it = utils::shfl( c_col_tmp, 0 ); int c_col_end = utils::shfl( c_col_tmp, 1 ); // Store the results. int count = c_col_end - c_col_it; if ( count == 0 ) { continue; } map.store( count, &C_cols[c_col_it], &C_vals[c_col_it] ); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_ROW, typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE > __global__ __launch_bounds__( CTA_SIZE, 6 ) void compute_values_kernel( const int A_num_rows, const int *__restrict A_rows, const int *__restrict A_cols, const Value_type *__restrict A_vals, const int *__restrict B_rows, const int *__restrict B_cols, const Value_type *__restrict B_vals, const int *__restrict C_rows, int *__restrict C_cols, Value_type *__restrict C_vals, int *Aq1, int *Bq1, int *Aq2, int *Bq2, const int gmem_size, int *g_keys, Value_type *g_vals, int *wk_work_queue, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW; // The hash keys stored in shared memory. __shared__ /*volatile*/ int s_keys[NUM_WARPS * SMEM_SIZE]; // The hash values stored in shared memory. __shared__ volatile Word s_vote[NUM_WARPS * SMEM_SIZE / 4]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // Constants. const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE], &g_keys[a_row_id * gmem_size], &s_vote[warp_id * SMEM_SIZE / 4], &g_vals[a_row_id * gmem_size], gmem_size ); // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id = get_work( wk_work_queue, warp_id ) ) { int c_row_id = a_row_id; if (Aq1 != NULL) { a_row_id = Aq1[a_row_id]; } // Clear the map. map.clear_all(); // Load the range of the row. int a_col_tmp = -1; if ( lane_id < 2 ) { a_col_tmp = utils::Ld<utils::LD_NC>::load( &A_rows[a_row_id + lane_id] ); } int a_col_it = utils::shfl( a_col_tmp, 0 ); int a_col_end = utils::shfl( a_col_tmp, 1 ); // Iterate over the columns of A. for ( a_col_it += lane_id ; utils::any(a_col_it < a_col_end) ; a_col_it += WARP_SIZE ) { // Is it an active thread. const bool is_active = a_col_it < a_col_end; // Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID. 
int b_row_id(-1); Value_type a_value = amgx::types::util<Value_type>::get_zero(); if ( is_active ) { b_row_id = utils::Ld<utils::LD_NC>::load( &A_cols[a_col_it] ); a_value = utils::Ld<utils::LD_NC>::load( &A_vals[a_col_it] ); //b_row_id is actually column of A if (Aq2 != NULL) { b_row_id = Aq2[b_row_id]; } if (Bq1 != NULL) { b_row_id = Bq1[b_row_id]; } } const int num_rows = __popc( utils::ballot(is_active) ); // Uniform loop: threads collaborate to load other elements. for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS ) { int local_k = k + lane_id_div_num_threads; // Is it an active thread. bool is_active_k = local_k < num_rows; // Threads in the warp proceeds columns of B in the range [bColIt, bColEnd). const int uniform_b_row_id = utils::shfl( b_row_id, k + lane_id_div_num_threads ); // The value of A. const Value_type uniform_a_value = utils::shfl( a_value, k + lane_id_div_num_threads ); // Load the range of the row of B. int b_col_tmp = -1; if ( is_active_k && lane_id_mod_num_threads < 2 ) { b_col_tmp = utils::Ld<utils::LD_NC>::load( &B_rows[uniform_b_row_id + lane_id_mod_num_threads] ); } int b_col_it = utils::shfl( b_col_tmp, lane_id_div_num_threads * NUM_THREADS_PER_ROW + 0 ); int b_col_end = utils::shfl( b_col_tmp, lane_id_div_num_threads * NUM_THREADS_PER_ROW + 1 ); // Iterate over the range of columns of B. for ( b_col_it += lane_id_mod_num_threads ; utils::any(b_col_it < b_col_end) ; b_col_it += NUM_THREADS_PER_ROW ) { int b_col_id(-1); Value_type b_value = amgx::types::util<Value_type>::get_zero(); if ( b_col_it < b_col_end ) { b_col_id = utils::Ld<utils::LD_NC>::load( &B_cols[b_col_it] ); b_value = utils::Ld<utils::LD_NC>::load( &B_vals[b_col_it] ); //b_col_id is actually column of B if (Bq2 != NULL) { b_col_id = Bq2[b_col_id]; } } map.insert_with_duplicates( b_col_id, uniform_a_value * b_value, wk_status ); } } } // Store the results. int c_col_tmp = -1; if ( lane_id < 2 ) { c_col_tmp = utils::Ld<utils::LD_NC>::load( &C_rows[c_row_id + lane_id] ); } int c_col_it = utils::shfl( c_col_tmp, 0 ); int c_col_end = utils::shfl( c_col_tmp, 1 ); // Store the results. int count = c_col_end - c_col_it; if ( count == 0 ) { continue; } map.store( count, &C_cols[c_col_it], &C_vals[c_col_it] ); } } template< typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE > __global__ __launch_bounds__( CTA_SIZE, 6 ) void compute_values_RAP_ext_kernel( const int RAP_int_num_rows, const int *__restrict RAP_int_rows, const int *__restrict RAP_int_cols, const Value_type *__restrict RAP_int_vals, int **RAP_ext_row_ptrs, int **RAP_ext_col_ptrs, Value_type **RAP_ext_val_ptrs, int *__restrict RAP_rows, int *__restrict RAP_cols, Value_type *__restrict RAP_vals, int **flagArray_ptrs, const int gmem_size, int *g_keys, Value_type *g_vals, int *wk_work_queue, int num_neighbors, int *wk_status ) { const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // The hash keys stored in shared memory. __shared__ /*volatile*/ int s_keys[NUM_WARPS * SMEM_SIZE]; // The hash values stored in shared memory. __shared__ volatile Word s_vote[NUM_WARPS * SMEM_SIZE / 4]; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id( ); const int lane_id = utils::lane_id( ); // First threads load the row IDs of A needed by the CTA... int rap_int_row_id = blockIdx.x * NUM_WARPS + warp_id; // Create local storage for the set. 
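// Each warp owns one hash map: keys and packed votes live in shared memory, while a slice of g_keys/g_vals of
// gmem_size entries (indexed by the warp's initial row id) serves as global-memory overflow storage.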
Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE], &g_keys[rap_int_row_id * gmem_size], &s_vote[warp_id * SMEM_SIZE / 4], &g_vals[rap_int_row_id * gmem_size], gmem_size ); // Loop over rows of RAP_ext for ( ; rap_int_row_id < RAP_int_num_rows ; rap_int_row_id = get_work( wk_work_queue, warp_id ) ) { // Clear the map. map.clear_all(); // --------------------------------- // First process RAP_int // --------------------------------- sparse_add_process_row_values<Value_type, SMEM_SIZE, 4, WARP_SIZE>(rap_int_row_id, RAP_int_rows, RAP_int_cols, RAP_int_vals, lane_id, map, wk_status); // --------------------------------- // Then process RAP_ext // --------------------------------- for (int i = 0; i < num_neighbors; i++) { int flag = flagArray_ptrs[i][rap_int_row_id]; if (flag != -1) { int *RAP_ext_rows = RAP_ext_row_ptrs[i]; int *RAP_ext_cols = RAP_ext_col_ptrs[i]; Value_type *RAP_ext_vals = RAP_ext_val_ptrs[i]; int pos_in_row_ext = flag; sparse_add_process_row_values<Value_type, SMEM_SIZE, 4, WARP_SIZE>(pos_in_row_ext, RAP_ext_rows, RAP_ext_cols, RAP_ext_vals, lane_id, map, wk_status); } } // Store the results. int rap_col_tmp = -1; if ( lane_id < 2 ) { rap_col_tmp = utils::Ld<utils::LD_NC>::load( &RAP_rows[rap_int_row_id + lane_id] ); } int rap_col_it = utils::shfl( rap_col_tmp, 0 ); int rap_col_end = utils::shfl( rap_col_tmp, 1 ); // Store the results. int count = rap_col_end - rap_col_it; if ( count == 0 ) { continue; } map.store( count, &RAP_cols[rap_col_it], &RAP_vals[rap_col_it] ); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace csr_multiply_sm35 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace amgx { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// enum { WARP_SIZE = 32, GRID_SIZE = 128, SMEM_SIZE = 128 }; // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::CSR_Multiply_Sm35( bool allocate_values, int grid_size, int max_warp_count, int gmem_size ) : Base(allocate_values, grid_size, max_warp_count, gmem_size) {} // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::count_non_zeroes( const Matrix_d &A, const Matrix_d &B, Matrix_d &C, IVector *Aq1, IVector *Bq1, IVector *Aq2, IVector *Bq2 ) { const int CTA_SIZE = 256; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Reset work queue. int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // Compute non-zero elements. switch ( this->m_num_threads_per_row_count ) { case 2: csr_multiply_sm35::count_non_zeroes_kernel< 2, CTA_SIZE, SMEM_SIZE, WARP_SIZE, true> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), NULL, (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? 
Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); break; case 4: csr_multiply_sm35::count_non_zeroes_kernel< 4, CTA_SIZE, SMEM_SIZE, WARP_SIZE, true> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), NULL, (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); break; case 8: csr_multiply_sm35::count_non_zeroes_kernel< 8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, true> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), NULL, (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); break; case 16: csr_multiply_sm35::count_non_zeroes_kernel<16, CTA_SIZE, SMEM_SIZE, WARP_SIZE, true> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), NULL, (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); break; default: csr_multiply_sm35::count_non_zeroes_kernel<CTA_SIZE, SMEM_SIZE, WARP_SIZE, true> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), NULL, (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); } cudaCheckError(); //CUDA_SAFE_CALL( cudaGetLastError() ); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::count_non_zeroes_RAP_sparse_add( Matrix_d &RAP, const Matrix_d &RAP_int, std::vector<IVector> &RAP_ext_row_offsets, std::vector<IVector> &RAP_ext_col_indices, std::vector<MVector> &RAP_ext_values, std::vector<IVector> &RAP_ext_row_ids) { const int CTA_SIZE = 256; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Reset work queue. 
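// The kernels are persistent: the first GRID_SIZE * NUM_WARPS rows are assigned statically (one per warp), and a warp
// that finishes its row pulls the next row index by atomically incrementing m_work_queue (see get_work), so the queue
// is seeded with the number of statically assigned rows.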
int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // This is num_owned_coarse_rows int RAP_size = RAP.get_num_rows(); int RAP_int_size = RAP_int.row_offsets.size() - 1; if (RAP_int_size < RAP_size) { FatalError("RAP_int has less rows than RAP, need to modify sparse RAP add to handle that case\n", AMGX_ERR_NOT_IMPLEMENTED); } // Create a device vector of the raw pointers to the array // RAP_ext_row_ids_ptrs // RAP_ext_row_offsets_ptrs // RAP_ext_col_indices_ptrs // RAP_ext_values_ptrs int num_neighbors = RAP_ext_row_offsets.size(); std::vector<IVector> flagArray(num_neighbors); for (int i = 0; i < num_neighbors; i++) { flagArray[i].resize(RAP_size); thrust::fill(flagArray[i].begin(), flagArray[i].end(), -1); } cudaCheckError(); std::vector<int *> flagArray_ptrs_h(num_neighbors); std::vector<int *> RAP_ext_row_offsets_ptrs_h(num_neighbors); std::vector<int *> RAP_ext_col_indices_ptrs_h(num_neighbors); for (int i = 0; i < num_neighbors; i++) { flagArray_ptrs_h[i] = thrust::raw_pointer_cast(&flagArray[i][0]); RAP_ext_row_offsets_ptrs_h[i] = thrust::raw_pointer_cast(&RAP_ext_row_offsets[i][0]); RAP_ext_col_indices_ptrs_h[i] = thrust::raw_pointer_cast(&RAP_ext_col_indices[i][0]); } device_vector_alloc<int *> flagArray_ptrs = flagArray_ptrs_h; device_vector_alloc<int *> RAP_ext_row_offsets_ptrs = RAP_ext_row_offsets_ptrs_h; device_vector_alloc<int *> RAP_ext_col_indices_ptrs = RAP_ext_col_indices_ptrs_h; for (int i = 0; i < num_neighbors; i++) { int size = RAP_ext_row_ids[i].size(); if (size != 0) { int num_blocks = min(4096, (size + 127) / 128); //write the position in RAP_ext_row_ids csr_multiply_sm35::flag_halo_rows <<< num_blocks, 128>>>( RAP_ext_row_ids[i].raw(), size, flagArray[i].raw(), i, RAP.manager->global_id()); } } csr_multiply_sm35::count_non_zeroes_RAP_ext_kernel<CTA_SIZE, SMEM_SIZE, WARP_SIZE, true> <<< GRID_SIZE, CTA_SIZE>>>( RAP_size, RAP_int.row_offsets.raw(), RAP_int.col_indices.raw(), thrust::raw_pointer_cast(&RAP_ext_row_offsets_ptrs[0]), thrust::raw_pointer_cast(&RAP_ext_col_indices_ptrs[0]), RAP.row_offsets.raw(), (int *) NULL, thrust::raw_pointer_cast(&flagArray_ptrs[0]), this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status, num_neighbors, RAP.manager->global_id() ); cudaCheckError(); //CUDA_SAFE_CALL( cudaGetLastError() ); } // ==================================================================================================================== template< int CTA_SIZE, bool COUNT_ONLY, typename Diag_traits, typename Matrix > static void count_non_zeroes_ilu1_dispatch( const Matrix &A, Matrix &B, int num_threads_per_row_count, int gmem_size, int *keys, int *work_queue, int *status ) { switch ( num_threads_per_row_count ) { case 2: csr_multiply_sm35::count_non_zeroes_ilu1_kernel< 2, CTA_SIZE, SMEM_SIZE, WARP_SIZE, COUNT_ONLY, Diag_traits> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &A.getMatrixColoring().getRowColors()[0] ), B.row_offsets.raw(), B.col_indices.raw(), gmem_size, keys, work_queue, status ); break; case 4: csr_multiply_sm35::count_non_zeroes_ilu1_kernel< 4, CTA_SIZE, SMEM_SIZE, WARP_SIZE, COUNT_ONLY, Diag_traits> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &A.getMatrixColoring().getRowColors()[0] ), B.row_offsets.raw(), B.col_indices.raw(), gmem_size, keys, work_queue, status ); break; case 8: 
csr_multiply_sm35::count_non_zeroes_ilu1_kernel< 8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, COUNT_ONLY, Diag_traits> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &A.getMatrixColoring().getRowColors()[0] ), B.row_offsets.raw(), B.col_indices.raw(), gmem_size, keys, work_queue, status ); break; case 16: csr_multiply_sm35::count_non_zeroes_ilu1_kernel<16, CTA_SIZE, SMEM_SIZE, WARP_SIZE, COUNT_ONLY, Diag_traits> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &A.getMatrixColoring().getRowColors()[0] ), B.row_offsets.raw(), B.col_indices.raw(), gmem_size, keys, work_queue, status ); break; default: csr_multiply_sm35::count_non_zeroes_ilu1_kernel<CTA_SIZE, SMEM_SIZE, WARP_SIZE, COUNT_ONLY, Diag_traits> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &A.getMatrixColoring().getRowColors()[0] ), B.row_offsets.raw(), B.col_indices.raw(), gmem_size, keys, work_queue, status ); } cudaCheckError(); } // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::count_non_zeroes_ilu1( const Matrix_d &A, Matrix_d &B ) { const int CTA_SIZE = 256; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Reset work queue. int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // Count the number of non zeroes. if ( A.hasProps(DIAG) ) count_non_zeroes_ilu1_dispatch<CTA_SIZE, true, csr_multiply_sm35::With_external_diag, Matrix_d>( A, B, this->m_num_threads_per_row_count, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); else count_non_zeroes_ilu1_dispatch<CTA_SIZE, true, csr_multiply_sm35::Without_external_diag, Matrix_d>( A, B, this->m_num_threads_per_row_count, this->m_gmem_size, this->m_keys, this->m_work_queue, this->m_status ); // Compute non-zero elements. CUDA_SAFE_CALL( cudaGetLastError() ); } // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::compute_offsets( Matrix_d &C ) { thrust::device_ptr<int> offsets_begin(C.row_offsets.raw()); thrust::device_ptr<int> offsets_end (C.row_offsets.raw() + C.get_num_rows() + 1); thrust_wrapper::exclusive_scan( offsets_begin, offsets_end, offsets_begin ); cudaCheckError(); } // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::compute_sparsity( const Matrix_d &A, const Matrix_d &B, Matrix_d &C ) { const int CTA_SIZE = 256; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // std::cerr << "CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::compute_sparsity" << std::endl; // Reset the work queue. int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // Compute the values. 
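// Same kernels as in count_non_zeroes, but instantiated with COUNT_ONLY = false: C's row offsets are already known at
// this point, so this pass writes the actual column indices of C instead of per-row counts.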
switch ( this->m_num_threads_per_row_count ) { case 2: csr_multiply_sm35::count_non_zeroes_kernel< 2, CTA_SIZE, SMEM_SIZE, WARP_SIZE, false> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), C.col_indices.raw(), NULL, NULL, NULL, NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); break; case 4: csr_multiply_sm35::count_non_zeroes_kernel< 4, CTA_SIZE, SMEM_SIZE, WARP_SIZE, false> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), C.col_indices.raw(), NULL, NULL, NULL, NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); break; case 8: csr_multiply_sm35::count_non_zeroes_kernel< 8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, false> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), C.col_indices.raw(), NULL, NULL, NULL, NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); break; case 16: csr_multiply_sm35::count_non_zeroes_kernel<16, CTA_SIZE, SMEM_SIZE, WARP_SIZE, false> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), C.col_indices.raw(), NULL, NULL, NULL, NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); break; default: csr_multiply_sm35::count_non_zeroes_kernel<CTA_SIZE, SMEM_SIZE, WARP_SIZE, false> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), B.row_offsets.raw(), B.col_indices.raw(), C.row_offsets.raw(), C.col_indices.raw(), NULL, NULL, NULL, NULL, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); } cudaCheckError(); //CUDA_SAFE_CALL( cudaGetLastError() ); } // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::compute_sparsity_ilu1( const Matrix_d &A, Matrix_d &B ) { const int CTA_SIZE = 256; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Reset work queue. int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // Count the number of non zeroes. if ( A.hasProps(DIAG) ) count_non_zeroes_ilu1_dispatch<CTA_SIZE, false, csr_multiply_sm35::With_external_diag, Matrix_d>( A, B, this->m_num_threads_per_row_count, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); else count_non_zeroes_ilu1_dispatch<CTA_SIZE, false, csr_multiply_sm35::Without_external_diag, Matrix_d>( A, B, this->m_num_threads_per_row_count, this->m_gmem_size, this->m_keys, this->m_work_queue, NULL ); // Make sure it worked properly. CUDA_SAFE_CALL( cudaGetLastError() ); } // ==================================================================================================================== template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::compute_values( const Matrix_d &A, const Matrix_d &B, Matrix_d &C, int num_threads, IVector *Aq1, IVector *Bq1, IVector *Aq2, IVector *Bq2 ) { const int CTA_SIZE = 128; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Reset the work queue. 
int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // Compute the values. int *status = NULL; if ( num_threads != this->m_num_threads_per_row_compute ) { status = this->m_status; } switch ( num_threads ) { case 2: csr_multiply_sm35::compute_values_kernel< 2, Value_type, CTA_SIZE, SMEM_SIZE, WARP_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), B.row_offsets.raw(), B.col_indices.raw(), B.values.raw(), C.row_offsets.raw(), C.col_indices.raw(), C.values.raw(), (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_vals, this->m_work_queue, status ); break; case 4: csr_multiply_sm35::compute_values_kernel< 4, Value_type, CTA_SIZE, SMEM_SIZE, WARP_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), B.row_offsets.raw(), B.col_indices.raw(), B.values.raw(), C.row_offsets.raw(), C.col_indices.raw(), C.values.raw(), (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_vals, this->m_work_queue, status ); break; case 8: csr_multiply_sm35::compute_values_kernel< 8, Value_type, CTA_SIZE, SMEM_SIZE, WARP_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), B.row_offsets.raw(), B.col_indices.raw(), B.values.raw(), C.row_offsets.raw(), C.col_indices.raw(), C.values.raw(), (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_vals, this->m_work_queue, status ); break; case 16: csr_multiply_sm35::compute_values_kernel<16, Value_type, CTA_SIZE, SMEM_SIZE, WARP_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), B.row_offsets.raw(), B.col_indices.raw(), B.values.raw(), C.row_offsets.raw(), C.col_indices.raw(), C.values.raw(), (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_vals, this->m_work_queue, status ); break; default: csr_multiply_sm35::compute_values_kernel<Value_type, CTA_SIZE, SMEM_SIZE, WARP_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), B.row_offsets.raw(), B.col_indices.raw(), B.values.raw(), C.row_offsets.raw(), C.col_indices.raw(), C.values.raw(), (Aq1 != NULL) ? Aq1->raw() : NULL, (Bq1 != NULL) ? Bq1->raw() : NULL, (Aq2 != NULL) ? Aq2->raw() : NULL, (Bq2 != NULL) ? Bq2->raw() : NULL, this->m_gmem_size, this->m_keys, this->m_vals, this->m_work_queue, status ); } cudaCheckError(); //CUDA_SAFE_CALL( cudaGetLastError() ); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void CSR_Multiply_Sm35<TemplateConfig<AMGX_device, V, M, I> >::compute_values_RAP_sparse_add( Matrix_d &RAP, const Matrix_d &RAP_int, std::vector<IVector> &RAP_ext_row_offsets, std::vector<IVector> &RAP_ext_col_indices, std::vector<MVector> &RAP_ext_values, std::vector<IVector> &RAP_ext_row_ids, int num_threads) { const int CTA_SIZE = 128; const int NUM_WARPS = CTA_SIZE / WARP_SIZE; // Reset the work queue. 
int work_offset = GRID_SIZE * NUM_WARPS; CUDA_SAFE_CALL( cudaMemcpy( this->m_work_queue, &work_offset, sizeof(int), cudaMemcpyHostToDevice ) ); // Compute the values. int *status = NULL; if ( num_threads != this->m_num_threads_per_row_compute ) { status = this->m_status; } // This is num_owned_coarse_rows int RAP_size = RAP.get_num_rows(); int RAP_int_size = RAP_int.row_offsets.size() - 1; if (RAP_int_size < RAP_size) { FatalError("RAP_int has less rows than RAP, need to modify sparse RAP add to handle that case\n", AMGX_ERR_NOT_IMPLEMENTED); } //TODO: Optimize: reuse arrays from count nonzeros int num_neighbors = RAP_ext_row_offsets.size(); std::vector<IVector> flagArray(num_neighbors); for (int i = 0; i < num_neighbors; i++) { flagArray[i].resize(RAP_size); thrust::fill(flagArray[i].begin(), flagArray[i].end(), -1); } cudaCheckError(); std::vector<int *> flagArray_ptrs_h(num_neighbors); std::vector<int *> RAP_ext_row_offsets_ptrs_h(num_neighbors); std::vector<int *> RAP_ext_col_indices_ptrs_h(num_neighbors); std::vector<Value_type *> RAP_ext_values_ptrs_h(num_neighbors); for (int i = 0; i < num_neighbors; i++) { flagArray_ptrs_h[i] = thrust::raw_pointer_cast(&flagArray[i][0]); RAP_ext_row_offsets_ptrs_h[i] = thrust::raw_pointer_cast(&RAP_ext_row_offsets[i][0]); RAP_ext_col_indices_ptrs_h[i] = thrust::raw_pointer_cast(&RAP_ext_col_indices[i][0]); RAP_ext_values_ptrs_h[i] = thrust::raw_pointer_cast(&RAP_ext_values[i][0]); } device_vector_alloc<int *> flagArray_ptrs = flagArray_ptrs_h; device_vector_alloc<int *> RAP_ext_row_offsets_ptrs = RAP_ext_row_offsets_ptrs_h; device_vector_alloc<int *> RAP_ext_col_indices_ptrs = RAP_ext_col_indices_ptrs_h; device_vector_alloc<Value_type *> RAP_ext_values_ptrs = RAP_ext_values_ptrs_h; for (int i = 0; i < num_neighbors; i++) { int size = RAP_ext_row_ids[i].size(); if (size != 0) { int num_blocks = min(4096, (size + 127) / 128); //write the position in RAP_ext_row_ids csr_multiply_sm35::flag_halo_rows <<< num_blocks, 128>>>( RAP_ext_row_ids[i].raw(), size, flagArray[i].raw(), i, RAP.manager->global_id()); } } cudaCheckError(); //CUDA_SAFE_CALL( cudaGetLastError() ); csr_multiply_sm35::compute_values_RAP_ext_kernel< Value_type, CTA_SIZE, SMEM_SIZE, WARP_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( RAP_size, RAP_int.row_offsets.raw(), RAP_int.col_indices.raw(), RAP_int.values.raw(), thrust::raw_pointer_cast(&RAP_ext_row_offsets_ptrs[0]), thrust::raw_pointer_cast(&RAP_ext_col_indices_ptrs[0]), thrust::raw_pointer_cast(&RAP_ext_values_ptrs[0]), RAP.row_offsets.raw(), RAP.col_indices.raw(), RAP.values.raw(), thrust::raw_pointer_cast(&flagArray_ptrs[0]), this->m_gmem_size, this->m_keys, this->m_vals, this->m_work_queue, num_neighbors, status ); cudaCheckError(); //CUDA_SAFE_CALL( cudaGetLastError() ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define AMGX_CASE_LINE(CASE) template class CSR_Multiply_Sm35<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace amgx
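///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// A minimal, single-threaded host sketch of what the phases above compute for C = A * B in CSR form (scalar values):
// (1) count the non-zeroes of each row of C, (2) exclusive-scan the counts into row offsets (compute_offsets),
// (3) fill column indices and values. CsrMatrix and spgemm_reference are hypothetical names used for illustration
// only, not part of the AMGX API.
#include <map>
#include <vector>

struct CsrMatrix
{
    int num_rows = 0, num_cols = 0;
    std::vector<int>    row_offsets;   // size num_rows + 1
    std::vector<int>    col_indices;   // size nnz
    std::vector<double> values;        // size nnz
};

static CsrMatrix spgemm_reference(const CsrMatrix &A, const CsrMatrix &B)
{
    CsrMatrix C;
    C.num_rows = A.num_rows;
    C.num_cols = B.num_cols;
    C.row_offsets.assign(A.num_rows + 1, 0);

    // One associative container per row of C plays the role of the per-warp hash set/map in the kernels:
    // it only ever has to hold the column indices (symbolic pass) or index/value pairs (numeric pass) of one row.
    std::vector<std::map<int, double>> rows(A.num_rows);
    for (int i = 0; i < A.num_rows; ++i)
        for (int ka = A.row_offsets[i]; ka < A.row_offsets[i + 1]; ++ka)
        {
            const int    k  = A.col_indices[ka];                      // column of A == row of B
            const double av = A.values[ka];
            for (int kb = B.row_offsets[k]; kb < B.row_offsets[k + 1]; ++kb)
                rows[i][B.col_indices[kb]] += av * B.values[kb];      // accumulate a_ik * b_kj into C(i, j)
        }

    // Exclusive scan of the per-row counts produces the CSR row offsets of C.
    for (int i = 0; i < A.num_rows; ++i)
        C.row_offsets[i + 1] = C.row_offsets[i] + int(rows[i].size());

    C.col_indices.reserve(C.row_offsets.back());
    C.values.reserve(C.row_offsets.back());
    for (int i = 0; i < A.num_rows; ++i)
        for (const auto &kv : rows[i])
        {
            C.col_indices.push_back(kv.first);
            C.values.push_back(kv.second);
        }
    return C;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////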
#if defined(CUDA_SIFTGPU_ENABLED) #include "GL/glew.h" #include "stdio.h" #include "CuTexImage.h" #include "ProgramCU.h" #include "GlobalUtil.h" //---------------------------------------------------------------- //Begin SiftGPU setting section. ////////////////////////////////////////////////////////// #define IMUL(X,Y) __mul24(X,Y) //#define FDIV(X,Y) ((X)/(Y)) #define FDIV(X,Y) __fdividef(X,Y) ///////////////////////////////////////////////////////// //filter kernel width range (don't change this) #define KERNEL_MAX_WIDTH 33 #define KERNEL_MIN_WIDTH 5 ////////////////////////////////////////////////////////// //horizontal filter block size (32, 64, 128, 256, 512) #define FILTERH_TILE_WIDTH 128 //thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16) #define FILTERV_BLOCK_WIDTH 16 #define FILTERV_BLOCK_HEIGHT 32 //The corresponding image patch for a thread block #define FILTERV_PIXEL_PER_THREAD 4 #define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH #define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT) ////////////////////////////////////////////////////////// //thread block size for computing Difference of Gaussian #define DOG_BLOCK_LOG_DIMX 7 #define DOG_BLOCK_LOG_DIMY 0 #define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX) #define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY) ////////////////////////////////////////////////////////// //thread block size for keypoint detection #define KEY_BLOCK_LOG_DIMX 3 #define KEY_BLOCK_LOG_DIMY 3 #define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX) #define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY) //#define KEY_OFFSET_ONE //make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced.. //but it seems uncoalesced writes don't affect the speed ////////////////////////////////////////////////////////// //thread block size for initializing list generation (64, 128, 256, 512 ...) #define HIST_INIT_WIDTH 128 //thread block size for generating feature list (32, 64, 128, 256, 512, ...) #define LISTGEN_BLOCK_DIM 128 ///////////////////////////////////////////////////////// //how many keypoint orientations to compute in a block #define ORIENTATION_COMPUTE_PER_BLOCK 64 //how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32) #define DESCRIPTOR_COMPUTE_PER_BLOCK 4 #define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK) //how many keypoint descriptor to normalized in a block (32, ...) #define DESCRIPTOR_NORMALIZ_PER_BLOCK 32 /////////////////////////////////////////// //Thread block size for visualization //(This doesn't affect the speed of computation) #define BLOCK_LOG_DIM 4 #define BLOCK_DIM (1 << BLOCK_LOG_DIM) //End SiftGPU setting section. 
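// ---------------------------------------------------------------------------
// Illustrative helpers (assumed, not part of SiftGPU) restating how the tile
// macros above turn into launch configurations. The horizontal pass uses one
// 1-D block of FILTERH_TILE_WIDTH threads per row tile; the vertical pass
// tiles the image into FILTERV_TILE_WIDTH x FILTERV_TILE_HEIGHT patches, with
// each thread producing FILTERV_PIXEL_PER_THREAD output pixels. This is the
// same arithmetic FilterImage() uses further down.
// ---------------------------------------------------------------------------
struct FilterLaunch { dim3 grid; dim3 block; };

static inline FilterLaunch MakeFilterHLaunch(int width, int height)
{
    FilterLaunch l;
    l.grid  = dim3((width + FILTERH_TILE_WIDTH - 1) / FILTERH_TILE_WIDTH, height);
    l.block = dim3(FILTERH_TILE_WIDTH);
    return l;
}

static inline FilterLaunch MakeFilterVLaunch(int width, int height)
{
    FilterLaunch l;
    l.grid  = dim3((width  + FILTERV_TILE_WIDTH  - 1) / FILTERV_TILE_WIDTH,
                   (height + FILTERV_TILE_HEIGHT - 1) / FILTERV_TILE_HEIGHT);
    l.block = dim3(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
    return l;
}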
//---------------------------------------------------------------- __device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH]; texture<float, 1, cudaReadModeElementType> texData; texture<unsigned char, 1, cudaReadModeNormalizedFloat> texDataB; texture<float2, 2, cudaReadModeElementType> texDataF2; texture<float4, 1, cudaReadModeElementType> texDataF4; texture<int4, 1, cudaReadModeElementType> texDataI4; texture<int4, 1, cudaReadModeElementType> texDataList; //template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];} //template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; } ////////////////////////////////////////////////////////////// template<int FW> __global__ void FilterH( float* d_result, int width) { const int HALF_WIDTH = FW >> 1; const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1; const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH; __shared__ float data[CACHE_WIDTH]; const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH); const int col = bcol + threadIdx.x; const int index_min = IMUL(blockIdx.y, width); const int index_max = index_min + width - 1; int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x; int cache_index = threadIdx.x; float value = 0; #pragma unroll for(int j = 0; j < CACHE_COUNT; ++j) { if(cache_index < CACHE_WIDTH) { int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index); data[cache_index] = tex1Dfetch(texData,fetch_index); src_index += FILTERH_TILE_WIDTH; cache_index += FILTERH_TILE_WIDTH; } } __syncthreads(); if(col >= width) return; #pragma unroll for(int i = 0; i < FW; ++i) { value += (data[threadIdx.x + i]* d_kernel[i]); } // value = Conv<FW-1>(data + threadIdx.x); d_result[index_min + col] = value; } //////////////////////////////////////////////////////////////////// template<int FW> __global__ void FilterV(float* d_result, int width, int height) { const int HALF_WIDTH = FW >> 1; const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1; const int TEMP = CACHE_WIDTH & 0xf; //add some extra space to avoid bank conflict #if FILTERV_TILE_WIDTH == 16 //make the stride 16 * n +/- 1 const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP; #elif FILTERV_TILE_WIDTH == 8 //make the stride 16 * n +/- 2 const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP); #elif FILTERV_TILE_WIDTH == 4 //make the stride 16 * n +/- 4 const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP); #else #error #endif const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA; const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT; const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT; __shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH]; const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT); const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x; const int row_first = row_block_first - HALF_WIDTH; const int data_index_max = IMUL(height - 1, width) + col; const int cache_col_start = threadIdx.y; const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH); int cache_index = cache_col_start + cache_row_start; int data_index = IMUL(row_first + cache_col_start, width) + col; if(col < width) { #pragma unroll for(int i = 0; i < CACHE_COUNT; ++i) { if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT) { int fetch_index = data_index < col ? col : (data_index > data_index_max? 
data_index_max : data_index); data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index); data_index += IMUL(FILTERV_BLOCK_HEIGHT, width); } } } __syncthreads(); if(col >= width) return; int row = row_block_first + threadIdx.y; int index_start = cache_row_start + threadIdx.y; #pragma unroll for(int i = 0; i < WRITE_COUNT; ++i, row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT) { if(row < height) { int index_dest = IMUL(row, width) + col; float value = 0; #pragma unroll for(int i = 0; i < FW; ++i) { value += (data[index_start + i] * d_kernel[i]); } d_result[index_dest] = value; } } } template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width) { const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1); const float INV_SCALE = 1.0f / (float(SCALE)); int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(col >= width) return; int row = blockIdx.y >> LOG_SCALE; int index = row * width + col; int dst_row = blockIdx.y; int dst_idx= (width * dst_row + col) * SCALE; int helper = blockIdx.y & SCALE_MASK; if (helper) { float v11 = tex1Dfetch(texData, index); float v12 = tex1Dfetch(texData, index + 1); index += width; float v21 = tex1Dfetch(texData, index); float v22 = tex1Dfetch(texData, index + 1); float w1 = INV_SCALE * helper, w2 = 1.0 - w1; float v1 = (v21 * w1 + w2 * v11); float v2 = (v22 * w1 + w2 * v12); d_result[dst_idx] = v1; #pragma unroll for(int i = 1; i < SCALE; ++i) { const float r2 = i * INV_SCALE; const float r1 = 1.0f - r2; d_result[dst_idx +i] = v1 * r1 + v2 * r2; } }else { float v1 = tex1Dfetch(texData, index); float v2 = tex1Dfetch(texData, index + 1); d_result[dst_idx] = v1; #pragma unroll for(int i = 1; i < SCALE; ++i) { const float r2 = i * INV_SCALE; const float r1 = 1.0f - r2; d_result[dst_idx +i] = v1 * r1 + v2 * r2; } } } //////////////////////////////////////////////////////////////////////////////////////// void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale) { int width = src->GetImgWidth(), height = src->GetImgHeight(); src->BindTexture(texData); dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale); dim3 block(FILTERH_TILE_WIDTH); switch(log_scale) { case 1 : UpsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, width); break; case 2 : UpsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, width); break; case 3 : UpsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, width); break; default: break; } } template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width) { const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(dst_col >= dst_width) return; const int src_col = min((dst_col << LOG_SCALE), (src_width - 1)); const int dst_row = blockIdx.y; const int src_row = blockIdx.y << LOG_SCALE; const int src_idx = IMUL(src_row, src_width) + src_col; const int dst_idx = IMUL(dst_width, dst_row) + dst_col; d_result[dst_idx] = tex1Dfetch(texData, src_idx); } __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale) { const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; if(dst_col >= dst_width) return; const int src_col = min((dst_col << log_scale), (src_width - 1)); const int dst_row = blockIdx.y; const int src_row = blockIdx.y << log_scale; const int src_idx = IMUL(src_row, src_width) + src_col; const int dst_idx = IMUL(dst_width, dst_row) + dst_col; d_result[dst_idx] = tex1Dfetch(texData, src_idx); } void 
ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale) { int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ; src->BindTexture(texData); dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight()); dim3 block(FILTERH_TILE_WIDTH); switch(log_scale) { case 1 : DownsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break; case 2 : DownsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break; case 3 : DownsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break; default: DownsampleKernel <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width, log_scale); } } __global__ void ChannelReduce_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; d_result[index] = tex1Dfetch(texData, index*4); } __global__ void ChannelReduce_Convert_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; float4 rgba = tex1Dfetch(texDataF4, index); d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z; } void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb) { int width = src->GetImgWidth(), height = dst->GetImgHeight() ; dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH); dim3 block(FILTERH_TILE_WIDTH); if(convert_rgb) { src->BindTexture(texDataF4); ChannelReduce_Convert_Kernel<<<grid, block>>>((float*)dst->_cuData); }else { src->BindTexture(texData); ChannelReduce_Kernel<<<grid, block>>>((float*)dst->_cuData); } } __global__ void ConvertByteToFloat_Kernel(float* d_result) { int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x; d_result[index] = tex1Dfetch(texDataB, index); } void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst) { int width = src->GetImgWidth(), height = dst->GetImgHeight() ; dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH); dim3 block(FILTERH_TILE_WIDTH); src->BindTexture(texDataB); ConvertByteToFloat_Kernel<<<grid, block>>>((float*)dst->_cuData); } void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width) { int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;// width = 2*sz + 1; if(width > KERNEL_MAX_WIDTH) { //filter size truncation sz = KERNEL_MAX_WIDTH >> 1; width =KERNEL_MAX_WIDTH; }else if(width < KERNEL_MIN_WIDTH) { sz = KERNEL_MIN_WIDTH >> 1; width =KERNEL_MIN_WIDTH; } float rv = 1.0f/(sigma*sigma), v, ksum =0; // pre-compute filter for( i = -sz ; i <= sz ; ++i) { kernel[i+sz] = v = exp(-0.5f * i * i *rv) ; ksum += v; } //normalize the kernel rv = 1.0f/ksum; for(i = 0; i< width ;i++) kernel[i]*=rv; } template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf) { int width = src->GetImgWidth(), height = src->GetImgHeight(); //horizontal filtering src->BindTexture(texData); dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height); dim3 blockh(FILTERH_TILE_WIDTH); FilterH<FW><<<gridh, blockh>>>((float*)buf->_cuData, width); CheckErrorCUDA("FilterH"); ///vertical filtering buf->BindTexture(texData); dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT); dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT); FilterV<FW><<<gridv, blockv>>>((float*)dst->_cuData, width, height); CheckErrorCUDA("FilterV"); } ////////////////////////////////////////////////////////////////////// // tested on 2048x1500 
image, the time on pyramid construction is // OpenGL version : 18ms // CUDA version: 28 ms void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma) { float filter_kernel[KERNEL_MAX_WIDTH]; int width; CreateFilterKernel(sigma, filter_kernel, width); cudaMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, cudaMemcpyHostToDevice); switch(width) { case 5: FilterImage< 5>(dst, src, buf); break; case 7: FilterImage< 7>(dst, src, buf); break; case 9: FilterImage< 9>(dst, src, buf); break; case 11: FilterImage<11>(dst, src, buf); break; case 13: FilterImage<13>(dst, src, buf); break; case 15: FilterImage<15>(dst, src, buf); break; case 17: FilterImage<17>(dst, src, buf); break; case 19: FilterImage<19>(dst, src, buf); break; case 21: FilterImage<21>(dst, src, buf); break; case 23: FilterImage<23>(dst, src, buf); break; case 25: FilterImage<25>(dst, src, buf); break; case 27: FilterImage<27>(dst, src, buf); break; case 29: FilterImage<29>(dst, src, buf); break; case 31: FilterImage<31>(dst, src, buf); break; case 33: FilterImage<33>(dst, src, buf); break; default: break; } } texture<float, 1, cudaReadModeElementType> texC; texture<float, 1, cudaReadModeElementType> texP; texture<float, 1, cudaReadModeElementType> texN; void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height) { int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x; if(col < width && row < height) { int index = IMUL(row, width) + col; float vp = tex1Dfetch(texP, index); float v = tex1Dfetch(texC, index); d_dog[index] = v - vp; float vxn = tex1Dfetch(texC, index + 1); float vxp = tex1Dfetch(texC, index - 1); float vyp = tex1Dfetch(texC, index - width); float vyn = tex1Dfetch(texC, index + width); float dx = vxn - vxp, dy = vyn - vyp; float grd = 0.5f * sqrt(dx * dx + dy * dy); float rot = (grd == 0.0f? 
0.0f : atan2(dy, dx)); d_got[index] = make_float2(grd, rot); } } void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height) { int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x; if(col < width && row < height) { int index = IMUL(row, width) + col; float vp = tex1Dfetch(texP, index); float v = tex1Dfetch(texC, index); d_dog[index] = v - vp; } } void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got) { int width = gus->GetImgWidth(), height = gus->GetImgHeight(); dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY); dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY); gus->BindTexture(texC); (gus -1)->BindTexture(texP); if(got->_cuData) ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, (float2*) got->_cuData, width, height); else ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, width, height); } #define READ_CMP_DOG_DATA(datai, tex, idx) \ datai[0] = tex1Dfetch(tex, idx - 1);\ datai[1] = tex1Dfetch(tex, idx);\ datai[2] = tex1Dfetch(tex, idx + 1);\ if(v > nmax)\ {\ nmax = max(nmax, datai[0]);\ nmax = max(nmax, datai[1]);\ nmax = max(nmax, datai[2]);\ if(v < nmax) goto key_finish;\ }else\ {\ nmin = min(nmin, datai[0]);\ nmin = min(nmin, datai[1]);\ nmin = min(nmin, datai[2]);\ if(v > nmin) goto key_finish;\ } void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax, float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization) { float data[3][3], v; float datap[3][3], datan[3][3]; #ifdef KEY_OFFSET_ONE int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1; int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1; #else int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y; int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x; #endif int index = IMUL(row, width) + col; int idx[3] ={index - width, index, index + width}; int in_image =0; float nmax, nmin, result = 0.0f; float dx = 0, dy = 0, ds = 0; bool offset_test_passed = true; #ifdef KEY_OFFSET_ONE if(row < rowmax && col < colmax) #else if(row > 0 && col > 0 && row < rowmax && col < colmax) #endif { in_image = 1; data[1][1] = v = tex1Dfetch(texC, idx[1]); if(fabs(v) <= dog_threshold0) goto key_finish; data[1][0] = tex1Dfetch(texC, idx[1] - 1); data[1][2] = tex1Dfetch(texC, idx[1] + 1); nmax = max(data[1][0], data[1][2]); nmin = min(data[1][0], data[1][2]); if(v <=nmax && v >= nmin) goto key_finish; //if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish; READ_CMP_DOG_DATA(data[0], texC, idx[0]); READ_CMP_DOG_DATA(data[2], texC, idx[2]); //edge supression float vx2 = v * 2.0f; float fxx = data[1][0] + data[1][2] - vx2; float fyy = data[0][1] + data[2][1] - vx2; float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]); float temp1 = fxx * fyy - fxy * fxy; float temp2 = (fxx + fyy) * (fxx + fyy); if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish; //read the previous level READ_CMP_DOG_DATA(datap[0], texP, idx[0]); READ_CMP_DOG_DATA(datap[1], texP, idx[1]); READ_CMP_DOG_DATA(datap[2], texP, idx[2]); //read the next level READ_CMP_DOG_DATA(datan[0], texN, idx[0]); READ_CMP_DOG_DATA(datan[1], texN, idx[1]); READ_CMP_DOG_DATA(datan[2], texN, idx[2]); if(subpixel_localization) { //subpixel localization float fx = 0.5f * (data[1][2] - data[1][0]); float fy = 0.5f * (data[2][1] - data[0][1]); float fs = 0.5f * (datan[1][1] - datap[1][1]); float fss = (datan[1][1] + datap[1][1] - 
vx2); float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]); float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]); //need to solve dx, dy, ds; // |-fx| | fxx fxy fxs | |dx| // |-fy| = | fxy fyy fys | * |dy| // |-fs| | fxs fys fss | |ds| float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx); float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy); float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs); float maxa = max(max(A0.x, A1.x), A2.x); if(maxa >= 1e-10) { if(maxa == A1.x) { float4 TEMP = A1; A1 = A0; A0 = TEMP; }else if(maxa == A2.x) { float4 TEMP = A2; A2 = A0; A0 = TEMP; } A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x; A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w; A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w; if(abs(A2.y) > abs(A1.y)) { float4 TEMP = A2; A2 = A1; A1 = TEMP; } if(abs(A1.y) >= 1e-10) { A1.z /= A1.y; A1.w /= A1.y; A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w; if(abs(A2.z) >= 1e-10) { ds = A2.w / A2.z; dy = A1.w - ds * A1.z; dx = A0.w - ds * A0.z - dy * A0.y; offset_test_passed = fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold &&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f; } } } } if(offset_test_passed) result = v > nmax ? 1.0 : -1.0; } key_finish: if(in_image) d_key[index] = make_float4(result, dx, dy, ds); } void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge) { int width = dog->GetImgWidth(), height = dog->GetImgHeight(); float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog; CuTexImage* dogp = dog - 1; CuTexImage* dogn = dog + 1; #ifdef KEY_OFFSET_ONE dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY); #else dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY); #endif dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY); dogp->BindTexture(texP); dog ->BindTexture(texC); dogn->BindTexture(texN); Tedge = (Tedge+1)*(Tedge+1)/Tedge; ComputeKEY_Kernel<<<grid, block>>>((float4*) key->_cuData, width, width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization); } void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height) { int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y; int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(row < height && col < wd) { int hidx = IMUL(row, wd) + col; int scol = col << 2; int sidx = IMUL(row, ws) + scol; int v[4] = {0, 0, 0, 0}; if(row > 0 && row < height -1) { #pragma unroll for(int i = 0; i < 4 ; ++i, ++scol) { float4 temp = tex1Dfetch(texDataF4, sidx +i); v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 
1 : 0; } } hist[hidx] = make_int4(v[0], v[1], v[2], v[3]); } } void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist) { int ws = key->GetImgWidth(), hs = key->GetImgHeight(); int wd = hist->GetImgWidth(), hd = hist->GetImgHeight(); dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd); dim3 block(HIST_INIT_WIDTH, 1); key->BindTexture(texDataF4); InitHist_Kernel<<<grid, block>>>((int4*) hist->_cuData, ws, wd, hd); } void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height) { int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y; int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(row < height && col < wd) { int hidx = IMUL(row, wd) + col; int scol = col << 2; int sidx = IMUL(row, ws) + scol; int v[4] = {0, 0, 0, 0}; #pragma unroll for(int i = 0; i < 4 && scol < ws; ++i, ++scol) { int4 temp = tex1Dfetch(texDataI4, sidx + i); v[i] = temp.x + temp.y + temp.z + temp.w; } d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]); } } void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2) { int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight(); int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight(); int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f)); const int wi = min(7, max(temp , 0)); hist1->BindTexture(texDataI4); const int BW = 1 << wi, BH = 1 << (7 - wi); dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH); dim3 block(BW, BH); ReduceHist_Kernel<<<grid, block>>>((int4*)hist2->_cuData, ws, wd, hd); } void __global__ ListGen_Kernel(int4* d_list, int width) { int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int4 pos = tex1Dfetch(texDataList, idx1); int idx2 = IMUL(pos.y, width) + pos.x; int4 temp = tex1Dfetch(texDataI4, idx2); int sum1 = temp.x + temp.y; int sum2 = sum1 + temp.z; pos.x <<= 2; if(pos.z >= sum2) { pos.x += 3; pos.z -= sum2; }else if(pos.z >= sum1) { pos.x += 2; pos.z -= sum1; }else if(pos.z >= temp.x) { pos.x += 1; pos.z -= temp.x; } d_list[idx1] = pos; } //input list (x, y) (x, y) .... 
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist) { int len = list->GetImgWidth(); list->BindTexture(texDataList); hist->BindTexture(texDataI4); dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM); dim3 block(LISTGEN_BLOCK_DIM); ListGen_Kernel<<<grid, block>>>((int4*) list->_cuData, hist->GetImgWidth()); } void __global__ ComputeOrientation_Kernel(float4* d_list, int list_len, int width, int height, float sigma, float sigma_step, float gaussian_factor, float sample_factor, int num_orientation, int existing_keypoint, int subpixel, int keepsign) { const float ten_degree_per_radius = 5.7295779513082320876798154814105; const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105; int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x; if(idx >= list_len) return; float4 key; if(existing_keypoint) { key = tex1Dfetch(texDataF4, idx); }else { int4 ikey = tex1Dfetch(texDataList, idx); key.x = ikey.x + 0.5f; key.y = ikey.y + 0.5f; key.z = sigma; if(subpixel || keepsign) { float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x); if(subpixel) { key.x += offset.y; key.y += offset.z; key.z *= pow(sigma_step, offset.w); } if(keepsign) key.z *= offset.x; } } if(num_orientation == 0) { key.w = 0; d_list[idx] = key; return; } float vote[37]; float gsigma = key.z * gaussian_factor; float win = fabs(key.z) * sample_factor; float dist_threshold = win * win + 0.5; float factor = -0.5f / (gsigma * gsigma); float xmin = max(1.5f, floor(key.x - win) + 0.5f); float ymin = max(1.5f, floor(key.y - win) + 0.5f); float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f); float ymax = min(height -1.5f, floor(key.y + win) + 0.5f); #pragma unroll for(int i = 0; i < 36; ++i) vote[i] = 0.0f; for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float dx = x - key.x; float dy = y - key.y; float sq_dist = dx * dx + dy * dy; if(sq_dist >= dist_threshold) continue; float2 got = tex2D(texDataF2, x, y); float weight = got.x * exp(sq_dist * factor); float fidx = floor(got.y * ten_degree_per_radius); int oidx = fidx; if(oidx < 0) oidx += 36; vote[oidx] += weight; } } //filter the vote const float one_third = 1.0 /3.0; #pragma unroll for(int i = 0; i < 6; ++i) { vote[36] = vote[0]; float pre = vote[35]; #pragma unroll for(int j = 0; j < 36; ++j) { float temp = one_third * (pre + vote[j] + vote[j + 1]); pre = vote[j]; vote[j] = temp; } } vote[36] = vote[0]; if(num_orientation == 1 || existing_keypoint) { int index_max = 0; float max_vote = vote[0]; #pragma unroll for(int i = 1; i < 36; ++i) { index_max = vote[i] > max_vote? i : index_max; max_vote = max(max_vote, vote[i]); } float pre = vote[index_max == 0? 
35 : index_max -1]; float next = vote[index_max + 1]; float weight = max_vote; float off = 0.5f * FDIV(next - pre, weight + weight - next - pre); key.w = radius_per_ten_degrees * (index_max + 0.5f + off); d_list[idx] = key; }else { float max_vote = vote[0]; #pragma unroll for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]); float vote_threshold = max_vote * 0.8f; float pre = vote[35]; float max_rot[2], max_vot[2] = {0, 0}; int ocount = 0; #pragma unroll for(int i =0; i < 36; ++i) { float next = vote[i + 1]; if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next) { float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre); float rot = i + di + 0.5f; float weight = vote[i]; /// if(weight > max_vot[1]) { if(weight > max_vot[0]) { max_vot[1] = max_vot[0]; max_rot[1] = max_rot[0]; max_vot[0] = weight; max_rot[0] = rot; } else { max_vot[1] = weight; max_rot[1] = rot; } ocount ++; } } pre = vote[i]; } float fr1 = max_rot[0] / 36.0f; if(fr1 < 0) fr1 += 1.0f; unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f)); unsigned short us2 = 65535; if(ocount > 1) { float fr2 = max_rot[1] / 36.0f; if(fr2 < 0) fr2 += 1.0f; us2 = (unsigned short ) floor(fr2 * 65535.0f); } unsigned int uspack = (us2 << 16) | us1; key.w = __int_as_float(uspack); d_list[idx] = key; } } void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key, float sigma, float sigma_step, int existing_keypoint) { int len = list->GetImgWidth(); if(len <= 0) return; int width = got->GetImgWidth(), height = got->GetImgHeight(); if(existing_keypoint) { list->BindTexture(texDataF4); }else { list->BindTexture(texDataList); if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4); } got->BindTexture2D(texDataF2); const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK; dim3 grid((len + block_width -1) / block_width); dim3 block(block_width); ComputeOrientation_Kernel<<<grid, block>>>((float4*) list->_cuData, len, width, height, sigma, sigma_step, GlobalUtil::_OrientationGaussianFactor, GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor, GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation, existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign); ProgramCU::CheckErrorCUDA("ComputeOrientation"); } template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num, int width, int height, float window_factor) { const float rpi = 4.0/ 3.14159265358979323846; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int fidx = idx >> 4; if(fidx >= num) return; float4 key = tex1Dfetch(texDataF4, fidx); int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2; float spt = fabs(key.z * window_factor); float s, c; __sincosf(key.w, &s, &c); float anglef = key.w > 3.14159265358979323846? 
key.w - (2.0 * 3.14159265358979323846) : key.w ; float cspt = c * spt, sspt = s * spt; float crspt = c / spt, srspt = s / spt; float2 offsetpt, pt; float xmin, ymin, xmax, ymax, bsz; offsetpt.x = ix - 1.5f; offsetpt.y = iy - 1.5f; pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x; pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y; bsz = fabs(cspt) + fabs(sspt); xmin = max(1.5f, floor(pt.x - bsz) + 0.5f); ymin = max(1.5f, floor(pt.y - bsz) + 0.5f); xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f); ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f); float des[9]; #pragma unroll for(int i =0; i < 9; ++i) des[i] = 0.0f; for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float dx = x - pt.x; float dy = y - pt.y; float nx = crspt * dx + srspt * dy; float ny = crspt * dy - srspt * dx; float nxn = fabs(nx); float nyn = fabs(ny); if(nxn < 1.0f && nyn < 1.0f) { float2 cc = tex2D(texDataF2, x, y); float dnx = nx + offsetpt.x; float dny = ny + offsetpt.y; float ww = exp(-0.125f * (dnx * dnx + dny * dny)); float wx = 1.0 - nxn; float wy = 1.0 - nyn; float weight = ww * wx * wy * cc.x; float theta = (anglef - cc.y) * rpi; if(theta < 0) theta += 8.0f; float fo = floor(theta); int fidx = fo; float weight1 = fo + 1.0f - theta; float weight2 = theta - fo; if(DYNAMIC_INDEXING) { des[fidx] += (weight1 * weight); des[fidx + 1] += (weight2 * weight); //this dynamic indexing part might be slow }else { #pragma unroll for(int k = 0; k < 8; ++k) { if(k == fidx) { des[k] += (weight1 * weight); des[k+1] += (weight2 * weight); } } } } } } des[0] += des[8]; int didx = idx << 1; d_des[didx] = make_float4(des[0], des[1], des[2], des[3]); d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]); } template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num, int width, int height, float window_factor) { const float rpi = 4.0/ 3.14159265358979323846; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; int fidx = idx >> 4; if(fidx >= num) return; float4 key = tex1Dfetch(texDataF4, fidx); int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2; //float aspect_ratio = key.w / key.z; //float aspect_sq = aspect_ratio * aspect_ratio; float sptx = key.z * 0.25, spty = key.w * 0.25; float xmin, ymin, xmax, ymax; float2 pt; pt.x = sptx * (ix + 0.5f) + key.x; pt.y = spty * (iy + 0.5f) + key.y; xmin = max(1.5f, floor(pt.x - sptx) + 0.5f); ymin = max(1.5f, floor(pt.y - spty) + 0.5f); xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f); ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f); float des[9]; #pragma unroll for(int i =0; i < 9; ++i) des[i] = 0.0f; for(float y = ymin; y <= ymax; y += 1.0f) { for(float x = xmin; x <= xmax; x += 1.0f) { float nx = (x - pt.x) / sptx; float ny = (y - pt.y) / spty; float nxn = fabs(nx); float nyn = fabs(ny); if(nxn < 1.0f && nyn < 1.0f) { float2 cc = tex2D(texDataF2, x, y); float wx = 1.0 - nxn; float wy = 1.0 - nyn; float weight = wx * wy * cc.x; float theta = (- cc.y) * rpi; if(theta < 0) theta += 8.0f; float fo = floor(theta); int fidx = fo; float weight1 = fo + 1.0f - theta; float weight2 = theta - fo; if(DYNAMIC_INDEXING) { des[fidx] += (weight1 * weight); des[fidx + 1] += (weight2 * weight); //this dynamic indexing part might be slow }else { #pragma unroll for(int k = 0; k < 8; ++k) { if(k == fidx) { des[k] += (weight1 * weight); des[k+1] += (weight2 * weight); } } } } } } des[0] += des[8]; int didx = idx << 1; d_des[didx] = make_float4(des[0], des[1], des[2], des[3]); d_des[didx+1] = make_float4(des[4], 
des[5], des[6], des[7]); } void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num) { float4 temp[32]; int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; int sidx = idx << 5; float norm1 = 0, norm2 = 0; #pragma unroll for(int i = 0; i < 32; ++i) { temp[i] = tex1Dfetch(texDataF4, sidx +i); norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y + temp[i].z * temp[i].z + temp[i].w * temp[i].w); } norm1 = rsqrt(norm1); #pragma unroll for(int i = 0; i < 32; ++i) { temp[i].x = min(0.2f, temp[i].x * norm1); temp[i].y = min(0.2f, temp[i].y * norm1); temp[i].z = min(0.2f, temp[i].z * norm1); temp[i].w = min(0.2f, temp[i].w * norm1); norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y + temp[i].z * temp[i].z + temp[i].w * temp[i].w); } norm2 = rsqrt(norm2); #pragma unroll for(int i = 0; i < 32; ++i) { temp[i].x *= norm2; temp[i].y *= norm2; temp[i].z *= norm2; temp[i].w *= norm2; d_des[sidx + i] = temp[i]; } } void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream) { int num = list->GetImgWidth(); int width = got->GetImgWidth(); int height = got->GetImgHeight(); dtex->InitTexture(num * 128, 1, 1); got->BindTexture2D(texDataF2); list->BindTexture(texDataF4); int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE; dim3 grid((num * 16 + block_width -1) / block_width); dim3 block(block_width); if(rect) { if(GlobalUtil::_UseDynamicIndexing) ComputeDescriptorRECT_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); else ComputeDescriptorRECT_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); }else { if(GlobalUtil::_UseDynamicIndexing) ComputeDescriptor_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); else ComputeDescriptor_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor); } if(GlobalUtil::_NormalizedSIFT) { dtex->BindTexture(texDataF4); const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK; dim3 grid((num + block_width -1) / block_width); dim3 block(block_width); NormalizeDescriptor_Kernel<<<grid, block>>>((float4*) dtex->_cuData, num); } CheckErrorCUDA("ComputeDescriptor"); } ////////////////////////////////////////////////////// void ProgramCU::FinishCUDA() { cudaThreadSynchronize(); } int ProgramCU::CheckErrorCUDA(const char* location) { cudaError_t e = cudaGetLastError(); if(e) { if(location) fprintf(stderr, "%s:\t", location); fprintf(stderr, "%s\n", cudaGetErrorString(e)); //assert(0); return 1; }else { return 0; } } void __global__ ConvertDOG_Kernel(float* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float v = tex1Dfetch(texData, index); d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)? 
0.5 : saturate(0.5+20.0*v); } } /// void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out) { if(out->_cuData == NULL) return; int width = dog->GetImgWidth(), height = dog ->GetImgHeight(); dog->BindTexture(texData); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); ConvertDOG_Kernel<<<grid, block>>>((float*) out->_cuData, width, height); ProgramCU::CheckErrorCUDA("DisplayConvertDOG"); } void __global__ ConvertGRD_Kernel(float* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float v = tex1Dfetch(texData, index << 1); d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)? 0 : saturate(5 * v); } } void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out) { if(out->_cuData == NULL) return; int width = got->GetImgWidth(), height = got ->GetImgHeight(); got->BindTexture(texData); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); ConvertGRD_Kernel<<<grid, block>>>((float*) out->_cuData, width, height); ProgramCU::CheckErrorCUDA("DisplayConvertGRD"); } void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height) { int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y; int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x; if(col < width && row < height) { int index = row * width + col; float4 keyv = tex1Dfetch(texDataF4, index); int is_key = (keyv.x == 1.0f || keyv.x == -1.0f); int inside = col > 0 && row > 0 && row < height -1 && col < width - 1; float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5; d_result[index] = is_key && inside ? (keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)): make_float4(v, v, v, 1.0f) ; } } void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out) { if(out->_cuData == NULL) return; int width = key->GetImgWidth(), height = key ->GetImgHeight(); dog->BindTexture(texData); key->BindTexture(texDataF4); dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); ConvertKEY_Kernel<<<grid, block>>>((float4*) out->_cuData, width, height); } void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num) { int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; float4 v = tex1Dfetch(texDataF4, idx); d_result[idx] = make_float4(v.x, v.y, 0, 1.0f); } void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out) { int num = ftex->GetImgWidth(); int block_width = 64; dim3 grid((num + block_width -1) /block_width); dim3 block(block_width); ftex->BindTexture(texDataF4); DisplayKeyPoint_Kernel<<<grid, block>>>((float4*) out->_cuData, num); ProgramCU::CheckErrorCUDA("DisplayKeyPoint"); } void __global__ DisplayKeyBox_Kernel(float4* d_result, int num) { int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x; if(idx >= num) return; int kidx = idx / 10, vidx = idx - IMUL(kidx , 10); float4 v = tex1Dfetch(texDataF4, kidx); float sz = fabs(v.z * 3.0f); /////////////////////// float s, c; __sincosf(v.w, &s, &c); /////////////////////// float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz); float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? 
-sz : sz); float4 pos; pos.x = v.x + c * dx - s * dy; pos.y = v.y + c * dy + s * dx; pos.z = 0; pos.w = 1.0f; d_result[idx] = pos; } void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out) { int len = ftex->GetImgWidth(); int block_width = 32; dim3 grid((len * 10 + block_width -1) / block_width); dim3 block(block_width); ftex->BindTexture(texDataF4); DisplayKeyBox_Kernel<<<grid, block>>>((float4*) out->_cuData, len * 10); } /////////////////////////////////////////////////////////////////// inline void CuTexImage:: BindTexture(textureReference& texRef) { cudaBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes); } inline void CuTexImage::BindTexture2D(textureReference& texRef) { #if defined(SIFTGPU_ENABLE_LINEAR_TEX2D) cudaBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float)); #else cudaChannelFormatDesc desc; cudaGetChannelDesc(&desc, _cuData2D); cudaBindTextureToArray(&texRef, _cuData2D, &desc); #endif } int ProgramCU::CheckCudaDevice(int device) { int count = 0, device_used; if(cudaGetDeviceCount(&count) != cudaSuccess || count <= 0) { ProgramCU::CheckErrorCUDA("CheckCudaDevice"); return 0; }else if(count == 1) { cudaDeviceProp deviceProp; if ( cudaGetDeviceProperties(&deviceProp, 0) != cudaSuccess || (deviceProp.major == 9999 && deviceProp.minor == 9999)) { fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n"); return 0; }else { GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024; GlobalUtil::_texMaxDimGL = 32768; if(GlobalUtil::_verbose) fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL); } } if(device >0 && device < count) { cudaSetDevice(device); CheckErrorCUDA("cudaSetDevice\n"); } cudaGetDevice(&device_used); if(device != device_used) fprintf(stderr, "\nERROR: Cannot set device to %d\n" "\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count); return 1; } //////////////////////////////////////////////////////////////////////////////////////// // siftmatch funtions ////////////////////////////////////////////////////////////////////////////////////////// #define MULT_TBLOCK_DIMX 128 #define MULT_TBLOCK_DIMY 1 #define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX) #define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY) texture<uint4, 1, cudaReadModeElementType> texDes1; texture<uint4, 1, cudaReadModeElementType> texDes2; void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp) { int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX); int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x; __shared__ int data1[17 * 2 * MULT_BLOCK_DIMY]; int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8; int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2; int cache_idx1 = IMUL(row4, 17) + (col4 << 2); /////////////////////////////////////////////////////////////// //Load feature descriptors /////////////////////////////////////////////////////////////// #if MULT_BLOCK_DIMY == 16 uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; #elif MULT_BLOCK_DIMY == 8 if(threadIdx.x < 64) { uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; } #else #error #endif __syncthreads(); /// if(idx2 >= num2) return; /////////////////////////////////////////////////////////////////////////// //compare 
descriptors int results[MULT_BLOCK_DIMY]; #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0; #pragma unroll for(int i = 0; i < 8; ++i) { uint4 v = tex1Dfetch(texDes2, read_idx2 + i); unsigned char* p2 = (unsigned char*)(&v); #pragma unroll for(int k = 0; k < MULT_BLOCK_DIMY; ++k) { unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4)); results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1]) + IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3]) + IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5]) + IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7]) + IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9]) + IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11]) + IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13]) + IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15])); } } int dst_idx = IMUL(idx1, num2) + idx2; if(d_temp) { int3 cmp_result = make_int3(0, -1, 0); #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) { cmp_result = results[i] > cmp_result.x? make_int3(results[i], idx1 + i, cmp_result.x) : make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i])); d_result[dst_idx + IMUL(i, num2)] = results[i]; } } d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result; }else { #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i]; } } } void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT) { int num1 = des1->GetImgWidth() / 8; int num2 = des2->GetImgWidth() / 8; dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY); dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY); texDot->InitTexture( num2,num1); if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32); des1->BindTexture(texDes1); des2->BindTexture(texDes2); MultiplyDescriptor_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2, (texCRT? 
(int3*)texCRT->_cuData : NULL)); ProgramCU::CheckErrorCUDA("MultiplyDescriptor"); } texture<float, 1, cudaReadModeElementType> texLoc1; texture<float2, 1, cudaReadModeElementType> texLoc2; struct Matrix33{float mat[3][3];}; void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp, Matrix33 H, float hdistmax, Matrix33 F, float fdistmax) { int idx01 = (blockIdx.y * MULT_BLOCK_DIMY); int idx02 = (blockIdx.x * MULT_BLOCK_DIMX); int idx1 = idx01 + threadIdx.y; int idx2 = idx02 + threadIdx.x; __shared__ int data1[17 * 2 * MULT_BLOCK_DIMY]; __shared__ float loc1[MULT_BLOCK_DIMY * 2]; int read_idx1 = idx01 * 8 + threadIdx.x ; int read_idx2 = idx2 * 8; int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2; int cache_idx1 = IMUL(row4, 17) + (col4 << 2); #if MULT_BLOCK_DIMY == 16 uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; #elif MULT_BLOCK_DIMY == 8 if(threadIdx.x < 64) { uint4 v = tex1Dfetch(texDes1, read_idx1); data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y; data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w; } #else #error #endif __syncthreads(); if(threadIdx.x < MULT_BLOCK_DIMY * 2) { loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x); } __syncthreads(); if(idx2 >= num2) return; int results[MULT_BLOCK_DIMY]; ///////////////////////////////////////////////////////////////////////////////////////////// //geometric verification ///////////////////////////////////////////////////////////////////////////////////////////// int good_count = 0; float2 loc2 = tex1Dfetch(texLoc2, idx2); #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) { float* loci = loc1 + i * 2; float locx = loci[0], locy = loci[1]; //homography float x[3], diff[2]; x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2]; x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2]; x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2]; diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x); diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y); if(diff[0] < hdistmax && diff[1] < hdistmax) { //check fundamental matrix float fx1[3], ftx2[3], x2fx1, se; fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2]; fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2]; fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2]; ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0]; ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1]; //ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2]; x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2]; se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]); results[i] = se < fdistmax? 
0: -262144; }else { results[i] = -262144; } }else { results[i] = -262144; } good_count += (results[i] >=0); } ///////////////////////////////////////////////////////////////////////////////////////////// ///compare feature descriptors anyway ///////////////////////////////////////////////////////////////////////////////////////////// if(good_count > 0) { #pragma unroll for(int i = 0; i < 8; ++i) { uint4 v = tex1Dfetch(texDes2, read_idx2 + i); unsigned char* p2 = (unsigned char*)(&v); #pragma unroll for(int k = 0; k < MULT_BLOCK_DIMY; ++k) { unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4)); results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1]) + IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3]) + IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5]) + IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7]) + IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9]) + IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11]) + IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13]) + IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15])); } } } int dst_idx = IMUL(idx1, num2) + idx2; if(d_temp) { int3 cmp_result = make_int3(0, -1, 0); #pragma unroll for(int i= 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) { cmp_result = results[i] > cmp_result.x? make_int3(results[i], idx1 + i, cmp_result.x) : make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i])); d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0); }else { break; } } d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result; }else { #pragma unroll for(int i = 0; i < MULT_BLOCK_DIMY; ++i) { if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0); else break; } } } void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2, CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT, float H[3][3], float hdistmax, float F[3][3], float fdistmax) { int num1 = des1->GetImgWidth() / 8; int num2 = des2->GetImgWidth() / 8; Matrix33 MatF, MatH; //copy the matrix memcpy(MatF.mat, F, 9 * sizeof(float)); memcpy(MatH.mat, H, 9 * sizeof(float)); //thread blocks dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY); dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY); //intermediate results texDot->InitTexture( num2,num1); if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3); loc1->BindTexture(texLoc1); loc2->BindTexture(texLoc2); des1->BindTexture(texDes1); des2->BindTexture(texDes2); MultiplyDescriptorG_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2, (texCRT? 
(int3*)texCRT->_cuData : NULL), MatH, hdistmax, MatF, fdistmax); } texture<int, 1, cudaReadModeElementType> texDOT; #define ROWMATCH_BLOCK_WIDTH 32 #define ROWMATCH_BLOCK_HEIGHT 1 void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax) { #if ROWMATCH_BLOCK_HEIGHT == 1 __shared__ int dotmax[ROWMATCH_BLOCK_WIDTH]; __shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH]; __shared__ int dotidx[ROWMATCH_BLOCK_WIDTH]; int row = blockIdx.y; #else __shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; __shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; __shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH]; int* dotmax = x_dotmax[threadIdx.y]; int* dotnxt = x_dotnxt[threadIdx.y]; int* dotidx = x_dotidx[threadIdx.y]; int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y; #endif int base_address = IMUL(row , num2); int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1; for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH) { if(threadIdx.x + i < num2) { int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];// bool test = v > t_dotmax; t_dotnxt = test? t_dotmax : max(t_dotnxt, v); t_dotidx = test? (threadIdx.x + i) : t_dotidx; t_dotmax = test? v: t_dotmax; } __syncthreads(); } dotmax[threadIdx.x] = t_dotmax; dotnxt[threadIdx.x] = t_dotnxt; dotidx[threadIdx.x] = t_dotidx; __syncthreads(); #pragma unroll for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2) { if(threadIdx.x < step) { int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step]; bool test = v2 > v1; dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2); dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x]; dotmax[threadIdx.x] = test? v2 : v1; } __syncthreads(); } if(threadIdx.x == 0) { float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0)); float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0)); //float ratio = dist / distn; d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1; } } void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax) { int num1 = texDot->GetImgHeight(); int num2 = texDot->GetImgWidth(); dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT); dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT); texDot->BindTexture(texDOT); RowMatch_Kernel<<<grid, block>>>((int*)texDot->_cuData, (int*)texMatch->_cuData, num2, distmax, ratiomax); } #define COLMATCH_BLOCK_WIDTH 32 //texture<int3, 1, cudaReadModeElementType> texCT; void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax) { int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x; if(col >= num2) return; int3 result = d_crt[col];//tex1Dfetch(texCT, col); int read_idx = col + num2; for(int i = 1; i < height; ++i, read_idx += num2) { int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx); result = result.x < temp.x? make_int3(temp.x, temp.y, max(result.x, temp.z)) : make_int3(result.x, result.y, max(result.z, temp.x)); } float dist = acos(min(result.x * 0.000003814697265625f, 1.0)); float distn = acos(min(result.z * 0.000003814697265625f, 1.0)); //float ratio = dist / distn; d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? 
: -1; } void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax) { int height = texCRT->GetImgHeight(); int num2 = texCRT->GetImgWidth(); //texCRT->BindTexture(texCT); dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH); dim3 block(COLMATCH_BLOCK_WIDTH); ColMatch_Kernel<<<grid, block>>>((int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax); } #endif
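// ---------------------------------------------------------------------------
// A short standalone sketch (assumed helper, not SiftGPU code) of the match
// acceptance rule used by RowMatch_Kernel and ColMatch_Kernel above: the
// integer dot products are multiplied by 0.000003814697265625f, i.e.
// 1/262144 = 1/(512*512), converted to an angular distance with acos(), and a
// candidate is kept only if the best distance beats both an absolute
// threshold and a ratio test against the second-best distance.
// ---------------------------------------------------------------------------
#include <cmath>
#include <algorithm>

// best_dot / next_dot: largest and second-largest integer dot products of one
// descriptor against all candidates, as accumulated by the kernels above.
inline int AcceptMatch(int best_dot, int next_dot, int best_index,
                       float distmax, float ratiomax)
{
    const float scale = 1.0f / 262144.0f;   // == 0.000003814697265625f
    float dist  = std::acos(std::min(best_dot * scale, 1.0f));
    float distn = std::acos(std::min(next_dot * scale, 1.0f));
    return (dist < distmax && dist < distn * ratiomax) ? best_index : -1;
}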
#include "flowfilter/gpu/util.h" #include "flowfilter/gpu/error.h" #include "flowfilter/gpu/flowfilter.h" namespace flowfilter { namespace gpu { FlowFilter::FlowFilter() : Stage() { __height = 0; __width = 0; __configured = false; __inputImageSet = false; } FlowFilter::FlowFilter(flowfilter::gpu::GPUImage inputImage) : Stage() { setInputImage(inputImage); configure(); } FlowFilter::FlowFilter(const int height, const int width) : FlowFilter(height, width, 1, 1.0, 1.0) { } FlowFilter::FlowFilter(const int height, const int width, const int smoothIterations, const float maxflow, const float gamma) : Stage() { if(height <= 0) { std::cerr << "ERROR: FlowFilter::FlowFilter(): height should be greater than zero: " << height << std::endl; throw std::invalid_argument("FlowFilter::FlowFilter(): height should be greater than zero, got: " + std::to_string(height)); } if(width <= 0) { std::cerr << "ERROR: FlowFilter::FlowFilter(): width should be greater than zero: " << width << std::endl; throw std::invalid_argument("FlowFilter::FlowFilter(): width should be greater than zero, got: " + std::to_string(width)); } // __height = height; // __width = width; __height = 0; __width = 0; __configured = false; __inputImageSet = false; // creates a GPUImage for storing input image internally GPUImage inputImage = GPUImage(height, width, 1, sizeof(unsigned char)); setInputImage(inputImage); configure(); setGamma(gamma); setMaxFlow(maxflow); setSmoothIterations(smoothIterations); } FlowFilter::~FlowFilter() { // nothing to do } void FlowFilter::configure() { if(!__inputImageSet) { std::cerr << "ERROR: FlowFilter::configure(): input image has not been set" << std::endl; throw std::logic_error("FlowFilter::configure(): input image has not been set"); } // connect the blocks __imageModel = ImageModel(__inputImage); // dummy flow field use to instanciate the update block // This is necessary to break the circular dependency // between propagation and update blocks. GPUImage dummyFlow(__height, __width, 2, sizeof(float)); // FIXME: find good default values __update = FlowUpdate(dummyFlow, __imageModel.getImageConstant(), __imageModel.getImageGradient(), 1.0, 1.0); __smoother = FlowSmoother(__update.getUpdatedFlow(), 1); __propagator = FlowPropagator(__smoother.getSmoothedFlow(), 1); // set the input flow of the update block to the output // of the propagator. 
This replaces dummyFlow previously // assigned to the update __update.setInputFlow(__propagator.getPropagatedFlow()); // clear buffers __propagator.getPropagatedFlow().clear(); __update.getUpdatedFlow().clear(); __update.getUpdatedImage().clear(); __smoother.getSmoothedFlow().clear(); __configured = true; __firstLoad = true; } void FlowFilter::compute() { startTiming(); // compute image model __imageModel.compute(); if(__firstLoad) { std::cout << "FlowFilter::compute(): fisrt load" << std::endl; // set the old image value to current // computed constant brightness parameter GPUImage imConstant = __imageModel.getImageConstant(); __update.getUpdatedImage().copyFrom(imConstant); __firstLoad = false; } // propagate old flow __propagator.compute(); // update __update.compute(); // smooth updated flow __smoother.compute(); stopTiming(); } void FlowFilter::computeImageModel() { startTiming(); __imageModel.compute(); stopTiming(); } void FlowFilter::computePropagation() { startTiming(); __propagator.compute(); stopTiming(); } void FlowFilter::computeUpdate() { startTiming(); if(__firstLoad) { std::cout << "FlowFilter::compute(): fisrt load" << std::endl; // set the old image value to current // computed constant brightness parameter GPUImage imConstant = __imageModel.getImageConstant(); __update.getUpdatedImage().copyFrom(imConstant); __firstLoad = false; } // update __update.compute(); // smooth updated flow __smoother.compute(); stopTiming(); } void FlowFilter::setInputImage(GPUImage inputImage) { if(inputImage.depth() != 1) { std::cerr << "ERROR: FlowFilter::setInputImage(): input image should have depth 1: " << inputImage.depth() << std::endl; throw std::invalid_argument("FlowFilter::setInputImage(): input image should have depth 1, got: " + std::to_string(inputImage.depth())); } if(inputImage.itemSize() != sizeof(unsigned char) && inputImage.itemSize() != sizeof(float)) { std::cerr << "ERROR: FlowFilter::setInputImage(): item size should be 1 or 4: " << inputImage.itemSize() << std::endl; throw std::invalid_argument("FlowFilter::setInputImage(): item size should be 1 or 4: " + std::to_string(inputImage.itemSize())); } __inputImage = inputImage; __height = __inputImage.height(); __width = __inputImage.width(); __inputImageSet = true; } void FlowFilter::loadImage(flowfilter::image_t& image) { __inputImage.upload(image); // if(__firstLoad) { // std::cout << "FlowFilter::loadImage(): fisrt load" << std::endl; // // compute image model parameters // __imageModel.compute(); // // set the old image value to current // // computed constant brightness parameter // GPUImage imConstant = __imageModel.getImageConstant(); // __update.getUpdatedImage().copyFrom(imConstant); // __firstLoad = false; // } } void FlowFilter::downloadFlow(flowfilter::image_t& flow) { __smoother.getSmoothedFlow().download(flow); } void FlowFilter::downloadImage(flowfilter::image_t& image) { __update.getUpdatedImage().download(image); } // void FlowFilter::downloadImageGradient(flowfilter::image_t& gradient) { // __imageModel.getImageGradient().download(gradient); // } // void FlowFilter::downloadImageConstant(flowfilter::image_t& image) { // __imageModel.getImageConstant().download(image); // } // void FlowFilter::downloadImageUpdated(flowfilter::image_t& image) { // __update.getUpdatedImage().download(image); // } // void FlowFilter::downloadFlowUpdated(flowfilter::image_t& flow) { // __update.getUpdatedFlow().download(flow); // } // void FlowFilter::downloadSmoothedFlow(flowfilter::image_t& flow) { // 
__smoother.getSmoothedFlow().download(flow); // } GPUImage FlowFilter::getFlow() { return __update.getUpdatedFlow(); } float FlowFilter::getGamma() const { return __update.getGamma(); } void FlowFilter::setGamma(const float gamma) { // scale gamma if input image is uint8 if(__inputImage.itemSize() == 1){ __update.setGamma(gamma / (255.0f*255.0f)); } else { __update.setGamma(gamma); } } float FlowFilter::getMaxFlow() const { return __update.getMaxFlow(); } void FlowFilter::setMaxFlow(const float maxflow) { __update.setMaxFlow(maxflow); __propagator.setIterations(int(ceilf(maxflow))); } int FlowFilter::getSmoothIterations() const { return __smoother.getIterations(); } void FlowFilter::setSmoothIterations(const int N) { __smoother.setIterations(N); } void FlowFilter::setPropagationBorder(const int border) { __propagator.setBorder(border); } int FlowFilter::getPropagationBorder() const { return __propagator.getBorder(); } int FlowFilter::getPropagationIterations() const { return __propagator.getIterations(); } int FlowFilter::height() const { return __height; } int FlowFilter::width() const { return __width; } //############################################### // DeltaFlowFilter //############################################### DeltaFlowFilter::DeltaFlowFilter() : Stage() { __configured = false; __firstLoad = true; __inputImageSet = false; __inputFlowSet = false; } DeltaFlowFilter::DeltaFlowFilter(flowfilter::gpu::GPUImage inputImage, flowfilter::gpu::GPUImage inputFlow) : Stage() { __configured = false; __firstLoad = true; __inputImageSet = false; __inputFlowSet = false; setInputImage(inputImage); setInputFlow(inputFlow); configure(); } DeltaFlowFilter::~DeltaFlowFilter() { // nothing to do } void DeltaFlowFilter::configure() { if(!__inputFlowSet) { std::cerr << "ERROR: DeltaFlowFilter::configure(): input flow not set" << std::endl; throw std::exception(); } if(!__inputImageSet) { std::cerr << "ERROR: DeltaFlowFilter::configure(): input image not set" << std::endl; throw std::exception(); } int height = __inputImage.height(); int width = __inputImage.width(); __imageModel = ImageModel(__inputImage); // dummy inputs to create delta flow update GPUImage dummyDeltaFlow(height, width, 2, sizeof(float)); GPUImage dummyImageOld(height, width, 1, sizeof(float)); // create delta flow update stage __update = DeltaFlowUpdate(__inputFlow, dummyDeltaFlow, dummyImageOld, __imageModel.getImageConstant(), __imageModel.getImageGradient()); // flow smoother __smoother = FlowSmoother(__update.getUpdatedFlow(), 1); // propagator with payload __propagator = FlowPropagatorPayload(__smoother.getSmoothedFlow(), __update.getUpdatedImage(), __update.getUpdatedDeltaFlow()); // replace dummy inputs with propagated outputs __update.setInputDeltaFlow(__propagator.getPropagatedVector()); __update.setInputImageOld(__propagator.getPropagatedScalar()); // clear buffers __imageModel.getImageConstant().clear(); __imageModel.getImageGradient().clear(); __propagator.getPropagatedFlow().clear(); __propagator.getPropagatedScalar().clear(); __propagator.getPropagatedVector().clear(); __update.getUpdatedFlow().clear(); __update.getUpdatedDeltaFlow().clear(); __update.getUpdatedImage().clear(); __smoother.getSmoothedFlow().clear(); __configured = true; __firstLoad = true; } void DeltaFlowFilter::compute() { startTiming(); // compute image model __imageModel.compute(); if(__firstLoad) { std::cout << "DeltaFlowFilter::compute(): fisrt load" << std::endl; // set the old image value to current // computed constant brightness parameter 
GPUImage imConstant = __imageModel.getImageConstant(); __update.getUpdatedImage().copyFrom(imConstant); __firstLoad = false; } // propagate old flow __propagator.compute(); // update __update.compute(); // smooth updated flow __smoother.compute(); stopTiming(); } void DeltaFlowFilter::computeImageModel() { startTiming(); __imageModel.compute(); stopTiming(); } void DeltaFlowFilter::computePropagation() { startTiming(); __propagator.compute(); stopTiming(); } void DeltaFlowFilter::computeUpdate() { startTiming(); if(__firstLoad) { std::cout << "DeltaFlowFilter::compute(): fisrt load" << std::endl; // set the old image value to current // computed constant brightness parameter GPUImage imConstant = __imageModel.getImageConstant(); __update.getUpdatedImage().copyFrom(imConstant); __propagator.getPropagatedScalar().copyFrom(imConstant); __firstLoad = false; } // update __update.compute(); // smooth updated flow __smoother.compute(); stopTiming(); } void DeltaFlowFilter::setInputImage(GPUImage inputImage) { if(inputImage.depth() != 1) { std::cerr << "ERROR: DeltaFlowFilter::setInputImage(): input image should have depth 1: " << inputImage.depth() << std::endl; throw std::exception(); } if(inputImage.itemSize() != sizeof(unsigned char) && inputImage.itemSize() != sizeof(float)) { std::cerr << "ERROR: DeltaFlowFilter::setInputImage(): input image should have item size 4: " << inputImage.itemSize() << std::endl; throw std::exception(); } __inputImage = inputImage; __inputImageSet = true; } void DeltaFlowFilter::setInputFlow(GPUImage inputFlow) { if(inputFlow.depth() != 2) { std::cerr << "ERROR: DeltaFlowFilter::setInputFlow(): input flow should have depth 2: " << inputFlow.depth() << std::endl; throw std::exception(); } if(inputFlow.itemSize() != 4) { std::cerr << "ERROR: DeltaFlowFilter::setInputFlow(): input flow should have item size 4: " << inputFlow.itemSize() << std::endl; throw std::exception(); } __inputFlow = inputFlow; __inputFlowSet = true; } GPUImage DeltaFlowFilter::getFlow() { return __smoother.getSmoothedFlow(); } GPUImage DeltaFlowFilter::getImage() { return __update.getUpdatedImage(); } float DeltaFlowFilter::getGamma() const { return __update.getGamma(); } void DeltaFlowFilter::setGamma(const float gamma) { // scale gamma if input image is uint8 if(__inputImage.itemSize() == 1) { __update.setGamma(gamma / (255.0f*255.0f)); } else { __update.setGamma(gamma); } } float DeltaFlowFilter::getMaxFlow() const { return __update.getMaxFlow(); } void DeltaFlowFilter::setMaxFlow(const float maxflow) { __update.setMaxFlow(maxflow); __propagator.setIterations(int(ceilf(maxflow))); } int DeltaFlowFilter::getSmoothIterations() const { return __smoother.getIterations(); } void DeltaFlowFilter::setSmoothIterations(const int N) { __smoother.setIterations(N); } void DeltaFlowFilter::setPropagationBorder(const int border) { __propagator.setBorder(border); } int DeltaFlowFilter::getPropagationBorder() const { return __propagator.getBorder(); } int DeltaFlowFilter::getPropagationIterations() const { return __propagator.getIterations(); } int DeltaFlowFilter::height() const { return __inputImage.height(); } int DeltaFlowFilter::width() const { return __inputImage.width(); } //############################################### // PyramidalFlowFilter //############################################### PyramidalFlowFilter::PyramidalFlowFilter() : Stage() { __height = 0; __width = 0; __levels = 0; __configured = false; } PyramidalFlowFilter::PyramidalFlowFilter(const int height, const int width, const int 
levels) : Stage() { __height = height; __width = width; __levels = levels; __configured = false; configure(); } PyramidalFlowFilter::~PyramidalFlowFilter() { // nothing to do } void PyramidalFlowFilter::configure() { __inputImage = GPUImage(__height, __width, 1, sizeof(unsigned char)); // image pyramid __imagePyramid = ImagePyramid(__inputImage, __levels); // top level filter block __topLevelFilter = FlowFilter(__imagePyramid.getImage(__levels -1)); if(__levels > 1) { __lowLevelFilters.resize(__levels -1); GPUImage levelInputFlow = __topLevelFilter.getFlow(); levelInputFlow.clear(); for(int h = __levels -2; h >= 0; h --) { __lowLevelFilters[h] = DeltaFlowFilter( __imagePyramid.getImage(h), levelInputFlow); levelInputFlow = __lowLevelFilters[h].getFlow(); } } // clear buffers __inputImage.clear(); for(int h = 0; h < __levels; h ++) { __imagePyramid.getImage(h).clear(); } __configured = true; } void PyramidalFlowFilter::compute() { startTiming(); // compute image pyramid __imagePyramid.compute(); if(__levels == 1) { __topLevelFilter.compute(); } else { // compute image model and propagation for all levels __topLevelFilter.computeImageModel(); __topLevelFilter.computePropagation(); for(int h =0; h < __levels - 1; h ++) { __lowLevelFilters[h].computeImageModel(); __lowLevelFilters[h].computePropagation(); } // update __topLevelFilter.computeUpdate(); for(int h =0; h < __levels - 1; h ++) { __lowLevelFilters[h].computeUpdate(); } } stopTiming(); } GPUImage PyramidalFlowFilter::getFlow() { if(__levels == 1) { return __topLevelFilter.getFlow(); } else { return __lowLevelFilters[0].getFlow(); } } void PyramidalFlowFilter::loadImage(image_t& image) { __inputImage.upload(image); } void PyramidalFlowFilter::downloadFlow(image_t& flow) { if(__levels == 1) { __topLevelFilter.downloadFlow(flow); } else { __lowLevelFilters[0].getFlow().download(flow); } } void PyramidalFlowFilter::downloadImage(image_t& image) { if(__levels == 1) { __topLevelFilter.downloadImage(image); } else { __lowLevelFilters[0].getImage().download(image); } } float PyramidalFlowFilter::getGamma(const int level) const { if(level < 0 || level >= __levels) { std::cerr << "ERROR: PyramidalFlowFilter::getGamma(): level index out of bounds: " << level << std::endl; throw std::exception(); } if(level == __levels -1) { return __topLevelFilter.getGamma(); } else { return __lowLevelFilters[level].getGamma(); } } void PyramidalFlowFilter::setGamma(const int level, const float gamma) { if(level < 0 || level >= __levels) { std::cerr << "ERROR: PyramidalFlowFilter::setGamma(): level index out of bounds: " << level << std::endl; throw std::exception(); } if(level == __levels -1) { __topLevelFilter.setGamma(gamma); } else { __lowLevelFilters[level].setGamma(gamma); } } void PyramidalFlowFilter::setGamma(const std::vector<float>& gamma) { if(gamma.size() != __levels) { std::cerr << "ERROR: PyramidalFlowFilter::setGamma(): gamma vector should be size " << __levels << ", got: " << gamma.size(); throw std::exception(); } for(int h = 0; h < __levels; h ++) { setGamma(h, gamma[h]); } } int PyramidalFlowFilter::getSmoothIterations(const int level) const { if(level < 0 || level >= __levels) { std::cerr << "ERROR: PyramidalFlowFilter::getSmoothIterations(): level index out of bounds: " << level << std::endl; throw std::exception(); } if(level == __levels -1) { return __topLevelFilter.getSmoothIterations(); } else { return __lowLevelFilters[level].getSmoothIterations(); } } void PyramidalFlowFilter::setSmoothIterations(const int level, const int N) { 
if(level < 0 || level >= __levels) { std::cerr << "ERROR: PyramidalFlowFilter::setSmoothIterations(): level index out of bounds: " << level << std::endl; throw std::exception(); } if(level == __levels -1) { __topLevelFilter.setSmoothIterations(N); } else { __lowLevelFilters[level].setSmoothIterations(N); } } void PyramidalFlowFilter::setSmoothIterations(const std::vector<int>& iterations) { if(iterations.size() != __levels) { std::cerr << "ERROR: PyramidalFlowFilter::setSmoothIterations(): iterations vector should be size " << __levels << ", got: " << iterations.size(); throw std::exception(); } for(int h = 0; h < __levels; h ++) { setSmoothIterations(h, iterations[h]); } } float PyramidalFlowFilter::getMaxFlow() const { if(__levels == 1) { return __topLevelFilter.getMaxFlow(); } else { return __lowLevelFilters[0].getMaxFlow(); } } void PyramidalFlowFilter::setMaxFlow(const float maxflow) { if(__levels == 1) { __topLevelFilter.setMaxFlow(maxflow); } else { float maxflowLevel = maxflow; for(int h = 0; h < __levels - 1; h ++) { __lowLevelFilters[h].setMaxFlow(maxflowLevel); maxflowLevel /= 2.0f; } __topLevelFilter.setMaxFlow(maxflowLevel); } } void PyramidalFlowFilter::setPropagationBorder(const int border) { __topLevelFilter.setPropagationBorder(border); if(__levels > 1) { for(int h = 0; h < __levels; h ++) { __lowLevelFilters[h].setPropagationBorder(border); } } } int PyramidalFlowFilter::getPropagationBorder() const { return __topLevelFilter.getPropagationBorder(); } int PyramidalFlowFilter::height() const { return __height; } int PyramidalFlowFilter::width() const { return __width; } int PyramidalFlowFilter::levels() const { return __levels; } }; // namespace gpu }; // namespace flowfilter
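A note on usage: the pyramidal filter above chains a FlowFilter at the coarsest level with a stack of DeltaFlowFilter stages below it, and from the host the whole pyramid is driven through loadImage(), compute() and downloadFlow(). The following is a minimal host-side sketch of that loop, based only on the public methods visible in this file; the runPyramidalFilter name, the 2-level configuration and the gamma/maxflow/smooth-iteration values are illustrative assumptions, and the flowfilter::image_t descriptors are assumed to be prepared by the caller, since their field layout is not shown in this translation unit.

#include "flowfilter/gpu/flowfilter.h"

// Minimal host-side driver sketch for PyramidalFlowFilter (illustrative only).
// `frame` is assumed to describe a 1-channel 8-bit host image and `flow` a
// 2-channel float host buffer; constructing those descriptors is left to the
// caller because image_t's fields are not visible in this file.
void runPyramidalFilter(flowfilter::image_t& frame,
                        flowfilter::image_t& flow,
                        const int height, const int width,
                        const int numFrames) {

    using namespace flowfilter::gpu;

    PyramidalFlowFilter filter(height, width, 2);  // 2-level pyramid (illustrative)
    filter.setMaxFlow(4.0f);                       // also derives propagation iterations per level
    filter.setGamma({50.0f, 50.0f});               // one gain value per pyramid level
    filter.setSmoothIterations({2, 4});            // per-level smoothing iterations

    for (int k = 0; k < numFrames; ++k) {
        filter.loadImage(frame);     // host -> device upload of the current frame
        filter.compute();            // image model, propagation, update and smoothing
        filter.downloadFlow(flow);   // device -> host copy of the finest-level flow
    }
}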
#define NVBIO_CUDA_DEBUG #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/strided_iterator.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/work_queue.h> namespace nvbio { namespace wqtest { struct TestWorkStream; // // A test work-unit to be used with cuda::WorkQueue. // Odd work-units produce a continuation, the others don't. // struct TestWorkUnit { NVBIO_FORCEINLINE NVBIO_HOST_DEVICE TestWorkUnit() {} NVBIO_FORCEINLINE NVBIO_HOST_DEVICE TestWorkUnit(const uint32 _i, const uint32 _size, uint32* _output) : i(_i), size( _size ), offset( 0 ), output( _output ) {} NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool run(const TestWorkStream& stream) { output[i] = i; if (i & 1) { const uint32 d = i - offset; offset += size; i = offset + (d/2); size /= 2; return true; } return false; } uint32 i; uint32 size; uint32 offset; uint32* output; }; // // A test work-stream to be used with cuda::WorkQueue // struct TestWorkStream { typedef TestWorkUnit WorkUnit; NVBIO_FORCEINLINE NVBIO_HOST_DEVICE TestWorkStream(const uint32 size, uint32* output) : m_size( size ), m_output( output ) {} NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 size() const { return m_size; } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void get(const uint32 i, TestWorkUnit* unit, const uint2 slot) const { *unit = TestWorkUnit(i,m_size,m_output); } private: uint32 m_size; uint32* m_output; }; template <typename WorkUnit> struct BenchmarkWorkStream; // // A test work-unit to be used with cuda::WorkQueue. // Odd work-units produce a continuation, the others don't. // The work-units come with a payload, which is part of the work-unit itself. // template <uint32 PAYLOAD> struct BenchmarkWorkUnit { NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkWorkUnit() {} NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkWorkUnit( const BenchmarkWorkStream<BenchmarkWorkUnit>& stream, const uint32 _i, const uint2 _slot) : i(_i) { // fill the payload with multiples of 2 for (uint32 j = 0; j < PAYLOAD; ++j) m_payload[j] = j*2; } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool run(const BenchmarkWorkStream<BenchmarkWorkUnit>& stream) { // do something with the associated memory to simulate reading from it uint32 sum = 0; for (uint32 j = 0; j < PAYLOAD; ++j) { sum += m_payload[j]; m_payload[j] *= 2; } assert( (sum & 1) == 0 ); if ((i+sum) & 1) { i /= 2; return true; } return false; } uint32 i; uint32 m_payload[PAYLOAD]; }; // // A test work-unit to be used with cuda::WorkQueue. // Odd work-units produce a continuation, the others don't. // The work-units come with a payload which is new'd and delete'd at run-time. // template <uint32 PAYLOAD> struct BenchmarkDynMemWorkUnit { NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkDynMemWorkUnit() : payload(NULL) {} NVBIO_FORCEINLINE NVBIO_HOST_DEVICE ~BenchmarkDynMemWorkUnit() { } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkDynMemWorkUnit( const BenchmarkWorkStream<BenchmarkDynMemWorkUnit>& stream, const uint32 _i, const uint2 _slot) : i(_i), payload(NULL) {} NVBIO_FORCEINLINE NVBIO_DEVICE bool run(const BenchmarkWorkStream<BenchmarkDynMemWorkUnit>& stream); uint32 i; uint32* payload; }; // // A test work-unit to be used with cuda::WorkQueue. // Odd work-units produce a continuation, the others don't. // The work-units are bound to an external payload which is stored in the stream class // in strided fashion. 
// When continuations are moved from one execution slot to another by the work queue, // an external mover class copies the payload to its new location. // template <uint32 PAYLOAD> struct BenchmarkStridedWorkUnit { NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkStridedWorkUnit() {} NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkStridedWorkUnit( const BenchmarkWorkStream<BenchmarkStridedWorkUnit>& stream, const uint32 _i, const uint2 _slot); NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool run(const BenchmarkWorkStream<BenchmarkStridedWorkUnit>& stream); uint32 i; uint2 slot; }; // // A work-unit mover class responsible for moving the strided payload bound to each // work-unit. Its move method gets invoked when the work-queue changes the execution // slot of a continuation relative to its parent. // template <uint32 PAYLOAD> struct BenchmarkStridedWorkMover { template <typename WorkUnit> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void move( const BenchmarkWorkStream<WorkUnit>& stream, const uint2 src_slot, WorkUnit* src_unit, const uint2 dst_slot, WorkUnit* dst_unit) const; }; // // A generic work-stream class for all the above work-units. // template <typename WorkUnitT> struct BenchmarkWorkStream { typedef WorkUnitT WorkUnit; NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkWorkStream(const uint32 size, uint32* payloads = NULL, uint32 stride = 0) : m_size( size ), m_payloads( payloads ), m_stride( stride ) {} // set pool // void set_pool(uint32* pool_size, uint32* pool) { m_pool = pool; m_pool_size = pool_size; } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 size() const { return m_size; } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void get(const uint32 i, WorkUnit* unit, const uint2 slot) const { *unit = WorkUnit( *this, i,slot); } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32* payloads() const { return m_payloads; } NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 stride() const { return m_stride; } NVBIO_FORCEINLINE NVBIO_DEVICE uint32* alloc(const uint32 PAYLOAD) const { const uint32 slot = atomicSub( m_pool_size, 1u ); return m_payloads + m_pool[slot-1] * PAYLOAD; } NVBIO_FORCEINLINE NVBIO_DEVICE void free(const uint32 PAYLOAD, const uint32* payload) const { const uint32 slot = atomicAdd( m_pool_size, 1u ); m_pool[slot] = (payload - m_payloads) / PAYLOAD; } private: uint32 m_size; uint32* m_payloads; uint32 m_stride; uint32* m_pool_size; uint32* m_pool; }; template <uint32 PAYLOAD> NVBIO_FORCEINLINE NVBIO_DEVICE bool BenchmarkDynMemWorkUnit<PAYLOAD>::run(const BenchmarkWorkStream<BenchmarkDynMemWorkUnit>& stream) { if (payload == NULL) { // alloc memory on first run payload = stream.alloc( PAYLOAD ); assert( payload ); // fill the payload with multiples of 2 for (uint32 j = 0; j < PAYLOAD; ++j) payload[j] = j*2; } // do something with the associated memory to simulate reading from it uint32 sum = 0; for (uint32 j = 0; j < PAYLOAD; ++j) { sum += payload[j]; payload[j] *= 2; } if ((i+sum) & 1) { i /= 2; return true; } // release memory stream.free( PAYLOAD, payload ); payload = NULL; return false; } template <uint32 PAYLOAD> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE BenchmarkStridedWorkUnit<PAYLOAD>::BenchmarkStridedWorkUnit( const BenchmarkWorkStream<BenchmarkStridedWorkUnit>& stream, const uint32 _i, const uint2 _slot) : i(_i), slot(_slot) { uint32* payloads_buffer = (stream.payloads() + slot.y * stream.stride()*PAYLOAD); strided_iterator<uint32*> payload( payloads_buffer + slot.x, stream.stride() ); // fill the payload with multiples of 2 for (uint32 j = 0; j < PAYLOAD; ++j) payload[j] = j*2; } template <uint32 PAYLOAD> 
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool BenchmarkStridedWorkUnit<PAYLOAD>::run(const BenchmarkWorkStream<BenchmarkStridedWorkUnit>& stream) { uint32* payloads_buffer = (stream.payloads() + slot.y * stream.stride()*PAYLOAD); strided_iterator<uint32*> payload( payloads_buffer + slot.x, stream.stride() ); // do something with the associated memory to simulate reading from it uint32 sum = 0; for (uint32 j = 0; j < PAYLOAD; ++j) { sum += payload[j]; payload[j] *= 2; } assert( (sum & 1) == 0 ); if ((i+sum) & 1) { i /= 2; return true; } return false; } template <uint32 PAYLOAD> template <typename WorkUnit> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void BenchmarkStridedWorkMover<PAYLOAD>::move( const BenchmarkWorkStream<WorkUnit>& stream, const uint2 src_slot, WorkUnit* src_unit, const uint2 dst_slot, WorkUnit* dst_unit) const { // copy the strided payloads const uint32 src_queue = src_slot.y; const uint32 dst_queue = dst_slot.y; uint32* src_payloads = stream.payloads() + src_queue * stream.stride() * PAYLOAD; uint32* dst_payloads = stream.payloads() + dst_queue * stream.stride() * PAYLOAD; strided_iterator<uint32*> src_payload( src_payloads + src_slot.x, stream.stride() ); strided_iterator<uint32*> dst_payload( dst_payloads + dst_slot.x, stream.stride() ); for (uint32 i = 0; i < PAYLOAD; ++i) dst_payload[i] = src_payload[i]; // copy the unit *dst_unit = *src_unit; // fix destination's slot dst_unit->slot = dst_slot; } template <uint32 PAYLOAD, uint32 BLOCKDIM> void benchmark(const uint32 n_tests, const uint32 min_size, const uint32 max_size) { using namespace cuda; typedef BenchmarkWorkUnit<PAYLOAD> FatWorkUnit; typedef BenchmarkWorkStream<FatWorkUnit> FatWorkStream; typedef WorkQueue<OrderedQueueTag,FatWorkUnit,BLOCKDIM> FatWorkQueue; typedef WorkQueue<MultiPassQueueTag,FatWorkUnit,BLOCKDIM> FatMKWorkQueue; typedef WorkQueue<PersistentWarpsQueueTag,FatWorkUnit,BLOCKDIM> FatPWWorkQueue; typedef WorkQueue<PersistentThreadsQueueTag,FatWorkUnit,BLOCKDIM> FatPTWorkQueue; const uint32 sz = uint32(sizeof(FatWorkUnit)); const float GB = float(1024*1024*1024); const uint32 base_stream_size = min_size; uint32 stream_doublings = 0; for (uint32 size = min_size; size <= max_size; size *= 2) ++stream_doublings; const uint64 bytes_copied = uint64(base_stream_size*2-1 - base_stream_size/2) * sz*2; float times[16]; #if 0 log_info( stderr, " ordered work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." 
); const uint32 n_stream_size = base_stream_size << m; FatWorkStream stream( n_stream_size ); FatWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); #endif log_info( stderr, " multi-pass work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." ); const uint32 n_stream_size = base_stream_size << m; FatWorkStream stream( n_stream_size ); FatMKWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); log_info( stderr, " persistent-warps work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." ); const uint32 n_stream_size = base_stream_size << m; FatWorkStream stream( n_stream_size ); FatPWWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); log_info( stderr, " persistent-threads work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." 
); const uint32 n_stream_size = base_stream_size << m; FatWorkStream stream( n_stream_size ); FatPTWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); // alloc payloads storage, initialized to zero const uint32 capacity = 64*1024; thrust::device_vector<uint32> payloads( 2 * PAYLOAD * capacity, 0u ); thrust::device_vector<uint32> payloads_pool( 1u + capacity, 0u ); // fill the pool of payloads payloads_pool[capacity] = capacity; thrust::copy( thrust::make_counting_iterator( 0u ), thrust::make_counting_iterator( 0u ) + capacity, payloads_pool.begin() ); typedef BenchmarkDynMemWorkUnit<PAYLOAD> DynMemWorkUnit; typedef BenchmarkWorkStream<DynMemWorkUnit> DynMemWorkStream; typedef WorkQueue<OrderedQueueTag,DynMemWorkUnit,BLOCKDIM> DynMemWorkQueue; typedef WorkQueue<MultiPassQueueTag,DynMemWorkUnit,BLOCKDIM> DynMemMKWorkQueue; #if 0 log_info( stderr, " ordered dyn-mem work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." ); const uint32 n_stream_size = base_stream_size << m; DynMemWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &payloads.front() ) ); DynMemWorkQueue work_queue; // setup the pool stream.set_pool( thrust::raw_pointer_cast( &payloads_pool.front() ) + capacity, thrust::raw_pointer_cast( &payloads_pool.front() ) ); // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); #endif typedef BenchmarkStridedWorkUnit<PAYLOAD> StridedWorkUnit; typedef BenchmarkWorkStream<StridedWorkUnit> StridedWorkStream; typedef BenchmarkStridedWorkMover<PAYLOAD> StridedWorkMover; typedef WorkQueue<OrderedQueueTag,StridedWorkUnit,BLOCKDIM> StridedWorkQueue; typedef WorkQueue<MultiPassQueueTag,StridedWorkUnit,BLOCKDIM> StridedMKWorkQueue; #if 0 log_info( stderr, " ordered strided work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." 
); const uint32 n_stream_size = base_stream_size << m; StridedWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &payloads.front() ), capacity ); StridedWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream, StridedWorkMover() ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); #endif log_info( stderr, " multi-pass strided work-queue, %u-byte payload", PAYLOAD*4u ); for (uint32 m = 0; m < stream_doublings; ++m) { log_info_cont( stderr, "." ); const uint32 n_stream_size = base_stream_size << m; StridedWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &payloads.front() ), capacity ); StridedMKWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) work_queue.consume( stream, StridedWorkMover() ); cudaDeviceSynchronize(); timer.stop(); times[m] = timer.seconds() / float(n_tests); } log_info_nl( stderr ); log_info( stderr, " runtime (ms) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", times[i] * 1.0e3f ); log_info_nl( stderr ); log_info( stderr, " work-units (M/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", 1.0e-6f * (float((base_stream_size<<i)*2-1)/times[i]) ); log_info_nl( stderr ); log_info( stderr, " bandwidth (GB/s) :" ); for (uint32 i = 0; i < stream_doublings; ++i) log_info_cont( stderr, " %7.2f", (float(bytes_copied<<i)/times[i]) / GB ); log_info_nl( stderr ); } } // wqtest namespace int work_queue_test(int argc, char* argv[]) { using namespace cuda; using namespace wqtest; log_info( stderr, "work_queue test... 
started\n" ); uint32 n_tests = 1; uint32 min_size = 512*1024; uint32 max_size = 1024*1024; uint32 max_payload = 32; for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-n-tests" ) == 0) n_tests = atoi( argv[++i] ); else if (strcmp( argv[i], "-min-size" ) == 0) min_size = atoi( argv[++i] ) * 1024; else if (strcmp( argv[i], "-max-size" ) == 0) min_size = atoi( argv[++i] ) * 1024; else if (strcmp( argv[i], "-max-payload" ) == 0) max_payload = atoi( argv[++i] ); } NVBIO_VAR_UNUSED const uint32 BLOCKDIM = 128; typedef WorkQueue<OrderedQueueTag,TestWorkUnit,BLOCKDIM> TestWorkQueue; typedef WorkQueue<MultiPassQueueTag,TestWorkUnit,BLOCKDIM> TestMKWorkQueue; typedef WorkQueue<PersistentWarpsQueueTag,TestWorkUnit,BLOCKDIM> TestPWWorkQueue; typedef WorkQueue<PersistentThreadsQueueTag,TestWorkUnit,BLOCKDIM> TestPTWorkQueue; #if 0 log_info( stderr, " testing ordered work-queue:\n" ); { const uint32 n_stream_size = 1024*1024; thrust::device_vector<uint32> output( n_stream_size*2, 0 ); TestWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &output.front() ) ); TestWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); thrust::host_vector<uint32> h_output( output ); for (uint32 i = 0; i < n_stream_size*2-1; ++i) { if (i != h_output[i]) { log_error( stderr, " found %u at position %u\n", h_output[i], i ); return 1; } } log_info( stderr, " correctness test passed\n" ); } #endif log_info( stderr, " testing multi-pass work-queue:\n" ); { const uint32 n_stream_size = 1024*1024; thrust::device_vector<uint32> output( n_stream_size*2, 0 ); TestWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &output.front() ) ); TestMKWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); thrust::host_vector<uint32> h_output( output ); for (uint32 i = 0; i < n_stream_size*2-1; ++i) { if (i != h_output[i]) { log_error( stderr, " found %u at position %u\n", h_output[i], i ); return 1; } } log_info( stderr, " correctness test passed\n" ); } log_info( stderr, " testing persistent-warps work-queue:\n" ); { const uint32 n_stream_size = 1024*1024; thrust::device_vector<uint32> output( n_stream_size*2, 0 ); TestWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &output.front() ) ); TestPWWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); thrust::host_vector<uint32> h_output( output ); for (uint32 i = 0; i < n_stream_size*2-1; ++i) { if (i != h_output[i]) { log_error( stderr, " found %u at position %u\n", h_output[i], i ); return 1; } } log_info( stderr, " correctness test passed\n" ); } log_info( stderr, " testing persistent-threads work-queue:\n" ); { const uint32 n_stream_size = 1024*1024; thrust::device_vector<uint32> output( n_stream_size*2, 0 ); TestWorkStream stream( n_stream_size, thrust::raw_pointer_cast( &output.front() ) ); TestPTWorkQueue work_queue; // do one warm-up launch work_queue.consume( stream ); cudaDeviceSynchronize(); thrust::host_vector<uint32> h_output( output ); for (uint32 i = 0; i < n_stream_size*2-1; ++i) { if (i != h_output[i]) { log_error( stderr, " found %u at position %u\n", h_output[i], i ); return 1; } } log_info( stderr, " correctness test passed\n" ); } log_info( stderr, " benchmarking... 
started\n" ); benchmark<1,BLOCKDIM>( n_tests, min_size, max_size ); if (max_payload >= 32) benchmark<32,BLOCKDIM>( n_tests, min_size, max_size ); if (max_payload >= 64) benchmark<64,BLOCKDIM>( n_tests, min_size, max_size ); if (max_payload >= 128) benchmark<128,BLOCKDIM>( n_tests, min_size, max_size ); if (max_payload >= 256) benchmark<256,BLOCKDIM>( n_tests, min_size, max_size ); log_info( stderr, " benchmarking... done\n" ); log_info( stderr, "work_queue test... done\n" ); return 0; } } // namespace nvbio
/** @addtogroup cudpp_app * @{ */ /** @name RadixSort Functions * @{ */ #include "cuda_util.h" #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_radixsort.h" #include "cudpp_scan.h" #if 0 #include "kernel/radixsort_kernel.cuh" #include "cudpp_maximal_launch.h" #include <cstdlib> #include <cstdio> #include <assert.h> #endif #if 0 typedef unsigned int uint; /** @brief Perform one step of the radix sort. Sorts by nbits key bits per step, * starting at startbit. * * Uses cudppScanDispatch() for the prefix sum of radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. **/ template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStep(uint *keys, uint *values, const CUDPPRadixSortPlan *plan, uint numElements) { const uint eltsPerBlock = SORT_CTA_SIZE * 4; const uint eltsPerBlock2 = SORT_CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop ? 65535 : numBlocks2; uint blocksReorder = loop ? 65535 : numBlocks2; uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[0] : plan->m_persistentCTAThreshold[0]; bool persist = plan->m_bUsePersistentCTAs && (numElements >= threshold); if (persist) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = numBlocks; blocksFind = numBlocks2; blocksReorder = numBlocks2; // Run an empty kernel -- this seems to reset some of the CTA scheduling hardware // on GT200, resulting in better scheduling and lower run times if (startbit > 0) { emptyKernel<<<plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_EK], SORT_CTA_SIZE>>>(); } } if (fullBlocks) { if (loop) { if (persist) { blocks = flip? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_F_T]; } radixSortBlocks<nbits, startbit, true, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { radixSortBlocks<nbits, startbit, true, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } else { if (loop) { if (persist) { blocks = flip ? 
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_F_T]; } radixSortBlocks<nbits, startbit, false, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { radixSortBlocks<nbits, startbit, false, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } CUDA_CHECK_ERROR("radixSortBlocks"); if (fullBlocks) { if (loop) { if (persist) { blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T]; } findRadixOffsets<startbit, true, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { findRadixOffsets<startbit, true, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T]; } findRadixOffsets<startbit, false, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { findRadixOffsets<startbit, false, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } } CUDA_CHECK_ERROR("findRadixOffsets"); cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, plan->m_scanPlan); if (fullBlocks) { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_F_T]; } reorderData<startbit, true, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, true, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_F_T]; } reorderData<startbit, true, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, true, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } } else { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? 
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_F_T]; } reorderData<startbit, false, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, false, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_F_T]; } reorderData<startbit, false, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, false, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } } CUDA_CHECK_ERROR("radixSortStep"); } /** * @brief Single-block optimization for sorts of fewer than 4 * CTA_SIZE elements * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param numElements Number of elements in the sort. **/ template <bool flip> void radixSortSingleBlock(uint *keys, uint *values, uint numElements) { bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0); if (fullBlocks) { radixSortBlocks<32, 0, true, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 0); } else { radixSortBlocks<32, 0, false, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 0); } if (flip) unflipFloats<<<1, SORT_CTA_SIZE>>>(keys, numElements); CUDA_CHECK_ERROR("radixSortSingleBlock"); } /** * @brief Main radix sort function * * Main radix sort function. Sorts in place in the keys and values arrays, * but uses the other device arrays as temporary storage. All pointer * parameters are device pointers. Uses cudppScan() for the prefix sum of * radix counters. * * While the interface supports forward and backward sorts (via \a plan), * only forward is currently implemented. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. * @param[in] flipBits Is set true if key datatype is a float * (neg. numbers) for special float sorting operations. 
* @param[in] keyBits Number of interesting bits in the key **/ void radixSort(uint *keys, uint* values, const CUDPPRadixSortPlan *plan, size_t numElements, bool flipBits, int keyBits) { if(numElements <= WARP_SIZE) { if (flipBits) radixSortSingleWarp<true><<<1, numElements>>> (keys, values, numElements); else radixSortSingleWarp<false><<<1, numElements>>> (keys, values, numElements); CUDA_CHECK_ERROR("radixSortSingleWarp"); return; } if(numElements <= SORT_CTA_SIZE * 4) { if (flipBits) radixSortSingleBlock<true>(keys, values, numElements); else radixSortSingleBlock<false>(keys, values, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStep<4, 0, true, false> (keys, values, plan, numElements); } else { radixSortStep<4, 0, false, false> (keys, values, plan, numElements); } if (keyBits > 4) { radixSortStep<4, 4, false, false> (keys, values, plan, numElements); } if (keyBits > 8) { radixSortStep<4, 8, false, false> (keys, values, plan, numElements); } if (keyBits > 12) { radixSortStep<4, 12, false, false> (keys, values, plan, numElements); } if (keyBits > 16) { radixSortStep<4, 16, false, false> (keys, values, plan, numElements); } if (keyBits > 20) { radixSortStep<4, 20, false, false> (keys, values, plan, numElements); } if (keyBits > 24) { radixSortStep<4, 24, false, false> (keys, values, plan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStep<4, 28, false, true> (keys, values, plan, numElements); } else { radixSortStep<4, 28, false, false> (keys, values, plan, numElements); } } } /** * @brief Wrapper to call main radix sort function. For float configuration. * * Calls the main radix sort function. For float configuration. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. * @param[in] negativeKeys Is set true if key datatype has neg. numbers. * @param[in] keyBits Number of interesting bits in the key **/ void radixSortFloatKeys(float* keys, uint* values, const CUDPPRadixSortPlan *plan, size_t numElements, bool negativeKeys, int keyBits) { radixSort((uint*)keys, (uint*)values, plan, numElements, negativeKeys, keyBits); } /** @brief Perform one step of the radix sort. Sorts by nbits key bits per step, * starting at startbit. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. **/ template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStepKeysOnly(uint *keys, const CUDPPRadixSortPlan *plan, uint numElements) { const uint eltsPerBlock = SORT_CTA_SIZE * 4; const uint eltsPerBlock2 = SORT_CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop ? 65535 : numBlocks2; uint blocksReorder = loop ? 65535 : numBlocks2; uint threshold = fullBlocks ? 
plan->m_persistentCTAThresholdFullBlocks[1] : plan->m_persistentCTAThreshold[1]; bool persist = plan->m_bUsePersistentCTAs && (numElements >= threshold); if (persist) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = numBlocks; blocksFind = numBlocks2; blocksReorder = numBlocks2; } if (fullBlocks) { if (loop) { if (persist) { blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_F_T]; } radixSortBlocksKeysOnly<nbits, startbit, true, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else radixSortBlocksKeysOnly<nbits, startbit, true, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else { if (loop) { if (persist) { blocks = flip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_F_T]; } radixSortBlocksKeysOnly<nbits, startbit, false, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else radixSortBlocksKeysOnly<nbits, startbit, false, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } if (fullBlocks) { if (loop) { if (persist) { blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T]; } findRadixOffsets<startbit, true, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else findRadixOffsets<startbit, true, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksFind = plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T]; } findRadixOffsets<startbit, false, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else findRadixOffsets<startbit, false, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, plan->m_scanPlan); if (fullBlocks) { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_F_T]; } reorderDataKeysOnly<startbit, true, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, true, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksReorder = unflip ? 
plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_F_T]; } reorderDataKeysOnly<startbit, true, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, true, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_F_T]; } reorderDataKeysOnly<startbit, false, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, false, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksReorder = unflip ? plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_T_T] : plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_F_T]; } reorderDataKeysOnly<startbit, false, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, false, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } CUDA_CHECK_ERROR("radixSortStepKeysOnly"); } /** * @brief Optimization for sorts of fewer than 4 * CTA_SIZE elements (keys only). * * @param[in,out] keys Keys to be sorted. * @param numElements Number of elements in the sort. **/ template <bool flip> void radixSortSingleBlockKeysOnly(uint *keys, uint numElements) { bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0); if (fullBlocks) { radixSortBlocksKeysOnly<32, 0, true, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)keys, numElements, 1 ); } else { radixSortBlocksKeysOnly<32, 0, false, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)keys, numElements, 1 ); } if (flip) unflipFloats<<<1, SORT_CTA_SIZE>>>(keys, numElements); CUDA_CHECK_ERROR("radixSortSingleBlock"); } /** * @brief Main radix sort function. For keys only configuration. * * Main radix sort function. Sorts in place in the keys array, * but uses the other device arrays as temporary storage. All pointer * parameters are device pointers. Uses scan for the prefix sum of * radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] flipBits Is set true if key datatype is a float (neg. numbers) * for special float sorting operations. * @param[in] numElements Number of elements in the sort. 
* @param[in] keyBits Number of interesting bits in the key **/ void radixSortKeysOnly(uint *keys, const CUDPPRadixSortPlan *plan, size_t numElements, bool flipBits, int keyBits) { if(numElements <= WARP_SIZE) { if (flipBits) radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements); else radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements); return; } if(numElements <= SORT_CTA_SIZE * 4) { if (flipBits) radixSortSingleBlockKeysOnly<true>(keys, numElements); else radixSortSingleBlockKeysOnly<false>(keys, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStepKeysOnly<4, 0, true, false>(keys, plan, numElements); } else { radixSortStepKeysOnly<4, 0, false, false>(keys, plan, numElements); } if (keyBits > 4) { radixSortStepKeysOnly<4, 4, false, false>(keys, plan, numElements); } if (keyBits > 8) { radixSortStepKeysOnly<4, 8, false, false>(keys, plan, numElements); } if (keyBits > 12) { radixSortStepKeysOnly<4, 12, false, false>(keys, plan, numElements); } if (keyBits > 16) { radixSortStepKeysOnly<4, 16, false, false>(keys, plan, numElements); } if (keyBits > 20) { radixSortStepKeysOnly<4, 20, false, false>(keys, plan, numElements); } if (keyBits > 24) { radixSortStepKeysOnly<4, 24, false, false>(keys, plan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStepKeysOnly<4, 28, false, true>(keys, plan, numElements); } else { radixSortStepKeysOnly<4, 28, false, false>(keys, plan, numElements); } } } /** * @brief Wrapper to call main radix sort function. For floats and keys only. * * Calls the radixSortKeysOnly function setting parameters for floats. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] negativeKeys Is set true if key flipBits is to be true in * radixSortKeysOnly(). * @param[in] numElements Number of elements in the sort. * @param[in] keyBits Number of interesting bits in the key **/ void radixSortFloatKeysOnly(float *keys, const CUDPPRadixSortPlan *plan, size_t numElements, bool negativeKeys, int keyBits) { radixSortKeysOnly((uint*)keys, plan, numElements, negativeKeys, keyBits); } void initDeviceParameters(CUDPPRadixSortPlan *plan) { int deviceID = -1; if (cudaSuccess == cudaGetDevice(&deviceID)) { cudaDeviceProp devprop; plan->m_planManager->getDeviceProps(devprop); int smVersion = devprop.major * 10 + devprop.minor; // sm_12 and later devices don't need help with coalesce in reorderData kernel plan->m_bManualCoalesce = (smVersion < 12); // sm_20 and later devices are better off not using persistent CTAs plan->m_bUsePersistentCTAs = (smVersion < 20); if (plan->m_bUsePersistentCTAs) { // The following is only true on pre-sm_20 devices (pre-Fermi): // Empirically we have found that for some (usually larger) sort // sizes it is better to use exactly as many "persistent" CTAs // as can fill the GPU, which loop over the "blocks" of work. For smaller // arrays it is better to use the typical CUDA approach of launching one CTA // per block of work. // 0-element of these two-element arrays is for key-value sorts // 1-element is for key-only sorts plan->m_persistentCTAThreshold[0] = plan->m_bManualCoalesce ? 16777216 : 524288; plan->m_persistentCTAThresholdFullBlocks[0] = plan->m_bManualCoalesce ? 2097152: 524288; plan->m_persistentCTAThreshold[1] = plan->m_bManualCoalesce ? 16777216 : 8388608; plan->m_persistentCTAThresholdFullBlocks[1] = plan->m_bManualCoalesce ? 
2097152: 0; // create a map of function pointers to register counts for more accurate occupancy calculation // Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it // Note we only insert the "loop" version of the kernels (the one with the last template param = true) // Because those are the only ones that require persistent CTAs that maximally fill the device. plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_F_T] = maxBlocks(radixSortBlocks<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_F_T_T] = maxBlocks(radixSortBlocks<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_F_T] = maxBlocks(radixSortBlocks<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSB_4_0_T_T_T] = maxBlocks(radixSortBlocks<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_F_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_F_T_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_F_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RSBKO_4_0_T_T_T] = maxBlocks(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_F_T] = maxBlocks(findRadixOffsets<0, false, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_FRO_0_T_T] = maxBlocks(findRadixOffsets<0, true, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_F_T] = maxBlocks(reorderData<0, false, false, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_F_T_T] = maxBlocks(reorderData<0, false, false, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_F_T] = maxBlocks(reorderData<0, false, true, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_F_T_T_T] = maxBlocks(reorderData<0, false, true, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_F_T] = maxBlocks(reorderData<0, true, false, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_F_T_T] = maxBlocks(reorderData<0, true, false, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_F_T] = maxBlocks(reorderData<0, true, true, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RD_0_T_T_T_T] = maxBlocks(reorderData<0, true, true, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_F_T] = maxBlocks(reorderDataKeysOnly<0, false, false, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_F_T_T] = maxBlocks(reorderDataKeysOnly<0, false, false, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_F_T] = maxBlocks(reorderDataKeysOnly<0, false, true, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_F_T_T_T] 
= maxBlocks(reorderDataKeysOnly<0, false, true, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_F_T] = maxBlocks(reorderDataKeysOnly<0, true, false, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_F_T_T] = maxBlocks(reorderDataKeysOnly<0, true, false, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_F_T] = maxBlocks(reorderDataKeysOnly<0, true, true, false, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_RDKO_0_T_T_T_T] = maxBlocks(reorderDataKeysOnly<0, true, true, true, true>, 0, SORT_CTA_SIZE); plan->m_numCTAs[CUDPPRadixSortPlan::KERNEL_EK] = maxBlocks(emptyKernel, 0, SORT_CTA_SIZE); } } } #endif /** * @brief From the programmer-specified sort configuration, * creates internal memory for performing the sort. * * @param[in] plan Pointer to CUDPPRadixSortPlan object **/ void allocRadixSortStorage(CUDPPRadixSortPlan *plan) { #if 0 unsigned int numElements = plan->m_numElements; unsigned int numBlocks = ((numElements % (SORT_CTA_SIZE * 4)) == 0) ? (numElements / (SORT_CTA_SIZE * 4)) : (numElements / (SORT_CTA_SIZE * 4) + 1); switch(plan->m_config.datatype) { case CUDPP_UINT: CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempKeys, numElements * sizeof(unsigned int))); if (!plan->m_bKeysOnly) CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempValues, numElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_counters, WARP_SIZE * numBlocks * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_countersSum, WARP_SIZE * numBlocks * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_blockOffsets, WARP_SIZE * numBlocks * sizeof(unsigned int))); break; case CUDPP_FLOAT: CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempKeys, numElements * sizeof(float))); if (!plan->m_bKeysOnly) CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempValues, numElements * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_counters, WARP_SIZE * numBlocks * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_countersSum, WARP_SIZE * numBlocks * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_blockOffsets, WARP_SIZE * numBlocks * sizeof(float))); break; } initDeviceParameters(plan); #endif } /** @brief Deallocates intermediate memory from allocRadixSortStorage. * * * @param[in] plan Pointer to CUDPPRadixSortPlan object **/ void freeRadixSortStorage(CUDPPRadixSortPlan* plan) { #if 0 CUDA_SAFE_CALL( cudaFree(plan->m_tempKeys)); CUDA_SAFE_CALL( cudaFree(plan->m_tempValues)); CUDA_SAFE_CALL( cudaFree(plan->m_counters)); CUDA_SAFE_CALL( cudaFree(plan->m_countersSum)); CUDA_SAFE_CALL( cudaFree(plan->m_blockOffsets)); #endif } #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/reverse.h> template<typename T> void runSort(T *pkeys, unsigned int *pvals, size_t numElements, const CUDPPRadixSortPlan *plan) { thrust::device_ptr<T> keys((T*)pkeys); thrust::device_ptr<unsigned int> vals((unsigned int*)pvals); if (plan->m_bKeysOnly) thrust::sort(keys, keys + numElements); else thrust::sort_by_key(keys, keys + numElements, vals); if (plan->m_bBackward) { thrust::reverse(keys, keys + numElements); if (!plan->m_bKeysOnly) thrust::reverse(vals, vals + numElements); } CUDA_CHECK_ERROR("cudppRadixSortDispatch"); } /** @brief Dispatch function to perform a sort on an array with * a specified configuration. 
* * This is the dispatch routine which calls radixSort...() with * appropriate template parameters and arguments as specified by * the plan. * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] numElements Number of elements in the sort. * @param[in] plan Configuration information for RadixSort. **/ void cudppRadixSortDispatch(void *keys, void *values, size_t numElements, const CUDPPRadixSortPlan *plan) { switch(plan->m_config.datatype) { case CUDPP_CHAR: runSort<char>((char*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_UCHAR: runSort<unsigned char>((unsigned char*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_INT: runSort<int>((int*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_UINT: runSort<unsigned int>((unsigned int*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_FLOAT: runSort<float>((float*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_DOUBLE: runSort<double>((double*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_LONGLONG: runSort<long long>((long long*)keys, (unsigned int*)values, numElements, plan); break; case CUDPP_ULONGLONG: runSort<unsigned long long>((unsigned long long*)keys, (unsigned int*)values, numElements, plan); break; } /*if (plan->m_bKeysOnly) { switch(plan->m_config.datatype) { case CUDPP_UINT: radixSortKeysOnly((uint*)keys, plan, numElements, false, 32); break; case CUDPP_FLOAT: radixSortFloatKeysOnly((float*)keys, plan, numElements, true, 32); } } else { switch(plan->m_config.datatype) { case CUDPP_UINT: radixSort((uint*)keys, (uint*) values, plan, numElements, false, 32); break; case CUDPP_FLOAT: radixSortFloatKeys((float*)keys, (uint*) values, plan, numElements, true, 32); } }*/ } /** @} */ // end radixsort functions /** @} */ // end cudpp_app
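// ---------------------------------------------------------------------------
// Note: the dispatch above sends every datatype through runSort(), which wraps
// the raw device pointers in thrust::device_ptr, calls thrust::sort or
// thrust::sort_by_key, and reverses the result when plan->m_bBackward asks for
// a descending order. A minimal stand-alone sketch of that same pattern is
// given below (the thrust headers it needs are already included above); the
// helper name thrustSortPairsDescending is illustrative and not part of the
// CUDPP API.
static void thrustSortPairsDescending(float *d_keys, unsigned int *d_vals,
                                      size_t n, bool descending)
{
    thrust::device_ptr<float>        keys(d_keys);
    thrust::device_ptr<unsigned int> vals(d_vals);
    thrust::sort_by_key(keys, keys + n, vals);   // ascending key-value sort
    if (descending) {                            // emulates m_bBackward handling
        thrust::reverse(keys, keys + n);
        thrust::reverse(vals, vals + n);
    }
}
// ---------------------------------------------------------------------------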
#include "radix_tree.cu.h" #include "fs_constants.h" #include "fs_debug.cu.h" #include "util.cu.h" #include "cpu_ipc.cu.h" #include "mallocfree.cu.h" #include "fs_structures.cu.h" #include "timer.h" #include "hash_table.cu.h" #include "swapper.cu.h" #include "fs_globals.cu.h" #include "preclose_table.cu.h" #include "fs_calls.cu.h" // no reference counting here DEBUG_NOINLINE __device__ int single_thread_fsync(int fd) { int res=0; GPU_ASSERT(fd>=0); volatile OTable_entry* e=&g_otable->entries[fd]; GPU_ASSERT(e->refCount>0); volatile FTable_entry* file=&(g_ftable->files[fd]); unsigned int inode=g_otable->entries[fd].cpu_inode; GPU_ASSERT(fd>=0); GPU_ASSERT(inode!=(unsigned int)-1); // globally locking until everything is flushed // this is a slow operation so we don't hold the g_otable lock res=flush_cpu(file,e,e->flags); if (res<0) { // TODO: add error handling GPU_ASSERT(NULL); } return res; } DEBUG_NOINLINE __device__ int gfsync(int fd){ __shared__ int ret; BEGIN_SINGLE_THREAD ret=single_thread_fsync(fd); END_SINGLE_THREAD; return ret; } DEBUG_NOINLINE __device__ int single_thread_ftruncate(int fd, int size) { GPU_ASSERT(size==0); GPU_ASSERT(fd>=0); volatile OTable_entry* e=&g_otable->entries[fd]; int res= truncate_cpu(e->cpu_fd)==0; if (res==0) { e->size=0; g_ftable->files[fd].pages->lock_for_flush(); if (g_ftable->files[fd].pages->count !=0) { g_ftable->files[fd].pages->traverse_all(-1,true,0,0); // kill the tree } g_ftable->files[fd].pages->unlock_after_flush(); } return res; } DEBUG_NOINLINE __device__ int gftruncate(int fd,int size){ __shared__ int ret; BEGIN_SINGLE_THREAD ret=single_thread_ftruncate(fd,size); END_SINGLE_THREAD; return ret; } DEBUG_NOINLINE __device__ int single_thread_close(int fd) { GPU_ASSERT(fd>=0); g_otable->lock(); volatile OTable_entry* e=&g_otable->entries[fd]; e->refCount--; GPU_ASSERT(e->refCount>=0); int res=0; if (e->refCount>0 || e->status!=FSENTRY_OPEN) { __threadfence(); g_otable->unlock(); return 0;} // lock in the opening thread e->status=FSENTRY_CLOSING; volatile FTable_entry* file=&(g_ftable->files[fd]); unsigned int inode=g_otable->entries[fd].cpu_inode; GPU_ASSERT(fd>=0); GPU_ASSERT(inode!=(unsigned int)-1); volatile CPU_IPC_OPEN_Entry* cpu_e=&(g_cpu_ipcOpenQueue->entries[fd]); if (file->pages->dirty_tree) { /// this file is dirty, so we put it into pre_close. 
g_preclose_table->lock(); if( g_preclose_table->add(file,e)) GPU_ASSERT("Pre-close file table is full" == 0); g_preclose_table->unlock(); // we do not close the file on a CPU } else{ // we do close now: we must hold a global lock on the otable // because otherwise the thread which is opening a file will get // a file handle for a closed file // first, exchange the page cache for this file g_closed_ftable.lock_table(inode); // this might be a long because it deallocates and frees the tree unsigned int drop_residence_inode=0; file->pages=g_closed_ftable.exchange(inode, file->pages,&drop_residence_inode); GPU_ASSERT(file->pages); g_closed_ftable.unlock_table(inode); res=cpu_e->close(g_otable->entries[fd].cpu_fd,drop_residence_inode); if (res<0) { // GPU_ASSERT(NULL); } } cpu_e->clean(); file->clean(); e->clean(); __threadfence(); g_otable->unlock(); return res; } DEBUG_NOINLINE __device__ int gclose(int fd){ __shared__ int ret; BEGIN_SINGLE_THREAD ret=single_thread_close(fd); END_SINGLE_THREAD; return ret; } DEBUG_NOINLINE __device__ int single_thread_open(char* filename, int flags) { /* Lock ftable find entry increase ref-count Unlock ftable if not found -> ret E_FTABLE_FULL if (new_entry) -> send CPU open req else -> wait on CPU open req if (req failed) -> Lock ftable dec ref_count if last -> delete entry unlock ftable */ g_otable->lock(); bool isNewEntry=false; int fd=g_otable->findEntry(filename,&isNewEntry,flags); GPU_ASSERT(fd>=0); if (fd<0) { g_otable->unlock(); return E_FSTABLE_FULL;} volatile OTable_entry* e=&g_otable->entries[fd]; e->refCount++; __threadfence(); g_otable->unlock(); volatile CPU_IPC_OPEN_Entry* cpu_e=&(g_cpu_ipcOpenQueue->entries[fd]); if (isNewEntry) { g_preclose_table->lock(); if (g_preclose_table->size!=0) { if (g_preclose_table->findEntry(e->filename,&g_ftable->files[fd],e) == 0) { g_preclose_table->unlock(); e->notify(e->cpu_fd,e->cpu_inode,e->size); return fd; } } g_preclose_table->unlock(); // fetch the cpu_e->open(filename,flags); unsigned int cpu_inode=readNoCache(&cpu_e->cpu_inode); int cpu_fd=readNoCache(&cpu_e->cpu_fd); g_closed_ftable.lock_table(cpu_inode); volatile rtree* fpages=g_closed_ftable.get(cpu_inode); if (fpages!=NULL) { volatile rtree* fpages_old=g_ftable->files[fd].pages; g_ftable->files[fd].pages=fpages; g_closed_ftable.reset(cpu_inode, fpages_old); }else{ g_ftable->files[fd].pages->file_id=getNewFileId(); } g_closed_ftable.unlock_table(cpu_inode); // make sure we flush the cache if the owner has changed int cpu_flush_cache=readNoCache(&cpu_e->flush_cache); if (cpu_flush_cache){ g_ftable->files[fd].pages->lock_for_flush(); if (g_ftable->files[fd].pages->count !=0) { g_ftable->files[fd].pages->traverse_all(-1,true,0,0); // kill the tree } g_ftable->files[fd].pages->file_id=getNewFileId(); g_ftable->files[fd].pages->unlock_after_flush(); } size_t size=readNoCache(&cpu_e->size); e->notify(cpu_fd,cpu_inode,size); } else { e->wait_open(); } if (e->cpu_fd < 0) { g_otable->lock(); e->refCount--; if (e->refCount==0) { e->clean(); cpu_e->clean(); } __threadfence(); g_otable->unlock(); return E_IPC_OPEN_ERROR; } return fd; } DEBUG_NOINLINE __device__ int gopen(char* filename, int flags){ __shared__ int ret; BEGIN_SINGLE_THREAD ret=single_thread_open(filename,flags); END_SINGLE_THREAD; return ret; } #define READ 0 #define WRITE 1 DEBUG_NOINLINE __device__ volatile FTable_page* getRwLockedPage(volatile FTable_entry* fentry, size_t block_id, int fd, int cpu_fd,int type_req){ __shared__ volatile FTable_page* fpage; __shared__ 
FTable_page_locker::page_states_t pstate; // try lockless path first BEGIN_SINGLE_THREAD int deadlock=0; int file_id=fentry->pages->file_id; while(1){ deadlock++; GPU_ASSERT(deadlock<200); pstate=FTable_page_locker::P_INIT; fpage=fentry->pages->getLeaf(block_id,&pstate,0,type_req); // lockless first if (fpage && pstate == FTable_page_locker::P_READY) { // success? GPU_ASSERT(fpage->frame); if ((block_id<<FS_LOGBLOCKSIZE) == fpage->frame->file_offset && file_id == fpage->frame->file_id) { LOCKLESS_SUCCESS; break; } } fentry->pages->lock_tree(); fpage=fentry->pages->getLeaf(block_id,&pstate,1,type_req); // locked version - updates all the counters fentry->pages->unlock_tree(); // TODO: handle file size! // TODO: add reasonable dirty bitmap update here // at this point we have 3 options // 1. pstate == P_INIT => page is locked and needs to be inited // 2. pstate == P_UNDEFINED => page is locked by some other process and we need to // retry to getLeaf // 3. pstate = P_RW => lock page with lock_init_rw if (pstate == FTable_page_locker::P_UNDEFINED ) { // we'd better block PAGE_ALLOC_RETRIES fpage->locker.lock_wait_unlock(); // just wait continue; } break; } if (pstate == FTable_page_locker::P_INIT ){ GPU_ASSERT(fpage->locker.lock ==1); GPU_ASSERT(fpage->frame==NULL); // if we inited, the page is locked and we just keep going /*** DEBUG if (atomicAdd(&countInited[block_id],1)>=1) { GPU_ASSERT(0); } **/ fpage->allocPage(file_id,block_id<<FS_LOGBLOCKSIZE); //GPU_ASSERT((fpage->frame->file_offset)>=0); if (cpu_fd>=0) { int datasize=read_cpu(cpu_fd,fpage->frame); if (datasize < 0) { // TODO: error handling GPU_ASSERT("Failed to read data from CPU"==NULL); } fpage->frame->content_size=datasize; } if (type_req==PAGE_WRITE_ACCESS) fpage->markDirty(); } GPU_ASSERT((pstate == FTable_page_locker::P_INIT && fpage->locker.lock) || (( pstate == FTable_page_locker::P_RW || pstate== FTable_page_locker::P_READY) && fpage->locker.rw_counter>0) ); // if we do not need to zero out the page (cpu_fd<0) // if the page was initialized, return. 
Make sure to return with all threads active if (pstate == FTable_page_locker::P_INIT && cpu_fd>=0 ) fpage->locker.unlock_init(); END_SINGLE_THREAD if ((pstate == FTable_page_locker::P_INIT && cpu_fd >= 0 ) || pstate == FTable_page_locker::P_RW || pstate == FTable_page_locker::P_READY ) return fpage; //fill the page with zeros - optimization for the case of write-once exclusive create owned by GPU bzero_page((volatile char*)fpage->frame->page); __threadfence(); // make sure all threads will see these zeros BEGIN_SINGLE_THREAD GPU_ASSERT(cpu_fd<0); GPU_ASSERT(pstate == FTable_page_locker::P_INIT); fpage->frame->content_size=0; fpage->locker.unlock_init(); //GPU_ASSERT(fpage->frame->file_offset>=0); END_SINGLE_THREAD return fpage; } DEBUG_NOINLINE __device__ int gmsync(volatile void *addr, size_t length,int flags) { size_t tmp=((char*)addr)- ((char*)g_ppool->rawStorage); // assert(tmp>=0); size_t offset=tmp>>FS_LOGBLOCKSIZE; GPU_ASSERT(offset<PPOOL_FRAMES); __threadfence(); // make sure all writes to the page become visible BEGIN_SINGLE_THREAD volatile PFrame* p=&(g_ppool->frames[offset]); volatile FTable_page* fp=p->fpage; GPU_ASSERT(fp); // super ineffisient way to find which file this page belongs to int i=0; for( i=0;i<FSTABLE_SIZE;i++){ if (p->file_id == g_ftable->files[i].pages->file_id){ // no lock on page is required - last 0 writeback_page(g_otable->entries[i].cpu_fd,fp,g_otable->entries[i].flags,0); break; } } GPU_ASSERT(i!=FSTABLE_SIZE); // if this assert fires it means that the file with that id was not // found among open files. That's not valid becuase msync works only if the // file is mapped -> it cannot be closed. END_SINGLE_THREAD return 0; } DEBUG_NOINLINE __device__ int gmunmap(volatile void *addr, size_t length) { size_t tmp=((char*)addr)- ((char*)g_ppool->rawStorage); // assert(tmp>=0); size_t offset=tmp>>FS_LOGBLOCKSIZE; if (offset>=PPOOL_FRAMES) return -1; __threadfence(); // make sure all writes to the page become visible BEGIN_SINGLE_THREAD volatile PFrame* p=&(g_ppool->frames[offset]); volatile FTable_page* fp=p->fpage; GPU_ASSERT(fp); fp->locker.unlock_rw(); END_SINGLE_THREAD return 0; } DEBUG_NOINLINE __device__ volatile void* gmmap(void *addr, size_t size, int prot, int flags, int fd, off_t offset) { __shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem __shared__ size_t block_id; __shared__ int block_offset; volatile FTable_page* fpage; __shared__ int cpu_fd; __shared__ volatile FTable_entry* fentry; BEGIN_SINGLE_THREAD fentry=&g_ftable->files[fd]; block_id=offset2block(offset,FS_LOGBLOCKSIZE); block_offset=offset2blockoffset(offset,FS_BLOCKSIZE); GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES); cpu_fd=g_otable->entries[fd].cpu_fd; GPU_ASSERT( cpu_fd >=0 && g_otable->entries[fd].refCount >0 ); if (block_offset+size > FS_BLOCKSIZE) assert("Reading beyond the page boundary"==0); GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE); // decide whether to fetch data or not if ( g_otable->entries[fd].flags == O_GWRONCE ) cpu_fd=-1; END_SINGLE_THREAD int purpose= (g_otable->entries[fd].flags == O_GRDONLY) ? 
PAGE_READ_ACCESS:PAGE_WRITE_ACCESS; fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd, purpose); BEGIN_SINGLE_THREAD // page inited, just read, frane us a _shared_ mem variable frame=fpage->frame; //TODO: handle reading beyond eof if (frame->content_size < block_offset+size && flags==O_GRDONLY) { GPU_ASSERT("Failed to map beyond the end of file"!=NULL); } if (flags!= O_GRDONLY) atomicMax((uint*)&(frame->content_size),block_offset+size); END_SINGLE_THREAD GPU_ASSERT(frame!=NULL); return (void*)(((uchar*)(frame->page))+block_offset); } DEBUG_NOINLINE __device__ size_t gwrite(int fd,size_t offset, size_t size, uchar* buffer) { //attempt to write to a specific block //if null -> allocate // otherwise -> copy to bufcache // mark dirty // we ignore that we may run out of disk space GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES); GPU_ASSERT( g_otable->entries[fd].refCount >0 ); __shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem __shared__ size_t block_id; __shared__ int block_offset; __shared__ int cpu_fd; __shared__ int written; __shared__ volatile FTable_page* fpage; __shared__ volatile FTable_entry* fentry; BEGIN_SINGLE_THREAD block_id=offset2block(offset,FS_LOGBLOCKSIZE); block_offset=offset2blockoffset(offset,FS_BLOCKSIZE); fentry=&g_ftable->files[fd]; cpu_fd=g_otable->entries[fd].cpu_fd; if (g_otable->entries[fd].flags == O_GWRONCE || ( size == FS_BLOCKSIZE && block_offset==0)) { // we will not read the data from CPU if (1) the file is ONLY_ONCE, or the writes are whole-page writes cpu_fd=-1; } written=0; END_SINGLE_THREAD while(written<size){ int single_op=min((int)(size-written),(int)(FS_BLOCKSIZE-block_offset)); GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE); //TODO: handle reading beyond eof // allow multiple threads to get into this function // the value returned is correct only in thread 0 fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd, PAGE_WRITE_ACCESS); BEGIN_SINGLE_THREAD frame=fpage->frame; atomicMax((uint*)&frame->content_size,block_offset+single_op); fpage->markDirty(); END_SINGLE_THREAD // go over the page and reset it if necessary // cpu_fd==-1 it will reset the page copy_block((uchar*)(frame->page)+block_offset,buffer+written,single_op); __threadfence(); // we must sync here otherwise swapper will be inconsistent BEGIN_SINGLE_THREAD written+=single_op; fpage->locker.unlock_rw(); // the page is unlocked for flush only here. 
block_id++; block_offset=0; END_SINGLE_THREAD; } return size; } // currently pread is expected to be issued by all threads in a thread block // with the same parameters // all parameters other than in thread idx ==0 are ignored DEBUG_NOINLINE __device__ size_t gread(int fd, size_t offset, size_t size, uchar* buffer) { __shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem __shared__ volatile FTable_page* fpage; __shared__ size_t block_id; __shared__ int block_offset; __shared__ volatile FTable_entry* fentry; __shared__ int cpu_fd; __shared__ int data_read; BEGIN_SINGLE_THREAD block_id=offset2block(offset,FS_LOGBLOCKSIZE); block_offset=offset2blockoffset(offset,FS_BLOCKSIZE); fentry=&g_ftable->files[fd]; cpu_fd=g_otable->entries[fd].cpu_fd; GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES); GPU_ASSERT( cpu_fd >=0 && g_otable->entries[fd].refCount >0 ); data_read=0; END_SINGLE_THREAD while(data_read<size){ int single_op=min((int)(size-data_read),(int)(FS_BLOCKSIZE-block_offset)); GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE); // synchtreads in getRwLockedPage fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd,PAGE_READ_ACCESS); // page inited, just read, frane us a _shared_ mem variable frame=fpage->frame; //TODO: handle reading beyond eof GPU_ASSERT(frame!=NULL); copyNoCache_block(buffer+data_read,(uchar*)(frame->page)+block_offset,single_op); BEGIN_SINGLE_THREAD block_offset=0; data_read+=single_op; block_id++; fpage->locker.unlock_rw(); END_SINGLE_THREAD } return size; } DEBUG_NOINLINE __device__ uint gunlink(char* filename) { GPU_ASSERT(NULL); // tobe implemented return 0; } DEBUG_NOINLINE __device__ size_t fstat(int fd) { return g_otable->entries[fd].size; } #endif
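// ---------------------------------------------------------------------------
// Usage sketch (not part of the original sources): gopen/gread/gwrite/gclose
// above all funnel through BEGIN_SINGLE_THREAD internally, so they are meant
// to be reached by every thread of the block with identical arguments (as the
// comment on gread states). The kernel below illustrates that convention; the
// kernel name, the destination buffer and the file name "input.dat" are
// hypothetical, and the surrounding GPU file system is assumed to be
// initialized before launch.
__global__ void copy_first_block_kernel(uchar* dst)
{
    int fd = gopen((char*)"input.dat", O_GRDONLY);  // same fd in all threads
    if (fd < 0) return;                             // open failed (e.g. table full)
    gread(fd, 0, FS_BLOCKSIZE, dst);                // read one file-system page
    gclose(fd);
}
// ---------------------------------------------------------------------------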
#include <thrust/device_ptr.h> #include <thrust/scan.h> #include <cuComplex.h> #include "../cuspreadinterp.h" #include "../memtransfer.h" using namespace std; // only relates to the locations of the nodes, which only needs to be done once int CUSPREAD2D_PAUL_PROP(int nf1, int nf2, int M, CUFINUFFT_PLAN d_plan) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ns=d_plan->spopts.nspread; int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int numbins[2]; numbins[0] = ceil((FLT) nf1/bin_size_x); numbins[1] = ceil((FLT) nf2/bin_size_y); #ifdef DEBUG cout<<"[debug ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"x"<<d_plan->opts.gpu_binsizey<<"]"<<endl; cout<<"[debug ] numbins = ["<<numbins[0]<<"x"<<numbins[1]<<"]"<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; #ifdef DEBUG FLT *h_kx; FLT *h_ky; h_kx = (FLT*)malloc(M*sizeof(FLT)); h_ky = (FLT*)malloc(M*sizeof(FLT)); checkCudaErrors(cudaMemcpy(h_kx,d_kx,M*sizeof(FLT),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_ky,d_ky,M*sizeof(FLT),cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ cout<<"[debug ]"; cout <<" ("<<setw(3)<<h_kx[i]<<","<<setw(3)<<h_ky[i]<<")"<<endl; } #endif int *d_binsize = d_plan->binsize; int *d_finegridsize = d_plan->finegridsize; int *d_sortidx = d_plan->sortidx; int *d_fgstartpts = d_plan->fgstartpts; int *d_idxnupts = d_plan->idxnupts; int *d_numsubprob = d_plan->numsubprob; int pirange=d_plan->spopts.pirange; void *d_temp_storage = NULL; cudaEventRecord(start); checkCudaErrors(cudaMemset(d_finegridsize,0,nf1*nf2*sizeof(int))); LocateFineGridPos_Paul<<<(M+1024-1)/1024, 1024>>>(M,nf1,nf2,bin_size_x, bin_size_y,numbins[0],numbins[1],d_binsize,ns,d_kx,d_ky, d_sortidx,d_finegridsize,pirange); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel LocateFineGridPos \t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] ns = %d\n", ns); int binx, biny, binidx; int *h_finegridsize; h_finegridsize = (int*)malloc(nf1*nf2*sizeof(int)); checkCudaErrors(cudaMemcpy(h_finegridsize,d_finegridsize, nf1*nf2*sizeof(int),cudaMemcpyDeviceToHost)); for(int j=0; j<nf2; j++){ if( j % d_plan->opts.gpu_binsizey == 0) printf("\n"); biny = floor(j/bin_size_y); cout<<"[debug ] "; for(int i=0; i<nf1; i++){ if( i % d_plan->opts.gpu_binsizex == 0 && i!=0) printf(" |"); binx = floor(i/bin_size_x); binidx = binx+biny*numbins[0]; if(i!=0) cout<<" "; cout <<setw(2)<<h_finegridsize[binidx*bin_size_x* bin_size_y+ (i-binx*bin_size_x)+(j-bin_size_y*biny) *bin_size_x]; } cout<<endl; } cout<<"[debug ] ------------------------------------------------"<<endl; free(h_finegridsize); #endif #ifdef DEBUG int *h_binsize;// For debug h_binsize = (int*)malloc(numbins[0]*numbins[1]*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins[0]*numbins[1]* sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] bin size:"<<endl; for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<" bin["<<setw(3)<<i<<","<<setw(3)<<j<<"]="<< h_binsize[i+j*numbins[0]]; } cout<<endl; } free(h_binsize); #endif #ifdef DEBUG cout<<"[debug ] ------------------------------------------------"<<endl; int *h_sortidx; h_sortidx = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_sortidx,d_sortidx,M*sizeof(int), cudaMemcpyDeviceToHost)); cout<<"[debug ]"; for(int i=0; i<M; i++){ cout 
<<"point["<<setw(3)<<i<<"]="<<setw(3)<<h_sortidx[i]<<endl; } #endif int n=nf1*nf2; cudaEventRecord(start); thrust::device_ptr<int> d_ptr(d_finegridsize); thrust::device_ptr<int> d_result(d_fgstartpts); thrust::exclusive_scan(d_ptr, d_ptr + n, d_result); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Scan fingridsize array\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_fgstartpts; h_fgstartpts = (int*)malloc((nf1*nf2)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_fgstartpts,d_fgstartpts, (nf1*nf2)*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] Result of scan finegridsize array:"<<endl; for(int j=0; j<nf2; j++){ if( j % d_plan->opts.gpu_binsizey == 0) printf("\n"); biny = floor(j/bin_size_y); cout<<"[debug ] "; for(int i=0; i<nf1; i++){ if( i % d_plan->opts.gpu_binsizex == 0 && i!=0) printf(" |"); binx = floor(i/bin_size_x); binidx = binx+biny*numbins[0]; if(i!=0) cout<<" "; cout<<setw(2)<<h_fgstartpts[binidx*bin_size_x*bin_size_y+ (i - binx*bin_size_x)+(j-bin_size_y*biny)* bin_size_x]; } cout<<endl; } free(h_fgstartpts); cout<<"[debug ] -----------------------------------------------"<<endl; #endif cudaEventRecord(start); CalcInvertofGlobalSortIdx_Paul<<<(M+1024-1)/1024,1024>>>(nf1, nf2, M, bin_size_x, bin_size_y, numbins[0], numbins[1],ns,d_kx, d_ky, d_fgstartpts, d_sortidx, d_idxnupts, pirange); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tCalcInvertofGlobalSortIdx_Paul\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_idxnupts; h_idxnupts = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,M*sizeof(int), cudaMemcpyDeviceToHost)); for (int i=0; i<M; i++){ cout <<"idx="<< h_idxnupts[i]<<" "; } cout<<endl; free(h_idxnupts); #endif int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize; cudaEventRecord(start); int blocksize = bin_size_x*bin_size_y; cudaEventRecord(start); CalcSubProb_2d_Paul<<<numbins[0]*numbins[1], blocksize>>>( d_finegridsize, d_numsubprob, maxsubprobsize, bin_size_x, bin_size_y); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tCalcSubProb_2d_Paul\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int* h_numsubprob; h_numsubprob = (int*) malloc(n*sizeof(int)); checkCudaErrors(cudaMemcpy(h_numsubprob,d_numsubprob,numbins[0]* numbins[1]*sizeof(int),cudaMemcpyDeviceToHost)); for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<"nsub["<<setw(3)<<i<<","<<setw(3)<<j<<"] = "<< setw(2)<<h_numsubprob[i+j*numbins[0]]; } cout<<endl; } free(h_numsubprob); #endif int *d_subprobstartpts = d_plan->subprobstartpts; n = numbins[0]*numbins[1]; cudaEventRecord(start); d_ptr = thrust::device_pointer_cast(d_numsubprob); d_result = thrust::device_pointer_cast(d_subprobstartpts+1); thrust::inclusive_scan(d_ptr, d_ptr + n, d_result); checkCudaErrors(cudaMemset(d_subprobstartpts,0,sizeof(int))); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tScan subproblem size array\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] Subproblem start points\n"); int* h_subprobstartpts; h_subprobstartpts = (int*) malloc((n+1)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprobstartpts,d_subprobstartpts, 
(n+1)*sizeof(int),cudaMemcpyDeviceToHost)); for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<"nsub["<<setw(3)<<i<<","<<setw(3)<<j<<"] = "<<setw(2)<< h_subprobstartpts[i+j*numbins[0]]; } cout<<endl; } printf("[debug ] Total number of subproblems = %d\n", h_subprobstartpts[n]); free(h_subprobstartpts); #endif int *d_subprob_to_bin; int totalnumsubprob; cudaEventRecord(start); checkCudaErrors(cudaMemcpy(&totalnumsubprob,&d_subprobstartpts[n], sizeof(int),cudaMemcpyDeviceToHost)); // TODO: Warning! This gets malloc'ed but not freed checkCudaErrors(cudaMalloc(&d_subprob_to_bin,totalnumsubprob*sizeof(int))); MapBintoSubProb_2d<<<(numbins[0]*numbins[1]+1024-1)/1024, 1024>>>( d_subprob_to_bin,d_subprobstartpts,d_numsubprob,numbins[0]* numbins[1]); assert(d_subprob_to_bin != NULL); d_plan->subprob_to_bin = d_subprob_to_bin; assert(d_plan->subprob_to_bin != NULL); d_plan->totalnumsubprob = totalnumsubprob; #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tMap Subproblem to Bins\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] Map Subproblem to Bins\n"); int* h_subprob_to_bin; h_subprob_to_bin = (int*) malloc((totalnumsubprob)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprob_to_bin,d_subprob_to_bin, (totalnumsubprob)*sizeof(int),cudaMemcpyDeviceToHost)); for(int j=0; j<totalnumsubprob; j++){ cout<<"[debug ] "; cout <<"nsub["<<j<<"] = "<<setw(2)<<h_subprob_to_bin[j]; cout<<endl; } #endif cudaFree(d_temp_storage); return 0; } int CUSPREAD2D_PAUL(int nf1, int nf2, int M, CUFINUFFT_PLAN d_plan, int blksize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ns=d_plan->spopts.nspread; // psi's support in terms of number of cells FLT es_c=d_plan->spopts.ES_c; FLT es_beta=d_plan->spopts.ES_beta; int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; // assume that bin_size_x > ns/2; int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int numbins[2]; numbins[0] = ceil((FLT) nf1/bin_size_x); numbins[1] = ceil((FLT) nf2/bin_size_y); #ifdef INFO cout<<"[info ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"x"<<d_plan->opts.gpu_binsizey<<"]"<<endl; cout<<"[info ] numbins = ["<<numbins[0]<<"x"<<numbins[1]<<"]"<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; CUCPX* d_c = d_plan->c; CUCPX* d_fw = d_plan->fw; int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_numsubprob = d_plan->numsubprob; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_idxnupts = d_plan->idxnupts; int *d_fgstartpts = d_plan->fgstartpts; int *d_finegridsize = d_plan->finegridsize; int totalnumsubprob=d_plan->totalnumsubprob; int *d_subprob_to_bin = d_plan->subprob_to_bin; int pirange=d_plan->spopts.pirange; FLT sigma=d_plan->opts.upsampfac; cudaEventRecord(start); size_t sharedplanorysize = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+ 2*ceil(ns/2.0))*sizeof(CUCPX); if(sharedplanorysize > 49152){ cout<<"error: not enough shared memory"<<endl; return 1; } for(int t=0; t<blksize; t++){ Spread_2d_Subprob_Paul<<<totalnumsubprob,1024, sharedplanorysize>>>(d_kx, d_ky, d_c+t*M, d_fw+t*nf1*nf2, M, ns, nf1, nf2, es_c, es_beta, sigma, d_binstartpts, d_binsize, bin_size_x, bin_size_y, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins[0], numbins[1], d_idxnupts, d_fgstartpts, d_finegridsize, pirange); } #ifdef SPREADTIME float 
milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Spread_2d_Subprob_Paul \t%.3g ms\n", milliseconds); #endif return 0; }
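// ---------------------------------------------------------------------------
// Note: the two scans used above (exclusive scan of d_finegridsize, inclusive
// scan of d_numsubprob written at an offset of one) are both instances of the
// usual counts-to-offsets idiom. A self-contained sketch of that idiom on a
// raw device array of per-bin counts is given below; the helper name and its
// arguments are illustrative, not part of the cuFINUFFT API.
static int countsToStartPts(int* d_counts, int* d_startpts, int nbins)
{
    thrust::device_ptr<int> counts(d_counts);
    thrust::device_ptr<int> starts(d_startpts);
    // starts[i] = counts[0] + ... + counts[i-1], with starts[0] = 0
    thrust::exclusive_scan(counts, counts + nbins, starts);
    // total number of elements = last offset + last count
    int last_start = 0, last_count = 0;
    cudaMemcpy(&last_start, d_startpts + nbins - 1, sizeof(int),
               cudaMemcpyDeviceToHost);
    cudaMemcpy(&last_count, d_counts + nbins - 1, sizeof(int),
               cudaMemcpyDeviceToHost);
    return last_start + last_count;
}
// ---------------------------------------------------------------------------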
#include "../../utils/timer.h" /* You will probably want to move the following declarations to a central header file. */ struct stemmer; extern struct stemmer *create_stemmer(void); extern void free_stemmer(struct stemmer *z); extern int stem(struct stemmer *z, char *b, int k); /* The main part of the stemming algorithm starts here. */ #define TRUE 1 #define FALSE 0 #define INC 32 /* size units in which s is increased */ /* stemmer is a structure for a few local bits of data, */ struct stemmer { // char *b; /* buffer for word to be stemmed */ char b[INC + 1]; /* buffer for word to be stemmed */ int k; /* offset to the end of the string */ int j; /* a general offset into the string */ }; /* Member b is a buffer holding a word to be stemmed. The letters are in b[0], b[1] ... ending at b[z->k]. Member k is readjusted downwards as the stemming progresses. Zero termination is not in fact used in the algorithm. Note that only lower case sequences are stemmed. Forcing to lower case should be done before stem(...) is called. Typical usage is: struct stemmer * z = create_stemmer(); char b[] = "pencils"; int res = stem(z, b, 6); /- stem the 7 characters of b[0] to b[6]. The result, res, will be 5 (the 's' is removed). -/ free_stemmer(z); */ extern struct stemmer *create_stemmer(void) { return (struct stemmer *)malloc(sizeof(struct stemmer)); /* assume malloc succeeds */ } extern void free_stemmer(struct stemmer *z) { free(z); } /* cons(z, i) is TRUE <=> b[i] is a consonant. ('b' means 'z->b', but here and below we drop 'z->' in comments. */ __host__ __device__ static int cons1(struct stemmer *z, int i) { switch (z->b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return FALSE; default: return TRUE; } } __host__ __device__ static int cons(struct stemmer *z, int i) { switch (z->b[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': return FALSE; case 'y': return (i == 0) ? TRUE : !cons1(z, i - 1); default: return TRUE; } } /* m(z) measures the number of consonant sequences between 0 and j. if c is a consonant sequence and v a vowel sequence, and <..> indicates arbitrary presence, <c><v> gives 0 <c>vc<v> gives 1 <c>vcvc<v> gives 2 <c>vcvcvc<v> gives 3 .... */ __host__ __device__ static int m(struct stemmer *z) { int n = 0; int i = 0; int j = z->j; while (TRUE) { if (i > j) return n; if (!cons(z, i)) break; i++; } i++; while (TRUE) { while (TRUE) { if (i > j) return n; if (cons(z, i)) break; i++; } i++; n++; while (TRUE) { if (i > j) return n; if (!cons(z, i)) break; i++; } i++; } } /* vowelinstem(z) is TRUE <=> 0,...j contains a vowel */ __host__ __device__ static int vowelinstem(struct stemmer *z) { int j = z->j; int i; for (i = 0; i <= j; i++) if (!cons(z, i)) return TRUE; return FALSE; } /* doublec(z, j) is TRUE <=> j,(j-1) contain a double consonant. */ __host__ __device__ static int doublec(struct stemmer *z, int j) { char *b = z->b; if (j < 1) return FALSE; if (b[j] != b[j - 1]) return FALSE; return cons(z, j); } /* cvc(z, i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant and also if the second c is not w,x or y. this is used when trying to restore an e at the end of a short word. e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray. */ __host__ __device__ static int cvc(struct stemmer *z, int i) { if (i < 2 || !cons(z, i) || cons(z, i - 1) || !cons(z, i - 2)) return FALSE; { int ch = z->b[i]; if (ch == 'w' || ch == 'x' || ch == 'y') return FALSE; } return TRUE; } /* ends(z, s) is TRUE <=> 0,...k ends with the string s. 
*/ __host__ __device__ static int memcmp1(const void *buffer1, const void *buffer2, int count) { if (!count) return (0); while (--count && *(char *)buffer1 == *(char *)buffer2) { buffer1 = (char *)buffer1 + 1; buffer2 = (char *)buffer2 + 1; } return (*((unsigned char *)buffer1) - *((unsigned char *)buffer2)); } __host__ __device__ static int ends(struct stemmer *z, char *s) { int length = s[0]; char *b = z->b; int k = z->k; if (s[length] != b[k]) return FALSE; /* tiny speed-up */ if (length > k + 1) return FALSE; if (memcmp1(b + k - length + 1, s + 1, length) != 0) return FALSE; z->j = k - length; return TRUE; } /* setto(z, s) sets (j+1),...k to the characters in the string s, readjusting k. */ __host__ __device__ void memmove1(void *dst, const void *src, int count) { char *dst_t; char *src_t; if ((unsigned char *)dst <= (unsigned char *)src || (unsigned char *)dst >= ((unsigned char *)src + count)) { dst_t = (char *)dst; src_t = (char *)src; while (count--) { *dst_t++ = *src_t++; } } else { dst_t = (char *)dst + count - 1; src_t = (char *)src + count - 1; while (count--) { *dst_t-- = *src_t--; } } } __host__ __device__ static void setto(struct stemmer *z, char *s) { int length = s[0]; int j = z->j; memmove1(z->b + j + 1, s + 1, length); z->k = j + length; } /* r(z, s) is used further down. */ __host__ __device__ static void r(struct stemmer *z, char *s) { if (m(z) > 0) setto(z, s); } /* step1ab(z) gets rid of plurals and -ed or -ing. e.g. caresses -> caress ponies -> poni ties -> ti caress -> caress cats -> cat feed -> feed agreed -> agree disabled -> disable matting -> mat mating -> mate meeting -> meet milling -> mill messing -> mess meetings -> meet */ /* In stem(z, b, k), b is a char pointer, and the string to be stemmed is from b[0] to b[k] inclusive. Possibly b[k+1] == '\0', but it is not important. The stemmer adjusts the characters b[0] ... b[k] and returns the new end-point of the string, k'. Stemming never increases word length, so 0 <= k' <= k. */ __host__ __device__ static void step1ab(struct stemmer *z) { char *b = z->b; if (b[z->k] == 's') { if (ends(z, "\04" "sses")) z->k -= 2; else if (ends(z, "\03" "ies")) setto(z, "\01" "i"); else if (b[z->k - 1] != 's') z->k--; } if (ends(z, "\03" "eed")) { if (m(z) > 0) z->k--; } else if ((ends(z, "\02" "ed") || ends(z, "\03" "ing")) && vowelinstem(z)) { z->k = z->j; if (ends(z, "\02" "at")) setto(z, "\03" "ate"); else if (ends(z, "\02" "bl")) setto(z, "\03" "ble"); else if (ends(z, "\02" "iz")) setto(z, "\03" "ize"); else if (doublec(z, z->k)) { z->k--; { int ch = b[z->k]; if (ch == 'l' || ch == 's' || ch == 'z') z->k++; } } else if (m(z) == 1 && cvc(z, z->k)) setto(z, "\01" "e"); } } /* step1c(z) turns terminal y to i when there is another vowel in the stem. */ __host__ __device__ static void step1c(struct stemmer *z) { if (ends(z, "\01" "y") && vowelinstem(z)) z->b[z->k] = 'i'; } /* step2(z) maps double suffices to single ones. so -ization ( = -ize plus -ation) maps to -ize etc. note that the string before the suffix must give m(z) > 0. 
*/ __host__ __device__ static void step2(struct stemmer *z) { switch (z->b[z->k - 1]) { case 'a': if (ends(z, "\07" "ational")) { r(z, "\03" "ate"); break; } if (ends(z, "\06" "tional")) { r(z, "\04" "tion"); break; } break; case 'c': if (ends(z, "\04" "enci")) { r(z, "\04" "ence"); break; } if (ends(z, "\04" "anci")) { r(z, "\04" "ance"); break; } break; case 'e': if (ends(z, "\04" "izer")) { r(z, "\03" "ize"); break; } break; case 'l': if (ends(z, "\03" "bli")) { r(z, "\03" "ble"); break; } /*-DEPARTURE-*/ /* To match the published algorithm, replace this line with case 'l': if (ends(z, "\04" "abli")) { r(z, "\04" "able"); break; } */ if (ends(z, "\04" "alli")) { r(z, "\02" "al"); break; } if (ends(z, "\05" "entli")) { r(z, "\03" "ent"); break; } if (ends(z, "\03" "eli")) { r(z, "\01" "e"); break; } if (ends(z, "\05" "ousli")) { r(z, "\03" "ous"); break; } break; case 'o': if (ends(z, "\07" "ization")) { r(z, "\03" "ize"); break; } if (ends(z, "\05" "ation")) { r(z, "\03" "ate"); break; } if (ends(z, "\04" "ator")) { r(z, "\03" "ate"); break; } break; case 's': if (ends(z, "\05" "alism")) { r(z, "\02" "al"); break; } if (ends(z, "\07" "iveness")) { r(z, "\03" "ive"); break; } if (ends(z, "\07" "fulness")) { r(z, "\03" "ful"); break; } if (ends(z, "\07" "ousness")) { r(z, "\03" "ous"); break; } break; case 't': if (ends(z, "\05" "aliti")) { r(z, "\02" "al"); break; } if (ends(z, "\05" "iviti")) { r(z, "\03" "ive"); break; } if (ends(z, "\06" "biliti")) { r(z, "\03" "ble"); break; } break; case 'g': if (ends(z, "\04" "logi")) { r(z, "\03" "log"); break; } /*-DEPARTURE-*/ /* To match the published algorithm, delete this line */ } } /* step3(z) deals with -ic-, -full, -ness etc. similar strategy to step2. */ __host__ __device__ static void step3(struct stemmer *z) { switch (z->b[z->k]) { case 'e': if (ends(z, "\05" "icate")) { r(z, "\02" "ic"); break; } if (ends(z, "\05" "ative")) { r(z, "\00" ""); break; } if (ends(z, "\05" "alize")) { r(z, "\02" "al"); break; } break; case 'i': if (ends(z, "\05" "iciti")) { r(z, "\02" "ic"); break; } break; case 'l': if (ends(z, "\04" "ical")) { r(z, "\02" "ic"); break; } if (ends(z, "\03" "ful")) { r(z, "\00" ""); break; } break; case 's': if (ends(z, "\04" "ness")) { r(z, "\00" ""); break; } break; } } /* step4(z) takes off -ant, -ence etc., in context <c>vcvc<v>. */ __host__ __device__ static void step4(struct stemmer *z) { switch (z->b[z->k - 1]) { case 'a': if (ends(z, "\02" "al")) break; return; case 'c': if (ends(z, "\04" "ance")) break; if (ends(z, "\04" "ence")) break; return; case 'e': if (ends(z, "\02" "er")) break; return; case 'i': if (ends(z, "\02" "ic")) break; return; case 'l': if (ends(z, "\04" "able")) break; if (ends(z, "\04" "ible")) break; return; case 'n': if (ends(z, "\03" "ant")) break; if (ends(z, "\05" "ement")) break; if (ends(z, "\04" "ment")) break; if (ends(z, "\03" "ent")) break; return; case 'o': if (ends(z, "\03" "ion") && (z->b[z->j] == 's' || z->b[z->j] == 't')) break; if (ends(z, "\02" "ou")) break; return; /* takes care of -ous */ case 's': if (ends(z, "\03" "ism")) break; return; case 't': if (ends(z, "\03" "ate")) break; if (ends(z, "\03" "iti")) break; return; case 'u': if (ends(z, "\03" "ous")) break; return; case 'v': if (ends(z, "\03" "ive")) break; return; case 'z': if (ends(z, "\03" "ize")) break; return; default: return; } if (m(z) > 1) z->k = z->j; } /* step5(z) removes a final -e if m(z) > 1, and changes -ll to -l if m(z) > 1. 
*/ __host__ __device__ static void step5(struct stemmer *z) { char *b = z->b; z->j = z->k; if (b[z->k] == 'e') { int a = m(z); if (a > 1 || a == 1 && !cvc(z, z->k - 1)) z->k--; } if (b[z->k] == 'l' && doublec(z, z->k) && m(z) > 1) z->k--; } /* In stem(z, b, k), b is a char pointer, and the string to be stemmed is from b[0] to b[k] inclusive. Possibly b[k+1] == '\0', but it is not important. The stemmer adjusts the characters b[0] ... b[k] and returns the new end-point of the string, k'. Stemming never increases word length, so 0 <= k' <= k. */ __global__ void stem_gpu(struct stemmer *stem_list, int words) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < words) { if (stem_list[tid].k <= 1) { return; } step1ab(&(stem_list[tid])); step1c(&(stem_list[tid])); step2(&(stem_list[tid])); step3(&(stem_list[tid])); step4(&(stem_list[tid])); step5(&(stem_list[tid])); stem_list[tid].b[stem_list[tid].k + 1] = 0; } } /*--------------------stemmer definition ends here------------------------*/ #define A_INC 10000 static int i_max = INC; /* maximum offset in s */ struct stemmer *stem_list; struct stemmer *gpu_stem_list; #define LETTER(ch) (isupper(ch) || islower(ch)) int load_data(struct stemmer *stem_list, FILE *f) { static int a_max = WORDS; int a_size = 0; while (TRUE) { int ch = getc(f); if (ch == EOF) return a_size; char *s = (char *)malloc(i_max + 1); if (LETTER(ch)) { int i = 0; while (TRUE) { if (i == i_max) { i_max += INC; s = (char *)realloc(s, i_max + 1); } ch = tolower(ch); /* forces lower case */ stem_list[a_size].b[i] = ch; s[i] = ch; i++; ch = getc(f); if (!LETTER(ch)) { ungetc(ch, f); break; } } stem_list[a_size].k = i - 1; if (a_size == a_max) { a_max += A_INC; stem_list = (struct stemmer *)realloc(stem_list, a_max * sizeof(struct stemmer)); } a_size += 1; } } } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "[ERROR] Invalid arguments provided.\n\n"); fprintf(stderr, "Usage: %s [WORDS] [INPUT FILE]\n\n", argv[0]); exit(0); } /* Timing */ STATS_INIT("kernel", "gpu_porter_stemming"); PRINT_STAT_STRING("abrv", "gpu_stemmer"); cudaEvent_t eStart, eStop; float cuda_elapsedTime; int WORDS = atoi(argv[1]); // allocate data FILE *f; f = fopen(argv[2], "r"); if (f == 0) { fprintf(stderr, "File %s not found\n", argv[1]); exit(1); } cudaMallocHost((void **)&stem_list, WORDS* sizeof(struct stemmer)); int words = load_data(WORDS, stem_list, f); PRINT_STAT_INT("words", words); fclose(f); cudaEventCreate(&eStart); cudaEventCreate(&eStop); cudaMalloc((void **)&gpu_stem_list, words * sizeof(struct stemmer)); cudaEventRecord(eStart, 0); cudaMemcpy(gpu_stem_list, stem_list, words * sizeof(struct stemmer), cudaMemcpyHostToDevice); cudaEventRecord(eStop, 0); cudaEventSynchronize(eStop); cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("host_to_device", cuda_elapsedTime); cudaEventRecord(eStart, 0); dim3 block(256); dim3 grid; grid.x = ceil(words * 1.0 / block.x); cudaEventRecord(eStart, 0); stem_gpu << <grid, block>>> (gpu_stem_list, words); cudaEventRecord(eStop, 0); cudaEventSynchronize(eStop); cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("gpu_stemmer", cuda_elapsedTime); cudaEventRecord(eStart, 0); cudaMemcpy(stem_list, gpu_stem_list, words * sizeof(struct stemmer), cudaMemcpyDeviceToHost); cudaEventRecord(eStop, 0); cudaEventSynchronize(eStop); cudaEventElapsedTime(&cuda_elapsedTime, eStart, eStop); PRINT_STAT_DOUBLE("device_to_host", cuda_elapsedTime); cudaEventDestroy(eStart); cudaEventDestroy(eStop); STATS_END(); 
#ifdef TESTING f = fopen("../input/stem_porter.gpu", "w"); for (int i = 0; i < words; ++i) fprintf(f, "%s\n", stem_list[i].b); fclose(f); #endif cudaFreeHost(stem_list); cudaFree(gpu_stem_list); return 0; }
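// ---------------------------------------------------------------------------
// Note: every measurement in main() above follows the same cudaEvent pattern:
// record a start event, enqueue the work, record a stop event, synchronize on
// it, then read the elapsed time. A compact sketch of that pattern as a
// reusable helper is shown below; the helper name time_ms and its callable
// argument are illustrative and not part of this benchmark.
template <typename Launch>
static float time_ms(Launch launch)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    launch();                        // e.g. a kernel launch or a cudaMemcpy
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}
// Example: float t = time_ms([&] { stem_gpu<<<grid, block>>>(gpu_stem_list, words); });
// ---------------------------------------------------------------------------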
#include "cupoch/geometry/geometry_utils.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { template <int Dim> struct transform_points_functor { transform_points_functor( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform) : transform_(transform){}; const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_; __device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) { pt = transform_.template block<Dim, Dim>(0, 0) * pt + transform_.template block<Dim, 1>(0, Dim); } }; struct transform_normals_functor { transform_normals_functor(const Eigen::Matrix4f &transform) : transform_(transform){}; const Eigen::Matrix4f transform_; __device__ void operator()(Eigen::Vector3f &nl) { nl = transform_.block<3, 3>(0, 0) * nl; } }; } // namespace template <int Dim, typename FuncT> Eigen::Matrix<float, Dim, 1> ComputeBound( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero(); Eigen::Matrix<float, Dim, 1> init = points[0]; return thrust::reduce(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), init, FuncT()); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMinBound( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { return ComputeBound< Dim, thrust::elementwise_minimum<Eigen::Matrix<float, Dim, 1>>>( 0, points); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMaxBound( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { return ComputeBound< Dim, thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>>( 0, points); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMaxBound( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero(); Eigen::Matrix<float, Dim, 1> init = points[0]; return thrust::reduce( utility::exec_policy(stream)->on(stream), points.begin(), points.end(), init, thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>()); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeCenter( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { Eigen::Matrix<float, Dim, 1> init = Eigen::Matrix<float, Dim, 1>::Zero(); if (points.empty()) return init; Eigen::Matrix<float, Dim, 1> sum = thrust::reduce( utility::exec_policy(0)->on(0), points.begin(), points.end(), init, thrust::plus<Eigen::Matrix<float, Dim, 1>>()); return sum / points.size(); } template Eigen::Matrix<float, 2, 1> ComputeBound<2, thrust::elementwise_minimum<Eigen::Matrix<float, 2, 1>>>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeBound<3, thrust::elementwise_minimum<Eigen::Matrix<float, 3, 1>>>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeBound<2, thrust::elementwise_maximum<Eigen::Matrix<float, 2, 1>>>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeBound<3, thrust::elementwise_maximum<Eigen::Matrix<float, 3, 1>>>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); 
template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeCenter<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeCenter<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors, const size_t size, const Eigen::Vector3f &color) { colors.resize(size); Eigen::Vector3f clipped_color = color; if (color.minCoeff() < 0 || color.maxCoeff() > 1) { utility::LogWarning( "invalid color in PaintUniformColor, clipping to [0, 1]"); clipped_color = clipped_color.array() .max(Eigen::Vector3f(0, 0, 0).array()) .matrix(); clipped_color = clipped_color.array() .min(Eigen::Vector3f(1, 1, 1).array()) .matrix(); } thrust::fill(colors.begin(), colors.end(), clipped_color); } template <int Dim> void TransformPoints( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { TransformPoints<Dim>(0, transformation, points); } template <int Dim> void TransformPoints( cudaStream_t stream, const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { transform_points_functor<Dim> func(transformation); thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), func); } template void TransformPoints<2>( const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<2>( cudaStream_t stream, const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<3>( const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); template void TransformPoints<3>( cudaStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); void TransformNormals(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { TransformNormals(0, transformation, normals); } void TransformNormals(cudaStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { transform_normals_functor func(transformation); thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(), normals.end(), func); } template <int Dim> void TranslatePoints( const Eigen::Matrix<float, Dim, 1> &translation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool relative) { Eigen::Matrix<float, Dim, 1> transform = translation; if (!relative) { transform -= ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt += transform; }); } template <int Dim> void ScalePoints(const float scale, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = (pt - points_center) * scale + points_center; }); } template void TranslatePoints<2>( const Eigen::Vector2f &translation, utility::device_vector<Eigen::Vector2f> 
&points, bool relative); template void TranslatePoints<3>( const Eigen::Vector3f &translation, utility::device_vector<Eigen::Vector3f> &points, bool relative); template void ScalePoints<2>(const float scale, utility::device_vector<Eigen::Vector2f> &points, bool center); template void ScalePoints<3>(const float scale, utility::device_vector<Eigen::Vector3f> &points, bool center); template <int Dim> void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { RotatePoints<Dim>(0, R, points, center); } template <int Dim> void RotatePoints(cudaStream_t stream, const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = ComputeCenter<Dim>(points); } thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = R * (pt - points_center) + points_center; }); } template void RotatePoints<2>(const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); template void RotatePoints<2>(cudaStream_t stream, const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(cudaStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); void RotateNormals(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { RotateNormals(0, R, normals); } void RotateNormals(cudaStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(), normals.end(), [=] __device__(Eigen::Vector3f & normal) { normal = R * normal; }); } Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromAxisAngle( const Eigen::Vector3f &rotation) { const float phi = rotation.norm(); return Eigen::AngleAxisf(phi, 
rotation / phi).toRotationMatrix(); } Eigen::Matrix3f GetRotationMatrixFromQuaternion( const Eigen::Vector4f &rotation) { return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2), rotation(3)) .normalized() .toRotationMatrix(); } } // namespace geometry } // namespace cupoch
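// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not cupoch API; the helper name is made up):
// the rotate-about-center convention used by RotatePoints above,
// pt' = R * (pt - center) + center, written with plain Eigen on the CPU so the
// formula can be checked without a device_vector or a CUDA stream.
// ---------------------------------------------------------------------------
#include <vector>
#include <Eigen/Core>

inline void RotateAboutCentroidCPU(const Eigen::Matrix3f &R,
                                   std::vector<Eigen::Vector3f> &points) {
    if (points.empty()) return;
    // Plays the role of ComputeCenter<3>() in the device code above.
    Eigen::Vector3f center = Eigen::Vector3f::Zero();
    for (const auto &p : points) center += p;
    center /= static_cast<float>(points.size());
    // Same update as the __device__ lambda in RotatePoints.
    for (auto &p : points) p = R * (p - center) + center;
}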
#include <io/utilities/parsing_utils.cuh> #include <io/utilities/time_utils.cuh> #include <cudf/fixed_point/fixed_point.hpp> #include <thrust/execution_policy.h> #include <thrust/reduce.h> namespace cudf { namespace io { /** * @brief Parses non-negative integral values. * * This helper function is only intended to handle positive integers. The input * character string is expected to be well-formed. * * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @return The parsed and converted value */ template <typename T> __inline__ __device__ T to_non_negative_integer(char const* begin, char const* end) { T value = 0; for (; begin < end; ++begin) { if (*begin >= '0' && *begin <= '9') { value *= 10; value += *begin - '0'; } } return value; } /** * @brief Extracts the Day, Month, and Year from a string. * * This function takes a string and produces a `year_month_day` representation. * Acceptable formats are a combination of `YYYY`, `M`, `MM`, `D` and `DD` with * `/` or `-` as separators. Data with only year and month (no day) is also valid. * * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @param dayfirst Flag indicating that first field is the day * @return Extracted year, month and day in `cuda::std::chrono::year_month_day` format */ __inline__ __device__ cuda::std::chrono::year_month_day extract_date(char const* begin, char const* end, bool dayfirst) { using namespace cuda::std::chrono; char sep = '/'; auto sep_pos = thrust::find(thrust::seq, begin, end, sep); if (sep_pos == end) { sep = '-'; sep_pos = thrust::find(thrust::seq, begin, end, sep); } year y; month m; day d; //--- is year the first field? if ((sep_pos - begin) == 4) { y = year{to_non_negative_integer<int32_t>(begin, sep_pos)}; // year is signed // Month auto s2 = sep_pos + 1; sep_pos = thrust::find(thrust::seq, s2, end, sep); if (sep_pos == end) { //--- Data is just Year and Month - no day m = month{to_non_negative_integer<uint32_t>(s2, end)}; // month and day are unsigned d = day{1}; } else { m = month{to_non_negative_integer<uint32_t>(s2, sep_pos)}; d = day{to_non_negative_integer<uint32_t>((sep_pos + 1), end)}; } } else { //--- if the dayfirst flag is set, then restricts the format options if (dayfirst) { d = day{to_non_negative_integer<uint32_t>(begin, sep_pos)}; auto s2 = sep_pos + 1; sep_pos = thrust::find(thrust::seq, s2, end, sep); m = month{to_non_negative_integer<uint32_t>(s2, sep_pos)}; y = year{to_non_negative_integer<int32_t>((sep_pos + 1), end)}; } else { m = month{to_non_negative_integer<uint32_t>(begin, sep_pos)}; auto s2 = sep_pos + 1; sep_pos = thrust::find(thrust::seq, s2, end, sep); if (sep_pos == end) { //--- Data is just Year and Month - no day y = year{to_non_negative_integer<int32_t>(s2, end)}; d = day{1}; } else { d = day{to_non_negative_integer<uint32_t>(s2, sep_pos)}; y = year{to_non_negative_integer<int32_t>((sep_pos + 1), end)}; } } } return year_month_day{y, m, d}; } /** * @brief Parses a string to extract the hour, minute, second and millisecond time field * values of a day. * * Incoming format is expected to be `HH:MM:SS.MS`, with the latter second and millisecond fields * optional. Each time field can be a single, double, or triple (in the case of milliseconds) * digits. 12-hr and 24-hr time format is detected via the absence or presence of AM/PM characters * at the end.
* * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @return Extracted hours, minutes, seconds and milliseconds of `chrono::hh_mm_ss` type with a * precision of milliseconds */ __inline__ __device__ cuda::std::chrono::hh_mm_ss<duration_ms> extract_time_of_day( char const* begin, char const* end) { constexpr char sep = ':'; // Adjust for AM/PM and any whitespace before duration_h d_h{0}; auto last = end - 1; if (*last == 'M' || *last == 'm') { if (*(last - 1) == 'P' || *(last - 1) == 'p') { d_h = duration_h{12}; } last = last - 2; while (*last == ' ') { --last; } } end = last + 1; // Find hour-minute separator const auto hm_sep = thrust::find(thrust::seq, begin, end, sep); // Extract hours d_h += cudf::duration_h{to_non_negative_integer<int>(begin, hm_sep)}; duration_m d_m{0}; duration_s d_s{0}; duration_ms d_ms{0}; // Find minute-second separator (if present) const auto ms_sep = thrust::find(thrust::seq, hm_sep + 1, end, sep); if (ms_sep == end) { d_m = duration_m{to_non_negative_integer<int32_t>(hm_sep + 1, end)}; } else { d_m = duration_m{to_non_negative_integer<int32_t>(hm_sep + 1, ms_sep)}; // Find second-millisecond separator (if present) const auto sms_sep = thrust::find(thrust::seq, ms_sep + 1, end, '.'); if (sms_sep == end) { d_s = duration_s{to_non_negative_integer<int64_t>(ms_sep + 1, end)}; } else { d_s = duration_s{to_non_negative_integer<int64_t>(ms_sep + 1, sms_sep)}; d_ms = duration_ms{to_non_negative_integer<int64_t>(sms_sep + 1, end)}; } } return cuda::std::chrono::hh_mm_ss<duration_ms>{d_h + d_m + d_s + d_ms}; } /** * @brief Checks whether `c` is decimal digit */ constexpr bool is_digit(char c) { return c >= '0' and c <= '9'; } /** * @brief Parses a datetime string and computes the corresponding timestamp. * * Acceptable date formats are a combination of `YYYY`, `M`, `MM`, `D` and `DD` with `/` or `-` as * separators. Input with only year and month (no day) is also valid. Character `T` or blank space * is expected to be the separator between date and time of day. Optional time of day information * like hours, minutes, seconds and milliseconds are expected to be `HH:MM:SS.MS`. Each time field * can be a single, double, or triple (in the case of milliseconds) digits. 12-hr and 24-hr time * format is detected via the absence or presence of AM/PM characters at the end. 
* * @tparam timestamp_type Type of output timestamp * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @param dayfirst Flag to indicate day/month or month/day order * @return Timestamp converted to `timestamp_type` */ template <typename timestamp_type> __inline__ __device__ timestamp_type to_timestamp(char const* begin, char const* end, bool dayfirst) { using duration_type = typename timestamp_type::duration; auto sep_pos = end; // Find end of the date portion int count = 0; bool digits_only = true; for (auto i = begin; i < end; ++i) { digits_only = digits_only and is_digit(*i); if (*i == 'T') { sep_pos = i; break; } else if (count == 3 && *i == ' ') { sep_pos = i; break; } else if ((*i == '/' || *i == '-') || (count == 2 && *i != ' ')) { count++; } } // Exit if the input string is digit-only if (digits_only) { return timestamp_type{ duration_type{to_non_negative_integer<typename timestamp_type::rep>(begin, end)}}; } auto ymd = extract_date(begin, sep_pos, dayfirst); timestamp_type answer{cuda::std::chrono::sys_days{ymd}}; // Extract time only if separator is present if (sep_pos != end) { auto t = extract_time_of_day(sep_pos + 1, end); answer += cuda::std::chrono::duration_cast<duration_type>(t.to_duration()); } return answer; } /** * @brief Parses the input string into an integral value of the given type. * * Moves the `begin` iterator past the parsed value. * * @param[in, out] begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @return The parsed and converted value */ template <typename T> __inline__ __device__ T parse_integer(char const** begin, char const* end) { bool const is_negative = (**begin == '-'); T value = 0; auto cur = *begin + is_negative; while (cur < end) { if (*cur >= '0' && *cur <= '9') { value *= 10; value += *cur - '0'; } else break; ++cur; } *begin = cur; return is_negative ? -value : value; } /** * @brief Parses the input string into an integral value of the given type if the delimiter is * present. * * Moves the `begin` iterator past the parsed value. * * @param[in, out] begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @param delimiter delimiter character * @return The parsed and converted value, zero if the delimiter is not present */ template <typename T> __inline__ __device__ T parse_optional_integer(char const** begin, char const* end, char delimiter) { if (**begin != delimiter) { return 0; } ++(*begin); return parse_integer<T>(begin, end); } /** * @brief Parses the input string into a duration of `duration_type`. * * The expected format can be one of the following: `DD days`, `DD days +HH:MM:SS.NS`, `DD days * HH:MM:SS.NS`, `HH:MM:SS.NS` and digits-only string. Note `DD` and optional `NS` field can * contain an arbitrary number of digits while `HH`, `MM` and `SS` can be single or double digits. * * @tparam duration_type Type of the parsed duration * @param begin Pointer to the first element of the string * @param end Pointer to the first element after the string * @return The parsed duration in `duration_type` */ template <typename duration_type> __inline__ __device__ duration_type to_duration(char const* begin, char const* end) { using cuda::std::chrono::duration_cast; // %d days [+]%H:%M:%S.n => %d days, %d days [+]%H:%M:%S, %H:%M:%S.n, %H:%M:%S, %value.
constexpr char sep = ':'; // single pass to parse days, hour, minute, seconds, nanosecond auto cur = begin; auto const value = parse_integer<int32_t>(&cur, end); cur = skip_spaces(cur, end); if (std::is_same_v<duration_type, cudf::duration_D> || cur >= end) { return duration_type{static_cast<typename duration_type::rep>(value)}; } // " days [+]" auto const after_days_sep = skip_if_starts_with(cur, end, "days"); auto const has_days_seperator = (after_days_sep != cur); cur = skip_spaces(after_days_sep, end); cur += (*cur == '+'); duration_D d_d{0}; duration_h d_h{0}; if (has_days_seperator) { d_d = duration_D{value}; d_h = duration_h{parse_integer<int32_t>(&cur, end)}; } else { d_h = duration_h{value}; } duration_m d_m{parse_optional_integer<int32_t>(&cur, end, sep)}; duration_s d_s{parse_optional_integer<int64_t>(&cur, end, sep)}; // Convert all durations to the given type auto output_d = duration_cast<duration_type>(d_d + d_h + d_m + d_s); if constexpr (std::is_same_v<duration_type, cudf::duration_s>) { return output_d; } auto const d_ns = (*cur != '.') ? duration_ns{0} : [&]() { auto const start_subsecond = ++cur; auto const unscaled_subseconds = parse_integer<int64_t>(&cur, end); auto const scale = min(9L, cur - start_subsecond) - 9; auto const rescaled = numeric::decimal64{unscaled_subseconds, numeric::scale_type{scale}}; return duration_ns{rescaled.value()}; }(); return output_d + duration_cast<duration_type>(d_ns); } } // namespace io } // namespace cudf
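// ---------------------------------------------------------------------------
// Host-side illustration (an assumption for exposition, not cudf API): how the
// fractional-second field in to_duration above ends up as nanoseconds. With d
// parsed digits (d <= 9) the device code builds a decimal64 with
// scale = min(9, d) - 9, which amounts to multiplying the parsed integer by
// 10^(9 - d); e.g. ".5" -> 500000000 ns and ".042" -> 42000000 ns.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static int64_t subsecond_digits_to_ns(const char* digits, int d) {
  int64_t value = 0;
  for (int i = 0; i < d; ++i) value = value * 10 + (digits[i] - '0');  // parse fraction
  for (int i = d; i < 9; ++i) value *= 10;  // promote to a 9-digit nanosecond count
  return value;
}

int main() {
  assert(subsecond_digits_to_ns("5", 1) == 500000000);   // ".5"   -> 0.5 s
  assert(subsecond_digits_to_ns("042", 3) == 42000000);  // ".042" -> 42 ms
  return 0;
}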
#include "CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h" #include "CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h" #include "EcalRecHitBuilderKernels.h" #include "KernelHelpers.h" namespace ecal { namespace rechit { // uncalibrecHit flags enum UncalibRecHitFlags { kGood = -1, // channel is good (mutually exclusive with other states) setFlagBit(kGood) reset flags_ to zero kPoorReco, // channel has been badly reconstructed (e.g. bad shape, bad chi2 etc.) kSaturated, // saturated channel kOutOfTime, // channel out of time kLeadingEdgeRecovered, // saturated channel: energy estimated from the leading edge before saturation kHasSwitchToGain6, // at least one data frame is in G6 kHasSwitchToGain1 // at least one data frame is in G1 }; // recHit flags enum RecHitFlags { RecHitFlags_kGood = 0, // channel ok, the energy and time measurement are reliable RecHitFlags_kPoorReco, // the energy is available from the UncalibRecHit, but approximate (bad shape, large chi2) RecHitFlags_kOutOfTime, // the energy is available from the UncalibRecHit (sync reco), but the event is out of time RecHitFlags_kFaultyHardware, // The energy is available from the UncalibRecHit, channel is faulty at some hardware level (e.g. noisy) RecHitFlags_kNoisy, // the channel is very noisy RecHitFlags_kPoorCalib, // the energy is available from the UncalibRecHit, but the calibration of the channel is poor RecHitFlags_kSaturated, // saturated channel (recovery not tried) RecHitFlags_kLeadingEdgeRecovered, // saturated channel: energy estimated from the leading edge before saturation RecHitFlags_kNeighboursRecovered, // saturated/isolated dead: energy estimated from neighbours RecHitFlags_kTowerRecovered, // channel in TT with no data link, info retrieved from Trigger Primitive RecHitFlags_kDead, // channel is dead and any recovery fails RecHitFlags_kKilled, // MC only flag: the channel is killed in the real detector RecHitFlags_kTPSaturated, // the channel is in a region with saturated TP RecHitFlags_kL1SpikeFlag, // the channel is in a region with TP with sFGVB = 0 RecHitFlags_kWeird, // the signal is believed to originate from an anomalous deposit (spike) RecHitFlags_kDiWeird, // the signal is anomalous, and neighbors another anomalous signal RecHitFlags_kHasSwitchToGain6, // at least one data frame is in G6 RecHitFlags_kHasSwitchToGain1, // at least one data frame is in G1 // RecHitFlags_kUnknown // to ease the interface with functions returning flags. 
}; // status code enum EcalChannelStatusCode_Code { kOk = 0, kDAC, kNoLaser, kNoisy, kNNoisy, kNNNoisy, kNNNNoisy, kNNNNNoisy, kFixedG6, kFixedG1, kFixedG0, kNonRespondingIsolated, kDeadVFE, kDeadFE, kNoDataNoTP }; __global__ void kernel_create_ecal_rehit( // configuration int const* ChannelStatusToBeExcluded, uint32_t ChannelStatusToBeExcludedSize, bool const killDeadChannels, bool const recoverEBIsolatedChannels, bool const recoverEEIsolatedChannels, bool const recoverEBVFE, bool const recoverEEVFE, bool const recoverEBFE, bool const recoverEEFE, float const EBLaserMIN, float const EELaserMIN, float const EBLaserMAX, float const EELaserMAX, // for flags setting int const* expanded_v_DB_reco_flags, // FIXME AM: to be checked uint32_t const* expanded_Sizes_v_DB_reco_flags, uint32_t const* expanded_flagbit_v_DB_reco_flags, uint32_t expanded_v_DB_reco_flagsSize, uint32_t flagmask, // conditions float const* adc2gev, float const* intercalib, uint16_t const* status, float const* apdpnrefs, float const* alphas, // input for transparency corrections float const* p1, float const* p2, float const* p3, edm::TimeValue_t const* t1, edm::TimeValue_t const* t2, edm::TimeValue_t const* t3, // input for linear corrections float const* lp1, float const* lp2, float const* lp3, edm::TimeValue_t const* lt1, edm::TimeValue_t const* lt2, edm::TimeValue_t const* lt3, // time, used for time dependent corrections edm::TimeValue_t const event_time, // input uint32_t const* did_eb, uint32_t const* did_ee, ::ecal::reco::StorageScalarType const* amplitude_eb, // in adc counts ::ecal::reco::StorageScalarType const* amplitude_ee, // in adc counts ::ecal::reco::StorageScalarType const* time_eb, ::ecal::reco::StorageScalarType const* time_ee, ::ecal::reco::StorageScalarType const* chi2_eb, ::ecal::reco::StorageScalarType const* chi2_ee, uint32_t const* flags_eb, uint32_t const* flags_ee, // output uint32_t* didEB, uint32_t* didEE, ::ecal::reco::StorageScalarType* energyEB, // in energy [GeV] ::ecal::reco::StorageScalarType* energyEE, // in energy [GeV] ::ecal::reco::StorageScalarType* timeEB, ::ecal::reco::StorageScalarType* timeEE, ::ecal::reco::StorageScalarType* chi2EB, ::ecal::reco::StorageScalarType* chi2EE, uint32_t* flagBitsEB, uint32_t* flagBitsEE, uint32_t* extraEB, uint32_t* extraEE, // other int const nchannels, uint32_t const nChannelsBarrel, uint32_t const offsetForHashes) { // // NB: energy "type_wrapper<reco::StorageScalarType, L>::type" most likely std::vector<float> // for (int ch = threadIdx.x + blockDim.x * blockIdx.x; ch < nchannels; ch += blockDim.x * gridDim.x) { bool isEndcap = (ch >= nChannelsBarrel); int const inputCh = isEndcap ? ch - nChannelsBarrel : ch; uint32_t const* didCh = isEndcap ? did_ee : did_eb; // arrange to access the right ptrs #define ARRANGE(var) auto* var = isEndcap ? var##EE : var##EB ARRANGE(did); ARRANGE(energy); ARRANGE(chi2); ARRANGE(flagBits); ARRANGE(extra); #undef ARRANGE // only two values, EB or EE // AM : FIXME : why not using "isBarrel" ? isBarrel ? adc2gev[0] : adc2gev[1] float adc2gev_to_use = isEndcap ? adc2gev[1] // ee : adc2gev[0]; // eb // first EB and then EE ::ecal::reco::StorageScalarType const* amplitude = isEndcap ? amplitude_ee : amplitude_eb; ::ecal::reco::StorageScalarType const* chi2_in = isEndcap ? chi2_ee : chi2_eb; uint32_t const* flags_in = isEndcap ? 
flags_ee : flags_eb; // simple copy did[inputCh] = didCh[inputCh]; auto const did_to_use = DetId{didCh[inputCh]}; auto const isBarrel = did_to_use.subdetId() == EcalBarrel; auto const hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did_to_use.rawId()) : offsetForHashes + ecal::reconstruction::hashedIndexEE(did_to_use.rawId()); float const intercalib_to_use = intercalib[hashedId]; // get laser coefficient float lasercalib = 1.; // // AM: ideas // // One possibility is to create the map of laser corrections once on CPU // for all crystals and push them on GPU. // Then only if the LS is different, update the laser correction // The variation within a LS is not worth pursuing (<< 0.1% !!) // and below the precision we can claim on the laser corrections (right?). // This will save quite some time (also for the CPU version?) // int iLM = 1; if (isBarrel) { iLM = ecal::reconstruction::laser_monitoring_region_EB(did_to_use.rawId()); } else { iLM = ecal::reconstruction::laser_monitoring_region_EE(did_to_use.rawId()); } long long t_i = 0, t_f = 0; float p_i = 0, p_f = 0; long long lt_i = 0, lt_f = 0; float lp_i = 0, lp_f = 0; // laser if (event_time >= t1[iLM - 1] && event_time < t2[iLM - 1]) { t_i = t1[iLM - 1]; t_f = t2[iLM - 1]; p_i = p1[hashedId]; p_f = p2[hashedId]; } else if (event_time >= t2[iLM - 1] && event_time <= t3[iLM - 1]) { t_i = t2[iLM - 1]; t_f = t3[iLM - 1]; p_i = p2[hashedId]; p_f = p3[hashedId]; } else if (event_time < t1[iLM - 1]) { t_i = t1[iLM - 1]; t_f = t2[iLM - 1]; p_i = p1[hashedId]; p_f = p2[hashedId]; } else if (event_time > t3[iLM - 1]) { t_i = t2[iLM - 1]; t_f = t3[iLM - 1]; p_i = p2[hashedId]; p_f = p3[hashedId]; } // linear corrections if (event_time >= lt1[iLM - 1] && event_time < lt2[iLM - 1]) { lt_i = lt1[iLM - 1]; lt_f = lt2[iLM - 1]; lp_i = lp1[hashedId]; lp_f = lp2[hashedId]; } else if (event_time >= lt2[iLM - 1] && event_time <= lt3[iLM - 1]) { lt_i = lt2[iLM - 1]; lt_f = lt3[iLM - 1]; lp_i = lp2[hashedId]; lp_f = lp3[hashedId]; } else if (event_time < lt1[iLM - 1]) { lt_i = lt1[iLM - 1]; lt_f = lt2[iLM - 1]; lp_i = lp1[hashedId]; lp_f = lp2[hashedId]; } else if (event_time > lt3[iLM - 1]) { lt_i = lt2[iLM - 1]; lt_f = lt3[iLM - 1]; lp_i = lp2[hashedId]; lp_f = lp3[hashedId]; } // apdpnref and alpha float apdpnref = apdpnrefs[hashedId]; float alpha = alphas[hashedId]; // now calculate transparency correction if (apdpnref != 0 && (t_i - t_f) != 0 && (lt_i - lt_f) != 0) { long long tt = event_time; // never subtract two unsigned! float interpolatedLaserResponse = p_i / apdpnref + float(tt - t_i) * (p_f - p_i) / (apdpnref * float(t_f - t_i)); float interpolatedLinearResponse = lp_i / apdpnref + float(tt - lt_i) * (lp_f - lp_i) / (apdpnref * float(lt_f - lt_i)); // FIXED BY FC if (interpolatedLinearResponse > 2.f || interpolatedLinearResponse < 0.1f) { interpolatedLinearResponse = 1.f; } if (interpolatedLaserResponse <= 0.) { // AM : how the heck is it possible? // interpolatedLaserResponse = 0.0001; lasercalib = 1.; } else { float interpolatedTransparencyResponse = interpolatedLaserResponse / interpolatedLinearResponse; // ... 
and now this: lasercalib = 1.f / (std::pow(interpolatedTransparencyResponse, alpha) * interpolatedLinearResponse); } } // // Check for channels to be excluded from reconstruction // // Default energy not to be updated if "ChannelStatusToBeExcluded" // Exploited later by the module "EcalRecHitConvertGPU2CPUFormat" energy[inputCh] = -1; //un-physical default // truncate the chi2 if (chi2_in[inputCh] > 64) chi2[inputCh] = 64; else chi2[inputCh] = chi2_in[inputCh]; // default values for the flags flagBits[inputCh] = 0; extra[inputCh] = 0; static const int chStatusMask = 0x1f; // ChannelStatusToBeExcluded is a "int" then I put "dbstatus" to be the same int dbstatus = EcalChannelStatusCode_Code((status[hashedId]) & chStatusMask); if (ChannelStatusToBeExcludedSize != 0) { bool skip_this_channel = false; for (int ich_to_check = 0; ich_to_check < ChannelStatusToBeExcludedSize; ich_to_check++) { if (ChannelStatusToBeExcluded[ich_to_check] == dbstatus) { skip_this_channel = true; break; } } if (skip_this_channel) { // skip this channel continue; } } // Take our association map of dbstatuses-> recHit flagbits and return the apporpriate flagbit word // // AM: get the smaller "flagbit_counter" with match // uint32_t temporary_flagBits = 0; int iterator_flags = 0; bool need_to_exit = false; int flagbit_counter = 0; while (!need_to_exit) { iterator_flags = 0; for (unsigned int i = 0; i != expanded_v_DB_reco_flagsSize; ++i) { // check the correct "flagbit" if (expanded_flagbit_v_DB_reco_flags[i] == flagbit_counter) { for (unsigned int j = 0; j < expanded_Sizes_v_DB_reco_flags[i]; j++) { if (expanded_v_DB_reco_flags[iterator_flags] == dbstatus) { temporary_flagBits = 0x1 << expanded_flagbit_v_DB_reco_flags[i]; need_to_exit = true; break; // also from the big loop!!! } iterator_flags++; } } else { // if not, got to the next bunch directly iterator_flags += expanded_Sizes_v_DB_reco_flags[i]; } if (need_to_exit) { break; } } flagbit_counter += 1; } flagBits[inputCh] = temporary_flagBits; if ((flagmask & temporary_flagBits) && killDeadChannels) { // skip this channel continue; } // // multiply the adc counts with factors to get GeV // // energy[ch] = amplitude[inputCh] * adc2gev_to_use * intercalib_to_use ; energy[inputCh] = amplitude[inputCh] * adc2gev_to_use * intercalib_to_use * lasercalib; // Time is not saved so far, FIXME // time[ch] = time_in[inputCh]; // NB: calculate the "flagBits extra" --> not really "flags", but actually an encoded version of energy uncertainty, time unc., ... // // extra packing ... // uint32_t offset; uint32_t width; uint32_t value; float chi2_temp = chi2[inputCh]; if (chi2_temp > 64) chi2_temp = 64; // use 7 bits uint32_t rawChi2 = lround(chi2_temp / 64. * ((1 << 7) - 1)); offset = 0; width = 7; value = 0; uint32_t mask = ((1 << width) - 1) << offset; value &= ~mask; value |= (rawChi2 & ((1U << width) - 1)) << offset; // rawEnergy is actually "error" !!! uint32_t rawEnergy = 0; // AM: FIXME: this is not propagated currently to the uncalibrecHit collection SOA // if you want to store this in "extra", we need first to add it to the uncalibrecHit results // then it will be something like the following // amplitudeError[inputCh] * adc2gev_to_use * intercalib_to_use * lasercalib // // float amplitudeError_ch = 0.; // amplitudeError[ch]; if (amplitudeError_ch > 0.001) { static constexpr float p10[] = {1.e-2f, 1.e-1f, 1.f, 1.e1f, 1.e2f, 1.e3f, 1.e4f, 1.e5f, 1.e6f}; int b = amplitudeError_ch < p10[4] ? 
0 : 5; for (; b < 9; ++b) if (amplitudeError_ch < p10[b]) break; uint16_t exponent = b; static constexpr float ip10[] = {1.e5f, 1.e4f, 1.e3f, 1.e2f, 1.e1f, 1.e0f, 1.e-1f, 1.e-2f, 1.e-3f, 1.e-4f}; uint16_t significand = lround(amplitudeError_ch * ip10[exponent]); // use 13 bits (3 exponent, 10 significand) rawEnergy = exponent << 10 | significand; } offset = 8; width = 13; // value from last change, ok mask = ((1 << width) - 1) << offset; value &= ~mask; value |= (rawEnergy & ((1U << width) - 1)) << offset; uint32_t jitterErrorBits = 0; jitterErrorBits = jitterErrorBits & 0xFF; offset = 24; width = 8; // value from last change, ok mask = ((1 << width) - 1) << offset; value &= ~mask; value |= (jitterErrorBits & ((1U << width) - 1)) << offset; // // now finally set "extra[ch]" // extra[inputCh] = value; // // additional flags setting // // using correctly the flags as calculated at the UncalibRecHit stage // // Now fill flags bool good = true; if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kLeadingEdgeRecovered))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kLeadingEdgeRecovered)); good = false; } if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kSaturated))) { // leading edge recovery failed - still keep the information // about the saturation and do not flag as dead flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kSaturated)); good = false; } // // AM: why do we have two tests one after the other checking almost the same thing??? // Please clean up the code, ... also the original one! // // uncalibRH.isSaturated() ---> // // bool EcalUncalibratedRecHit::isSaturated() const { // return EcalUncalibratedRecHit::checkFlag(kSaturated); // } // // if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kSaturated))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kSaturated)); good = false; } if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kOutOfTime))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kOutOfTime)); good = false; } if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kPoorReco))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kPoorReco)); good = false; } if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kHasSwitchToGain6))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kHasSwitchToGain6)); } if (flags_in[inputCh] & (0x1 << (UncalibRecHitFlags::kHasSwitchToGain1))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kHasSwitchToGain1)); } if (good) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kGood)); } if ((isBarrel && (lasercalib < EBLaserMIN || lasercalib > EBLaserMAX)) || (!isBarrel && (lasercalib < EELaserMIN || lasercalib > EELaserMAX))) { flagBits[inputCh] |= (0x1 << (RecHitFlags::RecHitFlags_kPoorCalib)); } // recover, killing, and other stuff // // Structure: // EB // EE // // // - single MVA // - democratic sharing // - kill all the other cases // bool is_Single = false; bool is_FE = false; bool is_VFE = false; bool is_recoverable = false; // DetIdToBeRecovered if (dbstatus == 10 || dbstatus == 11 || dbstatus == 12) { is_recoverable = true; } if (is_recoverable) { if (dbstatus == EcalChannelStatusCode_Code::kDeadVFE) { is_VFE = true; } else if (dbstatus == EcalChannelStatusCode_Code::kDeadFE) { is_FE = true; } else { is_Single = true; } // EB if (isBarrel) { if (is_Single || is_FE || is_VFE) { // single MVA if (is_Single && (recoverEBIsolatedChannels || !killDeadChannels)) { } // democratic sharing else if (is_FE && (recoverEBFE || !killDeadChannels)) { } // kill all the other cases
else { energy[inputCh] = 0.; // Need to set also the flags ... } } } // EE else { if (is_Single || is_FE || is_VFE) { // single MVA if (is_Single && (recoverEEIsolatedChannels || !killDeadChannels)) { } // democratic sharing else if (is_FE && (recoverEEFE || !killDeadChannels)) { // // Code is definitely too long ... // } // kill all the other cases else { energy[inputCh] = 0.; // Need to set also the flags ... } } } } } // end channel } // host version, to be called by the plugin void create_ecal_rehit(EventInputDataGPU const& eventInputGPU, EventOutputDataGPU& eventOutputGPU, // eventDataForScratchGPU_, ConditionsProducts const& conditions, ConfigurationParameters const& configParameters, uint32_t const nChannelsBarrel, edm::TimeValue_t const event_time, cudaStream_t cudaStream) { int nchannels = eventInputGPU.ebUncalibRecHits.size + eventInputGPU.eeUncalibRecHits.size; unsigned int nchannels_per_block = 16; unsigned int threads_min = nchannels_per_block; unsigned int blocks_min = (nchannels + threads_min - 1) / threads_min; // TEST : to be optimized (AM) // // kernel create rechit // kernel_create_ecal_rehit<<<blocks_min, threads_min, 0, cudaStream>>>( // configuration configParameters.ChannelStatusToBeExcluded, configParameters.ChannelStatusToBeExcludedSize, configParameters.killDeadChannels, configParameters.recoverEBIsolatedChannels, configParameters.recoverEEIsolatedChannels, configParameters.recoverEBVFE, configParameters.recoverEEVFE, configParameters.recoverEBFE, configParameters.recoverEEFE, configParameters.EBLaserMIN, configParameters.EELaserMIN, configParameters.EBLaserMAX, configParameters.EELaserMAX, // for flags setting configParameters.expanded_v_DB_reco_flags, configParameters.expanded_Sizes_v_DB_reco_flags, configParameters.expanded_flagbit_v_DB_reco_flags, configParameters.expanded_v_DB_reco_flagsSize, configParameters.flagmask, // conditions conditions.ADCToGeV.adc2gev, conditions.Intercalib.values, conditions.ChannelStatus.status, conditions.LaserAPDPNRatiosRef.values, conditions.LaserAlphas.values, // input for transparency corrections conditions.LaserAPDPNRatios.p1, conditions.LaserAPDPNRatios.p2, conditions.LaserAPDPNRatios.p3, conditions.LaserAPDPNRatios.t1, conditions.LaserAPDPNRatios.t2, conditions.LaserAPDPNRatios.t3, // input for linear corrections conditions.LinearCorrections.p1, conditions.LinearCorrections.p2, conditions.LinearCorrections.p3, conditions.LinearCorrections.t1, conditions.LinearCorrections.t2, conditions.LinearCorrections.t3, // time, used for time dependent corrections event_time, // input eventInputGPU.ebUncalibRecHits.did.get(), eventInputGPU.eeUncalibRecHits.did.get(), eventInputGPU.ebUncalibRecHits.amplitude.get(), eventInputGPU.eeUncalibRecHits.amplitude.get(), eventInputGPU.ebUncalibRecHits.jitter.get(), eventInputGPU.eeUncalibRecHits.jitter.get(), eventInputGPU.ebUncalibRecHits.chi2.get(), eventInputGPU.eeUncalibRecHits.chi2.get(), eventInputGPU.ebUncalibRecHits.flags.get(), eventInputGPU.eeUncalibRecHits.flags.get(), // output eventOutputGPU.recHitsEB.did.get(), eventOutputGPU.recHitsEE.did.get(), eventOutputGPU.recHitsEB.energy.get(), eventOutputGPU.recHitsEE.energy.get(), eventOutputGPU.recHitsEB.time.get(), eventOutputGPU.recHitsEE.time.get(), eventOutputGPU.recHitsEB.chi2.get(), eventOutputGPU.recHitsEE.chi2.get(), eventOutputGPU.recHitsEB.flagBits.get(), eventOutputGPU.recHitsEE.flagBits.get(), eventOutputGPU.recHitsEB.extra.get(), eventOutputGPU.recHitsEE.extra.get(), // other nchannels, nChannelsBarrel,
conditions.offsetForHashes); } } // namespace rechit } // namespace ecal
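// ---------------------------------------------------------------------------
// Host-side sketch (an assumption mirroring the packing in kernel_create_ecal_rehit,
// not an official helper): the "extra" word stores the 7-bit chi2 code at bit 0,
// the 13-bit energy-error code at bit 8 and the 8 jitter-error bits at bit 24,
// using the same mask-and-or sequence as the kernel.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static uint32_t set_field(uint32_t word, uint32_t value, uint32_t offset, uint32_t width) {
  uint32_t const mask = ((1u << width) - 1u) << offset;
  word &= ~mask;
  word |= (value & ((1u << width) - 1u)) << offset;
  return word;
}

int main() {
  uint32_t extra = 0;
  extra = set_field(extra, /*rawChi2=*/100u, 0, 7);
  extra = set_field(extra, /*rawEnergy=*/0u, 8, 13);
  extra = set_field(extra, /*jitterErrorBits=*/0u, 24, 8);
  assert((extra & 0x7fu) == 100u);  // the chi2 field round-trips
  return 0;
}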
//#include <cuda.h> //#include "caffe/layer.hpp" #include "caffe/layers/margin_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" //#define M_PI 3.14159265358979323846 namespace caffe { // no need to take 'sqrt'!!! template <typename Dtype> void caffe_gpu_norm2(const int n, const Dtype* x, Dtype* out); template <> void caffe_gpu_norm2<float>(const int n, const float* x, float* out) { CUBLAS_CHECK(cublasSnrm2(Caffe::cublas_handle(), n, x, 1, out)); } template <> void caffe_gpu_norm2<double>(const int n, const double* x, double* out) { CUBLAS_CHECK(cublasDnrm2(Caffe::cublas_handle(), n, x, 1, out)); } /*template <typename Dtype> __global__ void sqrt_kernel(const int n, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(y[index]); } }*/ template <typename Dtype> static __global__ void compute_exp_kernel(const int n, const int P, const Dtype *max_f, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(y[index] - max_f[index / P]); } } __device__ inline void atomic_add(float * address, float val) { atomicAdd(address, val); } __device__ inline void atomic_add(double * address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } //__constant__ const double cos_k_table[5] = { 1, M_SQRT1_2, 0, -M_SQRT1_2, -1 }; template <typename Dtype> static __global__ void compute_fyi(const int nthreads, const int P, const int F, const Dtype lambda, const Dtype* x, Dtype* x_norm, const Dtype* w, Dtype* w_norm, const Dtype *label, Dtype *ip_data, Dtype *phi_data, Dtype *prob_data, Dtype *max_f) { const Dtype cos_k_table[5] = { 1, M_SQRT1_2, 0, -M_SQRT1_2, -1 }; CUDA_KERNEL_LOOP(i, nthreads) { const int yi = label[i]; Dtype fyi = prob_data[i*P + yi]; ip_data[i] = fyi; if (sizeof(Dtype) == sizeof(double)) { x_norm[i] = norm(F, (const double*)(x + i*F)); w_norm[i] = norm(F, (const double*)(w + yi*F)); } else { x_norm[i] = normf(F, (const float*)(x + i*F)); w_norm[i] = normf(F, (const float*)(w + yi*F)); } Dtype xw = x_norm[i] * w_norm[i]; Dtype cos_th = xw == 0 ? 1 : fyi / xw; for (int k = 0; k < 4; ++k) { //if (cos(k*M_PI / 4) >= cos_th && cos_th >= cos((k + 1)*M_PI / 4)) { if (cos_k_table[k] >= cos_th && cos_th >= cos_k_table[k + 1]) { Dtype c2 = cos_th * cos_th; Dtype phi = (k & 1 ? 
(-1) : 1) * (8 * c2 * (c2 - 1) + 1) - 2 * k; phi_data[i] = phi; fyi = (fyi * lambda + xw * phi) / (1 + lambda); prob_data[i*P + yi] = fyi; break; } } for (int j = 0; j < P; ++j) if (prob_data[i*P + j] > fyi) fyi = prob_data[i*P + j]; max_f[i] = fyi; } } template <typename Dtype> static __global__ void compute_loss(const int nthreads, const int P, const Dtype *label, Dtype *prob_data, const Dtype *sum_exp, Dtype *loss) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / P; const int j = index % P; const int yi = label[i]; prob_data[index] /= sum_exp[i]; if (j == yi) { atomic_add(loss, -log(prob_data[index])); //atomic_add(loss, log(sum_exp[i]) - log); } } } template <typename Dtype> void MarginSoftmaxLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* const x = bottom[0]->gpu_data(); const Dtype* const label = bottom[1]->gpu_data(); Dtype* const loss = top[0]->mutable_gpu_data(); const Dtype* const w = this->blobs_[0]->gpu_data(); Dtype* const ip_data = ip_.mutable_gpu_data(); Dtype* const phi_data = ip_.mutable_gpu_diff(); Dtype* const prob_data = prob_.mutable_gpu_data(); Dtype* const x_norm = prob_.mutable_gpu_diff(); // N Dtype* const w_norm = x_norm + N; // N!!! not P Dtype* max_f = w_norm + N; // N const Dtype* const ones_P = max_f + N; // P Dtype* const sum_exp = (Dtype*)ones_P + P; // N const Dtype lambda = this->lambda_.get_iter("lambda"); // It turnes out to be TOO SLOW...to switch between modes /*//cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_DEVICE); // compute |x| for (int i = 0; i < N; ++i) { caffe_gpu_norm2(F, x + i*F, x_norm + i); } // compute |w| //compute_norms<Dtype> << <CAFFE_GET_BLOCKS(P), CAFFE_CUDA_NUM_THREADS >> >(P, F, w, w_norm); for (int j = 0; j < P; j++) { caffe_gpu_norm2(F, w + j*F, w_norm + j); } cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_HOST);*/ //sqrt_kernel<Dtype> << <CAFFE_GET_BLOCKS(P + N), CAFFE_CUDA_NUM_THREADS >> >(P + N, x_norm); // compute inner product: w^T * x caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, N, P, F, (Dtype)1., x, w, (Dtype)0., prob_data); // f_j(j!=yi) = w_j^T * x // f_yi = ... 
compute_fyi<Dtype> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, P, F, lambda, x, x_norm, w, w_norm, label, ip_data, phi_data, prob_data, max_f); // exp(f_j), max_f is employed to ensure numerical stability compute_exp_kernel<Dtype> << <CAFFE_GET_BLOCKS(N*P), CAFFE_CUDA_NUM_THREADS >> >( N*P, P, max_f, prob_data); // compute the denominator: sum(exp(...)) caffe_gpu_gemv<Dtype>(CblasNoTrans, N, P, (Dtype)1, prob_data, ones_P, (Dtype)0, sum_exp); // compute and accumalate loss caffe_gpu_set(1, Dtype(0), loss); compute_loss<Dtype> << <CAFFE_GET_BLOCKS(N*P), CAFFE_CUDA_NUM_THREADS >> >( N*P, P, label, prob_data, sum_exp, loss); caffe_gpu_scal(1, Dtype(1) / N, loss); //top[0]->mutable_cpu_data()[0] = loss / N;*/ } template <typename Dtype> static __device__ void atomic_axpy(const int n, const Dtype a, const Dtype* x, Dtype* y) { for (int i = 0; i < n; ++i, ++x, ++y) { atomic_add(y, a * (*x)); } } template <typename Dtype> static __device__ void axy(const int n, const Dtype a, const Dtype* x, Dtype* y) { for (int i = 0; i < n; ++i, ++x, ++y) *y = a * (*x); } template <typename Dtype> static __device__ void axpy(const int n, const Dtype a, const Dtype* x, Dtype* y) { for (int i = 0; i < n; ++i, ++x, ++y) *y += a * (*x); } template <typename Dtype> static __global__ void backward_yi(const int nthreads, const int P, const int F, const Dtype lambda, const Dtype* x, const Dtype* x_norm, const Dtype* w, const Dtype* w_norm, const Dtype* label, const Dtype* ip_data, const Dtype* phi_data, Dtype* prob_data, Dtype* x_diff, Dtype* w_diff ) { CUDA_KERNEL_LOOP(i, nthreads) { const Dtype * const xi = x + i*F; const int yi = label[i]; Dtype *x_diff_i = x_diff + i*F; const Dtype x_norm_i = x_norm[i]; const Dtype ip = ip_data[i]; const Dtype phi = phi_data[i]; const Dtype * const wj = w + yi*F; Dtype *w_diff_j = w_diff + yi*F; //const Dtype w_norm_j = w_norm[yi]; const Dtype w_norm_j = w_norm[i]; const Dtype prob_ij = prob_data[i*P + yi] - 1; // avoid mis-computation during the later 'gemm' prob_data[i*P + yi] = 0; if (w_norm_j == 0) { atomic_axpy(F, prob_ij * 4, xi, w_diff_j); axy(F, Dtype(0), wj, x_diff_i); // set to 0 } else if (x_norm_i == 0) { axy(F, prob_ij * 4, wj, x_diff_i); } else { Dtype m = ((-1 > phi && phi >= -3) || (-5 > phi && phi >= -7)) * (-8); m = m * (2 * ip*ip / (w_norm_j*w_norm_j*x_norm_i*x_norm_i) - 1); // d(Li)/d(xi) = -d(f_yi)/d(xi) * ( 1-p(yi|xi,w) ) + \sum_{j!=y_i} w_j * p(j|wi,w) // d( f_yi = |w||x|phi(th) ) / d(x) = axy(F, prob_ij * ( w_norm_j * phi / x_norm_i - m * 2 * ip*ip / (x_norm_i*x_norm_i*x_norm_i*w_norm_j) ) / (1 + lambda), xi, x_diff_i); // d(Li)/d(w_yi) = -d(f_yi)/d(w_yi) * ( 1-p(yi|xi,w) ) // d( f_yi = |w||x|phi(th) ) / d(w) = atomic_axpy(F, prob_ij * ( x_norm_i * phi / w_norm_j - m * 2 * ip*ip / (w_norm_j*w_norm_j*w_norm_j*x_norm_i) ) / (1 + lambda), wj, w_diff_j); m = prob_ij * (m * 2 * ip / (x_norm_i * w_norm_j) + lambda) / (1 + lambda); //atomic_axpy(F, m, wj, x_diff_i); axpy(F, m, wj, x_diff_i); atomic_axpy(F, m, xi, w_diff_j); } } } template <typename Dtype> void MarginSoftmaxLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* const x = bottom[0]->gpu_data(); Dtype* const x_diff = bottom[0]->mutable_gpu_diff(); const Dtype* const label = bottom[1]->gpu_data(); const Dtype loss_weight = top[0]->cpu_diff()[0] / N; const Dtype* const w = this->blobs_[0]->gpu_data(); //printf("w_diff[0]=%f\n", this->blobs_[0]->cpu_diff()[0]); Dtype* const w_diff = 
this->blobs_[0]->mutable_gpu_diff(); const Dtype* const ip_data = ip_.gpu_data(); const Dtype* const phi_data = ip_.gpu_diff(); Dtype* const prob_data = prob_.mutable_gpu_data(); const Dtype* const x_norm = prob_.gpu_diff(); // N const Dtype* const w_norm = x_norm + N; // N!!! not P const Dtype lambda = this->lambda_.get(); // compute special cases of j==yi backward_yi<Dtype> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >( N, P, F, lambda, x, x_norm, w, w_norm, label, ip_data, phi_data, prob_data, x_diff, w_diff); // then, collect gradients from output j!=y_i caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, P, F, N, (Dtype)1., prob_data, x, (Dtype)1., w_diff); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, N, F, P, (Dtype)1., prob_data, w, (Dtype)1., x_diff); // scale the gradients according to top_loss caffe_gpu_scal<Dtype>(P*F, loss_weight, w_diff); caffe_gpu_scal<Dtype>(N*F, loss_weight, x_diff); } INSTANTIATE_LAYER_GPU_FUNCS(MarginSoftmaxLossLayer); } // namespace caffe
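// ---------------------------------------------------------------------------
// Host-side check (an assumption for exposition, not part of the layer): the
// m = 4 angular margin applied in compute_fyi. For theta in the k-th interval
// [k*pi/4, (k+1)*pi/4), phi(theta) = (-1)^k * cos(4*theta) - 2k, and cos(4*theta)
// is expanded as 8*c^2*(c^2 - 1) + 1 with c = cos(theta), exactly as in the kernel.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cmath>

static const double kPi = std::acos(-1.0);

static double phi_m4(double theta) {
  const double c = std::cos(theta);
  int k = static_cast<int>(theta / (kPi / 4.0));
  if (k > 3) k = 3;  // theta in [0, pi]
  const double c2 = c * c;
  const double cos4theta = 8.0 * c2 * (c2 - 1.0) + 1.0;  // == cos(4*theta)
  return ((k & 1) ? -1.0 : 1.0) * cos4theta - 2.0 * k;
}

int main() {
  // The polynomial expansion matches cos(4*theta) ...
  for (double t = 0.0; t < kPi; t += 0.01) {
    const double c = std::cos(t), c2 = c * c;
    assert(std::fabs((8.0 * c2 * (c2 - 1.0) + 1.0) - std::cos(4.0 * t)) < 1e-9);
  }
  // ... and phi decreases with theta, which is what creates the angular margin.
  assert(phi_m4(0.1) > phi_m4(0.5) && phi_m4(0.5) > phi_m4(1.0));
  return 0;
}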
namespace amgx { cublasHandle_t Cublas::m_handle = 0; namespace { // real valued calls cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const float *alpha, const float *x, int incx, float *y, int incy) { return cublasSaxpy(handle, n, alpha, x, incx, y, incy); } cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const double *alpha, const double *x, int incx, double *y, int incy) { return cublasDaxpy(handle, n, alpha, x, incx, y, incy); } cublasStatus_t cublas_copy(cublasHandle_t handle, int n, const float *x, int incx, float *y, int incy) { return cublasScopy(handle, n, x, incx, y, incy); } cublasStatus_t cublas_copy(cublasHandle_t handle, int n, const double *x, int incx, double *y, int incy) { return cublasDcopy(handle, n, x, incx, y, incy); } cublasStatus_t cublas_dot(cublasHandle_t handle, int n, const float *x, int incx, const float *y, int incy, float *result) { return cublasSdot(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dot(cublasHandle_t handle, int n, const double *x, int incx, const double *y, int incy, double *result) { return cublasDdot(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dotc(cublasHandle_t handle, int n, const float *x, int incx, const float *y, int incy, float *result) { return cublasSdot(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dotc(cublasHandle_t handle, int n, const double *x, int incx, const double *y, int incy, double *result) { return cublasDdot(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_trsv_v2(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const float *A, int lda, float *x, int incx) { return cublasStrsv (handle, uplo, trans, diag, n, A, lda, x, incx); } cublasStatus_t cublas_trsv_v2(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const double *A, int lda, double *x, int incx) { return cublasDtrsv (handle, uplo, trans, diag, n, A, lda, x, incx); } cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc) { return cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) { return cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } cublasStatus_t cublas_gemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy) { return cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublas_gemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy) { return cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublas_ger(cublasHandle_t handle, int m, int n, const float *alpha, const float *x, int incx, const float *y, int incy, float *A, int lda) { return cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_ger(cublasHandle_t handle, int m, int n, const double *alpha, const double 
*x, int incx, const double *y, int incy, double *A, int lda) { return cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_gerc(cublasHandle_t handle, int m, int n, const float *alpha, const float *x, int incx, const float *y, int incy, float *A, int lda) { return cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_gerc(cublasHandle_t handle, int m, int n, const double *alpha, const double *x, int incx, const double *y, int incy, double *A, int lda) { return cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_nrm2(cublasHandle_t handle, int n, const float *x, int incx, float *result) { return cublasSnrm2(handle, n, x, incx, result); } cublasStatus_t cublas_nrm2(cublasHandle_t handle, int n, const double *x, int incx, double *result) { return cublasDnrm2(handle, n, x, incx, result); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const float *alpha, float *x, int incx) { return cublasSscal(handle, n, alpha, x, incx); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const double *alpha, double *x, int incx) { return cublasDscal(handle, n, alpha, x, incx); } // complex valued calls cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const cuComplex *alpha, const cuComplex *x, int incx, cuComplex *y, int incy) { return cublasCaxpy(handle, n, alpha, x, incx, y, incy); } cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *x, int incx, cuDoubleComplex *y, int incy) { return cublasZaxpy(handle, n, alpha, x, incx, y, incy); } cublasStatus_t cublas_copy(cublasHandle_t handle, int n, const cuComplex *x, int incx, cuComplex *y, int incy) { return cublasCcopy(handle, n, x, incx, y, incy); } cublasStatus_t cublas_copy(cublasHandle_t handle, int n, const cuDoubleComplex *x, int incx, cuDoubleComplex *y, int incy) { return cublasZcopy(handle, n, x, incx, y, incy); } cublasStatus_t cublas_dot(cublasHandle_t handle, int n, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *result) { return cublasCdotu(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dot(cublasHandle_t handle, int n, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *result) { return cublasZdotu(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dotc(cublasHandle_t handle, int n, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *result) { return cublasCdotc(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_dotc(cublasHandle_t handle, int n, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *result) { return cublasZdotc(handle, n, x, incx, y, incy, result); } cublasStatus_t cublas_trsv_v2(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const cuComplex *A, int lda, cuComplex *x, int incx) { return cublasCtrsv (handle, uplo, trans, diag, n, A, lda, x, incx); } cublasStatus_t cublas_trsv_v2(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const cuDoubleComplex *A, int lda, cuDoubleComplex *x, int incx) { return cublasZtrsv (handle, uplo, trans, diag, n, A, lda, x, incx); } cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const cuComplex *alpha, const cuComplex *A, int lda, const cuComplex *B, int ldb, const cuComplex *beta, cuComplex *C, int ldc) 
{ return cublasCgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const cuDoubleComplex *alpha, const cuDoubleComplex *A, int lda, const cuDoubleComplex *B, int ldb, const cuDoubleComplex *beta, cuDoubleComplex *C, int ldc) { return cublasZgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } cublasStatus_t cublas_gemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const cuComplex *alpha, const cuComplex *A, int lda, const cuComplex *x, int incx, const cuComplex *beta, cuComplex *y, int incy) { return cublasCgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublas_gemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *A, int lda, const cuDoubleComplex *x, int incx, const cuDoubleComplex *beta, cuDoubleComplex *y, int incy) { return cublasZgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublas_ger(cublasHandle_t handle, int m, int n, const cuComplex *alpha, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *A, int lda) { return cublasCgeru(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_ger(cublasHandle_t handle, int m, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *A, int lda) { return cublasZgeru(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_gerc(cublasHandle_t handle, int m, int n, const cuComplex *alpha, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *A, int lda) { return cublasCgerc(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_gerc(cublasHandle_t handle, int m, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *A, int lda) { return cublasZgerc(handle, m, n, alpha, x, incx, y, incy, A, lda); } cublasStatus_t cublas_nrm2(cublasHandle_t handle, int n, const cuComplex *x, int incx, float *result) { return cublasScnrm2(handle, n, x, incx, result); } cublasStatus_t cublas_nrm2(cublasHandle_t handle, int n, const cuDoubleComplex *x, int incx, double *result) { return cublasDznrm2(handle, n, x, incx, result); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const cuComplex *alpha, cuComplex *x, int incx) { return cublasCscal(handle, n, alpha, x, incx); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const cuDoubleComplex *alpha, cuDoubleComplex *x, int incx) { return cublasZscal(handle, n, alpha, x, incx); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const float *alpha, cuComplex *x, int incx) { return cublasCsscal(handle, n, alpha, x, incx); } cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const double *alpha, cuDoubleComplex *x, int incx) { return cublasZdscal(handle, n, alpha, x, incx); } } // anonymous namespace. 
void Cublas::set_pointer_mode_device() { cublasHandle_t handle = Cublas::get_handle(); cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE); } void Cublas::set_pointer_mode_host() { cublasHandle_t handle = Cublas::get_handle(); cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST); } template <class TConfig> void Cublas::gemm(typename TConfig::VecPrec alpha, const Vector<TConfig> &A, const Vector<TConfig> &B, typename TConfig::VecPrec beta, Vector<TConfig> &C, bool A_transposed, bool B_transposed) { cublasOperation_t trans_A = A_transposed ? CUBLAS_OP_T : CUBLAS_OP_N; cublasOperation_t trans_B = B_transposed ? CUBLAS_OP_T : CUBLAS_OP_N; int m = A_transposed ? A.get_num_cols() : A.get_num_rows(); int n = B_transposed ? B.get_num_rows() : B.get_num_cols(); int k = A_transposed ? A.get_num_rows() : A.get_num_cols(); cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_gemm(handle, trans_A, trans_B, m, n, k, &alpha, A.raw(), A.get_lda(), B.raw(), B.get_lda(), &beta, C.raw(), C.get_lda())); C.dirtybit = 1; } template <typename T> void Cublas::axpy(int n, T alpha, const T *x, int incx, T *y, int incy) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_axpy(handle, n, &alpha, x, incx, y, incy)); } template <typename T> void Cublas::copy(int n, const T *x, int incx, T *y, int incy) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_copy(handle, n, x, incx, y, incy)); } template <typename T> void Cublas::dot(int n, const T *x, int incx, const T *y, int incy, T *result) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_dot(handle, n, x, incx, y, incy, result)); } template <typename T> void Cublas::dotc(int n, const T *x, int incx, const T *y, int incy, T *result) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_dotc(handle, n, x, incx, y, incy, result)); } template <typename T, typename V> V Cublas::nrm2(int n, const T *x, int incx) { cublasHandle_t handle = Cublas::get_handle(); V result; Cublas::nrm2(n, x, incx, &result); return result; } template <typename T, typename V> void Cublas::nrm2(int n, const T *x, int incx, V *result) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_nrm2(handle, n, x, incx, result)); } template <typename T, typename V> void Cublas::scal(int n, T alpha, V *x, int incx) { Cublas::scal(n, &alpha, x, incx); } template <typename T, typename V> void Cublas::scal(int n, T *alpha, V *x, int incx) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_scal(handle, n, alpha, x, incx)); } template <typename T> void Cublas::gemv(bool transposed, int m, int n, const T *alpha, const T *A, int lda, const T *x, int incx, const T *beta, T *y, int incy) { cublasHandle_t handle = Cublas::get_handle(); cublasOperation_t trans = transposed ? CUBLAS_OP_T : CUBLAS_OP_N; cublasCheckError(cublas_gemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy)); } template <typename T> void Cublas::gemv_ext(bool transposed, const int m, const int n, const T *alpha, const T *A, const int lda, const T *x, const int incx, const T *beta, T *y, const int incy, const int offsetx, const int offsety, const int offseta) { cublasHandle_t handle = Cublas::get_handle(); cublasOperation_t trans = transposed ? 
CUBLAS_OP_T : CUBLAS_OP_N; cublasCheckError(cublas_gemv(handle, trans, m, n, alpha, A + offseta, lda, x + offsetx, incx, beta, y + offsety, incy)); } template <typename T> void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const T *A, int lda, T *x, int incx, int offseta) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError( cublas_trsv_v2(handle, uplo, trans, diag, n, A + offseta, lda, x, incx)); } template <typename T> void Cublas::ger(int m, int n, const T *alpha, const T *x, int incx, const T *y, int incy, T *A, int lda) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_ger(handle, m, n, alpha, x, incx, y, incy, A, lda)); } template <typename T> void Cublas::gerc(int m, int n, const T *alpha, const T *x, int incx, const T *y, int incy, T *A, int lda) { cublasHandle_t handle = Cublas::get_handle(); cublasCheckError(cublas_gerc(handle, m, n, alpha, x, incx, y, incy, A, lda)); } #define AMGX_CASE_LINE(CASE) \ template void Cublas::gemm(typename TemplateMode<CASE>::Type::VecPrec, const Vector<TemplateMode<CASE>::Type>&, const Vector<TemplateMode<CASE>::Type>&, typename TemplateMode<CASE>::Type::VecPrec, Vector<TemplateMode<CASE>::Type>&, bool, bool); AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE // real valued instantiaions template void Cublas::axpy(int n, float alpha, const float *x, int incx, float *y, int incy); template void Cublas::axpy(int n, double alpha, const double *x, int incx, double *y, int incy); template void Cublas::copy(int n, const float *x, int incx, float *y, int incy); template void Cublas::copy(int n, const double *x, int incx, double *y, int incy); template void Cublas::dot(int n, const float *x, int incx, const float *y, int incy, float *result); template void Cublas::dot(int n, const double *x, int incx, const double *y, int incy, double *result); template void Cublas::dotc(int n, const float *x, int incx, const float *y, int incy, float *result); template void Cublas::dotc(int n, const double *x, int incx, const double *y, int incy, double *result); template void Cublas::gemv(bool transposed, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy); template void Cublas::gemv(bool transposed, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy); template void Cublas::ger(int m, int n, const float *alpha, const float *x, int incx, const float *y, int incy, float *A, int lda); template void Cublas::ger(int m, int n, const double *alpha, const double *x, int incx, const double *y, int incy, double *A, int lda); template void Cublas::gerc(int m, int n, const float *alpha, const float *x, int incx, const float *y, int incy, float *A, int lda); template void Cublas::gerc(int m, int n, const double *alpha, const double *x, int incx, const double *y, int incy, double *A, int lda); template void Cublas::gemv_ext(bool transposed, const int m, const int n, const float *alpha, const float *A, const int lda, const float *x, const int incx, const float *beta, float *y, const int incy, const int offsetx, const int offsety, const int offseta); template void Cublas::gemv_ext(bool transposed, const int m, const int n, const double *alpha, const double *A, const int lda, const double *x, const int incx, const double *beta, double *y, const int incy, const int offsetx, const int offsety, const int offseta); 
template void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const float *A, int lda, float *x, int incx, int offseta); template void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const double *A, int lda, double *x, int incx, int offseta); template double Cublas::nrm2(int n, const double *x, int incx); template float Cublas::nrm2(int n, const float *x, int incx); template void Cublas::scal(int n, float alpha, float *x, int incx); template void Cublas::scal(int n, double alpha, double *x, int incx); // complex valued instantiaions template void Cublas::axpy(int n, cuComplex alpha, const cuComplex *x, int incx, cuComplex *y, int incy); template void Cublas::axpy(int n, cuDoubleComplex alpha, const cuDoubleComplex *x, int incx, cuDoubleComplex *y, int incy); template void Cublas::copy(int n, const cuComplex *x, int incx, cuComplex *y, int incy); template void Cublas::copy(int n, const cuDoubleComplex *x, int incx, cuDoubleComplex *y, int incy); template void Cublas::dot(int n, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *result); template void Cublas::dot(int n, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *result); template void Cublas::dotc(int n, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *result); template void Cublas::dotc(int n, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *result); template void Cublas::gemv(bool transposed, int m, int n, const cuComplex *alpha, const cuComplex *A, int lda, const cuComplex *x, int incx, const cuComplex *beta, cuComplex *y, int incy); template void Cublas::gemv(bool transposed, int m, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *A, int lda, const cuDoubleComplex *x, int incx, const cuDoubleComplex *beta, cuDoubleComplex *y, int incy); template void Cublas::ger(int m, int n, const cuComplex *alpha, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *A, int lda); template void Cublas::ger(int m, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *A, int lda); template void Cublas::gerc(int m, int n, const cuComplex *alpha, const cuComplex *x, int incx, const cuComplex *y, int incy, cuComplex *A, int lda); template void Cublas::gerc(int m, int n, const cuDoubleComplex *alpha, const cuDoubleComplex *x, int incx, const cuDoubleComplex *y, int incy, cuDoubleComplex *A, int lda); template void Cublas::gemv_ext(bool transposed, const int m, const int n, const cuComplex *alpha, const cuComplex *A, const int lda, const cuComplex *x, const int incx, const cuComplex *beta, cuComplex *y, const int incy, const int offsetx, const int offsety, const int offseta); template void Cublas::gemv_ext(bool transposed, const int m, const int n, const cuDoubleComplex *alpha, const cuDoubleComplex *A, const int lda, const cuDoubleComplex *x, const int incx, const cuDoubleComplex *beta, cuDoubleComplex *y, const int incy, const int offsetx, const int offsety, const int offseta); template void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const cuComplex *A, int lda, cuComplex *x, int incx, int offseta); template void Cublas::trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const cuDoubleComplex *A, int lda, cuDoubleComplex *x, int incx, int offseta); 
template double Cublas::nrm2(int n, const cuDoubleComplex *x, int incx); template float Cublas::nrm2(int n, const cuComplex *x, int incx); template void Cublas::scal(int n, cuComplex alpha, cuComplex *x, int incx); template void Cublas::scal(int n, cuDoubleComplex alpha, cuDoubleComplex *x, int incx); template void Cublas::scal(int n, float alpha, cuComplex *x, int incx); template void Cublas::scal(int n, double alpha, cuDoubleComplex *x, int incx); } // namespace amgx
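////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the AMGX sources above). The Cublas wrapper
// relies on the cuBLAS pointer-mode setting: with CUBLAS_POINTER_MODE_HOST the
// scalars (alpha, dot/nrm2 results) are host pointers, which is why
// Cublas::axpy()/dot() can pass addresses of stack variables after
// set_pointer_mode_host(). The standalone program below shows the equivalent
// raw cuBLAS calls; names are demo-only and error checking is omitted for
// brevity. Build separately, e.g.: nvcc demo_pointer_mode.cu -lcublas
////////////////////////////////////////////////////////////////////////////////
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main()
{
    const int n = 1024;
    std::vector<float> hx(n, 1.0f), hy(n, 2.0f);

    float *dx = nullptr, *dy = nullptr;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&dy, n * sizeof(float));
    cudaMemcpy(dx, hx.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    // Host pointer mode: alpha and the dot result live in host memory,
    // mirroring Cublas::set_pointer_mode_host() + Cublas::axpy()/dot() above.
    cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);

    const float alpha = 0.5f;
    cublasSaxpy(handle, n, &alpha, dx, 1, dy, 1);   // y <- 0.5*x + y

    float result = 0.0f;
    cublasSdot(handle, n, dx, 1, dy, 1, &result);   // result written to host
    std::printf("dot = %f (expected %f)\n", result, 2.5f * n);

    cublasDestroy(handle);
    cudaFree(dx);
    cudaFree(dy);
    return 0;
}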
#include <gtest/gtest.h> #include <cmath> #include <complex> #include <tuple> #include <vector> #include "dali/kernels/common/utils.h" #include "dali/kernels/erase/erase_gpu.h" #include "dali/kernels/scratch.h" #include "dali/pipeline/data/tensor_list.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" #include "dali/kernels/erase/erase_cpu.h" #include "dali/core/cuda_event.h" namespace dali { namespace kernels { template <int ndim> void debug_print(TensorListView<StorageCPU, uint8_t, ndim> tlv, int height, int width) { for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { std::cout << std::setw(2) << int(*tlv[0](y, x)) % 100; } std::cout << endl; } } void verify_regions(ivec<4> region_shape, ivec<4> sample_shape, ivec<4> expected_cover) { auto cover = div_ceil(sample_shape, region_shape); EXPECT_EQ(cover, expected_cover); int idx = 0; for (int d0 = 0; d0 < expected_cover[0]; d0++) { for (int d1 = 0; d1 < expected_cover[1]; d1++) { for (int d2 = 0; d2 < expected_cover[2]; d2++) { for (int d3 = 0; d3 < expected_cover[3]; d3++) { auto regions_start = get_region_start(idx, region_shape, sample_shape); ivec<4> region_start_expected = {d0 * region_shape[0], d1 * region_shape[1], d2 * region_shape[2], d3 * region_shape[3]}; EXPECT_EQ(regions_start, region_start_expected); idx++; } } } } } TEST(EraseGpuKernelTest, CheckUtils) { { ivec<4> region_shape = {2, 2, 32, 32}; ivec<4> sample_shape = {16, 8, 64, 64}; ivec<4> expected_cover = {8, 4, 2, 2}; verify_regions(region_shape, sample_shape, expected_cover); } { ivec<4> region_shape = {2, 2, 32, 32}; ivec<4> sample_shape = {16, 8, 32, 64}; ivec<4> expected_cover = {8, 4, 1, 2}; verify_regions(region_shape, sample_shape, expected_cover); } { ivec<4> region_shape = {2, 2, 32, 32}; ivec<4> sample_shape = {16, 8, 64, 32}; ivec<4> expected_cover = {8, 4, 2, 1}; verify_regions(region_shape, sample_shape, expected_cover); } { ivec<4> region_shape = {2, 2, 32, 32}; ivec<4> sample_shape = {16, 8, 32, 32}; ivec<4> expected_cover = {8, 4, 1, 1}; verify_regions(region_shape, sample_shape, expected_cover); } } enum class RegionGen { NO_ERASE, ///< only copy, no erase FULL_ERASE, ///< full, 1-element cover, only erase RANDOM_ERASE ///< randomly generated cover }; enum class FillType { MAGIC_42, ///< use single value `42` for erase CHANNEL_CONSECUTIVE, ///< use consecutive values to erase channels DEFAULT ///< do not pass a value, use default `0` }; template <int ndim> struct EraseTestParams { int max_erase_regions; RegionGen region_generation; FillType fill_type; TensorShape<ndim> shape; }; std::ostream& operator<<(std::ostream& os, RegionGen p) { switch (p) { case RegionGen::NO_ERASE: os << "RegionGen::NO_ERASE"; break; case RegionGen::FULL_ERASE: os << "RegionGen::FULL_ERASE"; break; case RegionGen::RANDOM_ERASE: os << "RegionGen::RANDOM_ERASE"; break; } return os; } std::ostream& operator<<(std::ostream& os, FillType p) { switch (p) { case FillType::MAGIC_42: os << "FillType::MAGIC_42"; break; case FillType::CHANNEL_CONSECUTIVE: os << "FillType::CHANNEL_CONSECUTIVE"; break; case FillType::DEFAULT: os << "FillType::DEFAULT"; break; } return os; } template <int ndim> std::ostream& operator<<(std::ostream& os, const EraseTestParams<ndim>& p) { os << "Num erase regions: " << p.max_erase_regions << ", region generation: " << p.region_generation << ", fill type: " << p.fill_type << ", shape: " << p.shape; return os; } template <typename T, int ndim, int channel_dim = -1> struct EraseGpuKernelTest : public 
testing::TestWithParam<EraseTestParams<ndim>> { void SetUp() override { auto params = this->GetParam(); max_erase_regions_ = params.max_erase_regions; region_generation_ = params.region_generation; fill_type_ = params.fill_type; shape_ = params.shape; test_shape_ = uniform_list_shape<ndim>(batch_size_, shape_); input_.reshape(test_shape_); output_.reshape(test_shape_); baseline_.reshape(test_shape_); auto cpu_input_view = input_.cpu(); SequentialFill(cpu_input_view); if (fill_type_ == FillType::DEFAULT) { fill_values_.resize(0); } else if (fill_type_ == FillType::CHANNEL_CONSECUTIVE) { fill_values_.resize(shape_[channel_dim]); int value = 0; for (auto &elem : fill_values_) { elem = value++; } } else if (fill_type_ == FillType::MAGIC_42) { fill_values_.resize(1); fill_values_[0] = 42; } } void RunTest() { if (region_generation_ == RegionGen::NO_ERASE) { std::cerr << ">> No cover" << std::endl; } else if (region_generation_ == RegionGen::FULL_ERASE) { std::cerr << ">> Full cover" << std::endl; } else if (region_generation_ == RegionGen::RANDOM_ERASE) { std::cerr << ">> Random cover of size: " << max_erase_regions_ << std::endl; } EraseGpu<T, ndim, channel_dim> kernel; KernelContext ctx; ctx.gpu.stream = 0; CreateRegions(); auto regions_gpu = regions_.gpu(); auto in_view = input_.gpu(); auto req = kernel.Setup(ctx, in_view, regions_gpu, make_span(fill_values_)); auto out_view = output_.gpu(); ScratchpadAllocator scratch_alloc; scratch_alloc.Reserve(req.scratch_sizes); auto scratchpad = scratch_alloc.GetScratchpad(); ctx.scratchpad = &scratchpad; kernel.Run(ctx, out_view, in_view, regions_gpu, make_span(fill_values_)); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaGetLastError()); RepackAndCalcCpu(); Verify(); } void Verify() { auto cpu_out_view = output_.cpu(); auto cpu_baseline_view = baseline_.cpu(); Check(cpu_out_view, cpu_baseline_view); } void RepackAndCalcCpu() { auto input_tlv = input_.cpu(); auto baseline_tlv = baseline_.cpu(); auto regions_tlv = regions_.cpu(); for (int i = 0; i < batch_size_; i++) { auto baseline_tv = baseline_tlv[i]; auto input_tv = input_tlv[i]; EraseArgs<T, ndim> args; auto n_regions = regions_tlv[i].num_elements(); args.rois.resize(n_regions); for (int j = 0; j < n_regions; j++) { for (int d = 0; d < ndim; d++) { args.rois[j].anchor[d] = regions_tlv[i](j)->lo[d]; args.rois[j].shape[d] = regions_tlv[i](j)->hi[d] - regions_tlv[i](j)->lo[d]; args.rois[j].fill_values = fill_values_; args.rois[j].channels_dim = channel_dim; } } EraseCpu<T, ndim> cpu_kernel; KernelContext ctx; cpu_kernel.Run(ctx, baseline_tv, input_tv, args); } } void CreateRegions() { TensorListShape<1> region_list_shape(batch_size_); std::mt19937 gen(0); if (region_generation_ == RegionGen::NO_ERASE) { // no cover region_list_shape = uniform_list_shape<1>(batch_size_, {0}); } else if (region_generation_ == RegionGen::FULL_ERASE) { // full cover region_list_shape = uniform_list_shape<1>(batch_size_, {1}); } else { std::uniform_int_distribution<> n_regions(0, max_erase_regions_); for (int i = 0; i < batch_size_; ++i) { region_list_shape.set_tensor_shape(i, {n_regions(gen)}); } } regions_.reshape(region_list_shape); auto regions_cpu = regions_.cpu(); if (region_generation_ == RegionGen::FULL_ERASE) { // full cover for (int i = 0; i < batch_size_; i++) { auto regions_tv = regions_cpu[i]; *regions_tv(0) = ibox<ndim>({0}, to_ivec(shape_)); } } else if (region_generation_ == RegionGen::RANDOM_ERASE) { for (int i = 0; i < batch_size_; i++) { auto regions_tv = regions_cpu[i]; for (int j = 0; j < 
regions_tv.shape[0]; j ++) { ibox<ndim> region_box; for (int d = 0; d < ndim; d++) { std::uniform_int_distribution<> start_dim(0, shape_[d] - 1); region_box.lo[d] = start_dim(gen); std::uniform_int_distribution<> end_dim(region_box.lo[d] + 1, shape_[d]); region_box.hi[d] = end_dim(gen); } *regions_tv(j) = region_box; } } } } int max_erase_regions_; RegionGen region_generation_; FillType fill_type_; std::vector<T> fill_values_; TensorShape<ndim> shape_; TensorListShape<ndim> test_shape_; constexpr static int batch_size_ = 16; TestTensorList<T, ndim> input_, output_, baseline_; TestTensorList<ibox<ndim>, 1> regions_; }; using EraseGpuKernel1fTest = EraseGpuKernelTest<float, 1>; using EraseGpuKernel2fTest = EraseGpuKernelTest<float, 2>; using EraseGpuKernel2NCfTest = EraseGpuKernelTest<float, 2, 1>; using EraseGpuKernel3fTest = EraseGpuKernelTest<float, 3>; using EraseGpuKernel3fHWCTest = EraseGpuKernelTest<float, 3, 2>; using EraseGpuKernel3fCHWTest = EraseGpuKernelTest<float, 3, 0>; using EraseGpuKernel4fDHCWTest = EraseGpuKernelTest<float, 4, 2>; using EraseGpuKernel4fDHWCTest = EraseGpuKernelTest<float, 4, 3>; using EraseGpuKernel5fTest = EraseGpuKernelTest<float, 5>; #define ERASE_TEST_P(TEST) \ TEST_P(TEST, RunAndVerify) { \ this->RunTest(); \ } ERASE_TEST_P(EraseGpuKernel1fTest) ERASE_TEST_P(EraseGpuKernel2fTest) ERASE_TEST_P(EraseGpuKernel2NCfTest) ERASE_TEST_P(EraseGpuKernel3fTest) ERASE_TEST_P(EraseGpuKernel3fHWCTest) ERASE_TEST_P(EraseGpuKernel3fCHWTest) ERASE_TEST_P(EraseGpuKernel4fDHCWTest) ERASE_TEST_P(EraseGpuKernel4fDHWCTest) ERASE_TEST_P(EraseGpuKernel5fTest) // Parameters for tests are: // <number of erase regions>, <generation scheme>, <fill_type>, <shape> std::vector<EraseTestParams<1>> values_1 = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {512 * 1024}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {512 * 1024}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512 * 1024}}, {10, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512 * 1024}}, {100, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512 * 1024}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {512 * 1024}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {512 * 1024}}, {1, RegionGen::RANDOM_ERASE, FillType::DEFAULT, {512 * 1024}}, {10, RegionGen::RANDOM_ERASE, FillType::DEFAULT, {512 * 1024}}, {100, RegionGen::RANDOM_ERASE, FillType::DEFAULT, {512 * 1024}}, }; std::vector<EraseTestParams<2>> values_2 = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {512, 1024}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {512, 1024}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512, 1024}}, {10, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512, 1024}}, {100, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512, 1024}}, }; std::vector<EraseTestParams<2>> values_2NC = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {512 * 1024, 3}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {512 * 1024, 3}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {512 * 1024, 3}}, {10, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {512 * 1024, 3}}, {100, RegionGen::RANDOM_ERASE, FillType::DEFAULT, {512 * 1024, 3}}, }; std::vector<EraseTestParams<3>> values_3 = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {10, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {100, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {1000, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, 
{16, 256, 256}}, }; std::vector<EraseTestParams<3>> values_3HWC = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {256, 256, 1}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {256, 256, 3}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {256, 256, 4}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {256, 256, 8}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {256, 256, 16}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {256, 256, 64}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {256, 256, 1}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {256, 256, 3}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {256, 256, 4}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {256, 256, 8}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {256, 256, 16}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {256, 256, 64}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 1}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 3}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 4}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 8}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 16}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 64}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 1}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 3}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 4}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 8}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 16}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 64}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {256, 256, 1}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {256, 256, 3}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {256, 256, 4}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {256, 256, 8}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {256, 256, 16}}, {0, RegionGen::NO_ERASE, FillType::DEFAULT, {256, 256, 64}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {256, 256, 1}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {256, 256, 3}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {256, 256, 4}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {256, 256, 8}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {256, 256, 16}}, {1, RegionGen::FULL_ERASE, FillType::DEFAULT, {256, 256, 64}}, {1, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 1}}, {10, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 1}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 1}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 1}}, {1, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 3}}, {10, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 3}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 3}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 3}}, {1, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 16}}, {10, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 16}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 16}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {256, 256, 16}}, }; std::vector<EraseTestParams<3>> values_3CHW = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {3, 256, 256}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {3, 256, 256}}, {0, 
RegionGen::NO_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {16, 256, 256}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {3, 256, 256}}, {10, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {3, 256, 256}}, {100, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {3, 256, 256}}, {1000, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {3, 256, 256}}, }; std::vector<EraseTestParams<4>> values_4DHCW = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {64, 64, 3, 64}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {64, 64, 4, 64}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {64, 64, 8, 64}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {64, 64, 3, 64}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {64, 64, 4, 64}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {64, 64, 8, 64}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 3, 64}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 4, 64}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 8, 64}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 3, 64}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 4, 64}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 8, 64}}, {1, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 3, 64}}, {10, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 3, 64}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 3, 64}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {16, 128, 3, 256}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 3, 64}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {16, 128, 3, 256}}, }; std::vector<EraseTestParams<4>> values_4DHWC = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {64, 64, 64, 3}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {64, 64, 64, 4}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {64, 64, 64, 8}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {64, 64, 64, 3}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {64, 64, 64, 4}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {64, 64, 64, 8}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 3}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 4}}, {0, RegionGen::NO_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 8}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 3}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 4}}, {1, RegionGen::FULL_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 8}}, {1, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 3}}, {10, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 3}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 3}}, {100, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {16, 128, 256, 3}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {64, 64, 64, 3}}, {1000, RegionGen::RANDOM_ERASE, FillType::CHANNEL_CONSECUTIVE, {16, 128, 256, 3}}, }; std::vector<EraseTestParams<5>> values_5 = { {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {4, 6, 5, 64, 64}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {4, 6, 5, 64, 64}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 256}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 256}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {4, 6, 32, 32, 32}}, {1, RegionGen::RANDOM_ERASE, 
FillType::MAGIC_42, {4, 6, 5, 64, 64}}, {0, RegionGen::NO_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 16}}, {1, RegionGen::FULL_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 16}}, {1, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 16}}, {10, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 16}}, {100, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 16}}, {1000, RegionGen::RANDOM_ERASE, FillType::MAGIC_42, {2, 3, 3, 256, 16}}, }; #define INSTANTIATE_ERASE_SUITE(TEST, VALUES) \ INSTANTIATE_TEST_SUITE_P(TEST, TEST ## Test, testing::ValuesIn(VALUES)); INSTANTIATE_ERASE_SUITE(EraseGpuKernel1f, values_1); INSTANTIATE_ERASE_SUITE(EraseGpuKernel2f, values_2); INSTANTIATE_ERASE_SUITE(EraseGpuKernel2NCf, values_2NC); INSTANTIATE_ERASE_SUITE(EraseGpuKernel3f, values_3); INSTANTIATE_ERASE_SUITE(EraseGpuKernel3fHWC, values_3HWC); INSTANTIATE_ERASE_SUITE(EraseGpuKernel3fCHW, values_3CHW); INSTANTIATE_ERASE_SUITE(EraseGpuKernel4fDHCW, values_4DHCW); INSTANTIATE_ERASE_SUITE(EraseGpuKernel4fDHWC, values_4DHWC); INSTANTIATE_ERASE_SUITE(EraseGpuKernel5f, values_5); } // namespace kernels } // namespace dali
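////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of DALI). The ERASE_TEST_P and
// INSTANTIATE_ERASE_SUITE macros above are thin wrappers over GoogleTest's
// value-parameterized tests. The minimal pattern they expand to looks roughly
// like the snippet below; DemoParams, DemoTest and the parameter values are
// hypothetical.
////////////////////////////////////////////////////////////////////////////////
#include <gtest/gtest.h>
#include <vector>

struct DemoParams {
  int num_regions;  // analogous to max_erase_regions above
  int extent;       // analogous to the shape entries above
};

class DemoTest : public ::testing::TestWithParam<DemoParams> {};

TEST_P(DemoTest, RunAndVerify) {
  const DemoParams p = GetParam();  // one test invocation per parameter value
  EXPECT_GE(p.num_regions, 0);
  EXPECT_GT(p.extent, 0);
}

static const std::vector<DemoParams> demo_values = {
    {0, 512}, {1, 512}, {10, 1024},
};

INSTANTIATE_TEST_SUITE_P(Demo, DemoTest, ::testing::ValuesIn(demo_values));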
#include "utils/utils.cuh" #include "utils/intrinsics.cuh" #include "kernel_libs/kernel_fusion.cuh" #include "data_structures/graph.cuh" #include "data_structures/active_set.cuh" #include "data_structures/functor.cuh" #include "abstraction/config.cuh" template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __expand_VC_WM_fused_wtf(active_set_t as, G g, F f, config_t conf){ const int* __restrict__ strict_adj_list = g.dg_adj_list; // used for local storage __shared__ int tmp[3*THDNUM_EXPAND]; // used for kernel fusion __shared__ Block_Scan<int,10>::Temp_Space scan_space; __shared__ int output_cache[THDNUM_EXPAND<<LOG_PER_OUT]; const int OFFSET_ouput = threadIdx.x << LOG_PER_OUT; int thread_output = 0; const int assize = ASProxy<fmt,M>::get_size_hard(as); const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; if(assize==0) {if(gtid==0) as.halt_device();return;} const int cosize = 32; const int phase = gtid & (cosize-1); const int warp_id = threadIdx.x >> 5; const int OFFSET_warp = 3*cosize*warp_id; const int OFFSET_start_pos = OFFSET_warp + cosize; const int OFFSET_odegree = OFFSET_warp + 2*cosize; const int assize_align = (assize&(cosize-1))?(((assize>>5)+1)<<5):assize; Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_warp+phase] = v; tmp[OFFSET_start_pos+phase] = tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_warp+phase] = -1; tmp[OFFSET_odegree+phase] = 0; } //step 2: get sum of edges for these 32 vertexs and scan odegree; int nedges_warp=0; int offset=1; for(int d=cosize>>1; d>0; d>>=1){ if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } nedges_warp = tmp[OFFSET_odegree+cosize-1]; if(!phase) tmp[OFFSET_odegree+cosize-1]=0; for(int d=1; d<cosize; d<<=1){ offset >>=1; if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process 32 edges in parallel for(int i=phase; i<nedges_warp; i+=cosize){ int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; //if(tmp[OFFSET_warp+id] < 0) continue; int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int u = __ldg(strict_adj_list+ei); int v = tmp[OFFSET_warp+id]; auto vdata = f.emit(v, g.fetch_edata(ei), g); bool toprocess = true; // check 1: if idempotent, we can prune the redundant update (if has, that's also OK) //TODO: this will not help to improve the performance, that's weird //toprocess = as.bitmap.mark_duplicate_lite(u); // check 2: if not push TO ALL, the target vertex must be Inactive // cond is provided by users to indicate whether u should accept the update. //if(toprocess && !conf.conf_toall) if(toprocess) toprocess = f.cond(u, vdata, g); // if u pass all the checks, do the computation in the functor if(toprocess) toprocess = f.compAtomic(f.wa_of(u), vdata, g); //check 3: enqueue the u only once. 
(if duplicate, wrong answer) //TODO: this will not help to improve the performance too, that's so weird if(toprocess && !conf.pruning()) toprocess = as.bitmap.mark_duplicate_atomic(u); // if u is updated successfully, write u to the queue directly. // cache mode. if(toprocess){ if(thread_output < PER_OUT) output_cache[OFFSET_ouput + (thread_output++)] = u; else{ __direct_write<M>(output_cache + OFFSET_ouput, as.queue); thread_output = 0; output_cache[OFFSET_ouput+(thread_output++)] = u; } } }//for 32 edges if(__any(thread_output==PER_OUT)) __write_global_queue_warp<M>(scan_space, output_cache, thread_output, as.queue); }//for all the elements in the active set. if(__any(thread_output>0)) __write_global_queue_warp<M>(scan_space, output_cache, thread_output, as.queue); } template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __expand_VC_WM_fused(active_set_t as, G g, F f, config_t conf){ const int* __restrict__ strict_adj_list = g.dg_adj_list; // used for local storage __shared__ int tmp[3*THDNUM_EXPAND]; // used for kernel fusion __shared__ Block_Scan<int,10>::Temp_Space scan_space; __shared__ int output_cache[THDNUM_EXPAND<<LOG_PER_OUT]; const int OFFSET_ouput = threadIdx.x << LOG_PER_OUT; int thread_output = 0; const int assize = ASProxy<fmt,M>::get_size_hard(as); const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; if(assize==0) {if(gtid==0) as.halt_device();return;} const int cosize = 32; const int phase = gtid & (cosize-1); const int warp_id = threadIdx.x >> 5; const int OFFSET_warp = 3*cosize*warp_id; const int OFFSET_start_pos = OFFSET_warp + cosize; const int OFFSET_odegree = OFFSET_warp + 2*cosize; const int assize_align = (assize&(cosize-1))?(((assize>>5)+1)<<5):assize; Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_warp+phase] = v; tmp[OFFSET_start_pos+phase] = tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_warp+phase] = -1; tmp[OFFSET_odegree+phase] = 0; } //step 2: get sum of edges for these 32 vertexs and scan odegree; int nedges_warp=0; int offset=1; for(int d=cosize>>1; d>0; d>>=1){ if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } nedges_warp = tmp[OFFSET_odegree+cosize-1]; if(!phase) tmp[OFFSET_odegree+cosize-1]=0; for(int d=1; d<cosize; d<<=1){ offset >>=1; if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process 32 edges in parallel for(int i=phase; i<nedges_warp; i+=cosize){ int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; if(tmp[OFFSET_warp+id] < 0) continue; int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int u = __ldg(strict_adj_list+ei); int v = tmp[OFFSET_warp+id]; auto vdata = f.emit(v, g.fetch_edata(ei), g); bool toprocess = true; // check 2: if not push TO ALL, the target vertex must be Inactive // cond is provided by users to indicate whether u should accept the update. 
if(toprocess && !conf.conf_toall) toprocess = f.cond(u, vdata, g); // if u pass all the checks, do the computation in the functor if(toprocess){ //f.filter(u, g); // useless here toprocess = f.compAtomic(f.wa_of(u), vdata, g); } //check 3: enqueue the u only once. (if duplicate, wrong answer) if(toprocess) toprocess = as.bitmap.mark_duplicate_atomic(u); // if u is updated successfully, write u to the queue directly. // cache mode. if(toprocess){ if(thread_output < PER_OUT) output_cache[OFFSET_ouput + (thread_output++)] = u; else{ __direct_write<M>(output_cache + OFFSET_ouput, as.queue); thread_output = 0; output_cache[OFFSET_ouput+(thread_output++)] = u; } } }//for 32 edges if(__any(thread_output==PER_OUT)) __write_global_queue_warp<M>(scan_space, output_cache, thread_output, as.queue); }//for all the elements in the active set. if(__any(thread_output>0)) __write_global_queue_warp<M>(scan_space, output_cache, thread_output, as.queue); } template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __expand_VC_WM(active_set_t as, G g, F f, config_t conf){ const int* __restrict__ strict_adj_list = g.dg_adj_list; // used for local storage __shared__ int tmp[3*THDNUM_EXPAND]; const int assize = ASProxy<fmt,M>::get_size(as); const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; const int cosize = 32; const int phase = gtid & (cosize-1); const int warp_id = threadIdx.x >> 5; const int OFFSET_warp = 3*cosize*warp_id; const int OFFSET_start_pos = OFFSET_warp + cosize; const int OFFSET_odegree = OFFSET_warp + 2*cosize; //const int assize_align = (assize&(cosize-1))?(((assize>>5)+1)<<5):assize; const int assize_align = alignment(assize, cosize); Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_warp+phase] = v; tmp[OFFSET_start_pos+phase] = tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_warp+phase] = -1; tmp[OFFSET_odegree+phase] = 0; } //step 2: get sum of edges for these 32 vertexs and scan odegree; int nedges_warp=0; int offset=1; for(int d=cosize>>1; d>0; d>>=1){ if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } nedges_warp = tmp[OFFSET_odegree+cosize-1]; if(!phase) tmp[OFFSET_odegree+cosize-1]=0; for(int d=1; d<cosize; d<<=1){ offset >>=1; if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process 32 edges in parallel for(int i=phase; i<nedges_warp; i+=cosize){ int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; if(tmp[OFFSET_warp+id] < 0) continue; int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int u = __ldg(strict_adj_list+ei); bool toprocess = true; // check 1: if idempotent, we can prune the redundant update if(toprocess && conf.pruning()) toprocess = as.bitmap.mark_duplicate_lite(u); // check 2: if not push TO ALL, the target vertex must be Inactive if(toprocess && !conf.conf_toall) toprocess = as.bitmap.is_inactive(u); // if u pass all the checks, do the computation in the functor if(toprocess){ int v = tmp[OFFSET_warp+id]; auto vdata = f.emit(v, 
g.fetch_edata(ei), g); f.compAtomic(f.wa_of(u), vdata, g); } }//for 32 edges }//for all the elements in the active set. } template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __rexpand_VC_WM(active_set_t as, G g, F f, config_t conf){ using edata_t = typename G::edge_t; using vdata_t = typename F::wa_t; const int* __restrict__ strict_adj_list = g.directed ? g.dgr_adj_list : g.dg_adj_list; edata_t* strict_edgedata = g.directed? g.dgr_edgedata : g.dg_edgedata; __shared__ int tmp[3*THDNUM_EXPAND]; int assize = ASProxy<fmt,M>::get_size(as); int STRIDE = blockDim.x*gridDim.x; int gtid = threadIdx.x + blockIdx.x*blockDim.x; int cosize = 32; int phase = gtid & (cosize - 1); int warp_id = threadIdx.x >> 5; int OFFSET_warp = 3*cosize*warp_id; int OFFSET_start_pos = OFFSET_warp + cosize; int OFFSET_odegree = OFFSET_warp + 2*cosize; int assize_align = (assize&(cosize-1))?(((assize>>5)+1)<<5):assize; Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_warp+phase] = v; tmp[OFFSET_start_pos+phase] = g.get_in_start_pos(v); //tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = g.get_in_degree(v); //tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_warp+phase] = v; tmp[OFFSET_odegree+phase] = 0; } //step 2: get sum of edges for these 32 vertexs and scan odegree; int nedges_warp=0; int offset=1; for(int d=32>>1; d>0; d>>=1){ if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } nedges_warp = tmp[OFFSET_odegree + 32-1]; if(!phase) tmp[OFFSET_odegree+32-1]=0; for(int d=1; d<32; d<<=1){ offset >>=1; if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } // Binary search will not get the index which is out of range int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process 32 edges in parallel int vote=0; vdata_t vdata; for(int i=phase; i<nedges_warp; i+=cosize){ vote=0; int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; if(tmp[OFFSET_warp+id] < 0) continue; // v < 0 int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int insegID = MIN((i-tmp[OFFSET_odegree+id]), phase); int rod = ((id==31)? 
(nedges_warp): (tmp[OFFSET_odegree+id+1])) - i - 1; int segsize = insegID + 1 + MIN(31-phase, rod); int v = tmp[OFFSET_warp+id]; int u = __ldg(strict_adj_list+ei); // rarely in the pull mode fusion // if(conf.conf_fromall || conf.conf_fuse_inspect || as.query(u) == Active){ // Data source must be active all conf_fromall is enabled if(conf.conf_fromall || as.bitmap.is_active(u)){ vote = 1; vdata = f.emit(u, strict_edgedata+ei, g); } // reduce int offset = segsize; while(offset>>1){ int th = offset>>1; int delta = (offset+1)>>1; vdata_t _vdata = __exshfl_down(vdata, delta); int _vote = __shfl_down(vote, delta); if(insegID < th){ if(vote && _vote) f.comp(&vdata, _vdata, g); else if(_vote) vdata = _vdata; vote |= _vote; } offset = delta; } if(insegID==0 && vote){ f.comp(f.wa_of(v), vdata, g); } } } } template<> struct ExpandProxy<VC,WM,Push>{ template<typename E, typename F> static void expand(active_set_t as, device_graph_t<CSR,E> g, F f, config_t conf){ if(conf.conf_fuse_inspect) { if(conf.conf_pruning && conf.conf_asfmt==Queue && as.queue.mode==Normal && conf.conf_toall==false) __expand_VC_WM_fused_wtf<Queue,Normal><<<conf.ctanum, conf.thdnum>>>(as, g, f, conf); else Launch_Expand_VC(WM_fused, as, g, f, conf); }else{ Launch_Expand_VC(WM, as, g, f, conf); } //__expand_VC_WM_fused<<<32,TH>>>(as, g, f, conf); //__expand_VC_WM<<<CN,TH>>>(as, g, f, conf); } template<typename E, typename F> static void expand(active_set_t as, device_graph_t<COO,E> g, F f, config_t conf){} }; template<> struct ExpandProxy<VC,WM,Pull>{ template<typename E, typename F> static void expand(active_set_t as, device_graph_t<CSR,E> g, F f, config_t conf){ //__rexpand_VC_WM<<<CN,TH>>>(as, g, f, conf); Launch_RExpand_VC(WM, as, g, f, conf); } template<typename E, typename F> static void expand(active_set_t as, device_graph_t<COO,E> g, F f, config_t conf){} }; #endif
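////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the kernels above). Steps 2 and 3 of
// __expand_VC_WM implement warp-centric load balancing: the 32 per-vertex
// out-degrees are turned into an exclusive prefix sum in shared memory, and
// each edge rank i is mapped back to its source vertex with an upper-bound
// search (__upper_bound(...) - 1). The host-side sketch below mirrors that
// mapping with std::partial_sum and std::upper_bound; all names are
// hypothetical.
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

// Returns, for every edge rank in [0, sum(degrees)), the index of the vertex
// that owns that edge -- the same id the GPU binary search yields per warp.
static std::vector<int> map_edges_to_vertices(const std::vector<int>& degrees) {
  std::vector<int> excl(degrees.size(), 0);  // exclusive scan of the degrees
  std::partial_sum(degrees.begin(), degrees.end() - 1, excl.begin() + 1);
  const int nedges = excl.back() + degrees.back();

  std::vector<int> owner(nedges);
  for (int i = 0; i < nedges; ++i) {
    // first offset strictly greater than i, minus one => owning vertex
    auto it = std::upper_bound(excl.begin(), excl.end(), i);
    owner[i] = static_cast<int>(it - excl.begin()) - 1;
  }
  return owner;
}

int main() {
  const std::vector<int> degrees = {3, 0, 2, 1};
  for (int v : map_edges_to_vertices(degrees)) std::printf("%d ", v);
  std::printf("\n");  // prints: 0 0 0 2 2 3 (vertex 1 has no edges)
  return 0;
}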
#pragma once #include <assert.h> #include <stdint.h> #include <cub/block/block_discontinuity.cuh> #include "rxmesh/context.h" #include "rxmesh/handle.h" #include "rxmesh/iterator.cuh" #include "rxmesh/kernels/collective.cuh" #include "rxmesh/kernels/debug.cuh" #include "rxmesh/kernels/loader.cuh" #include "rxmesh/kernels/rxmesh_queries.cuh" #include "rxmesh/types.h" #include "rxmesh/util/meta.h" namespace rxmesh { namespace detail { /** * query_block_dispatcher() */ template <Op op, uint32_t blockThreads, typename activeSetT> __device__ __inline__ void query_block_dispatcher(const PatchInfo& patch_info, activeSetT compute_active_set, const bool oriented, uint32_t& num_src_in_patch, uint16_t*& s_output_offset, uint16_t*& s_output_value, uint16_t& num_owned, uint32_t*& not_owned_patch, uint16_t*& not_owned_local_id) { static_assert(op != Op::EE, "Op::EE is not supported!"); constexpr bool load_fe = (op == Op::VF || op == Op::EE || op == Op::EF || op == Op::FV || op == Op::FE || op == Op::FF); constexpr bool loead_ev = (op == Op::VV || op == Op::VE || op == Op::VF || op == Op::EV || op == Op::FV); static_assert(loead_ev || load_fe, "At least faces or edges needs to be loaded"); // Check if any of the mesh elements are in the active set // input mapping does not need to be stored in shared memory since it will // be read coalesced, we can rely on L1 cache here num_src_in_patch = 0; if constexpr (op == Op::VV || op == Op::VE || op == Op::VF) { num_src_in_patch = patch_info.num_owned_vertices; } if constexpr (op == Op::EV || op == Op::EF) { num_src_in_patch = patch_info.num_owned_edges; } if constexpr (op == Op::FV || op == Op::FE || op == Op::FF) { num_src_in_patch = patch_info.num_owned_faces; } bool is_active = false; uint16_t local_id = threadIdx.x; while (local_id < num_src_in_patch) { is_active = is_active || compute_active_set({patch_info.patch_id, local_id}); local_id += blockThreads; } if (__syncthreads_or(is_active) == 0) { // reset num_src_in_patch to zero to indicate that this block/patch has // no work to do num_src_in_patch = 0; return; } // 2) Load the patch info extern __shared__ uint16_t shrd_mem[]; LocalVertexT* s_ev = reinterpret_cast<LocalVertexT*>(shrd_mem); LocalEdgeT* s_fe = reinterpret_cast<LocalEdgeT*>(shrd_mem); load_mesh<blockThreads>(patch_info, loead_ev, load_fe, s_ev, s_fe); not_owned_patch = reinterpret_cast<uint32_t*>(shrd_mem); not_owned_local_id = shrd_mem; num_owned = 0; // 3)Perform the query operation if (oriented) { assert(op == Op::VV); if constexpr (op == Op::VV) { __syncthreads(); v_v_oreinted<blockThreads>(patch_info, s_output_offset, s_output_value, reinterpret_cast<uint16_t*>(s_ev)); } } else { if constexpr (!(op == Op::VV || op == Op::FV || op == Op::FF)) { load_not_owned<op, blockThreads>( patch_info, not_owned_local_id, not_owned_patch, num_owned); } __syncthreads(); query<blockThreads, op>(s_output_offset, s_output_value, reinterpret_cast<uint16_t*>(s_ev), reinterpret_cast<uint16_t*>(s_fe), patch_info.num_vertices, patch_info.num_edges, patch_info.num_faces); } // load not-owned local and patch id if constexpr (op == Op::VV || op == Op::FV || op == Op::FF) { // need to sync since we will overwrite things that are used in // query __syncthreads(); load_not_owned<op, blockThreads>( patch_info, not_owned_local_id, not_owned_patch, num_owned); } __syncthreads(); } /** * query_block_dispatcher() */ template <Op op, uint32_t blockThreads, typename computeT, typename activeSetT> __device__ __inline__ void query_block_dispatcher(const Context& context, 
const uint32_t patch_id, computeT compute_op, activeSetT compute_active_set, const bool oriented = false) { // Extract the type of the input parameters of the compute lambda function. // The first parameter should be Vertex/Edge/FaceHandle and second parameter // should be RXMeshVertex/Edge/FaceIterator using ComputeTraits = detail::FunctionTraits<computeT>; using ComputeHandleT = typename ComputeTraits::template arg<0>::type; using ComputeIteratorT = typename ComputeTraits::template arg<1>::type; using LocalT = typename ComputeIteratorT::LocalT; // Extract the type of the single input parameter of the active_set lambda // function. It should be Vertex/Edge/FaceHandle and it should match the // first parameter of the compute lambda function using ActiveSetTraits = detail::FunctionTraits<activeSetT>; using ActiveSetHandleT = typename ActiveSetTraits::template arg<0>::type; static_assert( std::is_same_v<ActiveSetHandleT, ComputeHandleT>, "First argument of compute_op lambda function should match the first " "argument of active_set lambda function "); static_assert(op != Op::EE, "Op::EE is not supported!"); assert(patch_id < context.get_num_patches()); uint32_t num_src_in_patch = 0; uint16_t* s_output_offset(nullptr); uint16_t* s_output_value(nullptr); uint16_t num_owned; uint32_t* not_owned_patch(nullptr); uint16_t* not_owned_local_id(nullptr); detail::template query_block_dispatcher<op, blockThreads>( context.get_patches_info()[patch_id], compute_active_set, oriented, num_src_in_patch, s_output_offset, s_output_value, num_owned, not_owned_patch, not_owned_local_id); // Call compute on the output in shared memory by looping over all // source elements in this patch. uint16_t local_id = threadIdx.x; while (local_id < num_src_in_patch) { assert(s_output_value); if (compute_active_set({patch_id, local_id})) { constexpr uint32_t fixed_offset = ((op == Op::EV) ? 2 : (op == Op::FV || op == Op::FE) ? 3 : 0); ComputeHandleT handle(patch_id, local_id); ComputeIteratorT iter(local_id, reinterpret_cast<LocalT*>(s_output_value), s_output_offset, fixed_offset, patch_id, num_owned, not_owned_patch, not_owned_local_id, int(op == Op::FE)); compute_op(handle, iter); } local_id += blockThreads; } } } // namespace detail /** * @brief The main query function to be called by the whole block. In this * function, threads will be assigned to mesh elements which will be accessible * through the input computation lambda function (compute_op). This function * also provides a predicate to specify the active set i.e., the set on which * the query operations should be done. This is mainly used to skip query on * a subset of the input mesh elements which may lead to better performance * @tparam Op the type of query operation * @tparam blockThreads the number of CUDA threads in the block * @tparam computeT the type of compute lambda function (inferred) * @tparam activeSetT the type of active set lambda function (inferred) * @param context which store various parameters needed for the query * operation. The context can be obtained from RXMeshStatic * @param compute_op the computation lambda function that will be executed by * each thread in the block. This lambda function takes two input parameters: * 1. Handle to the mesh element assigned to the thread. The handle type matches * the source of the query (e.g., VertexHandle for VE query) 2. an iterator to * the query output. 
The iterator type matches the type of the mesh element * "iterated" on (e.g., EdgeIterator for VE query) * @param compute_active_set a predicate used to specify the active set. This * lambda function take a single parameter which is a handle of the type similar * to the input of the query operation (e.g., VertexHandle for VE query) * @param oriented specifies if the query are oriented. Currently only VV query * is supported for oriented queries. FV, FE and EV is oriented by default */ template <Op op, uint32_t blockThreads, typename computeT, typename activeSetT> __device__ __inline__ void query_block_dispatcher(const Context& context, computeT compute_op, activeSetT compute_active_set, const bool oriented = false) { if (blockIdx.x >= context.get_num_patches()) { return; } detail::query_block_dispatcher<op, blockThreads>( context, blockIdx.x, compute_op, compute_active_set, oriented); } /** * @brief The main query function to be called by the whole block. In this * function, threads will be assigned to mesh elements which will be accessible * through the input computation lambda function (compute_op). * @tparam Op the type of query operation * @tparam blockThreads the number of CUDA threads in the block * @tparam computeT the type of compute lambda function (inferred) * @param context which store various parameters needed for the query * operation. The context can be obtained from RXMeshStatic * @param compute_op the computation lambda function that will be executed by * each thread in the block. This lambda function takes two input parameters: * 1. Handle to the mesh element assigned to the thread. The handle type matches * the source of the query (e.g., VertexHandle for VE query) 2. an iterator to * the query output. The iterator type matches the type of the mesh element * "iterated" on (e.g., EdgeIterator for VE query) * @param oriented specifies if the query are oriented. Currently only VV query * is supported for oriented queries. FV, FE and EV is oriented by default */ template <Op op, uint32_t blockThreads, typename computeT> __device__ __inline__ void query_block_dispatcher(const Context& context, computeT compute_op, const bool oriented = false) { // Extract the type of the first input parameters of the compute lambda // function. It should be Vertex/Edge/FaceHandle using ComputeTraits = detail::FunctionTraits<computeT>; using ComputeHandleT = typename ComputeTraits::template arg<0>::type; query_block_dispatcher<op, blockThreads>( context, compute_op, [](ComputeHandleT) { return true; }, oriented); } /** * @brief This function is used to perform a query operation on a specific mesh * element. This is only needed for higher query (e.g., 2-ring query) where the * first query is done using query_block_dispatcher in which each thread is * assigned to a mesh element. Subsequent queries should be handled by this * function. This function should be called by the whole CUDA block. * @tparam Op the type of query operation * @tparam blockThreads the number of CUDA threads in the block * @tparam computeT the type of compute lambda function (inferred) * @tparam HandleT the type of input handle (inferred) which should match the * input of the query operations (e.g., VertexHandle for VE query) * @param context which store various parameters needed for the query * operation. The context can be obtained from RXMeshStatic * @param src_id the input mesh element to the query. 
Inactive threads can * simply pass HandleT() in which case they are skipped * @param compute_op the computation lambda function that will be executed by * the thread. This lambda function takes two input parameters: * 1. HandleT which is the same as src_id 2. an iterator to the query output. * The iterator type matches the type of the mesh element "iterated" on (e.g., * EdgeIterator for VE query) * @param oriented specifies if the query are oriented. Currently only VV query * is supported for oriented queries. FV, FE and EV is oriented by default */ template <Op op, uint32_t blockThreads, typename computeT, typename HandleT> __device__ __inline__ void higher_query_block_dispatcher( const Context& context, const HandleT src_id, computeT compute_op, const bool oriented = false) { using ComputeTraits = detail::FunctionTraits<computeT>; using ComputeIteratorT = typename ComputeTraits::template arg<1>::type; // The whole block should be calling this function. If one thread is not // participating, its src_id should be INVALID32 auto compute_active_set = [](HandleT) { return true; }; // the source and local id of the source mesh element std::pair<uint32_t, uint16_t> pl = src_id.unpack(); // Here, we want to identify the set of unique patches for this thread // block. We do this by first sorting the patches, compute discontinuity // head flag, then threads with head flag =1 can add their patches to the // shared memory buffer that will contain the unique patches __shared__ uint32_t s_block_patches[blockThreads]; __shared__ uint32_t s_num_patches; if (threadIdx.x == 0) { s_num_patches = 0; } typedef cub::BlockRadixSort<uint32_t, blockThreads, 1> BlockRadixSort; typedef cub::BlockDiscontinuity<uint32_t, blockThreads> BlockDiscontinuity; union TempStorage { typename BlockRadixSort::TempStorage sort_storage; typename BlockDiscontinuity::TempStorage discont_storage; }; __shared__ TempStorage all_temp_storage; uint32_t thread_data[1], thread_head_flags[1]; thread_data[0] = pl.first; thread_head_flags[0] = 0; BlockRadixSort(all_temp_storage.sort_storage).Sort(thread_data); BlockDiscontinuity(all_temp_storage.discont_storage) .FlagHeads(thread_head_flags, thread_data, cub::Inequality()); if (thread_head_flags[0] == 1 && thread_data[0] != INVALID32) { uint32_t id = ::atomicAdd(&s_num_patches, uint32_t(1)); s_block_patches[id] = thread_data[0]; } // We could eliminate the discontinuity operation and atomicAdd and instead // use thrust::unique. However, this method causes illegal memory access // and it looks like a bug in thrust /*__syncthreads(); // uniquify uint32_t* new_end = thrust::unique(thrust::device, s_block_patches, s_block_patches + blockThreads); __syncthreads(); if (threadIdx.x == 0) { s_num_patches = new_end - s_block_patches - 1; }*/ __syncthreads(); for (uint32_t p = 0; p < s_num_patches; ++p) { uint32_t patch_id = s_block_patches[p]; assert(patch_id < context.get_num_patches()); uint32_t num_src_in_patch = 0; uint16_t *s_output_offset(nullptr), *s_output_value(nullptr); uint16_t num_owned = 0; uint16_t* not_owned_local_id(nullptr); uint32_t* not_owned_patch(nullptr); detail::template query_block_dispatcher<op, blockThreads>( context.get_patches_info()[patch_id], compute_active_set, oriented, num_src_in_patch, s_output_offset, s_output_value, num_owned, not_owned_patch, not_owned_local_id); if (pl.first == patch_id) { constexpr uint32_t fixed_offset = ((op == Op::EV) ? 2 : (op == Op::FV || op == Op::FE) ? 
3 : 0); ComputeIteratorT iter( pl.second, reinterpret_cast<typename ComputeIteratorT::LocalT*>( s_output_value), s_output_offset, fixed_offset, patch_id, num_owned, not_owned_patch, not_owned_local_id, int(op == Op::FE)); compute_op(src_id, iter); } __syncthreads(); } } } // namespace rxmesh
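////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of RXMesh). A typical caller of
// query_block_dispatcher() is a one-block-per-patch kernel such as the one
// below, which runs a VV query and accumulates the total vertex valence. The
// dispatcher call and the (handle, iterator) lambda signature follow the
// documentation above; the concrete type names VertexHandle/VertexIterator and
// the iter.size() accessor are assumptions, and real code would usually write
// into an rxmesh attribute rather than a raw counter.
////////////////////////////////////////////////////////////////////////////////
template <uint32_t blockThreads>
__global__ void demo_total_valence(rxmesh::Context context, int* d_total)
{
    using namespace rxmesh;
    auto sum_ring = [&](VertexHandle vh, VertexIterator iter) {
        // iter enumerates the 1-ring neighbours of vh for the VV query
        ::atomicAdd(d_total, static_cast<int>(iter.size()));
    };
    // collective call: every thread in the block must reach it
    query_block_dispatcher<Op::VV, blockThreads>(context, sum_ring);
}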
#ifndef __NBLA_CUDA_UTILS_FAST_REDUCE_CUH__ #define __NBLA_CUDA_UTILS_FAST_REDUCE_CUH__ #include <assert.h> #include <nbla/cuda/common.hpp> #include <nbla/cuda/math.hpp> #include <nbla/cuda/utils/index_converter.cuh> #include <nbla/cuda/utils/reduce.hpp> #include <nbla/cuda/utils/warp_shuffle.cuh> #include <numeric> // TODO: Rename fast_reduct to device_reduce after removing other duplicated // reduction source codes. namespace nbla { #define NBLA_CUDA_REDUCE_MAX_BLOCKS 65535 #define NBLA_CUDA_REDUCE_UNROLL_XY 4 #define NBLA_CUDA_REDUCE_UNROLL_Y 4 /** Determine which block calls this kernel last. */ template <class IndexT> __device__ bool is_last_block(const int block_local_idx, // idx in a block const IndexT block_idx, // idx of blocks const int num_blocks, int *block_counter) { __threadfence(); int last = 0; if (block_local_idx == 0) { last = atomicAdd(block_counter + block_idx, 1); } return __syncthreads_or(last == num_blocks - 1); } /** Sequential reduction by each thread */ template <int unroll_size, class Op> __device__ typename Op::StorageT kernel_thread_reduce( Op op, typename Op::IndexT inner_idx, const typename Op::IndexT outer_idx, const typename Op::IndexT inner_size, const int inner_grid_stride, const IndexConverter<typename Op::IndexT> inner_idx_conv, const IndexConverter<typename Op::IndexT> outer_idx_conv) { using StorageT = typename Op::StorageT; using IndexT = typename Op::IndexT; StorageT reduced[unroll_size]; #pragma unroll for (int i = 0; i < unroll_size; i++) { reduced[i] = op.init(); } IndexT global_outer_idx = outer_idx_conv.change_strides(0, outer_idx); StorageT loader[unroll_size]; for (; inner_idx + inner_grid_stride * (unroll_size - 1) < inner_size; inner_idx += inner_grid_stride * unroll_size) { #pragma unroll for (int i = 0; i < unroll_size; i++) { // Load first. The loads become independent and the latencies are hidden. IndexT global_idx = inner_idx_conv.change_strides( global_outer_idx, inner_idx + inner_grid_stride * i); loader[i] = op.make_storage(op.input[global_idx], inner_idx + inner_grid_stride * i); } #pragma unroll for (int i = 0; i < unroll_size; i++) { // Reduce reduced[i] = op(reduced[i], loader[i]); } } #pragma unroll for (int i = 0; i < unroll_size; i++) { // Load of the tail misaligned elements // This loop unroll keeps "reduced" in register cache. If no loop unroll, // it is located in local memory, causing bad performance. const IndexT idx = inner_idx + inner_grid_stride * i; if (idx >= inner_size) { break; } IndexT global_idx = inner_idx_conv.change_strides(global_outer_idx, idx); reduced[i] = op(reduced[i], op.make_storage(op.input[global_idx], idx)); } #pragma unroll for (int i = 1; i < unroll_size; i++) { reduced[0] = op(reduced[0], reduced[i]); } return reduced[0]; } /** Determine which block calls this kernel last. 
*/ template <int vec_size, class VecT, class Op> __device__ typename Op::StorageT kernel_vectrized_thread_reduce_x( Op op, typename Op::IndexT inner_idx, const typename Op::IndexT outer_idx, const typename Op::IndexT inner_size, const int inner_grid_stride) { using StorageT = typename Op::StorageT; using IndexT = typename Op::IndexT; StorageT reduced[vec_size]; #pragma unroll for (int i = 0; i < vec_size; i++) { reduced[i] = op.init(); } // Load of the head misaligned elements auto input_slice = op.input + inner_size * outer_idx; constexpr int align_byte = sizeof(VecT); constexpr int elem_byte = sizeof(typename Op::Tcu); constexpr int aligned_elems = align_byte / elem_byte; auto num_heads = (int64_t)input_slice % align_byte / elem_byte; if (num_heads > 0) { num_heads = aligned_elems - num_heads; // Assuming blockDim.x * gridDim.x >= vec_size; if (inner_idx < num_heads) { reduced[inner_idx] = op(reduced[inner_idx], op.make_storage(input_slice[inner_idx], inner_idx)); } // Make aligned address input_slice += num_heads; } // Vectorized load of the aligned elements const VecT *const load_input = reinterpret_cast<const VecT *>(input_slice); typename Op::Tcu load_reg[vec_size]; VecT *load_vec = reinterpret_cast<VecT *>(&load_reg[0]); for (; num_heads + vec_size - 1 + vec_size * inner_idx < inner_size; inner_idx += inner_grid_stride) { *load_vec = load_input[inner_idx]; // vectrized load #pragma unroll for (int i = 0; i < vec_size; i++) { reduced[i] = op(reduced[i], op.make_storage(load_reg[i], num_heads + i + vec_size * inner_idx)); } } // Load of the tail misaligned elements // Assuming blockDim.x >= vec_size. const IndexT idx = vec_size * IndexT((inner_size - num_heads) / vec_size) + threadIdx.x; if (blockIdx.x == 0 && num_heads + idx < inner_size) { reduced[0] = op(reduced[0], op.make_storage(input_slice[idx], num_heads + idx)); } for (int i = 1; i < vec_size; i++) { reduced[0] = op(reduced[0], reduced[i]); } return reduced[0]; } template <class SHFL_T, class T> __device__ T kernel_shuffle_down(T val, const int offset) { SHFL_T shfl_v = *reinterpret_cast<SHFL_T *>(&val); SHFL_T v = warp::shuffle_down(shfl_v, offset); return *reinterpret_cast<T *>(&v); } template <class Op> __device__ typename Op::StorageT kernel_warp_reduce(Op op, typename Op::StorageT val) { using StorageT = typename Op::StorageT; #pragma unroll for (int offset = CUDA_WARP_SIZE / 2; offset > 0; offset /= 2) { if (sizeof(StorageT) == 16) { val = op(val, kernel_shuffle_down<float4>(val, offset)); } else if (sizeof(StorageT) == 8) { val = op(val, kernel_shuffle_down<float2>(val, offset)); } else if (sizeof(StorageT) == 4) { val = op(val, kernel_shuffle_down<float>(val, offset)); } else if (sizeof(StorageT) == 2) { val = op(val, kernel_shuffle_down<half>(val, offset)); } else { assert(false); } } return val; } template <class Op> __device__ typename Op::StorageT kernel_block_reduce_xy(Op op, typename Op::StorageT reduced, typename Op::StorageT *sbuf) { // Reduction along a binary tree. 
if (blockDim.x > CUDA_WARP_SIZE) { const int block_local_idx = threadIdx.x + blockDim.x * threadIdx.y; sbuf[block_local_idx] = reduced; __syncthreads(); #pragma unroll for (int offset = blockDim.x / 2; offset >= CUDA_WARP_SIZE; offset /= 2) { if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) { reduced = op(reduced, sbuf[block_local_idx + offset]); sbuf[block_local_idx] = reduced; } __syncthreads(); } } reduced = kernel_warp_reduce(op, reduced); return reduced; } template <class Op> __device__ typename Op::StorageT kernel_block_reduce_y(Op op, typename Op::StorageT reduced, typename Op::StorageT *sbuf) { // Reduction along a binary tree. if (blockDim.y > 1) { const int block_local_idx = threadIdx.x + blockDim.x * threadIdx.y; sbuf[block_local_idx] = reduced; #pragma unroll for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { reduced = op(reduced, sbuf[threadIdx.x + blockDim.x * (threadIdx.y + offset)]); sbuf[block_local_idx] = reduced; } } } // Warp reduce is not performed because warps are assigned the memory // continuous part. (x part) return reduced; } template <class Op> __device__ typename Op::StorageT kernel_inter_block_reduce_xy(Op op, const typename Op::IndexT outer_idx, typename Op::StorageT *sbuf) { // Assuming blockDim.y == 1; // x reduction const auto num_blocks = gridDim.x; // the elements in buf per outer_idx const auto inner_buf = op.buf + num_blocks * outer_idx; // Load from global memory // Thread reduction on registers while loading. typename Op::StorageT reduced = op.init(); for (auto idx = threadIdx.x; idx < num_blocks; idx += blockDim.x) { reduced = op(reduced, inner_buf[idx]); } // Block reduce return kernel_block_reduce_xy(op, reduced, sbuf); } template <class Op> __device__ typename Op::StorageT kernel_inter_block_reduce_y(Op op, const typename Op::IndexT outer_idx, typename Op::StorageT *sbuf) { // x reduction const auto num_blocks = gridDim.y; // the elements in buf per outer_idx const auto inner_buf = op.buf + outer_idx; // Load from global memory // Thread reduction on registers while loading. typename Op::StorageT reduced = op.init(); for (auto idx = threadIdx.y; idx < num_blocks; idx += blockDim.y) { reduced = op(reduced, inner_buf[blockDim.x * gridDim.x * idx]); } // Block reduce return kernel_block_reduce_y(op, reduced, sbuf); } template <class VecT, class Op> __global__ void kernel_reduce_xy(Op op, int *const block_counter, const typename Op::IndexT inner_size, const typename Op::IndexT outer_size, const IndexConverter<typename Op::IndexT> inner_idx_conv, const IndexConverter<typename Op::IndexT> outer_idx_conv) { using StorageT = typename Op::StorageT; using IndexT = typename Op::IndexT; // "extern __shared__" for template type cannot be used in CUDA. // This is a workaround. 
extern __shared__ uint8_t sbuf_char[]; StorageT *sbuf = reinterpret_cast<StorageT *>(sbuf_char); // Grid-strided loop for outer dimensions for (IndexT outer_idx = threadIdx.y + (IndexT)blockIdx.y * blockDim.y; outer_idx < outer_size; outer_idx += blockDim.y * gridDim.y) { // Sequential reduce by each thread const IndexT inner_idx = threadIdx.x + (IndexT)blockIdx.x * blockDim.x; StorageT reduced; if (sizeof(VecT) == sizeof(typename Op::Tcu)) { // Without vectrized load for general case reduced = kernel_thread_reduce<NBLA_CUDA_REDUCE_UNROLL_XY>( op, inner_idx, outer_idx, inner_size, blockDim.x * gridDim.x, inner_idx_conv, outer_idx_conv); } else { // With vectrized load for single-dimensional reduction. reduced = kernel_vectrized_thread_reduce_x<NBLA_CUDA_REDUCE_UNROLL_XY, VecT>( op, inner_idx, outer_idx, inner_size, blockDim.x * gridDim.x); } // Block reduce reduced = kernel_block_reduce_xy(op, reduced, sbuf); if (gridDim.x == 1) { // Reduction is completed by a block. if (threadIdx.x == 0) { op.store(outer_idx, reduced); } } else { // Inter-block reduce if (threadIdx.x == 0) { op.intermediate_store(blockIdx.x + (IndexT)gridDim.x * outer_idx, reduced); } if (is_last_block(threadIdx.x, outer_idx, gridDim.x, block_counter)) { reduced = kernel_inter_block_reduce_xy(op, outer_idx, sbuf); if (threadIdx.x == 0) { op.store(outer_idx, reduced); } } } } } template <class Op> __global__ void kernel_reduce_y(Op op, int *const block_counter, const typename Op::IndexT inner_size, const typename Op::IndexT outer_size, const IndexConverter<typename Op::IndexT> inner_idx_conv, const IndexConverter<typename Op::IndexT> outer_idx_conv) { using StorageT = typename Op::StorageT; using IndexT = typename Op::IndexT; // "extern __shared__" for template type cannot be used in CUDA. // This is a workaround. extern __shared__ uint8_t sbuf_char[]; StorageT *sbuf = reinterpret_cast<StorageT *>(sbuf_char); // Grid-strided loop for outer dimensions for (IndexT outer_idx = threadIdx.x + (IndexT)blockIdx.x * blockDim.x; outer_idx < outer_size; outer_idx += blockDim.x * gridDim.x) { // Sequential reduce by each thread const IndexT inner_idx = threadIdx.y + (IndexT)blockIdx.y * blockDim.y; StorageT reduced = kernel_thread_reduce<NBLA_CUDA_REDUCE_UNROLL_Y>( op, inner_idx, outer_idx, inner_size, blockDim.y * gridDim.y, inner_idx_conv, outer_idx_conv); // Block reduce reduced = kernel_block_reduce_y(op, reduced, sbuf); if (gridDim.y == 1) { // Reduction is completed by a block. if (threadIdx.y == 0) { op.store(outer_idx, reduced); } } else { // Inter-block reduce if (threadIdx.y == 0) { op.intermediate_store( outer_idx + (IndexT)blockDim.x * gridDim.x * blockIdx.y, reduced); } if (is_last_block(threadIdx.y, outer_idx, gridDim.y, block_counter)) { reduced = kernel_inter_block_reduce_y(op, outer_idx, sbuf); if (threadIdx.y == 0) { op.store(outer_idx, reduced); } } } } } template <class Op> __global__ void kernel_copy(const Size_t size, Op op) { NBLA_CUDA_KERNEL_LOOP_SIZE_T(idx, size) { op.store(idx, op.make_storage(op.input[idx], 0)); } } static uint32_t get_strided_grid(const Size_t grid_dim) { return static_cast<uint32_t>(NBLA_CEIL_SIZE_T_DIV( grid_dim, NBLA_CEIL_SIZE_T_DIV(grid_dim, NBLA_CUDA_REDUCE_MAX_BLOCKS))); } template <class Op> void fast_reduce_xy(const Context &ctx, Op &op, const ReduceSetup &setup) { // Some procedures can be moved to ReduceSetup::operator() if implementing the // mechanism to define Op::IndexT when Function::setup. 
using Tcu = typename Op::Tcu; using IndexT = typename Op::IndexT; using StorageT = typename Op::StorageT; // Firstly determine an ideal parallelism for large enough input shape. const auto size_x_pow2 = next_pow2_floor(setup.size_x); const auto size_y_pow2 = next_pow2_floor(setup.size_y); const auto block_y = std::min( size_y_pow2, Size_t(NBLA_CUDA_REDUCE_NUM_THREADS / CUDA_WARP_SIZE)); const auto block_x = std::max(Size_t(CUDA_WARP_SIZE), Size_t(NBLA_CUDA_REDUCE_NUM_THREADS / block_y)); dim3 block_dim = dim3(block_x, block_y); dim3 grid_dim(1, NBLA_CEIL_SIZE_T_DIV(setup.size_y, block_dim.y)); // Try to keep sufficient parallelism for any input shape. const auto min_elements_per_thread = NBLA_CUDA_REDUCE_UNROLL_XY; auto elements_per_thread = setup.size_x / block_dim.x; // Assign more threads into each reduction. while (block_dim.x * NBLA_CUDA_REDUCE_UNROLL_XY < size_x_pow2 && block_dim.y > 1 && elements_per_thread > min_elements_per_thread) { block_dim.x *= 2; block_dim.y /= 2; grid_dim.y = NBLA_CEIL_SIZE_T_DIV(setup.size_y, block_dim.y); elements_per_thread /= 2; } // Assign more blocks into each reduction. if (block_dim.x < size_x_pow2 && block_dim.y == 1) { while (grid_dim.x < setup.min_blocks && block_dim.x * grid_dim.x * NBLA_CUDA_REDUCE_UNROLL_XY < size_x_pow2 && elements_per_thread > min_elements_per_thread) { grid_dim.x *= 2; elements_per_thread /= 2; } } // Determine the grid size for grid-strided loop. grid_dim.x = get_strided_grid(grid_dim.x); grid_dim.y = get_strided_grid(grid_dim.y); // Prepare the temporary buffers for inter-block reduce NdArray buf_arr; NdArray block_counter_arr; op.buf = nullptr; int *block_counter = nullptr; if (grid_dim.x > 1) { NBLA_CHECK(block_dim.x == NBLA_CUDA_REDUCE_NUM_THREADS && block_dim.y == 1, error_code::value, "Block division failed in reduction.", "Please report this error to the developer team."); buf_arr.reshape(Shape_t{block_dim.y * grid_dim.y, grid_dim.x * static_cast<Size_t>(sizeof(StorageT))}, true); op.buf = buf_arr.cast(get_dtype<char>(), ctx, true) ->template pointer<StorageT>(); block_counter_arr.reshape(Shape_t{setup.size_y}, true); block_counter_arr.zero(); block_counter = block_counter_arr.cast(get_dtype<int>(), ctx)->template pointer<int>(); } // Determine the shared memory size for block reduce. int smem_size = 0; if (block_dim.x > 1) { smem_size = NBLA_CUDA_REDUCE_NUM_THREADS * sizeof(StorageT); } // Utility to calculate the indices fast. const IndexConverter<IndexT> inner_idx_conv(setup.strides_x, setup.strides_x_input); const IndexConverter<IndexT> outer_idx_conv(setup.strides_y, setup.strides_y_input); // Reduction kernel launch const bool only_x = (setup.ndim_x == 1 && setup.ndim_y <= 1); auto kernel = kernel_reduce_xy<Tcu, Op>; if (only_x) { // Call the faster implementation. if (sizeof(Tcu) == 4) { // A float4 stores the 4 elements of 32-bit type (float). kernel = kernel_reduce_xy<float4, Op>; } else if (sizeof(Tcu) == 2) { // A float2 stores the 4 elements of 16-bit type (half). kernel = kernel_reduce_xy<float2, Op>; } else { NBLA_ERROR(error_code::type, "The size of types for reduction must be 2 or 4 bytes. 
" "Please report this error to the developer team."); } } kernel<<<grid_dim, block_dim, smem_size>>>(op, block_counter, setup.size_x, setup.size_y, inner_idx_conv, outer_idx_conv); NBLA_CUDA_KERNEL_CHECK(); } template <class Op> void fast_reduce_y(const Context &ctx, Op &op, const ReduceSetup &setup) { // Some procedures can be moved to ReduceSetup::operator() if implementing the // mechanism to define Op::IndexT when Function::setup. using Tcu = typename Op::Tcu; using StorageT = typename Op::StorageT; using IndexT = typename Op::IndexT; // Firstly determine an ideal parallelism for large enough input shape. dim3 block_dim(NBLA_CUDA_REDUCE_NUM_THREADS, 1); dim3 grid_dim(NBLA_CEIL_SIZE_T_DIV(setup.size_x, block_dim.x), 1); // Try to keep sufficient parallelism for any input shape. const auto size_y_pow2 = next_pow2_floor(setup.size_y); const auto min_elements_per_thread = NBLA_CUDA_REDUCE_UNROLL_Y; auto elements_per_thread = setup.size_y / block_dim.y; // Assign more threads into each reduction. while (block_dim.x > CUDA_WARP_SIZE && block_dim.y * NBLA_CUDA_REDUCE_UNROLL_Y < size_y_pow2 && elements_per_thread > min_elements_per_thread) { block_dim.x /= 2; block_dim.y *= 2; grid_dim.x = NBLA_CEIL_SIZE_T_DIV(setup.size_x, block_dim.x); elements_per_thread /= 2; } // Assign more blocks into each reduction. if (block_dim.x == CUDA_WARP_SIZE && block_dim.y < size_y_pow2) { while (grid_dim.x * grid_dim.y < setup.min_blocks && block_dim.y * grid_dim.y * NBLA_CUDA_REDUCE_UNROLL_Y < size_y_pow2 && elements_per_thread > min_elements_per_thread) { grid_dim.y *= 2; elements_per_thread /= 2; } } // Determine the grid size for grid-strided loop. grid_dim.x = get_strided_grid(grid_dim.x); grid_dim.y = get_strided_grid(grid_dim.y); // Prepare the temporary buffers for inter-block reduce NdArray buf_arr; NdArray block_counter_arr; op.buf = nullptr; int *block_counter = nullptr; if (grid_dim.y > 1) { buf_arr.reshape(Shape_t{block_dim.x * grid_dim.x, grid_dim.y * static_cast<Size_t>(sizeof(StorageT))}, true); op.buf = buf_arr.cast(get_dtype<char>(), ctx, true) ->template pointer<StorageT>(); block_counter_arr.reshape(Shape_t{setup.size_x}, true); block_counter_arr.zero(); block_counter = block_counter_arr.cast(get_dtype<int>(), ctx)->template pointer<int>(); } // Determine the shared memory size for block reduce. int smem_size = 0; if (block_dim.y > 1) { smem_size = NBLA_CUDA_REDUCE_NUM_THREADS * sizeof(StorageT); } // Utility to calculate the indices fast. const IndexConverter<IndexT> inner_idx_conv(setup.strides_y, setup.strides_y_input); const IndexConverter<IndexT> outer_idx_conv(setup.strides_x, setup.strides_x_input); // Reduction kernel launch kernel_reduce_y<<<grid_dim, block_dim, smem_size>>>( op, block_counter, setup.size_y, setup.size_x, inner_idx_conv, outer_idx_conv); NBLA_CUDA_KERNEL_CHECK(); } template <class Op> void fast_reduce(const Context &ctx, Op op, const ReduceSetup &setup) { if (setup.copy_only) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE_SIZE_T(kernel_copy, setup.size_input, op); return; } if (setup.reduce_x) { fast_reduce_xy(ctx, op, setup); } else { fast_reduce_y(ctx, op, setup); } } } #endif
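// Illustrative sketch (added for exposition, not part of fast_reduce.cuh
// above): the kernels above only rely on the reduction operator `Op` exposing
// the members they actually call -- Tcu, IndexT, StorageT, input, buf,
// init(), make_storage(), operator()(), intermediate_store() and store().
// The real operators live elsewhere in nbla and may carry extra members; this
// hypothetical sum operator only shows the minimal shape those calls assume.
#include <cstdint>

template <typename T> struct SketchSumReduceOp {
  using Tcu = T;          // element type read from the input
  using IndexT = int64_t; // index type chosen by the launcher
  using StorageT = T;     // accumulator type kept in registers / shared memory

  const Tcu *input;  // reduction source (op.input[global_idx] in the kernels)
  StorageT *buf;     // intermediate buffer used by the inter-block reduction
  Tcu *output;       // final destination, one value per outer index

  __device__ StorageT init() const { return StorageT(0); }
  __device__ StorageT make_storage(Tcu v, IndexT /*idx*/) const { return v; }
  __device__ StorageT operator()(StorageT a, StorageT b) const { return a + b; }
  __device__ void intermediate_store(IndexT i, StorageT v) const { buf[i] = v; }
  __device__ void store(IndexT i, StorageT v) const { output[i] = v; }
};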
#pragma once #include "cuda/ComputeCapabilities.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/WarpBitonicSort.cuh" #include "cuda/WarpReductions.cuh" #include <assert.h> #include <boost/preprocessor/repetition/repeat.hpp> #include <cuda.h> #include <device_functions.h> #include <math_constants.h> #include <stdio.h> /** @file CUDA device code routines for finding the top-Kth float element in a set in O(N) time using radix selection. Uses no scratch space and does not modify inputs. Right now only contains versions to do this work in a kernel utilizing only the threads in a warp in a almost entirely warp-coherent manner. An entire warp must be given. */ namespace facebook { namespace cuda { namespace detail { /// Initialize an array to a value template <int N, typename T> __device__ __forceinline__ void setArray(T arr[N], T val) { for (int i = 0; i < N; ++i) { arr[i] = val; } } /// In order to force register usage of the bucket count array, we have to /// unroll the increment selection. Otherwise, local memory is used for /// counts[] which severely degrades performance. __device__ __forceinline__ void incrementArray(int val, int counts[16]) { #define BUCKET_CASE(UNUSED1, I, UNUSED2) \ case I: \ counts[I]++; \ break; switch (val) { BOOST_PP_REPEAT(16, BUCKET_CASE, 0); default: break; } #undef BUCKET_CASE } /** We use a most significant to least significant radix selection on the float values, which requires at most sizeof(float) * 2 scans through the array, one for each nybble. In order to use radix selection, we use the property that for positive floating-point values f1 and f2: ~~~ f1 > f2 <=> *(int*)&f1 > *(int*)&f2. ~~~ Something similar is true for negative floating point values f1 and f2 after zero-ing the leading sign bit, and except that the order is reversed: ~~~ f1 > f2 <=> (*(int*)f1 & 0x7fffffff) < (*(int*)f2 & 0x7fffffff). ~~~ This is true even for +/-inf and for denormalized floats. Negative zero is a special case. Selection by radix will give us that -0.0f < +0.0f, which is not true for IEEE fp comparison. We handle this special case when we return the answer seen, not in comparing values here. +NaNs will lead all positive values, and -NaNs will be minimal values (non-canonical NaNs, if they exist, will be sorted according to this). The focal point of the radix selection algorithm is the use of countNybbles and the CHECK_NYBBLE macro. The idea is that we starting out, we don't know where the Kth highest element lives, so we have to consider *every* float in the input. We look at the most significant nybble, and each thread counts into 16 buckets the number of floats in its subset of data with that leading nybble. This is done by countNybbles. countNybble takes as arguments `desired` and `desiredMask`. It only looks at values `v` such that (v & desiredMask) == desired. By default, both are 0, so it will look at every float. nybbleCheckPos is the current nybble that is beinig bucketed. It starts at 28, meaning we're first looking at the most significant nybble. countNybbles will add a count of nybble distribution to 16 buckets. One iteration through, by counting the distribution of the leading nybble in each float, we figure out what leading nybble the Kth highest float must have. As an example, let's say that K is 10. By counting the distribution of leading nybbles in the inputs, say we get: ~~~ 0x2: 1 0x5: 2 0x6: 2 0x7: 11 ~~~ In this case, none of the floats are negative (otherwise, they would have leading nybble 0x8 -> 0xf). 
Since we're looking for the 10th highest float, that cannot have leading nybble 0x2, 0x5 or 0x6 since those counts are less than 10. We walk through the buckets in order, and we warp reduce the counts across all threads to one count when it comes time to look in a particular bucket. Thus, the 10th highest float must have leading nybble 0x7. The problem then becomes, for the next iteration, finding the (10 - (1+2+2)) = 5th highest float with leading nybble 0x7. Since the count for the nybble 0x7 is not 1, we don't know the actual answer yet, and we have to continue. Next iteration through, we no longer have to count every float, just those with leading nybble 0x7 (i.e., floats interpreted as a bit pattern v such that (v & desiredMask) == desired. Even though we have to physically scan the entire input, we are only counting a subset of it. So: - desired changes from 0 -> 0x70000000, and - desiredMask changes from 0 -> 0xf0000000. We continue, and count up the floats with leading nybble 0x7, getting counts: ~~~ 0x(7)1: 1 0x(7)9: 2 0x(7)b: 1 0x(7)c: 1 0x(7)f: 6 ~~~ This means that the count of all floats with the prefix 0x7fyyyyyy is 6. We're only bucketing counts by the second nybble now. Scanning through, the 5th highest float with prefix 0x7yyyyyyy must have prefix 0x7cyyyyyy, since from lowest to highest above, we reach 5 in bucket 0xc. Thus, the 10th highest float in the entire set is the unique float with prefix 0x7cyyyyyy. This is unique because the count for this bucket is 1. If we get through all nybbles to the least significant nybble and still have a count > 1, then that means that the Kth highest element is not unique. For example, in the set 2 2 3 3 3, the 2nd highest element is 3, which is duplicated 3 times. Since the MSN contains the sign bit, we have to first look at buckets 0-7 to see if the Kth highest float is positive. If so, then we continue looking only at positive floats. If not, then we continue looking only at negative floats, but in reverse order. Eventually we find a unique Kth highest element if the count is 1 in our bucket, or we end at the LSN with a duplicate count, in which case the Kth highest element is not unique. Performs a histogram count of the nybbles that occur at the bit position `nybbleCheckPos`, but only for those ints that match (x & `desiredMask`) == `desired`. In other words, if bits [31, `nybbleCheckPos` + 4] match those in `desired`, then return the contents of bits [`nybbleCheckPos` + 3, `nybbleCheckPos`]. */ template <typename T, int N, int ILP> __device__ __forceinline__ void countNybbles(int counts[N], unsigned desired, unsigned desiredMask, int nybbleCheckPos, const DeviceTensor<T, 1>& data) { // Clear out counts from a previous round setArray<N>(counts, 0); // Treat floats as unsigned ints, since we're counting raw nybble // values unsigned vals[ILP]; setArray<N>(vals, 0U); // Handle ILP portion int index = getLaneId(); // Distribute index loop among threads and unroll by ILP, each thread // operates on interleaved indices getLaneId() + i * WARP_SIZE. 
if (ILP > 1) { for ( ; index + (ILP - 1) * WARP_SIZE < data.getSize(0); index += WARP_SIZE * ILP) { for (int i = 0; i < ILP; ++i) { vals[i] = data[index + i * WARP_SIZE].template ldgAs<unsigned>(); } for (int i = 0; i < ILP; ++i) { const unsigned val = vals[i]; // We only consider values that match the bits we're looking // for in `desired`, since we've already ruled out other values if ((val & desiredMask) == desired) { // Add to our count of nybbles seen const unsigned nybble = getBitfield(val, nybbleCheckPos, 4); // Add to our counts (unrolled to force use of registers for // `counts`. incrementArray(nybble, counts); } } } } // Handle remainder for ( ; index < data.getSize(0); index += WARP_SIZE) { const unsigned val = data[index].template ldgAs<unsigned>(); // We only consider values that match the bits we're looking // for in `desired`, since we've already ruled out other values if ((val & desiredMask) == desired) { // Add to our count of nybbles seen const unsigned nybble = getBitfield(val, nybbleCheckPos, 4); // Add to our counts (unrolled to force use of registers for // `counts`. incrementArray(nybble, counts); } } } /** A warp coherent implementation that finds a value in the data such that the floats, treated as uints 'v' match the bit pattern such that (v & desiredMask) == desired. If the answer found is -0.0f, because -0.0f == +0.0f, it is possible that there are multiple +0.0f results that we've ignored in comparing by radix, since radix-wise +0.0f > -0.0f which is not true for IEEE fp. Thus, if the answer found is -0.0f, then we have to include the count of all +0.0fs present in the duplicate count, in order to treat the comparison the same way that normal sorting mechanisms will treat it. If the found result is not -0.0f, returns the value found and `dupCount` as the pair's value. If the found result is -0.0f, returns the value found and `dupCount` plus the number of +0.0f in the data as the pair's value. The value need not be unique, but the warp as a whole will return the highest value seen across the warp. */ __device__ __forceinline__ Pair<float, int> findAnswer(const DeviceTensor<float, 1>& data, unsigned desired, unsigned desiredMask, int dupCount) { // Each thread will scan for values with the desired prefix, and // then we gather the value (if any) across all threads in the warp // using a max() reduction between the values and -inf. If there is // an answer, it should be greater than -inf (unless it is -inf), // and the one reduced result should be the solution. float found = -CUDART_INF_F; // TODO: ILP? for (int index = getLaneId(); index < data.getSize(0); index += WARP_SIZE) { float val = data[index].ldg(); if (((unsigned) __float_as_int(val) & desiredMask) == desired) { found = val; } } const float max = warpReduceMax(found); if (__float_as_int(max) == __float_as_int(-0.0f)) { // Special case negative zero, in order to handle the +0.0f == // -0.0f property int posZeroCount = 0; for (int index = getLaneId(); index < data.getSize(0); index += WARP_SIZE) { if (__float_as_int(data[index].ldg()) == __float_as_int(0.0f)) { ++posZeroCount; } } posZeroCount = warpReduceSum(posZeroCount); return Pair<float, int>(max, posZeroCount + dupCount); } return Pair<float, int>(max, dupCount); } /** Finds the Kth highest floating point value in a linear array [arr, end) without modifying the data and without temporary storage except for registers. K starts at 1. All threads in the warp will return the value. Handles all floats except NaNs. This function minimizes warp divergence. 
Implementation for small arrays such that the `(end - start) <= warpSize`. */ __device__ Pair<float, int> warpFindTopKthElementSmall32(const DeviceTensor<float, 1>& data, int k) { // The array should fit within the warp size. assert(data.getSize(0) <= WARP_SIZE); // There should be enough values to return the k-th highest. assert(k > 0 && k <= data.getSize(0)); const int lane = getLaneId(); // For threads in the warp that have no element in the array, give // them -inf, so they'll sort to the end. float val = (lane < data.getSize(0)) ? data[lane] : -CUDART_INF_F; // Warp coherent sort! Handle negative zero, as does the radix code val = warpBitonicSort<float, GreaterThan<float> >(val); // Lane k - 1 now contains the kth highest element; broadcast it to // all threads in the warp const float topK = __shfl(val, k - 1); // Also return the number of lanes <= k - 1 that have this same topK // value; this is the number of duplicates present. const int numSeen = warpReduceSum((int) ((topK == val) && getLaneId() < k)); return Pair<float, int>(topK, numSeen); } /** Finds the Kth highest floating point value in a linear array [arr, end) without modifying the data and without temporary storage except for registers. - K starts at 1. - All threads in the warp will return the value. - Handles all floats except NaNs. - Negative zero is specialized by findAnswer. - This function minimizes warp divergence. Implementation for large arrays such that there are more elements than warp threads. */ __device__ Pair<float, int> warpFindTopKthElementLarge(const DeviceTensor<float, 1>& data, int k) { // There should be enough values to return the k-th highest. assert(k > 0 && k <= data.getSize(0)); // kNybbles is the number of possible values of a nybble (2^4). #define kNybbles 16 // TODO: C++11 constexpr int nybbleCounts[kNybbles]; // We are currently evaluating the nybble in this position (e.g., // the nybble we're scanning is the one in bits // [`nybbleCheckPos` + 3, `nybbleCheckPos`]. Initially, we look at // the most significant nybble (28). int nybbleCheckPos = 28; // We only consider elements x such that (x & desiredMask) == desired // Initially, we consider all elements of the array, so the above // statement is true regardless of input. unsigned desired = 0; unsigned desiredMask = 0; // Accumulate leading nybble counts // TODO: select ILP value before starting detail::countNybbles<float, kNybbles, 1>( nybbleCounts, desired, desiredMask, nybbleCheckPos, data); // We are looking for the top kToFind-th element when iterating over // nybbles; this count gets reduced by elimination when counting // successive nybbles int kToFind = k; // For each nybble we're evaluating, in whatever order we're // evaluating them, we have to do the same work #define CHECK_NYBBLE() \ { \ /* Only reduce the bucket sum if we need to, when we need to */ \ const int count = warpReduceSum(nybbleCounts[i]); \ \ if (count == 1 && kToFind == 1) { \ /* There is a unique answer to the top-Kth element */ \ /* The unique answer contains the desired nybble at */ \ /* bit positions [`nybbleCheckPos1 + 3, `nybbleCheckPos`] */ \ return detail::findAnswer( \ data, \ setBitfield(desired, i, nybbleCheckPos, 4), \ setBitfield(desiredMask, 0xf, nybbleCheckPos, 4), \ 1); \ } \ \ if (count >= kToFind) { \ /* The top-Kth element must contain this nybble. 
*/ \ /* Add it to the prefix we're looking for, and continue on */ \ /* the next nybble at `nybbleCheckPos` - 4 */ \ desired = setBitfield(desired, i, nybbleCheckPos, 4); \ desiredMask = setBitfield(desiredMask, 0xf, nybbleCheckPos, 4); \ nybbleCheckPos -= 4; \ break; \ } \ \ kToFind -= count; \ } // Figure out what leading nybble the k-th largest float should // have, and by extension, whether it is positive or negative. // Scan the MSN first, positive floats only (the sign bit is the // leading bit). for (int i = (kNybbles / 2) - 1; i >= 0; --i) { CHECK_NYBBLE(); } bool kthLargestIsPositive = true; if (desiredMask == 0) { // The k-th largest float is negative. kthLargestIsPositive = false; // What leading nybble does it have? for (int i = kNybbles / 2; i < kNybbles; ++i) { CHECK_NYBBLE(); } } for ( ; ; ) { // Now, we only consider floats with (f & mask) == desired. // TODO: select ILP value before starting detail::countNybbles<float, kNybbles, 1>( nybbleCounts, desired, desiredMask, nybbleCheckPos, data); if (kthLargestIsPositive) { // Iterate in greatest -> least order (we want larger positive values) for (int i = kNybbles - 1; i >= 0; --i) { CHECK_NYBBLE(); } } else { // Iterate in least -> greatest order (we want smaller negative // values) for (int i = 0; i < kNybbles; ++i) { CHECK_NYBBLE(); } } if (nybbleCheckPos < 0) { // We have scanned all nybbles, and haven't found a unique // result. Therefore, there is a non-unique result that matches // the bit pattern 'desired' entirely; return it. return detail::findAnswer(data, desired, ~0U, kToFind); } } #undef CHECK_NYBBLE } } // detail /** Finds the Kth highest floating point value in a linear array [arr, end) without modifying the data and without temporary storage except for registers. - K starts at 1. - All threads in the warp will return the value. - Handles all floats except NaNs. - This function minimizes warp divergence. Returns the number of times the top-Kth element uniquely occurs along with its value. */ __device__ Pair<float, int> warpFindTopKthElement(const DeviceTensor<float, 1>& data, int k) { if (data.getSize(0) <= WARP_SIZE) { // We can do this with a single warp coherent sort return detail::warpFindTopKthElementSmall32(data, k); } else { return detail::warpFindTopKthElementLarge(data, k); } } } } // namespace
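// Illustrative host-side sketch (added for exposition, not part of the warp
// code above): the serial core of the nybble walk described in the comments
// above, simplified to unsigned integer keys so the float sign-bit ordering
// and the -0.0f special case are out of scope. The device code performs the
// same walk, but with warp-parallel bucket counts and early exit on a unique
// answer. Assumes 1 <= k <= data.size().
#include <cstdint>
#include <vector>

inline uint32_t sketchTopKthPrefix(const std::vector<uint32_t>& data, int k) {
  uint32_t desired = 0, desiredMask = 0;
  for (int pos = 28; pos >= 0; pos -= 4) {
    // Count, per nybble value at `pos`, the keys matching the known prefix.
    int counts[16] = {0};
    for (uint32_t v : data) {
      if ((v & desiredMask) == desired) {
        ++counts[(v >> pos) & 0xf];
      }
    }
    // Walk buckets from the largest nybble downward; the k-th highest key
    // must live in the first bucket whose cumulative count reaches k.
    for (int nyb = 15; nyb >= 0; --nyb) {
      if (counts[nyb] >= k) {
        desired |= uint32_t(nyb) << pos;      // extend the known prefix
        desiredMask |= uint32_t(0xf) << pos;  // and the mask that selects it
        break;
      }
      k -= counts[nyb]; // skip the buckets of strictly larger keys
    }
  }
  return desired; // full 32-bit pattern of the k-th highest key
}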
#include "common/common_texture_utils.h" #include "common/safe_call_utils.hpp" #include "pcg_solver/BlockPCG.h" #include "pcg_solver/BinBlockCSR.h" #include "pcg_solver/solver_configs.h" #include "math/DenseGaussian.h" #include <device_launch_parameters.h> #include <iostream> namespace surfelwarp { namespace device { /** * \brief lhs <- diag_blks rhs. Parallel over blocks */ template<int BlockDim> __global__ void blockDenseMVKernel( const float* diag_blks, const float* rhs, const unsigned num_blks, float* lhs ) { //The block for this element const auto blk_idx = threadIdx.x + blockDim.x * blockIdx.x; if(blk_idx >= num_blks) return; //Load the rhs element float r_blk[BlockDim]; #pragma unroll for (auto j = 0; j < BlockDim; j++) { r_blk[j] = rhs[BlockDim * blk_idx + j]; } //Compute the matrix vector product float l_blk[BlockDim]; for (auto j = 0; j < BlockDim; j++) { //The current row index const auto row_idx = blk_idx * BlockDim + j; //Compute the matrix-vector product float l_row = 0.0f; for (auto i = 0; i < BlockDim; i++) { const auto mat_value = diag_blks[BlockDim * row_idx + i]; l_row += mat_value * r_blk[i]; } //Store the value locally l_blk[j] = l_row; } //Store the elements #pragma unroll for (auto j = 0; j < BlockDim; j++) { lhs[BlockDim * blk_idx + j] = l_blk[j]; } } /** * \brief r <- b; x <- 0; d <- inv_diag r. This method parallelize over blocks * \tparam BlockDim */ template<int BlockDim> __global__ void blockPCGZeroInitKernel( const DeviceArrayView<float> b, const float* inv_diag_blks, float* r, float* d, float* x ) { //Obtain the index for blocks const auto blk_idx = threadIdx.x + blockDim.x * blockIdx.x; const auto num_blks = b.Size() / BlockDim; if (blk_idx >= num_blks) return; //Load the b element float b_blk[BlockDim]; #pragma unroll for(auto j = 0; j < BlockDim; j++) { b_blk[j] = b[BlockDim * blk_idx + j]; } //For each row in this block float d_local[BlockDim]; for(auto j = 0; j < BlockDim; j++) { //The current row index const auto row_idx = blk_idx * BlockDim + j; //Compute the matrix-vector product float d_row = 0.0f; for(auto i = 0; i < BlockDim; i++) { const auto mat_value = inv_diag_blks[BlockDim * row_idx + i]; d_row += mat_value * b_blk[i]; } //Store the value d_local[j] = d_row; } //Store the element for(auto j = 0; j < BlockDim; j++) { const auto row_idx = blk_idx * BlockDim + j; r[row_idx] = b_blk[j]; d[row_idx] = d_local[j]; x[row_idx] = 0.0f; } } /** * \brief The second kernel in Weber et al 2013 * alpha <- (*delta_new) / (*dot_dq); * x <- x + alpha d; t <- r - alpha q * s <- inv_diag t; delta_old = delta_new */ template<int BlockDim> __global__ void blockPCGSecondUpdateKernel( const DeviceArrayView<float> d, const float* q, const float* r, const float* inv_diag_blks, const float* delta_new, const float* dot_dq, float* x, float* s, float* t, float* delta_old ) { //Obtain the index const auto row_idx = threadIdx.x + blockDim.x * blockIdx.x; const auto blk_idx = row_idx / BlockDim; const float alpha = (*delta_new) / (*dot_dq); if(row_idx >= d.Size()) return; //Load the r block, perform r <- r - alpha q float t_blk[BlockDim]; for(auto i = 0; i < BlockDim; i++){ t_blk[i] = r[blk_idx * BlockDim + i] - alpha * q[blk_idx * BlockDim + i]; } //Perform s <- inv_diag * r float s_row = 0.0f; for(auto j = 0; j < BlockDim; j++){ const auto mat_value = inv_diag_blks[BlockDim * row_idx + j]; s_row += mat_value * t_blk[j]; } // x <- x + alpha d; store the value x[row_idx] += alpha * d[row_idx]; t[row_idx] = t_blk[(row_idx % BlockDim)]; s[row_idx] = s_row; //delta_old_ = 
delta_new_ if (row_idx == 0) { *delta_old = (*delta_new); } } /** * \brief beta <- delta_new / delta_old; d <- s + beta d */ template<int BlockDim> __global__ void blockPCGThirdUpdateKernel( const DeviceArrayView<float> r, const float* delta_old, const float* delta_new, float* d ) { //Check the size const auto row_idx = threadIdx.x + blockDim.x * blockIdx.x; if(row_idx >= r.Size()) return; //Compute the delta const float beta = (*delta_new) / (*delta_old); d[row_idx] = r[row_idx] + beta * d[row_idx]; } } /* End of namespace device */ }; /* End of namespace surfelwarp */ template<int BlockDim> surfelwarp::BlockPCG<BlockDim>::BlockPCG(size_t max_matrix_size, cudaStream_t stream) { //Allocate the buffer m_max_matrix_size = 0; allocateBuffer(max_matrix_size); m_spmv_handler = nullptr; //Initialize the cuda and cublas resource m_stream = stream; cublasSafeCall(cublasCreate(&m_cublas_handle)); cublasSafeCall(cublasSetPointerMode(m_cublas_handle, CUBLAS_POINTER_MODE_DEVICE)); cublasSafeCall(cublasSetStream(m_cublas_handle, m_stream)); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::UpdateCudaStream(cudaStream_t stream) { m_stream = stream; cublasSafeCall(cublasSetStream(m_cublas_handle, m_stream)); } template<int BlockDim> surfelwarp::BlockPCG<BlockDim>::~BlockPCG() { releaseBuffer(); cublasDestroy(m_cublas_handle); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::allocateBuffer(size_t max_matrix_size) { //Rectify the matrix_size to the multiple of BlockSize const auto max_blk_num = divUp(max_matrix_size, BlockDim); const auto rectify_matrix_size = max_blk_num * BlockDim; //Do not need allocate again if(m_max_matrix_size >= rectify_matrix_size) return; //Release the buffer first if already allocated if(m_max_matrix_size > 0) releaseBuffer(); //Initialize the size m_max_matrix_size = rectify_matrix_size; //Allocate the buffer r_.AllocateBuffer(rectify_matrix_size); t_.AllocateBuffer(rectify_matrix_size); d_.AllocateBuffer(rectify_matrix_size); q_.AllocateBuffer(rectify_matrix_size); s_.AllocateBuffer(rectify_matrix_size); //Explicit malloc on device: need to release them cudaSafeCall(cudaMalloc((void**)(&delta_old_), sizeof(float))); cudaSafeCall(cudaMalloc((void**)(&delta_new_), sizeof(float))); cudaSafeCall(cudaMalloc((void**)(&dot_dq_), sizeof(float))); //Allocate the page-locked memory for converge checking cudaSafeCall(cudaMallocHost((void**)(&delta_0_pagelock_), sizeof(float))); cudaSafeCall(cudaMallocHost((void**)(&delta_pagelock_), sizeof(float))); //Create the texture for sparse mv d_texture_ = create1DLinearTexture(d_); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::releaseBuffer() { //Zero the size of this solver m_max_matrix_size = 0; //Release the buffer maintained by device array r_.ReleaseBuffer(); t_.ReleaseBuffer(); d_.ReleaseBuffer(); q_.ReleaseBuffer(); s_.ReleaseBuffer(); //Release the explicit malloced buffer cudaSafeCall(cudaFree(delta_old_)); cudaSafeCall(cudaFree(delta_new_)); cudaSafeCall(cudaFree(dot_dq_)); cudaSafeCall(cudaFreeHost(delta_pagelock_)); cudaSafeCall(cudaFreeHost(delta_0_pagelock_)); //Destroy the texture cudaDestroyTextureObject(d_texture_); } template<int BlockDim> bool surfelwarp::BlockPCG<BlockDim>::SetSolverInput( DeviceArrayView<float> inv_diag_blks, typename ApplySpMVBase<BlockDim>::Ptr spmv_handler, DeviceArrayView<float> b, DeviceArraySlice<float> x_init, size_t actual_size ) { //Determine the size of the matrix if(actual_size == 0) actual_size = b.Size(); //Check the size of allcoated buffer 
if(m_max_matrix_size < actual_size) return false; //This size can be solved given current buffer m_actual_matrix_size = actual_size; //Simple sanity check const auto num_blks = divUp(m_actual_matrix_size, BlockDim); SURFELWARP_CHECK(actual_size % BlockDim == 0); SURFELWARP_CHECK_EQ(inv_diag_blks.Size(), BlockDim * BlockDim * num_blks); SURFELWARP_CHECK_EQ(b.Size(), BlockDim * num_blks); SURFELWARP_CHECK_EQ(x_init.Size(), BlockDim * num_blks); //Seems correct m_inv_diagonal_blks = inv_diag_blks; r_.ResizeArrayOrException(actual_size); t_.ResizeArrayOrException(actual_size); d_.ResizeArrayOrException(actual_size); q_.ResizeArrayOrException(actual_size); s_.ResizeArrayOrException(actual_size); m_spmv_handler = spmv_handler; b_ = b; x_ = x_init; return true; } template<int BlockDim> surfelwarp::DeviceArrayView<float> surfelwarp::BlockPCG<BlockDim>::Solve(size_t max_iterations, bool zero_init) { return SolveNoConvergeCheck(max_iterations, zero_init); } template<int BlockDim> surfelwarp::DeviceArrayView<float> surfelwarp::BlockPCG<BlockDim>::SolveNoConvergeCheck(size_t max_iteration, bool zero_init) { //Do initialize if(zero_init) { ZeroIntialize(); } else { Initialize(x_.ArrayView()); } //The main loop for(auto i = 0; i < max_iteration; i++) { PerformSparseMV(); //TexturedSparseMV(); PerformSecondUpdate(); PerformThirdUpdate(); } return x_.ArrayView(); } template<int BlockDim> surfelwarp::DeviceArrayView<float> surfelwarp::BlockPCG<BlockDim>::SolveConvergeChecked( size_t max_iteration, bool zero_init, float epsilon ) { //Same buffer, for different purpose DeviceArray<float> x_init = DeviceArray<float>(x_.RawPtr(), m_actual_matrix_size); //Do initialize if(zero_init) { ZeroIntialize(); } else { Initialize(x_.ArrayView()); } //Download delta_0 for convergence check //float delta_0 = 0.0f, delta = 0.0f; const float eps_square = epsilon * epsilon; cudaSafeCall(cudaMemcpyAsync(delta_0_pagelock_, delta_new_, sizeof(float), cudaMemcpyDeviceToHost, m_stream)); //The main loop for(auto i = 0; i < max_iteration; i++) { PerformSparseMV(); //TexturedSparseMV(); PerformSecondUpdate(); PerformThirdUpdate(); //Check converge cudaSafeCall(cudaMemcpyAsync(delta_pagelock_, delta_new_, sizeof(float), cudaMemcpyDeviceToHost, m_stream)); cudaSafeCall(cudaStreamSynchronize(m_stream)); if(std::abs(*delta_pagelock_) < eps_square * std::abs(*delta_0_pagelock_)) { break; } } return x_.ArrayView(); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::Initialize(const DeviceArrayView<float>& x_init) { //Use handler for residual initialization m_spmv_handler->InitResidual(x_init, b_, r_.ArraySlice(), m_stream); //Use the pre-conditioner to intialize d const auto num_blocks = m_actual_matrix_size / BlockDim; dim3 init_blk(64); dim3 init_grid(divUp(num_blocks, init_blk.x)); device::blockDenseMVKernel<BlockDim><<<init_grid, init_blk, 0, m_stream>>>(m_inv_diagonal_blks, r_, num_blocks, d_); //Perform dot-product using cublas cublasSdot(m_cublas_handle, r_.ArraySize(), r_.Ptr(), 1, d_.Ptr(), 1, delta_new_); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::ZeroIntialize() { const auto num_blocks = m_actual_matrix_size / BlockDim; dim3 init_blk(64); dim3 init_grid(divUp(num_blocks, init_blk.x)); device::blockPCGZeroInitKernel<BlockDim><<<init_grid, init_blk, 0, m_stream>>>(b_, m_inv_diagonal_blks, r_, d_, x_); //Perform dot-product using cublas cublasSdot(m_cublas_handle, r_.ArraySize(), r_.Ptr(), 1, d_.Ptr(), 1, delta_new_); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::PerformSparseMV() { 
//Using the handler m_spmv_handler->ApplySpMV(d_.ArrayView(), q_.ArraySlice(), m_stream); //dot_dq <- dot(d, q) cublasSdot(m_cublas_handle, d_.ArraySize(), q_.Ptr(), 1, d_.Ptr(), 1, dot_dq_); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::TexturedSparseMV() { //Use the handler m_spmv_handler->ApplySpMVTextured(d_texture_, q_.ArraySlice(), m_stream); //dot_dq <- dot(d, q) cublasSdot(m_cublas_handle, d_.ArraySize(), q_.Ptr(), 1, d_.Ptr(), 1, dot_dq_); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::PerformSecondUpdate() { dim3 update_blk(128); dim3 update_grid(divUp(m_actual_matrix_size, update_blk.x)); device::blockPCGSecondUpdateKernel<BlockDim><<<update_grid, update_blk, 0, m_stream>>>( d_.ArrayView(), q_, r_, m_inv_diagonal_blks, delta_new_, dot_dq_, x_, s_, t_, delta_old_ ); //delta_new <- dot(r, s) cublasSdot(m_cublas_handle, t_.ArraySize(), t_.Ptr(), 1, s_.Ptr(), 1, delta_new_); } template<int BlockDim> void surfelwarp::BlockPCG<BlockDim>::PerformThirdUpdate() { dim3 update_blk(128); dim3 update_grid(divUp(m_actual_matrix_size, update_blk.x)); device::blockPCGThirdUpdateKernel<BlockDim><<<update_grid, update_blk, 0, m_stream>>>( s_.ArrayView(), delta_old_, delta_new_, d_ ); r_.swap(t_); }
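// Illustrative host-side sketch (added for exposition, not part of BlockPCG
// above): the scalar form of the preconditioned CG recurrence that the
// zero-init, second-update and third-update kernels implement, with a plain
// diagonal preconditioner standing in for the block-inverse preconditioner
// and a dense mat-vec standing in for the ApplySpMV handler. No convergence
// check, mirroring SolveNoConvergeCheck.
#include <vector>

inline void sketchBlockPCG(const std::vector<std::vector<float>>& A,
                           const std::vector<float>& inv_diag,
                           const std::vector<float>& b,
                           std::vector<float>& x, int max_iters) {
  const size_t n = b.size();
  std::vector<float> r = b, d(n), q(n), s(n);  // zero-init: r <- b, x <- 0
  x.assign(n, 0.0f);
  for (size_t i = 0; i < n; ++i) d[i] = inv_diag[i] * r[i];   // d <- M^{-1} r
  float delta_new = 0.0f;
  for (size_t i = 0; i < n; ++i) delta_new += r[i] * d[i];
  for (int it = 0; it < max_iters; ++it) {
    for (size_t i = 0; i < n; ++i) {                          // q <- A d (SpMV)
      q[i] = 0.0f;
      for (size_t j = 0; j < n; ++j) q[i] += A[i][j] * d[j];
    }
    float dot_dq = 0.0f;
    for (size_t i = 0; i < n; ++i) dot_dq += d[i] * q[i];
    const float alpha = delta_new / dot_dq;                   // second kernel
    for (size_t i = 0; i < n; ++i) {
      x[i] += alpha * d[i];
      r[i] -= alpha * q[i];                                   // t <- r - alpha q
      s[i] = inv_diag[i] * r[i];                              // s <- M^{-1} t
    }
    const float delta_old = delta_new;
    delta_new = 0.0f;
    for (size_t i = 0; i < n; ++i) delta_new += r[i] * s[i];  // dot(t, s)
    const float beta = delta_new / delta_old;                 // third kernel
    for (size_t i = 0; i < n; ++i) d[i] = s[i] + beta * d[i];
  }
}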
#include <map> #include <vector> #include <cublas_v2.h> #include <cuda.h> #include <curand.h> #include <time.h> #include <curand_kernel.h> #include <helper_cuda.h> #include "../../util/include/matrix.h" #include "nvmatrix_kernels.cuh" #include "nvmatrix_operators.cuh" #include "memory.cuh" #ifdef WARNINGS #define WARN(msg) printf("WARN: File %s, line %d: %s\n", __FILE__, __LINE__, msg); #else #define WARN(msg) ; #endif #define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \ printf("CURAND Error at %s:%d\n",__FILE__,__LINE__);\ exit(EXIT_FAILURE);}} while(0) #define CUBLAS_CALL(x) do { if((x) != CUBLAS_STATUS_SUCCESS) { \ printf("CUBLAS Error at %s:%d\n",__FILE__,__LINE__);\ exit(EXIT_FAILURE);}} while(0) /* * Memory manager to use for GPU memory allocations. * * CUDAMemoryManager: Default Nvidia memory manager; just calls cudaMalloc / cudaFree. * Allocating and freeing memory is slow. * FastMemoryManager: A GPU memory manager with very fast (constant time) * alloc / free, but possibly more wasteful of memory. */ #define DEVICE_MEMORY_MANAGER CUDAMemoryManager /* * Memory manager to use for host memory allocations. * * CUDAHostMemoryManager: Default Nvidia memory manager; just calls cudaHostAlloc / cudaFreeHost. * Allocating and freeing memory is slow. * FastHostMemoryManager: A host memory manager with very fast (constant time) * alloc / free, but possibly more wasteful of memory. */ #define HOST_MEMORY_MANAGER CUDAHostMemoryManager class NVMatrix; typedef std::vector<NVMatrix*> NVMatrixV; class NVMatrix { protected: int _numCols, _numRows; int _numElements; int _stride; // float* getDevData(); MemorySegment* _memSegment; bool _isTrans; bool _ownsData; // This flag makes sure that the NVMatrix destructor does nothing // when called on HostNVMatrix instance. bool _deleted; cudaTextureObject_t _texObj; // static std::map<int,curandGenerator_t> rndGen; static std::map<int,MemorySegment*> _rndDevStates; static std::map<int,cublasHandle_t> _cublasHandles; // Map from device id --> # of random streams initialized on that device static std::map<int,int> _rndDevThreads; static pthread_mutex_t *_rndMutex, *_cublasMutex, *_streamMutex; // Map from device id --> default stream static std::map<int,cudaStream_t> _defaultStreams; cublasOperation_t getTransChar() const { /* * not a typo! return opposite character because a * non-transposed nvmatrix is in row-major order while a non-transposed * cublas matrix is in column-major order. */ return _isTrans ? 
CUBLAS_OP_N : CUBLAS_OP_T; } void _init(bool isTrans); void _sum_setParams(int n, dim3* blocks, dim3* threads); template<class Agg> float cpuAgg(Agg agg, cudaStream_t stream); template<class Agg> float _totalAgg(Agg agg); template<class Agg> float _totalAgg(Agg agg, cudaStream_t stream); template<class Agg> float _totalAgg(Agg agg, NVMatrix& tmpbuf, cudaStream_t stream); template<class Agg, class UnaryOp, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop, cudaStream_t stream, NVMatrix* tmp); template<class Agg, class UnaryOp, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop, cudaStream_t stream); template<class Agg, class UnaryOp, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop); template<class Agg, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop, cudaStream_t stream); template<class Agg, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop); template<class Agg, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, BinaryOp bop, cudaStream_t stream); template<class Agg, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, BinaryOp bop); template<class Agg, class UnaryOp, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, UnaryOp, BinaryOp bop, cudaStream_t stream); template<class Agg, class UnaryOp, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, UnaryOp, BinaryOp bop); template<class Agg, class UnaryOp, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop, NVMatrix& tmp); template<class Agg, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop, cudaStream_t stream, NVMatrix& tmp); template<class Agg, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop, NVMatrix& tmp); template<class Agg, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, BinaryOp bop, cudaStream_t stream, NVMatrix& tmp); template<class Agg, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, BinaryOp bop, NVMatrix& tmp); template<class Agg, class UnaryOp, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, UnaryOp, BinaryOp bop, cudaStream_t stream, NVMatrix& tmp); template<class Agg, class UnaryOp, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, UnaryOp, BinaryOp bop, NVMatrix& tmp); template <class Randomizer> void _unaryRandomize(NVMatrix& target, Randomizer rnd, cudaStream_t stream); template <class Randomizer> void _unaryRandomize(NVMatrix& target, Randomizer rnd); template <class Randomizer> void _binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd); template <class Randomizer> void _binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd, cudaStream_t stream); virtual void alloc(int numElements); virtual void dealloc(); void deallocTexture(); virtual NVMatrix& construct() const; virtual NVMatrix& construct(bool isTrans) const; virtual NVMatrix& construct(int numRows, int numCols, bool isTrans=false) const; virtual NVMatrix& construct(const Matrix& like, bool copy) const; virtual NVMatrix& construct(const NVMatrix& like, bool copy) const; virtual NVMatrix& construct(const NVMatrix& like) const; virtual NVMatrix& construct(const Matrix& like) const; virtual NVMatrix& construct(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) const; static cublasHandle_t getCublasHandle(); static cublasHandle_t getCublasHandle(int deviceID); public: 
NVMatrix(); NVMatrix(bool isTrans); NVMatrix(int numRows, int numCols, bool isTrans=false); NVMatrix(const Matrix& like, bool copy); NVMatrix(const NVMatrix& like, bool copy); NVMatrix(const NVMatrix& like); NVMatrix(const Matrix& like); NVMatrix(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans); virtual ~NVMatrix(); // Returns the device ID on which the data pointer is allocated int getDataDeviceID() const; static void initRandom(unsigned long long seed, int numStreams, cudaStream_t stream); static void initRandom(unsigned long long seed, int numStreams); static void initRandom(unsigned long long seed); static void initRandom(); static void initCublas(); static void destroyCublas(); static std::pair<size_t, size_t> getCudaMemorySize(); // Returns the currently-active device ID for calling thread static int getDeviceID(); static void setDeviceID(int d); static bool canAccessPeer(int srcDevice, int tgtDevice); static bool isRndInitialized(); static bool isRndInitialized(bool haveLock); static curandState* getCurandState(); static curandState* getCurandState(int numStreams); static void destroyRandom(); static pthread_mutex_t* makeMutex(); static cudaStream_t getDefaultStream(int deviceID); static cudaStream_t getDefaultStream(); static void syncDevice(); static void syncStream(); static void syncStream(cudaStream_t stream); /* * DO NOT DEREFERENCE IN HOST CODE! This is a device memory pointer. */ float* getCellPtr(int i, int j) const { if (_isTrans) { return &getDevData()[j * _numRows + i]; } return &getDevData()[i * _numCols + j]; } bool isSameDims(const Matrix& m) const { return m.getNumRows() == _numRows && m.getNumCols() == _numCols; } bool isSameDims(const NVMatrix& m) const { return m.getNumRows() == _numRows && m.getNumCols() == _numCols; } int getNumRows() const { return _numRows; } int getNumCols() const { return _numCols; } int getStride() const { return _stride; } int getLeadingDim() const { return _isTrans ? _numRows : _numCols; } int getFollowingDim() const { return !_isTrans ? _numRows : _numCols; } /* * FALSE: Row-major order. * TRUE: Column-major order. */ bool isTrans() const { return _isTrans; } bool isView() const { return !_ownsData; } float* getDevData() const { return _memSegment == NULL ? NULL : _memSegment->getData<float>(); } MemorySegment& getMemorySegment() const { return *_memSegment; } int getNumElements() const { return _numElements; } size_t getNumDataBytes() const { return size_t(_numElements) * 4; } /* * Only use if you know what you're doing! * Does not actually transpose matrix. */ void setTrans(bool trans) { if (trans != _isTrans) { assert(isContiguous()); _isTrans = trans; _stride = getLeadingDim(); } } /* * Only use if you know what you're doing! * This toggles whether this object will free its GPU memory when it's destroyed. 
*/ void setIsView(bool isView) { _ownsData = !isView; } bool isContiguous() const { return _stride == getLeadingDim() || getFollowingDim() == 1; } void truncate() { resize(0,0); } virtual cudaTextureObject_t getTextureObject(); virtual void copyFromHost(const Matrix& hostMatrix); virtual void copyFromHost(const Matrix& hostMatrix, bool resizeTarget); virtual void copyFromHost(const Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream); virtual void copyToHost(Matrix& hostMatrix) const; virtual void copyToHost(Matrix& hostMatrix, bool resizeTarget) const; virtual void copyToHost(Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream) const; void copy(NVMatrix& dest) const; void copy(NVMatrix& dest, cudaStream_t stream) const; NVMatrix& copy() const; void addProduct(NVMatrix& a, NVMatrix &b, float scaleThis, float scaleAB, cudaStream_t stream); void addProduct(NVMatrix& a, NVMatrix &b, float scaleThis, float scaleAB); void addProduct(NVMatrix& a, NVMatrix &b); void rightMult(NVMatrix &b, float scaleAB, NVMatrix &target, cudaStream_t stream); void rightMult(NVMatrix &b, float scaleAB, NVMatrix &target); void rightMult(NVMatrix &b, NVMatrix &target); void rightMult(NVMatrix &b, float scaleAB); void randomizeUniform(); void addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target); void addGaussianNoise(float stdev, NVMatrix& target); void addGaussianNoise(NVMatrix& stdevs, bool var); void addGaussianNoise(NVMatrix& stdevs); void addGaussianNoise(float stdev); void addGaussianNoise(); void randomizeGaussian(); void randomizeGaussian(float stdev); void randomizeGaussian(float mean, float stdev); void randomizeGaussian(float mean, NVMatrix& stdevs); void randomizeGaussian(float mean, float stdevMult, NVMatrix& stdevs); void randomizeGaussian(NVMatrix& stdevs); void randomizeGaussian(NVMatrix& stdevs, NVMatrix& target); void binarizeProbs(); void binarizeProbs(NVMatrix& target); void biggerThan(NVMatrix& m, NVMatrix& target); void biggerThan(NVMatrix& m); void biggerThanVector(NVMatrix& vec, NVMatrix& target); void biggerThanVector(NVMatrix& vec); void equals(NVMatrix& m, NVMatrix& target); void equals(NVMatrix& m); void _checkBounds(int startRow, int endRow, int startCol, int endCol) const; NVMatrix& slice(int startRow, int endRow, int startCol, int endCol) const; void slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const; NVMatrix& sliceRows(int startRow, int endRow) const; void sliceRows(int startRow, int endRow, NVMatrix& target) const; NVMatrix& sliceCols(int startCol, int endCol) const; void sliceCols(int startCol, int endCol, NVMatrix& target) const; NVMatrixV& splitRows(int numParts); NVMatrixV& splitCols(int numParts); template <class Op> void apply(Op op, NVMatrix& target, cudaStream_t stream) { if (!target.isSameDims(*this)) { target.resize(*this); } if (getNumElements() > 0) { int height = target.getFollowingDim(), width = target.getLeadingDim(); if (target.isTrans() == isTrans()) { if (!isContiguous() || !target.isContiguous()) { dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); kEltwiseUnaryOp<Op><<<blocks, threads, 0, stream>>>(getDevData(), target.getDevData(), height, width, getStride(), target.getStride(), op); getLastCudaError("kEltwiseUnaryOp: Kernel execution failed"); } else { dim3 threads = dim3(ELTWISE_FLAT_THREADS_X); dim3 blocks = dim3(std::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X))); 
kEltwiseUnaryOpFlat<Op><<<blocks, threads, 0, stream>>>(getDevData(), target.getDevData(), _numElements, op); getLastCudaError("kEltwiseUnaryOpFlat: Kernel execution failed"); } } else { dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); bool checkBounds = !(width % ELTWISE_THREADS_X == 0 && height % ELTWISE_THREADS_X == 0); // printf("height: %d, width: %d, stride: %d, target stride: %d, check bounds: %d, threads.x: %d, threads.y: %d, blocks.x: %d, blocks.y: %d\n", // height, width, getStride(), target.getStride(), checkBounds, threads.x, threads.y, blocks.x, blocks.y); if (checkBounds) { kEltwiseUnaryOpTrans<Op, true><<<blocks, threads, 0, stream>>>(getDevData(), target.getDevData(), height, width, getStride(), target.getStride(), op); } else { kEltwiseUnaryOpTrans<Op, false><<<blocks, threads, 0, stream>>>(getDevData(), target.getDevData(), height, width, getStride(), target.getStride(), op); } getLastCudaError("kEltwiseUnaryOpTrans: Kernel execution failed"); } } } template <class Op> void apply(Op op, cudaStream_t stream) { apply(op, *this, stream); } template <class Op> void apply(Op op, NVMatrix& target) { apply(op, target, getDefaultStream()); } template <class Op> void apply(Op op) { apply(op, *this); } template <class Op> void applyBinary(Op op, NVMatrix& b) { applyBinary(op, b, *this); } template <class Op> void applyBinary(Op op, NVMatrix& b, NVMatrix& target) { applyBinary(op, b, target, getDefaultStream()); } template <class Op> void applyBinary(Op op, NVMatrix& b, NVMatrix& target, cudaStream_t stream) { assert(this->isSameDims(b)); if (!target.isSameDims(*this)) { target.resize(*this); } if (getNumElements() > 0) { int height = target.getFollowingDim(), width = target.getLeadingDim(); if (target.isTrans() == isTrans() && target.isTrans() == b.isTrans()) { if (!isContiguous() || !b.isContiguous() || !target.isContiguous()) { dim3 blocks(std::min(128, DIVUP(width, ELTWISE_THREADS_X)), std::min(128, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); kEltwiseBinaryOp<Op><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), target.getDevData(), height, width, getStride(), b.getStride(), target.getStride(), op); } else { dim3 threads = dim3(ELTWISE_FLAT_THREADS_X); dim3 blocks = dim3(std::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X))); kEltwiseBinaryOpFlat<Op><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), target.getDevData(), _numElements, op); } getLastCudaError("kEltwiseBinaryOp: Kernel execution failed"); } else { dim3 blocks(std::min(128, DIVUP(width, ELTWISE_THREADS_X)), std::min(128, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); // both x here since y divides x bool checkBounds = !(width % ELTWISE_THREADS_X == 0 && height % ELTWISE_THREADS_X == 0); if (target.isTrans() == isTrans() && target.isTrans() != b.isTrans()) { if (checkBounds) { kEltwiseBinaryOpTrans<Op,true,false,false><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), target.getDevData(), height, width,getStride(), b.getStride(), target.getStride(), op); } else { kEltwiseBinaryOpTrans<Op,false,false,false><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), target.getDevData(), height, width,getStride(), b.getStride(), target.getStride(), op); } } else if (target.isTrans() != isTrans() && target.isTrans() != b.isTrans()) { if (checkBounds) { 
kEltwiseBinaryOpTrans<Op,true,true,false><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), target.getDevData(), height, width,getStride(), b.getStride(), target.getStride(), op); } else { kEltwiseBinaryOpTrans<Op,false,true,false><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), target.getDevData(), height, width,getStride(), b.getStride(), target.getStride(), op); } } else if (target.isTrans() != isTrans() && target.isTrans() == b.isTrans()) { if (checkBounds) { kEltwiseBinaryOpTrans<Op,true,false,true><<<blocks, threads, 0, stream>>>(b.getDevData(), getDevData(), target.getDevData(), height, width,b.getStride(), getStride(), target.getStride(), op); } else { kEltwiseBinaryOpTrans<Op,false,false,true><<<blocks, threads, 0, stream>>>(b.getDevData(), getDevData(), target.getDevData(), height, width, b.getStride(), getStride(), target.getStride(), op); } } getLastCudaError("kEltwiseBinaryOpTrans: Kernel execution failed"); } } } template <class Op> void applyTernary(Op op, NVMatrix& b, NVMatrix& c, NVMatrix& target) { applyTernary(op, b, c, target, getDefaultStream()); } template <class Op> void applyTernary(Op op, NVMatrix& b, NVMatrix& c, NVMatrix& target, cudaStream_t stream) { assert(isSameDims(b)); assert(isSameDims(c)); // For now ternary ops are only supported for matrices of same transposedness assert(isTrans() == b.isTrans()); assert(isTrans() == c.isTrans()); if (!target.isSameDims(*this) || target.isTrans() != isTrans()) { target.resize(*this); } if (getNumElements() > 0) { int height = target.getFollowingDim(), width = target.getLeadingDim(); if (!isContiguous() || !b.isContiguous() || !c.isContiguous() || !target.isContiguous()) { dim3 blocks(std::min(512, DIVUP(width, ELTWISE_THREADS_X)), std::min(512, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); kEltwiseTernaryOp<Op><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), c.getDevData(), target.getDevData(), height, width, getStride(), b.getStride(), c.getStride(), target.getStride(), op); getLastCudaError("kEltwiseTernaryOp: Kernel execution failed"); } else { dim3 threads = dim3(ELTWISE_FLAT_THREADS_X); dim3 blocks = dim3(std::min(128, DIVUP(_numElements, ELTWISE_FLAT_THREADS_X))); kEltwiseTernaryOpFlat<Op><<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), c.getDevData(), target.getDevData(), _numElements, op); getLastCudaError("kEltwiseTernaryOpFlat: Kernel execution failed"); } } } bool resize(int numRows, int numCols, bool trans); bool resize(int numRows, int numCols); bool resize(const NVMatrix &like); bool resize(const Matrix &like); void reshape(int numRows, int numCols); NVMatrix& reshaped(int numRows, int numCols) const; void copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const; void copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol, cudaStream_t stream) const; void add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target, cudaStream_t stream); void add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target); void add(NVMatrix& b, float scaleB, NVMatrix& target); void add(NVMatrix& b, NVMatrix& target); void add(NVMatrix& b, float scaleB); void add(NVMatrix& b, float scaleA, float scaleB); void add(NVMatrix& b); void eltwiseMult(NVMatrix& b); void eltwiseMult(NVMatrix& b, NVMatrix& target); void eltwiseDivide(NVMatrix& b); void eltwiseDivide(NVMatrix& b, NVMatrix& target); void 
squaredDiff(NVMatrix& b); void squaredDiff(NVMatrix& b, NVMatrix& target); void subtract(NVMatrix& b, NVMatrix& target); void subtract(NVMatrix& b); void addVector(NVMatrix& vec, float scaleVec, NVMatrix& target, cudaStream_t stream); void addVector(NVMatrix& vec, float scaleVec, NVMatrix& target); void addVector(NVMatrix& vec); void addVector(NVMatrix& vec, float scaleVec); void addVector(NVMatrix& vec, NVMatrix& target); void equalsVector(NVMatrix& vec, NVMatrix& target); void equalsVector(NVMatrix& vec); void eltwiseMultByVector(NVMatrix& vec, NVMatrix& target, cudaStream_t stream); void eltwiseMultByVector(NVMatrix& vec, NVMatrix& target); void eltwiseMultByVector(NVMatrix& vec); void eltwiseMultByVector(NVMatrix& vec, cudaStream_t stream); void eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target); void eltwiseDivideByVector(NVMatrix& vec); void tile(int timesY, int timesX, NVMatrix& target); void tile(int timesY, int timesX, NVMatrix& target, cudaStream_t stream); void addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum); void addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum, cudaStream_t stream); void addMax(NVMatrix& a, int axis, float scaleThis, float scaleMax); void addMax(NVMatrix& a, int axis, float scaleThis, float scaleMax, cudaStream_t stream); void sum(int axis, NVMatrix& target, cudaStream_t stream); void sum(int axis, NVMatrix& target); void sum(int axis, NVMatrix& target, cudaStream_t stream, NVMatrix& tmp); void sum(int axis, NVMatrix& target, NVMatrix& tmp); NVMatrix& sum(int axis); void max(int axis, NVMatrix& target); void max(int axis, NVMatrix& target, NVMatrix& tmp); NVMatrix& max(int axis); void min(int axis, NVMatrix& target); NVMatrix& min(int axis); void sumOfSquares(int axis, NVMatrix& target, cudaStream_t stream); void sumOfSquares(int axis, NVMatrix& target); NVMatrix& sumOfSquares(int axis); float mean(); float sum(); float sum(NVMatrix& tmpbuf); float max(); float min(); float countInf(); float countNan(); float norm2(); float norm(); void inRangeInc(float lower, float upper); void inRangeInc(float lower, float upper, NVMatrix& target); void inRangeExc(float lower, float upper); void inRangeExc(float lower, float upper, NVMatrix& target); void biggerThanScalar(float scalar); void biggerThanScalar(float scalar, NVMatrix& target); void smallerThanScalar(float scalar); void smallerThanScalar(float scalar, NVMatrix& target); void addScalar(float scaleThis, float scalar, NVMatrix& target); void addScalar(float scalar, NVMatrix& target); void addScalar(float scalar); void minWithScalar(float scalar, NVMatrix& target); void minWithScalar(float scalar); void maxWithScalar(float scalar, NVMatrix& target); void maxWithScalar(float scalar); void pow(float p, NVMatrix& target); void pow(float p); void scale(float _scale); void scale(float _scale, NVMatrix& target); void scale(float _scale, NVMatrix& target, cudaStream_t stream); void scale(float _scale, cudaStream_t stream); void zero(); void zero(NVMatrix& like); float dotProduct(NVMatrix& b, NVMatrix& tmp, cudaStream_t stream); float dotProduct(NVMatrix& b, cudaStream_t stream); float dotProduct(NVMatrix& b); /* * Does SOFT transpose and returns result, leaving this matrix unchanged */ NVMatrix& getTranspose(); NVMatrix& getClone(); /* * Does HARD transpose and puts result in target */ void transpose(NVMatrix& target); /* * Does SOFT transpose */ void transpose(); bool transpose(bool trans); void flipTrans(NVMatrix& target, cudaStream_t stream); void flipTrans(NVMatrix& target); NVMatrix& 
flipTrans(); void print(int startRow, int rows, int startCol, int cols) const; void print(int rows, int cols) const; void printShape(const char* name) const; template <class Op> void applyBinaryV(Op op, NVMatrix& vec, NVMatrix& target) { applyBinaryV(op, vec, target, getDefaultStream()); } template <class Op> void applyBinaryV(Op op, NVMatrix& vec, NVMatrix& target, cudaStream_t stream) { assert(&target != &vec); // for now if (isSameDims(vec)) { applyBinary(op, vec, target, stream); return; } assert(vec.getNumRows() == 1 || vec.getNumCols() == 1); assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols); assert(vec.isContiguous()); target.resize(*this); // target must be same orientation as me for now int width = getLeadingDim(); //_isTrans ? _numRows : _numCols; int height = getFollowingDim(); //_isTrans ? _numCols : _numRows; dim3 threads(ADD_VEC_THREADS_X, ADD_VEC_THREADS_Y); if ((vec.getNumRows() == _numRows && !isTrans()) || (vec.getNumCols() == _numCols && isTrans())) { dim3 blocks(std::min(512, DIVUP(width, ADD_VEC_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y))); kColVectorOp<Op><<<blocks, threads, 0, stream>>>(getDevData(), vec.getDevData(), target.getDevData(), width, height, getStride(), target.getStride(), op); } else { dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ADD_VEC_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y))); kRowVectorOp<Op><<<blocks, threads, 0, stream>>>(getDevData(), vec.getDevData(), target.getDevData(), width, height, getStride(), target.getStride(), op); } getLastCudaError("Kernel execution failed"); // cudaThreadSynchronize(); } template<class UnaryOperator> float argMax(UnaryOperator u) { return _totalAgg(NVMatrixAggs::ArgMax<UnaryOperator>(u)); } static void batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, cudaStream_t stream, const float** aPtrsDev, const float** bPtrsDev, float** tgtPtrsDev); static void batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, cudaStream_t stream); static void batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, const float** aPtrsDev, const float** bPtrsDev, float** tgtPtrsDev); static void batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB); static void assertSame(NVMatrixV& a); }; class HostNVMatrix : public NVMatrix { protected: void alloc(int numElements); void dealloc(); NVMatrix& construct() const; NVMatrix& construct(bool isTrans) const; NVMatrix& construct(int numRows, int numCols, bool isTrans=false) const; NVMatrix& construct(const Matrix& like, bool copy) const; NVMatrix& construct(const NVMatrix& like, bool copy) const; NVMatrix& construct(const NVMatrix& like) const; NVMatrix& construct(const Matrix& like) const; NVMatrix& construct(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) const; public: ~HostNVMatrix(); HostNVMatrix(); HostNVMatrix(bool isTrans); HostNVMatrix(int numRows, int numCols, bool isTrans=false); HostNVMatrix(const Matrix& like, bool copy); HostNVMatrix(const NVMatrix& like, bool copy); HostNVMatrix(const NVMatrix& like); HostNVMatrix(const Matrix& like); HostNVMatrix(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans); void copyFromHost(const Matrix& hostMatrix); void copyFromHost(const Matrix& hostMatrix, bool resizeTarget); void copyFromHost(const Matrix& hostMatrix, bool 
resizeTarget, cudaStream_t stream); void copyToHost(Matrix& hostMatrix) const; void copyToHost(Matrix& hostMatrix, bool resizeTarget) const; void copyToHost(Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream) const; cudaTextureObject_t getTextureObject(); }; #endif /* NVMATRIX_H_ */
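/*
 * The applyBinaryV dispatch above selects between a column-vector and a row-vector kernel
 * depending on the vector's shape and the matrix's storage order. The real kRowVectorOp and
 * kColVectorOp kernels are not part of this excerpt, so the following is only a minimal sketch
 * of the row-vector case under that assumption: each thread computes op(mat(y, x), vec[x]) over
 * a grid-stride 2D loop, honoring an arbitrary row stride. A column-vector variant would index
 * vec[y] instead, which is exactly the distinction the two applyBinaryV branches make.
 */
#include <cuda_runtime.h>

// Hypothetical functor standing in for the Op template parameter.
struct AddOp {
    __device__ float operator()(float a, float b) const { return a + b; }
};

// Sketch of a row-vector broadcast kernel (not the library's kRowVectorOp):
// out(y, x) = op(mat(y, x), vec[x]) for a height x width matrix with row strides.
template <class Op>
__global__ void rowVectorOpSketch(const float* mat, const float* vec, float* out,
                                  int width, int height,
                                  int matStride, int outStride, Op op)
{
    for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height;
         y += gridDim.y * blockDim.y) {
        for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width;
             x += gridDim.x * blockDim.x) {
            out[y * outStride + x] = op(mat[y * matStride + x], vec[x]);
        }
    }
}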
extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id); #include "cuda_helper.h" static __constant__ uint64_t stateo[25]; static __constant__ uint64_t RC[24]; static const uint64_t cpu_RC[24] = { 0x0000000000000001ull, 0x0000000000008082ull, 0x800000000000808aull, 0x8000000080008000ull, 0x000000000000808bull, 0x0000000080000001ull, 0x8000000080008081ull, 0x8000000000008009ull, 0x000000000000008aull, 0x0000000000000088ull, 0x0000000080008009ull, 0x000000008000000aull, 0x000000008000808bull, 0x800000000000008bull, 0x8000000000008089ull, 0x8000000000008003ull, 0x8000000000008002ull, 0x8000000000000080ull, 0x000000000000800aull, 0x800000008000000aull, 0x8000000080008081ull, 0x8000000000008080ull, 0x0000000080000001ull, 0x8000000080008008ull }; static __device__ __forceinline__ void keccak_block(uint64_t *s, const uint64_t *keccak_round_constants) { size_t i; uint64_t t[5], u[5], v, w; /* absorb input */ //#pragma unroll 24 for (i = 0; i < 24; i++) { /* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */ t[0] = s[0] ^ s[5] ^ s[10] ^ s[15] ^ s[20]; t[1] = s[1] ^ s[6] ^ s[11] ^ s[16] ^ s[21]; t[2] = s[2] ^ s[7] ^ s[12] ^ s[17] ^ s[22]; t[3] = s[3] ^ s[8] ^ s[13] ^ s[18] ^ s[23]; t[4] = s[4] ^ s[9] ^ s[14] ^ s[19] ^ s[24]; /* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */ uint64_t temp0,temp1,temp2,temp3,temp4; temp0 = ROTL64(t[0], 1); temp1 = ROTL64(t[1], 1); temp2 = ROTL64(t[2], 1); temp3 = ROTL64(t[3], 1); temp4 = ROTL64(t[4], 1); u[0] = xor1(t[4],temp1); u[1] = xor1(t[0],temp2); u[2] = xor1(t[1],temp3); u[3] = xor1(t[2],temp4); u[4] = xor1(t[3],temp0); /* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */ s[0] ^= u[0]; s[5] ^= u[0]; s[10] ^= u[0]; s[15] ^= u[0]; s[20] ^= u[0]; s[1] ^= u[1]; s[6] ^= u[1]; s[11] ^= u[1]; s[16] ^= u[1]; s[21] ^= u[1]; s[2] ^= u[2]; s[7] ^= u[2]; s[12] ^= u[2]; s[17] ^= u[2]; s[22] ^= u[2]; s[3] ^= u[3]; s[8] ^= u[3]; s[13] ^= u[3]; s[18] ^= u[3]; s[23] ^= u[3]; s[4] ^= u[4]; s[9] ^= u[4]; s[14] ^= u[4]; s[19] ^= u[4]; s[24] ^= u[4]; /* rho pi: b[..] = rotl(a[..], ..) 
*/ v = s[ 1]; s[ 1] = ROTL64(s[ 6], 44); s[ 6] = ROTL64(s[ 9], 20); s[ 9] = ROTL64(s[22], 61); s[22] = ROTL64(s[14], 39); s[14] = ROTL64(s[20], 18); s[20] = ROTL64(s[ 2], 62); s[ 2] = ROTL64(s[12], 43); s[12] = ROTL64(s[13], 25); s[13] = ROTL64(s[19], 8); s[19] = ROTL64(s[23], 56); s[23] = ROTL64(s[15], 41); s[15] = ROTL64(s[ 4], 27); s[ 4] = ROTL64(s[24], 14); s[24] = ROTL64(s[21], 2); s[21] = ROTL64(s[ 8], 55); s[ 8] = ROTL64(s[16], 45); s[16] = ROTL64(s[ 5], 36); s[ 5] = ROTL64(s[ 3], 28); s[ 3] = ROTL64(s[18], 21); s[18] = ROTL64(s[17], 15); s[17] = ROTL64(s[11], 10); s[11] = ROTL64(s[ 7], 6); s[ 7] = ROTL64(s[10], 3); s[10] = ROTL64( v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ v = s[ 0]; w = s[ 1]; s[ 0] ^= (~w) & s[ 2]; s[ 1] ^= (~s[ 2]) & s[ 3]; s[ 2] ^= (~s[ 3]) & s[ 4]; s[ 3] ^= (~s[ 4]) & v; s[ 4] ^= (~v) & w; v = s[ 5]; w = s[ 6]; s[ 5] ^= (~w) & s[ 7]; s[ 6] ^= (~s[ 7]) & s[ 8]; s[ 7] ^= (~s[ 8]) & s[ 9]; s[ 8] ^= (~s[ 9]) & v; s[ 9] ^= (~v) & w; v = s[10]; w = s[11]; s[10] ^= (~w) & s[12]; s[11] ^= (~s[12]) & s[13]; s[12] ^= (~s[13]) & s[14]; s[13] ^= (~s[14]) & v; s[14] ^= (~v) & w; v = s[15]; w = s[16]; s[15] ^= (~w) & s[17]; s[16] ^= (~s[17]) & s[18]; s[17] ^= (~s[18]) & s[19]; s[18] ^= (~s[19]) & v; s[19] ^= (~v) & w; v = s[20]; w = s[21]; s[20] ^= (~w) & s[22]; s[21] ^= (~s[22]) & s[23]; s[22] ^= (~s[23]) & s[24]; s[23] ^= (~s[24]) & v; s[24] ^= (~v) & w; /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[i]; } } static __device__ __forceinline__ void keccak_blockv35(uint2 *s, const uint64_t *keccak_round_constants) { size_t i; uint2 t[5], u[5], v, w; for (i = 0; i < 24; i++) { /* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */ t[0] = s[0] ^ s[5] ^ s[10] ^ s[15] ^ s[20]; t[1] = s[1] ^ s[6] ^ s[11] ^ s[16] ^ s[21]; t[2] = s[2] ^ s[7] ^ s[12] ^ s[17] ^ s[22]; t[3] = s[3] ^ s[8] ^ s[13] ^ s[18] ^ s[23]; t[4] = s[4] ^ s[9] ^ s[14] ^ s[19] ^ s[24]; /* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */ u[0] = t[4] ^ ROL2(t[1], 1); u[1] = t[0] ^ ROL2(t[2], 1); u[2] = t[1] ^ ROL2(t[3], 1); u[3] = t[2] ^ ROL2(t[4], 1); u[4] = t[3] ^ ROL2(t[0], 1); /* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */ s[0] ^= u[0]; s[5] ^= u[0]; s[10] ^= u[0]; s[15] ^= u[0]; s[20] ^= u[0]; s[1] ^= u[1]; s[6] ^= u[1]; s[11] ^= u[1]; s[16] ^= u[1]; s[21] ^= u[1]; s[2] ^= u[2]; s[7] ^= u[2]; s[12] ^= u[2]; s[17] ^= u[2]; s[22] ^= u[2]; s[3] ^= u[3]; s[8] ^= u[3]; s[13] ^= u[3]; s[18] ^= u[3]; s[23] ^= u[3]; s[4] ^= u[4]; s[9] ^= u[4]; s[14] ^= u[4]; s[19] ^= u[4]; s[24] ^= u[4]; /* rho pi: b[..] = rotl(a[..], ..) 
*/ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL2(s[19], 8); s[19] = ROL2(s[23], 56); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ v = s[0]; w = s[1]; s[0] ^= (~w) & s[2]; s[1] ^= (~s[2]) & s[3]; s[2] ^= (~s[3]) & s[4]; s[3] ^= (~s[4]) & v; s[4] ^= (~v) & w; v = s[5]; w = s[6]; s[5] ^= (~w) & s[7]; s[6] ^= (~s[7]) & s[8]; s[7] ^= (~s[8]) & s[9]; s[8] ^= (~s[9]) & v; s[9] ^= (~v) & w; v = s[10]; w = s[11]; s[10] ^= (~w) & s[12]; s[11] ^= (~s[12]) & s[13]; s[12] ^= (~s[13]) & s[14]; s[13] ^= (~s[14]) & v; s[14] ^= (~v) & w; v = s[15]; w = s[16]; s[15] ^= (~w) & s[17]; s[16] ^= (~s[17]) & s[18]; s[17] ^= (~s[18]) & s[19]; s[18] ^= (~s[19]) & v; s[19] ^= (~v) & w; v = s[20]; w = s[21]; s[20] ^= (~w) & s[22]; s[21] ^= (~s[22]) & s[23]; s[22] ^= (~s[23]) & s[24]; s[23] ^= (~s[24]) & v; s[24] ^= (~v) & w; /* iota: a[0,0] ^= round constant */ s[0] ^= vectorize(keccak_round_constants[i]); } } static __forceinline__ void keccak_block_host(uint64_t *s, const uint64_t *keccak_round_constants) { size_t i; uint64_t t[5], u[5], v, w; /* absorb input */ for (i = 0; i < 24; i++) { /* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */ t[0] = s[0] ^ s[5] ^ s[10] ^ s[15] ^ s[20]; t[1] = s[1] ^ s[6] ^ s[11] ^ s[16] ^ s[21]; t[2] = s[2] ^ s[7] ^ s[12] ^ s[17] ^ s[22]; t[3] = s[3] ^ s[8] ^ s[13] ^ s[18] ^ s[23]; t[4] = s[4] ^ s[9] ^ s[14] ^ s[19] ^ s[24]; /* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */ u[0] = t[4] ^ ROTL64(t[1], 1); u[1] = t[0] ^ ROTL64(t[2], 1); u[2] = t[1] ^ ROTL64(t[3], 1); u[3] = t[2] ^ ROTL64(t[4], 1); u[4] = t[3] ^ ROTL64(t[0], 1); /* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */ s[0] ^= u[0]; s[5] ^= u[0]; s[10] ^= u[0]; s[15] ^= u[0]; s[20] ^= u[0]; s[1] ^= u[1]; s[6] ^= u[1]; s[11] ^= u[1]; s[16] ^= u[1]; s[21] ^= u[1]; s[2] ^= u[2]; s[7] ^= u[2]; s[12] ^= u[2]; s[17] ^= u[2]; s[22] ^= u[2]; s[3] ^= u[3]; s[8] ^= u[3]; s[13] ^= u[3]; s[18] ^= u[3]; s[23] ^= u[3]; s[4] ^= u[4]; s[9] ^= u[4]; s[14] ^= u[4]; s[19] ^= u[4]; s[24] ^= u[4]; /* rho pi: b[..] = rotl(a[..], ..) 
*/ v = s[ 1]; s[ 1] = ROTL64(s[ 6], 44); s[ 6] = ROTL64(s[ 9], 20); s[ 9] = ROTL64(s[22], 61); s[22] = ROTL64(s[14], 39); s[14] = ROTL64(s[20], 18); s[20] = ROTL64(s[ 2], 62); s[ 2] = ROTL64(s[12], 43); s[12] = ROTL64(s[13], 25); s[13] = ROTL64(s[19], 8); s[19] = ROTL64(s[23], 56); s[23] = ROTL64(s[15], 41); s[15] = ROTL64(s[ 4], 27); s[ 4] = ROTL64(s[24], 14); s[24] = ROTL64(s[21], 2); s[21] = ROTL64(s[ 8], 55); s[ 8] = ROTL64(s[16], 45); s[16] = ROTL64(s[ 5], 36); s[ 5] = ROTL64(s[ 3], 28); s[ 3] = ROTL64(s[18], 21); s[18] = ROTL64(s[17], 15); s[17] = ROTL64(s[11], 10); s[11] = ROTL64(s[ 7], 6); s[ 7] = ROTL64(s[10], 3); s[10] = ROTL64( v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ v = s[ 0]; w = s[ 1]; s[ 0] ^= (~w) & s[ 2]; s[ 1] ^= (~s[ 2]) & s[ 3]; s[ 2] ^= (~s[ 3]) & s[ 4]; s[ 3] ^= (~s[ 4]) & v; s[ 4] ^= (~v) & w; v = s[ 5]; w = s[ 6]; s[ 5] ^= (~w) & s[ 7]; s[ 6] ^= (~s[ 7]) & s[ 8]; s[ 7] ^= (~s[ 8]) & s[ 9]; s[ 8] ^= (~s[ 9]) & v; s[ 9] ^= (~v) & w; v = s[10]; w = s[11]; s[10] ^= (~w) & s[12]; s[11] ^= (~s[12]) & s[13]; s[12] ^= (~s[13]) & s[14]; s[13] ^= (~s[14]) & v; s[14] ^= (~v) & w; v = s[15]; w = s[16]; s[15] ^= (~w) & s[17]; s[16] ^= (~s[17]) & s[18]; s[17] ^= (~s[18]) & s[19]; s[18] ^= (~s[19]) & v; s[19] ^= (~v) & w; v = s[20]; w = s[21]; s[20] ^= (~w) & s[22]; s[21] ^= (~s[22]) & s[23]; s[22] ^= (~s[23]) & s[24]; s[23] ^= (~s[24]) & v; s[24] ^= (~v) & w; /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[i]; } } __constant__ uint64_t c_PaddedMessage80[16]; // padded message (80 bytes + padding) __global__ void m7_keccak512_gpu_hash_120(int threads, uint32_t startNounce, uint64_t *outputHash) { int thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t nounce = startNounce + thread; uint64_t state[25]; #pragma unroll 16 for (int i=9;i<25;i++) {state[i]=stateo[i];} state[0] = xor1(stateo[0],c_PaddedMessage80[9]); state[1] = xor1(stateo[1],c_PaddedMessage80[10]); state[2] = xor1(stateo[2],c_PaddedMessage80[11]); state[3] = xor1(stateo[3],c_PaddedMessage80[12]); state[4] = xor1(stateo[4],c_PaddedMessage80[13]); state[5] = xor1(stateo[5],REPLACE_HIDWORD(c_PaddedMessage80[14],nounce)); state[6] = xor1(stateo[6],c_PaddedMessage80[15]); state[7] = stateo[7]; state[8] = xor1(stateo[8],0x8000000000000000); keccak_block(state,RC); #pragma unroll 8 for (int i=0;i<8;i++) {outputHash[i*threads+thread]=state[i];} } //thread } __global__ void __launch_bounds__(256, 3) m7_keccak512_gpu_hash_120_v35(int threads, uint32_t startNounce, uint64_t *outputHash) { int thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t nounce = startNounce + thread; uint2 state[25]; #pragma unroll 25 for (int i = 0; i<25; i++) { state[i] = vectorize(stateo[i]); } state[0] ^= vectorize(c_PaddedMessage80[9]); state[1] ^= vectorize(c_PaddedMessage80[10]); state[2] ^= vectorize(c_PaddedMessage80[11]); state[3] ^= vectorize(c_PaddedMessage80[12]); state[4] ^= vectorize(c_PaddedMessage80[13]); state[5] ^= make_uint2(((uint32_t*)c_PaddedMessage80)[28],nounce); state[6] ^= vectorize(c_PaddedMessage80[15]); state[8] ^= make_uint2(0,0x80000000); keccak_blockv35(state, RC); #pragma unroll 8 for (int i = 0; i<8; i++) { outputHash[i*threads + thread] = devectorize(state[i]); } } //thread } void m7_keccak512_cpu_init(int thr_id, int threads) { cudaMemcpyToSymbol( RC,cpu_RC,sizeof(cpu_RC),0,cudaMemcpyHostToDevice); } __host__ void m7_keccak512_setBlock_120(void *pdata) { unsigned char PaddedMessage[128]; uint8_t ending =0x01; memcpy(PaddedMessage, 
pdata, 122); memset(PaddedMessage+122,ending,1); memset(PaddedMessage+123, 0, 5); cudaMemcpyToSymbol( c_PaddedMessage80, PaddedMessage, 16*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); uint64_t* alt_data = (uint64_t*) pdata; uint64_t state[25]; for(int i=0;i<25;i++) {state[i]=0;} for (int i=0;i<9;i++) {state[i] ^= alt_data[i];} keccak_block_host(state,cpu_RC); cudaMemcpyToSymbol(stateo, state, 25*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); } __host__ void m7_keccak512_cpu_hash(int thr_id, int threads, uint32_t startNounce, uint64_t *d_hash, int order) { const int threadsperblock = 256; dim3 grid(threads/threadsperblock); dim3 block(threadsperblock); size_t shared_size = 0; if (device_sm[thr_id]<350) { m7_keccak512_gpu_hash_120<<<grid, block, shared_size>>>(threads, startNounce, d_hash); } else { m7_keccak512_gpu_hash_120_v35 << <grid, block, shared_size >> >(threads, startNounce, d_hash); } MyStreamSynchronize(NULL, order, thr_id); }
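/*
 * The m7_keccak512 path above splits the work between host and device:
 * m7_keccak512_setBlock_120 absorbs the first nine 64-bit message words (one full
 * Keccak-512 rate block) on the CPU with a single keccak_block_host call and uploads
 * the resulting state (stateo), so each GPU thread only XORs in the remaining words,
 * its per-thread nonce, and the closing padding bit, then runs one permutation.
 * REPLACE_HIDWORD comes from cuda_helper.h and is not shown here; the host reference
 * below is a sketch of what it is assumed to do (overwrite the high 32 bits of a word
 * with the nonce), consistent with the sm_35 kernel building the same lane as
 * make_uint2(low_word, nounce).
 */
#include <cstdint>

static inline uint64_t replace_hidword_ref(uint64_t word, uint32_t nonce)
{
    // Keep the low 32 bits of the padded-message word, put the nonce in the high 32 bits.
    return (word & 0xffffffffULL) | (static_cast<uint64_t>(nonce) << 32);
}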
#define WARPS_NUM 8 /** Natural order in constant memory */ __constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE]; /** * Huffman coding tables in constant memory - each has 257 items (256 + 1 extra) * There are are 4 of them - one after another, in following order: * - luminance (Y) AC * - luminance (Y) DC * - chroma (cb/cr) AC * - chroma (cb/cr) DC */ __device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4]; /** * Value decomposition in constant memory (input range from -4096 to 4095 ... both inclusive) * Mapping from coefficient value into the code for the value ind its bit size. */ __device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024]; /** Allocate huffman tables in constant memory */ __device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT]; struct gpujpeg_huffman_gpu_encoder { /** Size of occupied part of output buffer */ unsigned int * d_gpujpeg_huffman_output_byte_count; }; /** * Initializes coefficient decomposition table in global memory. (CC >= 2.0) * Output table is a mapping from some value into its code and bit size. */ __global__ static void gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() { // fetch some value const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int value = tid - 4096; // decompose it unsigned int value_code = value; int absolute = value; if ( value < 0 ) { // valu eis now absolute value of input absolute = -absolute; // For a negative input, want temp2 = bitwise complement of abs(input) // This code assumes we are on a two's complement machine value_code--; } // Find the number of bits needed for the magnitude of the coefficient unsigned int value_nbits = 0; while ( absolute ) { value_nbits++; absolute >>= 1; } // save result packed into unsigned int (value bits are left aligned in MSBs and size is right aligned in LSBs) gpujpeg_huffman_value_decomposition[tid] = value_nbits | (value_code << (32 - value_nbits)); } #if __CUDA_ARCH__ >= 200 /** * Adds up to 32 bits at once into ouptut buffer, applying byte stuffing. * Codeword value must be aligned to left (most significant bits). (CC >= 2.0) */ __device__ static void gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word) { // decompose packed codeword into the msb-aligned value and bit-length of the value const unsigned int code_word = packed_code_word & ~31; const unsigned int code_bit_size = packed_code_word & 31; // concatenate with remaining bits remaining_bits |= code_word >> bit_count; bit_count += code_bit_size; // flush some bytes if have more than 8 bits if (bit_count >= 8) { do { const unsigned int out_byte = remaining_bits >> 24; out_ptr[byte_count++] = out_byte; if(0xff == out_byte) { // keep zero byte after each 0xFF (buffer is expected to be zeroed) out_ptr[byte_count++] = 0; } remaining_bits <<= 8; bit_count -= 8; } while (bit_count >= 8); // keep only remaining bits in the buffer remaining_bits = code_word << (code_bit_size - bit_count); remaining_bits &= 0xfffffffe << (31 - bit_count); } } /** * Given some huffman table offset, RLE zero count and coefficient value, * this returns huffman codeword for the value (packed in 27 MSBs) * together with its bit size (in 5 LSBs). 
(CC >= 2.0) */ __device__ static unsigned int gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient, const int huffman_lut_offset) { // value bits are in MSBs (left aligned) and bit size of the value is in LSBs (right aligned) const unsigned int packed_value = gpujpeg_huffman_value_decomposition[4096 + coefficient]; // decompose value info into upshifted value and value's bit size const int value_nbits = packed_value & 0xf; const unsigned int value_code = packed_value & ~0xf; // find prefix of the codeword and size of the prefix const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits; const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[huffman_lut_idx]; const unsigned int prefix_nbits = packed_prefix & 31; // compose packed codeword with its size return (packed_prefix + value_nbits) | (value_code >> prefix_nbits); } /** * Flush remaining codewords from buffer in shared memory to global memory output buffer. (CC >= 2.0) */ __device__ static void gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) { // this works for up to 4 * 32 remaining codewords if(remaining_codewords) { // pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once s_out[remaining_codewords + tid] = 0; // save all remaining codewords at once (together with some zero sized padding codewords) *((uint4*)data_compressed) = ((uint4*)s_out)[tid]; // update codeword counter data_compressed += remaining_codewords; remaining_codewords = 0; } } #ifndef FULL_MASK #define FULL_MASK 0xffffffffu #endif // compat #if CUDART_VERSION < 9000 #define __ballot_sync(set, pred) __ballot(pred) #endif /** * Encode one 8x8 block (CC >= 2.0) * * @return 0 if succeeds, otherwise nonzero */ __device__ static int gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out, int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset) { // each thread loads a pair of values (pair after zigzag reordering) const int load_idx = tid * 2; int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]]; const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]]; // compute preceding zero count for even coefficient (actually compute the count multiplied by 16) const unsigned int nonzero_mask = (1 << tid) - 1; const unsigned int nonzero_bitmap_0 = 1 | __ballot_sync(FULL_MASK, in_even); // DC is always treated as nonzero const unsigned int nonzero_bitmap_1 = __ballot_sync(FULL_MASK, in_odd); const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1; const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask); int zeros_before_even = 2 * (zero_pair_count + tid - 32); if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) { zeros_before_even += 1; } // true if any nonzero pixel follows thread's odd pixel const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask; // count of consecutive zeros before odd value (either one more than // even if even is zero or none if even value itself is nonzero) // (the count is actually multiplied by 16) int zeros_before_odd = in_even || !tid ? 
0 : zeros_before_even + 1; // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited) // otherwise only trim extra bits from the counts of following zeros const int zero_count_mask = nonzero_follows ? 0xF : 0; zeros_before_even &= zero_count_mask; zeros_before_odd &= zero_count_mask; // pointer to LUT for encoding thread's even value // (only thread #0 uses DC table, others use AC table) int even_lut_offset = huffman_lut_offset; // first thread handles special DC coefficient if(0 == tid) { // first thread uses DC part of the table for its even value even_lut_offset += 256 + 1; // update last DC coefficient (saved at the special place at the end of the shared bufer) const int original_in_even = in_even; in_even -= ((int*)s_out)[last_dc_idx]; ((int*)s_out)[last_dc_idx] = original_in_even; } // last thread handles special block-termination symbol if(0 == ((tid ^ 31) | in_odd)) { // this causes selection of huffman symbol at index 256 (which contains the termination symbol) zeros_before_odd = 16; } // each thread gets codeword for its two pixels unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset); unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset); // concatenate both codewords into one if they are short enough const unsigned int even_code_size = even_code & 31; const unsigned int odd_code_size = odd_code & 31; const unsigned int total_size = even_code_size + odd_code_size; if(total_size <= 27) { even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31); odd_code = 0; } // each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block const unsigned int even_codeword_presence = __ballot_sync(FULL_MASK, even_code); const unsigned int odd_codeword_presence = __ballot_sync(FULL_MASK, odd_code); const int codeword_offset = __popc(nonzero_mask & even_codeword_presence) + __popc(nonzero_mask & odd_codeword_presence); // each thread saves its values into temporary shared buffer if(even_code) { s_out[remaining_codewords + codeword_offset] = even_code; if(odd_code) { s_out[remaining_codewords + codeword_offset + 1] = odd_code; } } // advance count of codewords in shared memory buffer remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence); // flush some codewords to global memory if there are too many of them in shared buffer const int flush_count = 32 * 4; // = half of the buffer if(remaining_codewords > flush_count) { // move first half of the buffer into output buffer in global memory and update output pointer *((uint4*)data_compressed) = ((uint4*)s_out)[tid]; data_compressed += flush_count; // shift remaining codewords to begin of the buffer and update their count ((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4 remaining_codewords -= flush_count; } // nothing to fail here return 0; } #endif // #if __CUDA_ARCH__ >= 200 /** * Huffman encoder kernel (For compute capability >= 2.0) * * @return void */ template <bool CONTINUOUS_BLOCK_LIST> #if __CUDA_ARCH__ >= 200 __launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32)) #endif __global__ static void gpujpeg_huffman_encoder_encode_kernel_warp( struct gpujpeg_segment* d_segment, int segment_count, uint8_t* d_data_compressed, const uint64_t* const d_block_list, int16_t* const d_data_quantized, struct gpujpeg_component* const d_component, const int comp_count, unsigned int * 
d_gpujpeg_huffman_output_byte_count ) { #if __CUDA_ARCH__ >= 200 int warpidx = threadIdx.x >> 5; int tid = threadIdx.x & 31; __shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM]; unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1)); // Number of remaining codewords in shared buffer int remaining_codewords = 0; // Select Segment const int block_idx = blockIdx.x + blockIdx.y * gridDim.x; const int segment_index = block_idx * WARPS_NUM + warpidx; // first thread initializes compact output size for next kernel if(0 == tid && 0 == warpidx && 0 == block_idx) { *d_gpujpeg_huffman_output_byte_count = 0; } // stop if out of segment bounds if ( segment_index >= segment_count ) return; struct gpujpeg_segment* segment = &d_segment[segment_index]; // Initialize last DC coefficients if(tid < 3) { s_out[256 + tid] = 0; } // Prepare data pointers unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index); unsigned int * data_compressed_start = data_compressed; // Pre-add thread ID to output pointer (it's allways used only with it) data_compressed += (tid * 4); // Encode all block in segment if(CONTINUOUS_BLOCK_LIST) { // Get component for current scan const struct gpujpeg_component* component = &d_component[segment->scan_index]; // mcu size of the component const int comp_mcu_size = component->mcu_size; // Get component data for MCU (first block) const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size; // Get huffman table offset const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables // Encode MCUs in segment for (int block_count = segment->mcu_count; block_count--;) { // Encode 8x8 block gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset); // Advance to next block block += comp_mcu_size; } } else { // Pointer to segment's list of 8x8 blocks and their count const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin; // Encode all blocks for(int block_count = segment->block_count; block_count--;) { // Get pointer to next block input data and info about its color type const uint64_t packed_block_info = *(packed_block_info_ptr++); // Get coder parameters const int last_dc_idx = 256 + (packed_block_info & 0x7f); // Get offset to right part of huffman table const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables // Source data pointer int16_t* block = &d_data_quantized[packed_block_info >> 8]; // Encode 8x8 block gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset); } } // flush remaining codewords gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid); // Set number of codewords. if (tid == 0 ) { segment->data_compressed_size = data_compressed - data_compressed_start; } #endif // #if __CUDA_ARCH__ >= 200 } #define SERIALIZATION_THREADS_PER_TBLOCK 192 /** * Codeword serialization kernel (CC >= 2.0). 
* * @return void */ #if __CUDA_ARCH__ >= 200 __launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK) #endif __global__ static void gpujpeg_huffman_encoder_serialization_kernel( struct gpujpeg_segment* d_segment, int segment_count, const uint8_t* const d_src, uint8_t* const d_dest ) { #if __CUDA_ARCH__ >= 200 // Temp buffer for all threads of the threadblock __shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK]; // Thread's 32 bytes in shared memory for output composition uint4 * const s_temp = s_temp_all + threadIdx.x * 2; // Select Segment const int block_idx = blockIdx.x + blockIdx.y * gridDim.x; int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x; if ( segment_index >= segment_count ) return; // Thread's segment struct gpujpeg_segment* const segment = &d_segment[segment_index]; // Input and output pointers const int data_offset = segment->data_temp_index; uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset); uint4 * d_dest_stream = d_dest_stream_start; const uint4 * d_src_codewords = (uint4*)(d_src + data_offset); // number of bytes in the temp buffer, remaining bits and their count int byte_count = 0, bit_count = 0; unsigned int remaining_bits = 0; // "data_compressed_size" is now initialized to number of codewords to be serialized for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once { // read 4 codewords and advance input pointer to next ones const uint4 cwords = *(d_src_codewords++); // encode first pair of codewords gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x); gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y); // possibly flush output if have at least 16 bytes if(byte_count >= 16) { // write 16 bytes into destination buffer *(d_dest_stream++) = s_temp[0]; // move remaining bytes to first half of the buffer s_temp[0] = s_temp[1]; // update number of remaining bits byte_count -= 16; } // encode other two codewords gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z); gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w); // possibly flush output if have at least 16 bytes if(byte_count >= 16) { // write 16 bytes into destination buffer *(d_dest_stream++) = s_temp[0]; // move remaining bytes to first half of the buffer s_temp[0] = s_temp[1]; // update number of remaining bits byte_count -= 16; } } // Emit left bits gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007); // Terminate codestream with restart marker ((uint8_t*)s_temp)[byte_count + 0] = 0xFF; ((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8); // flush remaining bytes d_dest_stream[0] = s_temp[0]; d_dest_stream[1] = s_temp[1]; // Set compressed size segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2; #endif // #if __CUDA_ARCH__ >= 200 } /** * Huffman coder compact output allocation kernel - serially reserves * some space for compressed output of segments in output buffer. * (For CC 1.0 - a workaround for missing atomic operations.) * * Only single threadblock with 512 threads is launched. 
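 * Internally it runs a serial prefix sum (one thread) over the 16-byte-aligned segment sizes,
 * 512 segments per pass, turning the sizes into output offsets and accumulating the total byte count.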
*/ __global__ static void gpujpeg_huffman_encoder_allocation_kernel ( struct gpujpeg_segment* const d_segment, const int segment_count, unsigned int * d_gpujpeg_huffman_output_byte_count ) { // offsets of segments __shared__ unsigned int s_segment_offsets[512]; // cumulative sum of bytes of all segments unsigned int total_byte_count = 0; // iterate over all segments const unsigned int segment_idx_end = (segment_count + 511) & ~511; for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) { // all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array s_segment_offsets[threadIdx.x] = segment_idx < segment_count ? (d_segment[segment_idx].data_compressed_size + 15) & ~15 : 0; // first thread runs a sort of serial prefix sum over the segment sizes to get their offsets __syncthreads(); if(0 == threadIdx.x) { #pragma unroll 4 for(int i = 0; i < 512; i++) { const unsigned int segment_size = s_segment_offsets[i]; s_segment_offsets[i] = total_byte_count; total_byte_count += segment_size; } } __syncthreads(); // all threads write offsets back into corresponding segment structures if(segment_idx < segment_count) { d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x]; } } // first thread finally saves the total sum of bytes needed for compressed data if(threadIdx.x == 0) { *d_gpujpeg_huffman_output_byte_count = total_byte_count; } } /** * Huffman coder output compaction kernel. * * @return void */ __global__ static void gpujpeg_huffman_encoder_compaction_kernel ( struct gpujpeg_segment* const d_segment, const int segment_count, const uint8_t* const d_src, uint8_t* const d_dest, unsigned int * d_gpujpeg_huffman_output_byte_count ) { // get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index) const int block_idx = blockIdx.x + blockIdx.y * gridDim.x; const int segment_idx = threadIdx.y + block_idx * blockDim.y; if(segment_idx >= segment_count) { return; } // temp variables for all warps __shared__ uint4* volatile s_out_ptrs[WARPS_NUM]; // get info about the segment const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16 const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary // first thread of each warp reserves space in output buffer if(0 == threadIdx.x) { // Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations) #if __CUDA_ARCH__ == 100 const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index; #else const unsigned int segment_out_offset = atomicAdd(d_gpujpeg_huffman_output_byte_count, segment_byte_count); d_segment[segment_idx].data_compressed_index = segment_out_offset; #endif s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset); } // we need to synchronize all our warps here to ensure s_out_ptrs is guaranteed to be provided on any thread. __syncthreads(); // all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset); uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y]; unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread) // copy the data! 
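    // Each iteration below moves 512 bytes per warp: 32 threads each copy one 16-byte uint4
    // and advance by 32 uint4s; the bounds-checked copy after the loop handles the final
    // partial 512-byte chunk of the 16-byte-aligned segment size.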
while(copy_iterations--) { *d_out = *d_in; d_out += 32; d_in += 32; } // copy remaining bytes (less than 512 bytes) if((threadIdx.x * 16) < (segment_byte_count & 511)) { *d_out = *d_in; } } // Threadblock size for CC 1.x kernel #define THREAD_BLOCK_SIZE 48 /** * Write one byte to compressed data (CC 1.x) * * @param data_compressed Data compressed * @param value Byte value to write * @return void */ #define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \ *data_compressed = (uint8_t)(value); \ data_compressed++; } /** * Write two bytes to compressed data (CC 1.x) * * @param data_compressed Data compressed * @param value Two-byte value to write * @return void */ #define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \ *data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \ data_compressed++; \ *data_compressed = (uint8_t)((value) & 0xFF); \ data_compressed++; } /** * Write marker to compressed data (CC 1.x) * * @param data_compressed Data compressed * @oaran marker Marker to write (JPEG_MARKER_...) * @return void */ #define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \ *data_compressed = 0xFF;\ data_compressed++; \ *data_compressed = (uint8_t)(marker); \ data_compressed++; } /** * Output bits to the file. Only the right 24 bits of put_buffer are used; * the valid bits are left-justified in this part. At most 16 bits can be * passed to EmitBits in one call, and we never retain more than 7 bits * in put_buffer between calls, so 24 bits are sufficient. Version for CC 1.x * * @param coder Huffman coder structure * @param code Huffman code * @param size Size in bits of the Huffman code * @return void */ __device__ static int gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed) { // This routine is heavily used, so it's worth coding tightly int _put_buffer = (int)code; int _put_bits = put_bits; // If size is 0, caller used an invalid Huffman table entry if ( size == 0 ) return -1; // Mask off any extra bits in code _put_buffer &= (((int)1) << size) - 1; // New number of bits in buffer _put_bits += size; // Align incoming bits _put_buffer <<= 24 - _put_bits; // And merge with old buffer contents _put_buffer |= put_value; // If there are more than 8 bits, write it out unsigned char uc; while ( _put_bits >= 8 ) { // Write one byte out uc = (unsigned char) ((_put_buffer >> 16) & 0xFF); gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, uc); // If need to stuff a zero byte if ( uc == 0xFF ) { // Write zero byte out gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0); } _put_buffer <<= 8; _put_bits -= 8; } // update state variables put_value = _put_buffer; put_bits = _put_bits; return 0; } /** * Emit left bits (CC 1.x) * * @param coder Huffman coder structure * @return void */ __device__ static void gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed) { // Fill 7 bits with ones if ( gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed) != 0 ) return; //unsigned char uc = (unsigned char) ((put_value >> 16) & 0xFF); // Write one byte out //gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, uc); put_value = 0; put_bits = 0; } /** * Encode one 8x8 block (for CC 1.x) * * @return 0 if succeeds, otherwise nonzero */ __device__ static int gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed, struct 
gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac) { typedef uint64_t loading_t; const int loading_iteration_count = 64 * 2 / sizeof(loading_t); // Load block to shared memory __shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE]; for ( int i = 0; i < loading_iteration_count; i++ ) { ((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i]; } int data_start = 64 * threadIdx.x; // Encode the DC coefficient difference per section F.1.2.1 int temp = s_data[data_start + 0] - dc; dc = s_data[data_start + 0]; int temp2 = temp; if ( temp < 0 ) { // Temp is abs value of input temp = -temp; // For a negative input, want temp2 = bitwise complement of abs(input) // This code assumes we are on a two's complement machine temp2--; } // Find the number of bits needed for the magnitude of the coefficient int nbits = 0; while ( temp ) { nbits++; temp >>= 1; } // Write category number if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) { return -1; } // Write category offset (EmitBits rejects calls with size 0) if ( nbits ) { if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 ) return -1; } // Encode the AC coefficients per section F.1.2.2 (r = run length of zeros) int r = 0; for ( int k = 1; k < 64; k++ ) { temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]]; if ( temp == 0 ) { r++; } else { // If run length > 15, must emit special run-length-16 codes (0xF0) while ( r > 15 ) { if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 ) return -1; r -= 16; } temp2 = temp; if ( temp < 0 ) { // temp is abs value of input temp = -temp; // This code assumes we are on a two's complement machine temp2--; } // Find the number of bits needed for the magnitude of the coefficient // there must be at least one 1 bit nbits = 1; while ( (temp >>= 1) ) nbits++; // Emit Huffman symbol for run length / number of bits int i = (r << 4) + nbits; if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 ) return -1; // Write Category offset if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 ) return -1; r = 0; } } // If all the left coefs were zero, emit an end-of-block code if ( r > 0 ) { if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 ) return -1; } return 0; } /** * Huffman encoder kernel (for CC 1.x) * * @return void */ __global__ static void gpujpeg_huffman_encoder_encode_kernel( struct gpujpeg_component* d_component, struct gpujpeg_segment* d_segment, int comp_count, int segment_count, uint8_t* d_data_compressed, unsigned int * d_gpujpeg_huffman_output_byte_count ) { int segment_index = blockIdx.x * blockDim.x + threadIdx.x; if ( segment_index >= segment_count ) return; struct gpujpeg_segment* segment = &d_segment[segment_index]; // first thread initializes compact output size for next kernel if(0 == segment_index) { *d_gpujpeg_huffman_output_byte_count = 0; } // Initialize huffman coder int put_value = 0; int put_bits = 0; int dc[GPUJPEG_MAX_COMPONENT_COUNT]; for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ ) dc[comp] = 0; // Prepare data pointers uint8_t* data_compressed = 
&d_data_compressed[segment->data_temp_index]; uint8_t* data_compressed_start = data_compressed; // Non-interleaving mode if ( comp_count == 1 ) { int segment_index = segment->scan_segment_index; // Encode MCUs in segment for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) { // Get component for current scan struct gpujpeg_component* component = &d_component[segment->scan_index]; // Get component data for MCU int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size]; // Get coder parameters int & component_dc = dc[segment->scan_index]; // Get huffman tables struct gpujpeg_table_huffman_encoder* d_table_dc = NULL; struct gpujpeg_table_huffman_encoder* d_table_ac = NULL; if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) { d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC]; d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC]; } else { d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC]; d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]; } // Encode 8x8 block if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 ) break; } } // Interleaving mode else { int segment_index = segment->scan_segment_index; // Encode MCUs in segment for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) { //assert(segment->scan_index == 0); for ( int comp = 0; comp < comp_count; comp++ ) { struct gpujpeg_component* component = &d_component[comp]; // Prepare mcu indexes int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x; int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x; // Compute base data index int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE); // For all vertical 8x8 blocks for ( int y = 0; y < component->sampling_factor.vertical; y++ ) { // Compute base row data index int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE); // For all horizontal 8x8 blocks for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) { // Compute 8x8 block data index int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE; // Get component data for MCU int16_t* block = &component->d_data_quantized[data_index]; // Get coder parameters int & component_dc = dc[comp]; // Get huffman tables struct gpujpeg_table_huffman_encoder* d_table_dc = NULL; struct gpujpeg_table_huffman_encoder* d_table_ac = NULL; if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) { d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC]; d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC]; } else { d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC]; d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC]; } // Encode 8x8 block gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac); } } } } } // Emit left bits if 
( put_bits > 0 ) gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed); // Output restart marker int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8); gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker); // Set compressed size segment->data_compressed_size = data_compressed - data_compressed_start; } /** Adds packed coefficients into the GPU version of Huffman lookup table. */ void gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) { // make a upshifted copy of the table for GPU encoding for ( int i = 0; i <= 256; i++ ) { const int size = src->size[i & 0xFF]; dest[i] = (src->code[i & 0xFF] << (32 - size)) | size; } // reserve first index in GPU version of AC table for special purposes if ( is_ac ) { dest[0] = 0; } } /* Documented at declaration */ struct gpujpeg_huffman_gpu_encoder * gpujpeg_huffman_gpu_encoder_create(const struct gpujpeg_encoder * encoder) { struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder = (struct gpujpeg_huffman_gpu_encoder *) malloc(sizeof(struct gpujpeg_huffman_gpu_encoder)); if ( huffman_gpu_encoder == NULL ) { return NULL; } memset(huffman_gpu_encoder, 0, sizeof(struct gpujpeg_huffman_gpu_encoder)); // Allocate cudaMalloc((void**)&huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int)); gpujpeg_cuda_check_error("Allocation of huffman output byte count failed", return NULL); // Initialize decomposition lookup table cudaFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, cudaFuncCachePreferShared); gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel<<<32, 256>>>(); // 8192 threads total cudaDeviceSynchronize(); gpujpeg_cuda_check_error("Decomposition LUT initialization failed", return NULL); // compose GPU version of the huffman LUT and copy it into GPU memory (for CC >= 2.0) uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4]; gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 0, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC], true); gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 1, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], false); gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 2, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC], true); gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 3, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC], false); cudaMemcpyToSymbol( gpujpeg_huffman_gpu_lut, gpujpeg_huffman_cpu_lut, (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut), 0, cudaMemcpyHostToDevice ); gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)", return NULL); // Copy original Huffman coding tables to GPU memory (for CC 1.x) cudaMemcpyToSymbol( gpujpeg_huffman_gpu_encoder_table_huffman, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], sizeof(gpujpeg_huffman_gpu_encoder_table_huffman), 0, cudaMemcpyHostToDevice ); gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)", return NULL); // Copy natural order to constant device memory cudaMemcpyToSymbol( gpujpeg_huffman_gpu_encoder_order_natural, gpujpeg_order_natural, GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int), 0, cudaMemcpyHostToDevice ); gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)", return NULL); // Configure more shared memory for all kernels 
cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, cudaFuncCachePreferShared); return huffman_gpu_encoder; } void gpujpeg_huffman_gpu_encoder_destroy(struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder) { assert(huffman_gpu_encoder != NULL); if (huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count != NULL) { cudaFree(huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count); } free(huffman_gpu_encoder); } /** * Get grid size for specified count of threadblocks. (Grid size is limited * to 65536 in both directions, so if we need more threadblocks, we must use * both x and y coordinates.) */ dim3 gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count) { dim3 size(tblock_count); while(size.x > 0xffff) { size.x = (size.x + 1) >> 1; size.y <<= 1; } return size; } /* Documented at declaration */ int gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder, unsigned int * output_byte_count) { // Get coder struct gpujpeg_coder* coder = &encoder->coder; assert(coder->param.restart_interval > 0); // Select encoder kernel which either expects continuos segments of blocks or uses block lists int comp_count = 1; if ( coder->param.interleaved == 1 ) comp_count = coder->param_image.comp_count; assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT); // Select encoder kernel based on compute capability if ( encoder->coder.cuda_cc_major < 2 ) { // Run kernel dim3 thread(THREAD_BLOCK_SIZE); dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x)); gpujpeg_huffman_encoder_encode_kernel<<<grid, thread, 0, encoder->stream>>>( coder->d_component, coder->d_segment, comp_count, coder->segment_count, coder->d_temp_huffman, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count ); gpujpeg_cuda_check_error("Huffman encoding failed", return -1); } else { // Run encoder kernel dim3 thread(32 * WARPS_NUM); dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32))); if(comp_count == 1) { gpujpeg_huffman_encoder_encode_kernel_warp<true><<<grid, thread, 0, encoder->stream>>>( coder->d_segment, coder->segment_count, coder->d_data_compressed, coder->d_block_list, coder->d_data_quantized, coder->d_component, comp_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count ); gpujpeg_cuda_check_error("Huffman encoding failed", return -1); } else { gpujpeg_huffman_encoder_encode_kernel_warp<false><<<grid, thread, 0, encoder->stream>>>( coder->d_segment, coder->segment_count, coder->d_data_compressed, coder->d_block_list, coder->d_data_quantized, coder->d_component, comp_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count ); gpujpeg_cuda_check_error("Huffman encoding failed", return -1); } // Run codeword serialization kernel const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK); const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks); 
gpujpeg_huffman_encoder_serialization_kernel<<<num_serialization_tblocks, SERIALIZATION_THREADS_PER_TBLOCK, 0, encoder->stream>>>( coder->d_segment, coder->segment_count, coder->d_data_compressed, coder->d_temp_huffman ); gpujpeg_cuda_check_error("Codeword serialization failed", return -1); } // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) { gpujpeg_huffman_encoder_allocation_kernel<<<1, 512, 0, encoder->stream>>>(coder->d_segment, coder->segment_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count); gpujpeg_cuda_check_error("Huffman encoder output allocation failed", return -1); } // Run output compaction kernel (one warp per segment) const dim3 compaction_thread(32, WARPS_NUM); const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM)); gpujpeg_huffman_encoder_compaction_kernel<<<compaction_grid, compaction_thread, 0, encoder->stream>>>( coder->d_segment, coder->segment_count, coder->d_temp_huffman, coder->d_data_compressed, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count ); gpujpeg_cuda_check_error("Huffman output compaction failed", return -1); // Read and return number of occupied bytes cudaMemcpyAsync(output_byte_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int), cudaMemcpyDeviceToHost, encoder->stream); gpujpeg_cuda_check_error("Huffman output size getting failed", return -1); // indicate success return 0; }
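//////////////////////////////////////////////////////////////////////////////
// Standalone illustration (not part of GPUJPEG): gpujpeg_huffman_gpu_encoder_grid_size
// above folds a large 1-D threadblock count into an x*y grid because grid
// dimensions were capped at 65535 per axis on older GPUs (and still are for y/z).
// The sketch below shows how a consuming kernel could recover the flat block index;
// the recovery formula is an assumption, not taken from the encoder kernels, and
// the folded grid may overshoot the requested count, so a guard is required.
//////////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <cuda_runtime.h>

static dim3 split_grid(unsigned int tblock_count)
{
    dim3 size(tblock_count);
    while ( size.x > 0xffff ) {
        size.x = (size.x + 1) >> 1;   // halve x, rounding up ...
        size.y <<= 1;                 // ... and double y, so x * y >= tblock_count holds
    }
    return size;
}

__global__ void flat_index_demo(unsigned int tblock_count)
{
    unsigned int block = blockIdx.y * gridDim.x + blockIdx.x;  // assumed recovery formula
    if ( block >= tblock_count )
        return;                       // folded grid can overshoot: skip out-of-range blocks
    if ( threadIdx.x == 0 && block == tblock_count - 1 )
        printf("last logical block %u reached\n", block);
}

int main()
{
    unsigned int tblocks = 200000;    // more than 65535, forces the fold
    dim3 grid = split_grid(tblocks);
    printf("grid = (%u, %u) covers %u >= %u blocks\n", grid.x, grid.y, grid.x * grid.y, tblocks);
    flat_index_demo<<<grid, 32>>>(tblocks);
    cudaDeviceSynchronize();
    return 0;
}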
// #if __CUDA_ARCH__ == 500 #define u64type uint2 #define vectype uint28 #define memshift 3 #else #define u64type uint2 #define vectype uint28 #define memshift 4 #endif __device__ vectype *DMatrix; static __device__ __forceinline__ void Gfunc_v35(uint2 & a, uint2 &b, uint2 &c, uint2 &d) { a += b; d = eorswap32(a, d); c += d; b ^= c; b = ROR24(b); a += b; d ^= a; d = ROR16(d); c += d; b ^= c; b = ROR2(b, 63); } static __device__ __forceinline__ void round_lyra_v35(vectype* s) { Gfunc_v35(s[0].x, s[1].x, s[2].x, s[3].x); Gfunc_v35(s[0].y, s[1].y, s[2].y, s[3].y); Gfunc_v35(s[0].z, s[1].z, s[2].z, s[3].z); Gfunc_v35(s[0].w, s[1].w, s[2].w, s[3].w); Gfunc_v35(s[0].x, s[1].y, s[2].z, s[3].w); Gfunc_v35(s[0].y, s[1].z, s[2].w, s[3].x); Gfunc_v35(s[0].z, s[1].w, s[2].x, s[3].y); Gfunc_v35(s[0].w, s[1].x, s[2].y, s[3].z); } static __device__ __forceinline__ void reduceDuplex(vectype state[4], uint32_t thread) { vectype state1[3]; uint32_t ps1 = (256 * thread); uint32_t ps2 = (memshift * 7 + memshift * 8 + 256 * thread); #pragma unroll 4 for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 + i*memshift; uint32_t s2 = ps2 - i*memshift; for (int j = 0; j < 3; j++) state1[j] = __ldg4(&(DMatrix+s1)[j]); for (int j = 0; j < 3; j++) state[j] ^= state1[j]; round_lyra_v35(state); for (int j = 0; j < 3; j++) state1[j] ^= state[j]; for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state1[j]; } } static __device__ __forceinline__ void reduceDuplexV3(vectype state[4], uint32_t thread) { vectype state1[3]; uint32_t ps1 = (256 * thread); // colomn row uint32_t ps2 = (memshift * 7 * 8 + memshift * 1 + 64 * memshift * thread); #pragma unroll 4 for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 + 8 * i *memshift; uint32_t s2 = ps2 - 8 * i *memshift; for (int j = 0; j < 3; j++) state1[j] = __ldg4(&(DMatrix + s1)[j]); for (int j = 0; j < 3; j++) state[j] ^= state1[j]; round_lyra_v35(state); for (int j = 0; j < 3; j++) state1[j] ^= state[j]; for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state1[j]; } } static __device__ __forceinline__ void reduceDuplexRowSetupV2(const int rowIn, const int rowInOut, const int rowOut, vectype state[4], uint32_t thread) { vectype state2[3],state1[3]; uint32_t ps1 = ( memshift * 8 * rowIn + 256 * thread); uint32_t ps2 = ( memshift * 8 * rowInOut + 256 * thread); uint32_t ps3 = (memshift*7 + memshift * 8 * rowOut + 256 * thread); #pragma unroll 1 for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 + i*memshift; uint32_t s2 = ps2 + i*memshift; uint32_t s3 = ps3 - i*memshift; for (int j = 0; j < 3; j++) state1[j]= __ldg4(&(DMatrix + s1)[j]); for (int j = 0; j < 3; j++) state2[j]= __ldg4(&(DMatrix + s2)[j]); for (int j = 0; j < 3; j++) { vectype tmp = state1[j] + state2[j]; state[j] ^= tmp; } round_lyra_v35(state); for (int j = 0; j < 3; j++) { state1[j] ^= state[j]; (DMatrix + s3)[j] = state1[j]; } ((uint2*)state2)[0] ^= ((uint2*)state)[11]; for (int j = 0; j < 11; j++) ((uint2*)state2)[j+1] ^= ((uint2*)state)[j]; for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state2[j]; } } static __device__ __forceinline__ void reduceDuplexRowSetupV3(const int rowIn, const int rowInOut, const int rowOut, vectype state[4], uint32_t thread) { vectype state2[3], state1[3]; uint32_t ps1 = ( memshift * rowIn + 64 * memshift * thread); uint32_t ps2 = (memshift * rowInOut + 64 * memshift* thread); uint32_t ps3 = (8 * memshift * 7 + memshift * rowOut + 64 * memshift * thread); /* uint32_t ps1 = (256 * thread); uint32_t ps2 = (256 * thread); uint32_t ps3 = (256 * thread); */ #pragma nounroll for (int i = 0; i < 8; i++) { 
uint32_t s1 = ps1 + 8*i*memshift; uint32_t s2 = ps2 + 8*i*memshift; uint32_t s3 = ps3 - 8*i*memshift; for (int j = 0; j < 3; j++) state1[j] = __ldg4(&(DMatrix + s1 )[j]); for (int j = 0; j < 3; j++) state2[j] = __ldg4(&(DMatrix + s2 )[j]); for (int j = 0; j < 3; j++) { vectype tmp = state1[j] + state2[j]; state[j] ^= tmp; } round_lyra_v35(state); for (int j = 0; j < 3; j++) { state1[j] ^= state[j]; (DMatrix + s3)[j] = state1[j]; } ((uint2*)state2)[0] ^= ((uint2*)state)[11]; for (int j = 0; j < 11; j++) ((uint2*)state2)[j + 1] ^= ((uint2*)state)[j]; for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state2[j]; } } static __device__ __forceinline__ void reduceDuplexRowtV2(const int rowIn, const int rowInOut, const int rowOut, vectype* state, uint32_t thread) { vectype state1[3],state2[3]; uint32_t ps1 = (memshift * 8 * rowIn + 256 * thread); uint32_t ps2 = (memshift * 8 * rowInOut + 256 * thread); uint32_t ps3 = (memshift * 8 * rowOut + 256 * thread); #pragma unroll 1 for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 + i*memshift; uint32_t s2 = ps2 + i*memshift; uint32_t s3 = ps3 + i*memshift; for (int j = 0; j < 3; j++) state1[j] = __ldg4(&(DMatrix + s1)[j]); for (int j = 0; j < 3; j++) state2[j] = __ldg4(&(DMatrix + s2)[j]); for (int j = 0; j < 3; j++) state1[j] += state2[j]; for (int j = 0; j < 3; j++) state[j] ^= state1[j]; round_lyra_v35(state); ((uint2*)state2)[0] ^= ((uint2*)state)[11]; for (int j = 0; j < 11; j++) ((uint2*)state2)[j + 1] ^= ((uint2*)state)[j]; if (rowInOut != rowOut) { for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state2[j]; for (int j = 0; j < 3; j++) (DMatrix + s3)[j] ^= state[j]; } else { for (int j = 0; j < 3; j++) state2[j] ^= state[j]; for (int j = 0; j < 3; j++) (DMatrix + s2)[j]=state2[j]; } } } static __device__ __forceinline__ void reduceDuplexRowtV3(const int rowIn, const int rowInOut, const int rowOut, vectype* state, uint32_t thread) { vectype state1[3], state2[3]; uint32_t ps1 = (memshift * rowIn + 64 * memshift * thread); uint32_t ps2 = (memshift * rowInOut + 64 * memshift * thread); uint32_t ps3 = (memshift * rowOut + 64 *memshift * thread); #pragma nounroll for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 + 8 * i*memshift; uint32_t s2 = ps2 + 8 * i*memshift; uint32_t s3 = ps3 + 8 * i*memshift; for (int j = 0; j < 3; j++) state1[j] = __ldg4(&(DMatrix + s1)[j]); for (int j = 0; j < 3; j++) state2[j] = __ldg4(&(DMatrix + s2)[j]); for (int j = 0; j < 3; j++) state1[j] += state2[j]; for (int j = 0; j < 3; j++) state[j] ^= state1[j]; round_lyra_v35(state); ((uint2*)state2)[0] ^= ((uint2*)state)[11]; for (int j = 0; j < 11; j++) ((uint2*)state2)[j + 1] ^= ((uint2*)state)[j]; if (rowInOut != rowOut) { for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state2[j]; for (int j = 0; j < 3; j++) (DMatrix + s3)[j] ^= state[j]; } else { for (int j = 0; j < 3; j++) state2[j] ^= state[j]; for (int j = 0; j < 3; j++) (DMatrix + s2)[j] = state2[j]; } } } #if __CUDA_ARCH__ < 500 __global__ __launch_bounds__(48, 1) #elif __CUDA_ARCH__ == 500 __global__ __launch_bounds__(16, 1) #else __global__ __launch_bounds__(TPB, 1) #endif void lyra2_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *outputHash) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); vectype state[4]; #if __CUDA_ARCH__ > 350 const uint28 blake2b_IV[2] = { {{ 0xf3bcc908, 0x6a09e667 }, { 0x84caa73b, 0xbb67ae85 }, { 0xfe94f82b, 0x3c6ef372 }, { 0x5f1d36f1, 0xa54ff53a }}, {{ 0xade682d1, 0x510e527f }, { 0x2b3e6c1f, 0x9b05688c }, { 0xfb41bd6b, 0x1f83d9ab }, { 0x137e2179, 0x5be0cd19 }}}; #else const 
ulonglong4 blake2b_IV[2] = { { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 }, { 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 } }; #endif #if __CUDA_ARCH__ == 350 if (thread < threads) #endif { ((uint2*)state)[0] = __ldg(&outputHash[thread]); ((uint2*)state)[1] = __ldg(&outputHash[thread + threads]); ((uint2*)state)[2] = __ldg(&outputHash[thread + 2 * threads]); ((uint2*)state)[3] = __ldg(&outputHash[thread + 3 * threads]); // state[0] = __ldg4(&((vectype*)outputHash)[thread]); state[1] = state[0]; state[2] = ((vectype*)blake2b_IV)[0]; state[3] = ((vectype*)blake2b_IV)[1]; for (int i = 0; i<24; i++) { round_lyra_v35(state); } //because 12 is not enough uint32_t ps1 = (memshift * 7 + 256 * thread); for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 - memshift * i; for (int j = 0; j < 3; j++) (DMatrix + s1)[j] = (state)[j]; round_lyra_v35(state); } reduceDuplex(state, thread); reduceDuplexRowSetupV2(1, 0, 2, state, thread); reduceDuplexRowSetupV2(2, 1, 3, state, thread); reduceDuplexRowSetupV2(3, 0, 4, state, thread); reduceDuplexRowSetupV2(4, 3, 5, state, thread); reduceDuplexRowSetupV2(5, 2, 6, state, thread); reduceDuplexRowSetupV2(6, 1, 7, state, thread); uint32_t rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(7, rowa, 0, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(0, rowa, 3, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(3, rowa, 6, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(6, rowa, 1, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(1, rowa, 4, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(4, rowa, 7, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(7, rowa, 2, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV2(2, rowa, 5, state, thread); uint32_t shift = (memshift * 8 * rowa + 256 * thread); for (int j = 0; j < 3; j++) state[j] ^= __ldg4(&(DMatrix + shift)[j]); for (int i = 0; i < 12; i++) round_lyra_v35(state); outputHash[thread]= ((uint2*)state)[0]; outputHash[thread + threads] = ((uint2*)state)[1]; outputHash[thread + 2 * threads] = ((uint2*)state)[2]; outputHash[thread + 3 * threads] = ((uint2*)state)[3]; // ((vectype*)outputHash)[thread] = state[0]; } //thread } #if __CUDA_ARCH__ < 500 __global__ __launch_bounds__(48, 1) #elif __CUDA_ARCH__ == 500 __global__ __launch_bounds__(16, 1) #else __global__ __launch_bounds__(TPB, 1) #endif void lyra2_gpu_hash_32_v3(uint32_t threads, uint32_t startNounce, uint2 *outputHash) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); vectype state[4]; #if __CUDA_ARCH__ > 350 const uint28 blake2b_IV[2] = { { { 0xf3bcc908, 0x6a09e667 }, { 0x84caa73b, 0xbb67ae85 }, { 0xfe94f82b, 0x3c6ef372 }, { 0x5f1d36f1, 0xa54ff53a } }, { { 0xade682d1, 0x510e527f }, { 0x2b3e6c1f, 0x9b05688c }, { 0xfb41bd6b, 0x1f83d9ab }, { 0x137e2179, 0x5be0cd19 } } }; #else const ulonglong4 blake2b_IV[2] = { { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 }, { 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 } }; #endif #if __CUDA_ARCH__ == 350 if (thread < threads) #endif { ((uint2*)state)[0] = __ldg(&outputHash[thread]); ((uint2*)state)[1] = __ldg(&outputHash[thread + threads]); ((uint2*)state)[2] = __ldg(&outputHash[thread + 2 * threads]); ((uint2*)state)[3] = __ldg(&outputHash[thread + 3 * threads]); state[1] = state[0]; state[2] = ((vectype*)blake2b_IV)[0]; state[3] = 
((vectype*)blake2b_IV)[1]; for (int i = 0; i<24; i++) round_lyra_v35(state); //because 12 is not enough uint32_t ps1 = (8 * memshift * 7 + 64 * memshift * thread); for (int i = 0; i < 8; i++) { uint32_t s1 = ps1 - 8 * memshift * i; for (int j = 0; j < 3; j++) (DMatrix + s1)[j] = (state)[j]; round_lyra_v35(state); } reduceDuplexV3(state, thread); reduceDuplexRowSetupV3(1, 0, 2, state, thread); reduceDuplexRowSetupV3(2, 1, 3, state, thread); reduceDuplexRowSetupV3(3, 0, 4, state, thread); reduceDuplexRowSetupV3(4, 3, 5, state, thread); reduceDuplexRowSetupV3(5, 2, 6, state, thread); reduceDuplexRowSetupV3(6, 1, 7, state, thread); uint32_t rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(7, rowa, 0, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(0, rowa, 3, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(3, rowa, 6, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(6, rowa, 1, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(1, rowa, 4, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(4, rowa, 7, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(7, rowa, 2, state, thread); rowa = ((uint2*)state)[0].x & 7; reduceDuplexRowtV3(2, rowa, 5, state, thread); uint32_t shift = (memshift * rowa + 64 * memshift * thread); for (int j = 0; j < 3; j++) state[j] ^= __ldg4(&(DMatrix + shift)[j]); for (int i = 0; i < 12; i++) round_lyra_v35(state); outputHash[thread] = ((uint2*)state)[0]; outputHash[thread + threads] = ((uint2*)state)[1]; outputHash[thread + 2 * threads] = ((uint2*)state)[2]; outputHash[thread + 3 * threads] = ((uint2*)state)[3]; } //thread } __host__ void lyra2_cpu_init(int thr_id, uint32_t threads,uint64_t *hash) { cudaMemcpyToSymbol(DMatrix, &hash, sizeof(hash), 0, cudaMemcpyHostToDevice); } __host__ void lyra2_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *d_outputHash) { uint32_t tpb; if (device_sm[device_map[thr_id]]==500) tpb = 16; else tpb = TPB; dim3 grid((threads + tpb - 1) / tpb); dim3 block(tpb); if (device_sm[device_map[thr_id]] == 500) lyra2_gpu_hash_32 << <grid, block >> > (threads, startNounce, (uint2*)d_outputHash); else lyra2_gpu_hash_32_v3 <<<grid, block>>> (threads, startNounce,(uint2*) d_outputHash); }
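//////////////////////////////////////////////////////////////////////////////
// Standalone illustration (not taken from the miner): Gfunc_v35 above performs
// Blake2b's 64-bit rotations (by 32, 24, 16 and 63) on uint2 (lo, hi) pairs so
// that each rotation reduces to 32-bit shifts; the 32-bit case is presumably why
// an xor-plus-half-swap helper (eorswap32) is used. ROR24/ROR16/ROR2/eorswap32
// themselves are helpers defined elsewhere in this code base and are not
// reproduced here; the host-only check below only verifies the underlying
// split-word rotation identity against plain 64-bit arithmetic.
//////////////////////////////////////////////////////////////////////////////
#include <cstdint>
#include <cstdio>

struct u64pair { uint32_t lo, hi; };

// Rotate a 64-bit value stored as two 32-bit halves right by n, with 0 < n < 32.
static u64pair ror_pair(u64pair v, unsigned n)
{
    u64pair r;
    r.lo = (v.lo >> n) | (v.hi << (32 - n));
    r.hi = (v.hi >> n) | (v.lo << (32 - n));
    return r;
}

// Rotations by 32 <= n < 64 are the same shift applied after swapping the halves,
// so a rotation by exactly 32 needs no shifting at all.
static u64pair ror_pair_any(u64pair v, unsigned n)
{
    if (n >= 32) { uint32_t t = v.lo; v.lo = v.hi; v.hi = t; n -= 32; }
    return n ? ror_pair(v, n) : v;
}

static uint64_t ror64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

int main()
{
    const uint64_t x = 0x0123456789abcdefULL;
    const unsigned amounts[] = { 16u, 24u, 32u, 63u };  // the amounts used by Gfunc_v35
    for (unsigned k = 0; k < 4; k++) {
        unsigned n = amounts[k];
        u64pair p = { (uint32_t)x, (uint32_t)(x >> 32) };
        u64pair r = ror_pair_any(p, n);
        uint64_t got = ((uint64_t)r.hi << 32) | r.lo;
        printf("ror %2u: %s\n", n, got == ror64(x, n) ? "ok" : "MISMATCH");
    }
    return 0;
}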
#include <thrust/random.h> #include <thrust/device_vector.h> #include <cooperative_groups.h> namespace cg = cooperative_groups; #include <helper_cuda.h> #include <helper_string.h> #include "cdpQuicksort.h" //////////////////////////////////////////////////////////////////////////////// // Inline PTX call to return index of highest non-zero bit in a word //////////////////////////////////////////////////////////////////////////////// static __device__ __forceinline__ unsigned int __qsflo(unsigned int word) { unsigned int ret; asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word)); return ret; } //////////////////////////////////////////////////////////////////////////////// // // ringbufAlloc // // Allocates from a ringbuffer. Allows for not failing when we run out // of stack for tracking the offset counts for each sort subsection. // // We use the atomicMax trick to allow out-of-order retirement. If we // hit the size limit on the ringbuffer, then we spin-wait for people // to complete. // //////////////////////////////////////////////////////////////////////////////// template <typename T> static __device__ T *ringbufAlloc(qsortRingbuf *ringbuf) { // Wait for there to be space in the ring buffer. We'll retry only a fixed // number of times and then fail, to avoid an out-of-memory deadlock. unsigned int loop = 10000; while (((ringbuf->head - ringbuf->tail) >= ringbuf->stacksize) && (loop-- > 0)) ; if (loop == 0) return NULL; // Note that the element includes a little index book-keeping, for freeing // later. unsigned int index = atomicAdd((unsigned int *)&ringbuf->head, 1); T *ret = (T *)(ringbuf->stackbase) + (index & (ringbuf->stacksize - 1)); ret->index = index; return ret; } //////////////////////////////////////////////////////////////////////////////// // // ringBufFree // // Releases an element from the ring buffer. If every element is released // up to and including this one, we can advance the tail to indicate that // space is now available. // //////////////////////////////////////////////////////////////////////////////// template <typename T> static __device__ void ringbufFree(qsortRingbuf *ringbuf, T *data) { unsigned int index = data->index; // Non-wrapped index to free unsigned int count = atomicAdd((unsigned int *)&(ringbuf->count), 1) + 1; unsigned int max = atomicMax((unsigned int *)&(ringbuf->max), index + 1); // Update the tail if need be. Note we update "max" to be the new value in // ringbuf->max if (max < (index + 1)) max = index + 1; if (max == count) atomicMax((unsigned int *)&(ringbuf->tail), count); } //////////////////////////////////////////////////////////////////////////////// // // qsort_warp // // Simplest possible implementation, does a per-warp quicksort with no // inter-warp // communication. This has a high atomic issue rate, but the rest should // actually // be fairly quick because of low work per thread. // // A warp finds its section of the data, then writes all data <pivot to one // buffer and all data >pivot to the other. Atomics are used to get a unique // section of the buffer. // // Obvious optimisation: do multiple chunks per warp, to increase in-flight // loads // and cover the instruction overhead. 
// //////////////////////////////////////////////////////////////////////////////// __global__ void qsort_warp(unsigned *indata, unsigned *outdata, unsigned int offset, unsigned int len, qsortAtomicData *atomicData, qsortRingbuf *atomicDataStack, unsigned int source_is_indata, unsigned int depth) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // Find my data offset, based on warp ID unsigned int thread_id = threadIdx.x + (blockIdx.x << QSORT_BLOCKSIZE_SHIFT); // unsigned int warp_id = threadIdx.x >> 5; // Used for debug only unsigned int lane_id = threadIdx.x & (warpSize - 1); // Exit if I'm outside the range of sort to be done if (thread_id >= len) return; // // First part of the algorithm. Each warp counts the number of elements that // are // greater/less than the pivot. // // When a warp knows its count, it updates an atomic counter. // // Read in the data and the pivot. Arbitrary pivot selection for now. unsigned pivot = indata[offset + len / 2]; unsigned data = indata[offset + thread_id]; // Count how many are <= and how many are > pivot. // If all are <= pivot then we adjust the comparison // because otherwise the sort will move nothing and // we'll iterate forever. cg::coalesced_group active = cg::coalesced_threads(); unsigned int greater = (data > pivot); unsigned int gt_mask = active.ballot(greater); if (gt_mask == 0) { greater = (data >= pivot); gt_mask = active.ballot(greater); // Must re-ballot for adjusted comparator } unsigned int lt_mask = active.ballot(!greater); unsigned int gt_count = __popc(gt_mask); unsigned int lt_count = __popc(lt_mask); // Atomically adjust the lt_ and gt_offsets by this amount. Only one thread // need do this. Share the result using shfl unsigned int lt_offset, gt_offset; if (lane_id == 0) { if (lt_count > 0) lt_offset = atomicAdd((unsigned int *)&atomicData->lt_offset, lt_count); if (gt_count > 0) gt_offset = len - (atomicAdd((unsigned int *)&atomicData->gt_offset, gt_count) + gt_count); } lt_offset = active.shfl((int)lt_offset, 0); // Everyone pulls the offsets from lane 0 gt_offset = active.shfl((int)gt_offset, 0); // Now compute my own personal offset within this. I need to know how many // threads with a lane ID less than mine are going to write to the same buffer // as me. We can use popc to implement a single-operation warp scan in this // case. unsigned lane_mask_lt; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt)); unsigned int my_mask = greater ? gt_mask : lt_mask; unsigned int my_offset = __popc(my_mask & lane_mask_lt); // Move data. my_offset += greater ? gt_offset : lt_offset; outdata[offset + my_offset] = data; // Count up if we're the last warp in. If so, then Kepler will launch the next // set of sorts directly from here. if (lane_id == 0) { // Count "elements written". If I wrote the last one, then trigger the next // qsorts unsigned int mycount = lt_count + gt_count; if (atomicAdd((unsigned int *)&atomicData->sorted_count, mycount) + mycount == len) { // We're the last warp to do any sorting. Therefore it's up to us to // launch the next stage. unsigned int lt_len = atomicData->lt_offset; unsigned int gt_len = atomicData->gt_offset; cudaStream_t lstream, rstream; cudaStreamCreateWithFlags(&lstream, cudaStreamNonBlocking); cudaStreamCreateWithFlags(&rstream, cudaStreamNonBlocking); // Begin by freeing our atomicData storage. It's better for the ringbuffer // algorithm // if we free when we're done, rather than re-using (makes for less // fragmentation). 
ringbufFree<qsortAtomicData>(atomicDataStack, atomicData); // Exceptional case: if "lt_len" is zero, then all values in the batch // are equal. We are then done (may need to copy into correct buffer, // though) if (lt_len == 0) { if (source_is_indata) cudaMemcpyAsync(indata + offset, outdata + offset, gt_len * sizeof(unsigned), cudaMemcpyDeviceToDevice, lstream); return; } // Start with lower half first if (lt_len > BITONICSORT_LEN) { // If we've exceeded maximum depth, fall through to backup // big_bitonicsort if (depth >= QSORT_MAXDEPTH) { // The final bitonic stage sorts in-place in "outdata". We therefore // re-use "indata" as the out-of-range tracking buffer. For (2^n)+1 // elements we need (2^(n+1)) bytes of oor buffer. The backup qsort // buffer is at least this large when sizeof(QTYPE) >= 2. big_bitonicsort<<<1, BITONICSORT_LEN, 0, lstream>>>( outdata, source_is_indata ? indata : outdata, indata, offset, lt_len); } else { // Launch another quicksort. We need to allocate more storage for the // atomic data. if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL) printf("Stack-allocation error. Failing left child launch.\n"); else { atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0; unsigned int numblocks = (unsigned int)(lt_len + (QSORT_BLOCKSIZE - 1)) >> QSORT_BLOCKSIZE_SHIFT; qsort_warp<<<numblocks, QSORT_BLOCKSIZE, 0, lstream>>>( outdata, indata, offset, lt_len, atomicData, atomicDataStack, !source_is_indata, depth + 1); } } } else if (lt_len > 1) { // Final stage uses a bitonic sort instead. It's important to // make sure the final stage ends up in the correct (original) buffer. // We launch the smallest power-of-2 number of threads that we can. unsigned int bitonic_len = 1 << (__qsflo(lt_len - 1U) + 1); bitonicsort<<<1, bitonic_len, 0, lstream>>>( outdata, source_is_indata ? indata : outdata, offset, lt_len); } // Finally, if we sorted just one single element, we must still make // sure that it winds up in the correct place. else if (source_is_indata && (lt_len == 1)) indata[offset] = outdata[offset]; if (cudaPeekAtLastError() != cudaSuccess) printf("Left-side launch fail: %s\n", cudaGetErrorString(cudaGetLastError())); // Now the upper half. if (gt_len > BITONICSORT_LEN) { // If we've exceeded maximum depth, fall through to backup // big_bitonicsort if (depth >= QSORT_MAXDEPTH) big_bitonicsort<<<1, BITONICSORT_LEN, 0, rstream>>>( outdata, source_is_indata ? indata : outdata, indata, offset + lt_len, gt_len); else { // Allocate new atomic storage for this launch if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL) printf("Stack allocation error! Failing right-side launch.\n"); else { atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0; unsigned int numblocks = (unsigned int)(gt_len + (QSORT_BLOCKSIZE - 1)) >> QSORT_BLOCKSIZE_SHIFT; qsort_warp<<<numblocks, QSORT_BLOCKSIZE, 0, rstream>>>( outdata, indata, offset + lt_len, gt_len, atomicData, atomicDataStack, !source_is_indata, depth + 1); } } } else if (gt_len > 1) { unsigned int bitonic_len = 1 << (__qsflo(gt_len - 1U) + 1); bitonicsort<<<1, bitonic_len, 0, rstream>>>( outdata, source_is_indata ? 
indata : outdata, offset + lt_len, gt_len); } else if (source_is_indata && (gt_len == 1)) indata[offset + lt_len] = outdata[offset + lt_len]; if (cudaPeekAtLastError() != cudaSuccess) printf("Right-side launch fail: %s\n", cudaGetErrorString(cudaGetLastError())); } } } //////////////////////////////////////////////////////////////////////////////// // // run_quicksort // // Host-side code to run the Kepler version of quicksort. It's pretty // simple, because all launch control is handled on the device via CDP. // // All parallel quicksorts require an equal-sized scratch buffer. This // must be passed in ahead of time. // // Returns the time elapsed for the sort. // //////////////////////////////////////////////////////////////////////////////// float run_quicksort_cdp(unsigned *gpudata, unsigned *scratchdata, unsigned int count, cudaStream_t stream) { unsigned int stacksize = QSORT_STACK_ELEMS; // This is the stack, for atomic tracking of each sort's status qsortAtomicData *gpustack; checkCudaErrors( cudaMalloc((void **)&gpustack, stacksize * sizeof(qsortAtomicData))); checkCudaErrors(cudaMemset( gpustack, 0, sizeof(qsortAtomicData))); // Only need set first entry to 0 // Create the memory ringbuffer used for handling the stack. // Initialise everything to where it needs to be. qsortRingbuf buf; qsortRingbuf *ringbuf; checkCudaErrors(cudaMalloc((void **)&ringbuf, sizeof(qsortRingbuf))); buf.head = 1; // We start with one allocation buf.tail = 0; buf.count = 0; buf.max = 0; buf.stacksize = stacksize; buf.stackbase = gpustack; checkCudaErrors( cudaMemcpy(ringbuf, &buf, sizeof(buf), cudaMemcpyHostToDevice)); // Timing events... cudaEvent_t ev1, ev2; checkCudaErrors(cudaEventCreate(&ev1)); checkCudaErrors(cudaEventCreate(&ev2)); checkCudaErrors(cudaEventRecord(ev1)); // Now we trivially launch the qsort kernel if (count > BITONICSORT_LEN) { unsigned int numblocks = (unsigned int)(count + (QSORT_BLOCKSIZE - 1)) >> QSORT_BLOCKSIZE_SHIFT; qsort_warp<<<numblocks, QSORT_BLOCKSIZE, 0, stream>>>( gpudata, scratchdata, 0U, count, gpustack, ringbuf, true, 0); } else { bitonicsort<<<1, BITONICSORT_LEN>>>(gpudata, gpudata, 0, count); } checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaEventRecord(ev2)); checkCudaErrors(cudaDeviceSynchronize()); float elapse = 0.0f; if (cudaPeekAtLastError() != cudaSuccess) printf("Launch failure: %s\n", cudaGetErrorString(cudaGetLastError())); else checkCudaErrors(cudaEventElapsedTime(&elapse, ev1, ev2)); // Sanity check that the stack allocator is doing the right thing checkCudaErrors( cudaMemcpy(&buf, ringbuf, sizeof(*ringbuf), cudaMemcpyDeviceToHost)); if (count > BITONICSORT_LEN && buf.head != buf.tail) { printf("Stack allocation error!\nRingbuf:\n"); printf("\t head = %u\n", buf.head); printf("\t tail = %u\n", buf.tail); printf("\tcount = %u\n", buf.count); printf("\t max = %u\n", buf.max); } // Release our stack data once we're done checkCudaErrors(cudaFree(ringbuf)); checkCudaErrors(cudaFree(gpustack)); return elapse; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// int run_qsort(unsigned int size, int seed, int debug, int loop, int verbose) { if (seed > 0) srand(seed); // Create and set up our test unsigned *gpudata, *scratchdata; checkCudaErrors(cudaMalloc((void **)&gpudata, size * sizeof(unsigned))); checkCudaErrors(cudaMalloc((void **)&scratchdata, size * sizeof(unsigned))); // Create CPU data. 
unsigned *data = new unsigned[size]; unsigned int min = loop ? loop : size; unsigned int max = size; loop = (loop == 0) ? 1 : loop; for (size = min; size <= max; size += loop) { if (verbose) printf(" Input: "); for (unsigned int i = 0; i < size; i++) { // Build data 8 bits at a time data[i] = 0; char *ptr = (char *)&(data[i]); for (unsigned j = 0; j < sizeof(unsigned); j++) { // Easy-to-read data in debug mode if (debug) { *ptr++ = (char)(rand() % 10); break; } *ptr++ = (char)(rand() & 255); } if (verbose) { if (i && !(i % 32)) printf("\n "); printf("%u ", data[i]); } } if (verbose) printf("\n"); checkCudaErrors(cudaMemcpy(gpudata, data, size * sizeof(unsigned), cudaMemcpyHostToDevice)); // So we're now populated and ready to go! We size our launch as // blocks of up to BLOCKSIZE threads, and appropriate grid size. // One thread is launched per element. float elapse; elapse = run_quicksort_cdp(gpudata, scratchdata, size, NULL); // run_bitonicsort<SORTTYPE>(gpudata, scratchdata, size, verbose); checkCudaErrors(cudaDeviceSynchronize()); // Copy back the data and verify correct sort checkCudaErrors(cudaMemcpy(data, gpudata, size * sizeof(unsigned), cudaMemcpyDeviceToHost)); if (verbose) { printf("Output: "); for (unsigned int i = 0; i < size; i++) { if (i && !(i % 32)) printf("\n "); printf("%u ", data[i]); } printf("\n"); } unsigned int check; for (check = 1; check < size; check++) { if (data[check] < data[check - 1]) { printf("FAILED at element: %d\n", check); break; } } if (check != size) { printf(" cdpAdvancedQuicksort FAILED\n"); exit(EXIT_FAILURE); } else printf(" cdpAdvancedQuicksort PASSED\n"); // Display the time between event recordings printf("Sorted %u elems in %.3f ms (%.3f Melems/sec)\n", size, elapse, (float)size / (elapse * 1000.0f)); fflush(stdout); } // Release everything and we're done checkCudaErrors(cudaFree(scratchdata)); checkCudaErrors(cudaFree(gpudata)); delete (data); return 0; } static void usage() { printf( "Syntax: cdpAdvancedQuicksort [-size=<num>] [-seed=<num>] [-debug] " "[-loop-step=<num>] [-verbose]\n"); printf( "If loop_step is non-zero, will run from 1->array_len in steps of " "loop_step\n"); } // Host side entry int main(int argc, char *argv[]) { int size = 1000000; unsigned int seed = 0; int debug = 0; int loop = 0; int verbose = 0; if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "h")) { usage(); printf("&&&& cdpAdvancedQuicksort WAIVED\n"); exit(EXIT_WAIVED); } if (checkCmdLineFlag(argc, (const char **)argv, "size")) { size = getCmdLineArgumentInt(argc, (const char **)argv, "size"); } if (checkCmdLineFlag(argc, (const char **)argv, "seed")) { seed = getCmdLineArgumentInt(argc, (const char **)argv, "seed"); } if (checkCmdLineFlag(argc, (const char **)argv, "loop-step")) { loop = getCmdLineArgumentInt(argc, (const char **)argv, "loop-step"); } if (checkCmdLineFlag(argc, (const char **)argv, "debug")) { debug = 1; } if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) { verbose = 1; } // Get device properties int cuda_device = findCudaDevice(argc, (const char **)argv); cudaDeviceProp properties; checkCudaErrors(cudaGetDeviceProperties(&properties, cuda_device)); int cdpCapable = (properties.major == 3 && properties.minor >= 5) || properties.major >= 4; printf("GPU device %s has compute capabilities (SM %d.%d)\n", properties.name, properties.major, properties.minor); if (!cdpCapable) { printf( "cdpAdvancedQuicksort requires SM 3.5 or higher to use CUDA Dynamic " "Parallelism. 
Exiting...\n"); exit(EXIT_WAIVED); } printf("Running qsort on %d elements with seed %d, on %s\n", size, seed, properties.name); run_qsort(size, seed, debug, loop, verbose); exit(EXIT_SUCCESS); }
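//////////////////////////////////////////////////////////////////////////////
// Standalone illustration (not part of the sample): the heart of qsort_warp above
// is a warp-wide partition in which ballot() gathers every lane's "greater than
// pivot" flag into one mask and __popc(mask & lanes_below_me) yields each lane's
// private write slot with no intra-warp atomics. The sample does this through
// cooperative groups (coalesced_threads().ballot()); the sketch below uses
// __ballot_sync on one full warp instead, and the data and pivot are arbitrary.
//////////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_partition_demo(const unsigned *in, unsigned *out, unsigned pivot)
{
    unsigned lane = threadIdx.x & 31;
    unsigned v = in[lane];
    unsigned greater = (v > pivot);

    unsigned gt_mask  = __ballot_sync(0xffffffffu, greater);
    unsigned lt_mask  = ~gt_mask;                   // full warp, so complement == "<= pivot" set
    unsigned below_me = (1u << lane) - 1u;          // same role as %lanemask_lt in the sample

    unsigned lt_count = __popc(lt_mask);
    unsigned my_rank  = __popc((greater ? gt_mask : lt_mask) & below_me);
    unsigned my_slot  = greater ? lt_count + my_rank : my_rank;  // "<=" block first, ">" block after

    out[my_slot] = v;
}

int main()
{
    unsigned h_in[32], h_out[32];
    for (int i = 0; i < 32; i++) h_in[i] = (i * 7919u) % 100u;   // arbitrary test values
    unsigned *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    warp_partition_demo<<<1, 32>>>(d_in, d_out, 50u);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 32; i++) printf("%u ", h_out[i]);        // values <= 50 first, then > 50
    printf("\n");
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}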
#if !defined(ENABLE_GPU) || !defined(ENABLE_CUDNN) #error "bilinearsampler_cudnn.cu can only be compiled with GPU and CUDNN (v5 or higher) support." #endif #include "nnbilinearsampler_cudnn.hpp" #include "cudnnhelper.hpp" #include "../datacu.hpp" #include <assert.h> #include <algorithm> #if CUDNN_VERSION < 5000 #warning "bilinearsampler_cudnn.cu will be disabled as it requires CUDNN v5 or higher." namespace vl { namespace impl { template<vl::Type dataType> vl::Error vl::impl::nnbilinearsampler_cudnn<dataType>::forward(Context& context, Tensor output, Tensor data, Tensor grid) { return vl::vlErrorUnsupported ; } template<vl::Type dataType> vl::Error vl::impl::nnbilinearsampler_cudnn<dataType>::backward(Context& context, Tensor derData, Tensor derGrid, Tensor data, Tensor grid, Tensor derOutput) { return vl::vlErrorUnsupported ; } }} #else using namespace vl ; // check if the descriptors, etc. were successfully created: #define CHECK(x) \ { \ cudnnError = x ; \ if (cudnnError != CUDNN_STATUS_SUCCESS) { \ error = context.setError(context.getCudaHelper().catchCudnnError(cudnnError, \ STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \ goto done ; \ } } /* ---------------------------------------------------------------- */ /* bilinearsampler_forward_cudnn */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template<vl::Type dataType> vl::Error vl::impl::nnbilinearsampler_cudnn<dataType>::forward(Context& context, Tensor output, Tensor data, Tensor grid) { assert(output) ; assert(data) ; assert(grid) ; typedef typename DataTypeTraits<dataType>::type type ; cudnnTensorDescriptor_t outputDesc, dataDesc ; cudnnSpatialTransformerDescriptor_t samplerDesc ; bool outputDescInitialized = false ; bool dataDescInitialized = false ; bool samplerDescInitialized = false ; // get the sizes: int inCardinality = data.getSize(); int inDepth = data.getDepth(); int inHeight = data.getHeight(); int inWidth = data.getWidth(); int outCardinality = output.getSize(); int outDepth = output.getDepth(); int outWidth = output.getWidth(); int outHeight = output.getHeight(); cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::id ; vl::Type dynDataType = output.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::Error error = vl::vlSuccess ; cudnnHandle_t handle ; // get number of transforms/image == groupSize: int groupSize = outCardinality / inCardinality ; int dimOut[4] = { 1, outDepth, outWidth, outHeight } ; // one-image // Get CuDNN CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descriptors: CHECK(cudnnCreateTensorDescriptor(&outputDesc)) ; outputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(outputDesc, cudnnDataType, 1, outDepth, outWidth, outHeight, // sizes: n,c,w,h outHeight * outWidth * outDepth, //strides outHeight * outWidth, outHeight, 1)) ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, cudnnDataType, 1, inDepth, inWidth, inHeight, // sizes: n,c,w,h inHeight * inWidth * inDepth, //strides inHeight * inWidth, inHeight, 1)) ; // Get bilinear-sampler descriptor: CHECK(cudnnCreateSpatialTransformerDescriptor(&samplerDesc)) ; samplerDescInitialized = true ; CHECK(cudnnSetSpatialTransformerNdDescriptor(samplerDesc, CUDNN_SAMPLER_BILINEAR, cudnnDataType, 4, dimOut)) ; { type alpha = 1.0f ; type beta = 0.0f ; const ptrdiff_t dataOffset = inHeight * inWidth * inDepth ; const ptrdiff_t gridOffset = 2 
* outWidth * outHeight ; const ptrdiff_t outOffset = outHeight * outWidth * outDepth ; type const* data_ptr = (type const*) data.getMemory() ; type const* grid_ptr = (type const*) grid.getMemory() ; type * out_ptr = (type *) output.getMemory() ; for (int im=0; im < inCardinality; im++) { for (int ig=0; ig < groupSize; ig++) { cudnnSpatialTfSamplerForward(handle, samplerDesc, &alpha, dataDesc, data_ptr, grid_ptr, &beta, outputDesc, out_ptr) ; grid_ptr += gridOffset ; out_ptr += outOffset ; } data_ptr += dataOffset ; } } done: if (samplerDescInitialized) { cudnnDestroySpatialTransformerDescriptor(samplerDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } if (outputDescInitialized) { cudnnDestroyTensorDescriptor(outputDesc) ; } return context.passError(error, __func__) ; } /* ---------------------------------------------------------------- */ /* bilinearsampler_backward_cudnn */ /* ---------------------------------------------------------------- */ template<vl::Type dataType> vl::Error vl::impl::nnbilinearsampler_cudnn<dataType>::backward(Context& context, Tensor derData, Tensor derGrid, Tensor data, Tensor grid, Tensor derOutput) { typedef typename DataTypeTraits<dataType>::type type ; /* no derDataDesc needed as same as dataDesc <-- nice! */ cudnnTensorDescriptor_t dataDesc, derOutputDesc ; cudnnSpatialTransformerDescriptor_t samplerDesc ; bool dataDescInitialized = false ; bool derOutputDescInitialized = false ; bool samplerDescInitialized = false ; // get the sizes: int inCardinality = data.getSize(); int inDepth = data.getDepth(); int inHeight = data.getHeight(); int inWidth = data.getWidth(); int outCardinality = derOutput.getSize(); int outDepth = derOutput.getDepth(); int outWidth = derOutput.getWidth(); int outHeight = derOutput.getHeight(); cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::id ; vl::Type dynDataType = derOutput.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::Error error = vl::vlSuccess ; cudnnHandle_t handle ; // get number of transforms/image == groupSize: int groupSize = outCardinality / inCardinality; int dimOut[4] = { 1, outDepth, outWidth, outHeight }; // Get CuDNN CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descriptors: CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(derOutputDesc, cudnnDataType, 1, outDepth, outWidth, outHeight, // sizes: n,c,w,h outHeight * outWidth * outDepth, //strides outHeight * outWidth, outHeight, 1)) ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, cudnnDataType, 1, inDepth, inWidth, inHeight, // sizes: n,c,w,h inHeight * inWidth * inDepth, //strides inHeight * inWidth, inHeight, 1)) ; // Get bilinear-sampler descriptor: CHECK(cudnnCreateSpatialTransformerDescriptor(&samplerDesc)) ; samplerDescInitialized = true ; CHECK(cudnnSetSpatialTransformerNdDescriptor(samplerDesc, CUDNN_SAMPLER_BILINEAR, cudnnDataType, 4, dimOut)); /* do the work */ { type alpha = 1.0f ; type dataBeta = 1.0f ; // assuming that the derData has been initialized to zero type gridBeta = 0.0f ; const ptrdiff_t dataOffset = inHeight * inWidth * inDepth ; const ptrdiff_t gridOffset = 2 * outWidth * outHeight ; const ptrdiff_t outOffset = outHeight * outWidth * outDepth ; type const* data_ptr = (type const*) data.getMemory() ; type * derData_ptr = (type *) derData.getMemory() ; type const* grid_ptr = (type 
const*) grid.getMemory() ; type * derGrid_ptr = (type *) derGrid.getMemory() ; type * derOut_ptr = (type *) derOutput.getMemory() ; for (int im=0; im < inCardinality; im++) { for (int ig=0; ig < groupSize; ig++) { cudnnSpatialTfSamplerBackward(handle, samplerDesc, &alpha, dataDesc, data_ptr, &dataBeta, dataDesc, derData_ptr, &alpha, derOutputDesc, derOut_ptr, grid_ptr, &gridBeta, derGrid_ptr) ; grid_ptr += gridOffset ; derGrid_ptr += gridOffset ; derOut_ptr += outOffset ; } data_ptr += dataOffset ; derData_ptr += dataOffset ; } } /* cleanup */ done: if (samplerDescInitialized) { cudnnDestroySpatialTransformerDescriptor(samplerDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } return context.passError(error, __func__) ; } }} #endif // CUDNN >= v5.0 // Instantiations template struct vl::impl::nnbilinearsampler_cudnn<vl::vlTypeFloat> ; #ifdef ENABLE_DOUBLE template struct vl::impl::nnbilinearsampler_cudnn<vl::vlTypeDouble> ; #endif
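//////////////////////////////////////////////////////////////////////////////
// Standalone illustration (not part of the library): the descriptors above pass
// the sizes to cudnnSetTensor4dDescriptorEx as (n, c, w, h) with explicit strides
// (h*w*c, h*w, h, 1), i.e. width and height swap roles so that the stride-1
// dimension is the tensor's height. That matches a column-major H x W x C buffer
// (MATLAB-style storage). The host-only check below verifies that the column-major
// index and the swapped-stride cuDNN index address the same element; it needs no
// cuDNN and is purely illustrative.
//////////////////////////////////////////////////////////////////////////////
#include <cassert>
#include <cstdio>

int main()
{
  const int H = 5, W = 7, C = 3 ;                          // one image (n = 0)
  const int strideC = W * H, strideW = H, strideH = 1 ;    // strides used by the code above
  for (int c = 0 ; c < C ; ++c) {
    for (int x = 0 ; x < W ; ++x) {
      for (int y = 0 ; y < H ; ++y) {
        int columnMajorIndex = y + H * (x + W * c) ;                      // H x W x C storage
        int cudnnIndex       = c * strideC + x * strideW + y * strideH ;  // (c, "h"=x, "w"=y)
        assert(columnMajorIndex == cudnnIndex) ;
      }
    }
  }
  printf("column-major H x W x C and the swapped-stride cuDNN view agree\n") ;
  return 0 ;
}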
#include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include "detail/arrow_allocator.hpp" namespace cudf { namespace detail { namespace { /** * @brief Create arrow data buffer from given cudf column */ template <typename T> std::shared_ptr<arrow::Buffer> fetch_data_buffer(column_view input_view, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { const int64_t data_size_in_bytes = sizeof(T) * input_view.size(); auto data_buffer = allocate_arrow_buffer(data_size_in_bytes, ar_mr); CUDA_TRY(cudaMemcpyAsync(data_buffer->mutable_data(), input_view.data<T>(), data_size_in_bytes, cudaMemcpyDeviceToHost, stream.value())); return std::move(data_buffer); } /** * @brief Create arrow buffer of mask from given cudf column */ std::shared_ptr<arrow::Buffer> fetch_mask_buffer(column_view input_view, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { const int64_t mask_size_in_bytes = cudf::bitmask_allocation_size_bytes(input_view.size()); if (input_view.has_nulls()) { auto mask_buffer = allocate_arrow_bitmap(static_cast<int64_t>(input_view.size()), ar_mr); CUDA_TRY(cudaMemcpyAsync( mask_buffer->mutable_data(), (input_view.offset() > 0) ? cudf::copy_bitmask(input_view).data() : input_view.null_mask(), mask_size_in_bytes, cudaMemcpyDeviceToHost, stream.value())); // Resets all padded bits to 0 mask_buffer->ZeroPadding(); return mask_buffer; } return nullptr; } /** * @brief Functor to convert cudf column to arrow array */ struct dispatch_to_arrow { /** * @brief Creates vector Arrays from given cudf column children */ std::vector<std::shared_ptr<arrow::Array>> fetch_child_array( column_view input_view, std::vector<column_metadata> const& metadata, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { std::vector<std::shared_ptr<arrow::Array>> child_arrays; std::transform( input_view.child_begin(), input_view.child_end(), metadata.begin(), std::back_inserter(child_arrays), [&ar_mr, &stream](auto const& child, auto const& meta) { return type_dispatcher( child.type(), dispatch_to_arrow{}, child, child.type().id(), meta, ar_mr, stream); }); return child_arrays; } template <typename T, CUDF_ENABLE_IF(not is_rep_layout_compatible<T>())> std::shared_ptr<arrow::Array> operator()( column_view, cudf::type_id, column_metadata const&, arrow::MemoryPool*, rmm::cuda_stream_view) { CUDF_FAIL("Unsupported type for to_arrow."); } template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())> std::shared_ptr<arrow::Array> operator()(column_view input_view, cudf::type_id id, column_metadata const&, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { return to_arrow_array(id, static_cast<int64_t>(input_view.size()), fetch_data_buffer<T>(input_view, ar_mr, stream), fetch_mask_buffer(input_view, ar_mr, stream), static_cast<int64_t>(input_view.null_count())); } }; template <> std::shared_ptr<arrow::Array> dispatch_to_arrow::operator()<numeric::decimal64>( column_view input, cudf::type_id, column_metadata const&, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { using DeviceType = int64_t; size_type const BIT_WIDTH_RATIO = 2; // Array::Type:type::DECIMAL (128) / int64_t rmm::device_uvector<DeviceType> buf(input.size() * BIT_WIDTH_RATIO, stream); auto count = thrust::make_counting_iterator(0); thrust::for_each(count, count + input.size(), [in = input.begin<DeviceType>(), out = buf.data()] __device__(auto in_idx) { auto const out_idx = in_idx * 2; out[out_idx] = in[in_idx]; out[out_idx + 1] 
= in[in_idx] < 0 ? -1 : 0; }); auto const buf_size_in_bytes = buf.size() * sizeof(DeviceType); auto data_buffer = allocate_arrow_buffer(buf_size_in_bytes, ar_mr); CUDA_TRY(cudaMemcpyAsync(data_buffer->mutable_data(), buf.data(), buf_size_in_bytes, cudaMemcpyDeviceToHost, stream.value())); auto type = arrow::decimal(18, -input.type().scale()); auto mask = fetch_mask_buffer(input, ar_mr, stream); auto buffers = std::vector<std::shared_ptr<arrow::Buffer>>{mask, std::move(data_buffer)}; auto data = std::make_shared<arrow::ArrayData>(type, input.size(), buffers); return std::make_shared<arrow::Decimal128Array>(data); } template <> std::shared_ptr<arrow::Array> dispatch_to_arrow::operator()<bool>(column_view input, cudf::type_id id, column_metadata const&, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { auto bitmask = bools_to_mask(input, stream); auto data_buffer = allocate_arrow_buffer(static_cast<int64_t>(bitmask.first->size()), ar_mr); CUDA_TRY(cudaMemcpyAsync(data_buffer->mutable_data(), bitmask.first->data(), bitmask.first->size(), cudaMemcpyDeviceToHost, stream.value())); return to_arrow_array(id, static_cast<int64_t>(input.size()), std::move(data_buffer), fetch_mask_buffer(input, ar_mr, stream), static_cast<int64_t>(input.null_count())); } template <> std::shared_ptr<arrow::Array> dispatch_to_arrow::operator()<cudf::string_view>( column_view input, cudf::type_id, column_metadata const&, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { std::unique_ptr<column> tmp_column = ((input.offset() != 0) or ((input.num_children() == 2) and (input.child(0).size() - 1 != input.size()))) ? std::make_unique<cudf::column>(input) : nullptr; column_view input_view = (tmp_column != nullptr) ? tmp_column->view() : input; auto child_arrays = fetch_child_array(input_view, {{}, {}}, ar_mr, stream); if (child_arrays.empty()) { // Empty string will have only one value in offset of 4 bytes auto tmp_offset_buffer = allocate_arrow_buffer(4, ar_mr); auto tmp_data_buffer = allocate_arrow_buffer(0, ar_mr); tmp_offset_buffer->mutable_data()[0] = 0; return std::make_shared<arrow::StringArray>( 0, std::move(tmp_offset_buffer), std::move(tmp_data_buffer)); } auto offset_buffer = child_arrays[0]->data()->buffers[1]; auto data_buffer = child_arrays[1]->data()->buffers[1]; return std::make_shared<arrow::StringArray>(static_cast<int64_t>(input_view.size()), offset_buffer, data_buffer, fetch_mask_buffer(input_view, ar_mr, stream), static_cast<int64_t>(input_view.null_count())); } template <> std::shared_ptr<arrow::Array> dispatch_to_arrow::operator()<cudf::struct_view>( column_view input, cudf::type_id, column_metadata const& metadata, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { CUDF_EXPECTS(metadata.children_meta.size() == static_cast<std::size_t>(input.num_children()), "Number of field names and number of children doesn't match\n"); std::unique_ptr<column> tmp_column = nullptr; if (input.offset() != 0) { tmp_column = std::make_unique<cudf::column>(input); } column_view input_view = (tmp_column != nullptr) ? 
tmp_column->view() : input; auto child_arrays = fetch_child_array(input_view, metadata.children_meta, ar_mr, stream); auto mask = fetch_mask_buffer(input_view, ar_mr, stream); std::vector<std::shared_ptr<arrow::Field>> fields; std::transform(child_arrays.cbegin(), child_arrays.cend(), metadata.children_meta.cbegin(), std::back_inserter(fields), [](auto const array, auto const meta) { return std::make_shared<arrow::Field>( meta.name, array->type(), array->null_count() > 0); }); auto dtype = std::make_shared<arrow::StructType>(fields); return std::make_shared<arrow::StructArray>(dtype, static_cast<int64_t>(input_view.size()), child_arrays, mask, static_cast<int64_t>(input_view.null_count())); } template <> std::shared_ptr<arrow::Array> dispatch_to_arrow::operator()<cudf::list_view>( column_view input, cudf::type_id, column_metadata const& metadata, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { std::unique_ptr<column> tmp_column = nullptr; if ((input.offset() != 0) or ((input.num_children() == 2) and (input.child(0).size() - 1 != input.size()))) { tmp_column = std::make_unique<cudf::column>(input); } column_view input_view = (tmp_column != nullptr) ? tmp_column->view() : input; auto children_meta = metadata.children_meta.empty() ? std::vector<column_metadata>{{}, {}} : metadata.children_meta; auto child_arrays = fetch_child_array(input_view, children_meta, ar_mr, stream); if (child_arrays.empty()) { return std::make_shared<arrow::ListArray>(arrow::list(arrow::null()), 0, nullptr, nullptr); } auto offset_buffer = child_arrays[0]->data()->buffers[1]; auto data = child_arrays[1]; return std::make_shared<arrow::ListArray>(arrow::list(data->type()), static_cast<int64_t>(input_view.size()), offset_buffer, data, fetch_mask_buffer(input_view, ar_mr, stream), static_cast<int64_t>(input_view.null_count())); } template <> std::shared_ptr<arrow::Array> dispatch_to_arrow::operator()<cudf::dictionary32>( column_view input, cudf::type_id, column_metadata const& metadata, arrow::MemoryPool* ar_mr, rmm::cuda_stream_view stream) { // Arrow dictionary requires indices to be signed integer std::unique_ptr<column> dict_indices = cast(cudf::dictionary_column_view(input).get_indices_annotated(), cudf::data_type{type_id::INT32}, stream, rmm::mr::get_current_device_resource()); auto indices = dispatch_to_arrow{}.operator()<int32_t>( dict_indices->view(), dict_indices->type().id(), {}, ar_mr, stream); auto dict_keys = cudf::dictionary_column_view(input).keys(); auto dictionary = type_dispatcher(dict_keys.type(), dispatch_to_arrow{}, dict_keys, dict_keys.type().id(), metadata.children_meta.empty() ? column_metadata{} : metadata.children_meta[0], ar_mr, stream); return std::make_shared<arrow::DictionaryArray>( arrow::dictionary(indices->type(), dictionary->type()), indices, dictionary); } } // namespace std::shared_ptr<arrow::Table> to_arrow(table_view input, std::vector<column_metadata> const& metadata, rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr) { CUDF_EXPECTS((metadata.size() == static_cast<std::size_t>(input.num_columns())), "columns' metadata should be equal to number of columns in table"); std::vector<std::shared_ptr<arrow::Array>> arrays; std::vector<std::shared_ptr<arrow::Field>> fields; std::transform( input.begin(), input.end(), metadata.begin(), std::back_inserter(arrays), [&](auto const& c, auto const& meta) { return c.type().id() != type_id::EMPTY ? 
type_dispatcher( c.type(), detail::dispatch_to_arrow{}, c, c.type().id(), meta, ar_mr, stream) : std::make_shared<arrow::NullArray>(c.size()); }); std::transform( arrays.begin(), arrays.end(), metadata.begin(), std::back_inserter(fields), [](auto const& array, auto const& meta) { return arrow::field(meta.name, array->type()); }); auto result = arrow::Table::Make(arrow::schema(fields), arrays); // synchronize the stream because after the return the data may be accessed from the host before // the above `cudaMemcpyAsync` calls have completed their copies (especially if pinned host // memory is used). stream.synchronize(); return result; } } // namespace detail std::shared_ptr<arrow::Table> to_arrow(table_view input, std::vector<column_metadata> const& metadata, arrow::MemoryPool* ar_mr) { CUDF_FUNC_RANGE(); return detail::to_arrow(input, metadata, rmm::cuda_stream_default, ar_mr); } } // namespace cudf
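//////////////////////////////////////////////////////////////////////////////
// Standalone illustration (not part of libcudf): the decimal64 specialization
// above widens each signed 64-bit value to the 128-bit representation expected by
// arrow::Decimal128Array by storing the value in the low 64-bit word and its sign
// extension (0 or -1) in the high word, two words per element. The toy kernel
// below reproduces just that widening step; buffer names are illustrative and the
// low-word/high-word ordering mirrors what the lambda above writes.
//////////////////////////////////////////////////////////////////////////////
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void widen_to_decimal128(const int64_t* in, int64_t* out, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  out[2 * i]     = in[i];               // low 64 bits: the value itself
  out[2 * i + 1] = in[i] < 0 ? -1 : 0;  // high 64 bits: sign extension
}

int main()
{
  const int n = 4;
  int64_t h_in[n]      = {123, -1, 0, INT64_MIN};
  int64_t h_out[2 * n] = {};
  int64_t *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, sizeof(h_out));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  widen_to_decimal128<<<1, 32>>>(d_in, d_out, n);
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i)
    printf("%lld -> lo=%lld hi=%lld\n",
           (long long)h_in[i], (long long)h_out[2 * i], (long long)h_out[2 * i + 1]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}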
namespace cg = cooperative_groups; #include <helper_string.h> #include "SobelFilter_kernels.h" // Texture object for reading image cudaTextureObject_t texObject; extern __shared__ unsigned char LocalBlock[]; static cudaArray *array = NULL; #define RADIUS 1 #ifdef FIXED_BLOCKWIDTH #define BlockWidth 80 #define SharedPitch 384 #endif // This will output the proper CUDA error strings in the event that a CUDA host // call returns an error #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } __device__ unsigned char ComputeSobel(unsigned char ul, // upper left unsigned char um, // upper middle unsigned char ur, // upper right unsigned char ml, // middle left unsigned char mm, // middle (unused) unsigned char mr, // middle right unsigned char ll, // lower left unsigned char lm, // lower middle unsigned char lr, // lower right float fScale) { short Horz = ur + 2 * mr + lr - ul - 2 * ml - ll; short Vert = ul + 2 * um + ur - ll - 2 * lm - lr; short Sum = (short)(fScale * (abs((int)Horz) + abs((int)Vert))); if (Sum < 0) { return 0; } else if (Sum > 0xff) { return 0xff; } return (unsigned char)Sum; } __global__ void SobelShared(uchar4 *pSobelOriginal, unsigned short SobelPitch, #ifndef FIXED_BLOCKWIDTH short BlockWidth, short SharedPitch, #endif short w, short h, float fScale, cudaTextureObject_t tex) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); short u = 4 * blockIdx.x * BlockWidth; short v = blockIdx.y * blockDim.y + threadIdx.y; short ib; int SharedIdx = threadIdx.y * SharedPitch; for (ib = threadIdx.x; ib < BlockWidth + 2 * RADIUS; ib += blockDim.x) { LocalBlock[SharedIdx + 4 * ib + 0] = tex2D<unsigned char>( tex, (float)(u + 4 * ib - RADIUS + 0), (float)(v - RADIUS)); LocalBlock[SharedIdx + 4 * ib + 1] = tex2D<unsigned char>( tex, (float)(u + 4 * ib - RADIUS + 1), (float)(v - RADIUS)); LocalBlock[SharedIdx + 4 * ib + 2] = tex2D<unsigned char>( tex, (float)(u + 4 * ib - RADIUS + 2), (float)(v - RADIUS)); LocalBlock[SharedIdx + 4 * ib + 3] = tex2D<unsigned char>( tex, (float)(u + 4 * ib - RADIUS + 3), (float)(v - RADIUS)); } if (threadIdx.y < RADIUS * 2) { // // copy trailing RADIUS*2 rows of pixels into shared // SharedIdx = (blockDim.y + threadIdx.y) * SharedPitch; for (ib = threadIdx.x; ib < BlockWidth + 2 * RADIUS; ib += blockDim.x) { LocalBlock[SharedIdx + 4 * ib + 0] = tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 0), (float)(v + blockDim.y - RADIUS)); LocalBlock[SharedIdx + 4 * ib + 1] = tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 1), (float)(v + blockDim.y - RADIUS)); LocalBlock[SharedIdx + 4 * ib + 2] = tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 2), (float)(v + blockDim.y - RADIUS)); LocalBlock[SharedIdx + 4 * ib + 3] = tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 3), (float)(v + blockDim.y - RADIUS)); } } cg::sync(cta); u >>= 2; // index as uchar4 from here uchar4 *pSobel = (uchar4 *)(((char *)pSobelOriginal) + v * SobelPitch); SharedIdx = threadIdx.y * SharedPitch; for (ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x) { unsigned char pix00 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 0]; unsigned char pix01 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 1]; unsigned char pix02 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 2]; 
unsigned char pix10 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 0]; unsigned char pix11 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 1]; unsigned char pix12 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 2]; unsigned char pix20 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 0]; unsigned char pix21 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 1]; unsigned char pix22 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 2]; uchar4 out; out.x = ComputeSobel(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); pix00 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 3]; pix10 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 3]; pix20 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 3]; out.y = ComputeSobel(pix01, pix02, pix00, pix11, pix12, pix10, pix21, pix22, pix20, fScale); pix01 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 4]; pix11 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 4]; pix21 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 4]; out.z = ComputeSobel(pix02, pix00, pix01, pix12, pix10, pix11, pix22, pix20, pix21, fScale); pix02 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 5]; pix12 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 5]; pix22 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 5]; out.w = ComputeSobel(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); if (u + ib < w / 4 && v < h) { pSobel[u + ib] = out; } } cg::sync(cta); } __global__ void SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch, int w, int h, float fscale, cudaTextureObject_t tex) { unsigned char *pSobel = (unsigned char *)(((char *)pSobelOriginal) + blockIdx.x * Pitch); for (int i = threadIdx.x; i < w; i += blockDim.x) { pSobel[i] = min( max((tex2D<unsigned char>(tex, (float)i, (float)blockIdx.x) * fscale), 0.f), 255.f); } } __global__ void SobelTex(Pixel *pSobelOriginal, unsigned int Pitch, int w, int h, float fScale, cudaTextureObject_t tex) { unsigned char *pSobel = (unsigned char *)(((char *)pSobelOriginal) + blockIdx.x * Pitch); for (int i = threadIdx.x; i < w; i += blockDim.x) { unsigned char pix00 = tex2D<unsigned char>(tex, (float)i - 1, (float)blockIdx.x - 1); unsigned char pix01 = tex2D<unsigned char>(tex, (float)i + 0, (float)blockIdx.x - 1); unsigned char pix02 = tex2D<unsigned char>(tex, (float)i + 1, (float)blockIdx.x - 1); unsigned char pix10 = tex2D<unsigned char>(tex, (float)i - 1, (float)blockIdx.x + 0); unsigned char pix11 = tex2D<unsigned char>(tex, (float)i + 0, (float)blockIdx.x + 0); unsigned char pix12 = tex2D<unsigned char>(tex, (float)i + 1, (float)blockIdx.x + 0); unsigned char pix20 = tex2D<unsigned char>(tex, (float)i - 1, (float)blockIdx.x + 1); unsigned char pix21 = tex2D<unsigned char>(tex, (float)i + 0, (float)blockIdx.x + 1); unsigned char pix22 = tex2D<unsigned char>(tex, (float)i + 1, (float)blockIdx.x + 1); pSobel[i] = ComputeSobel(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); } } extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp) { cudaChannelFormatDesc desc; if (Bpp == 1) { desc = cudaCreateChannelDesc<unsigned char>(); } else { desc = cudaCreateChannelDesc<uchar4>(); } checkCudaErrors(cudaMallocArray(&array, &desc, iw, ih)); checkCudaErrors(cudaMemcpy2DToArray( array, 0, 0, data, iw * Bpp * sizeof(Pixel), iw * Bpp * sizeof(Pixel), ih, cudaMemcpyHostToDevice)); cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = array; cudaTextureDesc 
texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = false; texDescr.filterMode = cudaFilterModePoint; texDescr.addressMode[0] = cudaAddressModeWrap; texDescr.readMode = cudaReadModeElementType; checkCudaErrors( cudaCreateTextureObject(&texObject, &texRes, &texDescr, NULL)); } extern "C" void deleteTexture(void) { checkCudaErrors(cudaFreeArray(array)); checkCudaErrors(cudaDestroyTextureObject(texObject)); } // Wrapper for the __global__ call that sets up the texture and threads extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale) { switch (mode) { case SOBELDISPLAY_IMAGE: SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale, texObject); break; case SOBELDISPLAY_SOBELTEX: SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale, texObject); break; case SOBELDISPLAY_SOBELSHARED: { dim3 threads(16, 4); #ifndef FIXED_BLOCKWIDTH int BlockWidth = 80; // must be divisible by 16 for coalescing #endif dim3 blocks = dim3(iw / (4 * BlockWidth) + (0 != iw % (4 * BlockWidth)), ih / threads.y + (0 != ih % threads.y)); int SharedPitch = ~0x3f & (4 * (BlockWidth + 2 * RADIUS) + 0x3f); int sharedMem = SharedPitch * (threads.y + 2 * RADIUS); // for the shared kernel, width must be divisible by 4 iw &= ~3; SobelShared<<<blocks, threads, sharedMem>>>((uchar4 *)odata, iw, #ifndef FIXED_BLOCKWIDTH BlockWidth, SharedPitch, #endif iw, ih, fScale, texObject); } break; } }
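////////////////////////////////////////////////////////////////////////////////
// Illustrative host-side driver (not part of the original sample): a minimal
// sketch of how the extern "C" wrappers above can be chained, assuming an
// 8-bit grey-scale input (Bpp == 1) already loaded into h_image. The helper
// name runSobelOnce and its arguments are placeholders introduced here.
////////////////////////////////////////////////////////////////////////////////
extern "C" void runSobelOnce(Pixel *h_image, Pixel *h_result, int iw, int ih) {
  Pixel *d_result = NULL;
  checkCudaErrors(cudaMalloc((void **)&d_result, iw * ih * sizeof(Pixel)));

  // Upload the input image into the CUDA array and create the texture object:
  setupTexture(iw, ih, h_image, 1);

  // Run the texture-based Sobel kernel and copy the result back to the host:
  sobelFilter(d_result, iw, ih, SOBELDISPLAY_SOBELTEX, 1.0f);
  checkCudaErrors(cudaMemcpy(h_result, d_result, iw * ih * sizeof(Pixel),
                             cudaMemcpyDeviceToHost));

  // Release the texture object, the CUDA array and the output buffer:
  deleteTexture();
  checkCudaErrors(cudaFree(d_result));
}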
/**
* \mainpage MC-GPU v1.1 * * * \b MC-GPU is an x ray transport simulation code that can generate radiographic * projection images and computed tomography (CT) scans of voxelized objects, * including realistic human anatomy phantoms. * * The code implements a massively multi-threaded Monte Carlo simulation algorithm * for the transport of x rays in a voxelized geometry. The program has been * developed using the \b CUDA programming model and the simulation can be executed in * parallel in a state-of-the-art GPU from \b NVIDIA, giving an speed up of the order * of 15-25 times, compared to a CPU execution. The x ray interaction models and * cross sections have been adapted from \b PENELOPE \b 2006. * Currently, the code does not transport secondary electrons and the electrons * that would be created in photoelectric and Compton events are assumed to be * locally absorbed (dose is not reported). * * The MC-GPU code has been described in different scientific publications. A brief * description of the code features is given below. This description has been taken * from the main paper that can be cited to refer to this code: * * \code * Andreu Badal and Aldo Badano, "Accelerating Monte Carlo simulations of photon transport in a voxelized * geometry using a massively parallel Graphics Processing Unit", Medical Physics 36, pp. 4878–4880 (2009) * \endcode * * This code is still in development, please report to the authors any issue/bug * that you may encounter. Feel free to suggest improvements to the code too. * * * \section sec_disc DISCLAIMER * * This software and documentation (the "Software") were developed at the Food and * Drug Administration (\b FDA) by employees of the Federal Government in the course * of their official duties. Pursuant to Title 17, Section 105 of the United States * Code, this work is not subject to copyright protection and is in the public * domain. Permission is hereby granted, free of charge, to any person obtaining a * copy of the Software, to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, distribute, * sublicense, or sell copies of the Software or derivatives, and to permit persons * to whom the Software is furnished to do so. FDA assumes no responsibility * whatsoever for use by other parties of the Software, its source code, * documentation or compiled executables, and makes no guarantees, expressed or * implied, about its quality, reliability, or any other characteristic. Further, * use of this code in no way implies endorsement by the FDA or confers any * advantage in regulatory decisions. Although this software can be redistributed * and/or modified freely, we ask that any derivative works bear some notice that * they are derived from it, and any modified versions bear some notice that they * have been modified. * * \section sec_Intro Code features * * MC-GPU does not currently simulate the transport of electrons. * The interactions between the photons and the material objects are simulated * using the well-known interaction sampling models from the PENELOPE 2006 * subroutine package. * * In order to speed up the ray-tracing of the code and minimize the access to the * slow GPU main memory, the photon trajectories across the voxels are computed * using the Woodcock tracking algorithm. * With this technique the photons perceive the geometry as a uniform medium * composed of the material of the most attenuating voxel. 
* In this way, the voxel boundaries do not have to be explicitly calculated and * multiple voxels can be crossed in a single step. * To keep the simulation unbiased, some of the interactions are considered * "virtual" (i.e., do not change the photon energy or direction of movement), * depending on the actual energy and the material at the interaction site. * In a typical simulation, several thousand threads are launched simultaneously in * the GPU, each one of them simulating a batch of 10000, or more, photon tracks. * * The random number generator used in PENELOPE, ranecu, is also used in the GPU * program. * To ensure that the simulated tracks are not correlated, each thread initializes * the generator to a unique position in the random sequence, far enough from the * other threads, using the algorithm implemented in the seedsMLCG code. * * The new code is currently used in the study of scatter in x-ray imaging and * includes a tally to generate radiographic images. * The image is formed by counting the energy that enters a user-defined 2D grid of * pixels, which is a simple approximation to a noise-free flat-panel detector with * 100% detection efficiency; the pixel values have units of eV/cm^2. * Four different images are reported at the end of the simulation, corresponding * to the signal produced by non-scattered, single Compton, single Rayleigh, and * multi-scattered photons. * The radiation source is implemented as a point source emitting monoenergetic * photons within a fan beam, producing a rectangular field on the detector * equivalent to a collimated cone beam. * * * \section sec_CPU Code compilation and execution * * MC-GPU has been tested only in the Linux operating system. * A Makefile script is provided to compile the MC-GPU code in Linux. * The CUDA libraries and the GNU GCC compiler must be previously installed. * The Makefile may have to be edited to modify the library path. * * A README text file is provided with the MC-GPU source code. Read this file for * more information on the code usage. An example simulation input file is also * provided. * * MC-GPU uses CUDA to access the GPU but all the actual computations are coded * in standard C code. * All the CUDA specific commands are enclosed within preprocessor if statements. * Defining the pre-processor variable "USING_CUDA" (i.e., compiling with * "-DUSING_CUDA") the particle transport is executed in parallel in an * NVIDIA GPU using CUDA. Otherwise, the code is sequentially executed in * the CPU. * * * \section sec_CT Parallel simulation of CT scans: * * From version 1.1, MC-GPU allows the simulation of a CT scan. The CT is * simulated generating multiple projection images around the static voxelized * geometry. To speed up the CT simulation, the MPI library is used to address * multiple GPUs and obtain multiple projections in parallel. In order to * activate the MPI code, the pre-processor variable "USING_MPI" has to be * defined (ie, compiling with "-DUSING_MPI"). * To use the code in parallel in N GPUs (in a single computer), the user * has to run the program with N MPI threads in the CPU (eg, * "mpirun -np 4 ./MC-GPU.x MC-GPU.in"). Each thread will get a unique id in * the CPU (myID=0->N) and will address a unique GPU. The CT simulation will * then be split so that the threads simulate consecutive projections * independently, avoiding any intercommunication between threads. 
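 *
 * As an illustration only of the Woodcock tracking step described in the
 * "Code features" section above (the names below are placeholders, not the
 * actual kernel variables), one step of the photon transport can be sketched
 * as:
 *
 * \code
 *    // Sample a free flight using the minimum mean free path of the geometry:
 *    s = -mfp_min(E) * log(ranecu_rand(&seed));
 *    move_photon(&position, direction, s);
 *    // Accept a real interaction with probability mfp_min/mfp_local; otherwise
 *    // the interaction is "virtual" and the photon continues unchanged:
 *    if (ranecu_rand(&seed) < mfp_min(E) * inverse_mfp_total(material, density, E))
 *       sample_Compton_Rayleigh_or_photoelectric();
 * \endcode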
* * * * * * @file MC-GPU_v1.1.cu * @author Andreu Badal (Andreu.Badal-Soler@fda.hhs.gov) * @date 2010/06/25 * -- First version: 2009/03/17 */ //////////////////////////////////////////////////////////////////////////////////////// // *** Include header file with the structures and functions declarations #include <MC-GPU_v1.1.h> // *** Include the computing kernel: #include <MC-GPU_kernel_v1.1.cu> //////////////////////////////////////////////////////////////////////////////// //! Main program to transport x rays in a 3D voxel geometry using the GPU. //! This function reads the description of the simulation from an external file //! given in the command line. This input file defines the number of particles to //! simulate, the characteristics of the x-ray source and the detector, the number //! and spacing of the projections (if simulating a CT), the location of the //! material files containing the interaction mean free paths, and the location //! of the voxelized geometry file. //! //! @author Andreu Badal //! @date 2010/03/19 //! //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { #ifdef USING_MPI // -- Using MPI to address multiple GPUs in the same workstation (use only to simulate a CT scan). int myID = -88, numprocs = -99; MPI_Init(&argc, &argv); // Init MPI and get the current thread ID MPI_Comm_rank(MPI_COMM_WORLD, &myID); MPI_Comm_size(MPI_COMM_WORLD, &numprocs); printf(" *** MPI run: myId=%d , numprocs=%d\n",myID,numprocs); fflush(stdout); // Clear the screen output buffer MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads #else int myID = 0, numprocs = 1; // Only one CPU thread used when MPI is not activated (multiple projections will be simulated sequentially). #endif // *** Declare the arrays and structures that will contain the simulation data: struct voxel_struct voxel_data; // Define the geometric constants of the voxel file struct source_struct source_data; // Define the particles source struct detector_struct detector_data; // Define an x ray detector struct linear_interp mfp_table_data; // Constant data for the linear interpolation struct compton_struct compton_table; // Structure containing Compton sampling data (to be copied to CONSTANT memory) struct rayleigh_struct rayleigh_table; // Structure containing Rayleigh sampling data (to be copied to CONSTANT memory) float2 *voxel_mat_dens = NULL; // Poiter where voxels array will be allocated unsigned int voxel_mat_dens_bytes = 0; // Size (in bytes) of the voxels array (using unsigned int to allocate up to 4.2GBytes) float density_max[MAX_MATERIALS]; float density_nominal[MAX_MATERIALS]; unsigned long long int *image = NULL; // Poiter where image array will be allocated int image_bytes = -1; // Size of the image array int mfp_table_bytes = -1, mfp_Woodcock_table_bytes = -1; // Size of the table arrays float2 *mfp_Woodcock_table = NULL; // Linear interpolation data for the Woodcock mean free path [cm] float3 *mfp_table_a = NULL, *mfp_table_b = NULL; // Linear interpolation data for 3 different interactions: // (1) inverse total mean free path (divided by density, cm^2/g) // (2) inverse Compton mean free path (divided by density, cm^2/g) // (3) inverse Rayleigh mean free path (divided by density, cm^2/g) #ifdef USING_CUDA // - MASTER_THREAD == "if(0==myID)": macro to print the messages just once when using MPI threads; it has no effect if MPI is not used. 
MASTER_THREAD printf ("\n *** CUDA SIMULATION IN THE GPU ***\n"); #else MASTER_THREAD printf ("\n *** SIMULATION IN THE CPU ***\n"); #endif time_t current_time = time(NULL); // Get current time (in seconds) MASTER_THREAD printf("\n****** Code execution started on: %s\n", ctime(&current_time)); MASTER_THREAD printf(" -- INITIALIZATION phase:\n"); // -- Start time counter: clock_t clock_start, clock_end; // (requires standard header <time.h>) clock_start = clock(); // Get current clock counter unsigned long long int total_histories; int histories_per_thread, seed_input, num_threads_per_block, gpu_id, num_projections; double D_angle, angularROI_0, angularROI_1, initial_angle; char file_name_voxels[250], file_name_materials[MAX_MATERIALS][250], file_name_output[250]; // *** Read the input file given in the command line and return the significant data: read_input(argc, argv, myID, &total_histories, &seed_input, &gpu_id, &num_threads_per_block, &histories_per_thread, &detector_data, &image, &image_bytes, &source_data, file_name_voxels, file_name_materials, file_name_output, &num_projections, &D_angle, &angularROI_0, &angularROI_1, &initial_angle); float3 detector_center; detector_center.x = source_data.position[0].x + source_data.direction[0].x * detector_data.sdd; // Set the center of the detector straight ahead of the focal spot. detector_center.y = source_data.position[0].y + source_data.direction[0].y * detector_data.sdd; detector_center.z = source_data.position[0].z + source_data.direction[0].z * detector_data.sdd; MASTER_THREAD { printf(" total_histories = %lld\n", total_histories); printf(" random seed = %d\n", seed_input); printf(" source energy = %.3f keV\n", (source_data.energy/1.0e3f)); // printf(" azimuthal, polar apertures = %.6f , %.6f degrees\n", 2.0*atan(source_data.tan_phi_semiaperture)*RAD2DEG, 2.0*atan(source_data.tan_theta_semiaperture)*RAD2DEG); printf(" azimuthal (phi), polar apertures = %.6f , %.6f degrees\n", source_data.D_phi*RAD2DEG, 2.0*(90.0 - acos(source_data.cos_theta_low)*RAD2DEG) ); printf(" focal spot position = (%f, %f, %f)\n", source_data.position[0].x, source_data.position[0].y, source_data.position[0].z); printf(" source direction = (%f, %f, %f)\n", source_data.direction[0].x, source_data.direction[0].y, source_data.direction[0].z); printf(" initial angle from X = %lf\n", initial_angle*RAD2DEG); printf(" detector center = (%f, %f, %f)\n", detector_center.x, detector_center.y, detector_center.z); printf(" detector low corner (at +Y) = (%f, %f, %f)\n", detector_data.corner_min_rotated_to_Y[0].x, detector_data.corner_min_rotated_to_Y[0].y, detector_data.corner_min_rotated_to_Y[0].z); printf(" source-detector distance = %f cm\n", detector_data.sdd); printf(" number of pixels image = %dx%d\n", detector_data.num_pixels.x, detector_data.num_pixels.y); printf(" pixel size = %.3fx%.3f cm\n", 1.0f/detector_data.inv_pixel_size_X, 1.0f/detector_data.inv_pixel_size_Z); printf(" number of projections = %d\n", num_projections); if (num_projections!=1) { printf(" angle between projections = %lf\n", D_angle*RAD2DEG); printf(" angular region of interest = [%lf,%lf] degrees\n", angularROI_0*RAD2DEG, angularROI_1*RAD2DEG); } printf(" Input voxel file = %s\n", file_name_voxels); printf(" Output image file = %s\n", file_name_output); fflush(stdout); } // *** Set the detectors and sources for the CT trajectory (if needed, ie, for more than one projection): if (num_projections != 1) set_CT_trajectory(myID, num_projections, D_angle, angularROI_0, angularROI_1, &source_data, 
&detector_data); fflush(stdout); // *** Read the voxel data and allocate the density map matrix. Return the maximum density: load_voxels(myID, file_name_voxels, density_max, &voxel_data, &voxel_mat_dens, &voxel_mat_dens_bytes); MASTER_THREAD printf(" Total CPU memory allocated for voxels vector and data structures = %f Mbytes\n", (voxel_mat_dens_bytes+image_bytes+sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp)+2*mfp_table_bytes+sizeof(struct rayleigh_struct)+sizeof(struct compton_struct))/(1024.f*1024.f)); MASTER_THREAD fflush(stdout); // *** Read the material mean free paths and set the interaction table in a "linear_interp" structure: load_material(myID, file_name_materials, density_max, density_nominal, &mfp_table_data, &mfp_Woodcock_table, &mfp_Woodcock_table_bytes, &mfp_table_a, &mfp_table_b, &mfp_table_bytes, &rayleigh_table, &compton_table); // -- Check that the input material tables and the x-ray source are consistent: if ( (source_data.energy < mfp_table_data.e0) || (source_data.energy > (mfp_table_data.e0 + (mfp_table_data.num_values-1)/mfp_table_data.ide)) ) { MASTER_THREAD printf("\n !!ERROR!! The input x-ray source energy (%f eV) is outside the tabulated energy interval (from %f to %f eV)!!\n\n", source_data.energy, mfp_table_data.e0, (mfp_table_data.e0+(mfp_table_data.num_values-1)/mfp_table_data.ide)); exit(-1); } #ifdef USING_MPI if (myID != gpu_id) gpu_id = myID; // Use the GPU with the same number as the CPU thread number (from '0' to 'numprocs-1') else // but skip the GPU number given in the input file (useful if GPU 0 is attached to a display). gpu_id = numprocs; #endif #ifdef USING_CUDA // -- Declare the pointers to the device global memory, when using the GPU: float2 *voxel_mat_dens_device = NULL, *mfp_Woodcock_table_device = NULL; float3 *mfp_table_a_device = NULL, *mfp_table_b_device = NULL; unsigned long long int *image_device = NULL; struct rayleigh_struct *rayleigh_table_device = NULL; struct compton_struct *compton_table_device = NULL; // -- Sets the CUDA enabled GPU that will be used in the simulation, and allocate and copies the simulation data in the GPU global and constant memories. init_CUDA_device(&gpu_id, myID, &voxel_data, &source_data, &detector_data, &mfp_table_data, /*Variables GPU constant memory*/ voxel_mat_dens, &voxel_mat_dens_device, voxel_mat_dens_bytes, /*Variables GPU global memory*/ image, &image_device, image_bytes, mfp_Woodcock_table, &mfp_Woodcock_table_device, mfp_Woodcock_table_bytes, mfp_table_a, mfp_table_b, &mfp_table_a_device, &mfp_table_b_device, mfp_table_bytes, &rayleigh_table, &rayleigh_table_device, &compton_table, &compton_table_device); #endif clock_end = clock(); double time_elapsed_1 = ((double)(clock_end-clock_start))/CLOCKS_PER_SEC; double time_elapsed_2 = 0.0, time_total = 0.0; MASTER_THREAD printf("\n -- INITIALIZATION finished: elapsed time = %.3f s\n", time_elapsed_1); #ifdef USING_MPI fflush(stdout); MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads before starting the MC phase. #endif MASTER_THREAD printf("\n\n -- MONTE CARLO LOOP phase:\n\n"); MASTER_THREAD fflush(stdout); // Clear the screen output buffer // -- Scale the input number of particles taking into account that "histories_per_thread" tracks will be // simulated for each call to "track_particles", ie, each GPU thread. // The total number of particles simulated will be increased to the nearest multiple histories_per_thread. 
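  // Illustrative example of the rounding described above (the numbers are not
  // taken from any input file): with total_histories = 1000003 and
  // histories_per_thread = 5000, total_history_batch becomes 200+1 = 201 and
  // total_histories is raised to 201*5000 = 1005000.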
if(((unsigned long long int)(total_histories/histories_per_thread))>=2147483647) { MASTER_THREAD printf("\n\n !!ERROR!! \"total_histories/histories_per_thread = %lld > 2147483647\": this value can not be represented with a 4 byte integer.\n Please, increase histories_per_thread until the fraction is acceptable. Sorry about that.\n\n", ((unsigned long long int)(total_histories/histories_per_thread))); exit(-1); } int total_history_batch = (int)(total_histories/histories_per_thread); if (0!=(total_histories%histories_per_thread)) { total_history_batch++; // Input value is not multiple of HISTORIES_PER_THREAD: make an extra iteration total_histories = (unsigned long long int)total_history_batch*histories_per_thread; // Total histories will be higher than input value } // *** CT simulation: simulate multiple projections if requested: double current_angle; int num_p; // == current projection number for (num_p=0; num_p<num_projections; num_p++) { #ifdef USING_MPI // -- Distribute the projections to the multiple threads evenly: each thread simulates an angle and skip the following 'numprocs' angles if( (num_p % numprocs) != myID ) { // printf(" ... Thread %d skipped projection=%d, modulo=%d\n", myID, num_p, (num_p % numprocs)); continue; } #endif // -- Check if this projection is inside the angular region of interest current_angle = initial_angle + num_p * D_angle; if (current_angle<0.0) current_angle += 2.0*PI; // Make sure the angle is not negative. else if (current_angle>=(2.0*PI-0.0001)) current_angle -= 2.0*PI; // Make sure the angle is not above or equal to 360 degrees. if ((current_angle < angularROI_0) || (current_angle > angularROI_1)) { printf("\n << Skipping projection #%d >> Angle: %f degrees --> outside the angular region of interest.\n", num_p, current_angle*RAD2DEG); continue; // Cycle loop: do not simulate this projection! } if (num_projections!=1) printf("\n << Simulating Projection %d of %d >> Angle: %lf degrees.\n\n", num_p, num_projections-1, current_angle*RAD2DEG); #ifdef USING_CUDA // --Executing the kernel in the GPU: int total_history_batch_blocks = (int)(total_history_batch/num_threads_per_block); if (0!=(total_history_batch%num_threads_per_block)) { total_history_batch_blocks++; // Value is not multiple of num_threads: execute an extra block of threads total_histories = (unsigned long long int)total_history_batch_blocks*num_threads_per_block*histories_per_thread; // Total histories will be higher than input value } // -- Setup the execution parameters, taking into account the current CUDA specifications: // Warp size: 32, Max number threads per block: 512, Max sizes each dimension of block: 512x512x64, Max sizes each dimension of grid: 65535x65535x1 if (total_history_batch_blocks>65535) { MASTER_THREAD printf("\n\n !!ERROR!! \"total_histories/histories_per_thread/num_threads_per_block ~ %d > 65535\": the GPU can not simulate so many blocks (with a 1D grid).\n Please, increase histories_per_thread or num_threads_per_block until the fraction is acceptable. 
Sorry about that.\n\n", total_history_batch_blocks); exit(-1); } dim3 blocks(total_history_batch_blocks, 1); dim3 threads(num_threads_per_block, 1); printf(" Executing %d blocks of %d threads, with %d histories in each thread: %lld histories in total.\n", total_history_batch_blocks, num_threads_per_block, histories_per_thread, total_histories); fflush(stdout); // -- Execute the kernel unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); clock_start = clock(); track_particles<<<blocks,threads>>>(histories_per_thread, num_p, seed_input, image_device, voxel_mat_dens_device, mfp_Woodcock_table_device, mfp_table_a_device, mfp_table_b_device, rayleigh_table_device, compton_table_device); cudaThreadSynchronize(); // Force the runtime to wait until all device tasks have completed // -- Check if kernel execution generated any error: cutilCheckMsg(" !!Kernel execution failed while simulating particle tracks!! "); cutilCheckError( cutStopTimer( timer)); printf(" ==> CUDA: Kernel execution time in the device: %.3f s \n", 0.001f*cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); cutilSafeCall( cudaMemcpy( image, image_device, image_bytes, cudaMemcpyDeviceToHost) ); // Copy final results to host cutilCheckError( cutStopTimer( timer)); printf(" Time copying results from device to host: %.3f s\n", 0.001f*cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); #else // --Executing the kernel in the CPU: printf(" Executing %d history batches, with %d histories in each batch: %lld histories in total.\n", total_history_batch, histories_per_thread, total_histories); // -- Copy local structures to global struct variables accessible from "track_particles" (__constant__ variables in the GPU): source_data_CONST = source_data; detector_data_CONST = detector_data; voxel_data_CONST = voxel_data; mfp_table_data_CONST = mfp_table_data; clock_start = clock(); int n; for(n=0; n<total_history_batch; n++) { // -- Simulate a particle track initializing the PRNG with the particle number 'n': track_particles(n, histories_per_thread, num_p, seed_input, image, voxel_mat_dens, mfp_Woodcock_table, mfp_table_a, mfp_table_b, &rayleigh_table, &compton_table); } #endif // Get final time and calculate loop execution time: clock_end = clock(); time_elapsed_2 = ((double)(clock_end-clock_start))/CLOCKS_PER_SEC; time_total += time_elapsed_2; // Count total time (in seconds). 
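    // Layout reminder for the 'image' buffer (as allocated in read_input and
    // reset in the loop below): 4 consecutive blocks of pixels_per_image
    // unsigned long long counters holding the non-scattered, single Compton,
    // single Rayleigh and multiple-scatter signals, in that order.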
// printf("\n -- MONTE CARLO LOOP finished: time tallied in MAIN program: %.3f s\n\n", time_elapsed_2); // *** Report the final results: char file_name_output_num_p[253]; if (1==num_projections) strcpy(file_name_output_num_p, file_name_output); // Use the input name for single projection else sprintf(file_name_output_num_p, "%s_%03d", file_name_output, num_p); // Create the output file name with the input name + projection number (3 digits, padding with 0) report_host(file_name_output_num_p, &detector_data, &source_data, image, time_elapsed_2, total_histories, num_p, num_projections, D_angle, initial_angle, myID, numprocs); // *** Clear the image after reporting, unless this is the last projection to simulate: if (num_p<(num_projections-1)) { int pixels_per_image = detector_data.num_pixels.x * detector_data.num_pixels.y; #ifdef USING_CUDA printf(" ==> CUDA: Launching kernel to reset the device image to 0: number of blocks = %d, threads per block = 128\n", (int)ceil(pixels_per_image/128.0f) ); init_image_array_GPU<<<(int)(ceil(pixels_per_image/128.0f)),128>>>(image_device, pixels_per_image); cudaThreadSynchronize(); cutilCheckMsg(" !!Kernel execution failed initializing the image array!! "); // Check if kernel execution generated any error: #else int j; for (j=0; j<pixels_per_image; j++) // INIT IMAGE ARRAY IN THE CPU { image[j ] = (unsigned long long int)(0); image[j+ pixels_per_image] = (unsigned long long int)(0); image[j+2*pixels_per_image] = (unsigned long long int)(0); image[j+3*pixels_per_image] = (unsigned long long int)(0); } #endif } } // [Loop end: iterate for next CT projection] MASTER_THREAD printf("\n -- MONTE CARLO LOOP finished: total time tallied in MAIN program: %.3f s\n", time_total); // *** Clean up RAM memory: free(voxel_mat_dens); free(image); free(mfp_Woodcock_table); free(mfp_table_a); free(mfp_table_b); #ifdef USING_CUDA unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); // -- Clean up GPU device memory: cutilSafeCall(cudaFree(voxel_mat_dens_device)); cutilSafeCall(cudaFree(image_device)); cutilSafeCall(cudaFree(mfp_Woodcock_table_device)); cutilSafeCall(cudaFree(mfp_table_a_device)); cutilSafeCall(cudaFree(mfp_table_b_device)); cudaThreadExit(); cutilCheckError( cutStopTimer( timer)); MASTER_THREAD printf(" ==> CUDA: Time freeing the device memory and ending the GPU threads: %.6f s\n", 0.001f*cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); #endif #ifdef USING_MPI current_time=time(NULL); // Get current time (in seconds) printf(" MPI node %d done! Time: %s\n", myID, ctime(&current_time)); fflush(stdout); // Clear the screen output buffer MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads MPI_Finalize(); // Finalize MPI library: no more MPI calls allowed below. #endif current_time=time(NULL); // Get current time (in seconds) MASTER_THREAD printf("\n****** Code execution finished on: %s\n", ctime(&current_time)); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Read the input file given in the command line and return the significant data. //! Example input file: //! //! 1000000 [Total number of histories to simulate] //! geometry.vox [Voxelized geometry file name] //! material.mat [Material data file name] //! //! @param[in] argc Command line parameters //! @param[in] argv Command line parameters: name opf input file //! @param[out] total_histories Total number of particles to simulate //! 
@param[out] seed_input Input random number generator seed //! @param[out] num_threads_per_block Number of CUDA threads for each GPU block //! @param[out] detector_data //! @param[out] image //! @param[out] source_data //! @param[out] file_name_voxels //! @param[out] file_name_materials //! @param[out] file_name_output //////////////////////////////////////////////////////////////////////////////// void read_input(int argc, char** argv, int myID, unsigned long long int* total_histories, int* seed_input, int* gpu_id, int* num_threads_per_block, int* histories_per_thread, struct detector_struct* detector_data, unsigned long long int** image_ptr, int* image_bytes, struct source_struct* source_data, char* file_name_voxels, char file_name_materials[MAX_MATERIALS][250] , char* file_name_output, int* num_projections, double* D_angle, double* angularROI_0, double* angularROI_1, double* initial_angle) { FILE* file_ptr = NULL; char new_line[250]; char *new_line_ptr = NULL; double dummy_double; // -- Read the input file name from command line, if given (otherwise keep default value): if (2==argc) { file_ptr = fopen(argv[1], "r"); if (NULL==file_ptr) { printf("\n\n !!read_input ERROR!! Input file not found or not readable. Input file name: \'%s\'\n\n", argv[1]); exit(-1); } } else if (argc>2) { printf("\n\n !!read_input ERROR!! Too many input parameter (argc=%d)!! Provide only the input file name.\n\n", argc); exit(-1); } else { printf("\n\n !!read_input ERROR!! Input file name not given as an execution parameter!! Try again...\n\n"); exit(-1); } MASTER_THREAD printf("\n -- Reading the input file \'%s\':\n", argv[1]); do { new_line_ptr = fgets(new_line, 250, file_ptr); // Read full line (max. 250 characters). if (new_line_ptr==NULL) { printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION SIMULATION\'!!\n"); exit(-2); } } while(strstr(new_line,"SECTION SIMULATION")==NULL); // Skip comments and empty lines until the section begins new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%lf", &dummy_double); *total_histories = (unsigned long long int) (dummy_double+0.0001); // Maximum unsigned long long value: 18446744073709551615 new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%d", seed_input); // Set the RANECU PRNG seed (the same seed will be used to init the 2 MLCGs in RANECU) new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%d", gpu_id); // GPU NUMBER WHERE SIMULATION WILL RUN new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%d", num_threads_per_block); // GPU THREADS PER CUDA BLOCK if ((*num_threads_per_block%32)!=0) { printf("\n\n !!read_input ERROR!! The input number of GPU threads per CUDA block must be a multiple of 32 (warp size). Input value: %d !!\n\n", *num_threads_per_block); exit(-2); } new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%d", histories_per_thread); // HISTORIES PER GPU THREAD do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!read_input ERROR!! 
Input file is not readable or does not contain the string \'SECTION SOURCE\'!!\n"); exit(-2); } } while(strstr(new_line,"SECTION SOURCE")==NULL); // Skip comments and empty lines until the section begins new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%f", &(source_data->energy)); // X-RAY ENERGY [eV] new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%f %f %f", &source_data->position[0].x, &source_data->position[0].y, &source_data->position[0].z); // SOURCE POSITION: X Y Z [cm] new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%f %f %f", &source_data->direction[0].x, &source_data->direction[0].y, &source_data->direction[0].z); // SOURCE DIRECTION COSINES: U V W // -- Normalize the input beam direction to 1: dummy_double = 1.0/sqrt((double)(source_data->direction[0].x*source_data->direction[0].x + source_data->direction[0].y*source_data->direction[0].y + source_data->direction[0].z*source_data->direction[0].z)); source_data->direction[0].x = (float)(((double)source_data->direction[0].x)*dummy_double); source_data->direction[0].y = (float)(((double)source_data->direction[0].y)*dummy_double); source_data->direction[0].z = (float)(((double)source_data->direction[0].z)*dummy_double); new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); /* !!DeBuG!! OLD WRONG FAN BEAM: sscanf(new_line, "%f %f", &source_data->tan_phi_semiaperture, &source_data->tan_theta_semiaperture); if ( (source_data->tan_phi_semiaperture>-1.0e-6) && (source_data->tan_theta_semiaperture>-1.0e-6) ) // If we enter a negative angle, the fan beam will cover exactly the detector surface (see below). { source_data->tan_phi_semiaperture = tan(0.5 * source_data->tan_phi_semiaperture*DEG2RAD); // Divide by 2 the input apertures to get semiapertures. source_data->tan_theta_semiaperture = tan(0.5 * source_data->tan_theta_semiaperture*DEG2RAD); // The tan of the semiaperture is the size of the field at distance 1, where we will sample uniform points. } */ // Read input fan beam polar (theta) and azimuthal (phi) aperture angles (deg): double phi_aperture, theta_aperture; sscanf(new_line, "%lf %lf", &phi_aperture, &theta_aperture); if (theta_aperture > 180.0) { printf("\n\n !!read_input ERROR!! Input polar aperture must be in [0,180] deg.!\n"); printf(" theta_aperture = %lf, phi_aperture = %lf\n", theta_aperture, phi_aperture); exit(-2); } if (phi_aperture > 360.0) { printf("\n\n !!read_input ERROR!! Input azimuthal aperture must be in [0,360] deg.!\n"); printf(" theta_aperture = %lf, phi_aperture = %lf\n", theta_aperture, phi_aperture); exit(-2); } // Entering a negative theta_aperture or phi_aperture, the emitted fan beam will cover exactly the detector: see below // *** RECTANGULAR BEAM INITIALIZATION: aperture initially centered at (0,1,0), ie, THETA_0=90, PHI_0=90 // Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat). source_data->cos_theta_low = (float)( cos((90.0 - 0.5*theta_aperture)*DEG2RAD) ); source_data->D_cos_theta = (float)( -2.0*source_data->cos_theta_low ); // Theta aperture is symetric above and below 90 deg source_data->phi_low = (float)( (90.0 - 0.5*phi_aperture)*DEG2RAD ); source_data->D_phi = (float)( phi_aperture*DEG2RAD ); source_data->max_height_at_y1cm = (float) ( tan(0.5*theta_aperture*DEG2RAD) ); do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!read_input ERROR!! 
Input file is not readable or does not contain the string \'SECTION DETECTOR\'!!\n"); exit(-2); } } while(strstr(new_line,"SECTION DETECTOR")==NULL); // Skip comments and empty lines until the section begins new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); trim_name(new_line, file_name_output); // OUTPUT IMAGE FILE NAME (no spaces) new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%d %d", &detector_data->num_pixels.x, &detector_data->num_pixels.y); // NUMBER OF PIXELS IN THE IMAGE: Nx Nz detector_data->total_num_pixels = detector_data->num_pixels.x * detector_data->num_pixels.y; new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%f %f", &detector_data->width_X, &detector_data->height_Z); // IMAGE SIZE (width, height): Dx Dz [cm] detector_data->inv_pixel_size_X = detector_data->num_pixels.x / detector_data->width_X; detector_data->inv_pixel_size_Z = detector_data->num_pixels.y / detector_data->height_Z; new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%f", &detector_data->sdd); // SOURCE-TO-DETECTOR DISTANCE [cm] (detector set in front of the source, normal to the input direction) float3 detector_center; // Center of the detector straight ahead of the focal spot. detector_center.x = source_data->position[0].x + source_data->direction[0].x * detector_data->sdd; detector_center.y = source_data->position[0].y + source_data->direction[0].y * detector_data->sdd; detector_center.z = source_data->position[0].z + source_data->direction[0].z * detector_data->sdd; if ((detector_data->sdd)<1.0e-6) { printf("\n\n !!read_input ERROR!! The source-to-detector distance must be positive. Input: ssd=%f!!\n\n", detector_data->sdd); exit(-2); } /* !!DeBuG!! OLD WRONG FAN BEAM: if ( (source_data->tan_phi_semiaperture < -1.0e-5) || (source_data->tan_theta_semiaperture < -1.0e-5) ) // If we enter a negative angle, the fan beam will cover exactly the detector surface. { source_data->tan_phi_semiaperture = 0.5 * detector_data->width_X / (detector_data->sdd); // Set the aperture to cover the whole detector exactly (detector is at distance ssd, move to 1cm). source_data->tan_theta_semiaperture = 0.5 * detector_data->height_Z / (detector_data->sdd); } */ if ( (theta_aperture < -1.0e-7) || (phi_aperture < -1.0e-7) ) // If we enter a negative angle, the fan beam will cover exactly the detector surface. { theta_aperture= 2.0 * atan(0.5*detector_data->height_Z/(detector_data->sdd)) * RAD2DEG; // Optimum angles phi_aperture = 2.0 * atan(0.5*detector_data->width_X/(detector_data->sdd)) * RAD2DEG; source_data->cos_theta_low = (float)( cos((90.0 - 0.5*theta_aperture)*DEG2RAD) ); source_data->D_cos_theta = (float)( -2.0*source_data->cos_theta_low ); // Theta aperture is symetric above and below 90 deg source_data->phi_low = (float)( (90.0 - 0.5*phi_aperture)*DEG2RAD ); source_data->D_phi = (float)( phi_aperture*DEG2RAD ); source_data->max_height_at_y1cm = (float) ( tan(0.5*theta_aperture*DEG2RAD) ); } do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!read_input ERROR!! 
Input file is not readable or does not contain the string \'SECTION CT SCAN\'!!\n"); exit(-2); } } while(strstr(new_line,"SECTION CT")==NULL); // Skip comments and empty lines until the section begins new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%d", num_projections); // NUMBER OF PROJECTIONS (beam must be perpendicular to Z axis) new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%lf", D_angle); // ANGLE BETWEEN PROJECTIONS [degrees] (360/num_projections for full CT) *D_angle = (*D_angle)*DEG2RAD; // store the angle in radians // Calculate initial source angle: *initial_angle = acos((double)(source_data->direction[0].x)); if (source_data->direction[0].y<0) *initial_angle = -(*initial_angle); // Correct for the fact that positive and negative angles have the same ACOS if (*initial_angle<0.0) *initial_angle = (*initial_angle) + 2.0*PI; // Make sure the angle is not negative, between [0,360) degrees. *initial_angle = (*initial_angle) - PI; // Correct the fact that the source is opposite to the detector (180 degrees difference). if (*initial_angle<0.0) *initial_angle = (*initial_angle) + 2.0*PI; // Make sure the initial angle is not negative, between [0,360) degrees.. new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%lf %lf", angularROI_0, angularROI_1); // ANGLES OF INTEREST (projections outside this interval will be skipped) if (*angularROI_0<-0.001 || *angularROI_1>360.001) { printf("\n\n !!read_input ERROR!! The angles in the angular region of interest must be in the interval [0,360]. Input: %f, %f.\n\n", *angularROI_0, *angularROI_1); // The reconstructed planes are always parallel to the XY plane.\n"); exit(-2); } *angularROI_0 = (*angularROI_0 - 0.0001)*DEG2RAD; // Store the angles of interest in radians, increasing a little the interval to avoid precission problems *angularROI_1 = (*angularROI_1 + 0.0001)*DEG2RAD; if (0 == (*num_projections)) *num_projections = 1; // Zero projections has the same effect as 1 projection (ie, no CT scan rotation). Negative values are allowed and the source rotates in opposite rotation. if ( (fabs(*num_projections) > 1) && (fabs(source_data->direction[0].z)>0.00001f) ) { printf("\n\n !!read_input ERROR!! Sorry, but currently we can only simulate CT scans when the source direction is perpendicular to the Z axis (ie, w=0).\n\n"); // The reconstructed planes are always parallel to the XY plane.\n"); exit(-2); } do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION VOXELIZED GEOMETRY FILE\'!!\n"); exit(-2); } } while(strstr(new_line,"SECTION VOXEL")==NULL); // Skip comments and empty lines until the section begins new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); trim_name(new_line, file_name_voxels); // VOXEL GEOMETRY FILE (penEasy 2008 format) do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!read_input ERROR!! 
Input file is not readable or does not contain the string \'SECTION MATERIAL FILE LIST\'!!\n"); exit(-2); } } while(strstr(new_line,"SECTION MATERIAL")==NULL); // Skip comments and empty lines until the section begins int i; for (i=0; i<MAX_MATERIALS; i++) { new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); if (new_line_ptr==NULL) file_name_materials[i][0]='\n'; // The input file is allowed to finish without defining all the materials else trim_name(new_line, file_name_materials[i]); } // [Finish reading input file] ///////////////////////////////////////////////////////////////////////////// // *** Set the rotation that will bring particles from the detector plane to +Y=(0,+1,0) through a rotation around X and around Z (counter-clock): double rotX, rotZ, cos_rX, cos_rZ, sin_rX, sin_rZ; // rotX = 1.5*PI - acos(source_data->direction.z); // Rotate to +Y = (0,+1,0) --> rotX_0 = 3/2*PI == -PI/2 rotX = acos(source_data->direction[0].z) - 0.5*PI; // Rotate to +Y = (0,+1,0) --> rotX_0 = -PI/2 // rotX = 0.5*PI - acos(source_data->direction.z); // Rotate to +Y = (0,+1,0) --> rotX_0 = PI/2 if ( (source_data->direction[0].x*source_data->direction[0].x + source_data->direction[0].y*source_data->direction[0].y) > 1.0e-8 ) // == u^2+v^2 > 0 { // rotZ = 0.5*PI - acos(source_data->direction.x/sqrt(source_data->direction.x*source_data->direction.x + source_data->direction.y*source_data->direction.y)); if (source_data->direction[0].y >= 0.0f) rotZ = 0.5*PI - acos(source_data->direction[0].x/sqrt(source_data->direction[0].x*source_data->direction[0].x + source_data->direction[0].y*source_data->direction[0].y)); else rotZ = 0.5*PI - (-acos(source_data->direction[0].x/sqrt(source_data->direction[0].x*source_data->direction[0].x + source_data->direction[0].y*source_data->direction[0].y))); } else rotZ = 0.0; // Vector pointing to +Z, do not rotate around Z then. // -- Set the rotation matrix RzRx (called inverse because moves from the correct position to the reference at +Y): cos_rX = cos(rotX); cos_rZ = cos(rotZ); sin_rX = sin(rotX); sin_rZ = sin(rotZ); // Rotation matrix RxRz: detector_data->rot_inv[0][0] = cos_rZ; detector_data->rot_inv[0][1] = -sin_rZ; detector_data->rot_inv[0][2] = 0.0f; detector_data->rot_inv[0][3] = cos_rX*sin_rZ; detector_data->rot_inv[0][4] = cos_rX*cos_rZ; detector_data->rot_inv[0][5] = -sin_rX; detector_data->rot_inv[0][6] = sin_rX*sin_rZ; detector_data->rot_inv[0][7] = sin_rX*cos_rZ; detector_data->rot_inv[0][8] = cos_rX; if ((source_data->direction[0].y > 0.99995f) && (*num_projections==1)) { // Simulating a single projection and initial beam pointing to +Y: no rotation needed!! 
detector_data->rotation_flag = 0; detector_data->corner_min_rotated_to_Y[0].x = detector_center.x; detector_data->corner_min_rotated_to_Y[0].y = detector_center.y; detector_data->corner_min_rotated_to_Y[0].z = detector_center.z; MASTER_THREAD printf(" Source pointing to (0,1,0): maximizing code efficiency -> detector not rotated, initial location in voxels found faster.\n"); // -> the simulation will be faster than for other angles."); } else { // Rotation needed to set the detector perpendicular to +Y: detector_data->rotation_flag = 1; // -- Rotate the detector center to +Y: detector_data->corner_min_rotated_to_Y[0].x = detector_center.x*detector_data->rot_inv[0][0] + detector_center.y*detector_data->rot_inv[0][1] + detector_center.z*detector_data->rot_inv[0][2]; detector_data->corner_min_rotated_to_Y[0].y = detector_center.x*detector_data->rot_inv[0][3] + detector_center.y*detector_data->rot_inv[0][4] + detector_center.z*detector_data->rot_inv[0][5]; detector_data->corner_min_rotated_to_Y[0].z = detector_center.x*detector_data->rot_inv[0][6] + detector_center.y*detector_data->rot_inv[0][7] + detector_center.z*detector_data->rot_inv[0][8]; MASTER_THREAD printf(" Rotations from the input direction to +Y [deg]: rotZ = %f , rotX = %f\n", rotZ*RAD2DEG, rotX*RAD2DEG); } // -- Set the lower corner (minimum) coordinates at the normalized orientation: +Y. The detector has thickness 0. detector_data->corner_min_rotated_to_Y[0].x = detector_data->corner_min_rotated_to_Y[0].x - 0.5*detector_data->width_X; detector_data->corner_min_rotated_to_Y[0].y = detector_data->corner_min_rotated_to_Y[0].y; detector_data->corner_min_rotated_to_Y[0].z = detector_data->corner_min_rotated_to_Y[0].z - 0.5*detector_data->height_Z; ///////////////////////////////////////////////////////////////////////////// // *** Init the fan beam source model: if (1 == detector_data->rotation_flag) { // Initial beam NOT pointing to +Y: rotation is needed to move the sampled vector from (0,1,0) to the given direction!! rotX = 0.5*PI - acos(source_data->direction[0].z); // ! Rotation about X: acos(wsrc)==theta, theta=90 for alpha=0, ie, +Y. rotZ = atan2(source_data->direction[0].y, source_data->direction[0].x) - 0.5*PI; // ! Rotation about Z: initial phi = 90 (+Y). [ATAN2(v,u) = TAN(v/u), with the angle in the correct quadrant. cos_rX = cos(rotX); cos_rZ = cos(rotZ); sin_rX = sin(rotX); sin_rZ = sin(rotZ); // --Rotation around X (alpha) and then around Z (phi): Rz*Rx (oposite of detector rotation) source_data->rot_fan[0][0] = cos_rZ; source_data->rot_fan[0][1] = -cos_rX*sin_rZ; source_data->rot_fan[0][2] = sin_rX*sin_rZ; source_data->rot_fan[0][3] = sin_rZ; source_data->rot_fan[0][4] = cos_rX*cos_rZ; source_data->rot_fan[0][5] = -sin_rX*cos_rZ; source_data->rot_fan[0][6] = 0.0f; source_data->rot_fan[0][7] = sin_rX; source_data->rot_fan[0][8] = cos_rX; MASTER_THREAD printf(" Rotations from +Y to the input direction for the fan beam source model [deg]: rotZ = %f , rotX = %f\n", rotZ*RAD2DEG, rotX*RAD2DEG); } ///////////////////////////////////////////////////////////////////////////// // *** Allocate array for the 4 detected images (non-scattered, Compton, Rayleigh, multiple-scatter): *image_bytes = 4 * sizeof(unsigned long long int)*(detector_data->num_pixels.x)*(detector_data->num_pixels.y); (*image_ptr) = (unsigned long long int*) malloc(*image_bytes); if (*image_ptr==NULL) { printf("\n\n !!malloc ERROR!! 
Not enough memory to allocate %d pixels for the 4 scatter images (%f Mbytes)!!\n\n", ((detector_data->num_pixels.x)*(detector_data->num_pixels.y)), (*image_bytes)/(1024.f*1024.f)); exit(-2); } else { MASTER_THREAD printf(" Array for 4 scatter images correctly allocated (%f Mbytes)\n", (*image_bytes)/(1024.f*1024.f)); } // *** Initialize the images to 0 in the CPU. register int j, pixels_per_image = (detector_data->num_pixels.x * detector_data->num_pixels.y); for (j=0; j<pixels_per_image; j++) { (*image_ptr)[j ] = (unsigned long long int)(0); // Initialize non-scattered image (*image_ptr)[j+ pixels_per_image] = (unsigned long long int)(0); // Initialize Compton image (*image_ptr)[j+2*pixels_per_image] = (unsigned long long int)(0); // Initialize Rayleigh image (*image_ptr)[j+3*pixels_per_image] = (unsigned long long int)(0); // Initialize multiple scattering image } } //////////////////////////////////////////////////////////////////////////////// //! Extract a file name from an input text line, trimming the initial blanks, //! trailing comment (#) and stopping at the first blank (the file name should //! not contain blanks). //! //! @param[in] input_line Input sentence with blanks and a trailing comment //! @param[out] file_name Trimmed file name //////////////////////////////////////////////////////////////////////////////// void trim_name(char* input_line, char* file_name) { int a=0, b=0; // Discard initial blanks: while(' '==input_line[a]) { a++; } // Read file name until a blank or a comment symbol (#) is found: while ((' '!=input_line[a])&&('#'!=input_line[a])) { file_name[b] = input_line[a]; b++; a++; } file_name[b] = '\0'; // Terminate output string } //////////////////////////////////////////////////////////////////////////////// //! Read a line of text and trim initial blancks and trailing comments (#). //! //! @param[in] num Characters to read //! @param[in] file_ptr Pointer to the input file stream //! @param[out] trimmed_line Trimmed line from input file, skipping empty lines and comments //////////////////////////////////////////////////////////////////////////////// char* fgets_trimmed(char* trimmed_line, int num, FILE* file_ptr) { char new_line[250]; char *new_line_ptr = NULL; int a=0, b=0; trimmed_line[0] = '\0'; // Init with a mark that means no file input do { a=0; b=0; new_line_ptr = fgets(new_line, num, file_ptr); // Read new line if (new_line_ptr != NULL) { // Discard initial blanks: while(' '==new_line[a]) { a++; } // Read file until a comment symbol (#) or end-of-line are found: while (('\n'!=new_line[a])&&('#'!=new_line[a])) { trimmed_line[b] = new_line[a]; b++; a++; } } } while(new_line_ptr!=NULL && '\0'==trimmed_line[0]); // Keep reading lines until end-of-file or a line that is not empty or only comment is found trimmed_line[b] = '\0'; // Terminate output string return new_line_ptr; } //////////////////////////////////////////////////////////////////////////////// //! Read the voxel data and allocate the material and density matrix. //! Also find and report the maximum density defined in the geometry. //! // -- Sample voxel geometry file: // // # (comment lines...) // # // # Voxel order: X runs first, then Y, then Z. // # // [SECTION VOXELS HEADER v.2008-04-13] // 411 190 113 No. 
OF VOXELS IN X,Y,Z // 5.000e-02 5.000e-02 5.000e-02 VOXEL SIZE (cm) ALONG X,Y,Z // 1 COLUMN NUMBER WHERE MATERIAL ID IS LOCATED // 2 COLUMN NUMBER WHERE THE MASS DENSITY IS LOCATED // 1 BLANK LINES AT END OF X,Y-CYCLES (1=YES,0=NO) // [END OF VXH SECTION] // 1 0.00120479 // 1 0.00120479 // ... // //! @param[in] file_name_voxels Name of the voxelized geometry file. //! @param[out] density_max Array with the maximum density for each material in the voxels. //! @param[out] voxel_data Pointer to a structure containing the voxel number and size. //! @param[out] voxel_mat_dens_ptr Pointer to the vector with the voxel materials and densities. //////////////////////////////////////////////////////////////////////////////// void load_voxels(int myID, char* file_name_voxels, float* density_max, struct voxel_struct* voxel_data, float2** voxel_mat_dens_ptr, unsigned int* voxel_mat_dens_bytes) { char new_line[250]; char *new_line_ptr = NULL; FILE* file_ptr = fopen(file_name_voxels, "r"); if (file_ptr==NULL) { printf("\n\n !!fopen ERROR!! File %s does not exist!!\n", file_name_voxels); exit(-2); } MASTER_THREAD printf("\n -- Reading voxel file \'%s\':\n",file_name_voxels); MASTER_THREAD fflush(stdout); do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!Reading ERROR!! File is not readable or does not contain the string \'[SECTION VOXELS HEADER\'!!\n"); exit(-2); } } while(strstr(new_line,"[SECTION VOXELS")==NULL); // Skip comments and empty lines until the header begins new_line_ptr = fgets(new_line, 250, file_ptr); // Read full line (max. 250 characters). sscanf(new_line, "%d %d %d",&voxel_data->num_voxels.x, &voxel_data->num_voxels.y, &voxel_data->num_voxels.z); new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line, "%f %f %f", &voxel_data->inv_voxel_size.x, &voxel_data->inv_voxel_size.y, &voxel_data->inv_voxel_size.z); do { new_line_ptr = fgets(new_line, 250, file_ptr); if (new_line_ptr==NULL) { printf("\n\n !!Reading ERROR!! File is not readable or does not contain the string \'[END OF VXH SECTION]\'!!\n"); exit(-2); } } while(strstr(new_line,"[END OF VXH SECTION")==NULL); // Skip rest of the header // -- Store the size of the voxel bounding box (used in the source function): voxel_data->size_bbox.x = voxel_data->num_voxels.x * voxel_data->inv_voxel_size.x; voxel_data->size_bbox.y = voxel_data->num_voxels.y * voxel_data->inv_voxel_size.y; voxel_data->size_bbox.z = voxel_data->num_voxels.z * voxel_data->inv_voxel_size.z; MASTER_THREAD printf(" Number of voxels in the input geometry file: %d x %d x %d = %d\n", voxel_data->num_voxels.x, voxel_data->num_voxels.y, voxel_data->num_voxels.z, (voxel_data->num_voxels.x*voxel_data->num_voxels.y*voxel_data->num_voxels.z)); MASTER_THREAD printf(" Size of the input voxels: %f x %f x %f cm\n", voxel_data->inv_voxel_size.x, voxel_data->inv_voxel_size.y, voxel_data->inv_voxel_size.z); MASTER_THREAD printf(" Voxel bounding box size: %f x %f x %f cm\n", voxel_data->size_bbox.x, voxel_data->size_bbox.y, voxel_data->size_bbox.z); // printf(" The geometry must be given in two columns, with the voxel density in the second column.\n"); // printf(" The X,Y-cycles may, or may not, be separated by blank lines.\n"); // -- Store the inverse of the pixel sides (in cm) to speed up the particle location in voxels. 
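  //    (Illustration only, not the actual kernel code: with the inverse sizes a
  //     photon at (x,y,z) cm, measured from the corner of the bounding box, can
  //     be located with three multiplications instead of three divisions, e.g.
  //        i = (int)(x*inv_voxel_size.x); j = (int)(y*inv_voxel_size.y); k = (int)(z*inv_voxel_size.z); )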
voxel_data->inv_voxel_size.x = 1.0f/(voxel_data->inv_voxel_size.x); voxel_data->inv_voxel_size.y = 1.0f/(voxel_data->inv_voxel_size.y); voxel_data->inv_voxel_size.z = 1.0f/(voxel_data->inv_voxel_size.z); // -- Allocate the voxel matrix and store array size: *voxel_mat_dens_bytes = sizeof(float2)*(voxel_data->num_voxels.x)*(voxel_data->num_voxels.y)*(voxel_data->num_voxels.z); *voxel_mat_dens_ptr = (float2*) malloc(*voxel_mat_dens_bytes); if (*voxel_mat_dens_ptr==NULL) { printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d voxels (%f Mbytes)!!\n\n", (voxel_data->num_voxels.x*voxel_data->num_voxels.y*voxel_data->num_voxels.z), (*voxel_mat_dens_bytes)/(1024.f*1024.f)); exit(-2); } MASTER_THREAD printf("\n -- Initializing the voxel material and density vector (%f Mbytes)\n", (*voxel_mat_dens_bytes)/(1024.f*1024.f)); // -- Read the voxel densities: MASTER_THREAD printf(" Reading the voxel densities... "); int i, j, k, read_lines=0, dummy_material, read_items; float dummy_density; float2 *voxels_ptr = *voxel_mat_dens_ptr; for (k=0; k<MAX_MATERIALS; k++) density_max[k] = -999.0f; // Init array with an impossible low density value for(k=0; k<(voxel_data->num_voxels.z); k++) { for(j=0; j<(voxel_data->num_voxels.y); j++) { for(i=0; i<(voxel_data->num_voxels.x); i++) { read_items = fscanf(file_ptr,"%d %f", &dummy_material, &dummy_density); // Read the next 2 numbers if (read_items!=2) printf("\n !!WARNING!! Expecting to read 2 items (material and density). read_items=%d, read_lines=%d \n", read_items, read_lines); new_line_ptr = fgets(new_line, 250, file_ptr); // Continue reading until end-of-line if (dummy_material>MAX_MATERIALS) { printf("\n\n !!ERROR!! Voxel material number too high: #mat=%d, MAX_MATERIALS=%d\n\n", dummy_material, MAX_MATERIALS); exit(-2); } if (dummy_density > density_max[dummy_material-1]) density_max[dummy_material-1] = dummy_density; // Store maximum density for each material (*voxels_ptr).x = (float)(dummy_material)+0.0001f; // Assign material value as float (the integer value will be recovered by truncation) (*voxels_ptr).y = dummy_density; // Assign density value voxels_ptr++; // Move to next voxel read_lines++; } } } MASTER_THREAD printf("Total number of voxels read: %d\n",read_lines); fclose(file_ptr); // Close input file } //////////////////////////////////////////////////////////////////////////////// //! Read the material input files and set the mean free paths and the "linear_interp" structures. //! Find the material nominal density. Set the Woodcock trick data. // // -- Sample material data file (data obtained from the PENELOPE 2006 database and models): // // [MATERIAL NAME] // Water // [NOMINAL DENSITY (g/cm^3)] // 1.000 // [NUMBER OF DATA VALUES] // 4096 // [MEAN FREE PATHS :: Energy (eV) || Rayleigh | Compton | Photoelectric | Pair-production | TOTAL (cm)] // 1.00000E+03 7.27451E-01 9.43363E+01 2.45451E-04 1.00000E+35 2.45367E-04 // 5.00000E+03 1.80004E+00 8.35996E+00 2.38881E-02 1.00000E+35 2.35089E-02 // 1.00000E+04 4.34941E+00 6.26746E+00 2.02568E-01 1.00000E+35 1.87755E-01 // ... // #[RAYLEIGH INTERACTIONS (RITA sampling of atomic form factor from EPDL database)] // ... // #[COMPTON INTERACTIONS (relativistic impulse model with approximated one-electron analytical profiles)] // ... // //! @param[in] file_name_materials Array with the names of the material files. //! @param[in] density_max maximum density in the geometry (needed to set Woodcock trick) //! @param[out] density_nominal Array with the nominal density of the materials read //! 
@param[out] mfp_table_data Constant values for the linear interpolation //! @param[out] mfp_table_a_ptr First element for the linear interpolation. //! @param[out] mfp_table_b_ptr Second element for the linear interpolation. //////////////////////////////////////////////////////////////////////////////// void load_material(int myID, char file_name_materials[MAX_MATERIALS][250], float* density_max, float* density_nominal, struct linear_interp* mfp_table_data, float2** mfp_Woodcock_table_ptr, int* mfp_Woodcock_table_bytes, float3** mfp_table_a_ptr, float3** mfp_table_b_ptr, int* mfp_table_bytes, struct rayleigh_struct *rayleigh_table_ptr, struct compton_struct *compton_table_ptr) { char new_line[250]; char *new_line_ptr = NULL; int mat, i, bin, input_num_values = 0, input_rayleigh_values = 0, input_num_shells = 0; double delta_e=-99999.0; // -- Init the number of shells to 0 for all materials for (mat=0; mat<MAX_MATERIALS; mat++) compton_table_ptr->noscco[mat] = 0; // --Read the material data files: MASTER_THREAD printf("\n -- Reading the material data files (MAX_MATERIALS=%d):\n", MAX_MATERIALS); for (mat=0; mat<MAX_MATERIALS; mat++) { if ((file_name_materials[mat][0]=='\0') || (file_name_materials[mat][0]=='\n') || (density_max[mat]<0)) // Empty file name, or material not found in the voxels continue; // Re-start loop for next material MASTER_THREAD printf(" Mat %d: File \'%s\'\n", mat, file_name_materials[mat]); // printf(" -- Reading material file #%d: \'%s\'\n", mat, file_name_materials[mat]); FILE* file_ptr = fopen(file_name_materials[mat], "r"); if (file_ptr==NULL) { printf("\n\n !!fopen ERROR!! File %d \'%s\' does not exist!!\n", mat, file_name_materials[mat]); exit(-2); } do { new_line_ptr = fgets(new_line, 250, file_ptr); // Read full line (max. 250 characters). if (new_line_ptr==NULL) { printf("\n\n !!Reading ERROR!! File is not readable or does not contain the string \'[NOMINAL DENSITY\'!!\n"); exit(-2); } } while(strstr(new_line,"[NOMINAL DENSITY")==NULL); // Skip rest of the header // Read the material nominal density: new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line, "# %f", &density_nominal[mat]); MASTER_THREAD printf(" Nominal density = %f g/cm^3; Max mat density in voxels = %f\n", density_nominal[mat], density_max[mat]); // --For the first material, set the number of energy values and allocate table arrays: new_line_ptr = fgets(new_line, 250, file_ptr); new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line, "# %d", &input_num_values); if (0==mat) { mfp_table_data->num_values = input_num_values; MASTER_THREAD printf(" Number of energy values in the mean free path database: %d.\n", input_num_values); // Allocate memory for the linear interpolation arrays: *mfp_Woodcock_table_bytes = sizeof(float2)*input_num_values; *mfp_Woodcock_table_ptr = (float2*) malloc(*mfp_Woodcock_table_bytes); // Allocate space for the 2 parameter table *mfp_table_bytes = sizeof(float3)*input_num_values*MAX_MATERIALS; *mfp_table_a_ptr = (float3*) malloc(*mfp_table_bytes); // Allocate space for the 4 MFP tables *mfp_table_b_ptr = (float3*) malloc(*mfp_table_bytes); *mfp_table_bytes = sizeof(float3)*input_num_values*MAX_MATERIALS; if (input_num_values>MAX_ENERGYBINS) { printf("\n\n !!load_material ERROR!! Too many energy bins (Input bins=%d): increase parameter MAX_ENERGYBINS=%d!!\n\n", input_num_values, MAX_ENERGYBINS); exit(-2); } if ((NULL==*mfp_Woodcock_table_ptr)||(NULL==*mfp_table_a_ptr)||(NULL==*mfp_table_b_ptr)) { printf("\n\n !!malloc ERROR!! 
Not enough memory to allocate the linear interpolation data: %d bytes!!\n\n", (*mfp_Woodcock_table_bytes+2*(*mfp_table_bytes))); exit(-2); } else { MASTER_THREAD printf(" Linear interpolation data correctly allocated (%f Mbytes)\n", (*mfp_Woodcock_table_bytes+2*(*mfp_table_bytes))/(1024.f*1024.f)); } for (i=0; i<input_num_values; i++) { (*mfp_Woodcock_table_ptr)[i].x = 99999999.99f; // Init this array with a huge MFP, the minimum values are calculated below } } else // Materials after first { if (input_num_values != mfp_table_data->num_values) { printf("\n\n !!load_material ERROR!! Incorrect number of energy values given in material \'%s\': input=%d, expected=%d\n",file_name_materials[mat], input_num_values, mfp_table_data->num_values); exit(-2); } } // -- Read the mean free paths (and Rayleigh cumulative prob): new_line_ptr = fgets(new_line, 250, file_ptr); new_line_ptr = fgets(new_line, 250, file_ptr); double d_energy, d_rayleigh, d_compton, d_photelectric, d_total_mfp, d_pmax, e_last=-1.0; for (i=0; i<input_num_values; i++) { new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line," %le %le %le %le %le %le", &d_energy, &d_rayleigh, &d_compton, &d_photelectric, &d_total_mfp, &d_pmax); // Find and store the minimum total MFP at the current energy, for every material's maximum density: float temp_mfp = d_total_mfp*(density_nominal[mat])/(density_max[mat]); if (temp_mfp < (*mfp_Woodcock_table_ptr)[i].x) (*mfp_Woodcock_table_ptr)[i].x = temp_mfp; // Store minimum total mfp [cm] // Store the inverse MFP data points with [num_values rows]*[MAX_MATERIALS columns] (*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].x = 1.0/(d_total_mfp*density_nominal[mat]); // inverse TOTAL mfp * nominal density (*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].y = 1.0/(d_compton *density_nominal[mat]); // inverse Compton mfp * nominal density (*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].z = 1.0/(d_rayleigh *density_nominal[mat]); // inverse Rayleigh mfp * nominal density rayleigh_table_ptr->pmax[i*(MAX_MATERIALS)+mat] = d_pmax; // Store the maximum cumulative probability of atomic form factor F^2 for if (0==i && 0==mat) { mfp_table_data->e0 = d_energy; // Store the first energy of the first material } if (0==i) { if (fabs(d_energy-mfp_table_data->e0)>1.0e-9) { printf("\n\n !!load_material ERROR!! Incorrect first energy value given in material \'%s\': input=%f, expected=%f\n", file_name_materials[mat], d_energy, mfp_table_data->e0); exit(-2); } } else if (1==i) { delta_e = d_energy-e_last; } else if (i>1) { if (((fabs((d_energy-e_last)-delta_e))/delta_e)>0.001) // Tolerate up to a 0.1% relative variation in the delta e (for each bin) to account for possible precission errors reading the energy values { printf(" !!ERROR reading material data!! 
The energy step between mean free path values is not constant!!\n (maybe not enough decimals given for the energy values)\n #value = %d, First delta: %f , New delta: %f, Energy: %f ; Rel.Dif=%f\n", i, delta_e, (d_energy-e_last), d_energy,((fabs((d_energy-e_last)-delta_e))/delta_e)); exit(-2); } } e_last = d_energy; } if (0==mat) MASTER_THREAD printf(" Minimum energy = %f, Maximum = %f; Delta E (1st bin) = %f\n", (mfp_table_data->e0), e_last, delta_e); // -- Store the inverse of delta energy: mfp_table_data->ide = 1.0f/delta_e; // -- Store MFP data slope 'b' (.y for Woodcock): for (i=0; i<(input_num_values-1); i++) { bin = i*MAX_MATERIALS+mat; // Set current bin, skipping MAX_MATERIALS columns (*mfp_table_b_ptr)[bin].x = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].x - (*mfp_table_a_ptr)[bin].x) / delta_e; (*mfp_table_b_ptr)[bin].y = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].y - (*mfp_table_a_ptr)[bin].y) / delta_e; (*mfp_table_b_ptr)[bin].z = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].z - (*mfp_table_a_ptr)[bin].z) / delta_e; } // After maximum energy (last bin), assume constant slope: (*mfp_table_b_ptr)[(input_num_values-1)*MAX_MATERIALS+mat] = (*mfp_table_b_ptr)[(input_num_values-2)*MAX_MATERIALS+mat]; // -- Rescale the 'a' parameter (.x for Woodcock) as if the bin started at energy = 0: we will not have to rescale to the bin minimum energy every time for (i=0; i<input_num_values; i++) { d_energy = mfp_table_data->e0 + i*delta_e; // Set current bin lowest energy value bin = i*MAX_MATERIALS+mat; // Set current bin, skipping MAX_MATERIALS columns (*mfp_table_a_ptr)[bin].x = (*mfp_table_a_ptr)[bin].x - d_energy*(*mfp_table_b_ptr)[bin].x; (*mfp_table_a_ptr)[bin].y = (*mfp_table_a_ptr)[bin].y - d_energy*(*mfp_table_b_ptr)[bin].y; (*mfp_table_a_ptr)[bin].z = (*mfp_table_a_ptr)[bin].z - d_energy*(*mfp_table_b_ptr)[bin].z; } // -- Reading data for RAYLEIGH INTERACTIONS (RITA sampling of atomic form factor from EPDL database): do { new_line_ptr = fgets(new_line, 250, file_ptr); if (feof(file_ptr)!=0) { printf("\n\n !!End-of-file ERROR!! Rayleigh data not found: \"#[DATA VALUES...\" in file \'%s\'. Last line read: %s\n\n", file_name_materials[mat], new_line); exit(-2); } } while(strstr(new_line,"[DATA VALUES")==NULL); // Skip all lines until this text is found new_line_ptr = fgets(new_line, 250, file_ptr); // Read the number of data points in Rayleigh sscanf(new_line, "# %d", &input_rayleigh_values); if (input_rayleigh_values != NP_RAYLEIGH) { printf("\n\n !!ERROR!! The number of values for Rayleigh sampling is different than the allocated space: input=%d, NP_RAYLEIGH=%d. File=\'%s\'\n", input_rayleigh_values, NP_RAYLEIGH, file_name_materials[mat]); exit(-2); } new_line_ptr = fgets(new_line, 250, file_ptr); // Comment line: #[SAMPLING DATA FROM COMMON/CGRA/: X, P, A, B, ITL, ITU] for (i=0; i<input_rayleigh_values; i++) { int itlco_tmp, ituco_tmp; bin = NP_RAYLEIGH*mat + i; new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line," %e %e %e %e %d %d", &(rayleigh_table_ptr->xco[bin]), &(rayleigh_table_ptr->pco[bin]), &(rayleigh_table_ptr->aco[bin]), &(rayleigh_table_ptr->bco[bin]), &itlco_tmp, &ituco_tmp); rayleigh_table_ptr->itlco[bin] = (unsigned char) itlco_tmp; rayleigh_table_ptr->ituco[bin] = (unsigned char) ituco_tmp; } // printf(" -- Rayleigh sampling data read. 
Input values = %d\n",input_rayleigh_values); // -- Reading COMPTON INTERACTIONS data (relativistic impulse model with approximated one-electron analytical profiles): do { new_line_ptr = fgets(new_line, 250, file_ptr); if (feof(file_ptr)!=0) { printf("\n\n !!End-of-file ERROR!! Compton data not found: \"[NUMBER OF SHELLS]\" in file \'%s\'. Last line read: %s\n\n", file_name_materials[mat], new_line); exit(-2); } } while(strstr(new_line,"[NUMBER OF SHELLS")==NULL); // Skip all lines until this text is found new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line, "# %d", &input_num_shells); // Read the NUMBER OF SHELLS if (input_num_shells>MAX_SHELLS) { printf("\n\n !!ERROR!! Too many shells for Compton interactions in file \'%s\': input=%d, MAX_SHELLS=%d\n", file_name_materials[mat], input_num_shells, MAX_SHELLS); exit(-2); } compton_table_ptr->noscco[mat] = input_num_shells; // Store number of shells for this material in structure new_line_ptr = fgets(new_line, 250, file_ptr); // Comment line: #[SHELL INFORMATION FROM COMMON/CGCO/: FCO, UICO, FJ0, KZCO, KSCO] int kzco_dummy, ksco_dummy; for (i=0; i<input_num_shells; i++) { bin = mat + i*MAX_MATERIALS; new_line_ptr = fgets(new_line, 250, file_ptr); sscanf(new_line," %e %e %e %d %d", &(compton_table_ptr->fco[bin]), &(compton_table_ptr->uico[bin]), &(compton_table_ptr->fj0[bin]), &kzco_dummy, &ksco_dummy); } fclose(file_ptr); // Material data read. Close the current material input file } // ["for" loop: continue with next material] // -- Store Woodcock MFP slope in component '.y': for (i=0; i<(mfp_table_data->num_values-1); i++) (*mfp_Woodcock_table_ptr)[i].y = ((*mfp_Woodcock_table_ptr)[i+1].x - (*mfp_Woodcock_table_ptr)[i].x)/delta_e; // -- Rescale the first parameter in component .x for Woodcock for (i=0; i<mfp_table_data->num_values; i++) { (*mfp_Woodcock_table_ptr)[i].x = (*mfp_Woodcock_table_ptr)[i].x - (mfp_table_data->e0 + i*delta_e)*(*mfp_Woodcock_table_ptr)[i].y; } } //////////////////////////////////////////////////////////////////////////////// #ifdef USING_CUDA //////////////////////////////////////////////////////////////////////////////// //! Sets the CUDA enabled GPU that will be used in the simulation. //! Allocates and copies the simulation data in the GPU global and constant memories. //! //////////////////////////////////////////////////////////////////////////////// void init_CUDA_device( int* gpu_id, int myID, /*Variables to GPU constant memory:*/ struct voxel_struct* voxel_data, struct source_struct* source_data, struct detector_struct* detector_data, struct linear_interp* mfp_table_data, /*Variables to GPU global memory:*/ float2* voxel_mat_dens, float2** voxel_mat_dens_device, unsigned int voxel_mat_dens_bytes, unsigned long long int* image, unsigned long long int** image_device, int image_bytes, float2* mfp_Woodcock_table, float2** mfp_Woodcock_table_device, int mfp_Woodcock_table_bytes, float3* mfp_table_a, float3* mfp_table_b, float3** mfp_table_a_device, float3** mfp_table_b_device, int mfp_table_bytes, struct rayleigh_struct* rayleigh_table, struct rayleigh_struct** rayleigh_table_device, struct compton_struct* compton_table, struct compton_struct** compton_table_device ) { int deviceCount, coresPerSM; cutilSafeCall(cudaGetDeviceCount(&deviceCount)); if (0==deviceCount) { printf("\n !!ERROR!! No CUDA enabled GPU detected!!\n\n"); exit(-1); } if ( ((*gpu_id)>(deviceCount-1)) || ((*gpu_id)<0) ) { printf("\n !!WARNING!! 
The input GPU number is not valid: input_GPU=%d, maximum GPU number=%d.\n", (*gpu_id), deviceCount-1); // printf(" The device with the maximum Gflop count will be automatically selected.\n"); // (*gpu_id) = cutGetMaxGflopsDeviceId(); exit(-3); } cutilSafeCall(cudaSetDevice(*gpu_id)); // Set the GPU device. cudaDeviceProp deviceProp; cutilSafeCall(cudaGetDeviceProperties(&deviceProp, *gpu_id)); if (deviceProp.major>99 || deviceProp.minor>99) { printf("\n !!ERROR!! The selected GPU device does not support CUDA!! GPU_id=%d, deviceCount=%d, compute capability=%d.%d\n\n", (*gpu_id), deviceCount, deviceProp.major,deviceProp.minor); exit(-1); } if (deviceProp.major>1) { coresPerSM = 32; // Set number of cores per microprocessor for Fermi GPUs (compute capability 2.x) #ifdef LARGE_CACHE // -- Compute capability > 1: set a large L1 cache for the global memory, reducing the size of the shared memory: // cudaFuncCachePreferShared: shared memory is 48 KB // cudaFuncCachePreferL1: shared memory is 16 KB // cudaFuncCachePreferNone: no preference printf("\n ==> CUDA: LARGE_CACHE defined --> setting a large global memory cache (L1) and a small shared memory (cudaFuncCachePreferL1).\n"); cudaFuncSetCacheConfig(track_particles, cudaFuncCachePreferL1); // !!DeBuG!! Set a large cache instead of a large shared memory. // #else // -- Using default: // printf("\n ==> CUDA: LARGE_CACHE not defined --> setting a large shared memory and a small global memory cache (cudaFuncCachePreferShared).\n"); // cudaFuncSetCacheConfig(track_particles, cudaFuncCachePreferShared); //!!DeBuG!! Setting size of shared memory/global cache #endif } else coresPerSM = 8; // Set number of cores per microprocessor before Fermi (compute capability 1.x) // -- Reading the device properties as shown in NVIDIA's SDK sample code "deviceQuery" printf("\n ==> CUDA: %d CUDA enabled GPU detected! Using device #%d: \"%s\"\n", deviceCount, (*gpu_id), deviceProp.name); printf(" Compute capability: %d.%d, Number multiprocessors: %d, Number cores: %d\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount, coresPerSM*deviceProp.multiProcessorCount); printf(" Clock rate: %.2f GHz, Global memory: %.3f Mbyte, Constant memory: %.2f kbyte\n", deviceProp.clockRate*1.0e-6f, deviceProp.totalGlobalMem/(1024.f*1024.f), deviceProp.totalConstMem/1024.f); printf(" Shared memory per block: %.2f kbyte, Registers per block: %.2f kbyte\n", deviceProp.sharedMemPerBlock/1024.f, deviceProp.regsPerBlock/1024.f); int driverVersion = 0, runtimeVersion = 0; cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version: %d.%d, Runtime Version: %d.%d\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100); if (0!=deviceProp.kernelExecTimeoutEnabled) { printf("\n !!ERROR!! The selected GPU is connected to an X-Win server and the kernel run time is limited to 5 sec. 
Aborting execution!!"); exit(-1); } unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); // -- Allocate the constant variables in the device: cutilSafeCall(cudaMemcpyToSymbol("voxel_data_CONST", voxel_data, sizeof(struct voxel_struct))); cutilSafeCall(cudaMemcpyToSymbol("source_data_CONST", source_data, sizeof(struct source_struct))); cutilSafeCall(cudaMemcpyToSymbol("detector_data_CONST", detector_data, sizeof(struct detector_struct))); cutilSafeCall(cudaMemcpyToSymbol("mfp_table_data_CONST", mfp_table_data, sizeof(struct linear_interp))); double total_mem = sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp); MASTER_THREAD printf(" ==> CUDA: Structures successfully copied to the device. CONSTANT memory used: %lf kbytes (%.1lf%%)\n", total_mem/1024.0, 100.0*total_mem/deviceProp.totalConstMem); // -- Allocate the device memory: cutilSafeCall(cudaMalloc((void**) voxel_mat_dens_device, voxel_mat_dens_bytes)); cutilSafeCall(cudaMalloc((void**) image_device, image_bytes)); cutilSafeCall(cudaMalloc((void**) mfp_Woodcock_table_device, mfp_Woodcock_table_bytes)); cutilSafeCall(cudaMalloc((void**) mfp_table_a_device, mfp_table_bytes)); cutilSafeCall(cudaMalloc((void**) mfp_table_b_device, mfp_table_bytes)); cutilSafeCall(cudaMalloc((void**) rayleigh_table_device, sizeof(struct rayleigh_struct))); cutilSafeCall(cudaMalloc((void**) compton_table_device, sizeof(struct compton_struct))); total_mem = voxel_mat_dens_bytes+image_bytes+mfp_Woodcock_table_bytes+2*mfp_table_bytes+sizeof(struct compton_struct)+sizeof(struct rayleigh_struct); if (*voxel_mat_dens_device==NULL || *image_device==NULL || *mfp_Woodcock_table_device==NULL || *mfp_table_a_device==NULL || *mfp_table_a_device==NULL || *rayleigh_table_device==NULL || *compton_table_device==NULL) { printf("\n cudaMalloc ERROR!! Device global memory not correctly allocated!! (%lf Mbytes)\n", total_mem/(1024.0*1024.0)); exit(-1); } else { MASTER_THREAD printf(" ==> CUDA: Device global memory correctly allocated. 
GLOBAL memory used: %lf Mbytes (%.1lf%%)\n", total_mem/(1024.0*1024.0), 100.0*total_mem/deviceProp.totalGlobalMem); } // --Copy the host memory to the device: cutilSafeCall(cudaMemcpy(*voxel_mat_dens_device, voxel_mat_dens, voxel_mat_dens_bytes, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(*mfp_Woodcock_table_device, mfp_Woodcock_table, mfp_Woodcock_table_bytes, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(*mfp_table_a_device, mfp_table_a, mfp_table_bytes, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(*mfp_table_b_device, mfp_table_b, mfp_table_bytes, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(*rayleigh_table_device, rayleigh_table, sizeof(struct rayleigh_struct), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(*compton_table_device, compton_table, sizeof(struct compton_struct), cudaMemcpyHostToDevice)); // --Init the image array to 0 using a GPU kernel instead of cudaMemcpy: // Simple version: cutilSafeCall( cudaMemcpy( image_device, image, image_bytes, cudaMemcpyHostToDevice) ); int pixels_per_image = detector_data->num_pixels.x * detector_data->num_pixels.y; MASTER_THREAD printf(" ==> CUDA: Launching kernel to initialize device image to 0: number of blocks = %d, threads per block = 128\n", (int)ceil(pixels_per_image/128.0f) ); init_image_array_GPU<<<(int)(ceil(pixels_per_image/128.0f)),128>>>(*image_device, pixels_per_image); cudaThreadSynchronize(); // Force the runtime to wait until all device tasks have completed cutilCheckMsg(" !!Kernel execution failed initializing the image array!! "); // Check if kernel execution generated any error: cutilCheckError(cutStopTimer(timer)); MASTER_THREAD printf(" Time spent allocating and copying memory to the device: %.6f s\n", 0.001f*cutGetTimerValue( timer)); cutilCheckError(cutDeleteTimer(timer)); } #endif //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //! Report the final results, from the host CPU. //! //! @param[in] file_name_output File where tallied image is reported //! @param[in] detector_data Detector description read from the input file (pointer to detector_struct) //! @param[in] image Tallied image (in meV per pixel) //! @param[in] time_elapsed Time elapsed during the main loop execution (in seconds) //! @param[in] total_histories Total number of x-rays simulated //////////////////////////////////////////////////////////////////////////////// int report_host(char* file_name_output, struct detector_struct* detector_data, struct source_struct* source_data, unsigned long long int* image, double time_elapsed, unsigned long long int total_histories, int current_projection, int num_projections, double D_angle, double initial_angle, int myID, int numprocs) { // printf("\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); // printf(" ~~ Simulation performance quick report ~~\n"); // printf(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); // -Find current angle double current_angle = initial_angle+current_projection*D_angle; if (current_angle<0.0) current_angle += 2.0*PI; // Make sure the angle is not negative. else if (current_angle>=2.0*PI) current_angle -= 2.0*PI; // Make sure the angle is not above 360 degrees. // -- Report data: printf("\n *** SIMULATION PERFORMANCE REPORT ***\n"); if(num_projections!=1) // Output the projection angle when simulating a CT: printf(" Projection %d of %d. 
Angle from X axis: %lf\n", current_projection, num_projections-1, current_angle*RAD2DEG); #ifdef USING_MPI printf(" Computed by the MPI thread %d of %d\n", myID+1, numprocs); #endif printf(" Simulated x rays: %lld\n", total_histories); printf(" Simulation time [s]: %.2f\n", time_elapsed); if (time_elapsed>0.000001) printf(" Speed [x-rays/s]: %.2f\n", ((double)total_histories)/time_elapsed); FILE* file_ptr = fopen(file_name_output, "w"); if (file_ptr==NULL) { printf("\n\n !!fopen ERROR!! File %s can not be opened!!\n", file_name_output); exit(-3); } fprintf(file_ptr, "# \n"); #ifdef USING_CUDA fprintf(file_ptr, "# *** SIMULATION IN THE GPU USING CUDA ***\n"); #else fprintf(file_ptr, "# *** SIMULATION IN THE CPU ***\n"); #endif fprintf(file_ptr, "#\n"); fprintf(file_ptr, "# Image created counting the energy arriving at each pixel.\n"); fprintf(file_ptr, "# Pixel value units: eV/cm^2 per history\n"); if(num_projections!=1) // Output the projection angle when simulating a CT: fprintf(file_ptr, "# CT projection %d of %d. Angle from X axis: %lf\n", current_projection, num_projections-1, current_angle*RAD2DEG); #ifdef USING_MPI fprintf(file_ptr, "# Computed by the MPI thread %d of %d\n", myID+1, numprocs); #endif fprintf(file_ptr, "# Pixel size: %lf x %lf = %lf cm^2\n", 1.0/(double)(detector_data->inv_pixel_size_X), 1.0/(double)(detector_data->inv_pixel_size_Z), 1.0/(double)(detector_data->inv_pixel_size_X*detector_data->inv_pixel_size_Z)); fprintf(file_ptr, "# Number of pixels in X and Z: %d %d\n", detector_data->num_pixels.x, detector_data->num_pixels.y); fprintf(file_ptr, "# (X rows given first, a blank line separates the different Z values)\n"); fprintf(file_ptr, "# \n"); fprintf(file_ptr, "# [NON-SCATTERED] [COMPTON] [RAYLEIGH] [MULTIPLE-SCATTING]\n"); fprintf(file_ptr, "# ==========================================================\n"); const double SCALE = 1.0/SCALE_eV; // conversion to eV using the inverse of the constant used in the "tally_image" kernel function (defined in the header file) const double NORM = SCALE * detector_data->inv_pixel_size_X * detector_data->inv_pixel_size_Z / ((double)total_histories); // ==> [eV/cm^2 per history] double energy_noScatter, energy_compton, energy_rayleigh, energy_multiscatter; double energy_integral = 0.0; // Integrate (add) the energy in the image pixels [meV] int pixels_per_image = (detector_data->num_pixels.x*detector_data->num_pixels.y), pixel=0; int i, j; for(j=0; j<detector_data->num_pixels.y; j++) { for(i=0; i<detector_data->num_pixels.x; i++) { energy_noScatter = (double)(image[pixel]); energy_compton = (double)(image[pixel + pixels_per_image]); energy_rayleigh = (double)(image[pixel + 2*pixels_per_image]); energy_multiscatter = (double)(image[pixel + 3*pixels_per_image]); // -- Write the results in an external file; the image corresponding to all particles not written: it has to be infered adding all images fprintf(file_ptr, "%.8lf %.8lf %.8lf %.8lf\n", NORM*energy_noScatter, NORM*energy_compton, NORM*energy_rayleigh, NORM*energy_multiscatter); energy_integral += energy_noScatter + energy_compton + energy_rayleigh + energy_multiscatter; pixel++; } fprintf(file_ptr, "\n"); // Separate rows with an empty line for visualization with gnuplot. 
} fprintf(file_ptr, "# *** Simulation REPORT: ***\n"); fprintf(file_ptr, "# Fraction energy detected: %.3lf%%\n", 100.0*SCALE*(energy_integral/(double)(total_histories))/(double)(source_data->energy)); fprintf(file_ptr, "# Simulated x rays: %lld\n", total_histories); fprintf(file_ptr, "# Simulation time [s]: %.2f\n", time_elapsed); if (time_elapsed>0.000001) fprintf(file_ptr, "# Speed [x-rays/sec]: %.2f\n\n", ((double)total_histories)/time_elapsed); fclose(file_ptr); // Close output file and flush stream printf(" Fraction of initial energy arriving at the detector (for a monoenergetic beam): %.3lf%%\n\n", 100.0*SCALE*(energy_integral/(double)(total_histories))/(double)(source_data->energy)); // !!DeBuG!! Caution: Only valid for a monoenergetic beam! fflush(stdout); return 0; // Report could return not 0 to continue the simulation... !!DeBuG!! } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //! Sets the CT trajectory: store in memory the source and detector rotations //! that are needed to calculate the multiple projections. //! The first projection (0) was previously initialized in function "read_input". //! //! //! ASSUMPTIONS: the CT scan plane must be perpendicular to the Z axis, ie, //! the initial direction of the particles must have w=0! //! /////////////////////////////////////////////////////////////////////////////// void set_CT_trajectory(int myID, int num_projections, double D_angle, double angularROI_0, double angularROI_1, struct source_struct* source_data, struct detector_struct* detector_data) { MASTER_THREAD printf("\n -- Setting the sources and detectors for the %d CT projections:\n", num_projections); double cos_rX, cos_rZ, sin_rX, sin_rZ, current_angle; // --Set center of rotation exactly half way between source and detector float3 center_rotation, detector_center; center_rotation.x = source_data->position[0].x + source_data->direction[0].x * (0.5*detector_data->sdd); center_rotation.y = source_data->position[0].y + source_data->direction[0].y * (0.5*detector_data->sdd); center_rotation.z = source_data->position[0].z; // + source_data->direction.z * (0.5*detector_data->sdd); // !!DeBuG!! w=0 all the time!! // --Angular span between projections: // -Set initial angle for the source (180 degress less than the detector pointed by the direction vector; the zero angle is the X axis, increasing to +Y axis). current_angle = acos((double)source_data->direction[0].x); if (source_data->direction[0].y<0) current_angle = -current_angle; // Correct for the fact that positive and negative angles have the same ACOS if (current_angle<0.0) current_angle += 2.0*PI; // Make sure the angle is not negative, between [0,360) degrees. current_angle = current_angle - PI; // Correct the fact that the source is opposite to the detector (180 degrees difference). if (current_angle<0.0) current_angle += 2.0*PI; // Make sure the angle is not negative, between [0,360) degrees.. MASTER_THREAD printf(" << Projection #0 >> initial_angle=%f , D_angle=%f\n", current_angle*RAD2DEG, D_angle*RAD2DEG); MASTER_THREAD printf(" Source direction=(%f,%f,%f), position=(%f,%f,%f)\n", source_data->direction[0].x,source_data->direction[0].y,source_data->direction[0].z, source_data->position[0].x,source_data->position[0].y,source_data->position[0].z); // !!DeBuG!! Verbose int i; for (i=1; i<num_projections; i++) // The first projection (i=0) was initialized in function "read_input". 
{ // --Set the new source location and direction, for the current CT projection: current_angle += D_angle; if (current_angle>=(2.0*PI-0.0001)) current_angle -= 2.0*PI; // Make sure the angle is not above or equal to 360 degrees. // -- Check if this projection is inside the angular region of interest // if ((current_angle < angularROI_0) || (current_angle > angularROI_1)) // { // printf(" Skipping projection #%d: angle %f outside the angular region of interest.\n", i, current_angle*RAD2DEG); // !!DeBuG!! Verbose // continue; // } source_data->position[i].x = center_rotation.x + (0.5*detector_data->sdd)*cos(current_angle); source_data->position[i].y = center_rotation.y + (0.5*detector_data->sdd)*sin(current_angle); source_data->position[i].z = source_data->position[0].z; // !!DeBuG!! The Z position must be constant (w=0)!! !!DeBuG!! source_data->direction[i].x = center_rotation.x - source_data->position[i].x; source_data->direction[i].y = center_rotation.y - source_data->position[i].y; source_data->direction[i].z = 0.0f; // center_rotation.z - source_data->position.z; !!DeBuG!! w=0 all the time!! !!DeBuG!! double norm = 1.0/sqrt((double)source_data->direction[i].x*(double)source_data->direction[i].x + (double)source_data->direction[i].y*(double)source_data->direction[i].y /* + source_data->direction[i].z*source_data->direction[i].z*/); source_data->direction[i].x = (float)(((double)source_data->direction[i].x)*norm); source_data->direction[i].y = (float)(((double)source_data->direction[i].y)*norm); // source_data->direction[i].z = (float)(((double)source_data->direction[i].z)*norm); // --Set the new detector in front of the new source: detector_center.x = source_data->position[i].x + source_data->direction[i].x * detector_data->sdd; // Set the center of the detector straight ahead of the focal spot. detector_center.y = source_data->position[i].y + source_data->direction[i].y * detector_data->sdd; detector_center.z = source_data->position[i].z; // + source_data->direction[i].z * detector_data->sdd; !!DeBuG!! w=0 all the time!! double rotX, rotZ; // detector_data->rotation_flag = 1; // !!DeBuG!! Already set in read_input! // -- Rotate the detector center to +Y: // Set the rotation that will bring particles from the detector plane to +Y=(0,+1,0) through a rotation around X and around Z (counter-clock): rotX = 0.0; // !!DeBuG!! w=0 all the time!! CORRECT CALCULATION: acos(source_data->direction.z) - 0.5*PI; // Rotate to +Y = (0,+1,0) --> rotX_0 = -PI/2 if ( (source_data->direction[i].x*source_data->direction[i].x + source_data->direction[i].y*source_data->direction[i].y) > 1.0e-8 ) // == u^2+v^2 > 0 if (source_data->direction[i].y >= 0.0f) rotZ = 0.5*PI - acos(source_data->direction[i].x/sqrt(source_data->direction[i].x*source_data->direction[i].x + source_data->direction[i].y*source_data->direction[i].y)); else rotZ = 0.5*PI - (-acos(source_data->direction[i].x/sqrt(source_data->direction[i].x*source_data->direction[i].x + source_data->direction[i].y*source_data->direction[i].y))); else rotZ = 0.0; // Vector pointing to +Z, do not rotate around Z then. MASTER_THREAD printf(" << Projection #%d >> current_angle=%f, rotation around Z = %f\n", i, current_angle*RAD2DEG, rotZ*RAD2DEG); // !!DeBuG!! Verbose MASTER_THREAD printf(" Source direction=(%f,%f,%f), position=(%f,%f,%f)\n", source_data->direction[i].x,source_data->direction[i].y,source_data->direction[i].z, source_data->position[i].x,source_data->position[i].y,source_data->position[i].z); // !!DeBuG!! 
Verbose cos_rX = cos(rotX); cos_rZ = cos(rotZ); sin_rX = sin(rotX); sin_rZ = sin(rotZ); detector_data->rot_inv[i][0] = cos_rZ; // Rotation matrix RxRz: detector_data->rot_inv[i][1] = -sin_rZ; detector_data->rot_inv[i][2] = 0.0f; detector_data->rot_inv[i][3] = cos_rX*sin_rZ; detector_data->rot_inv[i][4] = cos_rX*cos_rZ; detector_data->rot_inv[i][5] = -sin_rX; detector_data->rot_inv[i][6] = sin_rX*sin_rZ; detector_data->rot_inv[i][7] = sin_rX*cos_rZ; detector_data->rot_inv[i][8] = cos_rX; detector_data->corner_min_rotated_to_Y[i].x = detector_center.x*detector_data->rot_inv[i][0] + detector_center.y*detector_data->rot_inv[i][1] + detector_center.z*detector_data->rot_inv[i][2]; detector_data->corner_min_rotated_to_Y[i].y = detector_center.x*detector_data->rot_inv[i][3] + detector_center.y*detector_data->rot_inv[i][4] + detector_center.z*detector_data->rot_inv[i][5]; detector_data->corner_min_rotated_to_Y[i].z = detector_center.x*detector_data->rot_inv[i][6] + detector_center.y*detector_data->rot_inv[i][7] + detector_center.z*detector_data->rot_inv[i][8]; // -- Set the lower corner (minimum) coordinates at the normalized orientation: +Y. The detector has thickness 0. detector_data->corner_min_rotated_to_Y[i].x = detector_data->corner_min_rotated_to_Y[i].x - 0.5*detector_data->width_X; detector_data->corner_min_rotated_to_Y[i].y = detector_data->corner_min_rotated_to_Y[i].y; detector_data->corner_min_rotated_to_Y[i].z = detector_data->corner_min_rotated_to_Y[i].z - 0.5*detector_data->height_Z; // *** Init the fan beam source model: rotZ = -rotZ; // The source rotation is the inverse of the detector. cos_rX = cos(rotX); cos_rZ = cos(rotZ); sin_rX = sin(rotX); sin_rZ = sin(rotZ); // --Rotation around X (alpha) and then around Z (phi): Rz*Rx (oposite of detector rotation) source_data->rot_fan[i][0] = cos_rZ; source_data->rot_fan[i][1] = -cos_rX*sin_rZ; source_data->rot_fan[i][2] = sin_rX*sin_rZ; source_data->rot_fan[i][3] = sin_rZ; source_data->rot_fan[i][4] = cos_rX*cos_rZ; source_data->rot_fan[i][5] = -sin_rX*cos_rZ; source_data->rot_fan[i][6] = 0.0f; source_data->rot_fan[i][7] = sin_rX; source_data->rot_fan[i][8] = cos_rX; // printf("\n -- Source location and direction for the following CT projection:\n"); // !!DeBuG!! Verbose // printf(" angle between projections = %lf degrees\n", D_angle*RAD2DEG); // printf(" current angle = %lf degrees\n", current_angle*RAD2DEG); // printf(" new focal spot position = (%f, %f, %f)\n", source_data->position[i].x, source_data->position[i].y, source_data->position[i].z); // printf(" new source direction = (%f, %f, %f)\n", source_data->direction[i].x, source_data->direction[i].y, source_data->direction[i].z); // printf(" new detector center = (%f, %f, %f)\n", detector_center.x, detector_center.y, detector_center.z); // printf(" new detector low corner (at +Y) = (%f, %f, %f)\n", detector_data->corner_min_rotated_to_Y[i].x, detector_data->corner_min_rotated_to_Y[i].y, detector_data->corner_min_rotated_to_Y[i].z); // printf(" center of rotation = (%f, %f, %f)\n", center_rotation.x, center_rotation.y, center_rotation.z); // printf(" detector width (X) and height (Z) = %f , %f cm\n", detector_data->width_X, detector_data->height_Z); // printf(" rotations to +Y around Z and X = %f , %f degrees\n", rotZ*RAD2DEG, rotX*RAD2DEG); } } ///////////////////////////////////////////////////////////////////////////////
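////////////////////////////////////////////////////////////////////////////////
//  Illustrative sketch (not part of the original source): how the rescaled
//  linear-interpolation tables built in load_material() would typically be
//  sampled for a given photon energy.  Because the 'a' coefficients were
//  rescaled above to an energy-zero intercept, the interpolated value is
//  simply a + E*b, with no per-bin energy offset.  The tables store
//  1/(MFP*nominal_density), i.e. mass attenuation coefficients [cm^2/g], so
//  multiplying by the local voxel density gives the local inverse mean free
//  path [1/cm].  The helper name and signature are hypothetical.
////////////////////////////////////////////////////////////////////////////////
__device__ inline float sample_inv_total_MFP_example(float energy, int mat, float voxel_density,
                                                     const struct linear_interp* mfp_data,
                                                     const float3* mfp_table_a,
                                                     const float3* mfp_table_b)
{
  int bin = (int)((energy - mfp_data->e0) * mfp_data->ide);        // Energy bin: (E-e0)/delta_e, truncated
  int idx = bin*MAX_MATERIALS + mat;                               // Row-major [energy bin][material] layout used above
  float inv_mfp = mfp_table_a[idx].x + energy*mfp_table_b[idx].x;  // .x = total; .y and .z hold Compton and Rayleigh
  return inv_mfp * voxel_density;                                  // [cm^2/g] * [g/cm^3] = [1/cm]
}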
__device__ int getNewFileId() { return atomicAdd(&g_file_id,1); } DEBUG_NOINLINE __device__ void rt_node::init() volatile { /* n_leaves=0; char* ptr=(char*)&(leaves); for(int i=0;i<sizeof(leaves);i++){ ptr[i]=0;} */ } DEBUG_NOINLINE __device__ volatile rt_node* rt_node::alloc( volatile rt_node** toUpdatePtr) { *toUpdatePtr=(rt_node*)g_rtree_mempool.allocNode(); if (*toUpdatePtr == NULL) return NULL; (*toUpdatePtr)->init(); // empty function return *toUpdatePtr; } DEBUG_NOINLINE __device__ void rt_node::free(volatile rt_node* toFree){ g_rtree_mempool.freeNode(toFree); } DEBUG_NOINLINE __device__ void rtree::init_thread() volatile { dirty_tree=0; count=0; drop_cache=0; file_id=-1; tree_lock=0; swap_lock=0; root[0].init(); root[1].init(); root[2].init(); root[3].init(); root[3].n_leaves=root[2].n_leaves=root[1].n_leaves=1; root[0].n_leaves=0; root[3].leaves.nodes[0]=&(root[2]); root[2].leaves.nodes[0]=&(root[1]); root[1].leaves.nodes[0]=&(root[0]); LIST_HEAD_INIT(&busy_list); } #define GET_OFFSET_LEAF(d_offset,level,res) res[level]=d_offset&MASK; d_offset=d_offset>>LOGNUM_LEAVES; if (d_offset==0) return level; DEBUG_NOINLINE __device__ int rtree::getOffsetLevel(size_t d_offset, unsigned char* offset) { GET_OFFSET_LEAF(d_offset,0,offset); GET_OFFSET_LEAF(d_offset,1,offset); GET_OFFSET_LEAF(d_offset,2,offset); GET_OFFSET_LEAF(d_offset,3,offset); GPU_ASSERT(d_offset !=0); return -1; } #define MOVE_ONE_LEVEL_DOWN(curr_offset,new_ptr,tmp,ALLOC) \ (tmp)=&((new_ptr)->leaves.nodes[curr_offset]); \ if(*(tmp)) (new_ptr)= (volatile rt_node*)(*(tmp)); \ else \ if (ALLOC)\ { \ GPU_ASSERT(tree_lock);\ (new_ptr)->n_leaves++; GPU_ASSERT((new_ptr)->n_leaves<=NUM_LEAVES);\ (new_ptr)=rt_node::alloc(( rt_node volatile**)tmp);\ } \ else return NULL; //** requires tree lock if invoked with locked=true *// DEBUG_NOINLINE __device__ volatile FTable_page* rtree::getLeaf(size_t d_offset,FTable_page_locker::page_states_t* pstate, bool locked,int purpose) volatile { unsigned char offset_tmp[MAX_LEVELS]; unsigned char* offset=offset_tmp; int levels=getOffsetLevel(d_offset,offset); GPU_ASSERT(levels>=0); volatile rt_node* n=&(root[levels]); volatile void * volatile* target; volatile FTable_page* leaf=NULL; switch(levels){ case 3: MOVE_ONE_LEVEL_DOWN(offset[3],n,target,locked); case 2: MOVE_ONE_LEVEL_DOWN(offset[2],n,target,locked); case 1: MOVE_ONE_LEVEL_DOWN(offset[1],n,target,locked); case 0: leaf=&(n->leaves.pages[offset[0]]); if (!locked) { *pstate=leaf->locker.try_lock_rw(); return leaf; } *pstate=leaf->locker.try_lock_init(); if (*pstate == FTable_page_locker::P_INIT) { GPU_ASSERT(((int)n->n_leaves)>=0); if (!n->n_leaves) { LIST_ADD(&busy_list,n); } // if this returns true the node MUST be inited // and it's now locked. 
// otherwise we later need to try to // lock it for rw/init // can change only with tree_lock taken GPU_ASSERT(tree_lock); n->n_leaves++; count++; GPU_ASSERT(n->n_leaves<=NUM_LEAVES); if (purpose == PAGE_WRITE_ACCESS){ dirty_tree=1; } } break; default: GPU_ASSERT(NULL); } return leaf; } /* this function deletes only the last level node (assuming all its leaves have been deleted assumes tree_lock to be taken */ #define FREE_LAST_LEVEL(parent,offset) (parent)->n_leaves--; GPU_ASSERT(tree_lock); #define FREE_LEAF(parent,offset) FREE_LAST_LEVEL(parent,offset); (parent)->leaves.nodes[offset]=NULL; #define FREE_NODE(node) if ((node)->n_leaves != 0) break; rt_node::free(node); #define FREE_LAST_LEVEL_NODE(node) if ((node)->n_leaves != 0) break; LIST_DEL(node);rt_node::free(node); DEBUG_NOINLINE __device__ void rtree::delLastLevelNode( size_t d_offset) volatile { unsigned char offset[MAX_LEVELS]; volatile rt_node* route_node0; volatile rt_node* route_node1; volatile rt_node* route_node2; volatile rt_node* route_node3; int levels=getOffsetLevel(d_offset,offset); GPU_ASSERT(levels>=0); volatile rt_node* n=&root[levels]; switch(levels){ case 0: // nothing to delete GPU_ASSERT(((int)n->n_leaves)>=0); if ((n)->n_leaves==0) LIST_DEL(n); break; case 3: route_node3=n; route_node2=(volatile rt_node*)(route_node3)->leaves.nodes[offset[3]]; route_node1=(volatile rt_node*)(route_node2)->leaves.nodes[offset[2]]; route_node0=(volatile rt_node*)(route_node1)->leaves.nodes[offset[1]]; FREE_LAST_LEVEL_NODE(route_node0); FREE_LEAF(route_node1,offset[1]); FREE_NODE(route_node1); FREE_LEAF(route_node2,offset[2]); FREE_NODE(route_node2); FREE_LEAF(route_node3,offset[3]); break; case 2: route_node2=n; route_node1=(volatile rt_node*)(route_node2)->leaves.nodes[offset[2]]; route_node0=(volatile rt_node*)(route_node1)->leaves.nodes[offset[1]]; FREE_LAST_LEVEL_NODE(route_node0); FREE_LEAF(route_node1,offset[1]); FREE_NODE(route_node1); FREE_LEAF(route_node2,offset[2]); break; case 1: route_node1=n; route_node0=(volatile rt_node*)(route_node1)->leaves.nodes[offset[1]]; FREE_LAST_LEVEL_NODE(route_node0); FREE_LEAF(route_node1,offset[1]); break; default: GPU_ASSERT(NULL); } } // assumes that the tree had all its leaves deleted already // this operation assumes swap lock and tree lock to be taken DEBUG_NOINLINE __device__ void rtree::delete_tree() volatile { GPU_ASSERT(count==0); GPU_ASSERT(tree_lock); volatile rt_node* route_nodes[]={0,0,0,&root[3]}; uchar3 idx={0,0,0}; uchar3 loc_count; for(idx.x=0,loc_count.x=route_nodes[3]->n_leaves; loc_count.x>0;idx.x++){ if (!route_nodes[3]->leaves.nodes[idx.x]) continue; loc_count.x--; route_nodes[2]=(volatile rt_node*)route_nodes[3]->leaves.nodes[idx.x]; for(idx.y=0,loc_count.y=route_nodes[2]->n_leaves>0; loc_count.y>0;idx.y++){ if (!route_nodes[2]->leaves.nodes[idx.y]) continue; loc_count.y--; route_nodes[1]=(volatile rt_node*)route_nodes[2]->leaves.nodes[idx.y]; for(idx.z=0,loc_count.z=route_nodes[1]->n_leaves; loc_count.z>0;idx.z++){ if (!route_nodes[1]->leaves.nodes[idx.z]) continue; loc_count.z--; if ( route_nodes[1]->leaves.nodes[idx.z] ==&root[0]) continue; rt_node::free((volatile rt_node*)route_nodes[1]->leaves.nodes[idx.z]); // this zeros out rt_node structure } if ( route_nodes[1]==&root[1]) continue; rt_node::free(route_nodes[1]); } if (route_nodes[2]==&root[2]) continue; rt_node::free(route_nodes[2]); } init_thread(); /* count=0; drop_cache=0; dirty_tree=0; LIST_HEAD_INIT(&busy_list); */ } /** swapout starts from the last_flushed and moves on over the busy_list until it 
swaps out min_flushed pages + it swaps out FULL rt_node + it locks swap to prevent concurrent flushing + it locks per page every time it swaps it out + it locks a whole tree to delete rt_node from it + since the tree lock is not taken until the tree structure is changed, the # of leaves in rt_node + is changed later + it holds lock_swap and lock_tree at the same time + addLeaf holds lock_tree() + swapout holds both lock_swap and lock_tree + concurrent reads/writes are allowed when some part of the data is being swapped + **/ DEBUG_NOINLINE __device__ void rtree::traverse_all(int fd, bool toFree, bool dirty, int flags ) volatile { int all_flushed=1; if ( !dirty && !toFree) return; // we DO NOT update the number of leaves because traverse_all is either // invoked without removal // OR it is always the final stage before killing the whole tree for(volatile rt_node* n=LIST_PREV(&busy_list); n!=&busy_list;n=LIST_PREV(n)) { // EXPECT NULL HERE - the node could have been deleted if (n==NULL){ n=LIST_PREV(&busy_list); continue; } int leaf_count=0; GPU_ASSERT(n); if (n->n_leaves) KILL_BUFFER_CACHE; for(int i=0;i<NUM_LEAVES && leaf_count<n->n_leaves;){ volatile FTable_page *f=(volatile FTable_page*)&(n->leaves.pages[i]); i++; // lock the page - lock flush until managed to lock if (!f->frame) continue; MUTEX_LOCK(f->locker.lock); // hard lock if (f->locker.rw_counter>0 || !f->frame ) { if (f->locker.rw_counter>0) all_flushed=0; MUTEX_UNLOCK(f->locker.lock); continue; } // check that it's the page that corresponds to the right file_id if (f->frame->file_id!=file_id) { // if its not the right one, we are on the wrong rt_node MUTEX_UNLOCK(f->locker.lock); WRONG_FILE_ID; break; } f->locker.page_state=FTable_page_locker::P_FLUSH; leaf_count++; GPU_ASSERT(!(fd<0 && dirty)); if (dirty && f->frame->dirty) { writeback_page(fd,f,flags,1); f->frame->dirty=false; } if (toFree){ f->freePage(FREE_LOCKED); }else{ f->locker.page_state=FTable_page_locker::P_READY; f->locker.unlock_flush(); } } } if (toFree && !all_flushed) { GPU_ASSERT("trying to free a radix tree while not all nodes were possible to flush\n"==NULL); } if (all_flushed) { dirty_tree=0; count=0;} if (toFree && !(LIST_EMPTY(&busy_list))) delete_tree(); } /*** this function is internally synchronized ***/ DEBUG_NOINLINE __device__ int rtree::swapout(int fd, int num_flushed, int flags) volatile { // MUST TAKE SWAP LOCK HERE GPU_ASSERT(swap_lock); // GPU_ASSERT(fd>=0); // run through the busy list - volatile rt_node* n; n=LIST_PREV(&busy_list); // we can't get NULL here c's busy list is always initialized to NOT null volatile FTable_page* to_free[NUM_LEAVES]; // all those to be freed while( num_flushed>0 && n!=&busy_list ) { // here we MUST expect NULL because the node might be in the process of deletion // then restart if (n==NULL){ n=LIST_PREV(&busy_list); continue; } int flushed=0; size_t file_offset=0; GPU_ASSERT(n); for(int i=0;i<NUM_LEAVES && n->n_leaves;i++){ volatile FTable_page *f=(volatile FTable_page*)&(n->leaves.pages[i]); if (!f->frame) continue; // lock the page if (f->locker.try_lock_flush()) { GPU_ASSERT(f->frame); // check that it's the page that corresponds to the right file_id if (f->frame->file_id!=file_id) { // if its not the right one, we are on the wrong rt_node f->locker.unlock_flush(); WRONG_FILE_ID; break; } // we are going to flush here f->locker.page_state=FTable_page_locker::P_FLUSH; // GPU_ASSERT((f->frame->file_offset)>=0); //debug //{ //uchar o[4]; //int levels=getOffsetLevel(f->frame->file_offset>>FS_LOGBLOCKSIZE,o); 
//GPU_ASSERT(levels>=0); //} file_offset=f->frame->file_offset; GPU_ASSERT(fd>=0 || !f->frame->dirty); if (f->frame->dirty && f->frame->content_size) { writeback_page(fd,f,flags,1); f->frame->dirty=false; FLUSHED_WRITE }else{ FLUSHED_READ } to_free[flushed]=f; flushed++; }else{ TRY_LOCK_FAILED } } // nothing to remove if (flushed==0) { n=LIST_PREV(n); continue; } num_flushed-=flushed; // consider removing rt_node - lock the tree //no structural change to the tree is possible lock_tree(); GPU_ASSERT(tree_lock); for(int fp=0;fp<flushed;fp++){ to_free[fp]->freePage(FREE_UNLOCKED); __threadfence(); } // update count of valid nodes in the tree // n->n_leaves-=flushed; GPU_ASSERT(((int)n->n_leaves)>=0); count-=flushed; n->n_leaves-=flushed; if ( n->n_leaves ) { n=LIST_PREV(n); unlock_tree(); continue; // we do not delete the node } n=LIST_PREV(n); delLastLevelNode(file_offset>>FS_LOGBLOCKSIZE); GPU_ASSERT(count>=0); unlock_tree(); } return num_flushed; }
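/*** Illustrative sketch (not part of the original source): the offset
     decomposition performed by rtree::getOffsetLevel() via the
     GET_OFFSET_LEAF macro, written as an explicit loop.  LOGNUM_LEAVES,
     MASK and MAX_LEVELS are the same constants assumed above (MAX_LEVELS
     is taken to be 4, matching the four root[] levels set up in
     init_thread()); the function name is hypothetical. ***/
DEBUG_NOINLINE __device__ int decomposeOffsetExample(size_t d_offset, unsigned char* offset)
{
	for (int level = 0; level < MAX_LEVELS; level++)
	{
		offset[level] = (unsigned char)(d_offset & MASK);  // leaf index inside the rt_node at this level
		d_offset = d_offset >> LOGNUM_LEAVES;              // move up to the parent level
		if (d_offset == 0) return level;                   // deepest level actually needed for this offset
	}
	return -1;                                             // offset does not fit in MAX_LEVELS levels
}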
#pragma once #include "cuda/ComputeCapabilities.cuh" #include "cuda/NumericLimits.cuh" #include "cuda/RegisterUtils.cuh" #include "cuda/SmallSort.cuh" #include "cuda/TopK.cuh" #include <assert.h> #include <boost/preprocessor/repetition/repeat.hpp> #include <cuda.h> #include <device_functions.h> #include <math_constants.h> #include <stdio.h> /** @file CUDA device code routines for finding all top K float elements in descending order in a set using the above top-Kth radix selection plus warp coherent bitonic sorting. */ namespace facebook { namespace cuda { namespace detail { /// Returns the index into the array that this lane will write. If this /// lane is not responsible for writing a value, this will return -1. __device__ __forceinline__ int laneWillWrite(float val, float topK, int& topKToWrite, int& next) { // Do we have a < top K value? Those must be written. // If we have a == top K value, only some of these may be written. const bool weHaveLessThanTopK = (val > topK); const bool weHaveEqualToTopK = (val == topK); // All threads with an on bit in this will write out to `out` const unsigned warpHasLessThanTopK = __ballot(weHaveLessThanTopK); // Only the first topKToWrite threads with an on bit in this will // write out to `out` unsigned warpHasEqualToTopK = __ballot(weHaveEqualToTopK); // We have to figure out which ones are the first topKToWrite ones // though const bool weWillWriteEqualToTopK = (__popc(getLaneMaskLt() & warpHasEqualToTopK) < topKToWrite) && weHaveEqualToTopK; // Tell all threads which ones will write out == top K elements warpHasEqualToTopK = __ballot(weWillWriteEqualToTopK); // Update the number of actual == top K elements to find remaining topKToWrite -= __popc(warpHasEqualToTopK); assert(topKToWrite >= 0); // Only the lanes with bits set in this mask will write out elements const unsigned warpWillWrite = warpHasLessThanTopK | warpHasEqualToTopK; // How many threads are writing before us? This will define our // output order. const unsigned numLanesBeforeUs = __popc(getLaneMaskLt() & warpWillWrite); // Thus, next + numLanesBeforeUs is the index into which we'll // write our value, if this lane wants to write a value. const int writeIndex = next + numLanesBeforeUs; // Advance where the next values go by how many values the current // warp wrote out next += __popc(warpWillWrite); // Only if this lane bit is on will we write something out return getBit(warpWillWrite, getLaneId()) ? writeIndex : -1; } /// For a given warp, find and write out the top-k highest floating /// point values in [start, end) to [out, out + k). The list written /// out occurs in the original source order (by original /// index). Returns the k-th highest element seen. /// Handles all floats except NaNs. /// Implementation for large arrays such that there are more elements /// than warp threads. __device__ float warpFindTopKElementsIndexOrder(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, int k) { // First, have all warp threads find the top Kth element. const Pair<float, int> topKthElement = warpFindTopKthElement(data, k); // The next offset to write into `out` int next = 0; // Number of remaining == topKthElement values the warp still has to // write (because there can be duplicates) int topKToWrite = topKthElement.v; for (int index = getLaneId(); index < data.getSize(0); index += WARP_SIZE) { const float val = data[index]; const int idx = laneWillWrite(val, topKthElement.k, topKToWrite, next); // Does this lane have a value to write? 
if (idx != -1) { out[idx] = val; } } // We should have written out all the == top K elements. However, // only threads that were within bounds will have the proper values // of these, so share from the thread within the first lane, which // is guaranteed to participate in all array loops assert(__shfl(topKToWrite, 0) == 0); assert(__shfl(next, 0) == k); return topKthElement.k; } /// Version of warpFindTopKElementsUnorderedLarge, except also writes /// out the K indices chosen from `data` into `indices`. template <typename IndexType> __device__ float warpFindTopKElementsIndexOrder(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, DeviceTensor<IndexType, 1>& indices, int k) { // First, have all warp threads find the top Kth element. const Pair<float, int> topKthElement = warpFindTopKthElement(data, k); // The next offset to write into `out` int next = 0; // Number of remaining == topKthElement values the warp still has to // write (because there can be duplicates) int topKToWrite = topKthElement.v; for (int index = getLaneId(); index < data.getSize(0); index += WARP_SIZE) { const float val = data[index]; const int idx = laneWillWrite(val, topKthElement.k, topKToWrite, next); // Does this lane have a value to write? if (idx != -1) { out[idx] = val; indices[idx] = (IndexType) index; } } // We should have written out all the == top K elements. However, // only threads that were within bounds will have the proper values // of these, so share from the thread within the first lane, which // is guaranteed to participate in all array loops assert(__shfl(topKToWrite, 0) == 0); assert(__shfl(next, 0) == k); return topKthElement.k; } /// For a given warp, find and write out the top-k highest floating /// point values in [start, end) to [out, out + k). The list written /// out is ordered. /// Handles all floats except NaNs. __device__ void warpFindTopKElementsValueOrderSmall(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, int k) { // We only handle in-warp sorting up to a max size; above this size, // the radix selection strategy wins. assert(data.getSize(0) <= 3 * WARP_SIZE); // There should be enough values to return the k-th highest. assert(k > 0 && k <= data.getSize(0)); const int lane = getLaneId(); #define HANDLE_SIZE(N) \ if (data.getSize(0) <= N * WARP_SIZE) { \ float val[N]; \ WarpRegisterLoaderUtils<float, N>::load( \ val, data, NumericLimits<float>::minPossible()); \ \ float sorted[N]; \ warpSortRegisters<float, GreaterThan<float>, N>(val, sorted); \ \ WarpRegisterLoaderUtils<float, N>::save(out, sorted, k); \ } HANDLE_SIZE(1); HANDLE_SIZE(2); HANDLE_SIZE(3); #undef HANDLE_SIZE } /// Version of warpFindTopKElementsOrderedSmall that also writes out /// the indices in `data` of the K elements chosen into `indices`. template <typename IndexType> __device__ void warpFindTopKElementsValueOrderSmall(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, DeviceTensor<IndexType, 1>& indices, int k) { // We only handle in-warp sorting up to a max size; above this size, // the radix selection strategy wins. assert(data.getSize(0) <= 3 * WARP_SIZE); // There should be enough values to return the k-th highest. 
assert(k > 0 && k <= data.getSize(0)); const int lane = getLaneId(); #define HANDLE_SIZE(N) \ if (data.getSize(0) <= N * WARP_SIZE) { \ Pair<float, IndexType> val[N]; \ WarpRegisterPairLoaderUtils<float, IndexType, N>::load( \ val, data, \ NumericLimits<float>::minPossible(), \ NumericLimits<IndexType>::minPossible()); \ \ Pair<float, IndexType> sorted[N]; \ warpSortRegisters<Pair<float, IndexType>, \ GreaterThan<Pair<float, IndexType> >, \ N>(val, sorted); \ \ WarpRegisterPairLoaderUtils<float, IndexType, N>::save( \ out, indices, sorted, k); \ } HANDLE_SIZE(1); HANDLE_SIZE(2); HANDLE_SIZE(3); #undef HANDLE_SIZE } /// For a given warp, find and write out the top-k highest floating /// point values in [start, end) to [out, out + k). The list written /// out is ordered. /// Handles all floats except NaNs. /// Implementation for large arrays such that there are more elements /// than warp threads. __device__ void warpFindTopKElementsValueOrderLarge(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, int k) { // We only have a sorting implementation that works up to k <= 4 * // warpSize. assert(k <= 4 * WARP_SIZE); // Find and write out the elements in index order warpFindTopKElementsIndexOrder(data, out, k); // Sort the elements in [out, out + k) based on float order bool sorted = warpSort<float, GreaterThan<float> >(out, out); assert(sorted); } /// Version of warpFindTopKElementsOrderedLage that also writes out the /// indices in `data` of the K elements chosen into `indices`. template <typename IndexType> __device__ void warpFindTopKElementsValueOrderLarge(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, DeviceTensor<IndexType, 1>& indices, int k) { // We only have a sorting implementation that works up to k <= 4 * // warpSize. assert(k <= 4 * WARP_SIZE); // Find and write out the elements in potentially unsorted order detail::warpFindTopKElementsIndexOrder<IndexType>(data, out, indices, k); // Sort the elements in [out, out + k) / [indices, indices + k) as // keys/values bool sorted = warpSort<float, IndexType, GreaterThan<Pair<float, IndexType> > >( out, indices, out, indices); assert(sorted); } } // detail /// For a given warp, find and write out the top-k highest floating /// point values in [start, end) to [out, out + k). The list written /// out is ordered based on original index order. Handles all floats /// except NaNs. __device__ void warpFindTopKElementsIndexOrder(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, int k) { assert(out.getSize(0) >= k); detail::warpFindTopKElementsIndexOrder(data, out, k); } /// Version of warpFindTopKElementsOrdered which also writes out the /// indices of the found top elements from `data`. The list written out /// is ordered based on original index order. Handles all floats except /// NaNs. /// Supports writing out float or integer indices. template <typename IndexType> __device__ void warpFindTopKElementsIndexOrder(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, DeviceTensor<IndexType, 1>& indices, int k) { assert(out.getSize(0) >= k && indices.getSize(0) >= k); detail::warpFindTopKElementsIndexOrder<IndexType>( data, out, indices, k); } /// For a given warp, find and write out the top-k highest floating /// point values in [start, end) to [out, out + k). The list written /// out is ordered based on float value. Handles all floats except /// NaNs. 
__device__ void warpFindTopKElementsValueOrder(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, int k) { assert(out.getSize(0) >= k); assert(k <= 4 * WARP_SIZE); // Max size handled at the moment // In-register warp sorting is faster up to 3 x warpSize input if (data.getSize(0) <= 3 * WARP_SIZE) { detail::warpFindTopKElementsValueOrderSmall(data, out, k); } else { detail::warpFindTopKElementsValueOrderLarge(data, out, k); } } /// Version of warpFindTopKElementsOrdered which also writes out the /// indices of the found top elements from `data`. The list written out /// is ordered based on float value. Handles all floats except NaNs. /// Supports writing out float or integer indices. template <typename IndexType> __device__ void warpFindTopKElementsValueOrder(const DeviceTensor<float, 1>& data, DeviceTensor<float, 1>& out, DeviceTensor<IndexType, 1>& indices, int k) { assert(out.getSize(0) >= k && indices.getSize(0) >= k); assert(k <= 4 * WARP_SIZE); // Max size handled at the moment // In-register warp sorting is faster up to 3 x warpSize input if (data.getSize(0) <= 3 * WARP_SIZE) { detail::warpFindTopKElementsValueOrderSmall<IndexType>( data, out, indices, k); } else { detail::warpFindTopKElementsValueOrderLarge<IndexType>( data, out, indices, k); } } } } // namespace
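/// Illustrative sketch (not part of the original header): the ballot/popc
/// warp-compaction idiom that laneWillWrite() uses to assign each writing
/// lane a unique, order-preserving output slot.  getLaneId() and
/// getLaneMaskLt() are assumed to come from cuda/RegisterUtils.cuh as in the
/// code above; the function name is hypothetical.
namespace facebook { namespace cuda {

__device__ __forceinline__
int warpCompactionSlotExample(bool willWrite, int& next) {
  const unsigned warpWillWrite = __ballot(willWrite);                 // lanes that have something to write
  const int lanesBeforeUs = __popc(getLaneMaskLt() & warpWillWrite);  // writers with a lower lane id
  const int writeIndex = next + lanesBeforeUs;                        // this lane's output slot
  next += __popc(warpWillWrite);                                      // advance the warp-wide cursor
  return willWrite ? writeIndex : -1;                                 // -1: this lane writes nothing
}

} } // namespace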
#include <thrust/count.h> #include <thrust/execution_policy.h> #include <thrust/logical.h> #include <thrust/transform_reduce.h> #include "impl.cuh" namespace { using namespace manifold; struct FaceAreaVolume { const Halfedge* halfedges; const glm::vec3* vertPos; const float precision; __host__ __device__ thrust::pair<float, float> operator()(int face) { float perimeter = 0; glm::vec3 edge[3]; for (int i : {0, 1, 2}) { const int j = (i + 1) % 3; edge[i] = vertPos[halfedges[3 * face + j].startVert] - vertPos[halfedges[3 * face + i].startVert]; perimeter += glm::length(edge[i]); } glm::vec3 crossP = glm::cross(edge[0], edge[1]); float area = glm::length(crossP); float volume = glm::dot(crossP, vertPos[halfedges[3 * face].startVert]); return area > perimeter * precision ? thrust::make_pair(area / 2.0f, volume / 6.0f) : thrust::make_pair(0.0f, 0.0f); } }; struct PosMin : public thrust::binary_function<glm::vec3, glm::vec3, glm::vec3> { __host__ __device__ glm::vec3 operator()(glm::vec3 a, glm::vec3 b) { if (isnan(a.x)) return b; if (isnan(b.x)) return a; return glm::min(a, b); } }; struct PosMax : public thrust::binary_function<glm::vec3, glm::vec3, glm::vec3> { __host__ __device__ glm::vec3 operator()(glm::vec3 a, glm::vec3 b) { if (isnan(a.x)) return b; if (isnan(b.x)) return a; return glm::max(a, b); } }; struct SumPair : public thrust::binary_function<thrust::pair<float, float>, thrust::pair<float, float>, thrust::pair<float, float>> { __host__ __device__ thrust::pair<float, float> operator()( thrust::pair<float, float> a, thrust::pair<float, float> b) { a.first += b.first; a.second += b.second; return a; } }; struct CurvatureAngles { float* meanCurvature; float* gaussianCurvature; float* area; float* degree; const Halfedge* halfedge; const glm::vec3* vertPos; const glm::vec3* triNormal; __host__ __device__ void operator()(int tri) { glm::vec3 edge[3]; glm::vec3 edgeLength; for (int i : {0, 1, 2}) { const int startVert = halfedge[3 * tri + i].startVert; const int endVert = halfedge[3 * tri + i].endVert; edge[i] = vertPos[endVert] - vertPos[startVert]; edgeLength[i] = glm::length(edge[i]); edge[i] /= edgeLength[i]; const int neighborTri = halfedge[3 * tri + i].pairedHalfedge / 3; const float dihedral = 0.25 * edgeLength[i] * glm::asin(glm::dot(glm::cross(triNormal[tri], triNormal[neighborTri]), edge[i])); AtomicAdd(meanCurvature[startVert], dihedral); AtomicAdd(meanCurvature[endVert], dihedral); AtomicAdd(degree[startVert], 1.0f); } glm::vec3 phi; phi[0] = glm::acos(-glm::dot(edge[2], edge[0])); phi[1] = glm::acos(-glm::dot(edge[0], edge[1])); phi[2] = glm::pi<float>() - phi[0] - phi[1]; const float area3 = edgeLength[0] * edgeLength[1] * glm::length(glm::cross(edge[0], edge[1])) / 6; for (int i : {0, 1, 2}) { const int vert = halfedge[3 * tri + i].startVert; AtomicAdd(gaussianCurvature[vert], -phi[i]); AtomicAdd(area[vert], area3); } } }; struct NormalizeCurvature { __host__ __device__ void operator()( thrust::tuple<float&, float&, float, float> inOut) { float& meanCurvature = thrust::get<0>(inOut); float& gaussianCurvature = thrust::get<1>(inOut); float area = thrust::get<2>(inOut); float degree = thrust::get<3>(inOut); float factor = degree / (6 * area); meanCurvature *= factor; gaussianCurvature *= factor; } }; struct CheckManifold { const Halfedge* halfedges; __host__ __device__ bool operator()(int edge) { const Halfedge halfedge = halfedges[edge]; if (halfedge.startVert == -1 && halfedge.endVert == -1 && halfedge.pairedHalfedge == -1) return true; const Halfedge paired = 
halfedges[halfedge.pairedHalfedge]; bool good = true; good &= paired.pairedHalfedge == edge; good &= halfedge.startVert != halfedge.endVert; good &= halfedge.startVert == paired.endVert; good &= halfedge.endVert == paired.startVert; return good; } }; struct NoDuplicates { const Halfedge* halfedges; __host__ __device__ bool operator()(int edge) { const Halfedge halfedge = halfedges[edge]; if (halfedge.startVert == -1 && halfedge.endVert == -1 && halfedge.pairedHalfedge == -1) return true; return halfedge.startVert != halfedges[edge + 1].startVert || halfedge.endVert != halfedges[edge + 1].endVert; } }; struct CheckCCW { const Halfedge* halfedges; const glm::vec3* vertPos; const glm::vec3* triNormal; const float tol; __host__ __device__ bool operator()(int face) { if (halfedges[3 * face].pairedHalfedge < 0) return true; const glm::mat3x2 projection = GetAxisAlignedProjection(triNormal[face]); glm::vec2 v[3]; for (int i : {0, 1, 2}) v[i] = projection * vertPos[halfedges[3 * face + i].startVert]; int ccw = CCW(v[0], v[1], v[2], glm::abs(tol)); bool check = tol > 0 ? ccw >= 0 : ccw == 0; if (tol > 0 && !check) { glm::vec2 v1 = v[1] - v[0]; glm::vec2 v2 = v[2] - v[0]; float area = v1.x * v2.y - v1.y * v2.x; float base2 = glm::max(glm::dot(v1, v1), glm::dot(v2, v2)); float base = glm::sqrt(base2); glm::vec3 V0 = vertPos[halfedges[3 * face].startVert]; glm::vec3 V1 = vertPos[halfedges[3 * face + 1].startVert]; glm::vec3 V2 = vertPos[halfedges[3 * face + 2].startVert]; glm::vec3 norm = glm::cross(V1 - V0, V2 - V0); printf( "Tri %d does not match normal, approx height = %g, base = %g\n" "tol = %g, area2 = %g, base2*tol2 = %g\n" "normal = %g, %g, %g\n" "norm = %g, %g, %g\nverts: %d, %d, %d\n", face, area / base, base, tol, area * area, base2 * tol * tol, triNormal[face].x, triNormal[face].y, triNormal[face].z, norm.x, norm.y, norm.z, halfedges[3 * face].startVert, halfedges[3 * face + 1].startVert, halfedges[3 * face + 2].startVert); } return check; } }; } // namespace namespace manifold { /** * Returns true if this manifold is in fact an oriented 2-manifold and all of * the data structures are consistent. */ bool Manifold::Impl::IsManifold() const { if (halfedge_.size() == 0) return true; bool isManifold = thrust::all_of(countAt(0), countAt(halfedge_.size()), CheckManifold({halfedge_.cptrD()})); VecDH<Halfedge> halfedge(halfedge_); thrust::sort(halfedge.beginD(), halfedge.endD()); isManifold &= thrust::all_of(countAt(0), countAt(2 * NumEdge() - 1), NoDuplicates({halfedge.cptrD()})); return isManifold; } /** * Returns true if all triangles are CCW relative to their triNormals_. */ bool Manifold::Impl::MatchesTriNormals() const { if (halfedge_.size() == 0 || faceNormal_.size() != NumTri()) return true; return thrust::all_of(thrust::device, countAt(0), countAt(NumTri()), CheckCCW({halfedge_.cptrD(), vertPos_.cptrD(), faceNormal_.cptrD(), 2 * precision_})); } /** * Returns the number of triangles that are colinear within precision_. 
*/ int Manifold::Impl::NumDegenerateTris() const { if (halfedge_.size() == 0 || faceNormal_.size() != NumTri()) return true; return thrust::count_if(thrust::device, countAt(0), countAt(NumTri()), CheckCCW({halfedge_.cptrD(), vertPos_.cptrD(), faceNormal_.cptrD(), -1 * precision_ / 2})); } Properties Manifold::Impl::GetProperties() const { if (IsEmpty()) return {0, 0}; ApplyTransform(); thrust::pair<float, float> areaVolume = thrust::transform_reduce( countAt(0), countAt(NumTri()), FaceAreaVolume({halfedge_.cptrD(), vertPos_.cptrD(), precision_}), thrust::make_pair(0.0f, 0.0f), SumPair()); return {areaVolume.first, areaVolume.second}; } Curvature Manifold::Impl::GetCurvature() const { Curvature result; if (IsEmpty()) return result; ApplyTransform(); VecDH<float> vertMeanCurvature(NumVert(), 0); VecDH<float> vertGaussianCurvature(NumVert(), glm::two_pi<float>()); VecDH<float> vertArea(NumVert(), 0); VecDH<float> degree(NumVert(), 0); thrust::for_each( countAt(0), countAt(NumTri()), CurvatureAngles({vertMeanCurvature.ptrD(), vertGaussianCurvature.ptrD(), vertArea.ptrD(), degree.ptrD(), halfedge_.cptrD(), vertPos_.cptrD(), faceNormal_.cptrD()})); thrust::for_each_n( zip(vertMeanCurvature.beginD(), vertGaussianCurvature.beginD(), vertArea.beginD(), degree.beginD()), NumVert(), NormalizeCurvature()); result.minMeanCurvature = thrust::reduce(vertMeanCurvature.beginD(), vertMeanCurvature.endD(), 1.0f / 0.0f, thrust::minimum<float>()); result.maxMeanCurvature = thrust::reduce(vertMeanCurvature.beginD(), vertMeanCurvature.endD(), -1.0f / 0.0f, thrust::maximum<float>()); result.minGaussianCurvature = thrust::reduce( vertGaussianCurvature.beginD(), vertGaussianCurvature.endD(), 1.0f / 0.0f, thrust::minimum<float>()); result.maxGaussianCurvature = thrust::reduce( vertGaussianCurvature.beginD(), vertGaussianCurvature.endD(), -1.0f / 0.0f, thrust::maximum<float>()); result.vertMeanCurvature.insert(result.vertMeanCurvature.end(), vertMeanCurvature.begin(), vertMeanCurvature.end()); result.vertGaussianCurvature.insert(result.vertGaussianCurvature.end(), vertGaussianCurvature.begin(), vertGaussianCurvature.end()); return result; } /** * Calculates the bounding box of the entire manifold, which is stored * internally to short-cut Boolean operations and to serve as the precision * range for Morton code calculation. */ void Manifold::Impl::CalculateBBox() { bBox_.min = thrust::reduce(vertPos_.beginD(), vertPos_.endD(), glm::vec3(1 / 0.0f), PosMin()); bBox_.max = thrust::reduce(vertPos_.beginD(), vertPos_.endD(), glm::vec3(-1 / 0.0f), PosMax()); } } // namespace manifold
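// A minimal sketch, independent of the Manifold::Impl internals above, of the same
// thrust::transform_reduce pattern GetProperties() uses: map each triangle index to
// a scalar (here its area from a cross product) and sum the results on the device.
// The flat float3 vertex layout and the functor/variable names are assumptions for
// illustration, not the manifold library's API.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_reduce.h>

struct TriArea {
  const float3* verts;  // three consecutive vertices per triangle
  __host__ __device__ float operator()(int tri) const {
    float3 a = verts[3 * tri], b = verts[3 * tri + 1], c = verts[3 * tri + 2];
    float3 e0 = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
    float3 e1 = make_float3(c.x - a.x, c.y - a.y, c.z - a.z);
    float3 n = make_float3(e0.y * e1.z - e0.z * e1.y,
                           e0.z * e1.x - e0.x * e1.z,
                           e0.x * e1.y - e0.y * e1.x);
    return 0.5f * sqrtf(n.x * n.x + n.y * n.y + n.z * n.z);
  }
};

int main() {
  // One unit right triangle in the z = 0 plane; expected total area 0.5.
  thrust::device_vector<float3> verts(3);
  verts[0] = make_float3(0, 0, 0);
  verts[1] = make_float3(1, 0, 0);
  verts[2] = make_float3(0, 1, 0);
  float area = thrust::transform_reduce(
      thrust::device, thrust::counting_iterator<int>(0),
      thrust::counting_iterator<int>(1),  // one triangle
      TriArea{thrust::raw_pointer_cast(verts.data())}, 0.0f,
      thrust::plus<float>());
  printf("total area = %f\n", area);
  return 0;
}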
the_stack
#include <cuda.h> #include <cuda_runtime.h> #include <tuple> __global__ void forward_face_index_map_cuda_kernel( const float* faces, const int batch_size, const int num_faces, const int image_height, const int image_width, const float near, const float far, int32_t* face_index_map, float* weight_map, float* depth_map, int32_t* lock_map) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int ih = image_height; const int iw = image_width; const int bn = i / num_faces; const int fn = i % num_faces; const float* face = &faces[i * 9]; /* pi[0], pi[1], pi[2] = leftmost, middle, rightmost points */ int pi[3]; if (face[0] < face[3]) { if (face[6] < face[0]) pi[0] = 2; else pi[0] = 0; if (face[3] < face[6]) pi[2] = 2; else pi[2] = 1; } else { if (face[6] < face[3]) pi[0] = 2; else pi[0] = 1; if (face[0] < face[6]) pi[2] = 2; else pi[2] = 0; } for (int k = 0; k < 3; k++) { if (pi[0] != k && pi[2] != k) { pi[1] = k; } } /* p[num][xyz]: x, y is normalized from [-1, 1] to [0, ih or iw - 1]. */ float p[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { if (dim == 0) { p[num][dim] = 0.5 * (face[3 * pi[num] + dim] * iw + iw - 1); } else if (dim == 1) { p[num][dim] = 0.5 * (face[3 * pi[num] + dim] * ih + ih - 1); } else { p[num][dim] = face[3 * pi[num] + dim]; } } } if (p[0][0] == p[2][0]) return; // line, not triangle /* compute face_inv */ float face_inv[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; float face_inv_denominator = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); for (int k = 0; k < 9; k++) { face_inv[k] /= face_inv_denominator; } const int xi_min = max(ceil(p[0][0]), 0.); const int xi_max = min(p[2][0], iw - 1.0); for (int xi = xi_min; xi <= xi_max; xi++) { /* compute yi_min and yi_max */ float yi1, yi2; if (xi <= p[1][0]) { if (p[1][0] - p[0][0] != 0) { yi1 = (p[1][1] - p[0][1]) / (p[1][0] - p[0][0]) * (xi - p[0][0]) + p[0][1]; } else { yi1 = p[1][1]; } } else { if (p[2][0] - p[1][0] != 0) { yi1 = (p[2][1] - p[1][1]) / (p[2][0] - p[1][0]) * (xi - p[1][0]) + p[1][1]; } else { yi1 = p[1][1]; } } yi2 = (p[2][1] - p[0][1]) / (p[2][0] - p[0][0]) * (xi - p[0][0]) + p[0][1]; const int yi_min = max(0., ceil(min(yi1, yi2))); const int yi_max = min(max(yi1, yi2), ih - 1.0); for (int yi = yi_min; yi <= yi_max; yi++) { /* index in output buffers */ int index = bn * ih * iw + yi * iw + xi; /* compute w = face_inv * p */ float w[3]; for (int k = 0; k < 3; k++) { w[k] = face_inv[3 * k + 0] * xi + face_inv[3 * k + 1] * yi + face_inv[3 * k + 2]; } /* sum(w) -> 1, 0 < w < 1 */ float w_sum = 0; for (int k = 0; k < 3; k++) { w[k] = min(max(w[k], 0.0), 1.0); w_sum += w[k]; } for (int k = 0; k < 3; k++) w[k] /= w_sum; /* compute 1 / zp = sum(w / z) */ const float zp = 1.0 / (w[0] / p[0][2] + w[1] / p[1][2] + w[2] / p[2][2]); if (zp <= near || far <= zp) continue; /* lock and update */ bool locked = false; do { if (locked = atomicCAS(&lock_map[index], 0, 1) == 0) { if (zp < atomicAdd(&depth_map[index], 0)) { float record = 0; atomicExch(&depth_map[index], zp); atomicExch(&face_index_map[index], fn); for (int k = 0; k < 3; k++) { atomicExch(&weight_map[3 * index + pi[k]], w[k]); } record += atomicAdd(&depth_map[index], 0.); record += atomicAdd(&face_index_map[index], 0.); if (record > 0) 
atomicExch(&lock_map[index], 0); } else { atomicExch(&lock_map[index], 0); } } } while (!locked); } } } __global__ void forward_texture_sampling_cuda_kernel( const float* faces, const float* textures, const int32_t* face_index_map, const float* weight_map, const size_t batch_size, const int num_faces, const int image_height, const int image_width, const int texture_size, float* feature_map) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_height * image_width) { return; } const int ts = texture_size; const int face_index = face_index_map[i]; float* pixel = &feature_map[i * (ts + 1)]; if (face_index >= 0) { /* from global variables: batch number, num of faces, image_size, face[v012][RGB], pixel[RGB], weight[v012], texture[ts][RGB]; */ const int bn = i / (image_height * image_width); const int nf = num_faces; const float* texture = &textures[(bn * nf + face_index) * ts * 3]; const float* weight = &weight_map[i * 3]; /* blend */ for (int k = 0; k < ts; k++) { for (int j = 0; j < 3; j++) { pixel[k] += weight[j] * texture[ts * j + k]; } } pixel[ts] = 1.0f; } } __global__ void backward_cuda_kernel( const int32_t* face_index_map, const float* weight_map, const float* grad_feature_map, float* grad_textures, size_t batch_size, size_t num_faces, int image_height, int image_width, size_t texture_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_height * image_width) { return; } const int face_index = face_index_map[i]; if (face_index >= 0) { int bn = i / (image_width * image_height); // batch number [0 -> bs] int nf = num_faces; int ts = texture_size; const float* weight = &weight_map[i * 3]; float* grad_texture = &grad_textures[(bn * nf + face_index) * ts * 3]; for (int k = 0; k < ts; k++) { const float grad_feature = grad_feature_map[i * (ts + 1) + k]; for (int j = 0; j < 3; j++) { atomicAdd(&grad_texture[ts * j + k], weight[j] * grad_feature); } } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> forward_cuda( at::Tensor feature_map, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor lock_map, const at::Tensor& faces, const at::Tensor& textures, const int image_height, const int image_width, const float near, const float far) { const int batch_size = faces.size(0); const int num_faces = faces.size(1); const int texture_size = textures.size(3); const int threads = 512; const dim3 blocks1 ((batch_size * num_faces - 1) / threads +1); forward_face_index_map_cuda_kernel<<<blocks1, threads>>>( faces.data_ptr<float>(), batch_size, num_faces, image_height, image_width, near, far, face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), depth_map.data_ptr<float>(), lock_map.data_ptr<int32_t>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in forward_face_index_map: %s\n", cudaGetErrorString(err)); } const dim3 blocks2 ((batch_size * image_height * image_width - 1) / threads + 1); forward_texture_sampling_cuda_kernel<<<blocks2, threads>>>( faces.data_ptr<float>(), textures.data_ptr<float>(), face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), batch_size, num_faces, image_height, image_width, texture_size, feature_map.data_ptr<float>()); err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in forward_texture_sampling: %s\n", cudaGetErrorString(err)); } return std::make_tuple(face_index_map, weight_map, depth_map, feature_map); } at::Tensor backward_cuda( const at::Tensor& face_index_map, const at::Tensor& weight_map, 
at::Tensor& grad_feature_map, at::Tensor& grad_textures, int num_faces) { const int batch_size = face_index_map.size(0); const int image_height = face_index_map.size(1); const int image_width = face_index_map.size(2); const int texture_size = grad_textures.size(3); const int threads = 512; const dim3 blocks ((batch_size * image_height * image_width - 1) / threads + 1); backward_cuda_kernel<<<blocks, threads>>>( face_index_map.data_ptr<int32_t>(), weight_map.data_ptr<float>(), grad_feature_map.data_ptr<float>(), grad_textures.data_ptr<float>(), batch_size, num_faces, image_height, image_width, texture_size); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in backward: %s\n", cudaGetErrorString(err)); } return grad_textures; }
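// The forward rasterization kernel above serializes its per-pixel depth update with
// an atomicCAS spin lock. A minimal alternative sketch (not the renderer's method):
// for non-negative depths the IEEE-754 bit pattern is monotone in the float value,
// so the closest-fragment test can be done lock-free with a single atomicMin on a
// uint depth buffer. All names below are illustrative.
#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void depthTestKernel(const float* frag_depth, const int* frag_pixel,
                                unsigned int* depth_bits, int num_frags) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= num_frags) return;
  float d = frag_depth[i];
  if (d < 0.f) return;                     // assumption: depths are non-negative
  unsigned int bits = __float_as_uint(d);  // order-preserving for d >= 0
  atomicMin(&depth_bits[frag_pixel[i]], bits);
}

int main() {
  const int num_frags = 3, num_pixels = 1;
  float h_depth[num_frags] = {5.0f, 2.5f, 7.0f};
  int h_pixel[num_frags] = {0, 0, 0};  // all fragments land on pixel 0
  float* d_depth;
  int* d_pixel;
  unsigned int* d_bits;
  cudaMalloc(&d_depth, sizeof(h_depth));
  cudaMalloc(&d_pixel, sizeof(h_pixel));
  cudaMalloc(&d_bits, num_pixels * sizeof(unsigned int));
  cudaMemcpy(d_depth, h_depth, sizeof(h_depth), cudaMemcpyHostToDevice);
  cudaMemcpy(d_pixel, h_pixel, sizeof(h_pixel), cudaMemcpyHostToDevice);
  unsigned int init = 0x7f800000u;  // bit pattern of +inf ("cleared" depth buffer)
  cudaMemcpy(d_bits, &init, sizeof(init), cudaMemcpyHostToDevice);
  depthTestKernel<<<(num_frags + 255) / 256, 256>>>(d_depth, d_pixel, d_bits,
                                                    num_frags);
  unsigned int result_bits;
  cudaMemcpy(&result_bits, d_bits, sizeof(result_bits), cudaMemcpyDeviceToHost);
  float nearest;
  memcpy(&nearest, &result_bits, sizeof(nearest));
  printf("nearest depth = %f\n", nearest);  // expected 2.5
  cudaFree(d_depth);
  cudaFree(d_pixel);
  cudaFree(d_bits);
  return 0;
}
// Note: unlike the spin lock, a bare atomicMin cannot also record the winning face
// index and barycentric weights in the same atomic step; a second pass (or a packed
// 64-bit atomic) would be needed, which is one reason the kernel above takes the
// lock-based route instead.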
the_stack
#include "nvblox/integrators/cuda/projective_integrators_common.cuh" #include "nvblox/integrators/integrators_common.h" #include "nvblox/utils/timing.h" namespace nvblox { ProjectiveColorIntegrator::ProjectiveColorIntegrator() : ProjectiveIntegratorBase() { sphere_tracer_.params().maximum_ray_length_m = max_integration_distance_m_; checkCudaErrors(cudaStreamCreate(&integration_stream_)); } ProjectiveColorIntegrator::~ProjectiveColorIntegrator() { finish(); checkCudaErrors(cudaStreamDestroy(integration_stream_)); } void ProjectiveColorIntegrator::finish() const { cudaStreamSynchronize(integration_stream_); } void ProjectiveColorIntegrator::integrateFrame( const ColorImage& color_frame, const Transform& T_L_C, const Camera& camera, const TsdfLayer& tsdf_layer, ColorLayer* color_layer, std::vector<Index3D>* updated_blocks) { CHECK_NOTNULL(color_layer); CHECK_EQ(tsdf_layer.block_size(), color_layer->block_size()); // Metric truncation distance for this layer const float voxel_size = color_layer->block_size() / VoxelBlock<bool>::kVoxelsPerSide; const float truncation_distance_m = truncation_distance_vox_ * voxel_size; timing::Timer blocks_in_view_timer("color/integrate/get_blocks_in_view"); std::vector<Index3D> block_indices = getBlocksInView(T_L_C, camera, color_layer->block_size()); blocks_in_view_timer.Stop(); // Check which of these blocks are: // - Allocated in the TSDF, and // - have at least a single voxel within the truncation band // This is because: // - We don't allocate new geometry here, we just color existing geometry // - We don't color freespace. timing::Timer blocks_in_band_timer( "color/integrate/reduce_to_blocks_in_band"); block_indices = reduceBlocksToThoseInTruncationBand(block_indices, tsdf_layer, truncation_distance_m); blocks_in_band_timer.Stop(); // Allocate blocks (CPU) // We allocate color blocks where // - there are allocated TSDF blocks, AND // - these blocks are within the truncation band timing::Timer allocate_blocks_timer("color/integrate/allocate_blocks"); allocateBlocksWhereRequired(block_indices, color_layer); allocate_blocks_timer.Stop(); // Create a synthetic depth image timing::Timer sphere_trace_timer("color/integrate/sphere_trace"); std::shared_ptr<const DepthImage> synthetic_depth_image_ptr = sphere_tracer_.renderImageOnGPU( camera, T_L_C, tsdf_layer, truncation_distance_m, MemoryType::kDevice, depth_render_ray_subsampling_factor_); sphere_trace_timer.Stop(); // Update identified blocks // Calls out to the child-class implementing the integation (GPU) timing::Timer update_blocks_timer("color/integrate/update_blocks"); updateBlocks(block_indices, color_frame, *synthetic_depth_image_ptr, T_L_C, camera, truncation_distance_m, color_layer); update_blocks_timer.Stop(); if (updated_blocks != nullptr) { *updated_blocks = block_indices; } } __device__ inline Color blendTwoColors(const Color& first_color, float first_weight, const Color& second_color, float second_weight) { float total_weight = first_weight + second_weight; first_weight /= total_weight; second_weight /= total_weight; Color new_color; new_color.r = static_cast<uint8_t>(std::round( first_color.r * first_weight + second_color.r * second_weight)); new_color.g = static_cast<uint8_t>(std::round( first_color.g * first_weight + second_color.g * second_weight)); new_color.b = static_cast<uint8_t>(std::round( first_color.b * first_weight + second_color.b * second_weight)); return new_color; } __device__ inline bool updateVoxel(const Color color_measured, ColorVoxel* voxel_ptr, const float voxel_depth_m, 
const float truncation_distance_m, const float max_weight) { // NOTE(alexmillane): We integrate all voxels passed to this function, We // should probably not do this. We should no update some based on occlusion // and their distance in the distance field.... // TODO(alexmillane): The above. // Read CURRENT voxel values (from global GPU memory) const Color voxel_color_current = voxel_ptr->color; const float voxel_weight_current = voxel_ptr->weight; // Fuse constexpr float measurement_weight = 1.0f; const Color fused_color = blendTwoColors(voxel_color_current, voxel_weight_current, color_measured, measurement_weight); const float weight = fmin(measurement_weight + voxel_weight_current, max_weight); // Write NEW voxel values (to global GPU memory) voxel_ptr->color = fused_color; voxel_ptr->weight = weight; return true; } __global__ void integrateBlocks( const Index3D* block_indices_device_ptr, const Camera camera, const Color* color_image, const int color_rows, const int color_cols, const float* depth_image, const int depth_rows, const int depth_cols, const Transform T_C_L, const float block_size, const float truncation_distance_m, const float max_weight, const float max_integration_distance, const int depth_subsample_factor, ColorBlock** block_device_ptrs) { // Get - the image-space projection of the voxel associated with this thread // - the depth associated with the projection. Eigen::Vector2f u_px; float voxel_depth_m; if (!projectThreadVoxel(block_indices_device_ptr, camera, T_C_L, block_size, &u_px, &voxel_depth_m)) { return; } // If voxel further away than the limit, skip this voxel if (max_integration_distance > 0.0f) { if (voxel_depth_m > max_integration_distance) { return; } } const Eigen::Vector2f u_px_depth = u_px / static_cast<float>(depth_subsample_factor); float surface_depth_m; if (!interpolation::interpolate2DLinear<float>( depth_image, u_px_depth, depth_rows, depth_cols, &surface_depth_m)) { return; } // Occlusion testing // Get the distance of the voxel from the rendered surface. If outside // truncation band, skip. const float voxel_distance_from_surface = surface_depth_m - voxel_depth_m; if (fabsf(voxel_distance_from_surface) > truncation_distance_m) { return; } Color image_value; if (!interpolation::interpolate2DLinear<Color>(color_image, u_px, color_rows, color_cols, &image_value)) { return; } // Get the Voxel we'll update in this thread // NOTE(alexmillane): Note that we've reverse the voxel indexing order such // that adjacent threads (x-major) access adjacent memory locations in the // block (z-major). 
ColorVoxel* voxel_ptr = &(block_device_ptrs[blockIdx.x] ->voxels[threadIdx.z][threadIdx.y][threadIdx.x]); // Update the voxel using the update rule for this layer type updateVoxel(image_value, voxel_ptr, voxel_depth_m, truncation_distance_m, max_weight); } void ProjectiveColorIntegrator::updateBlocks( const std::vector<Index3D>& block_indices, const ColorImage& color_frame, const DepthImage& depth_frame, const Transform& T_L_C, const Camera& camera, const float truncation_distance_m, ColorLayer* layer_ptr) { CHECK_NOTNULL(layer_ptr); CHECK_EQ(color_frame.rows() % depth_frame.rows(), 0); CHECK_EQ(color_frame.cols() % depth_frame.cols(), 0); if (block_indices.empty()) { return; } const int num_blocks = block_indices.size(); const int depth_subsampling_factor = color_frame.rows() / depth_frame.rows(); CHECK_EQ(color_frame.cols() / depth_frame.cols(), depth_subsampling_factor); // Expand the buffers when needed if (num_blocks > block_indices_device_.size()) { constexpr float kBufferExpansionFactor = 1.5f; const int new_size = static_cast<int>(kBufferExpansionFactor * num_blocks); block_indices_device_.reserve(new_size); block_ptrs_device_.reserve(new_size); block_indices_host_.reserve(new_size); block_ptrs_host_.reserve(new_size); } // Stage on the host pinned memory block_indices_host_ = block_indices; block_ptrs_host_ = getBlockPtrsFromIndices(block_indices, layer_ptr); // Transfer to the device block_indices_device_ = block_indices_host_; block_ptrs_device_ = block_ptrs_host_; // We need the inverse transform in the kernel const Transform T_C_L = T_L_C.inverse(); // Kernel call - One ThreadBlock launched per VoxelBlock constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide; const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); const int num_thread_blocks = block_indices.size(); // clang-format off integrateBlocks<<<num_thread_blocks, kThreadsPerBlock, 0, integration_stream_>>>( block_indices_device_.data(), camera, color_frame.dataConstPtr(), color_frame.rows(), color_frame.cols(), depth_frame.dataConstPtr(), depth_frame.rows(), depth_frame.cols(), T_C_L, layer_ptr->block_size(), truncation_distance_m, max_weight_, max_integration_distance_m_, depth_subsampling_factor, block_ptrs_device_.data()); // clang-format on checkCudaErrors(cudaPeekAtLastError()); // Finish processing of the frame before returning control finish(); } __global__ void checkBlocksInTruncationBand( const VoxelBlock<TsdfVoxel>** block_device_ptrs, const float truncation_distance_m, bool* contains_truncation_band_device_ptr) { // A single thread in each block initializes the output to 0 if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { contains_truncation_band_device_ptr[blockIdx.x] = 0; } __syncthreads(); // Get the Voxel we'll check in this thread const TsdfVoxel voxel = block_device_ptrs[blockIdx.x] ->voxels[threadIdx.z][threadIdx.y][threadIdx.x]; // If this voxel in the truncation band, write the flag to say that the block // should be processed. // NOTE(alexmillane): There will be collision on write here. However, from my // reading, all threads' writes will result in a single write to global // memory. Because we only write a single value (1) it doesn't matter which // thread "wins". 
if (std::abs(voxel.distance) <= truncation_distance_m) { contains_truncation_band_device_ptr[blockIdx.x] = true; } } std::vector<Index3D> ProjectiveColorIntegrator::reduceBlocksToThoseInTruncationBand( const std::vector<Index3D>& block_indices, const TsdfLayer& tsdf_layer, const float truncation_distance_m) { // Check 1) Are the blocks allocated // - performed on the CPU because the hash-map is on the CPU std::vector<Index3D> block_indices_check_1; block_indices_check_1.reserve(block_indices.size()); for (const Index3D& block_idx : block_indices) { if (tsdf_layer.isBlockAllocated(block_idx)) { block_indices_check_1.push_back(block_idx); } } if (block_indices_check_1.empty()) { return block_indices_check_1; } // Check 2) Does each of the blocks have a voxel within the truncation band // - performed on the GPU because the blocks are there // Get the blocks we need to check std::vector<const TsdfBlock*> block_ptrs = getBlockPtrsFromIndices(block_indices_check_1, tsdf_layer); const int num_blocks = block_ptrs.size(); // Expand the buffers when needed if (num_blocks > truncation_band_block_ptrs_device_.size()) { constexpr float kBufferExpansionFactor = 1.5f; const int new_size = static_cast<int>(kBufferExpansionFactor * num_blocks); truncation_band_block_ptrs_host_.reserve(new_size); truncation_band_block_ptrs_device_.reserve(new_size); block_in_truncation_band_device_.reserve(new_size); block_in_truncation_band_host_.reserve(new_size); } // Host -> Device truncation_band_block_ptrs_host_ = block_ptrs; truncation_band_block_ptrs_device_ = truncation_band_block_ptrs_host_; // Prepare output space block_in_truncation_band_device_.resize(num_blocks); // Do the check on GPU // Kernel call - One ThreadBlock launched per VoxelBlock constexpr int kVoxelsPerSide = VoxelBlock<bool>::kVoxelsPerSide; const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); const int num_thread_blocks = num_blocks; // clang-format off checkBlocksInTruncationBand<<<num_thread_blocks, kThreadsPerBlock, 0, integration_stream_>>>( truncation_band_block_ptrs_device_.data(), truncation_distance_m, block_in_truncation_band_device_.data()); // clang-format on checkCudaErrors(cudaStreamSynchronize(integration_stream_)); checkCudaErrors(cudaPeekAtLastError()); // Copy results back block_in_truncation_band_host_ = block_in_truncation_band_device_; // Filter the indices using the result std::vector<Index3D> block_indices_check_2; block_indices_check_2.reserve(block_indices_check_1.size()); for (int i = 0; i < block_indices_check_1.size(); i++) { if (block_in_truncation_band_host_[i] == true) { block_indices_check_2.push_back(block_indices_check_1[i]); } } return block_indices_check_2; } } // namespace nvblox
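// A standalone sketch of the launch pattern used throughout the integrator above:
// one CUDA thread block per 8x8x8 voxel block, one thread per voxel, and a single
// benign-race flag write per block, mirroring checkBlocksInTruncationBand. The
// flat distance-array layout and all names are assumptions, not the nvblox API.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

constexpr int kVoxelsPerSide = 8;
constexpr int kVoxelsPerBlock = kVoxelsPerSide * kVoxelsPerSide * kVoxelsPerSide;

__global__ void flagBlocksInBand(const float* voxel_distances,  // [num_blocks * 512]
                                 float truncation_distance_m, bool* block_in_band) {
  // One thread clears the flag, then every thread checks its own voxel.
  if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
    block_in_band[blockIdx.x] = false;
  }
  __syncthreads();
  int voxel =
      (threadIdx.z * kVoxelsPerSide + threadIdx.y) * kVoxelsPerSide + threadIdx.x;
  float d = voxel_distances[blockIdx.x * kVoxelsPerBlock + voxel];
  if (fabsf(d) <= truncation_distance_m) {
    block_in_band[blockIdx.x] = true;  // benign race: every writer stores `true`
  }
}

int main() {
  const int num_blocks = 2;
  std::vector<float> h_dist(num_blocks * kVoxelsPerBlock, 1.0f);  // far from surface
  h_dist[kVoxelsPerBlock + 42] = 0.01f;  // one in-band voxel inside block 1
  float* d_dist;
  bool* d_flags;
  cudaMalloc(&d_dist, h_dist.size() * sizeof(float));
  cudaMalloc(&d_flags, num_blocks * sizeof(bool));
  cudaMemcpy(d_dist, h_dist.data(), h_dist.size() * sizeof(float),
             cudaMemcpyHostToDevice);
  dim3 threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide);
  flagBlocksInBand<<<num_blocks, threads>>>(d_dist, 0.1f, d_flags);
  bool h_flags[num_blocks];
  cudaMemcpy(h_flags, d_flags, sizeof(h_flags), cudaMemcpyDeviceToHost);
  printf("block 0 in band: %d, block 1 in band: %d\n", h_flags[0], h_flags[1]);
  cudaFree(d_dist);
  cudaFree(d_flags);
  return 0;
}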
the_stack
#include <cfloat> #include <fstream> #include <iostream> #include <thread> #define TINYOBJLOADER_IMPLEMENTATION #define USE_PNG #include <OpenGP/GL/Application.h> #include <OpenGP/GL/Components/GUICanvasComponent.h> #include <OpenGP/GL/ImguiRenderer.h> #include <OpenGP/Image/Image.h> #include "OctopusComponent.h" #include "Scene.h" #include "CollisionGrid.cuh" #define OPENGP_IMPLEMENT_ALL_IN_THIS_FILE #include <OpenGP/util/implementations.h> using namespace OpenGP; int main(int argc, char **argv) { int shadow_size = 2048; Application app; Scene scene; auto &light_entity = scene.create_entity_with<CameraComponent>(); light_entity.get<TransformComponent>().set_forward( Vec3(-1, -2, 0).normalized()); light_entity.get<TransformComponent>().position = Vec3(50, 100, 0); Mat4x4 shadow_matrix = (light_entity.get_projection(shadow_size, shadow_size) * light_entity.get_view()); auto &floor_entity = scene.create_entity_with<WorldRenderComponent>(); auto &floor_renderer = floor_entity.set_renderer<SurfaceMeshRenderer>(); floor_renderer.get_gpu_mesh().set_vpoint( {Vec3(-10000, 0, -10000), Vec3(10000, 0, -10000), Vec3(-10000, 0, 10000), Vec3(10000, 0, 10000)}); floor_renderer.get_gpu_mesh().set_vnormal( {Vec3(0, 1, 0), Vec3(0, 1, 0), Vec3(0, 1, 0), Vec3(0, 1, 0)}); floor_renderer.get_gpu_mesh().set_triangles({0, 1, 2, 1, 2, 3}); Material floormat(R"GLSL( uniform sampler2D shadow_map; uniform vec3 light_pos; uniform mat4 shadow_matrix; uniform float shadow_near; uniform float shadow_far; vec3 world2uvdepth(vec3 pos, mat4 mat) { vec4 a = mat * vec4(pos, 1); vec3 b = a.xyz / a.w; return (b + vec3(1)) / 2; } float get_shadow_mask(vec2 uv) { return 1 - smoothstep(0.3, 0.5, length(uv - vec2(0.5, 0.5))); } vec3 get_ambient(vec3 pos) { vec3 ambient = vec3(0.14, 0.14, 0.18); vec3 uvd = world2uvdepth(pos, shadow_matrix); return ambient + vec3(0.2) * get_shadow_mask(uvd.xy); } float linear_shadow_depth(float d) { return shadow_near * shadow_far / (shadow_far + d * (shadow_near - shadow_far)); } float get_shadow(vec3 pos) { ivec2 dim = textureSize(shadow_map, 0); vec3 uvd = world2uvdepth(pos, shadow_matrix); vec2 base_coord = uvd.xy * dim; ivec2 base_coord_i = ivec2(floor(base_coord)); vec2 inter = fract(base_coord); mat4 shadow_depths; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { shadow_depths[i][j] = linear_shadow_depth(texelFetch(shadow_map, base_coord_i + ivec2(i-1, j-1), 0).r); } } float threshold = linear_shadow_depth(uvd.z) - 0.1; mat2 pcf_vals = mat2(0); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { for (int x = 0; x < 3; ++x) { for (int y = 0; y < 3; ++y) { pcf_vals[i][j] += (shadow_depths[x + i][y + j] < threshold) ? 
0 : (1.0 / 9.0); } } } } float a = mix(pcf_vals[0][0], pcf_vals[1][0], inter.x); float b = mix(pcf_vals[0][1], pcf_vals[1][1], inter.x); return mix(a, b, inter.y) * get_shadow_mask(uvd.xy); } vec4 fragment_shade() { vec3 pos = get_position(); vec3 lightdir = normalize(light_pos - pos); vec3 white_color = vec3(1, 1, 1); vec3 black_color = vec3(0.6, 0.6, 0.6); vec3 background = (white_color + black_color) / 2; vec3 diffuse_color = white_color; vec3 modpos = mod(pos / 5, 1); if ((modpos.x < 0.5) ^^ (modpos.z < 0.5)) { diffuse_color = black_color; } float blur = exp(-2 * max(length(dFdx(pos)), length(dFdy(pos)))); blur = clamp(2 * blur, 0, 1); diffuse_color = mix(background, diffuse_color, blur); vec3 ambient = get_ambient(pos); float shadow = get_shadow(pos); vec3 out_color = shadow * 0.85 * clamp(dot(get_normal(), normalize(lightdir)), 0, 1) * diffuse_color; out_color += ambient * diffuse_color; return vec4(out_color, 1); } )GLSL"); floormat.set_property("ao_map", 6); floormat.set_property("shadow_map", 7); floormat.set_property("shadow_matrix", shadow_matrix); floormat.set_property("light_pos", light_entity.get<TransformComponent>().position); floormat.set_property("shadow_near", light_entity.near_plane); floormat.set_property("shadow_far", light_entity.far_plane); floor_renderer.set_material(floormat); floor_renderer.rebuild(); viper::Scene sim_scene; OctopusComponent::v_scene = &sim_scene; auto &octoswarm = scene.create_entity_with<OctopusComponent>(); octoswarm.renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.renderer->get_material().set_property("shadow_near", light_entity.near_plane); octoswarm.renderer->get_material().set_property("shadow_far", light_entity.far_plane); octoswarm.sphere_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.sphere_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.sphere_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.sphere_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.tsphere_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.tsphere_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.tsphere_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.tsphere_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.cannonball_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.cannonball_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.cannonball_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.cannonball_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); octoswarm.pillar_renderer->get_material().set_property("shadow_matrix", shadow_matrix); octoswarm.pillar_renderer->get_material().set_property( "light_pos", light_entity.get<TransformComponent>().position); octoswarm.pillar_renderer->get_material().set_property( "shadow_near", light_entity.near_plane); octoswarm.pillar_renderer->get_material().set_property( "shadow_far", light_entity.far_plane); auto &c_entity = scene.create_entity_with<TrackballComponent>(); c_entity.oriented = true; 
int ww = 3840, wh = 1080; Framebuffer fb, fb_shadow; RGB8Texture color_map, color_map_shadow; D32FTexture depth_map, depth_map_shadow; auto realloc = [&](int w, int h) { color_map.allocate(w, h); depth_map.allocate(w, h); }; realloc(ww, wh); depth_map_shadow.allocate(shadow_size, shadow_size); color_map_shadow.allocate(shadow_size, shadow_size); fb.attach_color_texture(color_map); fb.attach_depth_texture(depth_map); fb_shadow.attach_color_texture(color_map_shadow); fb_shadow.attach_depth_texture(depth_map_shadow); RGB8Texture colmap; Image<Eigen::Matrix<uint8_t, 3, 1>> colmap_cpu(2048, 2048); std::ifstream("texture.bin", std::ios::binary).read( reinterpret_cast<char*>(&colmap_cpu(0, 0)), 12582912); colmap.upload(colmap_cpu); FullscreenQuad fsquad; bool show_pills = false; bool splitscreen = false; auto set_pill_visibility = [&](bool visible) { show_pills = visible; octoswarm.render_comp->visible = !visible; octoswarm.sphere_render_comp->visible = visible; octoswarm.vis_update(); }; auto draw_scene = [&](int width, int height, int x, int y) { //====================================================================== // Draw shadow map fb_shadow.bind(); light_entity.draw(shadow_size, shadow_size); fb_shadow.unbind(); //====================================================================== // Draw scene with shadows fb.bind(); glActiveTexture(GL_TEXTURE5); colmap.bind(); glActiveTexture(GL_TEXTURE7); depth_map_shadow.bind(); glActiveTexture(GL_TEXTURE0); auto &cam = c_entity.get<CameraComponent>(); cam.draw(color_map.get_width(), color_map.get_height(), 0, 0, false); if (octoswarm.sphere_render_comp->visible) { RenderContext context; glDepthMask(GL_FALSE); context.aspect = (float)color_map.get_width() / (float)color_map.get_height(); context.vfov = cam.vfov; context.near = cam.near_plane; context.far = cam.far_plane; context.eye = cam.get<TransformComponent>().position; context.forward = cam.get<TransformComponent>().forward(); context.up = cam.get<TransformComponent>().up(); context.update_view(); context.update_projection(); auto &renderable = *octoswarm.tsphere_render_comp; auto &transform = renderable.get<TransformComponent>(); context.translation = transform.position; context.scale = transform.scale; context.rotation = transform.rotation; context.update_model(); glEnable(GL_DEPTH_TEST); renderable.get_renderer().render(context); glDepthMask(GL_TRUE); } cam.draw_gui(); fb.unbind(); //====================================================================== // Draw color map to window glViewport(x, y, width, height); fsquad.draw_texture(color_map); }; auto &window = app.create_window([&](Window &window) { std::tie(ww, wh) = window.get_size(); int fbw_new = splitscreen ? 
ww / 2 : ww; int fbh_new = wh; int fbw = color_map.get_width(); int fbh = color_map.get_height(); if (fbw_new != fbw || fbh_new != fbh) { realloc(fbw_new, fbh_new); } if (splitscreen) { set_pill_visibility(false); draw_scene(ww / 2, wh, 0, 0); set_pill_visibility(true); draw_scene(ww / 2, wh, ww / 2, 0); } else { octoswarm.vis_update(); draw_scene(ww, wh, 0, 0); } }); window.set_size(ww, wh); window.set_title("VIPER Demo"); auto &input = window.get_input(); c_entity.get<CameraComponent>().set_window(window); c_entity.center = Vec3(0, 1, 0); c_entity.get<TransformComponent>().position = Vec3(-12, 1, 0); auto &bsphere_entity = scene.create_entity_with<WorldRenderComponent>(); auto &bsphere_renderer = bsphere_entity.set_renderer<SphereMeshRenderer>(); auto get_mouse_ray = [&](Vec3 &eye, Vec3 &dir) { Vec2 pos = input.mouse_position; pos[1] = wh - pos[1]; int w = splitscreen ? ww / 2 : ww; pos = 2 * pos.cwiseQuotient(Vec2(w, wh)) - Vec2(1, 1); Vec4 cs(pos[0], pos[1], 0.1, 1); auto &cam = c_entity.get<CameraComponent>(); Mat4x4 inv_mat = (cam.get_projection(w, wh) * cam.get_view()).inverse(); Vec4 world = inv_mat * cs; Vec3 p = world.head<3>() / world[3]; eye = c_entity.get<TransformComponent>().position; dir = (p - eye).normalized(); }; int framerate = 0; double frametime = 0; double sim_frametime = 0; float playback = 1.0; int it_count = 10; bool hide_gui = false; bool simulating = true; bool single_step = false; bool bsphere_vis = false; std::vector<float> framerates(120); auto set_defaults = [&]() { show_pills = false; octoswarm.render_comp->visible = !show_pills; octoswarm.sphere_render_comp->visible = show_pills; it_count = 10; sim_scene.gravity_strength = 1.0; playback = 1.0; }; set_defaults(); auto &canvas = scene.create_entity_with<GUICanvasComponent>(); canvas.set_action([&]() { if (hide_gui) return; ImGui::SetNextWindowSize(ImVec2(400, 500)); ImGui::Begin("Controls", nullptr, ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoSavedSettings); char fr_label[256]; sprintf(fr_label, "Framerate %i fps\n Total: %3.1f ms\n Sim: %3.1f ms", framerate, frametime, sim_frametime); ImGui::PlotLines(fr_label, &(framerates[0]), framerates.size(), 0, "", 0, 60); ImGui::Separator(); if (ImGui::Button("Reset")) { octoswarm.reset(); } ImGui::SameLine(0, 4); const char *bname = simulating ? 
"Pause" : "Resume"; if (ImGui::Button(bname)) { simulating = !simulating; } if (!simulating) { ImGui::SameLine(0, 4); if (ImGui::Button("Step")) { single_step = true; } } ImGui::Checkbox("Split Screen", &splitscreen); if (ImGui::Checkbox("Show Primitives", &show_pills)) { set_pill_visibility(show_pills); } ImGui::SliderFloat("Gravity", &sim_scene.gravity_strength, -1.0f, 3.0f); ImGui::SliderInt("Solver Iterations", &it_count, 0, 50); if (ImGui::Button("Set Defaults")) { set_defaults(); octoswarm.vis_update(); } ImGui::Separator(); const char *const scenes[] = {"Empty", "Pillars", "Cannonballs", "Explosion"}; if (ImGui::ListBox("Scenes", &octoswarm.scene_index, scenes, sizeof(scenes) / sizeof(scenes[0]))) { octoswarm.reset(); } ImGui::Separator(); ImGui::LabelText("Controls", "Look: Middle Mouse"); ImGui::Text("Recenter: Right Mouse"); ImGui::Text("Pan: Shift + Middle Mouse"); ImGui::Text("Grab: Left Mouse"); ImGui::Text("Shoot: Spacebar"); ImGui::Text("Toggle Primitives: F10"); ImGui::Text("Pause/Resume: F11"); ImGui::Text("Show/Hide Window: F12"); ImGui::End(); }); canvas.set_camera(c_entity.get<CameraComponent>()); int chambered_cow = 0; long frame = 0; long sim_frame = 0; double last_time = glfwGetTime(); double frame_avg = 0; double sim_frame_avg = 0; int held = 0; int selected = -1; bool swapped_pills = false; bool swapped_pause = false; bool swapped_window = false; bool recentered = false; app.add_listener<ApplicationUpdateEvent>( [&](const ApplicationUpdateEvent &) { SphereMesh temp_smesh; auto vs_temp = temp_smesh.add_vertex(viper::CollisionGrid::b_sphere); temp_smesh.add_sphere(vs_temp); bsphere_entity.visible = bsphere_vis; bsphere_renderer.upload_mesh(temp_smesh); if (input.get_mouse(0)) { Vec3 eye, dir; get_mouse_ray(eye, dir); if (selected == -1) { selected = octoswarm.intersect(eye, dir); sim_scene.state.xa[selected] = 0; } else { Vec3 p = sim_scene.state.x[selected]; Vec3 x = p - eye; Vec3 new_pos = p - (x - dir * dir.dot(x)); new_pos[1] = std::max(new_pos[1], sim_scene.state.r[selected]); sim_scene.state.x[selected] = new_pos; sim_scene.state.xp[selected] = sim_scene.state.x[selected]; } } else if (selected != -1) { sim_scene.state.xa[selected] = 1; selected = -1; } if (input.get_mouse(1)) { Image<float> depth_im; depth_map.download(depth_im); int mxi = int(input.mouse_position[0]); int myi = int(wh - input.mouse_position[1]); auto &cam = c_entity.get<CameraComponent>(); Mat4x4 inv_mat = (cam.get_projection() * cam.get_view()).inverse(); if (!recentered && !(mxi < 0 || mxi >= ww || myi < 0 || myi >= wh)) { Vec3 uvdepth; uvdepth.head<2>() = Vec2(float(mxi) / ww, float(myi) / wh); uvdepth[2] = min(depth_im(myi, mxi), 0.999); Vec4 dev(0, 0, 0, 1); dev.head<3>() = 2 * uvdepth - Vec3::Ones(); Vec4 world_h = inv_mat * dev; Vec3 new_center = world_h.head<3>() / world_h[3]; Vec3 dc = new_center - c_entity.center; c_entity.center += dc; c_entity.get<TransformComponent>().position += dc; recentered = true; } } else { recentered = false; } if (input.get_key(GLFW_KEY_F10)) { if (!swapped_pills) { set_pill_visibility(!show_pills); swapped_pills = true; } } else { swapped_pills = false; } if (input.get_key(GLFW_KEY_F11)) { if (!swapped_pause) { simulating = !simulating; swapped_pause = true; } } else { swapped_pause = false; } if (input.get_key(GLFW_KEY_F12)) { if (!swapped_window) { hide_gui = !hide_gui; swapped_window = true; } } else { swapped_window = false; } if (input.get_key(GLFW_KEY_SPACE)) { if ((held % 5) == 0) { Vec3 p = c_entity.get<TransformComponent>().position; Vec3 v = 
c_entity.get<TransformComponent>().forward(); octoswarm.set_position(chambered_cow, p + 3 * v, v); chambered_cow = (chambered_cow + 1) % octoswarm.n_cows; } held++; } else { held = 0; } double frame_time = 0.0; double this_time = last_time; while (frame_time < 0.016667) { this_time = glfwGetTime(); frame_time = this_time - last_time; std::this_thread::yield(); } last_time = this_time; framerates.erase(framerates.begin()); framerates.push_back(1.0 / frame_time); frame_avg += frame_time; if ((frame % 10) == 0) { frametime = 1000 * frame_avg / 10.0; framerate = 0.5 + 10.0 / frame_avg; frame_avg = 0; } if (simulating || single_step) { double sim_time = sim_scene.step(playback / 60.f, it_count, true); sim_frame_avg += sim_time; if ((sim_frame % 10) == 0) { sim_frametime = sim_frame_avg / 10.0; sim_frame_avg = 0; } single_step = false; sim_frame++; } scene.update(); frame++; }); app.run(); return 0; }
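// A minimal host-only sketch of the unprojection inside get_mouse_ray above: map a
// pixel to normalized device coordinates, push it through the inverse of
// projection * view, and form a ray from the eye through the resulting world point.
// Plain Eigen types stand in for the OpenGP camera components; the function name
// and the identity-camera test values are illustrative assumptions.
#include <iostream>
#include <Eigen/Dense>

// Returns the world-space direction of the ray through pixel (px, py).
Eigen::Vector3f mouseRayDirection(const Eigen::Matrix4f& proj,
                                  const Eigen::Matrix4f& view,
                                  const Eigen::Vector3f& eye, float px, float py,
                                  int width, int height) {
  // Pixel -> NDC in [-1, 1], flipping y so +y points up as in the demo code.
  float ndc_x = 2.f * px / width - 1.f;
  float ndc_y = 1.f - 2.f * py / height;
  Eigen::Vector4f clip(ndc_x, ndc_y, 0.1f, 1.f);  // a point just past the near plane
  Eigen::Matrix4f inv = (proj * view).inverse();
  Eigen::Vector4f world_h = inv * clip;
  Eigen::Vector3f world = world_h.head<3>() / world_h.w();  // perspective divide
  return (world - eye).normalized();
}

int main() {
  // With identity matrices and the eye at the origin, the center pixel unprojects
  // to (0, 0, 0.1), so the ray direction should come out as (0, 0, 1).
  Eigen::Matrix4f I = Eigen::Matrix4f::Identity();
  Eigen::Vector3f dir =
      mouseRayDirection(I, I, Eigen::Vector3f::Zero(), 320.f, 240.f, 640, 480);
  std::cout << "ray direction: " << dir.transpose() << std::endl;
  return 0;
}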
the_stack
struct EmbedInitParams { DnnHandle handle; int batchSize, outputSize, vocabSize; }; Tensor RnnModel::add_embed_node(Tensor x, int vocab_size, int output_size, ParallelConfig pc, SharedVariable params) { assert(x.numDim == 2); assert(x.adim[1] == LSTM_PER_NODE_LENGTH); assert(x.pdim[1] == LSTM_PER_NODE_LENGTH); Embed* node = new Embed(config, x, vocab_size, output_size, pc, params); layers.push_back(node); return node->outputs[0]; } Embed::Embed(RnnConfig config, Tensor x, int _vocab_size, int _output_size, ParallelConfig pc, SharedVariable _params) : RnnOp(x, pc, _params), batchSize(x.adim[0]), vocabSize(_vocab_size), outputSize(_output_size) { Context ctx = config.lg_ctx; HighLevelRuntime* runtime = config.lg_hlr; assert(pc.nDims == 1); { Rect<1> rect(Point<1>(0), Point<1>(pc.dim[0]-1)); part_rect = rect; } IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect); FieldSpace fs = config.field_space; Rect<3, coord_t> y_rect(Point<3>(0, 0, 0), Point<3>(outputSize-1, batchSize-1, LSTM_PER_NODE_LENGTH-1)); IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect); LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs); LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs); int num_par_n = part_rect.hi[0] - part_rect.lo[0] + 1; assert(batchSize % num_par_n == 0); int extent_n = batchSize / num_par_n; int extent_c = outputSize; Rect<3, coord_t> extent(Point<3>(0, 0, 0), Point<3>(extent_c-1, extent_n-1, LSTM_PER_NODE_LENGTH-1)); Transform<3, 1, coord_t> trans; trans[0][0] = 0; trans[1][0] = extent_n; trans[2][0] = 0; IndexPartition y_ip = runtime->create_partition_by_restriction(ctx, y_is, part_is, trans, extent); assert(runtime->is_index_partition_disjoint(ctx, y_ip)); assert(runtime->is_index_partition_complete(ctx, y_ip)); LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip); LogicalPartition y_grad_lp = runtime->get_logical_partition(ctx, y_grad_lr, y_ip); outputs[0].region = y_lr; outputs[0].region_grad = y_grad_lr; outputs[0].partition = y_lp; outputs[0].partition_grad = y_grad_lp; outputs[0].numDim = 3; outputs[0].adim[0] = outputSize; outputs[0].adim[1] = batchSize; outputs[0].adim[2] = LSTM_PER_NODE_LENGTH; outputs[0].pdim[0] = extent_c; outputs[0].pdim[1] = extent_n; outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH; } /* regions[0] (I): x regions[1] (I): w regions[2] (O): y */ OpMeta* Embed::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); const EmbedInitParams* embed = (EmbedInitParams*) task->args; Rect<2> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_w = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); assert(rect_x.hi[0] - rect_x.lo[0] + 1 == embed->batchSize); assert(rect_x.hi[1] - rect_x.lo[1] + 1 == LSTM_PER_NODE_LENGTH); assert(rect_w.hi[0] - rect_w.lo[0] + 1 == embed->vocabSize * embed->outputSize); assert(rect_y.hi[0] - rect_y.lo[0] + 1 == embed->outputSize); assert(rect_y.hi[1] - rect_y.lo[1] + 1 == embed->batchSize); assert(rect_y.hi[2] - rect_y.lo[2] + 1 == LSTM_PER_NODE_LENGTH); EmbedMeta* m = new EmbedMeta(embed->handle); m->profiling_runtime = false; return m; } void Embed::init(const RnnModel& model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; for 
(PointInRectIterator<1> it(part_rect); it(); it++, idx++) { EmbedInitParams initParams; initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]]; initParams.batchSize = outputs[0].pdim[1]; initParams.outputSize = outputs[0].pdim[0]; initParams.vocabSize = vocabSize; // batch is the first dim of input and the second dim of output assert(inputs[0].pdim[0] == outputs[0].pdim[1]); TaskLauncher launcher(EMBED_INIT_TASK_ID, TaskArgument(&initParams, sizeof(initParams)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); { LogicalRegion x = runtime->get_logical_subregion_by_color(inputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } launcher.add_region_requirement( RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region)); launcher.add_field(1, FID_DATA); { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); } Future f = runtime->execute_task(ctx, launcher); meta[idx] = f.get_result<OpMeta*>(); } } __global__ void embedForward(const int* x_ptr, const float* embed, float* y_ptr, coord_t numElements, int shift, int outputSize) { CUDA_KERNEL_LOOP(i, numElements) { int idx = i >> shift; int off = i & (outputSize - 1); int wordIdx = x_ptr[idx]; y_ptr[i] = embed[(wordIdx << shift) + off]; } } __global__ void embedBackward(const int* x_ptr, float* embed, const float* y_ptr, coord_t numElements, int shift, int outputSize) { CUDA_KERNEL_LOOP(i, numElements) { int idx = i >> shift; int off = i & (outputSize - 1); int wordIdx = x_ptr[idx]; atomicAdd(embed + (wordIdx << shift) + off, y_ptr[i]); } } /* regions[0](I): x regions[1](I): w regions[2](O): y */ void Embed::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 3); assert(task->regions.size() == 3); const EmbedMeta* m = *((EmbedMeta**) task->args); const AccessorRO<int, 2> acc_x(regions[0], FID_DATA); const AccessorRO<float, 1> acc_w(regions[1], FID_DATA); const AccessorWO<float, 3> acc_y(regions[2], FID_DATA); Rect<2> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_w = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); assert(acc_x.accessor.is_dense_arbitrary(rect_x)); assert(acc_w.accessor.is_dense_arbitrary(rect_w)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); int batch_size = rect_y.hi[1] - rect_y.lo[1] + 1; int output_size = rect_y.hi[0] - rect_y.lo[0] + 1; const int *x_ptr = acc_x.ptr(rect_x.lo); const float *w_ptr = acc_w.ptr(rect_w.lo); float *y_ptr = acc_y.ptr(rect_y.lo); cudaEvent_t t_start, t_end; if (m->profiling_runtime) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } int shift = 0; int size = 1; while (size < output_size) { size = size * 2; shift = shift + 1; } assert(size == output_size); embedForward<<<GET_BLOCKS(rect_y.volume()), CUDA_NUM_THREADS>>>( x_ptr, w_ptr, y_ptr, rect_y.volume(), shift, output_size); if (m->profiling_runtime) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); 
cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Embed forward time = %.2lfms\n", elapsed); } #endif } void Embed::forward(const RnnModel &model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(EMBED_FWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); { LogicalRegion x = runtime->get_logical_subregion_by_color(inputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); } launcher.add_region_requirement( RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region)); launcher.add_field(1, FID_DATA); { LogicalRegion y = runtime->get_logical_subregion_by_color(outputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); } runtime->execute_task(ctx, launcher); } } /* regions[0](I): x regions[1](I/O): w_grad regions[2](I): y_grad */ void Embed::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 3); assert(task->regions.size() == 3); const EmbedMeta* m = *((EmbedMeta**) task->args); const AccessorRO<int, 2> acc_x(regions[0], FID_DATA); const AccessorRW<float, 1> acc_w(regions[1], FID_DATA); const AccessorRO<float, 3> acc_y(regions[2], FID_DATA); Rect<2> rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_w = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<3> rect_y = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); assert(acc_x.accessor.is_dense_arbitrary(rect_x)); assert(acc_w.accessor.is_dense_arbitrary(rect_w)); assert(acc_y.accessor.is_dense_arbitrary(rect_y)); int batch_size = rect_y.hi[1] - rect_y.lo[1] + 1; int output_size = rect_y.hi[0] - rect_y.lo[0] + 1; const int *x_ptr = acc_x.ptr(rect_x.lo); float *w_ptr = acc_w.ptr(rect_w.lo); const float *y_ptr = acc_y.ptr(rect_y.lo); cudaEvent_t t_start, t_end; if (m->profiling_runtime) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } int shift = 0; int size = 1; while (size < output_size) { size = size * 2; shift = shift + 1; } assert(size == output_size); embedBackward<<<GET_BLOCKS(rect_y.volume()), CUDA_NUM_THREADS>>>( x_ptr, w_ptr, y_ptr, rect_y.volume(), shift, output_size); if (m->profiling_runtime) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Embed backward time = %.2lfms\n", elapsed); } #endif } void Embed::backward(const RnnModel &model) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; int idx = 0; for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) { OpMeta* mp = meta[idx]; TaskLauncher launcher(EMBED_BWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)), Predicate::TRUE_PRED, 0/*MapperID*/, RnnMapper::assign_to_gpu(paraConfig.gpu[idx])); DomainPoint dp(*it); { LogicalRegion x = runtime->get_logical_subregion_by_color(inputs[0].partition, dp); launcher.add_region_requirement( RegionRequirement(x, READ_ONLY, EXCLUSIVE, 
inputs[0].region)); launcher.add_field(0, FID_DATA); } launcher.add_region_requirement( RegionRequirement(params.gradients[paraConfig.gpu[idx]], READ_WRITE, EXCLUSIVE, params.gradients[paraConfig.gpu[idx]])); launcher.add_field(1, FID_DATA); { LogicalRegion y_grad = runtime->get_logical_subregion_by_color(outputs[0].partition_grad, dp); launcher.add_region_requirement( RegionRequirement(y_grad, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(2, FID_DATA); } runtime->execute_task(ctx, launcher); } } void Embed::update(const RnnModel &model) {}
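// A standalone sketch of the embedding-lookup kernel used above: because the output
// size is required to be a power of two, the flat element index splits into a word
// index (high bits) and a feature offset (low bits) with a shift and a mask instead
// of a divide and a modulo. The launch wiring below is plain CUDA, not the Legion
// task plumbing in the file, and all names are illustrative.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void embedLookup(const int* word_ids,  // [num_words]
                            const float* table,   // [vocab_size * output_size]
                            float* out,           // [num_words * output_size]
                            int num_elements, int shift, int output_size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= num_elements) return;
  int word = i >> shift;            // which input token this element belongs to
  int off = i & (output_size - 1);  // which feature of that token
  out[i] = table[(word_ids[word] << shift) + off];
}

int main() {
  const int vocab_size = 4, output_size = 8, num_words = 3;  // output_size = 2^k
  int shift = 0;
  while ((1 << shift) < output_size) ++shift;  // shift = log2(output_size)

  float h_table[vocab_size * output_size];
  for (int i = 0; i < vocab_size * output_size; ++i) h_table[i] = float(i);
  int h_words[num_words] = {2, 0, 3};

  float *d_table, *d_out;
  int* d_words;
  cudaMalloc(&d_table, sizeof(h_table));
  cudaMalloc(&d_words, sizeof(h_words));
  cudaMalloc(&d_out, num_words * output_size * sizeof(float));
  cudaMemcpy(d_table, h_table, sizeof(h_table), cudaMemcpyHostToDevice);
  cudaMemcpy(d_words, h_words, sizeof(h_words), cudaMemcpyHostToDevice);

  const int n = num_words * output_size;
  embedLookup<<<(n + 255) / 256, 256>>>(d_words, d_table, d_out, n, shift,
                                        output_size);

  float h_out[num_words * output_size];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  printf("row 0 starts at %.0f (expected %d)\n", h_out[0], 2 * output_size);
  cudaFree(d_table);
  cudaFree(d_words);
  cudaFree(d_out);
  return 0;
}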
the_stack
#define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_of_float16_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <cuda_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; template<typename> void test_cuda_numext() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); bool* d_res_half = (bool*)gpu_device.allocate(num_elem * sizeof(bool)); bool* d_res_float = (bool*)gpu_device.allocate(num_elem * sizeof(bool)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float( d_float, num_elem); Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_half( d_res_half, num_elem); Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_float( d_res_float, num_elem); gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f); gpu_res_float.device(gpu_device) = gpu_float.unaryExpr(Eigen::internal::scalar_isnan_op<float>()); gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().unaryExpr(Eigen::internal::scalar_isnan_op<Eigen::half>()); Tensor<bool, 1> half_prec(num_elem); Tensor<bool, 1> full_prec(num_elem); gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(bool)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(bool)); gpu_device.synchronize(); for (int i = 0; i < num_elem; ++i) { std::cout << "Checking numext " << i << std::endl; VERIFY_IS_EQUAL(full_prec(i), half_prec(i)); } gpu_device.deallocate(d_float); gpu_device.deallocate(d_res_half); gpu_device.deallocate(d_res_float); } #ifdef EIGEN_HAS_CUDA_FP16 template<typename> void test_cuda_conversion() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float( d_float, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_half( d_half, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv( d_conv, num_elem); gpu_float.device(gpu_device) = gpu_float.random(); gpu_half.device(gpu_device) = gpu_float.cast<Eigen::half>(); gpu_conv.device(gpu_device) = gpu_half.cast<float>(); Tensor<float, 1> initial(num_elem); Tensor<float, 1> final(num_elem); gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float)); for (int i = 0; i < num_elem; ++i) { VERIFY_IS_APPROX(initial(i), final(i)); } gpu_device.deallocate(d_float); gpu_device.deallocate(d_half); gpu_device.deallocate(d_conv); } template<typename> void test_cuda_unary() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float( d_float, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half( d_res_half, 
num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float( d_res_float, num_elem); gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f); gpu_res_float.device(gpu_device) = gpu_float.abs(); gpu_res_half.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().cast<float>(); Tensor<float, 1> half_prec(num_elem); Tensor<float, 1> full_prec(num_elem); gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float)); gpu_device.synchronize(); for (int i = 0; i < num_elem; ++i) { std::cout << "Checking unary " << i << std::endl; VERIFY_IS_APPROX(full_prec(i), half_prec(i)); } gpu_device.deallocate(d_float); gpu_device.deallocate(d_res_half); gpu_device.deallocate(d_res_float); } template<typename> void test_cuda_elementwise() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1( d_float1, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2( d_float2, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half( d_res_half, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float( d_res_float, num_elem); gpu_float1.device(gpu_device) = gpu_float1.random(); gpu_float2.device(gpu_device) = gpu_float2.random(); gpu_res_float.device(gpu_device) = (gpu_float1 + gpu_float2) * gpu_float1; gpu_res_half.device(gpu_device) = ((gpu_float1.cast<Eigen::half>() + gpu_float2.cast<Eigen::half>()) * gpu_float1.cast<Eigen::half>()).cast<float>(); Tensor<float, 1> half_prec(num_elem); Tensor<float, 1> full_prec(num_elem); gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float)); gpu_device.synchronize(); for (int i = 0; i < num_elem; ++i) { std::cout << "Checking elemwise " << i << ": full prec = " << full_prec(i) << " vs half prec = " << half_prec(i) << std::endl; VERIFY_IS_APPROX(static_cast<Eigen::half>(full_prec(i)), static_cast<Eigen::half>(half_prec(i))); } gpu_device.deallocate(d_float1); gpu_device.deallocate(d_float2); gpu_device.deallocate(d_res_half); gpu_device.deallocate(d_res_float); } template<typename> void test_cuda_trancendental() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101; float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_float3 = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_res1_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::half* d_res1_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::half* d_res2_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::half* d_res2_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::half* d_res3_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::half* d_res3_float = 
(Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(d_float1, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(d_float2, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float3(d_float3, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_half(d_res1_half, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res1_float(d_res1_float, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_half(d_res2_half, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res2_float(d_res2_float, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_half(d_res3_half, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res3_float(d_res3_float, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res4_half(d_res3_half, num_elem); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res4_float(d_res3_float, num_elem); gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f); gpu_float2.device(gpu_device) = gpu_float2.random() + gpu_float1.constant(0.5f); gpu_float3.device(gpu_device) = gpu_float3.random(); gpu_res1_float.device(gpu_device) = gpu_float1.exp().cast<Eigen::half>(); gpu_res2_float.device(gpu_device) = gpu_float2.log().cast<Eigen::half>(); gpu_res3_float.device(gpu_device) = gpu_float3.log1p().cast<Eigen::half>(); gpu_res4_float.device(gpu_device) = gpu_float3.expm1().cast<Eigen::half>(); gpu_res1_half.device(gpu_device) = gpu_float1.cast<Eigen::half>(); gpu_res1_half.device(gpu_device) = gpu_res1_half.exp(); gpu_res2_half.device(gpu_device) = gpu_float2.cast<Eigen::half>(); gpu_res2_half.device(gpu_device) = gpu_res2_half.log(); gpu_res3_half.device(gpu_device) = gpu_float3.cast<Eigen::half>(); gpu_res3_half.device(gpu_device) = gpu_res3_half.log1p(); gpu_res3_half.device(gpu_device) = gpu_float3.cast<Eigen::half>(); gpu_res3_half.device(gpu_device) = gpu_res3_half.expm1(); Tensor<float, 1> input1(num_elem); Tensor<Eigen::half, 1> half_prec1(num_elem); Tensor<Eigen::half, 1> full_prec1(num_elem); Tensor<float, 1> input2(num_elem); Tensor<Eigen::half, 1> half_prec2(num_elem); Tensor<Eigen::half, 1> full_prec2(num_elem); Tensor<float, 1> input3(num_elem); Tensor<Eigen::half, 1> half_prec3(num_elem); Tensor<Eigen::half, 1> full_prec3(num_elem); gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(input3.data(), d_float3, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res1_half, num_elem*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res2_half, num_elem*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(half_prec3.data(), d_res3_half, num_elem*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec3.data(), d_res3_float, num_elem*sizeof(Eigen::half)); gpu_device.synchronize(); for (int i = 0; i < num_elem; ++i) { std::cout << "Checking elemwise exp " << i << " input = " << input1(i) << " full = " << full_prec1(i) << " half = " << half_prec1(i) << 
std::endl; VERIFY_IS_APPROX(full_prec1(i), half_prec1(i)); } for (int i = 0; i < num_elem; ++i) { std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl; if(std::abs(input2(i)-1.f)<0.05f) // log lacks accurary nearby 1 VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f)); else VERIFY_IS_APPROX(full_prec2(i), half_prec2(i)); } for (int i = 0; i < num_elem; ++i) { std::cout << "Checking elemwise plog1 " << i << " input = " << input3(i) << " full = " << full_prec3(i) << " half = " << half_prec3(i) << std::endl; VERIFY_IS_APPROX(full_prec3(i), half_prec3(i)); } gpu_device.deallocate(d_float1); gpu_device.deallocate(d_float2); gpu_device.deallocate(d_float3); gpu_device.deallocate(d_res1_half); gpu_device.deallocate(d_res1_float); gpu_device.deallocate(d_res2_half); gpu_device.deallocate(d_res2_float); gpu_device.deallocate(d_res3_float); gpu_device.deallocate(d_res3_half); } template<typename> void test_cuda_contractions() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int rows = 23; int cols = 23; int num_elem = rows*cols; float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1( d_float1, rows, cols); Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2( d_float2, rows, cols); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_half( d_res_half, rows, cols); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 2>, Eigen::Aligned> gpu_res_float( d_res_float, rows, cols); gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f); gpu_float2.device(gpu_device) = gpu_float2.random() - gpu_float2.constant(0.5f); typedef Tensor<float, 2>::DimensionPair DimPair; Eigen::array<DimPair, 1> dims(DimPair(1, 0)); gpu_res_float.device(gpu_device) = gpu_float1.contract(gpu_float2, dims).cast<Eigen::half>(); gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().contract(gpu_float2.cast<Eigen::half>(), dims); Tensor<Eigen::half, 2> half_prec(rows, cols); Tensor<Eigen::half, 2> full_prec(rows, cols); gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(Eigen::half)); gpu_device.synchronize(); for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { std::cout << "Checking contract " << i << " " << j << full_prec(i, j) << " " << half_prec(i, j) << std::endl; if (numext::abs(full_prec(i, j) - half_prec(i, j)) > Eigen::half(1e-2f)) { VERIFY_IS_APPROX(full_prec(i, j), half_prec(i, j)); } } } gpu_device.deallocate(d_float1); gpu_device.deallocate(d_float2); gpu_device.deallocate(d_res_half); gpu_device.deallocate(d_res_float); } template<typename> void test_cuda_reductions(int size1, int size2, int redux) { std::cout << "Reducing " << size1 << " by " << size2 << " tensor along dim " << redux << std::endl; Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = size1*size2; int result_size = (redux == 1 ? 
size1 : size2); float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half)); Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half)); Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1( d_float1, size1, size2); Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2( d_float2, size1, size2); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_half( d_res_half, result_size); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 1>, Eigen::Aligned> gpu_res_float( d_res_float, result_size); gpu_float1.device(gpu_device) = gpu_float1.random() * 2.0f; gpu_float2.device(gpu_device) = gpu_float2.random() * 2.0f; Eigen::array<int, 1> redux_dim = {{redux}}; gpu_res_float.device(gpu_device) = gpu_float1.sum(redux_dim).cast<Eigen::half>(); gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum(redux_dim); Tensor<Eigen::half, 1> half_prec(result_size); Tensor<Eigen::half, 1> full_prec(result_size); gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, result_size*sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, result_size*sizeof(Eigen::half)); gpu_device.synchronize(); for (int i = 0; i < result_size; ++i) { std::cout << "EXPECTED " << full_prec(i) << " GOT " << half_prec(i) << std::endl; VERIFY_IS_APPROX(full_prec(i), half_prec(i)); } gpu_device.deallocate(d_float1); gpu_device.deallocate(d_float2); gpu_device.deallocate(d_res_half); gpu_device.deallocate(d_res_float); } template<typename> void test_cuda_reductions() { test_cuda_reductions<void>(13, 13, 0); test_cuda_reductions<void>(13, 13, 1); test_cuda_reductions<void>(35, 36, 0); test_cuda_reductions<void>(35, 36, 1); test_cuda_reductions<void>(36, 35, 0); test_cuda_reductions<void>(36, 35, 1); } template<typename> void test_cuda_full_reductions() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int size = 13; int num_elem = size*size; float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half)); Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half)); Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1( d_float1, size, size); Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2( d_float2, size, size); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_half( d_res_half); Eigen::TensorMap<Eigen::Tensor<Eigen::half, 0>, Eigen::Aligned> gpu_res_float( d_res_float); gpu_float1.device(gpu_device) = gpu_float1.random(); gpu_float2.device(gpu_device) = gpu_float2.random(); gpu_res_float.device(gpu_device) = gpu_float1.sum().cast<Eigen::half>(); gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().sum(); Tensor<Eigen::half, 0> half_prec; Tensor<Eigen::half, 0> full_prec; gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half)); gpu_device.synchronize(); VERIFY_IS_APPROX(full_prec(), half_prec()); gpu_res_float.device(gpu_device) = gpu_float1.maximum().cast<Eigen::half>(); gpu_res_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().maximum(); 
gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half)); gpu_device.synchronize(); VERIFY_IS_APPROX(full_prec(), half_prec()); gpu_device.deallocate(d_float1); gpu_device.deallocate(d_float2); gpu_device.deallocate(d_res_half); gpu_device.deallocate(d_res_float); } template<typename> void test_cuda_forced_evals() { Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); int num_elem = 101;
float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_half1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_half2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float( d_float, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half1( d_res_half1, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Unaligned> gpu_res_half2( d_res_half2, num_elem); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float( d_res_float, num_elem); Eigen::array<int, 1> no_bcast; no_bcast[0] = 1;
gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f); gpu_res_float.device(gpu_device) = gpu_float.abs(); gpu_res_half1.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().eval().cast<float>(); gpu_res_half2.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().broadcast(no_bcast).eval().cast<float>(); Tensor<float, 1> half_prec1(num_elem); Tensor<float, 1> half_prec2(num_elem); Tensor<float, 1> full_prec(num_elem);
gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res_half1, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res_half2, num_elem*sizeof(float)); gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float)); gpu_device.synchronize();
for (int i = 0; i < num_elem; ++i) { std::cout << "Checking forced eval " << i << ": " << full_prec(i) << " vs " << half_prec1(i) << " vs " << half_prec2(i) << std::endl; VERIFY_IS_APPROX(full_prec(i), half_prec1(i)); VERIFY_IS_APPROX(full_prec(i), half_prec2(i)); } gpu_device.deallocate(d_float); gpu_device.deallocate(d_res_half1); gpu_device.deallocate(d_res_half2); gpu_device.deallocate(d_res_float); } #endif
void test_cxx11_tensor_of_float16_cuda() { CALL_SUBTEST_1(test_cuda_numext<void>()); #ifdef EIGEN_HAS_CUDA_FP16 CALL_SUBTEST_1(test_cuda_conversion<void>()); CALL_SUBTEST_1(test_cuda_unary<void>()); CALL_SUBTEST_1(test_cuda_elementwise<void>()); CALL_SUBTEST_1(test_cuda_trancendental<void>()); CALL_SUBTEST_2(test_cuda_contractions<void>()); CALL_SUBTEST_3(test_cuda_reductions<void>()); CALL_SUBTEST_4(test_cuda_full_reductions<void>()); CALL_SUBTEST_5(test_cuda_forced_evals<void>()); #else std::cout << "Half floats are not supported by this version of cuda: skipping the test" << std::endl; #endif }
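#ifdef EIGEN_HAS_CUDA_FP16
/*
 * A minimal sketch of the comparison pattern the tests above all follow: run an
 * expression in float, run the same expression routed through Eigen::half, copy
 * both results back to the host and compare there. It assumes the same
 * unsupported Eigen CXX11 Tensor headers and CUDA fp16 support as the test file
 * itself; the function name and the printed summary are illustrative only and
 * are not wired into the CALL_SUBTEST list above.
 */
static void sketch_half_vs_float_abs(int num_elem) {
  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  float* d_in        = (float*)gpu_device.allocate(num_elem * sizeof(float));
  float* d_res_half  = (float*)gpu_device.allocate(num_elem * sizeof(float));
  float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));

  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> in(d_in, num_elem);
  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> res_half(d_res_half, num_elem);
  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> res_float(d_res_float, num_elem);

  // Same input for both paths, centered around zero so abs() is non-trivial.
  in.device(gpu_device) = in.random() - in.constant(0.5f);
  // Reference path: evaluate entirely in float.
  res_float.device(gpu_device) = in.abs();
  // Half path: round to fp16, evaluate, widen back to float for comparison.
  res_half.device(gpu_device) = in.cast<Eigen::half>().abs().cast<float>();

  Tensor<float, 1> host_half(num_elem), host_float(num_elem);
  gpu_device.memcpyDeviceToHost(host_half.data(), d_res_half, num_elem * sizeof(float));
  gpu_device.memcpyDeviceToHost(host_float.data(), d_res_float, num_elem * sizeof(float));
  gpu_device.synchronize();

  float max_err = 0.0f;
  for (int i = 0; i < num_elem; ++i) {
    float d = host_half(i) - host_float(i);
    if (d < 0.0f) d = -d;
    if (d > max_err) max_err = d;
  }
  std::cout << "max |half - float| for abs(): " << max_err << std::endl;

  gpu_device.deallocate(d_in);
  gpu_device.deallocate(d_res_half);
  gpu_device.deallocate(d_res_float);
}
#endif  // EIGEN_HAS_CUDA_FP16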
#include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_geam<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const float alpha, const float* A, const float beta, const float* B, float* C){ int lda = (TransA == CblasNoTrans) ? N : M; int ldb = (TransB == CblasNoTrans) ? N : M; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), cuTransA, cuTransB, N, M, &alpha, A, lda, &beta, B, ldb, C, N)); } template <> void caffe_gpu_geam<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const double alpha, const double* A, const double beta, const double* B, double* C){ int lda = (TransA == CblasNoTrans) ? N : M; int ldb = (TransB == CblasNoTrans) ? N : M; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), cuTransA, cuTransB, N, M, &alpha, A, lda, &beta, B, ldb, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void 
caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <typename Dtype> __global__ void keep_same_direction_kernel(const int n, const Dtype* X, Dtype* Y) { CUDA_KERNEL_LOOP(index, n) { if(X[index]*Y[index]<0) Y[index] = 0; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_keep_same_direction<float>(const int N, const float* X, float* Y){ keep_same_direction_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, X,Y); } template <> void caffe_gpu_keep_same_direction<double>(const int N, const double* X, double* Y){ keep_same_direction_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, X,Y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <typename Dtype> __global__ void div_kernel_check_zero(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { if(fabs(b[index]) <= ZERO_THRESHOLD) y[index] = 0; else y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div_check_zero<float>(const int N, const 
float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel_check_zero<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div_check_zero<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel_check_zero<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <typename Dtype> __global__ void powx_kernel_check_negative(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { if (a[index]<=0) y[index]=0; else y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx_check_negative<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel_check_negative<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx_check_negative<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel_check_negative<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = 
(Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } #define XOFFSET(idx) ((idx)%blk_size_c) #define YOFFSET(idx) ((idx)/blk_size_c) //Usage: dim3 block(a,b); dim3 thread(get_threads_per_block,1); block_length_kernel<<<block,thread>>>(n,c,x,y); //one-D thread block processes two-D data block template <typename Dtype> __global__ void block_length_kernel(const int n, const int c, const Dtype *x, Dtype* y){ int c_offset = 0; const int blk_size_n = n%gridDim.y ? n/gridDim.y+1 : n/gridDim.y; const int blk_size_c = c%gridDim.x ? c/gridDim.x+1 : c/gridDim.x; while(c_offset<blk_size_n*blk_size_c){ int offset_x = XOFFSET(c_offset + threadIdx.x); int offset_y = YOFFSET(c_offset + threadIdx.x); int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x); int idx1 = y_pos * c + x_pos; if(offset_x < blk_size_c && offset_y < blk_size_n){//WITHOUT THIS: THE C MUST BE MULTIPLE TIMES OF BLOCKDIM.X IN CURRENT IMPLEMENTATION !!! y[idx1] = x[idx1]*x[idx1]; } c_offset += blockDim.x; } __syncthreads(); //sum along block c_offset=0; Dtype res = 0; while(c_offset<blk_size_n*blk_size_c){ int len = (c_offset + blockDim.x)<blk_size_n*blk_size_c ? blockDim.x : (blk_size_n*blk_size_c-c_offset);//valid threads to process while(len/2>0){ if(threadIdx.x<len/2){ int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x); int idx1 = y_pos * c + x_pos; x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x + (len+1)/2); y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x + (len+1)/2); int idx2 = y_pos * c + x_pos; //BUG: we must ALWAYS store this data. Use shared memory with size of blk_size_n*blk_size_c!!! 
y[idx1] += y[idx2]; } __syncthreads(); len=(len+1)/2; } int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset); int idx1 = y_pos * c + x_pos; res += y[idx1]; c_offset += blockDim.x; } __syncthreads(); //copy c_offset=0; while(c_offset<blk_size_n*blk_size_c){ int offset_x = XOFFSET(c_offset + threadIdx.x); int offset_y = YOFFSET(c_offset + threadIdx.x); int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x); int idx1 = y_pos * c + x_pos; if(offset_x < blk_size_c && offset_y < blk_size_n){ if(res){ y[idx1] = Dtype(sqrt(res)); }else{ y[idx1] = Dtype(0); } } c_offset += blockDim.x; } } //Usage: dim3 block(c,1); dim3 thread(1,n); col_group_length_kernel<<<block,thread>>>(n,c,x,y,z); template <typename Dtype> __global__ void col_group_length_kernel(const int n, const int c, const Dtype *x, Dtype* y, Dtype* z){ int n_offset = 0; //initialize y while(n_offset<n){ int idx1 = (n_offset+threadIdx.y)*c+blockIdx.x; if(n_offset+threadIdx.y < n){//BUG: THE N MUST BE MULTIPLE TIMES OF BLOCKDIM.Y IN CURRENT IMPLEMENTATION !!! y[idx1] = x[idx1]*x[idx1]; } n_offset += blockDim.y; } __syncthreads(); //sum along columns n_offset=0; Dtype res = 0; while(n_offset<n){ int len = (n_offset + blockDim.y)<n ? blockDim.y : (n-n_offset);//valid threads to process while(len/2>0){ if(threadIdx.y<len/2){ int idx1 = (n_offset+threadIdx.y)*c+blockIdx.x; int idx2 = (n_offset+threadIdx.y+(len+1)/2)*c+blockIdx.x; y[idx1] += y[idx2]; } __syncthreads(); len=(len+1)/2; } res += y[n_offset*c+blockIdx.x]; n_offset += blockDim.y; } __syncthreads(); if(res>0 && 0==threadIdx.y){ z[blockIdx.x] = Dtype(sqrt(res)); }else if(0==threadIdx.y) { z[blockIdx.x] = Dtype(0); } //copy n_offset=0; while(n_offset<n){ int idx1 = (n_offset+threadIdx.y)*c + blockIdx.x; if(n_offset+threadIdx.y < n){ if(res>0){ y[idx1] = Dtype(sqrt(res)); }else{ y[idx1] = Dtype(0); } } n_offset += blockDim.y; } } //Usage: dim3 block(1,n); dim3 thread(c,1); row_group_length_kernel<<<block,thread>>>(n,c,x,y,z); template <typename Dtype> __global__ void row_group_length_kernel(const int n, const int c, const Dtype *x, Dtype* y, Dtype* z){ int c_offset = 0; //initialize y while(c_offset<c){ int idx1 = blockIdx.y * c + c_offset + threadIdx.x; if(c_offset + threadIdx.x < c){//WITHOUT THIS: THE C MUST BE MULTIPLE TIMES OF BLOCKDIM.X IN CURRENT IMPLEMENTATION !!! y[idx1] = x[idx1]*x[idx1]; } c_offset += blockDim.x; } __syncthreads(); //sum along rows c_offset=0; Dtype res = 0; while(c_offset<c){ int len = (c_offset + blockDim.x)<c ? 
blockDim.x : (c-c_offset);//valid threads to process while(len/2>0){ if(threadIdx.x<len/2){ int idx1 = blockIdx.y * c + c_offset + threadIdx.x; int idx2 = blockIdx.y * c + c_offset + threadIdx.x + (len+1)/2; y[idx1] += y[idx2]; } __syncthreads(); len=(len+1)/2; } res += y[blockIdx.y * c + c_offset]; c_offset += blockDim.x; } __syncthreads(); if(res>0 && 0==threadIdx.x){ z[blockIdx.y] = Dtype(sqrt(res)); }else if(0==threadIdx.x) { z[blockIdx.y] = Dtype(0); } //copy c_offset=0; while(c_offset<c){ int idx1 = blockIdx.y * c + c_offset + threadIdx.x; if(c_offset + threadIdx.x < c){ if(res){ y[idx1] = Dtype(sqrt(res)); }else{ y[idx1] = Dtype(0); } } c_offset += blockDim.x; } } /* template <> void caffe_gpu_bar_group_length<int>(const int n, const int c, const int* x, int* y, int* z, bool along_column_or_row){ NOT_IMPLEMENTED; } template <> void caffe_gpu_bar_group_length<unsigned int>(const int n, const int c, const unsigned int* x, unsigned int* y, unsigned int* z, bool along_column_or_row){ NOT_IMPLEMENTED; } */ template <> void caffe_gpu_bar_group_length<float>(const int n, const int c, const float* x, float* y, float* z, bool along_column_or_row){ int threads_per_block = Caffe::get_threads_per_block(); if(along_column_or_row){ dim3 block(c,1); dim3 thread(1,n>threads_per_block ? threads_per_block:n );//CAFFE_CUDA_NUM_THREADS col_group_length_kernel<<<block,thread>>>(n,c,x,y,z); }else{ dim3 block(1,n); dim3 thread(c>threads_per_block ? threads_per_block:c, 1);//CAFFE_CUDA_NUM_THREADS row_group_length_kernel<<<block,thread>>>(n,c,x,y,z); } CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_bar_group_length<double>(const int n, const int c, const double* x, double* y, double* z, bool along_column_or_row){ int threads_per_block = Caffe::get_threads_per_block(); if(along_column_or_row){ dim3 block(c,1); dim3 thread(1,n>threads_per_block ? threads_per_block:n );//CAFFE_CUDA_NUM_THREADS col_group_length_kernel<<<block,thread>>>(n,c,x,y,z); }else{ dim3 block(1,n); dim3 thread(c>threads_per_block ? threads_per_block:c, 1);//CAFFE_CUDA_NUM_THREADS row_group_length_kernel<<<block,thread>>>(n,c,x,y,z); } CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_block_length<float>(const int n, const int c, const int blk_size_n, const int blk_size_c, const float *x, float* y){ CHECK_LE(blk_size_n,n); CHECK_LE(blk_size_c,c); CHECK_EQ(n%blk_size_n,0); CHECK_EQ(c%blk_size_c,0); int threads_per_block = Caffe::get_threads_per_block(); const int blk_num_n = (n+blk_size_n-1)/blk_size_n; const int blk_num_c = (c+blk_size_c-1)/blk_size_c; const int blk_size = blk_size_n*blk_size_c; dim3 block(blk_num_c,blk_num_n); dim3 thread(blk_size>threads_per_block?threads_per_block:blk_size, 1); block_length_kernel<<<block,thread>>>(n, c,x,y); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_block_length<double>(const int n, const int c, const int blk_size_n, const int blk_size_c, const double *x, double* y){ NOT_IMPLEMENTED; } template <> void caffe_gpu_block_length<int>(const int n, const int c, const int blk_size_n, const int blk_size_c, const int *x, int* y){ NOT_IMPLEMENTED; } template <> void caffe_gpu_block_length<unsigned int>(const int n, const int c, const int blk_size_n, const int blk_size_c, const unsigned int *x, unsigned int* y){ NOT_IMPLEMENTED; } } // namespace caffe
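/*
 * A small, self-contained sketch of the row-major convention trick used by
 * caffe_gpu_gemm above: cuBLAS is column-major, so for row-major A (MxK),
 * B (KxN) and C (MxN) the wrapper passes B before A with sizes (N, M, K) and
 * leading dimensions N for B, K for A, N for C; the column-major result cuBLAS
 * writes is then exactly the row-major C. This demo function is illustrative
 * only (not part of Caffe) and omits the error checking that the CUBLAS_CHECK
 * and CUDA_CHECK macros above would normally perform. Calling it should print
 * 58 64 / 139 154, the row-major product A*B.
 */
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

static void rowmajor_gemm_demo() {
  const int M = 2, K = 3, N = 2;
  const float hA[M * K] = {1, 2, 3,
                           4, 5, 6};   // row-major 2x3
  const float hB[K * N] = {7, 8,
                           9, 10,
                           11, 12};    // row-major 3x2
  float hC[M * N] = {0};

  float *dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(hA));
  cudaMalloc(&dB, sizeof(hB));
  cudaMalloc(&dC, sizeof(hC));
  cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);

  const float alpha = 1.0f, beta = 0.0f;
  // Same call shape as caffe_gpu_gemm<float> with no transposes:
  // operands swapped (B first), sizes (N, M, K).
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K,
              &alpha, dB, N, dA, K, &beta, dC, N);

  cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
  printf("%g %g\n%g %g\n", hC[0], hC[1], hC[2], hC[3]);  // expect 58 64 / 139 154

  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
}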
#include "thrust/device_vector.h" #include "caffe/common.hpp" #include "caffe/layer.hpp" #include "caffe/syncedmem.hpp" #include "caffe/layers/operator/guided_crf_layer.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { static __global__ void softmax_forward_kernel(const int maxStates,const int nNodes, const float * energy,float * prob) { CUDA_KERNEL_LOOP(n, nNodes) { for(int s=0;s<maxStates;s++) prob[s*nNodes+n] = energy[s*nNodes+n]; float max_prob = float(-FLT_MAX); for(int s=0;s<maxStates;s++) max_prob =max(max_prob,prob[s*nNodes+n]); for(int s=0;s<maxStates;s++) prob[s*nNodes+n] -= max_prob; float sum = 0; for(int s=0;s<maxStates;s++) sum += exp(prob[s*nNodes+n]); for(int s=0;s<maxStates;s++) prob[s*nNodes+n] = exp(prob[s*nNodes+n]) / sum; } } static __global__ void softmax_backward_kernel(const int maxStates,const int nNodes, const float * top_diff,const float *prob,float * bottom_diff) { CUDA_KERNEL_LOOP(ind, nNodes*maxStates) { int n=ind % nNodes; int s=ind / nNodes; float sum = 0; for(int s2=0;s2<maxStates;s2++) sum += top_diff[s2*nNodes+n]*prob[s2*nNodes+n]*(float(s==s2)-prob[s*nNodes+n]); bottom_diff[s*nNodes+n] = sum; } } //-------------------------------------------------------------- static __global__ void vector_product_kernel(const int num,const int channels1,const int channels2, const int spatial_dim,const float * a,const float * b,float *var)//var = a .* b { CUDA_KERNEL_LOOP(ind, spatial_dim*channels1*channels2*num) { int n = ind / spatial_dim / channels1 / channels2; int c2 = ind / spatial_dim / channels1 % channels2; int c1 = ind / spatial_dim % channels1; int s = ind % spatial_dim; var[ind]=a[(n*channels1+c1)*spatial_dim+s]*b[(n*channels2+c2)*spatial_dim+s]; } } static __global__ void substract_vector_product_kernel(const int num, const int channels1,const int channels2,const int spatial_dim,const float *avg,const float *a,const float *b, float * var)//var = avg - a.*b; { CUDA_KERNEL_LOOP(ind, spatial_dim*channels1*channels2*num) { int n = ind / spatial_dim / channels1 / channels2; int c2 = ind / spatial_dim / channels1 % channels2; int c1 = ind / spatial_dim % channels1; int s = ind % spatial_dim; var[ind]=avg[ind]-a[(n*channels1+c1)*spatial_dim+s]*b[(n*channels2+c2)*spatial_dim+s]; } } static __global__ void inv_var_I_eps_kernel_3(const int num, const int channels, const int spatial_dim, const float eps,float *var_I,float *inv_var_I) { CUDA_KERNEL_LOOP(ind, spatial_dim*num) { int n = ind / spatial_dim; int s = ind % spatial_dim; for(int c=0;c<channels;c++) var_I[(n*channels*channels+(c*channels+c))*spatial_dim+s]=var_I[(n*channels*channels+(c*channels+c))*spatial_dim+s]+eps; float det = var_I[(n*channels*channels+0*channels+0)*spatial_dim+s]*(var_I[(n*channels*channels+1*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+2)*spatial_dim+s]-var_I[(n*channels*channels+2*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+2)*spatial_dim+s]) - var_I[(n*channels*channels+0*channels+1)*spatial_dim+s]*(var_I[(n*channels*channels+1*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+2)*spatial_dim+s]-var_I[(n*channels*channels+2*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+2)*spatial_dim+s]) + var_I[(n*channels*channels+0*channels+2)*spatial_dim+s]*(var_I[(n*channels*channels+1*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+1)*spatial_dim+s]-var_I[(n*channels*channels+2*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+1)*spatial_dim+s]); 
inv_var_I[(n*channels*channels+0*channels+0)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+1*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+2)*spatial_dim+s] -var_I[(n*channels*channels+2*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+2)*spatial_dim+s]); inv_var_I[(n*channels*channels+0*channels+1)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+2*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+2)*spatial_dim+s] -var_I[(n*channels*channels+1*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+2)*spatial_dim+s]); inv_var_I[(n*channels*channels+0*channels+2)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+1*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+1)*spatial_dim+s] -var_I[(n*channels*channels+2*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+1)*spatial_dim+s]); inv_var_I[(n*channels*channels+1*channels+0)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+2*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+0*channels+2)*spatial_dim+s] -var_I[(n*channels*channels+0*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+2)*spatial_dim+s]); inv_var_I[(n*channels*channels+1*channels+1)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+0*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+2)*spatial_dim+s] -var_I[(n*channels*channels+2*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+0*channels+2)*spatial_dim+s]); inv_var_I[(n*channels*channels+1*channels+2)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+0*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+0)*spatial_dim+s] -var_I[(n*channels*channels+0*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+2*channels+1)*spatial_dim+s]); inv_var_I[(n*channels*channels+2*channels+0)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+0*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+2)*spatial_dim+s] -var_I[(n*channels*channels+1*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+0*channels+2)*spatial_dim+s]); inv_var_I[(n*channels*channels+2*channels+1)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+1*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+0*channels+2)*spatial_dim+s] -var_I[(n*channels*channels+0*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+2)*spatial_dim+s]); inv_var_I[(n*channels*channels+2*channels+2)*spatial_dim+s] = 1/det*(var_I[(n*channels*channels+0*channels+0)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+1)*spatial_dim+s] -var_I[(n*channels*channels+0*channels+1)*spatial_dim+s]*var_I[(n*channels*channels+1*channels+0)*spatial_dim+s]); } } static __global__ void div_sum_kernel_3(const int num, const int channels,const int maxStates,const int spatial_dim,const float *inv_var_I,const float *cov_Ip, float *a) { CUDA_KERNEL_LOOP(ind, spatial_dim*maxStates*num) { int n = ind / spatial_dim / maxStates; int m = ind / spatial_dim % maxStates; int s = ind % spatial_dim; a[((n*maxStates+m)*channels+0)*spatial_dim+s] = cov_Ip[((n*maxStates+m)*channels+0)*spatial_dim+s]*inv_var_I[(n*channels*channels+0*channels+0)*spatial_dim+s] + cov_Ip[((n*maxStates+m)*channels+1)*spatial_dim+s]*inv_var_I[(n*channels*channels+0*channels+1)*spatial_dim+s] + cov_Ip[((n*maxStates+m)*channels+2)*spatial_dim+s]*inv_var_I[(n*channels*channels+0*channels+2)*spatial_dim+s]; a[((n*maxStates+m)*channels+1)*spatial_dim+s] = cov_Ip[((n*maxStates+m)*channels+0)*spatial_dim+s]*inv_var_I[(n*channels*channels+1*channels+0)*spatial_dim+s] + 
cov_Ip[((n*maxStates+m)*channels+1)*spatial_dim+s]*inv_var_I[(n*channels*channels+1*channels+1)*spatial_dim+s] + cov_Ip[((n*maxStates+m)*channels+2)*spatial_dim+s]*inv_var_I[(n*channels*channels+1*channels+2)*spatial_dim+s]; a[((n*maxStates+m)*channels+2)*spatial_dim+s] = cov_Ip[((n*maxStates+m)*channels+0)*spatial_dim+s]*inv_var_I[(n*channels*channels+2*channels+0)*spatial_dim+s] + cov_Ip[((n*maxStates+m)*channels+1)*spatial_dim+s]*inv_var_I[(n*channels*channels+2*channels+1)*spatial_dim+s] + cov_Ip[((n*maxStates+m)*channels+2)*spatial_dim+s]*inv_var_I[(n*channels*channels+2*channels+2)*spatial_dim+s]; } } static __global__ void substract_vector_matrix_product_kernel_3(const int num, const int channels,const int maxStates,const int spatial_dim,const float * mean_p,const float * a,const float * mean_I,float *b)// b = mean_p - mean_I *. a; { CUDA_KERNEL_LOOP(ind, spatial_dim*maxStates*num) { int n = ind / spatial_dim / maxStates; int m = ind / spatial_dim % maxStates; int s = ind % spatial_dim; b[ind] = mean_p[ind] - mean_I[(n*3+0)*spatial_dim+s] * a[((n*maxStates+m)*channels+0)*spatial_dim+s] - mean_I[(n*3+1)*spatial_dim+s] * a[((n*maxStates+m)*channels+1)*spatial_dim+s] - mean_I[(n*3+2)*spatial_dim+s] * a[((n*maxStates+m)*channels+2)*spatial_dim+s]; } } static __global__ void vector_matrix_product_sum_kernel_3(const int num, const int channels,const int maxStates,const int spatial_dim,const float *mean_a,const float *I,const float *mean_b,float *q)// q = I .* mean_a + mean_b; { CUDA_KERNEL_LOOP(ind, spatial_dim*maxStates*num) { int n = ind / spatial_dim / maxStates; int m = ind / spatial_dim % maxStates; int s = ind % spatial_dim; q[ind] = I[(n*3+0)*spatial_dim+s] * mean_a[((n*maxStates+m)*channels+0)*spatial_dim+s] + I[(n*3+1)*spatial_dim+s] * mean_a[((n*maxStates+m)*channels+1)*spatial_dim+s] + I[(n*3+2)*spatial_dim+s] * mean_a[((n*maxStates+m)*channels+2)*spatial_dim+s] + mean_b[ind]; } } //--------------------------------------------- void GuidedCRFLayer::guided_filter_gpu(const int num,const int channels,const int maxStates,const int height,const int width,const float *I,const float * p,float *output_p) { const int spatial_dim=height*width; //******************************** prob ************************************ box_filter_gpu(num,maxStates,height,width,radius,p,mean_p.mutable_gpu_data(),buffer_score.mutable_gpu_data()); vector_product_kernel<<<CAFFE_GET_BLOCKS(num*channels*maxStates*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,maxStates,spatial_dim,I,p,Ip);//Ip = I .* p; box_filter_gpu(num,channels*maxStates,height,width,radius,Ip,mean_Ip,buffer_image_score); substract_vector_product_kernel<<<CAFFE_GET_BLOCKS(num*channels*maxStates*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,maxStates,spatial_dim,mean_Ip,mean_I.gpu_data(),mean_p.gpu_data(), cov_Ip);//cov_Ip = mean_Ip - mean_I .* mean_p; inv_var_I_eps_kernel_3<<<CAFFE_GET_BLOCKS(num*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,spatial_dim,eps,var_I.mutable_gpu_data(),inv_var_I.mutable_gpu_data());//inv_var_I=inv(var_I + eps); div_sum_kernel_3<<<CAFFE_GET_BLOCKS(num*maxStates*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,maxStates,spatial_dim,inv_var_I.gpu_data(),cov_Ip,a);//a = cov_Ip ./ inv_var_I; box_filter_gpu(num,channels*maxStates,height,width,radius,a,mean_a,buffer_image_score); substract_vector_matrix_product_kernel_3<<<CAFFE_GET_BLOCKS(num*maxStates*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,maxStates,spatial_dim,mean_p.gpu_data(),a,mean_I.gpu_data(),b.mutable_gpu_data());// 
b = mean_p - mean_I .* a; box_filter_gpu(num,maxStates,height,width,radius,b.gpu_data(),mean_b.mutable_gpu_data(),buffer_score.mutable_gpu_data()); vector_matrix_product_sum_kernel_3<<<CAFFE_GET_BLOCKS(num*maxStates*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,maxStates,spatial_dim,mean_a,I,mean_b.gpu_data(),output_p);// q = I .* mean_a + mean_b; } void GuidedCRFLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const float * nodePot = bottom[0]->gpu_data(); const float * imageData = bottom[1]->gpu_data(); int num = bottom[0]->num(); int maxStates = bottom[0]->channels(); int channels = bottom[1]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int spatial_dim=height*width; int nNodes = num*width *height; //******************************** image ************************************ box_filter_gpu(num,channels,height,width,radius,imageData,mean_I.mutable_gpu_data(),buffer_image.mutable_gpu_data()); vector_product_kernel<<<CAFFE_GET_BLOCKS(num*channels*channels*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,channels,spatial_dim,imageData,imageData,II.mutable_gpu_data());// II = I .* I; box_filter_gpu(num,channels*channels,height,width,radius,II.gpu_data(),mean_II.mutable_gpu_data(),buffer_image_image.mutable_gpu_data()); substract_vector_product_kernel<<<CAFFE_GET_BLOCKS(num*channels*channels*spatial_dim), CAFFE_CUDA_NUM_THREADS>>> (num,channels,channels,spatial_dim,mean_II.gpu_data(),mean_I.gpu_data(),mean_I.gpu_data(), var_I.mutable_gpu_data());//var_I = mean_II - mean_I .* mean_I; //----------------------------------------------------------------------------------- caffe_copy(tempPot.count(),nodePot,tempPot.mutable_gpu_data()); for(int iter = 0; iter < maxIter; iter++) { softmax_forward_kernel<<<CAFFE_GET_BLOCKS(nNodes), CAFFE_CUDA_NUM_THREADS>>> (maxStates,nNodes,tempPot.gpu_data(),nodeBel[iter]->mutable_gpu_data()); guided_filter_gpu(num,channels,maxStates,height,width,imageData,nodeBel[iter]->gpu_data(),filterPot.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, maxStates, nNodes, maxStates, (float)1., this->blobs_[0]->gpu_data(), filterPot.gpu_data(), (float)0., compatPot.mutable_gpu_data()); caffe_gpu_add(maxStates*nNodes,float(1),nodePot,alpha,compatPot.gpu_data(),tempPot.mutable_gpu_data()); } caffe_copy(top[0]->count(),tempPot.gpu_data(),top[0]->mutable_gpu_data()); } void GuidedCRFLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { int num = bottom[0]->num(); int maxStates = bottom[0]->channels(); int channels = bottom[1]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int nNodes = num*width *height; //----------------------- workspace ------------------------- myworkspace_[0]->Reshape(num*maxStates,channels,height,width); myworkspace_[1]->Reshape(num*maxStates,channels,height,width); myworkspace_[2]->Reshape(num*maxStates,channels,height,width); Ip = myworkspace_[0]->mutable_gpu_data(); mean_Ip = myworkspace_[0]->mutable_gpu_diff(); cov_Ip = myworkspace_[1]->mutable_gpu_data(); a = myworkspace_[1]->mutable_gpu_diff(); mean_a = myworkspace_[2]->mutable_gpu_data(); buffer_image_score = myworkspace_[2]->mutable_gpu_diff(); //---------------------------------------------------------------- const float *top_diff = top[0]->gpu_diff(); float * bottom_diff = bottom[0]->mutable_gpu_diff(); const float * imageData = bottom[1]->gpu_data(); caffe_gpu_set(filterPot.count(),float(0),filterPot.mutable_gpu_diff()); 
caffe_gpu_set(compatPot.count(),float(0),compatPot.mutable_gpu_diff()); caffe_gpu_set(tempPot.count(),float(0),tempPot.mutable_gpu_diff()); caffe_gpu_set(bottom[0]->count(),float(0),bottom_diff); caffe_copy(tempPot.count(),top_diff,tempPot.mutable_gpu_diff()); for(int iter = maxIter-1; iter >= 0; iter--) { caffe_gpu_add(maxStates*nNodes,alpha,tempPot.gpu_diff(),float(0),compatPot.gpu_diff(),compatPot.mutable_gpu_diff()); caffe_gpu_add(maxStates*nNodes,float(1) ,tempPot.gpu_diff(),float(1),bottom_diff ,bottom_diff); caffe_gpu_gemm(CblasTrans, CblasNoTrans, maxStates, nNodes, maxStates, (float)1., this->blobs_[0]->gpu_data(), compatPot.gpu_diff(), (float)0., filterPot.mutable_gpu_diff()); guided_filter_gpu(num,channels,maxStates,height,width,imageData,filterPot.gpu_diff(),nodeBel[iter]->mutable_gpu_diff()); softmax_backward_kernel<<<CAFFE_GET_BLOCKS(maxStates*nNodes), CAFFE_CUDA_NUM_THREADS>>> (maxStates,nNodes,nodeBel[iter]->gpu_diff(),nodeBel[iter]->gpu_data(),tempPot.mutable_gpu_diff()); } caffe_gpu_add(tempPot.count(),float(1),tempPot.gpu_diff(),float(1),bottom[0]->gpu_diff(),bottom[0]->mutable_gpu_diff()); } void GuidedCRFLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { } } // namespace caffe
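/*
 * A compact sketch of the closed-form 3x3 inversion that inv_var_I_eps_kernel_3
 * above performs per pixel: add eps to the diagonal, expand the determinant
 * along the first row, then divide the adjugate (transposed cofactors) by the
 * determinant. The kernel writes its cofactors in a form that relies on var_I
 * being symmetric; this helper is the general version. The name and the
 * row-major float[9] layout are illustrative assumptions only.
 */
__host__ __device__ inline void invert3x3_plus_eps(const float m_in[9], float eps, float inv[9]) {
  float m[9];
  for (int k = 0; k < 9; ++k) m[k] = m_in[k];
  m[0] += eps; m[4] += eps; m[8] += eps;  // m + eps * I, as in the kernel

  const float det = m[0] * (m[4] * m[8] - m[5] * m[7])
                  - m[1] * (m[3] * m[8] - m[5] * m[6])
                  + m[2] * (m[3] * m[7] - m[4] * m[6]);
  const float inv_det = 1.0f / det;  // like the kernel, assumes det != 0

  inv[0] =  (m[4] * m[8] - m[5] * m[7]) * inv_det;
  inv[1] = -(m[1] * m[8] - m[2] * m[7]) * inv_det;
  inv[2] =  (m[1] * m[5] - m[2] * m[4]) * inv_det;
  inv[3] = -(m[3] * m[8] - m[5] * m[6]) * inv_det;
  inv[4] =  (m[0] * m[8] - m[2] * m[6]) * inv_det;
  inv[5] = -(m[0] * m[5] - m[2] * m[3]) * inv_det;
  inv[6] =  (m[3] * m[7] - m[4] * m[6]) * inv_det;
  inv[7] = -(m[0] * m[7] - m[1] * m[6]) * inv_det;
  inv[8] =  (m[0] * m[4] - m[1] * m[3]) * inv_det;
}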
#include "CollisionGrid.cuh" #include <chrono> #include <iomanip> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/iterator/counting_iterator.h> #include "CudaUtils.cuh" #define SK_DEVICE __host__ __device__ namespace viper { Vec4 CollisionGrid::b_sphere; const thrust::device_vector<Vec2i> & CollisionGrid::test_particles(const thrust::device_vector<Vec3> &c_in, const thrust::device_vector<float> &r_in, float eps) { float total_ms = 0; float time_ms = 0; auto zero_begin = thrust::make_counting_iterator(0); int n_particles = c_in.size(); // Input particle positions c = c_in; // Input particles radii r.resize(r_in.size()); auto add_margin = [=] SK_DEVICE(float x) -> float { return x + 0.5f * eps; }; thrust::transform(thrust::device, r_in.begin(), r_in.end(), r.begin(), add_margin); // Particle IDs i.clear(); i.insert(i.end(), zero_begin, zero_begin + n_particles); int max_i = thrust::max_element(thrust::device, r.begin(), r.end()) - r.begin(); float max_radius = r_in[max_i]; auto part_to_sphere = [=] SK_DEVICE(thrust::tuple<Vec3, float, int> part) -> Vec4 { return Vec4(part.get<0>()[0], part.get<0>()[1], part.get<0>()[2], part.get<1>()); }; auto particles_begin = thrust::make_zip_iterator( thrust::make_tuple(c.begin(), r.begin(), i.begin())); auto spheres_begin = thrust::make_transform_iterator(particles_begin, part_to_sphere); using Vec6 = Eigen::Matrix<float, 6, 1>; Vec3 c_init = c_in[0]; float r_init = r_in[0]; auto sphere_bbox = [=] SK_DEVICE(Vec4 s) -> Vec6 { Vec6 bbox; bbox.segment<3>(0) = s.head<3>() - Vec3::Ones() * s[3]; bbox.segment<3>(3) = s.head<3>() + Vec3::Ones() * s[3]; return bbox; }; auto bbox_union = [=] SK_DEVICE(Vec6 b0, Vec6 b1) -> Vec6 { Vec6 result; result.segment<3>(0) = b0.segment<3>(0).cwiseMin(b1.segment<3>(0)); result.segment<3>(3) = b0.segment<3>(3).cwiseMax(b1.segment<3>(3)); return result; }; auto bboxes_begin = thrust::make_transform_iterator(spheres_begin, sphere_bbox); Vec6 bbox_init = sphere_bbox(Vec4(c_init[0], c_init[1], c_init[2], r_init)); Vec6 bbox = thrust::reduce(thrust::device, bboxes_begin, bboxes_begin + n_particles, bbox_init, bbox_union); Vec3 bbox_min = bbox.segment<3>(0); Vec3 bbox_max = bbox.segment<3>(3); b_sphere.head<3>() = (bbox_max + bbox_min) / 2; b_sphere[3] = (bbox_max - bbox_min).norm() / 2; float grid_width = bbox_max[0] - bbox_min[0]; int grid_res = grid_width / (2 * max_radius); grid_res = max(min(grid_res, 32), 1); int n_cells = grid_res * grid_res * grid_res; float cell_size = grid_width / grid_res; auto center_to_cell_id = [=] SK_DEVICE(Vec3 c) { Vec3i i = ((c - bbox_min) / cell_size) .cast<int>() .cwiseMax(Vec3i::Zero()) .cwiseMin(Vec3i::Ones() * (grid_res - 1)); return i[0] + grid_res * i[1] + grid_res * grid_res * i[2]; }; auto part_cell_ids_gen = thrust::make_transform_iterator(c.begin(), center_to_cell_id); // ID of cell containing each particle part_cell_ids.clear(); part_cell_ids.insert(part_cell_ids.end(), part_cell_ids_gen, part_cell_ids_gen + n_particles); // Sort particle list by id of cells thrust::sort_by_key(thrust::device, part_cell_ids.begin(), part_cell_ids.end(), particles_begin); // Indices of first particle for each cell cell_starts.clear(); cell_starts.resize(n_cells, 0); cell_ends.clear(); cell_ends.resize(n_cells, 0); auto part_cell_ids_begin = thrust::raw_pointer_cast(part_cell_ids.data()); auto cell_starts_begin = thrust::raw_pointer_cast(cell_starts.data()); auto cell_ends_begin = thrust::raw_pointer_cast(cell_ends.data()); auto write_cell_starts = [=] SK_DEVICE(int i) { 
int id1 = part_cell_ids_begin[i]; if (i == 0) { cell_starts_begin[id1] = i; } else { int id0 = part_cell_ids_begin[i - 1]; if (id0 != id1) { cell_starts_begin[id1] = i; cell_ends_begin[id0] = i; } } if (i == n_particles - 1) { cell_ends_begin[id1] = n_particles; } }; // Write out start indices of each cell in sorted particle list thrust::for_each(thrust::device, zero_begin, zero_begin + n_particles, write_cell_starts); // Number of particles in each cell parts_per_cell.clear(); parts_per_cell.resize(n_cells, 0); auto count_cell_parts = [=] SK_DEVICE(int cell) { return cell_ends_begin[cell] - cell_starts_begin[cell]; }; thrust::transform(thrust::device, zero_begin, zero_begin + n_cells, parts_per_cell.begin(), count_cell_parts); auto parts_per_cell_begin = thrust::raw_pointer_cast(parts_per_cell.data()); auto n_neighbours = [=] SK_DEVICE(int pair_id) { int nhbr_off = pair_id % 27; int particle_id = pair_id / 27; int cell_id = part_cell_ids_begin[particle_id]; int c_i = cell_id % grid_res; int c_j = (cell_id / grid_res) % grid_res; int c_k = cell_id / (grid_res * grid_res); int i = (nhbr_off % 3) - 1; int j = ((nhbr_off / 3) % 3) - 1; int k = (nhbr_off / 9) - 1; int n_i = c_i + i; int n_j = c_j + j; int n_k = c_k + k; int nhbr_cell_id = n_i + grid_res * n_j + grid_res * grid_res * n_k; bool inside = n_i >= 0 && n_i < grid_res && n_j >= 0 && n_j < grid_res && n_k >= 0 && n_k < grid_res; int count = 0; if (inside) { count = parts_per_cell_begin[nhbr_cell_id]; } return count; }; auto neighbour_count_begin = thrust::make_transform_iterator(zero_begin, n_neighbours); int n_pairs = n_particles * 27; // Count potential collisions for each particle-cell pair // NOTE: promote from 27 threads per particle to 32 for warp coherence if // slow neighbours_per_pair.clear(); neighbours_per_pair.insert(neighbours_per_pair.end(), neighbour_count_begin, neighbour_count_begin + n_pairs); // Index of first collision for each particle-cell pair pair_collision_starts.resize(n_pairs); // tic(); // Find start indices for cell-particle_pairs thrust::exclusive_scan(thrust::device, neighbours_per_pair.begin(), neighbours_per_pair.end(), pair_collision_starts.begin()); int n_collisions = pair_collision_starts.back() + neighbours_per_pair.back(); // Number of particle-cell pairs pointing to a group of collisions pairs_per_coll_group.clear(); pairs_per_coll_group.resize(n_pairs, 1); // In-place scan to compute above count thrust::inclusive_scan_by_key(thrust::device, pair_collision_starts.begin(), pair_collision_starts.end(), pairs_per_coll_group.begin(), pairs_per_coll_group.begin()); // Compute vector with number of pairs pointing to the start of each // collision group collision_pair_ids.clear(); collision_pair_ids.resize(n_collisions, 0); thrust::scatter_if(thrust::device, pairs_per_coll_group.begin(), pairs_per_coll_group.end(), pair_collision_starts.begin(), neighbours_per_pair.begin(), collision_pair_ids.begin()); // Compute pair id of each collision thrust::inclusive_scan(thrust::device, collision_pair_ids.begin(), collision_pair_ids.end(), collision_pair_ids.begin()); // Subtract one to get indices thrust::for_each(thrust::device, collision_pair_ids.begin(), collision_pair_ids.end(), [] SK_DEVICE(int &i) { i -= 1; }); auto collision_pair_ids_begin = thrust::raw_pointer_cast(collision_pair_ids.data()); auto pair_collision_starts_begin = thrust::raw_pointer_cast(pair_collision_starts.data()); auto collision_generator = [=] SK_DEVICE(int collision_id) { int pair_id = collision_pair_ids_begin[collision_id]; int 
collision_start = pair_collision_starts_begin[pair_id]; int part_offset = collision_id - collision_start; int nhbr_off = pair_id % 27; int particle_id = pair_id / 27; int cell_id = part_cell_ids_begin[particle_id]; int c_i = cell_id % grid_res; int c_j = (cell_id / grid_res) % grid_res; int c_k = cell_id / (grid_res * grid_res); int i = (nhbr_off % 3) - 1; int j = ((nhbr_off / 3) % 3) - 1; int k = (nhbr_off / 9) - 1; int n_i = c_i + i; int n_j = c_j + j; int n_k = c_k + k; int nhbr_cell_id = n_i + grid_res * n_j + grid_res * grid_res * n_k; int cell_start = cell_starts_begin[nhbr_cell_id]; int other_particle_id = cell_start + part_offset; return Vec2i(particle_id, other_particle_id); }; auto collisions_gen = thrust::make_transform_iterator(zero_begin, collision_generator); // Generate particle ids for all potential collisions collisions.clear(); collisions.insert(collisions.end(), collisions_gen, collisions_gen + n_collisions); auto c_begin = thrust::raw_pointer_cast(c.data()); auto r_begin = thrust::raw_pointer_cast(r.data()); auto not_colliding = [=] SK_DEVICE(Vec2i collision) { Vec3 c0 = c_begin[collision[0]]; Vec3 c1 = c_begin[collision[1]]; float r0 = r_begin[collision[0]]; float r1 = r_begin[collision[1]]; return (c1 - c0).norm() > (r0 + r1) || (collision[0] >= collision[1]); }; auto valid_coll_end = thrust::remove_if(thrust::device, collisions.begin(), collisions.end(), not_colliding); collisions.erase(valid_coll_end, collisions.end()); auto i_begin = thrust::raw_pointer_cast(i.data()); auto permute_indices = [=] SK_DEVICE(Vec2i & coll) { coll[0] = i_begin[coll[0]]; coll[1] = i_begin[coll[1]]; }; thrust::for_each(thrust::device, collisions.begin(), collisions.end(), permute_indices); return collisions; } } // namespace viper
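/* Supplementary sketch (not part of the original file): a standalone, minimal
 * illustration of the scan/scatter "expand" trick used above to turn the
 * per-pair neighbour counts into one entry per candidate collision. The
 * example counts {2, 0, 3, 1} are hypothetical; the six expanded slots map
 * back to pair ids 0, 0, 2, 2, 2, 3 (the empty pair 1 is skipped). */
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/execution_policy.h>
#include <cstdio>

int main() {
    const int h_counts[4] = {2, 0, 3, 1}; // neighbours per particle-cell pair
    thrust::device_vector<int> counts(h_counts, h_counts + 4);

    // Start index of each pair's group in the expanded collision list.
    thrust::device_vector<int> starts(counts.size());
    thrust::exclusive_scan(thrust::device, counts.begin(), counts.end(),
                           starts.begin());
    const int total = (int)starts.back() + (int)counts.back(); // 6 candidates

    // Count how many pairs point at the same start (pairs with zero
    // neighbours collapse onto the next start), as inclusive_scan_by_key
    // does above.
    thrust::device_vector<int> pairs_per_group(counts.size(), 1);
    thrust::inclusive_scan_by_key(thrust::device, starts.begin(), starts.end(),
                                  pairs_per_group.begin(),
                                  pairs_per_group.begin());

    // Scatter those counts to group starts, sum-scan, subtract one: each
    // expanded slot now holds the id of the pair that produced it.
    thrust::device_vector<int> pair_ids(total, 0);
    thrust::scatter_if(thrust::device, pairs_per_group.begin(),
                       pairs_per_group.end(), starts.begin(), counts.begin(),
                       pair_ids.begin());
    thrust::inclusive_scan(thrust::device, pair_ids.begin(), pair_ids.end(),
                           pair_ids.begin());

    for (int i = 0; i < total; ++i)
        printf("collision %d -> pair %d\n", i, (int)pair_ids[i] - 1);
    return 0;
}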
* COMPILATION TIP * nvcc -lcurand XORMRGgens2distri.cu -o XORMRGgens2distri * nvcc -g -lcurand XORMRGgens2distri.cu -o XORMRGgens2distri * -g generate debug information for host code, * */ #include <stdio.h> #include <curand_kernel.h> #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ return EXIT_FAILURE;}} while(0) __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence * number, no offset */ curand_init(1234, id, 0, &state[id]); } __global__ void setup_kernel(curandStatePhilox4_32_10_t *state) { int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence * number, no offset */ curand_init(1234, id, 0, &state[id]) ; } __global__ void setup_kernel(curandStateMRG32k3a *state) { int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence * number, no offset */ curand_init(0, id, 0, &state[id]); } __global__ void generate_kernel(curandState *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; int count = 0; unsigned int x; /* Copy state to local memory for efficienty */ curandState localState = state[id]; /* Generate pseudo-random unsigned ints */ for (int i=0; i<n; i++) { x = curand(&localState); /* Check if low bit set */ if (x & 1) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_kernel(curandStatePhilox4_32_10_t *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; int count = 0; unsigned int x; /* Copy state to local memory for efficiency */ curandStatePhilox4_32_10_t localState = state[id]; /* Generate pseudo-random unsigned ints */ for (int i=0; i<n;i++) { x=curand(&localState); /* Check if low bit set */ if (x & 1) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_uniform_kernel(curandState *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count = 0; float x; /* Copy state to local memory for efficiency */ curandState localState = state[id]; /* Generate pseudo-random uniforms */ for (int i=0; i<n; i++) { x = curand_uniform(&localState); /* Check if > .5 */ if (x > .5) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_uniform_kernel(curandStatePhilox4_32_10_t *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count = 0; float x; /* Copy state to local memory for efficiency */ curandStatePhilox4_32_10_t localState = state[id]; /* Generate pseudo-random uniforms */ for (int i=0; i<n; i++) { x = curand_uniform(&localState); /* Check if > .5 */ if (x > .5) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_normal_kernel(curandState *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count = 0; float2 x; /* Copy state to local memory for efficiency */ curandState localState = state[id]; /* Generate pseudo-random normals */ for (int i=0; i<n/2; i++) { x = curand_normal2(&localState); /* Check if within 1 standard deviation */ if ((x.x > -1.0) && (x.x < 1.0)) { count++; } if ((x.y > -1.0) && (x.y < 1.0)) { count++; } } /* Copy state back to global 
memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_normal_kernel(curandStatePhilox4_32_10_t *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count = 0; float2 x; /* Copy state to local memory for efficiency */ curandStatePhilox4_32_10_t localState = state[id]; /* Generate pseudo-random normals */ for (int i=0; i<n/2; i++) { x = curand_normal2(&localState); /* Check if within 1 standard deviation */ if ((x.x > -1.0) && (x.x < 1.0)) { count++; } if ((x.y > -1.0) && (x.y < 1.0)) { count++; } } /* Copy state back to global memory */ state[id]= localState; /* Store results */ result[id] += count; } __global__ void generate_kernel(curandStateMRG32k3a *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count = 0; unsigned int x; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = state[id]; /* Generate pseudo-random unsigned ints */ for (int i=0; i<n; i++) { x = curand(&localState); /* Check if low bit set */ if (x & 1) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_uniform_kernel(curandStateMRG32k3a *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count = 0; double x; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = state[id]; /* Generate pseudo-random uniforms */ for (int i=0; i<n; i++) { x= curand_uniform_double(&localState); /* Check if > .5 */ if (x>.5) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } __global__ void generate_normal_kernel(curandStateMRG32k3a *state, int n, unsigned int *result) { int id = threadIdx.x + blockIdx.x * 64; unsigned int count =0; double2 x; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = state[id]; /* Generate pseudo-random normals */ for (int i=0; i <n/2; i++) { x = curand_normal2_double(&localState); /* Check if within 1 standard deviation */ if ((x.x > -1.0) && (x.x <1.0)) { count++; } if((x.y > -1.0) && (x.y < 1.0)) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } int main(int argc, char *argv[]) { int i; unsigned int total; curandState *devStates; curandStateMRG32k3a *devMRGStates; curandStatePhilox4_32_10_t *devPHILOXStates; unsigned int *devResults, *hostResults; bool useMRG = 0; bool usePHILOX = 0; int sampleCount = 10000; bool doubleSupported = 0; int device; struct cudaDeviceProp properties; /* check for double precision support */ CUDA_CALL(cudaGetDevice(&device)); CUDA_CALL(cudaGetDeviceProperties(&properties,device)); if (properties.major >= 2 || (properties.major == 1 && properties.minor >= 3) ) { doubleSupported = 1; } /* Check for MRG32k3a option (default is XORWOW) */ if (argc >= 2) { if (strcmp(argv[1], "-m") == 0) { useMRG = 1; if (!doubleSupported) { printf("MRG32k3a requires double precision\n"); printf("^^^^ test WAIVED due to lack of double precision\n"); return EXIT_SUCCESS; } } else if (strcmp(argv[1],"-p") ==0) { usePHILOX = 1; } /* Allow over-ride of sample count */ sscanf(argv[argc-1], "%d", &sampleCount); } /* Allocate space for results on host */ hostResults = (unsigned int *)calloc(64 * 64, sizeof(int)); /* Allocate space for results on device */ CUDA_CALL(cudaMalloc((void **)&devResults, 64 * 64 * sizeof(unsigned int))); /* 
Set results to 0 */ CUDA_CALL(cudaMemset(devResults, 0 ,64*64 * sizeof(unsigned int))); /* Allocate space for prng states on device; prng=Pseudorandom Number Generator */ if (useMRG) { CUDA_CALL(cudaMalloc((void**)&devMRGStates, 64*64* sizeof(curandStateMRG32k3a))); } else if (usePHILOX) { CUDA_CALL(cudaMalloc((void**)&devPHILOXStates, 64*64 * sizeof(curandStatePhilox4_32_10_t))); } else { CUDA_CALL(cudaMalloc((void **)&devStates, 64*64 * sizeof(curandState))); } /* Setup prng states */ if (useMRG) { setup_kernel<<<64, 64>>>(devMRGStates); } else if (usePHILOX) { setup_kernel<<<64,64>>>(devPHILOXStates); } else { setup_kernel<<<64, 64>>>(devStates); } /* Generate and use pseudo-random */ for (i=0; i < 50; i++) { if (useMRG) { generate_kernel<<<64,64>>>(devMRGStates, sampleCount, devResults); } else if (usePHILOX) { generate_kernel<<<64,64>>>(devPHILOXStates, sampleCount, devResults); } else { generate_kernel<<<64,64>>>(devStates, sampleCount, devResults); } } /* Copy device memory to host */ CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); /* Show results */ total = 0; for (i=0 ; i < 64*64; i++) { total+= hostResults[i]; } printf("Fraction with low bit set was %10.13f\n", (float)total / (64.0f * 64.0f * sampleCount * 50.0f)); /* Set results to 0 */ CUDA_CALL(cudaMemset(devResults, 0, 64*64*sizeof(unsigned int))); /* Generate and use uniform pseudo-random */ for (i=0; i< 50; i++) { if (useMRG) { generate_uniform_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults); } else if (usePHILOX) { generate_uniform_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults); } else { generate_uniform_kernel<<<64, 64 >>>(devStates, sampleCount, devResults); } } /* Copy device memory to host */ CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); /* Show result */ total = 0; for (i=0; i < 64 * 64 ; i++) { total += hostResults[i]; } printf("Fraction of uniforms > 0.5 was %10.13f\n", (float) total / (64.0f * 64.0f * sampleCount * 50.0f)); /* Set results to 0 */ CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int))); /* Generate and use normal pseudo-random */ for (i=0; i < 50; i++) { if (useMRG) { generate_normal_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults); } else if (usePHILOX) { generate_normal_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults); } else { generate_normal_kernel<<<64, 64>>>(devStates, sampleCount, devResults); } } /* Copy device memory to host */ CUDA_CALL(cudaMemcpy(hostResults, devResults, 64* 64* sizeof(unsigned int), cudaMemcpyDeviceToHost)); /* Show result */ total = 0; for (i=0; i< 64*64; i++) { total += hostResults[i]; } printf("Fraction of normals within 1 standard deviation was %10.13f\n", (float) total/(64.0f * 64.0f * sampleCount *50.0f)); /* Cleanup */ if (useMRG) { CUDA_CALL(cudaFree(devMRGStates)); } else if (usePHILOX) { CUDA_CALL(cudaFree(devPHILOXStates)); } else { CUDA_CALL(cudaFree(devStates)); } CUDA_CALL(cudaFree(devResults)); free(hostResults); printf("^^^^ kernel_example PASSED\n"); return EXIT_SUCCESS; }
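/* Supplementary sketch (not part of the original file): the same kind of
 * uniform draw using the cuRAND *host* API, which is often simpler when the
 * kernel itself does not need per-thread generator state. It counts the
 * fraction of samples above 0.5, mirroring the device-API test above.
 * The sample count is hypothetical; compile with nvcc -lcurand. */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <curand.h>

int main(void) {
    const size_t n = 1 << 20;
    float *d_samples, *h_samples;
    cudaMalloc((void **)&d_samples, n * sizeof(float));
    h_samples = (float *)malloc(n * sizeof(float));

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateUniform(gen, d_samples, n); /* (0,1] uniforms on the device */

    cudaMemcpy(h_samples, d_samples, n * sizeof(float), cudaMemcpyDeviceToHost);

    size_t above = 0;
    for (size_t i = 0; i < n; i++) {
        if (h_samples[i] > 0.5f) above++;
    }
    printf("Fraction of uniforms > 0.5 was %10.13f\n", (double)above / (double)n);

    curandDestroyGenerator(gen);
    cudaFree(d_samples);
    free(h_samples);
    return 0;
}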
// // Return fatal error and safely exit program // void FatalError(const int lineNumber = 0) { // std::cerr << "FatalError"; // if (lineNumber != 0) std::cerr << " at LINE " << lineNumber; // std::cerr << ". Program Terminated." << std::endl; // cudaDeviceReset(); // exit(EXIT_FAILURE); // } // // Check CUDA line and return CUDA error status // void checkCUDA(const int lineNumber, cudaError_t status) { // if (status != cudaSuccess) { // std::cerr << "CUDA failure at LINE " << lineNumber << ": " << status << std::endl; // FatalError(); // } // } #include <cmath> #include <iostream> #include <opencv2/opencv.hpp> #include "caffe/common.hpp" #include "suncg_fusion.hpp" #define ComputeT float #define StorageT float #define CPUStorage2ComputeT(x) (x) #define GPUCompute2StorageT(x) (x) #define GPUStorage2ComputeT(x) (x) #define CPUCompute2StorageT(x) (x) float GenRandFloat(float min, float max); /*-------------------- ComputeAccurateTSDF --------------------*/ __global__ void CompleteTSDF (ComputeT * vox_info, ComputeT * occupancy_label_crop_GPU , StorageT * vox_tsdf) { // Get voxel volume parameters ComputeT vox_unit = vox_info[0]; ComputeT vox_margin = vox_info[1]; int vox_size[3]; for (int i = 0; i < 3; ++i) vox_size[i] = vox_info[i + 2]; int vox_idx = threadIdx.x + blockIdx.x * blockDim.x; if (vox_idx >= vox_size[0] * vox_size[1] * vox_size[2]){ return; } int z = float((vox_idx / ( vox_size[0] * vox_size[1]))%vox_size[2]) ; int y = float((vox_idx / vox_size[0]) % vox_size[1]); int x = float(vox_idx % vox_size[0]); int search_region = (int)round(vox_margin/vox_unit); if (occupancy_label_crop_GPU[vox_idx] >0 ){ vox_tsdf[vox_idx] = -0.001;// inside mesh return; } for (int iix = max(0,x-search_region); iix < min((int)vox_size[0],x+search_region+1); iix++){ for (int iiy = max(0,y-search_region); iiy < min((int)vox_size[1],y+search_region+1); iiy++){ for (int iiz = max(0,z-search_region); iiz < min((int)vox_size[2],z+search_region+1); iiz++){ int iidx = iiz * vox_size[0] * vox_size[1] + iiy * vox_size[0] + iix; if (occupancy_label_crop_GPU[iidx] > 0){ float xd = abs(x - iix); float yd = abs(y - iiy); float zd = abs(z - iiz); float tsdf_value = sqrtf(xd * xd + yd * yd + zd * zd)/(float)search_region; if (tsdf_value < abs(vox_tsdf[vox_idx])){ vox_tsdf[vox_idx] = GPUCompute2StorageT(tsdf_value); } } } } } } __global__ void depth2Grid(ComputeT * cam_info, ComputeT * vox_info, ComputeT * depth_data, ComputeT * vox_binary_GPU){ // Get camera information int frame_width = cam_info[0]; //int frame_height = cam_info[1]; ComputeT cam_K[9]; for (int i = 0; i < 9; ++i) cam_K[i] = cam_info[i + 2]; ComputeT cam_pose[16]; for (int i = 0; i < 16; ++i) cam_pose[i] = cam_info[i + 11]; // Get voxel volume parameters ComputeT vox_unit = vox_info[0]; //ComputeT vox_margin = vox_info[1]; int vox_size[3]; for (int i = 0; i < 3; ++i) vox_size[i] = vox_info[i + 2]; ComputeT vox_origin[3]; for (int i = 0; i < 3; ++i) vox_origin[i] = vox_info[i + 5]; // Get point in world coordinate int pixel_x = blockIdx.x; int pixel_y = threadIdx.x; ComputeT point_depth = depth_data[pixel_y * frame_width + pixel_x]; ComputeT point_cam[3] = {0}; point_cam[0] = (pixel_x - cam_K[2])*point_depth/cam_K[0]; point_cam[1] = (pixel_y - cam_K[5])*point_depth/cam_K[4]; point_cam[2] = point_depth; ComputeT point_base[3] = {0}; point_base[0] = cam_pose[0 * 4 + 0]* point_cam[0] + cam_pose[0 * 4 + 1]* point_cam[1] + cam_pose[0 * 4 + 2]* point_cam[2]; point_base[1] = cam_pose[1 * 4 + 0]* point_cam[0] + cam_pose[1 * 4 + 1]* point_cam[1] + cam_pose[1 * 4 + 
2]* point_cam[2]; point_base[2] = cam_pose[2 * 4 + 0]* point_cam[0] + cam_pose[2 * 4 + 1]* point_cam[1] + cam_pose[2 * 4 + 2]* point_cam[2]; point_base[0] = point_base[0] + cam_pose[0 * 4 + 3]; point_base[1] = point_base[1] + cam_pose[1 * 4 + 3]; point_base[2] = point_base[2] + cam_pose[2 * 4 + 3]; //printf("vox_origin: %f,%f,%f\n",vox_origin[0],vox_origin[1],vox_origin[2]); // World coordinate to grid coordinate int z = (int)floor((point_base[0] - vox_origin[0])/vox_unit); int x = (int)floor((point_base[1] - vox_origin[1])/vox_unit); int y = (int)floor((point_base[2] - vox_origin[2])/vox_unit); //printf("point_base: %f,%f,%f, %d,%d,%d, %d,%d,%d \n",point_base[0],point_base[1],point_base[2], z, x, y, vox_size[0],vox_size[1],vox_size[2]); // mark vox_binary_GPU if( x >= 0 && x < vox_size[0] && y >= 0 && y < vox_size[1] && z >= 0 && z < vox_size[2]){ int vox_idx = z * vox_size[0] * vox_size[1] + y * vox_size[0] + x; vox_binary_GPU[vox_idx] = ComputeT(1.0); } } __global__ void SquaredDistanceTransform(ComputeT * cam_info, ComputeT * vox_info, ComputeT * depth_data, ComputeT * vox_binary_GPU , StorageT * vox_tsdf, StorageT * vox_height) { // debug int frame_width = cam_info[0]; int frame_height = cam_info[1]; ComputeT cam_K[9]; for (int i = 0; i < 9; ++i) cam_K[i] = cam_info[i + 2]; ComputeT cam_pose[16]; for (int i = 0; i < 16; ++i) cam_pose[i] = cam_info[i + 11]; // Get voxel volume parameters ComputeT vox_unit = vox_info[0]; ComputeT vox_margin = vox_info[1]; int vox_size[3]; for (int i = 0; i < 3; ++i) vox_size[i] = vox_info[i + 2]; ComputeT vox_origin[3]; for (int i = 0; i < 3; ++i) vox_origin[i] = vox_info[i + 5]; int z = blockIdx.x; int y = threadIdx.x; int search_region = (int)round(vox_margin/vox_unit); for (int x = 0; x < vox_size[0]; ++x) { int vox_idx = z * vox_size[0] * vox_size[1] + y * vox_size[0] + x; //vox_tsdf[vox_idx] = 1.0 - vox_binary_GPU[vox_idx]; // Get point in world coordinates XYZ -> YZX ComputeT point_base[3] = {0}; point_base[0] = ComputeT(z) * vox_unit + vox_origin[0]; point_base[1] = ComputeT(x) * vox_unit + vox_origin[1]; point_base[2] = ComputeT(y) * vox_unit + vox_origin[2]; // Encode height from floor if (vox_height != NULL) { ComputeT height_val = ((point_base[2] + 0.05f) / 3.0f); vox_height[vox_idx] = GPUCompute2StorageT(fmin(1.0f, fmax(height_val, 0.0f))); } // Get point in current camera coordinates ComputeT point_cam[3] = {0}; point_base[0] = point_base[0] - cam_pose[0 * 4 + 3]; point_base[1] = point_base[1] - cam_pose[1 * 4 + 3]; point_base[2] = point_base[2] - cam_pose[2 * 4 + 3]; point_cam[0] = cam_pose[0 * 4 + 0] * point_base[0] + cam_pose[1 * 4 + 0] * point_base[1] + cam_pose[2 * 4 + 0] * point_base[2]; point_cam[1] = cam_pose[0 * 4 + 1] * point_base[0] + cam_pose[1 * 4 + 1] * point_base[1] + cam_pose[2 * 4 + 1] * point_base[2]; point_cam[2] = cam_pose[0 * 4 + 2] * point_base[0] + cam_pose[1 * 4 + 2] * point_base[1] + cam_pose[2 * 4 + 2] * point_base[2]; if (point_cam[2] <= 0) continue; // Project point to 2D int pixel_x = roundf(cam_K[0] * (point_cam[0] / point_cam[2]) + cam_K[2]); int pixel_y = roundf(cam_K[4] * (point_cam[1] / point_cam[2]) + cam_K[5]); if (pixel_x < 0 || pixel_x >= frame_width || pixel_y < 0 || pixel_y >= frame_height){ // outside FOV //vox_tsdf[vox_idx] = GPUCompute2StorageT(-1.0); continue; } // Get depth ComputeT point_depth = depth_data[pixel_y * frame_width + pixel_x]; if (point_depth < ComputeT(0.0f) || point_depth > ComputeT(10.0f)) continue; if (roundf(point_depth) == 0){ // mising depth vox_tsdf[vox_idx] = 
GPUCompute2StorageT(-1.0); continue; } // Get depth difference ComputeT point_dist = (point_depth - point_cam[2]) * sqrtf(1 + powf((point_cam[0] / point_cam[2]), 2) + powf((point_cam[1] / point_cam[2]), 2)); ComputeT sign = point_dist/abs(point_dist); vox_tsdf[vox_idx] = GPUCompute2StorageT(sign); //vox_tsdf[vox_idx] = sign*fmin(1, abs(point_dist)/vox_margin); if (abs(point_dist) < 2 * vox_margin){ for (int iix = max(0,x-search_region); iix < min((int)vox_size[0],x+search_region+1); iix++){ for (int iiy = max(0,y-search_region); iiy < min((int)vox_size[1],y+search_region+1); iiy++){ for (int iiz = max(0,z-search_region); iiz < min((int)vox_size[2],z+search_region+1); iiz++){ int iidx = iiz * vox_size[0] * vox_size[1] + iiy * vox_size[0] + iix; if (vox_binary_GPU[iidx] > 0){ float xd = abs(x - iix); float yd = abs(y - iiy); float zd = abs(z - iiz); float tsdf_value = (sqrtf(xd * xd + yd * yd + zd * zd)*vox_unit)/vox_margin; //printf("%f, %f, %f, %f, %f\n",tsdf_value,abs(vox_tsdf[vox_idx]), xd,yd,zd); if (tsdf_value < abs(vox_tsdf[vox_idx])){ vox_tsdf[vox_idx] = GPUCompute2StorageT(tsdf_value*sign); } } } } } } } } void ComputeTSDF(ComputeT * cam_info_CPU, ComputeT * vox_info_CPU, ComputeT * cam_info_GPU, ComputeT * vox_info_GPU, ComputeT * depth_data_GPU, StorageT * vox_tsdf_GPU, StorageT * vox_height_GPU) { int frame_width = cam_info_CPU[0]; int frame_height = cam_info_CPU[1]; int vox_size[3]; for (int i = 0; i < 3; ++i) vox_size[i] = vox_info_CPU[i + 2]; int num_crop_voxels = vox_size[0] * vox_size[1] * vox_size[2]; ComputeT * vox_binary_CPU = new ComputeT[num_crop_voxels]; memset(vox_binary_CPU, 0, num_crop_voxels * sizeof(ComputeT)); ComputeT * vox_binary_GPU; CUDA_CHECK(cudaMalloc(&vox_binary_GPU, num_crop_voxels * sizeof(ComputeT))); GPU_set_zeros(num_crop_voxels, vox_binary_GPU); CUDA_CHECK(cudaGetLastError()); // from depth map to binaray voxel representation depth2Grid<<<frame_width,frame_height>>>(cam_info_GPU, vox_info_GPU, depth_data_GPU, vox_binary_GPU); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaMemcpy(vox_binary_CPU, vox_binary_GPU, num_crop_voxels * sizeof(ComputeT), cudaMemcpyDeviceToHost)); // distance transform SquaredDistanceTransform <<< vox_size[2], vox_size[1] >>> (cam_info_GPU, vox_info_GPU, depth_data_GPU, vox_binary_GPU, vox_tsdf_GPU, vox_height_GPU); delete [] vox_binary_CPU; CUDA_CHECK(cudaFree(vox_binary_GPU)); } // Save voxel volume to point cloud ply file for visualization void SaveVox2Ply(const std::string &filename, std::vector<int> vox_size, StorageT * vox_tsdf) { float tsdf_threshold = 0.4f; // Count total number of points in point cloud int num_points = 0; for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) if (CPUStorage2ComputeT(abs(vox_tsdf[i])) < tsdf_threshold) num_points++; // Create header for ply file FILE *fp = fopen(filename.c_str(), "w"); fprintf(fp, "ply\n"); fprintf(fp, "format binary_little_endian 1.0\n"); fprintf(fp, "element vertex %d\n", num_points); fprintf(fp, "property float x\n"); fprintf(fp, "property float y\n"); fprintf(fp, "property float z\n"); fprintf(fp, "property uchar red\n"); fprintf(fp, "property uchar green\n"); fprintf(fp, "property uchar blue\n"); fprintf(fp, "end_header\n"); // Create point cloud content for ply file for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) { // If TSDF value of voxel is less than some threshold, add voxel coordinates to point cloud if (CPUStorage2ComputeT(abs(vox_tsdf[i])) < tsdf_threshold) { // Compute voxel indices in int for higher positive number range int 
z = floor(i / (vox_size[0] * vox_size[1])); int y = floor((i - (z * vox_size[0] * vox_size[1])) / vox_size[0]); int x = i - (z * vox_size[0] * vox_size[1]) - (y * vox_size[0]); // Convert voxel indices to float, and save coordinates to ply file float float_x = (float) x; float float_y = (float) y; float float_z = (float) z; fwrite(&float_x, sizeof(float), 1, fp); fwrite(&float_y, sizeof(float), 1, fp); fwrite(&float_z, sizeof(float), 1, fp); unsigned char color_r = (unsigned char) 255; unsigned char color_g = (unsigned char) 0; unsigned char color_b = (unsigned char) 0; if (CPUStorage2ComputeT(vox_tsdf[i]) < 0) { color_r = (unsigned char) 0; color_g = (unsigned char) 255; color_b = (unsigned char) 0; } fwrite(&color_r, sizeof(unsigned char), 1, fp); fwrite(&color_g, sizeof(unsigned char), 1, fp); fwrite(&color_b, sizeof(unsigned char), 1, fp); } } fclose(fp); } // Save voxel volume to point cloud ply file for visualization void SaveVoxHeight2Ply(const std::string &filename, std::vector<int> vox_size, StorageT * vox_height) { // Count total number of points in point cloud int num_points = 0; for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) num_points++; // Create header for ply file FILE *fp = fopen(filename.c_str(), "w"); fprintf(fp, "ply\n"); fprintf(fp, "format binary_little_endian 1.0\n"); fprintf(fp, "element vertex %d\n", num_points); fprintf(fp, "property float x\n"); fprintf(fp, "property float y\n"); fprintf(fp, "property float z\n"); fprintf(fp, "property uchar red\n"); fprintf(fp, "property uchar green\n"); fprintf(fp, "property uchar blue\n"); fprintf(fp, "end_header\n"); // Create point cloud content for ply file for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) { // Compute voxel indices in int for higher positive number range int z = floor(i / (vox_size[0] * vox_size[1])); int y = floor((i - (z * vox_size[0] * vox_size[1])) / vox_size[0]); int x = i - (z * vox_size[0] * vox_size[1]) - (y * vox_size[0]); // Convert voxel indices to float, and save coordinates to ply file float float_x = (float) x; float float_y = (float) y; float float_z = (float) z; fwrite(&float_x, sizeof(float), 1, fp); fwrite(&float_y, sizeof(float), 1, fp); fwrite(&float_z, sizeof(float), 1, fp); unsigned char color_r = (unsigned char)(255.0f * std::abs(CPUStorage2ComputeT(vox_height[i])) / 3.0); unsigned char color_g = (unsigned char)(255.0f * std::abs(CPUStorage2ComputeT(vox_height[i])) / 3.0); unsigned char color_b = (unsigned char)(255.0f * std::abs(CPUStorage2ComputeT(vox_height[i])) / 3.0); fwrite(&color_r, sizeof(unsigned char), 1, fp); fwrite(&color_g, sizeof(unsigned char), 1, fp); fwrite(&color_b, sizeof(unsigned char), 1, fp); } fclose(fp); } // Save voxel volume labels to point cloud ply file for visualization void SaveVoxLabel2Ply(const std::string &filename, std::vector<int> vox_size, int label_downscale, StorageT * vox_label) { // Count total number of points in point cloud int num_points = 0; for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) if (CPUStorage2ComputeT(vox_label[i]) > 0) num_points++; // Create header for ply file FILE *fp = fopen(filename.c_str(), "w"); fprintf(fp, "ply\n"); fprintf(fp, "format binary_little_endian 1.0\n"); fprintf(fp, "element vertex %d\n", num_points); fprintf(fp, "property float x\n"); fprintf(fp, "property float y\n"); fprintf(fp, "property float z\n"); fprintf(fp, "property uchar red\n"); fprintf(fp, "property uchar green\n"); fprintf(fp, "property uchar blue\n"); fprintf(fp, "end_header\n"); // Create 
different colors for each class const int num_classes = 36; int class_colors[num_classes * 3]; for (int i = 0; i < num_classes; ++i) { class_colors[i * 3 + 0] = (int)(std::round(GenRandFloat(0.0f, 255.0f))); class_colors[i * 3 + 1] = (int)(std::round(GenRandFloat(0.0f, 255.0f))); class_colors[i * 3 + 2] = (int)(std::round(GenRandFloat(0.0f, 255.0f))); } // Create point cloud content for ply file for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) { // If class of voxel non-empty, add voxel coordinates to point cloud if (CPUStorage2ComputeT(vox_label[i]) > 0) { // Compute voxel indices in int for higher positive number range int z = floor(i / (vox_size[0] * vox_size[1])); int y = floor((i - (z * vox_size[0] * vox_size[1])) / vox_size[0]); int x = i - (z * vox_size[0] * vox_size[1]) - (y * vox_size[0]); // Convert voxel indices to float, and save coordinates to ply file float float_x = (float)x * (float)label_downscale + (float)label_downscale / 2; float float_y = (float)y * (float)label_downscale + (float)label_downscale / 2; float float_z = (float)z * (float)label_downscale + (float)label_downscale / 2; fwrite(&float_x, sizeof(float), 1, fp); fwrite(&float_y, sizeof(float), 1, fp); fwrite(&float_z, sizeof(float), 1, fp); // Save color of class into voxel unsigned char color_r = (unsigned char) class_colors[(int)CPUStorage2ComputeT(vox_label[i]) * 3 + 0]; unsigned char color_g = (unsigned char) class_colors[(int)CPUStorage2ComputeT(vox_label[i]) * 3 + 1]; unsigned char color_b = (unsigned char) class_colors[(int)CPUStorage2ComputeT(vox_label[i]) * 3 + 2]; fwrite(&color_r, sizeof(unsigned char), 1, fp); fwrite(&color_g, sizeof(unsigned char), 1, fp); fwrite(&color_b, sizeof(unsigned char), 1, fp); } } fclose(fp); } // Save voxel volume weights to point cloud ply file for visualization void SaveVoxWeight2Ply(const std::string &filename, std::vector<int> vox_size, int label_downscale, StorageT * vox_label_weight) { // Count total number of points in point cloud int num_points = 0; for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) if (CPUStorage2ComputeT(vox_label_weight[i]) > 0) num_points++; // Create header for ply file FILE *fp = fopen(filename.c_str(), "w"); fprintf(fp, "ply\n"); fprintf(fp, "format binary_little_endian 1.0\n"); fprintf(fp, "element vertex %d\n", num_points); fprintf(fp, "property float x\n"); fprintf(fp, "property float y\n"); fprintf(fp, "property float z\n"); fprintf(fp, "property uchar red\n"); fprintf(fp, "property uchar green\n"); fprintf(fp, "property uchar blue\n"); fprintf(fp, "end_header\n"); // // Create different colors for each class // const int num_classes = 36; // int class_colors[num_classes * 3]; // for (int i = 0; i < num_classes; ++i) { // class_colors[i * 3 + 0] = (int)(std::round(GenRandFloat(0.0f, 255.0f))); // class_colors[i * 3 + 1] = (int)(std::round(GenRandFloat(0.0f, 255.0f))); // class_colors[i * 3 + 2] = (int)(std::round(GenRandFloat(0.0f, 255.0f))); // } // Create point cloud content for ply file for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) { // If class of voxel non-empty, add voxel coordinates to point cloud if (CPUStorage2ComputeT(vox_label_weight[i]) > 0) { // Compute voxel indices in int for higher positive number range int z = floor(i / (vox_size[0] * vox_size[1])); int y = floor((i - (z * vox_size[0] * vox_size[1])) / vox_size[0]); int x = i - (z * vox_size[0] * vox_size[1]) - (y * vox_size[0]); // Convert voxel indices to float, and save coordinates to ply file float float_x = 
(float)x * (float)label_downscale + (float)label_downscale / 2; float float_y = (float)y * (float)label_downscale + (float)label_downscale / 2; float float_z = (float)z * (float)label_downscale + (float)label_downscale / 2; fwrite(&float_x, sizeof(float), 1, fp); fwrite(&float_y, sizeof(float), 1, fp); fwrite(&float_z, sizeof(float), 1, fp); // Save color of class into voxel unsigned char color_r = (unsigned char) 0; unsigned char color_g = (unsigned char) 0; unsigned char color_b = (unsigned char) 255; if (CPUStorage2ComputeT(vox_label_weight[i]) > 1) { color_r = (unsigned char) 255; color_g = (unsigned char) 0; color_b = (unsigned char) 0; } fwrite(&color_r, sizeof(unsigned char), 1, fp); fwrite(&color_g, sizeof(unsigned char), 1, fp); fwrite(&color_b, sizeof(unsigned char), 1, fp); } } fclose(fp); } // Transform voxel label from single channel to 36 channel volume __global__ void SetVoxLabel(int num_classes, float * vox_info, float * vox_label_src, float * vox_label_dst) { // Get voxel volume parameters int vox_size[3]; for (int i = 0; i < 3; ++i) vox_size[i] = (int)(vox_info[i + 2]); int z = blockIdx.x; int y = threadIdx.x; for (int x = 0; x < vox_size[0]; ++x) { int vox_idx = z * vox_size[0] * vox_size[1] + y * vox_size[0] + x; int vox_val = vox_label_src[vox_idx]; for (int i = 0; i < num_classes; ++i) { if (vox_val == i) { vox_label_dst[i * vox_size[0] * vox_size[1] * vox_size[2] + vox_idx] = 1.0f; } else { vox_label_dst[i * vox_size[0] * vox_size[1] * vox_size[2] + vox_idx] = 0.0f; } } } } /*-------------------- Main Function ---------------------*/ // int main(int argc, char **argv) { // // Get camera intrinsics // int frame_width = 640; // in pixels // int frame_height = 480; // float cam_K[9] = {518.8579f, 0.0f, (float)frame_width / 2.0f, 0.0f, 518.8579f, (float)frame_height / 2.0f, 0.0f, 0.0f, 1.0f}; // // Set voxel volume parameters // float vox_unit = 0.02f; // in meters // float vox_margin = vox_unit * 5.0f; // in voxels // int vox_size[3] = {210, 120, 210}; // float vox_origin[3] = {}; // in camera coordinates // vox_origin[0] = (-(float)vox_size[0] / 2.0f + 0.5f) * vox_unit; // vox_origin[1] = (-(float)vox_size[1] / 2.0f + 0.5f) * vox_unit; // vox_origin[2] = 0.5f * vox_unit; // // CPU malloc voxel volume // float * vox_tsdf = new float[vox_size[0] * vox_size[1] * vox_size[2]]; // float * vox_weight = new float[vox_size[0] * vox_size[1] * vox_size[2]]; // memset(vox_weight, 0, sizeof(float) * vox_size[0] * vox_size[1] * vox_size[2]); // for (int i = 0; i < vox_size[0] * vox_size[1] * vox_size[2]; ++i) { // vox_tsdf[i] = 1.0f; // vox_weight[i] = 0.0f; // } // // CPU malloc voxel labels // int * vox_label = new int[vox_size[0] * vox_size[1] * vox_size[2]]; // memset(vox_label, 0, sizeof(int) * vox_size[0] * vox_size[1] * vox_size[2]); // // Get depth image // std::string depth_path = "data/depth/0000_000514ade3bcc292a613a4c2755a5050_fl001_rm0001_0000.png"; // float * depth_data = new float[frame_height * frame_width]; // ReadDepthImage(depth_path, depth_data, frame_width, frame_height); // // Get camera pose // // float cam_pose[16] = {-0.9948, -0.0201, 0.1002, 42.9729, -0.1022, 0.1954, -0.9754, 51.5820, 0, -0.9805, -0.1965, 1.3050, 0, 0, 0, 1}; // float cam_pose[16] = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}; // // Copy camera information to GPU // float cam_info[27]; // cam_info[0] = (float)frame_width; // cam_info[1] = (float)frame_height; // for (int i = 0; i < 9; ++i) // cam_info[i + 2] = cam_K[i]; // for (int i = 
0; i < 16; ++i) // cam_info[i + 11] = cam_pose[i]; // float * d_cam_info; // CUDA_CHECKcudaMalloc(&d_cam_info, 27 * sizeof(float))); // CUDA_CHECKcudaMemcpy(d_cam_info, cam_info, 27 * sizeof(float), cudaMemcpyHostToDevice)); // // Copy voxel volume to GPU // float * d_vox_tsdf; // float * d_vox_weight; // CUDA_CHECKcudaMalloc(&d_vox_tsdf, vox_size[0] * vox_size[1] * vox_size[2] * sizeof(float))); // CUDA_CHECKcudaMalloc(&d_vox_weight, vox_size[0] * vox_size[1] * vox_size[2] * sizeof(float))); // CUDA_CHECKcudaMemcpy(d_vox_tsdf, vox_tsdf, vox_size[0] * vox_size[1] * vox_size[2] * sizeof(float), cudaMemcpyHostToDevice)); // CUDA_CHECKcudaMemcpy(d_vox_weight, vox_weight, vox_size[0] * vox_size[1] * vox_size[2] * sizeof(float), cudaMemcpyHostToDevice)); // // Copy voxel volume parameters to GPU // float vox_info[8]; // vox_info[0] = vox_unit; // vox_info[1] = vox_margin; // for (int i = 0; i < 3; ++i) // vox_info[i + 2] = vox_size[i]; // for (int i = 0; i < 3; ++i) // vox_info[i + 5] = vox_origin[i]; // float * d_vox_info; // CUDA_CHECKcudaMalloc(&d_vox_info, 8 * sizeof(float))); // CUDA_CHECKcudaMemcpy(d_vox_info, vox_info, 8 * sizeof(float), cudaMemcpyHostToDevice)); // // Copy depth data to GPU // float * d_depth_data; // CUDA_CHECKcudaMalloc(&d_depth_data, frame_height * frame_width * sizeof(float))); // CUDA_CHECKcudaMemcpy(d_depth_data, depth_data, frame_height * frame_width * sizeof(float), cudaMemcpyHostToDevice)); // // Fuse frame into voxel volume // int CUDA_NUM_BLOCKS = vox_size[2]; // int CUDA_NUM_THREADS = vox_size[1]; // Integrate<<< CUDA_NUM_BLOCKS, CUDA_NUM_THREADS >>>(d_cam_info, d_vox_info, d_depth_data, d_vox_tsdf, d_vox_weight); // CUDA_CHECKcudaGetLastError()); // // Copy voxel volume back to CPU // CUDA_CHECKcudaMemcpy(vox_tsdf, d_vox_tsdf, vox_size[0] * vox_size[1] * vox_size[2] * sizeof(float), cudaMemcpyDeviceToHost)); // SaveVox2Ply("vox.ply", vox_size, vox_tsdf); // return 0; // }
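/* Supplementary sketch (not part of the original file): host-side helpers for
 * the linear voxel indexing used throughout the kernels above
 * (vox_idx = z * W * H + y * W + x, with vox_size = {W, H, D}), together with
 * a round-trip check. The grid dimensions reuse the values from the
 * commented-out main() above. */
#include <cassert>
#include <cstdio>

struct VoxCoord { int x, y, z; };

inline int VoxIndex(const int vox_size[3], int x, int y, int z) {
    return z * vox_size[0] * vox_size[1] + y * vox_size[0] + x;
}

inline VoxCoord VoxFromIndex(const int vox_size[3], int idx) {
    VoxCoord c;
    c.z = idx / (vox_size[0] * vox_size[1]);
    c.y = (idx - c.z * vox_size[0] * vox_size[1]) / vox_size[0];
    c.x = idx - c.z * vox_size[0] * vox_size[1] - c.y * vox_size[0];
    return c;
}

int main() {
    const int vox_size[3] = {210, 120, 210};
    for (int z = 0; z < vox_size[2]; z += 37)
        for (int y = 0; y < vox_size[1]; y += 29)
            for (int x = 0; x < vox_size[0]; x += 31) {
                VoxCoord c = VoxFromIndex(vox_size, VoxIndex(vox_size, x, y, z));
                assert(c.x == x && c.y == y && c.z == z);
            }
    printf("voxel index round-trip OK\n");
    return 0;
}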
namespace surfelwarp { namespace device { __device__ __constant__ float4 reference_node_coordinates[d_max_num_nodes]; /* This kernel do skinning of both vertex and nodes given * node coordinate and vertex coordinate, vertex.w can not be used */ __global__ void skinningVertexNodeBruteForceKernel( const DeviceArrayView<float4> vertex_confid_array, const int node_num, ushort4* vertex_knn_array, float4* vertex_knn_weight, ushort4* node_knn_array, float4* node_knn_weight ) { // Outof bound: for both vertex and node knn are updated by this kernel const int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= vertex_confid_array.Size() + node_num) return; // Load the vertex from memory float4 vertex; if (idx < vertex_confid_array.Size()) { const float4 vertex_confid = vertex_confid_array[idx]; vertex = make_float4(vertex_confid.x, vertex_confid.y, vertex_confid.z, 1.0); } else if (idx >= vertex_confid_array.Size() && idx < vertex_confid_array.Size() + node_num) { const int offset = idx - vertex_confid_array.Size(); const float4 node = reference_node_coordinates[offset]; vertex = make_float4(node.x, node.y, node.z, 1.0); } //Keep priority queue using heap float d0 = 1e6f, d1 = 1e6f, d2 = 1e6f, d3 = 1e6f; unsigned short i0 = 0, i1 = 0, i2 = 0, i3 = 0; //Burte force bruteForceSearch4Padded(vertex, reference_node_coordinates, node_num, d0, d1, d2, d3, i0, i1, i2, i3); //The forth part of vertex might be confidence const float3 v = make_float3(vertex.x, vertex.y, vertex.z); //Compute the knn weight given knn const float4 node0_v4 = reference_node_coordinates[i0]; const float3 node0_v = make_float3(node0_v4.x, node0_v4.y, node0_v4.z); const float vn_dist0 = squared_norm(v - node0_v); const float4 node1_v4 = reference_node_coordinates[i1]; const float3 node1_v = make_float3(node1_v4.x, node1_v4.y, node1_v4.z); const float vn_dist1 = squared_norm(v - node1_v); const float4 node2_v4 = reference_node_coordinates[i2]; const float3 node2_v = make_float3(node2_v4.x, node2_v4.y, node2_v4.z); const float vn_dist2 = squared_norm(v - node2_v); const float4 node3_v4 = reference_node_coordinates[i3]; const float3 node3_v = make_float3(node3_v4.x, node3_v4.y, node3_v4.z); const float vn_dist3 = squared_norm(v - node3_v); // Compute the weight of this node float4 weight; weight.x = __expf(-vn_dist0 / (2 * d_node_radius_square)); weight.y = __expf(-vn_dist1 / (2 * d_node_radius_square)); weight.z = __expf(-vn_dist2 / (2 * d_node_radius_square)); weight.w = __expf(-vn_dist3 / (2 * d_node_radius_square)); #if defined(USE_INTERPOLATE_WEIGHT_NORMALIZATION) //Do a normalization on the weights? 
const float inv_weight_sum = 1.0f / fabsf_sum(weight); weight.x *= inv_weight_sum; weight.y *= inv_weight_sum; weight.z *= inv_weight_sum; weight.w *= inv_weight_sum; #endif // Store the result to global memory if (idx < vertex_confid_array.Size()) { vertex_knn_array[idx] = make_ushort4(i0, i1, i2, i3); vertex_knn_weight[idx] = weight; } else if (idx >= vertex_confid_array.Size() && idx < vertex_confid_array.Size() + node_num) { const int offset = idx - vertex_confid_array.Size(); node_knn_array[offset] = make_ushort4(i0, i1, i2, i3); node_knn_weight[offset] = weight; } } /* This kernel do skinning of vertex given the node * and vertex coordinate, vertex.w can not be used */ __global__ void skinningVertexBruteForceKernel( const DeviceArrayView<float4> vertices, const int node_num, ushort4* nearest_neighbours, float4* knn_weight ) { //The query points and its nearest neighbour const int vertex_idx = threadIdx.x + blockDim.x * blockIdx.x; if (vertex_idx >= vertices.Size()) return; const float4 vertex = vertices[vertex_idx]; //Keep priority queue using heap float d0 = 1e6f, d1 = 1e6f, d2 = 1e6f, d3 = 1e6f; unsigned short i0 = 0, i1 = 0, i2 = 0, i3 = 0; //Burte force bruteForceSearch4Padded(vertex, reference_node_coordinates, node_num, d0, d1, d2, d3, i0, i1, i2, i3); //The forth part of vertex might be confidence const float3 v = make_float3(vertex.x, vertex.y, vertex.z); //Compute the knn weight given knn const float4& node0_v4 = reference_node_coordinates[i0]; const float3 node0_v = make_float3(node0_v4.x, node0_v4.y, node0_v4.z); const float vn_dist0 = squared_norm(v - node0_v); const float4& node1_v4 = reference_node_coordinates[i1]; const float3 node1_v = make_float3(node1_v4.x, node1_v4.y, node1_v4.z); const float vn_dist1 = squared_norm(v - node1_v); const float4& node2_v4 = reference_node_coordinates[i2]; const float3 node2_v = make_float3(node2_v4.x, node2_v4.y, node2_v4.z); const float vn_dist2 = squared_norm(v - node2_v); const float4& node3_v4 = reference_node_coordinates[i3]; const float3 node3_v = make_float3(node3_v4.x, node3_v4.y, node3_v4.z); const float vn_dist3 = squared_norm(v - node3_v); // Compute the weight of this node float4 weight; weight.x = __expf(-vn_dist0 / (2 * d_node_radius_square)); weight.y = __expf(-vn_dist1 / (2 * d_node_radius_square)); weight.z = __expf(-vn_dist2 / (2 * d_node_radius_square)); weight.w = __expf(-vn_dist3 / (2 * d_node_radius_square)); #if defined(USE_INTERPOLATE_WEIGHT_NORMALIZATION) //Do a normalization on the weights? 
const float inv_weight_sum = 1.0f / fabsf_sum(weight); weight.x *= inv_weight_sum; weight.y *= inv_weight_sum; weight.z *= inv_weight_sum; weight.w *= inv_weight_sum; #endif //Write the nearest neighbour to storage nearest_neighbours[vertex_idx] = make_ushort4(i0, i1, i2, i3); knn_weight[vertex_idx] = weight; }//End of kernel } // namespace device } // namespace surfelwarp /* The setup and allocation functions, all default */ surfelwarp::KNNBruteForceReferenceNodes::KNNBruteForceReferenceNodes() : m_num_nodes(0) { //Allocate clear memory m_invalid_nodes.create(Constants::kMaxNumNodes); //The other part of the constant memory should be filled with invalid points std::vector<float4> h_invalid_nodes; h_invalid_nodes.resize(Constants::kMaxNumNodes); float* begin = (float*)h_invalid_nodes.data(); float* end = begin + 4 * Constants::kMaxNumNodes; std::fill(begin, end, 1e6f); m_invalid_nodes.upload(h_invalid_nodes); //Clear it at first clearConstantPoints(); } surfelwarp::KNNBruteForceReferenceNodes::Ptr surfelwarp::KNNBruteForceReferenceNodes::Instance() { static KNNBruteForceReferenceNodes::Ptr instance = nullptr; if(instance == nullptr) { instance.reset(new KNNBruteForceReferenceNodes()); } return instance; } surfelwarp::KNNBruteForceReferenceNodes::~KNNBruteForceReferenceNodes() { m_invalid_nodes.release(); } void surfelwarp::KNNBruteForceReferenceNodes::AllocateBuffer(unsigned max_num_points) {} void surfelwarp::KNNBruteForceReferenceNodes::ReleaseBuffer() {} /* Clear the constant node coordinates */ void surfelwarp::KNNBruteForceReferenceNodes::clearConstantPoints(cudaStream_t stream) { cudaSafeCall(cudaMemcpyToSymbolAsync( device::reference_node_coordinates, m_invalid_nodes.ptr(), sizeof(float4) * Constants::kMaxNumNodes, 0, // no offset cudaMemcpyDeviceToDevice, stream )); } /* Build index copy the nodes into const memory */ void surfelwarp::KNNBruteForceReferenceNodes::BuildIndex(const DeviceArrayView<float4>& nodes, cudaStream_t stream) { //If the new nodes is more than previous nodes if(nodes.Size() >= m_num_nodes) { replaceWithMorePoints(nodes, stream); return; } //First clear the buffer clearConstantPoints(stream); //Copy the value to device cudaSafeCall(cudaMemcpyToSymbolAsync( device::reference_node_coordinates, nodes.RawPtr(), nodes.Size() * sizeof(float4), 0, // no offset cudaMemcpyDeviceToDevice, stream )); //Update size m_num_nodes = nodes.Size(); } void surfelwarp::KNNBruteForceReferenceNodes::UpdateIndex( const SynchronizeArray<float4>& nodes, size_t new_nodes_offset, cudaStream_t stream ) { //Copy the value to device const auto node_view = std::move(nodes.DeviceArrayReadOnly()); const auto new_node_size = node_view.Size() - new_nodes_offset; const float4* node_ptr = node_view.RawPtr() + new_nodes_offset; cudaSafeCall(cudaMemcpyToSymbolAsync( device::reference_node_coordinates, node_ptr, new_node_size * sizeof(float4), new_nodes_offset * sizeof(float4), cudaMemcpyDeviceToDevice, stream )); //Update the size m_num_nodes = nodes.DeviceArraySize(); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) SURFELWARP_CHECK_EQ(nodes.DeviceArraySize(), nodes.HostArraySize()); cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::KNNBruteForceReferenceNodes::replaceWithMorePoints(const DeviceArrayView<float4>& nodes, cudaStream_t stream) { SURFELWARP_CHECK_GE(nodes.Size(), m_num_nodes) << "Please use BuildIndex() instead!"; cudaSafeCall(cudaMemcpyToSymbolAsync( device::reference_node_coordinates, nodes.RawPtr(), nodes.Size() * 
sizeof(float4), 0, // no offset cudaMemcpyDeviceToDevice, stream )); m_num_nodes = nodes.Size(); } void surfelwarp::KNNBruteForceReferenceNodes::Skinning( const DeviceArrayView<float4>& vertex, DeviceArraySlice<ushort4> knn, DeviceArraySlice<float4> knn_weight, cudaStream_t stream ) { dim3 blk(256); dim3 grid(divUp(vertex.Size(), blk.x)); device::skinningVertexBruteForceKernel<<<grid, blk, 0, stream>>>( vertex, m_num_nodes, knn, knn_weight ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::KNNBruteForceReferenceNodes::Skinning( const DeviceArrayView<float4>& vertex, const DeviceArrayView<float4>& node, DeviceArraySlice<ushort4> vertex_knn, DeviceArraySlice<ushort4> node_knn, DeviceArraySlice<float4> vertex_knn_weight, DeviceArraySlice<float4> node_knn_weight, cudaStream_t stream ) { //Check the size SURFELWARP_CHECK_EQ(node.Size(), m_num_nodes); dim3 blk(256); dim3 grid(divUp(vertex.Size() + m_num_nodes, blk.x)); device::skinningVertexNodeBruteForceKernel<<<grid, blk, 0, stream>>>( vertex, m_num_nodes, vertex_knn, vertex_knn_weight, node_knn, node_knn_weight ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif }
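/* Supplementary sketch (not part of the original file): a host-side reference
 * for the 4-nearest-node skinning computed by the kernels above. The node
 * radius and the sample data are hypothetical, and d_node_radius_square is
 * assumed to be the squared node radius; the device code keeps the running
 * top-4 inside bruteForceSearch4Padded, whereas here a plain insertion into a
 * 4-slot array is used. */
#include <cmath>
#include <cstdio>
#include <vector>
#include <array>

struct F3 { float x, y, z; };

static float sqDist(const F3 &a, const F3 &b) {
    float dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
    return dx * dx + dy * dy + dz * dz;
}

int main() {
    const float node_radius = 0.025f;               // hypothetical, in meters
    const float sigma2 = node_radius * node_radius; // stands in for d_node_radius_square
    std::vector<F3> nodes = {{0, 0, 0}, {0.05f, 0, 0}, {0, 0.05f, 0},
                             {0, 0, 0.05f}, {0.1f, 0.1f, 0.1f}};
    F3 vertex = {0.02f, 0.01f, 0.0f};

    // Pick the 4 nearest nodes by brute force over all candidates.
    std::array<int, 4> knn = {-1, -1, -1, -1};
    std::array<float, 4> knn_d2;
    knn_d2.fill(1e6f);
    for (int i = 0; i < (int)nodes.size(); ++i) {
        float d2 = sqDist(vertex, nodes[i]);
        for (int k = 0; k < 4; ++k) {                // insert into the top-4 slots
            if (d2 < knn_d2[k]) {
                for (int j = 3; j > k; --j) { knn_d2[j] = knn_d2[j - 1]; knn[j] = knn[j - 1]; }
                knn_d2[k] = d2; knn[k] = i;
                break;
            }
        }
    }

    // Gaussian skinning weights, normalized as in the kernels above.
    std::array<float, 4> w;
    float sum = 0.f;
    for (int k = 0; k < 4; ++k) { w[k] = std::exp(-knn_d2[k] / (2.f * sigma2)); sum += w[k]; }
    for (int k = 0; k < 4; ++k) w[k] /= sum;

    for (int k = 0; k < 4; ++k)
        printf("knn %d: node %d, weight %.4f\n", k, knn[k], w[k]);
    return 0;
}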
// // sunnet project // Copyright (C) 2018 by Contributors <https://github.com/Tyill/sunnet> // // This code is licensed under the MIT License. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files(the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #include <cuda_runtime.h> #include <cudnn.h> #include "../stdafx.h" #include "snOperatorCUDA/src/Operator/deconvolution.h" using namespace std; using namespace SN_Base; struct gpuParams{ cudnnHandle_t cudnn = 0; cudnnConvolutionDescriptor_t conv_desc = 0; cudnnTensorDescriptor_t in_desc = 0; cudnnTensorDescriptor_t out_desc = 0; cudnnTensorDescriptor_t grin_desc = 0; cudnnTensorDescriptor_t grout_desc = 0; cudnnFilterDescriptor_t w_desc = 0; cudnnFilterDescriptor_t dw_desc = 0; cudnnTensorDescriptor_t bias_desc = 0; cudnnConvolutionFwdAlgo_t algoFwd; cudnnConvolutionBwdDataAlgo_t algoBwdData; cudnnConvolutionBwdFilterAlgo_t algoBwdW; size_t wsFwdSz = 0; size_t wsBwdDataSz = 0; size_t wsBwdWSz = 0; size_t inszMem = 0; void* d_wsFwd = 0; void* d_wsBwdData = 0; void* d_wsBwdW = 0; }; void Deconvolution::iniParamCUDA(bool isLern, const snSize& insz, const snSize& outsz, const deconvParams& prms, void** pGpuPrm){ bool isFirst = false; gpuParams* gpuPrm = (gpuParams*)*pGpuPrm; if (!gpuPrm){ cudaDeviceProp cu_deviceProps; cudaGetDeviceProperties(&cu_deviceProps, 0); if (cu_deviceProps.major < 3){ ERROR_MESS("%s requires SM >= 3.0"); return; } gpuPrm = new gpuParams(); memset(gpuPrm, 0, sizeof(gpuParams)); *pGpuPrm = gpuPrm; cudnnHandle_t cudnn = nullptr; cuCHECK(cudnnCreate(&cudnn)); gpuPrm->cudnn = cudnn; isFirst = true; } // input cudnnTensorDescriptor_t in_desc = nullptr; cuCHECK(cudnnCreateTensorDescriptor(&in_desc)); cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->in_desc)); gpuPrm->in_desc = in_desc; // w cudnnFilterDescriptor_t w_desc = nullptr; cuCHECK(cudnnCreateFilterDescriptor(&w_desc)); cuCHECK(cudnnSetFilter4dDescriptor(w_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth))); if (!isFirst) cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->w_desc)); gpuPrm->w_desc = w_desc; // conv cudnnConvolutionDescriptor_t conv_desc = nullptr; cuCHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); cuCHECK(cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, int(prms.stride), int(prms.stride), 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); if (!isFirst) 
cuCHECK(cudnnDestroyConvolutionDescriptor((cudnnConvolutionDescriptor_t)gpuPrm->conv_desc)); gpuPrm->conv_desc = conv_desc; // output cudnnTensorDescriptor_t out_desc; cuCHECK(cudnnCreateTensorDescriptor(&out_desc)); cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->out_desc)); gpuPrm->out_desc = out_desc; // algorithm cudnnConvolutionBwdDataAlgo_t algoBwdData; cuCHECK(cudnnGetConvolutionBackwardDataAlgorithm(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoBwdData)); gpuPrm->algoBwdData = algoBwdData; // workspace size_t wsBwdDataSz = 0; cuCHECK(cudnnGetConvolutionBackwardDataWorkspaceSize(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, algoBwdData, &wsBwdDataSz)); gpuPrm->wsBwdDataSz = wsBwdDataSz; size_t wsFwdSz = 0, wsBwdWSz = 0; if (isLern){ // grin cudnnTensorDescriptor_t grin_desc; cuCHECK(cudnnCreateTensorDescriptor(&grin_desc)); cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grin_desc)); gpuPrm->grin_desc = grin_desc; // grout cudnnTensorDescriptor_t grout_desc; cuCHECK(cudnnCreateTensorDescriptor(&grout_desc)); cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w))); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grout_desc)); gpuPrm->grout_desc = grout_desc; // dw cudnnFilterDescriptor_t dw_desc = nullptr; cuCHECK(cudnnCreateFilterDescriptor(&dw_desc)); cuCHECK(cudnnSetFilter4dDescriptor(dw_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth))); if (!isFirst) cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->dw_desc)); gpuPrm->dw_desc = dw_desc; // bias cudnnTensorDescriptor_t bias_desc; cuCHECK(cudnnCreateTensorDescriptor(&bias_desc)); cuCHECK(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, int(insz.d), 1, 1)); if (!isFirst) cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->bias_desc)); gpuPrm->bias_desc = bias_desc; // algorithm cudnnConvolutionFwdAlgo_t algoFwd; cuCHECK(cudnnGetConvolutionForwardAlgorithm(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algoFwd)); gpuPrm->algoFwd = algoFwd; cudnnConvolutionBwdFilterAlgo_t algoBwdW; cuCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoBwdW)); gpuPrm->algoBwdW = algoBwdW; // workspace cuCHECK(cudnnGetConvolutionForwardWorkspaceSize(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, algoFwd, &wsFwdSz)); gpuPrm->wsFwdSz = wsFwdSz; cuCHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, algoBwdW, &wsBwdWSz)); gpuPrm->wsBwdWSz = wsBwdWSz; } if (isFirst){ cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz)); if (isLern){ cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, wsFwdSz)); cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz)); } } else if (gpuPrm->inszMem < insz.size()){ cuCHECK(cudaFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0; cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, 
wsBwdDataSz)); if (isLern){ cuCHECK(cudaFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0; cuCHECK(cudaFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0; cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, wsFwdSz)); cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz)); } gpuPrm->inszMem = insz.size(); } } void Deconvolution::freeParamCUDA(void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; if (!gpuPrm) return; cuCHECK(cudnnDestroy(gpuPrm->cudnn)); cuCHECK(cudnnDestroyConvolutionDescriptor(gpuPrm->conv_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc)); cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->w_desc)); cuCHECK(cudaFree(gpuPrm->d_wsBwdData)); if (gpuPrm->grin_desc){ // isLern cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc)); cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->dw_desc)); cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->bias_desc)); cuCHECK(cudaFree(gpuPrm->d_wsFwd)); cuCHECK(cudaFree(gpuPrm->d_wsBwdW)); } } void Deconvolution::forwardCUDA(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionBackwardData(gpuPrm->cudnn, &alpha, gpuPrm->w_desc, weight, gpuPrm->in_desc, input, gpuPrm->conv_desc, gpuPrm->algoBwdData, gpuPrm->d_wsBwdData, gpuPrm->wsBwdDataSz, &beta, gpuPrm->out_desc, output)); } __global__ void cuBwdBias(snSize insz, const snFloat* bias, snFloat* grout){ size_t isz = insz.w * insz.h; snFloat* pGrOut = grout + isz * blockIdx.x + isz * insz.d * blockIdx.y; snFloat b = bias[blockIdx.x]; unsigned int i = threadIdx.x; while (i < isz){ pGrOut[i] += b; i += blockDim.x; } } void Deconvolution::backwardCUDA_GW(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snFloat* input, const snSize& outsz, const snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; size_t wStepByN = prms.fWidth * prms.fHeight * insz.d * outsz.d; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->w_desc, weight, gpuPrm->conv_desc, gpuPrm->algoFwd, gpuPrm->d_wsFwd, gpuPrm->wsFwdSz, &beta, gpuPrm->grout_desc, gradOut)); cuCHECK(cudnnConvolutionBackwardFilter(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->in_desc, input, gpuPrm->conv_desc, gpuPrm->algoBwdW, gpuPrm->d_wsBwdW, gpuPrm->wsBwdWSz, &beta, gpuPrm->dw_desc, dWeightOut)); cuCHECK(cudnnConvolutionBackwardBias(gpuPrm->cudnn, &alpha, gpuPrm->in_desc, input, &beta, gpuPrm->bias_desc, dWeightOut + wStepByN)); // +bias dim3 dimBlock(128); dim3 dimGrid(int(insz.d), int(insz.n)); cuBwdBias << < dimGrid, dimBlock >> > (insz, weight + wStepByN, gradOut); } void Deconvolution::backwardCUDA_G(const deconvParams& prms, const snFloat* weight, const snSize& insz, const snSize& outsz, const snFloat* gradIn, snFloat* gradOut, void* gpuPrms){ gpuParams* gpuPrm = (gpuParams*)gpuPrms; size_t wStepByN = prms.fWidth * prms.fHeight * insz.d * outsz.d; // run snFloat alpha = 1.f, beta = 0.f; cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn, &alpha, gpuPrm->grin_desc, gradIn, gpuPrm->w_desc, weight, gpuPrm->conv_desc, gpuPrm->algoFwd, gpuPrm->d_wsFwd, gpuPrm->wsFwdSz, &beta, gpuPrm->grout_desc, gradOut)); // +bias cuBwdBias << < int(insz.n), 128 >> > (insz, weight + wStepByN, gradOut); }
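/* Supplementary sketch (not part of the original file): the forward pass above
 * runs cudnnConvolutionBackwardData, i.e. a transposed convolution. With the
 * zero padding and unit dilation configured in iniParamCUDA, the spatial
 * output extent follows the usual relation out = (in - 1) * stride + fsize.
 * A tiny host check of that relation, with hypothetical layer sizes: */
#include <cstdio>

// Output extent of a transposed convolution with no padding and dilation 1.
static int deconvOutSize(int in, int fsize, int stride) {
    return (in - 1) * stride + fsize;
}

int main() {
    // Hypothetical layer: 3x3 filters, stride 2, 16x16 input feature map.
    printf("out = %d\n", deconvOutSize(16, 3, 2)); // prints 33
    return 0;
}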
extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "roi_align_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) /*** Forward ***/ __device__ float bilinear_interpolate(const float* bottom_data, const int height, const int width, float y, float x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; } else { x_high = x_low + 1; } float ly = y - y_low; float lx = x - x_low; float hy = 1. -ly, hx = 1. - lx; // do bilinear interpolation float v1 = bottom_data[y_low * width + x_low]; float v2 = bottom_data[y_low * width + x_high]; float v3 = bottom_data[y_high * width + x_low]; float v4 = bottom_data[y_high * width + x_high]; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; const float* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical float roi_start_w = offset_bottom_rois[1] * spatial_scale; float roi_start_h = offset_bottom_rois[2] * spatial_scale; float roi_end_w = offset_bottom_rois[3] * spatial_scale; float roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f); float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f); float bin_size_h = roi_height / aligned_height; float bin_size_w = roi_width / aligned_width; const float* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / aligned_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width); // We do average (integral) pooling inside a bin const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 float output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const float y = roi_start_h + ph * bin_size_h + (iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const float x = roi_start_w + pw * bin_size_w + (ix + .5f) * bin_size_w / roi_bin_grid_w; float val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } int ROIAlignForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* top_data, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; cudaError_t err; ROIAlignForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, bottom_data, spatial_scale, height, width, channels, aligned_height, aligned_width, sampling_ratio, bottom_rois, top_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } /*** Backward ***/ inline __device__ float gpu_atomic_add(const float val, float* address); inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } __device__ void bilinear_interpolate_gradient(const int height, const int width, float y, float x, float& w1, float& w2, float& w3, float& w4, int& x_low, int& x_high, int& y_low, int& y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; } else { x_high = x_low + 1; } float ly = y - y_low; float lx = x - x_low; float hy = 1. - ly, hx = 1. 
- lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } __global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; const float* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical float roi_start_w = offset_bottom_rois[1] * spatial_scale; float roi_start_h = offset_bottom_rois[2] * spatial_scale; float roi_end_w = offset_bottom_rois[3] * spatial_scale; float roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f); float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f); float bin_size_h = roi_height / aligned_height; float bin_size_w = roi_width / aligned_width; float* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * aligned_height * aligned_width; const float* offset_top_diff = top_diff + top_offset; const float top_diff_this_bin = offset_top_diff[ph * aligned_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / aligned_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width); // We do average (integral) pooling inside a bin const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const float y = roi_start_h + ph * bin_size_h + (iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const float x = roi_start_w + pw * bin_size_w + (ix + .5f) * bin_size_w / roi_bin_grid_w; float w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); float g1 = top_diff_this_bin * w1 / count; float g2 = top_diff_this_bin * w2 / count; float g3 = top_diff_this_bin * w3 / count; float g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { // atomicAdd(offset_bottom_diff + y_low * width + x_low, g1); // atomicAdd(offset_bottom_diff + y_low * width + x_high, g2); // atomicAdd(offset_bottom_diff + y_high * width + x_low, g3); // atomicAdd(offset_bottom_diff + y_high * width + x_high, g4); gpu_atomic_add(g1, offset_bottom_diff + y_low * width + x_low); gpu_atomic_add(g2, offset_bottom_diff + y_low * width + x_high); gpu_atomic_add(g3, offset_bottom_diff + y_high * width + x_low); gpu_atomic_add(g4, offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward int ROIAlignBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio, const float* bottom_rois, float* bottom_diff, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; cudaError_t err; ROIAlignBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, top_diff, spatial_scale, height, width, channels, aligned_height, aligned_width, sampling_ratio, bottom_diff, bottom_rois); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
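// --- Illustrative sketch (not part of the file above) -----------------------------------------
// Host-side mirror of the bilinear_interpolate device function above, handy for spot-checking
// ROIAlignForward outputs on the CPU. The name bilinear_interpolate_cpu and the tiny test map are
// hypothetical; the clamping and the four interpolation weights follow the device code.
#include <cstdio>

static float bilinear_interpolate_cpu(const float* data, int height, int width, float y, float x) {
    if (y < -1.0f || y > height || x < -1.0f || x > width) return 0.f;   // outside the feature map
    if (y <= 0) y = 0;
    if (x <= 0) x = 0;
    int y_low = (int)y, x_low = (int)x;
    int y_high, x_high;
    if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; }
    else                     { y_high = y_low + 1; }
    if (x_low >= width - 1)  { x_high = x_low = width - 1; x = (float)x_low; }
    else                     { x_high = x_low + 1; }
    float ly = y - y_low, lx = x - x_low;
    float hy = 1.f - ly, hx = 1.f - lx;
    return hy * hx * data[y_low * width + x_low] +
           hy * lx * data[y_low * width + x_high] +
           ly * hx * data[y_high * width + x_low] +
           ly * lx * data[y_high * width + x_high];
}

int main() {
    // 2x2 feature map; sampling the centre (0.5, 0.5) averages all four values.
    float fm[4] = {0.f, 1.f, 2.f, 3.f};
    printf("%f\n", bilinear_interpolate_cpu(fm, 2, 2, 0.5f, 0.5f));      // expect 1.5
    return 0;
}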
The example demonstrates how to reduce one of the operands of the GEMM along the k-dimension when computing GEMM. So the output also contains either an Mx1 or 1xN vector. It only works with Ampere HMMA 16x8x16 FP16 tensor cores, though it is not difficult to apply to other Turing/Ampere tensor core instructions. Most of the reduction is done at the gemm/warp level, see gemm/warp/mma_with_reduction_tensor_op.h A small part of the reduction is done in the epilogue before storing the vector, see epilogue/threadblock/epilogue_gemm_k_reduction.h */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/kernel/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/matrix_coord.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "helper.h" // The code section below describes the data types for the input and output tensors and the computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementOutput = cutlass::half_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::ColumnMajor; // Layout of the output vector using LayoutGemmKReduction = cutlass::layout::PitchLinear; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes the tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>; // Number of pipelines you want to use constexpr int NumStages = 4; // Reduce A or B operand along the K dimension constexpr bool ReduceKForA = true; // This code section describes the epilogue part of the kernel, we use the default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmWithKReduction< ElementInputA, LayoutInputA, cutlass::ComplexTransform::kNone, 8, ElementInputB, LayoutInputB, cutlass::ComplexTransform::kNone, 8, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, ReduceKForA, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd >::GemmKernel; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; // Below is the reduction kernel used in the case of parallel split-k using ReduceGemmSplitKShape = cutlass::MatrixShape<4, 64>; using ReduceOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, ElementOutput, EpilogueOp::kCount >; using ReduceGemmSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceGemmSplitKShape, EpilogueOp, ReduceOp >; using ReduceGemmSplitK = cutlass::reduction::device::ReduceSplitK<ReduceGemmSplitKKernel>; using ReduceVectorSplitKShape = cutlass::MatrixShape<1, 256>; // This code section describes the epilogue part of the kernel, we use the default value using DummyEpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, cutlass::epilogue::thread::ScaleType::Nothing>; using ReduceVectorSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceVectorSplitKShape, DummyEpilogueOp, ReduceOp >; using ReduceVectorSplitK = cutlass::reduction::device::ReduceSplitK<ReduceVectorSplitKKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; int split_k_slices; bool parallel_split_k; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), problem_size(1024, 1024, 1024), split_k_slices(1), parallel_split_k(false), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(-1), beta(-1), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS GEMM implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements.
// int const kAlignment = 8; if ((problem_size.m() % kAlignment) || (problem_size.n() % kAlignment) || (problem_size.k() % kAlignment)) { // misaligned tensors return false; } return true; } /// Updates input and filter sizes void update( cutlass::gemm::GemmCoord problem_size, int split_k_slices, bool parallel_split_k) { this->problem_size = problem_size; this->split_k_slices = split_k_slices; this->parallel_split_k = parallel_split_k; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("parallel-split-k")) { parallel_split_k = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("split-k-slices", split_k_slices); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "28_ampere_gemm_bias_fusion example\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m <int> GEMM M\n" << " --n <int> GEMM N\n" << " --k <int> GEMM K\n" << " --split-k-slices <int> Split K Slices\n" << " --alpha <float> Epilogue scalar alpha\n" << " --beta <float> Epilogue scalar beta\n\n" << " --parallel-split-k If set (true), use parallel split K\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several problem sizes.\n" << " --iterations <int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag <string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/28_ampere_gemm_bias_fusion_example/ampere_gemm_bias_fusion --m=1024 --n=1024 --k=1024 \n\n"; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "ID,M,N,K,SplitK-Slices,Parallel-SplitK,Runtime"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "gemm_" << idx << "," << options.problem_size.m() << "," << options.problem_size.n() << "," << options.problem_size.k() << "," << options.split_k_slices << "," << options.parallel_split_k << "," << runtime_ms ; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile(Options const &options) { Result result; // Initialize tensors using 
CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.problem_size.mk()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.problem_size.kn()); // Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.problem_size.mn()); // Create tensor D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.problem_size.mn()); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.problem_size.mn()); int reduce_vector_length = ReduceKForA ? options.problem_size.m() : options.problem_size.n(); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_reduction({reduce_vector_length, 1}); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_ref_reduction({reduce_vector_length, 1}); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros cutlass::reference::host::TensorFill( tensor_reduction.host_view()); // <- fill the reduction vector on host with zeros cutlass::reference::host::TensorFill( tensor_ref_reduction.host_view()); // <- fill the reference reduction vector on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); tensor_reduction.sync_device(); // Initialize alpha and beta for the linear combination ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta); cutlass::gemm::GemmUniversalMode mode = options.parallel_split_k ? cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel : cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = options.split_k_slices; // Create a tuple of gemm kernel arguments.
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ mode, options.problem_size, batch_count, {alpha, beta}, tensor_a.device_ref().data(), // <- reference to tensor A on device tensor_b.device_ref().data(), // <- reference to tensor B on device tensor_c.device_ref().data(), // <- reference to matrix C on device tensor_d.device_ref().data(), // <- reference to matrix D on device tensor_reduction.device_ref().data(), // <- reference to the reduction vector on device options.problem_size.m() * options.problem_size.k(), options.problem_size.n() * options.problem_size.k(), options.problem_size.m() * options.problem_size.n(), options.problem_size.m() * options.problem_size.n(), reduce_vector_length, tensor_a.layout().stride(0), tensor_b.layout().stride(0), tensor_c.layout().stride(0), tensor_d.layout().stride(0), tensor_reduction.layout().stride(0) }; // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not result.status = gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); // Initialize CUTLASS kernel with arguments and workspace pointer result.status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // Launch initialized CUTLASS kernel result.status = gemm_op(); CUTLASS_CHECK(result.status); if (options.parallel_split_k && batch_count > 1) { // reduce gemm int splitk_gemm_stride = options.problem_size.m(); cutlass::layout::RowMajor splitk_gemm_layout(splitk_gemm_stride); void * workspace_gemm_ptr = workspace.get(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_gemm_tensorref(static_cast<ElementOutput *>(workspace_gemm_ptr), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_d_tensorref(tensor_d.device_ref().data(), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_c_tensorref(tensor_c.device_ref().data(), splitk_gemm_layout); typename ReduceGemmSplitK::Arguments reduce_gemm_splitk_arguments{ cutlass::MatrixCoord(options.problem_size.n(), options.problem_size.m()), batch_count, size_t(options.problem_size.m() * options.problem_size.n()), workspace_gemm_tensorref, tensor_d_tensorref, tensor_c_tensorref, {alpha, beta} }; ReduceGemmSplitK reduce_gemm_splitk_op; result.status = reduce_gemm_splitk_op.initialize(reduce_gemm_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_gemm_splitk_op(); CUTLASS_CHECK(result.status); // reduce k vector cutlass::layout::RowMajor splitk_vector_layout(reduce_vector_length); ElementOutput *workspace_vector_ptr = static_cast<ElementOutput *>(workspace_gemm_ptr) + batch_count * options.problem_size.m() * options.problem_size.n(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_vector_tensorref(workspace_vector_ptr, splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_reduction_tensorref(tensor_reduction.device_ref().data(), splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_nullptr_tensorref(nullptr, splitk_vector_layout); typename ReduceVectorSplitK::Arguments reduce_vector_splitk_arguments{ cutlass::MatrixCoord(1, reduce_vector_length), batch_count,
size_t(reduce_vector_length), workspace_vector_tensorref, tensor_reduction_tensorref, tensor_nullptr_tensorref, {1.0f, 0.0f} }; ReduceVectorSplitK reduce_vector_splitk_op; result.status = reduce_vector_splitk_op.initialize(reduce_vector_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_vector_splitk_op(); CUTLASS_CHECK(result.status); } // // Create instantiation for device reference gemm kernel // if (options.reference_check) { // Launch device reference to compute D = alpha * A * B + beta * C cutlass::reference::device::Gemm< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator> gemm_device; gemm_device ( options.problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref() ); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); tensor_reduction.sync_host(); // Compute the reference K-reduction of the chosen operand on the host if (ReduceKForA) { for (int m = 0; m < options.problem_size.m(); ++m) { for (int k = 0; k < options.problem_size.k(); ++k) { tensor_ref_reduction.at({m, 0}) += tensor_a.at(cutlass::MatrixCoord(m, k)); } } } else { for (int k = 0; k < options.problem_size.k(); ++k) { for (int n = 0; n < options.problem_size.n(); ++n) { tensor_ref_reduction.at({n, 0}) += tensor_b.at(cutlass::MatrixCoord(k, n)); } } } // Check if output from CUTLASS kernel and reference kernel are equal or not bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); pass &= cutlass::reference::host::TensorEquals(tensor_ref_reduction.host_view(), tensor_reduction.host_view()); if (!pass) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "23_ampere_gemm_operand_reduction_fusion" << options.problem_size.m() << "x" << options.problem_size.n() << "x" << options.problem_size.k() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "A = \n" << tensor_a.host_view() << "\n\n" << "B = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference D = \n" << tensor_ref_d.host_view() << "\n\n"; output_workspace << "Reference reduction vector = \n" << tensor_ref_reduction.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl; output_workspace << "Computed reduction vector = \n" << tensor_reduction.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of GEMM operations.
result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the GEMMs have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsedTime() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute the average runtime. result.runtime_ms = double(runtime_ms) / double(options.iterations); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with the CUDA 11 Toolkit to run this example. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several problem sizes struct Benchmark { int m, n, k, split_k_slices, parallel_split_k; } problem_sizes[] = { {4096, 6144, 4096, 1, false}, }; Result::print_header(std::cout, options) << "\n"; int idx = 1; for (auto const &problem_size : problem_sizes) { options.update({problem_size.m, problem_size.n, problem_size.k}, problem_size.split_k_slices, problem_size.parallel_split_k); Result result = profile(options); result.print(std::cout, idx, options) << "\n"; ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << "\n"; return -1; } Result result = profile(options); Result::print_header(std::cout, options) << "\n"; result.print(std::cout, 1, options) << "\n"; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
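// --- Illustrative sketch (not part of the file above) -----------------------------------------
// The reference check above compares two outputs: D = alpha*A*B + beta*C and the K-reduction of
// one operand (row sums of A when ReduceKForA is true, column sums of B otherwise). This plain
// C++ sketch spells out those semantics on tiny row-major matrices with alpha = 1 and beta = 0;
// the names (gemm_with_a_reduction and the test data) are hypothetical.
#include <cstdio>
#include <vector>

// A is MxK, B is KxN, D is MxN, reduce[m] = sum_k A[m][k].
static void gemm_with_a_reduction(int M, int N, int K,
                                  const std::vector<float>& A,
                                  const std::vector<float>& B,
                                  std::vector<float>& D,
                                  std::vector<float>& reduce) {
    for (int m = 0; m < M; ++m) {
        float row_sum = 0.f;
        for (int k = 0; k < K; ++k) row_sum += A[m * K + k];
        reduce[m] = row_sum;                         // the fused K-reduction output
        for (int n = 0; n < N; ++n) {
            float acc = 0.f;
            for (int k = 0; k < K; ++k) acc += A[m * K + k] * B[k * N + n];
            D[m * N + n] = acc;                      // alpha = 1, beta = 0 for brevity
        }
    }
}

int main() {
    const int M = 2, N = 2, K = 3;
    std::vector<float> A = {1, 2, 3, 4, 5, 6}, B = {1, 0, 0, 1, 1, 1};
    std::vector<float> D(M * N), reduce(M);
    gemm_with_a_reduction(M, N, K, A, B, D, reduce);
    printf("reduce = [%g, %g]\n", reduce[0], reduce[1]);   // expect [6, 15]
    return 0;
}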
#define WARP_REDUCE_32BIT(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r1|p, %0, %1, %2, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t" \ "}" \ : "+"#ASM_CL(value) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_64BIT(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg .u32 lo;\n\t\t" \ ".reg .u32 hi;\n\t\t" \ ".reg ."#ASM_T" r1;\n\t\t" \ ".reg .pred p;\n\t\t" \ "mov.b64 {lo, hi}, %0;\n\t\t" \ "shfl.sync.down.b32 lo|p, lo, %1, %2, %3;\n\t\t" \ "shfl.sync.down.b32 hi|p, hi, %1, %2, %3;\n\t\t" \ "mov.b64 r1, {lo, hi};\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r1, %0;\n\t" \ "}" \ : "+"#ASM_CL(value) : "r"(1 << STEP), \ "r"(MIN_MAX_LANE), "r"(member_mask)); \ } //============================================================================== #define WARP_REDUCE_GEN2(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %2, %3, %4;\n\t\t" \ "shfl.sync.down.b32 r1|p, %1, %2, %3, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN3(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %3, %4, %5;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %3, %4, %5;\n\t\t" \ "shfl.sync.down.b32 r2|p, %2, %3, %4, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN4(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %4, %5, %6;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %4, %5, %6;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %4, %5, %6;\n\t\t" \ "shfl.sync.down.b32 r3|p, %3, %4, %5, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN5(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %5, %6, %7;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %5, %6, %7;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %5, %6, %7;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %5, %6, %7;\n\t\t" \ "shfl.sync.down.b32 r4|p, %4, %5, %6, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, 
%1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN6(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %6, %7, %8;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %6, %7, %8;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %6, %7, %8;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %6, %7, %8;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %6, %7, %8;\n\t\t" \ "shfl.sync.down.b32 r5|p, %5, %6, %7, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN7(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %7, %8, %9;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %7, %8, %9;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %7, %8, %9;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %7, %8, %9;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %7, %8, %9;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %7, %8, %9;\n\t\t" \ "shfl.sync.down.b32 r6|p, %6, %7, %8, %9;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN8(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %8, %9, %10;\n\t\t" \ "shfl.sync.down.b32 r7|p, %7, %8, %9, %10;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), 
"+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN9(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %9, %10, %11;\n\t\t" \ "shfl.sync.down.b32 r8|p, %8, %9, %10, %11;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN10(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %10, %11, %12;\n\t\t" \ "shfl.sync.down.b32 r9|p, %9, %10, %11, %12;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN11(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10;\n\t\t"\ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %11, %12, %13;\n\t\t" \ 
"shfl.sync.down.b32 r4, %4, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r9, %9, %11, %12, %13;\n\t\t" \ "shfl.sync.down.b32 r10|p, %10, %11, %12, %13;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %10, r10, %10;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]), \ "+"#ASM_CL(value[10]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN12(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;" \ "\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r9, %9, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r10, %10, %12, %13, %14;\n\t\t" \ "shfl.sync.down.b32 r11|p, %11, %12, %13, %14;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %10, r10, %10;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %11, r11, %11;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]), \ "+"#ASM_CL(value[10]), "+"#ASM_CL(value[11]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN13(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11," \ "r12;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %13, %14, %15;\n\t\t" \ 
"shfl.sync.down.b32 r6, %6, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r9, %9, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r10, %10, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r11, %11, %13, %14, %15;\n\t\t" \ "shfl.sync.down.b32 r12|p, %12, %13, %14, %15;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %10, r10, %10;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %11, r11, %11;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %12, r12, %12;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]), \ "+"#ASM_CL(value[10]), "+"#ASM_CL(value[11]), \ "+"#ASM_CL(value[12]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN14(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11," \ "r12, r13;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r9, %9, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r10, %10, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r11, %11, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r12, %12, %14, %15, %16;\n\t\t" \ "shfl.sync.down.b32 r13|p, %13, %14, %15, %16;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %10, r10, %10;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %11, r11, %11;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %12, r12, %12;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %13, r13, %13;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]), \ "+"#ASM_CL(value[10]), "+"#ASM_CL(value[11]), \ "+"#ASM_CL(value[12]), "+"#ASM_CL(value[13]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN15(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, 
r2, r3, r4, r5, r6, r7, r8, r9, r10, r11," \ "r12, r13, r14;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r9, %9, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r10, %10, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r11, %11, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r12, %12, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r13, %13, %15, %16, %17;\n\t\t" \ "shfl.sync.down.b32 r14|p, %14, %15, %16, %17;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %10, r10, %10;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %11, r11, %11;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %12, r12, %12;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %13, r13, %13;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %14, r14, %14;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]), \ "+"#ASM_CL(value[10]), "+"#ASM_CL(value[11]), \ "+"#ASM_CL(value[12]), "+"#ASM_CL(value[13]), \ "+"#ASM_CL(value[14]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ } #define WARP_REDUCE_GEN16(ASM_OP, ASM_T, ASM_CL, MIN_MAX_LANE) \ _Pragma("unroll") \ for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) { \ asm( \ "{\n\t\t" \ ".reg ."#ASM_T" r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11," \ "r12, r13, r14, r15;\n\t\t" \ ".reg .pred p;\n\t\t" \ "shfl.sync.down.b32 r0, %0, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r1, %1, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r2, %2, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r3, %3, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r4, %4, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r5, %5, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r6, %6, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r7, %7, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r8, %8, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r9, %9, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r10, %10, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r11, %11, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r12, %12, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r13, %13, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r14, %14, %16, %17, %18;\n\t\t" \ "shfl.sync.down.b32 r15|p, %15, %16, %17, %18;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %0, r0, %0;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %1, r1, %1;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %2, r2, %2;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %3, r3, %3;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %4, r4, %4;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %5, r5, %5;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %6, r6, %6;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %7, r7, %7;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %8, r8, %8;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %9, r9, %9;\n\t\t" 
\ "@p "#ASM_OP"."#ASM_T" %10, r10, %10;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %11, r11, %11;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %12, r12, %12;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %13, r13, %13;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %14, r14, %14;\n\t\t" \ "@p "#ASM_OP"."#ASM_T" %15, r15, %15;\n\t" \ "}" \ : "+"#ASM_CL(value[0]), "+"#ASM_CL(value[1]), \ "+"#ASM_CL(value[2]), "+"#ASM_CL(value[3]), \ "+"#ASM_CL(value[4]), "+"#ASM_CL(value[5]), \ "+"#ASM_CL(value[6]), "+"#ASM_CL(value[7]), \ "+"#ASM_CL(value[8]), "+"#ASM_CL(value[9]), \ "+"#ASM_CL(value[10]), "+"#ASM_CL(value[11]), \ "+"#ASM_CL(value[12]), "+"#ASM_CL(value[13]), \ "+"#ASM_CL(value[14]), "+"#ASM_CL(value[15]) \ : "r"(1 << STEP), "r"(MIN_MAX_LANE), "r"(member_mask)); \ }
#include "base/memory.h" #include "core/optimizer.h" #include "util/gpu.cuh" namespace graphvite { namespace gpu { namespace graph { /** * @brief Train node embedding with 0-moment optimizers * @tparam Vector vector type of embeddings * @tparam Index integral type of indexes * @tparam Model embedding model * @tparam optimizer_type type of optimizer */ template<class Vector, class Index, template<class> class Model, OptimizerType optimizer_type> __global__ void train(Memory<Vector, Index> vertex_embeddings, Memory<Vector, Index> context_embeddings, Memory<Index, int> batch, Memory<Index, int> negative_batch, Memory<typename Vector::Float, int> loss, Optimizer optimizer, float negative_weight) { static const size_t dim = Vector::dim; typedef typename Vector::Float Float; const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const int lane_id = thread_id % kWarpSize; const int num_thread = gridDim.x * blockDim.x; const int batch_size = batch.count / 2; const int num_negative = negative_batch.count / batch_size; Model<Vector> model; __shared__ Vector buffer[kThreadPerBlock / kWarpSize]; Vector &vertex_buffer = buffer[threadIdx.x / kWarpSize]; for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) { // elements in std::tuple are stored in reverse order // each positive sample is {tail, head} Index head_id = batch[sample_id * 2 + 1]; Vector &vertex = vertex_embeddings[head_id]; vertex_buffer = vertex; Float sample_loss = 0; for (int s = 0; s <= num_negative; s++) { Index tail_id; int label; if (s < num_negative) { tail_id = negative_batch[sample_id * num_negative + s]; label = 0; } else { tail_id = batch[sample_id * 2]; label = 1; } Vector &context = context_embeddings[tail_id]; // Forward Float logit; model.forward(vertex_buffer, context, logit); Float prob = sigmoid(logit); // Backward Float gradient, weight; if (label) { gradient = prob - 1; weight = 1; sample_loss += weight * -log(prob + kEpsilon); } else { gradient = prob; weight = negative_weight; sample_loss += weight * -log(1 - prob + kEpsilon); } model.backward<optimizer_type>(vertex_buffer, context, gradient, optimizer, weight); } if (lane_id == 0) loss[sample_id] = sample_loss / (1 + num_negative * negative_weight); vertex = vertex_buffer; } } /** * @brief Train node embedding with 1-moment optimizers * @tparam Vector vector type of embeddings * @tparam Index integral type of indexes * @tparam Model embedding model * @tparam optimizer_type type of optimizer */ template<class Vector, class Index, template<class> class Model, OptimizerType optimizer_type> __global__ void train_1_moment(Memory <Vector, Index> vertex_embeddings, Memory <Vector, Index> context_embeddings, Memory<Vector, Index> vertex_moment1s, Memory<Vector, Index> context_moment1s, Memory<Index, int> batch, Memory<Index, int> negative_batch, Memory<typename Vector::Float, int> loss, Optimizer optimizer, float negative_weight) { static const size_t dim = Vector::dim; typedef typename Vector::Float Float; const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const int lane_id = thread_id % kWarpSize; const int num_thread = gridDim.x * blockDim.x; const int batch_size = batch.count / 2; const int num_negative = negative_batch.count / batch_size; Model<Vector> model; __shared__ Vector buffer[kThreadPerBlock / kWarpSize]; Vector &vertex_buffer = buffer[threadIdx.x / kWarpSize]; for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) { // elements in std::tuple are 
stored in reverse order // each positive sample is {tail, head} Index head_id = batch[sample_id * 2 + 1]; Vector &vertex = vertex_embeddings[head_id]; Vector &vertex_moment1 = vertex_moment1s[head_id]; vertex_buffer = vertex; Float sample_loss = 0; for (int s = 0; s <= num_negative; s++) { Index tail_id; int label; if (s < num_negative) { tail_id = negative_batch[sample_id * num_negative + s]; label = 0; } else { tail_id = batch[sample_id * 2]; label = 1; } Vector &context = context_embeddings[tail_id]; Vector &context_moment1 = context_moment1s[tail_id]; // Forward Float logit; model.forward(vertex_buffer, context, logit); Float prob = sigmoid(logit); // Backward Float gradient, weight; if (label) { gradient = prob - 1; weight = 1; sample_loss += weight * -log(prob + kEpsilon); } else { gradient = prob; weight = negative_weight; sample_loss += weight * -log(1 - prob + kEpsilon); } model.backward<optimizer_type>(vertex_buffer, context, vertex_moment1, context_moment1, gradient, optimizer, weight); } if (lane_id == 0) loss[sample_id] = sample_loss / (1 + num_negative * negative_weight); vertex = vertex_buffer; } } /** * @brief Train node embedding with 2-moment optimizers * @tparam Vector vector type of embeddings * @tparam Index integral type of indexes * @tparam Model embedding model * @tparam optimizer_type type of optimizer */ template<class Vector, class Index, template<class> class Model, OptimizerType optimizer_type> __global__ void train_2_moment(Memory<Vector, Index> vertex_embeddings, Memory <Vector, Index> context_embeddings, Memory<Vector, Index> vertex_moment1s, Memory<Vector, Index> context_moment1s, Memory<Vector, Index> vertex_moment2s, Memory<Vector, Index> context_moment2s, Memory<Index, int> batch, Memory<Index, int> negative_batch, Memory<typename Vector::Float, int> loss, Optimizer optimizer, float negative_weight) { static const size_t dim = Vector::dim; typedef typename Vector::Float Float; const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const int lane_id = thread_id % kWarpSize; const int num_thread = gridDim.x * blockDim.x; const int batch_size = batch.count / 2; const int num_negative = negative_batch.count / batch_size; Model<Vector> model; __shared__ Vector buffer[kThreadPerBlock / kWarpSize]; Vector &vertex_buffer = buffer[threadIdx.x / kWarpSize]; for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) { // elements in std::tuple are stored in reverse order // each positive sample is {tail, head} Index head_id = batch[sample_id * 2 + 1]; Vector &vertex = vertex_embeddings[head_id]; Vector &vertex_moment1 = vertex_moment1s[head_id]; Vector &vertex_moment2 = vertex_moment2s[head_id]; vertex_buffer = vertex; Float sample_loss = 0; for (int s = 0; s <= num_negative; s++) { Index tail_id; int label; if (s < num_negative) { tail_id = negative_batch[sample_id * num_negative + s]; label = 0; } else { tail_id = batch[sample_id * 2]; label = 1; } Vector &context = context_embeddings[tail_id]; Vector &context_moment1 = context_moment1s[tail_id]; Vector &context_moment2 = context_moment2s[tail_id]; // Forward Float logit; model.forward(vertex_buffer, context, logit); Float prob = sigmoid(logit); // Backward Float gradient, weight; if (label) { gradient = prob - 1; weight = 1; sample_loss += weight * -log(prob + kEpsilon); } else { gradient = prob; weight = negative_weight; sample_loss += weight * -log(1 - prob + kEpsilon); } model.backward<optimizer_type>(vertex_buffer, context, vertex_moment1, context_moment1, 
vertex_moment2, context_moment2, gradient, optimizer, weight); } if (lane_id == 0) loss[sample_id] = sample_loss / (1 + num_negative * negative_weight); vertex = vertex_buffer; } } /** * @brief Predict logits for batch samples * @tparam Vector vector type of embeddings * @tparam Index integral type of indexes * @tparam Model embedding model */ template<class Vector, class Index, template<class> class Model> __global__ void predict(Memory<Vector, Index> vertex_embeddings, Memory<Vector, Index> context_embeddings, Memory<Index, int> batch, Memory<typename Vector::Float, int> logits) { static const size_t dim = Vector::dim; typedef typename Vector::Float Float; const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const int lane_id = thread_id % kWarpSize; const int num_thread = gridDim.x * blockDim.x; const int batch_size = batch.count / 2; Model<Vector> model; __shared__ Vector buffer[kThreadPerBlock / kWarpSize]; Vector &vertex_buffer = buffer[threadIdx.x / kWarpSize]; for (int sample_id = thread_id / kWarpSize; sample_id < batch_size; sample_id += num_thread / kWarpSize) { // elements in std::tuple are stored in reverse order // each positive sample is {tail, head} Index head_id = batch[sample_id * 2 + 1]; Index tail_id = batch[sample_id * 2]; Vector &vertex = vertex_embeddings[head_id]; Vector &context = context_embeddings[tail_id]; Float logit; model.forward(vertex, context, logit); if (lane_id == 0) logits[sample_id] = logit; } } } // namespace graph } // namespace gpu } // namespace graphvite
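// --- Illustrative sketch (not part of the file above) -----------------------------------------
// All three train kernels above share the same per-sample scalar math: a logit from the model, a
// sigmoid probability, a logistic loss guarded by a small epsilon, a gradient of (prob - label),
// and a weight of 1 for the positive sample versus negative_weight for each negative sample. The
// host sketch below spells that out; the names (SampleUpdate, sample_update, kEps) are
// hypothetical and only mirror the in-kernel arithmetic.
#include <cmath>
#include <cstdio>

struct SampleUpdate { float loss; float gradient; float weight; };

static SampleUpdate sample_update(float logit, int label, float negative_weight) {
    const float kEps = 1e-15f;                        // guards log(0), like kEpsilon above
    float prob = 1.f / (1.f + std::exp(-logit));      // sigmoid
    SampleUpdate u;
    u.gradient = prob - (label ? 1.f : 0.f);          // d(loss)/d(logit)
    u.weight   = label ? 1.f : negative_weight;
    u.loss     = label ? -std::log(prob + kEps) * u.weight
                       : -std::log(1.f - prob + kEps) * u.weight;
    return u;
}

int main() {
    SampleUpdate pos = sample_update(2.0f, 1, 5.f);   // positive edge
    SampleUpdate neg = sample_update(2.0f, 0, 5.f);   // negative (corrupted) edge
    printf("pos: loss=%f grad=%f  neg: loss=%f grad=%f\n",
           pos.loss, pos.gradient, neg.loss, neg.gradient);
    return 0;
}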