extern "C" { #include "sph/sph_blake.h" #include "sph/sph_bmw.h" #include "sph/sph_groestl.h" #include "sph/sph_skein.h" #include "sph/sph_jh.h" #include "sph/sph_keccak.h" #include "sph/sph_luffa.h" #include "sph/sph_cubehash.h" #include "sph/sph_shavite.h" #include "sph/sph_simd.h" #include "sph/sph_echo.h" #include "sph/sph_hamsi.h" #include "sph/sph_fugue.h" #include "sph/sph_shabal.h" #include "sph/sph_whirlpool.h" #include "sph/sph_sha2.h" } #include "miner.h" #include "cuda_helper.h" #include "cuda_x16r.h" static uint32_t *d_hash[MAX_GPUS]; enum Algo { BLAKE = 0, BMW, GROESTL, JH, KECCAK, SKEIN, LUFFA, CUBEHASH, SHAVITE, SIMD, ECHO, HAMSI, FUGUE, SHABAL, WHIRLPOOL, SHA512, HASH_FUNC_COUNT }; static const char* algo_strings[] = { "blake", "bmw512", "groestl", "jh512", "keccak", "skein", "luffa", "cube", "shavite", "simd", "echo", "hamsi", "fugue", "shabal", "whirlpool", "sha512", NULL }; static __thread uint32_t s_ntime = UINT32_MAX; static __thread bool s_implemented = false; static __thread char hashOrder[HASH_FUNC_COUNT + 1] = { 0 }; static void getAlgoString(const uint32_t* prevblock, char *output) { uint8_t* data = (uint8_t*)prevblock; strcpy(output, "0123456789ABCDEF"); for(int i = 0; i < 16; i++){ uint8_t b = (15 - i) >> 1; // 16 ascii hex chars, reversed uint8_t algoDigit = (i & 1) ? data[b] & 0xF : data[b] >> 4; int offset = algoDigit; // insert the nth character at the front char oldVal = output[offset]; for(int j=offset; j-->0;){ output[j+1] = output[j]; } output[0] = oldVal; } } // X16R CPU Hash (Validation) extern "C" void x16s_hash(void *output, const void *input) { unsigned char _ALIGN(64) hash[128]; sph_blake512_context ctx_blake; sph_bmw512_context ctx_bmw; sph_groestl512_context ctx_groestl; sph_jh512_context ctx_jh; sph_keccak512_context ctx_keccak; sph_skein512_context ctx_skein; sph_luffa512_context ctx_luffa; sph_cubehash512_context ctx_cubehash; sph_shavite512_context ctx_shavite; sph_simd512_context ctx_simd; sph_echo512_context ctx_echo; sph_hamsi512_context ctx_hamsi; sph_fugue512_context ctx_fugue; sph_shabal512_context ctx_shabal; sph_whirlpool_context ctx_whirlpool; sph_sha512_context ctx_sha512; void *in = (void*) input; int size = 80; uint32_t *in32 = (uint32_t*) input; getAlgoString(&in32[1], hashOrder); for (int i = 0; i < 16; i++) { const char elem = hashOrder[i]; const uint8_t algo = elem >= 'A' ? 
elem - 'A' + 10 : elem - '0'; switch (algo) { case BLAKE: sph_blake512_init(&ctx_blake); sph_blake512(&ctx_blake, in, size); sph_blake512_close(&ctx_blake, hash); break; case BMW: sph_bmw512_init(&ctx_bmw); sph_bmw512(&ctx_bmw, in, size); sph_bmw512_close(&ctx_bmw, hash); break; case GROESTL: sph_groestl512_init(&ctx_groestl); sph_groestl512(&ctx_groestl, in, size); sph_groestl512_close(&ctx_groestl, hash); break; case SKEIN: sph_skein512_init(&ctx_skein); sph_skein512(&ctx_skein, in, size); sph_skein512_close(&ctx_skein, hash); break; case JH: sph_jh512_init(&ctx_jh); sph_jh512(&ctx_jh, in, size); sph_jh512_close(&ctx_jh, hash); break; case KECCAK: sph_keccak512_init(&ctx_keccak); sph_keccak512(&ctx_keccak, in, size); sph_keccak512_close(&ctx_keccak, hash); break; case LUFFA: sph_luffa512_init(&ctx_luffa); sph_luffa512(&ctx_luffa, in, size); sph_luffa512_close(&ctx_luffa, hash); break; case CUBEHASH: sph_cubehash512_init(&ctx_cubehash); sph_cubehash512(&ctx_cubehash, in, size); sph_cubehash512_close(&ctx_cubehash, hash); break; case SHAVITE: sph_shavite512_init(&ctx_shavite); sph_shavite512(&ctx_shavite, in, size); sph_shavite512_close(&ctx_shavite, hash); break; case SIMD: sph_simd512_init(&ctx_simd); sph_simd512(&ctx_simd, in, size); sph_simd512_close(&ctx_simd, hash); break; case ECHO: sph_echo512_init(&ctx_echo); sph_echo512(&ctx_echo, in, size); sph_echo512_close(&ctx_echo, hash); break; case HAMSI: sph_hamsi512_init(&ctx_hamsi); sph_hamsi512(&ctx_hamsi, in, size); sph_hamsi512_close(&ctx_hamsi, hash); break; case FUGUE: sph_fugue512_init(&ctx_fugue); sph_fugue512(&ctx_fugue, in, size); sph_fugue512_close(&ctx_fugue, hash); break; case SHABAL: sph_shabal512_init(&ctx_shabal); sph_shabal512(&ctx_shabal, in, size); sph_shabal512_close(&ctx_shabal, hash); break; case WHIRLPOOL: sph_whirlpool_init(&ctx_whirlpool); sph_whirlpool(&ctx_whirlpool, in, size); sph_whirlpool_close(&ctx_whirlpool, hash); break; case SHA512: sph_sha512_init(&ctx_sha512); sph_sha512(&ctx_sha512,(const void*) in, size); sph_sha512_close(&ctx_sha512,(void*) hash); break; } in = (void*) hash; size = 64; } memcpy(output, hash, 32); } static bool init[MAX_GPUS] = { 0 }; //#define _DEBUG #define _DEBUG_PREFIX "x16s-" #include "cuda_debug.cuh" //static int algo80_tests[HASH_FUNC_COUNT] = { 0 }; //static int algo64_tests[HASH_FUNC_COUNT] = { 0 }; static int algo80_fails[HASH_FUNC_COUNT] = { 0 }; extern "C" int scanhash_x16s(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done) { uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; const int dev_id = device_map[thr_id]; int intensity = (device_sm[dev_id] > 500 && !is_windows()) ? 
20 : 19; if (strstr(device_name[dev_id], "GTX 1080")) intensity = 20; uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity); //if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); if (!init[thr_id]) { cudaSetDevice(device_map[thr_id]); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); } gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); quark_blake512_cpu_init(thr_id, throughput); quark_bmw512_cpu_init(thr_id, throughput); quark_groestl512_cpu_init(thr_id, throughput); quark_skein512_cpu_init(thr_id, throughput); quark_jh512_cpu_init(thr_id, throughput); x11_shavite512_cpu_init(thr_id, throughput); x11_simd512_cpu_init(thr_id, throughput); // 64 x16_echo512_cuda_init(thr_id, throughput); x16_fugue512_cpu_init(thr_id, throughput); x15_whirlpool_cpu_init(thr_id, throughput, 0); x16_whirlpool512_init(thr_id, throughput); x11_luffa512_cpu_init(thr_id, throughput); // 64 x16_echo512_cuda_init(thr_id, throughput); x13_fugue512_cpu_init(thr_id, throughput); x16_fugue512_cpu_init(thr_id, throughput); x14_shabal512_cpu_init(thr_id, throughput); CUDA_CALL_OR_RET_X(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput), 0); cuda_check_cpu_init(thr_id, throughput); init[thr_id] = true; } if (opt_benchmark) { ((uint32_t*)ptarget)[7] = 0x003f; ((uint32_t*)pdata)[1] = 0xDDDDDDDD; ((uint32_t*)pdata)[2] = 0xDDDDDDDD; //((uint8_t*)pdata)[8] = 0x90; // hashOrder[0] = '9'; for simd 80 + blake512 64 //((uint8_t*)pdata)[8] = 0xA0; // hashOrder[0] = 'A'; for echo 80 + blake512 64 //((uint8_t*)pdata)[8] = 0xB0; // hashOrder[0] = 'B'; for hamsi 80 + blake512 64 //((uint8_t*)pdata)[8] = 0xC0; // hashOrder[0] = 'C'; for fugue 80 + blake512 64 //((uint8_t*)pdata)[8] = 0xE0; // hashOrder[0] = 'E'; for whirlpool 80 + blake512 64 } uint32_t _ALIGN(64) endiandata[20]; for (int k=0; k < 19; k++) be32enc(&endiandata[k], pdata[k]); uint32_t ntime = swab32(pdata[17]); if (s_ntime != ntime) { getAlgoString(&endiandata[1], hashOrder); s_ntime = ntime; s_implemented = true; if (!thr_id) applog(LOG_INFO, "hash order %s (%08x)", hashOrder, ntime); } if (!s_implemented) { sleep(1); return -1; } cuda_check_cpu_setTarget(ptarget); char elem = hashOrder[0]; const uint8_t algo80 = elem >= 'A' ? 
elem - 'A' + 10 : elem - '0'; switch (algo80) { case BLAKE: quark_blake512_cpu_setBlock_80(thr_id, endiandata); break; case BMW: quark_bmw512_cpu_setBlock_80(endiandata); break; case GROESTL: groestl512_setBlock_80(thr_id, endiandata); break; case JH: jh512_setBlock_80(thr_id, endiandata); break; case KECCAK: keccak512_setBlock_80(thr_id, endiandata); break; case SKEIN: skein512_cpu_setBlock_80((void*)endiandata); break; case LUFFA: qubit_luffa512_cpu_setBlock_80_alexis((void*)endiandata); break; case CUBEHASH: cubehash512_setBlock_80(thr_id, endiandata); break; case SHAVITE: x11_shavite512_setBlock_80((void*)endiandata); break; case SIMD: x16_simd512_setBlock_80((void*)endiandata); break; case ECHO: x16_echo512_setBlock_80((void*)endiandata); break; case HAMSI: x16_hamsi512_setBlock_80((void*)endiandata); break; case FUGUE: x16_fugue512_setBlock_80((void*)pdata); break; case SHABAL: x16_shabal512_setBlock_80((void*)endiandata); break; case WHIRLPOOL: x16_whirlpool512_setBlock_80((void*)endiandata); break; case SHA512: x16_sha512_setBlock_80(endiandata); break; default: { if (!thr_id) applog(LOG_WARNING, "kernel %s %c unimplemented, order %s", algo_strings[algo80], elem, hashOrder); s_implemented = false; sleep(5); return -1; } } int warn = 0; do { int order = 0; // Hash with CUDA switch (algo80) { case BLAKE: quark_blake512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("blake80:"); break; case BMW: quark_bmw512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++); TRACE("bmw80 :"); break; case GROESTL: groestl512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("grstl80:"); break; case JH: jh512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("jh51280:"); break; case KECCAK: keccak512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("kecck80:"); break; case SKEIN: skein512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], 1); order++; TRACE("skein80:"); break; case LUFFA: qubit_luffa512_cpu_hash_80_alexis(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("luffa80:"); break; case CUBEHASH: cubehash512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("cube 80:"); break; case SHAVITE: x11_shavite512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++); TRACE("shavite:"); break; case SIMD: x16_simd512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("simd512:"); break; case ECHO: x16_echo512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("echo :"); break; case HAMSI: x16_hamsi512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("hamsi :"); break; case FUGUE: x16_fugue512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("fugue :"); break; case SHABAL: x16_shabal512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("shabal :"); break; case WHIRLPOOL: x16_whirlpool512_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("whirl :"); break; case SHA512: x16_sha512_cuda_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++; TRACE("sha512 :"); break; } for (int i = 1; i < 16; i++) { const char elem = hashOrder[i]; const uint8_t algo64 = elem >= 'A' ? 
elem - 'A' + 10 : elem - '0'; switch (algo64) { case BLAKE: quark_blake512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("blake :"); break; case BMW: quark_bmw512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("bmw :"); break; case GROESTL: quark_groestl512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("groestl:"); break; case JH: quark_jh512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("jh512 :"); break; case KECCAK: //quark_keccak512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); quark_keccak512_cpu_hash_64(thr_id, throughput, NULL, d_hash[thr_id]); order++; TRACE("keccak :"); break; case SKEIN: quark_skein512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("skein :"); break; case LUFFA: x11_luffa512_cpu_hash_64_alexis(thr_id, throughput, d_hash[thr_id]); order++; TRACE("luffa :"); break; case CUBEHASH: x11_cubehash512_cpu_hash_64(thr_id, throughput, d_hash[thr_id]); order++; TRACE("cube :"); break; case SHAVITE: x11_shavite512_cpu_hash_64_alexis(thr_id, throughput, d_hash[thr_id]); order++; TRACE("shavite:"); break; case SIMD: x11_simd512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("simd :"); break; case ECHO: x11_echo512_cpu_hash_64_alexis(thr_id, throughput, d_hash[thr_id]); order++; TRACE("echo :"); break; case HAMSI: x13_hamsi512_cpu_hash_64_alexis(thr_id, throughput, d_hash[thr_id]); order++; TRACE("hamsi :"); break; case FUGUE: x13_fugue512_cpu_hash_64_alexis(thr_id, throughput, d_hash[thr_id]); order++; TRACE("fugue :"); break; case SHABAL: x14_shabal512_cpu_hash_64_alexis(thr_id, throughput, d_hash[thr_id]); order++; TRACE("shabal :"); break; case WHIRLPOOL: x15_whirlpool_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++); TRACE("shabal :"); break; case SHA512: x17_sha512_cpu_hash_64(thr_id, throughput, d_hash[thr_id]); order++; TRACE("sha512 :"); break; } } *hashes_done = pdata[19] - first_nonce + throughput; work->nonces[0] = cuda_check_hash(thr_id, throughput, pdata[19], d_hash[thr_id]); #ifdef _DEBUG uint32_t _ALIGN(64) dhash[8]; be32enc(&endiandata[19], pdata[19]); x16s_hash(dhash, endiandata); applog_hash(dhash); return -1; #endif if (work->nonces[0] != UINT32_MAX) { const uint32_t Htarg = ptarget[7]; uint32_t _ALIGN(64) vhash[8]; be32enc(&endiandata[19], work->nonces[0]); x16s_hash(vhash, endiandata); if (vhash[7] <= Htarg && fulltest(vhash, ptarget)) { work->valid_nonces = 1; work->nonces[1] = cuda_check_hash_suppl(thr_id, throughput, pdata[19], d_hash[thr_id], 1); work_set_target_ratio(work, vhash); if (work->nonces[1] != 0) { be32enc(&endiandata[19], work->nonces[1]); x16s_hash(vhash, endiandata); bn_set_target_ratio(work, vhash, 1); work->valid_nonces++; pdata[19] = max(work->nonces[0], work->nonces[1]) + 1; } else { pdata[19] = work->nonces[0] + 1; // cursor } #if 0 gpulog(LOG_INFO, thr_id, "hash found with %s 80!", algo_strings[algo80]); algo80_tests[algo80] += work->valid_nonces; char oks64[128] = { 0 }; char oks80[128] = { 0 }; char fails[128] = { 0 }; for (int a = 0; a < HASH_FUNC_COUNT; a++) { const char elem = hashOrder[a]; const uint8_t algo64 = elem >= 'A' ? elem - 'A' + 10 : elem - '0'; if (a > 0) algo64_tests[algo64] += work->valid_nonces; sprintf(&oks64[strlen(oks64)], "|%X:%2d", a, algo64_tests[a] < 100 ? algo64_tests[a] : 99); sprintf(&oks80[strlen(oks80)], "|%X:%2d", a, algo80_tests[a] < 100 ? 
algo80_tests[a] : 99); sprintf(&fails[strlen(fails)], "|%X:%2d", a, algo80_fails[a] < 100 ? algo80_fails[a] : 99); } applog(LOG_INFO, "K64: %s", oks64); applog(LOG_INFO, "K80: %s", oks80); applog(LOG_ERR, "F80: %s", fails); #endif return work->valid_nonces; } else if (vhash[7] > Htarg) { // x11+ coins could do some random error, but not on retry gpu_increment_reject(thr_id); algo80_fails[algo80]++; if (!warn) { warn++; pdata[19] = work->nonces[0] + 1; continue; } else { if (!opt_quiet) gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU! %s %s", work->nonces[0], algo_strings[algo80], hashOrder); warn = 0; } } } if ((uint64_t)throughput + pdata[19] >= max_nonce) { pdata[19] = max_nonce; break; } pdata[19] += throughput; } while (pdata[19] < max_nonce && !work_restart[thr_id].restart); *hashes_done = pdata[19] - first_nonce; return 0; } // cleanup extern "C" void free_x16s(int thr_id) { if (!init[thr_id]) return; cudaThreadSynchronize(); cudaFree(d_hash[thr_id]); quark_blake512_cpu_free(thr_id); quark_groestl512_cpu_free(thr_id); x11_simd512_cpu_free(thr_id); x13_fugue512_cpu_free(thr_id); x16_fugue512_cpu_free(thr_id); // to merge with x13_fugue512 ? x15_whirlpool_cpu_free(thr_id); cuda_check_cpu_free(thr_id); cudaDeviceSynchronize(); init[thr_id] = false; }
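// Standalone host-side sketch of the ordering derivation implemented by getAlgoString()
// above: each of the 16 hex nibbles, read in reverse order from the previous block hash,
// moves the matching algorithm character to the front of "0123456789ABCDEF", so the result
// is a permutation in which every algorithm appears exactly once (the X16S twist on X16R).
// The previous-block words in main() are made-up illustrative values, not real chain data.
#include <cstdint>
#include <cstring>
#include <cstdio>

static void order_from_prevblock(const uint32_t* prevblock, char* output /* >= 17 bytes */)
{
    const uint8_t* data = (const uint8_t*)prevblock;
    strcpy(output, "0123456789ABCDEF");
    for (int i = 0; i < 16; i++) {
        uint8_t b = (15 - i) >> 1;                              // 16 ascii hex chars, reversed
        uint8_t algoDigit = (i & 1) ? data[b] & 0xF : data[b] >> 4;
        // move the selected character to the front, shifting the earlier ones right
        char oldVal = output[algoDigit];
        for (int j = algoDigit; j-- > 0;)
            output[j + 1] = output[j];
        output[0] = oldVal;
    }
}

int main()
{
    uint32_t prevblock[2] = { 0x01234567u, 0x89ABCDEFu };       // illustrative values only
    char order[17] = { 0 };
    order_from_prevblock(prevblock, order);
    printf("hash order: %s\n", order);                          // each hex digit selects one of the 16 algos
    return 0;
}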
* \test Tests the two-dimensional FFT routines. **/ #include <iostream> #include <vector> #include <cmath> #include <complex> #include <algorithm> //#define VIENNACL_BUILD_INFO #include "viennacl/linalg/host_based/fft_operations.hpp" #ifdef VIENNACL_WITH_OPENCL #include "viennacl/linalg/opencl/fft_operations.hpp" #include "viennacl/linalg/opencl/kernels/fft.hpp" #endif #ifdef VIENNACL_WITH_CUDA #include "viennacl/linalg/cuda/fft_operations.hpp" #endif #include "viennacl/linalg/fft_operations.hpp" #include "viennacl/fft.hpp" typedef float ScalarType; const ScalarType EPS = ScalarType(0.06f); //use smaller values in double precision typedef ScalarType (*test_function_ptr)(std::vector<ScalarType>&, std::vector<ScalarType>&, unsigned int, unsigned int, unsigned int); typedef void (*input_function_ptr)(std::vector<ScalarType>&, std::vector<ScalarType>&, unsigned int&, unsigned int&, unsigned int&, const std::string&); struct testData { float input[2048]; float output[2048]; unsigned int batch_num; unsigned int row_num; unsigned int col_num; }; static testData direct_2d = { { 0.120294f, 0.839315f, 0.890936f, 0.775417f, 0.375051f, 0.775645f, 0.367671f, 0.309852f, 0.551154f, 0.166495f, 0.174865f, 0.340252f, 0.393914f, 0.439817f, 0.523974f, 0.291109f, 0.181803f, 0.811176f, 0.490668f, 0.234881f, 0.611783f, 0.098058f, 0.106492f, 0.399059f, 0.974164f, 0.403960f, 0.324111f, 0.772581f, 0.609412f, 0.917312f, 0.538254f, 0.729706f, 0.756627f, 0.429191f, 0.505123f, 0.131678f, 0.204836f, 0.872794f, 0.441530f, 0.755990f, 0.039289f, 0.616395f, 0.096242f, 0.433203f, 0.056212f, 0.620216f, 0.724312f, 0.238015f }, { 10.058718f, 12.402115f, 3.306907f, 0.570050f, -0.527832f, -1.052828f, -0.309640f, 1.578631f, 0.027247f, 1.441292f, -2.396150f, 0.396048f, -2.490234f, -0.923666f, -0.890061f, 1.154475f, -2.485666f, -0.029132f, -1.617884f, -0.788678f, 0.008640f, -0.751211f, -0.245883f, 2.815872f, 2.316608f, 0.780692f, 0.437285f, -0.798080f, 0.304596f, -0.176831f, 1.481121f, -0.633767f, -0.177035f, 0.302556f, -1.388328f, 0.109418f, 0.034794f, 0.568763f, 0.053167f, -0.332043f, 0.074045f, -1.350742f, -1.101494f, 1.267548f, -1.288304f, 2.578995f, -0.297569f, 1.014074f }, 1, 4, 6 }; static testData radix2_2d = { { 0.860600f, 0.020071f, 0.756794f, 0.472348f, 0.604630f, 0.445387f, 0.738811f, 0.644715f, 0.840903f, 0.746019f, 0.629334f, 0.682880f, 0.516268f, 0.235386f, 0.800333f, 0.175785f, 0.974124f, 0.485907f, 0.492256f, 0.696148f, 0.230253f, 0.600575f, 0.138786f, 0.136737f, 0.114667f, 0.516912f, 0.173743f, 0.899410f, 0.891824f, 0.704459f, 0.450209f, 0.752424f, 0.724530f, 0.207003f, 0.224772f, 0.329161f, 0.652390f, 0.963583f, 0.973876f, 0.493293f, 0.709602f, 0.603211f, 0.176173f, 0.225870f, 0.838596f, 0.976507f, 0.401655f, 0.812721f, 0.462413f, 0.893911f, 0.508869f, 0.692667f, 0.494486f, 0.647656f, 0.829403f, 0.609152f, 0.164568f, 0.003146f, 0.508563f, 0.056392f, 0.707605f, 0.958771f, 0.808816f, 0.432136f }, { 18.399853f, 17.120342f, 1.194352f, 0.639568f, -0.086731f, -0.384759f, 1.241270f, -2.175158f, 1.175068f, 0.896665f, 0.753659f, 0.780709f, -0.082556f, -3.727531f, 1.578434f, -0.294704f, 1.544822f, -0.169894f, 0.570453f, -1.065756f, 1.432534f, -1.146827f, -1.713843f, 2.376111f, -2.141517f, -3.200578f, -1.061705f, -1.680550f, 0.656694f, 2.493567f, -1.462913f, -3.195214f, 2.498683f, -1.052464f, -1.144435f, -4.022502f, 0.301723f, 0.550845f, -1.033154f, -0.872973f, 0.916475f, -0.175878f, 0.123236f, -1.495021f, 1.962570f, -0.616791f, -2.436357f, -1.537166f, 0.547337f, -2.207615f, 1.563801f, -0.916862f, 2.013805f, 1.934075f, 
0.940849f, -0.143010f, -0.361511f, 0.364330f, -0.161776f, 1.245928f, -1.553198f, 1.579960f, 1.363282f, 0.741429f }, 1, 4, 8 }; static testData direct_2d_big = { { 0.475679f, 0.408864f, 0.313085f, 0.387599f, 0.767833f, 0.015767f, 0.832733f, 0.764867f, 0.850312f, 0.782744f, 0.355199f, 0.308463f, 0.496935f, 0.043339f, 0.309902f, 0.030681f, 0.497275f, 0.237185f, 0.229802f, 0.606489f, 0.720393f, 0.848826f, 0.704500f, 0.845834f, 0.451885f, 0.339276f, 0.523190f, 0.688469f, 0.646792f, 0.975192f, 0.933888f, 0.122471f, 0.384056f, 0.246973f, 0.510070f, 0.151889f, 0.262739f, 0.342803f, 0.916756f, 0.113051f, 0.125547f, 0.271954f, 0.421514f, 0.622482f, 0.315293f, 0.731416f, 0.653164f, 0.812568f, 0.968601f, 0.882965f, 0.419057f, 0.688994f, 0.731792f, 0.123557f, 0.534827f, 0.183676f, 0.462833f, 0.058017f, 0.872145f, 0.109626f, 0.033209f, 0.806033f, 0.232097f, 0.417265f, 0.053006f, 0.742167f, 0.569154f, 0.315745f, 0.084970f, 0.485910f, 0.428796f, 0.210517f, 0.757864f, 0.850311f, 0.832999f, 0.073158f, 0.581726f, 0.486163f, 0.885726f, 0.550328f, 0.369128f, 0.304783f, 0.239321f, 0.100920f }, { 21.755795f, 18.089336f, -1.248233f, -0.179035f, 1.307578f, 1.589876f, -1.680055f, 1.879153f, 0.500297f, 0.839735f, 0.046095f, -0.177522f, 0.742587f, -0.786261f, -3.427422f, -0.445572f, -1.376776f, 1.221333f, 0.334313f, -0.588123f, -2.070653f, 1.297694f, -1.879930f, -2.445690f, 1.692045f, 0.251480f, 0.435994f, 0.257269f, 1.513737f, 0.859310f, 0.538316f, -3.698363f, -3.243739f, 2.342074f, 1.255018f, -1.052454f, 0.450322f, 3.684811f, -0.951320f, 2.863686f, -0.170055f, 1.501932f, -0.800708f, 2.040001f, -0.229112f, -0.175461f, -5.128507f, -2.872447f, -2.125049f, -2.656515f, 0.632609f, -2.080163f, 2.527745f, -1.830541f, 0.086613f, -1.402300f, -0.900261f, -1.355287f, -0.909127f, 2.822799f, 2.142723f, -0.882929f, -3.627774f, 0.180693f, -0.073456f, 0.783774f, 2.144351f, -0.252458f, 0.090970f, -0.007880f, 3.457415f, 0.527979f, 0.505462f, 0.978198f, -1.807562f, -2.692160f, 2.556900f, -1.385276f, 3.526823f, 0.247212f, 1.879590f, 0.288942f, 1.504963f, -0.408566f }, 1, 7, 6 }; static testData transposeMatrix= {{0.139420f,0.539278f,0.547922f,0.672097f,0.528360f,0.158671f,0.596258f,0.432662f,0.445432f,0.597279f,0.966011f,0.707923f,0.705743f,0.282214f,0.100677f,0.143657f,0.040120f,0.346660f,0.279002f, 0.568480f,0.505332f,0.875261f,0.001142f,0.237294f,0.673498f,0.699611f,0.990521f,0.379241f,0.981826f,0.091198f,0.522898f,0.637506f}, {0.13942f,0.539278f,0.445432f,0.597279f,0.04012f,0.34666f,0.673498f,0.699611f, 0.547922f,0.672097f,0.966011f,0.707923f,0.279002f,0.56848f,0.990521f,0.379241f,0.52836f,0.158671f,0.705743f,0.282214f,0.505332f,0.875261f,0.981826f,0.091198f,0.596258f,0.432662f,0.100677f,0.143657f,0.001142f, 0.237294f,0.522898f,0.637506f},1,4,4}; void set_values_struct(std::vector<ScalarType>& input, std::vector<ScalarType>& output, unsigned int& rows, unsigned int& cols, unsigned int& batch_size, testData& data); void set_values_struct(std::vector<ScalarType>& input, std::vector<ScalarType>& output, unsigned int& rows, unsigned int& cols, unsigned int& batch_size, testData& data) { unsigned int size = data.col_num * data.batch_num * 2 * data.row_num; input.resize(size); output.resize(size); rows = data.row_num; cols = data.col_num; batch_size = data.batch_num; for (unsigned int i = 0; i < size; i++) { input[i] = data.input[i]; output[i] = data.output[i]; } } void read_matrices_pair(std::vector<ScalarType>& input, std::vector<ScalarType>& output, unsigned int& rows, unsigned int& cols, unsigned int& batch_size, const std::string& 
log_tag); void read_matrices_pair(std::vector<ScalarType>& input, std::vector<ScalarType>& output, unsigned int& rows, unsigned int& cols, unsigned int& batch_size, const std::string& log_tag) { if (log_tag == "fft:2d::direct::1_arg") set_values_struct(input, output, rows, cols, batch_size, direct_2d); if (log_tag == "fft:2d::radix2::1_arg") set_values_struct(input, output, rows, cols, batch_size, radix2_2d); if (log_tag == "fft:2d::direct::big::2_arg") set_values_struct(input, output, rows, cols, batch_size, direct_2d_big); if (log_tag == "fft::transpose" || log_tag == "fft::transpose_inplace") set_values_struct(input, output, rows, cols, batch_size, transposeMatrix); } template<typename ScalarType> ScalarType diff(std::vector<ScalarType>& vec, std::vector<ScalarType>& ref) { ScalarType df = 0.0; ScalarType norm_ref = 0; for (std::size_t i = 0; i < vec.size(); i++) { df = df + pow(vec[i] - ref[i], 2); norm_ref += ref[i] * ref[i]; } return sqrt(df / norm_ref); } template<typename ScalarType> ScalarType diff_max(std::vector<ScalarType>& vec, std::vector<ScalarType>& ref) { ScalarType df = 0.0; ScalarType mx = 0.0; ScalarType norm_max = 0; for (std::size_t i = 0; i < vec.size(); i++) { df = std::max<ScalarType>(std::fabs(vec[i] - ref[i]), df); mx = std::max<ScalarType>(std::fabs(vec[i]), mx); if (mx > 0) { if (norm_max < df / mx) norm_max = df / mx; } } return norm_max; } void copy_vector_to_matrix(viennacl::matrix<ScalarType> & input, std::vector<ScalarType> & in, unsigned int row, unsigned int col); void copy_vector_to_matrix(viennacl::matrix<ScalarType> & input, std::vector<ScalarType> & in, unsigned int row, unsigned int col) { std::vector<std::vector<ScalarType> > my_matrix(row, std::vector<ScalarType>(col * 2)); for (unsigned int i = 0; i < row; i++) for (unsigned int j = 0; j < col * 2; j++) my_matrix[i][j] = in[i * col * 2 + j]; viennacl::copy(my_matrix, input); } void copy_matrix_to_vector(viennacl::matrix<ScalarType> & input, std::vector<ScalarType> & in, unsigned int row, unsigned int col); void copy_matrix_to_vector(viennacl::matrix<ScalarType> & input, std::vector<ScalarType> & in, unsigned int row, unsigned int col) { std::vector<std::vector<ScalarType> > my_matrix(row, std::vector<ScalarType>(col * 2)); viennacl::copy(input, my_matrix); for (unsigned int i = 0; i < row; i++) for (unsigned int j = 0; j < col * 2; j++) in[i * col * 2 + j] = my_matrix[i][j]; } ScalarType fft_2d_1arg(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/); ScalarType fft_2d_1arg(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/) { viennacl::matrix<ScalarType> input(row, 2 * col); std::vector<ScalarType> res(in.size()); copy_vector_to_matrix(input, in, row, col); viennacl::inplace_fft(input); //std::cout << input << "\n"; viennacl::backend::finish(); copy_matrix_to_vector(input, res, row, col); return diff_max(res, out); } ScalarType transpose_inplace(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/); ScalarType transpose_inplace(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/) { viennacl::matrix<ScalarType> input(row, 2 * col); std::vector<ScalarType> res(in.size()); copy_vector_to_matrix(input, in, row, col); viennacl::linalg::transpose(input); viennacl::backend::finish(); copy_matrix_to_vector(input, res, row, 
col); return diff_max(res, out); } ScalarType transpose(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/); ScalarType transpose(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/) { viennacl::matrix<ScalarType> input(row, 2 * col); viennacl::matrix<ScalarType> output(row, 2 * col); std::vector<ScalarType> res(in.size()); copy_vector_to_matrix(input, in, row, col); viennacl::linalg::transpose(input,output); viennacl::backend::finish(); copy_matrix_to_vector(output, res, row, col); return diff_max(res, out); } ScalarType fft_2d_2arg(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/); ScalarType fft_2d_2arg(std::vector<ScalarType>& in, std::vector<ScalarType>& out, unsigned int row, unsigned int col, unsigned int /*batch_size*/) { viennacl::matrix<ScalarType> input(row, 2 * col); viennacl::matrix<ScalarType> output(row, 2 * col); std::vector<ScalarType> res(in.size()); copy_vector_to_matrix(input, in, row, col); //std::cout << input << "\n"; viennacl::fft(input, output); //std::cout << input << "\n"; viennacl::backend::finish(); copy_matrix_to_vector(output, res, row, col); return diff_max(res, out); } int test_correctness(const std::string& log_tag, input_function_ptr input_function, test_function_ptr func); int test_correctness(const std::string& log_tag, input_function_ptr input_function, test_function_ptr func) { std::vector<ScalarType> input; std::vector<ScalarType> output; std::cout << std::endl; std::cout << "*****************" << log_tag << "***************************\n"; unsigned int batch_size; unsigned int rows_num, cols_num; input_function(input, output, rows_num, cols_num, batch_size, log_tag); ScalarType df = func(input, output, rows_num, cols_num, batch_size); printf("%7s ROWS=%6d COLS=%6d; BATCH=%3d; DIFF=%3.15f;\n", ((fabs(df) < EPS) ? "[Ok]" : "[Fail]"), rows_num, cols_num, batch_size, df); std::cout << std::endl; if (df > EPS) return EXIT_FAILURE; return EXIT_SUCCESS; } int main() { std::cout << "*" << std::endl; std::cout << "* ViennaCL test: FFT" << std::endl; std::cout << "*" << std::endl; //2D FFT tests if (test_correctness("fft:2d::radix2::1_arg", read_matrices_pair, &fft_2d_1arg) == EXIT_FAILURE) return EXIT_FAILURE; if (test_correctness("fft:2d::direct::1_arg", read_matrices_pair, &fft_2d_1arg) == EXIT_FAILURE) return EXIT_FAILURE; if (test_correctness("fft:2d::direct::big::2_arg", read_matrices_pair, &fft_2d_2arg) == EXIT_FAILURE) return EXIT_FAILURE; if (test_correctness("fft::transpose_inplace", read_matrices_pair, &transpose_inplace) == EXIT_FAILURE) return EXIT_FAILURE; if (test_correctness("fft::transpose", read_matrices_pair, &transpose) == EXIT_FAILURE) return EXIT_FAILURE; std::cout << std::endl; std::cout << "------- Test completed --------" << std::endl; std::cout << std::endl; return EXIT_SUCCESS; }
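// Reference cross-check for the hard-coded expected outputs above: a textbook 2-D DFT
// over the same interleaved layout (each row stores `cols` complex values as 2*cols
// floats, row-major), X[u,v] = sum_{r,c} x[r,c] * exp(-2*pi*i*(u*r/R + v*c/C)).
// Purely an illustrative O((R*C)^2) sketch, independent of ViennaCL; as a sanity check,
// the DC term of direct_2d works out to its leading pair 10.058718 + 12.402115i.
#include <vector>
#include <complex>
#include <cmath>
#include <cstdio>

static std::vector<std::complex<float> >
naive_dft_2d(const std::vector<float>& in, unsigned rows, unsigned cols)
{
    std::vector<std::complex<float> > out(rows * cols);
    const float two_pi = 6.283185307f;
    for (unsigned u = 0; u < rows; ++u)
        for (unsigned v = 0; v < cols; ++v)
        {
            std::complex<float> acc(0.0f, 0.0f);
            for (unsigned r = 0; r < rows; ++r)
                for (unsigned c = 0; c < cols; ++c)
                {
                    std::complex<float> x(in[2 * (r * cols + c)], in[2 * (r * cols + c) + 1]);
                    float phase = -two_pi * (float(u * r) / float(rows) + float(v * c) / float(cols));
                    acc += x * std::complex<float>(std::cos(phase), std::sin(phase));
                }
            out[u * cols + v] = acc;
        }
    return out;
}

int main()
{
    // tiny 2x2 example: (1, 2; 3, 4) as complex values with zero imaginary parts
    std::vector<float> in = { 1, 0, 2, 0, 3, 0, 4, 0 };
    std::vector<std::complex<float> > out = naive_dft_2d(in, 2, 2);
    std::printf("DC term: %f + %fi (expected 10 + 0i)\n", out[0].real(), out[0].imag());
    return 0;
}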
using namespace megdnn; using namespace cuda; using namespace local_share; namespace { template <int unroll_ci_, int unroll_co_, int unroll_n_> struct UnrollConfig { static int const unroll_ci = unroll_ci_; static int const unroll_co = unroll_co_; static int const unroll_n = unroll_n_; }; template <int thread_x, int thread_y> struct ThreadConfig { static int const nr_thread_x = thread_x; static int const nr_thread_y = thread_y; static int const nr_threads = nr_thread_x * nr_thread_y; }; template <typename UnrollConfig, typename ThreadConfig> struct DiffTileCount { static int const tile_batch = UnrollConfig::unroll_n; static int const tile_co = UnrollConfig::unroll_co * ThreadConfig::nr_thread_x; static int const load_x = tile_batch > 32 ? 32 : tile_batch; static int const load_y = ThreadConfig::nr_threads / load_x; static int const smem_h = tile_co; static int const smem_w = tile_batch; static int const smem_stride = smem_w % 2 == 0 ? smem_w + 1 : smem_w; static int const smem_tot = smem_h * smem_stride; static int const reg_row = (smem_h + load_y - 1) / load_y; static int const reg_col = (smem_w + load_x - 1) / load_x; static bool const check_bounds_h = smem_h % load_y != 0; static bool const check_bounds_w = smem_w % load_x != 0; }; template <typename UnrollConfig, typename ThreadConfig> struct DataTileCount { static int const tile_batch = UnrollConfig::unroll_n; static int const tile_ci = ThreadConfig::nr_thread_y * UnrollConfig::unroll_ci; static int const load_x = tile_batch > 32 ? 32 : tile_batch; static int const load_y = ThreadConfig::nr_threads / load_x; static int const smem_h = tile_ci; static int const smem_w = tile_batch; static int const smem_stride = smem_w % 2 == 0 ? smem_w + 1 : smem_w; static int const smem_tot = smem_h * smem_stride; static int const reg_row = (smem_h + load_y - 1) / load_y; static int const reg_col = (smem_w + load_x - 1) / load_x; static bool const check_bounds_h = smem_h % load_y != 0; static bool const check_bounds_w = smem_w % load_x != 0; }; template <bool check_bounds, typename TileCount> struct Global2ShareMemVisitor { typedef float copy_t; float* smem; const copy_t* g_ptr; int stride; int remain; const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int tid = tidy * blockDim.x + tidx; const int gl_load_y = tid / TileCount::load_x; const int gl_load_x = tid - gl_load_y * TileCount::load_x; copy_t reg[TileCount::reg_row][TileCount::reg_col]; __device__ Global2ShareMemVisitor(copy_t* smem, int stride, int remain) : smem{smem}, stride{stride}, remain{remain} {} __device__ __forceinline__ void first_copy() { #pragma unroll for (int i = 0; i < TileCount::reg_row; ++i) { int h_idx = gl_load_y + i * TileCount::load_y; #pragma unrol for (int j = 0; j < TileCount::reg_col; ++j) { int w_idx = gl_load_x + j * TileCount::load_x; if (TileCount::check_bounds_h && h_idx >= TileCount::smem_h) continue; if (TileCount::check_bounds_w && w_idx >= TileCount::smem_w) continue; if (check_bounds) { copy_t val = 0.f; if (h_idx < remain) { val = g_ptr[h_idx * stride + w_idx]; } *(sh_ptr(h_idx, w_idx)) = val; } else { *(sh_ptr(h_idx, w_idx)) = g_ptr[h_idx * stride + w_idx]; } } } } __device__ __forceinline__ void copy() { #pragma unroll for (int i = 0; i < TileCount::reg_row; ++i) { int h_idx = gl_load_y + i * TileCount::load_y; #pragma unrol for (int j = 0; j < TileCount::reg_col; ++j) { int w_idx = gl_load_x + j * TileCount::load_x; if (TileCount::check_bounds_h && h_idx >= TileCount::smem_h) continue; if (TileCount::check_bounds_w && w_idx >= 
TileCount::smem_w) continue; if (check_bounds) { copy_t val = 0.f; if (h_idx < remain) { val = g_ptr[h_idx * stride + w_idx]; } reg[i][j] = val; } else { reg[i][j] = g_ptr[h_idx * stride + w_idx]; } } } } __device__ __forceinline__ void commit() { #pragma unroll for (int i = 0; i < TileCount::reg_row; ++i) { int h_idx = gl_load_y + i * TileCount::load_y; #pragma unrol for (int j = 0; j < TileCount::reg_col; ++j) { int w_idx = gl_load_x + j * TileCount::load_x; if (TileCount::check_bounds_h && h_idx >= TileCount::smem_h) continue; if (TileCount::check_bounds_w && w_idx >= TileCount::smem_w) continue; *(sh_ptr(h_idx, w_idx)) = reg[i][j]; } } } __device__ __forceinline__ float* sh_ptr(int y, int x) { return &smem[y * TileCount::smem_stride + x]; } __device__ __forceinline__ void move_forward() { g_ptr += TileCount::tile_batch; } }; template <bool check_bounds, typename UnrollConfig, typename ThreadConfig> __device__ __forceinline__ void consume_block( Global2ShareMemVisitor<check_bounds, DataTileCount<UnrollConfig, ThreadConfig>>& src_gl2sh_visitor, Global2ShareMemVisitor<check_bounds, DiffTileCount<UnrollConfig, ThreadConfig>>& diff_gl2sh_visitor, float r_src[UnrollConfig::unroll_ci], float r_diff[UnrollConfig::unroll_co], float r_grad[UnrollConfig::unroll_ci][UnrollConfig::unroll_co]) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; #pragma unroll for (int b_inner = 0; b_inner < UnrollConfig::unroll_n; ++b_inner) { #pragma unroll for (int i = 0; i < UnrollConfig::unroll_ci; ++i) { r_src[i] = *(src_gl2sh_visitor.sh_ptr( tidy + i * ThreadConfig::nr_thread_y, b_inner)); } #pragma unroll for (int j = 0; j < UnrollConfig::unroll_co; ++j) { r_diff[j] = *(diff_gl2sh_visitor.sh_ptr( tidx + j * ThreadConfig::nr_thread_x, b_inner)); } #pragma unroll for (int i = 0; i < UnrollConfig::unroll_ci; ++i) { #pragma unroll for (int j = 0; j < UnrollConfig::unroll_co; ++j) { r_grad[i][j] += r_src[i] * r_diff[j]; } } } } template <bool check_bounds, typename UnrollConfig, typename ThreadConfig> __global__ void local_share_bwd_filter_device_template_f32( const float* __restrict__ src, const float* __restrict__ diff, float* __restrict__ grad, Param param, int fh, int fw, int sh, int sw) { typedef DiffTileCount<UnrollConfig, ThreadConfig> DiffTileCount; typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int bidx = blockIdx.x; const int bidy = blockIdx.y; const int bidz = blockIdx.z; const int filter_sizes = fh * fw; const int sp_grp_idx = bidx / filter_sizes; const int kern_spatial = bidx - sp_grp_idx * filter_sizes; const int sgh_idx = sp_grp_idx / param.sgw; const int sgw_idx = sp_grp_idx - sgh_idx * param.sgw; const int kh = kern_spatial / fw; const int kw = kern_spatial - kh * fw; const int b_co = bidy * DiffTileCount::tile_co; const int b_ci = bidz * DataTileCount::tile_ci; const int t_co = tidx + b_co; const int t_ci = tidy + b_ci; const int ho = param.sgh * param.grp_ho; const int wo = param.sgw * param.grp_wo; extern __shared__ float smem[]; float* sh_src = smem; float* sh_diff = smem + DataTileCount::smem_tot; const float* __restrict__ g_ptr_src = src + b_ci * param.hi * param.wi * param.n; // input channel stride const float* __restrict__ g_ptr_diff = diff + b_co * ho * wo * param.n; float* __restrict__ g_ptr_grad = grad + sp_grp_idx * filter_sizes * param.co * param.ci // spatial group stride + t_ci * filter_sizes * param.co // input channel stride + kern_spatial * param.co // kernel spatial stride + t_co; // 
output channel stride Global2ShareMemVisitor<check_bounds, DataTileCount> src_gl2sh_visitor{ sh_src, param.hi * param.wi * param.n, param.ci - b_ci}; Global2ShareMemVisitor<check_bounds, DiffTileCount> diff_gl2sh_visitor{ sh_diff, ho * wo * param.n, param.co - b_co}; float r_src[UnrollConfig::unroll_ci]; float r_diff[UnrollConfig::unroll_co]; float r_grad[UnrollConfig::unroll_ci][UnrollConfig::unroll_co]; #pragma unroll for (int i = 0; i < UnrollConfig::unroll_ci; ++i) { #pragma unroll for (int j = 0; j < UnrollConfig::unroll_co; ++j) { r_grad[i][j] = 0.f; } } int sp_grp_h_start = sgh_idx * param.grp_ho; int sp_grp_h_end = sgh_idx * param.grp_ho + param.grp_ho - 1; int sp_grp_w_start = sgw_idx * param.grp_wo; int sp_grp_w_end = sgw_idx * param.grp_wo + param.grp_wo - 1; int height_start = (param.ph - kh + sh - 1) / sh; height_start = sp_grp_h_start >= height_start ? sp_grp_h_start : height_start; int width_start = (param.pw - kw + sw - 1) / sw; width_start = sp_grp_w_start >= width_start ? sp_grp_w_start : width_start; int height_end = (param.hi - 1 + param.ph - kh) / sh; height_end = sp_grp_h_end <= height_end ? sp_grp_h_end : height_end; int width_end = (param.wi - 1 + param.pw - kw) / sw; width_end = sp_grp_w_end <= width_end ? sp_grp_w_end : width_end; const int b_blks = (param.n + UnrollConfig::unroll_n - 1) / UnrollConfig::unroll_n; int ih_idx = height_start * sh - param.ph + kh; int iw_idx = width_start * sw - param.pw + kw; src_gl2sh_visitor.g_ptr = g_ptr_src + (ih_idx * param.wi + iw_idx) * param.n; diff_gl2sh_visitor.g_ptr = g_ptr_diff + (height_start * wo + width_start) * param.n; if (height_start <= height_end && width_start <= width_end) { src_gl2sh_visitor.first_copy(); diff_gl2sh_visitor.first_copy(); __syncthreads(); } for (int h = height_start; h <= height_end; ++h) { for (int w = width_start; w <= width_end; ++w) { for (int b_outer = 0; b_outer < b_blks; b_outer++) { if (b_outer == b_blks - 1) { // not last tile if (!(h == height_end && w == width_end)) { int w_next = w == width_end ? width_start : w + 1; int h_next = w == width_end ? 
h + 1 : h; int ih_idx = h_next * sh - param.ph + kh; int iw_idx = w_next * sw - param.pw + kw; src_gl2sh_visitor.g_ptr = g_ptr_src + (ih_idx * param.wi + iw_idx) * param.n; diff_gl2sh_visitor.g_ptr = g_ptr_diff + (h_next * wo + w_next) * param.n; src_gl2sh_visitor.copy(); diff_gl2sh_visitor.copy(); } } else { src_gl2sh_visitor.move_forward(); diff_gl2sh_visitor.move_forward(); src_gl2sh_visitor.copy(); diff_gl2sh_visitor.copy(); } consume_block<check_bounds, UnrollConfig, ThreadConfig>( src_gl2sh_visitor, diff_gl2sh_visitor, r_src, r_diff, r_grad); // last tile if (!(h == height_end && w == width_end && b_outer == b_blks - 1)) { __syncthreads(); src_gl2sh_visitor.commit(); diff_gl2sh_visitor.commit(); __syncthreads(); } } } } const int ci_stride = fh * fw * param.co; // store #pragma unroll for (int i = 0; i < UnrollConfig::unroll_ci; ++i) { #pragma unroll for (int j = 0; j < UnrollConfig::unroll_co; ++j) { if (check_bounds && (t_co + j * ThreadConfig::nr_thread_x >= param.co || t_ci + i * ThreadConfig::nr_thread_y >= param.ci)) { } else { g_ptr_grad [j * ThreadConfig::nr_thread_x + i * ThreadConfig::nr_thread_y * ci_stride] = r_grad[i][j]; } } } } void (*get_kern( const Param& param, const int filter_sizes, LaunchConfig& launch_config))( const float* __restrict__, const float* __restrict__, float* __restrict__, Param, int, int, int, int) { void (*kern)( const float* __restrict__, const float* __restrict__, float* __restrict__, Param, int, int, int, int); kern = nullptr; #define CHK3(ci_, co_, n_, tx_, ty_) \ if (param.ci >= ci_) { \ if (param.co >= co_) { \ if (param.n % n_ == 0) { \ static constexpr int unroll_ci = (ci_ + ty_ - 1) / ty_; \ static constexpr int unroll_co = (co_ + tx_ - 1) / tx_; \ static constexpr int unroll_n = n_; \ static constexpr int thread_x = tx_; \ static constexpr int thread_y = ty_; \ typedef UnrollConfig<unroll_ci, unroll_co, unroll_n> UnrollConfig; \ typedef ThreadConfig<thread_x, thread_y> ThreadConfig; \ typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; \ typedef DiffTileCount<UnrollConfig, ThreadConfig> DiffTileCount; \ kern = local_share_bwd_filter_device_template_f32< \ true, UnrollConfig, ThreadConfig>; \ launch_config.nr_threads_x = thread_x; \ launch_config.nr_threads_y = thread_y; \ launch_config.nr_threads_z = 1; \ launch_config.nr_blocks_x = param.sgh * param.sgw * filter_sizes; \ launch_config.nr_blocks_y = DIVUP(param.co, DiffTileCount::tile_co); \ launch_config.nr_blocks_z = DIVUP(param.ci, DataTileCount::tile_ci); \ launch_config.smem_size_in_bytes = \ sizeof(float) * \ (DataTileCount::smem_tot + DiffTileCount::smem_tot); \ } \ } \ } #define CHK2(ci_, co_) \ CHK3(ci_, co_, 4, 16, 8) \ CHK3(ci_, co_, 8, 16, 8) #define CHK2_(ci_, co_) \ CHK3(ci_, co_, 4, 8, 8) \ CHK3(ci_, co_, 8, 8, 8) #define CHK(ci_) \ CHK2_(ci_, 1) \ CHK2_(ci_, 8) CHK2_(ci_, 16) CHK2_(ci_, 32) CHK2_(ci_, 64) CHK2(ci_, 128) CHK(1) CHK(8); CHK(16); CHK(32); CHK(64); CHK(128); #undef CHK #undef CHK2 #undef CHK2_ #undef CHK3 #define CHK3(ci_, co_, n_, tx_, ty_) \ if (param.ci % ci_ == 0) { \ if (param.co % co_ == 0) { \ if (param.n % n_ == 0) { \ static constexpr int unroll_ci = (ci_) / (ty_); \ static constexpr int unroll_co = (co_) / (tx_); \ static constexpr int unroll_n = n_; \ static constexpr int thread_x = tx_; \ static constexpr int thread_y = ty_; \ typedef UnrollConfig<unroll_ci, unroll_co, unroll_n> UnrollConfig; \ typedef ThreadConfig<thread_x, thread_y> ThreadConfig; \ typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; \ typedef 
DiffTileCount<UnrollConfig, ThreadConfig> DiffTileCount; \ kern = local_share_bwd_filter_device_template_f32< \ false, UnrollConfig, ThreadConfig>; \ launch_config.nr_threads_x = thread_x; \ launch_config.nr_threads_y = thread_y; \ launch_config.nr_threads_z = 1; \ launch_config.nr_blocks_x = param.sgh * param.sgw * filter_sizes; \ launch_config.nr_blocks_y = DIVUP(param.co, DiffTileCount::tile_co); \ launch_config.nr_blocks_z = DIVUP(param.ci, DataTileCount::tile_ci); \ launch_config.smem_size_in_bytes = \ sizeof(float) * \ (DataTileCount::smem_tot + DiffTileCount::smem_tot); \ } \ } \ } #define CHK2(ci_, co_) CHK3(ci_, co_, 4, 8, 8) CHK3(ci_, co_, 8, 8, 8) #define CHK(ci_) \ CHK2(ci_, 8) \ CHK2(ci_, 16) \ CHK2(ci_, 32) \ CHK2(ci_, 64) \ CHK3(ci_, 128, 4, 16, 8) CHK3(ci_, 128, 8, 16, 8) CHK(8); CHK(16); CHK(32); CHK(64); CHK(128); #undef CHK #undef CHK2 #undef CHK3 megdnn_assert( kern != nullptr, "no usable kernel implementation for local share " "backward data (batch,co,ci)=(%d,%d,%d)", param.n, param.co, param.ci); return kern; } } // namespace void megdnn::cuda::local_share_bwd_filter::_do_local_share_bwd_filter_implicit_gemm( const float* d_src, const float* d_diff, float* d_grad, float* workspace, int fh, int fw, int sh, int sw, const Param& param, cublasHandle_t cublas_handle, cudaStream_t stream, float* one, float* zero) { int ho = param.grp_ho * param.sgh, wo = param.grp_wo * param.sgw; size_t nr_src_total = param.n * param.ci * param.hi * param.wi; float* ws_src = workspace; float* ws_diff = workspace + nr_src_total; // tensor reformat from (n, c, h, w) -> (c, h, w, n) { int m = param.n, n = param.ci * param.hi * param.wi; int lda, ldb; lda = ldb = param.ci * param.hi * param.wi; int ldc = param.n; cublas_check(cublasSgeam( cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, one, d_src, lda, zero, d_src, ldb, ws_src, ldc)); } { int m = param.n, n = param.co * ho * wo; int lda, ldb; lda = ldb = param.co * ho * wo; int ldc = param.n; cublas_check(cublasSgeam( cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, one, d_diff, lda, zero, d_diff, ldb, ws_diff, ldc)); } { int filter_sizes = fh * fw; void (*kern)( const float* __restrict__, const float* __restrict__, float* __restrict__, Param, int, int, int, int); LaunchConfig launch_config; kern = get_kern(param, filter_sizes, launch_config); uint32_t nr_threads_x = launch_config.nr_threads_x, nr_threads_y = launch_config.nr_threads_y, nr_blocks_x = launch_config.nr_blocks_x, nr_blocks_y = launch_config.nr_blocks_y, nr_blocks_z = launch_config.nr_blocks_z, smem_size_in_bytes = launch_config.smem_size_in_bytes; _check_launch_config(launch_config); dim3 block_size{nr_threads_x, nr_threads_y, 1}; dim3 grid_size{nr_blocks_x, nr_blocks_y, nr_blocks_z}; kern<<<grid_size, block_size, smem_size_in_bytes, stream>>>( ws_src, ws_diff, d_grad, param, fh, fw, sh, sw); after_kernel_launch(); } } // vim: syntax=cuda.doxygen
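// Host-side sketch of the shared-memory tile bookkeeping encoded by DataTileCount /
// DiffTileCount above, evaluated for one illustrative configuration (8x8 threads,
// unroll_ci = unroll_co = unroll_n = 8, corresponding to e.g. the CHK3(64, 64, 8, 8, 8)
// case of the exact-divisibility macro block). The padding of even tile widths to an odd
// stride is presumably there to avoid shared-memory bank conflicts; everything printed
// follows directly from the same formulas and is not additional tuning data.
#include <cstdio>

static void print_tile(const char* name, int tile_h, int tile_batch, int nr_threads)
{
    int load_x   = tile_batch > 32 ? 32 : tile_batch;           // threads per copied row
    int load_y   = nr_threads / load_x;                         // rows copied per iteration
    int stride   = (tile_batch % 2 == 0) ? tile_batch + 1 : tile_batch;
    int smem_tot = tile_h * stride;                             // floats of shared memory
    int reg_row  = (tile_h + load_y - 1) / load_y;              // per-thread staging registers
    int reg_col  = (tile_batch + load_x - 1) / load_x;
    printf("%-5s tile %3dx%-3d smem %4d floats, copy regs %dx%d per thread\n",
           name, tile_h, tile_batch, smem_tot, reg_row, reg_col);
}

int main()
{
    const int unroll_ci = 8, unroll_co = 8, unroll_n = 8;
    const int thread_x = 8, thread_y = 8, nr_threads = thread_x * thread_y;
    print_tile("src",  thread_y * unroll_ci, unroll_n, nr_threads);   // DataTileCount
    print_tile("diff", thread_x * unroll_co, unroll_n, nr_threads);   // DiffTileCount
    // each thread accumulates an unroll_ci x unroll_co block of the filter gradient
    printf("grad block per thread: %dx%d\n", unroll_ci, unroll_co);
    return 0;
}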
* See: https://www.piday.org/million/ */ #include "MonteCarloPi.h" #include <algorithm> #define CUDA_DRIVER_API #include <helper_cuda.h> #include <iostream> #define ROUND_UP_TO_GRANULARITY(x, n) (((x + n - 1) / n) * n) // `ipcHandleTypeFlag` specifies the platform specific handle type this sample // uses for importing and exporting memory allocation. On Linux this sample // specifies the type as CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR meaning that // file descriptors will be used. On Windows this sample specifies the type as // CU_MEM_HANDLE_TYPE_WIN32 meaning that NT HANDLEs will be used. The // ipcHandleTypeFlag variable is a convenience variable and is passed by value // to individual requests. #if defined(__linux__) CUmemAllocationHandleType ipcHandleTypeFlag = CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR; #else CUmemAllocationHandleType ipcHandleTypeFlag = CU_MEM_HANDLE_TYPE_WIN32; #endif // Windows-specific LPSECURITYATTRIBUTES void getDefaultSecurityDescriptor(CUmemAllocationProp *prop) { #if defined(__linux__) return; #elif defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) static const char sddl[] = "D:P(OA;;GARCSDWDWOCCDCLCSWLODTWPRPCRFA;;;WD)"; static OBJECT_ATTRIBUTES objAttributes; static bool objAttributesConfigured = false; if (!objAttributesConfigured) { PSECURITY_DESCRIPTOR secDesc; BOOL result = ConvertStringSecurityDescriptorToSecurityDescriptorA( sddl, SDDL_REVISION_1, &secDesc, NULL); if (result == 0) { printf("IPC failure: getDefaultSecurityDescriptor Failed! (%d)\n", GetLastError()); } InitializeObjectAttributes(&objAttributes, NULL, 0, NULL, secDesc); objAttributesConfigured = true; } prop->win32HandleMetaData = &objAttributes; return; #endif } __global__ void monte_carlo_kernel(vec2 *xyVector, float *pointsInsideCircle, float *numPointsInCircle, unsigned int numPoints, float time) { const size_t stride = gridDim.x * blockDim.x; size_t tid = blockIdx.x * blockDim.x + threadIdx.x; float count = 0.0f; curandState rgnState; curand_init((unsigned long long)time, tid, 0, &rgnState); for (; tid < numPoints; tid += stride) { float x = curand_uniform(&rgnState); float y = curand_uniform(&rgnState); x = (2.0f * x) - 1.0f; y = (2.0f * y) - 1.0f; xyVector[tid][0] = x; xyVector[tid][1] = y; // Compute the distance of this point form the center(0, 0) float dist = sqrtf((x * x) + (y * y)); // If distance is less than the radius of the unit circle, the point lies in // the circle. pointsInsideCircle[tid] = (dist <= 1.0f); count += (dist <= 1.0f); } atomicAdd(numPointsInCircle, count); } MonteCarloPiSimulation::MonteCarloPiSimulation(size_t num_points) : m_xyVector(nullptr), m_pointsInsideCircle(nullptr), m_totalPointsInsideCircle(0), m_totalPointsSimulated(0), m_numPoints(num_points) {} MonteCarloPiSimulation::~MonteCarloPiSimulation() { if (m_numPointsInCircle) { checkCudaErrors(cudaFree(m_numPointsInCircle)); m_numPointsInCircle = nullptr; } if (m_hostNumPointsInCircle) { checkCudaErrors(cudaFreeHost(m_hostNumPointsInCircle)); m_hostNumPointsInCircle = nullptr; } cleanupSimulationAllocations(); } void MonteCarloPiSimulation::initSimulation(int cudaDevice, cudaStream_t stream) { m_cudaDevice = cudaDevice; getIdealExecutionConfiguration(); // Allocate a position buffer that contains random location of the points in // XY cartesian plane. // Allocate a bitmap buffer which holds information of whether a point in the // position buffer is inside the unit circle or not. 
setupSimulationAllocations(); checkCudaErrors( cudaMalloc((float **)&m_numPointsInCircle, sizeof(*m_numPointsInCircle))); checkCudaErrors(cudaMallocHost((float **)&m_hostNumPointsInCircle, sizeof(*m_hostNumPointsInCircle))); } void MonteCarloPiSimulation::stepSimulation(float time, cudaStream_t stream) { checkCudaErrors(cudaMemsetAsync(m_numPointsInCircle, 0, sizeof(*m_numPointsInCircle), stream)); monte_carlo_kernel<<<m_blocks, m_threads, 0, stream>>>( m_xyVector, m_pointsInsideCircle, m_numPointsInCircle, m_numPoints, time); getLastCudaError("Failed to launch CUDA simulation"); checkCudaErrors(cudaMemcpyAsync(m_hostNumPointsInCircle, m_numPointsInCircle, sizeof(*m_numPointsInCircle), cudaMemcpyDeviceToHost, stream)); // Queue up a stream callback to compute and print the PI value. checkCudaErrors( cudaLaunchHostFunc(stream, this->computePiCallback, (void *)this)); } void MonteCarloPiSimulation::computePiCallback(void *args) { MonteCarloPiSimulation *cbData = (MonteCarloPiSimulation *)args; cbData->m_totalPointsInsideCircle += *(cbData->m_hostNumPointsInCircle); cbData->m_totalPointsSimulated += cbData->m_numPoints; double piValue = 4.0 * ((double)cbData->m_totalPointsInsideCircle / (double)cbData->m_totalPointsSimulated); printf("Approximate Pi value for %zd data points: %lf \n", cbData->m_totalPointsSimulated, piValue); } void MonteCarloPiSimulation::getIdealExecutionConfiguration() { int warpSize = 0; int multiProcessorCount = 0; checkCudaErrors(cudaSetDevice(m_cudaDevice)); checkCudaErrors( cudaDeviceGetAttribute(&warpSize, cudaDevAttrWarpSize, m_cudaDevice)); // We don't need large block sizes, since there's not much inter-thread // communication m_threads = warpSize; // Use the occupancy calculator and fill the gpu as best as we can checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &m_blocks, monte_carlo_kernel, warpSize, 0)); checkCudaErrors(cudaDeviceGetAttribute( &multiProcessorCount, cudaDevAttrMultiProcessorCount, m_cudaDevice)); m_blocks *= multiProcessorCount; // Go ahead and the clamp the blocks to the minimum needed for this // height/width m_blocks = std::min(m_blocks, (int)((m_numPoints + m_threads - 1) / m_threads)); } void MonteCarloPiSimulation::setupSimulationAllocations() { CUdeviceptr d_ptr = 0U; size_t granularity = 0; CUmemGenericAllocationHandle cudaPositionHandle, cudaInCircleHandle; CUmemAllocationProp allocProp = {}; allocProp.type = CU_MEM_ALLOCATION_TYPE_PINNED; allocProp.location.type = CU_MEM_LOCATION_TYPE_DEVICE; allocProp.location.id = m_cudaDevice; allocProp.win32HandleMetaData = NULL; allocProp.requestedHandleTypes = ipcHandleTypeFlag; // Windows-specific LPSECURITYATTRIBUTES is required when // CU_MEM_HANDLE_TYPE_WIN32 is used. The security attribute defines the scope // of which exported allocations may be tranferred to other processes. For all // other handle types, pass NULL. getDefaultSecurityDescriptor(&allocProp); // Get the recommended granularity for m_cudaDevice. 
checkCudaErrors(cuMemGetAllocationGranularity( &granularity, &allocProp, CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)); size_t xyPositionVecSize = m_numPoints * sizeof(*m_xyVector); size_t inCircleVecSize = m_numPoints * sizeof(*m_pointsInsideCircle); size_t xyPositionSize = ROUND_UP_TO_GRANULARITY(xyPositionVecSize, granularity); size_t inCircleSize = ROUND_UP_TO_GRANULARITY(inCircleVecSize, granularity); m_totalAllocationSize = (xyPositionSize + inCircleSize); // Reserve the required contiguous VA space for the allocations checkCudaErrors( cuMemAddressReserve(&d_ptr, m_totalAllocationSize, granularity, 0U, 0)); // Create the allocations as a pinned allocation on this device. // Create an allocation to store all the positions of points on the xy plane // and a second allocation which stores information if the corresponding // position is inside the unit circle or not. checkCudaErrors( cuMemCreate(&cudaPositionHandle, xyPositionSize, &allocProp, 0)); checkCudaErrors( cuMemCreate(&cudaInCircleHandle, inCircleSize, &allocProp, 0)); // Export the allocation to a platform-specific handle. The type of handle // requested here must match the requestedHandleTypes field in the prop // structure passed to cuMemCreate. The handle obtained here will be passed to // vulkan to import the allocation. checkCudaErrors(cuMemExportToShareableHandle( (void *)&m_posShareableHandle, cudaPositionHandle, ipcHandleTypeFlag, 0)); checkCudaErrors( cuMemExportToShareableHandle((void *)&m_inCircleShareableHandle, cudaInCircleHandle, ipcHandleTypeFlag, 0)); CUdeviceptr va_position = d_ptr; CUdeviceptr va_InCircle = va_position + xyPositionSize; m_pointsInsideCircle = (float *)va_InCircle; m_xyVector = (vec2 *)va_position; // Assign the chunk to the appropriate VA range checkCudaErrors( cuMemMap(va_position, xyPositionSize, 0, cudaPositionHandle, 0)); checkCudaErrors( cuMemMap(va_InCircle, inCircleSize, 0, cudaInCircleHandle, 0)); // Release the handles for the allocation. Since the allocation is currently // mapped to a VA range with a previous call to cuMemMap the actual freeing of // memory allocation will happen on an eventual call to cuMemUnmap. Thus the // allocation will be kept live until it is unmapped. checkCudaErrors(cuMemRelease(cudaPositionHandle)); checkCudaErrors(cuMemRelease(cudaInCircleHandle)); CUmemAccessDesc accessDescriptor = {}; accessDescriptor.location.id = m_cudaDevice; accessDescriptor.location.type = CU_MEM_LOCATION_TYPE_DEVICE; accessDescriptor.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; // Apply the access descriptor to the whole VA range. Essentially enables // Read-Write access to the range. checkCudaErrors( cuMemSetAccess(d_ptr, m_totalAllocationSize, &accessDescriptor, 1)); } void MonteCarloPiSimulation::cleanupSimulationAllocations() { if (m_xyVector && m_pointsInsideCircle) { // Unmap the mapped virtual memory region // Since the handles to the mapped backing stores have already been released // by cuMemRelease, and these are the only/last mappings referencing them, // The backing stores will be freed. checkCudaErrors(cuMemUnmap((CUdeviceptr)m_xyVector, m_totalAllocationSize)); checkIpcErrors(ipcCloseShareableHandle(m_posShareableHandle)); checkIpcErrors(ipcCloseShareableHandle(m_inCircleShareableHandle)); // Free the virtual address region. checkCudaErrors( cuMemAddressFree((CUdeviceptr)m_xyVector, m_totalAllocationSize)); m_xyVector = nullptr; m_pointsInsideCircle = nullptr; } }
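// Host-only sanity sketch of the estimator that monte_carlo_kernel and computePiCallback
// implement together: sample points uniformly in [-1, 1]^2 and report 4 * (fraction inside
// the unit circle). Illustrative only; it uses <random> in place of cuRAND, and the point
// count and seed below are made up for the example.
#include <cstdio>
#include <random>

int main()
{
    std::mt19937 rng(12345);                                    // fixed seed, illustrative only
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    const unsigned long long numPoints = 10000000ULL;
    unsigned long long inside = 0;
    for (unsigned long long i = 0; i < numPoints; ++i) {
        float x = dist(rng), y = dist(rng);
        if (x * x + y * y <= 1.0f) ++inside;                    // same test as dist <= 1.0f in the kernel
    }
    printf("Approximate Pi value for %llu data points: %lf\n",
           numPoints, 4.0 * (double)inside / (double)numPoints);
    return 0;
}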
#include <float.h> #include "hitables/bvh.h" #include "hitables/sphere.h" #include "hitables/movingsphere.h" #include "hitables/hitablelist.h" #include "materials/material.h" #include "materials/texture.h" #include "util/randomgenerator.h" #include "util/common.h" #ifdef CUDA_ENABLED CUDA_GLOBAL void simpleScene(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.4f, 0.2f, 0.1f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.7f, 0.6f, 0.5f), 0.0f)); *world = new HitableList(list, 4); //*world = new bvhNode(list, 4, 0.0, 1.0); } } CUDA_GLOBAL void simpleScene2(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int count = 58; list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.4f, 0.2f, 0.1f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.7f, 0.6f, 0.5f), 0.0f)); int i = 4; for (int a = -3; a < 3; a++) { for (int b = -4; b < 5; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.5f) // diffuse { list[i++] = new Sphere(center, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.75f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new HitableList(list, count); } } CUDA_GLOBAL void randomScene(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int n = 901; list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.4f, 0.2f, 0.1f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.7f, 0.6f, 0.5f), 0.0f)); int i = 4; for (int a = -15; a < 15; a++) { for (int b = -15; b < 15; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.5f) // diffuse { list[i++] = new Sphere(center, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.75f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new HitableList(list, n); } } CUDA_GLOBAL void randomScene2(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int n = 102; list[0] = new Sphere(Vec3(0.0f, 
-1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.3f, 0.0f, 0.0f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.4f, 0.5f, 0.6f), 0.0f)); int i = 4; for (int a = -5; a < 5; a++) { for (int b = -5; b < 5; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.5f) // diffuse { list[i++] = new Sphere(center, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.75f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new HitableList(list, n); } } CUDA_GLOBAL void randomScene3(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int n = 68; list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.3f, 0.0f, 0.0f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.4f, 0.5f, 0.6f), 0.0f)); int i = 4; for (int a = -4; a < 4; a++) { for (int b = -4; b < 4; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.3f) // diffuse { list[i++] = new Sphere(center, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.65f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new HitableList(list, n); } } CUDA_GLOBAL void randomScene4(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int n = 197; list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.3f, 0.0f, 0.0f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.4f, 0.5f, 0.6f), 0.0f)); int i = 4; for (int a = -7; a < 7; a++) { for (int b = -7; b < 7; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.33f) // diffuse { list[i++] = new Sphere(center, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.88f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new 
HitableList(list, n); } } CUDA_GLOBAL void randomSceneWithMovingSpheres(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int n = 197; list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(new ConstantTexture(Vec3(0.5f, 0.5f, 0.5f)))); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.3f, 0.0f, 0.0f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.4f, 0.5f, 0.6f), 0.0f)); int i = 4; for (int a = -7; a < 7; a++) { for (int b = -7; b < 7; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.33f) // diffuse { list[i++] = new MovingSphere(center, center+Vec3(0.0f, 0.5f*rng.get1f(), 0.0f), 0.0f, 1.0f, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.88f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new HitableList(list, n); } } CUDA_GLOBAL void randomSceneTexture(Hitable** list, Hitable** world) { if (threadIdx.x == 0 && blockIdx.x == 0) { RandomGenerator rng; int n = 102; Texture *checker = new CheckerTexture( new ConstantTexture(Vec3(0.9, 0.05, 0.08)), new ConstantTexture(Vec3(0.9, 0.9, 0.9)) ); list[0] = new Sphere(Vec3(0.0f, -1000.0f, 0.0f), 1000.0f, new Lambertian(checker)); list[1] = new Sphere(Vec3(0.0f, 1.0f, 0.0f), 1.0f, new Dielectric(1.5f)); list[2] = new Sphere(Vec3(-4.0f, 1.0f, 0.0f), 1.0f, new Lambertian(new ConstantTexture(Vec3(0.3f, 0.0f, 0.0f)))); list[3] = new Sphere(Vec3(4.0f, 1.0f, 0.0f), 1.0f, new Metal(Vec3(0.4f, 0.5f, 0.6f), 0.0f)); int i = 4; for (int a = -5; a < 5; a++) { for (int b = -5; b < 5; b++) { float chooseMat = rng.get1f(); Vec3 center(a+0.9f*rng.get1f(), 0.2f, b+0.9f*rng.get1f()); if ((center-Vec3(4.0f, 0.2f, 0.0f)).length() > 0.9f) { if (chooseMat < 0.5f) // diffuse { list[i++] = new Sphere(center, 0.2f, new Lambertian(new ConstantTexture(Vec3(rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f(), rng.get1f()*rng.get1f())))); } else if (chooseMat < 0.75f) // metal { list[i++] = new Sphere(center, 0.2f, new Metal(Vec3(0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f()), 0.5f*(1.0f+rng.get1f())))); } else // glass { list[i++] = new Sphere(center, 0.2f, new Dielectric(1.5f)); } } } } *world = new HitableList(list, n); } } #endif // CUDA_ENABLED
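// Editor's note (illustrative sketch, not part of the original file): the scene
// builders above only do work on thread (0,0) of block (0,0), so the host side is
// expected to allocate the Hitable* array and the world pointer in device memory
// and launch with a 1x1 configuration. The helper below shows that pattern for
// simpleScene(); the function name, the hard-coded object count of 4 and the lack
// of error checking are assumptions made for illustration only.
#ifdef CUDA_ENABLED
void buildSimpleScene(Hitable**& d_list, Hitable**& d_world)
{
    const int objectCount = 4;                        // simpleScene() creates 4 hitables
    cudaMalloc((void**)&d_list,  objectCount * sizeof(Hitable*));
    cudaMalloc((void**)&d_world, sizeof(Hitable*));

    // Only one thread constructs the scene, so a single-thread launch is sufficient.
    simpleScene<<<1, 1>>>(d_list, d_world);
    cudaDeviceSynchronize();                          // wait until the scene graph is built
}
#endif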
extern "C" { #include <sph/sph_blake.h> } /* threads per block */ #define TPB 768 #define NPT 192 #define maxResults 16 /* max count of found nonces in one call */ #define NBN 2 /* hash by cpu with blake 256 */ extern "C" void decred_hash(void *output, const void *input){ sph_blake256_context ctx; sph_blake256_set_rounds(14); sph_blake256_init(&ctx); sph_blake256(&ctx, input, 180); sph_blake256_close(&ctx, output); } #include <cuda_helper.h> #ifdef __INTELLISENSE__ #define __byte_perm(x, y, b) x #endif __constant__ static uint32_t _ALIGN(32) c_v[16]; __constant__ static uint32_t _ALIGN(8) c_h[ 2]; __constant__ static uint32_t c_m[ 3]; __constant__ static uint32_t _ALIGN(32) c_x[90]; /*Buffers of candidate nonce(s) */ static uint32_t *d_resNonce[MAX_GPUS]; static uint32_t *h_resNonce[MAX_GPUS]; #define GSn(a,b,c,d,x,y) { \ v[a]+= x + v[b]; \ v[d] = ROL16(v[d] ^ v[a]); \ v[c]+= v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 12); \ v[a]+= y + v[b]; \ v[d] = ROR8(v[d] ^ v[a]); \ v[c]+= v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 7); \ } #define GSn4(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2,a3,b3,c3,d3,x3,y3) { \ v[ a]+= x + v[ b]; v[a1]+= x1 + v[b1]; v[a2]+= x2 + v[b2]; v[a3]+= x3 + v[b3]; \ v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]); v[d3] = ROL16(v[d3] ^ v[a3]); \ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; v[c3]+= v[d3]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12); v[b3] = ROTR32(v[b3] ^ v[c3], 12); \ v[ a]+= y + v[ b]; v[a1]+= y1 + v[b1]; v[a2]+= y2 + v[b2]; v[a3]+= y3 + v[b3]; \ v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]); v[d3] = ROR8(v[d3] ^ v[a3]); \ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; v[c3]+= v[d3]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7); v[b3] = ROTR32(v[b3] ^ v[c3], 7); \ } #define GSn3(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2) { \ v[ a]+= x + v[ b]; v[a1]+= x1 + v[b1]; v[a2]+= x2 + v[b2]; \ v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]); \ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12); \ v[ a]+= y + v[ b]; v[a1]+= y1 + v[b1]; v[a2]+= y2 + v[b2]; \ v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]); \ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7); \ } __global__ __launch_bounds__(TPB,1) void decred_gpu_hash_nonce(const uint32_t threads,const uint32_t startNonce, uint32_t *resNonce){ uint64_t m3 = startNonce + blockDim.x * blockIdx.x + threadIdx.x; const uint32_t step = gridDim.x * blockDim.x; const uint64_t maxNonce = startNonce + threads; const uint32_t z[16] = { 0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89, 0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917 }; uint32_t v[16]; uint32_t m[16]; #pragma unroll 3 for(int i=0;i<3;i++){ m[i] = c_m[i]; } m[13] = 0x80000001; m[15] = 0x000005a0; const uint32_t m130 = z[12]^m[13]; const uint32_t m131 = m[13]^z[ 6]; const uint32_t m132 = z[15]^m[13]; const uint32_t m133 = z[ 3]^m[13]; const uint32_t m134 = z[ 4]^m[13]; const uint32_t m135 = z[14]^m[13]; const uint32_t m136 = m[13]^z[11]; const uint32_t m137 = m[13]^z[ 7]; const uint32_t m138 = m[13]^z[ 0]; 
volatile uint32_t m150 = z[14]^m[15]; volatile uint32_t m151 = z[ 9]^m[15]; volatile uint32_t m152 = m[15]^z[13]; volatile uint32_t m153 = m[15]^z[ 8]; const uint32_t m154 = z[10]^m[15]; const uint32_t m155 = z[ 1]^m[15]; const uint32_t m156 = m[15]^z[ 4]; const uint32_t m157 = z[ 6]^m[15]; const uint32_t m158 = m[15]^z[11]; const uint32_t h7 = c_h[ 0]; for( ; m3<maxNonce ; m3+=step){ m[ 3] = m3; #pragma unroll 16 for(int i=0;i<16;i++){ v[i] = c_v[i]; } uint32_t xors[16],i=0; //partial: 0{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } xors[ 5] = z[ 2]^m[ 3]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = z[15]; xors[12]=c_x[i++]; xors[13] = c_x[i++]; xors[14] = m130; xors[15] = m150; v[ 1]+= xors[ 5]; v[13] = ROR8(v[13] ^ v[1]); v[ 9]+= v[13]; v[ 5] = ROTR32(v[5] ^ v[9], 7); v[ 0]+= v[5]; v[15] = ROL16(v[15] ^ v[0]); v[10]+= v[15]; v[ 5] = ROTR32(v[5] ^ v[10], 12); v[ 0]+= xors[12] + v[5]; v[15] = ROR8(v[15] ^ v[0]); v[10]+= v[15]; v[ 5] = ROTR32(v[5] ^ v[10], 7); GSn3(1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } xors[ 0] = z[10]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m131; xors[ 8] = m[ 1]^z[12]; xors[ 9] = m[ 0]^z[ 2]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = m151; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 0]^m[ 2]; xors[14] = c_x[i++]; xors[15] = z[ 5]^m[ 3]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m152; xors[ 8] = c_x[i++]; xors[ 9] = m[ 3]^z[ 6]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = z[12]^m[ 0]; xors[ 6] = z[ 5]^m[ 2]; xors[ 7] = m132; xors[12] = z[10]; xors[13] = c_x[i++]; xors[14] = z[ 7]^m[ 1]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } xors[ 0] = c_x[i++]; xors[ 1] = m[ 3]^z[ 1]; xors[ 2] = m130; xors[ 3] = c_x[i++]; xors[ 8] = m[ 2]^z[ 6]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = m153; xors[ 4] = c_x[i++]; xors[ 5] = z[ 3]^m[ 1]; xors[ 6] = c_x[i++]; xors[ 7] = z[11]; xors[12] = c_x[i++]; xors[13] = c_x[i++]; xors[14] = z[ 4]^m[ 0]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 4{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = m[ 2]^z[ 4]; xors[ 3] = c_x[i++]; xors[ 8] = z[ 1]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = m[ 3]^z[13]; xors[ 4] = z[ 9]^m[ 0]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = m154; xors[12] = z[14]^m[ 1]; xors[13] = c_x[i++]; xors[14] = c_x[i++]; xors[15] = m133; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 
7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 5{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } xors[ 0] = m[ 2]^z[12]; xors[ 1] = c_x[i++]; xors[ 2] = m[ 0]^z[11]; xors[ 3] = c_x[i++]; xors[ 8] = c_x[i++]; xors[ 9] = c_x[i++]; xors[10] = m150; xors[11] = m[ 1]^z[ 9]; xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = z[ 8]^m[ 3]; xors[12] = m134; xors[13] = c_x[i++]; xors[14] = z[15]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 6{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } xors[ 0] = c_x[i++]; xors[ 1] = m[ 1]^z[15]; xors[ 2] = z[13]; xors[ 3] = c_x[i++]; xors[ 8] = m[ 0]^z[ 7]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = m155; xors[ 6] = m135; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 6]^m[ 3]; xors[14] = z[ 9]^m[ 2]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 7{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } xors[ 0] = m136; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m[ 3]^z[ 9]; xors[ 8] = c_x[i++]; xors[ 9] = m156; xors[10] = c_x[i++]; xors[11] = m[ 2]^z[10]; xors[ 4] = c_x[i++]; xors[ 5] = z[ 7]; xors[ 6] = z[12]^m[ 1]; xors[ 7] = c_x[i++]; xors[12] = z[ 5]^m[ 0]; xors[13] = c_x[i++]; xors[14] = c_x[i++]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 8{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } xors[ 0] = c_x[i++]; xors[ 1] = z[ 9]; xors[ 2] = c_x[i++]; xors[ 3] = m[ 0]^z[ 8]; xors[ 8] = c_x[i++]; xors[ 9] = m137; xors[10] = m[ 1]^z[ 4]; xors[11] = c_x[i++]; xors[ 4] = m157; xors[ 5] = c_x[i++]; xors[ 6] = z[11]^m[ 3]; xors[ 7] = c_x[i++]; xors[12] = z[12]^m[ 2]; xors[13] = c_x[i++]; xors[14] = c_x[i++]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 9{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m[ 1]^z[ 5]; xors[ 8] = m158; xors[ 9] = c_x[i++]; xors[10] = m[ 3]^z[12]; xors[11] = m138; xors[ 4] = z[10]^m[ 2]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 9]; xors[14] = c_x[i++]; xors[15] = z[13]^m[ 0]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 0{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } xors[ 0] = m[ 0]^z[ 1]; xors[ 1] = m[ 2]^z[ 3]; xors[ 2] = c_x[i++]; xors[ 3] = 
c_x[i++]; xors[ 8] = c_x[i++]; xors[ 9] = c_x[ 0]; xors[10] = c_x[ 1]; xors[11] = z[15]; xors[ 4] = z[ 0]^m[ 1]; xors[ 5] = z[ 2]^m[ 3]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++]; xors[12] = c_x[ 2]; xors[13] = c_x[ 3]; xors[14] = m130; xors[15] = m150; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); //i=90 i=4; // 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } xors[ 0] = z[10]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m131; xors[ 8] = m[ 1]^z[12]; xors[ 9] = m[ 0]^z[ 2]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = m151; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 0]^m[ 2]; xors[14] = c_x[i++]; xors[15] = z[ 5]^m[ 3]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m152; xors[ 8] = c_x[i++]; xors[ 9] = m[ 3]^z[ 6]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = z[12]^m[ 0]; xors[ 6] = z[ 5]^m[ 2]; xors[ 7] = m132; xors[12] = z[10]; xors[13] = c_x[i++]; xors[14] = z[ 7]^m[ 1]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); GSn4(0, 5,10,15,xors[ 8],xors[12], 1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } xors[ 0] = c_x[i++]; xors[ 1] = m[ 3]^z[ 1]; xors[ 2] = m130; xors[ 3] = c_x[i++]; xors[ 8] = m[ 2]^z[ 6]; i++; xors[10] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = z[ 3]^m[ 1]; xors[ 6] = c_x[i++]; xors[ 7] = z[11]; xors[12] = c_x[i++]; xors[14] = z[ 4]^m[ 0]; GSn4(0, 4, 8,12,xors[ 0],xors[ 4], 1, 5, 9,13,xors[ 1],xors[ 5], 2, 6,10,14,xors[ 2],xors[ 6], 3, 7,11,15,xors[ 3],xors[ 7]); v[ 0]+= xors[ 8] + v[ 5]; v[ 2]+= xors[10] + v[ 7]; v[15] = ROL16(v[15] ^ v[ 0]); v[13] = ROL16(v[13] ^ v[ 2]); v[10]+= v[15]; v[ 8]+= v[13]; v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 0]+= xors[12] + v[ 5]; v[ 2]+= xors[14] + v[ 7]; v[15] = ROTR32(v[15] ^ v[ 0],1); v[13] = ROR8(v[13] ^ v[ 2]); v[ 8]+= v[13]; if(xor3x(v[ 7],h7,v[ 8])==v[15]){ uint32_t pos = atomicInc(&resNonce[0],0xffffffff)+1; if(pos<maxResults) resNonce[pos]=m[ 3]; return; } } } __host__ void decred_cpu_setBlock_52(const int thr_id,const uint32_t *input, const uint32_t *pend) { const uint32_t z[16] = { 0x243F6A88UL, 0x85A308D3UL, 0x13198A2EUL, 0x03707344UL,0xA4093822UL, 0x299F31D0UL, 0x082EFA98UL, 0xEC4E6C89UL, 0x452821E6UL, 0x38D01377UL, 0xBE5466CFUL, 0x34E90C6CUL,0xC0AC29B7UL, 0xC97C50DDUL, 0x3F84D5B5UL, 0xB5470917UL }; sph_u32 _ALIGN(64) v[16]; sph_u32 _ALIGN(64) m[16]; sph_u32 _ALIGN(64) h[ 2]; sph_blake256_context ctx; sph_blake256_set_rounds(14); sph_blake256_init(&ctx); sph_blake256(&ctx, input, 128); v[ 0] = ctx.H[0]; v[ 1] = ctx.H[1]; v[ 2] = ctx.H[2]; v[ 3] = ctx.H[3]; v[ 4] = ctx.H[4]; v[ 5] = ctx.H[5]; v[ 8] = ctx.H[6]; v[12] = swab32(input[35]); v[13] = ctx.H[7]; // pre swab32 m[ 0] = swab32(input[32]); m[ 1] = swab32(input[33]); m[ 2] = swab32(input[34]); 
m[ 3] = 0; m[ 4] = swab32(input[36]); m[ 5] = swab32(input[37]); m[ 6] = swab32(input[38]); m[ 7] = swab32(input[39]); m[ 8] = swab32(input[40]); m[ 9] = swab32(input[41]); m[10] = swab32(input[42]); m[11] = swab32(input[43]); m[12] = swab32(input[44]); m[13] = 0x80000001; m[14] = 0; m[15] = 0x000005a0; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_m,m, 3*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); h[ 0] = v[ 8]; h[ 1] = v[13]; v[ 0]+= (m[ 0] ^ z[1]) + v[ 4]; v[12] = SPH_ROTR32(z[4] ^ SPH_C32(0x5A0) ^ v[ 0], 16); v[ 8] = z[0]+v[12]; v[ 4] = SPH_ROTR32(v[ 4] ^ v[ 8], 12); v[ 0]+= (m[ 1] ^ z[0]) + v[ 4]; v[12] = SPH_ROTR32(v[12] ^ v[ 0],8); v[ 8]+= v[12]; v[ 4] = SPH_ROTR32(v[ 4] ^ v[ 8], 7); v[ 1]+= (m[ 2] ^ z[3]) + v[ 5]; v[13] = SPH_ROTR32((z[5] ^ SPH_C32(0x5A0)) ^ v[ 1], 16); v[ 9] = z[1]+v[13]; v[ 5] = SPH_ROTR32(v[ 5] ^ v[ 9], 12); v[ 1]+= v[ 5]; //+nonce ^ ... v[ 2]+= (m[ 4] ^ z[5]) + h[ 0]; v[14] = SPH_ROTR32(z[6] ^ v[ 2],16); v[10] = z[2] + v[14]; v[ 6] = SPH_ROTR32(h[ 0] ^ v[10], 12); v[ 2]+= (m[ 5] ^ z[4]) + v[ 6]; v[14] = SPH_ROTR32(v[14] ^ v[ 2], 8); v[10]+= v[14]; v[ 6] = SPH_ROTR32(v[ 6] ^ v[10], 7); v[ 3]+= (m[ 6] ^ z[7]) + h[ 1]; v[15] = SPH_ROTR32(z[7] ^ v[ 3],16); v[11] = z[3] + v[15]; v[ 7] = SPH_ROTR32(h[ 1] ^ v[11], 12); v[ 3]+= (m[ 7] ^ z[6]) + v[ 7]; v[15] = SPH_ROTR32(v[15] ^ v[ 3],8); v[11]+= v[15]; v[ 7] = SPH_ROTR32(v[11] ^ v[ 7], 7); v[ 0]+= m[ 8] ^ z[9]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_v, v,16*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); h[ 0] = SPH_ROTL32(h[ 1], 7); //align the rotation with v[7] v[15]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_h,h, 1*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); uint32_t x[90]; int i=0; x[i++] = m[10]^z[11]; x[i++] = m[12]^z[13]; x[i++] = m[ 9]^z[ 8]; x[i++] = z[10]^m[11]; x[i++] = m[ 4]^z[ 8]; x[i++] = m[ 9]^z[15]; x[i++] = m[11]^z[ 7]; x[i++] = m[ 5]^z[ 3]; x[i++] = z[14]^m[10]; x[i++] = z[ 4]^m[ 8]; x[i++] = z[13]^m[ 6]; x[i++] = z[ 1]^m[12]; x[i++] = z[11]^m[ 7]; x[i++] = m[11]^z[ 8]; x[i++] = m[12]^z[ 0]; x[i++] = m[ 5]^z[ 2]; x[i++] = m[10]^z[14]; x[i++] = m[ 7]^z[ 1]; x[i++] = m[ 9]^z[ 4]; x[i++] = z[11]^m[ 8]; x[i++] = z[ 3]^m[ 6]; x[i++] = z[ 9]^m[ 4]; x[i++] = m[ 7]^z[ 9]; x[i++] = m[11]^z[14]; x[i++] = m[ 5]^z[10]; x[i++] = m[ 4]^z[ 0]; x[i++] = z[ 7]^m[ 9]; x[i++] = z[13]^m[12]; x[i++] = z[ 2]^m[ 6]; x[i++] = z[ 5]^m[10]; x[i++] = z[15]^m[ 8]; x[i++] = m[ 9]^z[ 0]; x[i++] = m[ 5]^z[ 7]; x[i++] = m[10]^z[15]; x[i++] = m[11]^z[12]; x[i++] = m[ 6]^z[ 8]; x[i++] = z[ 5]^m[ 7]; x[i++] = z[ 2]^m[ 4]; x[i++] = z[11]^m[12]; x[i++] = z[ 6]^m[ 8]; x[i++] = m[ 6]^z[10]; x[i++] = m[ 8]^z[ 3]; x[i++] = m[ 4]^z[13]; x[i++] = m[ 7]^z[ 5]; x[i++] = z[ 2]^m[12]; x[i++] = z[ 6]^m[10]; x[i++] = z[ 0]^m[11]; x[i++] = z[ 7]^m[ 5]; x[i++] = z[ 1]^m[ 9]; x[i++] = m[12]^z[ 5]; x[i++] = m[ 4]^z[10]; x[i++] = m[ 6]^z[ 3]; x[i++] = m[ 9]^z[ 2]; x[i++] = m[ 8]^z[11]; x[i++] = z[12]^m[ 5]; x[i++] = z[ 4]^m[10]; x[i++] = z[ 0]^m[ 7]; x[i++] = z[ 8]^m[11]; x[i++] = m[ 7]^z[14]; x[i++] = m[12]^z[ 1]; x[i++] = m[ 5]^z[ 0]; x[i++] = m[ 8]^z[ 6]; x[i++] = z[13]^m[11]; x[i++] = z[ 3]^m[ 9]; x[i++] = z[15]^m[ 4]; x[i++] = z[ 8]^m[ 6]; x[i++] = z[ 2]^m[10]; x[i++] = m[ 6]^z[15]; x[i++] = m[11]^z[ 3]; x[i++] = m[12]^z[ 2]; x[i++] = m[10]^z[ 5]; x[i++] = z[14]^m[ 9]; x[i++] = z[ 0]^m[ 8]; x[i++] = z[13]^m[ 7]; x[i++] = z[ 1]^m[ 4]; x[i++] = z[10]^m[ 5]; x[i++] = m[10]^z[ 2]; x[i++] = m[ 8]^z[ 4]; x[i++] = m[ 7]^z[ 6]; x[i++] = m[ 9]^z[14]; x[i++] = z[ 8]^m[ 4]; x[i++] = z[ 7]^m[ 6]; x[i++] = z[ 1]^m[ 5]; x[i++] = z[15]^m[11]; x[i++] = z[ 3]^m[12]; x[i++] = m[ 
4]^z[ 5]; x[i++] = m[ 6]^z[ 7]; x[i++] = m[ 8]^z[ 9]; x[i++] = z[ 4]^m[ 5]; x[i++] = z[ 6]^m[ 7]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_x, x, i*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); } /* ############################################################################################################################### */ static bool init[MAX_GPUS] = { 0 }; // nonce position is different in decred #define DCR_NONCE_OFT32 35 extern "C" int scanhash_decred(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done){ uint32_t _ALIGN(64) endiandata[48]; uint32_t *pdata = work->data; uint32_t *ptarget = work->target; uint32_t *pnonce = &pdata[DCR_NONCE_OFT32]; const uint32_t first_nonce = *pnonce; const int dev_id = device_map[thr_id]; int intensity = (device_sm[thr_id] > 500) ? 31 : 30; uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity); if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB)); const dim3 block(TPB); int rc = 0; if (opt_benchmark) { ptarget[6] = swab32(0xff); } if (!init[thr_id]){ cudaSetDevice(dev_id); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage (linux) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); CUDA_LOG_ERROR(); } gpulog(LOG_INFO,thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], maxResults * sizeof(uint32_t))); h_resNonce[thr_id] = (uint32_t*) malloc(maxResults * sizeof(uint32_t)); if(h_resNonce[thr_id] == NULL){ gpulog(LOG_ERR,thr_id,"Host memory allocation failed"); exit(EXIT_FAILURE); } CUDA_LOG_ERROR(); init[thr_id] = true; } memcpy(endiandata, pdata, 180); decred_cpu_setBlock_52(thr_id,endiandata,&pdata[32]); cudaMemset(d_resNonce[thr_id], 0x00, maxResults*sizeof(uint32_t)); do { // GPU HASH decred_gpu_hash_nonce <<<grid, block>>> (throughput,(*pnonce), d_resNonce[thr_id]); cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost); if (h_resNonce[thr_id][0] != 0){ cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], maxResults*sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemset(d_resNonce[thr_id], 0x00, sizeof(uint32_t)); if(h_resNonce[thr_id][0]>(maxResults-1)){ gpulog(LOG_WARNING,dev_id,"Candidate flood: %u",h_resNonce[thr_id][0]); h_resNonce[thr_id][0]=maxResults-1; } uint32_t i; for(i=1;i<h_resNonce[thr_id][0]+1;i++){ uint32_t vhash64[8]; be32enc(&endiandata[DCR_NONCE_OFT32], h_resNonce[thr_id][i]); decred_hash(vhash64, endiandata); if (vhash64[6] <= ptarget[6] && fulltest(vhash64, ptarget)){ work_set_target_ratio(work, vhash64); *hashes_done = (*pnonce) - first_nonce + throughput; work->nonces[0]= swab32(h_resNonce[thr_id][i]); rc = 1; //search for 2nd nonce for(uint32_t j=i+1;j<h_resNonce[thr_id][0]+1;j++){ be32enc(&endiandata[DCR_NONCE_OFT32], h_resNonce[thr_id][j]); decred_hash(vhash64, endiandata); if (vhash64[6] <= ptarget[6] && fulltest(vhash64, ptarget)){ work->nonces[1] = swab32(h_resNonce[thr_id][j]); // if(!opt_quiet) // gpulog(LOG_BLUE,dev_id,"Found 2nd nonce: %u/%08X - %u/%08X",i,pdata[19],j,pdata[21]); if(bn_hash_target_ratio(vhash64, ptarget) > work->shareratio[0]){ work_set_target_ratio(work, vhash64); uint32_t tmp = work->nonces[1]; work->nonces[1] = work->nonces[0]; work->nonces[0] = tmp; } rc = 2; break; } } *pnonce = work->nonces[ 0]; return rc; } } } *pnonce += throughput; } while (!work_restart[thr_id].restart 
&& (uint64_t)max_nonce > (uint64_t)throughput + (uint64_t)(*pnonce)); *hashes_done = (*pnonce) - first_nonce; return rc; } // cleanup extern "C" void free_decred(int thr_id) { if (!init[thr_id]) return; cudaDeviceSynchronize(); free(h_resNonce[thr_id]); cudaFree(d_resNonce[thr_id]); init[thr_id] = false; cudaDeviceSynchronize(); }
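// Editor's note (reference sketch, not part of the original miner): the GSn/GSn3/GSn4
// macros above are unrolled, constant-folded forms of the standard BLAKE-256 G
// function with its 16/12/8/7 rotation schedule. A plain scalar version is shown
// below purely to make the macros easier to read; x and y are the two
// message-word XOR round-constant values fed into the half-rounds.
static inline uint32_t rotr32_ref(uint32_t w, unsigned c) { return (w >> c) | (w << (32 - c)); }

static inline void blake256_g_ref(uint32_t v[16], int a, int b, int c, int d,
                                  uint32_t x, uint32_t y)
{
    v[a] += x + v[b];
    v[d]  = rotr32_ref(v[d] ^ v[a], 16);   // ROL16 in the device macros
    v[c] += v[d];
    v[b]  = rotr32_ref(v[b] ^ v[c], 12);
    v[a] += y + v[b];
    v[d]  = rotr32_ref(v[d] ^ v[a], 8);    // ROR8 in the device macros
    v[c] += v[d];
    v[b]  = rotr32_ref(v[b] ^ v[c], 7);
}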
void kernel_driver_aa(std::string filename, std::vector<std::string> &reads, std::vector<std::string> &contigs, short h_scoring_matrix[], short openGap, short extendGap) { unsigned maxContigSize = getMaxLength(contigs); unsigned maxReadSize = getMaxLength(reads); unsigned totalAlignments = contigs.size(); //std::cout <<"max contig:"<<maxContigSize<<std::endl; //std::cout <<"max read:"<<maxReadSize<<std::endl; //std::cout <<"total aligns:"<<totalAlignments<<std::endl; short* h_ref_begin = (short*) malloc (sizeof(short) * totalAlignments); short* h_ref_end = (short*) malloc (sizeof(short) * totalAlignments); short* h_query_begin = (short*) malloc (sizeof(short) * totalAlignments); short* h_query_end = (short*) malloc (sizeof(short) * totalAlignments); short* h_top_scores = (short*) malloc (sizeof(short) * totalAlignments); unsigned* h_offsetA = (unsigned*) malloc (sizeof(unsigned) * totalAlignments); unsigned* h_offsetB = (unsigned*) malloc (sizeof(unsigned) * totalAlignments); char* h_strA = (char*) malloc (maxContigSize * totalAlignments); char* h_strB = (char*) malloc (maxContigSize * totalAlignments); short h_encoding_matrix[] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0, 23,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,20,4,3,6, 13,7,8,9,0,11,10,12,2,0,14,5, 1,15,16,0,19,17,22,18,21}; float total_packing; auto start = NOW; float total_time_cpu = 0; // total number of iterations int its = (totalAlignments>20000)?(ceil((float)totalAlignments/20000)):1; unsigned NBLOCKS = totalAlignments; unsigned leftOvers = NBLOCKS % its; unsigned stringsPerIt = NBLOCKS / its; unsigned maxAlignments = stringsPerIt + leftOvers; short* d_ref_start; hipMalloc((void**)&d_ref_start, maxAlignments * sizeof(short)); short* d_ref_end; hipMalloc((void**)&d_ref_end, maxAlignments * sizeof(short)); short* d_query_start; hipMalloc((void**)&d_query_start, maxAlignments * sizeof(short)); short* d_query_end; hipMalloc((void**)&d_query_end, maxAlignments * sizeof(short)); short* d_scores; hipMalloc((void**)&d_scores, maxAlignments * sizeof(short)); unsigned* d_offset_ref; hipMalloc((void**)&d_offset_ref, maxAlignments * sizeof(unsigned)); unsigned* d_offset_query; hipMalloc((void**)&d_offset_query, maxAlignments * sizeof(unsigned)); char* d_strA; hipMalloc((void**)&d_strA, maxContigSize * maxAlignments * sizeof(char)); char* d_strB; hipMalloc((void**)&d_strB, maxReadSize * maxAlignments * sizeof(char)); short* d_encoding_matrix; hipMalloc((void**)&d_encoding_matrix, ENCOD_MAT_SIZE * sizeof(short)); hipMemcpy(d_encoding_matrix, h_encoding_matrix, ENCOD_MAT_SIZE * sizeof(short), hipMemcpyHostToDevice); short* d_scoring_matrix; hipMalloc((void**)&d_scoring_matrix, SCORE_MAT_SIZE * sizeof(short)); hipMemcpy(d_scoring_matrix, h_scoring_matrix, SCORE_MAT_SIZE * sizeof(short), hipMemcpyHostToDevice); total_packing = 0; short* ref_begin = h_ref_begin; short* ref_end = h_ref_end; short* query_begin = h_query_begin; short* query_end = h_query_end; short* top_scores = h_top_scores; std::cout<<"Number of loop iterations: " << its << std::endl; for (int perGPUIts = 0; perGPUIts < its; perGPUIts++) { auto packing_start = NOW; int blocksLaunched; std::vector<std::string>::const_iterator beginAVec; std::vector<std::string>::const_iterator endAVec; std::vector<std::string>::const_iterator beginBVec; std::vector<std::string>::const_iterator endBVec; beginAVec = contigs.begin() + perGPUIts * stringsPerIt; beginBVec = reads.begin() + (perGPUIts * stringsPerIt); if(perGPUIts == its - 1) { 
endAVec = contigs.begin() + (perGPUIts + 1) * stringsPerIt + leftOvers; endBVec = reads.begin() + ((perGPUIts + 1) * stringsPerIt) + leftOvers; blocksLaunched = stringsPerIt + leftOvers; } else { endAVec = contigs.begin() + (perGPUIts + 1) * stringsPerIt; endBVec = reads.begin() + (perGPUIts + 1) * stringsPerIt; blocksLaunched = stringsPerIt; } std::vector<std::string> sequencesA(beginAVec, endAVec); std::vector<std::string> sequencesB(beginBVec, endBVec); unsigned running_sum = 0; int sequences_per_stream = blocksLaunched; auto start_cpu = NOW; for (unsigned int i = 0; i < sequencesA.size(); i++) { running_sum += sequencesA[i].size(); h_offsetA[i] = running_sum;//sequencesA[i].size(); // std::cout << "offsetA_h: " << h_offsetA[i] << std::endl; } unsigned totalLengthA = h_offsetA[sequencesA.size() - 1]; // std::cout << "totalLengthA: " << totalLengthA << std::endl; running_sum = 0; for (unsigned int i = 0; i < sequencesB.size(); i++) { running_sum += sequencesB[i].size(); h_offsetB[i] = running_sum; //sequencesB[i].size(); // std::cout << "offsetB_h: " << h_offsetB[i] << std::endl; } unsigned totalLengthB = h_offsetB[sequencesB.size() - 1]; // std::cout << "totalLengthB: " << totalLengthB << std::endl; auto end_cpu = NOW; std::chrono::duration<double> cpu_dur = end_cpu - start_cpu; total_time_cpu += cpu_dur.count(); unsigned offsetSumA = 0; unsigned offsetSumB = 0; for (unsigned int i = 0; i < sequencesA.size(); i++) { char* seqptrA = h_strA + offsetSumA; memcpy(seqptrA, sequencesA[i].c_str(), sequencesA[i].size()); char* seqptrB = h_strB + offsetSumB; memcpy(seqptrB, sequencesB[i].c_str(), sequencesB[i].size()); offsetSumA += sequencesA[i].size(); offsetSumB += sequencesB[i].size(); } auto packing_end = NOW; std::chrono::duration<double> packing_dur = packing_end - packing_start; total_packing += packing_dur.count(); hipMemcpyAsync(d_offset_ref, h_offsetA, sizeof(unsigned) * sequences_per_stream, hipMemcpyHostToDevice, 0); hipMemcpyAsync(d_offset_query, h_offsetB, sizeof(unsigned) * sequences_per_stream, hipMemcpyHostToDevice, 0); hipMemcpyAsync(d_strA, h_strA, sizeof(char) * totalLengthA, hipMemcpyHostToDevice, 0); hipMemcpyAsync(d_strB, h_strB, sizeof(char) * totalLengthB, hipMemcpyHostToDevice, 0); unsigned minSize = (maxReadSize < maxContigSize) ? 
maxReadSize : maxContigSize; unsigned totShmem = 3 * (minSize + 1) * sizeof(short); unsigned alignmentPad = 4 + (4 - totShmem % 4); size_t ShmemBytes = totShmem + alignmentPad; printf("Shared memory bytes: %lu\n", ShmemBytes); printf("sequences per stream (CUDA grid size): %d\n", sequences_per_stream); printf("minSize (CUDA block size): %d\n", minSize); dim3 gws_aa(sequences_per_stream); dim3 lws_aa(minSize); hipLaunchKernelGGL(sequence_aa_kernel, dim3(gws_aa), dim3(lws_aa), ShmemBytes, 0, false, d_strA, d_strB, d_offset_ref, d_offset_query, d_ref_start, d_ref_end, d_query_start, d_query_end, d_scores, openGap, extendGap, d_scoring_matrix, d_encoding_matrix ); // copyin back end index so that we can find new min hipMemcpyAsync(ref_end, d_ref_end, sizeof(short) * sequences_per_stream, hipMemcpyDeviceToHost, 0); hipMemcpyAsync(query_end, d_query_end, sizeof(short) * sequences_per_stream, hipMemcpyDeviceToHost, 0); hipDeviceSynchronize(); auto sec_cpu_start = NOW; // find the new largest of smaller lengths int newMin = get_new_min_length(ref_end, query_end, blocksLaunched); auto sec_cpu_end = NOW; std::chrono::duration<double> dur_sec_cpu = sec_cpu_end - sec_cpu_start; total_time_cpu += dur_sec_cpu.count(); dim3 gws_aa_r(sequences_per_stream); dim3 lws_aa_r(newMin); hipLaunchKernelGGL(sequence_aa_kernel, dim3(gws_aa_r), dim3(lws_aa_r), ShmemBytes, 0, true, d_strA, d_strB, d_offset_ref, d_offset_query, d_ref_start, d_ref_end, d_query_start, d_query_end, d_scores, openGap, extendGap, d_scoring_matrix, d_encoding_matrix ); hipMemcpyAsync(ref_begin, d_ref_start, sizeof(short) * sequences_per_stream, hipMemcpyDeviceToHost, 0); hipMemcpyAsync(query_begin, d_query_start, sizeof(short) * sequences_per_stream, hipMemcpyDeviceToHost, 0); hipMemcpyAsync(top_scores, d_scores, sizeof(short) * sequences_per_stream, hipMemcpyDeviceToHost, 0); ref_begin += stringsPerIt; query_begin += stringsPerIt; ref_end += stringsPerIt; query_end += stringsPerIt; top_scores += stringsPerIt; } // iterations end here hipDeviceSynchronize(); hipFree(d_ref_start); hipFree(d_ref_end); hipFree(d_query_start); hipFree(d_query_end); hipFree(d_scores); hipFree(d_offset_ref); hipFree(d_offset_query); hipFree(d_strA); hipFree(d_strB); hipFree(d_encoding_matrix); hipFree(d_scoring_matrix); auto end = NOW; std::cout <<"cpu time:"<<total_time_cpu<<std::endl; std::cout <<"packing time:"<<total_packing<<std::endl; std::chrono::duration<double> diff = end - start; std::cout << "Total Alignments:" << totalAlignments << "\n" << "Max Reference Size:" << maxContigSize << "\n" << "Max Query Size:"<< maxReadSize << "\n" << "Total Execution Time (seconds):"<< diff.count() << "\n"; std::ofstream results_file(filename); for (unsigned int k = 0; k < reads.size(); k++) { results_file << h_top_scores[k] <<"\t" << h_ref_begin[k] <<"\t" << h_ref_end[k] - 1 <<"\t" << h_query_begin[k] <<"\t" << h_query_end[k] - 1 << std::endl; } results_file.flush(); results_file.close(); long long int total_cells = 0; for (unsigned int l = 0; l < reads.size(); l++) { total_cells += reads.at(l).size()*contigs.at(l).size(); } std::cout << "Total Cells:"<<total_cells<<std::endl; free(h_top_scores); free(h_ref_begin); free(h_ref_end); free(h_query_begin); free(h_query_end); free(h_offsetA); free(h_offsetB); free(h_strA); free(h_strB); }// end of amino acids kernel
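// Editor's note (hypothetical sketch, not part of the original driver): the reverse
// scoring pass above sizes its thread block with get_new_min_length(), which is
// defined elsewhere. Under the assumption that it returns the largest value, across
// all launched alignments, of the shorter of the two forward end indices (matching
// the "largest of smaller lengths" comment), it could look like the sketch below;
// the real helper may differ.
static int get_new_min_length_sketch(const short* ref_end, const short* query_end,
                                     int blocksLaunched)
{
    int newMin = 1;                                            // block size must stay >= 1
    for (int i = 0; i < blocksLaunched; i++) {
        const int shorter = (ref_end[i] < query_end[i]) ? ref_end[i] : query_end[i];
        if (shorter > newMin) newMin = shorter;
    }
    return newMin;
}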
#include <stdio.h> #include <stdlib.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/shared_pointer.h> #include <nvbio/basic/dna.h> #include <nvbio/strings/string_set.h> #include <nvbio/strings/infix.h> #include <nvbio/strings/seeds.h> #include <nvbio/qgram/qgram.h> #include <nvbio/qgram/filter.h> #include <nvbio/io/sequence/sequence.h> #include "alignment.h" #include "util.h" using namespace nvbio; // query stats // struct Stats { Stats() : time(0), build_time(0), extract_time(0), rank_time(0), locate_time(0), align_time(0), reads(0), aligned(0), queries(0), matches(0), occurrences(0), merged(0) {} float time; float build_time; float extract_time; float rank_time; float locate_time; float align_time; uint64 reads; uint64 aligned; uint64 queries; uint64 matches; uint64 occurrences; uint64 merged; }; // build a set of q-grams from a given string, together with their sorted counterpart // template <typename genome_string, typename qgram_vector_type, typename index_vector_type> void build_qgrams( const uint32 Q, const uint32 genome_len, const uint32 genome_offset, const genome_string genome, const uint32 n_queries, qgram_vector_type& qgrams, qgram_vector_type& sorted_qgrams, index_vector_type& sorted_indices) { // build the q-grams qgrams.resize( n_queries ); generate_qgrams( Q, 2u, genome_len, genome, n_queries, thrust::make_counting_iterator<uint32>(genome_offset), qgrams.begin() ); // sort the q-grams sorted_qgrams = qgrams; sorted_indices.resize( n_queries ); thrust::copy( thrust::make_counting_iterator<uint32>(genome_offset), thrust::make_counting_iterator<uint32>(genome_offset) + n_queries, sorted_indices.begin() ); thrust::sort_by_key( sorted_qgrams.begin(), sorted_qgrams.end(), sorted_indices.begin() ); } // build a q-gram set-index from a string-set // template <typename string_set_type> void qgram_set_index_build( const uint32 Q, const uint32 seed_interval, const string_set_type string_set, QGramSetIndexDevice& qgram_index) { log_verbose(stderr, " building q-gram set-index... started\n"); Timer timer; timer.start(); // build the q-gram set index qgram_index.build( Q, // q-gram size 2u, // implicitly convert N to A string_set, uniform_seeds_functor<>( Q, seed_interval ), 12u ); cudaDeviceSynchronize(); timer.stop(); const float time = timer.seconds(); log_verbose(stderr, " building q-gram set-index... 
done\n"); log_verbose(stderr, " indexed q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_qgrams )); log_verbose(stderr, " unique q-grams : %6.2f M q-grams\n", 1.0e-6f * float( qgram_index.n_unique_qgrams )); log_verbose(stderr, " throughput : %5.1f M q-grams/s\n", 1.0e-6f * float( qgram_index.n_qgrams ) / time); log_verbose(stderr, " memory usage : %5.1f MB\n", float( qgram_index.used_device_memory() ) / float(1024*1024) ); } // perform q-gram index mapping // template <typename qgram_index_type, typename qgram_filter_type, typename genome_string> void map( qgram_index_type& qgram_index, qgram_filter_type& qgram_filter, const uint32 merge_intv, const io::SequenceDataDevice& reads, const uint32 n_queries, const uint32 genome_len, const uint32 genome_offset, const genome_string genome, nvbio::vector<device_tag,int16>& best_scores, Stats& stats) { typedef typename qgram_index_type::system_tag system_tag; // prepare some vectors to store the query qgrams nvbio::vector<system_tag,uint64> qgrams( n_queries ); nvbio::vector<system_tag,uint64> sorted_qgrams( n_queries ); nvbio::vector<system_tag,uint32> sorted_indices( n_queries ); const uint32 Q = qgram_index.Q; Timer timer; timer.start(); build_qgrams( Q, genome_len, genome_offset, genome, n_queries, qgrams, sorted_qgrams, sorted_indices ); cudaDeviceSynchronize(); timer.stop(); const float extract_time = timer.seconds(); stats.queries += n_queries; stats.extract_time += extract_time; // // search the sorted query q-grams with a q-gram filter // const uint32 batch_size = 32*1024*1024; typedef typename qgram_filter_type::hit_type hit_type; typedef typename qgram_filter_type::diagonal_type diagonal_type; // prepare storage for the output hits nvbio::vector<system_tag,hit_type> hits( batch_size ); nvbio::vector<system_tag,diagonal_type> merged_hits( batch_size ); nvbio::vector<system_tag,uint16> merged_counts( batch_size ); nvbio::vector<system_tag,int16> scores( batch_size ); nvbio::vector<system_tag,uint32> out_reads( batch_size ); nvbio::vector<system_tag,int16> out_scores( batch_size ); nvbio::vector<system_tag,uint8> temp_storage; timer.start(); // first step: rank the query q-grams const uint64 n_hits = qgram_filter.rank( qgram_index, n_queries, nvbio::raw_pointer( sorted_qgrams ), nvbio::raw_pointer( sorted_indices ) ); cudaDeviceSynchronize(); timer.stop(); stats.rank_time += timer.seconds(); stats.occurrences += n_hits; nvbio::vector<device_tag, aln::BestSink<int16> > sinks( batch_size ); nvbio::vector<device_tag,string_infix_coord_type> genome_infix_coords( batch_size ); nvbio::vector<device_tag,string_infix_coord_type> read_infix_coords( batch_size ); const static uint32 BAND_LEN = 31; // loop through large batches of hits and locate & merge them for (uint64 hits_begin = 0; hits_begin < n_hits; hits_begin += batch_size) { typedef io::SequenceDataAccess<DNA_N> read_access_type; typedef read_access_type::sequence_string_set_type read_string_set_type; typedef read_access_type::sequence_stream_type read_stream; // build an access pointer to the sequence data const read_access_type reads_access( reads ); const uint64 hits_end = nvbio::min( hits_begin + batch_size, n_hits ); timer.start(); qgram_filter.locate( hits_begin, hits_end, hits.begin() ); const uint32 n_merged = qgram_filter.merge( merge_intv, hits_end - hits_begin, hits.begin(), merged_hits.begin(), merged_counts.begin() ); cudaDeviceSynchronize(); timer.stop(); stats.locate_time += timer.seconds(); stats.merged += n_merged; timer.start(); // build the set of read infixes 
thrust::transform( merged_hits.begin(), merged_hits.begin() + hits_end - hits_begin, read_infix_coords.begin(), read_infixes( nvbio::plain_view( reads ) ) ); // build the set of genome infixes thrust::transform( merged_hits.begin(), merged_hits.begin() + hits_end - hits_begin, genome_infix_coords.begin(), genome_infixes<BAND_LEN>( genome_len, nvbio::plain_view( reads ) ) ); typedef nvbio::vector<device_tag,string_infix_coord_type>::const_iterator infix_iterator; // build a view of the reads const SparseStringSet<read_stream,infix_iterator> read_infix_set( hits_end - hits_begin, reads_access.sequence_stream(), read_infix_coords.begin() ); const SparseStringSet<genome_string,infix_iterator> genome_infix_set( hits_end - hits_begin, genome, genome_infix_coords.begin() ); typedef aln::MyersTag<5u> myers_dna5_tag; aln::batch_banded_alignment_score<BAND_LEN>( aln::make_edit_distance_aligner<aln::SEMI_GLOBAL, myers_dna5_tag>(), read_infix_set, genome_infix_set, sinks.begin(), aln::DeviceThreadScheduler(), reads.max_sequence_len(), reads.max_sequence_len() + BAND_LEN ); cudaDeviceSynchronize(); timer.stop(); stats.align_time += timer.seconds(); // compute the best score for each read in this batch; // note that we divide the string-id by 2 to merge results coming from the forward // and reverse-complemented strands cuda::reduce_by_key( n_merged, thrust::make_transform_iterator( merged_hits.begin(), make_composition_functor( divide_by_two(), component_functor<diagonal_type>( 1u ) ) ), // take the second component divided by 2 thrust::make_transform_iterator( sinks.begin(), sink_score() ), out_reads.begin(), out_scores.begin(), thrust::maximum<int16>(), temp_storage ); // and keep track of the global best update_scores( n_merged, nvbio::plain_view( out_reads ), nvbio::plain_view( out_scores ), nvbio::plain_view( best_scores ) ); } } // main test entry point // int main(int argc, char* argv[]) { // // perform some basic option parsing // const uint32 batch_reads = 1*1024*1024; const uint32 batch_bps = 100*1024*1024; const uint32 queries_batch = 16*1024*1024; const char* reads = argv[argc-1]; const char* index = argv[argc-2]; uint32 Q = 20; uint32 Q_intv = 10; uint32 merge_intv = 16; uint32 max_reads = uint32(-1); int16 score_threshold = -20; for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-q" ) == 0) { Q = uint32( atoi( argv[++i] ) ); Q_intv = uint32( atoi( argv[++i] ) ); } if (strcmp( argv[i], "-m" ) == 0) merge_intv = uint32( atoi( argv[++i] ) ); else if (strcmp( argv[i], "-max-reads" ) == 0) max_reads = uint32( atoi( argv[++i] ) ); else if (strcmp( argv[i], "-t" ) == 0) score_threshold = int16( atoi( argv[++i] ) ); } log_info(stderr, "qmap... started\n"); // load a genome archive... log_visible(stderr, " loading reference index ... started\n"); log_info(stderr, " file: \"%s\"\n", index); io::SequenceDataHost h_genome_data; if (io::load_sequence_file( DNA, &h_genome_data, index ) == false) { log_error(stderr, " failed loading index \"%s\"\n", index); return 1u; } log_visible(stderr, " loading reference index ... 
done\n"); log_verbose(stderr, " sequences : %u\n", h_genome_data.size() ); log_verbose(stderr, " bps : %u\n", h_genome_data.bps() ); log_verbose(stderr, " avg bps : %u (min: %u, max: %u)\n", h_genome_data.avg_sequence_len(), h_genome_data.min_sequence_len(), h_genome_data.max_sequence_len() ); // build its device version const io::SequenceDataDevice d_genome_data( h_genome_data ); const io::SequenceDataAccess<DNA> d_genome_access( d_genome_data ); typedef io::SequenceDataAccess<DNA>::sequence_stream_type genome_type; // fetch the genome string const uint32 genome_len = d_genome_data.bps(); const genome_type d_genome( d_genome_access.sequence_stream() ); // open a read file log_info(stderr, " opening reads file... started\n"); SharedPointer<io::SequenceDataStream> read_data_file( io::open_sequence_file( reads, io::Phred33, 2*max_reads, uint32(-1), io::SequenceEncoding( io::FORWARD | io::REVERSE_COMPLEMENT ) ) ); // check whether the file opened correctly if (read_data_file == NULL || read_data_file->is_ok() == false) { log_error(stderr, " failed opening file \"%s\"\n", reads); return 1u; } log_info(stderr, " opening reads file... done\n"); // keep stats Stats stats; io::SequenceDataHost h_read_data; while (1) { // load a batch of reads if (io::next( DNA_N, &h_read_data, read_data_file.get(), batch_reads, batch_bps ) == 0) break; log_info(stderr, " loading reads... started\n"); // copy it to the device const io::SequenceDataDevice d_read_data( h_read_data ); const io::SequenceDataAccess<DNA_N> d_read_access( d_read_data ); const uint32 n_reads = d_read_data.size() / 2; log_info(stderr, " loading reads... done\n"); log_info(stderr, " %u reads\n", n_reads); // prepare some typedefs for the involved string-sets and infixes typedef io::SequenceDataAccess<DNA_N> read_access_type; // the read view type typedef read_access_type::sequence_string_set_type string_set_type; // the read string-set typedef string_set_infix_coord_type infix_coord_type; // the infix coordinate type, for string-sets typedef nvbio::vector<device_tag,infix_coord_type> infix_vector_type; // the device vector type for infix coordinates typedef InfixSet<string_set_type, const string_set_infix_coord_type*> seed_set_type; // the infix-set type for representing seeds // fetch the actual read string-set const string_set_type d_read_string_set = d_read_access.sequence_string_set(); // build the q-gram index QGramSetIndexDevice qgram_index; qgram_set_index_build( Q, Q_intv, d_read_string_set, qgram_index ); typedef QGramFilterDevice<QGramSetIndexDevice,const uint64*,const uint32*> qgram_filter_type; qgram_filter_type qgram_filter; float time = 0.0f; const int16 worst_score = Field_traits<int16>::min(); nvbio::vector<device_tag,int16> best_scores( n_reads, worst_score ); nvbio::vector<device_tag,uint8> temp_storage; // stream through the genome for (uint32 genome_begin = 0; genome_begin < genome_len; genome_begin += queries_batch) { const uint32 genome_end = nvbio::min( genome_begin + queries_batch, genome_len ); Timer timer; timer.start(); map( qgram_index, qgram_filter, merge_intv, d_read_data, genome_end - genome_begin, genome_len, genome_begin, d_genome, best_scores, stats ); cudaDeviceSynchronize(); timer.stop(); time += timer.seconds(); const float genome_ratio = float( genome_end ) / float( genome_len ); log_verbose(stderr, "\r aligned %5.2f%% of genome (%6.2f K reads/s)", 100.0f * genome_ratio, (1.0e-3f * n_reads) * genome_ratio / time ); } log_verbose_cont(stderr, "\n"); // accumulate the number of aligned reads stats.reads += 
n_reads; stats.time += time; // count how many reads have a score >= score_threshold const uint32 n_aligned = cuda::reduce( n_reads, thrust::make_transform_iterator( best_scores.begin(), above_threshold( score_threshold ) ), thrust::plus<uint32>(), temp_storage ); stats.aligned += n_aligned; log_info(stderr, " aligned %6.2f %% reads (%6.2f K reads/s)\n", 100.0f * float( stats.aligned ) / float( stats.reads ), (1.0e-3f * float( stats.reads )) / stats.time); log_verbose(stderr, " breakdown:\n"); log_verbose(stderr, " extract throughput : %.2f B q-grams/s\n", (1.0e-9f * float( stats.queries )) / stats.extract_time); log_verbose(stderr, " rank throughput : %6.2f K reads/s\n", (1.0e-3f * float( stats.reads )) / stats.rank_time); log_verbose(stderr, " : %6.2f B seeds/s\n", (1.0e-9f * float( stats.queries )) / stats.rank_time); log_verbose(stderr, " locate throughput : %6.2f K reads/s\n", (1.0e-3f * float( stats.reads )) / stats.locate_time); log_verbose(stderr, " align throughput : %6.2f K reads/s\n", (1.0e-3f * float( stats.reads )) / stats.align_time); log_verbose(stderr, " : %6.2f M hits/s\n", (1.0e-6f * float( stats.merged )) / stats.align_time); log_verbose(stderr, " occurrences : %.3f B\n", 1.0e-9f * float( stats.occurrences ) ); log_verbose(stderr, " merged occurrences : %.3f B (%.1f %%)\n", 1.0e-9f * float( stats.merged ), 100.0f * float(stats.merged)/float(stats.occurrences)); } log_info(stderr, "qmap... done\n"); return 0; }
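// Editor's note (hypothetical sketch, not part of the original program): main()
// counts the reads whose best score reaches score_threshold with cuda::reduce over a
// transform iterator built from above_threshold(score_threshold). That functor comes
// from util.h and is not shown here; a minimal version consistent with this usage
// would map each score to 1 or 0 so that the plus-reduction yields a count. The
// definition below is an assumption for illustration only.
struct above_threshold_sketch
{
    int16 threshold;

    __host__ __device__
    above_threshold_sketch(const int16 t) : threshold(t) {}

    // return 1 for reads that clear the threshold, 0 otherwise
    __host__ __device__
    uint32 operator() (const int16 score) const { return score >= threshold ? 1u : 0u; }
};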
#include "k2/csrc/array_ops.h" #include "k2/csrc/context.h" #include "k2/csrc/device_guard.h" #include "k2/csrc/fsa_algo.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/thread_pool.h" namespace k2 { class Connector { public: /** Connector object. You should call Connect() after constructing it. Please see Connect() declaration in header for high-level overview of the algorithm. @param [in] fsas A vector of FSAs; must have 3 axes. */ explicit Connector(FsaVec &fsas) : c_(fsas.Context()), fsas_(fsas) { K2_CHECK_EQ(fsas_.NumAxes(), 3); int32_t num_states = fsas_.shape.TotSize(1); accessible_ = Array1<char>(c_, num_states, 0); coaccessible_ = Array1<char>(c_, num_states, 0); } /* Computes the next batch of states @param [in] cur_states Ragged array with 2 axes, with the shape `[fsa][state]`, containing state-indexes (idx01) into fsas_. @return Returns the states which, after processing. */ std::unique_ptr<Ragged<int32_t>> GetNextBatch(Ragged<int32_t> &cur_states) { NVTX_RANGE(K2_FUNC); // Process arcs leaving all states in `cur_states` // First figure out how many arcs leave each state. // And set accessiblility for each state Array1<int32_t> num_arcs_per_state(c_, cur_states.NumElements() + 1); const int32_t *fsas_row_splits2_data = fsas_.RowSplits(2).Data(), *states_data = cur_states.values.Data(); int32_t *num_arcs_per_state_data = num_arcs_per_state.Data(); char *accessible_data = accessible_.Data(); K2_EVAL( c_, cur_states.NumElements(), lambda_set_arcs_and_accessible_per_state, (int32_t states_idx01)->void { int32_t idx01 = states_data[states_idx01], num_arcs = fsas_row_splits2_data[idx01 + 1] - fsas_row_splits2_data[idx01]; num_arcs_per_state_data[states_idx01] = num_arcs; // Set accessibility accessible_data[idx01] = 1; }); ExclusiveSum(num_arcs_per_state, &num_arcs_per_state); // arcs_shape `[fsa][state][arc]` RaggedShape arcs_shape = ComposeRaggedShapes( cur_states.shape, RaggedShape2(&num_arcs_per_state, nullptr, -1)); // We'll be figuring out the states that these arcs leading to is not // accessible yet (i.e. for which state_renumbering.Keep[i] == true). int32_t total_states = fsas_.shape.TotSize(1); Renumbering state_renumbering(c_, total_states, true); const int32_t *arcs_row_ids2_data = arcs_shape.RowIds(2).Data(), *arcs_row_splits2_data = arcs_shape.RowSplits(2).Data(), *dest_states_data = dest_states_.values.Data(); char *keep_state_data = state_renumbering.Keep().Data(); K2_EVAL( c_, arcs_shape.NumElements(), lambda_set_state_renumbering, (int32_t arcs_idx012)->void { // note: the prefix `arcs_` means it is an idxXXX w.r.t. `arcs_shape`. // the prefix `fsas_` means the variable is an idxXXX w.r.t. `fsas_`. int32_t arcs_idx01 = arcs_row_ids2_data[arcs_idx012], arcs_idx01x = arcs_row_splits2_data[arcs_idx01], arcs_idx2 = arcs_idx012 - arcs_idx01x, fsas_idx01 = states_data[arcs_idx01], // a state index fsas_idx01x = fsas_row_splits2_data[fsas_idx01], fsas_idx012 = fsas_idx01x + arcs_idx2, fsas_dest_state_idx01 = dest_states_data[fsas_idx012]; // 1. If this arc is a self-loop, just ignore this arc as we won't // processe the dest_state (current state) again. // 2. If the state this arc pointing to is accessible, skip it. if (fsas_dest_state_idx01 == fsas_idx01 || accessible_data[fsas_dest_state_idx01]) { return; } keep_state_data[fsas_dest_state_idx01] = 1; }); Array1<int32_t> new2old_map = state_renumbering.New2Old(); if (new2old_map.Dim() == 0) { // There are no new states. This means we terminated. 
return nullptr; } int32_t num_states = new2old_map.Dim(); Array1<int32_t> temp(c_, 2 * num_states); // `new_states` will contain state-ids which are idx01's into `fsas_`. Array1<int32_t> new_states = temp.Arange(0, num_states); // `ans_row_ids` will map to FSA index Array1<int32_t> ans_row_ids = temp.Arange(num_states, 2 * num_states); const int32_t *new2old_map_data = new2old_map.Data(), *fsas_row_ids1_data = fsas_.RowIds(1).Data(); int32_t *ans_row_ids_data = ans_row_ids.Data(), *new_states_data = new_states.Data(); K2_EVAL( c_, num_states, lambda_set_new_states_and_row_ids, (int32_t state_idx)->void { int32_t state_idx01 = new2old_map_data[state_idx], fsa_idx0 = fsas_row_ids1_data[state_idx01]; new_states_data[state_idx] = state_idx01; ans_row_ids_data[state_idx] = fsa_idx0; }); int32_t num_fsas = fsas_.Dim0(); Array1<int32_t> ans_row_splits(c_, num_fsas + 1); RowIdsToRowSplits(ans_row_ids, &ans_row_splits); auto ans = std::make_unique<Ragged<int32_t>>( RaggedShape2(&ans_row_splits, &ans_row_ids, new_states.Dim()), new_states); return ans; } /* Computes the next batch of states in reverse order @param [in] cur_states Ragged array with 2 axes, with the shape of `[fsa][state]`, containing state-indexes (idx01) into fsas_. @return Returns the states which, after processing. */ std::unique_ptr<Ragged<int32_t>> GetNextBatchBackward( Ragged<int32_t> &cur_states) { NVTX_RANGE(K2_FUNC); // Process arcs entering all states in `cur_states` // First figure out how many arcs enter each state. // And set coaccessibility for each state Array1<int32_t> num_arcs_per_state(c_, cur_states.NumElements() + 1); int32_t *num_arcs_per_state_data = num_arcs_per_state.Data(); const int32_t *incoming_arcs_row_splits2_data = incoming_arcs_.RowSplits(2).Data(), *states_data = cur_states.values.Data(); char *coaccessible_data = coaccessible_.Data(); K2_EVAL( c_, cur_states.NumElements(), lambda_set_arcs_and_coaccessible_per_state, (int32_t states_idx01)->void { int32_t idx01 = states_data[states_idx01], num_arcs = incoming_arcs_row_splits2_data[idx01 + 1] - incoming_arcs_row_splits2_data[idx01]; num_arcs_per_state_data[states_idx01] = num_arcs; // Set coaccessiblility coaccessible_data[idx01] = 1; }); ExclusiveSum(num_arcs_per_state, &num_arcs_per_state); // arcs_shape `[fsa][state][arc]` RaggedShape arcs_shape = ComposeRaggedShapes( cur_states.shape, RaggedShape2(&num_arcs_per_state, nullptr, -1)); // We'll be figuring out the states that these arcs coming from is not // coaccessible yet (i.e. for which state_renumbering.Keep[i] == true). int32_t total_states = fsas_.shape.TotSize(1); Renumbering state_renumbering(c_, total_states, true); const int32_t *arcs_row_ids2_data = arcs_shape.RowIds(2).Data(), *arcs_row_splits2_data = arcs_shape.RowSplits(2).Data(), *fsas_row_splits1_data = fsas_.RowSplits(1).Data(), *fsas_row_ids1_data = fsas_.RowIds(1).Data(), *incoming_arcs_data = incoming_arcs_.values.Data(); const Arc *fsas_data = fsas_.values.Data(); char *keep_state_data = state_renumbering.Keep().Data(); K2_EVAL( c_, arcs_shape.NumElements(), lambda_set_arc_renumbering, (int32_t arcs_idx012)->void { // note: the prefix `arcs_` means it is an idxXXX w.r.t. `arcs_shape`. // the prefix `fsas_` means the variable is an idxXXX w.r.t. `fsas_`. 
int32_t arcs_idx01 = arcs_row_ids2_data[arcs_idx012], arcs_idx01x = arcs_row_splits2_data[arcs_idx01], arcs_idx2 = arcs_idx012 - arcs_idx01x, fsas_idx01 = states_data[arcs_idx01], // a state index fsas_idx0 = fsas_row_ids1_data[fsas_idx01], fsas_idx01x = incoming_arcs_row_splits2_data[fsas_idx01], fsas_idx012 = fsas_idx01x + arcs_idx2, fsas_src_state_idx1 = fsas_data[incoming_arcs_data[fsas_idx012]].src_state, fsas_src_state_idx0x = fsas_row_splits1_data[fsas_idx0], fsas_src_state_idx01 = fsas_src_state_idx0x + fsas_src_state_idx1; // 1. If this arc is a self-loop, just ignore this arc as we won't // processe the src_state (current state) again. // 2. If the src state entering this arc is coaccessible, skip it. if (fsas_src_state_idx01 == fsas_idx01 || coaccessible_data[fsas_src_state_idx01]) { keep_state_data[fsas_src_state_idx01] = 0; return; } keep_state_data[fsas_src_state_idx01] = 1; }); Array1<int32_t> new2old_map = state_renumbering.New2Old(); if (new2old_map.Dim() == 0) { // There are no new states. This means we terminated. return nullptr; } int32_t num_states = new2old_map.Dim(); Array1<int32_t> temp(c_, 2 * num_states); // `new_states` will contain state-ids which are idx01's into `fsas_`. Array1<int32_t> new_states = temp.Arange(0, num_states); // `ans_row_ids` will map to FSA index Array1<int32_t> ans_row_ids = temp.Arange(num_states, 2 * num_states); const int32_t *new2old_map_data = new2old_map.Data(); int32_t *ans_row_ids_data = ans_row_ids.Data(), *new_states_data = new_states.Data(); K2_EVAL( c_, num_states, lambda_set_new_states_and_row_ids, (int32_t state_idx)->void { int32_t state_idx01 = new2old_map_data[state_idx], fsa_idx0 = fsas_row_ids1_data[state_idx01]; ans_row_ids_data[state_idx] = fsa_idx0; new_states_data[state_idx] = state_idx01; }); int32_t num_fsas = fsas_.Dim0(); Array1<int32_t> ans_row_splits(c_, num_fsas + 1); RowIdsToRowSplits(ans_row_ids, &ans_row_splits); auto ans = std::make_unique<Ragged<int32_t>>( RaggedShape2(&ans_row_splits, &ans_row_ids, num_states), new_states); return ans; } /* Returns the start batch of states. This will include all start-states that existed in the original FSAs. */ std::unique_ptr<Ragged<int32_t>> GetStartBatch() { NVTX_RANGE(K2_FUNC); int32_t num_fsas = fsas_.Dim0(); const int32_t *fsas_row_splits1_data = fsas_.RowSplits(1).Data(); Array1<int32_t> has_start_state(c_, num_fsas + 1); int32_t *has_start_state_data = has_start_state.Data(); K2_EVAL( c_, num_fsas, lambda_set_has_start_state, (int32_t i)->void { int32_t split = fsas_row_splits1_data[i], next_split = fsas_row_splits1_data[i + 1]; has_start_state_data[i] = (next_split > split); }); ExclusiveSum(has_start_state, &has_start_state); int32_t n = has_start_state[num_fsas]; auto ans = std::make_unique<Ragged<int32_t>>( RaggedShape2(&has_start_state, nullptr, n), Array1<int32_t>(c_, n)); int32_t *ans_data = ans->values.Data(); const int32_t *ans_row_ids1_data = ans->RowIds(1).Data(); K2_EVAL( c_, n, lambda_set_start_state, (int32_t i)->void { int32_t fsa_idx0 = ans_row_ids1_data[i], start_state = fsas_row_splits1_data[fsa_idx0]; // If the following fails, it likely means an input FSA was invalid // (e.g. had exactly one state, which is not allowed). Either that, // or a code error. K2_DCHECK_LT(start_state, fsas_row_splits1_data[fsa_idx0 + 1]); ans_data[i] = start_state; }); return ans; } /* Returns the final batch of states. This will include all final-states that existed in the original FSAs. 
*/ std::unique_ptr<Ragged<int32_t>> GetFinalBatch() { NVTX_RANGE(K2_FUNC); int32_t num_fsas = fsas_.Dim0(); const int32_t *fsas_row_splits1_data = fsas_.RowSplits(1).Data(); Array1<int32_t> has_final_state(c_, num_fsas + 1); int32_t *has_final_state_data = has_final_state.Data(); K2_EVAL( c_, num_fsas, lambda_set_has_final_state, (int32_t i)->void { int32_t split = fsas_row_splits1_data[i], next_split = fsas_row_splits1_data[i + 1]; has_final_state_data[i] = (next_split > split); }); ExclusiveSum(has_final_state, &has_final_state); int32_t n = has_final_state[num_fsas]; auto ans = std::make_unique<Ragged<int32_t>>( RaggedShape2(&has_final_state, nullptr, n), Array1<int32_t>(c_, n)); int32_t *ans_data = ans->values.Data(); const int32_t *ans_row_ids1_data = ans->RowIds(1).Data(); K2_EVAL( c_, n, lambda_set_final_state, (int32_t i)->void { int32_t fsa_idx0 = ans_row_ids1_data[i], final_state = fsas_row_splits1_data[fsa_idx0 + 1] - 1; // If the following fails, it likely means an input FSA was invalid // (e.g. had exactly one state, which is not allowed). Either that, // or a code error. K2_DCHECK_GT(final_state, fsas_row_splits1_data[fsa_idx0]); ans_data[i] = final_state; }); return ans; } /* Traverse the fsa from start states to mark the accessible states. */ static void ForwardPassStatic(Connector* c) { // WARNING: this is run in a separate thread, so we have // to reset its default device. Otherwise, it will throw later // if the main thread is using a different device. DeviceGuard guard(c->c_); auto iter = c->GetStartBatch(); while (iter != nullptr) iter = c->GetNextBatch(*iter); } /* Traverse the fsa in reverse order (from final states) to mark the coaccessible states. */ static void BackwardPassStatic(Connector* c) { // WARNING: this is run in a separate thread, so we have // to reset its default device. Otherwise, it will throw later // if the main thread is using a different device. DeviceGuard guard(c->c_); auto riter = c->GetFinalBatch(); while (riter != nullptr) riter = c->GetNextBatchBackward(*riter); } /* Does the main work of connecting and returns the resulting FSAs. @param [out] arc_map if non-NULL, the map from (arcs in output) to (corresponding arcs in input) is written to here. @return Returns the connected FsaVec. 
*/ FsaVec Connect(Array1<int32_t> *arc_map) { NVTX_RANGE(K2_FUNC); Array1<int32_t> dest_states_idx01 = GetDestStates(fsas_, true); dest_states_ = Ragged<int32_t>(fsas_.shape, dest_states_idx01); incoming_arcs_ = GetIncomingArcs(fsas_, dest_states_idx01); ThreadPool* pool = GetThreadPool(); // Mark accessible states pool->SubmitTask([this]() { ForwardPassStatic(this); }); // Mark coaccessible states pool->SubmitTask([this]() { BackwardPassStatic(this); }); pool->WaitAllTasksFinished(); // Get remaining states and construct row_ids1/row_splits1 int32_t num_states = fsas_.shape.TotSize(1); const char *accessible_data = accessible_.Data(), *coaccessible_data = coaccessible_.Data(); Renumbering states_renumbering(c_, num_states); char* states_renumbering_data = states_renumbering.Keep().Data(); K2_EVAL( c_, num_states, lambda_set_states_renumbering, (int32_t state_idx01)->void { if (accessible_data[state_idx01] && coaccessible_data[state_idx01]) states_renumbering_data[state_idx01] = 1; else states_renumbering_data[state_idx01] = 0; }); Array1<int32_t> new2old_map_states = states_renumbering.New2Old(); Array1<int32_t> old2new_map_states = states_renumbering.Old2New(); Array1<int32_t> new_row_ids1 = fsas_.RowIds(1)[new2old_map_states]; Array1<int32_t> new_row_splits1(c_, fsas_.Dim0() + 1); RowIdsToRowSplits(new_row_ids1, &new_row_splits1); // Get remaining arcs int32_t num_arcs = fsas_.NumElements(); Renumbering arcs_renumbering(c_, num_arcs); char* arcs_renumbering_data = arcs_renumbering.Keep().Data(); const Arc *fsas_data = fsas_.values.Data(); const int32_t *fsas_row_ids1_data = fsas_.RowIds(1).Data(), *fsas_row_ids2_data = fsas_.RowIds(2).Data(), *fsas_row_splits1_data = fsas_.RowSplits(1).Data(); K2_EVAL( c_, num_arcs, lambda_set_arcs_renumbering, (int32_t arc_idx012)->void { Arc arc = fsas_data[arc_idx012]; int32_t src_state_idx01 = fsas_row_ids2_data[arc_idx012], dest_state_idx01 = arc.dest_state - arc.src_state + src_state_idx01; if (accessible_data[src_state_idx01] && coaccessible_data[src_state_idx01] && accessible_data[dest_state_idx01] && coaccessible_data[dest_state_idx01]) arcs_renumbering_data[arc_idx012] = 1; else arcs_renumbering_data[arc_idx012] = 0; }); Array1<int32_t> new2old_map_arcs = arcs_renumbering.New2Old(); int32_t remaining_arcs_num = new2old_map_arcs.Dim(); // Construct row_ids2/row_splits2 Array1<int32_t> new_row_ids2(c_, remaining_arcs_num); int32_t *new_row_ids2_data = new_row_ids2.Data(); const int32_t *new2old_map_arcs_data = new2old_map_arcs.Data(), *old2new_map_states_data = old2new_map_states.Data(); K2_EVAL( c_, remaining_arcs_num, lambda_set_new_row_ids2, (int32_t arc_idx012)->void { int32_t idx012 = new2old_map_arcs_data[arc_idx012], state_idx01 = fsas_row_ids2_data[idx012]; new_row_ids2_data[arc_idx012] = old2new_map_states_data[state_idx01]; }); Array1<int32_t> new_row_splits2(c_, new2old_map_states.Dim() + 1); RowIdsToRowSplits(new_row_ids2, &new_row_splits2); // Update arcs to the renumbered states const int32_t *new_row_ids1_data = new_row_ids1.Data(), *new_row_splits1_data = new_row_splits1.Data(); Array1<Arc> remaining_arcs(c_, remaining_arcs_num); Arc *remaining_arcs_data = remaining_arcs.Data(); K2_EVAL( c_, remaining_arcs_num, lambda_set_arcs, (int32_t arc_idx012)->void { // note: the prefix `old_` means it is an idxXXX w.r.t. the origin // fsas (`fsas_`). the prefix `new_` means the variable is an idxXXX // w.r.t. the result fsas. 
int32_t old_idx012 = new2old_map_arcs_data[arc_idx012], old_idx01 = fsas_row_ids2_data[old_idx012], old_idx0 = fsas_row_ids1_data[old_idx01], old_idx0x = fsas_row_splits1_data[old_idx0]; Arc arc = fsas_data[old_idx012]; int32_t old_src_state_idx01 = old_idx0x + arc.src_state, new_src_state_idx01 = old2new_map_states_data[old_src_state_idx01], new_src_fsa_idx0 = new_row_ids1_data[new_src_state_idx01], new_src_state_idx0x = new_row_splits1_data[new_src_fsa_idx0], new_src_state_idx1 = new_src_state_idx01 - new_src_state_idx0x, old_dest_state_idx01 = old_idx0x + arc.dest_state, new_dest_state_idx01 = old2new_map_states_data[old_dest_state_idx01], new_dest_fsa_idx0 = new_row_ids1_data[new_dest_state_idx01], new_dest_state_idx0x = new_row_splits1_data[new_dest_fsa_idx0], new_dest_state_idx1 = new_dest_state_idx01 - new_dest_state_idx0x; Arc new_arc; new_arc.src_state = new_src_state_idx1; new_arc.dest_state = new_dest_state_idx1; new_arc.score = arc.score; new_arc.label = arc.label; remaining_arcs_data[arc_idx012] = new_arc; }); if (arc_map != nullptr) *arc_map = std::move(new2old_map_arcs); return Ragged<Arc>( RaggedShape3(&new_row_splits1, &new_row_ids1, new2old_map_states.Dim(), &new_row_splits2, &new_row_ids2, remaining_arcs_num), remaining_arcs); } ContextPtr c_; FsaVec &fsas_; // For each arc in fsas_ (with same structure as fsas_), dest-state // of that arc as an idx01. Ragged<int32_t> dest_states_; // For each state in fsas_ (with same structure as fsas_), incoming-arc // of that state as an idx012. Ragged<int32_t> incoming_arcs_; // With the Dim() the same as num-states, to mark the state (as an idx01) to // be accessible or not. Array1<char> accessible_; // With the Dim() the same as num-states, to mark the state (as an idx01) to // be coaccessible or not. Array1<char> coaccessible_; }; void Connect(FsaOrVec &src, FsaOrVec *dest, Array1<int32_t> *arc_map /* = nullptr */) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(src.NumAxes(), 2); K2_CHECK_LE(src.NumAxes(), 3); if (src.NumAxes() == 2) { // Turn single Fsa into FsaVec. FsaVec src_vec = FsaToFsaVec(src), dest_vec; // Recurse.. Connect(src_vec, &dest_vec, arc_map); *dest = GetFsaVecElement(dest_vec, 0); return; } Connector connector(src); *dest = connector.Connect(arc_map); } } // namespace k2
#include <Core/CUDA/CUDAPCISPHSolver3.hpp> #include <Core/CUDA/CUDASPHKernels3.hpp> #include <thrust/extrema.h> using namespace CubbyFlow; namespace { class InitializeBuffersAndComputeForces { public: inline InitializeBuffersAndComputeForces( float m, float h, float4 gravity, float viscosity, const uint32_t* neighborStarts, const uint32_t* neighborEnds, const uint32_t* neighborLists, const float4* positions, const float4* velocities, float4* smoothedVelocities, float4* forces, const float* densities, float* pressures, float4* pressureForces, float* densityErrors, float* densitiesPredicted) : m_mass(m), m_massSquared(m * m), m_gravity(gravity), m_viscosity(viscosity), m_spikyKernel(h), m_neighborStarts(neighborStarts), m_neighborEnds(neighborEnds), m_neighborLists(neighborLists), m_positions(positions), m_velocities(velocities), m_smoothedVelocities(smoothedVelocities), m_forces(forces), m_densities(densities), m_pressures(pressures), m_pressureForces(pressureForces), m_densityErrors(densityErrors), m_densitiesPredicted(densitiesPredicted) { // Do nothing } template <typename Index> inline CUBBYFLOW_CUDA_DEVICE void operator()(Index i) { // Initialize buffers m_pressures[i] = 0.0f; m_pressureForces[i] = make_float4(0, 0, 0, 0); m_densityErrors[i] = 0.0f; m_densitiesPredicted[i] = m_densities[i]; // Compute forces uint32_t ns = m_neighborStarts[i]; uint32_t ne = m_neighborEnds[i]; float4 x_i = m_positions[i]; float4 v_i = m_velocities[i]; float d_i = m_densities[i]; float4 f = m_gravity; float w_i = m_mass / d_i * m_spikyKernel(0.0f); float weightSum = w_i; float4 smoothedVelocity = w_i * v_i; for (uint32_t jj = ns; jj < ne; ++jj) { uint32_t j = m_neighborLists[jj]; float4 r = m_positions[j] - x_i; float dist = Length(r); if (dist > 0.0f) { float4 v_j = m_velocities[j]; float d_j = m_densities[j]; // Viscosity force f += m_viscosity * m_massSquared * (v_j - v_i) / d_j * m_spikyKernel.SecondDerivative(dist); // Pseudo viscosity float w_j = m_mass / d_j * m_spikyKernel(dist); weightSum += w_j; smoothedVelocity += w_j * v_j; } } m_forces[i] = f; smoothedVelocity /= weightSum; m_smoothedVelocities[i] = smoothedVelocity; } private: float m_mass; float m_massSquared; float4 m_gravity; float m_viscosity; CUDASPHSpikyKernel3 m_spikyKernel; const uint32_t* m_neighborStarts; const uint32_t* m_neighborEnds; const uint32_t* m_neighborLists; const float4* m_positions; const float4* m_velocities; float4* m_smoothedVelocities; float4* m_forces; const float* m_densities; float* m_pressures; float4* m_pressureForces; float* m_densitiesPredicted; float* m_densityErrors; }; #define BND_R 0.0f class TimeIntegration { public: TimeIntegration(float dt, float m, float smoothFactor, float3 lower, float3 upper, float4* positions, float4* velocities, float4* newPositions, float4* newVelocities, float4* smoothedVelocities, float4* forces, float4* pressureForces) : m_dt(dt), m_mass(m), m_smoothFactor(smoothFactor), m_lower(lower), m_upper(upper), m_positions(positions), m_velocities(velocities), m_newPositions(newPositions), m_newVelocities(newVelocities), m_smoothedVelocities(smoothedVelocities), m_forces(forces), m_pressureForces(pressureForces) { // Do nothing } template <typename Index> inline CUBBYFLOW_CUDA_HOST_DEVICE void operator()(Index i) { float4 x = m_positions[i]; float4 v = m_velocities[i]; float4 s = m_smoothedVelocities[i]; float4 f = m_forces[i]; float4 pf = m_pressureForces[i]; v = (1.0f - m_smoothFactor) * v + m_smoothFactor * s; v += m_dt * (f + pf) / m_mass; x += m_dt * v; // TODO: Add proper 
collider support if (x.x > m_upper.x) { x.x = m_upper.x; v.x *= BND_R; } if (x.x < m_lower.x) { x.x = m_lower.x; v.x *= BND_R; } if (x.y > m_upper.y) { x.y = m_upper.y; v.y *= BND_R; } if (x.y < m_lower.y) { x.y = m_lower.y; v.y *= BND_R; } m_newPositions[i] = x; m_newVelocities[i] = v; } private: float m_dt; float m_mass; float m_smoothFactor; float3 m_lower; float3 m_upper; float4* m_positions; float4* m_velocities; float4* m_newPositions; float4* m_newVelocities; float4* m_smoothedVelocities; float4* m_forces; float4* m_pressureForces; }; class ComputeDensityError { public: inline ComputeDensityError(float m, float h, float targetDensity, float delta, float negativePressureScale, const uint32_t* neighborStarts, const uint32_t* neighborEnds, const uint32_t* neighborLists, const float4* positions, float* pressures, float* densityErrors, float* densitiesPredicted) : m_mass(m), m_targetDensity(targetDensity), m_delta(delta), m_negativePressureScale(negativePressureScale), m_neighborStarts(neighborStarts), m_neighborEnds(neighborEnds), m_neighborLists(neighborLists), m_positions(positions), m_pressures(pressures), m_densityErrors(densityErrors), m_densitiesPredicted(densitiesPredicted), m_stdKernel(h) { // Do nothing } template <typename Index> inline CUBBYFLOW_CUDA_DEVICE void operator()(Index i) { uint32_t ns = m_neighborStarts[i]; uint32_t ne = m_neighborEnds[i]; float4 x_i = m_positions[i]; float kernelSum = m_stdKernel(0.f); for (uint32_t jj = ns; jj < ne; ++jj) { uint32_t j = m_neighborLists[jj]; float4 r = m_positions[j] - x_i; float dist = Length(r); if (dist > 0.0f) { kernelSum += m_stdKernel(dist); } } float density = m_mass * kernelSum; float densityError = (density - m_targetDensity); float pressure = m_delta * densityError; if (pressure < 0.0f) { pressure *= m_negativePressureScale; densityError *= m_negativePressureScale; } m_pressures[i] += pressure; m_densitiesPredicted[i] = density; m_densityErrors[i] = densityError; } private: float m_mass; float m_targetDensity; float m_delta; float m_negativePressureScale; const uint32_t* m_neighborStarts; const uint32_t* m_neighborEnds; const uint32_t* m_neighborLists; const float4* m_positions; float* m_pressures; float* m_densitiesPredicted; float* m_densityErrors; CUDASPHStdKernel3 m_stdKernel; }; class ComputePressureForces { public: inline ComputePressureForces(float m, float h, const uint32_t* neighborStarts, const uint32_t* neighborEnds, const uint32_t* neighborLists, const float4* positions, float4* pressureForces, const float* densities, const float* pressures) : m_mass(m), m_massSquared(m * m), m_spikyKernel(h), m_neighborStarts(neighborStarts), m_neighborEnds(neighborEnds), m_neighborLists(neighborLists), m_positions(positions), m_pressureForces(pressureForces), m_densities(densities), m_pressures(pressures) { // Do nothing } template <typename Index> inline CUBBYFLOW_CUDA_HOST_DEVICE void operator()(Index i) { uint32_t ns = m_neighborStarts[i]; uint32_t ne = m_neighborEnds[i]; float4 x_i = m_positions[i]; float d_i = m_densities[i]; float p_i = m_pressures[i]; float4 f = make_float4(0, 0, 0, 0); for (uint32_t jj = ns; jj < ne; ++jj) { uint32_t j = m_neighborLists[jj]; float4 r = m_positions[j] - x_i; float dist = Length(r); if (dist > 0.0f) { float4 dir = r / dist; float d_j = m_densities[j]; float p_j = m_pressures[j]; // Pressure force f -= m_massSquared * (p_i / (d_i * d_i) + p_j / (d_j * d_j)) * m_spikyKernel.Gradient(dist, dir); } } m_pressureForces[i] = f; } private: float m_mass; float m_massSquared; 
CUDASPHSpikyKernel3 m_spikyKernel; const uint32_t* m_neighborStarts; const uint32_t* m_neighborEnds; const uint32_t* m_neighborLists; const float4* m_positions; float4* m_pressureForces; const float* m_densities; const float* m_pressures; }; } // namespace void CUDAPCISPHSolver3::OnAdvanceTimeStep(double timeStepInSeconds) { auto sph = SPHSystemData(); // Build neighbor searcher sph->BuildNeighborSearcher(); sph->BuildNeighborListsAndUpdateDensities(); auto d = sph->Densities(); auto p = sph->Pressures(); const float targetDensity = sph->TargetDensity(); size_t n = sph->NumberOfParticles(); float mass = sph->Mass(); float h = sph->KernelRadius(); auto ns = sph->NeighborStarts(); auto ne = sph->NeighborEnds(); auto nl = sph->NeighborLists(); auto x = sph->Positions(); auto v = sph->Velocities(); auto s = SmoothedVelocities(); auto f = Forces(); float dt = static_cast<float>(timeStepInSeconds); float factor = dt * PseudoViscosityCoefficient(); factor = Clamp(factor, 0.0f, 1.0f); auto xs = TempPositions(); auto vs = TempVelocities(); auto pf = PressureForces(); auto ds = TempDensities(); auto de = DensityErrors(); float delta = ComputeDelta(dt); // Initialize buffers and compute non-pressure forces thrust::for_each( thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(n), InitializeBuffersAndComputeForces( mass, h, ToFloat4(Gravity(), 0.0f), ViscosityCoefficient(), ns.data(), ne.data(), nl.data(), x.data(), v.data(), s.data(), f.data(), d.data(), p.data(), pf.data(), de.data(), ds.data())); // Prediction-correction auto lower = ToFloat3(Container().lowerCorner); auto upper = ToFloat3(Container().upperCorner); for (unsigned int k = 0; k < m_maxNumberOfIterations; ++k) { // Predict velocity / position and resolve collisions thrust::for_each(thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(n), TimeIntegration(dt, mass, 0.0f, lower, upper, x.data(), v.data(), xs.data(), vs.data(), s.data(), f.data(), pf.data())); // Compute pressure from density error thrust::for_each(thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(n), ComputeDensityError(mass, h, targetDensity, delta, NegativePressureScale(), ns.data(), ne.data(), nl.data(), xs.data(), p.data(), de.data(), ds.data())); // Compute pressure gradient force thrust::for_each( thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(n), ComputePressureForces(mass, h, ns.data(), ne.data(), nl.data(), x.data(), pf.data(), ds.data(), p.data())); } // Accumulate pressure force and time-integrate thrust::for_each( thrust::counting_iterator<size_t>(0), thrust::counting_iterator<size_t>(n), TimeIntegration(dt, mass, factor, lower, upper, x.data(), v.data(), x.data(), v.data(), s.data(), f.data(), pf.data())); }
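// A self-contained sketch (not CubbyFlow code) of the dispatch pattern used by
// OnAdvanceTimeStep() above: a __host__ __device__ functor holding raw device
// pointers, applied once per particle index through thrust::for_each over
// counting iterators. The AddScaled functor, the buffer names, and the 0.5f
// scale are hypothetical; only the dispatch shape mirrors the solver.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
#include <cstdio>

struct AddScaled {
  const float* src;
  float* dst;
  float scale;
  __host__ __device__ void operator()(size_t i) const { dst[i] += scale * src[i]; }
};

int main() {
  const size_t n = 8;
  thrust::device_vector<float> force(n, 2.0f);     // stands in for Forces()
  thrust::device_vector<float> velocity(n, 1.0f);  // stands in for Velocities()
  AddScaled op{thrust::raw_pointer_cast(force.data()),
               thrust::raw_pointer_cast(velocity.data()),
               0.5f};  // plays the role of dt / mass in TimeIntegration
  thrust::for_each(thrust::device,
                   thrust::counting_iterator<size_t>(0),
                   thrust::counting_iterator<size_t>(n), op);
  thrust::host_vector<float> h = velocity;
  std::printf("velocity[0] = %.1f\n", static_cast<float>(h[0]));  // expect 2.0
  return 0;
}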
* \file * dnn/src/cuda/convolution_helper/block_tile_consumer/iconv_imma_block_consumer.cuh * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #pragma once #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { namespace convolution { template < typename IMMAConfig_, typename WarpTileConfig_, typename ThreadConfig_, bool pipelined> struct IConvIMMABlockConsumer; template <typename IMMAConfig_, typename WarpTileConfig_, typename ThreadConfig_> struct IConvIMMABlockConsumer<IMMAConfig_, WarpTileConfig_, ThreadConfig_, true> { using IMMAConfig = IMMAConfig_; using WarpTileConfig = WarpTileConfig_; using ThreadConfig = ThreadConfig_; #if __CUDA_ARCH__ >= 730 typename IMMAConfig::fragment_b frag_src[WarpTileConfig::warp_tile_n][2]; typename IMMAConfig::fragment_a frag_filter[WarpTileConfig::warp_tile_m][2]; typename IMMAConfig::fragment_c frag_acc[WarpTileConfig::warp_tile_m] [WarpTileConfig::warp_tile_n]; #endif __device__ __forceinline__ void init_accumulator() { #if __CUDA_ARCH__ >= 730 #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_m; ++i) { #pragma unroll for (int j = 0; j < WarpTileConfig::warp_tile_n; ++j) { wmma::fill_fragment(frag_acc[i][j], 0.f); } } #endif } template < typename DataGlobal2ShareMemVisitor, typename FilterGlobal2ShareMemVisitor> __device__ __forceinline__ void consume_block( DataGlobal2ShareMemVisitor data_gl2sh_visitor, FilterGlobal2ShareMemVisitor filter_gl2sh_visitor) { #if __CUDA_ARCH__ >= 730 const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int warpx = tidx / ThreadConfig::warp_size; const int warpy = tidy; static bool const use_wide_store = !(WarpTileConfig::warp_tile_n & 0x1); if (use_wide_store) { #pragma unroll for (int i = 0; i < (WarpTileConfig::warp_tile_n >> 1); ++i) { int i2 = (i << 1); int warpx2 = (warpx << 1); int32_t* data_sh_ptr = data_gl2sh_visitor.sh_ptr( 0, (warpx2 + i2 * ThreadConfig::nr_warp_x) * IMMAConfig::tile_b_sizes_int); wmma::load_matrix_sync( frag_src[i2][0], reinterpret_cast<int8_t*>(data_sh_ptr), IMMAConfig::wmma_k); wmma::load_matrix_sync( frag_src[i2 + 1][0], reinterpret_cast<int8_t*>( data_sh_ptr + IMMAConfig::tile_b_sizes_int), IMMAConfig::wmma_k); } } else { #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_n; ++i) { int32_t* data_sh_ptr = data_gl2sh_visitor.sh_ptr( 0, (warpx + i * ThreadConfig::nr_warp_x) * IMMAConfig::tile_b_sizes_int); wmma::load_matrix_sync( frag_src[i][0], reinterpret_cast<int8_t*>(data_sh_ptr), IMMAConfig::wmma_k); } } #pragma unroll for (int j = 0; j < WarpTileConfig::warp_tile_m; ++j) { int32_t* ker_sh_ptr = filter_gl2sh_visitor.sh_ptr( 0, (warpy + j * ThreadConfig::nr_warp_y) * IMMAConfig::tile_a_sizes_int); wmma::load_matrix_sync( frag_filter[j][0], reinterpret_cast<int8_t*>(ker_sh_ptr), IMMAConfig::wmma_k); } #pragma unroll for (int ci_inner = 0; ci_inner < WarpTileConfig::warp_tile_k; ++ci_inner) { const int comp_idx = (ci_inner & 0x1); const int load_idx = 1 - comp_idx; if (ci_inner < WarpTileConfig::warp_tile_k - 1) { if (use_wide_store) { #pragma unroll for (int i = 0; i < (WarpTileConfig::warp_tile_n >> 1); ++i) { int i2 = (i << 1); int warpx2 = (warpx << 1); int32_t* data_sh_ptr = data_gl2sh_visitor.sh_ptr( ci_inner + 1, (warpx2 + i2 * 
ThreadConfig::nr_warp_x) * IMMAConfig::tile_b_sizes_int); wmma::load_matrix_sync( frag_src[i2][load_idx], reinterpret_cast<int8_t*>(data_sh_ptr), IMMAConfig::wmma_k); wmma::load_matrix_sync( frag_src[i2 + 1][load_idx], reinterpret_cast<int8_t*>( data_sh_ptr + IMMAConfig::tile_b_sizes_int), IMMAConfig::wmma_k); } } else { #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_n; ++i) { int32_t* data_sh_ptr = data_gl2sh_visitor.sh_ptr( ci_inner + 1, (warpx + i * ThreadConfig::nr_warp_x) * IMMAConfig::tile_b_sizes_int); wmma::load_matrix_sync( frag_src[i][load_idx], reinterpret_cast<int8_t*>(data_sh_ptr), IMMAConfig::wmma_k); } } #pragma unroll for (int j = 0; j < WarpTileConfig::warp_tile_m; ++j) { int32_t* ker_sh_ptr = filter_gl2sh_visitor.sh_ptr( ci_inner + 1, (warpy + j * ThreadConfig::nr_warp_y) * IMMAConfig::tile_a_sizes_int); wmma::load_matrix_sync( frag_filter[j][load_idx], reinterpret_cast<int8_t*>(ker_sh_ptr), IMMAConfig::wmma_k); } } // end if use_wide_store #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_m; ++i) { #pragma unroll for (int j = 0; j < WarpTileConfig::warp_tile_n; ++j) { wmma::mma_sync( frag_acc[i][j], frag_filter[i][comp_idx], frag_src[j][comp_idx], frag_acc[i][j]); } } } // end ci_inner #endif } }; template <typename IMMAConfig_, typename WarpTileConfig_, typename ThreadConfig_> struct IConvIMMABlockConsumer<IMMAConfig_, WarpTileConfig_, ThreadConfig_, false> { using IMMAConfig = IMMAConfig_; using WarpTileConfig = WarpTileConfig_; using ThreadConfig = ThreadConfig_; #if __CUDA_ARCH__ >= 730 typename IMMAConfig::fragment_b frag_src[WarpTileConfig::warp_tile_n]; typename IMMAConfig::fragment_a frag_filter[WarpTileConfig::warp_tile_m]; typename IMMAConfig::fragment_c frag_acc[WarpTileConfig::warp_tile_m] [WarpTileConfig::warp_tile_n]; #endif __device__ __forceinline__ void init_accumulator() { #if __CUDA_ARCH__ >= 730 #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_m; ++i) { #pragma unroll for (int j = 0; j < WarpTileConfig::warp_tile_n; ++j) { wmma::fill_fragment(frag_acc[i][j], 0.f); } } #endif } template < typename DataGlobal2ShareMemVisitor, typename FilterGlobal2ShareMemVisitor> __device__ __forceinline__ void consume_block( DataGlobal2ShareMemVisitor data_gl2sh_visitor, FilterGlobal2ShareMemVisitor filter_gl2sh_visitor) { #if __CUDA_ARCH__ >= 730 const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int warpx = tidx / ThreadConfig::warp_size; const int warpy = tidy; static bool const use_wide_store = !(WarpTileConfig::warp_tile_n & 0x1); #pragma unroll for (int ci_inner = 0; ci_inner < WarpTileConfig::warp_tile_k; ++ci_inner) { if (use_wide_store) { #pragma unroll for (int i = 0; i < (WarpTileConfig::warp_tile_n >> 1); ++i) { int i2 = (i << 1); int warpx2 = (warpx << 1); int32_t* data_sh_ptr = data_gl2sh_visitor.sh_ptr( ci_inner, (warpx2 + i2 * ThreadConfig::nr_warp_x) * IMMAConfig::tile_b_sizes_int); wmma::load_matrix_sync( frag_src[i2], reinterpret_cast<int8_t*>(data_sh_ptr), IMMAConfig::wmma_k); wmma::load_matrix_sync( frag_src[i2 + 1], reinterpret_cast<int8_t*>( data_sh_ptr + IMMAConfig::tile_b_sizes_int), IMMAConfig::wmma_k); } } else { #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_n; ++i) { int32_t* data_sh_ptr = data_gl2sh_visitor.sh_ptr( ci_inner, (warpx + i * ThreadConfig::nr_warp_x) * IMMAConfig::tile_b_sizes_int); wmma::load_matrix_sync( frag_src[i], reinterpret_cast<int8_t*>(data_sh_ptr), IMMAConfig::wmma_k); } } // end if use_wide_store #pragma unroll for (int j = 0; j < 
WarpTileConfig::warp_tile_m; ++j) { int32_t* ker_sh_ptr = filter_gl2sh_visitor.sh_ptr( ci_inner, (warpy + j * ThreadConfig::nr_warp_y) * IMMAConfig::tile_a_sizes_int); wmma::load_matrix_sync( frag_filter[j], reinterpret_cast<int8_t*>(ker_sh_ptr), IMMAConfig::wmma_k); } #pragma unroll for (int i = 0; i < WarpTileConfig::warp_tile_m; ++i) { #pragma unroll for (int j = 0; j < WarpTileConfig::warp_tile_n; ++j) { wmma::mma_sync( frag_acc[i][j], frag_filter[i], frag_src[j], frag_acc[i][j]); } } } // end for ci_inner #endif } }; } // namespace convolution } // namespace cuda } // namespace megdnn // vim: ft=cpp syntax=cuda.doxygen foldmethod=marker foldmarker=f{{{,f}}}
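// A standalone sketch (not MegDNN code) of the nvcuda::wmma int8 pattern that
// IConvIMMABlockConsumer builds its double-buffered pipeline around: load an
// A tile and a B tile, accumulate into an int32 fragment with mma_sync, and
// store the result. The 16x16x16 tile shape, the kernel name, and the use of
// managed memory are assumptions for illustration; build with -arch=sm_72 or
// newer, since int8 WMMA is unavailable on earlier architectures (hence the
// architecture guard, analogous to the guards above).
#include <mma.h>
#include <cuda_runtime.h>
#include <cstdio>

using namespace nvcuda;

__global__ void imma_16x16x16(const signed char* a,  // 16x16, row major
                              const signed char* b,  // 16x16, col major
                              int* c) {              // 16x16, row major
#if __CUDA_ARCH__ >= 720
    wmma::fragment<wmma::matrix_a, 16, 16, 16, signed char, wmma::row_major> fa;
    wmma::fragment<wmma::matrix_b, 16, 16, 16, signed char, wmma::col_major> fb;
    wmma::fragment<wmma::accumulator, 16, 16, 16, int> fc;
    wmma::fill_fragment(fc, 0);
    wmma::load_matrix_sync(fa, a, 16);   // leading dimension 16
    wmma::load_matrix_sync(fb, b, 16);
    wmma::mma_sync(fc, fa, fb, fc);      // fc += fa * fb
    wmma::store_matrix_sync(c, fc, 16, wmma::mem_row_major);
#endif
}

int main() {
    signed char *a, *b;
    int *c;
    cudaMallocManaged(&a, 256);
    cudaMallocManaged(&b, 256);
    cudaMallocManaged(&c, 256 * sizeof(int));
    for (int i = 0; i < 256; ++i) { a[i] = 1; b[i] = 1; }
    imma_16x16x16<<<1, 32>>>(a, b, c);   // one warp owns the whole tile
    cudaDeviceSynchronize();
    std::printf("c[0] = %d\n", c[0]);    // expect 16: dot product of 16 ones
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}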
// The Ray Tracing in One Weekend scene. // See https://github.com/raytracing/InOneWeekend/releases/ for this free book. // public owl API #include <owl/owl.h> // our device-side data structures #include "GeomTypes.h" // external helper stuff for image output #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb/stb_image_write.h" #include <random> #include <optix_device.h> #define LOG(message) \ std::cout << OWL_TERMINAL_BLUE; \ std::cout << "#owl.sample(main): " << message << std::endl; \ std::cout << OWL_TERMINAL_DEFAULT; #define LOG_OK(message) \ std::cout << OWL_TERMINAL_LIGHT_BLUE; \ std::cout << "#owl.sample(main): " << message << std::endl; \ std::cout << OWL_TERMINAL_DEFAULT; extern "C" char deviceCode_ptx[]; const char *outFileName = "s10-launch3D.png"; const vec2i fbSize(1600,800); const vec3f lookFrom(13, 2, 3); const vec3f lookAt(0, 0, 0); const vec3f lookUp(0.f,1.f,0.f); const float fovy = 20.f; std::vector<DielectricSphere> dielectricSpheres; std::vector<LambertianSphere> lambertianSpheres; std::vector<MetalSphere> metalSpheres; inline float rnd() { static std::mt19937 gen(0); //Standard mersenne_twister_engine seeded with rd() static std::uniform_real_distribution<float> dis(0.f, 1.f); return dis(gen); } inline vec3f rnd3f() { return vec3f(rnd(),rnd(),rnd()); } void createScene() { lambertianSpheres.push_back({Sphere{vec3f(0.f, -1000.0f, -1.f), 1000.f}, Lambertian{vec3f(0.5f, 0.5f, 0.5f)}}); for (int a = -11; a < 11; a++) { for (int b = -11; b < 11; b++) { float choose_mat = rnd(); vec3f center(a + rnd(), 0.2f, b + rnd()); if (choose_mat < 0.8f) lambertianSpheres.push_back({Sphere{center, 0.2f}, Lambertian{rnd3f()*rnd3f()}}); else if (choose_mat < 0.95f) metalSpheres.push_back({Sphere{center, 0.2f}, Metal{0.5f*(1.f+rnd3f()),0.5f*rnd()}}); else dielectricSpheres.push_back({Sphere{center, 0.2f}, Dielectric{1.5f}}); } } dielectricSpheres.push_back({Sphere{vec3f(0.f, 1.f, 0.f), 1.f}, Dielectric{1.5f}}); lambertianSpheres.push_back({Sphere{vec3f(-4.f,1.f, 0.f), 1.f}, Lambertian{vec3f(0.4f, 0.2f, 0.1f)}}); metalSpheres.push_back({Sphere{vec3f(4.f, 1.f, 0.f), 1.f}, Metal{vec3f(0.7f, 0.6f, 0.5f), 0.0f}}); } __global__ void convertToRGBA(uint32_t *rgba, const vec3f *floatFB, int numPixels) { int tid = threadIdx.x+blockIdx.x*blockDim.x; if (tid < numPixels) { rgba[tid] = owl::make_rgba(floatFB[tid]); } } int main(int ac, char **av) { // ################################################################## // pre-owl host-side set-up // ################################################################## LOG("owl example '" << av[0] << "' starting up"); LOG("creating the scene ..."); createScene(); LOG_OK("created scene:"); LOG_OK(" num lambertian spheres: " << lambertianSpheres.size()); LOG_OK(" num dielectric spheres: " << dielectricSpheres.size()); LOG_OK(" num metal spheres : " << metalSpheres.size()); // ################################################################## // init owl // ################################################################## OWLContext context = owlContextCreate(nullptr,1); OWLModule module = owlModuleCreate(context,deviceCode_ptx); // ################################################################## // set up all the *GEOMETRY* graph we want to render // ################################################################## // ------------------------------------------------------- // declare geometry type(s) // ------------------------------------------------------- // ----------- metal ----------- OWLVarDecl metalSpheresGeomVars[] = { { "prims", OWL_BUFPTR, 
OWL_OFFSETOF(MetalSpheresGeom,prims)}, { /* sentinel to mark end of list */ } }; OWLGeomType metalSpheresGeomType = owlGeomTypeCreate(context, OWL_GEOMETRY_USER, sizeof(MetalSpheresGeom), metalSpheresGeomVars,-1); owlGeomTypeSetClosestHit(metalSpheresGeomType,0, module,"MetalSpheres"); owlGeomTypeSetIntersectProg(metalSpheresGeomType,0, module,"MetalSpheres"); owlGeomTypeSetBoundsProg(metalSpheresGeomType, module,"MetalSpheres"); // ----------- dielectric ----------- OWLVarDecl dielectricSpheresGeomVars[] = { { "prims", OWL_BUFPTR, OWL_OFFSETOF(DielectricSpheresGeom,prims)}, { /* sentinel to mark end of list */ } }; OWLGeomType dielectricSpheresGeomType = owlGeomTypeCreate(context, OWL_GEOMETRY_USER, sizeof(DielectricSpheresGeom), dielectricSpheresGeomVars,-1); owlGeomTypeSetClosestHit(dielectricSpheresGeomType,0, module,"DielectricSpheres"); owlGeomTypeSetIntersectProg(dielectricSpheresGeomType,0, module,"DielectricSpheres"); owlGeomTypeSetBoundsProg(dielectricSpheresGeomType, module,"DielectricSpheres"); // ----------- lambertian ----------- OWLVarDecl lambertianSpheresGeomVars[] = { { "prims", OWL_BUFPTR, OWL_OFFSETOF(LambertianSpheresGeom,prims)}, { /* sentinel to mark end of list */ } }; OWLGeomType lambertianSpheresGeomType = owlGeomTypeCreate(context, OWL_GEOMETRY_USER, sizeof(LambertianSpheresGeom), lambertianSpheresGeomVars,-1); owlGeomTypeSetClosestHit(lambertianSpheresGeomType,0, module,"LambertianSpheres"); owlGeomTypeSetIntersectProg(lambertianSpheresGeomType,0, module,"LambertianSpheres"); owlGeomTypeSetBoundsProg(lambertianSpheresGeomType, module,"LambertianSpheres"); // make sure to do that *before* setting up the geometry, since the // user geometry group will need the compiled bounds programs upon // accelBuild() owlBuildPrograms(context); // ################################################################## // set up all the *GEOMS* we want to run that code on // ################################################################## LOG("building geometries ..."); OWLBuffer frameBuffer = owlDeviceBufferCreate(context,OWL_FLOAT3,fbSize.x*fbSize.y,0); // = owlHostPinnedBufferCreate(context,OWL_INT,fbSize.x*fbSize.y); // ----------- metal ----------- OWLBuffer metalSpheresBuffer = owlDeviceBufferCreate(context,OWL_USER_TYPE(metalSpheres[0]), metalSpheres.size(),metalSpheres.data()); OWLGeom metalSpheresGeom = owlGeomCreate(context,metalSpheresGeomType); owlGeomSetPrimCount(metalSpheresGeom,metalSpheres.size()); owlGeomSetBuffer(metalSpheresGeom,"prims",metalSpheresBuffer); // ----------- lambertian ----------- OWLBuffer lambertianSpheresBuffer = owlDeviceBufferCreate(context,OWL_USER_TYPE(lambertianSpheres[0]), lambertianSpheres.size(),lambertianSpheres.data()); OWLGeom lambertianSpheresGeom = owlGeomCreate(context,lambertianSpheresGeomType); owlGeomSetPrimCount(lambertianSpheresGeom,lambertianSpheres.size()); owlGeomSetBuffer(lambertianSpheresGeom,"prims",lambertianSpheresBuffer); // ----------- dielectric ----------- OWLBuffer dielectricSpheresBuffer = owlDeviceBufferCreate(context,OWL_USER_TYPE(dielectricSpheres[0]), dielectricSpheres.size(),dielectricSpheres.data()); OWLGeom dielectricSpheresGeom = owlGeomCreate(context,dielectricSpheresGeomType); owlGeomSetPrimCount(dielectricSpheresGeom,dielectricSpheres.size()); owlGeomSetBuffer(dielectricSpheresGeom,"prims",dielectricSpheresBuffer); // ################################################################## // set up all *ACCELS* we need to trace into those groups // 
################################################################## OWLGeom userGeoms[] = { lambertianSpheresGeom, metalSpheresGeom, dielectricSpheresGeom }; OWLGroup spheresGroup = owlUserGeomGroupCreate(context,3,userGeoms); owlGroupBuildAccel(spheresGroup); OWLGroup world = owlInstanceGroupCreate(context,1,&spheresGroup); owlGroupBuildAccel(world); // ################################################################## // set miss and raygen programs // ################################################################## // ------------------------------------------------------- // set up miss prog // ------------------------------------------------------- OWLVarDecl missProgVars[] = { { /* sentinel to mark end of list */ } }; // ........... create object ............................ OWLMissProg missProg = owlMissProgCreate(context,module,"miss",sizeof(MissProgData), missProgVars,-1); owlMissProgSet(context,0,missProg); // ........... set variables ............................ /* nothing to set */ // ------------------------------------------------------- // set up ray gen program // ------------------------------------------------------- OWLVarDecl rayGenVars[] = { { "fbPtr", OWL_BUFPTR, OWL_OFFSETOF(RayGenData,fbPtr)}, { "fbSize", OWL_INT2, OWL_OFFSETOF(RayGenData,fbSize)}, { "world", OWL_GROUP, OWL_OFFSETOF(RayGenData,world)}, { "camera.org", OWL_FLOAT3, OWL_OFFSETOF(RayGenData,camera.origin)}, { "camera.llc", OWL_FLOAT3, OWL_OFFSETOF(RayGenData,camera.lower_left_corner)}, { "camera.horiz", OWL_FLOAT3, OWL_OFFSETOF(RayGenData,camera.horizontal)}, { "camera.vert", OWL_FLOAT3, OWL_OFFSETOF(RayGenData,camera.vertical)}, { /* sentinel to mark end of list */ } }; // ........... create object ............................ OWLRayGen rayGen = owlRayGenCreate(context,module,"rayGen", sizeof(RayGenData), rayGenVars,-1); // ........... compute variable values .................. const float vfov = fovy; const vec3f vup = lookUp; const float aspect = fbSize.x / float(fbSize.y); const float theta = vfov * ((float)M_PI) / 180.0f; const float half_height = tanf(theta / 2.0f); const float half_width = aspect * half_height; const float focusDist = 10.f; const vec3f origin = lookFrom; const vec3f w = normalize(lookFrom - lookAt); const vec3f u = normalize(cross(vup, w)); const vec3f v = cross(w, u); const vec3f lower_left_corner = origin - half_width * focusDist*u - half_height * focusDist*v - focusDist * w; const vec3f horizontal = 2.0f*half_width*focusDist*u; const vec3f vertical = 2.0f*half_height*focusDist*v; // ----------- set variables ---------------------------- owlRayGenSetBuffer(rayGen,"fbPtr", frameBuffer); owlRayGenSet2i (rayGen,"fbSize", (const owl2i&)fbSize); owlRayGenSetGroup (rayGen,"world", world); owlRayGenSet3f (rayGen,"camera.org", (const owl3f&)origin); owlRayGenSet3f (rayGen,"camera.llc", (const owl3f&)lower_left_corner); owlRayGenSet3f (rayGen,"camera.horiz", (const owl3f&)horizontal); owlRayGenSet3f (rayGen,"camera.vert", (const owl3f&)vertical); // ################################################################## // build *SBT* required to trace the groups // ################################################################## // programs have been built before, but have to rebuild raygen and // miss progs owlBuildPrograms(context); owlBuildPipeline(context); owlBuildSBT(context); // ################################################################## // now that everything is ready: launch it .... 
// ################################################################## LOG("launching ..."); // owlRayGenLaunch2D(rayGen,fbSize.x,fbSize.y); owlBufferClear(frameBuffer); owlRayGenLaunch3D(rayGen,fbSize.x,fbSize.y,NUM_SAMPLES_PER_PIXEL); LOG("done with launch, calling CUDA kernel to convert from float3 to rgba8 ..."); // ------------------------------------------------------------------ // since we did atomics during rndering we used a float3 // framebuffer, so still need to convert to rgba8; let's do this // with a simple CUDA kernel here, this also demonstrates how easy // it is to od owl-cuda interop. // ------------------------------------------------------------------ uint32_t *fb = 0; cudaMallocManaged(&fb,fbSize.x*fbSize.y*sizeof(uint32_t)); int numPixels = fbSize.x*fbSize.y; convertToRGBA<<<divRoundUp(numPixels,1024),1024>>> (fb,(const vec3f*)owlBufferGetPointer(frameBuffer,0),numPixels); cudaDeviceSynchronize(); // ------------------------------------------------------------------ // frame buffer not in png friendly form, let's write it // ------------------------------------------------------------------ stbi_write_png(outFileName,fbSize.x,fbSize.y,4, fb,fbSize.x*sizeof(uint32_t)); cudaFree(fb); LOG_OK("written rendered frame buffer to file "<<outFileName); // ################################################################## // and finally, clean up // ################################################################## LOG("destroying devicegroup ..."); owlContextDestroy(context); LOG_OK("seems all went OK; app is done, this should be the last output ..."); }
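// A self-contained sketch of the float3 -> packed RGBA8 conversion that
// convertToRGBA() above delegates to owl::make_rgba. The clamp-and-scale
// packing (and the absence of gamma correction) is an assumption; the OWL
// helper may differ in detail. Kernel and type names here are made up.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>

struct f3 { float x, y, z; };

__host__ __device__ inline uint32_t to8(float v) {
    v = v < 0.f ? 0.f : (v > 1.f ? 1.f : v);   // clamp to [0,1]
    return static_cast<uint32_t>(v * 255.99f);
}

__host__ __device__ inline uint32_t packRGBA8(f3 c) {
    return to8(c.x) | (to8(c.y) << 8) | (to8(c.z) << 16) | (0xffu << 24);
}

__global__ void convertKernel(uint32_t* rgba, const f3* fb, int numPixels) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < numPixels) rgba[tid] = packRGBA8(fb[tid]);
}

int main() {
    const int numPixels = 4;
    f3* fb;
    uint32_t* rgba;
    cudaMallocManaged(&fb, numPixels * sizeof(f3));
    cudaMallocManaged(&rgba, numPixels * sizeof(uint32_t));
    for (int i = 0; i < numPixels; ++i) fb[i] = f3{1.f, 0.5f, 0.f};
    int block = 1024;
    int grid = (numPixels + block - 1) / block;   // same idea as divRoundUp()
    convertKernel<<<grid, block>>>(rgba, fb, numPixels);
    cudaDeviceSynchronize();
    std::printf("pixel 0 = 0x%08x\n", (unsigned)rgba[0]);
    cudaFree(fb); cudaFree(rgba);
    return 0;
}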
typedef unsigned char BYTE; typedef unsigned int WORD; typedef unsigned long long LONG; #define BLAKE2B_ROUNDS 12 #define BLAKE2B_BLOCK_LENGTH 128 #define BLAKE2B_CHAIN_SIZE 8 #define BLAKE2B_CHAIN_LENGTH (BLAKE2B_CHAIN_SIZE * sizeof(int64_t)) #define BLAKE2B_STATE_SIZE 16 #define BLAKE2B_STATE_LENGTH (BLAKE2B_STATE_SIZE * sizeof(int64_t)) typedef struct { WORD digestlen; BYTE buff[BLAKE2B_BLOCK_LENGTH]; int64_t chain[BLAKE2B_CHAIN_SIZE]; int64_t state[BLAKE2B_STATE_SIZE]; WORD pos; LONG t0; LONG t1; LONG f0; } cuda_blake2b_ctx_t; typedef cuda_blake2b_ctx_t CUDA_BLAKE2B_CTX; __constant__ CUDA_BLAKE2B_CTX c_CTX; __constant__ LONG BLAKE2B_IVS[8] = { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 }; __constant__ unsigned char BLAKE2B_SIGMAS[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } }; __device__ LONG cuda_blake2b_leuint64(BYTE *in) { LONG a; memcpy(&a, in, 8); return a; } __device__ LONG cuda_blake2b_ROTR64(LONG a, BYTE b) { return (a >> b) | (a << (64 - b)); } __device__ void cuda_blake2b_G(cuda_blake2b_ctx_t *ctx, int64_t m1, int64_t m2, int32_t a, int32_t b, int32_t c, int32_t d) { ctx->state[a] = ctx->state[a] + ctx->state[b] + m1; ctx->state[d] = cuda_blake2b_ROTR64(ctx->state[d] ^ ctx->state[a], 32); ctx->state[c] = ctx->state[c] + ctx->state[d]; ctx->state[b] = cuda_blake2b_ROTR64(ctx->state[b] ^ ctx->state[c], 24); ctx->state[a] = ctx->state[a] + ctx->state[b] + m2; ctx->state[d] = cuda_blake2b_ROTR64(ctx->state[d] ^ ctx->state[a], 16); ctx->state[c] = ctx->state[c] + ctx->state[d]; ctx->state[b] = cuda_blake2b_ROTR64(ctx->state[b] ^ ctx->state[c], 63); } __device__ __forceinline__ void cuda_blake2b_init_state(cuda_blake2b_ctx_t *ctx) { memcpy(ctx->state, ctx->chain, BLAKE2B_CHAIN_LENGTH); // Set blake2b initialization vectors 0-3 ctx->state[BLAKE2B_CHAIN_SIZE + 0] = 0x6a09e667f3bcc908; ctx->state[BLAKE2B_CHAIN_SIZE + 1] = 0xbb67ae8584caa73b; ctx->state[BLAKE2B_CHAIN_SIZE + 2] = 0x3c6ef372fe94f82b; ctx->state[BLAKE2B_CHAIN_SIZE + 3] = 0xa54ff53a5f1d36f1; // Hard code blake2b initialization vectors 4-7 ctx->state[12] = ctx->t0 ^ 0x510e527fade682d1; ctx->state[13] = ctx->t1 ^ 0x9b05688c2b3e6c1f; ctx->state[14] = ctx->f0 ^ 0x1f83d9abfb41bd6b; ctx->state[15] = 0x5be0cd19137e2179; } __device__ __forceinline__ void cuda_blake2b_compress(cuda_blake2b_ctx_t *ctx, BYTE* in, WORD inoffset) { cuda_blake2b_init_state(ctx); LONG m[16] = {0}; #pragma unroll for (int j = 0; j < 16; j++) m[j] = cuda_blake2b_leuint64(in + inoffset + (j << 3)); // 12 blake2b rounds in total // round 0 cuda_blake2b_G(ctx, m[0], m[1], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[2], m[3], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[4], m[5], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[6], m[7], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[8], m[9], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[10], 
m[11], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[12], m[13], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[14], m[15], 3, 4, 9, 14); // round 1 cuda_blake2b_G(ctx, m[14], m[10], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[4], m[8], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[9], m[15], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[13], m[6], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[1], m[12], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[0], m[2], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[11], m[7], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[5], m[3], 3, 4, 9, 14); // round 2 cuda_blake2b_G(ctx, m[11], m[8], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[12], m[0], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[5], m[2], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[15], m[13], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[10], m[14], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[3], m[6], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[7], m[1], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[9], m[4], 3, 4, 9, 14); // round 3 cuda_blake2b_G(ctx, m[7], m[9], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[3], m[1], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[13], m[12], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[11], m[14], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[2], m[6], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[5], m[10], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[4], m[0], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[15], m[8], 3, 4, 9, 14); // round 4 cuda_blake2b_G(ctx, m[9], m[0], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[5], m[7], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[2], m[4], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[10], m[15], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[14], m[1], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[11], m[12], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[6], m[8], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[3], m[13], 3, 4, 9, 14); // round 5 cuda_blake2b_G(ctx, m[2], m[12], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[6], m[10], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[0], m[11], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[8], m[3], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[4], m[13], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[7], m[5], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[15], m[14], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[1], m[9], 3, 4, 9, 14); // round 6 cuda_blake2b_G(ctx, m[12], m[5], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[1], m[15], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[14], m[13], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[4], m[10], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[0], m[7], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[6], m[3], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[9], m[2], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[8], m[11], 3, 4, 9, 14); // round 7 cuda_blake2b_G(ctx, m[13], m[11], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[7], m[14], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[12], m[1], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[3], m[9], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[5], m[0], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[15], m[4], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[8], m[6], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[2], m[10], 3, 4, 9, 14); // round 8 cuda_blake2b_G(ctx, m[6], m[15], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[14], m[9], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[11], m[3], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[0], m[8], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[12], m[2], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[13], m[7], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[1], m[4], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[10], m[5], 3, 4, 9, 14); // round 9 cuda_blake2b_G(ctx, m[10], m[2], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[8], m[4], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[7], m[6], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[1], m[5], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[15], m[11], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[9], m[14], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[3], 
m[12], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[13], m[0], 3, 4, 9, 14); // round 10 cuda_blake2b_G(ctx, m[0], m[1], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[2], m[3], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[4], m[5], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[6], m[7], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[8], m[9], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[10], m[11], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[12], m[13], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[14], m[15], 3, 4, 9, 14); // round 11 cuda_blake2b_G(ctx, m[14], m[10], 0, 4, 8, 12); cuda_blake2b_G(ctx, m[4], m[8], 1, 5, 9, 13); cuda_blake2b_G(ctx, m[9], m[15], 2, 6, 10, 14); cuda_blake2b_G(ctx, m[13], m[6], 3, 7, 11, 15); cuda_blake2b_G(ctx, m[1], m[12], 0, 5, 10, 15); cuda_blake2b_G(ctx, m[0], m[2], 1, 6, 11, 12); cuda_blake2b_G(ctx, m[11], m[7], 2, 7, 8, 13); cuda_blake2b_G(ctx, m[5], m[3], 3, 4, 9, 14); for (int offset = 0; offset < BLAKE2B_CHAIN_SIZE; offset++) ctx->chain[offset] = ctx->chain[offset] ^ ctx->state[offset] ^ ctx->state[offset + 8]; } __device__ void cuda_blake2b_init(cuda_blake2b_ctx_t *ctx, WORD digestbitlen) { memset(ctx, 0, sizeof(cuda_blake2b_ctx_t)); ctx->digestlen = digestbitlen >> 3; ctx->pos = 0; ctx->t0 = 0; ctx->t1 = 0; ctx->f0 = 0; // Inline the blake2b initialization vectors 0-7 ctx->chain[0] = 0x6a09e667f3bcc908 ^ (ctx->digestlen | 0x1010000); ctx->chain[1] = 0xbb67ae8584caa73b; ctx->chain[2] = 0x3c6ef372fe94f82b; ctx->chain[3] = 0xa54ff53a5f1d36f1; ctx->chain[4] = 0x510e527fade682d1; ctx->chain[5] = 0x9b05688c2b3e6c1f; ctx->chain[6] = 0x1f83d9abfb41bd6b; ctx->chain[7] = 0x5be0cd19137e2179; } __device__ void cuda_blake2b_update(cuda_blake2b_ctx_t *ctx, BYTE* in, LONG inlen) { if (inlen == 0) return; WORD start = 0; int64_t in_index = 0, block_index = 0; if (ctx->pos) { start = BLAKE2B_BLOCK_LENGTH - ctx->pos; if (start < inlen){ memcpy(ctx->buff + ctx->pos, in, start); ctx->t0 += BLAKE2B_BLOCK_LENGTH; if (ctx->t0 == 0) ctx->t1++; cuda_blake2b_compress(ctx, ctx->buff, 0); ctx->pos = 0; memset(ctx->buff, 0, BLAKE2B_BLOCK_LENGTH); } else { memcpy(ctx->buff + ctx->pos, in, inlen);//read the whole *in ctx->pos += inlen; return; } } block_index = inlen - BLAKE2B_BLOCK_LENGTH; for (in_index = start; in_index < block_index; in_index += BLAKE2B_BLOCK_LENGTH) { ctx->t0 += BLAKE2B_BLOCK_LENGTH; if (ctx->t0 == 0) ctx->t1++; cuda_blake2b_compress(ctx, in, in_index); } memcpy(ctx->buff, in + in_index, inlen - in_index); ctx->pos += inlen - in_index; } __device__ void cuda_blake2b_final(cuda_blake2b_ctx_t *ctx, BYTE* out) { ctx->f0 = 0xFFFFFFFFFFFFFFFFL; ctx->t0 += ctx->pos; if (ctx->pos > 0 && ctx->t0 == 0) ctx->t1++; cuda_blake2b_compress(ctx, ctx->buff, 0); memset(ctx->buff, 0, BLAKE2B_BLOCK_LENGTH); memset(ctx->state, 0, BLAKE2B_STATE_LENGTH); int i8 = 0; for (int i = 0; i < BLAKE2B_CHAIN_SIZE && ((i8 = i * 8) < ctx->digestlen); i++) { BYTE * BYTEs = (BYTE*)(&ctx->chain[i]); if (i8 < ctx->digestlen - 8) memcpy(out + i8, BYTEs, 8); else memcpy(out + i8, BYTEs, ctx->digestlen - i8); } } __global__ void kernel_blake2b_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch, WORD BLAKE2B_BLOCK_SIZE) { WORD thread = blockIdx.x * blockDim.x + threadIdx.x; if (thread >= n_batch) { return; } BYTE* in = indata + thread * inlen; BYTE* out = outdata + thread * BLAKE2B_BLOCK_SIZE; CUDA_BLAKE2B_CTX ctx = c_CTX; //if not precomputed CTX, call cuda_blake2b_init() with key cuda_blake2b_update(&ctx, in, inlen); cuda_blake2b_final(&ctx, out); } #define KECCAK_ROUND 24 #define KECCAK_STATE_SIZE 25 #define KECCAK_Q_SIZE 192 __constant__ LONG 
CUDA_KECCAK_CONSTS[24] = { 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 }; typedef struct { BYTE sha3_flag; WORD digestbitlen; LONG rate_bits; LONG rate_BYTEs; LONG absorb_round; int64_t state[KECCAK_STATE_SIZE]; BYTE q[KECCAK_Q_SIZE]; LONG bits_in_queue; } cuda_keccak_ctx_t; typedef cuda_keccak_ctx_t CUDA_KECCAK_CTX; __device__ LONG cuda_keccak_leuint64(void *in) { LONG a; memcpy(&a, in, 8); return a; } __device__ int64_t cuda_keccak_MIN(int64_t a, int64_t b) { if (a > b) return b; return a; } __device__ LONG cuda_keccak_UMIN(LONG a, LONG b) { if (a > b) return b; return a; } __device__ void cuda_keccak_extract(cuda_keccak_ctx_t *ctx) { LONG len = ctx->rate_bits >> 6; int64_t a; int s = sizeof(LONG); for (int i = 0;i < len;i++) { a = cuda_keccak_leuint64((int64_t*)&ctx->state[i]); memcpy(ctx->q + (i * s), &a, s); } } __device__ __forceinline__ LONG cuda_keccak_ROTL64(LONG a, LONG b) { return (a << b) | (a >> (64 - b)); } __device__ void cuda_keccak_permutations(cuda_keccak_ctx_t * ctx) { int64_t* A = ctx->state;; int64_t *a00 = A, *a01 = A + 1, *a02 = A + 2, *a03 = A + 3, *a04 = A + 4; int64_t *a05 = A + 5, *a06 = A + 6, *a07 = A + 7, *a08 = A + 8, *a09 = A + 9; int64_t *a10 = A + 10, *a11 = A + 11, *a12 = A + 12, *a13 = A + 13, *a14 = A + 14; int64_t *a15 = A + 15, *a16 = A + 16, *a17 = A + 17, *a18 = A + 18, *a19 = A + 19; int64_t *a20 = A + 20, *a21 = A + 21, *a22 = A + 22, *a23 = A + 23, *a24 = A + 24; for (int i = 0; i < KECCAK_ROUND; i++) { /* Theta */ int64_t c0 = *a00 ^ *a05 ^ *a10 ^ *a15 ^ *a20; int64_t c1 = *a01 ^ *a06 ^ *a11 ^ *a16 ^ *a21; int64_t c2 = *a02 ^ *a07 ^ *a12 ^ *a17 ^ *a22; int64_t c3 = *a03 ^ *a08 ^ *a13 ^ *a18 ^ *a23; int64_t c4 = *a04 ^ *a09 ^ *a14 ^ *a19 ^ *a24; int64_t d1 = cuda_keccak_ROTL64(c1, 1) ^ c4; int64_t d2 = cuda_keccak_ROTL64(c2, 1) ^ c0; int64_t d3 = cuda_keccak_ROTL64(c3, 1) ^ c1; int64_t d4 = cuda_keccak_ROTL64(c4, 1) ^ c2; int64_t d0 = cuda_keccak_ROTL64(c0, 1) ^ c3; *a00 ^= d1; *a05 ^= d1; *a10 ^= d1; *a15 ^= d1; *a20 ^= d1; *a01 ^= d2; *a06 ^= d2; *a11 ^= d2; *a16 ^= d2; *a21 ^= d2; *a02 ^= d3; *a07 ^= d3; *a12 ^= d3; *a17 ^= d3; *a22 ^= d3; *a03 ^= d4; *a08 ^= d4; *a13 ^= d4; *a18 ^= d4; *a23 ^= d4; *a04 ^= d0; *a09 ^= d0; *a14 ^= d0; *a19 ^= d0; *a24 ^= d0; /* Rho pi */ c1 = cuda_keccak_ROTL64(*a01, 1); *a01 = cuda_keccak_ROTL64(*a06, 44); *a06 = cuda_keccak_ROTL64(*a09, 20); *a09 = cuda_keccak_ROTL64(*a22, 61); *a22 = cuda_keccak_ROTL64(*a14, 39); *a14 = cuda_keccak_ROTL64(*a20, 18); *a20 = cuda_keccak_ROTL64(*a02, 62); *a02 = cuda_keccak_ROTL64(*a12, 43); *a12 = cuda_keccak_ROTL64(*a13, 25); *a13 = cuda_keccak_ROTL64(*a19, 8); *a19 = cuda_keccak_ROTL64(*a23, 56); *a23 = cuda_keccak_ROTL64(*a15, 41); *a15 = cuda_keccak_ROTL64(*a04, 27); *a04 = cuda_keccak_ROTL64(*a24, 14); *a24 = cuda_keccak_ROTL64(*a21, 2); *a21 = cuda_keccak_ROTL64(*a08, 55); *a08 = cuda_keccak_ROTL64(*a16, 45); *a16 = cuda_keccak_ROTL64(*a05, 36); *a05 = cuda_keccak_ROTL64(*a03, 28); *a03 = cuda_keccak_ROTL64(*a18, 21); *a18 = cuda_keccak_ROTL64(*a17, 15); *a17 = cuda_keccak_ROTL64(*a11, 10); *a11 = cuda_keccak_ROTL64(*a07, 6); *a07 = 
cuda_keccak_ROTL64(*a10, 3); *a10 = c1; /* Chi */ c0 = *a00 ^ (~*a01 & *a02); c1 = *a01 ^ (~*a02 & *a03); *a02 ^= ~*a03 & *a04; *a03 ^= ~*a04 & *a00; *a04 ^= ~*a00 & *a01; *a00 = c0; *a01 = c1; c0 = *a05 ^ (~*a06 & *a07); c1 = *a06 ^ (~*a07 & *a08); *a07 ^= ~*a08 & *a09; *a08 ^= ~*a09 & *a05; *a09 ^= ~*a05 & *a06; *a05 = c0; *a06 = c1; c0 = *a10 ^ (~*a11 & *a12); c1 = *a11 ^ (~*a12 & *a13); *a12 ^= ~*a13 & *a14; *a13 ^= ~*a14 & *a10; *a14 ^= ~*a10 & *a11; *a10 = c0; *a11 = c1; c0 = *a15 ^ (~*a16 & *a17); c1 = *a16 ^ (~*a17 & *a18); *a17 ^= ~*a18 & *a19; *a18 ^= ~*a19 & *a15; *a19 ^= ~*a15 & *a16; *a15 = c0; *a16 = c1; c0 = *a20 ^ (~*a21 & *a22); c1 = *a21 ^ (~*a22 & *a23); *a22 ^= ~*a23 & *a24; *a23 ^= ~*a24 & *a20; *a24 ^= ~*a20 & *a21; *a20 = c0; *a21 = c1; /* Iota */ *a00 ^= CUDA_KECCAK_CONSTS[i]; } } __device__ void cuda_keccak_absorb(cuda_keccak_ctx_t *ctx, BYTE* in) { LONG offset = 0; for (LONG i = 0; i < ctx->absorb_round; ++i) { ctx->state[i] ^= cuda_keccak_leuint64(in + offset); offset += 8; } cuda_keccak_permutations(ctx); } __device__ void cuda_keccak_pad(cuda_keccak_ctx_t *ctx) { ctx->q[ctx->bits_in_queue >> 3] |= (1L << (ctx->bits_in_queue & 7)); if (++(ctx->bits_in_queue) == ctx->rate_bits) { cuda_keccak_absorb(ctx, ctx->q); ctx->bits_in_queue = 0; } LONG full = ctx->bits_in_queue >> 6; LONG partial = ctx->bits_in_queue & 63; LONG offset = 0; for (int i = 0; i < full; ++i) { ctx->state[i] ^= cuda_keccak_leuint64(ctx->q + offset); offset += 8; } if (partial > 0) { LONG mask = (1L << partial) - 1; ctx->state[full] ^= cuda_keccak_leuint64(ctx->q + offset) & mask; } ctx->state[(ctx->rate_bits - 1) >> 6] ^= 9223372036854775808ULL;/* 1 << 63 */ cuda_keccak_permutations(ctx); cuda_keccak_extract(ctx); ctx->bits_in_queue = ctx->rate_bits; } /* * Digestbitlen must be 128 224 256 288 384 512 */ __device__ void cuda_keccak_init(cuda_keccak_ctx_t *ctx, WORD digestbitlen) { memset(ctx, 0, sizeof(cuda_keccak_ctx_t)); ctx->sha3_flag = 1; ctx->digestbitlen = digestbitlen; ctx->rate_bits = 1600 - ((ctx->digestbitlen) << 1); ctx->rate_BYTEs = ctx->rate_bits >> 3; ctx->absorb_round = ctx->rate_bits >> 6; ctx->bits_in_queue = 0; } /* * Digestbitlen must be 224 256 384 512 */ __device__ void cuda_keccak_sha3_init(cuda_keccak_ctx_t *ctx, WORD digestbitlen) { cuda_keccak_init(ctx, digestbitlen); ctx->sha3_flag = 1; } __device__ void cuda_keccak_update(cuda_keccak_ctx_t *ctx, BYTE *in, LONG inlen) { int64_t BYTEs = ctx->bits_in_queue >> 3; int64_t count = 0; while (count < inlen) { if (BYTEs == 0 && count <= ((int64_t)(inlen - ctx->rate_BYTEs))) { do { cuda_keccak_absorb(ctx, in + count); count += ctx->rate_BYTEs; } while (count <= ((int64_t)(inlen - ctx->rate_BYTEs))); } else { int64_t partial = cuda_keccak_MIN(ctx->rate_BYTEs - BYTEs, inlen - count); memcpy(ctx->q + BYTEs, in + count, partial); BYTEs += partial; count += partial; if (BYTEs == ctx->rate_BYTEs) { cuda_keccak_absorb(ctx, ctx->q); BYTEs = 0; } } } ctx->bits_in_queue = BYTEs << 3; } __device__ void cuda_keccak_final(cuda_keccak_ctx_t *ctx, BYTE *out) { if (ctx->sha3_flag) { int mask = (1 << 2) - 1; ctx->q[ctx->bits_in_queue >> 3] = (BYTE)(0x02 & mask); ctx->bits_in_queue += 2; } cuda_keccak_pad(ctx); LONG i = 0; while (i < ctx->digestbitlen) { if (ctx->bits_in_queue == 0) { cuda_keccak_permutations(ctx); cuda_keccak_extract(ctx); ctx->bits_in_queue = ctx->rate_bits; } LONG partial_block = cuda_keccak_UMIN(ctx->bits_in_queue, ctx->digestbitlen - i); memcpy(out + (i >> 3), ctx->q + (ctx->rate_BYTEs - (ctx->bits_in_queue >> 3)), 
partial_block >> 3); ctx->bits_in_queue -= partial_block; i += partial_block; } } __global__ void kernel_keccak_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch, WORD KECCAK_BLOCK_SIZE) { WORD thread = blockIdx.x * blockDim.x + threadIdx.x; if (thread >= n_batch) { return; } BYTE* in = indata + thread * inlen; BYTE* out = outdata + thread * KECCAK_BLOCK_SIZE; CUDA_KECCAK_CTX ctx; cuda_keccak_init(&ctx, KECCAK_BLOCK_SIZE << 3); cuda_keccak_update(&ctx, in, inlen); cuda_keccak_final(&ctx, out); } /** * The miner serialized header: * nonce - 4 * time - 8 * padding - 20 * prev_block - 32 * tree_root - 32 * mask hash - 32 * extra_nonce - 24 * reserved_root - 32 * witness_root - 32 * merkle_root - 32 * version - 4 * bits - 4 */ // Global memory is underscore prefixed __constant__ uint8_t _pre_header[96]; __constant__ uint8_t _target[32]; __constant__ uint8_t _padding[32]; __constant__ uint8_t _commit_hash[32]; __device__ int cuda_memcmp(const void *s1, const void *s2, size_t n) { const unsigned char *us1 = (const unsigned char *) s1; const unsigned char *us2 = (const unsigned char *) s2; while (n-- != 0) { if (*us1 != *us2) { return (*us1 < *us2) ? -1 : +1; } us1++; us2++; } return 0; } __global__ void kernel_hs_hash( uint32_t *out_nonce, bool *out_match, unsigned int start_nonce, unsigned int range, unsigned int threads ) { uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= threads || tid >= range) { return; } // Set the nonce based on the start_nonce and thread. uint32_t nonce = start_nonce + tid; CUDA_BLAKE2B_CTX b_ctx; CUDA_KECCAK_CTX s_ctx; uint8_t hash[32]; uint8_t left[64]; uint8_t right[32]; uint8_t share[128]; // Create the share using the nonce, // pre_header and commit_hash. memcpy(share, &nonce, 4); memcpy(share + 4, _pre_header + 4, 92); memcpy(share + 96, _commit_hash, 32); // Generate left by hashing the share // with blake2b-512. cuda_blake2b_init(&b_ctx, 512); cuda_blake2b_update(&b_ctx, share, 128); cuda_blake2b_final(&b_ctx, left); // Generate right by hashing the share // and first 8 bytes of padding with // sha3-256. cuda_keccak_init(&s_ctx, 256); cuda_keccak_update(&s_ctx, share, 128); cuda_keccak_update(&s_ctx, _padding, 8); cuda_keccak_final(&s_ctx, right); // Generate share hash by hashing together // the left, 32 bytes of padding and the // right with blake2b-256. cuda_blake2b_init(&b_ctx, 256); cuda_blake2b_update(&b_ctx, left, 64); cuda_blake2b_update(&b_ctx, _padding, 32); cuda_blake2b_update(&b_ctx, right, 32); cuda_blake2b_final(&b_ctx, hash); // Do a bytewise comparison to see if the // hash satisfies the target. This could be // either the network target or the pool target. if (cuda_memcmp(hash, _target, 32) <= 0) { *out_nonce = nonce; *out_match = true; return; } } // Calculate the commit hash on the CPU and copy to the GPU // before starting the GPU kernel. This saves the need for each // GPU thread to compute the exact same commit_hash. void hs_commit_hash(const uint8_t *sub_header, const uint8_t *mask_hash) { uint8_t sub_hash[32]; uint8_t commit_hash[32]; // Create the sub_hash by hashing the // sub_header with blake2b-256. hs_blake2b_ctx b_ctx; hs_blake2b_init(&b_ctx, 32); hs_blake2b_update(&b_ctx, sub_header, 128); hs_blake2b_final(&b_ctx, sub_hash, 32); // Create the commit_hash by hashing together // the sub_hash and the mask_hash with blake2b-256. // The mask_hash is included in the miner header serialization // that comes from `getwork` or stratum. 
hs_blake2b_init(&b_ctx, 32); hs_blake2b_update(&b_ctx, sub_hash, 32); hs_blake2b_update(&b_ctx, mask_hash, 32); hs_blake2b_final(&b_ctx, commit_hash, 32); cudaMemcpyToSymbol(_commit_hash, commit_hash, 32); } // At most 32 bytes of padding are needed, so calculate all 32 // bytes and then copy it to the GPU. void hs_padding(const uint8_t *prev_block, const uint8_t *tree_root, size_t len) { uint8_t padding[len]; size_t i; for (i = 0; i < len; i++) padding[i] = prev_block[i % 32] ^ tree_root[i % 32]; cudaMemcpyToSymbol(_padding, padding, 32); } // hs_miner_func for the cuda backend int32_t hs_cuda_run(hs_options_t *options, uint32_t *result, uint8_t *extra_nonce, bool *match) { uint32_t *out_nonce; bool *out_match; cudaSetDevice(options->device); cudaMalloc(&out_nonce, sizeof(uint32_t)); cudaMalloc(&out_match, sizeof(bool)); cudaMemset(out_match, 0, sizeof(bool)); // preheader + mask hash // nonce - 4 bytes // time - 8 bytes // pad - 20 bytes // prev - 32 bytes // tree root - 32 bytes // mask hash - 32 bytes // total - 128 bytes // subheader // extra nonce - 24 bytes // reserved - 32 bytes // witness - 32 bytes // merkle - 32 bytes // version - 4 bytes // bits - 4 bytes // total - 128 bytes cudaMemcpyToSymbol(_pre_header, options->header, 96); cudaMemcpyToSymbol(_target, options->target, 32); // Pointers to prev block and tree root. hs_padding(options->header + 32, options->header + 64, 32); // Pointers to the subheader and mask hash hs_commit_hash(options->header + 128, options->header + 96); kernel_hs_hash<<<options->grids, options->blocks>>>( out_nonce, out_match, options->nonce, options->range, options->threads ); cudaMemcpy(result, out_nonce, sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemcpy(match, out_match, sizeof(bool), cudaMemcpyDeviceToHost); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("error hs cuda hash: %s \n", cudaGetErrorString(error)); // TOOD: cudaFree? return HS_ENOSOLUTION; } cudaFree(out_nonce); cudaFree(out_match); if (*match) return HS_SUCCESS; return HS_ENOSOLUTION; }
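// Hedged sketch (not part of the original miner): hs_cuda_run() above returns
// from its error path without releasing out_nonce/out_match (note the
// "TOOD: cudaFree?" comment). A common fix is to route every exit through one
// cleanup label; cudaFree(nullptr) is a no-op, so the unconditional frees are
// safe. The function name and return values below are illustrative placeholders.
#include <cuda_runtime.h>
#include <cstdint>

static int32_t hs_cuda_run_cleanup_sketch()
{
    uint32_t *out_nonce = nullptr;
    bool *out_match = nullptr;
    int32_t rc = -1; // placeholder for HS_ENOSOLUTION

    if (cudaMalloc(&out_nonce, sizeof(uint32_t)) != cudaSuccess) goto done;
    if (cudaMalloc(&out_match, sizeof(bool)) != cudaSuccess) goto done;

    // ... copy constants, launch kernel_hs_hash and copy the results back,
    //     exactly as hs_cuda_run() does above ...

    if (cudaGetLastError() != cudaSuccess) goto done; // error path still frees
    rc = 0; // placeholder for HS_SUCCESS when a matching nonce was found

done:
    cudaFree(out_nonce);
    cudaFree(out_match);
    return rc;
}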
#include <cuda.h> #include <cuda_runtime.h> #define eps 1e-15 // for the older gpus atomicAdd with double arguments does not exist #if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__) static __inline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); }while (assumed != old); return __longlong_as_double(old); } #endif template<typename scalar_t> __global__ void dr_cuda_backword_color_batch( const scalar_t* __restrict__ grad_im_bxhxwxd, const scalar_t* __restrict__ im_bxhxwxd, const scalar_t* __restrict__ imidx_bxhxwx1, const scalar_t* __restrict__ imwei_bxhxwx3, const scalar_t* __restrict__ points2d_bxfx6, const scalar_t* __restrict__ features_bxfx3d, scalar_t* __restrict__ grad_points2d_bxfx6, scalar_t* __restrict__ grad_features_bxfx3d, scalar_t* __restrict__ debug_im_bxhxwx3, int bnum, int height, int width, int fnum, int dnum, int multiplier) { /* // thread index */ // bidx * height * width + heiidx * width + wididx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int wididx = presentthread % width; presentthread = (presentthread - wididx) / width; int heiidx = presentthread % height; int bidx = (presentthread - heiidx) / height; if (bidx >= bnum || heiidx >= height || wididx >= width) return; // which pixel it belongs to const int totalidx1 = bidx * height * width + heiidx * width + wididx; const int totalidx3 = totalidx1 * 3; const int totalidxd = totalidx1 * dnum; // coordinates scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width); scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1); // which face it belongs to? scalar_t fidx = imidx_bxhxwx1[totalidx1]; // face begins from 1 // convert it into int, use round! 
int fidxint = static_cast<int>(fidx + 0.5) - 1; // visible faces if (fidxint >= 0) { const int shift1 = bidx * fnum + fidxint; const int shift6 = shift1 * 6; const int shift3d = shift1 * 3 * dnum; // the imaging model is: // I(x, y) = w0 * c0 + w1 * c1 + w2 * c2 // gradient of colors // 3 points in one face for (int i = 0; i < 3; i++) { // directly use opengl weights scalar_t w = imwei_bxhxwx3[totalidx3 + i]; int pointshift = shift3d + i * dnum; // rgb value for (int rgb = 0; rgb < dnum; rgb++) { int colorshift = pointshift + rgb; // this should be atomic operation scalar_t * addr = grad_features_bxfx3d + colorshift; scalar_t val = grad_im_bxhxwxd[totalidxd + rgb] * w; atomicAdd(addr, val); } } // gradient of points // here, we calculate dl/dp // dl/dp = dldI * dI/dp // dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp // first // 4 coorinates scalar_t ax = points2d_bxfx6[shift6 + 0]; scalar_t ay = points2d_bxfx6[shift6 + 1]; scalar_t bx = points2d_bxfx6[shift6 + 2]; scalar_t by = points2d_bxfx6[shift6 + 3]; scalar_t cx = points2d_bxfx6[shift6 + 4]; scalar_t cy = points2d_bxfx6[shift6 + 5]; //////////////////////////////////////////////////////////////////////////////////// // replace with other variables scalar_t m = bx - ax; scalar_t p = by - ay; scalar_t n = cx - ax; scalar_t q = cy - ay; scalar_t s = x0 - ax; scalar_t t = y0 - ay; ////////////////////////////////////////////////////////////////////////////////////// scalar_t k1 = s * q - n * t; scalar_t k2 = m * t - s * p; scalar_t k3 = m * q - n * p; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// scalar_t dk1dm = 0; scalar_t dk1dn = -t; scalar_t dk1dp = 0; scalar_t dk1dq = s; scalar_t dk1ds = q; scalar_t dk1dt = -n; scalar_t dk2dm = t; scalar_t dk2dn = 0; scalar_t dk2dp = -s; scalar_t dk2dq = 0; scalar_t dk2ds = -p; scalar_t dk2dt = m; scalar_t dk3dm = q; scalar_t dk3dn = -p; scalar_t dk3dp = -n; scalar_t dk3dq = m; scalar_t dk3ds = 0; scalar_t dk3dt = 0; /////////////////////////////////////////////////////////////////////////////// // w1 = k1 / k3 // w2 = k2 / k3 // remember we need divide k3 ^ 2 scalar_t dw1dm = dk1dm * k3 - dk3dm * k1; scalar_t dw1dn = dk1dn * k3 - dk3dn * k1; scalar_t dw1dp = dk1dp * k3 - dk3dp * k1; scalar_t dw1dq = dk1dq * k3 - dk3dq * k1; scalar_t dw1ds = dk1ds * k3 - dk3ds * k1; scalar_t dw1dt = dk1dt * k3 - dk3dt * k1; scalar_t dw2dm = dk2dm * k3 - dk3dm * k2; scalar_t dw2dn = dk2dn * k3 - dk3dn * k2; scalar_t dw2dp = dk2dp * k3 - dk3dp * k2; scalar_t dw2dq = dk2dq * k3 - dk3dq * k2; scalar_t dw2ds = dk2ds * k3 - dk3ds * k2; scalar_t dw2dt = dk2dt * k3 - dk3dt * k2; ////////////////////////////////////////////////////////////////////////////////////// scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds); scalar_t dw1day = -(dw1dp + dw1dq + dw1dt); scalar_t dw1dbx = dw1dm; scalar_t dw1dby = dw1dp; scalar_t dw1dcx = dw1dn; scalar_t dw1dcy = dw1dq; scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds); scalar_t dw2day = -(dw2dp + dw2dq + dw2dt); scalar_t dw2dbx = dw2dm; scalar_t dw2dby = dw2dp; scalar_t dw2dcx = dw2dn; scalar_t dw2dcy = dw2dq; for (int rgb = 0; rgb < dnum; rgb++) { // the same color for 3 points // thus we can simplify it scalar_t c0 = features_bxfx3d[shift3d + rgb]; scalar_t c1 = features_bxfx3d[shift3d + dnum + rgb]; scalar_t c2 = features_bxfx3d[shift3d + dnum + dnum + rgb]; scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax; scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day; scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * 
dw2dbx; scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby; scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx; scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy; scalar_t dldI = multiplier * grad_im_bxhxwxd[totalidxd + rgb] / (k3 * k3 + eps); atomicAdd(grad_points2d_bxfx6 + shift6 + 0, dldI * dIdax); atomicAdd(grad_points2d_bxfx6 + shift6 + 1, dldI * dIday); atomicAdd(grad_points2d_bxfx6 + shift6 + 2, dldI * dIdbx); atomicAdd(grad_points2d_bxfx6 + shift6 + 3, dldI * dIdby); atomicAdd(grad_points2d_bxfx6 + shift6 + 4, dldI * dIdcx); atomicAdd(grad_points2d_bxfx6 + shift6 + 5, dldI * dIdcy); } } } template<typename scalar_t> __global__ void dr_cuda_backword_prob_batch( const scalar_t* __restrict__ grad_improb_bxhxwx1, const scalar_t* __restrict__ improb_bxhxwx1, const scalar_t* __restrict__ imidx_bxhxwx1, const scalar_t* __restrict__ probface_bxhxwxk, const scalar_t* __restrict__ probcase_bxhxwxk, const scalar_t* __restrict__ probdis_bxhxwxk, const scalar_t* __restrict__ probdep_bxhxwxk, const scalar_t* __restrict__ probacc_bxhxwxk, const scalar_t* __restrict__ points2d_bxfx6, scalar_t* __restrict__ grad_points2dprob_bxfx6, int bnum, int height, int width, int fnum, int knum, int multiplier, int sigmainv) { /* // thread index */ // bidx * height * width + heiidx * width + wididx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int wididx = presentthread % width; presentthread = (presentthread - wididx) / width; int heiidx = presentthread % height; int bidx = (presentthread - heiidx) / height; if (bidx >= bnum || heiidx >= height || wididx >= width) return; ////////////////////////////////////////////// // which pixel it belongs to const int totalidx1 = bidx * height * width + heiidx * width + wididx; const int totalidxk = totalidx1 * knum; ////////////////////////////////////////////// // coordinates scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width); scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1); ///////////////////////////////////// // which face it belongs to? scalar_t fidx = imidx_bxhxwx1[totalidx1]; // face begins from 1 // convert it into int, use round! int fidxint = static_cast<int>(fidx + 0.5) - 1; ///////////////////////////////////// // not covered by any faces if (fidxint < 0) { int fidxcover = fidxint; scalar_t dLdp = grad_improb_bxhxwx1[totalidx1]; scalar_t allprob = improb_bxhxwx1[totalidx1]; for (int kid = 0; kid < knum; kid++) { scalar_t fidx = probface_bxhxwxk[totalidxk + kid]; // face begins from 1 // convert it into int, use round! 
int fidxint = static_cast<int>(fidx + 0.5) - 1; if (fidxint < 0) break; const int shift1 = bidx * fnum + fidxint; const int shift6 = shift1 * 6; /////////////////////////////////////////// scalar_t prob = probdis_bxhxwxk[totalidxk + kid]; scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob) / (1.0 - prob + eps) * prob; /////////////////////////////////////////////////////////////////// scalar_t edgecase = probcase_bxhxwxk[totalidxk + kid]; int edgeid = static_cast<int>(edgecase + 0.5) - 1; ///////////////////////////////////////////////////////////// if (edgeid >= 3) { // point distance int pshift = shift6 + (edgeid - 3) * 2; scalar_t x1 = points2d_bxfx6[pshift + 0]; scalar_t y1 = points2d_bxfx6[pshift + 1]; scalar_t dLdx1 = dLdz * 2 * (x1 - x0); scalar_t dLdy1 = dLdz * 2 * (y1 - y0); atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier); } else { // perpendicular distance int pshift = shift6 + edgeid * 2; scalar_t x1 = points2d_bxfx6[pshift + 0]; scalar_t y1 = points2d_bxfx6[pshift + 1]; int pshift2 = shift6 + ((edgeid + 1) % 3) * 2; scalar_t x2 = points2d_bxfx6[pshift2 + 0]; scalar_t y2 = points2d_bxfx6[pshift2 + 1]; // ax + by + c = 0 scalar_t A = y2 - y1; scalar_t B = x1 - x2; scalar_t C = x2 * y1 - x1 * y2; // dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2) // up = ax + by + c // down = a^2 + b^2 // dissquare = up^2 / down scalar_t up = A * x0 + B * y0 + C; scalar_t down = A * A + B * B; scalar_t dissquare = up * up / (down + eps); ////////////////////////////////// scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + eps); scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + eps); scalar_t dzdC = 2 * up / (down + eps); scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC); scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA); scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB); scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC); atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 0, dLdx2 / multiplier); atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 1, dLdy2 / multiplier); } } } return; } void dr_cuda_backward_batch(at::Tensor grad_image_bxhxwxd, at::Tensor grad_improb_bxhxwx1, at::Tensor image_bxhxwxd, at::Tensor improb_bxhxwx1, at::Tensor imidx_bxhxwx1, at::Tensor imwei_bxhxwx3, at::Tensor probface_bxhxwxk, at::Tensor probcase_bxhxwxk, at::Tensor probdis_bxhxwxk, at::Tensor probdep_bxhxwxk, at::Tensor probacc_bxhxwxk, at::Tensor points2d_bxfx6, at::Tensor colors_bxfx3d, at::Tensor grad_points2d_bxfx6, at::Tensor grad_colors_bxfx3d, at::Tensor grad_points2dprob_bxfx6, at::Tensor debug_im_bxhxwx3, int multiplier, int sigmainv) { int bnum = grad_image_bxhxwxd.size(0); int height = grad_image_bxhxwxd.size(1); int width = grad_image_bxhxwxd.size(2); int dnum = grad_image_bxhxwxd.size(3); int fnum = grad_points2d_bxfx6.size(1); int knum = probface_bxhxwxk.size(3); // for bxhxw image size const int threadnum = 1024; const int totalthread = bnum * height * width; const int blocknum = totalthread / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks(blocknum, 1, 1); // we exchange block and thread! 
AT_DISPATCH_FLOATING_TYPES(grad_image_bxhxwxd.type(), "dr_cuda_backward_color_batch", ([&] { dr_cuda_backword_color_batch<scalar_t><<<blocks, threads>>>( grad_image_bxhxwxd.data<scalar_t>(), image_bxhxwxd.data<scalar_t>(), imidx_bxhxwx1.data<scalar_t>(), imwei_bxhxwx3.data<scalar_t>(), points2d_bxfx6.data<scalar_t>(), colors_bxfx3d.data<scalar_t>(), grad_points2d_bxfx6.data<scalar_t>(), grad_colors_bxfx3d.data<scalar_t>(), debug_im_bxhxwx3.data<scalar_t>(), bnum, height, width, fnum, dnum, multiplier); })); AT_DISPATCH_FLOATING_TYPES(grad_image_bxhxwxd.type(), "dr_cuda_backward_prob_batch", ([&] { dr_cuda_backword_prob_batch<scalar_t><<<blocks, threads>>>( grad_improb_bxhxwx1.data<scalar_t>(), improb_bxhxwx1.data<scalar_t>(), imidx_bxhxwx1.data<scalar_t>(), probface_bxhxwxk.data<scalar_t>(), probcase_bxhxwxk.data<scalar_t>(), probdis_bxhxwxk.data<scalar_t>(), probdep_bxhxwxk.data<scalar_t>(), probacc_bxhxwxk.data<scalar_t>(), points2d_bxfx6.data<scalar_t>(), grad_points2dprob_bxfx6.data<scalar_t>(), bnum, height, width, fnum, knum, multiplier, sigmainv); })); return; }
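// A small side note on the launch arithmetic used above (illustrative, not
// from the source): the dispatcher computes blocknum = totalthread / threadnum + 1,
// which rounds up but also schedules one extra, fully idle block whenever
// totalthread is an exact multiple of threadnum. The kernels bounds-check on
// bidx/heiidx/wididx, so results are unaffected; the usual ceil-division form
// simply avoids the spare block.
#include <cassert>

static inline int ceil_div(int total, int per_block)
{
    return (total + per_block - 1) / per_block;
}

static void launch_size_sketch()
{
    const int threadnum = 1024;
    assert(ceil_div(2048, threadnum) == 2); // exact multiple: 2 blocks
    assert(2048 / threadnum + 1 == 3);      // original form: 3 blocks (one idle)
    assert(ceil_div(2049, threadnum) == 3); // both forms agree otherwise
}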
#define at(A, r, c, ch) A[(r)*A##_step + (c)*CH + (ch)] #define AB_BITS 10 #define AB_SCALE (1 << AB_BITS) #define INTER_BITS 5 #define INTER_TAB_SIZE (1 << INTER_BITS) #define INTER_REMAP_COEF_BITS 15 #define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS) #define ROUND_DELTA (1 << (AB_BITS - INTER_BITS - 1)) #define rep(i, n) for (int i = 0; i < (n); ++i) #define BLOCK_THREADS_X0 64 #define BLOCK_THREADS_Y0 8 #define BLOCK_THREADS_X1 32 #define BLOCK_THREADS_Y1 8 #define PROCESS_PER_THREADS 8 namespace megdnn { namespace cuda { namespace warp_perspective { //! transform matrix __constant__ double M[9]; //! border_val __constant__ float border_val; using namespace megcv; __global__ void preprocess_trans(double* trans, const float* src) { //! The size is 9 #pragma unroll for (size_t i = 0; i < 9; i++) trans[i] = src[i]; } template <typename T, size_t CH, BorderMode bmode> __global__ void warp_perspective_cv_kernel_LAN_cacheToLandVECTOR( const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step) { int dc = threadIdx.x + blockIdx.x * blockDim.x; int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS); __shared__ double cols_data[BLOCK_THREADS_X1][3]; __shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3]; if (dr < dst_rows && dc < dst_cols) { if (threadIdx.y == 0) { cols_data[threadIdx.x][0] = M[0] * dc; cols_data[threadIdx.x][1] = M[3] * dc; cols_data[threadIdx.x][2] = M[6] * dc; } if (threadIdx.x == 0) { for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2]; rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5]; rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8]; } } } __syncthreads(); if (dr < dst_rows && dc < dst_cols) { for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2]; w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w; double fsc = (cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w; double fsr = (cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w; fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX; fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN; fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX; fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN; int sc = (int)lrint(fsc); int sr = (int)lrint(fsr); int fc = sc & (INTER_TAB_SIZE - 1); int fr = sr & (INTER_TAB_SIZE - 1); sc = sc >> INTER_BITS; sr = sr >> INTER_BITS; sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc); sr = sr < -32768 ? -32768 : (sr > 32767 ? 
32767 : sr); const int ksize = IModeTrait<INTER_LANCZOS4>::ksize; float coefr[ksize], coefc[ksize]; int x[ksize], y[ksize]; if (bmode == BORDER_TRANSPARENT && ((unsigned)sr >= (unsigned)src_rows || (unsigned)sc >= (unsigned)src_cols)) { continue; } interpolate_coefs<INTER_LANCZOS4>((float)fr / INTER_TAB_SIZE, coefr); interpolate_coefs<INTER_LANCZOS4>((float)fc / INTER_TAB_SIZE, coefc); const BorderMode bmode1 = BModeTrait<bmode>::bmode1; { #pragma unroll rep(k, ksize) { x[k] = border_interpolate<bmode1>( sr + k - (ksize / 2) + 1, src_rows); } #pragma unroll rep(k, ksize) { y[k] = border_interpolate<bmode1>( sc + k - (ksize / 2) + 1, src_cols); } } float sum[CH] = {0}; rep(kr, ksize) { if (x[kr] < 0) { #pragma unroll rep(ch, CH) sum[ch] += coefr[kr] * border_val; continue; } #pragma unroll rep(kc, ksize) { if (y[kc] < 0) { #pragma unroll rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; } } else { #pragma unroll rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch); } } } } #pragma unroll rep(ch, CH) { typedef typename TypeTrait<T>::WorkType WorkType; if (dr + i < dst_rows) { if (TypeTrait<T>::need_saturate) { at(dst, dr + i, dc, ch) = saturate<WorkType>( sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max()); } else { at(dst, dr + i, dc, ch) = sum[ch]; } } } } } } template <typename T, size_t CH, BorderMode bmode> __global__ void warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR( const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step) { int dc = threadIdx.x + blockIdx.x * blockDim.x; int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS); __shared__ double cols_data[BLOCK_THREADS_X1][3]; __shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3]; if (dr < dst_rows && dc < dst_cols) { if (threadIdx.y == 0) { cols_data[threadIdx.x][0] = M[0] * dc; cols_data[threadIdx.x][1] = M[3] * dc; cols_data[threadIdx.x][2] = M[6] * dc; } if (threadIdx.x == 0) { for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2]; rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5]; rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8]; } } } __syncthreads(); if (dr < dst_rows && dc < dst_cols) { for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2]; w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w; double fsc = (cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w; double fsr = (cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w; fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX; fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN; fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX; fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN; int sc = (int)lrint(fsc); int sr = (int)lrint(fsr); int fc = sc & (INTER_TAB_SIZE - 1); int fr = sr & (INTER_TAB_SIZE - 1); sc = sc >> INTER_BITS; sr = sr >> INTER_BITS; sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc); sr = sr < -32768 ? -32768 : (sr > 32767 ? 
32767 : sr); const int ksize = IModeTrait<INTER_CUBIC>::ksize; float coefr[ksize], coefc[ksize]; int x[ksize], y[ksize]; if (bmode == BORDER_TRANSPARENT && ((unsigned)sr >= (unsigned)src_rows || (unsigned)sc >= (unsigned)src_cols)) { continue; } interpolate_coefs<INTER_CUBIC>((float)fr / INTER_TAB_SIZE, coefr); interpolate_coefs<INTER_CUBIC>((float)fc / INTER_TAB_SIZE, coefc); const BorderMode bmode1 = BModeTrait<bmode>::bmode1; { #pragma unroll rep(k, ksize) { x[k] = border_interpolate<bmode1>( sr + k - (ksize / 2) + 1, src_rows); } #pragma unroll rep(k, ksize) { y[k] = border_interpolate<bmode1>( sc + k - (ksize / 2) + 1, src_cols); } } float sum[CH] = {0}; rep(kr, ksize) { if (x[kr] < 0) { #pragma unroll rep(ch, CH) sum[ch] += coefr[kr] * border_val; continue; } #pragma unroll rep(kc, ksize) { if (y[kc] < 0) { #pragma unroll rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; } } else { #pragma unroll rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch); } } } } #pragma unroll rep(ch, CH) { typedef typename TypeTrait<T>::WorkType WorkType; if (dr + i < dst_rows) { if (TypeTrait<T>::need_saturate) { at(dst, dr + i, dc, ch) = saturate<WorkType>( sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max()); } else { at(dst, dr + i, dc, ch) = sum[ch]; } } } } } } template <typename T, size_t CH, BorderMode bmode> __global__ void warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR( const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step) { int dc = threadIdx.x + blockIdx.x * blockDim.x; int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS); __shared__ double cols_data[BLOCK_THREADS_X1][3]; __shared__ double rows_data[BLOCK_THREADS_Y1 * PROCESS_PER_THREADS][3]; if (dr < dst_rows && dc < dst_cols) { if (threadIdx.y == 0) { cols_data[threadIdx.x][0] = M[0] * dc; cols_data[threadIdx.x][1] = M[3] * dc; cols_data[threadIdx.x][2] = M[6] * dc; } if (threadIdx.x == 0) { for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { rows_data[threadIdx.y + i][0] = M[1] * (dr + i) + M[2]; rows_data[threadIdx.y + i][1] = M[4] * (dr + i) + M[5]; rows_data[threadIdx.y + i][2] = M[7] * (dr + i) + M[8]; } } } __syncthreads(); if (dr < dst_rows && dc < dst_cols) { for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y + i][2]; w = (w == 0.000000) ? 0 : INTER_TAB_SIZE / w; double fsc = (cols_data[threadIdx.x][0] + rows_data[threadIdx.y + i][0]) * w; double fsr = (cols_data[threadIdx.x][1] + rows_data[threadIdx.y + i][1]) * w; fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX; fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN; fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX; fsr = fsr > (double)INT_MIN ? fsr : (double)INT_MIN; int sc = (int)lrint(fsc); int sr = (int)lrint(fsr); int fc = sc & (INTER_TAB_SIZE - 1); int fr = sr & (INTER_TAB_SIZE - 1); sc = sc >> INTER_BITS; sr = sr >> INTER_BITS; sc = sc < -32768 ? -32768 : (sc > 32767 ? 32767 : sc); sr = sr < -32768 ? -32768 : (sr > 32767 ? 
32767 : sr); const int ksize = IModeTrait<INTER_LINEAR>::ksize; float coefr[ksize], coefc[ksize]; int x[ksize], y[ksize]; if (bmode == BORDER_TRANSPARENT && ((unsigned)(sr + 1) >= (unsigned)src_rows || (unsigned)(sc + 1) >= (unsigned)src_cols)) { continue; } interpolate_coefs<INTER_LINEAR>((float)fr / INTER_TAB_SIZE, coefr); interpolate_coefs<INTER_LINEAR>((float)fc / INTER_TAB_SIZE, coefc); const BorderMode bmode1 = BModeTrait<bmode>::bmode1; { #pragma unroll rep(k, ksize) { x[k] = border_interpolate<bmode1>( sr + k - (ksize / 2) + 1, src_rows); } #pragma unroll rep(k, ksize) { y[k] = border_interpolate<bmode1>( sc + k - (ksize / 2) + 1, src_cols); } } float sum[CH] = {0}; rep(kr, ksize) { if (x[kr] < 0) { #pragma unroll rep(ch, CH) sum[ch] += coefr[kr] * border_val; continue; } #pragma unroll rep(kc, ksize) { if (y[kc] < 0) { #pragma unroll rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * border_val; } } else { #pragma unroll rep(ch, CH) { sum[ch] += coefr[kr] * coefc[kc] * at(src, x[kr], y[kc], ch); } } } } #pragma unroll rep(ch, CH) { typedef typename TypeTrait<T>::WorkType WorkType; if (dr + i < dst_rows) { if (TypeTrait<T>::need_saturate) { at(dst, dr + i, dc, ch) = saturate<WorkType>( sum[ch], TypeTrait<T>::min(), TypeTrait<T>::max()); } else { at(dst, dr + i, dc, ch) = sum[ch]; } } } } } } template <typename T, size_t CH, BorderMode bmode> __global__ void warp_perspective_cv_kernel_cacheToL_NEAREST( const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step) { #define SET_DST_CH_VALUE \ if (CH == 1) { \ dst[dst_address_increase] = src[src_address_increase]; \ } else { \ dst[dst_address_increase] = src[src_address_increase]; \ dst[dst_address_increase + 1] = src[src_address_increase + 1]; \ dst[dst_address_increase + 2] = src[src_address_increase + 2]; \ } int dc = threadIdx.x + blockIdx.x * blockDim.x; int dr = threadIdx.y + blockIdx.y * blockDim.y; __shared__ double cols_data[BLOCK_THREADS_X1][3]; __shared__ double rows_data[BLOCK_THREADS_Y1][3]; if (dr < dst_rows && dc < dst_cols) { if (threadIdx.y == 0) { cols_data[threadIdx.x][0] = M[0] * dc; cols_data[threadIdx.x][1] = M[3] * dc; cols_data[threadIdx.x][2] = M[6] * dc; } if (threadIdx.x == 0) { rows_data[threadIdx.y][0] = M[1] * dr + M[2]; rows_data[threadIdx.y][1] = M[4] * dr + M[5]; rows_data[threadIdx.y][2] = M[7] * dr + M[8]; } } __syncthreads(); if (dr < dst_rows && dc < dst_cols) { double w = cols_data[threadIdx.x][2] + rows_data[threadIdx.y][2]; w = (w == 0) ? 
0 : 1 / w; double fsc = (cols_data[threadIdx.x][0] + rows_data[threadIdx.y][0]) * w; double fsr = (cols_data[threadIdx.x][1] + rows_data[threadIdx.y][1]) * w; int sc = saturate_cast_short(fsc); int sr = saturate_cast_short(fsr); size_t dst_address_increase = dr * dst_step + dc * CH; if ((size_t)sc < src_cols && (size_t)sr < src_rows) { size_t src_address_increase = sr * src_step + sc * CH; SET_DST_CH_VALUE return; } if (bmode == BORDER_REPLICATE) { sr = saturate(sr, 0, (int)src_rows - 1); sc = saturate(sc, 0, (int)src_cols - 1); size_t src_address_increase = sr * src_step + sc * CH; SET_DST_CH_VALUE } else if (bmode == BORDER_CONSTANT) { if (CH == 1) { dst[dst_address_increase] = border_val; } else { dst[dst_address_increase + 0] = border_val; dst[dst_address_increase + 1] = border_val; dst[dst_address_increase + 2] = border_val; } } else if (bmode != BORDER_TRANSPARENT) { sr = border_interpolate<bmode>(sr, src_rows); sc = border_interpolate<bmode>(sc, src_cols); size_t src_address_increase = sr * src_step + sc * CH; src_address_increase = sr * src_step + sc * CH; SET_DST_CH_VALUE } } #undef SET_DST_CH_VALUE } template <typename T, size_t CH, BorderMode bmode> __global__ void warp_perspective_cv_kernel_NEAREST_VECTOR( const T* __restrict__ src, T* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step) { int dc = threadIdx.x + blockIdx.x * blockDim.x; int dr = threadIdx.y + blockIdx.y * (blockDim.y * PROCESS_PER_THREADS); #define SET_DST_CH_VALUE \ if (CH == 1) { \ dst[dst_address_increase] = src[src_address_increase]; \ } else { \ dst[dst_address_increase] = src[src_address_increase]; \ dst[dst_address_increase + 1] = src[src_address_increase + 1]; \ dst[dst_address_increase + 2] = src[src_address_increase + 2]; \ } if (dr < dst_rows && dc < dst_cols) { double w0 = M[6] * dc + M[7] * dr + M[8]; double fc0 = M[0] * dc + M[1] * dr + M[2]; double fr0 = M[3] * dc + M[4] * dr + M[5]; for (int i = 0; i < blockDim.y * PROCESS_PER_THREADS; i += blockDim.y) { if (dr + i >= dst_rows) return; //! To make the result equal to the naive version double w = w0 + M[7] * i; w = w ? 1. / w : 0; double fsc = (fc0 + M[1] * i) * w; double fsr = (fr0 + M[4] * i) * w; fsc = fsc < (double)INT_MAX ? fsc : (double)INT_MAX; fsc = fsc > (double)INT_MIN ? fsc : (double)INT_MIN; fsr = fsr < (double)INT_MAX ? fsr : (double)INT_MAX; fsr = fsr > (double)INT_MIN ? 
fsr : (double)INT_MIN; int sc = saturate_cast_short(fsc); int sr = saturate_cast_short(fsr); size_t dst_address_increase = (dr + i) * dst_step + dc * CH; if ((size_t)sc < src_cols && (size_t)sr < src_rows) { size_t src_address_increase = sr * src_step + sc * CH; SET_DST_CH_VALUE continue; } if (bmode == BORDER_REPLICATE) { sr = saturate(sr, 0, (int)src_rows - 1); sc = saturate(sc, 0, (int)src_cols - 1); size_t src_address_increase = sr * src_step + sc * CH; SET_DST_CH_VALUE } else if (bmode == BORDER_CONSTANT) { if (CH == 1) { dst[dst_address_increase] = border_val; } else { dst[dst_address_increase + 0] = border_val; dst[dst_address_increase + 1] = border_val; dst[dst_address_increase + 2] = border_val; } } else if (bmode != BORDER_TRANSPARENT) { sr = border_interpolate<bmode>(sr, src_rows); sc = border_interpolate<bmode>(sc, src_cols); size_t src_address_increase = sr * src_step + sc * CH; SET_DST_CH_VALUE } } } #undef SET_DST_CH_VALUE } template <typename T, size_t CH> void warp_perspective_cv_proxy( const T* src, T* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step, BorderMode bmode, InterpolationMode imode, const float* trans, const T bval, double* workspace, cudaStream_t stream) { preprocess_trans<<<1, 1, 0, stream>>>(workspace, trans); cuda_check(cudaStreamSynchronize(stream)); //! Copy trans to const memory cuda_check(cudaMemcpyToSymbol( M, workspace, sizeof(double) * 9, 0, cudaMemcpyHostToDevice)); //! Copy bval to const memory cuda_check(cudaMemcpyToSymbol( border_val, &bval, sizeof(float), 0, cudaMemcpyHostToDevice)); dim3 THREADS, BLOCKS; dim3 THREADS_VECTOR, BLOCKS_VECTOR; switch (imode) { case INTER_NEAREST: if (CH == 3 && sizeof(T) == sizeof(float)) { THREADS.x = BLOCK_THREADS_X1; THREADS.y = BLOCK_THREADS_Y1; BLOCKS.x = DIVUP(dst_cols, THREADS.x); BLOCKS.y = DIVUP(dst_rows, THREADS.y); switch (bmode) { case BORDER_REPLICATE: warp_perspective_cv_kernel_cacheToL_NEAREST< T, CH, BORDER_REPLICATE> <<<BLOCKS, THREADS, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT: warp_perspective_cv_kernel_cacheToL_NEAREST< T, CH, BORDER_REFLECT><<<BLOCKS, THREADS, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT_101: warp_perspective_cv_kernel_cacheToL_NEAREST< T, CH, BORDER_REFLECT_101> <<<BLOCKS, THREADS, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_WRAP: warp_perspective_cv_kernel_cacheToL_NEAREST<T, CH, BORDER_WRAP> <<<BLOCKS, THREADS, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_CONSTANT: warp_perspective_cv_kernel_cacheToL_NEAREST< T, CH, BORDER_CONSTANT><<<BLOCKS, THREADS, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_TRANSPARENT: warp_perspective_cv_kernel_cacheToL_NEAREST< T, CH, BORDER_TRANSPARENT> <<<BLOCKS, THREADS, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; default: break; } } else { THREADS_VECTOR.x = BLOCK_THREADS_X1; THREADS_VECTOR.y = BLOCK_THREADS_Y1; BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x); BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); switch (bmode) { case BORDER_REPLICATE: warp_perspective_cv_kernel_NEAREST_VECTOR< T, CH, BORDER_REPLICATE> 
<<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT: warp_perspective_cv_kernel_NEAREST_VECTOR<T, CH, BORDER_REFLECT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT_101: warp_perspective_cv_kernel_NEAREST_VECTOR< T, CH, BORDER_REFLECT_101> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_WRAP: warp_perspective_cv_kernel_NEAREST_VECTOR<T, CH, BORDER_WRAP> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_CONSTANT: warp_perspective_cv_kernel_NEAREST_VECTOR< T, CH, BORDER_CONSTANT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_TRANSPARENT: warp_perspective_cv_kernel_NEAREST_VECTOR< T, CH, BORDER_TRANSPARENT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; default: break; } } break; case INTER_LINEAR: { { cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); THREADS_VECTOR.x = BLOCK_THREADS_X1; THREADS_VECTOR.y = BLOCK_THREADS_Y1; BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x); BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS); switch (bmode) { case BORDER_REPLICATE: warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR< T, CH, BORDER_REPLICATE> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT: warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR< T, CH, BORDER_REFLECT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT_101: warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR< T, CH, BORDER_REFLECT_101> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_WRAP: warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR< T, CH, BORDER_WRAP> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_CONSTANT: warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR< T, CH, BORDER_CONSTANT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_TRANSPARENT: if (CH == 3) warp_perspective_cv_kernel_LINEAR_cacheToLAndVECTOR< T, CH, BORDER_TRANSPARENT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; default: break; } } } break; case INTER_CUBIC: THREADS_VECTOR.x = BLOCK_THREADS_X1; THREADS_VECTOR.y = BLOCK_THREADS_Y1; BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x); BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS); switch (bmode) { case BORDER_REPLICATE: warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR< T, CH, BORDER_REPLICATE> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT: warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR< T, CH, BORDER_REFLECT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, 
dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT_101: warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR< T, CH, BORDER_REFLECT_101> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_WRAP: warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR< T, CH, BORDER_WRAP> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_CONSTANT: warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR< T, CH, BORDER_CONSTANT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_TRANSPARENT: warp_perspective_cv_kernel_CUBIC_cacheToLAndVECTOR< T, CH, BORDER_TRANSPARENT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; default: break; } break; case INTER_LANCZOS4: { THREADS_VECTOR.x = BLOCK_THREADS_X1; THREADS_VECTOR.y = BLOCK_THREADS_Y1; BLOCKS_VECTOR.x = DIVUP(dst_cols, THREADS_VECTOR.x); BLOCKS_VECTOR.y = DIVUP(dst_rows, THREADS_VECTOR.y * PROCESS_PER_THREADS); switch (bmode) { case BORDER_REPLICATE: warp_perspective_cv_kernel_LAN_cacheToLandVECTOR< T, CH, BORDER_REPLICATE> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT: warp_perspective_cv_kernel_LAN_cacheToLandVECTOR< T, CH, BORDER_REFLECT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_REFLECT_101: warp_perspective_cv_kernel_LAN_cacheToLandVECTOR< T, CH, BORDER_REFLECT_101> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_WRAP: warp_perspective_cv_kernel_LAN_cacheToLandVECTOR<T, CH, BORDER_WRAP> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_CONSTANT: warp_perspective_cv_kernel_LAN_cacheToLandVECTOR< T, CH, BORDER_CONSTANT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; case BORDER_TRANSPARENT: warp_perspective_cv_kernel_LAN_cacheToLandVECTOR< T, CH, BORDER_TRANSPARENT> <<<BLOCKS_VECTOR, THREADS_VECTOR, 0, stream>>>( src, dst, src_rows, src_cols, dst_rows, dst_cols, src_step, dst_step); break; default: break; } } break; default: break; } } template void warp_perspective_cv_proxy<float, 1>( const float* src, float* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step, BorderMode bmode, InterpolationMode imode, const float* trans, const float border_val, double* workspace, cudaStream_t stream); template void warp_perspective_cv_proxy<uchar, 1>( const uchar* src, uchar* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step, BorderMode bmode, InterpolationMode imode, const float* trans, const uchar border_val, double* workspace, cudaStream_t stream); template void warp_perspective_cv_proxy<float, 3>( const float* src, float* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step, BorderMode bmode, InterpolationMode imode, 
const float* trans, const float border_val, double* workspace, cudaStream_t stream); template void warp_perspective_cv_proxy<uchar, 3>( const uchar* src, uchar* dst, const size_t src_rows, const size_t src_cols, const size_t dst_rows, const size_t dst_cols, const size_t src_step, const size_t dst_step, BorderMode bmode, InterpolationMode imode, const float* trans, const uchar border_val, double* workspace, cudaStream_t stream); } // namespace warp_perspective } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
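// Host-side sketch of the per-pixel coordinate mapping the kernels above
// perform (illustrative, not a drop-in replacement): apply the 3x3 matrix M to
// the destination pixel (dc, dr), divide by the homogeneous term scaled by
// INTER_TAB_SIZE, then split the rounded result into an integer source
// coordinate and a fractional table index. The INT_MIN/INT_MAX clamping and
// the short saturation from the device code are omitted for brevity; the
// _SK-suffixed constants avoid clashing with the macros defined above.
#include <cmath>
#include <cstdio>

namespace warp_sketch {

constexpr int INTER_BITS_SK = 5;
constexpr int INTER_TAB_SIZE_SK = 1 << INTER_BITS_SK;

inline void map_dst_to_src(const double m[9], int dc, int dr,
                           int& sc, int& sr, int& fc, int& fr)
{
    double w = m[6] * dc + m[7] * dr + m[8];
    w = (w == 0.0) ? 0.0 : INTER_TAB_SIZE_SK / w;
    double fsc = (m[0] * dc + m[1] * dr + m[2]) * w;
    double fsr = (m[3] * dc + m[4] * dr + m[5]) * w;
    int isc = static_cast<int>(std::lrint(fsc));
    int isr = static_cast<int>(std::lrint(fsr));
    fc = isc & (INTER_TAB_SIZE_SK - 1); // fractional index into the interp table
    fr = isr & (INTER_TAB_SIZE_SK - 1);
    sc = isc >> INTER_BITS_SK;          // integer source column
    sr = isr >> INTER_BITS_SK;          // integer source row
}

inline void map_example()
{
    const double m[9] = {1, 0, 10, 0, 1, 5, 0, 0, 1}; // pure translation by (10, 5)
    int sc, sr, fc, fr;
    map_dst_to_src(m, 3, 4, sc, sr, fc, fr);
    std::printf("src = (row %d, col %d), frac = (%d, %d)\n", sr, sc, fr, fc); // (9, 13), (0, 0)
}

} // namespace warp_sketch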
#ifdef _WIN32 #include <windows.h> #include <process.h> const char *empty = ""; const char *_COLOR_GREEN (bool suppress_color) { if (!suppress_color) { HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE); SetConsoleTextAttribute(hStdout, FOREGROUND_GREEN | FOREGROUND_INTENSITY); } return empty; } const char *_COLOR_RED (bool suppress_color) { if (!suppress_color) { HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE); SetConsoleTextAttribute(hStdout, FOREGROUND_RED | FOREGROUND_INTENSITY); } return empty; } const char *_COLOR_NONE (bool suppress_color) { if (!suppress_color) { HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE); SetConsoleTextAttribute(hStdout, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE); } return empty; } #else const char *red_color = "\033[31m"; const char *green_color = "\033[32m"; const char *no_color = "\033[0m"; const char *empty = ""; const char *_COLOR_GREEN (bool suppress_color) { return suppress_color ? empty : green_color; } const char *_COLOR_RED (bool suppress_color) { return suppress_color ? empty : red_color; } const char *_COLOR_NONE (bool suppress_color) { return suppress_color ? empty : no_color; } #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> #endif #define COLOR_RED _COLOR_RED(UnitTest::get_configuration().suppress_color) #define COLOR_NONE _COLOR_NONE(UnitTest::get_configuration().suppress_color) #define COLOR_GREEN _COLOR_GREEN(UnitTest::get_configuration().suppress_color) namespace amgx { void UnitTestDriver::register_test(UnitTest *test) { std::map< std::string, std::map<std::string, UnitTest *> >::iterator test_by_name = tests.find(test->name()); if (test_by_name == tests.end()) { tests[test->name()] = std::map<std::string, UnitTest *>(); test_by_name = tests.find(test->name()); } switch (test->getMode()) { #define AMGX_CASE_LINE(CASE) case CASE: test_by_name->second[std::string(ModeString<CASE>::getName())] = test; break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } int UnitTestDriver::run_test (const std::string &name, const std::string &mode, std::ostream &logfile) { logfile << name << " " << mode << ": "; logfile.flush(); std::map< std::string, std::map<std::string, UnitTest *> >::iterator test_by_name = tests.find(name); if (test_by_name == tests.end() || test_by_name->second.find(mode) == test_by_name->second.end()) { logfile << "[ERROR] Haven't found this test in the amgx" << std::endl; logfile.flush(); return -1; } else { int res = 0; UnitTest *test = test_by_name->second[mode]; test->start_test(); if (test->failed()) { test->print_assert(logfile); logfile << std::endl; res = 1; } else { logfile << "[PASSED]" << std::endl; } test_by_name->second.erase(mode); logfile.flush(); return res; } } int UnitTestDriver::run_tests (const std::vector< std::pair<std::string, std::string> > &tests, std::ostream &logfile) { std::ofstream process_log(f_worker_processed.c_str(), std::ios_base::app); amgx::initialize(); amgx::initializePlugins(); UnitTest::amgx_intialized = true; bool first_try = true; for (unsigned int i = 0; i < tests.size(); i++) { if (!UnitTest::amgx_intialized) { amgx::initialize(); amgx::initializePlugins(); } printf("[%4d/%4d] %s\t %s : ", i + 1, (int)(tests.size()), tests[i].first.c_str(), tests[i].second.c_str()); fflush(stdout); if (tests[i] != this->last_launched) { // first try of the test first_try = true; std::ofstream second_try_file(f_secondtry.c_str(), std::ios_base::out); second_try_file << tests[i].first.c_str() << std::endl << tests[i].second.c_str(); second_try_file.flush(); 
second_try_file.close(); } else { // second try of the test first_try = false; process_log << tests[i].first.c_str() << std::endl << tests[i].second.c_str(); process_log.flush(); remove(f_secondtry.c_str()); std::ofstream second_try_file_name(f_secondtry_name.c_str(), std::ios_base::out); second_try_file_name << tests[i].first.c_str() << std::endl << tests[i].second.c_str(); second_try_file_name.close(); } fflush(stdout); int res = run_test(tests[i].first, tests[i].second, logfile); if (res != 0) { std::cout << "[" << COLOR_RED << "FAILED" << COLOR_NONE << "]" << std::endl; if (first_try) { // if first try - exit and launch same test in new process exit(512); } else { // if second try - failed. write result to the log process_log << std::endl << 1 << std::endl; } } else { if (first_try) { process_log << tests[i].first.c_str() << std::endl << tests[i].second.c_str(); } process_log << std::endl << 0 << std::endl; std::cout << "[" << COLOR_GREEN << "PASSED" << COLOR_NONE << "]" << std::endl; } remove(f_secondtry.c_str()); logfile.flush(); std::cout.flush(); } if (UnitTest::amgx_intialized) { amgx::finalizePlugins(); amgx::finalize(); } UnitTest::amgx_intialized = false; process_log.close(); return 0; } UnitTest *UnitTestDriver::get_test (const std::string &name, const std::string &mode) const { std::map< std::string, std::map<std::string, UnitTest *> >::const_iterator test_by_name = tests.find(name); if (test_by_name == tests.end()) { return NULL; } std::map<std::string, UnitTest *>::const_iterator titer = test_by_name->second.find(mode); return (titer == test_by_name->second.end()) ? NULL : titer->second; } void UnitTestDriver::print_all_tests(std::ostream &str) { std::map< std::string, std::map<std::string, UnitTest *> >::iterator test_by_name = tests.begin(); for (; test_by_name != tests.end(); ++test_by_name) { str << test_by_name->first << ": "; std::map<std::string, UnitTest *>::iterator test_by_mode = test_by_name->second.begin(); for (; test_by_mode != test_by_name->second.end(); ++test_by_mode) { str << test_by_mode->first; } str << "\n"; } } /////////////////////////////////////////////////////////////////////// /////// UnitTestDriverFramework /////////// /////////////////////////////////////////////////////////////////////// // this thing runs workers, manages output and stuff... 
UnitTestDriverFramework::~UnitTestDriverFramework() { } void UnitTestDriverFramework::register_test(UnitTest *test) { test_driver.register_test(test); all_test_names.insert(test->name()); } void UnitTestDriverFramework::do_work() { std::ifstream schedule(f_schedule.c_str()); std::ifstream processed(f_worker_processed.c_str()); std::vector< std::pair<std::string, std::string> > processed_tests; std::vector< std::pair<std::string, std::string> > to_process; if (processed.is_open()) { while (!processed.eof()) { std::string name, mode, stub; int tres; // we don't need processed tests result now std::getline(processed, name); std::getline(processed, mode); processed >> tres; std::getline(processed, stub); processed_tests.push_back(std::make_pair(name, mode)); } processed.close(); } std::ifstream last_launched_file(f_secondtry.c_str(), std::ios_base::in); if (last_launched_file.is_open()) { test_driver.check_last_launch = true; std::string name, mode; std::getline(last_launched_file, name); std::getline(last_launched_file, mode); test_driver.last_launched = std::make_pair(name, mode); printf("Launching test %s %s in the new process for the second time\n", name.c_str(), mode.c_str()); last_launched_file.close(); remove(f_secondtry.c_str()); } if (schedule.is_open()) { while (!schedule.eof()) { std::string name, mode; std::getline(schedule, name); std::getline(schedule, mode); if (std::find(processed_tests.begin(), processed_tests.end(), std::make_pair(name, mode)) == processed_tests.end()) { to_process.push_back(std::make_pair(name, mode)); } } schedule.close(); } std::ofstream unit_test_log(f_assert_log.c_str(), std::ios_base::app); test_driver.run_tests(to_process, unit_test_log); unit_test_log.close(); printf("Done!\n"); fflush(stdout); // retrieve all required tests } int UnitTestDriverFramework::run_tests(int &total, int &failed, const std::vector<std::string> &str_modes, const std::vector<std::string> &kwords, const std::vector<std::string> &tests, const std::vector<std::string> &params) { // clear stuff remove(f_schedule.c_str()); remove(f_assert_log.c_str()); remove(f_worker_processed.c_str()); remove(f_statfile.c_str()); remove(f_secondtry.c_str()); remove(f_secondtry_name.c_str()); // write schedule total = 0; std::ofstream schedule(f_schedule.c_str(), std::ios_base::out | std::ios_base::trunc); bool need_cr = false; for (unsigned int i = 0; i < tests.size(); i++) { for (unsigned int j = 0; j < str_modes.size(); j++) { UnitTest *test = test_driver.get_test(tests[i], str_modes[j]); if (test != NULL) { bool write_test = true; if (kwords.size() > 0) { write_test = false; std::vector<std::string> str_keywords; split(test->getKeywords(), ';', str_keywords); for (unsigned int k = 0; k < str_keywords.size(); k++) { if (std::find(kwords.begin(), kwords.end(), str_keywords[k]) != kwords.end()) { write_test = true; break; } } } if (write_test) { if (need_cr) { schedule << std::endl; } schedule << tests[i] << std::endl << str_modes[j]; total ++; need_cr = true; } } } } schedule.close(); //printf("Going to process total %d tests\n", total); int processed = 0; const UnitTestConfiguration &cfg = UnitTest::get_configuration(); // creating arguments to spawn child std::vector<std::string> args; std::vector<std::string> targs; std::vector<char *> raw_args; { std::ostringstream oss; args.push_back(cfg.program_name); if (cfg.repeats != 1) { args.push_back("--repeat"); oss << cfg.repeats; args.push_back(oss.str()); oss.str(std::string()); }; if (cfg.random_seed != -1) { args.push_back("--seed"); oss << 
cfg.random_seed; args.push_back(oss.str()); oss.str(std::string()); }; args.push_back("--data"); args.push_back(cfg.data_folder); if (cfg.verbose) { args.push_back("--verbose"); } if (cfg.suppress_color) { args.push_back("--suppress_color"); } args.push_back("--child"); targs = args; for (unsigned int i = 0; i < targs.size(); i++) { raw_args.push_back(const_cast<char *>(targs[i].c_str())); } raw_args.push_back((char *)0); } while (processed < total) { printf("Spawning new worker\n"); fflush(stdout); int ret_code = -1; // spawn and wait for a worker to finish //@TODO: add timeouts (ez) #ifdef _WIN32 STARTUPINFO si; PROCESS_INFORMATION pi; std::string long_param; for (unsigned int i = 0; i < targs.size(); i++) { long_param += targs[i] + " "; } ZeroMemory( &si, sizeof(si) ); si.cb = sizeof(si); ZeroMemory( &pi, sizeof(pi) ); if ( !CreateProcess( args[0].c_str(), // No module name (use command line) const_cast<LPSTR>(long_param.c_str()), // Command line NULL, // Process handle not inheritable NULL, // Thread handle not inheritable FALSE, // Set handle inheritance to FALSE 0, // No creation flags NULL, // Use parent's environment block NULL, // Use parent's starting directory &si, // Pointer to STARTUPINFO structure &pi ) // Pointer to PROCESS_INFORMATION structure ) { std::cout << "[SYSTEM ERROR] Cannot spawn unit test Win32 process" << std::endl; return 1; } // Wait until child process exits. DWORD win_ret_code; if (!WaitForSingleObject( pi.hProcess, INFINITE )) if (GetExitCodeProcess(pi.hProcess, &win_ret_code)) { ret_code = win_ret_code; } // Close process and thread handles. CloseHandle( pi.hProcess ); CloseHandle( pi.hThread ); #else int local_ret_code; int phandle = fork(); if (phandle == 0) { local_ret_code = execv (raw_args[0], &(raw_args[0])); _exit(local_ret_code); } pid_t ws = waitpid( phandle, &local_ret_code, 0); if ( !WIFEXITED(local_ret_code) ) { ret_code = 1; } else if (WIFSIGNALED(local_ret_code)) { ret_code = 1; } else { ret_code = local_ret_code; } #endif // win32 // Child returns nonzero only in the case of emergency exit which means last test fail. // We assume that child haven't written fail status to log file, launched file and // console output doe to this emergency exit // Actually this assuming is not always true. // segfault or something if (ret_code > 0 && ret_code < 512) { std::cout << "[" << COLOR_RED << "FAILED" << COLOR_NONE << "]\n"; std::cout.flush(); std::ofstream fulog(f_assert_log.c_str(), std::ios_base::app); fulog << "[SYSTEM ERROR] Cannot find unit test log. Looks like unit test hasn't finished properly" << std::endl; fulog.close(); // check if last test was launched for the first time? 
std::ifstream last_launched_file(f_secondtry.c_str(), std::ios_base::in); if (!last_launched_file.is_open()) { // if not, just report as failed std::ofstream fuproc(f_worker_processed.c_str(), std::ios_base::app); fuproc << std::endl << 1 << std::endl; fuproc.close(); std::ifstream second_try_file_name(f_secondtry_name.c_str(), std::ios_base::in); std::string name, mode; std::getline(second_try_file_name, name); std::getline(second_try_file_name, mode); second_try_file_name.close(); remove(f_secondtry_name.c_str()); } else { // if yes - launch for the second time //std::string name, mode; //std::getline(last_launched_file, name); //std::getline(last_launched_file, mode); last_launched_file.close(); } } // manual exit() else if (ret_code == 512) { //do nothing, process made everything by himself } // peek into the processing log to count processed unit tests std::ifstream fuproc(f_worker_processed.c_str(), std::ios_base::in); int cur_processed = 0; failed = 0; { std::string t1; int tres; while (!fuproc.eof()) { std::getline(fuproc, t1); if (fuproc.eof()) { break; } std::getline(fuproc, t1); fuproc >> tres; if (tres != 0) { failed++; } std::getline(fuproc, t1); cur_processed ++; } } fuproc.close(); processed = cur_processed; } // print all asserts: std::ifstream fulog(f_assert_log.c_str(), std::ios_base::in); std::string line; std::cout << "\n---------------------------------------------------------\n" "\nUnit test logs:\n"; if (fulog.is_open()) { while (!fulog.eof()) { std::getline(fulog, line); std::cout << line << std::endl; } } else { std::cout << "Cannot retrieve tests logs" << std::endl; } std::cout << std::endl; printf("Total tests run: %d\t Failed tests: %d\n", total, failed); // clear temp stuff remove(f_schedule.c_str()); remove(f_assert_log.c_str()); remove(f_secondtry.c_str()); remove(f_secondtry_name.c_str()); return 0; } int UnitTestDriverFramework::run_all_tests(int &total, int &failed, const std::vector<std::string> &modes, const std::vector<std::string> &kwords, const std::vector<std::string> &params) { std::vector<std::string> names_vector; for (std::set<std::string>::iterator iter = all_test_names.begin(); iter != all_test_names.end(); ++iter) { names_vector.push_back(*iter); } return run_tests(total, failed, modes, kwords, names_vector, params); } UnitTestDriverFramework &UnitTestDriverFramework::framework() { static UnitTestDriverFramework s_instance; return s_instance; } } // end namespace
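// Illustrative sketch of the worker "processed" log format that run_tests()
// writes and do_work()/run_tests() re-read above: each record is a test name
// line, a mode line, and an integer result line (0 = passed, nonzero = failed).
// This standalone parser mirrors that layout; the struct and function names
// are assumptions, not part of the framework.
#include <fstream>
#include <string>
#include <vector>

struct ProcessedRecord
{
    std::string name;
    std::string mode;
    int result;
};

inline std::vector<ProcessedRecord> read_processed_log(const std::string &path)
{
    std::vector<ProcessedRecord> records;
    std::ifstream in(path.c_str());
    std::string name, mode, rest;

    while (std::getline(in, name) && std::getline(in, mode))
    {
        ProcessedRecord rec;
        rec.name = name;
        rec.mode = mode;
        rec.result = 0;

        if (!(in >> rec.result)) { break; } // malformed tail: stop parsing
        std::getline(in, rest);             // consume the rest of the result line

        records.push_back(rec);
    }

    return records;
}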
//-------------------------------------------------------------------------------- // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met : // // *Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright(c) 2019, Sergen Eren // All rights reserved. //---------------------------------------------------------------------------------- // // Version 1.0.1: Sergen Eren, 30/10/2019 // // File: Custom path trace kernel: // Performs custom path tracing // //----------------------------------------------- #define DDA_STEP_TRUE #define _USE_MATH_DEFINES #include <cmath> #include <stdio.h> #include <float.h> // Cuda includes #include <cuda_runtime.h> #include <curand_kernel.h> #include <device_launch_parameters.h> #include "helper_math.h" typedef unsigned char uchar; typedef unsigned int uint; typedef unsigned short ushort; typedef unsigned long ulong; typedef unsigned long long uint64; // Internal includes #include "kernel_params.h" #include "atmosphere/definitions.h" #include "atmosphere/constants.h" #include "gpu_vdb.h" #include "camera.h" #include "light.h" #include "bvh/bvh.h" //#include "geometry/sphere.h" #include "geometry/geometry.h" // Definitions // #define COMBINED_SCATTERING_TEXTURES //#define RATIO_TRACKING #define RESIDUAL_RATIO_TRACKING #define BLACK make_float3(0.0f, 0.0f, 0.0f) #define WHITE make_float3(1.0f, 1.0f, 1.0f) #define RED make_float3(1.0f, 0.0f, 0.0f) #define GREEN make_float3(0.0f, 1.0f, 0.0f) #define BLUE make_float3(0.0f, 0.0f, 1.0f) #define EPS 0.001f #define INV_2_PI 1.0f / (2.0f * M_PI) #define INV_4_PI 1.0f / (4.0f * M_PI) #define INV_PI 1.0f / M_PI // Helper functions __device__ inline void coordinate_system( float3 v1, float3& v2, float3& v3) { if (fabsf(v1.x) > fabsf(v1.y)) v2 = make_float3(-v1.z, 0.0f, v1.x); else v2 = make_float3(0.0f, v1.z, -v1.y); v2 = normalize(v2); v3 = normalize(cross(v1, v2)); } __device__ inline float3 spherical_direction( float sinTheta, float cosTheta, float phi, float3 x, float3 y, float3 z) { return x * sinTheta * cosf(phi) + y * sinTheta * sinf(phi) + z * cosTheta; } __device__ inline float degree_to_radians( float degree) { return degree * M_PI / 180.0f; } __device__ 
inline float3 degree_to_cartesian( float azimuth, float elevation) { float az = clamp(azimuth, .0f, 360.0f); float el = clamp(elevation, -90.0f, 90.0f); az = degree_to_radians(az); el = degree_to_radians(90.0f - el); float x = sinf(el) * cosf(az); float y = cosf(el); float z = sinf(el) * sinf(az); return normalize(make_float3(x, y, z)); } __device__ inline float tex_lookup_1d( cudaTextureObject_t tex, float v) { const float texval = tex1D<float>(tex, v); return texval; } __device__ inline float tex_lookup_2d( cudaTextureObject_t tex, float u, float v) { const float texval = tex2D<float>(tex, u, v); return texval; } // Environment light samplers __device__ inline float draw_sample_from_distribution( Kernel_params kernel_params, Rand_state rand_state, float3& wo) { float xi = rand(&rand_state); float zeta = rand(&rand_state); float pdf = 1.0f; int v = 0; int res = kernel_params.env_sample_tex_res; // Find marginal row number // Find interval int first = 0, len = res; while (len > 0) { int half = len >> 1, middle = first + half; if (tex_lookup_1d(kernel_params.env_marginal_cdf_tex, middle) <= xi) { first = middle + 1; len -= half + 1; } else len = half; } v = clamp(first - 1, 0, res - 2); float dv = xi - tex_lookup_1d(kernel_params.env_marginal_cdf_tex, v); float d_cdf_marginal = tex_lookup_1d(kernel_params.env_marginal_cdf_tex, v + 1) - tex_lookup_1d(kernel_params.env_marginal_cdf_tex, v); if (d_cdf_marginal > .0f) dv /= d_cdf_marginal; // Calculate marginal pdf float marginal_pdf = tex_lookup_1d(kernel_params.env_marginal_func_tex, v + dv) / kernel_params.env_marginal_int; // calculate Φ (elevation) float theta = ((float(v) + dv) / float(res)) * M_PI; // v is now our row number. find the conditional value and pdf from v int u; first = 0, len = res; while (len > 0) { int half = len >> 1, middle = first + half; if (tex_lookup_2d(kernel_params.env_cdf_tex, middle, v) <= zeta) { first = middle + 1; len -= half + 1; } else len = half; } u = clamp(first - 1, 0, res - 2); float du = zeta - tex_lookup_2d(kernel_params.env_cdf_tex, u, v); float d_cdf_conditional = tex_lookup_2d(kernel_params.env_cdf_tex, u + 1, v) - tex_lookup_2d(kernel_params.env_cdf_tex, u, v); if (d_cdf_conditional > 0) du /= d_cdf_conditional; //Calculate conditional pdf float conditional_pdf = tex_lookup_2d(kernel_params.env_func_tex, u + du, v) / tex_lookup_1d(kernel_params.env_marginal_func_tex, v); // Find the θ (azimuth) float phi = ((float(u) + du) / float(res)) * M_PI * 2.0f; float cos_theta = cosf(theta); float sin_theta = sinf(theta); float sin_phi = sinf(phi); float cos_phi = cosf(phi); float3 sundir = normalize(make_float3(sinf(kernel_params.azimuth) * cosf(kernel_params.elevation), sinf(kernel_params.azimuth) * sinf(kernel_params.elevation), cosf(kernel_params.azimuth))); wo = normalize(make_float3(sin_theta * cos_phi, sin_theta * sin_phi, cos_theta)); pdf = (marginal_pdf * conditional_pdf) / (2 * M_PI * M_PI * sin_theta); //if (kernel_params.debug) printf("\n%f %f %f %d %d", ((float(u) + du) / float(res)), ((float(v) + dv) / float(res)), pdf, u, v); //if (kernel_params.debug) printf("\n%f %f %f %f", wo.x, wo.y,wo.z, dot(wo, sundir)); return pdf; } //Phase functions pdf __device__ inline float draw_pdf_from_distribution(Kernel_params kernel_params, float2 point) { int res = kernel_params.env_sample_tex_res; int iu = clamp(int(point.x * res), 0, res - 1); int iv = clamp(int(point.y * res), 0, res - 1); float conditional = tex_lookup_2d(kernel_params.env_func_tex, iu, iv); float marginal = 
tex_lookup_1d(kernel_params.env_marginal_func_tex, iv); return conditional / marginal; } __device__ inline float isotropic() { return INV_4_PI; } __device__ inline float double_henyey_greenstein( float cos_theta, float f, float g1, float g2) { return f * henyey_greenstein(cos_theta, g1) + (1 - f) * henyey_greenstein(cos_theta, g2); } //Phase function direction samplers __device__ inline float sample_spherical( Rand_state rand_state, float3& wi) { float phi = (float)(2.0f * M_PI) * rand(&rand_state); float cos_theta = 1.0f - 2.0f * rand(&rand_state); float sin_theta = sqrtf(1.0f - cos_theta * cos_theta); wi = make_float3(cosf(phi) * sin_theta, sinf(phi) * sin_theta, cos_theta); return isotropic(); } __device__ inline float sample_hg( float3& wo, Rand_state& randstate, float g) { float cos_theta; if (fabsf(g) < EPS) cos_theta = 1 - 2 * rand(&randstate); else { float sqr_term = (1 - g * g) / (1 - g + 2 * g * rand(&randstate)); cos_theta = (1 + g * g - sqr_term * sqr_term) / (2 * g); } float sin_theta = sqrtf(fmaxf(.0f, 1.0f - cos_theta * cos_theta)); float phi = (float)(2.0 * M_PI) * rand(&randstate); float3 v1, v2; coordinate_system(wo * -1.0f, v1, v2); wo = spherical_direction(sin_theta, cos_theta, phi, v1, v2, wo); return henyey_greenstein(-cos_theta, g); } __device__ inline float sample_double_hg( float3& wi, Rand_state randstate, float f, float g1, float g2) { wi *= -1.0f; float3 v1 = wi, v2 = wi; float cos_theta1, cos_theta2; if (f > 0.9999f) { cos_theta1 = sample_hg(v1, randstate, g1); wi = v1; return henyey_greenstein(cos_theta1, g1); } else if (f < EPS) { cos_theta2 = sample_hg(v2, randstate, g2); wi = v2; return henyey_greenstein(cos_theta2, g2); } else { cos_theta1 = sample_hg(v1, randstate, g1); cos_theta2 = sample_hg(v2, randstate, g2); wi = lerp(v1, v2, 1 - f); float cos_theta = lerp(cos_theta1, cos_theta2, 1 - f); return double_henyey_greenstein(cos_theta, f, g1, g2); } } // Atmosphere Functions //#define COMBINED_SCATTERING_TEXTURES __device__ float ClampCosine(float mu) { return clamp(mu, float(-1.0), float(1.0)); } __device__ float ClampDistance(float d) { return fmaxf(d, 0.0 * m); } __device__ float ClampRadius(const AtmosphereParameters atmosphere, float r) { return clamp(r, atmosphere.bottom_radius, atmosphere.top_radius); } __device__ float SafeSqrt(float a) { return sqrtf(fmaxf(a, 0.0 * m2())); } __device__ float DistanceToTopAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu) { float discriminant = r * r * (mu * mu - 1.0) + atmosphere.top_radius * atmosphere.top_radius; return ClampDistance(-r * mu + SafeSqrt(discriminant)); } __device__ float DistanceToBottomAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu) { float discriminant = r * r * (mu * mu - 1.0) + atmosphere.bottom_radius * atmosphere.bottom_radius; return ClampDistance(-r * mu - SafeSqrt(discriminant)); } __device__ bool RayIntersectsGround(const AtmosphereParameters atmosphere, float r, float mu) { return mu < 0.0 && r * r * (mu * mu - 1.0) + atmosphere.bottom_radius * atmosphere.bottom_radius >= 0.0 * m2(); } __device__ float GetLayerDensity(const DensityProfileLayer layer, float altitude) { float density = layer.exp_term * exp(layer.exp_scale * altitude) + layer.linear_term * altitude + layer.const_term; return clamp(density, float(0.0), float(1.0)); } __device__ float GetProfileDensity(const DensityProfile profile, float altitude) { return altitude < profile.layers[0].width ? 
GetLayerDensity(profile.layers[0], altitude) : GetLayerDensity(profile.layers[1], altitude); } __device__ float GetTextureCoordFromUnitRange(float x, int texture_size) { return 0.5 / float(texture_size) + x * (1.0 - 1.0 / float(texture_size)); } __device__ float GetUnitRangeFromTextureCoord(float u, int texture_size) { return (u - 0.5 / float(texture_size)) / (1.0 - 1.0 / float(texture_size)); } __device__ float2 GetTransmittanceTextureUvFromRMu(const AtmosphereParameters atmosphere, float r, float mu) { // Distance to top atmosphere boundary for a horizontal ray at ground level. float H = sqrtf(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius); // Distance to the horizon. float rho = SafeSqrt(r * r - atmosphere.bottom_radius * atmosphere.bottom_radius); // Distance to the top atmosphere boundary for the ray (r,mu), and its minimum // and maximum values over all mu - obtained for (r,1) and (r,mu_horizon). float d = DistanceToTopAtmosphereBoundary(atmosphere, r, mu); float d_min = atmosphere.top_radius - r; float d_max = rho + H; float x_mu = (d - d_min) / (d_max - d_min); float x_r = rho / H; return make_float2(GetTextureCoordFromUnitRange(x_mu, TRANSMITTANCE_TEXTURE_WIDTH), GetTextureCoordFromUnitRange(x_r, TRANSMITTANCE_TEXTURE_HEIGHT)); } __device__ void GetRMuFromTransmittanceTextureUv(const AtmosphereParameters atmosphere, float2 uv, float& r, float& mu) { float x_mu = GetUnitRangeFromTextureCoord(uv.x, TRANSMITTANCE_TEXTURE_WIDTH); float x_r = GetUnitRangeFromTextureCoord(uv.y, TRANSMITTANCE_TEXTURE_HEIGHT); // Distance to top atmosphere boundary for a horizontal ray at ground level. float H = sqrt(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius); // Distance to the horizon, from which we can compute r: float rho = H * x_r; r = sqrt(rho * rho + atmosphere.bottom_radius * atmosphere.bottom_radius); // Distance to the top atmosphere boundary for the ray (r,mu), and its minimum // and maximum values over all mu - obtained for (r,1) and (r,mu_horizon) - // from which we can recover mu: float d_min = atmosphere.top_radius - r; float d_max = rho + H; float d = d_min + x_mu * (d_max - d_min); mu = d == 0.0 * m ? 
float(1.0) : (H * H - rho * rho - d * d) / (2.0 * r * d); mu = ClampCosine(mu); } __device__ float3 GetTransmittanceToTopAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu) { float2 uv = GetTransmittanceTextureUvFromRMu(atmosphere, r, mu); const float3 texval = make_float3(tex2D<float4>(atmosphere.transmittance_texture, uv.x, uv.y)); return texval; } __device__ float3 GetTransmittance(const AtmosphereParameters atmosphere, float r, float mu, float d, bool ray_r_mu_intersects_ground) { float r_d = ClampRadius(atmosphere, sqrt(d * d + 2.0 * r * mu * d + r * r)); float mu_d = ClampCosine((r * mu + d) / r_d); if (ray_r_mu_intersects_ground) { return fminf(GetTransmittanceToTopAtmosphereBoundary(atmosphere, r_d, -mu_d) / GetTransmittanceToTopAtmosphereBoundary(atmosphere, r, -mu), make_float3(1.0f)); } else { return fminf(GetTransmittanceToTopAtmosphereBoundary(atmosphere, r, mu) / GetTransmittanceToTopAtmosphereBoundary(atmosphere, r_d, mu_d), make_float3(1.0)); } } __device__ float3 GetTransmittanceToSun(const AtmosphereParameters atmosphere, float r, float mu_s) { float sin_theta_h = atmosphere.bottom_radius / r; float cos_theta_h = -sqrtf(max(1.0 - sin_theta_h * sin_theta_h, 0.0)); return GetTransmittanceToTopAtmosphereBoundary( atmosphere, r, mu_s) * smoothstep(-sin_theta_h * atmosphere.sun_angular_radius / rad, sin_theta_h * atmosphere.sun_angular_radius / rad, mu_s - cos_theta_h); } __device__ float DistanceToNearestAtmosphereBoundary(const AtmosphereParameters atmosphere, float r, float mu, bool ray_r_mu_intersects_ground) { if (ray_r_mu_intersects_ground) { return DistanceToBottomAtmosphereBoundary(atmosphere, r, mu); } else { return DistanceToTopAtmosphereBoundary(atmosphere, r, mu); } } __device__ float RayleighPhaseFunction(float nu) { float k = 3.0 / (16.0 * PI * sr); return k * (1.0 + nu * nu); } __device__ float MiePhaseFunction(float g, float nu) { float k = 3.0 / (8.0 * PI * sr) * (1.0 - g * g) / (2.0 + g * g); return k * (1.0 + nu * nu) / pow(1.0 + g * g - 2.0 * g * nu, 1.5); } __device__ float4 GetScatteringTextureUvwzFromRMuMuSNu(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground) { // Distance to top atmosphere boundary for a horizontal ray at ground level. float H = sqrt(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius); // Distance to the horizon. float rho = SafeSqrt(r * r - atmosphere.bottom_radius * atmosphere.bottom_radius); float u_r = GetTextureCoordFromUnitRange(rho / H, SCATTERING_TEXTURE_R_SIZE); // Discriminant of the quadratic equation for the intersections of the ray // (r,mu) with the ground (see RayIntersectsGround). float r_mu = r * mu; float discriminant = r_mu * r_mu - r * r + atmosphere.bottom_radius * atmosphere.bottom_radius; float u_mu; if (ray_r_mu_intersects_ground) { // Distance to the ground for the ray (r,mu), and its minimum and maximum // values over all mu - obtained for (r,-1) and (r,mu_horizon). float d = -r_mu - SafeSqrt(discriminant); float d_min = r - atmosphere.bottom_radius; float d_max = rho; u_mu = 0.5 - 0.5 * GetTextureCoordFromUnitRange(d_max == d_min ? 0.0 : (d - d_min) / (d_max - d_min), SCATTERING_TEXTURE_MU_SIZE / 2); } else { // Distance to the top atmosphere boundary for the ray (r,mu), and its // minimum and maximum values over all mu - obtained for (r,1) and // (r,mu_horizon). 
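// Note: this non-ground branch maps u_mu into the upper half [0.5, 1] of the texture, while the ground-intersecting branch above used [0, 0.5]; keeping the two cases in disjoint halves (with the half-texel padding from GetTextureCoordFromUnitRange) avoids interpolating across the horizon discontinuity.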
float d = -r_mu + SafeSqrt(discriminant + H * H); float d_min = atmosphere.top_radius - r; float d_max = rho + H; u_mu = 0.5 + 0.5 * GetTextureCoordFromUnitRange( (d - d_min) / (d_max - d_min), SCATTERING_TEXTURE_MU_SIZE / 2); } float d = DistanceToTopAtmosphereBoundary( atmosphere, atmosphere.bottom_radius, mu_s); float d_min = atmosphere.top_radius - atmosphere.bottom_radius; float d_max = H; float a = (d - d_min) / (d_max - d_min); float A = -2.0 * atmosphere.mu_s_min * atmosphere.bottom_radius / (d_max - d_min); float u_mu_s = GetTextureCoordFromUnitRange( max(1.0 - a / A, 0.0) / (1.0 + a), SCATTERING_TEXTURE_MU_S_SIZE); float u_nu = (nu + 1.0) / 2.0; return make_float4(u_nu, u_mu_s, u_mu, u_r); } __device__ void GetRMuMuSNuFromScatteringTextureUvwz(const AtmosphereParameters atmosphere, float4 uvwz, float& r, float& mu, float& mu_s, float& nu, bool& ray_r_mu_intersects_ground) { // Distance to top atmosphere boundary for a horizontal ray at ground level. float H = sqrt(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius); // Distance to the horizon. float rho = H * GetUnitRangeFromTextureCoord(uvwz.w, SCATTERING_TEXTURE_R_SIZE); r = sqrt(rho * rho + atmosphere.bottom_radius * atmosphere.bottom_radius); if (uvwz.z < 0.5) { // Distance to the ground for the ray (r,mu), and its minimum and maximum // values over all mu - obtained for (r,-1) and (r,mu_horizon) - from which // we can recover mu: float d_min = r - atmosphere.bottom_radius; float d_max = rho; float d = d_min + (d_max - d_min) * GetUnitRangeFromTextureCoord( 1.0 - 2.0 * uvwz.z, SCATTERING_TEXTURE_MU_SIZE / 2); mu = d == 0.0 * m ? float(-1.0) : ClampCosine(-(rho * rho + d * d) / (2.0 * r * d)); ray_r_mu_intersects_ground = true; } else { // Distance to the top atmosphere boundary for the ray (r,mu), and its // minimum and maximum values over all mu - obtained for (r,1) and // (r,mu_horizon) - from which we can recover mu: float d_min = atmosphere.top_radius - r; float d_max = rho + H; float d = d_min + (d_max - d_min) * GetUnitRangeFromTextureCoord( 2.0 * uvwz.z - 1.0, SCATTERING_TEXTURE_MU_SIZE / 2); mu = d == 0.0 * m ? float(1.0) : ClampCosine((H * H - rho * rho - d * d) / (2.0 * r * d)); ray_r_mu_intersects_ground = false; } float x_mu_s = GetUnitRangeFromTextureCoord(uvwz.y, SCATTERING_TEXTURE_MU_S_SIZE); float d_min = atmosphere.top_radius - atmosphere.bottom_radius; float d_max = H; float A = -2.0 * atmosphere.mu_s_min * atmosphere.bottom_radius / (d_max - d_min); float a = (A - x_mu_s * A) / (1.0 + x_mu_s * A); float d = d_min + min(a, A) * (d_max - d_min); mu_s = d == 0.0 * m ? 
float(1.0) : ClampCosine((H * H - d * d) / (2.0 * atmosphere.bottom_radius * d)); nu = ClampCosine(uvwz.x * 2.0 - 1.0); } __device__ void GetRMuMuSNuFromScatteringTextureFragCoord(const AtmosphereParameters atmosphere, float3 frag_coord, float& r, float& mu, float& mu_s, float& nu, bool& ray_r_mu_intersects_ground) { const float4 SCATTERING_TEXTURE_SIZE = make_float4(SCATTERING_TEXTURE_NU_SIZE - 1, SCATTERING_TEXTURE_MU_S_SIZE, SCATTERING_TEXTURE_MU_SIZE, SCATTERING_TEXTURE_R_SIZE); float frag_coord_nu = floor(frag_coord.x / float(SCATTERING_TEXTURE_MU_S_SIZE)); float frag_coord_mu_s = fmodf(frag_coord.x, float(SCATTERING_TEXTURE_MU_S_SIZE)); float4 uvwz = make_float4(frag_coord_nu, frag_coord_mu_s, frag_coord.y, frag_coord.z) / SCATTERING_TEXTURE_SIZE; GetRMuMuSNuFromScatteringTextureUvwz(atmosphere, uvwz, r, mu, mu_s, nu, ray_r_mu_intersects_ground); // Clamp nu to its valid range of values, given mu and mu_s. nu = clamp(nu, mu * mu_s - sqrt((1.0 - mu * mu) * (1.0 - mu_s * mu_s)), mu * mu_s + sqrt((1.0 - mu * mu) * (1.0 - mu_s * mu_s))); } __device__ float3 GetIrradiance(const AtmosphereParameters atmosphere, float r, float mu_s); __device__ float2 GetIrradianceTextureUvFromRMuS(const AtmosphereParameters atmosphere, float r, float mu_s) { float x_r = (r - atmosphere.bottom_radius) / (atmosphere.top_radius - atmosphere.bottom_radius); float x_mu_s = mu_s * 0.5 + 0.5; return make_float2(GetTextureCoordFromUnitRange(x_mu_s, IRRADIANCE_TEXTURE_WIDTH), GetTextureCoordFromUnitRange(x_r, IRRADIANCE_TEXTURE_HEIGHT)); } __device__ void GetRMuSFromIrradianceTextureUv(const AtmosphereParameters atmosphere, float2 uv, float& r, float& mu_s) { float x_mu_s = GetUnitRangeFromTextureCoord(uv.x, IRRADIANCE_TEXTURE_WIDTH); float x_r = GetUnitRangeFromTextureCoord(uv.y, IRRADIANCE_TEXTURE_HEIGHT); r = atmosphere.bottom_radius + x_r * (atmosphere.top_radius - atmosphere.bottom_radius); mu_s = ClampCosine(2.0 * x_mu_s - 1.0); } __device__ float3 GetIrradiance(const AtmosphereParameters atmosphere, float r, float mu_s) { float2 uv = GetIrradianceTextureUvFromRMuS(atmosphere, r, mu_s); const float3 val = make_float3(tex2D<float4>(atmosphere.irradiance_texture, uv.x, uv.y)); return val; } // Rendering kernels #ifdef COMBINED_SCATTERING_TEXTURES __device__ float3 GetExtrapolatedSingleMieScattering(const AtmosphereParameters atmosphere, const float4 scattering) { if (scattering.x == 0.0) { return make_float3(0.0); } return make_float3(scattering.x, scattering.y, scattering.z) * scattering.w / scattering.x * (atmosphere.rayleigh_scattering.x / atmosphere.mie_scattering.x) * (atmosphere.mie_scattering / atmosphere.rayleigh_scattering); } #endif __device__ float3 GetCombinedScattering(const AtmosphereParameters atmosphere, float r, float mu, float mu_s, float nu, bool ray_r_mu_intersects_ground, float3& single_mie_scattering) { float4 uvwz = GetScatteringTextureUvwzFromRMuMuSNu(atmosphere, r, mu, mu_s, nu, ray_r_mu_intersects_ground); float tex_coord_x = uvwz.x * float(SCATTERING_TEXTURE_NU_SIZE - 1); float tex_x = floorf(tex_coord_x); float lerp = tex_coord_x - tex_x; float3 uvw0 = make_float3((tex_x + uvwz.y) / float(SCATTERING_TEXTURE_NU_SIZE), uvwz.z, uvwz.w); float3 uvw1 = make_float3((tex_x + 1.0 + uvwz.y) / float(SCATTERING_TEXTURE_NU_SIZE), uvwz.z, uvwz.w); #ifdef COMBINED_SCATTERING_TEXTURES float4 combined_scattering = tex3D<float4>(atmosphere.scattering_texture, uvw0.x, uvw0.y, uvw0.z) * (1.0 - lerp) + tex3D<float4>(atmosphere.scattering_texture, uvw1.x, uvw1.y, uvw1.z) * lerp; float3 scattering = 
make_float3(combined_scattering); single_mie_scattering = GetExtrapolatedSingleMieScattering(atmosphere, combined_scattering); #else float3 scattering = make_float3(tex3D<float4>(atmosphere.scattering_texture, uvw0.x, uvw0.y, uvw0.z) * (1.0 - lerp) + tex3D<float4>(atmosphere.scattering_texture, uvw1.x, uvw1.y, uvw1.z) * lerp); single_mie_scattering = make_float3(tex3D<float4>(atmosphere.single_mie_scattering_texture, uvw0.x, uvw0.y, uvw0.z) * (1.0 - lerp) + tex3D<float4>(atmosphere.single_mie_scattering_texture, uvw1.x, uvw1.y, uvw1.z) * lerp); #endif return scattering; } __device__ float3 GetSkyRadiance(const AtmosphereParameters atmosphere, float3 camera, float3 view_ray, float shadow_length, float3 sun_direction, float3& transmittance) { // Compute the distance to the top atmosphere boundary along the view ray, // assuming the viewer is in space (or NaN if the view ray does not intersect // the atmosphere). float r = length(camera); float rmu = dot(camera, view_ray); float distance_to_top_atmosphere_boundary = -rmu - sqrt(rmu * rmu - r * r + atmosphere.top_radius * atmosphere.top_radius); // If the viewer is in space and the view ray intersects the atmosphere, move // the viewer to the top atmosphere boundary (along the view ray): if (distance_to_top_atmosphere_boundary > 0.0 * m) { camera = camera + view_ray * distance_to_top_atmosphere_boundary; r = atmosphere.top_radius; rmu += distance_to_top_atmosphere_boundary; } else if (r > atmosphere.top_radius) { // If the view ray does not intersect the atmosphere, simply return 0. transmittance = make_float3(1.0f); return make_float3(0.0f * watt_per_square_meter_per_sr_per_nm()); } // Compute the r, mu, mu_s and nu parameters needed for the texture lookups. float mu = rmu / r; float mu_s = dot(camera, sun_direction) / r; float nu = dot(view_ray, sun_direction); bool ray_r_mu_intersects_ground = RayIntersectsGround(atmosphere, r, mu); transmittance = ray_r_mu_intersects_ground ? make_float3(0.0f) : GetTransmittanceToTopAtmosphereBoundary(atmosphere, r, mu); float3 single_mie_scattering; float3 scattering; if (shadow_length == 0.0 * m) { scattering = GetCombinedScattering(atmosphere, r, mu, mu_s, nu, ray_r_mu_intersects_ground, single_mie_scattering); } else { // Case of light shafts (shadow_length is the total float noted l in our // paper): we omit the scattering between the camera and the point at // distance l, by implementing Eq. (18) of the paper (shadow_transmittance // is the T(x,x_s) term, scattering is the S|x_s=x+lv term). 
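// The shifted point x_s = x + l*v is re-parameterized via the law of cosines: r_p = sqrt(d^2 + 2*r*mu*d + r^2), with the view and sun cosines rescaled to mu_p = (r*mu + d) / r_p and mu_s_p = (r*mu_s + d*nu) / r_p before the scattering lookup at x_s.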
float d = shadow_length; float r_p = ClampRadius(atmosphere, sqrt(d * d + 2.0 * r * mu * d + r * r)); float mu_p = (r * mu + d) / r_p; float mu_s_p = (r * mu_s + d * nu) / r_p; scattering = GetCombinedScattering(atmosphere, r_p, mu_p, mu_s_p, nu, ray_r_mu_intersects_ground, single_mie_scattering); float3 shadow_transmittance = GetTransmittance(atmosphere, r, mu, shadow_length, ray_r_mu_intersects_ground); scattering = scattering * shadow_transmittance; single_mie_scattering = single_mie_scattering * shadow_transmittance; } float3 sky_radiance = scattering * RayleighPhaseFunction(nu) + single_mie_scattering * MiePhaseFunction(atmosphere.mie_phase_function_g, nu); if (atmosphere.use_luminance != 0) sky_radiance *= atmosphere.sky_spectral_radiance_to_luminance; return sky_radiance; } __device__ float3 GetSkyRadianceToPoint(const AtmosphereParameters atmosphere, float3 camera, float3 point, float shadow_length, float3 sun_direction, float3& transmittance) { // Compute the distance to the top atmosphere boundary along the view ray, // assuming the viewer is in space (or NaN if the view ray does not intersect // the atmosphere). float3 view_ray = normalize(point - camera); float r = length(camera); float rmu = dot(camera, view_ray); float distance_to_top_atmosphere_boundary = -rmu - sqrt(rmu * rmu - r * r + atmosphere.top_radius * atmosphere.top_radius); // If the viewer is in space and the view ray intersects the atmosphere, move // the viewer to the top atmosphere boundary (along the view ray): if (distance_to_top_atmosphere_boundary > 0.0 * m) { camera = camera + view_ray * distance_to_top_atmosphere_boundary; r = atmosphere.top_radius; rmu += distance_to_top_atmosphere_boundary; } // Compute the r, mu, mu_s and nu parameters for the first texture lookup. float mu = rmu / r; float mu_s = dot(camera, sun_direction) / r; float nu = dot(view_ray, sun_direction); float d = length(point - camera); bool ray_r_mu_intersects_ground = RayIntersectsGround(atmosphere, r, mu); transmittance = GetTransmittance(atmosphere, r, mu, d, ray_r_mu_intersects_ground); float3 single_mie_scattering; float3 scattering = GetCombinedScattering(atmosphere, r, mu, mu_s, nu, ray_r_mu_intersects_ground, single_mie_scattering); // Compute the r, mu, mu_s and nu parameters for the second texture lookup. // If shadow_length is not 0 (case of light shafts), we want to ignore the // scattering along the last shadow_length meters of the view ray, which we // do by subtracting shadow_length from d (this way scattering_p is equal to // the S|x_s=x_0-lv term in Eq. (17) of our paper). d = max(d - shadow_length, 0.0 * m); float r_p = ClampRadius(atmosphere, sqrt(d * d + 2.0 * r * mu * d + r * r)); float mu_p = (r * mu + d) / r_p; float mu_s_p = (r * mu_s + d * nu) / r_p; float3 single_mie_scattering_p; float3 scattering_p = GetCombinedScattering(atmosphere, r_p, mu_p, mu_s_p, nu, ray_r_mu_intersects_ground, single_mie_scattering_p); // Combine the lookup results to get the scattering between camera and point. float3 shadow_transmittance = transmittance; if (shadow_length > 0.0 * m) { // This is the T(x,x_s) term in Eq. (17) of our paper, for light shafts. 
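// When shadow_length > 0 the distance d was shortened above, so T(x,x_s) is re-evaluated over the reduced distance instead of reusing the camera-to-point transmittance; the subtraction that follows then yields the in-scattering restricted to [x, x_s]: S = S(x) - T(x,x_s) * S(x_s).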
shadow_transmittance = GetTransmittance(atmosphere, r, mu, d, ray_r_mu_intersects_ground); } scattering = scattering - shadow_transmittance * scattering_p; single_mie_scattering = single_mie_scattering - shadow_transmittance * single_mie_scattering_p; #ifdef COMBINED_SCATTERING_TEXTURES single_mie_scattering = GetExtrapolatedSingleMieScattering(atmosphere, make_float4(scattering, single_mie_scattering.x)); #endif // Hack to avoid rendering artifacts when the sun is below the horizon. single_mie_scattering = single_mie_scattering * smoothstep(float(0.0), float(0.01), mu_s); float3 sky_radiance = scattering * RayleighPhaseFunction(nu) + single_mie_scattering * MiePhaseFunction(atmosphere.mie_phase_function_g, nu); if (atmosphere.use_luminance != 0) sky_radiance *= atmosphere.sky_spectral_radiance_to_luminance; return sky_radiance; } __device__ float3 GetSunAndSkyIrradiance(const AtmosphereParameters atmosphere, float3 point, float3 normal, float3 sun_direction, float3& sky_irradiance) { float r = length(point); float mu_s = dot(point, sun_direction) / r; // Indirect irradiance (approximated if the surface is not horizontal). sky_irradiance = GetIrradiance(atmosphere, r, mu_s) * (1.0 + dot(normal, point) / r) * 0.5; float3 sun_irradiance = atmosphere.solar_irradiance * GetTransmittanceToSun(atmosphere, r, mu_s) * max(dot(normal, sun_direction), 0.0); if (atmosphere.use_luminance != 0) { sky_irradiance *= atmosphere.sky_spectral_radiance_to_luminance; sun_irradiance *= atmosphere.sun_spectral_radiance_to_luminance; } // Direct irradiance. return sun_irradiance; } __device__ float3 GetSolarRadiance(const AtmosphereParameters atmosphere) { float3 solar_radiance = atmosphere.solar_irradiance / (M_PI * atmosphere.sun_angular_radius * atmosphere.sun_angular_radius); if (atmosphere.use_luminance != 0) solar_radiance *= atmosphere.sun_spectral_radiance_to_luminance; return solar_radiance; } // Light Samplers __device__ inline float3 sample_atmosphere( const Kernel_params& kernel_params, const AtmosphereParameters& atmosphere, const float3 ray_pos, const float3 ray_dir) { float3 earth_center = make_float3(.0f, -atmosphere.bottom_radius, .0f); float3 sun_direction = degree_to_cartesian(kernel_params.azimuth, kernel_params.elevation); float3 p = ray_pos - earth_center; float p_dot_v = dot(p, ray_dir); float p_dot_p = dot(p, p); float ray_earth_center_squared_distance = p_dot_p - p_dot_v * p_dot_v; float distance_to_intersection = -p_dot_v - sqrt(earth_center.y * earth_center.y - ray_earth_center_squared_distance); float ground_alpha = 0.0; float3 ground_radiance = make_float3(0.0); if (distance_to_intersection > 0.0) { float3 point = ray_pos + ray_dir * distance_to_intersection; float3 normal = normalize(point - earth_center); // Compute the radiance reflected by the ground. 
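// Lambertian ground: reflected radiance is (albedo / pi) * (sun irradiance + sky irradiance), subsequently attenuated by the aerial-perspective transmittance and augmented with the in-scattered radiance between the camera and the ground point.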
float3 sky_irradiance; float3 sun_irradiance = GetSunAndSkyIrradiance(atmosphere, point - earth_center, normal, sun_direction, sky_irradiance); ground_radiance = atmosphere.ground_albedo * (1.0 / M_PI) * (sun_irradiance + sky_irradiance); float3 transmittance; float3 in_scatter = GetSkyRadianceToPoint(atmosphere, ray_pos - earth_center, point - earth_center, .0f, sun_direction, transmittance); ground_radiance = ground_radiance * transmittance + in_scatter; ground_alpha = 1.0; } float3 transmittance_sky; float3 radiance_sky = GetSkyRadiance(atmosphere, ray_pos - earth_center, ray_dir, .0f, sun_direction, transmittance_sky); float2 sun_size = make_float2(tanf(atmosphere.sun_angular_radius), cosf(atmosphere.sun_angular_radius)); if (dot(ray_dir, sun_direction) > sun_size.y) { radiance_sky = radiance_sky + transmittance_sky * GetSolarRadiance(atmosphere); } ground_radiance = lerp(radiance_sky, ground_radiance, ground_alpha); float3 exposure = atmosphere.use_luminance == 0 ? make_float3(atmosphere.exposure) : make_float3(atmosphere.exposure) * 1e-5; ground_radiance = powf(make_float3(1.0f) - expf(-ground_radiance / atmosphere.white_point * exposure), make_float3(1.0 / 2.2)); return ground_radiance; /* old sky float azimuth = atan2f(-ray_dir.z, -ray_dir.x) * INV_2_PI + 0.5f; float elevation = acosf(fmaxf(fminf(ray_dir.y, 1.0f), -1.0f)) * INV_PI; const float4 texval = tex2D<float4>( kernel_params.sky_tex, azimuth, elevation); return make_float3(texval.x, texval.y, texval.z); */ } __device__ inline float3 sample_env_tex( const Kernel_params kernel_params, const float3 wi) { const float4 texval = tex2D<float4>( kernel_params.env_tex, atan2f(wi.z, wi.x) * (float)(0.5 / M_PI) + 0.5f, acosf(fmaxf(fminf(wi.y, 1.0f), -1.0f)) * (float)(1.0 / M_PI)); return make_float3(texval); } __device__ __inline__ float3 get_color(float3 pos, const GPU_VDB& gpu_vdb) { if (!gpu_vdb.vdb_info.has_color) return WHITE; pos = gpu_vdb.get_xform().transpose().inverse().transform_point(pos); // object space position to index position pos -= gpu_vdb.vdb_info.bmin; // index position to [0-1] position pos.x /= float(gpu_vdb.vdb_info.dim.x); pos.y /= float(gpu_vdb.vdb_info.dim.y); pos.z /= float(gpu_vdb.vdb_info.dim.z); if (pos.x < .0f || pos.y < .0f || pos.z < .0f || pos.x>1.0f || pos.y>1.0f || pos.z>1.0f) return make_float3(.0f); float4 Cd = tex3D<float4>(gpu_vdb.vdb_info.color_texture, pos.x, pos.y, pos.z); return make_float3(Cd); } __device__ __inline__ float3 sum_color(float3 ray_pos, OCTNode* leaf_node, const GPU_VDB* volumes) { float3 color = make_float3(0.0f); for (int i = 0; i < leaf_node->num_volumes; ++i) { color = fmaxf(color, get_color(ray_pos, volumes[leaf_node->vol_indices[i]])); } return color; } __device__ __inline__ float3 get_emission(float3 pos, Kernel_params kernel_params, const GPU_VDB& gpu_vdb) { if (!gpu_vdb.vdb_info.has_emission) return BLACK; pos = gpu_vdb.get_xform().transpose().inverse().transform_point(pos); // object space position to index position pos -= gpu_vdb.vdb_info.bmin; // index position to [0-1] position pos.x /= float(gpu_vdb.vdb_info.dim.x); pos.y /= float(gpu_vdb.vdb_info.dim.y); pos.z /= float(gpu_vdb.vdb_info.dim.z); if (pos.x < .0f || pos.y < .0f || pos.z < .0f || pos.x>1.0f || pos.y>1.0f || pos.z>1.0f) return make_float3(.0f); float index = tex3D<float>(gpu_vdb.vdb_info.emission_texture, pos.x, pos.y, pos.z); index = clamp(index * 255.0f / kernel_params.emission_pivot, .0f, 255.0f); float3 emission = kernel_params.emission_texture[int(index)] * kernel_params.emission_scale; return 
emission; } __device__ __inline__ float3 sum_emission(float3 ray_pos, Kernel_params kernel_params, OCTNode* leaf_node, const GPU_VDB* volumes) { float3 emmission = make_float3(0.0f); for (int i = 0; i < leaf_node->num_volumes; ++i) { emmission += get_emission(ray_pos, kernel_params, volumes[leaf_node->vol_indices[i]]); } return emmission; } __device__ __inline__ float get_density(float3 pos, const GPU_VDB& gpu_vdb) { // world space to object space pos = gpu_vdb.get_xform().transpose().inverse().transform_point(pos); // object space position to index position pos -= gpu_vdb.vdb_info.bmin; // index position to [0-1] position pos.x /= float(gpu_vdb.vdb_info.dim.x); pos.y /= float(gpu_vdb.vdb_info.dim.y); pos.z /= float(gpu_vdb.vdb_info.dim.z); if (pos.x < .0f || pos.y < .0f || pos.z < .0f || pos.x>1.0f || pos.y>1.0f || pos.z>1.0f) return .0f; float density = tex3D<float>(gpu_vdb.vdb_info.density_texture, pos.x, pos.y, pos.z); return density; } __device__ inline float sum_density(float3 ray_pos, OCTNode* leaf_node, const GPU_VDB* volumes) { float density = 0.0f; for (int i = 0; i < leaf_node->num_volumes; ++i) { density += get_density(ray_pos, volumes[leaf_node->vol_indices[i]]); } return density; } __device__ inline bool traverse_octree(float3 ray_pos, float3 ray_dir, OCTNode* root, float& t_min, float& t_max) { /* // Recursive traversal (This doesn't work) if (root->depth == 1) { if (root->bbox.Intersect_no_t(ray_pos, ray_dir)) { if (root->num_volumes > 0) { return true; } } } else { for (int i = 0; i < 8; ++i) { traverse_octree(ray_pos, ray_dir, root->children[i]); } } return false;*/ // Serial traversal if (root->bbox.Intersect(ray_pos, ray_dir, t_min, t_max)) { for (int i = 0; i < 8; ++i) { float temp_min; if (root->children[i]->bbox.Intersect(ray_pos, ray_dir, temp_min, t_max)) { if (root->children[i]->num_volumes > 0) { for (int x = 0; x < 8; ++x) { if (root->children[i]->children[x]->bbox.Intersect(ray_pos, ray_dir, temp_min, t_max)) { if (root->children[i]->children[x]->num_volumes > 0) { for (int y = 0; y < 8; ++y) { if (root->children[i]->children[x]->children[y]->bbox.Intersect(ray_pos, ray_dir, temp_min, t_max)) { if (root->children[i]->children[x]->children[y]->num_volumes > 0) { t_min = fminf(t_min, temp_min); return true; } } } } } } } } } } return false; } __device__ inline OCTNode* get_closest_leaf_node(float3 ray_pos, float3 ray_dir, OCTNode* root, float& t_min, float& t_max) { OCTNode* node = NULL; // Lets first check if we intersect root if (root->bbox.Intersect(ray_pos, ray_dir, t_min, t_max)) { // intersected root. 
now check if we intersect depth 3 nodes for (int i = 0; i < 8; ++i) { if (root->children[i]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max)) { if (root->children[i]->num_volumes > 0) { for (int x = 0; x < 8; ++x) { if (root->children[i]->children[x]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max)) { if (root->children[i]->children[x]->num_volumes > 0) { for (int y = 0; y < 8; ++y) { float temp_min = FLT_MAX; if (root->children[i]->children[x]->children[y]->bbox.Intersect(ray_pos, ray_dir, temp_min, t_max)) { if (root->children[i]->children[x]->children[y]->num_volumes > 0) { if (t_min < temp_min) { t_min = fminf(t_min, temp_min); node = root->children[i]->children[x]->children[y]; } } } } } } } } } } } return node; } __device__ inline int get_quadrant(OCTNode* root, float3 pos) { int child_idx = -1; for (int i = 0; i < 8; ++i) { if (root->has_children) { if (Contains(root->children[i]->bbox, pos)) { child_idx = i; break; } } } return child_idx; } // Finds the closest geo and returns t_min TODO use bvh to determine closest interaction __device__ inline int get_closest_object(float3 ray_pos, float3 ray_dir, OCTNode* root, const sphere ref_sphere, float& t_min) { float tmin1 = M_INF, tmax1 = -M_INF, tmin2 = M_INF, tmax2 = -M_INF; bool i1 = root->bbox.Intersect(ray_pos, ray_dir, tmin1, tmax1); bool i2 = ref_sphere.intersect(ray_pos, ray_dir, tmin2, tmax2); if (i1 && !i2) { t_min = tmin1; return 1; } if (!i1 && i2) { t_min = tmin2; return 2; } if (i1 && i2) { if (tmin1 < tmin2) { t_min = tmin1; return 1; } if (tmin2 < tmin1) { t_min = tmin2; return 2; } } return 0; } __device__ inline float3 Tr( Rand_state& rand_state, float3 ray_pos, float3 ray_dir, const Kernel_params& kernel_params, const GPU_VDB* volumes, const sphere ref_sphere, OCTNode* root) { // Run residual ratio tracking to estimate transmittance float3 tr = WHITE; float t_min, t_max, geo_dist = .0f, distance = .0f, t = 0.0f; if (!Contains(root->bbox, ray_pos)) { // position is out of root bbox if (root->bbox.Intersect(ray_pos, ray_dir, t_min, t_max)) ray_pos += ray_dir * (t_min + EPS); // push the position to volume box else return tr; // We are missing volume box. 
No need to sample density tr } root->bbox.Intersect(ray_pos, ray_dir, t_min, distance); //if (ref_sphere.intersect(ray_pos, ray_dir, geo_dist, t_max)) distance = geo_dist; if (ref_sphere.intersect(ray_pos, ray_dir, geo_dist, t_max)) return BLACK; #ifdef RESIDUAL_RATIO_TRACKING // Control variate float sigma_c = root->min_extinction; float sigma_r_inv = 1.0f / (root->max_extinction - sigma_c); float T_c = expf(-sigma_c * distance); #endif // RESIDUAL_RATIO_TRACKING // Code path 1: // This is the old transmittance estimate algorithm that is agnostic of octree structure #ifndef DDA_STEP_TRUE float inv_max_density = 1.0f; while (true) { t -= logf(1 - rand(&rand_state)) * inv_max_density * kernel_params.tr_depth / kernel_params.extinction.x; ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root, volumes); tr *= 1 - fmaxf(.0f, density * inv_max_density); tr = fmaxf(tr, make_float3(.0f)); } #endif // Code path 2: // This is a DDA stepping algorithm that checks the quadrant in Octree nodes and skips them if they contain no volumes #ifdef DDA_STEP_TRUE while (true) { int depth3_node = get_quadrant(root, ray_pos); if (depth3_node > -1) { if (root->children[depth3_node]->num_volumes == 0) { //We are in the depth3 node but it is empty root->children[depth3_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; int depth2_node = get_quadrant(root->children[depth3_node], ray_pos); if (depth2_node > -1) { if (root->children[depth3_node]->children[depth2_node]->num_volumes == 0) { //We are in the depth2 node but it is empty root->children[depth3_node]->children[depth2_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; int leaf_node = get_quadrant(root->children[depth3_node]->children[depth2_node], ray_pos); if (leaf_node > -1) { if (root->children[depth3_node]->children[depth2_node]->children[leaf_node]->num_volumes == 0) { //We are in the leaf node but it is empty root->children[depth3_node]->children[depth2_node]->children[leaf_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; #ifdef RESIDUAL_RATIO_TRACKING t -= logf(1 - rand(&rand_state)) * sigma_r_inv * kernel_params.tr_depth; if (t >= distance) break; ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root->children[depth3_node]->children[depth2_node]->children[leaf_node], volumes); tr *= 1 - ((density - sigma_c) * sigma_r_inv); #endif #ifdef RATIO_TRACKING float inv_max_density = 1 / root->max_extinction; t -= logf(1 - rand(&rand_state)) * inv_max_density * kernel_params.tr_depth; if (t >= distance) break; ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root->children[depth3_node]->children[depth2_node]->children[leaf_node], volumes); tr *= (1 - fmaxf(.0f, density * inv_max_density)); tr = fmaxf(tr, make_float3(.0f)); #endif if (length(tr) < EPS) break; } #endif #ifdef RESIDUAL_RATIO_TRACKING return clamp(tr * T_c, .0f, 1.0f); #endif #ifdef RATIO_TRACKING return tr; #endif } __device__ inline float3 estimate_emission( Rand_state& rand_state, float3 ray_pos, float3 ray_dir, const Kernel_params& kernel_params, const GPU_VDB* volumes, OCTNode* root) { // Run ratio tracking to estimate emission if (kernel_params.emission_scale == 0) return 
BLACK; float3 emission = BLACK; float t_min, t_max, t = 0.0f; while (true) { int depth3_node = get_quadrant(root, ray_pos); if (depth3_node > -1) { if (root->children[depth3_node]->num_volumes == 0) { //We are in the depth3 node but it is empty root->children[depth3_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; int depth2_node = get_quadrant(root->children[depth3_node], ray_pos); if (depth2_node > -1) { if (root->children[depth3_node]->children[depth2_node]->num_volumes == 0) { //We are in the depth2 node but it is empty root->children[depth3_node]->children[depth2_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; int leaf_node = get_quadrant(root->children[depth3_node]->children[depth2_node], ray_pos); if (leaf_node > -1) { if (root->children[depth3_node]->children[depth2_node]->children[leaf_node]->num_volumes == 0) { //We are in the leaf node but it is empty root->children[depth3_node]->children[depth2_node]->children[leaf_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; float inv_max_density = 1 / root->max_extinction; t -= logf(1 - rand(&rand_state)) * inv_max_density * kernel_params.tr_depth / kernel_params.extinction.x; ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; emission += sum_emission(ray_pos, kernel_params, root->children[depth3_node]->children[depth2_node]->children[leaf_node], volumes); } return emission; } __device__ inline float pdf_li( Kernel_params kernel_params, float3 wi) { float theta = acosf(clamp(wi.y, -1.0f, 1.0f)); float phi = atan2f(wi.z, wi.x); float sin_theta = sinf(theta); if (sin_theta == .0f) return .0f; float2 polar_pos = make_float2(phi * INV_2_PI, theta * INV_PI) / (2.0f * M_PI * M_PI * sin_theta); return draw_pdf_from_distribution(kernel_params, polar_pos); } __device__ inline float3 estimate_sky( Kernel_params kernel_params, Rand_state& randstate, const float3& ray_pos, float3& ray_dir, const GPU_VDB* gpu_vdb, const sphere ref_sphere, OCTNode* root, const AtmosphereParameters atmosphere) { float3 Ld = BLACK; for (int i = 0; i < 1; i++) { float3 Li = BLACK; float3 wi; float light_pdf = .0f, phase_pdf = .0f; float az = rand(&randstate) * 360.0f; float el = rand(&randstate) * 180.0f; // Sample light source with multiple importance sampling if (kernel_params.environment_type == 0) { light_pdf = draw_sample_from_distribution(kernel_params, randstate, wi); Li = sample_atmosphere(kernel_params, atmosphere, ray_pos, wi); } else { light_pdf = sample_spherical(randstate, wi); Li = sample_env_tex(kernel_params, wi); } if (light_pdf > .0f && !isBlack(Li)) { float cos_theta = dot(ray_dir, wi); phase_pdf = henyey_greenstein(cos_theta, kernel_params.phase_g1); if (phase_pdf > .0f) { float3 tr = Tr(randstate, ray_pos, wi, kernel_params, gpu_vdb, ref_sphere, root); Li *= tr; if (!isBlack(Li)) { float weight = power_heuristic(1, light_pdf, 1, phase_pdf); Ld += Li * phase_pdf * weight / light_pdf; } } } // Sample BSDF with multiple importance sampling wi = ray_dir; phase_pdf = sample_hg(wi, randstate, kernel_params.phase_g1); float3 f = make_float3(phase_pdf); if (phase_pdf > .0f) { Li = BLACK; float weight = 1.0f; if (kernel_params.environment_type == 0) { light_pdf = pdf_li(kernel_params, wi); } else light_pdf = isotropic(); if (light_pdf == 0.0f) return Ld; weight = power_heuristic(1, 
phase_pdf, 1, light_pdf); float3 tr = Tr(randstate, ray_pos, wi, kernel_params, gpu_vdb, ref_sphere, root); if (kernel_params.environment_type == 0) { Li = sample_atmosphere(kernel_params, atmosphere, ray_pos, wi); } else Li = sample_env_tex(kernel_params, wi); if (!isBlack(Li)) Ld += Li * tr * weight; } } return Ld; } __device__ inline float3 estimate_point_light( Kernel_params kernel_params, const light_list lights, Rand_state& randstate, const float3& ray_pos, float3& ray_dir, const GPU_VDB* gpu_vdb, const sphere ref_sphere, OCTNode* root) { float3 Ld = make_float3(.0f); float max_density = root->max_extinction; int light_budget = 10; while (light_budget >= 0) { int light_index = int(floor(rand(&randstate) * lights.num_lights)); float3 dir = normalize(lights.light_ptr[light_index].pos - ray_pos); float3 tr = Tr(randstate, ray_pos, dir, kernel_params, gpu_vdb, ref_sphere, root); if(light_budget < lights.num_lights) Ld += lights.light_ptr[light_index].Le(randstate, ray_pos, ray_dir, kernel_params.phase_g1, tr, max_density, kernel_params.density_mult, kernel_params.tr_depth); light_budget--; } return Ld; } __device__ inline float3 estimate_sun( Kernel_params kernel_params, Rand_state& randstate, const float3& ray_pos, float3& ray_dir, const GPU_VDB* gpu_vdb, const sphere& ref_sphere, OCTNode* root, const AtmosphereParameters atmosphere) { float3 Ld = BLACK; float3 wi; float phase_pdf = .0f; // sample sun light with multiple importance sampling //Find sun direction wi = degree_to_cartesian(kernel_params.azimuth, kernel_params.elevation); // find scattering pdf float cos_theta = dot(ray_dir, wi); phase_pdf = henyey_greenstein(cos_theta, kernel_params.phase_g1); // Check visibility of light source float3 tr = Tr(randstate, ray_pos, wi, kernel_params, gpu_vdb, ref_sphere, root); float3 sky_irradiance; float3 sun_irradiance = GetSunAndSkyIrradiance(atmosphere, ray_pos, ray_dir, wi, sky_irradiance); // Ld = Li * visibility.Tr * scattering_pdf / light_pdf //Ld = (length(sky_irradiance)*sun_irradiance) * tr * phase_pdf; Ld = tr * phase_pdf; // No need for sampling BSDF with importance sampling // please see: http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Direct_Lighting.html#fragment-SampleBSDFwithmultipleimportancesampling-0 return Ld * kernel_params.sun_color * kernel_params.sun_mult; } __device__ inline float3 uniform_sample_one_light( Kernel_params kernel_params, const light_list lights, const float3& ray_pos, float3& ray_dir, Rand_state& randstate, const GPU_VDB* gpu_vdb, const sphere ref_sphere, OCTNode* root, const AtmosphereParameters atmosphere) { int nLights = 3; // number of lights float light_num = rand(&randstate) * nLights; float3 L = BLACK; if (light_num < 1) { if (kernel_params.sun_mult > .0f) L += estimate_sun(kernel_params, randstate, ray_pos, ray_dir, gpu_vdb, ref_sphere, root, atmosphere); } else if (light_num >= 1 && light_num < 2) { if (lights.num_lights > 0) L += estimate_point_light(kernel_params, lights, randstate, ray_pos, ray_dir, gpu_vdb, ref_sphere, root); } else { if (kernel_params.sky_mult > .0f) L += estimate_sky(kernel_params, randstate, ray_pos, ray_dir, gpu_vdb, ref_sphere, root, atmosphere) * kernel_params.sky_mult; } return L * (float)nLights; } __device__ inline float3 sample( Rand_state& rand_state, float3& ray_pos, const float3& ray_dir, bool& interaction, int& obj, float& Alpha, const Kernel_params& kernel_params, const GPU_VDB* volumes, const sphere ref_sphere, OCTNode* root) { // Run delta tracking with octree traversal // We assume 
that the ray_pos is inside the root bbox at the beginning float t_min, t_max, geo_dist = .0f, distance = .0f, t = 0.0f; bool geo; #ifndef DDA_STEP_TRUE float inv_max_density = 1.0f; float inv_density_mult = 1.0f / kernel_params.density_mult; while (true) { bool geo = ref_sphere.intersect(ray_pos, ray_dir, distance, t_max); t -= logf(1 - rand(&rand_state)) * inv_max_density * inv_density_mult; if (geo && t >= distance) { obj = 2; break; } ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root, volumes); if (density * inv_max_density > rand(&rand_state)) { interaction = true; return (kernel_params.albedo / kernel_params.extinction) * float(kernel_params.energy_inject); } } #endif #ifdef DDA_STEP_TRUE // Code path 2: // This is the old algorithm with extra position awareness while (true) { int depth3_node = get_quadrant(root, ray_pos); if (depth3_node > -1) { if (root->children[depth3_node]->num_volumes == 0) { //We are in the depth3 node but it is empty root->children[depth3_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; int depth2_node = get_quadrant(root->children[depth3_node], ray_pos); if (depth2_node > -1) { if (root->children[depth3_node]->children[depth2_node]->num_volumes == 0) { //We are in the depth2 node but it is empty root->children[depth3_node]->children[depth2_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; int leaf_node = get_quadrant(root->children[depth3_node]->children[depth2_node], ray_pos); if (leaf_node > -1) { if (root->children[depth3_node]->children[depth2_node]->children[leaf_node]->num_volumes == 0) { //We are in the leaf node but it is empty root->children[depth3_node]->children[depth2_node]->children[leaf_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1f); ray_pos += ray_dir * t_max; continue; } } else break; float inv_max_density = 1.0f / root->max_extinction; float inv_density_mult = 1.0f / kernel_params.density_mult; root->bbox.Intersect(ray_pos, ray_dir, t_min, distance); if (ref_sphere.intersect(ray_pos, ray_dir, geo_dist, t_max)) { distance = geo_dist; geo = true; } t -= logf(1 - rand(&rand_state)) * inv_max_density * inv_density_mult; if (t >= distance) { if (geo) obj = 2; break; } ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root->children[depth3_node]->children[depth2_node]->children[leaf_node], volumes); float3 Cd = sum_color(ray_pos, root->children[depth3_node]->children[depth2_node]->children[leaf_node], volumes); int index = int(floorf(fminf(fmaxf((density * inv_max_density * 255.0f / kernel_params.emission_pivot), 0.0f), 255.0f))); float3 density_color = kernel_params.density_color_texture[index]; if (Alpha < 1.0f) Alpha += density; if (density * inv_max_density > rand(&rand_state)) { interaction = true; return (kernel_params.albedo * Cd * density_color / kernel_params.extinction) * float(kernel_params.energy_inject); } } #endif return WHITE; } __device__ void traverse_bvh(float3 ray_pos, float3 ray_dir, const GPU_VDB* volumes, BVHNode* node, float3& t, int& volume_idx) { float tmin, tmax; if (node->boundingBox.Intersect(ray_pos, ray_dir, tmin, tmax)) { if (node->IsLeaf()) { t = volumes[node->volIndex].rayBoxIntersect(ray_pos, ray_dir); if (t.z != NOHIT) volume_idx = node->volIndex; } else { BVHNode* leftChild = node->leftChild; 
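// Note: both children are always traversed and a later leaf hit overwrites t and volume_idx, so the result is the last intersected leaf in traversal order rather than necessarily the nearest one.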
BVHNode* rightChild = node->rightChild; traverse_bvh(ray_pos, ray_dir, volumes, leftChild, t, volume_idx); traverse_bvh(ray_pos, ray_dir, volumes, rightChild, t, volume_idx); } } } ////////////////////////////////////////////////////////////////////////// // Rendering Integrators ////////////////////////////////////////////////////////////////////////// // PBRT Volume Integrator __device__ inline float3 vol_integrator( Rand_state rand_state, const light_list lights, float3 ray_pos, float3 ray_dir, float& tr, const Kernel_params kernel_params, const GPU_VDB* gpu_vdb, const sphere ref_sphere, OCTNode* root, const AtmosphereParameters atmosphere) { float3 L = BLACK; float3 beta = WHITE; float3 env_pos = ray_pos; bool mi; float t, tmax; int obj; if (root->bbox.Intersect(ray_pos, ray_dir, t, tmax)) { // found an intersection ray_pos += ray_dir * (t + EPS); for (int depth = 1; depth <= kernel_params.ray_depth; depth++) { mi = false; beta *= sample(rand_state, ray_pos, ray_dir, mi, obj, tr, kernel_params, gpu_vdb, ref_sphere, root); if (isBlack(beta)) break; if (mi) { // medium interaction L += beta * uniform_sample_one_light(kernel_params, lights, ray_pos, ray_dir, rand_state, gpu_vdb, ref_sphere, root, atmosphere) + estimate_emission(rand_state, ray_pos, ray_dir, kernel_params, gpu_vdb, root); sample_hg(ray_dir, rand_state, kernel_params.phase_g1); } } ray_dir = normalize(ray_dir); } if (length(beta) > 0.9999f) ray_pos = env_pos; L += beta * sample_atmosphere(kernel_params, atmosphere, ray_pos, ray_dir); tr = fminf(tr, 1.0f); return L; } // From Ray Tracing Gems Vol-28 __device__ inline float3 direct_integrator( Rand_state rand_state, float3 ray_pos, float3 ray_dir, float& tr, const Kernel_params kernel_params, const GPU_VDB* gpu_vdb, const light_list lights, const sphere& ref_sphere, OCTNode* root, const AtmosphereParameters atmosphere) { float3 L = BLACK; float3 beta = WHITE; bool mi = false; float3 env_pos = ray_pos; float t_min; int obj; // TODO use bvh to determine if we intersect volume or geometry for (int ray_depth = 1; ray_depth <= kernel_params.ray_depth; ray_depth++) { obj = get_closest_object(ray_pos, ray_dir, root, ref_sphere, t_min); if (obj == 1) { ray_pos += ray_dir * (t_min + EPS); for (int volume_depth = 1; volume_depth <= kernel_params.volume_depth; volume_depth++) { mi = false; beta *= sample(rand_state, ray_pos, ray_dir, mi, obj, tr, kernel_params, gpu_vdb, ref_sphere, root); if (isBlack(beta) || obj == 2) break; if (mi) { // medium interaction sample_hg(ray_dir, rand_state, kernel_params.phase_g1); } } if (mi) { L += estimate_sun(kernel_params, rand_state, ray_pos, ray_dir, gpu_vdb, ref_sphere, root, atmosphere) * beta; if(lights.num_lights>0) L += estimate_point_light(kernel_params, lights, rand_state, ray_pos, ray_dir, gpu_vdb, ref_sphere, root) * beta; } if (kernel_params.emission_scale > 0 && mi) { L += estimate_emission(rand_state, ray_pos, ray_dir, kernel_params, gpu_vdb, root); } } obj = get_closest_object(ray_pos, ray_dir, root, ref_sphere, t_min); if (obj == 2) { ray_pos += ray_dir * t_min; float3 normal = normalize((ray_pos - ref_sphere.center) / ref_sphere.radius); float3 nl = dot(normal, ray_dir) < 0 ? normal : normal * -1; float phi = 2 * M_PI * rand(&rand_state); float r2 = rand(&rand_state); float r2s = sqrtf(r2); float3 w = normalize(nl); float3 u = normalize(cross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = cross(w, u); float3 hemisphere_dir = normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2)); float3 ref = reflect(ray_dir, nl); ray_dir = lerp(ref, hemisphere_dir, ref_sphere.roughness); float3 light_dir = degree_to_cartesian(kernel_params.azimuth, kernel_params.elevation); ray_pos += normal * EPS; beta *= ref_sphere.color; float3 v_tr = Tr(rand_state, ray_pos, light_dir, kernel_params, gpu_vdb, ref_sphere, root); L += kernel_params.sun_color * kernel_params.sun_mult * v_tr * fmaxf(dot(light_dir, normal), .0f) * beta; env_pos = ray_pos; } } if (kernel_params.environment_type == 0) { L += sample_atmosphere(kernel_params, atmosphere, env_pos, ray_dir) * beta * kernel_params.sky_mult * kernel_params.sky_color; } else { const float4 texval = tex2D<float4>( kernel_params.env_tex, atan2f(ray_dir.z, ray_dir.x) * (float)(0.5 / M_PI) + 0.5f, acosf(fmaxf(fminf(ray_dir.y, 1.0f), -1.0f)) * (float)(1.0 / M_PI)); L += make_float3(texval.x, texval.y, texval.z) * kernel_params.sky_color * beta * isotropic(); } tr = fminf(tr, 1.0f); return L; } __device__ inline float depth_calculator( Rand_state rand_state, float3 ray_pos, float3 ray_dir, float& tr, const Kernel_params kernel_params, const GPU_VDB* gpu_vdb, const sphere& ref_sphere, OCTNode* root) { float3 orig = ray_pos; bool mi = false; float t_min; int obj; obj = get_closest_object(ray_pos, ray_dir, root, ref_sphere, t_min); if (obj == 1) { ray_pos += ray_dir * (t_min + EPS); sample(rand_state, ray_pos, ray_dir, mi, obj, tr, kernel_params, gpu_vdb, ref_sphere, root); if (mi) return length(orig - ray_pos); else return .0f; } if (obj == 2) { ray_pos += ray_dir * t_min; return length(orig - ray_pos); } return .0f; } ////////////////////////////////////////////////////////////////////////// // Test Kernels ////////////////////////////////////////////////////////////////////////// /* __device__ inline float3 test_geometry_list(float3 ray_pos, float3 ray_dir, const geometry_list **geo_list) { float t_min, t_max; if ((*geo_list)->intersect(ray_pos, ray_dir, t_min, t_max)) return RED; return BLACK; } */ __device__ inline float3 sample_cost( Rand_state& rand_state, float3& ray_pos, const float3& ray_dir, bool& interaction, float& tr, const Kernel_params& kernel_params, const GPU_VDB* volumes, OCTNode* root) { float t_min, t_max, t = 0.0f; float3 COST = BLACK; #ifndef DDA_STEP_TRUE float inv_max_density = 1.0f; float inv_density_mult = 1.0f / kernel_params.density_mult; while (true) { COST += RED; t -= logf(1 - rand(&rand_state)) * inv_max_density * inv_density_mult; ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root, volumes); tr *= 1 - fmaxf(.0f, density * inv_max_density); if (tr < 1.0f) tr += density; if (density * inv_max_density > rand(&rand_state)) { interaction = true; return COST; } } #endif #ifdef DDA_STEP_TRUE // Code path 2: // This is the old algorithm with extra position awareness while (true) { COST += RED; int depth3_node = get_quadrant(root, ray_pos); if (depth3_node > -1) { if (root->children[depth3_node]->num_volumes < 1) { //We are in the depth3 node but it is empty root->children[depth3_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1); ray_pos += ray_dir * t_max; continue; } } else break; int depth2_node = get_quadrant(root->children[depth3_node], ray_pos); if (depth2_node > -1) { if (root->children[depth3_node]->children[depth2_node]->num_volumes < 1) { //We are in the 
depth2 node but it is empty root->children[depth3_node]->children[depth2_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1); ray_pos += ray_dir * t_max; continue; } } else break; int leaf_node = get_quadrant(root->children[depth3_node]->children[depth2_node], ray_pos); if (leaf_node > -1) { if (root->children[depth3_node]->children[depth2_node]->children[leaf_node]->num_volumes < 1) { //We are in the leaf node but it is empty root->children[depth3_node]->children[depth2_node]->children[leaf_node]->bbox.Intersect(ray_pos, ray_dir, t_min, t_max); t_max = fmaxf(t_max, 0.1); ray_pos += ray_dir * t_max; continue; } } else break; float inv_max_density = 1.0f / root->children[depth3_node]->children[depth2_node]->children[leaf_node]->max_extinction; //float inv_max_density = 1.0f; float inv_density_mult = 1.0f / kernel_params.density_mult; t -= logf(1 - rand(&rand_state)) * inv_max_density * inv_density_mult; ray_pos += ray_dir * t; if (!Contains(root->bbox, ray_pos)) break; float density = sum_density(ray_pos, root->children[depth3_node]->children[depth2_node]->children[leaf_node], volumes); tr *= 1 - fmaxf(.0f, density * inv_max_density); if (tr < 1.0f) tr += density; if (density * inv_max_density > rand(&rand_state)) { interaction = true; return COST; } } #endif return COST; } __device__ inline float3 cost_calculator( Rand_state rand_state, float3 ray_pos, float3 ray_dir, float& tr, const Kernel_params kernel_params, const GPU_VDB* gpu_vdb, OCTNode* root, const AtmosphereParameters atmosphere) { float3 L = BLACK; float3 beta = WHITE; bool mi = false; float t_min = FLT_MIN, t_max = FLT_MAX; if (root->bbox.Intersect(ray_pos, ray_dir, t_min, t_max)) { ray_pos += ray_dir * (t_min + EPS); for (int depth = 1; depth <= kernel_params.ray_depth; depth++) { mi = false; beta += sample_cost(rand_state, ray_pos, ray_dir, mi, tr, kernel_params, gpu_vdb, root); if (isBlack(beta)) break; if (mi) { // medium interaction L = beta; sample_hg(ray_dir, rand_state, kernel_params.phase_g1); } } } tr = 1.0f; return L; } __device__ inline float3 octree_integrator( Rand_state rand_state, float3 ray_pos, float3 ray_dir, float& tr, const Kernel_params kernel_params, const GPU_VDB* gpu_vdb, const sphere ref_sphere, OCTNode* root_node, const AtmosphereParameters atmosphere) { float3 L = BLACK; float3 beta = WHITE; bool mi = false; float3 env_pos = ray_pos; int obj; float t, tmax; if (root_node->bbox.Intersect(ray_pos, ray_dir, t, tmax)) { ray_pos += ray_dir * t; for (int depth = 1; depth <= kernel_params.ray_depth; depth++) { mi = false; beta *= sample(rand_state, ray_pos, ray_dir, mi, obj, tr, kernel_params, gpu_vdb, ref_sphere, root_node); if (isBlack(beta)) break; if (mi) { // medium interaction sample_hg(ray_dir, rand_state, kernel_params.phase_g1); } } } ray_dir = normalize(ray_dir); if (kernel_params.environment_type == 0) { if (mi) L += estimate_sun(kernel_params, rand_state, ray_pos, ray_dir, gpu_vdb, ref_sphere, root_node, atmosphere) * beta * kernel_params.sun_color * kernel_params.sun_mult; L += sample_atmosphere(kernel_params, atmosphere, env_pos, ray_dir) * beta; } else { const float4 texval = tex2D<float4>( kernel_params.env_tex, atan2f(ray_dir.z, ray_dir.x) * (float)(0.5 / M_PI) + 0.5f, acosf(fmaxf(fminf(ray_dir.y, 1.0f), -1.0f)) * (float)(1.0 / M_PI)); L += make_float3(texval.x, texval.y, texval.z) * kernel_params.sky_color * beta * isotropic(); } tr = fminf(tr, 1.0f); return L; } __device__ inline float3 visualize_BVH(float3 ray_pos, float3 ray_dir, const GPU_VDB* volumes, 
BVHNode* root_node) { float3 L = BLACK; float3 t = make_float3(NOHIT); int vol_index; traverse_bvh(ray_pos, ray_dir, volumes, root_node, t, vol_index); if (t.z != NOHIT) { ray_pos += ray_dir * t.x; L = make_float3(float(vol_index) / 10.0f); } return L; } __device__ inline float3 visualize_OCTree(float3 ray_pos, float3 ray_dir, const GPU_VDB* volumes, OCTNode* root) { float3 L = BLACK; float3 t = make_float3(NOHIT); float t_min, t_max; if (traverse_octree(ray_pos, ray_dir, root, t_min, t_max)) return RED; return L; } __device__ inline float3 render_earth(float3 ray_pos, float3 ray_dir, const Kernel_params kernel_params, const AtmosphereParameters atmosphere) { float3 earth_center = make_float3(.0f, -atmosphere.bottom_radius, .0f); float3 sun_direction = degree_to_cartesian(kernel_params.azimuth, kernel_params.elevation); float3 p = ray_pos - earth_center; float p_dot_v = dot(p, ray_dir); float p_dot_p = dot(p, p); float ray_earth_center_squared_distance = p_dot_p - p_dot_v * p_dot_v; float distance_to_intersection = -p_dot_v - sqrt(earth_center.y * earth_center.y - ray_earth_center_squared_distance); float ground_alpha = 0.0; float3 ground_radiance = make_float3(0.0); if (distance_to_intersection > 0.0) { float3 point = ray_pos + ray_dir * distance_to_intersection; float3 normal = normalize(point - earth_center); // Compute the radiance reflected by the ground. float3 sky_irradiance; float3 sun_irradiance = GetSunAndSkyIrradiance(atmosphere, point - earth_center, normal, sun_direction, sky_irradiance); ground_radiance = atmosphere.ground_albedo * (1.0 / M_PI) * (sun_irradiance + sky_irradiance); float3 transmittance; float3 in_scatter = GetSkyRadianceToPoint(atmosphere, ray_pos - earth_center, point - earth_center, .0f, sun_direction, transmittance); ground_radiance = ground_radiance * transmittance + in_scatter; ground_alpha = 1.0; } float3 transmittance_sky; float3 radiance_sky = GetSkyRadiance(atmosphere, ray_pos - earth_center, ray_dir, .0f, sun_direction, transmittance_sky); float2 sun_size = make_float2(tanf(atmosphere.sun_angular_radius), cosf(atmosphere.sun_angular_radius)); if (dot(ray_dir, sun_direction) > sun_size.y) { radiance_sky = radiance_sky + transmittance_sky * GetSolarRadiance(atmosphere); } ground_radiance = lerp(radiance_sky, ground_radiance, ground_alpha); return ground_radiance; } __device__ inline float3 test_geometry_list(float3 ray_pos, float3 ray_dir, const geometry_list geo_list, Rand_state rand_state) { float t_min, t_max; float3 atten = WHITE; for (int i = 0; i < 20; ++i) { int idx = geo_list.list[0].intersect(ray_pos, ray_dir, t_min, t_max); /* if (idx > -1) { float3 normal; if(!geo_list.list[idx].scatter(ray_pos, ray_dir, t_min, normal, atten, rand_state)) break; } */ } return atten; } ////////////////////////////////////////////////////////////////////////// // Main kernel accessors ////////////////////////////////////////////////////////////////////////// // Tone mapping functions __device__ inline float3 rtt_and_odt_fit(float3 v) { float3 a = v * (v + 0.0245786f) - 0.000090537f; float3 b = v * (0.983729f * v + 0.4329510f) + 0.238081f; return a / b; } extern "C" __global__ void volume_rt_kernel( const camera cam, const light_list lights, const GPU_VDB * gpu_vdb, const sphere & sphere, const geometry_list & geo_list, BVHNode * root_node, OCTNode * oct_root, const AtmosphereParameters atmosphere, const Kernel_params kernel_params) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= 
kernel_params.resolution.x || y >= kernel_params.resolution.y) return; // Initialize pseudorandom number generator (PRNG); assume we need no more than 4096 random numbers. const unsigned int idx = y * kernel_params.resolution.x + x; Rand_state rand_state; curand_init(idx, 0, kernel_params.iteration * 4096, &rand_state); // Get a blue noise sample from buffer int x_new = x % 256; int y_new = y % 256; int bn_index = y_new * 256 + x_new; float3 bn = kernel_params.blue_noise_buffer[bn_index]; float u = float(x + bn.x) / float(kernel_params.resolution.x); float v = float(y + bn.y) / float(kernel_params.resolution.y); ray camera_ray = cam.get_ray(u, v, &rand_state); float3 ray_dir = normalize(camera_ray.B); float3 ray_pos = camera_ray.A; float3 value = WHITE; float3 cost = BLACK; float depth = .0f; float tr = .0f; if (kernel_params.iteration < kernel_params.max_interactions && kernel_params.render) { //value = test_geometry_list(ray_pos, ray_dir, geo_list, rand_state); depth = depth_calculator(rand_state, ray_pos, ray_dir, tr, kernel_params, gpu_vdb, sphere, oct_root); if (kernel_params.integrator) value = vol_integrator(rand_state, lights, ray_pos, ray_dir, tr, kernel_params, gpu_vdb, sphere, oct_root, atmosphere); else value = direct_integrator(rand_state, ray_pos, ray_dir, tr, kernel_params, gpu_vdb, lights, sphere, oct_root, atmosphere); } // Check if values contains nan or infinite values if (isNan(value) || isInf(value)) value = kernel_params.accum_buffer[idx]; if (isnan(tr) || isinf(tr)) tr = 1.0f; float aof = 1 / cam.lens_radius; aof = clamp(aof, .0f, FLT_MAX); if (cam.viz_dof) { // TODO focus plane viz if (depth > (cam.focus_dist + aof) ) value = lerp(value, RED, 0.5f); if (depth < (cam.focus_dist - aof) ) value = lerp(value, BLUE, 0.5f); if (depth > (cam.focus_dist - aof) && depth < (cam.focus_dist + aof)) value = lerp(value, GREEN, 0.5f); } // Accumulate. if (kernel_params.iteration == 0) { kernel_params.accum_buffer[idx] = value; kernel_params.cost_buffer[idx] = cost; kernel_params.depth_buffer[idx] = depth; } else if (kernel_params.iteration < kernel_params.max_interactions) { kernel_params.accum_buffer[idx] = kernel_params.accum_buffer[idx] + (value - kernel_params.accum_buffer[idx]) / (float)(kernel_params.iteration + 1); kernel_params.cost_buffer[idx] = kernel_params.cost_buffer[idx] + (cost - kernel_params.cost_buffer[idx]) / (float)(kernel_params.iteration + 1); kernel_params.depth_buffer[idx] = kernel_params.depth_buffer[idx] + (depth - kernel_params.depth_buffer[idx]) / (float)(kernel_params.iteration + 1); } // Update display buffer (ACES Tonemapping). 
// https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl mat3 aces_input_matrix( 0.59719f, 0.35458f, 0.04823f, 0.07600f, 0.90834f, 0.01566f, 0.02840f, 0.13383f, 0.83777f); mat3 aces_output_matrix( 1.60475f, -0.53108f, -0.07367f, -0.10208f, 1.10813f, -0.00605f, -0.00327f, -0.07276f, 1.07602f); float3 val = aces_input_matrix * kernel_params.accum_buffer[idx]; val = rtt_and_odt_fit(val); val = aces_output_matrix * val * kernel_params.exposure_scale; // gamma correction const unsigned int r = (unsigned int)(255.0f * fminf(powf(fmaxf(val.x, 0.0f), (float)(1.0 / 2.2)), 1.0f)); const unsigned int g = (unsigned int)(255.0f * fminf(powf(fmaxf(val.y, 0.0f), (float)(1.0 / 2.2)), 1.0f)); const unsigned int b = (unsigned int)(255.0f * fminf(powf(fmaxf(val.z, 0.0f), (float)(1.0 / 2.2)), 1.0f)); kernel_params.display_buffer[idx] = 0xff000000 | (r << 16) | (g << 8) | b; // fill in raw buffer float4 raw = make_float4(val.x, val.y, val.z, tr); kernel_params.raw_buffer[idx] = raw; // Update blue_noise texture with golden ratio if (idx < (256 * 256)) { float3 val = kernel_params.blue_noise_buffer[idx]; val += (1.0f + sqrtf(5.0f)) / 2.0f; val = fmodf(val, make_float3(1.0f)); kernel_params.blue_noise_buffer[idx] = val; } }
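// ---------------------------------------------------------------------------
// A minimal, self-contained CPU sketch (not part of the kernel above) of the
// display-buffer packing that volume_rt_kernel performs after accumulation:
// ACES input matrix -> RRT/ODT curve -> ACES output matrix -> exposure ->
// 1/2.2 gamma -> packed 0xAARRGGBB. The Vec3/mul3 helpers below are simple
// stand-ins for the kernel's float3/mat3 types; the matrix and curve constants
// are copied from the kernel.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Row-major 3x3 * vec3, mirroring mat3 * float3 in the kernel.
static Vec3 mul3(const float m[9], Vec3 v) {
  return { m[0] * v.x + m[1] * v.y + m[2] * v.z,
           m[3] * v.x + m[4] * v.y + m[5] * v.z,
           m[6] * v.x + m[7] * v.y + m[8] * v.z };
}

// Per-channel RRT/ODT fit, same constants as rtt_and_odt_fit().
static float rtt_odt(float v) {
  float a = v * (v + 0.0245786f) - 0.000090537f;
  float b = v * (0.983729f * v + 0.4329510f) + 0.238081f;
  return a / b;
}

static uint32_t pack_display(Vec3 hdr, float exposure_scale) {
  static const float aces_in[9]  = { 0.59719f, 0.35458f, 0.04823f,
                                     0.07600f, 0.90834f, 0.01566f,
                                     0.02840f, 0.13383f, 0.83777f };
  static const float aces_out[9] = { 1.60475f, -0.53108f, -0.07367f,
                                    -0.10208f,  1.10813f, -0.00605f,
                                    -0.00327f, -0.07276f,  1.07602f };
  Vec3 v = mul3(aces_in, hdr);
  v = { rtt_odt(v.x), rtt_odt(v.y), rtt_odt(v.z) };
  v = mul3(aces_out, v);
  v = { v.x * exposure_scale, v.y * exposure_scale, v.z * exposure_scale };
  auto to8 = [](float c) {  // gamma 1/2.2, clamp to [0, 1], scale to 8 bit
    float g = std::pow(std::max(c, 0.0f), 1.0f / 2.2f);
    return (uint32_t)(255.0f * std::min(g, 1.0f));
  };
  return 0xff000000u | (to8(v.x) << 16) | (to8(v.y) << 8) | to8(v.z);
}

int main() {
  // Mid-grey HDR radiance at unit exposure; prints the packed ARGB pixel.
  std::printf("0x%08x\n", pack_display({ 0.18f, 0.18f, 0.18f }, 1.0f));
  return 0;
}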
#include "cupoch/camera/pinhole_camera_intrinsic.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/geometry_utils.h" #include "cupoch/geometry/image.h" #include "cupoch/geometry/kdtree_flann.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/utility/console.h" #include "cupoch/utility/helper.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace geometry { namespace { template <class... Args> struct check_nan_functor { check_nan_functor(bool remove_nan, bool remove_infinite) : remove_nan_(remove_nan), remove_infinite_(remove_infinite){}; const bool remove_nan_; const bool remove_infinite_; __device__ bool operator()(const thrust::tuple<Args...> &x) const { const Eigen::Vector3f &point = thrust::get<0>(x); bool is_nan = remove_nan_ && (isnan(point(0)) || isnan(point(1)) || isnan(point(2))); bool is_infinite = remove_infinite_ && (isinf(point(0)) || isinf(point(1)) || isinf(point(2))); return is_nan || is_infinite; } }; struct gaussian_filter_functor { gaussian_filter_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, const Eigen::Vector3f *colors, const int *indices, const float *dists, float sigma2, int num_max_search_points, bool has_normal, bool has_color) : points_(points), normals_(normals), colors_(colors), indices_(indices), dists_(dists), sigma2_(sigma2), num_max_search_points_(num_max_search_points), has_normal_(has_normal), has_color_(has_color){}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const Eigen::Vector3f *colors_; const int *indices_; const float *dists_; const float sigma2_; const int num_max_search_points_; const bool has_normal_; const bool has_color_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> operator()(size_t idx) const { float total_weight = 0.0; Eigen::Vector3f res_p = Eigen::Vector3f::Zero(); Eigen::Vector3f res_n = Eigen::Vector3f::Zero(); Eigen::Vector3f res_c = Eigen::Vector3f::Zero(); for (int i = 0; i < num_max_search_points_; ++i) { const int j = idx * num_max_search_points_ + i; const int idx_j = __ldg(&indices_[j]); if (idx_j >= 0) { float weight = exp(-0.5 * dists_[j] / sigma2_); res_p += weight * points_[idx_j]; if (has_normal_) res_n += weight * normals_[idx_j]; if (has_color_) res_c += weight * colors_[idx_j]; total_weight += weight; } } res_p /= total_weight; res_n /= total_weight; res_c /= total_weight; return thrust::make_tuple(res_p, res_n, res_c); } }; template <class... 
Args> struct pass_through_filter_functor { pass_through_filter_functor(int axis_no, float min_bound, float max_bound) : axis_no_(axis_no), min_bound_(min_bound), max_bound_(max_bound){}; const int axis_no_; const float min_bound_; const float max_bound_; __device__ bool operator()( const thrust::tuple<Eigen::Vector3f, Args...> &x) const { float val = thrust::get<0>(x)[axis_no_]; return val < min_bound_ || max_bound_ < val; } }; } // namespace PointCloud::PointCloud() : GeometryBase3D(Geometry::GeometryType::PointCloud) {} PointCloud::PointCloud(const thrust::host_vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const std::vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const utility::device_vector<Eigen::Vector3f> &points) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(points) {} PointCloud::PointCloud(const PointCloud &other) : GeometryBase3D(Geometry::GeometryType::PointCloud), points_(other.points_), normals_(other.normals_), colors_(other.colors_) {} PointCloud::~PointCloud() {} PointCloud &PointCloud::operator=(const PointCloud &other) { points_ = other.points_; normals_ = other.normals_; colors_ = other.colors_; return *this; } void PointCloud::SetPoints(const thrust::host_vector<Eigen::Vector3f> &points) { points_ = points; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetPoints() const { thrust::host_vector<Eigen::Vector3f> points = points_; return points; } void PointCloud::SetNormals( const thrust::host_vector<Eigen::Vector3f> &normals) { normals_ = normals; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetNormals() const { thrust::host_vector<Eigen::Vector3f> normals = normals_; return normals; } void PointCloud::SetColors(const thrust::host_vector<Eigen::Vector3f> &colors) { colors_ = colors; } thrust::host_vector<Eigen::Vector3f> PointCloud::GetColors() const { thrust::host_vector<Eigen::Vector3f> colors = colors_; return colors; } PointCloud &PointCloud::Clear() { points_.clear(); normals_.clear(); colors_.clear(); return *this; } bool PointCloud::IsEmpty() const { return !HasPoints(); } Eigen::Vector3f PointCloud::GetMinBound() const { return ComputeMinBound<3>(points_); } Eigen::Vector3f PointCloud::GetMaxBound() const { return ComputeMaxBound<3>(points_); } Eigen::Vector3f PointCloud::GetCenter() const { return ComputeCenter<3>(points_); } AxisAlignedBoundingBox<3> PointCloud::GetAxisAlignedBoundingBox() const { return AxisAlignedBoundingBox<3>::CreateFromPoints(points_); } OrientedBoundingBox PointCloud::GetOrientedBoundingBox() const { return OrientedBoundingBox::CreateFromPoints(points_); } PointCloud &PointCloud::Translate(const Eigen::Vector3f &translation, bool relative) { TranslatePoints<3>(translation, points_, relative); return *this; } PointCloud &PointCloud::Scale(const float scale, bool center) { ScalePoints<3>(scale, points_, center); return *this; } PointCloud &PointCloud::Rotate(const Eigen::Matrix3f &R, bool center) { RotatePoints<3>(utility::GetStream(0), R, points_, center); RotateNormals(utility::GetStream(1), R, normals_); cudaSafeCall(cudaDeviceSynchronize()); return *this; } PointCloud &PointCloud::operator+=(const PointCloud &cloud) { // We do not use std::vector::insert to combine std::vector because it will // crash if the pointcloud is added to itself. 
if (cloud.IsEmpty()) return (*this); size_t old_vert_num = points_.size(); size_t add_vert_num = cloud.points_.size(); size_t new_vert_num = old_vert_num + add_vert_num; if ((!HasPoints() || HasNormals()) && cloud.HasNormals()) { normals_.resize(new_vert_num); thrust::copy(cloud.normals_.begin(), cloud.normals_.end(), normals_.begin() + old_vert_num); } else { normals_.clear(); } if ((!HasPoints() || HasColors()) && cloud.HasColors()) { colors_.resize(new_vert_num); thrust::copy(cloud.colors_.begin(), cloud.colors_.end(), colors_.begin() + old_vert_num); } else { colors_.clear(); } points_.resize(new_vert_num); thrust::copy(cloud.points_.begin(), cloud.points_.end(), points_.begin() + old_vert_num); return (*this); } PointCloud PointCloud::operator+(const PointCloud &cloud) const { return (PointCloud(*this) += cloud); } PointCloud &PointCloud::NormalizeNormals() { thrust::for_each(normals_.begin(), normals_.end(), [] __device__(Eigen::Vector3f & nl) { nl.normalize(); }); return *this; } PointCloud &PointCloud::PaintUniformColor(const Eigen::Vector3f &color) { ResizeAndPaintUniformColor(colors_, points_.size(), color); return *this; } PointCloud &PointCloud::Transform(const Eigen::Matrix4f &transformation) { TransformPoints<3>(utility::GetStream(0), transformation, points_); TransformNormals(utility::GetStream(1), transformation, normals_); cudaSafeCall(cudaDeviceSynchronize()); return *this; } std::shared_ptr<PointCloud> PointCloud::Crop( const AxisAlignedBoundingBox<3> &bbox) const { if (bbox.IsEmpty()) { utility::LogError( "[CropPointCloud] AxisAlignedBoundingBox either has zeros " "size, or has wrong bounds."); } return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); } std::shared_ptr<PointCloud> PointCloud::Crop( const OrientedBoundingBox &bbox) const { if (bbox.IsEmpty()) { utility::LogError( "[CropPointCloud] AxisAlignedBoundingBox either has zeros " "size, or has wrong bounds."); } return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); } PointCloud &PointCloud::RemoveNoneFinitePoints(bool remove_nan, bool remove_infinite) { bool has_normal = HasNormals(); bool has_color = HasColors(); size_t old_point_num = points_.size(); size_t k = 0; if (!has_normal && !has_color) { remove_if_vectors( utility::exec_policy(0)->on(0), check_nan_functor<Eigen::Vector3f>(remove_nan, remove_infinite), points_); } else if (has_normal && !has_color) { remove_if_vectors(utility::exec_policy(0)->on(0), check_nan_functor<Eigen::Vector3f, Eigen::Vector3f>( remove_nan, remove_infinite), points_, normals_); } else if (!has_normal && has_color) { remove_if_vectors(utility::exec_policy(0)->on(0), check_nan_functor<Eigen::Vector3f, Eigen::Vector3f>( remove_nan, remove_infinite), points_, colors_); } else { remove_if_vectors( utility::exec_policy(0)->on(0), check_nan_functor<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f>(remove_nan, remove_infinite), points_, normals_, colors_); } utility::LogDebug( "[RemoveNoneFinitePoints] {:d} nan points have been removed.", (int)(old_point_num - k)); return *this; } std::shared_ptr<PointCloud> PointCloud::GaussianFilter( float search_radius, float sigma2, int num_max_search_points) { auto out = std::make_shared<PointCloud>(); if (search_radius <= 0 || sigma2 <= 0 || num_max_search_points <= 0) { utility::LogError( "[GaussianFilter] Illegal input parameters, radius and sigma2 " "must be positive."); return out; } bool has_normal = HasNormals(); bool has_color = HasColors(); KDTreeFlann kdtree; kdtree.SetGeometry(*this); 
utility::device_vector<int> indices; utility::device_vector<float> dist; kdtree.SearchRadius(points_, search_radius, num_max_search_points, indices, dist); size_t n_pt = points_.size(); out->points_.resize(n_pt); if (has_normal) out->normals_.resize(n_pt); if (has_color) out->colors_.resize(n_pt); gaussian_filter_functor func(thrust::raw_pointer_cast(points_.data()), thrust::raw_pointer_cast(normals_.data()), thrust::raw_pointer_cast(colors_.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(dist.data()), sigma2, num_max_search_points, has_normal, has_color); if (has_normal && has_color) { thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(points_.size()), make_tuple_begin(out->points_, out->normals_, out->colors_), func); } else if (has_normal) { thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(points_.size()), make_tuple_iterator(out->points_.begin(), out->normals_.begin(), thrust::make_discard_iterator()), func); } else if (has_color) { thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(points_.size()), make_tuple_iterator(out->points_.begin(), thrust::make_discard_iterator(), out->colors_.begin()), func); } else { thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(points_.size()), make_tuple_iterator(out->points_.begin(), thrust::make_discard_iterator(), thrust::make_discard_iterator()), func); } return out; } std::shared_ptr<PointCloud> PointCloud::PassThroughFilter(int axis_no, float min_bound, float max_bound) { auto out = std::make_shared<PointCloud>(); if (axis_no < 0 || axis_no >= 3) { utility::LogError( "[PassThroughFilter] Illegal input parameters, axis_no " "must be 0, 1 or 2."); return out; } *out = *this; bool has_normal = HasNormals(); bool has_color = HasColors(); if (has_normal && has_color) { remove_if_vectors( utility::exec_policy(0)->on(0), pass_through_filter_functor<Eigen::Vector3f, Eigen::Vector3f>( axis_no, min_bound, max_bound), out->points_, out->normals_, out->colors_); } else if (has_normal) { remove_if_vectors(utility::exec_policy(0)->on(0), pass_through_filter_functor<Eigen::Vector3f>( axis_no, min_bound, max_bound), out->points_, out->normals_); } else if (has_color) { remove_if_vectors(utility::exec_policy(0)->on(0), pass_through_filter_functor<Eigen::Vector3f>( axis_no, min_bound, max_bound), out->points_, out->colors_); } else { remove_if_vectors( utility::exec_policy(0)->on(0), pass_through_filter_functor<>(axis_no, min_bound, max_bound), out->points_); } return out; } } // namespace geometry } // namespace cupoch
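// ---------------------------------------------------------------------------
// A short host-side usage sketch for the filters implemented above. It is not
// part of cupoch itself; it assumes a cupoch build (with CUDA) is available and
// linked, and the point coordinates and filter parameters are arbitrary example
// values chosen only for illustration.
#include <cmath>
#include <cstdio>
#include <thrust/host_vector.h>
#include <Eigen/Core>
#include "cupoch/geometry/pointcloud.h"

int main() {
  thrust::host_vector<Eigen::Vector3f> pts;
  pts.push_back(Eigen::Vector3f(0.0f, 0.0f, 0.0f));
  pts.push_back(Eigen::Vector3f(0.1f, 0.0f, 0.5f));
  pts.push_back(Eigen::Vector3f(NAN, 0.0f, 0.0f));  // removed by the call below

  cupoch::geometry::PointCloud cloud;
  cloud.SetPoints(pts);

  // Drop NaN / infinite points in place.
  cloud.RemoveNoneFinitePoints(/*remove_nan=*/true, /*remove_infinite=*/true);

  // Smooth positions (and normals/colors, if present) with a Gaussian kernel
  // over a radius search: (search_radius, sigma2, num_max_search_points).
  auto smoothed = cloud.GaussianFilter(0.2f, 0.01f, 16);

  // Keep only points whose z coordinate (axis_no == 2) lies in [0, 1].
  auto sliced = smoothed->PassThroughFilter(2, 0.0f, 1.0f);

  std::printf("%zu points after filtering\n", sliced->GetPoints().size());
  return 0;
}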
#include "unit_test.h" #include "matrix.h" #include "csr_multiply.h" #include "matrix_coloring/matrix_coloring.h" namespace amgx { DECLARE_UNITTEST_BEGIN(CsrSparsityILU1Tests_Base); std::string base_keywords() { return "csr"; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Index_vector > void count_non_zeroes( const Index_vector &A_rows, const Index_vector &A_cols, const Index_vector &A_coloring, Index_vector &B_rows, Index_vector &B_cols, bool countOnly ) { typedef typename Index_vector::value_type Index_type; #ifdef USE_CPP_TR1 typedef std::tr1::unordered_set<Index_type> Set; #else typedef std::set<Index_type> Set; #endif int nRows = static_cast<int>( A_rows.size( ) - 1 ); #pragma omp parallel for shared(nRows) for ( int aRowId = 0 ; aRowId < nRows ; ++aRowId ) { Set bCols; Index_type aRowColor = A_coloring[aRowId]; // Insert the column of A inside the set. for ( Index_type aColIt = A_rows[aRowId], aColEnd = A_rows[aRowId + 1] ; aColIt < aColEnd ; ++aColIt ) { bCols.insert( A_cols[aColIt] ); } // If the color of the row is != 0. if ( aRowColor != 0 ) { for ( Index_type aColIt = A_rows[aRowId], aColEnd = A_rows[aRowId + 1] ; aColIt < aColEnd ; ++aColIt ) { Index_type bRowId = A_cols[aColIt]; if ( A_coloring[bRowId] < aRowColor ) { for ( Index_type bColIt = A_rows[bRowId], bColEnd = A_rows[bRowId + 1] ; bColIt < bColEnd ; ++bColIt ) { Index_type bColId = A_cols[bColIt]; if ( A_coloring[bColId] >= A_coloring[bRowId] && A_coloring[bColId] != aRowColor ) { bCols.insert( bColId ); } } } } } if ( countOnly ) { B_rows[aRowId] = static_cast<Index_type>( bCols.size( ) ); } else { Index_type cRowIt = B_rows[aRowId]; for ( typename Set::const_iterator it = bCols.begin() ; it != bCols.end() ; ++it, ++cRowIt ) { assert( cRowIt < B_rows[aRowId + 1] ); B_cols[cRowIt] = *it; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Config > void compare_matrices( Matrix<Config> &A, Matrix<Config> &B ) { A.sortByRowAndColumn(); B.sortByRowAndColumn(); UNITTEST_ASSERT_EQUAL_DESC( "Rows", A.row_offsets, B.row_offsets ); UNITTEST_ASSERT_EQUAL_DESC( "Cols", A.col_indices, B.col_indices ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision > void check_csr_sparsity_ilu1( const Matrix<TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> > &A_h, void *wk ) { typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h; typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d; typedef Matrix<Config_h> Matrix_h; typedef Matrix<Config_d> Matrix_d; typedef Vector<typename Config_h::template setVecPrec<AMGX_vecInt>::Type> IVector_h; typedef Vector<typename Config_d::template setVecPrec<AMGX_vecInt>::Type> IVector_d; Matrix_h B_h; B_h.set_num_rows( A_h.get_num_rows() ); B_h.set_num_cols( A_h.get_num_rows() ); B_h.row_offsets.resize( A_h.get_num_rows() + 1 ); std::ostringstream buffer; { count_non_zeroes( A_h.row_offsets, A_h.col_indices, A_h.getMatrixColoring().getRowColors(), B_h.row_offsets, B_h.col_indices, true ); thrust::exclusive_scan( B_h.row_offsets.begin( ), B_h.row_offsets.end( ), B_h.row_offsets.begin( ) ); cudaCheckError(); int nVals = B_h.row_offsets[A_h.get_num_rows()]; B_h.col_indices.resize( nVals ); 
B_h.values.resize( nVals ); B_h.set_num_nz( nVals ); count_non_zeroes( A_h.row_offsets, A_h.col_indices, A_h.getMatrixColoring().getRowColors(), B_h.row_offsets, B_h.col_indices, false ); } Matrix_d B_d; { Matrix_d A_d( A_h ); IVector_d row_colors_d = A_h.getMatrixColoring().getRowColors();; A_d.getMatrixColoring().setRowColors(row_colors_d);; CSR_Multiply<Config_d>::csr_sparsity_ilu1( A_d, B_d, wk ); } Matrix_d B_d_ref( B_h ); compare_matrices( B_d, B_d_ref ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision > void check_csr_sparsity_poisson( int points, int nx, int ny, int nz, AMG_Config &cfg ) { typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h; typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d; typedef Matrix<Config_h> Matrix_h; typedef Matrix<Config_d> Matrix_d; typedef Vector<typename Config_h::template setVecPrec<AMGX_vecInt>::Type> IVector_h; typedef Vector<typename Config_d::template setVecPrec<AMGX_vecInt>::Type> IVector_d; typedef AMG<VecPrecision, MatPrecision, AMGX_indInt> AMG_Class; Matrix_h A_h; A_h.set_initialized(0); switch (points) { case 5: case 7: case 9: case 27: generatePoissonForTest(A_h, 1, 0, points, nx, ny, nz); break; default: printf("Error invalid number of poisson points specified, valid numbers are 5, 7, 9, 27\n"); } A_h.set_initialized(1); Matrix_d A_d( A_h ); UNITTEST_ASSERT_TRUE ( cfg.parseParameterString("coloring_level=1") == AMGX_OK); A_d.set_initialized(0); A_d.colorMatrix(cfg, "default"); A_h.set_initialized(0); A_h.copyMatrixColoring(A_d.getMatrixColoring()); A_h.set_initialized(1); void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" ); check_csr_sparsity_ilu1( A_h, wk ); CSR_Multiply<Config_d>::csr_workspace_delete( wk ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision > void check_csr_sparsity_square_file( const std::string &filename, AMG_Config &cfg ) { typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h; typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d; typedef Matrix<Config_h> Matrix_h; typedef Matrix<Config_d> Matrix_d; typedef Vector<typename Config_h::template setVecPrec<AMGX_vecInt>::Type> IVector_h; typedef Vector<typename Config_d::template setVecPrec<AMGX_vecInt>::Type> IVector_d; typedef AMG<T_Config::vecPrec, T_Config::matPrec, AMGX_indInt> AMG_Class; Matrix_h A_h; Vector_h x_h, b_h; A_h.set_initialized(0); A_h.addProps(CSR); UNITTEST_ASSERT_TRUE(MatrixIO<Config_h>::readSystem( filename.c_str(), A_h, b_h, x_h ) == AMGX_OK); A_h.set_initialized(1); Matrix_d A_d( A_h ); UNITTEST_ASSERT_TRUE ( cfg.parseParameterString("coloring_level=1") == AMGX_OK); A_d.set_initialized(0); A_h.set_initialized(0); A_d.colorMatrix(cfg, "default"); A_h.copyMatrixColoring(A_d.getMatrixColoring()); A_h.set_initialized(1); void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" ); check_csr_sparsity_ilu1( A_h, wk ); CSR_Multiply<Config_d>::csr_workspace_delete( wk ); } DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Base); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson5_10_10, 
CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson5_10_10)
CsrSparsityILU1Tests_Poisson5_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson5_10_10_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson5_100_100, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson5_100_100)
CsrSparsityILU1Tests_Poisson5_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson5_100_100_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson7_10_10, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 7, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson7_10_10)
CsrSparsityILU1Tests_Poisson7_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson7_10_10_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson7_100_100, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 7, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson7_100_100)
CsrSparsityILU1Tests_Poisson7_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson7_100_100_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson9_10_10, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 9, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson9_10_10)
CsrSparsityILU1Tests_Poisson9_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson9_10_10_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson9_100_100, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 9, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson9_100_100)
CsrSparsityILU1Tests_Poisson9_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson9_100_100_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson27_10_10, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 27, 10, 10, 10, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson27_10_10)
CsrSparsityILU1Tests_Poisson27_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson27_10_10_dDDI;

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

DECLARE_UNITTEST_BEGIN_EXTD(CsrSparsityILU1Tests_Poisson27_100_100, CsrSparsityILU1Tests_Base<T_Config>);
void run()
{
    AMG_Config cfg;
    CsrSparsityILU1Tests_Base<T_Config>::template check_csr_sparsity_poisson<T_Config::vecPrec, T_Config::matPrec>( 27, 100, 100, 100, cfg );
}
DECLARE_UNITTEST_END(CsrSparsityILU1Tests_Poisson27_100_100)
CsrSparsityILU1Tests_Poisson27_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrSparsityILU1Tests_Poisson27_100_100_dDDI;

} // namespace amgx
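// ---------------------------------------------------------------------------
// A standalone serial illustration (not an AMGX test) of the ILU(1) fill-in rule
// that count_non_zeroes() above implements: a row with non-zero color c pulls in,
// from every neighbouring row b of strictly smaller color, each column j of b
// whose color is >= color(b) and != c. The 4x4 CSR pattern and the row coloring
// below are made-up example data.
#include <cstdio>
#include <set>
#include <vector>

int main() {
  std::vector<int> A_rows = { 0, 2, 5, 8, 10 };                   // CSR row offsets
  std::vector<int> A_cols = { 0, 1,  0, 1, 2,  1, 2, 3,  2, 3 };  // CSR column indices
  std::vector<int> color  = { 2, 0, 1, 0 };                       // example row coloring

  for (int r = 0; r < 4; ++r) {
    // Start from the row's own sparsity pattern.
    std::set<int> bCols(A_cols.begin() + A_rows[r], A_cols.begin() + A_rows[r + 1]);
    if (color[r] != 0) {
      for (int it = A_rows[r]; it < A_rows[r + 1]; ++it) {
        int b = A_cols[it];
        if (color[b] >= color[r]) continue;  // only lower-colored neighbours
        for (int jt = A_rows[b]; jt < A_rows[b + 1]; ++jt) {
          int j = A_cols[jt];
          if (color[j] >= color[b] && color[j] != color[r]) bCols.insert(j);
        }
      }
    }
    std::printf("row %d:", r);
    for (int col : bCols) std::printf(" %d", col);
    std::printf("\n");  // rows 0 and 2 each gain one fill-in column (2 and 0)
  }
  return 0;
}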
#include <cuda.h> #include <cub/util_allocator.cuh> #include <algorithm> #include "sharedmem.cuh" using namespace std; using namespace cub; #define NUM_ELEM_PT 16 #define NUM_ELEM_BITSHIFT 4 //#define K 32 //#define KLog2 5 #define ORDERV(x,a,b) { bool swap = reverse ^ (x[a]<x[b]); \ T auxa = x[a]; \ if (swap) { x[a] = x[b]; x[b] = auxa; } } //T auxa = x[a]; T auxb = x[b]; \ //x[a] = (swap)?auxb:auxa; x[b] = (swap)?auxa:auxb;} #define B2V(x,a) { ORDERV(x,a,a+1) } #define B4V(x,a) { for (int i4=0;i4<2;i4++) { ORDERV(x,a+i4,a+i4+2) } B2V(x,a) B2V(x,a+2) } #define B8V(x,a) { for (int i8=0;i8<4;i8++) { ORDERV(x,a+i8,a+i8+4) } B4V(x,a) B4V(x,a+4) } #define B16V(x,a) { for (int i16=0;i16<8;i16++) { ORDERV(x,a+i16,a+i16+8) } B8V(x,a) B8V(x,a+8) } #define B32V(x,a) { for (int i32=0;i32<16;i32++) { ORDERV(x,a+i32,a+i32+16) } B16V(x,a) B16V(x,a+16) } #define B64V(x,a) { for (int i64=0;i64<32;i64++) { ORDERV(x,a+i64,a+i64+32) } B32V(x,a) B32V(x,a+32) } template<typename T> __forceinline__ __device__ T get(T* sdata, int i) { return sdata[i + (i>>5)]; } #define set(a,b,c) { int tempIndex = b; a[tempIndex + (tempIndex >> 5)] = c; } #define NUM_GROUPS (NUM_ELEM_PT/2) #define NUM_GROUPS_BITSHIFT (NUM_ELEM_BITSHIFT-1) #define RUN_64(X) { \ inc >>= 5; \ low = t & (inc - 1); \ tCur = ((t - low) << 6) + low; \ reverse = ((dir & tCur) == 0); \ for (int j=0; j<NUM_GROUPS/(32 * X); j++) { \ for (int i=0; i<64; i++) x[i] = get(sdata, tCur+i*inc); \ B64V(x,0); \ for (int i=0; i<64; i++) set(sdata, tCur+i*inc, x[i]); \ } \ inc >>= 1; \ } #define RUN_32(X) { \ inc >>= 4; \ low = t & (inc - 1); \ tCur = ((t - low) << 5) + low; \ reverse = ((dir & tCur) == 0); \ for (int j=0; j<NUM_GROUPS/(16 * X); j++) { \ for (int i=0; i<32; i++) x[i] = get(sdata, tCur+i*inc); \ B32V(x,0); \ for (int i=0; i<32; i++) set(sdata, tCur+i*inc, x[i]); \ } \ inc >>= 1; \ } #define RUN_16(X) { \ inc >>= 3; \ low = t & (inc - 1); \ tCur = ((t - low) << 4) + low; \ reverse = ((dir & tCur) == 0); \ for (int j=0; j<NUM_GROUPS/(8 * X); j++) { \ for (int i=0; i<16; i++) x[i] = get(sdata, tCur+i*inc); \ B16V(x,0); \ for (int i=0; i<16; i++) set(sdata, tCur+i*inc, x[i]); \ } \ inc >>= 1; \ } #define RUN_8(X) { \ inc >>= 2; \ low = t & (inc - 1); \ tCur = ((t - low) << 3) + low; \ reverse = ((dir & tCur) == 0); \ for (int j=0; j<NUM_GROUPS/(4 * X); j++) { \ for (int i=0; i<8; i++) x[i] = get(sdata, tCur+i*inc); \ B8V(x,0); \ for (int i=0; i<8; i++) set(sdata, tCur+i*inc, x[i]); \ } \ inc >>= 1; \ } #define RUN_4(X) { \ inc >>= 1; \ low = t & (inc - 1); \ tCur = ((t - low) << 2) + low; \ reverse = ((dir & tCur) == 0); \ for (int j=0; j<NUM_GROUPS/(2 * X); j++) { \ for (int i=0;i<4;i++) x[i] = get(sdata, 4*wg*j + tCur + i*inc); \ B4V(x,0); \ for (int i=0;i<4;i++) set(sdata, 4*wg*j + tCur + i*inc, x[i]); \ } \ inc >>= 1; \ } #define RUN_2(X) { \ low = t & (inc - 1); \ tCur = ((t - low) << 1) + low; \ reverse = ((dir & tCur) == 0); \ for (int j=0; j<NUM_GROUPS/(X); j++) { \ for (int i=0;i<2;i++) x[i] = get(sdata, 2*wg*j + tCur + i*inc); \ B2V(x,0); \ for (int i=0;i<2;i++) set(sdata, 2*wg*j + tCur + i*inc, x[i]); \ } \ inc >>= 1; \ } #define REDUCE(X) { \ tCur = ((t >> klog2) << (klog2 + 1)) + (t & (k-1)); \ for(int j=0; j<NUM_GROUPS/(X); j++) { \ x[j] = max(get(sdata, 2*wg*j + tCur), get(sdata, 2*wg*j + tCur + k)); \ } \ __syncthreads(); \ for(int j=0; j<NUM_GROUPS/(X); j++) { \ set(sdata, wg*j + t, x[j]); \ } \ } template<typename T> __global__ void Bitonic_TopKLocalSortInPlace(T* __restrict__ in, T* __restrict__ out, const int k, const int klog2) { /* 
const int k = K;*/ /*const int klog2 = KLog2;*/ // Shared mem size is determined by the host app at run time. // For n elements, we have n * 33/32 shared memory. // We use this to break bank conflicts. SharedMemory<T> smem; T* sdata = smem.getPointer(); const int t = threadIdx.x; // index in workgroup const int wg = blockDim.x; // workgroup size = block size, power of 2 const int gid = blockIdx.x; int length = min(NUM_GROUPS, k >> 1); int inc = length; inc >>= NUM_GROUPS_BITSHIFT; int low = t & (inc - 1); int dir = length << 1; bool reverse; T x[NUM_ELEM_PT]; // Move IN, OUT to block start in += NUM_ELEM_PT * gid * wg; int tCur = t << NUM_ELEM_BITSHIFT; for (int i=0; i<NUM_ELEM_PT; i++) x[i] = in[tCur + i]; for (int i=0; i<NUM_ELEM_PT; i+=2) { reverse = ((i >> 1) + 1)&1; B2V(x,i); } if (k > 2) { #if NUM_ELEM_PT > 4 for (int i=0; i<NUM_ELEM_PT; i+=4) { reverse = ((i >> 2) + 1)&1; B4V(x,i); } if (k > 4) { #if NUM_ELEM_PT > 8 for (int i=0; i<NUM_ELEM_PT; i+=8) { reverse = ((i >> 3) + 1)&1; B8V(x,i); } if (k > 8) { #if NUM_ELEM_PT > 16 for (int i=0; i<NUM_ELEM_PT; i+=16) { reverse = ((i >> 4) + 1)&1; B16V(x,i); } if (k > 16) { #if NUM_ELEM_PT > 32 for (int i=0; i<NUM_ELEM_PT; i+=32) { reverse = ((i >> 5) + 1)&1; B32V(x,i); } if (k > 32) { reverse = ((dir & tCur) == 0); B64V(x,0); } #else reverse = ((dir & tCur) == 0); B32V(x,0); #endif } #else reverse = ((dir & tCur) == 0); B16V(x,0); #endif } #else reverse = ((dir & tCur) == 0); B8V(x,0); #endif } #else reverse = ((dir & tCur) == 0); B4V(x,0); #endif } for (int i=0; i<NUM_ELEM_PT; i++) set(sdata, tCur+i, x[i]); __syncthreads(); // Complete the remaining steps to create sorted sequences of length k. int mod; unsigned int mask; for (length=NUM_ELEM_PT; length<k; length<<=1) { dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(1) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0); if (mod & 1) { RUN_2(1) __syncthreads(); } if (mod & 2) { RUN_4(1) __syncthreads(); } #if NUM_ELEM_PT > 8 if (mod & 4) { RUN_8(1) __syncthreads(); } #if NUM_ELEM_PT > 16 if (mod & 8) { RUN_16(1) __syncthreads(); } while (inc > 8) { RUN_32(1) __syncthreads(); } #else while (inc > 4) { RUN_16(1) __syncthreads(); } #endif // NUM_ELEM_PT > 16 #else while (inc > 2) { RUN_8(1) __syncthreads(); } #endif // NUM_ELEM_PT > 8 } // Step 2: Reduce the size by factor 2 by pairwise comparing adjacent sequences. REDUCE(1) __syncthreads(); // End of Step 2; // Step 3: Construct sorted sequence of length k from bitonic sequence of length k. // We now have n/2 elements. length = k >> 1; dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(1) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0); if (mod & 1) { RUN_2(2) __syncthreads(); } #if NUM_ELEM_PT > 4 if (mod & 2) { RUN_4(2) __syncthreads(); } #if NUM_ELEM_PT > 8 if (mod & 4) { RUN_8(2) __syncthreads(); } while (inc > 4) { if (t < (wg >> 1)) { RUN_16(1) } else { inc >>= 4; } __syncthreads(); } #else while (inc > 2) { RUN_8(2) __syncthreads(); } #endif // NUM_ELEM_PT > 16 #else while (inc > 1) { RUN_4(2) __syncthreads(); } #endif // NUM_ELEM_PT > 8 // Step 4: Reduce size again by 2. 
REDUCE(2) __syncthreads(); // End of Step 1; length = k >> 1; dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(2) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 1); #if NUM_ELEM_PT > 4 if (mod & 1) { RUN_2(4) __syncthreads(); } #if NUM_ELEM_PT > 8 if (mod & 2) { RUN_4(4) __syncthreads(); } while (inc > 2) { if (t < (wg >> 1)) { RUN_8(2) } else { inc >>= 3; } __syncthreads(); } #else while (inc > 1) { RUN_4(4) __syncthreads(); } #endif // NUM_ELEM_PT > 16 #else while (inc > 0) { RUN_2(4) __syncthreads(); } #endif // NUM_ELEM_PT > 8 while (inc > 0) // Step 4: Reduce size again by 2. REDUCE(4) __syncthreads(); // End of Step 1; length = k >> 1; dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(4) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 2); if (mod & 1) { RUN_2(8) __syncthreads(); } while (inc > 0) { if (t < (wg >> 1)) { RUN_4(4) } else { inc >>= 2; } __syncthreads(); } out += (NUM_ELEM_PT/16) * gid * wg; tCur = ((t >> klog2) << (klog2+1)) + (t&(k-1)); for (int j=0; j<NUM_GROUPS/8; j++) { T x0 = get(sdata, 2*wg*j + tCur); T x1 = get(sdata, 2*wg*j + tCur + k); out[wg*j + t] = max(x0, x1); } /* out += (NUM_ELEM_PT/8) * gid * wg;*/ //tCur = ((t >> klog2) << (klog2+1)) + (t&(k-1)); //for (int j=0; j<NUM_GROUPS/4; j++) { //T x0 = get(sdata, 2*wg*j + tCur); //T x1 = get(sdata, 2*wg*j + tCur + k); //out[wg*j + t] = max(x0, x1); /*}*/ } template<typename T> __global__ void Bitonic_TopKReduce(T* __restrict__ in, T* __restrict__ out, const int k, const int klog2) { /* const int k = K;*/ /*const int klog2 = KLog2;*/ // Shared mem size is determined by the host app at run time. // For n elements, we have n * 33/32 shared memory. // We use this to break bank conflicts. SharedMemory<T> smem; T* sdata = smem.getPointer(); const int t = threadIdx.x; // index in workgroup const int wg = blockDim.x; // workgroup size = block size, power of 2 const int gid = blockIdx.x; int length = min(NUM_GROUPS, k >> 1); int inc = length; inc >>= NUM_GROUPS_BITSHIFT; int low = t & (inc - 1); int dir = length << 1; bool reverse; T x[NUM_ELEM_PT]; // Move IN, OUT to block start in += NUM_ELEM_PT * gid * wg; int tCur = t << NUM_ELEM_BITSHIFT; for (int i=0; i<NUM_ELEM_PT; i++) x[i] = in[tCur + i]; for (int i=0; i<NUM_ELEM_PT; i++) set(sdata, tCur+i, x[i]); __syncthreads(); // Complete the remaining steps to create sorted sequences of length k. int mod; unsigned int mask; length = (k >> 1); dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(1) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0); if (mod & 1) { RUN_2(1) __syncthreads(); } if (mod & 2) { RUN_4(1) __syncthreads(); } #if NUM_ELEM_PT > 8 if (mod & 4) { RUN_8(1) __syncthreads(); } #if NUM_ELEM_PT > 16 if (mod & 8) { RUN_16(1) __syncthreads(); } while (inc > 8) { RUN_32(1) __syncthreads(); } #else while (inc > 4) { RUN_16(1) __syncthreads(); } #endif // NUM_ELEM_PT > 16 #else while (inc > 2) { RUN_8(1) __syncthreads(); } #endif // NUM_ELEM_PT > 8 // Step 2: Reduce the size by factor 2 by pairwise comparing adjacent sequences. REDUCE(1) __syncthreads(); // End of Step 2; // Step 3: Construct sorted sequence of length k from bitonic sequence of length k. // We now have n/2 elements. 
length = k >> 1; dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(1) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 0); if (mod & 1) { RUN_2(2) __syncthreads(); } #if NUM_ELEM_PT > 4 if (mod & 2) { RUN_4(2) __syncthreads(); } #if NUM_ELEM_PT > 8 if (mod & 4) { RUN_8(2) __syncthreads(); } while (inc > 4) { if (t < (wg >> 1)) { RUN_16(1) } else { inc >>= 4; } __syncthreads(); } #else while (inc > 2) { RUN_8(2) __syncthreads(); } #endif // NUM_ELEM_PT > 16 #else while (inc > 1) { RUN_4(2) __syncthreads(); } #endif // NUM_ELEM_PT > 8 // Step 4: Reduce size again by 2. REDUCE(2) __syncthreads(); // End of Step 1; length = k >> 1; dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(2) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 1); #if NUM_ELEM_PT > 4 if (mod & 1) { RUN_2(4) __syncthreads(); } #if NUM_ELEM_PT > 8 if (mod & 2) { RUN_4(4) __syncthreads(); } while (inc > 2) { if (t < (wg >> 1)) { RUN_8(2) } else { inc >>= 3; } __syncthreads(); } #else while (inc > 1) { RUN_4(4) __syncthreads(); } #endif // NUM_ELEM_PT > 16 #else while (inc > 0) { RUN_2(4) __syncthreads(); } #endif // NUM_ELEM_PT > 8 while (inc > 0) // Step 4: Reduce size again by 2. REDUCE(4) __syncthreads(); // End of Step 1; length = k >> 1; dir = length << 1; // Loop on comparison distance (between keys) inc = length; mod = inc; mask = ~(NUM_ELEM_PT/(4) - 1); while ((mod & mask) != 0) mod >>= (NUM_ELEM_BITSHIFT - 2); if (mod & 1) { RUN_2(8) __syncthreads(); } while (inc > 0) { if (t < (wg >> 1)) { RUN_4(4) } else { inc >>= 2; } __syncthreads(); } out += (NUM_ELEM_PT/16) * gid * wg; tCur = ((t >> klog2) << (klog2+1)) + (t&(k-1)); for (int j=0; j<NUM_GROUPS/8; j++) { T x0 = get(sdata, 2*wg*j + tCur); T x1 = get(sdata, 2*wg*j + tCur + k); out[wg*j + t] = max(x0, x1); } } const int tab32[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31}; int log2_32 (uint value) { value |= value >> 1; value |= value >> 2; value |= value >> 4; value |= value >> 8; value |= value >> 16; return tab32[(uint)(value*0x07C4ACDD) >> 27]; } template<typename KeyT> cudaError_t bitonicTopK(KeyT *d_keys_in, unsigned int num_items, unsigned int k, KeyT *d_keys_out, CachingDeviceAllocator& g_allocator) { if (k < 16) k = 16; int klog2 = log2_32(k); DoubleBuffer<KeyT> d_keys; d_keys.d_buffers[0] = d_keys_in; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(KeyT) * num_items)); int current = 0; int numThreads = num_items; int wg_size = max(64,k); numThreads >>= 1; // Each thread processes 2 elements. 
numThreads >>= NUM_GROUPS_BITSHIFT; Bitonic_TopKLocalSortInPlace<KeyT><<<numThreads/wg_size, wg_size, ((2*NUM_GROUPS*wg_size*33)/32)*sizeof(KeyT)>>>(d_keys.Current(), d_keys.Alternate(), k, klog2); current = 1-current; // Toggle the buffer index in the double buffer d_keys.selector = d_keys.selector ^ 1; numThreads >>= (1 + NUM_GROUPS_BITSHIFT); while (numThreads >= wg_size) { Bitonic_TopKReduce<KeyT><<<numThreads/wg_size, wg_size, ((2*NUM_GROUPS*wg_size*33)/32)*sizeof(KeyT)>>>(d_keys.Current(), d_keys.Alternate(), k, klog2); // Toggle the buffer index in the double buffer d_keys.selector = d_keys.selector ^ 1; numThreads >>= (1 + NUM_GROUPS_BITSHIFT); } KeyT* res_vec = (KeyT*) malloc(sizeof(KeyT) * 2 * numThreads * NUM_GROUPS); cudaMemcpy(res_vec, d_keys.Current(), 2 * numThreads * NUM_GROUPS * sizeof(KeyT), cudaMemcpyDeviceToHost); std::sort(res_vec, res_vec + 2*numThreads*NUM_GROUPS, std::greater<KeyT>()); cudaMemcpy(d_keys_out, res_vec, k * sizeof(KeyT), cudaMemcpyHostToDevice); if (d_keys.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[1])); return cudaSuccess; }
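// ---------------------------------------------------------------------------
// A minimal driver sketch for bitonicTopK() above. It is illustrative only and
// assumes it is compiled in the same .cu translation unit; num_items and k are
// kept as powers of two (with k >= 16) because the kernels above rely on
// power-of-two sizes, and the random fill is just example data.
#include <cstdio>
#include <cstdlib>
#include <vector>

int run_topk_demo() {
  const unsigned int num_items = 1u << 20;   // 1M keys
  const unsigned int k = 32;

  std::vector<float> h_in(num_items);
  for (unsigned int i = 0; i < num_items; ++i)
    h_in[i] = (float)rand() / RAND_MAX;

  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, num_items * sizeof(float));
  cudaMalloc(&d_out, k * sizeof(float));
  cudaMemcpy(d_in, h_in.data(), num_items * sizeof(float), cudaMemcpyHostToDevice);

  // The allocator provides the temporary double buffer; note that the call
  // overwrites the contents of d_in.
  CachingDeviceAllocator allocator(true);
  bitonicTopK<float>(d_in, num_items, k, d_out, allocator);

  std::vector<float> h_top(k);
  cudaMemcpy(h_top.data(), d_out, k * sizeof(float), cudaMemcpyDeviceToHost);
  for (unsigned int i = 0; i < k; ++i)
    std::printf("%u: %f\n", i, h_top[i]);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}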
/****************************************************************************** * 1D_scalar routines ******************************************************************************/ #pragma once #include <gunrock/oprtr/1D_oprtr/for_all.cuh> #include <gunrock/oprtr/1D_oprtr/for_each.cuh> namespace gunrock { namespace oprtr { // TODO: The memset kernels are getting nasty. // Need to use operator overload to rewrite most // of these some day. /** * \addtogroup PublicInterface * @{ */ /** * @brief Memset a device vector. * * @tparam T datatype of the vector. * * @param[in] d_out Device-side vector we need to process on * @param[in] value Value we want to set * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Set_Kernel(ValueT *d_out, T value, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx] = value; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Set(ValueT *elements, T value, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, [value] __host__ __device__(ValueT & element) { element = value; }, length, target, stream); } /*template <typename VertexId, typename SizeT, typename Value> __global__ void MemsetAddEdgeValKernel(Coo<VertexId, Value> *d_out, VertexId value, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx].row += value; d_out[idx].col += value; } }*/ /** * @brief Memset a device vector with the element's index in the vector * * @tparam T datatype of the vector. * * @param[in] d_out Device-side vector we need to process on * @param[in] length Vector length * @param[in] scale The scale for indexing (1 by default) */ template <typename ValueT, typename T, typename SizeT> __global__ void SetIdx_Kernel(ValueT *d_out, T scale, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx] = idx * scale; } } template <typename ValueT, typename T, typename SizeT> cudaError_t SetIdx(ValueT *elements, T scale, // = 1, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForAll(elements, [scale] __host__ __device__(ValueT * elements, int pos) { elements[pos] = pos * scale; }, length, target, stream); } /** * @brief Add value to each element in a device vector. * * @tparam T datatype of the vector. * * @param[in] d_out Device-side vector we need to process on * @param[in] value Value we want to add to each element in the vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Add_Kernel(ValueT *d_out, T value, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx] += value; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Add(ValueT *elements, T value, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, [value] __host__ __device__(ValueT & element) { element += value; }, length, target, stream); } /** * @brief Minus value to each element in a device vector. * * @tparam T datatype of the vector. 
* * @param[in] d_out Device-side vector we need to process on * @param[in] value Value we want to add to each element in the vector * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Minus_Kernel(ValueT *d_out, T value, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx] -= value; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Minus(ValueT *elements, T value, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, [value] __host__ __device__(ValueT & element) { element -= value; }, length, target, stream); } /** * @brief Multiply each element in a device vector to a certain factor. * * @tparam T datatype of the vector. * * @param[in] d_out Device-side vector we need to process on * @param[in] value Scale factor * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Mul_Kernel(ValueT *d_out, T value, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx] *= value; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Mul(ValueT *elements, T value, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, [value] __host__ __device__(ValueT & element) { element *= value; }, length, target, stream); } /** * @brief Divide each element in a device vector to a certain factor. * TODO: divide by zero check * * @tparam T datatype of the vector. * * @param[in] d_out Device-side vector we need to process on * @param[in] value Scale factor * @param[in] length Vector length */ template <typename ValueT, typename T, typename SizeT> __global__ void Div_Kernel(ValueT *d_out, T value, SizeT length) { const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x; for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length; idx += STRIDE) { d_out[idx] /= value; } } template <typename ValueT, typename T, typename SizeT> cudaError_t Div(ValueT *elements, T value, SizeT length, util::Location target = util::DEVICE, cudaStream_t stream = 0) { return ForEach( elements, [value] __host__ __device__(ValueT & element) { element /= value; }, length, target, stream); } /** * @brief Compare an element to a comp, if equal, assign val to it * * @tparam T datatype of the vector. 
 *
 * @param[in] d_dst   Device-side vector we need to process on
 * @param[in] compare Value each element is compared against
 * @param[in] val     Value assigned to an element when the comparison matches
 * @param[in] length  Vector length
 */
template <typename ValueT, typename CompareT, typename AssignT, typename SizeT>
__global__ void CAS_Kernel(ValueT *d_dst, CompareT compare, AssignT val,
                           SizeT length) {
  const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x;
  for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length;
       idx += STRIDE) {
    if (d_dst[idx] == compare) d_dst[idx] = val;
  }
}

template <typename ValueT, typename CompareT, typename AssignT, typename SizeT>
__global__ void CAS_Kernel(ValueT *d_dst, CompareT compare, AssignT val,
                           SizeT *length_) {
  const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x;
  SizeT length = length_[0];
  for (SizeT idx = ((SizeT)blockIdx.x * blockDim.x) + threadIdx.x; idx < length;
       idx += STRIDE) {
    if (d_dst[idx] == compare) d_dst[idx] = val;
  }
}

/** @} */

}  // namespace oprtr

namespace util {

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>
    &Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::operator=(T val) {
  GRError(Set(val), std::string(name) + " Set() failed.", __FILE__, __LINE__);
  return (*this);
}

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>
    &Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::operator+=(T val) {
  GRError(Add(val), std::string(name) + " Add() failed.", __FILE__, __LINE__);
  return (*this);
}

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>
    &Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::operator-=(T val) {
  GRError(Minus(val), std::string(name) + " Minus() failed.", __FILE__,
          __LINE__);
  return (*this);
}

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>
    &Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::operator*=(T val) {
  GRError(Mul(val), std::string(name) + " Mul() failed.", __FILE__, __LINE__);
  return (*this);
}

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>
    &Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::operator/=(T val) {
  GRError(Div(val), std::string(name) + " Div() failed.", __FILE__, __LINE__);
  return (*this);
}

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Set(
    T value,
    SizeT length,         // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue,
    Location target,      // = LOCATION_DEFAULT,
    cudaStream_t stream)  // = 0)
{
  return ForEach(
      [value] __host__ __device__(ValueT & element) { element = value; },
      length, target, stream);
}

template <typename SizeT, typename ValueT, ArrayFlag FLAG,
          unsigned int cudaHostRegisterFlag>
template <typename T>
cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::SetIdx(
    T scale,              // = 1,
    SizeT length,         // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue,
    Location target,      // = LOCATION_DEFAULT,
    cudaStream_t stream)  // = 0)
{
  return ForAll(
      [scale] __host__ __device__(ValueT * elements, SizeT pos) {
        elements[pos] = pos * scale;
      },
      length, target,
stream); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename T> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Add( T value, SizeT length, // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue, Location target, // = LOCATION_DEFAULT, cudaStream_t stream) // = 0) { return ForEach( [value] __host__ __device__(ValueT & element) { element += value; }, length, target, stream); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename T> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Minus( T value, SizeT length, // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue, Location target, // = LOCATION_DEFAULT, cudaStream_t stream) // = 0) { return ForEach( [value] __host__ __device__(ValueT & element) { element -= value; }, length, target, stream); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename T> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Mul( T value, SizeT length, // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue, Location target, // = LOCATION_DEFAULT, cudaStream_t stream) // = 0) { return ForEach( [value] __host__ __device__(ValueT & element) { element *= value; }, length, target, stream); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename T> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::Div( T value, SizeT length, // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue, Location target, // = LOCATION_DEFAULT, cudaStream_t stream) // = 0) { return ForEach( [value] __host__ __device__(ValueT & element) { element /= value; }, length, target, stream); } template <typename SizeT, typename ValueT, ArrayFlag FLAG, unsigned int cudaHostRegisterFlag> template <typename CompareT, typename AssignT> cudaError_t Array1D<SizeT, ValueT, FLAG, cudaHostRegisterFlag>::CAS( CompareT compare, AssignT assign, SizeT length, // = PreDefinedValues<typename ArrayT::SizeT>::InvalidValue, Location target, // = LOCATION_DEFAULT, cudaStream_t stream) // = 0) { // typedef typename ArrayT::ValueT ValueT; return ForEach( [compare, assign] __host__ __device__(ValueT & element) { if (element == compare) element = assign; }, length, target, stream); } } // namespace util } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
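// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the library above): the Minus/Mul/Div
// kernels and their host wrappers all rely on the same grid-stride loop so
// that a fixed launch configuration can cover a vector of any length. A
// minimal, self-contained version of that pattern is shown below; the names
// ScaleKernel/LaunchScale are hypothetical and the launch configuration is
// deliberately simplistic.
#include <cuda_runtime.h>

template <typename ValueT, typename SizeT>
__global__ void ScaleKernel(ValueT *d_out, ValueT factor, SizeT length) {
  // Total number of threads in the grid; each thread strides over the vector.
  const SizeT stride = (SizeT)gridDim.x * blockDim.x;
  for (SizeT idx = (SizeT)blockIdx.x * blockDim.x + threadIdx.x; idx < length;
       idx += stride)
    d_out[idx] *= factor;
}

template <typename ValueT, typename SizeT>
cudaError_t LaunchScale(ValueT *d_out, ValueT factor, SizeT length,
                        cudaStream_t stream = 0) {
  if (length == 0) return cudaSuccess;
  const int block = 256;
  // Enough blocks to cover the vector once; the stride loop keeps the kernel
  // correct even if the grid were capped to fewer blocks.
  const int grid = (int)((length + block - 1) / block);
  ScaleKernel<<<grid, block, 0, stream>>>(d_out, factor, length);
  return cudaGetLastError();
}
// ---------------------------------------------------------------------------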
#include "camera_calibration/bundle_adjustment/cuda_joint_optimization.cuh" #include <cub/cub.cuh> #include <cuda_runtime.h> #include <libvis/cuda/cuda_auto_tuner.h> #include <libvis/cuda/cuda_util.h> #include <math_constants.h> #include "camera_calibration/bundle_adjustment/joint_optimization_jacobians.h" #include "camera_calibration/cuda/cuda_matrix.cuh" #include "camera_calibration/cuda/cuda_util.cuh" #include "camera_calibration/models/cuda_central_generic_model.cuh" namespace vis { /* * Schema for accumulator classes: * * struct Accumulator { * /// Called if the residuals belonging to this thread are invalid. * /// This is only called once and before any SetJacobianComponent() call, * /// except for calls to SetJacobianComponent_AllThreadsSameIndex() with valid == false. * __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index); * * /// Called if the residuals belonging to this thread are valid. * /// This is only called once and before any SetJacobianComponent() call, * /// except for calls to SetJacobianComponent_AllThreadsSameIndex() with valid == false. * __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index); * * /// Sets the values of one column in the [2 x N] Jacobian of the pixel position * /// wrt. the unknowns. I.e., value_x corresponds to the x-residual (row 0) and value_y * /// to the y-residual (row 1). * /// * /// This version is called if there are no possible conflicts between * /// different threads in the kernel call, i.e., for a given thread, no other * /// thread in the thread grid possibly writes to the same index. * /// * /// In case the residuals are invalid (SetResidualsInvalid() has been called * /// before), this function must not be called. * __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y); * * /// See SetJacobianComponent_ThreadConflictFree(). This version is used * /// if all threads in the thread block write to the same index. * /// * /// This variant of SetJacobianComponent() is called both if the residuals * /// are valid and if they are invalid to enable block-wide operations. * /// * /// NOTE: If the temp_storage is used before, a __syncthreads() has to be done. * __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid); * * /// See SetJacobianComponent_ThreadConflictFree(). This version is used * /// if none of the other two versions applies. * /// * /// In case the residuals are invalid (SetResidualsInvalid() has been called * /// before), this function must not be called. 
* __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y); * }; */ constexpr PCGScalar kHuberWeight = 1.0; // TODO: Make parameter constexpr int kResidualJacobianBlockSize = 256; template<int block_width, int block_height, bool compute_jacobians, bool are_camera_tr_rig_in_state, class Model, class Accumulator> __device__ void ComputeResidualAndJacobian( bool valid, u32 feature_index, CUDADatasetAndState& s, Model& model, Accumulator& accumulator) { u16 point_index = s.features_index[feature_index]; float3 point = make_float3(s.points[0][point_index], s.points[1][point_index], s.points[2][point_index]); u16 image_index = s.features_image[feature_index]; float3 local_point = s.image_tr_global[image_index] * point; float2 pixel = make_float2(0.5f * (model.calibration_min_x() + model.calibration_max_x() + 1), 0.5f * (model.calibration_min_y() + model.calibration_max_y() + 1)); if (!model.ProjectWithInitialEstimate(local_point, &pixel)) { if (valid) { accumulator.SetResidualsInvalid(s.features_residual_x, feature_index); } valid = false; } if (valid) { accumulator.SetResiduals( pixel.x - s.features_x[feature_index], pixel.y - s.features_y[feature_index], s.features_residual_x, s.features_residual_y, feature_index); } if (compute_jacobians) { // Compute Jacobian wrt. image pose, optionally camera_tr_rig, and point position [2 x (6 + (rig ? 6 : 0) + 3)]. // Residual: Project(exp(delta) * image_tr_pattern * pattern_point) - measurement // Compute Jacobian as follows: // (d pixel) / (d local_point) [2 x 3], numerical // * (d local_point) / (d pose and global_point) [3 x (7 + (rig ? 7 : 0) + 3)], analytical // Numerical part: CUDAMatrix<PCGScalar, 2, 3> pixel_wrt_local_point; const PCGScalar kDelta = s.numerical_diff_delta * (model.is_central_camera_model() ? Norm(local_point) : 0.1); #pragma unroll for (int dimension = 0; dimension < 3; ++ dimension) { float3 offset_point = local_point; *(&offset_point.x + dimension) += kDelta; float2 offset_pixel = pixel; if (!model.ProjectWithInitialEstimate(offset_point, &offset_pixel)) { valid = false; break; } pixel_wrt_local_point(0, dimension) = (offset_pixel.x - pixel.x) / kDelta; pixel_wrt_local_point(1, dimension) = (offset_pixel.y - pixel.y) / kDelta; } // Analytical part: CUDAMatrix<PCGScalar, 3, 7 + 7 + 3> local_point_wrt_poses_and_global_point; if (are_camera_tr_rig_in_state) { ComputeRigJacobian( s.camera_q_rig[4 * s.camera_index + 0], s.camera_q_rig[4 * s.camera_index + 1], s.camera_q_rig[4 * s.camera_index + 2], s.camera_q_rig[4 * s.camera_index + 3], point.x, point.y, point.z, s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], s.rig_tr_global[7 * image_index + 4], s.rig_tr_global[7 * image_index + 5], s.rig_tr_global[7 * image_index + 6], local_point_wrt_poses_and_global_point.row(0), local_point_wrt_poses_and_global_point.row(1), local_point_wrt_poses_and_global_point.row(2)); } else { // NOTE: The first row expects image_q_global values. Thus, here we assume // that rig_q_global == image_q_global, i.e., the camera_q_rig // transformation is identity. 
ComputeJacobian( s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], point.x, point.y, point.z, local_point_wrt_poses_and_global_point.row(0), local_point_wrt_poses_and_global_point.row(1), local_point_wrt_poses_and_global_point.row(2)); } CUDAMatrix<PCGScalar, 2, 6> pose_jacobian; CUDAMatrix<PCGScalar, 2, 6> rig_jacobian; CUDAMatrix<PCGScalar, 2, 3> point_jacobian; if (are_camera_tr_rig_in_state) { // local_point_wrt_poses_and_global_point contains the Jacobian wrt.: // - rig_tr_global (indices 0 .. 6) // - camera_tr_rig (indices 7 .. 13) // - global_point (indices 14 .. 16) CUDAMatrix<PCGScalar, 4, 3> camera_q_rig_wrt_update; QuaternionJacobianWrtLocalUpdate(s.camera_q_rig[4 * s.camera_index + 0], s.camera_q_rig[4 * s.camera_index + 1], s.camera_q_rig[4 * s.camera_index + 2], s.camera_q_rig[4 * s.camera_index + 3], &camera_q_rig_wrt_update); CUDAMatrix<PCGScalar, 4, 3> rig_q_global_wrt_update; QuaternionJacobianWrtLocalUpdate(s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], &rig_q_global_wrt_update); CUDAMatrix<PCGScalar, 2, 4> temp; MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(0)); MatrixMultiply(pose_jacobian.cols<3>(0), temp, rig_q_global_wrt_update); MatrixMultiply(pose_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(0 + 4)); MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(7)); MatrixMultiply(rig_jacobian.cols<3>(0), temp, camera_q_rig_wrt_update); MatrixMultiply(rig_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(7 + 4)); MatrixMultiply(point_jacobian, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(14)); } else { // local_point_wrt_poses_and_global_point contains the Jacobian wrt.: // - rig_tr_global (indices 0 .. 6) // - global_point (indices 7 .. 9) // NOTE: Here, we assume that rig_q_global == image_q_global, i.e., the // camera_q_rig transformation is identity. 
CUDAMatrix<PCGScalar, 4, 3> quaternion_wrt_update; // derived in derive_jacobians.py QuaternionJacobianWrtLocalUpdate(s.rig_tr_global[7 * image_index + 0], s.rig_tr_global[7 * image_index + 1], s.rig_tr_global[7 * image_index + 2], s.rig_tr_global[7 * image_index + 3], &quaternion_wrt_update); CUDAMatrix<PCGScalar, 2, 4> temp; MatrixMultiply(temp, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<4>(0)); MatrixMultiply(pose_jacobian.cols<3>(0), temp, quaternion_wrt_update); MatrixMultiply(pose_jacobian.cols<3>(3), pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(4)); MatrixMultiply(point_jacobian, pixel_wrt_local_point, local_point_wrt_poses_and_global_point.cols<3>(7)); } // Get the model Jacobian constexpr int num_intrinsic_variables = Model::IntrinsicsJacobianSize; CUDAMatrix<u32, num_intrinsic_variables, 1> grid_update_indices; CUDAMatrix<PCGScalar, 2, num_intrinsic_variables> intrinsic_jac; if (!model.ProjectionJacobianWrtIntrinsics( local_point, pixel, s.numerical_diff_delta, grid_update_indices.data(), intrinsic_jac.row(0), intrinsic_jac.row(1))) { valid = false; } // Accumulate Jacobians: if (are_camera_tr_rig_in_state) { if (valid) { for (int i = 0; valid && i < 6; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.rig_tr_global_start_index + 6 * image_index + i, pose_jacobian(0, i), pose_jacobian(1, i)); } } for (int i = 0; i < 6; ++ i) { accumulator.SetJacobianComponent_AllThreadsSameIndex( s.camera_tr_rig_start_index + s.camera_index * 6 + i, rig_jacobian(0, i), rig_jacobian(1, i), valid); } if (valid) { for (int i = 0; i < 3; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.points_start_index + point_index * 3 + i, point_jacobian(0, i), point_jacobian(1, i)); } } } else { if (valid) { for (int i = 0; i < 6; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.rig_tr_global_start_index + 6 * image_index + i, pose_jacobian(0, i), pose_jacobian(1, i)); } for (int i = 0; i < 3; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.points_start_index + point_index * 3 + i, point_jacobian(0, i), point_jacobian(1, i)); } } } if (valid) { for (int i = 0; i < num_intrinsic_variables; ++ i) { accumulator.SetJacobianComponent_RandomThreadConflicts( s.intrinsic_start_index + grid_update_indices(i), intrinsic_jac(0, i), intrinsic_jac(1, i)); } } } } template <int block_width, int block_height> struct PCGCompareCostAccumulator { __forceinline__ __device__ PCGCompareCostAccumulator( typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // No need to do anything, as these residuals do not matter for the comparison } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { PCGScalar this_cost = ComputeHuberCost(residual_x, residual_y, kHuberWeight); cost_ += this_cost; if (::isnan(features_residual_x[feature_index])) { // These residuals were invalid for the other cost, so ignore them for the comparison return; } // Both in the old and the new state, the residuals are valid. Compare them. 
PCGScalar other_cost = ComputeHuberCost(features_residual_x[feature_index], features_residual_y[feature_index], kHuberWeight); relative_cost_ += this_cost - other_cost; } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/) {} __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/, bool /*valid*/) {} __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 /*index*/, PCGScalar /*jac_x*/, PCGScalar /*jac_y*/) {} PCGScalar cost_ = 0; PCGScalar relative_cost_ = 0; typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGCompareCostCUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_cost, CUDABuffer_<PCGScalar> pcg_relative_cost) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGCompareCostAccumulator<block_width, block_height> accumulator(&temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ false, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_cost(0, 0), accumulator.cost_, valid, &temp_storage); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_relative_cost(0, 0), accumulator.relative_cost_, valid, &temp_storage); } template <class Model> void PCGCompareCostCUDA( cudaStream_t stream, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_cost, CUDABuffer_<PCGScalar>* pcg_relative_cost) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, PCGCompareCostCUDAKernel<block_width, Model, _are_camera_tr_rig_in_state> <<<grid_dim, block_dim, 0, stream>>>( s, model, *pcg_cost, *pcg_relative_cost);); CHECK_CUDA_NO_ERROR(); } template void PCGCompareCostCUDA<CUDACentralGenericModel>( cudaStream_t stream, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_cost, CUDABuffer_<PCGScalar>* pcg_relative_cost); template <int block_width, int block_height> struct PCGInitAccumulator { __forceinline__ __device__ PCGInitAccumulator( CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : pcg_r_(pcg_r), pcg_M_(pcg_M), temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { features_residual_x[feature_index] = CUDART_NAN_F; // features_residual_y[feature_index] = CUDART_NAN_F; } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, 
PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { features_residual_x[feature_index] = residual_x; features_residual_y[feature_index] = residual_y; // Cache residuals and weights weight_ = ComputeHuberWeight(residual_x, residual_y, kHuberWeight); weighted_residual_x_ = weight_ * residual_x; weighted_residual_y_ = weight_ * residual_y; cost_ += ComputeHuberCost(residual_x, residual_y, kHuberWeight); } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { (*pcg_r_)(0, index) -= jac_x * weighted_residual_x_ + jac_y * weighted_residual_y_; (*pcg_M_)(0, index) += jac_x * weight_ * jac_x + jac_y * weight_ * jac_y; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { BlockedAtomicSum<block_width, block_height>( &(*pcg_r_)(0, index), -1 * jac_x * weighted_residual_x_ + -1 * jac_y * weighted_residual_y_, valid, temp_storage_); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &(*pcg_M_)(0, index), jac_x * weight_ * jac_x + jac_y * weight_ * jac_y, valid, temp_storage_); __syncthreads(); } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { atomicAddFloatOrDouble( &(*pcg_r_)(0, index), -1 * jac_x * weighted_residual_x_ + -1 * jac_y * weighted_residual_y_); atomicAddFloatOrDouble( &(*pcg_M_)(0, index), jac_x * weight_ * jac_x + jac_y * weight_ * jac_y); } PCGScalar weight_; PCGScalar weighted_residual_x_; PCGScalar weighted_residual_y_; PCGScalar cost_ = 0; CUDABuffer_<PCGScalar>* pcg_r_; CUDABuffer_<PCGScalar>* pcg_M_; typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGInitCUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_cost) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGInitAccumulator<block_width, block_height> accumulator(&pcg_r, &pcg_M, &temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); __syncthreads(); BlockedAtomicSum<block_width, block_height>( &pcg_cost(0, 0), accumulator.cost_, valid, &temp_storage); } template <class Model> void PCGInitCUDA( cudaStream_t stream, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, CUDABuffer_<PCGScalar>* pcg_cost) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, PCGInitCUDAKernel<block_width, Model, _are_camera_tr_rig_in_state> <<<grid_dim, block_dim, 0, 
stream>>>( s, model, *pcg_r, *pcg_M, *pcg_cost);); CHECK_CUDA_NO_ERROR(); } template void PCGInitCUDA<CUDACentralGenericModel>( cudaStream_t stream, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_r, CUDABuffer_<PCGScalar>* pcg_M, CUDABuffer_<PCGScalar>* pcg_cost); template<int block_width> __global__ void PCGInit2CUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_delta, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGScalar alpha_term; if (unknown_index < unknown_count) { pcg_g(0, unknown_index) = 0; // p_0 = M^-1 r_0 // The addition of lambda is also handled here. PCGScalar r_value = pcg_r(0, unknown_index); PCGScalar p_value = r_value / (pcg_M(0, unknown_index) + lambda); pcg_p(0, unknown_index) = p_value; // delta_0 = 0 pcg_delta(0, unknown_index) = 0; // alpha_n_0 = r_0^T p_0 alpha_term = r_value * p_value; } BlockedAtomicSum<block_width, block_height>( &pcg_alpha_n(0, 0), alpha_term, unknown_index < unknown_count, &temp_storage); } void PCGInit2CUDA( cudaStream_t stream, u32 unknown_count, PCGScalar lambda, const CUDABuffer_<PCGScalar>& pcg_r, const CUDABuffer_<PCGScalar>& pcg_M, CUDABuffer_<PCGScalar>* pcg_delta, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } cudaMemsetAsync(pcg_alpha_n->address(), 0, 1 * sizeof(PCGScalar), stream); CUDA_AUTO_TUNE_1D_TEMPLATED( PCGInit2CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, pcg_r, pcg_M, *pcg_delta, *pcg_g, *pcg_p, *pcg_alpha_n); CHECK_CUDA_NO_ERROR(); } template <int block_width, int block_height> struct PCGStep1SumAccumulator { __forceinline__ __device__ PCGStep1SumAccumulator(const CUDABuffer_<PCGScalar>& pcg_p) : pcg_p_(pcg_p) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { // Cache weights weight_ = ComputeHuberWeight(residual_x, residual_y, kHuberWeight); } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { if (valid) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { PCGScalar p = pcg_p_(0, index); sum_x_ += jac_x * p; sum_y_ += jac_y * p; } PCGScalar sum_x_ = 0; // holds the result of (J * p) for the row of the first residual. PCGScalar sum_y_ = 0; // holds the result of (J * p) for the row of the second residual. 
PCGScalar weight_ = 0; const CUDABuffer_<PCGScalar>& pcg_p_; }; template <int block_width, int block_height> struct PCGStep1ResolveAccumulator { __forceinline__ __device__ PCGStep1ResolveAccumulator( PCGScalar sum_x, PCGScalar sum_y, CUDABuffer_<PCGScalar>* pcg_g, typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage) : sum_x_(sum_x), sum_y_(sum_y), pcg_g_(pcg_g), temp_storage_(temp_storage) {} __forceinline__ __device__ void SetResidualsInvalid(PCGScalar* features_residual_x, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetResiduals(PCGScalar residual_x, PCGScalar residual_y, PCGScalar* features_residual_x, PCGScalar* features_residual_y, u32 feature_index) { // Do nothing } __forceinline__ __device__ void SetJacobianComponent_ThreadConflictFree(u32 index, PCGScalar jac_x, PCGScalar jac_y) { (*pcg_g_)(0, index) += jac_x * sum_x_ + jac_y * sum_y_; } __forceinline__ __device__ void SetJacobianComponent_AllThreadsSameIndex(u32 index, PCGScalar jac_x, PCGScalar jac_y, bool valid) { BlockedAtomicSum<block_width, block_height>( &(*pcg_g_)(0, index), jac_x * sum_x_ + jac_y * sum_y_, valid, temp_storage_); __syncthreads(); } __forceinline__ __device__ void SetJacobianComponent_RandomThreadConflicts(u32 index, PCGScalar jac_x, PCGScalar jac_y) { atomicAddFloatOrDouble( &(*pcg_g_)(0, index), jac_x * sum_x_ + jac_y * sum_y_); } PCGScalar sum_x_; PCGScalar sum_y_; CUDABuffer_<PCGScalar>* pcg_g_; typename cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height>::TempStorage* temp_storage_; }; template<int block_width, class Model, bool are_camera_tr_rig_in_state> __global__ void __launch_bounds__(/*maxThreadsPerBlock*/ kResidualJacobianBlockSize, /*minBlocksPerMultiprocessor*/ 1) PCGStep1CUDAKernel( CUDADatasetAndState s, Model model, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_alpha_d) { unsigned int feature_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = feature_index < s.num_features; if (!valid) { feature_index = 0; } constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; PCGScalar weight; PCGScalar sum_x; PCGScalar sum_y; { PCGStep1SumAccumulator<block_width, block_height> accumulator(pcg_p); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); weight = accumulator.weight_; sum_x = accumulator.sum_x_; sum_y = accumulator.sum_y_; } BlockedAtomicSum<block_width, block_height>( &pcg_alpha_d(0, 0), sum_x * weight * sum_x + sum_y * weight * sum_y, valid, &temp_storage); sum_x *= weight; sum_y *= weight; __syncthreads(); // TODO: Try storing sum_x and sum_y in global memory here and moving the // part below into its own kernel. It might be faster since it might be // possible to run one of the two resulting kernels with higher // parallelism than the current large kernel. 
{ PCGStep1ResolveAccumulator<block_width, block_height> accumulator(sum_x, sum_y, &pcg_g, &temp_storage); ComputeResidualAndJacobian<block_width, block_height, /*compute_jacobians*/ true, are_camera_tr_rig_in_state>(valid, feature_index, s, model, accumulator); } } template<int block_width> __global__ void AddAlphaDEpsilonTermsCUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_d) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; bool valid = unknown_index < unknown_count; if (!valid) { unknown_index = unknown_count - 1; } PCGScalar p_value = pcg_p(0, unknown_index); PCGScalar term = lambda * p_value * p_value; constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; BlockedAtomicSum<block_width, block_height>( &pcg_alpha_d(0, 0), term, valid, &temp_storage); } template <class Model> void PCGStep1CUDA( cudaStream_t stream, u32 unknown_count, const CUDADatasetAndState& s, const Model& model, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_alpha_d) { CHECK_CUDA_NO_ERROR(); if (s.num_features == 0) { return; } constexpr int block_width = kResidualJacobianBlockSize; dim3 grid_dim(GetBlockCount(s.num_features, block_width)); dim3 block_dim(block_width); bool are_camera_tr_rig_in_state = s.are_camera_tr_rig_in_state; COMPILE_OPTION( are_camera_tr_rig_in_state, PCGStep1CUDAKernel<block_width, Model, _are_camera_tr_rig_in_state> <<<grid_dim, block_dim, 0, stream>>>( s, model, *pcg_p, *pcg_g, *pcg_alpha_d);); CHECK_CUDA_NO_ERROR(); } template void PCGStep1CUDA<CUDACentralGenericModel>( cudaStream_t stream, u32 unknown_count, const CUDADatasetAndState& s, const CUDACentralGenericModel& model, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_alpha_d); template<int block_width> __global__ void PCGStep2CUDAKernel( u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar> pcg_r, CUDABuffer_<PCGScalar> pcg_M, CUDABuffer_<PCGScalar> pcg_delta, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n, CUDABuffer_<PCGScalar> pcg_alpha_d, CUDABuffer_<PCGScalar> pcg_beta_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; PCGScalar beta_term; constexpr int block_height = 1; typedef cub::BlockReduce<PCGScalar, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceScalar; __shared__ typename BlockReduceScalar::TempStorage temp_storage; if (unknown_index < unknown_count) { // TODO: Default to 1 or to 0 if denominator is near-zero? Stop optimization if that happens? PCGScalar alpha = (pcg_alpha_d(0, 0) >= 1e-35f) ? (pcg_alpha_n(0, 0) / pcg_alpha_d(0, 0)) : 0; PCGScalar p_value = pcg_p(0, unknown_index); pcg_delta(0, unknown_index) += alpha * p_value; PCGScalar r_value = pcg_r(0, unknown_index); r_value -= alpha * (pcg_g(0, unknown_index) + lambda * p_value); pcg_r(0, unknown_index) = r_value; // This is called z in the Opt paper, but stored in g here to save memory. 
PCGScalar z_value = r_value / (pcg_M(0, unknown_index) + lambda); pcg_g(0, unknown_index) = z_value; beta_term = z_value * r_value; } BlockedAtomicSum<block_width, block_height>( &pcg_beta_n(0, 0), beta_term, unknown_index < unknown_count, &temp_storage); } void PCGStep2CUDA( cudaStream_t stream, u32 unknown_count, PCGScalar lambda, CUDABuffer_<PCGScalar>* pcg_r, const CUDABuffer_<PCGScalar>& pcg_M, CUDABuffer_<PCGScalar>* pcg_delta, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n, CUDABuffer_<PCGScalar>* pcg_alpha_d, CUDABuffer_<PCGScalar>* pcg_beta_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } CUDA_AUTO_TUNE_1D_TEMPLATED( AddAlphaDEpsilonTermsCUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, *pcg_p, *pcg_alpha_d); CHECK_CUDA_NO_ERROR(); cudaMemsetAsync(pcg_beta_n->address(), 0, 1 * sizeof(PCGScalar), stream); CUDA_AUTO_TUNE_1D_TEMPLATED( PCGStep2CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, lambda, *pcg_r, pcg_M, *pcg_delta, *pcg_g, *pcg_p, *pcg_alpha_n, *pcg_alpha_d, *pcg_beta_n); CHECK_CUDA_NO_ERROR(); } template<int block_width> __global__ void PCGStep3CUDAKernel( u32 unknown_count, CUDABuffer_<PCGScalar> pcg_g, CUDABuffer_<PCGScalar> pcg_p, CUDABuffer_<PCGScalar> pcg_alpha_n, CUDABuffer_<PCGScalar> pcg_beta_n) { unsigned int unknown_index = blockIdx.x * blockDim.x + threadIdx.x; if (unknown_index < unknown_count) { // TODO: Default to 1 or to 0 if denominator is near-zero? Stop optimization if that happens? PCGScalar beta = (pcg_alpha_n(0, 0) >= 1e-35f) ? (pcg_beta_n(0, 0) / pcg_alpha_n(0, 0)) : 0; pcg_p(0, unknown_index) = pcg_g/*z*/(0, unknown_index) + beta * pcg_p(0, unknown_index); } } void PCGStep3CUDA( cudaStream_t stream, u32 unknown_count, CUDABuffer_<PCGScalar>* pcg_g, CUDABuffer_<PCGScalar>* pcg_p, CUDABuffer_<PCGScalar>* pcg_alpha_n, CUDABuffer_<PCGScalar>* pcg_beta_n) { CHECK_CUDA_NO_ERROR(); if (unknown_count == 0) { return; } CUDA_AUTO_TUNE_1D_TEMPLATED( PCGStep3CUDAKernel, 1024, unknown_count, 0, stream, TEMPLATE_ARGUMENTS(block_width), /* kernel parameters */ unknown_count, *pcg_g, *pcg_p, *pcg_alpha_n, *pcg_beta_n); CHECK_CUDA_NO_ERROR(); } }
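// ---------------------------------------------------------------------------
// Illustrative sketch (assumed formulation, not this project's definition):
// the kernels above call ComputeHuberWeight()/ComputeHuberCost() with
// kHuberWeight as the threshold; those helpers are defined elsewhere in the
// code base. The accumulators only rely on the usual IRLS-style behaviour of
// the Huber loss on a 2-D residual: quadratic treatment (weight 1) below the
// threshold tau, linear growth and a tau/||r|| weight above it. One common
// way to write that is sketched below; the names, the Scalar typedef, and the
// exact scaling convention are assumptions.
#include <cmath>

namespace huber_sketch {  // hypothetical namespace, kept separate from vis::

using Scalar = double;  // stand-in for PCGScalar

inline Scalar HuberCost(Scalar rx, Scalar ry, Scalar tau) {
  const Scalar norm = std::sqrt(rx * rx + ry * ry);
  // Quadratic near zero, linear beyond tau.
  return (norm <= tau) ? Scalar(0.5) * norm * norm
                       : tau * (norm - Scalar(0.5) * tau);
}

inline Scalar HuberWeight(Scalar rx, Scalar ry, Scalar tau) {
  const Scalar norm = std::sqrt(rx * rx + ry * ry);
  // IRLS weight used when accumulating J^T W J and J^T W r: residuals outside
  // the quadratic region are down-weighted, which tempers outliers.
  return (norm <= tau) ? Scalar(1) : tau / norm;
}

}  // namespace huber_sketch
// ---------------------------------------------------------------------------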
#include <ops/declarable/helpers/convolutions.h> #include "cudnnUtils.h" namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void conv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // weights descriptor FilterDesc w; w.set4D(cudnnDataType(weights->dataType()), formatW, oC, iC, kH, kW); // output descriptor CudnnTensor z; if (output->ews() == 1 && output->ordering() == 'c') z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; // err = cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed as the count is 0", 0); algo = algoPerf.algo; PointersManager manager(context, __func__); // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize)); void* wsData = manager.allocateDevMem(wsSize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionForward), cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer())); // add bias if it is present if (bias != nullptr) { CudnnTensor b; // b.set4D(format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 1: // bias->lengthOf()); b.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer())); } // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({output}, {input, weights, bias}); } ////////////////////////////////////////////////////////////////////////// static void conv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); PointersManager manager(context, __func__); // input descriptor, gradO descriptor, gradI descriptor CudnnTensor x, dz, dx; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if (gradO->ews() == 1 && gradO->ordering() == 'c') dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); if (gradI->ews() == 1 && gradI->ordering() == 'c') dx.set4D(format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else dx.set4DEx(cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); // gradW descriptor FilterDesc dw; dw.set4D(cudnnDataType(gradW->dataType()), formatW, oC, iC, kH, kW); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; // err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, // CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); if (count == 0) throw sd::cuda_exception::build( "conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0", 0); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t algoGradI; cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; // err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, // 0, &algoGradI); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0", 0); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); void* wsGradWData = manager.allocateDevMem(wsGradWSize); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); void* wsGradIData = manager.allocateDevMem(wsGradISize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if (gradB != nullptr) { CudnnTensor db; // db.set4D(format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: // gradB->lengthOf()); db.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardBias), cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); } // run calculation for gradW CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardFilter), cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); // run calculation for gradI CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardData), cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? 
INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<sd::LongType> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) { REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got " "%i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); } std::unique_ptr<NDArray> tmpWeight = {}, tmpInput = {}; NDArray* newWeights = weights; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if (0 == wFormat) { tmpWeight.reset( new NDArray(weights->ordering(), isNCHW ? std::vector<sd::LongType>({oC, iC, kH, kW}) : std::vector<sd::LongType>({oC, kH, kW, iC}), weights->dataType(), weights->getContext())); newWeights = tmpWeight.get(); newWeights->assign(weights->permute( isNCHW ? std::vector<int>({3, 2, 0, 1}) : std::vector<int>( {3, 0, 1, 2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, nullptr, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); // prolong life if (tmpInput) input = tmpInput.get(); } conv2dCUDNN(block.launchContext(), input, newWeights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW, wFormat); return sd::Status::OK; } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? 
false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); Requirements req("CUDNN CONV2d OP"); req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); if (bias) { req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } req.logTheSuccess(); return req; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] int kH = INT_ARG(0); // filter(kernel) height int kW = INT_ARG(1); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ?
INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got " "%i instead !", gradO->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); int trueoH, trueoW; // true output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<sd::LongType> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS, oC, trueoH, trueoW, 0, indIOioC, indOoH, indOoH + 1}); std::vector<sd::LongType> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but " "got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got " "%i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::unique_ptr<NDArray> tmpGradI = {}, tmpInput = {}, tmpWeights = {}, tmpGradW = {}; NDArray *newWeights = weights, *newGradW = gradW; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if (0 == wFormat) { tmpGradW.reset( new NDArray(gradW->ordering(), isNCHW ? std::vector<sd::LongType>({oC, iC, kH, kW}) : std::vector<sd::LongType>({oC, kH, kW, iC}), gradW->dataType(), gradW->getContext())); tmpWeights.reset( new NDArray(weights->ordering(), isNCHW ? std::vector<sd::LongType>({oC, iC, kH, kW}) : std::vector<sd::LongType>({oC, kH, kW, iC}), weights->dataType(), weights->getContext())); newGradW = tmpGradW.get(); newWeights = tmpWeights.get(); newWeights->assign(weights->permute( isNCHW ? 
std::vector<int>({3, 2, 0, 1}) : std::vector<int>( {3, 0, 1, 2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = gradI; if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, gradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); tmpGradI = std::move(std::get<1>(ret)); if (tmpInput) newInput = tmpInput.get(); if (tmpGradI) newGradI = tmpGradI.get(); } conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW, wFormat); if (0 == wFormat) { newGradW->permutei( isNCHW ? std::vector<int>({2, 3, 1, 0}) : std::vector<int>( {1, 2, 3, 0})); // (oC, iC, kH, kW --> kH, kW, iC, oC) or (oC, kH, kW, iC --> kH, kW, iC, oC) gradW->assign(newGradW); } if (newInput != input) { if (isNCHW) gradI->assign((*newGradI)({0, 0, 0, 0, 0, gradI->sizeAt(2), 0, gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0, 0, 0, gradI->sizeAt(1), 0, gradI->sizeAt(2), 0, 0})); } return sd::Status::OK; } PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC Requirements req("CUDNN CONV2d_BP OP"); req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && req.expectTrue(makeInfoVariable(isNCHW, "isNCHW")) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); if (bias) { req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } else { req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } req.logTheSuccess(); return req; } } // namespace platforms } // namespace ops } // namespace sd
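// ---------------------------------------------------------------------------
// Illustrative sketch (standard convolution arithmetic; the library's
// ConvolutionUtils::calcOutSizePool2D/calcPadding2D remain the authoritative
// implementation, and its exact left/right split of SAME padding may differ):
// the ops above derive oH/oW and pH/pW from the input size, kernel size,
// stride, and dilation. The usual formulas look like this; the helper names
// are hypothetical.
#include <algorithm>

// Effective kernel extent with dilation d: d * (k - 1) + 1.
inline int effectiveKernel(int k, int d) { return d * (k - 1) + 1; }

// VALID (paddingMode == 0): no implicit padding.
inline int outSizeValid(int in, int k, int s, int d) {
  return (in - effectiveKernel(k, d)) / s + 1;
}

// SAME (paddingMode == 1): output covers ceil(in / s).
inline int outSizeSame(int in, int s) { return (in + s - 1) / s; }

// Total SAME padding, split across the two sides. cuDNN descriptors only take
// a symmetric padding, which is why the code above pads the input tensor when
// the split turns out asymmetric (checkConv2dCUDNNPadAsymmetric).
inline void samePadding(int in, int k, int s, int d, int *before, int *after) {
  const int out = outSizeSame(in, s);
  const int total = std::max(0, (out - 1) * s + effectiveKernel(k, d) - in);
  *before = total / 2;
  *after = total - *before;
}
// ---------------------------------------------------------------------------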
#include <thrust/iterator/counting_iterator.h> #include <cstring> #include <algorithm> #include <exception> #include <cstdio> #include <cstdlib> #include "algorithm.hpp" #include "iterator.hpp" #include "memory.hpp" #include "query/time_series_aggregate.h" #include "concurrent_unordered_map.hpp" #include "utils.hpp" CGoCallResHandle HashReduce(DimensionVector inputKeys, uint8_t *inputValues, DimensionVector outputKeys, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, void *stream, int device) { CGoCallResHandle resHandle = {nullptr, nullptr}; try { #ifndef SUPPORT_HASH_REDUCTION resHandle.res = reinterpret_cast<void *>(0); #else #ifdef RUN_ON_DEVICE cudaSetDevice(device); #endif cudaStream_t cudaStream = reinterpret_cast<cudaStream_t>(stream); resHandle.res = reinterpret_cast<void *>(ares::hash_reduction(inputKeys, inputValues, outputKeys, outputValues, valueBytes, length, aggFunc, cudaStream)); CheckCUDAError("HashReduce"); #endif } catch (std::exception &e) { std::cerr << "Exception happend when doing Reduce:" << e.what() << std::endl; resHandle.pStrErr = strdup(e.what()); } return resHandle; } #ifdef SUPPORT_HASH_REDUCTION namespace ares { template<typename map_type> struct ExtractGroupByResultFunctor { typedef typename map_type::key_type key_type; typedef typename map_type::mapped_type element_type; explicit ExtractGroupByResultFunctor( map_type *const __restrict__ map, uint8_t *dimInputValues, size_t vectorCapacity, uint8_t *dimOutputValues, uint8_t dimWidthPrefixSum[NUM_DIM_WIDTH], uint8_t *measureValues, uint32_t *global_write_index) : map(map), dimInputValues(dimInputValues), vectorCapacity(vectorCapacity), dimOutputValues(dimOutputValues), measureValues(measureValues), global_write_index( global_write_index) { memcpy(this->dimWidthPrefixSum, dimWidthPrefixSum, sizeof(uint8_t) * NUM_DIM_WIDTH); } map_type *map; uint8_t *dimInputValues; uint32_t *global_write_index; uint8_t *measureValues; uint8_t *dimOutputValues; size_t vectorCapacity; uint8_t dimWidthPrefixSum[NUM_DIM_WIDTH]; // copy_dim_values moves the inputDimValues at inputIdx to curDimOutput // at outputIdx. __host_or_device__ void copy_dim_values(uint32_t inputIdx, size_t outputIdx) { uint8_t * curDimInput = dimInputValues; uint8_t * curDimOutput = dimOutputValues; // idx in numDimsPerDimWidth; int widthIdx = 0; uint8_t totalDims = dimWidthPrefixSum[NUM_DIM_WIDTH - 1]; // write values // pointer address for dim value d on row r is // (base_ptr + accumulatedValueBytes * vectorCapacity) + r * dimValueBytes. for (uint8_t dimIndex = 0; dimIndex < totalDims; dimIndex += 1) { // find correct widthIdx. while (widthIdx < NUM_DIM_WIDTH && dimIndex >= dimWidthPrefixSum[widthIdx]) { widthIdx++; } uint16_t dimValueBytes = 1 << (NUM_DIM_WIDTH - widthIdx - 1); curDimInput += dimValueBytes * inputIdx; curDimOutput += dimValueBytes * outputIdx; setDimValue(curDimOutput, curDimInput, dimValueBytes); // set to the start of next dim value curDimInput += (vectorCapacity - inputIdx) * dimValueBytes; curDimOutput += (vectorCapacity - outputIdx) * dimValueBytes; } // write nulls. // Now both curDimInput and curDimOutput should point to start of // null vector. 
for (uint8_t dimIndex = 0; dimIndex < totalDims; dimIndex += 1) { curDimInput += inputIdx; curDimOutput += outputIdx; *curDimOutput = *curDimInput; // set to the start of next dim null vector curDimInput += vectorCapacity - inputIdx; curDimOutput += vectorCapacity - outputIdx; } } __host_or_device__ void operator()(const int i) { if (i < map->capacity()) { key_type unusedKey = map->get_unused_key(); auto iter = ares::map_value_at(map, i); key_type current_key = iter->first; if (current_key != unusedKey) { uint32_t outputIdx = ares::atomicAdd(global_write_index, (uint32_t)1); copy_dim_values(static_cast<uint32_t>(current_key), outputIdx); element_type value = iter->second; reinterpret_cast<element_type *>(measureValues)[outputIdx] = value; } } } }; // hasher is the struct to return the hash value for a given key of concurrent // map. It just returns the higher 32 bits of the key as the hash value. Note // we cannot use lambda for hasher since the concurrent_unordered_map need // hash_value_type defined in the struct. struct Higher32BitsHasher { using result_type = uint32_t; using key_type = int64_t; __host_or_device__ result_type operator()(key_type key) const { return static_cast<result_type>(key >> 32); } }; // HashReductionContext wraps the concurrent_unordered_map for hash reduction. // The key of the hash map is a 8 byte integer where the first 4 bytes are the // hash_value and second 4 bytes are the dim row index, hash value of the map is // pre-calculated during insertion because we use the same hash value for // equality check as well when collision happens. Therefore the hash function // of the hash map will be just extract the 1st item of the pair. // value_type of the hash map is just the measure value. Therefore we can use // atomic functions for most aggregate functions. // After the reduction part is done, we need to output the result in the format // of <index, aggregated_values>. Notes we don't need to output hash values. template<typename value_type, typename agg_func> class HashReductionContext { using key_type = int64_t; private: // A reasonably large enough capacity of different dimension values for // aggregation query. We also use this as the capacity for the hash map. // Note we cannot make this assumption if we are going to use this map // for join. constexpr static float load_factor = 2; agg_func f; uint32_t capacity; uint32_t length; value_type identity; public: explicit HashReductionContext(uint32_t length, value_type identity) : capacity(length * load_factor), length(length), identity(identity) { f = agg_func(); } // reduce reduces dimension value with same hash key into a single element // using corresponding aggregation function. Note the key is actually // < hash_value, index> pair but the equability check is only on // hash_value. Therefore the first index paired with the hash value will // be the final output value. 
template<typename map_type> void reduce(map_type *map, DimensionVector inputKeys, uint8_t *inputValues, cudaStream_t cudaStream) { DimensionHashIterator<32, thrust::counting_iterator<uint32_t>> hashIter(inputKeys.DimValues, inputKeys.NumDimsPerDimWidth, inputKeys.VectorCapacity); auto rawMapKeyIter = thrust::make_zip_iterator( thrust::make_tuple(hashIter, thrust::counting_iterator<uint32_t>(0))); auto hashIndexFusionFunc = [] __host_or_device__( typename decltype(rawMapKeyIter)::value_type tuple) { return (static_cast<int64_t>(thrust::get<0>(tuple)) << 32) | (static_cast<int64_t>(thrust::get<1>(tuple))); }; auto mapKeyIter = thrust::make_transform_iterator(rawMapKeyIter, hashIndexFusionFunc); auto mapKeyValuePairIter = thrust::make_zip_iterator( thrust::make_tuple( mapKeyIter, reinterpret_cast<value_type *>(inputValues))); auto equality = [] __host_or_device__(key_type lhs, key_type rhs) { return lhs >> 32 == rhs >> 32; }; auto insertionFunc = [=] __host_or_device__( thrust::tuple<key_type, value_type> key_value_tuple) { map->insert(tuple2pair(key_value_tuple), f, equality); }; thrust::for_each_n(GET_EXECUTION_POLICY(cudaStream), mapKeyValuePairIter, length, insertionFunc); } template<typename map_type> int output(map_type *map, DimensionVector inputKeys, DimensionVector outputKeys, uint8_t *outputValue, cudaStream_t cudaStream) { // calculate prefix sum of NumDimsPerDimWidth so that it will be easier // to tell the valueBytes given a dim index. The prefix sum is inclusive. uint8_t dimWidthPrefixSum[NUM_DIM_WIDTH]; for (int i = 0; i < NUM_DIM_WIDTH; i++) { dimWidthPrefixSum[i] = inputKeys.NumDimsPerDimWidth[i]; if (i > 0) { dimWidthPrefixSum[i] += dimWidthPrefixSum[i - 1]; } } device_unique_ptr<uint32_t> globalWriteIndexDevice = make_device_unique<uint32_t>(cudaStream); ExtractGroupByResultFunctor<map_type> extractorFunc( map, inputKeys.DimValues, inputKeys.VectorCapacity, outputKeys.DimValues, dimWidthPrefixSum, outputValue, globalWriteIndexDevice.get()); thrust::for_each_n(GET_EXECUTION_POLICY(cudaStream), thrust::counting_iterator<uint32_t>(0), capacity, extractorFunc); // copy the reduced count back from GPU. uint32_t globalWriteIndexHost; ares::asyncCopyDeviceToHost( reinterpret_cast<void *>(&globalWriteIndexHost), reinterpret_cast<void *>(globalWriteIndexDevice.get()), sizeof(uint32_t), cudaStream); ares::waitForCudaStream(cudaStream); return globalWriteIndexHost; } // execute reduces the dimension values with measures first and then extract // aggregated result. int execute(DimensionVector inputKeys, uint8_t *inputValues, DimensionVector outputKeys, uint8_t *outputValues, cudaStream_t cudaStream) { auto equality = [] __host_or_device__(key_type lhs, key_type rhs) { return (lhs >> 32) == (rhs >> 32); }; typedef ares::hash_map<key_type, value_type, Higher32BitsHasher, decltype(equality)> map_type; host_unique_ptr<map_type> mapHost = make_host_unique<map_type>(capacity, identity, 0, Higher32BitsHasher(), equality); device_unique_ptr<map_type> mapDevice = make_device_unique<map_type>(cudaStream, mapHost.get()); reduce(mapDevice.get(), inputKeys, inputValues, cudaStream); return output(mapDevice.get(), inputKeys, outputKeys, outputValues, cudaStream); } }; // hashReduceInternal reduces the dimension value using the aggregation // function into a concurrent hash map. After reduction it will extract // the dimension and measure values from hash map to corresponding // dimension vector and value vector. 
template<typename value_type, typename agg_func_type> int hashReduceInternal(DimensionVector inputKeys, uint8_t *inputValues, DimensionVector outputKeys, uint8_t *outputValues, int length, value_type identity, cudaStream_t cudaStream) { HashReductionContext<value_type, agg_func_type> context(length, identity); return context.execute(inputKeys, inputValues, outputKeys, outputValues, cudaStream); } // This function simply binds the measure value type and aggregate function // type. int hash_reduction(DimensionVector inputKeys, uint8_t *inputValues, DimensionVector outputKeys, uint8_t *outputValues, int valueBytes, int length, AggregateFunction aggFunc, cudaStream_t cudaStream) { int outputLength = 0; switch (aggFunc) { #define REDUCE_INTERNAL(value_type, agg_func_type) \ outputLength = hashReduceInternal< \ value_type, agg_func_type>( \ inputKeys, \ inputValues, \ outputKeys, \ outputValues, \ length, \ get_identity_value<value_type>(aggFunc), \ cudaStream); break; case AGGR_SUM_UNSIGNED: if (valueBytes == 4) { REDUCE_INTERNAL(uint32_t, sum_op < uint32_t >) } else { REDUCE_INTERNAL(uint64_t, sum_op < uint64_t >) } case AGGR_SUM_SIGNED: if (valueBytes == 4) { REDUCE_INTERNAL(int32_t, sum_op < int32_t >) } else { REDUCE_INTERNAL(int64_t, sum_op < int64_t >) } case AGGR_SUM_FLOAT: if (valueBytes == 4) { REDUCE_INTERNAL(float_t, sum_op < float_t >) } else { REDUCE_INTERNAL(double_t, sum_op < double_t >) } case AGGR_MIN_UNSIGNED:REDUCE_INTERNAL(uint32_t, min_op < uint32_t >) case AGGR_MIN_SIGNED:REDUCE_INTERNAL(int32_t, min_op < int32_t >) case AGGR_MIN_FLOAT:REDUCE_INTERNAL(float_t, min_op < float_t >) case AGGR_MAX_UNSIGNED:REDUCE_INTERNAL(uint32_t, max_op < uint32_t >) case AGGR_MAX_SIGNED:REDUCE_INTERNAL(int32_t, max_op < int32_t >) case AGGR_MAX_FLOAT:REDUCE_INTERNAL(float_t, max_op < float_t >) case AGGR_AVG_FLOAT:REDUCE_INTERNAL(uint64_t, RollingAvgFunctor) default: throw std::invalid_argument("Unsupported aggregation function type"); } return outputLength; } } // namespace ares #endif
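The hash-reduction path above packs each group key into a single 64-bit integer: the dimension hash in the high 32 bits and the input row index in the low 32 bits, with both the hasher and the equality predicate looking only at the high half, so the first row index inserted for a hash "owns" the group and later measures are folded in by the aggregate function. A small host-only sketch of that packing and folding, using std::unordered_map as a stand-in for the concurrent GPU map (illustrative only, not the library's API):

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <utility>

// Sketch of the 64-bit key layout: high 32 bits = dimension hash, low 32 bits = row index.
static int64_t packKey(uint32_t hash, uint32_t rowIdx) {
    return (static_cast<int64_t>(hash) << 32) | static_cast<int64_t>(rowIdx);
}
static uint32_t keyHash(int64_t key)   { return static_cast<uint32_t>(key >> 32); }
static uint32_t keyRowIdx(int64_t key) { return static_cast<uint32_t>(key & 0xffffffff); }

int main() {
    // Stand-in for the concurrent map: hash -> (representative packed key, running sum).
    std::unordered_map<uint32_t, std::pair<int64_t, int64_t>> groups;

    struct Row { uint32_t hash; int64_t measure; };
    const Row rows[] = {{7u, 10}, {7u, 5}, {9u, 3}};  // two rows collide on hash 7

    for (uint32_t i = 0; i < 3; ++i) {
        const int64_t key = packKey(rows[i].hash, i);
        auto it = groups.find(keyHash(key));
        if (it == groups.end())
            groups.emplace(keyHash(key), std::make_pair(key, rows[i].measure));
        else
            it->second.second += rows[i].measure;  // sum_op equivalent
    }

    for (const auto& kv : groups)
        std::printf("hash=%u firstRow=%u sum=%lld\n", kv.first,
                    keyRowIdx(kv.second.first),
                    static_cast<long long>(kv.second.second));
    return 0;
}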
#include <Eigen/Dense> #include <limits> #include "cupoch/camera/pinhole_camera_intrinsic.h" #include "cupoch/geometry/image.h" #include "cupoch/geometry/laserscanbuffer.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/rgbdimage.h" #include "cupoch/utility/console.h" #include "cupoch/utility/helper.h" #include "cupoch/utility/range.h" namespace cupoch { namespace geometry { namespace { struct depth_to_pointcloud_functor { depth_to_pointcloud_functor( const uint8_t *depth, const int width, const int stride, const thrust::pair<float, float> &principal_point, const thrust::pair<float, float> &focal_length, const Eigen::Matrix4f &camera_pose) : depth_(depth), width_(width), stride_(stride), principal_point_(principal_point), focal_length_(focal_length), camera_pose_(camera_pose){}; const uint8_t *depth_; const int width_; const int stride_; const thrust::pair<float, float> principal_point_; const thrust::pair<float, float> focal_length_; const Eigen::Matrix4f camera_pose_; __device__ Eigen::Vector3f operator()(size_t idx) { int strided_width = width_ / stride_; int row = idx / strided_width * stride_; int col = idx % strided_width * stride_; const float d = *(float *)(&depth_[(row * width_ + col) * sizeof(float)]); if (d <= 0.0) { return Eigen::Vector3f::Constant( std::numeric_limits<float>::infinity()); } else { float z = d; float x = (col - principal_point_.first) * z / focal_length_.first; float y = (row - principal_point_.second) * z / focal_length_.second; Eigen::Vector4f point = camera_pose_ * Eigen::Vector4f(x, y, z, 1.0); return point.block<3, 1>(0, 0); } } }; std::shared_ptr<PointCloud> CreatePointCloudFromFloatDepthImage( const Image &depth, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic, int stride) { auto pointcloud = std::make_shared<PointCloud>(); const Eigen::Matrix4f camera_pose = extrinsic.inverse(); const auto focal_length = intrinsic.GetFocalLength(); const auto principal_point = intrinsic.GetPrincipalPoint(); const size_t depth_size = (depth.width_ / stride) * (depth.height_ / stride); pointcloud->points_.resize(depth_size); depth_to_pointcloud_functor func( thrust::raw_pointer_cast(depth.data_.data()), depth.width_, stride, principal_point, focal_length, camera_pose); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(depth_size), pointcloud->points_.begin(), func); pointcloud->RemoveNoneFinitePoints(true, true); return pointcloud; } template <typename TC, int NC> struct convert_from_rgbdimage_functor { convert_from_rgbdimage_functor( const uint8_t *depth, const uint8_t *color, int width, const Eigen::Matrix4f &camera_pose, const thrust::pair<float, float> &principal_point, const thrust::pair<float, float> &focal_length, float scale, float depth_cutoff) : depth_(depth), color_(color), width_(width), camera_pose_(camera_pose), principal_point_(principal_point), focal_length_(focal_length), scale_(scale), depth_cutoff_(depth_cutoff){}; const uint8_t *depth_; const uint8_t *color_; const int width_; const Eigen::Matrix4f camera_pose_; const thrust::pair<float, float> principal_point_; const thrust::pair<float, float> focal_length_; const float scale_; const float depth_cutoff_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t idx) const { int i = idx / width_; int j = idx % width_; float *p = (float *)(depth_ + idx * sizeof(float)); TC *pc = (TC *)(color_ + idx * NC * sizeof(TC)); if (*p > 0 && (depth_cutoff_ 
<= 0 || depth_cutoff_ > *p)) { float z = (float)(*p); float x = (j - principal_point_.first) * z / focal_length_.first; float y = (i - principal_point_.second) * z / focal_length_.second; Eigen::Vector4f point = camera_pose_ * Eigen::Vector4f(x, y, z, 1.0); Eigen::Vector3f points = point.block<3, 1>(0, 0); Eigen::Vector3f colors = Eigen::Vector3f(pc[0], pc[(NC - 1) / 2], pc[NC - 1]) / scale_; return thrust::make_tuple(points, colors); } else { return thrust::make_tuple( Eigen::Vector3f::Constant( std::numeric_limits<float>::infinity()), Eigen::Vector3f::Constant( std::numeric_limits<float>::infinity())); } } }; struct compute_normals_from_structured_pointcloud_functor { compute_normals_from_structured_pointcloud_functor( const Eigen::Vector3f *points, int width, int height) : points_(points), width_(width), height_(height){}; const Eigen::Vector3f *points_; const int width_; const int height_; __device__ Eigen::Vector3f operator()(size_t idx) const { int i = idx / width_; int j = idx % width_; if (i < 1 || i >= height_ || j < 1 || j >= width_) { return Eigen::Vector3f(0.0f, 0.0f, 0.0f); } Eigen::Vector3f left = *(points_ + width_ * i + j - 1); if (!Eigen::device_all(left.array().isFinite())) { left = Eigen::Vector3f::Zero(); } Eigen::Vector3f right = *(points_ + width_ * i + j + 1); if (!Eigen::device_all(right.array().isFinite())) { right = Eigen::Vector3f::Zero(); } Eigen::Vector3f upper = *(points_ + width_ * (i - 1) + j); if (!Eigen::device_all(upper.array().isFinite())) { upper = Eigen::Vector3f::Zero(); } Eigen::Vector3f lower = *(points_ + width_ * (i + 1) + j); if (!Eigen::device_all(lower.array().isFinite())) { lower = Eigen::Vector3f::Zero(); } Eigen::Vector3f hor = left - right; Eigen::Vector3f ver = upper - lower; Eigen::Vector3f normal = hor.cross(ver); float norm = normal.norm(); if (norm == 0) { return Eigen::Vector3f::Zero(); } normal /= norm; if (normal.z() > 0) normal *= -1.0f; return normal; } }; struct compute_points_from_scan_functor { compute_points_from_scan_functor(float min_range, float max_range, float min_angle, float angle_increment, int num_steps) : min_range_(min_range), max_range_(max_range), min_angle_(min_angle), angle_increment_(angle_increment), num_steps_(num_steps){}; const float min_range_; const float max_range_; const float min_angle_; const float angle_increment_; const int num_steps_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( const thrust::tuple<size_t, float, Eigen::Matrix4f_u, float> &x) const { size_t idx = thrust::get<0>(x); float r = thrust::get<1>(x); Eigen::Vector3f color = Eigen::Vector3f::Constant(thrust::get<3>(x)); if (isnan(r) || r < min_range_ || max_range_ < r) { return thrust::make_tuple( Eigen::Vector3f::Constant( std::numeric_limits<float>::quiet_NaN()), color); } Eigen::Matrix4f origin = thrust::get<2>(x); int i = idx % num_steps_; float angle = min_angle_ + i * angle_increment_; Eigen::Vector4f pt = origin * Eigen::Vector4f(r * cos(angle), r * sin(angle), 0.0, 1.0); return thrust::make_tuple(pt.head<3>(), color); } }; struct compute_points_from_occvoxels_functor { compute_points_from_occvoxels_functor(float voxel_size, int resolution, const Eigen::Vector3f &origin) : voxel_size_(voxel_size), resolution_(resolution), origin_(origin){}; const float voxel_size_; const int resolution_; const Eigen::Vector3f origin_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( const OccupancyVoxel &v) const { const Eigen::Vector3f pt = (v.grid_index_.cast<float>() + 
Eigen::Vector3f::Constant(-resolution_ / 2 + 0.5)) * voxel_size_ + origin_; return thrust::make_tuple(pt, v.color_); } }; template <typename T> struct compute_points_from_disparity { compute_points_from_disparity(const uint8_t *disp, const uint8_t *color, int width, const Eigen::Matrix4f &q, float k) : disp_(disp), color_(color), width_(width), q_(q), k_(k){}; const uint8_t *disp_; const uint8_t *color_; const int width_; const Eigen::Matrix4f q_; const float k_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( size_t idx) const { int v = idx / width_; int u = idx % width_; float disp = disp_[idx]; float r = (float)(*(T *)(color_[(idx * 3) * sizeof(T)])) / k_; float g = (float)(*(T *)(color_[(idx * 3 + 1) * sizeof(T)])) / k_; float b = (float)(*(T *)(color_[(idx * 3 + 2) * sizeof(T)])) / k_; Eigen::Vector3f point(q_(0, 0) * u + q_(0, 3), q_(1, 1) * v + q_(1, 3), q_(2, 3)); float w = q_(3, 2) * disp + q_(3, 3); point *= 1.0 / w; return thrust::make_tuple(point, Eigen::Vector3f(r, g, b)); } }; template <typename TC, int NC> std::shared_ptr<PointCloud> CreatePointCloudFromRGBDImageT( const RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic, bool project_valid_depth_only, float depth_cutoff, bool compute_normals) { auto pointcloud = std::make_shared<PointCloud>(); Eigen::Matrix4f camera_pose = extrinsic.inverse(); auto focal_length = intrinsic.GetFocalLength(); auto principal_point = intrinsic.GetPrincipalPoint(); float scale = (sizeof(TC) == 1) ? 255.0 : 1.0; int num_valid_pixels = image.depth_.height_ * image.depth_.width_; pointcloud->points_.resize(num_valid_pixels); pointcloud->colors_.resize(num_valid_pixels); convert_from_rgbdimage_functor<TC, NC> func( thrust::raw_pointer_cast(image.depth_.data_.data()), thrust::raw_pointer_cast(image.color_.data_.data()), image.depth_.width_, camera_pose, principal_point, focal_length, scale, depth_cutoff); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(num_valid_pixels), make_tuple_begin(pointcloud->points_, pointcloud->colors_), func); if (compute_normals) { pointcloud->normals_.resize(num_valid_pixels); compute_normals_from_structured_pointcloud_functor func_n( thrust::raw_pointer_cast(pointcloud->points_.data()), image.depth_.width_, image.depth_.height_); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(num_valid_pixels), pointcloud->normals_.begin(), func_n); } pointcloud->RemoveNoneFinitePoints(project_valid_depth_only, project_valid_depth_only); return pointcloud; } } // namespace std::shared_ptr<PointCloud> PointCloud::CreateFromDepthImage( const Image &depth, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic /* = Eigen::Matrix4f::Identity()*/, float depth_scale /* = 1000.0*/, float depth_trunc /* = 1000.0*/, int stride /* = 1*/) { if (depth.num_of_channels_ == 1) { if (depth.bytes_per_channel_ == 2) { auto float_depth = depth.ConvertDepthToFloatImage(depth_scale, depth_trunc); return CreatePointCloudFromFloatDepthImage(*float_depth, intrinsic, extrinsic, stride); } else if (depth.bytes_per_channel_ == 4) { return CreatePointCloudFromFloatDepthImage(depth, intrinsic, extrinsic, stride); } } utility::LogError( "[PointCloud::CreateFromDepthImage] Unsupported image format."); return std::make_shared<PointCloud>(); } std::shared_ptr<PointCloud> PointCloud::CreateFromRGBDImage( const RGBDImage &image, const camera::PinholeCameraIntrinsic 
&intrinsic, const Eigen::Matrix4f &extrinsic /* = Eigen::Matrix4f::Identity()*/, bool project_valid_depth_only, float depth_cutoff, bool compute_normals) { if (image.color_.bytes_per_channel_ == 1 && image.color_.num_of_channels_ == 3) { return CreatePointCloudFromRGBDImageT<uint8_t, 3>( image, intrinsic, extrinsic, project_valid_depth_only, depth_cutoff, compute_normals); } else if (image.color_.bytes_per_channel_ == 4 && image.color_.num_of_channels_ == 1) { return CreatePointCloudFromRGBDImageT<float, 1>( image, intrinsic, extrinsic, project_valid_depth_only, depth_cutoff, compute_normals); } utility::LogError( "[PointCloud::CreateFromRGBDImage] Unsupported image format."); return std::make_shared<PointCloud>(); } std::shared_ptr<PointCloud> PointCloud::CreateFromLaserScanBuffer( const LaserScanBuffer &scan, float min_range, float max_range) { auto pointcloud = std::make_shared<PointCloud>(); thrust::repeated_range< utility::device_vector<Eigen::Matrix4f_u>::const_iterator> range(scan.origins_.begin(), scan.origins_.end(), scan.num_steps_); compute_points_from_scan_functor func(min_range, max_range, scan.min_angle_, scan.GetAngleIncrement(), scan.num_steps_); pointcloud->points_.resize(scan.ranges_.size()); if (scan.HasIntensities()) { pointcloud->colors_.resize(scan.ranges_.size()); thrust::transform( enumerate_begin(scan.ranges_, range, scan.intensities_), enumerate_end(scan.ranges_, range, scan.intensities_), make_tuple_begin(pointcloud->points_, pointcloud->colors_), func); } else { thrust::transform( make_tuple_iterator(thrust::make_counting_iterator<size_t>(0), scan.ranges_.begin(), range.begin(), thrust::make_constant_iterator<float>(0)), make_tuple_iterator( thrust::make_counting_iterator(scan.ranges_.size()), scan.ranges_.end(), range.end(), thrust::make_constant_iterator<float>(0)), make_tuple_iterator(pointcloud->points_.begin(), thrust::make_discard_iterator()), func); } pointcloud->RemoveNoneFinitePoints(true, true); return pointcloud; } std::shared_ptr<PointCloud> PointCloud::CreateFromOccupancyGrid( const OccupancyGrid &occgrid) { auto pointcloud = std::make_shared<PointCloud>(); auto occvoxels = occgrid.ExtractOccupiedVoxels(); pointcloud->points_.resize(occvoxels->size()); pointcloud->colors_.resize(occvoxels->size()); compute_points_from_occvoxels_functor func( occgrid.voxel_size_, occgrid.resolution_, occgrid.origin_); thrust::transform( occvoxels->begin(), occvoxels->end(), make_tuple_begin(pointcloud->points_, pointcloud->colors_), func); return pointcloud; } std::shared_ptr<PointCloud> PointCloud::CreateFromDisparity( const Image &disp, const Image &color, const camera::PinholeCameraIntrinsic &left_intrinsic, const camera::PinholeCameraIntrinsic &right_intrinsic, float baseline) { if (disp.num_of_channels_ == 1 && disp.bytes_per_channel_ == 1 && disp.width_ == color.width_ && disp.height_ == color.height_) { auto pointcloud = std::make_shared<PointCloud>(); const size_t n_total = disp.width_ * disp.height_; pointcloud->points_.resize(n_total); pointcloud->colors_.resize(n_total); const float tx = -baseline; Eigen::Matrix4f q = Eigen::Matrix4f::Zero(); auto focal_length = left_intrinsic.GetFocalLength(); auto principal_l = left_intrinsic.GetPrincipalPoint(); auto principal_r = right_intrinsic.GetPrincipalPoint(); q(0, 0) = focal_length.second * tx; q(0, 3) = -focal_length.second * principal_l.first * tx; q(1, 1) = focal_length.first * tx; q(1, 3) = -focal_length.first * principal_l.second * tx; q(2, 3) = focal_length.first * focal_length.second * tx; q(3, 2) = 
-focal_length.second; q(3, 3) = focal_length.second * (principal_l.first - principal_r.first); if (color.bytes_per_channel_ == 1) { compute_points_from_disparity<uint8_t> func( thrust::raw_pointer_cast(disp.data_.data()), thrust::raw_pointer_cast(color.data_.data()), disp.width_, q, 255.0f); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_total), make_tuple_begin(pointcloud->points_, pointcloud->colors_), func); } else if (color.bytes_per_channel_ == 2) { compute_points_from_disparity<uint16_t> func( thrust::raw_pointer_cast(disp.data_.data()), thrust::raw_pointer_cast(color.data_.data()), disp.width_, q, 65535.0f); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_total), make_tuple_begin(pointcloud->points_, pointcloud->colors_), func); } return pointcloud; } utility::LogError( "[PointCloud::CreateFromDisparity] Unsupported image format."); return std::make_shared<PointCloud>(); } } // namespace geometry } // namespace cupoch
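depth_to_pointcloud_functor and convert_from_rgbdimage_functor above both back-project pixels with the standard pinhole model, x = (u - cx) * z / fx and y = (v - cy) * z / fy, followed by the camera pose (the inverted extrinsic). Here is a minimal CPU sketch of that per-pixel math with plain structs instead of Eigen/thrust; the intrinsic values are illustrative and not taken from the code above.

#include <cstdio>

// CPU sketch of the per-pixel back-projection: lift pixel (u, v) with depth z through the
// pinhole intrinsics, then transform the resulting camera-frame point by the camera pose.
struct Vec3 { float x, y, z; };

static Vec3 backProject(int u, int v, float z,
                        float fx, float fy, float cx, float cy,
                        const float pose[4][4]) {
    const float xc = (u - cx) * z / fx;   // camera-frame coordinates
    const float yc = (v - cy) * z / fy;
    Vec3 p;
    p.x = pose[0][0] * xc + pose[0][1] * yc + pose[0][2] * z + pose[0][3];
    p.y = pose[1][0] * xc + pose[1][1] * yc + pose[1][2] * z + pose[1][3];
    p.z = pose[2][0] * xc + pose[2][1] * yc + pose[2][2] * z + pose[2][3];
    return p;
}

int main() {
    // Illustrative intrinsics (roughly a 640x480 depth camera) and identity pose.
    const float fx = 525.f, fy = 525.f, cx = 319.5f, cy = 239.5f;
    const float pose[4][4] = {{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}};
    Vec3 p = backProject(400, 300, 1.5f, fx, fy, cx, cy, pose);
    std::printf("point = (%f, %f, %f)\n", p.x, p.y, p.z);
    return 0;
}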
#include "RGBDOdometryCudaDevice.cuh" #include <Cuda/Common/ReductionCuda.h> #include <math_constants.h> namespace open3d { namespace cuda { template<size_t N> __global__ void DoSingleIterationKernel(RGBDOdometryCudaDevice<N> odometry, size_t level) { /** Add more memory blocks if we have **/ /** TODO: check this version vs 1 __shared__ array version **/ __shared__ float local_sum0[THREAD_2D_UNIT * THREAD_2D_UNIT]; __shared__ float local_sum1[THREAD_2D_UNIT * THREAD_2D_UNIT]; __shared__ float local_sum2[THREAD_2D_UNIT * THREAD_2D_UNIT]; const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int tid = threadIdx.x + threadIdx.y * blockDim.x; /** Proper initialization **/ local_sum0[tid] = 0; local_sum1[tid] = 0; local_sum2[tid] = 0; if (x >= odometry.source_intensity_[level].width_ || y >= odometry.source_intensity_[level].height_) return; int x_target = -1, y_target = -1; float d_target; Vector3f X_source_on_target; bool mask = odometry.ComputePixelwiseCorrespondence( x, y, level, x_target, y_target, X_source_on_target, d_target); float residual_I, residual_D; Vector6f jacobian_I, jacobian_D, Jtr; HessianCuda<6> JtJ; mask = mask && odometry.ComputePixelwiseJacobianAndResidual( x, y, x_target, y_target, level, X_source_on_target, d_target, jacobian_I, jacobian_D, residual_I, residual_D); if (mask) { odometry.correspondences_.push_back(Vector4i(x, y, x_target, y_target)); ComputeJtJAndJtr(jacobian_I, jacobian_D, residual_I, residual_D, JtJ, Jtr); /* printf("- (%d, %d), (%d, %d) -> " */ /* "(%f %f %f %f %f %f) - %f " */ /* "(%f %f %f %f %f %f) - %f\n", */ /* x, y, x_target, y_target, */ /* jacobian_D(0), jacobian_D(1), jacobian_D(2), */ /* jacobian_D(3), jacobian_D(4), jacobian_D(5), residual_D, */ /* jacobian_I(0), jacobian_I(1), jacobian_I(2), */ /* jacobian_I(3), jacobian_I(4), jacobian_I(5), residual_I); */ } /** Reduce Sum JtJ -> 2ms **/ for (size_t i = 0; i < 21; i += 3) { local_sum0[tid] = mask ? JtJ(i + 0) : 0; local_sum1[tid] = mask ? JtJ(i + 1) : 0; local_sum2[tid] = mask ? JtJ(i + 2) : 0; __syncthreads(); BlockReduceSum<float, THREAD_2D_UNIT * THREAD_2D_UNIT>(tid, local_sum0, local_sum1, local_sum2); if (tid == 0) { atomicAdd(&odometry.results_.at(i + 0), local_sum0[0]); atomicAdd(&odometry.results_.at(i + 1), local_sum1[0]); atomicAdd(&odometry.results_.at(i + 2), local_sum2[0]); } __syncthreads(); } /** Reduce Sum Jtr **/ const int OFFSET1 = 21; for (size_t i = 0; i < 6; i += 3) { local_sum0[tid] = mask ? Jtr(i + 0) : 0; local_sum1[tid] = mask ? Jtr(i + 1) : 0; local_sum2[tid] = mask ? Jtr(i + 2) : 0; __syncthreads(); BlockReduceSum<float, THREAD_2D_UNIT * THREAD_2D_UNIT>(tid, local_sum0, local_sum1, local_sum2); if (tid == 0) { atomicAdd(&odometry.results_.at(i + 0 + OFFSET1), local_sum0[0]); atomicAdd(&odometry.results_.at(i + 1 + OFFSET1), local_sum1[0]); atomicAdd(&odometry.results_.at(i + 2 + OFFSET1), local_sum2[0]); } __syncthreads(); } /** Reduce Sum loss and inlier **/ const int OFFSET2 = 27; { local_sum0[tid] = mask ? residual_I * residual_I + residual_D * residual_D : 0; local_sum1[tid] = mask ? 
1 : 0; __syncthreads(); BlockReduceSum<float, THREAD_2D_UNIT * THREAD_2D_UNIT>(tid, local_sum0, local_sum1); if (tid == 0) { atomicAdd(&odometry.results_.at(0 + OFFSET2), local_sum0[0]); atomicAdd(&odometry.results_.at(1 + OFFSET2), local_sum1[0]); } __syncthreads(); } } template<size_t N> void RGBDOdometryCudaKernelCaller<N>::DoSingleIteration( RGBDOdometryCuda<N> &odometry, size_t level) { const dim3 blocks( DIV_CEILING(odometry.source_intensity_[level].width_, THREAD_2D_UNIT), DIV_CEILING(odometry.source_intensity_[level].height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); DoSingleIterationKernel << < blocks, threads >> > ( *odometry.device_, level); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } template<size_t N> __global__ void ComputeInformationMatrixKernel(RGBDOdometryCudaDevice<N> odometry) { /** Add more memory blocks if we have **/ /** TODO: check this version vs 1 __shared__ array version **/ __shared__ float local_sum0[THREAD_2D_UNIT * THREAD_2D_UNIT]; __shared__ float local_sum1[THREAD_2D_UNIT * THREAD_2D_UNIT]; __shared__ float local_sum2[THREAD_2D_UNIT * THREAD_2D_UNIT]; const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int tid = threadIdx.x + threadIdx.y * blockDim.x; /** Proper initialization **/ local_sum0[tid] = 0; local_sum1[tid] = 0; local_sum2[tid] = 0; if (x >= odometry.source_intensity_[0].width_ || y >= odometry.source_intensity_[0].height_) return; Vector6f jacobian_x, jacobian_y, jacobian_z; HessianCuda<6> JtJ; bool mask = odometry.ComputePixelwiseCorrespondenceAndInformationJacobian( x, y, jacobian_x, jacobian_y, jacobian_z); if (mask) { ComputeJtJ(jacobian_x, jacobian_y, jacobian_z, JtJ); } /** Reduce Sum JtJ -> 2ms **/ for (size_t i = 0; i < 21; i += 3) { local_sum0[tid] = mask ? JtJ(i + 0) : 0; local_sum1[tid] = mask ? JtJ(i + 1) : 0; local_sum2[tid] = mask ? JtJ(i + 2) : 0; __syncthreads(); BlockReduceSum<float, THREAD_2D_UNIT * THREAD_2D_UNIT>(tid, local_sum0, local_sum1, local_sum2); if (tid == 0) { atomicAdd(&odometry.results_.at(i + 0), local_sum0[0]); atomicAdd(&odometry.results_.at(i + 1), local_sum1[0]); atomicAdd(&odometry.results_.at(i + 2), local_sum2[0]); } __syncthreads(); } } template<size_t N> void RGBDOdometryCudaKernelCaller<N>::ComputeInformationMatrix( RGBDOdometryCuda<N> &odometry) { const dim3 blocks( DIV_CEILING(odometry.source_intensity_[0].width_, THREAD_2D_UNIT), DIV_CEILING(odometry.source_intensity_[0].height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); ComputeInformationMatrixKernel << < blocks, threads >> > (*odometry.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } template<size_t N> __global__ void PreprocessInputKernel(RGBDOdometryCudaDevice<N> odometry, ImageCudaDevicef source_depth_preprocessed, ImageCudaDevicef source_intensity_preprocessed, ImageCudaDevicef target_depth_preprocessed, ImageCudaDevicef target_intensity_preprocessed) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= odometry.source_input_.depth_raw_.width_ || y >= odometry.source_input_.depth_raw_.height_) return; float &depth_src = odometry.source_input_.depth_.at(x, y)(0); source_depth_preprocessed.at(x, y, 0) = odometry.IsValidDepth(depth_src) ? 
depth_src : CUDART_NAN_F; auto rgb_src = odometry.source_input_.color_raw_.at(x, y); source_intensity_preprocessed.at(x, y, 0) = (0.2990f * rgb_src(0) + 0.5870f * rgb_src(1) + 0.1140f * rgb_src(2)) / 255.0f; float &depth_tgt = odometry.target_input_.depth_.at(x, y)(0); target_depth_preprocessed.at(x, y, 0) = odometry.IsValidDepth(depth_tgt) ? depth_tgt : CUDART_NAN_F; auto rgb_tgt = odometry.target_input_.color_raw_.at(x, y); target_intensity_preprocessed.at(x, y, 0) = (0.2990f * rgb_tgt(0) + 0.5870f * rgb_tgt(1) + 0.1140f * rgb_tgt(2)) / 255.0f; } template<size_t N> void RGBDOdometryCudaKernelCaller<N>::PreprocessInput( RGBDOdometryCuda<N> &odometry, ImageCudaf &source_depth_preprocessed, ImageCudaf &source_intensity_preprocessed, ImageCudaf &target_depth_preprocessed, ImageCudaf &target_intensity_preprocessed) { const dim3 blocks( DIV_CEILING(odometry.source_input_.depth_raw_.width_, THREAD_2D_UNIT), DIV_CEILING(odometry.target_input_.depth_raw_.height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); PreprocessInputKernel << < blocks, threads >> > (*odometry.device_, *source_depth_preprocessed.device_, *source_intensity_preprocessed.device_, *target_depth_preprocessed.device_, *target_intensity_preprocessed.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } //!TODO(Akash): Almost identical to previous (optimize) template<size_t N> __global__ void PreprocessInputKernel(RGBDOdometryCudaDevice<N> odometry, ImageCudaDevicef source_depth_preprocessed, ImageCudaDevicef source_intensity_preprocessed, ImageCudaDevicef target_intensity_preprocessed) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= odometry.source_input_.depth_raw_.width_ || y >= odometry.source_input_.depth_raw_.height_) return; float &depth_src = odometry.source_input_.depth_.at(x, y)(0); source_depth_preprocessed.at(x, y, 0) = odometry.IsValidDepth(depth_src) ? 
depth_src : CUDART_NAN_F; auto rgb_src = odometry.source_input_.color_raw_.at(x, y); source_intensity_preprocessed.at(x, y, 0) = (0.2990f * rgb_src(0) + 0.5870f * rgb_src(1) + 0.1140f * rgb_src(2)) / 255.0f; auto rgb_tgt = odometry.target_input_color_.at(x, y); target_intensity_preprocessed.at(x, y, 0) = (0.2990f * rgb_tgt(0) + 0.5870f * rgb_tgt(1) + 0.1140f * rgb_tgt(2)) / 255.0f; } template<size_t N> void RGBDOdometryCudaKernelCaller<N>::PreprocessInput( RGBDOdometryCuda<N> &odometry, ImageCudaf &source_depth_preprocessed, ImageCudaf &source_intensity_preprocessed, ImageCudaf &target_intensity_preprocessed) { const dim3 blocks( DIV_CEILING(odometry.source_input_.depth_raw_.width_, THREAD_2D_UNIT), DIV_CEILING(odometry.source_input_.depth_raw_.height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); PreprocessInputKernel << < blocks, threads >> > (*odometry.device_, *source_depth_preprocessed.device_, *source_intensity_preprocessed.device_, *target_intensity_preprocessed.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } template<size_t N> __global__ void ComputeInitCorrespondenceMeanKernel( RGBDOdometryCudaDevice<N> odometry, ArrayCudaDevice<float> means) { __shared__ float local_sum0[THREAD_2D_UNIT * THREAD_2D_UNIT]; __shared__ float local_sum1[THREAD_2D_UNIT * THREAD_2D_UNIT]; __shared__ float local_sum2[THREAD_2D_UNIT * THREAD_2D_UNIT]; const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; const int tid = threadIdx.x + threadIdx.y * blockDim.x; /** Proper initialization **/ local_sum0[tid] = 0; local_sum1[tid] = 0; local_sum2[tid] = 0; if (x >= odometry.source_intensity_[0].width_ || y >= odometry.source_intensity_[0].height_) return; int x_target = -1, y_target = -1; float d_target; Vector3f X_source_on_target; bool mask = odometry.ComputePixelwiseCorrespondence( x, y, 0, x_target, y_target, X_source_on_target, d_target); if (mask) { odometry.correspondences_.push_back(Vector4i(x, y, x_target, y_target)); } local_sum0[tid] = mask ? odometry.source_intensity_[0](x, y)(0) : 0; local_sum1[tid] = mask ? odometry.target_intensity_[0](x_target, y_target)(0) : 0; local_sum2[tid] = mask ? 
1 : 0; __syncthreads(); BlockReduceSum<float, THREAD_2D_UNIT * THREAD_2D_UNIT>(tid, local_sum0, local_sum1, local_sum2); if (tid == 0) { atomicAdd(&means[0], local_sum0[0]); atomicAdd(&means[1], local_sum1[0]); atomicAdd(&means[2], local_sum2[0]); } __syncthreads(); } template<size_t N> __global__ void NormalizeIntensityKernel(RGBDOdometryCudaDevice<N> odometry, ArrayCudaDevice<float> means) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= odometry.source_intensity_[0].width_ || y >= odometry.source_intensity_[0].height_) return; float &intensity_source = odometry.source_intensity_[0].at(x, y)(0); intensity_source *= 0.5f * (means[2] / means[0]); float &intensity_target = odometry.target_intensity_[0].at(x, y)(0); intensity_target *= 0.5f * (means[2] / means[1]); } template<size_t N> void RGBDOdometryCudaKernelCaller<N>::NormalizeIntensity( RGBDOdometryCuda<N> &odometry) { ArrayCuda<float> means; means.Create(3); means.Memset(0); const dim3 blocks( DIV_CEILING(odometry.source_intensity_[0].width_, THREAD_2D_UNIT), DIV_CEILING(odometry.source_intensity_[0].height_, THREAD_2D_UNIT)); const dim3 threads(THREAD_2D_UNIT, THREAD_2D_UNIT); ComputeInitCorrespondenceMeanKernel << < blocks, threads >> > ( *odometry.device_, *means.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); auto means_intensity = means.DownloadAll(); NormalizeIntensityKernel << < blocks, threads >> > ( *odometry.device_, *means.device_); CheckCuda(cudaDeviceSynchronize()); CheckCuda(cudaGetLastError()); } } // cuda } // open3d
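ComputeInitCorrespondenceMeanKernel accumulates the source-intensity sum, the target-intensity sum and the correspondence count into means[0..2], and NormalizeIntensityKernel then scales each image by 0.5 * count / sum so both images end up with mean intensity 0.5 over the matched pixels. Below is a host-side sketch of that normalization on plain arrays (illustrative data, every pixel treated as a correspondence; no CUDA reduction involved).

#include <cstdio>
#include <vector>

// Host sketch of the intensity normalization: accumulate per-image intensity sums and the
// match count, then scale each image by 0.5 * count / sum so both means land at 0.5.
static void normalizePair(std::vector<float>& src, std::vector<float>& tgt) {
    float sumSrc = 0.f, sumTgt = 0.f, count = 0.f;
    for (size_t i = 0; i < src.size(); ++i) {   // every pixel counted as a correspondence here
        sumSrc += src[i];
        sumTgt += tgt[i];
        count += 1.f;
    }
    const float sSrc = 0.5f * (count / sumSrc);
    const float sTgt = 0.5f * (count / sumTgt);
    for (size_t i = 0; i < src.size(); ++i) {
        src[i] *= sSrc;
        tgt[i] *= sTgt;
    }
}

int main() {
    std::vector<float> src = {0.2f, 0.4f, 0.6f};   // illustrative intensities
    std::vector<float> tgt = {0.1f, 0.3f, 0.5f};
    normalizePair(src, tgt);
    const float meanSrc = (src[0] + src[1] + src[2]) / 3.f;
    const float meanTgt = (tgt[0] + tgt[1] + tgt[2]) / 3.f;
    std::printf("mean(src)=%f mean(tgt)=%f\n", meanSrc, meanTgt);  // both ~0.5
    return 0;
}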
#include <logger.h> #include <amgx_types/util.h> using namespace std; namespace amgx { namespace block_jacobi_solver { template <typename ValueTypeA, typename ValueTypeB> struct jacobi_presmooth_functor { double omega; jacobi_presmooth_functor( double omega ) : omega( omega ) {} __host__ __device__ ValueTypeB operator()( const ValueTypeB &b, const ValueTypeA &d ) const { return isNotCloseToZero(d) ? b * omega / d : b * omega / epsilon(d); } }; template <typename ValueTypeA, typename ValueTypeB> struct jacobi_postsmooth_functor { double omega; jacobi_postsmooth_functor( double omega ) : omega( omega ) {} template<typename Tuple> __host__ __device__ ValueTypeB operator( )( const Tuple &t ) const { ValueTypeB x = thrust::get<0>(t); ValueTypeA d = thrust::get<1>(t); ValueTypeB b = thrust::get<2>(t); ValueTypeB y = thrust::get<3>(t); // return x + omega * (b - y) / d. d = isNotCloseToZero(d) ? d : epsilon(d); d = types::util<ValueTypeA>::get_one() / d; b = b - y; b = b * omega; return b * d + x; } }; template <typename ValueTypeB> struct add_functor { __host__ __device__ ValueTypeB operator()( const ValueTypeB &x, const ValueTypeB &y )const { return x + y; } }; template<typename T> __device__ __forceinline__ T fmnaOp (T a, T b, T c) { return -(a * b) + c; } template<typename T> __device__ __forceinline__ T mulOp (T a, T b) { return a * b; } template<typename T> __device__ __forceinline__ T rcpOp (T a) { return 1.0 / (isNotCloseToZero(a) ? a : epsilon(a)); } template<typename T> __device__ __forceinline__ T absOp (T a) { return fabs(a); } // ----------------------------------- // KERNELS // ----------------------------------- template<typename T1, typename T2, int N> __global__ void matinv_matrix_per_thread_pivot (const T1 *A, T2 *Ainv, int batch) { #define A(row,col) A[(col)*N+(row)] #define AA(row,col) AA[(col)*N+(row)] #define Ainv(row,col) Ainv[(col)*N+(row)] const int blkNum = blockIdx.x * blockDim.x + threadIdx.x; int perm0, perm1, perm2, perm3; int icol0, icol1, icol2, icol3; T2 AA00, AA01, AA02, AA03, AA10, AA11, AA12, AA13; T2 AA20, AA21, AA22, AA23, AA30, AA31, AA32, AA33; T2 p, t; int i, pvt; A += blkNum * N * N; Ainv += blkNum * N * N; if (blkNum < batch) { AA00 = A[0]; AA10 = A[1]; AA20 = A[2]; AA30 = A[3]; AA01 = A[4]; AA11 = A[5]; AA21 = A[6]; AA31 = A[7]; AA02 = A[8]; AA12 = A[9]; AA22 = A[10]; AA32 = A[11]; AA03 = A[12]; AA13 = A[13]; AA23 = A[14]; AA33 = A[15]; perm0 = 0; perm1 = 1; perm2 = 2; perm3 = 3; /****************** iteration 0 ***********/ /* search pivot row */ p = absOp (AA00); pvt = 0; t = absOp (AA10); if (t > p) { p = t; pvt = 1; } t = absOp (AA20); if (t > p) { p = t; pvt = 2; } t = absOp (AA30); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 0 */ if (pvt == 1) { t = AA00; AA00 = AA10; AA10 = t; t = AA01; AA01 = AA11; AA11 = t; t = AA02; AA02 = AA12; AA12 = t; t = AA03; AA03 = AA13; AA13 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm1; perm1 = i; } if (pvt == 2) { t = AA00; AA00 = AA20; AA20 = t; t = AA01; AA01 = AA21; AA21 = t; t = AA02; AA02 = AA22; AA22 = t; t = AA03; AA03 = AA23; AA23 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm2; perm2 = i; } if (pvt == 3) { t = AA00; AA00 = AA30; AA30 = t; t = AA01; AA01 = AA31; AA31 = t; t = AA02; AA02 = AA32; AA32 = t; t = AA03; AA03 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA00); icol0 = perm0; AA00 = t; AA01 = mulOp (t, AA01); AA02 = mulOp (t, 
AA02); AA03 = mulOp (t, AA03); /* eliminate above and below current row */ t = AA10; AA10 = mulOp (-t, AA00); AA11 = fmnaOp (t, AA01, AA11); AA12 = fmnaOp (t, AA02, AA12); AA13 = fmnaOp (t, AA03, AA13); t = AA20; AA20 = mulOp (-t, AA00); AA21 = fmnaOp (t, AA01, AA21); AA22 = fmnaOp (t, AA02, AA22); AA23 = fmnaOp (t, AA03, AA23); t = AA30; AA30 = mulOp (-t, AA00); AA31 = fmnaOp (t, AA01, AA31); AA32 = fmnaOp (t, AA02, AA32); AA33 = fmnaOp (t, AA03, AA33); /****************** iteration 1 ***********/ /* search pivot row */ p = absOp (AA11); pvt = 1; t = absOp (AA21); if (t > p) { p = t; pvt = 2; } t = absOp (AA31); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 1 */ if (pvt == 2) { t = AA10; AA10 = AA20; AA20 = t; t = AA11; AA11 = AA21; AA21 = t; t = AA12; AA12 = AA22; AA22 = t; t = AA13; AA13 = AA23; AA23 = t; /* update permutation vector based on row swap */ i = perm1; perm1 = perm2; perm2 = i; } else if (pvt == 3) { t = AA10; AA10 = AA30; AA30 = t; t = AA11; AA11 = AA31; AA31 = t; t = AA12; AA12 = AA32; AA32 = t; t = AA13; AA13 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm1; perm1 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA11); icol1 = perm1; AA10 = mulOp (t, AA10); AA11 = t; AA12 = mulOp (t, AA12); AA13 = mulOp (t, AA13); /* eliminate above and below current row */ t = AA01; AA00 = fmnaOp (t, AA10, AA00); AA01 = mulOp (-t, AA11); AA02 = fmnaOp (t, AA12, AA02); AA03 = fmnaOp (t, AA13, AA03); t = AA21; AA20 = fmnaOp (t, AA10, AA20); AA21 = mulOp (-t, AA11); AA22 = fmnaOp (t, AA12, AA22); AA23 = fmnaOp (t, AA13, AA23); t = AA31; AA30 = fmnaOp (t, AA10, AA30); AA31 = mulOp (-t, AA11); AA32 = fmnaOp (t, AA12, AA32); AA33 = fmnaOp (t, AA13, AA33); /****************** iteration 2 ****************/ /* search pivot row */ p = absOp (AA22); pvt = 2; t = absOp (AA32); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 2 */ if (pvt == 3) { t = AA20; AA20 = AA30; AA30 = t; t = AA21; AA21 = AA31; AA31 = t; t = AA22; AA22 = AA32; AA32 = t; t = AA23; AA23 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm2; perm2 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA22); icol2 = perm2; AA20 = mulOp (t, AA20); AA21 = mulOp (t, AA21); AA22 = t; AA23 = mulOp (t, AA23); /* eliminate above and below current row */ t = AA02; AA00 = fmnaOp (t, AA20, AA00); AA01 = fmnaOp (t, AA21, AA01); AA02 = mulOp (-t, AA22); AA03 = fmnaOp (t, AA23, AA03); t = AA12; AA10 = fmnaOp (t, AA20, AA10); AA11 = fmnaOp (t, AA21, AA11); AA12 = mulOp (-t, AA22); AA13 = fmnaOp (t, AA23, AA13); t = AA32; AA30 = fmnaOp (t, AA20, AA30); AA31 = fmnaOp (t, AA21, AA31); AA32 = mulOp (-t, AA22); AA33 = fmnaOp (t, AA23, AA33); /****************** iteration 3 ****************/ /* scale current row */ t = rcpOp (AA33); icol3 = perm3; AA30 = mulOp (t, AA30); AA31 = mulOp (t, AA31); AA32 = mulOp (t, AA32); AA33 = t; /* eliminate above and below current row */ t = AA03; AA00 = fmnaOp (t, AA30, AA00); AA01 = fmnaOp (t, AA31, AA01); AA02 = fmnaOp (t, AA32, AA02); AA03 = mulOp (-t, AA33); t = AA13; AA10 = fmnaOp (t, AA30, AA10); AA11 = fmnaOp (t, AA31, AA11); AA12 = fmnaOp (t, AA32, AA12); AA13 = mulOp (-t, AA33); t = AA23; AA20 = fmnaOp (t, AA30, AA20); AA21 = fmnaOp (t, AA31, AA21); AA22 = fmnaOp (t, AA32, AA22); AA23 = mulOp (-t, AA33); /* sort columns into the correct order */ Ainv(0, icol0) = AA00; Ainv(1, icol0) = AA10; Ainv(2, icol0) = AA20; Ainv(3, icol0) = AA30; Ainv(0, icol1) = AA01; Ainv(1, icol1) = AA11; Ainv(2, icol1) = AA21; Ainv(3, icol1) = 
AA31; Ainv(0, icol2) = AA02; Ainv(1, icol2) = AA12; Ainv(2, icol2) = AA22; Ainv(3, icol2) = AA32; Ainv(0, icol3) = AA03; Ainv(1, icol3) = AA13; Ainv(2, icol3) = AA23; Ainv(3, icol3) = AA33; } } template<typename T, int N> __global__ void matinv_matrix_per_thread_no_pivot (const T *A, T *Ainv, int batch) { #define A(row,col) A[(col)*N+(row)] #define AA(row,col) AA[(col)*N+(row)] #define Ainv(row,col) Ainv[(col)*N+(row)] const int blkNum = blockIdx.x * blockDim.x + threadIdx.x; T AA00, AA01, AA02, AA03, AA10, AA11, AA12, AA13; T AA20, AA21, AA22, AA23, AA30, AA31, AA32, AA33; T t; A += blkNum * N * N; Ainv += blkNum * N * N; if (blkNum < batch) { AA00 = A[0]; AA10 = A[1]; AA20 = A[2]; AA30 = A[3]; AA01 = A[4]; AA11 = A[5]; AA21 = A[6]; AA31 = A[7]; AA02 = A[8]; AA12 = A[9]; AA22 = A[10]; AA32 = A[11]; AA03 = A[12]; AA13 = A[13]; AA23 = A[14]; AA33 = A[15]; /****************** iteration 0 ***********/ /* search pivot row */ t = 1.0 / (AA00); AA00 = t; AA01 = t * AA01; AA02 = t * AA02; AA03 = t * AA03; /* eliminate above and below current row */ t = AA10; AA10 = -t * AA00; AA11 = fmnaOp (t, AA01, AA11); AA12 = fmnaOp (t, AA02, AA12); AA13 = fmnaOp (t, AA03, AA13); t = AA20; AA20 = -t * AA00; AA21 = fmnaOp (t, AA01, AA21); AA22 = fmnaOp (t, AA02, AA22); AA23 = fmnaOp (t, AA03, AA23); t = AA30; AA30 = -t * AA00; AA31 = fmnaOp (t, AA01, AA31); AA32 = fmnaOp (t, AA02, AA32); AA33 = fmnaOp (t, AA03, AA33); /****************** iteration 1 ***********/ /* scale current row */ t = 1.0 / (AA11); AA10 = t * AA10; AA11 = t; AA12 = t * AA12; AA13 = t * AA13; /* eliminate above and below current row */ t = AA01; AA00 = fmnaOp (t, AA10, AA00); AA01 = -t * AA11; AA02 = fmnaOp (t, AA12, AA02); AA03 = fmnaOp (t, AA13, AA03); t = AA21; AA20 = fmnaOp (t, AA10, AA20); AA21 = -t * AA11; AA22 = fmnaOp (t, AA12, AA22); AA23 = fmnaOp (t, AA13, AA23); t = AA31; AA30 = fmnaOp (t, AA10, AA30); AA31 = -t * AA11; AA32 = fmnaOp (t, AA12, AA32); AA33 = fmnaOp (t, AA13, AA33); /****************** iteration 2 ****************/ /* scale current row */ t = 1.0 / (AA22); AA20 = t * AA20; AA21 = t * AA21; AA22 = t; AA23 = t * AA23; /* eliminate above and below current row */ t = AA02; AA00 = fmnaOp (t, AA20, AA00); AA01 = fmnaOp (t, AA21, AA01); AA02 = -t * AA22; AA03 = fmnaOp (t, AA23, AA03); t = AA12; AA10 = fmnaOp (t, AA20, AA10); AA11 = fmnaOp (t, AA21, AA11); AA12 = -t * AA22; AA13 = fmnaOp (t, AA23, AA13); t = AA32; AA30 = fmnaOp (t, AA20, AA30); AA31 = fmnaOp (t, AA21, AA31); AA32 = -t * AA22; AA33 = fmnaOp (t, AA23, AA33); /****************** iteration 3 ****************/ /* scale current row */ t = 1.0 / (AA33); AA30 = t * AA30; AA31 = t * AA31; AA32 = t * AA32; AA33 = t; /* eliminate above and below current row */ t = AA03; AA00 = fmnaOp (t, AA30, AA00); AA01 = fmnaOp (t, AA31, AA01); AA02 = fmnaOp (t, AA32, AA02); AA03 = -t * AA33; t = AA13; AA10 = fmnaOp (t, AA30, AA10); AA11 = fmnaOp (t, AA31, AA11); AA12 = fmnaOp (t, AA32, AA12); AA13 = -t * AA33; t = AA23; AA20 = fmnaOp (t, AA30, AA20); AA21 = fmnaOp (t, AA31, AA21); AA22 = fmnaOp (t, AA32, AA22); AA23 = -t * AA33; /* sort columns into the correct order */ Ainv(0, 0) = AA00; Ainv(1, 0) = AA10; Ainv(2, 0) = AA20; Ainv(3, 0) = AA30; Ainv(0, 1) = AA01; Ainv(1, 1) = AA11; Ainv(2, 1) = AA21; Ainv(3, 1) = AA31; Ainv(0, 2) = AA02; Ainv(1, 2) = AA12; Ainv(2, 2) = AA22; Ainv(3, 2) = AA32; Ainv(0, 3) = AA03; Ainv(1, 3) = AA13; Ainv(2, 3) = AA23; Ainv(3, 3) = AA33; } } template<typename IndexType, typename ValueTypeA, int threads_per_block, int halfwarps_per_block> __global__ 
void setupBlockJacobiSmoothbBigBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows, int bsize, int bsize_sq, ValueTypeA *temp1) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int halfwarp_id = tid >> 4; const int block_halfwarp_id = threadIdx.x >> 4; const int mat_entry_index = threadIdx.x & (16 - 1); const int i_ind = mat_entry_index >> 2; const int j_ind = mat_entry_index & 3; extern __shared__ volatile char schar[]; volatile ValueTypeA *s_Amat; s_Amat = (ValueTypeA *)&schar[0]; int tile_num = (bsize - 1) / 4 + 1; ValueTypeA *e_out = &temp1[(blockIdx.x * blockDim.x + threadIdx.x) * tile_num * tile_num]; while (halfwarp_id < num_block_rows) { int offset = halfwarp_id * bsize_sq + i_ind * bsize + j_ind; int s_offset = block_halfwarp_id * bsize_sq; // Store the diagonal for (int t1 = 0; t1 < tile_num; t1++) for (int t2 = 0; t2 < tile_num; t2++) if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize) { e_out[t1 * tile_num + t2] = values[bsize_sq * dia_indices[halfwarp_id] + (t1 * 4 + i_ind) * bsize + t2 * 4 + j_ind]; } // Each thread stores its entry in s_Amat for (int t1 = 0; t1 < tile_num; t1++) for (int t2 = 0; t2 < tile_num; t2++) if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize) { types::util<ValueTypeA>::volcast( e_out[t1 * tile_num + t2], s_Amat + (s_offset + (t1 * 4 + i_ind) * bsize + t2 * 4 + j_ind) ); } compute_block_inverse2<IndexType, ValueTypeA, halfwarps_per_block> ( s_Amat, s_offset, offset, i_ind, j_ind, Dinv, tile_num, bsize, bsize_sq ); halfwarp_id += gridDim.x * halfwarps_per_block; } } template<typename IndexType, typename ValueTypeA, int blockrows_per_cta, int blockrows_per_warp, int bsize, int bsize_sq> __global__ void setupBlockJacobiSmoothBbyBBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows) { int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x & 31; // padding row blocks to fit in a single warp if ( warp_thread_id >= blockrows_per_warp * bsize_sq ) { return; } // new thread id with padding int tid = warp_id * blockrows_per_warp * bsize_sq + warp_thread_id; int cta_blockrow_id = tid / bsize_sq; int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id; const int mat_entry_index = tid - cta_blockrow_id * bsize_sq; const int i_ind = mat_entry_index / bsize; const int j_ind = mat_entry_index - i_ind * bsize; volatile __shared__ ValueTypeA s_Amat[bsize_sq * blockrows_per_cta ]; ValueTypeA e_out; while (blockrow_id < num_block_rows) { int offset = blockrow_id * bsize_sq + mat_entry_index; // Store the diagonal e_out = values[bsize_sq * dia_indices[blockrow_id] + mat_entry_index]; // Each thread stores its entry in s_Amat types::util<ValueTypeA>::volcast(e_out, s_Amat + tid); compute_block_inverse_row_major<IndexType, ValueTypeA, blockrows_per_cta, bsize, bsize_sq> ( s_Amat, cta_blockrow_id * bsize_sq, offset, i_ind, j_ind, Dinv ); blockrow_id += gridDim.x * blockrows_per_cta; } } template<typename ValueTypeA, typename ValueTypeB, typename IndexType, int threads_per_block, int halfwarps_per_block, int bsize, int log_bsize, int bsize_sq, int log_bsize_sq> __global__ void setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2(const IndexType *dia_indices, const ValueTypeA *A_values, ValueTypeA *Dinv, const int num_block_rows) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int 
halfwarp_id = tid >> log_bsize_sq; const int block_halfwarp_id = threadIdx.x >> log_bsize_sq; const int mat_entry_index = threadIdx.x & (bsize_sq - 1); const int i_ind = mat_entry_index >> log_bsize; const int j_ind = mat_entry_index & (bsize - 1); volatile __shared__ ValueTypeA s_Amat[bsize_sq * halfwarps_per_block ]; int offset; ValueTypeA e_out; while (halfwarp_id < num_block_rows) { // Store the diagonal offset = halfwarp_id * bsize_sq; e_out = A_values[bsize_sq * dia_indices[halfwarp_id] + mat_entry_index]; // Each thread stores its entry in s_Amat types::util<ValueTypeA>::volcast(e_out, s_Amat + threadIdx.x); compute_block_inverse_row_major<int, ValueTypeA, halfwarps_per_block, bsize, bsize_sq> ( s_Amat, block_halfwarp_id * bsize_sq, offset + mat_entry_index, i_ind, j_ind, Dinv ); halfwarp_id += gridDim.x * blockDim.x >> log_bsize_sq; } } // Kernel to smooth with Jacobi smoother, Dinv assumed to be computed template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq> __global__ void jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeA *Dinv, const ValueTypeB *b, const ValueTypeB *x, double weight, const int num_block_rows, ValueTypeB *xout, const int row_offset) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int eighthwarp_id = row_offset + (tid >> log_bsize); const int block_eighthwarp_id = threadIdx.x >> log_bsize; const int vec_entry_index = threadIdx.x & (bsize - 1); volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ]; ValueTypeB bmAx, xin; ValueTypeB temp[bsize]; int offset, i, s_offset; while (eighthwarp_id < num_block_rows) { i = eighthwarp_id; offset = i * bsize + vec_entry_index; // 1. COMPUTING b-Ax bmAx = b[offset]; // Contribution from diagonal xin = x[offset]; types::util<ValueTypeB>::volcast(xin, s_xtemp + threadIdx.x); // Load dia_values and do matrix multiply loadAsVector<bsize>(nonzero_values + bsize_sq * dia_indices[i] + vec_entry_index * bsize, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx - temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } // Contribution from each nonzero column int jmax = row_offsets[i + 1]; for (int jind = row_offsets[i]; jind < jmax; jind++) { IndexType jcol = __cachingLoad(&column_indices[jind]); if (jcol != i) { offset = jcol * bsize + vec_entry_index; types::util<ValueTypeB>::volcast(x[offset], s_xtemp + threadIdx.x); // Load nonzero_values s_offset = block_eighthwarp_id * bsize; offset = jind * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(nonzero_values + offset, temp); #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx - temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } } } types::util<ValueTypeB>::volcast(bmAx, s_xtemp + threadIdx.x); bmAx = types::util<ValueTypeB>::get_zero(); // 2. 
Multiply by Dinv // Load Dinv and multiply to RHS offset = i * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(Dinv + offset, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } xout[i * bsize + vec_entry_index] = xin + bmAx * weight; eighthwarp_id += gridDim.x * blockDim.x >> log_bsize; } } // Kernel to smooth with jacobi smoother, zero initial guess template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize> __global__ void jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2(const ValueTypeA *Dinv, const ValueTypeB *b, double weight, const int num_block_rows, ValueTypeB *xout, const int row_offset) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int block_eighthwarp_id = threadIdx.x >> log_bsize; const int vec_entry_index = threadIdx.x & (bsize - 1); volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ]; ValueTypeB bmAx; ValueTypeB temp[bsize]; int offset, i, s_offset; for (int eighthwarp_id = row_offset + (tid >> log_bsize); eighthwarp_id < num_block_rows; eighthwarp_id += (gridDim.x * blockDim.x >> log_bsize)) { i = eighthwarp_id; offset = i * bsize + vec_entry_index; types::util<ValueTypeB>::volcast(b[offset], s_xtemp + threadIdx.x); bmAx = types::util<ValueTypeB>::get_zero(); // Load Dinv and multiply to RHS offset = i * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(Dinv + offset, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } xout[i * bsize + vec_entry_index] = bmAx * weight; } } //-------------------------------- // Methods //-------------------------------- // Constructor template<class T_Config> BlockJacobiSolver_Base<T_Config>::BlockJacobiSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope) { weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope); if (weight == 0) { weight = 1.; amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Block Jacobi smoother\n"); } } // Destructor template<class T_Config> BlockJacobiSolver_Base<T_Config>::~BlockJacobiSolver_Base() { this->Dinv.resize(0); } template<class T_Config> void BlockJacobiSolver_Base<T_Config>::printSolverParameters() const { std::cout << "relaxation_factor= " << this->weight << std::endl; } // Solver setup template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { Matrix<T_Config> *A_as_matrix = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!A_as_matrix) { FatalError("BlockJacobiSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } computeDinv( *A_as_matrix ); if ( A_as_matrix->getBlockFormat() != ROW_MAJOR ) { FatalError("Block Jacobi solver only supports row major format", AMGX_ERR_CONFIGURATION); } } // template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { } // Solve one iteration template<class T_Config> bool BlockJacobiSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { //bool done = false; Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A; if (xIsZero) { x.dirtybit = 0; } ViewType oldView = A_as_matrix->currentView(); A_as_matrix->setViewExterior(); ViewType 
flags = (ViewType)(A_as_matrix->getViewInterior() | A_as_matrix->getViewExterior()); if (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1) { if (xIsZero) { smooth_with_0_initial_guess_1x1(*A_as_matrix, b, x, flags); } else { smooth_1x1(*A_as_matrix, b, x, flags); } } else if (A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) { if (xIsZero) { smooth_with_0_initial_guess_4x4(*A_as_matrix, b, x, flags); } else { smooth_4x4(*A_as_matrix, b, x, flags); } } else if (A_as_matrix->get_block_dimx() == A_as_matrix->get_block_dimy()) { if (xIsZero) { thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); } smooth_BxB(*A_as_matrix, b, x, true, flags); } else { FatalError("Unsupported block size for BlockJacobi_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if (A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) { if (!xIsZero) // we write to t_res vector to avoid race condition in the kernel { x.swap(this->t_res); } } x.dirtybit = 1; A_as_matrix->setView(oldView); return this->converged( b, x ); } template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template<class T_Config> void BlockJacobiSolver_Base<T_Config>::computeDinv( Matrix<T_Config> &A) { Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A; ViewType oldView = A.currentView(); A.setView(A_as_matrix->getViewExterior()); if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1) { this->computeDinv_1x1(A); } else if (A.get_block_dimx() == 2 && A.get_block_dimy() == 2) { this->computeDinv_bxb<2>(A); } else if (A.get_block_dimx() == 3 && A.get_block_dimy() == 3) { this->computeDinv_3x3(A); } else if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { this->computeDinv_4x4(A); } else if (A.get_block_dimx() == 5 && A.get_block_dimy() == 5) { this->computeDinv_bxb<5>(A); } else if (A.get_block_dimx() == A.get_block_dimy() && A.get_block_dimy() > 5) { this->computeDinv_Big(A, A.get_block_dimx()); } A.setView(oldView); } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_d &A) { Matrix_d *A_as_matrix = (Matrix_d *) this->m_A; // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); if ( A_as_matrix->hasProps(DIAG) ) { const int num_values = A_as_matrix->diagOffset() * A_as_matrix->get_block_size(); thrust::copy( A_as_matrix->values.begin() + num_values, A_as_matrix->values.begin() + num_values + A_as_matrix->get_num_rows()*A_as_matrix->get_block_size(), this->Dinv.begin() ); cudaCheckError(); } else { find_diag( *A_as_matrix ); } } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_h &A) { // Do nothing } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_h &A) { //FatalError("Block Jacobi smoother not implemented with this format, exiting"); //std::cout << "Warning, CPU 
version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_h &A) { FatalError("3*3 Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_Big(const Matrix_h &A, const int bsize) { FatalError("Big Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } // Finding diag on device, CSR format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_h &A ) { //for each row for (int i = 0; i < A.get_num_rows(); i++) { //for each column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { if (A.col_indices[j] == i) { this->Dinv[i] = A.values[j]; break; } if (j == A.row_offsets[i + 1] - 1) { FatalError("Could not find a diagonal value", AMGX_ERR_BAD_PARAMETERS); } } } } // Finding diag on device, CSR format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_d &A ) { AMGX_CPU_PROFILER( "JacobiSolver::find_diag " ); const size_t THREADS_PER_BLOCK = 128; const size_t NUM_BLOCKS = min(AMGX_GRID_MAX_SIZE, (int)ceil((float)(A.get_num_rows()) / (float)(THREADS_PER_BLOCK))); find_diag_kernel_indexed_dia <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>( A.get_num_rows(), A.diag.raw(), A.values.raw(), this->Dinv.raw()); cudaCheckError(); } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_d &A) { // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_indices_ptr = A.diag.raw(); const ValueTypeA *A_values = A.values.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); #if 1 const int threads_per_block = 512; const int halfwarps_per_block = threads_per_block / 16; const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2<ValueTypeA, ValueTypeB, IndexType, threads_per_block, halfwarps_per_block, 4, 2, 16, 4>, cudaFuncCachePreferL1); setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2<ValueTypeA, ValueTypeB, IndexType, threads_per_block, halfwarps_per_block, 4, 2, 16, 4> <<< num_blocks, threads_per_block>>> (A_dia_indices_ptr, A_values, Dinv_ptr, A.get_num_rows()); cudaCheckError(); #else 
cudaFuncSetCacheConfig(matinv_matrix_per_thread_pivot<ValueTypeA, ValueTypeB, 4>, cudaFuncCachePreferL1); matinv_matrix_per_thread_pivot<ValueTypeA, ValueTypeB, 4> <<< (A.num_block_rows + 127) / 128, 128 >>> (A_dia_values_ptr, Dinv_ptr, A.num_block_rows); cudaCheckError(); #endif } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_d &A) { const int bsize = 3; // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 256; const int blockrows_per_warp = 32 / (bsize * bsize); // blockrows per cta = blockrows_per_warp * number_of_warps_per_cta const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp ; const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>, cudaFuncCachePreferL1); setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize> <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows()); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_Big(const Matrix_d &A, const int bsize) { //both DIAG supported this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); MVector temp(AMGX_GRID_MAX_SIZE * ((bsize - 1) / 4 + 1) * ((bsize - 1) / 4 + 1)); ValueTypeA *temp_ptr = temp.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 512; const int halfwarps_per_block = threads_per_block / 16; const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmoothbBigBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block>, cudaFuncCachePreferL1); setupBlockJacobiSmoothbBigBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block> <<< num_blocks, threads_per_block, sizeof(ValueTypeA)*bsize *bsize *halfwarps_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows(), bsize, bsize * bsize, temp_ptr); cudaCheckError(); } template<class T_Config> template<int bsize> void BlockJacobiSolver_Base<T_Config>::computeDinv_bxb(const Matrix<T_Config> &A) { if (TConfig::memSpace == AMGX_host) { FatalError("BlockJacobiSmooth Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { // supports both diag 
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 256; const int blockrows_per_warp = 32 / (bsize * bsize); const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp; const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>, cudaFuncCachePreferL1); setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize> <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows()); cudaCheckError(); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_h &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags) { FatalError("M*M Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { VVector newx(x.size()); int bsize = A.get_block_dimx(); // Allocate space for block_matrix ValueTypeA **E = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; i++) { E[i] = new ValueTypeA[bsize]; } ValueTypeB *bmAx = new ValueTypeB[bsize]; ValueTypeB *temp = new ValueTypeB[bsize]; //for each block row for (int i = 0; i < A.get_num_rows(); i++) { // Compute b - sum_j A_j x_j (denoted by bmAx) for block_row i // Load diagonal for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { E[m][n] = A.values[A.diag[i * bsize * bsize + bsize * m + n]]; } bmAx[m] = types::util<ValueTypeB>::get_zero(); temp[m] = types::util<ValueTypeB>::get_zero(); } // Contribution from diagonal for (int m = 0; m < bsize; m++) for (int n = 0; n < bsize; n++) { bmAx[m] = bmAx[m] - E[m][n] * x[i * bsize + n]; } // Contribution from each nonzero column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { IndexType jcol = A.col_indices[j]; for (int m = 0; m < bsize; m++) for (int n = 0; n < bsize; n++) { bmAx[m] = bmAx[m] - A.values[j * bsize * bsize + bsize * m + n] * x[jcol * bsize + n]; } } // Add b for (int m = 0; m < bsize; m++) { bmAx[m] = bmAx[m] + b[i * bsize + m]; } gaussianEliminationRowMajor(E, temp, bmAx, bsize); // Compute new value of x for (int m = 0; m < bsize; m++) { newx[i * bsize + m] = x[i * bsize + m] + temp[m] * this->weight; } } x.swap(newx); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { IndexType bsize = A.get_block_dimy(); ValueTypeA **E = new ValueTypeA* [bsize]; for ( 
int i = 0; i < bsize; i++) { E[i] = new ValueTypeA[bsize]; } ValueTypeB *rhs = new ValueTypeB[bsize]; ValueTypeB *temp = new ValueTypeB[bsize]; //for each block row for (int i = 0; i < A.get_num_rows(); i++) { // Load diagonal for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { E[m][n] = A.values[A.diag[i * bsize * bsize + bsize * m + n]]; } rhs[m] = types::util<ValueTypeB>::get_zero(); } //rhs for (int m = 0; m < bsize; m++) { rhs[m] = rhs[m] + b[i * bsize + m]; } // Solve for temp gaussianEliminationRowMajor(E, temp, rhs, bsize); for (int m = 0; m < bsize; m++) { x[i * bsize + m] = temp[m] * this->weight; } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_const(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { VVector newx(x.size()); //for each row for (int i = 0; i < A.get_num_rows(); i++) { ValueTypeB Axi = types::util<ValueTypeB>::get_zero(); ValueTypeB d = types::util<ValueTypeB>::get_one() * A.values[A.diag[i]]; ValueTypeB mydiaginv = types::util<ValueTypeB>::get_one() * this->weight / (isNotCloseToZero(d) ? d : epsilon(d) ); //for each column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { Axi = Axi + A.values[j] * x[A.col_indices[j]]; } newx[i] = x[i] + (b[i] - Axi) * mydiaginv ; } x.swap(newx); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_h &A, VVector &b, VVector &x, ViewType separation_flags) { this->smooth_1x1_const(A, b, x, separation_flags); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags) { AMGX_CPU_PROFILER( "JacobiSolver::smooth_1x1 " ); if (this->t_res.size() != x.size()) { this->t_res.resize(x.size()); } if (this->y.size() != b.size()) { this->y.resize(b.size()); this->y.tag = this->tag * 100 + 3; this->y.set_block_dimx(b.get_block_dimx()); this->y.set_block_dimy(b.get_block_dimy()); } int num_rows = A.get_num_rows(); int offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); this->y.dirtybit = 0; multiply( A, x, this->y, separation_flags ); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( x.begin() + offset, this->Dinv.begin() + offset, b.begin() + offset, this->y.begin() + offset)), thrust::make_zip_iterator(thrust::make_tuple( x.begin() + A.get_num_rows(), this->Dinv.begin() + A.get_num_rows(), b.begin() + A.get_num_rows(), this->y.begin() + A.get_num_rows())), x.begin() + offset, jacobi_postsmooth_functor<ValueTypeA, ValueTypeB>( this->weight )); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { //for each row for (int i = 0; i < A.get_num_rows(); i++) { ValueTypeB d = types::util<ValueTypeB>::get_one() * A.values[A.diag[i]]; ValueTypeB mydiag = types::util<ValueTypeB>::get_one() * this->weight / (isNotCloseToZero(d) ? 
d : epsilon(d)); x[i] = b[i] * mydiag; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { AMGX_CPU_PROFILER( "JacobiSolver::smooth_with_0_initial_guess_1x1 " ); int num_rows = A.get_num_rows(); int offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); thrust::transform( b.begin( ) + offset, b.begin( ) + A.get_num_rows(), this->Dinv.begin( ) + offset, x.begin( ) + offset, jacobi_presmooth_functor<ValueTypeA, ValueTypeB>( this->weight )); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_d &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags) { IndexType num_rows; IndexType offset; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); // aux vector initialization if (this->y.size() != b.size()) { this->y.resize(b.size()); this->y.tag = this->tag * 100 + 3; this->y.set_block_dimx(b.get_block_dimx()); this->y.set_block_dimy(b.get_block_dimy()); } thrust::copy(b.begin(), b.end(), this->y.begin()); // copy of vector b cudaCheckError(); Cusparse::bsrmv(types::util<ValueTypeB>::get_minus_one(), A, x, types::util<ValueTypeB>::get_one(), this->y, separation_flags); // y= -1.0f*(A.x) + y cudaCheckError(); Cusparse::bsrmv(types::util<ValueTypeB>::get_one() * this->weight, A, this->Dinv, this->y, types::util<ValueTypeB>::get_one(), x, separation_flags); // t_res = t_res + w*(Dinv.y) @ view cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { if (this->t_res.size() != x.size()) { this->t_res.resize(x.size()); } const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); const ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); ValueTypeB *x_ptr = x.raw(); ValueTypeB *xout_ptr = this->t_res.raw(); // always store original x IndexType num_rows = A.get_num_rows(); IndexType offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); const int threads_per_block = 512; const int eightwarps_per_block = threads_per_block / 4; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / eightwarps_per_block + 1); cudaFuncSetCacheConfig(jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16>, cudaFuncCachePreferL1); jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16> <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr, b_ptr, x_ptr, this->weight, offset + num_rows, xout_ptr, offset); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> 
>::smooth_with_0_initial_guess_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { cudaCheckError(); const ValueTypeA *A_values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); const ValueTypeA *Dinv_ptr = this->Dinv.raw(); ValueTypeB *x_ptr = x.raw(); IndexType num_rows = A.get_num_rows(); IndexType offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); const int threads_per_block = 512; const int eightwarps_per_block = threads_per_block / 4; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / eightwarps_per_block + 1); cudaFuncSetCacheConfig(jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2>, cudaFuncCachePreferL1); jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2> <<< num_blocks, threads_per_block>>> (Dinv_ptr, b_ptr, this->weight, offset + num_rows, x_ptr, offset); cudaCheckError(); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class BlockJacobiSolver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class BlockJacobiSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace block_jacobi } // namespace amgx
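// --------------------------------------------------------------------------
// Illustration (not part of AMGX): the damped Jacobi update realized above,
// x_new = x + w * Dinv * (b - A*x), shown for the scalar (1x1 block) CSR case
// as a minimal standalone kernel. This is a hedged sketch for exposition only:
// the kernel name, parameter names and the plain-CSR layout are assumptions
// and do not correspond to AMGX's actual API, DIAG handling or block storage.
// --------------------------------------------------------------------------
template <typename IndexType, typename ValueType>
__global__ void jacobi_smooth_1x1_sketch(const IndexType *row_offsets,
                                         const IndexType *col_indices,
                                         const ValueType *values,
                                         const ValueType *Dinv,   // precomputed 1/diag(A)
                                         const ValueType *b,
                                         const ValueType *x_in,
                                         ValueType       *x_out,  // separate output buffer avoids races
                                         ValueType        weight,
                                         IndexType        num_rows)
{
    // Grid-stride loop: one row of A per iteration per thread.
    for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x;
         i < num_rows;
         i += gridDim.x * blockDim.x)
    {
        ValueType Axi = ValueType(0);
        for (IndexType j = row_offsets[i]; j < row_offsets[i + 1]; ++j)
        {
            Axi += values[j] * x_in[col_indices[j]];   // accumulate (A*x)_i
        }
        // Damped Jacobi update for row i.
        x_out[i] = x_in[i] + weight * Dinv[i] * (b[i] - Axi);
    }
}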
#include <kat/on_device/common.cuh> ///@cond #include <kat/detail/execution_space_specifiers.hpp> ///@endcond #include <cassert> namespace kat { /** * A richer (kind-of-a-)wrapper for CUDA's `dim3` class, used * to specify dimensions for blocks and grid (up to 3 dimensions). * * @note same as `cuda::dimensions_t` from the cuda-api-wrappers library... * * @todo consider templating this on the number of dimensions. */ struct dimensions_t // this almost-inherits dim3 { grid_dimension_t x, y, z; constexpr KAT_FHD dimensions_t(unsigned x_ = 1, unsigned y_ = 1, unsigned z_ = 1) noexcept : x(x_), y(y_), z(z_) {} constexpr KAT_FHD dimensions_t(uint3 v) noexcept : dimensions_t(v.x, v.y, v.z) { } constexpr KAT_FHD dimensions_t(dim3 dims) noexcept : dimensions_t(dims.x, dims.y, dims.z) { } constexpr KAT_FHD operator uint3(void) const noexcept { return { x, y, z }; } // This _should_ have been constexpr, but nVIDIA have not marked the dim3 constructors // as constexpr, so it isn't KAT_FHD operator dim3(void) const noexcept { return { x, y, z }; } constexpr KAT_FHD unsigned volume() const noexcept { return x * y * z; } // TODO: Do we need this to be size_t? constexpr KAT_FHD bool empty() const noexcept { return (x == 0) or (y == 0) or (z == 0); } /** * @brief The number of actual dimensions (i.e. dimensions/axes with more than a single value) */ constexpr KAT_FHD unsigned dimensionality() const noexcept { return empty() ? 0 : ((z > 1) + (y > 1) + (x > 1)); } }; template <unsigned Dimensionality = 3> KAT_FD unsigned size(dimensions_t dims) { switch (Dimensionality) { case 0: return 1; case 1: return dims.x; case 2: return dims.x * dims.y; case 3: default: return dims.volume(); } } /** * A position within a 3-dimensional grid or block. * * @note all coordinates are non-negative - positions are taken from the "corner", not the center. */ using position_t = uint3; constexpr KAT_FHD bool operator==(const dimensions_t& lhs, const dimensions_t& rhs) noexcept { return static_cast<uint3>(lhs) == static_cast<uint3>(rhs); } /** * A dimensions-conscious version of operator== */ template <unsigned Dimensionality = 3> constexpr KAT_FHD bool equals(const uint3& lhs, const uint3& rhs) noexcept { return ((Dimensionality < 1) or (lhs.x == rhs.x)) and ((Dimensionality < 2) or (lhs.y == rhs.y)) and ((Dimensionality < 3) or (lhs.z == rhs.z)); } template <unsigned Dimensionality = 3> constexpr KAT_FHD bool equals(const dimensions_t& lhs, const dimensions_t& rhs) noexcept { return equals<Dimensionality>(static_cast<uint3>(lhs), static_cast<uint3>(rhs)); } /** * A dimensions-conscious version of operator< */ template <unsigned Dimensionality = 3> constexpr KAT_FHD bool less_than(const uint3& lhs, const uint3& rhs) noexcept { return ( (Dimensionality < 1) or (lhs.x < rhs.x) ) and ( (Dimensionality < 2) or ((lhs.x == rhs.x) and (lhs.y < rhs.y)) ) and ( (Dimensionality < 3) or ((lhs.x == rhs.x) and (lhs.y == rhs.y) and (lhs.z < rhs.z)) ); } template <unsigned Dimensionality = 3> constexpr KAT_FHD bool less_than(const dimensions_t& lhs, const dimensions_t& rhs) noexcept { return less_than<Dimensionality>(static_cast<uint3>(lhs), static_cast<uint3>(rhs)); } namespace detail { template <unsigned Dimensionality = 3, typename Size = unsigned> KAT_FHD Size row_major_linearization(position_t position, dimensions_t dims) { // If you're wondering why this doesn't use a switch statement - that's // due to an (apparent) NVCC bug, complaining about "unreachable statements" // which _are_ reachable for different template parameters. 
if (Dimensionality == 0) { return 0; } else if (Dimensionality == 1) { return position.x; } else if (Dimensionality == 2) { return position.x + position.y * dims.x; } else if (Dimensionality == 3) { return position.x + position.y * dims.x + position.z * dims.x * dims.y; } else return {}; } } // namespace detail /** * @brief Determines whether a dimensions specification follows CUDA's * convention of having non-trivial dimensions first. * * @param[in] dims A dimensions specification. Assumed to not be "empty", * i.e. assumed to have a value of at least 1 in every axis. * * @return true if no non-trivial dimensions follow trivial dimensions */ constexpr KAT_FHD bool dimensionality_is_canonical(dimensions_t dims) { return (dims.x > 1 or (dims.y == 1 and dims.z == 1)) and (dims.y > 1 or dims.z == 1); } /** * ************************************************************************ * Convenience one-liners relating to grid dimensions, indices within * the grid, block or warp, lane functionality etc. ************************************************************************ */ // TODO: Perhaps have functions for strided copy in and out namespace grid_info { namespace detail { template <unsigned Dimensionality = 3> KAT_FD position_t last_position_for(dimensions_t dims) { return { (Dimensionality < 1) ? 0 : dims.x - 1, (Dimensionality < 2) ? 0 : dims.y - 1, (Dimensionality < 3) ? 0 : dims.z - 1 }; } template <unsigned Dimensionality = 3> KAT_FD position_t first_position() { return { 0, 0, 0 }; } } // namespace detail namespace grid { /** * @note These are the dimensions of the grid over blocks; the blocks may have additional "dimensions" relative to threads. */ KAT_FD dimensions_t dimensions_in_blocks() { return gridDim; } template <unsigned Dimensionality = 3> KAT_FD unsigned num_blocks() { return size(dimensions_in_blocks()); } KAT_FD position_t first_block_position() { return dimensions_t{0, 0, 0}; } template <unsigned Dimensionality = 3> KAT_FD position_t last_block_position() { return detail::last_position_for(gridDim); } /** * @note These are the dimensions of the grid in terms of threads. This means that a grid can have less blocks (or * even one block) in each dimension, but each block many have multiple threads, contributing to the overall dimension. */ template <unsigned Dimensionality = 3> KAT_FD dimensions_t dimensions_in_threads() { switch (Dimensionality) { case 0: return { 1, 1, 1 }; case 1: return { gridDim.x * blockDim.x, 1, 1 }; case 2: return { gridDim.x * blockDim.x, gridDim.y * blockDim.y, 1 }; case 3: default: return { gridDim.x * blockDim.x, gridDim.y * blockDim.y, gridDim.z * blockDim.z }; } } } // namespace grid namespace block { KAT_FD dimensions_t dimensions() { return blockDim; } KAT_FD position_t position_in_grid() { return blockIdx; } /** * @note Remember a thread's index is a multi-dimensional entity, not a single linear value */ KAT_FD position_t index() { return position_in_grid(); } template <unsigned Dimensionality = 3> KAT_FD bool is_first_in_grid() { return equals<Dimensionality>(block::position_in_grid(), grid::first_block_position() ); }; template <unsigned Dimensionality = 3> KAT_FD bool is_last_in_grid() { return equals<Dimensionality>(block::position_in_grid(), grid::last_block_position() ); }; /** * @brief Produces the linearization of a block's index in the grid. * * @note Remember a thread's index is a multi-dimensional entity, not a single linear value. 
The id is * the linearization of the index */ template <unsigned Dimensionality = 3> KAT_FD grid_dimension_t id_in_grid() { return kat::detail::row_major_linearization<Dimensionality>( position_in_grid(), grid::dimensions_in_blocks()); } template <unsigned Dimensionality = 3> KAT_FD grid_dimension_t id() { return id_in_grid(); } template <unsigned Dimensionality = 3> KAT_FD grid_block_dimension_t size() { return size(dimensions()); } KAT_FD position_t first_thread_position() { return position_t{0, 0, 0}; } template <unsigned Dimensionality = 3> KAT_FD position_t last_thread_position() { return grid_info::detail::last_position_for(blockDim); } template <unsigned Dimensionality = 3> KAT_FD grid_block_dimension_t num_full_warps() { return block::size<Dimensionality>() / warp_size; } template <unsigned Dimensionality = 3> KAT_FD grid_block_dimension_t num_warps() { return (block::size<Dimensionality>() + warp_size - 1) >> log_warp_size; // While this form of rounded-up-division may generally overflow, that's not possible // here, since CUDA block size is capped at 1024 as of 2019, and is unlikely to get close // to the maximum integer value. } KAT_FD grid_block_dimension_t id_of_first_warp() { return 0; } KAT_FD position_t index_of_first_warp() { return {0, 0, 0}; } KAT_FD grid_block_dimension_t id_of_last_warp() { return num_warps() - 1; } /** * @note assumes linear kernels only use the x dimension - which is a reasonable assumptions, * since the y and z dimensions are limited in extent by CUDA. */ KAT_FD bool is_linear() { return block::dimensions().y == 1 and block::dimensions().z == 1; } } // namespace block namespace thread_block = block; namespace grid { /** * Determines whether the grid's non-trivial dimensions - in blocks and in threads - are on the x axis only. * * @note One could consider y-only or z-only dimensions as linear; this definition was chosen for convenience * (and performance) and is used throughout this library */ KAT_FD bool is_linear() { return gridDim.y == 1 and gridDim.z == 1 and grid_info::block::is_linear(); } // TODO: Consider templatizing this on the dimensions too template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned num_warps() { return num_blocks<OuterDimensionality>() * block::num_warps<InnerDimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned num_threads() { return num_blocks<OuterDimensionality>() * block::size<InnerDimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned total_size() { return num_threads<OuterDimensionality, InnerDimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned num_warps_per_block() { return block::num_warps<Dimensionality>(); } } // namespace grid namespace warp { enum : unsigned { first_lane = 0, last_lane = warp_size - 1 }; KAT_FD unsigned size() { return warp_size; } KAT_FD unsigned length() { return warp_size; } } // namespace warp namespace thread { // TODO: Should we avoid reading alll of threadIdx and only take some of its fields? // The compiler might not optimize the read away. 
KAT_FD position_t position_in_block() { return threadIdx; } KAT_FD position_t position() { return position_in_block(); } KAT_FD position_t index_in_block() { return position(); } KAT_FD position_t index() { return position(); } template <unsigned Dimensionality = 3> KAT_FD bool is_first_in_block() { return equals<Dimensionality>(position(), block::first_thread_position()); } template <unsigned Dimensionality = 3> KAT_FD bool is_last_in_block() { return equals<Dimensionality>(position(), block::last_thread_position<Dimensionality>()); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD bool is_first_in_grid() { return block::is_first_in_grid<OuterDimensionality>() and thread::is_first_in_block<InnerDimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD bool is_last_in_grid() { return block::is_last_in_grid<OuterDimensionality>() and thread::is_last_in_block<InnerDimensionality>(); } /** * @brief Linearizes of a thread's position within its block. * * @param thread_position_in_block the Dimensionality-dimensional version of the thread index within its grid block, * represented in the 3-dimensional dimensions_t structure * @return The 1-d index of the specified thread within its block, when it's * flattened so that threads with identical z and y axis coordinates form a contiguous * sequence */ template <unsigned Dimensionality = 3> KAT_FD unsigned id_in_block(position_t thread_position_in_block) { return kat::detail::row_major_linearization<Dimensionality, unsigned>(thread_position_in_block, block::dimensions()); } template <unsigned Dimensionality = 3> KAT_FD unsigned id_in_block() { return id_in_block<Dimensionality>(thread::position_in_block()); } template <unsigned Dimensionality = 3> KAT_FD unsigned id() { return id_in_block<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD position_t position_in_grid(position_t block_position_in_grid, position_t thread_position_in_block) { return { (Dimensionality < 1) ? 0 : (block_position_in_grid.x * blockDim.x + thread_position_in_block.x), (Dimensionality < 2) ? 0 : (block_position_in_grid.y * blockDim.y + thread_position_in_block.y), (Dimensionality < 3) ? 0 : (block_position_in_grid.z * blockDim.z + thread_position_in_block.z), }; } template <unsigned OuterDimensionality = 3> KAT_FD unsigned id_in_grid(unsigned block_id_in_grid, unsigned thread_id_in_block) { return thread_id_in_block + block::size<OuterDimensionality>() * block_id_in_grid; } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned id_in_grid(position_t block_position_in_grid, position_t thread_position_in_block) { return thread::id_in_grid<OuterDimensionality>( thread::id_in_block<InnerDimensionality>(thread_position_in_block), block::id_in_grid<OuterDimensionality>(thread_position_in_block) ); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned id_in_grid() { return thread::id_in_grid<OuterDimensionality>( block::id_in_grid<OuterDimensionality>(), thread::id_in_block<InnerDimensionality>()); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD position_t position_in_grid() { constexpr const unsigned overall_dimensionality = (OuterDimensionality < InnerDimensionality) ? 
InnerDimensionality : OuterDimensionality; return thread::position_in_grid<overall_dimensionality>( block::position_in_grid(), thread::position_in_block()); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD position_t index_in_grid() { return position_in_grid<OuterDimensionality, InnerDimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned global_id() { return id_in_grid<OuterDimensionality, InnerDimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD position_t global_index() { return position_in_grid<OuterDimensionality, InnerDimensionality>(); } } // namespace thread namespace warp { template <unsigned Dimensionality = 3> KAT_FD unsigned id_in_block() { return grid_info::thread::id_in_block<Dimensionality>() / warp_size; } template <unsigned Dimensionality = 3> KAT_FD unsigned index_in_block() { return id_in_block<Dimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned id_in_grid() { return grid_info::thread::id_in_grid<OuterDimensionality, InnerDimensionality>() / warp_size; } template <unsigned Dimensionality> KAT_FD unsigned index() { return index_in_block<Dimensionality>(); } template <unsigned OuterDimensionality = 3, unsigned InnerDimensionality = 3> KAT_FD unsigned global_id() { return id_in_grid<OuterDimensionality, InnerDimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned id_of_first_lane() { constexpr const auto lane_index_mask = warp_size - 1; return thread::id_in_block<Dimensionality>() & lane_index_mask; } template <unsigned Dimensionality = 3> KAT_FD unsigned index_in_block_of_first_lane() { return id_of_first_lane<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned global_id_of_first_lane() { constexpr const auto lane_index_mask = warp_size - 1; return thread::global_id<Dimensionality>() & lane_index_mask; } template <unsigned Dimensionality = 3> KAT_FD unsigned index_in_grid_of_first_lane() { return warp::global_id_of_first_lane<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned id() { return id_in_block<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD bool is_first_in_block() { return warp::id_in_block<Dimensionality>() == block::id_of_first_warp(); } template <unsigned Dimensionality = 3> KAT_FD bool is_last_in_block() { return warp::id_in_block() == block::id_of_last_warp(); } template <unsigned Dimensionality = 3> KAT_FD bool is_first_in_grid() { return warp::is_first_in_block() and block::is_first_in_grid(); } template <unsigned Dimensionality = 3> KAT_FD bool is_last_in_grid() { return warp::is_last_in_block() and block::is_last_in_grid(); } } // namespace warp namespace lane { enum { half_warp_size = warp_size / 2 }; template <unsigned Dimensionality = 3> KAT_FD unsigned id(position_t thread_position) { // we could use a special register: // // return builtins::lane_index(); // // but apparently, retrieving a special register takes a good // number of clock cycles (why?!), so in practice, this might be // faster: constexpr const auto lane_id_mask = warp_size - 1; return thread::id_in_block<Dimensionality>(thread_position) & lane_id_mask; // ... but it's less obvious than the linear grid case, where // no linearization is required. 
} template <unsigned Dimensionality = 3> KAT_FD unsigned id_in_warp() { return id<Dimensionality>(threadIdx); } template <unsigned Dimensionality = 3> KAT_FD unsigned id() { return id_in_warp<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned index() { return id<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned index_in_warp() { return id<Dimensionality>(); } template <unsigned Dimensionality = 3> KAT_FD unsigned is_first() { return id_in_warp<Dimensionality>() == warp::first_lane; } template <unsigned Dimensionality = 3> KAT_FD unsigned is_last() { return id_in_warp<Dimensionality>() == warp::last_lane; } } // namespace lane namespace thread { template <unsigned Dimensionality = 3> KAT_FD bool is_first_in_warp() { return lane::id<Dimensionality>() == warp::first_lane; } template <unsigned Dimensionality = 3> KAT_FD bool is_last_in_warp() { return lane::id<Dimensionality>() == warp::last_lane; } } // namespace thread } // namespace grid_info // I couldn't use '1d, 2d, 3d since those aren't valid identifiers... namespace linear_grid { namespace grid_info { namespace grid { // TODO: Should we use the same return types as for the non-linear case? // For now, we don't, relying on the implicit convertibility of the // return types here to the general-case ones. But some of the types // are admittedly a bit fudged. KAT_FD grid_dimension_t num_blocks() { return gridDim.x; } KAT_FD grid_dimension_t dimensions_in_blocks() { return num_blocks(); } KAT_FD grid_dimension_t index_of_first_block() { return 0; } KAT_FD grid_dimension_t index_of_last_block() { return num_blocks() - 1; } KAT_FD grid_dimension_t first_block_position() { return index_of_first_block(); } KAT_FD grid_dimension_t first_last_position() { return index_of_last_block(); } } // namespace grid namespace block { using kat::grid_info::block::dimensions; KAT_FD unsigned index_in_grid() { return blockIdx.x; } KAT_FD grid_block_dimension_t index() { return index_in_grid(); } KAT_FD unsigned id_in_grid() { return index_in_grid(); } KAT_FD grid_block_dimension_t id() { return id_in_grid(); } KAT_FD grid_block_dimension_t position_in_grid() { return index_in_grid(); } KAT_FD bool is_first_in_grid() { return block::id_in_grid() == grid::index_of_first_block(); } KAT_FD bool is_last_in_grid() { return id() == grid::index_of_last_block(); } KAT_FD grid_block_dimension_t length() { return blockDim.x; } KAT_FD grid_block_dimension_t size() { return length(); } KAT_FD grid_block_dimension_t num_threads() { return length(); } KAT_FD unsigned num_full_warps() { return length() >> log_warp_size; } KAT_FD unsigned index_of_first_thread() { return 0; } KAT_FD unsigned index_of_last_thread() { return num_threads() - 1; } KAT_FD unsigned first_thread_position() { return index_of_first_thread(); } KAT_FD unsigned last_thread_position() { return index_of_last_thread(); } KAT_FD unsigned num_warps() { return (block::size() + warp_size - 1) >> log_warp_size; // While this form of rounded-up-division may generally overflow, that's not possible // here, since CUDA block size is capped at 1024 as of 2019, and is unlikely to get close // to the maximum integer value. } KAT_FD grid_block_dimension_t index_of_first_warp() { return 0; } KAT_FD grid_block_dimension_t index_of_last_warp() { return num_warps() - 1; } KAT_FD grid_block_dimension_t index_of_last_full_warp() { return num_full_warps() - 1; } KAT_FD bool is_linear() { return true; } /** * @note These are the dimensions of the grid in terms of threads. 
This means that a grid can have less blocks (or * even one block) in each dimension, but each block many have multiple threads, contributing to the overall dimension. */ KAT_FD dimensions_t dimensions_in_threads() { return dimensions_t{ gridDim.x * blockDim.x }; } } // namespace block namespace thread_block = block; namespace grid { KAT_FD unsigned num_warps() { return num_blocks() * block::num_warps(); } KAT_FD unsigned num_threads() { return num_blocks() * block::size(); } KAT_FD unsigned total_size() { return num_threads(); } KAT_FD unsigned num_warps_per_block() { return block::num_warps(); } } // namespace grid namespace warp { using kat::grid_info::warp::first_lane; using kat::grid_info::warp::last_lane; using kat::grid_info::warp::size; using kat::grid_info::warp::length; } namespace thread { KAT_FD grid_block_dimension_t index_in_block(uint3 position_in_block) { return position_in_block.x; } KAT_FD grid_block_dimension_t index_in_block() { return index_in_block(threadIdx); } KAT_FD grid_block_dimension_t id_in_block(uint3 position_in_block) { return index_in_block(position_in_block); } KAT_FD grid_block_dimension_t index() { return index_in_block(); } KAT_FD grid_block_dimension_t id_in_block() { return index_in_block(); } KAT_FD grid_block_dimension_t id() { return id_in_block(); } KAT_FD grid_block_dimension_t position() { return index_in_block(); } KAT_FD grid_block_dimension_t position_in_block() { return index_in_block(); } KAT_FD bool is_first_in_block() { return index_in_block() == block::first_thread_position(); } KAT_FD bool is_last_in_block() { return index_in_block() == block::last_thread_position(); } KAT_FD bool is_first_in_grid() { return block::is_first_in_grid() and thread::is_first_in_block(); } KAT_FD bool is_last_in_grid() { return block::is_last_in_grid() and thread::is_last_in_block(); } using ::kat::grid_info::thread::is_first_in_warp; using ::kat::grid_info::thread::is_last_in_warp; /** * Returns the global index of the thread - not within the block (the work group), but * considering all threads for the current kernel together - assuming a one-dimensional * grid. */ KAT_FD unsigned index_in_grid(grid_dimension_t block_index, grid_dimension_t thread_index) { return thread_index + block_index * block::size(); } KAT_FD unsigned id_in_grid(grid_dimension_t block_index, grid_dimension_t thread_index) { return index_in_grid(block_index, thread_index); } KAT_FD unsigned index_in_grid() { return index_in_grid(block::index(), index()); } KAT_FD unsigned id_in_grid() { return index_in_grid(); } KAT_FD unsigned global_index() { return index_in_grid(); } KAT_FD unsigned global_id() { return index_in_grid(); } /** * Use this for kernels in a 1-dimensional (linear) grid, in which each block of K * threads handles K * serialization_factor consecutive elements. That's pretty * common... (?) * * Anyway, each individual thread accesses data with a stride of K. 
* * @param serialization_factor The number of elements each thread would access * @return the initial position for a given thread */ KAT_FD unsigned block_stride_start_position(unsigned serialization_factor = 1) { return index() + serialization_factor * block::index() * block::length(); } } // namespace thread namespace warp { KAT_FD grid_block_dimension_t index_in_block() { return thread::index_in_block() >> log_warp_size; } KAT_FD grid_block_dimension_t index() { return index_in_block(); } KAT_FD grid_block_dimension_t id_in_block() { return index_in_block(); } KAT_FD grid_block_dimension_t id() { return id_in_block(); } KAT_FD unsigned index_in_grid() { return thread::index_in_grid() >> log_warp_size; } KAT_FD unsigned id_in_grid() { return index_in_grid(); } KAT_FD unsigned global_index() { return index_in_grid(); } KAT_FD unsigned global_id() { return id_in_grid(); } KAT_FD unsigned index_of_first_lane() { constexpr const auto lane_index_mask = warp_size - 1; return thread::index_in_block() & lane_index_mask; } KAT_FD unsigned index_in_block_of_first_lane() { return index_of_first_lane(); } KAT_FD unsigned global_index_of_first_lane() { constexpr const auto lane_index_mask = warp_size - 1; return thread::global_index() & lane_index_mask; } KAT_FD unsigned index_in_grid_of_first_lane() { return warp::global_index_of_first_lane(); } KAT_FD bool is_first_in_block() { return warp::index_in_block() == block::index_of_first_warp(); } KAT_FD bool is_last_in_block() { return warp::index_in_block() == block::index_of_last_warp(); } KAT_FD bool is_first_in_grid() { return warp::is_first_in_block() and block::is_first_in_grid(); } KAT_FD bool is_last_in_grid() { return warp::is_last_in_block() and block::is_last_in_grid(); } } // namespace warp namespace lane { // Note: Warps are strictly one-dimensional entities, // so within a warp, a lane's ID and its index are one and the // same thing. However... because we use thread indices to // obtain the lane index rather than the special register for warps, // directly - we have to separate the code for the linear-grid and // non-linear-grid cases. enum { half_warp_size = kat::grid_info::lane::half_warp_size }; KAT_FD unsigned id(unsigned thread_index) { // we could use a special register: // // return builtins::lane_index(); // // but apparently, retrieving a special register takes a good // number of clock cycles (why?!), so in practice, this is // probably faster: enum { lane_id_mask = warp_size - 1 }; return thread_index & lane_id_mask; } KAT_FD unsigned id_in_warp() { return id(threadIdx.x); } KAT_FD unsigned id() { return id_in_warp(); } KAT_FD unsigned index() { return id(); } KAT_FD unsigned index_in_warp() { return id(); } KAT_FD unsigned is_first() { return id_in_warp() == warp::first_lane; } KAT_FD unsigned is_last() { return id_in_warp() == warp::last_lane; } KAT_FHD unsigned id_in_half_warp(unsigned thread_or_lane_index) { enum { half_warp_index_mask = half_warp_size - 1 }; return thread_or_lane_index & half_warp_index_mask; } KAT_FD unsigned id_in_half_warp() { return id_in_half_warp(threadIdx.x); } KAT_FD unsigned index_in_half_warp() { return id_in_half_warp(threadIdx.x); } KAT_FD unsigned is_in_first_half_warp() { return id_in_warp() < half_warp_size; } KAT_FD unsigned is_in_second_half_warp() { return id_in_warp() >= half_warp_size; } } // namespace lane } // namespace grid_info } // namespace linear_grid } // namespace kat #endif // CUDA_KAT_ON_DEVICE_GRID_INFO_CUH_
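// --------------------------------------------------------------------------
// Usage illustration (not part of the header above): a grid-stride SAXPY
// kernel written against the linear-grid one-liners defined here, replacing
// the usual blockIdx.x * blockDim.x + threadIdx.x arithmetic. This is a
// hedged sketch; the include path is an assumption inferred from the include
// guard, and only thread::global_index() and grid::num_threads() are taken
// from the code above.
// --------------------------------------------------------------------------
#include <kat/on_device/grid_info.cuh>  // assumed path for this header

namespace gi = kat::linear_grid::grid_info;

__global__ void saxpy_sketch(float a, const float *x, float *y, unsigned n)
{
    // Each thread walks a strided range that collectively covers all n elements.
    for (unsigned i = gi::thread::global_index(); i < n; i += gi::grid::num_threads())
    {
        y[i] = a * x[i] + y[i];
    }
}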
#include <thrust/device_ptr.h> #include <thrust/scan.h> #include <cuComplex.h> #include "../cuspreadinterp.h" #include "../memtransfer.h" using namespace std; int CUFINUFFT_SPREAD1D(int nf1, CUCPX* d_fw, int M, FLT *d_kx, CUCPX *d_c, CUFINUFFT_PLAN d_plan) /* This c function is written for only doing 1D spreading. See test/spread1d_test.cu for usage. note: not allocate,transfer and free memories on gpu. Melody Shih 11/21/21 */ { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); d_plan->kx = d_kx; d_plan->c = d_c; d_plan->fw = d_fw; int ier; d_plan->nf1 = nf1; d_plan->M = M; d_plan->maxbatchsize = 1; cudaEventRecord(start); ier = ALLOCGPUMEM1D_PLAN(d_plan); ier = ALLOCGPUMEM1D_NUPTS(d_plan); if(d_plan->opts.gpu_method == 1){ ier = CUSPREAD1D_NUPTSDRIVEN_PROP(nf1,M,d_plan); if(ier != 0 ){ printf("error: cuspread1d_nuptsdriven_prop, method(%d)\n", d_plan->opts.gpu_method); return ier; } } if(d_plan->opts.gpu_method == 2){ ier = CUSPREAD1D_SUBPROB_PROP(nf1,M,d_plan); if(ier != 0 ){ printf("error: cuspread1d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method); return ier; } } #ifdef TIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] Obtain Spread Prop\t %.3g ms\n", milliseconds); #endif cudaEventRecord(start); ier = CUSPREAD1D(d_plan,1); #ifdef TIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] Spread (%d)\t\t %5.3f ms\n", d_plan->opts.gpu_method, milliseconds); #endif cudaEventRecord(start); FREEGPUMEMORY1D(d_plan); #ifdef TIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] Free GPU memory\t %.3g ms\n", milliseconds); #endif return ier; } int CUSPREAD1D(CUFINUFFT_PLAN d_plan, int blksize) /* A wrapper for different spreading methods. 
Methods available: (1) Non-uniform points driven (2) Subproblem Melody Shih 11/21/21 */ { int nf1 = d_plan->nf1; int M = d_plan->M; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ier; switch(d_plan->opts.gpu_method) { case 1: { cudaEventRecord(start); ier = CUSPREAD1D_NUPTSDRIVEN(nf1, M, d_plan, blksize); if(ier != 0 ){ cout<<"error: cnufftspread1d_gpu_nuptsdriven"<<endl; return 1; } } break; case 2: { cudaEventRecord(start); ier = CUSPREAD1D_SUBPROB(nf1, M, d_plan, blksize); if(ier != 0 ){ cout<<"error: cnufftspread1d_gpu_subprob"<<endl; return 1; } } break; default: cout<<"error: incorrect method, should be 1,2"<<endl; return 2; } #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); cout<<"[time ]"<< " Spread " << milliseconds <<" ms"<<endl; #endif return ier; } int CUSPREAD1D_NUPTSDRIVEN_PROP(int nf1, int M, CUFINUFFT_PLAN d_plan) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if(d_plan->opts.gpu_sort){ int bin_size_x=d_plan->opts.gpu_binsizex; if(bin_size_x < 0){ cout<<"error: invalid binsize (binsizex) = ("<<bin_size_x<<")"<<endl; return 1; } int numbins = ceil((FLT) nf1/bin_size_x); #ifdef DEBUG cout<<"[debug ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"]"<<endl; cout<<"[debug ] numbins = ["<<numbins<<"]"<<endl; #endif FLT* d_kx = d_plan->kx; #ifdef DEBUG FLT *h_kx; h_kx = (FLT*)malloc(M*sizeof(FLT)); checkCudaErrors(cudaMemcpy(h_kx,d_kx,M*sizeof(FLT), cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ cout<<"[debug ] "; cout <<"("<<setw(3)<<h_kx[i]<<")"<<endl; } #endif int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_sortidx = d_plan->sortidx; int *d_idxnupts = d_plan->idxnupts; int pirange = d_plan->spopts.pirange; cudaEventRecord(start); checkCudaErrors(cudaMemset(d_binsize,0,numbins*sizeof(int))); CalcBinSize_noghost_1d<<<(M+1024-1)/1024, 1024>>>(M,nf1, bin_size_x,numbins,d_binsize,d_kx,d_sortidx,pirange); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcBinSize_noghost_1d \t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binsize;// For debug h_binsize = (int*)malloc(numbins*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins*sizeof(int), cudaMemcpyDeviceToHost)); cout<<"[debug ] bin size:"<<endl; cout<<"[debug ] "; for(int i=0; i<numbins; i++){ if(i!=0) cout<<" "; cout <<"bin["<<setw(1)<<i<<"]="<<h_binsize[i]; } cout<<endl; free(h_binsize); cout<<"[debug ] ------------------------------------------------"<<endl; int *h_sortidx; h_sortidx = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_sortidx,d_sortidx,M*sizeof(int), cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ if(h_sortidx[i] < 0){ cout<<"[debug ] "; cout <<"point["<<setw(3)<<i<<"]="<<setw(3)<<h_sortidx[i]<<endl; cout<<"[debug ] "; printf("(%10.10f) ", RESCALE(h_kx[i],nf1,pirange)); printf("(%10.10f) ", RESCALE(h_kx[i],nf1,pirange)/32); printf("(%f)\n", floor(RESCALE(h_kx[i],nf1,pirange)/32)); } } #endif cudaEventRecord(start); int n=numbins; thrust::device_ptr<int> d_ptr(d_binsize); thrust::device_ptr<int> d_result(d_binstartpts); thrust::exclusive_scan(d_ptr, d_ptr + n, d_result); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel BinStartPts_1d \t\t\t%.3g ms\n", 
milliseconds); #endif #ifdef DEBUG int *h_binstartpts; h_binstartpts = (int*)malloc((numbins)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binstartpts,d_binstartpts,(numbins) *sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] Result of scan bin_size array:"<<endl; cout<<"[debug ] "; for(int i=0; i<numbins; i++){ if(i!=0) cout<<" "; cout <<"bin["<<setw(1)<<i<<"]="<<h_binstartpts[i]; } cout<<endl; free(h_binstartpts); cout<<"[debug ] ------------------------------------------------"<<endl; #endif cudaEventRecord(start); CalcInvertofGlobalSortIdx_1d<<<(M+1024-1)/1024,1024>>>(M,bin_size_x, numbins,d_binstartpts,d_sortidx,d_kx,d_idxnupts,pirange,nf1); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcInvertofGlobalSortIdx_1d \t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_idxnupts; h_idxnupts = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,M*sizeof(int), cudaMemcpyDeviceToHost)); for (int i=0; i<M; i++){ cout <<"[debug ] idx="<< h_idxnupts[i]<<endl; } free(h_idxnupts); #endif }else{ int *d_idxnupts = d_plan->idxnupts; cudaEventRecord(start); TrivialGlobalSortIdx_1d<<<(M+1024-1)/1024, 1024>>>(M,d_idxnupts); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel TrivialGlobalSortIDx_1d \t\t%.3g ms\n", milliseconds); #endif } return 0; } int CUSPREAD1D_NUPTSDRIVEN(int nf1, int M, CUFINUFFT_PLAN d_plan, int blksize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 threadsPerBlock; dim3 blocks; int ns=d_plan->spopts.nspread; // psi's support in terms of number of cells int pirange=d_plan->spopts.pirange; int *d_idxnupts=d_plan->idxnupts; FLT es_c=d_plan->spopts.ES_c; FLT es_beta=d_plan->spopts.ES_beta; FLT sigma=d_plan->spopts.upsampfac; FLT* d_kx = d_plan->kx; CUCPX* d_c = d_plan->c; CUCPX* d_fw = d_plan->fw; threadsPerBlock.x = 16; threadsPerBlock.y = 1; blocks.x = (M + threadsPerBlock.x - 1)/threadsPerBlock.x; blocks.y = 1; cudaEventRecord(start); if(d_plan->opts.gpu_kerevalmeth){ for(int t=0; t<blksize; t++){ Spread_1d_NUptsdriven_Horner<<<blocks, threadsPerBlock>>>(d_kx, d_c+t*M, d_fw+t*nf1, M, ns, nf1, sigma, d_idxnupts, pirange); } }else{ for(int t=0; t<blksize; t++){ Spread_1d_NUptsdriven<<<blocks, threadsPerBlock>>>(d_kx, d_c+t*M, d_fw+t*nf1, M, ns, nf1, es_c, es_beta, d_idxnupts, pirange); } } #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Spread_1d_NUptsdriven (%d)\t%.3g ms\n", milliseconds, d_plan->opts.gpu_kerevalmeth); #endif return 0; } int CUSPREAD1D_SUBPROB_PROP(int nf1, int M, CUFINUFFT_PLAN d_plan) /* This function determines the properties for spreading that are independent of the strength of the nodes, only relates to the locations of the nodes, which only needs to be done once. 
*/ { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; int bin_size_x=d_plan->opts.gpu_binsizex; if(bin_size_x < 0){ cout<<"error: invalid binsize (binsizex) = ("; cout<<bin_size_x<<")"<<endl; return 1; } int numbins = ceil((FLT) nf1/bin_size_x); #ifdef DEBUG cout<<"[debug ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"]"<<endl; cout<<"[debug ] numbins = ["<<numbins<<"]"<<endl; #endif FLT* d_kx = d_plan->kx; #ifdef DEBUG FLT *h_kx; h_kx = (FLT*)malloc(M*sizeof(FLT)); checkCudaErrors(cudaMemcpy(h_kx,d_kx,M*sizeof(FLT),cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ cout<<"[debug ]"; cout <<"("<<setw(3)<<h_kx[i]<<")"<<endl; } #endif int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_sortidx = d_plan->sortidx; int *d_numsubprob = d_plan->numsubprob; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_idxnupts = d_plan->idxnupts; int *d_subprob_to_bin = NULL; int pirange=d_plan->spopts.pirange; cudaEventRecord(start); checkCudaErrors(cudaMemset(d_binsize,0,numbins*sizeof(int))); CalcBinSize_noghost_1d<<<(M+1024-1)/1024, 1024>>>(M,nf1,bin_size_x, numbins,d_binsize,d_kx,d_sortidx,pirange); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcBinSize_noghost_1d \t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binsize;// For debug h_binsize = (int*)malloc(numbins*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] bin size:"<<endl; cout<<"[debug ] "; for(int i=0; i<numbins; i++){ if(i!=0) cout<<" "; cout <<"bin["<<setw(3)<<i<<"]="<<h_binsize[i]; } free(h_binsize); cout<<"[debug ] ----------------------------------------------------"<<endl; #endif #ifdef DEBUG int *h_sortidx; h_sortidx = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_sortidx,d_sortidx,M*sizeof(int), cudaMemcpyDeviceToHost)); cout<<"[debug ]"; for(int i=0; i<M; i++){ cout <<"[debug] point["<<setw(3)<<i<<"]="<<setw(3)<<h_sortidx[i]<<endl; } #endif cudaEventRecord(start); int n=numbins; thrust::device_ptr<int> d_ptr(d_binsize); thrust::device_ptr<int> d_result(d_binstartpts); thrust::exclusive_scan(d_ptr, d_ptr + n, d_result); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel BinStartPts_1d \t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binstartpts; h_binstartpts = (int*)malloc(numbins*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binstartpts,d_binstartpts,numbins*sizeof(int), cudaMemcpyDeviceToHost)); cout<<"[debug ] Result of scan bin_size array:"<<endl; cout<<"[debug ] "; for(int i=0; i<numbins; i++){ if(i!=0) cout<<" "; cout <<"bin["<<setw(3)<<i<<"] = "<<setw(2)<<h_binstartpts[i]; } free(h_binstartpts); cout<<"[debug ] ---------------------------------------------------"<<endl; #endif cudaEventRecord(start); CalcInvertofGlobalSortIdx_1d<<<(M+1024-1)/1024,1024>>>(M,bin_size_x, numbins,d_binstartpts,d_sortidx,d_kx,d_idxnupts,pirange,nf1); #ifdef DEBUG int *h_idxnupts; h_idxnupts = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,M*sizeof(int), cudaMemcpyDeviceToHost)); for (int i=0; i<M; i++){ cout <<"[debug ] idx="<< h_idxnupts[i]<<endl; } free(h_idxnupts); #endif cudaEventRecord(start); CalcSubProb_1d<<<(M+1024-1)/1024, 1024>>>(d_binsize,d_numsubprob, 
maxsubprobsize,numbins); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcSubProb_1d\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int* h_numsubprob; h_numsubprob = (int*) malloc(n*sizeof(int)); checkCudaErrors(cudaMemcpy(h_numsubprob,d_numsubprob,numbins* sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] "; for(int i=0; i<numbins; i++){ if(i!=0) cout<<" "; cout <<"nsub["<<setw(3)<<i<<"] = "<<setw(2)<<h_numsubprob[i]; } cout << endl; free(h_numsubprob); #endif d_ptr = thrust::device_pointer_cast(d_numsubprob); d_result = thrust::device_pointer_cast(d_subprobstartpts+1); thrust::inclusive_scan(d_ptr, d_ptr + n, d_result); checkCudaErrors(cudaMemset(d_subprobstartpts,0,sizeof(int))); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Scan Subprob array\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] Subproblem start points\n"); int* h_subprobstartpts; h_subprobstartpts = (int*) malloc((n+1)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprobstartpts,d_subprobstartpts, (n+1)*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] "; for(int i=0; i<numbins; i++){ if(i!=0) cout<<" "; cout <<"nsub["<<setw(3)<<i<<"] = "<<setw(2)<<h_subprobstartpts[i]; } cout << endl; printf("[debug ] Total number of subproblems = %d\n", h_subprobstartpts[n]); free(h_subprobstartpts); #endif cudaEventRecord(start); int totalnumsubprob; checkCudaErrors(cudaMemcpy(&totalnumsubprob,&d_subprobstartpts[n], sizeof(int),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMalloc(&d_subprob_to_bin,totalnumsubprob*sizeof(int))); MapBintoSubProb_1d<<<(numbins+1024-1)/1024, 1024>>>( d_subprob_to_bin,d_subprobstartpts,d_numsubprob,numbins); assert(d_subprob_to_bin != NULL); if (d_plan->subprob_to_bin != NULL) cudaFree(d_plan->subprob_to_bin); d_plan->subprob_to_bin = d_subprob_to_bin; assert(d_plan->subprob_to_bin != NULL); d_plan->totalnumsubprob = totalnumsubprob; #ifdef DEBUG printf("[debug ] Map Subproblem to Bins\n"); int* h_subprob_to_bin; h_subprob_to_bin = (int*) malloc((totalnumsubprob)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprob_to_bin,d_subprob_to_bin, (totalnumsubprob)*sizeof(int),cudaMemcpyDeviceToHost)); for(int j=0; j<totalnumsubprob; j++){ cout<<"[debug ] "; cout <<"nsub["<<j<<"] = "<<setw(2)<<h_subprob_to_bin[j]; cout<<endl; } free(h_subprob_to_bin); #endif #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Subproblem to Bin map\t\t%.3g ms\n", milliseconds); #endif return 0; } int CUSPREAD1D_SUBPROB(int nf1, int M, CUFINUFFT_PLAN d_plan, int blksize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ns=d_plan->spopts.nspread;// psi's support in terms of number of cells FLT es_c=d_plan->spopts.ES_c; FLT es_beta=d_plan->spopts.ES_beta; int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; // assume that bin_size_x > ns/2; int bin_size_x=d_plan->opts.gpu_binsizex; int numbins = ceil((FLT) nf1/bin_size_x); FLT* d_kx = d_plan->kx; CUCPX* d_c = d_plan->c; CUCPX* d_fw = d_plan->fw; int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_numsubprob = d_plan->numsubprob; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_idxnupts = d_plan->idxnupts; int totalnumsubprob=d_plan->totalnumsubprob; int *d_subprob_to_bin = d_plan->subprob_to_bin; int 
pirange=d_plan->spopts.pirange; FLT sigma=d_plan->opts.upsampfac; cudaEventRecord(start); size_t sharedplanorysize = (bin_size_x+2*(int)ceil(ns/2.0))*sizeof(CUCPX); if(sharedplanorysize > 49152){ cout<<"error: not enough shared memory"<<endl; return 1; } if(d_plan->opts.gpu_kerevalmeth){ for(int t=0; t<blksize; t++){ Spread_1d_Subprob_Horner<<<totalnumsubprob, 256, sharedplanorysize>>>(d_kx, d_c+t*M, d_fw+t*nf1, M, ns, nf1, sigma, d_binstartpts, d_binsize, bin_size_x, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins, d_idxnupts, pirange); } }else{ for(int t=0; t<blksize; t++){ Spread_1d_Subprob<<<totalnumsubprob, 256, sharedplanorysize>>>( d_kx, d_c+t*M, d_fw+t*nf1, M, ns, nf1, es_c, es_beta, sigma, d_binstartpts, d_binsize, bin_size_x, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins, d_idxnupts, pirange); } } #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Spread_1d_Subprob (%d)\t\t%.3g ms\n", milliseconds, d_plan->opts.gpu_kerevalmeth); #endif return 0; }
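// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original source): the
// subproblem setup above turns per-bin point counts into bin start offsets
// with an exclusive scan, and per-bin subproblem counts into subproblem start
// offsets with an inclusive scan written one slot to the right, so the last
// entry holds the total. The standalone program below reproduces just that
// prefix-sum pattern with Thrust on made-up sizes; all names are local to
// this sketch.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>

int main() {
	// Pretend 5 bins got these point counts from a CalcBinSize-style kernel.
	std::vector<int> h_binsize{3, 0, 5, 2, 4};
	thrust::device_vector<int> d_binsize(h_binsize.begin(), h_binsize.end());
	thrust::device_vector<int> d_binstartpts(d_binsize.size());

	// Exclusive scan: each bin's start offset is the sum of the earlier bin sizes.
	thrust::exclusive_scan(d_binsize.begin(), d_binsize.end(), d_binstartpts.begin());

	// Per-bin subproblem counts, e.g. ceil(binsize / maxsubprobsize) with maxsubprobsize = 3.
	std::vector<int> h_numsubprob{1, 0, 2, 1, 2};
	thrust::device_vector<int> d_numsubprob(h_numsubprob.begin(), h_numsubprob.end());
	thrust::device_vector<int> d_subprobstartpts(d_numsubprob.size() + 1, 0);

	// Inclusive scan written one slot to the right, element 0 kept at zero;
	// the last element is then the total number of subproblems.
	thrust::inclusive_scan(d_numsubprob.begin(), d_numsubprob.end(),
	                       d_subprobstartpts.begin() + 1);

	std::vector<int> h_binstartpts(d_binstartpts.size());
	thrust::copy(d_binstartpts.begin(), d_binstartpts.end(), h_binstartpts.begin());
	for (size_t i = 0; i < h_binstartpts.size(); i++)
		printf("binstartpts[%d] = %d\n", (int)i, h_binstartpts[i]);

	std::vector<int> h_subprobstartpts(d_subprobstartpts.size());
	thrust::copy(d_subprobstartpts.begin(), d_subprobstartpts.end(), h_subprobstartpts.begin());
	printf("total number of subproblems = %d\n", h_subprobstartpts.back());
	return 0;
}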
namespace anakin{ namespace saber{ const int BLOCK_SIZE = 32; template <typename dtype> __global__ void concat_impl_cuda(const int nthreads, const dtype* in_data, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; out_data[top_index] = in_data[index]; } } template <typename dtype> __global__ void concat_impl_2d_impl(const int inner_size, const int num_concats, const dtype* in_data, const int concat_size, const int out_concat_axis, const int offset_concat_axis, dtype* out_data) { int idx_inner = threadIdx.x + blockIdx.x * blockDim.x; int idx_outer = threadIdx.y + blockIdx.y * blockDim.y; if (idx_inner < inner_size && idx_outer < num_concats) { int idx_input = idx_outer * inner_size + idx_inner; int idx_output = (idx_outer * out_concat_axis + offset_concat_axis) * \ concat_size + idx_inner; out_data[idx_output] = in_data[idx_input]; } } template <> SaberStatus SaberConcat<NV, AK_FLOAT>::create(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param, Context<NV>& ctx) { _num_concats = inputs[0]->count_valid(0, param.axis); _concat_input_size = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims()); return SaberSuccess; } template <> SaberStatus SaberConcat<NV, AK_FLOAT>::init(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param, Context<NV> &ctx) { // get context this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus SaberConcat<NV, AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) { cudaStream_t stream = this->_ctx->get_compute_stream(); int input_size = inputs.size(); //! get output data, valid shape and stride shape OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data(); int offset_concat_axis = 0; Shape out_shape = outputs[0]->valid_shape(); const int out_concat_axis = out_shape[param.axis]; bool out_cont_flag = outputs[0]->is_continue_mem(); bool in_cont_flag = inputs[0]->is_continue_mem(); for (int i = 1; i < input_size; ++i) { in_cont_flag &= inputs[i]->is_continue_mem(); } //! 
inputs and outputs are all with continuous memory if (in_cont_flag && out_cont_flag){ for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); //std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]}; const OpDataType* in_data = (const OpDataType*)inputs[i]->data(); const int in_concat_axis = in_shape[param.axis]; const int in_concat_size = in_concat_axis * _concat_input_size; const int nthreads = in_concat_size * _num_concats; float ratio = (float)in_concat_size / _num_concats; bool is_balance = (ratio > 0.1 && ratio < 10); if (is_balance) { int block_x = BLOCK_SIZE; int block_y = BLOCK_SIZE; int grid_x = (in_concat_size + block_x - 1) / block_x; int grid_y = (_num_concats + block_y - 1) / block_y; dim3 block(block_x, block_y); dim3 grid(grid_x, grid_y); concat_impl_2d_impl<OpDataType><<<grid, block, 0, stream>>>( in_concat_size, _num_concats, in_data, _concat_input_size, out_concat_axis, offset_concat_axis, out_data ); } else { // NOLINT_NEXT_LINE(whitespace/operators) concat_impl_cuda<OpDataType><<<CUDA_GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( \ nthreads, in_data, _num_concats, _concat_input_size, \ out_concat_axis, in_concat_axis, offset_concat_axis, out_data); } offset_concat_axis += in_concat_axis; } } else { //! inputs or outputs memory is not continuous Shape offset_out = outputs[0]->offset(); Tensor<NV> tsub; for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); tsub.share_sub_buffer(*outputs[0], in_shape, offset_out); offset_out[param.axis] += in_shape[param.axis]; tsub.async_copy_from(*inputs[i], stream); } } return SaberSuccess; } template <> SaberStatus SaberConcat<NV, AK_INT8>::create(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param, Context<NV>& ctx) { _num_concats = inputs[0]->count_valid(0, param.axis); _concat_input_size = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims()); _input_v.resize(inputs.size()); for (int i = 0; i < inputs.size(); ++i) { if (inputs[i]->get_dtype() == AK_FLOAT) { _input_v[i].re_alloc(inputs[i]->valid_shape(), AK_INT8); } else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW_C4) { Shape new_shape = Shape({inputs[i]->num(), inputs[i]->channel(), inputs[i]->height(), inputs[i]->width()}, Layout_NCHW); _input_v[i].re_alloc(new_shape, AK_INT8); } else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW) { // good, nothing to do } else { LOG(FATAL) << "Not support this situation, pls contact the r&d."; } } if (outputs[0]->get_dtype() == AK_FLOAT) { _output.re_alloc(outputs[0]->valid_shape(), AK_INT8); _output.set_scale(outputs[0]->get_scale()); } else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) { Shape new_shape = outputs[0]->valid_shape(); new_shape.set_layout(Layout_NCHW); _output.re_alloc(new_shape, AK_INT8); _output.set_scale(outputs[0]->get_scale()); } else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) { // good, nothing to do. 
} else { LOG(FATAL) << "Not support this situation, pls contact the r&d."; } return SaberSuccess; } template <> SaberStatus SaberConcat<NV, AK_INT8>::init(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param, Context<NV> &ctx) { // get context this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus SaberConcat<NV, AK_INT8>::dispatch(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) { cudaStream_t stream = this->_ctx->get_compute_stream(); int input_size = inputs.size(); //! get output data, valid shape and stride shape char* out_data = nullptr; if (outputs[0]->get_dtype() == AK_FLOAT) { out_data = (char*)_output.mutable_data(); } else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) { out_data = (char*)_output.mutable_data(); } else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) { out_data = (char*)outputs[0]->mutable_data(); } else { LOG(FATAL) << "Not support this situation, pls contact the r&d."; } int offset_concat_axis = 0; Shape out_shape = outputs[0]->valid_shape(); const int out_concat_axis = out_shape[param.axis]; //! inputs and outputs are all with continuous memory for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); //std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]}; const char* in_data = nullptr; if (inputs[i]->get_dtype() == AK_FLOAT) { flatten_calibrate<NV, char, float> (_input_v[i], *inputs[i], *_ctx); in_data = (char*)_input_v[i].mutable_data(); } else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW_C4) { convert_nchwc4_to_nchw<NV>(_input_v[i], *inputs[i], *_ctx); in_data = (char*)_input_v[i].mutable_data(); } else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW) { in_data = (char*)inputs[i]->mutable_data(); } else { LOG(FATAL) << "Not support this situation, pls contact the r&d."; } const int in_concat_axis = in_shape[param.axis]; const int in_concat_size = in_concat_axis * _concat_input_size; const int nthreads = in_concat_size * _num_concats; float ratio = (float)in_concat_size / _num_concats; bool is_balance = (ratio > 0.1 && ratio < 10); if (is_balance) { int block_x = BLOCK_SIZE; int block_y = BLOCK_SIZE; int grid_x = (in_concat_size + block_x - 1) / block_x; int grid_y = (_num_concats + block_y - 1) / block_y; dim3 block(block_x, block_y); dim3 grid(grid_x, grid_y); concat_impl_2d_impl<char><<<grid, block, 0, stream>>>( in_concat_size, _num_concats, in_data, _concat_input_size, out_concat_axis, offset_concat_axis, out_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) concat_impl_cuda<char><<<CUDA_GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( nthreads, in_data, _num_concats, _concat_input_size, out_concat_axis, in_concat_axis, offset_concat_axis, out_data); } offset_concat_axis += in_concat_axis; } if (outputs[0]->get_dtype() == AK_FLOAT) { flatten_calibrate<NV, float, char>(*outputs[0], _output, *_ctx); } else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) { convert_nchw_to_nchwc4<NV>(*outputs[0], _output, *_ctx); } else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) { // good, nothing to be done; } else { LOG(FATAL) << "Not support this situation, pls contact the r&d."; } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_HALF); } //namespace 
anakin } //namespace anakin
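// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original source): the
// 1-D concat kernel above treats each input as (num_concats x in_concat_axis
// x concat_size) and scatters it into an output laid out as (num_concats x
// out_concat_axis x concat_size), shifted by offset_concat_axis along the
// concatenation axis. The toy program below re-implements only that index
// mapping for two float inputs; kernel name and sizes are invented for the
// sketch.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void concat_1d(const float* in, int n, int num_concats, int concat_size,
                          int in_axis, int out_axis, int axis_offset, float* out) {
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
		const int per_slice = concat_size * in_axis;   // elements per outer slice of this input
		const int slice     = i / per_slice;           // which outer slice
		const int within    = i % per_slice;           // position inside the slice
		out[within + (slice * out_axis + axis_offset) * concat_size] = in[i];
	}
}

int main() {
	const int num_concats = 2, concat_size = 3;        // outer x inner sizes
	const int axis_a = 2, axis_b = 1, out_axis = axis_a + axis_b;
	std::vector<float> a(num_concats * axis_a * concat_size), b(num_concats * axis_b * concat_size);
	for (size_t i = 0; i < a.size(); i++) a[i] = (float)i;
	for (size_t i = 0; i < b.size(); i++) b[i] = 100.0f + (float)i;

	float *d_a, *d_b, *d_out;
	const size_t out_bytes = num_concats * out_axis * concat_size * sizeof(float);
	cudaMalloc(&d_a, a.size() * sizeof(float));
	cudaMalloc(&d_b, b.size() * sizeof(float));
	cudaMalloc(&d_out, out_bytes);
	cudaMemcpy(d_a, a.data(), a.size() * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b.data(), b.size() * sizeof(float), cudaMemcpyHostToDevice);

	// Scatter input A at axis offset 0, then input B right after it at offset axis_a.
	concat_1d<<<1, 64>>>(d_a, (int)a.size(), num_concats, concat_size, axis_a, out_axis, 0, d_out);
	concat_1d<<<1, 64>>>(d_b, (int)b.size(), num_concats, concat_size, axis_b, out_axis, axis_a, d_out);

	std::vector<float> out(num_concats * out_axis * concat_size);
	cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);
	for (size_t i = 0; i < out.size(); i++) printf("%g ", out[i]);
	printf("\n");
	cudaFree(d_a); cudaFree(d_b); cudaFree(d_out);
	return 0;
}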
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp" #include "HugeCTR/include/embeddings/hybrid_embedding/frequent_embedding.hpp" #include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embedding.hpp" #include "HugeCTR/include/embeddings/hybrid_embedding/model.hpp" #include "hybrid_embedding_cpu.hpp" #include "test_common.cuh" /******************** Infrequent embedding: model indices ********************/ template <typename dtype, typename emtype> class CalculateModelIndicesTest : public HybridEmbeddingUnitTest<dtype, emtype> { public: CalculateModelIndicesTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size, size_t seed = 1234ll) : HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {} void run() { /* Compute expected results on host */ HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size, this->category_location, this->category_frequent_index, this->samples); cpu_embedding.calculate_infrequent_model_indices(); /* Compute indices */ this->build_infrequent(); std::vector<std::vector<uint32_t>> h_model_indices(this->num_instances); std::vector<std::vector<uint32_t>> h_model_indices_offsets(this->num_instances); for (size_t i = 0; i < this->num_instances; i++) { this->infrequent_embeddings[i].set_current_indices(&this->infrequent_embedding_indices[i], this->stream); this->infrequent_embeddings[i].indices_->calculate_model_indices(this->stream); download_tensor(h_model_indices[i], this->infrequent_embeddings[i].indices_->model_indices_, this->stream); download_tensor(h_model_indices_offsets[i], this->infrequent_embeddings[i].indices_->model_indices_offsets_, this->stream); } /* Compare */ for (size_t i = 0; i < this->num_instances; i++) { h_model_indices[i].resize(h_model_indices_offsets[i][this->num_instances]); EXPECT_THAT(h_model_indices[i], ::testing::ElementsAreArray(cpu_embedding.model_indices[i])); EXPECT_THAT(h_model_indices_offsets[i], ::testing::ElementsAreArray(cpu_embedding.model_indices_offsets[i])); } } }; /******************* Infrequent embedding: network indices *******************/ template <typename dtype, typename emtype> class CalculateNetworkIndicesTest : public HybridEmbeddingUnitTest<dtype, emtype> { public: CalculateNetworkIndicesTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size, size_t seed = 1234ll) : HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {} void run() { /* Compute expected results on host */ HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size, this->category_location, this->category_frequent_index, this->samples); cpu_embedding.calculate_infrequent_network_indices(); /* Compute indices */ this->build_infrequent(); std::vector<std::vector<uint32_t>> h_network_indices(this->num_instances); std::vector<std::vector<uint32_t>> h_network_indices_offsets(this->num_instances); for (size_t i = 0; i < this->num_instances; i++) { this->infrequent_embeddings[i].set_current_indices(&this->infrequent_embedding_indices[i], this->stream); this->infrequent_embeddings[i].indices_->calculate_network_indices(80, this->stream); download_tensor(h_network_indices[i], this->infrequent_embeddings[i].indices_->network_indices_, this->stream); download_tensor(h_network_indices_offsets[i], this->infrequent_embeddings[i].indices_->network_indices_offsets_, this->stream); } /* Compare */ for (size_t i = 0; i < this->num_instances; i++) { h_network_indices[i].resize(h_network_indices_offsets[i][this->num_instances]); EXPECT_THAT(h_network_indices[i], 
::testing::ElementsAreArray(cpu_embedding.network_indices[i])); EXPECT_THAT(h_network_indices_offsets[i], ::testing::ElementsAreArray(cpu_embedding.network_indices_offsets[i])); } } }; /**************** Frequent embedding: frequent sample indices ****************/ template <typename dtype, typename emtype> class CalculateFrequentSampleIndicesTest : public HybridEmbeddingUnitTest<dtype, emtype> { public: CalculateFrequentSampleIndicesTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size, size_t seed = 1234ll) : HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {} void run() { /* Compute expected results on host */ HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size, this->category_location, this->category_frequent_index, this->samples); cpu_embedding.calculate_frequent_sample_indices(); /* Compute indices */ this->build_frequent(); std::vector<std::vector<uint32_t>> h_frequent_sample_indices(this->num_instances); for (size_t i = 0; i < this->num_instances; i++) { this->frequent_embeddings[i].set_current_indices(&this->frequent_embedding_indices[i], this->stream); this->frequent_embeddings[i].indices_->calculate_frequent_sample_indices(this->stream); download_tensor(h_frequent_sample_indices[i], this->frequent_embeddings[i].indices_->frequent_sample_indices_, this->stream); } /* Compare */ for (size_t i = 0; i < this->num_instances; i++) { uint32_t num_frequent_sample_indices; HCTR_LIB_THROW(cudaMemcpyAsync( &num_frequent_sample_indices, this->frequent_embeddings[i].indices_->d_num_frequent_sample_indices_.get_ptr(), sizeof(uint32_t), cudaMemcpyDeviceToHost, this->stream)); HCTR_LIB_THROW(cudaStreamSynchronize(this->stream)); h_frequent_sample_indices[i].resize(num_frequent_sample_indices); EXPECT_THAT(h_frequent_sample_indices[i], ::testing::ElementsAreArray(cpu_embedding.frequent_sample_indices[i])); } } }; /****************** Frequent embedding: model cache indices ******************/ template <typename dtype, typename emtype> class CalculateModelCacheIndicesTest : public HybridEmbeddingUnitTest<dtype, emtype> { public: CalculateModelCacheIndicesTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size, size_t seed = 1234ll) : HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {} void run() { /* Compute expected results on host */ HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size, this->category_location, this->category_frequent_index, this->samples); cpu_embedding.calculate_frequent_model_cache_indices(); /* Compute indices */ this->build_frequent(); std::vector<std::vector<uint32_t>> h_model_cache_indices(this->num_instances); std::vector<std::vector<uint32_t>> h_model_cache_indices_offsets(this->num_instances); for (size_t i = 0; i < this->num_instances; i++) { this->frequent_embeddings[i].set_current_indices(&this->frequent_embedding_indices[i], this->stream); this->frequent_embeddings[i].indices_->calculate_cache_masks(this->stream); this->frequent_embeddings[i].indices_->calculate_model_cache_indices(80, this->stream); download_tensor(h_model_cache_indices[i], this->frequent_embeddings[i].indices_->model_cache_indices_, this->stream); download_tensor(h_model_cache_indices_offsets[i], this->frequent_embeddings[i].indices_->model_cache_indices_offsets_, this->stream); } /* Compare */ for (size_t i = 0; i < this->num_instances; i++) { h_model_cache_indices[i].resize(h_model_cache_indices_offsets[i][this->num_instances]); EXPECT_THAT(h_model_cache_indices[i], 
::testing::ElementsAreArray(cpu_embedding.model_cache_indices[i])); EXPECT_THAT(h_model_cache_indices_offsets[i], ::testing::ElementsAreArray(cpu_embedding.model_cache_indices_offsets[i])); } } }; /***************** Frequent embedding: network cache indices *****************/ template <typename dtype, typename emtype> class CalculateNetworkCacheIndicesTest : public HybridEmbeddingUnitTest<dtype, emtype> { public: CalculateNetworkCacheIndicesTest(const HybridEmbeddingConfig<dtype> config, size_t batch_size, size_t seed = 1234ll) : HybridEmbeddingUnitTest<dtype, emtype>(config, batch_size, seed) {} void run() { /* Compute expected results on host */ HybridEmbeddingCpu<dtype, emtype> cpu_embedding(this->config, this->batch_size, this->category_location, this->category_frequent_index, this->samples); cpu_embedding.calculate_frequent_network_cache_mask(); cpu_embedding.calculate_frequent_network_cache_indices(); /* Compute mask and indices */ this->build_frequent(); std::vector<std::vector<uint8_t>> h_network_cache_mask(this->num_instances); std::vector<std::vector<uint32_t>> h_network_cache_indices(this->num_instances); std::vector<std::vector<uint32_t>> h_network_cache_indices_offsets(this->num_instances); for (size_t i = 0; i < this->num_instances; i++) { this->frequent_embeddings[i].set_current_indices(&this->frequent_embedding_indices[i], this->stream); this->frequent_embeddings[i].indices_->calculate_cache_masks(this->stream); this->frequent_embeddings[i].indices_->calculate_network_cache_indices(this->stream); download_tensor(h_network_cache_indices[i], this->frequent_embeddings[i].indices_->network_cache_indices_, this->stream); download_tensor(h_network_cache_indices_offsets[i], this->frequent_embeddings[i].indices_->network_cache_indices_offsets_, this->stream); h_network_cache_mask[i].resize(this->config.num_frequent); HCTR_LIB_THROW(cudaMemcpyAsync( h_network_cache_mask[i].data(), reinterpret_cast<uint8_t*>(this->frequent_embeddings[i].indices_->cache_masks_.get_ptr()), this->config.num_frequent, cudaMemcpyDeviceToHost, this->stream)); HCTR_LIB_THROW(cudaStreamSynchronize(this->stream)); } /* Compare */ for (size_t i = 0; i < this->num_instances; i++) { h_network_cache_indices[i].resize( cpu_embedding.network_cache_indices_offsets[i][this->num_instances]); EXPECT_THAT(h_network_cache_indices[i], ::testing::ElementsAreArray(cpu_embedding.network_cache_indices[i])); EXPECT_THAT(h_network_cache_indices_offsets[i], ::testing::ElementsAreArray(cpu_embedding.network_cache_indices_offsets[i])); EXPECT_THAT(h_network_cache_mask[i], ::testing::ElementsAreArray(cpu_embedding.network_cache_mask[i])); } } }; /**************************** Test instantiations ****************************/ static const HybridEmbeddingConfig<uint32_t> config_uint32 = { 4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink}; static const HybridEmbeddingConfig<long long> config_int64 = { 4, 32, 10, 128, 1000, 128, 0.5f, CommunicationType::IB_NVLink}; // Edge cases: no frequent, all frequent static const HybridEmbeddingConfig<uint32_t> config_no_freq = { 4, 32, 10, 128, 1000, 0, 0.5f, CommunicationType::IB_NVLink}; static const HybridEmbeddingConfig<uint32_t> config_all_freq = { 4, 32, 10, 128, 1000, 1000, 0.5f, CommunicationType::IB_NVLink}; /* hybrid_embedding_model_indices_test */ TEST(hybrid_embedding_model_indices_test, uint32_float_64) { CalculateModelIndicesTest<uint32_t, float>(config_uint32, 64).run(); } TEST(hybrid_embedding_model_indices_test, int64_float_64) { CalculateModelIndicesTest<long long, 
float>(config_int64, 64).run(); } TEST(hybrid_embedding_model_indices_test, uint32_float_2048) { CalculateModelIndicesTest<uint32_t, float>(config_uint32, 2048).run(); } TEST(hybrid_embedding_model_indices_test, int64_float_2048) { CalculateModelIndicesTest<long long, float>(config_int64, 2048).run(); } TEST(hybrid_embedding_model_indices_test, uint32_float_128_no_freq) { CalculateModelIndicesTest<uint32_t, float>(config_no_freq, 128).run(); } TEST(hybrid_embedding_model_indices_test, uint32_float_128_all_freq) { CalculateModelIndicesTest<uint32_t, float>(config_all_freq, 128).run(); } /* hybrid_embedding_network_indices_test */ TEST(hybrid_embedding_network_indices_test, uint32_float_64) { CalculateNetworkIndicesTest<uint32_t, float>(config_uint32, 64).run(); } TEST(hybrid_embedding_network_indices_test, int64_float_64) { CalculateNetworkIndicesTest<long long, float>(config_int64, 64).run(); } TEST(hybrid_embedding_network_indices_test, uint32_float_2048) { CalculateNetworkIndicesTest<uint32_t, float>(config_uint32, 2048).run(); } TEST(hybrid_embedding_network_indices_test, int64_float_2048) { CalculateNetworkIndicesTest<long long, float>(config_int64, 2048).run(); } TEST(hybrid_embedding_network_indices_test, uint32_float_128_no_freq) { CalculateNetworkIndicesTest<uint32_t, float>(config_no_freq, 128).run(); } TEST(hybrid_embedding_network_indices_test, uint32_float_128_all_freq) { CalculateNetworkIndicesTest<uint32_t, float>(config_all_freq, 128).run(); } /* hybrid_embedding_frequent_sample_indices_test */ TEST(hybrid_embedding_frequent_sample_indices_test, uint32_float_64) { CalculateFrequentSampleIndicesTest<uint32_t, float>(config_uint32, 64).run(); } TEST(hybrid_embedding_frequent_sample_indices_test, int64_float_64) { CalculateFrequentSampleIndicesTest<long long, float>(config_int64, 64).run(); } TEST(hybrid_embedding_frequent_sample_indices_test, uint32_float_2048) { CalculateFrequentSampleIndicesTest<uint32_t, float>(config_uint32, 2048).run(); } TEST(hybrid_embedding_frequent_sample_indices_test, int64_float_2048) { CalculateFrequentSampleIndicesTest<long long, float>(config_int64, 2048).run(); } TEST(hybrid_embedding_frequent_sample_indices_test, uint32_float_128_no_freq) { CalculateFrequentSampleIndicesTest<uint32_t, float>(config_no_freq, 128).run(); } TEST(hybrid_embedding_frequent_sample_indices_test, uint32_float_128_all_freq) { CalculateFrequentSampleIndicesTest<uint32_t, float>(config_all_freq, 128).run(); } /* hybrid_embedding_model_cache_indices_test */ TEST(hybrid_embedding_model_cache_indices_test, uint32_float_64) { CalculateModelCacheIndicesTest<uint32_t, float>(config_uint32, 64).run(); } TEST(hybrid_embedding_model_cache_indices_test, int64_float_64) { CalculateModelCacheIndicesTest<long long, float>(config_int64, 64).run(); } TEST(hybrid_embedding_model_cache_indices_test, uint32_float_2048) { CalculateModelCacheIndicesTest<uint32_t, float>(config_uint32, 2048).run(); } TEST(hybrid_embedding_model_cache_indices_test, int64_float_2048) { CalculateModelCacheIndicesTest<long long, float>(config_int64, 2048).run(); } TEST(hybrid_embedding_model_cache_indices_test, uint32_float_128_no_freq) { CalculateModelCacheIndicesTest<uint32_t, float>(config_no_freq, 128).run(); } TEST(hybrid_embedding_model_cache_indices_test, uint32_float_128_all_freq) { CalculateModelCacheIndicesTest<uint32_t, float>(config_all_freq, 128).run(); } /* hybrid_embedding_network_cache_indices_test */ TEST(hybrid_embedding_network_cache_indices_test, uint32_float_64) { 
CalculateNetworkCacheIndicesTest<uint32_t, float>(config_uint32, 64).run(); } TEST(hybrid_embedding_network_cache_indices_test, int64_float_64) { CalculateNetworkCacheIndicesTest<long long, float>(config_int64, 64).run(); } TEST(hybrid_embedding_network_cache_indices_test, uint32_float_2048) { CalculateNetworkCacheIndicesTest<uint32_t, float>(config_uint32, 2048).run(); } TEST(hybrid_embedding_network_cache_indices_test, int64_float_2048) { CalculateNetworkCacheIndicesTest<long long, float>(config_int64, 2048).run(); } TEST(hybrid_embedding_network_cache_indices_test, uint32_float_128_no_freq) { CalculateNetworkCacheIndicesTest<uint32_t, float>(config_no_freq, 128).run(); } TEST(hybrid_embedding_network_cache_indices_test, uint32_float_128_all_freq) { CalculateNetworkCacheIndicesTest<uint32_t, float>(config_all_freq, 128).run(); }
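// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original source): the
// comparisons above all rely on the same CSR-like layout: a flat index buffer
// plus an offsets buffer whose last entry is the total number of valid
// indices (hence the resize to offsets[num_instances] before comparing). The
// host-only snippet below shows that layout and how one instance's slice is
// recovered from it; the data here is made up.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

int main() {
	const int num_instances = 3;
	// Per-instance index lists, packed back to back into one flat buffer.
	std::vector<std::vector<unsigned>> per_instance = {{4, 7}, {}, {1, 2, 9}};
	std::vector<unsigned> flat;
	std::vector<unsigned> offsets(1, 0);                 // offsets[0] = 0
	for (int i = 0; i < num_instances; i++) {
		flat.insert(flat.end(), per_instance[i].begin(), per_instance[i].end());
		offsets.push_back((unsigned)flat.size());          // running total after instance i
	}
	// offsets[num_instances] is the total count; slice i is [offsets[i], offsets[i+1]).
	for (int i = 0; i < num_instances; i++) {
		printf("instance %d:", i);
		for (unsigned j = offsets[i]; j < offsets[i + 1]; j++) printf(" %u", flat[j]);
		printf("\n");
	}
	printf("total = %u\n", offsets[num_instances]);
	return 0;
}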
__global__ void kernel_baseToNumber(char *reads, const long length) { long index = blockIdx.x * blockDim.x + threadIdx.x; while (index < length) { switch (reads[index]) { case 'A': reads[index] = 0; break; case 'a': reads[index] = 0; break; case 'C': reads[index] = 1; break; case 'c': reads[index] = 1; break; case 'G': reads[index] = 2; break; case 'g': reads[index] = 2; break; case 'T': reads[index] = 3; break; case 't': reads[index] = 3; break; case 'U': reads[index] = 3; break; case 'u': reads[index] = 3; break; default: reads[index] = 4; break; } index += 128*128; } } // 1 base use 2 bit, drop gap // kernel_compressedData __global__ void kernel_compressData( const int *lengths, const long *offsets, const char *reads, unsigned int *compressed, int *gaps, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; // out of range long mark = offsets[index]/16; // compressed data offset int round = 0; // write when round is 16 int gapCount = 0; // gap count unsigned int compressedTemp = 0; // compressed data long start = offsets[index]; long end = start + lengths[index]; for (long i=start; i<end; i++) { unsigned char base = reads[i]; // read a base if (base < 4) { compressedTemp += base << (15-round)*2; round++; if (round == 16) { compressed[mark] = compressedTemp; compressedTemp = 0; round = 0; mark++; } } else { // gap gapCount++; } } compressed[mark] = compressedTemp; gaps[index] = gapCount; } __global__ void kernel_createIndex4( const char *reads, const int *lengths, const long *offsets, unsigned short *indexs, unsigned short *orders, long *words, int *magicBase, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; // out of range int start = offsets[index]; int end = start + lengths[index]; int magic0=0, magic1=0, magic2=0, magic3=0; // magic base char bases[4]; for(int i=0; i<4; i++) { // default is gap bases[i] = 4; } int wordCount = 0; for (int i=start; i<end; i++) { for(int j=0; j<3; j++) { // copy base to array bases[j] = bases[j+1]; } bases[3] = reads[i]; switch (bases[3]) { // update magic case 0: magic0++; break; case 1: magic1++; break; case 2: magic2++; break; case 3: magic3++; break; } unsigned short indexValue = 0; int flag = 0; // if has gap for (int j=0; j<4; j++) { indexValue += (bases[j]&3)<<(3-j)*2; flag += max((int)(bases[j] - 3), 0); } indexs[i] = flag?65535:indexValue; // gap value is 65535 wordCount += flag?0:1; } words[index] = wordCount; // index length magicBase[index*4+0] = magic0; // update magicBase magicBase[index*4+1] = magic1; magicBase[index*4+2] = magic2; magicBase[index*4+3] = magic3; } __global__ void kernel_createIndex5( const char *reads, const int *lengths, const long *offsets, unsigned short *indexs, unsigned short *orders, long *words, int *magicBase, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; int start = offsets[index]; int end = start + lengths[index]; int magic0=0, magic1=0, magic2=0, magic3=0; char bases[5]; for(int i=0; i<5; i++) { bases[i] = 4; } int wordCount = 0; for (int i=start; i<end; i++) { for(int j=0; j<4; j++) { bases[j] = bases[j+1]; } bases[4] = reads[i]; switch (bases[4]) { case 0: magic0++; break; case 1: magic1++; break; case 2: magic2++; break; case 3: magic3++; break; } unsigned short indexValue = 0; int flag = 0; for (int j=0; j<5; j++) { indexValue += (bases[j]&3)<<(4-j)*2; flag += max((int)(bases[j] - 3), 0); } indexs[i] = flag?65535:indexValue; wordCount 
+= flag?0:1; } words[index] = wordCount; magicBase[index*4+0] = magic0; magicBase[index*4+1] = magic1; magicBase[index*4+2] = magic2; magicBase[index*4+3] = magic3; } __global__ void kernel_createIndex6( const char *reads, const int *lengths, const long *offsets, unsigned short *indexs, unsigned short *orders, long *words, int *magicBase, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; int start = offsets[index]; int end = start + lengths[index]; int magic0=0, magic1=0, magic2=0, magic3=0; char bases[6]; for(int i=0; i<6; i++) { bases[i] = 4; } int wordCount = 0; for (int i=start; i<end; i++) { for(int j=0; j<5; j++) { bases[j] = bases[j+1]; } bases[5] = reads[i]; switch (bases[5]) { case 0: magic0++; break; case 1: magic1++; break; case 2: magic2++; break; case 3: magic3++; break; } unsigned short indexValue = 0; int flag = 0; for (int j=0; j<6; j++) { indexValue += (bases[j]&3)<<(5-j)*2; flag += max((int)(bases[j] - 3), 0); } indexs[i] = flag?65535:indexValue; wordCount += flag?0:1; } words[index] = wordCount; magicBase[index*4+0] = magic0; magicBase[index*4+1] = magic1; magicBase[index*4+2] = magic2; magicBase[index*4+3] = magic3; } __global__ void kernel_createIndex7( const char *reads, const int *lengths, const long *offsets, unsigned short *indexs, unsigned short *orders, long *words, int *magicBase, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; int start = offsets[index]; int end = start + lengths[index]; int magic0=0, magic1=0, magic2=0, magic3=0; char bases[7]; for(int i=0; i<7; i++) { bases[i] = 4; } int wordCount = 0; for (int i=start; i<end; i++) { for(int j=0; j<6; j++) { bases[j] = bases[j+1]; } bases[6] = reads[i]; switch (bases[6]) { case 0: magic0++; break; case 1: magic1++; break; case 2: magic2++; break; case 3: magic3++; break; } unsigned short indexValue = 0; int flag = 0; for (int j=0; j<7; j++) { indexValue += (bases[j]&3)<<(6-j)*2; flag += max((int)(bases[j] - 3), 0); } indexs[i] = flag?65535:indexValue; wordCount += flag?0:1; } words[index] = wordCount; magicBase[index*4+0] = magic0; magicBase[index*4+1] = magic1; magicBase[index*4+2] = magic2; magicBase[index*4+3] = magic3; } __global__ void kernel_createCutoff( float threshold, int wordLength, const int *lengths, long *words, int *wordCutoff, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; // out of range int length = lengths[index]; int required = length - wordLength + 1; int cutoff = ceil((float)length * (1.f - threshold) * (float)wordLength); required -= cutoff; wordCutoff[index] = required; } __global__ void kernel_mergeIndex( const long *offsets, const unsigned short *indexs, unsigned short *orders, const long *words, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; // out of range int start = offsets[index]; int end = start + words[index]; unsigned short basePrevious = indexs[start]; unsigned short baseNow; int count = 1; for (int i=start+1; i<end; i++) { // merge same index orders is count baseNow = indexs[i]; if (baseNow == basePrevious) { count++; orders[i-1] = 0; } else { basePrevious = baseNow; orders[i-1] = count; count = 1; } } orders[end-1] = count; } __global__ void kernel_updateRepresentative( int *cluster, int *representative, const int readsCount) { int r = *representative; r++; while (r < readsCount) { if (cluster[r] < 0) { // is representative cluster[r] = 
r; break; } r++; } *representative = r; } // updateRepresentative void updateRepresentative( int *d_cluster, int *representative, int readsCount) { int *d_r; cudaMalloc((void**)&d_r, sizeof(int)); cudaMemcpy(d_r, representative, sizeof(int), cudaMemcpyHostToDevice); kernel_updateRepresentative<<<1, 1>>>( d_cluster, d_r, readsCount); cudaMemcpy(representative, d_r, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_r); } // kernel_makeTable __global__ void kernel_makeTable( const long *offsets, const unsigned short *indexs, const unsigned short *orders, const long *words, unsigned short *table, int representative) { int index = blockIdx.x * blockDim.x + threadIdx.x; int start = offsets[representative]; int end = start + words[representative]; for (int i=index+start; i<end; i+=128*128) { unsigned short order = orders[i]; if (order == 0) continue; table[indexs[i]] = order; } } // kernel_cleanTable __global__ void kernel_cleanTable( const long *offsets, const unsigned short *indexs, const unsigned short *orders, const long *words, unsigned short *table, const int representative) { int index = blockIdx.x * blockDim.x + threadIdx.x; int start = offsets[representative]; int end = start + words[representative]; for (int i=index+start; i<end; i+=128*128) { if (orders[i] == 0) continue; table[indexs[i]] = 0; } } __global__ void kernel_magic(float threshold, const int *lengths, const int *magicBase, int *cluster, const int representative, const int readsCount) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; // out of range if (cluster[index] >= 0) return; // is clustered int offsetOne = representative*4; // representative magic offset int offsetTwo = index*4; // query magic offset int magic = min(magicBase[offsetOne + 0], magicBase[offsetTwo + 0]) + min(magicBase[offsetOne + 1], magicBase[offsetTwo + 1]) + min(magicBase[offsetOne + 2], magicBase[offsetTwo + 2]) + min(magicBase[offsetOne + 3], magicBase[offsetTwo + 3]); int length = lengths[index]; int minLength = ceil((float)length*threshold); if (magic > minLength) { // pass filter cluster[index] = -2; } } // kernel_filter __global__ void kernel_filter( const float threshold, const int wordLength, const int *lengths, const long *offsets, const unsigned short *indexs, const unsigned short *orders, const long *words, const int *wordCutoff, int *cluster, const unsigned short *table, const int readsCount) { __shared__ int result[128]; int gid = blockIdx.x; int lid = threadIdx.x; if (gid >= readsCount) return; // out of range if (cluster[gid] != -2) return; // out of filter result[lid] = 0; // result in thread int start = offsets[gid]; int end = start + words[gid]; for (int i = lid + start; i < end; i += 128) { result[lid] += min(table[indexs[i]], orders[i]); } __syncthreads(); if (lid == 0) { for (int i=1; i<128; i++) { result[0] += result[i]; } if (result[0] > wordCutoff[gid]) { // pass filter cluster[gid] = -3; } else { cluster[gid] = -1; // not pass filter } } } // kernel_align __global__ void kernel_align( const float threshold, const int *lengths, const long *offsets, const unsigned int *compressed, const int *gaps, const int representative, int *cluster, const int readsCount) { // variables int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= readsCount) return; // out of range if (cluster[index] != -3) return; // not pass filter int target = representative; // representative read int query = index; // query read int minLength = ceil((float)lengths[index] * threshold); int targetLength = 
lengths[target] - gaps[target]; // representative base count int queryLength = lengths[query] - gaps[query]; // query base count int target32Length = targetLength/16+1; // compressed target length int query32Length = queryLength/16+1; // compressed query length int targetOffset = offsets[target]/16; // representative offset int queryOffset = offsets[query]/16; // query offset short rowNow[3000] = {0}; // dynamic matrix row short rowPrevious[3000] = {0}; // dynamic matrix row int columnPrevious[17] = {0}; // dynamic matrix column int columnNow[17] = {0}; // dynamic matrix column int shift = ceil((float)targetLength - (float)queryLength*threshold); shift = ceil((float)shift / 16.f); // shift form diagonal // compute for (int i = 0; i < query32Length; i++) { // query is column // first big loop for (int j=0; j<17; j++) { columnPrevious[j] = 0; columnNow[j] = 0; } int targetIndex = 0; // target position unsigned int queryPack = compressed[queryOffset+i]; // get 16 query bases int jstart = i-shift; jstart = max(jstart, 0); int jend = i+shift; jend = min(jend, target32Length); for (int j=0; j<target32Length; j++) { // target is row columnPrevious[0] = rowPrevious[targetIndex]; unsigned int targetPack = compressed[targetOffset+j]; // get 16 target bases //---16*16core----// for (int k=30; k>=0; k-=2) { // 16 loops get target bases // first small loop int targetBase = (targetPack>>k)&3; // get base from target int m=0; columnNow[m] = rowPrevious[targetIndex+1]; for (int l=30; l>=0; l-=2) { // 16 loops get query bases m++; int queryBase = (queryPack>>l)&3; // get base from query int diffScore = queryBase == targetBase; columnNow[m] = columnPrevious[m-1] + diffScore; columnNow[m] = max(columnNow[m], columnNow[m - 1]); columnNow[m] = max(columnNow[m], columnPrevious[m]); } targetIndex++; rowNow[targetIndex] = columnNow[16]; if (targetIndex == targetLength) { // last column of dynamic matirx if(i == query32Length-1) { // complete int score = columnNow[queryLength%16]; if (score >= minLength) { cluster[index] = target; } else { cluster[index] = -1; } return; } break; } // secode small loop exchange columnPrevious and columnNow k-=2; targetBase = (targetPack>>k)&3; m=0; columnPrevious[m] = rowPrevious[targetIndex+1]; for (int l=30; l>=0; l-=2) { m++; int queryBase = (queryPack>>l)&3; int diffScore = queryBase == targetBase; columnPrevious[m] = columnNow[m-1] + diffScore; columnPrevious[m] = max(columnPrevious[m], columnPrevious[m - 1]); columnPrevious[m] = max(columnPrevious[m], columnNow[m]); } targetIndex++; rowNow[targetIndex] = columnPrevious[16]; if (targetIndex == targetLength) { if(i == query32Length-1) { int score = columnPrevious[queryLength%16]; if (score >= minLength) { cluster[index] = target; } else { cluster[index] = -1; } return; } break; } } } // secode big loop exchage rowPrevious and rowNow i++; for (int j=0; j<17; j++) { columnPrevious[j] = 0; columnNow[j] = 0; } targetIndex = 0; queryPack = compressed[queryOffset+i]; jstart = i-shift; jstart = max(jstart, 0); jend = i+shift; jend = min(jend, target32Length); for (int j=0; j<target32Length; j++) { unsigned int targetPack = compressed[targetOffset+j]; //---16*16 core----// for (int k=30; k>=0; k-=2) { // first small loop int targetBase = (targetPack>>k)&3; int m=0; columnNow[m] = rowNow[targetIndex+1]; for (int l=30; l>=0; l-=2) { m++; int queryBase = (queryPack>>l)&3; int diffScore = queryBase == targetBase; columnNow[m] = columnPrevious[m-1] + diffScore; columnNow[m] = max(columnNow[m], columnNow[m - 1]); columnNow[m] = 
max(columnNow[m], columnPrevious[m]); } targetIndex++; rowPrevious[targetIndex] = columnNow[16]; if (targetIndex == targetLength) { if(i == query32Length-1) { int score = columnNow[queryLength%16]; if (score >= minLength) { cluster[index] = target; } else { cluster[index] = -1; } return; } break; } // second small loop k-=2; targetBase = (targetPack>>k)&3; m=0; columnPrevious[m] = rowNow[targetIndex+1]; for (int l=30; l>=0; l-=2) { m++; int queryBase = (queryPack>>l)&3; int diffScore = queryBase == targetBase; columnPrevious[m] = columnNow[m-1] + diffScore; columnPrevious[m] = max(columnPrevious[m], columnPrevious[m - 1]); columnPrevious[m] = max(columnPrevious[m], columnNow[m]); } targetIndex++; rowPrevious[targetIndex] = columnPrevious[16]; if (targetIndex == targetLength) { if(i == query32Length-1) { int score = columnPrevious[queryLength%16]; if (score >= minLength) { cluster[index] = target; } else { cluster[index] = -1; } return; } break; } } } } }
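// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original source): the
// compression and alignment kernels above store 16 bases per 32-bit word,
// two bits each, most-significant pair first, and skip anything encoded as a
// gap (value >= 4). The host-only helper below packs and unpacks one short
// read the same way so the bit layout is easy to check; it is a toy, not the
// original pipeline.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

// Pack numeric bases (0..3) into 32-bit words, 16 per word, MSB-first; gaps (>=4) are dropped.
static std::vector<unsigned int> pack2bit(const std::vector<unsigned char>& bases) {
	std::vector<unsigned int> words;
	unsigned int word = 0;
	int round = 0;
	for (unsigned char b : bases) {
		if (b >= 4) continue;                                 // gap: not stored
		word += (unsigned int)b << (15 - round) * 2;
		if (++round == 16) { words.push_back(word); word = 0; round = 0; }
	}
	words.push_back(word);                                  // flush the partial last word, as the kernel does
	return words;
}

int main() {
	// Bases already mapped to 0..3 by a baseToNumber step; 4 marks a gap.
	std::vector<unsigned char> read = {0, 1, 2, 3, 4, 3, 3, 0, 1};
	std::vector<unsigned int> packed = pack2bit(read);
	// Unpack and print every stored pair; base k of a word lives in bits (15-k)*2 .. (15-k)*2+1,
	// so trailing zeros are padding from the last, partially filled word.
	for (size_t w = 0; w < packed.size(); w++)
		for (int k = 30; k >= 0; k -= 2)
			printf("%u", (packed[w] >> k) & 3u);
	printf("\n");
	return 0;
}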
* bisection. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include "cutil_inline.h" #include "config.h" #include "structs.h" #include "matlab.h" #include "util.h" #include "gerschgorin.h" #include "bisect_small.cuh" #include "bisect_large.cuh" const char *sSDKsample = "CUDA eigenvalues"; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { printf("[ %s ]\n", sSDKsample); runTest( argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Initialize the input data to the algorithm //! @param input handles to the input data //! @param exec_path path where executable is run (argv[0]) //! @param mat_size size of the matrix //! @param user_defined 1 if the matrix size has been requested by the user, //! 0 if the default size //////////////////////////////////////////////////////////////////////////////// void initInputData( InputData& input, char* exec_path, const unsigned int mat_size, const unsigned int user_defined) { // allocate memory input.a = (float*) malloc( sizeof(float) * mat_size); input.b = (float*) malloc( sizeof(float) * mat_size); if( 1 == user_defined) { // initialize diagonal and superdiagonal entries with random values srand( 278217421); // srand( clock()); for( unsigned int i = 0; i < mat_size; ++i) { input.a[i] = (float) (2.0 * (((double)rand() / (double) RAND_MAX) - 0.5)); input.b[i] = (float) (2.0 * (((double)rand() / (double) RAND_MAX) - 0.5)); } // the first element of s is used as padding on the device (thus the // whole vector is copied to the device but the kernels are launched // with (s+1) as start address input.b[0] = 0.0f; } else { // read default matrix unsigned int input_data_size = mat_size; char* diag_path = cutFindFilePath( "diagonal.dat", exec_path); cutilCondition( 0 != diag_path); cutilCheckError( cutReadFilef( diag_path, &(input.a), &input_data_size)); char* sdiag_path = cutFindFilePath( "superdiagonal.dat", exec_path); cutilCondition( 0 != sdiag_path); cutilCheckError( cutReadFilef( sdiag_path, &(input.b), &input_data_size, 1)); cutFree( diag_path); cutFree( sdiag_path); } // allocate device memory for input cutilSafeCall( cudaMalloc( (void**) &(input.g_a) , sizeof(float) * mat_size)); cutilSafeCall( cudaMalloc( (void**) &(input.g_b_raw), sizeof(float) * mat_size)); // copy data to device cutilSafeCall( cudaMemcpy( input.g_a , input.a, sizeof(float) * mat_size, cudaMemcpyHostToDevice )); cutilSafeCall( cudaMemcpy( input.g_b_raw, input.b, sizeof(float) * mat_size, cudaMemcpyHostToDevice )); input.g_b = input.g_b_raw + 1; } //////////////////////////////////////////////////////////////////////////////// //! Clean up input data, in particular allocated memory //! @param input handles to the input data //////////////////////////////////////////////////////////////////////////////// void cleanupInputData( InputData& input) { freePtr( input.a); freePtr( input.b); cutilSafeCall( cudaFree( input.g_a)); input.g_a = NULL; cutilSafeCall( cudaFree( input.g_b_raw)); input.g_b_raw = NULL; input.g_b = NULL; } //////////////////////////////////////////////////////////////////////////////// //! 
Check if a specific matrix size has to be used //! @param argc number of command line arguments (from main(argc, argv) //! @param argv pointers to command line arguments (from main(argc, argv) //! @param matrix_size size of matrix, updated if specific size specified on //! command line //////////////////////////////////////////////////////////////////////////////// void getMatrixSize( int argc, char** argv, unsigned int& mat_size, unsigned int& user_defined) { int temp = -1; cutGetCmdLineArgumenti( argc, (const char**) argv, "matrix-size", &temp); if( temp > 0) { mat_size = (unsigned int) temp; // data type short is used in the kernel cutilCondition( mat_size < (1 << 16)); user_defined = 1; } printf( "Matrix size: %i x %i\n", mat_size, mat_size); } //////////////////////////////////////////////////////////////////////////////// //! Check if a specific precision of the eigenvalue has to be obtained //! @param argc number of command line arguments (from main(argc, argv) //! @param argv pointers to command line arguments (from main(argc, argv) //! @param iters_timing numbers of iterations for timing, updated if a //! specific number is specified on the command line //////////////////////////////////////////////////////////////////////////////// void getPrecision( int argc, char** argv, float& precision) { float temp = -1.0f; cutGetCmdLineArgumentf( argc, (const char**) argv, "precision", &temp); if( temp > 0.0f) { precision = temp; } printf( "Precision: %f\n", precision); } //////////////////////////////////////////////////////////////////////////////// //! Check if a particular number of iterations for timings has to be used //! @param argc number of command line arguments (from main(argc, argv) //! @param argv pointers to command line arguments (from main(argc, argv) //! @param iters_timing number of timing iterations, updated if user //! specific value //////////////////////////////////////////////////////////////////////////////// void getItersTiming( int argc, char** argv, unsigned int& iters_timing) { int temp = -1; cutGetCmdLineArgumenti( argc, (const char**) argv, "iters-timing", &temp); if( temp > 0) { iters_timing = temp; } printf( "Iterations to be timed: %i\n", iters_timing); } //////////////////////////////////////////////////////////////////////////////// //! Check if a particular filename has to be used for the file where the result //! is stored //! @param argc number of command line arguments (from main(argc, argv) //! @param argv pointers to command line arguments (from main(argc, argv) //! @param filename filename of result file, updated if user specified //! filename //////////////////////////////////////////////////////////////////////////////// void getResultFilename( int argc, char** argv, char*& filename) { char* temp = NULL; cutGetCmdLineArgumentstr( argc, (const char**) argv, "filename-result", &temp); if( NULL != temp) { filename = (char*) malloc( sizeof(char) * strlen( temp)); strcpy( filename, temp); cutFree( temp); } printf( "Result filename: '%s'\n", filename); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); unsigned int timer = 0; unsigned int timer_total = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutCreateTimer( &timer_total)); // default unsigned int mat_size = 2048; // flag if the matrix size is due to explicit user request unsigned int user_defined = 0; // desired precision of eigenvalues float precision = 0.00001f; #ifdef __DEVICE_EMULATION__ unsigned int iters_timing = 1; #else unsigned int iters_timing = 100; #endif char* result_file = "eigenvalues.dat"; // check if there is a command line request for the matrix size getMatrixSize( argc, argv, mat_size, user_defined); // check if user requested specific precision getPrecision( argc, argv, precision); // check if user requested specific number of iterations for timing getItersTiming( argc, argv, iters_timing); // file name for result file getResultFilename( argc, argv, result_file); // set up input InputData input; initInputData( input, argv[0], mat_size, user_defined); // compute Gerschgorin interval float lg = FLT_MAX; float ug = -FLT_MAX; computeGerschgorin( input.a, input.b+1, mat_size, lg, ug); printf( "Gerschgorin interval: %f / %f\n", lg, ug); // two kernels, for small matrices a lot of overhead can be avoided if( mat_size <= MAX_SMALL_MATRIX) { // initialize memory for result ResultDataSmall result; initResultSmallMatrix( result, mat_size); // run the kernel computeEigenvaluesSmallMatrix( input, result, mat_size, lg, ug, precision, iters_timing); // get the result from the device and do some sanity checks, // save the result processResultSmallMatrix( input, result, mat_size, result_file); // clean up cleanupResultSmallMatrix( result); } else { // initialize memory for result ResultDataLarge result; initResultDataLargeMatrix( result, mat_size); // run the kernel computeEigenvaluesLargeMatrix( input, result, mat_size, precision, lg, ug, iters_timing ); // get the result from the device and do some sanity checks // save the result if user specified matrix size processResultDataLargeMatrix( input, result, mat_size, result_file, user_defined, argv[0]); // cleanup cleanupResultDataLargeMatrix(result); } cleanupInputData( input); cudaThreadExit(); }
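// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original source): the
// bisection above is bracketed by a Gerschgorin interval for the symmetric
// tridiagonal matrix: every eigenvalue lies in
// [min_i(d[i] - r[i]), max_i(d[i] + r[i])] with r[i] = |s[i-1]| + |s[i]|,
// where d is the diagonal and s the off-diagonal. The host helper below is a
// plain re-derivation of that bound, not the project's computeGerschgorin.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cmath>
#include <cfloat>

// d: diagonal (n entries), s: off-diagonal (n-1 entries, s[i] couples rows i and i+1).
static void gerschgorinBounds(const float* d, const float* s, int n, float& lg, float& ug) {
	lg = FLT_MAX;
	ug = -FLT_MAX;
	for (int i = 0; i < n; i++) {
		float radius = (i > 0 ? (float)std::fabs(s[i - 1]) : 0.0f)
		             + (i < n - 1 ? (float)std::fabs(s[i]) : 0.0f);
		if (d[i] - radius < lg) lg = d[i] - radius;
		if (d[i] + radius > ug) ug = d[i] + radius;
	}
}

int main() {
	const float d[4] = {2.0f, -1.0f, 0.5f, 3.0f};
	const float s[3] = {0.3f, -0.7f, 0.2f};
	float lg, ug;
	gerschgorinBounds(d, s, 4, lg, ug);
	printf("Gerschgorin interval: [%f, %f]\n", lg, ug);
	return 0;
}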
#include <stdio.h> #include <stdlib.h> #include <memory> #include <iostream> #include <cassert> #include <cuda.h> #include "helper_math.h" #include "main.h" #include "shrUtils.h" // Inline device function to convert 32-bit unsigned integer to floating point rgba color //***************************************************************** __device__ float4 rgbaUintToFloat4(const unsigned int uiPackedRGBA) { float4 rgba; rgba.x = uiPackedRGBA & 0xff; rgba.y = (uiPackedRGBA >> 8) & 0xff; rgba.z = (uiPackedRGBA >> 16) & 0xff; rgba.w = (uiPackedRGBA >> 24) & 0xff; return rgba; } // Inline device function to convert floating point rgba color to 32-bit unsigned integer //***************************************************************** __device__ unsigned int rgbaFloat4ToUint(const float4 rgba) { unsigned int uiPackedRGBA = 0U; uiPackedRGBA |= 0x000000FF & (unsigned int)rgba.x; uiPackedRGBA |= 0x0000FF00 & (((unsigned int)rgba.y) << 8); uiPackedRGBA |= 0x00FF0000 & (((unsigned int)rgba.z) << 16); uiPackedRGBA |= 0xFF000000 & (((unsigned int)rgba.w) << 24); return uiPackedRGBA; } // Transpose kernel (see transpose SDK sample for details) //***************************************************************** __global__ void Transpose(const unsigned int* uiDataIn, unsigned int* uiDataOut, const int iWidth, const int iHeight) { // read the matrix tile into LMEM unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y; __shared__ unsigned int uiLocalBuff[16*17]; if((xIndex < iWidth) && (yIndex < iHeight)) { //uiLocalBuff[get_local_id(1) * (get_local_size(0) + 1) + get_local_id(0)] = uiDataIn[(yIndex * iWidth) + xIndex]; uiLocalBuff[threadIdx.y * (blockDim.x + 1) + threadIdx.x] = uiDataIn[(yIndex * iWidth) + xIndex]; } // Synchronize the read into LMEM __syncthreads(); // write the transposed matrix tile to global memory xIndex = __mul24(blockIdx.y, blockDim.y) + threadIdx.x; yIndex = __mul24(blockIdx.x, blockDim.x) + threadIdx.y; if((xIndex < iHeight) && (yIndex < iWidth)) { uiDataOut[(yIndex * iHeight) + xIndex] = //uiLocalBuff[get_local_id(0) * (get_local_size(1) + 1) + get_local_id(1)]; uiLocalBuff[threadIdx.x * (blockDim.y + 1) + threadIdx.y]; } } // simple 1st order recursive filter kernel //***************************************************************** // - processes one image column per thread // parameters: // uiDataIn - pointer to input data (RGBA image packed into 32-bit integers) // uiDataOut - pointer to output data // iWidth - image width // iHeight - image height // a - blur parameter //***************************************************************** __global__ void SimpleRecursiveRGBA( const unsigned int* uiDataIn, unsigned int* uiDataOut, const int iWidth, const int iHeight, const float a) { // compute X pixel location and check in-bounds unsigned int X = blockIdx.x * blockDim.x + threadIdx.x; if (X >= iWidth) return; // advance global pointers to correct column for this work item and x position uiDataIn += X; uiDataOut += X; // start forward filter pass float4 yp = rgbaUintToFloat4(*uiDataIn); // previous output for (int Y = 0; Y < iHeight; Y++) { float4 xc = rgbaUintToFloat4(*uiDataIn); float4 yc = xc + (yp - xc) * make_float4(a, a, a, a); *uiDataOut = rgbaFloat4ToUint(yc); yp = yc; uiDataIn += iWidth; // move to next row uiDataOut += iWidth; // move to next row } // reset global pointers to point to last element in column for this work item and x position uiDataIn -= iWidth; uiDataOut -= iWidth; // start reverse filter 
pass: ensures response is symmetrical yp = rgbaUintToFloat4(*uiDataIn); for (int Y = iHeight - 1; Y > -1; Y--) { float4 xc = rgbaUintToFloat4(*uiDataIn); float4 yc = xc + (yp - xc) * make_float4(a, a, a, a); *uiDataOut = rgbaFloat4ToUint((rgbaUintToFloat4(*uiDataOut) + yc) * 0.5f); yp = yc; uiDataIn -= iWidth; // move to previous row uiDataOut -= iWidth; // move to previous row } } // Recursive Gaussian filter //***************************************************************** // parameters: // uiDataIn - pointer to input data (RGBA image packed into 32-bit integers) // uiDataOut - pointer to output data // iWidth - image width // iHeight - image height // a0-a3, b1, b2, coefp, coefn - filter parameters // // If used, CLAMP_TO_EDGE is passed in via OpenCL clBuildProgram call options string at app runtime //***************************************************************** __global__ void RecursiveRGBA( const unsigned int* uiDataIn, unsigned int* uiDataOut, const int iWidth, const int iHeight, const float a0, const float a1, const float a2, const float a3, const float b1, const float b2, const float coefp, const float coefn) { // compute X pixel location and check in-bounds //unsigned int X = mul24(get_group_id(0), get_local_size(0)) + get_local_id(0); unsigned int X = blockIdx.x * blockDim.x + threadIdx.x; if (X >= iWidth) return; // advance global pointers to correct column for this work item and x position uiDataIn += X; uiDataOut += X; // start forward filter pass float4 xp = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // previous input float4 yp = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // previous output float4 yb = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // previous output by 2 #ifdef CLAMP_TO_EDGE xp = rgbaUintToFloat4(*uiDataIn); yb = xp * make_float4(coefp,coefp,coefp,coefp); yp = yb; #endif for (int Y = 0; Y < iHeight; Y++) { float4 xc = rgbaUintToFloat4(*uiDataIn); float4 yc = (xc * a0) + (xp * a1) - (yp * b1) - (yb * b2); *uiDataOut = rgbaFloat4ToUint(yc); xp = xc; yb = yp; yp = yc; uiDataIn += iWidth; // move to next row uiDataOut += iWidth; // move to next row } // reset global pointers to point to last element in column for this work item and x position uiDataIn -= iWidth; uiDataOut -= iWidth; // start reverse filter pass: ensures response is symmetrical float4 xn = make_float4(0.0f, 0.0f, 0.0f, 0.0f); float4 xa = make_float4(0.0f, 0.0f, 0.0f, 0.0f); float4 yn = make_float4(0.0f, 0.0f, 0.0f, 0.0f); float4 ya = make_float4(0.0f, 0.0f, 0.0f, 0.0f); #ifdef CLAMP_TO_EDGE xn = rgbaUintToFloat4(*uiDataIn); xa = xn; yn = xn * make_float4(coefn,coefn,coefn,coefn); ya = yn; #endif for (int Y = iHeight - 1; Y > -1; Y--) { float4 xc = rgbaUintToFloat4(*uiDataIn); float4 yc = (xn * a2) + (xa * a3) - (yn * b1) - (ya * b2); xa = xn; xn = xc; ya = yn; yn = yc; *uiDataOut = rgbaFloat4ToUint(rgbaUintToFloat4(*uiDataOut) + yc); uiDataIn -= iWidth; // move to previous row uiDataOut -= iWidth; // move to previous row } } void GPUGaussianFilterRGBA(const unsigned int* uiInput, unsigned int* uiOutput, unsigned int* d_BufIn, unsigned int* d_BufTmp, unsigned int* d_BufOut, const unsigned int uiImageWidth, const unsigned int uiImageHeight, const GaussParms* pGP) { #if USE_SIMPLE_FILTER float ema = pGP->ema; #else float a0 = pGP->a0; float a1 = pGP->a1; float a2 = pGP->a2; float a3 = pGP->a3; float b1 = pGP->b1; float b2 = pGP->b2; float coefp = pGP->coefp; float coefn = pGP->coefn; #endif unsigned int szBuffBytes = uiImageWidth * uiImageHeight * sizeof (unsigned int); cudaMemcpy(d_BufIn, uiInput, szBuffBytes, 
cudaMemcpyHostToDevice); // const int iTransposeBlockDim = 16; // initial height and width dimension of 2D transpose workgroup size_t szGaussLocalWork = 256; size_t szGaussGlobalWork = shrRoundUp((int)szGaussLocalWork, uiImageWidth); dim3 g_grid (szGaussGlobalWork / szGaussLocalWork); dim3 g_block (szGaussLocalWork); #if USE_SIMPLE_FILTER SimpleRecursiveRGBA<<<g_grid, g_block>>>(d_BufIn, d_BufTmp, uiImageWidth, uiImageHeight, ema); #else RecursiveRGBA<<<g_grid, g_block>>>(d_BufIn, d_BufTmp, uiImageWidth, uiImageHeight, a0, a1, a2, a3, b1, b2, coefp, coefn); #endif size_t szTransposeGlobalWork[2]; size_t szTransposeLocalWork[2] = {16, 16}; // Launch transpose kernel in 1st direction szTransposeGlobalWork[0] = shrRoundUp((int)szTransposeLocalWork[0], uiImageWidth); szTransposeGlobalWork[1] = shrRoundUp((int)szTransposeLocalWork[1], uiImageHeight); dim3 t1_grid (szTransposeGlobalWork[0] / szTransposeLocalWork[0], szTransposeGlobalWork[1] / szTransposeLocalWork[1]); dim3 t1_block (szTransposeLocalWork[0], szTransposeLocalWork[1]); Transpose<<<t1_grid, t1_block>>>(d_BufTmp, d_BufOut, uiImageWidth, uiImageHeight); // Reset Gaussian global work dimensions and variable args, then process in 2nd dimension // note width and height parameters flipped due to transpose szGaussGlobalWork = shrRoundUp((int)szGaussLocalWork, uiImageHeight); dim3 g2_grid (szGaussGlobalWork / szGaussLocalWork); #if USE_SIMPLE_FILTER SimpleRecursiveRGBA<<<g2_grid, g_block>>>(d_BufOut, d_BufTmp, uiImageHeight, uiImageWidth, ema); #else RecursiveRGBA<<<g2_grid, g_block>>>(d_BufOut, d_BufTmp, uiImageHeight, uiImageWidth, a0, a1, a2, a3, b1, b2, coefp, coefn); #endif // Reset transpose global work dimensions and variable args // note width and height parameters flipped due to 1st transpose szTransposeGlobalWork[0] = shrRoundUp((int)szTransposeLocalWork[0], uiImageHeight); szTransposeGlobalWork[1] = shrRoundUp((int)szTransposeLocalWork[1], uiImageWidth); dim3 t2_grid (szTransposeGlobalWork[0] / szTransposeLocalWork[0] , szTransposeGlobalWork[1] / szTransposeLocalWork[1]); //range<1> t2_lws (szTransposeLocalWork[1], szTransposeLobalWork[0]); // Launch transpose kernel in 2nd direction Transpose<<<t2_grid, t1_block>>>(d_BufTmp, d_BufOut, uiImageHeight, uiImageWidth); cudaMemcpy(uiOutput, d_BufOut, szBuffBytes, cudaMemcpyDeviceToHost); } int main(int argc, char** argv) { const float fSigma = 10.0f; // filter sigma (blur factor) const int iOrder = 0; // filter order unsigned int uiImageWidth = 1920; // Image width unsigned int uiImageHeight = 1080; // Image height unsigned int* uiInput = NULL; // Host buffer to hold input image data unsigned int* uiTemp = NULL; // Host buffer to hold intermediate image data unsigned int* uiOutput = NULL; // Host buffer to hold output image data shrLoadPPM4ub(argv[1], (unsigned char **)&uiInput, &uiImageWidth, &uiImageHeight); printf("Image Width = %i, Height = %i, bpp = %lu\n\n", uiImageWidth, uiImageHeight, sizeof(unsigned int)<<3); // Allocate intermediate and output host image buffers unsigned int szBuff = uiImageWidth * uiImageHeight; unsigned int szBuffBytes = szBuff * sizeof (unsigned int); uiTemp = (unsigned int*)malloc(szBuffBytes); uiOutput = (unsigned int*)malloc(szBuffBytes); printf("Allocate Host Image Buffers...\n"); // Allocate the source, intermediate and result buffer memory objects on the device GMEM unsigned int* d_BufIn; unsigned int* d_BufTmp; unsigned int* d_BufOut; cudaMalloc((void**)&d_BufIn, szBuffBytes); cudaMalloc((void**)&d_BufTmp, szBuffBytes); 
cudaMalloc((void**)&d_BufOut, szBuffBytes);
    // init filter coefficients
    GaussParms GP;  // filter parameter struct (no declaration of GP appears elsewhere in this excerpt)
    PreProcessGaussParms (fSigma, iOrder, &GP);
    // Warmup call to make sure the CUDA context is initialized
    GPUGaussianFilterRGBA(uiInput, uiOutput, d_BufIn, d_BufTmp, d_BufOut, uiImageWidth, uiImageHeight, &GP);
    // Start round-trip timer and process iCycles loops on the GPU
    const int iCycles = 150;
    printf("\nRunning GPUGaussianFilterRGBA for %d cycles...\n\n", iCycles);
    for (int i = 0; i < iCycles; i++) {
        GPUGaussianFilterRGBA(uiInput, uiOutput, d_BufIn, d_BufTmp, d_BufOut, uiImageWidth, uiImageHeight, &GP);
    }
    // Compute on host
    unsigned int* uiGolden = (unsigned int*)malloc(szBuffBytes);
    HostRecursiveGaussianRGBA(uiInput, uiTemp, uiGolden, uiImageWidth, uiImageHeight, &GP);
    printf("Comparing GPU Result to CPU Result...\n");
    shrBOOL bMatch = shrCompareuit(uiGolden, uiOutput, (uiImageWidth * uiImageHeight), 1.0f, 0.01f);
    printf("\nGPU Result %s CPU Result within tolerance...\n", (bMatch == shrTRUE) ? "matches" : "DOESN'T match");
    free(uiGolden); free(uiInput); free(uiTemp); free(uiOutput);
    cudaFree(d_BufIn); cudaFree(d_BufTmp); cudaFree(d_BufOut);
    return 0;
}
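To make the per-column recursive pass easier to see in isolation, here is a minimal, self-contained CUDA sketch of the same idea reduced to a single float channel: one thread per image column runs a forward exponential-average pass and then a reverse pass whose output is blended with the forward result, which is the access pattern used by SimpleRecursiveRGBA (column stride of iWidth). The kernel and variable names (ema_filter_columns, the 8x6 impulse image, a = 0.6f) are invented for illustration and are not part of the sample above; the transpose step and RGBA packing are deliberately omitted.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// One thread per column: forward EMA pass, then a reverse pass averaged with the
// forward output so the impulse response is symmetric along the column.
__global__ void ema_filter_columns(const float *in, float *out, int w, int h, float a)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= w) return;

    float yp = in[x];                          // previous output (forward pass)
    for (int y = 0; y < h; ++y) {
        float xc = in[y * w + x];
        float yc = xc + a * (yp - xc);         // same update as SimpleRecursiveRGBA
        out[y * w + x] = yc;
        yp = yc;
    }

    yp = in[(h - 1) * w + x];                  // previous output (reverse pass)
    for (int y = h - 1; y >= 0; --y) {
        float xc = in[y * w + x];
        float yc = xc + a * (yp - xc);
        out[y * w + x] = 0.5f * (out[y * w + x] + yc);  // blend forward and reverse results
        yp = yc;
    }
}

int main()
{
    const int w = 8, h = 6;
    std::vector<float> img(w * h, 0.0f);
    img[(h / 2) * w + w / 2] = 1.0f;           // single impulse in the middle

    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, w * h * sizeof(float));
    cudaMalloc(&d_out, w * h * sizeof(float));
    cudaMemcpy(d_in, img.data(), w * h * sizeof(float), cudaMemcpyHostToDevice);

    const float a = 0.6f;                      // smoothing coefficient in (0, 1)
    ema_filter_columns<<<(w + 255) / 256, 256>>>(d_in, d_out, w, h, a);
    cudaMemcpy(img.data(), d_out, w * h * sizeof(float), cudaMemcpyDeviceToHost);

    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) printf("%5.2f ", img[y * w + x]);
        printf("\n");
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}

Printing the filtered impulse shows the value spreading up and down its column, which is a quick way to sanity-check the forward/reverse recursion before adding the transpose passes that turn it into a full 2D blur.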
the_stack
#include <nvbench/create.cuh> #include <nvbench/type_list.cuh> #include "test_asserts.cuh" #include <fmt/format.h> //============================================================================== // Declare a couple benchmarks for testing: void DummyBench(nvbench::state &state) { state.skip("Skipping for testing."); } NVBENCH_BENCH(DummyBench).clear_devices(); using Ts = nvbench::type_list<void, nvbench::int8_t, nvbench::uint8_t>; using Us = nvbench::type_list<bool, nvbench::float32_t, nvbench::float64_t>; template <typename T, typename U> void TestBench(nvbench::state &state, nvbench::type_list<T, U>) { DummyBench(state); } NVBENCH_BENCH_TYPES(TestBench, NVBENCH_TYPE_AXES(Ts, Us)) .set_type_axes_names({"T", "U"}) .add_int64_axis("Ints", {42}) .add_int64_power_of_two_axis("PO2s", {3}) .add_float64_axis("Floats", {3.14}) .add_string_axis("Strings", {"S1"}) .clear_devices(); //============================================================================== namespace { [[nodiscard]] std::string states_to_string(const std::vector<nvbench::state> &states) { fmt::memory_buffer buffer; std::string table_format = "| {:^5} | {:^10} | {:^4} | {:^4} | {:^4} " "| {:^4} | {:^6} | {:^8} |\n"; fmt::format_to(buffer, "\n"); fmt::format_to(buffer, table_format, "State", "TypeConfig", "T", "U", "Ints", "PO2s", "Floats", "Strings"); std::size_t config = 0; for (const auto &state : states) { fmt::format_to(buffer, table_format, config++, state.get_type_config_index(), state.get_string("T"), state.get_string("U"), state.get_int64("Ints"), state.get_int64("PO2s"), state.get_float64("Floats"), std::string{"\'"} + state.get_string("Strings") + "'"); } return fmt::to_string(buffer); } // Expects the parser to have a single TestBench benchmark. Runs the benchmark // and returns the resulting states. [[nodiscard]] const auto& parser_to_states(nvbench::option_parser &parser) { const auto &benches = parser.get_benchmarks(); ASSERT(benches.size() == 1); const auto &bench = benches.front(); ASSERT(bench != nullptr); bench->run(); return bench->get_states(); } // Expects the parser to have a single TestBench benchmark. Runs the benchmark // and converts the generated states into a fingerprint string for regression // testing. 
[[nodiscard]] std::string parser_to_state_string(nvbench::option_parser &parser) { return states_to_string(parser_to_states(parser)); } } // namespace void test_empty() { { nvbench::option_parser parser; parser.parse({}); ASSERT(parser.get_benchmarks().size() == 2); ASSERT(parser.get_args().empty()); } { nvbench::option_parser parser; parser.parse(0, nullptr); ASSERT(parser.get_benchmarks().size() == 2); ASSERT(parser.get_args().empty()); } } void test_exec_name_tolerance() { nvbench::option_parser parser; parser.parse({"TestExec"}); ASSERT(parser.get_benchmarks().size() == 2); ASSERT(parser.get_args() == std::vector<std::string>{"TestExec"}); } void test_argc_argv_parse() { char const *const argv[] = {"TestExec"}; { nvbench::option_parser parser; parser.parse(1, argv); ASSERT(parser.get_benchmarks().size() == 2); ASSERT(parser.get_args() == std::vector<std::string>{"TestExec"}); } { nvbench::option_parser parser; parser.parse(0, nullptr); ASSERT(parser.get_benchmarks().size() == 2); ASSERT(parser.get_args().empty()); } } void test_invalid_option() { nvbench::option_parser parser; ASSERT_THROWS_ANY(parser.parse({"--not-a-real-option"})); } void test_benchmark_long() // --benchmark { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 42 | 8 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 42 | 8 | 3.14 | 'S1' | | 3 | 3 | I8 | bool | 42 | 8 | 3.14 | 'S1' | | 4 | 4 | I8 | F32 | 42 | 8 | 3.14 | 'S1' | | 5 | 5 | I8 | F64 | 42 | 8 | 3.14 | 'S1' | | 6 | 6 | U8 | bool | 42 | 8 | 3.14 | 'S1' | | 7 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'S1' | | 8 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "1"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_benchmark_short() // -b { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 42 | 8 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 42 | 8 | 3.14 | 'S1' | | 3 | 3 | I8 | bool | 42 | 8 | 3.14 | 'S1' | | 4 | 4 | I8 | F32 | 42 | 8 | 3.14 | 'S1' | | 5 | 5 | I8 | F64 | 42 | 8 | 3.14 | 'S1' | | 6 | 6 | U8 | bool | 42 | 8 | 3.14 | 'S1' | | 7 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'S1' | | 8 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"-b", "TestBench"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"-b", "1"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 2 | 8 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 2 | 8 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 2 | 8 | 3.14 | 'S1' | | 3 | 3 | I8 | bool | 2 | 8 | 3.14 | 'S1' | | 4 | 4 | I8 | F32 | 2 | 8 | 3.14 | 'S1' | | 5 | 5 | I8 | F64 | 2 | 8 | 3.14 | 'S1' | | 6 | 6 | U8 | bool | 2 | 8 | 3.14 | 'S1' | | 7 | 7 | U8 | F32 | 2 | 8 | 3.14 | 'S1' | | 8 | 8 
| U8 | F64 | 2 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ ] = 2 "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Ints=2"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 ]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints=[2]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 : 2 : 1 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints=[2:2]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 2 | 8 | 3.14 | 'S1' | | 1 | 0 | void | bool | 7 | 8 | 3.14 | 'S1' | | 2 | 1 | void | F32 | 2 | 8 | 3.14 | 'S1' | | 3 | 1 | void | F32 | 7 | 8 | 3.14 | 'S1' | | 4 | 2 | void | F64 | 2 | 8 | 3.14 | 'S1' | | 5 | 2 | void | F64 | 7 | 8 | 3.14 | 'S1' | | 6 | 3 | I8 | bool | 2 | 8 | 3.14 | 'S1' | | 7 | 3 | I8 | bool | 7 | 8 | 3.14 | 'S1' | | 8 | 4 | I8 | F32 | 2 | 8 | 3.14 | 'S1' | | 9 | 4 | I8 | F32 | 7 | 8 | 3.14 | 'S1' | | 10 | 5 | I8 | F64 | 2 | 8 | 3.14 | 'S1' | | 11 | 5 | I8 | F64 | 7 | 8 | 3.14 | 'S1' | | 12 | 6 | U8 | bool | 2 | 8 | 3.14 | 'S1' | | 13 | 6 | U8 | bool | 7 | 8 | 3.14 | 'S1' | | 14 | 7 | U8 | F32 | 2 | 8 | 3.14 | 'S1' | | 15 | 7 | U8 | F32 | 7 | 8 | 3.14 | 'S1' | | 16 | 8 | U8 | F64 | 2 | 8 | 3.14 | 'S1' | | 17 | 8 | U8 | F64 | 7 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 , 7 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints=[2,7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ ] = [ 2 : 7 : 5 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints=[2:7:5]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_pow2_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 128 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 42 | 
128 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 42 | 128 | 3.14 | 'S1' | | 3 | 3 | I8 | bool | 42 | 128 | 3.14 | 'S1' | | 4 | 4 | I8 | F32 | 42 | 128 | 3.14 | 'S1' | | 5 | 5 | I8 | F64 | 42 | 128 | 3.14 | 'S1' | | 6 | 6 | U8 | bool | 42 | 128 | 3.14 | 'S1' | | 7 | 7 | U8 | F32 | 42 | 128 | 3.14 | 'S1' | | 8 | 8 | U8 | F64 | 42 | 128 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = 7 "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s[pow2]=7"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 7 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s[pow2]=[7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 7 : 7 : 1 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s[pow2]=[7:7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_pow2_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 4 | 3.14 | 'S1' | | 1 | 0 | void | bool | 42 | 128 | 3.14 | 'S1' | | 2 | 1 | void | F32 | 42 | 4 | 3.14 | 'S1' | | 3 | 1 | void | F32 | 42 | 128 | 3.14 | 'S1' | | 4 | 2 | void | F64 | 42 | 4 | 3.14 | 'S1' | | 5 | 2 | void | F64 | 42 | 128 | 3.14 | 'S1' | | 6 | 3 | I8 | bool | 42 | 4 | 3.14 | 'S1' | | 7 | 3 | I8 | bool | 42 | 128 | 3.14 | 'S1' | | 8 | 4 | I8 | F32 | 42 | 4 | 3.14 | 'S1' | | 9 | 4 | I8 | F32 | 42 | 128 | 3.14 | 'S1' | | 10 | 5 | I8 | F64 | 42 | 4 | 3.14 | 'S1' | | 11 | 5 | I8 | F64 | 42 | 128 | 3.14 | 'S1' | | 12 | 6 | U8 | bool | 42 | 4 | 3.14 | 'S1' | | 13 | 6 | U8 | bool | 42 | 128 | 3.14 | 'S1' | | 14 | 7 | U8 | F32 | 42 | 4 | 3.14 | 'S1' | | 15 | 7 | U8 | F32 | 42 | 128 | 3.14 | 'S1' | | 16 | 8 | U8 | F64 | 42 | 4 | 3.14 | 'S1' | | 17 | 8 | U8 | F64 | 42 | 128 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 2 , 7 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s[pow2]=[2,7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ pow2 ] = [ 2 : 7 : 5 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; 
parser.parse({"--benchmark", "TestBench", "--axis", "PO2s[pow2]=[2:7:5]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_none_to_pow2_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 128 | 8 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 128 | 8 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 128 | 8 | 3.14 | 'S1' | | 3 | 3 | I8 | bool | 128 | 8 | 3.14 | 'S1' | | 4 | 4 | I8 | F32 | 128 | 8 | 3.14 | 'S1' | | 5 | 5 | I8 | F64 | 128 | 8 | 3.14 | 'S1' | | 6 | 6 | U8 | bool | 128 | 8 | 3.14 | 'S1' | | 7 | 7 | U8 | F32 | 128 | 8 | 3.14 | 'S1' | | 8 | 8 | U8 | F64 | 128 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = 7 "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints[pow2]=7"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 7 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints[pow2]=[7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 7 : 7 : 1 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints[pow2]=[7:7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_none_to_pow2_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 4 | 8 | 3.14 | 'S1' | | 1 | 0 | void | bool | 128 | 8 | 3.14 | 'S1' | | 2 | 1 | void | F32 | 4 | 8 | 3.14 | 'S1' | | 3 | 1 | void | F32 | 128 | 8 | 3.14 | 'S1' | | 4 | 2 | void | F64 | 4 | 8 | 3.14 | 'S1' | | 5 | 2 | void | F64 | 128 | 8 | 3.14 | 'S1' | | 6 | 3 | I8 | bool | 4 | 8 | 3.14 | 'S1' | | 7 | 3 | I8 | bool | 128 | 8 | 3.14 | 'S1' | | 8 | 4 | I8 | F32 | 4 | 8 | 3.14 | 'S1' | | 9 | 4 | I8 | F32 | 128 | 8 | 3.14 | 'S1' | | 10 | 5 | I8 | F64 | 4 | 8 | 3.14 | 'S1' | | 11 | 5 | I8 | F64 | 128 | 8 | 3.14 | 'S1' | | 12 | 6 | U8 | bool | 4 | 8 | 3.14 | 'S1' | | 13 | 6 | U8 | bool | 128 | 8 | 3.14 | 'S1' | | 14 | 7 | U8 | F32 | 4 | 8 | 3.14 | 'S1' | | 15 | 7 | U8 | F32 | 128 | 8 | 3.14 | 'S1' | | 16 | 8 | U8 | F64 | 4 | 8 | 3.14 | 'S1' | | 17 | 8 | U8 | F64 | 128 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 2 , 7 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints[pow2]=[2,7]"}); const auto test 
= parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Ints [ pow2 ] = [ 2 : 7 : 5 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Ints[pow2]=[2:7:5]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_pow2_to_none_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 2 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 42 | 2 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 42 | 2 | 3.14 | 'S1' | | 3 | 3 | I8 | bool | 42 | 2 | 3.14 | 'S1' | | 4 | 4 | I8 | F32 | 42 | 2 | 3.14 | 'S1' | | 5 | 5 | I8 | F64 | 42 | 2 | 3.14 | 'S1' | | 6 | 6 | U8 | bool | 42 | 2 | 3.14 | 'S1' | | 7 | 7 | U8 | F32 | 42 | 2 | 3.14 | 'S1' | | 8 | 8 | U8 | F64 | 42 | 2 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ ] = 2 "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s=2"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s=[2]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 : 2 : 1 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s=[2:2]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_int64_axis_pow2_to_none_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 2 | 3.14 | 'S1' | | 1 | 0 | void | bool | 42 | 7 | 3.14 | 'S1' | | 2 | 1 | void | F32 | 42 | 2 | 3.14 | 'S1' | | 3 | 1 | void | F32 | 42 | 7 | 3.14 | 'S1' | | 4 | 2 | void | F64 | 42 | 2 | 3.14 | 'S1' | | 5 | 2 | void | F64 | 42 | 7 | 3.14 | 'S1' | | 6 | 3 | I8 | bool | 42 | 2 | 3.14 | 'S1' | | 7 | 3 | I8 | bool | 42 | 7 | 3.14 | 'S1' | | 8 | 4 | I8 | F32 | 42 | 2 | 3.14 | 'S1' | | 9 | 4 | I8 | F32 | 42 | 7 | 3.14 | 'S1' | | 10 | 5 | I8 | F64 | 42 | 2 | 3.14 | 'S1' | | 11 | 5 | I8 | F64 | 42 | 7 | 3.14 | 'S1' | | 12 | 6 | U8 | bool | 42 | 2 | 3.14 | 'S1' | | 13 | 6 | U8 | bool | 42 | 7 | 3.14 | 'S1' | | 14 | 7 | U8 | F32 | 42 | 2 | 3.14 | 'S1' | | 15 | 7 | U8 | F32 | 42 | 7 | 3.14 | 'S1' | | 16 | 8 | U8 | F64 | 42 | 2 | 3.14 | 'S1' | | 17 | 8 | U8 | F64 | 42 | 7 | 3.14 | 'S1' | )expected"; { 
nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 , 7 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s=[2,7]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " PO2s [ ] = [ 2 : 7 : 5 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "PO2s=[2:7:5]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_float64_axis_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.5 | 'S1' | | 1 | 1 | void | F32 | 42 | 8 | 3.5 | 'S1' | | 2 | 2 | void | F64 | 42 | 8 | 3.5 | 'S1' | | 3 | 3 | I8 | bool | 42 | 8 | 3.5 | 'S1' | | 4 | 4 | I8 | F32 | 42 | 8 | 3.5 | 'S1' | | 5 | 5 | I8 | F64 | 42 | 8 | 3.5 | 'S1' | | 6 | 6 | U8 | bool | 42 | 8 | 3.5 | 'S1' | | 7 | 7 | U8 | F32 | 42 | 8 | 3.5 | 'S1' | | 8 | 8 | U8 | F64 | 42 | 8 | 3.5 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = 3.5 "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Floats=3.5"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Floats=[3.5]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 : 3.6 : 1 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Floats=[3.5:3.6]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_float64_axis_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.5 | 'S1' | | 1 | 0 | void | bool | 42 | 8 | 4.1 | 'S1' | | 2 | 1 | void | F32 | 42 | 8 | 3.5 | 'S1' | | 3 | 1 | void | F32 | 42 | 8 | 4.1 | 'S1' | | 4 | 2 | void | F64 | 42 | 8 | 3.5 | 'S1' | | 5 | 2 | void | F64 | 42 | 8 | 4.1 | 'S1' | | 6 | 3 | I8 | bool | 42 | 8 | 3.5 | 'S1' | | 7 | 3 | I8 | bool | 42 | 8 | 4.1 | 'S1' | | 8 | 4 | I8 | F32 | 42 | 8 | 3.5 | 'S1' | | 9 | 4 | I8 | F32 | 42 | 8 | 4.1 | 'S1' | | 10 | 5 | I8 | F64 | 42 | 8 | 3.5 | 'S1' | | 11 | 5 | I8 
| F64 | 42 | 8 | 4.1 | 'S1' | | 12 | 6 | U8 | bool | 42 | 8 | 3.5 | 'S1' | | 13 | 6 | U8 | bool | 42 | 8 | 4.1 | 'S1' | | 14 | 7 | U8 | F32 | 42 | 8 | 3.5 | 'S1' | | 15 | 7 | U8 | F32 | 42 | 8 | 4.1 | 'S1' | | 16 | 8 | U8 | F64 | 42 | 8 | 3.5 | 'S1' | | 17 | 8 | U8 | F64 | 42 | 8 | 4.1 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 , 4.1 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Floats=[3.5,4.1]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " Floats [ ] = [ 3.5 : 4.2 : 0.6 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", "Floats=[3.5:4.2:0.6]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_string_axis_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.14 | 'fo br' | | 1 | 1 | void | F32 | 42 | 8 | 3.14 | 'fo br' | | 2 | 2 | void | F64 | 42 | 8 | 3.14 | 'fo br' | | 3 | 3 | I8 | bool | 42 | 8 | 3.14 | 'fo br' | | 4 | 4 | I8 | F32 | 42 | 8 | 3.14 | 'fo br' | | 5 | 5 | I8 | F64 | 42 | 8 | 3.14 | 'fo br' | | 6 | 6 | U8 | bool | 42 | 8 | 3.14 | 'fo br' | | 7 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'fo br' | | 8 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'fo br' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Strings [ ] = fo br "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Strings=fo br"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Strings [ ] = [ fo br ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Strings=[fo br]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_string_axis_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.14 | 'fo br' | | 1 | 0 | void | bool | 42 | 8 | 3.14 | 'baz' | | 2 | 1 | void | F32 | 42 | 8 | 3.14 | 'fo br' | | 3 | 1 | void | F32 | 42 | 8 | 3.14 | 'baz' | | 4 | 2 | void | F64 | 42 | 8 | 3.14 | 'fo br' | | 5 | 2 | void | F64 | 42 | 8 | 3.14 | 'baz' | | 6 | 3 | I8 | bool | 42 | 8 | 3.14 | 'fo br' | | 7 | 3 | I8 | bool | 42 | 8 | 3.14 | 'baz' | | 8 | 4 | I8 | F32 | 42 | 8 | 3.14 | 'fo br' | | 9 | 4 | I8 | F32 | 42 | 8 | 3.14 | 'baz' | | 10 | 5 | I8 | F64 | 42 | 8 | 3.14 | 'fo br' | | 11 | 5 | I8 | F64 | 42 | 8 | 3.14 | 'baz' | | 12 | 6 | U8 | bool | 42 | 8 | 
3.14 | 'fo br' | | 13 | 6 | U8 | bool | 42 | 8 | 3.14 | 'baz' | | 14 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'fo br' | | 15 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'baz' | | 16 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'fo br' | | 17 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'baz' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " Strings [ ] = [ fo br , baz ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "Strings=[fo br,baz]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_type_axis_single() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 6 | U8 | bool | 42 | 8 | 3.14 | 'S1' | | 1 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'S1' | | 2 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " T [ ] = U8 "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "T=U8"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", " T [ ] = [ U8 ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "T=[U8]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_type_axis_multi() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 42 | 8 | 3.14 | 'S1' | | 1 | 1 | void | F32 | 42 | 8 | 3.14 | 'S1' | | 2 | 2 | void | F64 | 42 | 8 | 3.14 | 'S1' | | 3 | 6 | U8 | bool | 42 | 8 | 3.14 | 'S1' | | 4 | 7 | U8 | F32 | 42 | 8 | 3.14 | 'S1' | | 5 | 8 | U8 | F64 | 42 | 8 | 3.14 | 'S1' | )expected"; { nvbench::option_parser parser; parser.parse( {"--benchmark", "TestBench", "--axis", " T [ ] = [ U8, void ] "}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({"--benchmark", "TestBench", "--axis", "T=[void,U8]"}); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } void test_multi_axis() { const std::string ref = R"expected( | State | TypeConfig | T | U | Ints | PO2s | Floats | Strings | | 0 | 0 | void | bool | 2 | 4 | 0.25 | 'foo' | | 1 | 0 | void | bool | 5 | 4 | 0.25 | 'foo' | | 2 | 0 | void | bool | 2 | 32 | 0.25 | 'foo' | | 3 | 0 | void | bool | 5 | 32 | 0.25 | 'foo' | | 4 | 0 | void | bool | 2 | 256 | 0.25 | 'foo' | | 5 | 0 | void | bool | 5 | 256 | 0.25 | 'foo' | | 6 | 0 | void | bool | 2 | 4 | 0.5 | 'foo' | | 7 | 0 | void | bool | 5 | 4 | 0.5 | 'foo' | | 8 | 0 | void | bool | 2 | 32 | 0.5 | 'foo' | | 9 | 0 | void | bool | 5 | 32 | 0.5 | 'foo' | | 10 | 0 | void | bool | 2 | 256 | 0.5 | 'foo' | | 11 | 0 | void | bool | 5 | 256 | 0.5 | 'foo' | | 12 | 0 | void | bool 
| 2 | 4 | 0.75 | 'foo' | | 13 | 0 | void | bool | 5 | 4 | 0.75 | 'foo' | | 14 | 0 | void | bool | 2 | 32 | 0.75 | 'foo' | | 15 | 0 | void | bool | 5 | 32 | 0.75 | 'foo' | | 16 | 0 | void | bool | 2 | 256 | 0.75 | 'foo' | | 17 | 0 | void | bool | 5 | 256 | 0.75 | 'foo' | | 18 | 0 | void | bool | 2 | 4 | 1 | 'foo' | | 19 | 0 | void | bool | 5 | 4 | 1 | 'foo' | | 20 | 0 | void | bool | 2 | 32 | 1 | 'foo' | | 21 | 0 | void | bool | 5 | 32 | 1 | 'foo' | | 22 | 0 | void | bool | 2 | 256 | 1 | 'foo' | | 23 | 0 | void | bool | 5 | 256 | 1 | 'foo' | | 24 | 0 | void | bool | 2 | 4 | 0.25 | 'bar' | | 25 | 0 | void | bool | 5 | 4 | 0.25 | 'bar' | | 26 | 0 | void | bool | 2 | 32 | 0.25 | 'bar' | | 27 | 0 | void | bool | 5 | 32 | 0.25 | 'bar' | | 28 | 0 | void | bool | 2 | 256 | 0.25 | 'bar' | | 29 | 0 | void | bool | 5 | 256 | 0.25 | 'bar' | | 30 | 0 | void | bool | 2 | 4 | 0.5 | 'bar' | | 31 | 0 | void | bool | 5 | 4 | 0.5 | 'bar' | | 32 | 0 | void | bool | 2 | 32 | 0.5 | 'bar' | | 33 | 0 | void | bool | 5 | 32 | 0.5 | 'bar' | | 34 | 0 | void | bool | 2 | 256 | 0.5 | 'bar' | | 35 | 0 | void | bool | 5 | 256 | 0.5 | 'bar' | | 36 | 0 | void | bool | 2 | 4 | 0.75 | 'bar' | | 37 | 0 | void | bool | 5 | 4 | 0.75 | 'bar' | | 38 | 0 | void | bool | 2 | 32 | 0.75 | 'bar' | | 39 | 0 | void | bool | 5 | 32 | 0.75 | 'bar' | | 40 | 0 | void | bool | 2 | 256 | 0.75 | 'bar' | | 41 | 0 | void | bool | 5 | 256 | 0.75 | 'bar' | | 42 | 0 | void | bool | 2 | 4 | 1 | 'bar' | | 43 | 0 | void | bool | 5 | 4 | 1 | 'bar' | | 44 | 0 | void | bool | 2 | 32 | 1 | 'bar' | | 45 | 0 | void | bool | 5 | 32 | 1 | 'bar' | | 46 | 0 | void | bool | 2 | 256 | 1 | 'bar' | | 47 | 0 | void | bool | 5 | 256 | 1 | 'bar' | | 48 | 0 | void | bool | 2 | 4 | 0.25 | 'baz' | | 49 | 0 | void | bool | 5 | 4 | 0.25 | 'baz' | | 50 | 0 | void | bool | 2 | 32 | 0.25 | 'baz' | | 51 | 0 | void | bool | 5 | 32 | 0.25 | 'baz' | | 52 | 0 | void | bool | 2 | 256 | 0.25 | 'baz' | | 53 | 0 | void | bool | 5 | 256 | 0.25 | 'baz' | | 54 | 0 | void | bool | 2 | 4 | 0.5 | 'baz' | | 55 | 0 | void | bool | 5 | 4 | 0.5 | 'baz' | | 56 | 0 | void | bool | 2 | 32 | 0.5 | 'baz' | | 57 | 0 | void | bool | 5 | 32 | 0.5 | 'baz' | | 58 | 0 | void | bool | 2 | 256 | 0.5 | 'baz' | | 59 | 0 | void | bool | 5 | 256 | 0.5 | 'baz' | | 60 | 0 | void | bool | 2 | 4 | 0.75 | 'baz' | | 61 | 0 | void | bool | 5 | 4 | 0.75 | 'baz' | | 62 | 0 | void | bool | 2 | 32 | 0.75 | 'baz' | | 63 | 0 | void | bool | 5 | 32 | 0.75 | 'baz' | | 64 | 0 | void | bool | 2 | 256 | 0.75 | 'baz' | | 65 | 0 | void | bool | 5 | 256 | 0.75 | 'baz' | | 66 | 0 | void | bool | 2 | 4 | 1 | 'baz' | | 67 | 0 | void | bool | 5 | 4 | 1 | 'baz' | | 68 | 0 | void | bool | 2 | 32 | 1 | 'baz' | | 69 | 0 | void | bool | 5 | 32 | 1 | 'baz' | | 70 | 0 | void | bool | 2 | 256 | 1 | 'baz' | | 71 | 0 | void | bool | 5 | 256 | 1 | 'baz' | | 72 | 6 | U8 | bool | 2 | 4 | 0.25 | 'foo' | | 73 | 6 | U8 | bool | 5 | 4 | 0.25 | 'foo' | | 74 | 6 | U8 | bool | 2 | 32 | 0.25 | 'foo' | | 75 | 6 | U8 | bool | 5 | 32 | 0.25 | 'foo' | | 76 | 6 | U8 | bool | 2 | 256 | 0.25 | 'foo' | | 77 | 6 | U8 | bool | 5 | 256 | 0.25 | 'foo' | | 78 | 6 | U8 | bool | 2 | 4 | 0.5 | 'foo' | | 79 | 6 | U8 | bool | 5 | 4 | 0.5 | 'foo' | | 80 | 6 | U8 | bool | 2 | 32 | 0.5 | 'foo' | | 81 | 6 | U8 | bool | 5 | 32 | 0.5 | 'foo' | | 82 | 6 | U8 | bool | 2 | 256 | 0.5 | 'foo' | | 83 | 6 | U8 | bool | 5 | 256 | 0.5 | 'foo' | | 84 | 6 | U8 | bool | 2 | 4 | 0.75 | 'foo' | | 85 | 6 | U8 | bool | 5 | 4 | 0.75 | 'foo' | | 86 | 6 | U8 | bool | 2 | 32 | 0.75 | 'foo' | | 87 | 
6 | U8 | bool | 5 | 32 | 0.75 | 'foo' | | 88 | 6 | U8 | bool | 2 | 256 | 0.75 | 'foo' | | 89 | 6 | U8 | bool | 5 | 256 | 0.75 | 'foo' | | 90 | 6 | U8 | bool | 2 | 4 | 1 | 'foo' | | 91 | 6 | U8 | bool | 5 | 4 | 1 | 'foo' | | 92 | 6 | U8 | bool | 2 | 32 | 1 | 'foo' | | 93 | 6 | U8 | bool | 5 | 32 | 1 | 'foo' | | 94 | 6 | U8 | bool | 2 | 256 | 1 | 'foo' | | 95 | 6 | U8 | bool | 5 | 256 | 1 | 'foo' | | 96 | 6 | U8 | bool | 2 | 4 | 0.25 | 'bar' | | 97 | 6 | U8 | bool | 5 | 4 | 0.25 | 'bar' | | 98 | 6 | U8 | bool | 2 | 32 | 0.25 | 'bar' | | 99 | 6 | U8 | bool | 5 | 32 | 0.25 | 'bar' | | 100 | 6 | U8 | bool | 2 | 256 | 0.25 | 'bar' | | 101 | 6 | U8 | bool | 5 | 256 | 0.25 | 'bar' | | 102 | 6 | U8 | bool | 2 | 4 | 0.5 | 'bar' | | 103 | 6 | U8 | bool | 5 | 4 | 0.5 | 'bar' | | 104 | 6 | U8 | bool | 2 | 32 | 0.5 | 'bar' | | 105 | 6 | U8 | bool | 5 | 32 | 0.5 | 'bar' | | 106 | 6 | U8 | bool | 2 | 256 | 0.5 | 'bar' | | 107 | 6 | U8 | bool | 5 | 256 | 0.5 | 'bar' | | 108 | 6 | U8 | bool | 2 | 4 | 0.75 | 'bar' | | 109 | 6 | U8 | bool | 5 | 4 | 0.75 | 'bar' | | 110 | 6 | U8 | bool | 2 | 32 | 0.75 | 'bar' | | 111 | 6 | U8 | bool | 5 | 32 | 0.75 | 'bar' | | 112 | 6 | U8 | bool | 2 | 256 | 0.75 | 'bar' | | 113 | 6 | U8 | bool | 5 | 256 | 0.75 | 'bar' | | 114 | 6 | U8 | bool | 2 | 4 | 1 | 'bar' | | 115 | 6 | U8 | bool | 5 | 4 | 1 | 'bar' | | 116 | 6 | U8 | bool | 2 | 32 | 1 | 'bar' | | 117 | 6 | U8 | bool | 5 | 32 | 1 | 'bar' | | 118 | 6 | U8 | bool | 2 | 256 | 1 | 'bar' | | 119 | 6 | U8 | bool | 5 | 256 | 1 | 'bar' | | 120 | 6 | U8 | bool | 2 | 4 | 0.25 | 'baz' | | 121 | 6 | U8 | bool | 5 | 4 | 0.25 | 'baz' | | 122 | 6 | U8 | bool | 2 | 32 | 0.25 | 'baz' | | 123 | 6 | U8 | bool | 5 | 32 | 0.25 | 'baz' | | 124 | 6 | U8 | bool | 2 | 256 | 0.25 | 'baz' | | 125 | 6 | U8 | bool | 5 | 256 | 0.25 | 'baz' | | 126 | 6 | U8 | bool | 2 | 4 | 0.5 | 'baz' | | 127 | 6 | U8 | bool | 5 | 4 | 0.5 | 'baz' | | 128 | 6 | U8 | bool | 2 | 32 | 0.5 | 'baz' | | 129 | 6 | U8 | bool | 5 | 32 | 0.5 | 'baz' | | 130 | 6 | U8 | bool | 2 | 256 | 0.5 | 'baz' | | 131 | 6 | U8 | bool | 5 | 256 | 0.5 | 'baz' | | 132 | 6 | U8 | bool | 2 | 4 | 0.75 | 'baz' | | 133 | 6 | U8 | bool | 5 | 4 | 0.75 | 'baz' | | 134 | 6 | U8 | bool | 2 | 32 | 0.75 | 'baz' | | 135 | 6 | U8 | bool | 5 | 32 | 0.75 | 'baz' | | 136 | 6 | U8 | bool | 2 | 256 | 0.75 | 'baz' | | 137 | 6 | U8 | bool | 5 | 256 | 0.75 | 'baz' | | 138 | 6 | U8 | bool | 2 | 4 | 1 | 'baz' | | 139 | 6 | U8 | bool | 5 | 4 | 1 | 'baz' | | 140 | 6 | U8 | bool | 2 | 32 | 1 | 'baz' | | 141 | 6 | U8 | bool | 5 | 32 | 1 | 'baz' | | 142 | 6 | U8 | bool | 2 | 256 | 1 | 'baz' | | 143 | 6 | U8 | bool | 5 | 256 | 1 | 'baz' | )expected"; { nvbench::option_parser parser; parser.parse({ // clang-format off "--benchmark", "TestBench", "--axis", "T=[U8,void]", "--axis", "U=bool", "--axis", "Ints=[2:6:3]", "--axis", "PO2s[pow2]=[2:10:3]", "--axis", "Floats=[0.25:1:0.25]", "--axis", "Strings=[foo,bar,baz]", // clang-format on }); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } { nvbench::option_parser parser; parser.parse({ // clang-format off "-b", "TestBench", "-a", "Strings=[foo,bar,baz]", "-a", "U=bool", "-a", "Floats=[0.25:1:0.25]", "-a", "Ints=[2:6:3]", "-a", "PO2s[pow2]=[2:10:3]", "-a", "T=[U8,void]", // clang-format on }); const auto test = parser_to_state_string(parser); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } } // `--axis` affects the last `--benchmark`. 
An exception is thrown if there is // no benchmark specified for an axis. void test_axis_before_benchmark() { { nvbench::option_parser parser; ASSERT_THROWS_ANY(parser.parse({"--axis", "--benchmark"})); } { nvbench::option_parser parser; ASSERT_THROWS_ANY(parser.parse({"--axis", "-b"})); } { nvbench::option_parser parser; ASSERT_THROWS_ANY(parser.parse({"-a", "--benchmark"})); } { nvbench::option_parser parser; ASSERT_THROWS_ANY(parser.parse({"-a", "-b"})); } } void test_min_samples() { nvbench::option_parser parser; parser.parse( {"--benchmark", "DummyBench", "--min-samples", "12345"}); const auto& states = parser_to_states(parser); ASSERT(states.size() == 1); ASSERT(states[0].get_min_samples() == 12345); } void test_min_time() { nvbench::option_parser parser; parser.parse( {"--benchmark", "DummyBench", "--min-time", "12345e2"}); const auto& states = parser_to_states(parser); ASSERT(states.size() == 1); ASSERT(std::abs(states[0].get_min_time() - 12345e2) < 1.); } void test_max_noise() { nvbench::option_parser parser; parser.parse( {"--benchmark", "DummyBench", "--max-noise", "50.3"}); const auto& states = parser_to_states(parser); ASSERT(states.size() == 1); ASSERT(std::abs(states[0].get_max_noise() - 0.503) < 1.e-4); } void test_skip_time() { nvbench::option_parser parser; parser.parse( {"--benchmark", "DummyBench", "--skip-time", "12345e2"}); const auto& states = parser_to_states(parser); ASSERT(states.size() == 1); ASSERT(std::abs(states[0].get_skip_time() - 12345e2) < 1.); } void test_timeout() { nvbench::option_parser parser; parser.parse( {"--benchmark", "DummyBench", "--timeout", "12345e2"}); const auto& states = parser_to_states(parser); ASSERT(states.size() == 1); ASSERT(std::abs(states[0].get_timeout() - 12345e2) < 1.); } int main() try { test_empty(); test_exec_name_tolerance(); test_argc_argv_parse(); test_invalid_option(); test_benchmark_long(); test_benchmark_short(); test_int64_axis_single(); test_int64_axis_multi(); test_int64_axis_pow2_single(); test_int64_axis_pow2_multi(); test_int64_axis_none_to_pow2_single(); test_int64_axis_none_to_pow2_multi(); test_int64_axis_pow2_to_none_single(); test_int64_axis_pow2_to_none_multi(); test_float64_axis_single(); test_float64_axis_multi(); test_string_axis_single(); test_string_axis_multi(); test_type_axis_single(); test_type_axis_multi(); test_multi_axis(); test_axis_before_benchmark(); test_min_samples(); test_min_time(); test_max_noise(); test_skip_time(); test_timeout(); return 0; } catch (std::exception &err) { fmt::print(stderr, "{}", err.what()); return 1; }
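The reference tables above all rely on nvbench's `--axis` value syntax, where `[start:stop:stride]` expands to an inclusive range (for example `Ints=[2:7:5]` yields 2 and 7, and `Floats=[0.25:1:0.25]` yields 0.25, 0.5, 0.75, 1). The following plain C++ snippet is only an illustration of that expansion rule, not nvbench's actual parser; the helper name expand_int_range is invented for this example.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Expand "start:stop[:stride]" (or a single value) into an inclusive list,
// mirroring the behaviour exercised by the tests above, e.g. "2:7:5" -> {2, 7}.
std::vector<std::int64_t> expand_int_range(const std::string &spec)
{
    std::int64_t start = 0, stop = 0, stride = 1;
    const auto c1 = spec.find(':');
    if (c1 == std::string::npos) {             // single value, e.g. "2"
        start = stop = std::stoll(spec);
    } else {
        start = std::stoll(spec.substr(0, c1));
        const auto c2 = spec.find(':', c1 + 1);
        stop = std::stoll(spec.substr(c1 + 1, c2 - c1 - 1));
        if (c2 != std::string::npos)
            stride = std::stoll(spec.substr(c2 + 1));
    }
    std::vector<std::int64_t> values;
    for (std::int64_t v = start; v <= stop; v += stride)
        values.push_back(v);
    return values;
}

int main()
{
    for (const char *spec : {"2", "2:2", "2:7:5", "2:6:3"}) {
        printf("%s ->", spec);
        for (auto v : expand_int_range(spec))
            printf(" %lld", static_cast<long long>(v));
        printf("\n");
    }
    return 0;
}

Running it prints the same integer sequences that appear in the Ints column of the expected tables (for instance "2:6:3" gives 2 and 5, matching test_multi_axis).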
the_stack
__device__ __constant__ real psifnb[22] = { 1.f,-.5f,.166666666666666667f, -.0333333333333333333f,.0238095238095238095f, -.0333333333333333333f,.0757575757575757576f, -.253113553113553114f,1.16666666666666667f,-7.09215686274509804f, 54.9711779448621554f,-529.124242424242424f,6192.1231884057971f, -86580.2531135531136f,1425517.16666666667f,-27298231.067816092f, 601580873.900642368f,-15116315767.0921569f,429614643061.166667f, -13711655205088.3328f,488332318973593.167f,-19296579341940068.1f } ; /* DECK PSIFN */ /* Subroutine */ __device__ int slatec_psifn(real *x, integer *n, integer *kode, integer *m, real *ans, integer *nz, integer *ierr) { /* Initialized data */ const integer nmax = 100; /* System generated locals */ integer i__1, i__2; real r__1, r__2; /* Builtin functions */ /* Local variables */ integer i__, j, k; real s, t, t1, t2, fn, ta; integer mm, nn, np; real fx, tk; integer mx, nx; real xm, tt, xq, den, arg, fln, fnp, r1m4, r1m5, fns, eps, rln, tol, xln, trm[22], tss, tst, elim, xinc, xmin, tols, xdmy, yint, trmr[100], rxsq, slope, xdmln, wdtol; /* ***BEGIN PROLOGUE PSIFN */ /* ***PURPOSE Compute derivatives of the Psi function. */ /* ***LIBRARY SLATEC */ /* ***CATEGORY C7C */ /* ***TYPE SINGLE PRECISION (PSIFN-S, DPSIFN-D) */ /* ***KEYWORDS DERIVATIVES OF THE GAMMA FUNCTION, POLYGAMMA FUNCTION, */ /* PSI FUNCTION */ /* ***AUTHOR Amos, D. E., (SNLA) */ /* ***DESCRIPTION */ /* The following definitions are used in PSIFN: */ /* Definition 1 */ /* PSI(X) = d/dx (ln(GAMMA(X)), the first derivative of */ /* the LOG GAMMA function. */ /* Definition 2 */ /* K K */ /* PSI(K,X) = d /dx (PSI(X)), the K-th derivative of PSI(X). */ /* ___________________________________________________________________ */ /* PSIFN computes a sequence of SCALED derivatives of */ /* the PSI function; i.e. for fixed X and M it computes */ /* the M-member sequence */ /* ((-1)**(K+1)/GAMMA(K+1))*PSI(K,X) */ /* for K = N,...,N+M-1 */ /* where PSI(K,X) is as defined above. For KODE=1, PSIFN returns */ /* the scaled derivatives as described. KODE=2 is operative only */ /* when K=0 and in that case PSIFN returns -PSI(X) + LN(X). That */ /* is, the logarithmic behavior for large X is removed when KODE=1 */ /* and K=0. When sums or differences of PSI functions are computed */ /* the logarithmic terms can be combined analytically and computed */ /* separately to help retain significant digits. */ /* Note that CALL PSIFN(X,0,1,1,ANS) results in */ /* ANS = -PSI(X) */ /* Input */ /* X - Argument, X .gt. 0.0E0 */ /* N - First member of the sequence, 0 .le. N .le. 100 */ /* N=0 gives ANS(1) = -PSI(X) for KODE=1 */ /* -PSI(X)+LN(X) for KODE=2 */ /* KODE - Selection parameter */ /* KODE=1 returns scaled derivatives of the PSI */ /* function. */ /* KODE=2 returns scaled derivatives of the PSI */ /* function EXCEPT when N=0. In this case, */ /* ANS(1) = -PSI(X) + LN(X) is returned. */ /* M - Number of members of the sequence, M .ge. 1 */ /* Output */ /* ANS - A vector of length at least M whose first M */ /* components contain the sequence of derivatives */ /* scaled according to KODE. */ /* NZ - Underflow flag */ /* NZ.eq.0, A normal return */ /* NZ.ne.0, Underflow, last NZ components of ANS are */ /* set to zero, ANS(M-K+1)=0.0, K=1,...,NZ */ /* IERR - Error flag */ /* IERR=0, A normal return, computation completed */ /* IERR=1, Input error, no computation */ /* IERR=2, Overflow, X too small or N+M-1 too */ /* large or both */ /* IERR=3, Error, N too large. 
Dimensioned */ /* array TRMR(NMAX) is not large enough for N */ /* The nominal computational accuracy is the maximum of unit */ /* roundoff (=R1MACH(4)) and 1.0E-18 since critical constants */ /* are given to only 18 digits. */ /* DPSIFN is the Double Precision version of PSIFN. */ /* *Long Description: */ /* The basic method of evaluation is the asymptotic expansion */ /* for large X.ge.XMIN followed by backward recursion on a two */ /* term recursion relation */ /* W(X+1) + X**(-N-1) = W(X). */ /* This is supplemented by a series */ /* SUM( (X+K)**(-N-1) , K=0,1,2,... ) */ /* which converges rapidly for large N. Both XMIN and the */ /* number of terms of the series are calculated from the unit */ /* roundoff of the machine environment. */ /* ***REFERENCES Handbook of Mathematical Functions, National Bureau */ /* of Standards Applied Mathematics Series 55, edited */ /* by M. Abramowitz and I. A. Stegun, equations 6.3.5, */ /* 6.3.18, 6.4.6, 6.4.9 and 6.4.10, pp.258-260, 1964. */ /* D. E. Amos, A portable Fortran subroutine for */ /* derivatives of the Psi function, Algorithm 610, ACM */ /* Transactions on Mathematical Software 9, 4 (1983), */ /* pp. 494-502. */ /* ***ROUTINES CALLED I1MACH, R1MACH */ /* ***REVISION HISTORY (YYMMDD) */ /* 820601 DATE WRITTEN */ /* 890531 Changed all specific intrinsics to generic. (WRB) */ /* 890531 REVISION DATE from Version 3.2 */ /* 891214 Prologue converted to Version 4.0 format. (BAB) */ /* 920501 Reformatted the REFERENCES section. (WRB) */ /* ***END PROLOGUE PSIFN */ /* Parameter adjustments */ --ans; /* Function Body */ /* ----------------------------------------------------------------------- */ /* BERNOULLI NUMBERS */ /* ----------------------------------------------------------------------- */ /* ***FIRST EXECUTABLE STATEMENT PSIFN */ *ierr = 0; *nz = 0; if (*x <= 0.f) { *ierr = 1; } if (*n < 0) { *ierr = 1; } if (*kode < 1 || *kode > 2) { *ierr = 1; } if (*m < 1) { *ierr = 1; } if (*ierr != 0) { return 0; } mm = *m; /* Computing MIN */ i__1 = -i1mach_(&c__12), i__2 = i1mach_(&c__13); nx = min(i__1,i__2); r1m5 = r1mach_(&c__5); r1m4 = r1mach_(&c__4) * .5f; wdtol = dmax(r1m4,5e-19f); /* ----------------------------------------------------------------------- */ /* ELIM = APPROXIMATE EXPONENTIAL OVER AND UNDERFLOW LIMIT */ /* ----------------------------------------------------------------------- */ elim = (nx * r1m5 - 3.f) * 2.302f; xln = log(*x); L41: nn = *n + mm - 1; fn = (real) nn; fnp = fn + 1.f; t = fnp * xln; /* ----------------------------------------------------------------------- */ /* OVERFLOW AND UNDERFLOW TEST FOR SMALL AND LARGE X */ /* ----------------------------------------------------------------------- */ if (dabs(t) > elim) { goto L290; } if (*x < wdtol) { goto L260; } /* ----------------------------------------------------------------------- */ /* COMPUTE XMIN AND THE NUMBER OF TERMS OF THE SERIES, FLN+1 */ /* ----------------------------------------------------------------------- */ rln = r1m5 * i1mach_(&c__11); rln = dmin(rln,18.06f); fln = dmax(rln,3.f) - 3.f; yint = fln * .4f + 3.5f; slope = fln * (fln * 6.038e-4f + .008677f) + .21f; xm = yint + slope * fn; mx = (integer) xm + 1; xmin = (real) mx; if (*n == 0) { goto L50; } xm = rln * -2.302f - dmin(0.f,xln); fns = (real) (*n); arg = xm / fns; arg = dmin(0.f,arg); eps = exp(arg); xm = 1.f - eps; if (dabs(arg) < .001f) { xm = -arg; } fln = *x * xm / eps; xm = xmin - *x; if (xm > 7.f && fln < 15.f) { goto L200; } L50: xdmy = *x; xdmln = xln; xinc = 0.f; if (*x >= xmin) { 
goto L60; } nx = (integer) (*x); xinc = xmin - nx; xdmy = *x + xinc; xdmln = log(xdmy); L60: /* ----------------------------------------------------------------------- */ /* GENERATE W(N+MM-1,X) BY THE ASYMPTOTIC EXPANSION */ /* ----------------------------------------------------------------------- */ t = fn * xdmln; t1 = xdmln + xdmln; t2 = t + xdmln; /* Computing MAX */ r__1 = dabs(t), r__2 = dabs(t1), r__1 = max(r__1,r__2), r__2 = dabs(t2); tk = dmax(r__1,r__2); if (tk > elim) { goto L380; } tss = exp(-t); tt = .5f / xdmy; t1 = tt; tst = wdtol * tt; if (nn != 0) { t1 = tt + 1.f / fn; } rxsq = 1.f / (xdmy * xdmy); ta = rxsq * .5f; t = fnp * ta; s = t * psifnb[2]; if (dabs(s) < tst) { goto L80; } tk = 2.f; for (k = 4; k <= 22; ++k) { t = t * ((tk + fn + 1.f) / (tk + 1.f)) * ((tk + fn) / (tk + 2.f)) * rxsq; trm[k - 1] = t * psifnb[k - 1]; if ((r__1 = trm[k - 1], dabs(r__1)) < tst) { goto L80; } s += trm[k - 1]; tk += 2.f; /* L70: */ } L80: s = (s + t1) * tss; if (xinc == 0.f) { goto L100; } /* ----------------------------------------------------------------------- */ /* BACKWARD RECUR FROM XDMY TO X */ /* ----------------------------------------------------------------------- */ nx = (integer) xinc; np = nn + 1; if (nx > nmax) { goto L390; } if (nn == 0) { goto L160; } xm = xinc - 1.f; fx = *x + xm; /* ----------------------------------------------------------------------- */ /* THIS LOOP SHOULD NOT BE CHANGED. FX IS ACCURATE WHEN X IS SMALL */ /* ----------------------------------------------------------------------- */ i__1 = nx; for (i__ = 1; i__ <= i__1; ++i__) { i__2 = -np; trmr[i__ - 1] = pow_ri(&fx, &i__2); s += trmr[i__ - 1]; xm += -1.f; fx = *x + xm; /* L90: */ } L100: ans[mm] = s; if (fn == 0.f) { goto L180; } /* ----------------------------------------------------------------------- */ /* GENERATE LOWER DERIVATIVES, J.LT.N+MM-1 */ /* ----------------------------------------------------------------------- */ if (mm == 1) { return 0; } i__1 = mm; for (j = 2; j <= i__1; ++j) { fnp = fn; fn += -1.f; tss *= xdmy; t1 = tt; if (fn != 0.f) { t1 = tt + 1.f / fn; } t = fnp * ta; s = t * psifnb[2]; if (dabs(s) < tst) { goto L120; } tk = fnp + 3.f; for (k = 4; k <= 22; ++k) { trm[k - 1] = trm[k - 1] * fnp / tk; if ((r__1 = trm[k - 1], dabs(r__1)) < tst) { goto L120; } s += trm[k - 1]; tk += 2.f; /* L110: */ } L120: s = (s + t1) * tss; if (xinc == 0.f) { goto L140; } if (fn == 0.f) { goto L160; } xm = xinc - 1.f; fx = *x + xm; i__2 = nx; for (i__ = 1; i__ <= i__2; ++i__) { trmr[i__ - 1] *= fx; s += trmr[i__ - 1]; xm += -1.f; fx = *x + xm; /* L130: */ } L140: mx = mm - j + 1; ans[mx] = s; if (fn == 0.f) { goto L180; } /* L150: */ } return 0; /* ----------------------------------------------------------------------- */ /* RECURSION FOR N = 0 */ /* ----------------------------------------------------------------------- */ L160: i__1 = nx; for (i__ = 1; i__ <= i__1; ++i__) { s += 1.f / (*x + nx - i__); /* L170: */ } L180: if (*kode == 2) { goto L190; } ans[1] = s - xdmln; return 0; L190: if (xdmy == *x) { return 0; } xq = xdmy / *x; ans[1] = s - log(xq); return 0; /* ----------------------------------------------------------------------- */ /* COMPUTE BY SERIES (X+K)**(-(N+1)) , K=0,1,2,... 
*/ /* ----------------------------------------------------------------------- */ L200: nn = (integer) fln + 1; np = *n + 1; t1 = (fns + 1.f) * xln; t = exp(-t1); s = t; den = *x; i__1 = nn; for (i__ = 1; i__ <= i__1; ++i__) { den += 1.f; i__2 = -np; trm[i__ - 1] = pow_ri(&den, &i__2); s += trm[i__ - 1]; /* L210: */ } ans[1] = s; if (*n != 0) { goto L220; } if (*kode == 2) { ans[1] = s + xln; } L220: if (mm == 1) { return 0; } /* ----------------------------------------------------------------------- */ /* GENERATE HIGHER DERIVATIVES, J.GT.N */ /* ----------------------------------------------------------------------- */ tol = wdtol / 5.f; i__1 = mm; for (j = 2; j <= i__1; ++j) { t /= *x; s = t; tols = t * tol; den = *x; i__2 = nn; for (i__ = 1; i__ <= i__2; ++i__) { den += 1.f; trm[i__ - 1] /= den; s += trm[i__ - 1]; if (trm[i__ - 1] < tols) { goto L240; } /* L230: */ } L240: ans[j] = s; /* L250: */ } return 0; /* ----------------------------------------------------------------------- */ /* SMALL X.LT.UNIT ROUND OFF */ /* ----------------------------------------------------------------------- */ L260: i__1 = -(*n) - 1; ans[1] = pow_ri(x, &i__1); if (mm == 1) { goto L280; } k = 1; i__1 = mm; for (i__ = 2; i__ <= i__1; ++i__) { ans[k + 1] = ans[k] / *x; ++k; /* L270: */ } L280: if (*n != 0) { return 0; } if (*kode == 2) { ans[1] += xln; } return 0; L290: if (t > 0.f) { goto L380; } *nz = 0; *ierr = 2; return 0; L380: ++(*nz); ans[mm] = 0.f; --mm; if (mm == 0) { return 0; } goto L41; L390: *ierr = 3; *nz = 0; return 0; } /* psifn_ */
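For KODE=1, N=0, M=1 the routine returns ANS(1) = -PSI(X), i.e. the negated digamma function. The snippet below is an independent host-side sanity check, not part of the SLATEC port: it evaluates psi(x) with the usual recurrence psi(x) = psi(x+1) - 1/x followed by the standard asymptotic series, so its output can be compared against a call to slatec_psifn. The helper name digamma_ref is invented here, and only the N=0 case is covered.

#include <cmath>
#include <cstdio>

// Reference digamma: push x above ~6 with psi(x) = psi(x+1) - 1/x, then apply
// psi(x) ~ ln(x) - 1/(2x) - 1/(12x^2) + 1/(120x^4) - 1/(252x^6).
double digamma_ref(double x)
{
    double acc = 0.0;
    while (x < 6.0) {          // recurrence until the asymptotic series is accurate
        acc -= 1.0 / x;
        x += 1.0;
    }
    const double inv = 1.0 / x, inv2 = inv * inv;
    return acc + std::log(x) - 0.5 * inv
               - inv2 * (1.0 / 12.0 - inv2 * (1.0 / 120.0 - inv2 / 252.0));
}

int main()
{
    // With KODE=1, N=0, M=1 slatec_psifn yields ANS(1) = -PSI(X); print the
    // reference value a device-side call could be checked against.
    for (double x : {0.5, 1.0, 2.5, 10.0})
        std::printf("x = %5.2f   -psi(x) ~ % .9f\n", x, -digamma_ref(x));
    return 0;
}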
the_stack
#include "Image.h" #include <iostream> #include <fstream> using namespace std; #include "ErrorCode.h" // Host 静态方法:newImage(创建图像) __host__ int ImageBasicOp::newImage(Image **outimg) { ImageCuda *resimgCud; // 对应于返回的 outimg 的 ImageCuda 型数据。 // 检查装载输出图像的指针是否为 NULL。 if (outimg == NULL) return NULL_POINTER; // 申请图像元数据的空间。 resimgCud = new ImageCuda; // 初始化图像上的数据为空图像。 resimgCud->imgMeta.width = 0; resimgCud->imgMeta.height = 0; resimgCud->imgMeta.roiX1 = 0; resimgCud->imgMeta.roiY1 = 0; resimgCud->imgMeta.roiX2 = 0; resimgCud->imgMeta.roiY2 = 0; resimgCud->imgMeta.imgData = NULL; resimgCud->deviceId = -1; resimgCud->pitchBytes = 0; // 将 Image 赋值给输出参数。 *outimg = &(resimgCud->imgMeta); // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:deleteImage(销毁图像) __host__ int ImageBasicOp::deleteImage(Image *inimg) { // 检查图像的指针是否为 NULL。 if (inimg == NULL) return NULL_POINTER; // 根据输入参数的 Image 型指针,得到对应的 ImageCuda 型数据。 ImageCuda *inimgCud = IMAGE_CUDA(inimg); // 检查图像所在的地址空间是否合法,如果图像所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (inimgCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 释放图像数据,即像素数据。 if (inimg->imgData == NULL || inimg->width == 0 || inimg->height == 0 || inimgCud->pitchBytes == 0) { // 如果输入图像是空的,则不进行图像数据释放操作(因为本来也没有数据可被 // 释放)。 // Do Nothing; } if (inimgCud->deviceId < 0) { // 对于数据存储于 Host 内存,直接利用 delete 关键字释放图像数据。 delete[] inimg->imgData; } else if (inimgCud->deviceId == curdevid) { // 对于数据存储于当前 Device 内存中,则直接利用 cudaFree 接口释放该图像 // 数据。 cudaFree(inimg->imgData); } else { // 对于数据存储于非当前 Device 内存中,则需要首先切换设备,将该设备作为 // 当前 Device,然后释放之,最后还需要将设备切换回来以保证后续处理的正 // 确性。 cudaSetDevice(inimgCud->deviceId); cudaFree(inimg->imgData); cudaSetDevice(curdevid); } // 释放图像的元数据。 delete inimgCud; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:makeAtCurrentDevice(在当前 Device 内存中构建数据) __host__ int ImageBasicOp::makeAtCurrentDevice(Image *img, size_t width, size_t height) { // 检查输入图像是否为 NULL if (img == NULL) return NULL_POINTER; // 检查给定的图像的长宽是否合法 if (width < 1 || height < 1) return INVALID_DATA; // 检查图像是否为空图像 if (img->imgData != NULL) return UNMATCH_IMG; // 获取 img 对应的 ImageCuda 型数据。 ImageCuda *imgCud = IMAGE_CUDA(img); // 在当前的 Device 上申请存储指定尺寸图片所需要的内存空间。 cudaError_t cuerrcode; cuerrcode = cudaMallocPitch((void **)(&img->imgData), &imgCud->pitchBytes, width * sizeof (unsigned char), height); if (cuerrcode != cudaSuccess) { img->imgData = NULL; return CUDA_ERROR; } // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 修改图像的元数据。其中 ROI 被设为整幅图片。 img->width = width; img->height = height; img->roiX1 = 0; img->roiY1 = 0; img->roiX2 = width; img->roiY2 = height; imgCud->deviceId = curdevid; // 由于 pitchBytes 已经在 cudaMallocPitch 中赋值,此处则不再对其进行赋值。 // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:makeAtHost(在 Host 内存中构建数据) __host__ int ImageBasicOp::makeAtHost(Image *img, size_t width, size_t height) { // 检查输入图像是否为 NULL if (img == NULL) return NULL_POINTER; // 检查给定的图像的长宽是否合法 if (width < 1 || height < 1) return INVALID_DATA; // 检查图像是否为空图像 if (img->imgData != NULL) return UNMATCH_IMG; // 获取 img 对应的 ImageCuda 型数据。 ImageCuda *imgCud = IMAGE_CUDA(img); // 为图像数据在 Host 内存中申请空间 img->imgData = new unsigned char[width * height]; if (img->imgData == NULL) return OUT_OF_MEM; // 设置图像中的元数据 img->width = width; img->height = height; img->roiX1 = 0; img->roiY1 = 0; img->roiX2 = width; img->roiY2 = height; imgCud->deviceId = -1; imgCud->pitchBytes = width; // 处理完毕,退出 return NO_ERROR; } // Host 静态方法:readFromFile(从文件读取图像) __host__ int 
ImageBasicOp::readFromFile(const char *filepath, Image *outimg) { // 检查文件路径和图像是否为 NULL。 if (filepath == NULL || outimg == NULL) return NULL_POINTER; // 根据输入参数的 Image 型指针,得到对应的 ImageCuda 型数据。 ImageCuda *outimgCud = IMAGE_CUDA(outimg); // 检查图像所在的地址空间是否合法,如果图像所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetErrorString(cudaGetDeviceCount(&devcnt)); if (outimgCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 打开图像文件。 ifstream imgfile(filepath, ios::in | ios::binary); if (!imgfile) return NO_FILE; // 读取文件头部的文件类型信息,如果文件的头两个字节不是 BM,则说明该文件不 // 是 BMP 文件,则报错。 char headstr[2] = { '\0' }; imgfile.seekg(0x0000, ios::beg); imgfile.read(headstr, 2); if (headstr[0] != 'B' || headstr[1] != 'M') return WRONG_FILE; // 读取文件中的 BPP 字段(每个像素占用的比特数量),如果 BPP 的值不为 8,说 // 明该文件不是一个灰度 BMP 图像,则报错。 unsigned short bpp = 0; imgfile.seekg(0x001C, ios::beg); imgfile.read(reinterpret_cast<char *>(&bpp), 2); if (bpp != 8) return WRONG_FILE; // 从文件中读取图像宽度和高度信息。 unsigned int width = 0, height = 0; imgfile.seekg(0x0012, ios::beg); imgfile.read(reinterpret_cast<char *>(&width), 4); imgfile.read(reinterpret_cast<char *>(&height), 4); // 如果图像的尺寸不合法,则报错退出。 if (width < 1 || height < 1) return WRONG_FILE; // 从文件中读取像素数据所在的文件中的偏移位置。 unsigned int dataoff = 0; imgfile.seekg(0x000A, ios::beg); imgfile.read(reinterpret_cast<char *>(&dataoff), 4); // 获取存放图像像素数据的 Host 内存空间。本着尽量重用的思想,如果原来的图像 // 内存数据是存储于 Host 内存,且尺寸和新的图像尺寸一致时,则不重新申请 // Host 内存空间,直接利用原来的空间存放新的图像数据。 unsigned char *imgdata = outimg->imgData; bool reusedata = true; if (outimg->imgData == NULL || outimgCud->deviceId >= 0 || outimg->width != width || outimg->height != height) { imgdata = new unsigned char[width * height]; // 如果没有申请到新的数据,则报错。 if (imgdata == NULL) return OUT_OF_MEM; reusedata = false; } // 计算 BMP 文件中每行的 Padding 尺寸。在 BMP 文件中,每行的数据都需要保证 // 4 字节对齐。如果某行的宽度不是 4 的整数倍(注意,灰度图中每行的像素个数 // 同每行实际数据占用的字节数是相等的),则需要补充一些字节,使其达到 4 的 // 整数倍。 unsigned int dummybytes = (4 - (width & 3)) & 3; // 将文件指针移动到数据存储的开始位置 imgfile.seekg(dataoff, ios::beg); // 由于 BMP 采用了右手坐标,即图像的左下角点为原点,整个图像位于第一象限, // 而我们系统内部使用的是左手坐标,即图像的左上角点为原点,整个图像亦位于第 // 一象限。这样,BMP 文件中的第一行图像数据,其时是最后一行数据,因此指针初 // 始指向图像的最后一行。 unsigned char *pdata = imgdata + (height - 1) * width; // 读取图像中的各行的图像数据。 for (int r = 0; r < height; r ++) { // 读取图像数据(每次读取一行的数据) imgfile.read(reinterpret_cast<char *>(pdata), width); // 舍弃掉每行结尾的填充字节 if (dummybytes > 0) imgfile.seekg(dummybytes, ios::cur); // 由于 BMP 图像采用右手坐标,因此指针需要向前移动。 pdata -= width; } // 到此为止,图像数据读取完毕,这是可以安全的释放掉图像原来的数据。一直拖到 // 最后才释放原来的数据,正是为了防止一旦图像读取失败,不至于让系统进入一个 // 混乱的状态,因为原来的数据还是处于一个可用的状态。 if (reusedata == false || outimg->imgData != NULL) { if (outimgCud->deviceId < 0) { // 如果原来的数据存放于 Host 内存中,则使用 delete 关键字释放。 delete[] outimg->imgData; } else { // 如果原来的数据存放于 Device 内存中,则首先调到对应的 Device,然 // 后使用 cudaFree 释放掉内存。 cudaSetDevice(outimgCud->deviceId); cudaFree(outimg->imgData); cudaSetDevice(curdevid); } } // 为图像赋值新的元数据。这里 ROI 被重置为整幅图像。 outimg->width = width; outimg->height = height; outimg->roiX1 = 0; outimg->roiY1 = 0; outimg->roiX2 = width; outimg->roiY2 = height; outimg->imgData = imgdata; outimgCud->deviceId = -1; outimgCud->pitchBytes = width; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:writeToFile(将图像写入文件) __host__ int ImageBasicOp::writeToFile(const char *filepath, Image *inimg) { // 检查文件路径和图像是否为 NULL。 if (filepath == NULL || inimg == NULL) return NULL_POINTER; // 打开需要写入的文件。 ofstream imgfile(filepath, ios::out | ios::binary); if (!imgfile) return NO_FILE; // 根据输入参数的 Image 型指针,得到对应的 ImageCuda 型数据。 ImageCuda 
*inimgCud = IMAGE_CUDA(inimg); // 将图片的数据拷贝回 Host 内存中,这样图片就可以被下面的代码所读取,然后将 // 图像的数据写入到磁盘中。这里需要注意的是,安排图片的拷贝过程在文件打开之 // 后是因为,如果一旦文件打开失败,则不会改变图像在内存中的存储状态,这可能 // 会对后续处理更加有利。 int errcode; errcode = ImageBasicOp::copyToHost(inimg); if (errcode < 0) return errcode; // 计算一些和 BMP 图像相关的参数: // 计算 BMP 文件中每行的 Padding 尺寸。在 BMP 文件中,每行的数据都需要保证 // 4 字节对齐。如果某行的宽度不是 4 的整数倍(注意,灰度图中每行的像素个数 // 同每行实际数据占用的字节数是相等的),则需要补充一些字节,使其达到 4 的 // 整数倍。 unsigned int dummybytes = (4 - (inimg->width & 3)) & 3; // 计算在磁盘上存储图片总共需要的字节数量,这个数量包括了上面提到的 Padding // 的尺寸。 unsigned int datalen = inimg->height * (inimg->width + dummybytes); // 在存储到磁盘中后,像素数据实际的起始位置。因为 BMP 文件存在信息头,实际 // 的像素数据是在这些信息头的后面的。对于系统中使用到的灰度图像来说,信息头 // 包含了两个部分,最前面的是图像的元数据(如图像的宽度、高度;数据的尺寸等 // 信息),紧随其后的是颜色表,颜色表共有 256 个条目,对应了 256 级灰度,每 // 个条目包含了 4 个字节,这四个字节分别为 RGBA 四个通道的亮度值。 unsigned int dataoff = 4 * 256 + 54; // 向文件中写入 BMP 头信息 unsigned short ustemp; // 这三个变量用来保存头信息中的临时域的值,三个变量 unsigned int uitemp; // 用来处理不同的数据类型。 int sitemp; // 文件类型头 ustemp = 0x4D42; imgfile.write(reinterpret_cast<char *>(&ustemp), 2); // 文件长度 uitemp = datalen + dataoff; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // 保留区段甲 ustemp = 0; imgfile.write(reinterpret_cast<char *>(&ustemp), 2); // 保留区段乙 ustemp = 0; imgfile.write(reinterpret_cast<char *>(&ustemp), 2); // 像素数据在文件中开始的位置 uitemp = dataoff; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // 图像信息头尺寸 uitemp = 40; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // 图像宽度 sitemp = inimg->width; imgfile.write(reinterpret_cast<char *>(&sitemp), 4); // 图像高度 sitemp = inimg->height; imgfile.write(reinterpret_cast<char *>(&sitemp), 4); // 图像层次数量 ustemp = 1; imgfile.write(reinterpret_cast<char *>(&ustemp), 2); // BPP(每像素的比特数量) ustemp = 8; imgfile.write(reinterpret_cast<char *>(&ustemp), 2); // 压缩算法 uitemp = 0; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // 图像尺寸 uitemp = datalen; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // 每公尺的像素数量(X-方向) sitemp = 0; imgfile.write(reinterpret_cast<char *>(&sitemp), 4); // 每公尺的像素数量(Y-方向) sitemp = 0; imgfile.write(reinterpret_cast<char *>(&sitemp), 4); // ClrUsed uitemp = 256; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // ClrImportant uitemp = 0; imgfile.write(reinterpret_cast<char *>(&uitemp), 4); // 写入颜色表信息 // 颜色信息共有 256 个条目,对应了 256 个灰度级;每个条目包含了 4 个颜色通 // 道的数据。由于图像是灰度图像,因此对于灰度为 i 的对应的颜色值为 < i, i, // i, FF >。 unsigned char coloritem[4] = { 0x00, 0x00, 0x00, 0xFF }; for (int i = 0; i < 256; i++) { coloritem[0] = coloritem[1] = coloritem[2] = i; imgfile.write(reinterpret_cast<char *>(coloritem), 4); } // 写入图像像素数据 char dummybuf[4] = { '\0' }; // 每行末尾的 Padding 的补白数据。 // 由于 BMP 采用了右手坐标,即图像的左下角点为原点,整个图像位于第一象限, // 而我们系统内部使用的是左手坐标,即图像的左上角点为原点,整个图像亦位于第 // 一象限。这样,BMP 文件中的第一行图像数据,其时是最后一行数据,因此指针初 // 始指向图像的最后一行。 unsigned char *pdata = inimg->imgData + (inimg->height - 1) * inimg->width; // 逐行写入图像的像素数据。 for (int r = 0; r < inimg->height; r++) { // 写入某行的像素数据。 imgfile.write(reinterpret_cast<char *>(pdata), inimg->width); // 写入为了 Padding 的补白数据。 if (dummybytes > 0) imgfile.write(dummybuf, dummybytes); // 由于 BMP 图像采用右手坐标,因此指针需要向前移动。 pdata -= inimgCud->pitchBytes; } // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将图像拷贝到当前 Device 内存上) __host__ int ImageBasicOp::copyToCurrentDevice(Image *img) { // 检查图像是否为 NULL。 if (img == NULL) return NULL_POINTER; // 根据输入参数的 Image 型指针,得到对应的 ImageCuda 型数据。 ImageCuda *imgCud = IMAGE_CUDA(img); // 检查图像所在的地址空间是否合法,如果图像所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (imgCud->deviceId >= devcnt) return 
OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果图像是一个不包含数据的空图像,则报错。 if (img->imgData == NULL || img->width == 0 || img->height == 0 || imgCud->pitchBytes == 0) return UNMATCH_IMG; // 对于不同的情况,将图像数据拷贝到当前设备上。 if (imgCud->deviceId < 0) { // 如果图像的数据位于 Host 内存上,则需要在当前 Device 的内存空间上申请 // 空间,然后将 Host 内存上的数据进行 Padding 后拷贝到当前 Device 上。 unsigned char *devptr; // 新的数据空间,在当前 Device 上。 size_t pitch; // Padding 后的每行尺寸 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前设备上申请空间,使用 Pitch 版本的申请函数,用来进行 Padding。 cuerrcode = cudaMallocPitch((void **)(&devptr), &pitch, img->width * sizeof (unsigned char), img->height); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 进行 Padding 并拷贝数据到当前 Device 上。注意,这里 img->pitchBytes // == img->width。 cuerrcode = cudaMemcpy2D(devptr, pitch, img->imgData, imgCud->pitchBytes, img->width * sizeof (unsigned char), img->height, cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 释放掉原来存储于 Host 内存上的图像数据。 delete[] img->imgData; // 更新图像数据,把新的在当前 Device 上申请的数据和相关数据写入图像元数 // 据中。 img->imgData = devptr; imgCud->deviceId = curdevid; imgCud->pitchBytes = pitch; // 操作完毕,返回。 return NO_ERROR; } else if (imgCud->deviceId != curdevid) { // 对于数据存在其他 Device 的情况,仍旧要在当前 Device 上申请数据空间, // 并从另一个 Device 上拷贝数据到新申请的当前 Device 的数据空间中。 unsigned char *devptr; // 新申请的当前 Device 上的数据。 size_t datasize = imgCud->pitchBytes * img->height; // 数据尺寸。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前 Device 上申请空间。 cuerrcode = cudaMalloc((void **)(&devptr), datasize); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 将数据从图像原来的存储位置拷贝到当前的 Device 上。 cuerrcode = cudaMemcpyPeer(devptr, curdevid, img->imgData, imgCud->deviceId, datasize); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 释放掉图像在原来的 Device 上的数据。 cudaFree(img->imgData); // 将新的图像数据信息写入到图像元数据中。 img->imgData = devptr; imgCud->deviceId = curdevid; // 操作完成,返回。 return NO_ERROR; } // 对于其他情况,即图像数据本来就在当前 Device 上,则直接返回,不进行任何的 // 操作。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将图像拷贝到当前 Device 内存上) __host__ int ImageBasicOp::copyToCurrentDevice(Image *srcimg, Image *dstimg) { // 检查输入图像是否为 NULL。 if (srcimg == NULL) return NULL_POINTER; // 如果输出图像为 NULL,或者输出图像和输入图像为同一各图像,则调用 In-place // 版本的函数。 if (dstimg == NULL || dstimg == srcimg) return copyToCurrentDevice(srcimg); // 获取 srcimg 和 dstimg 对应的 ImageCuda 型指针。 ImageCuda *srcimgCud = IMAGE_CUDA(srcimg); ImageCuda *dstimgCud = IMAGE_CUDA(dstimg); // 用来存放旧的 dstimg 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 ImageCuda olddstimgCud = *dstimgCud; // 旧的 dstimg 数据 bool reusedata = true; // 记录是否重用了原来的图像数据空间。 // 该值为 ture,则原来的数据空间被重 // 用,不需要在之后释放数据,否则需要 // 在最后释放旧的空间。 // 如果源图像是一个空图像,则不进行任何操作,直接报错。 if (srcimg->imgData == NULL || srcimg->width == 0 || srcimg->height == 0 || srcimgCud->pitchBytes == 0) return INVALID_DATA; // 检查图像所在的地址空间是否合法,如果图像所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srcimgCud->deviceId >= devcnt || dstimgCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标图像中存在有数据,则需要根据情况,若原来的数据不存储在当前的 // Device 上,或者即使存储在当前的 Device 上,但数据尺寸不匹配,则需要释放 // 掉原来申请的空间,以便重新申请合适的内存空间。此处不进行真正的释放操作, // 其目的在于当后续操作出现错误时,可以很快的恢复 dstimg 中原来的信息,使得 // 整个系统不会处于一个混乱的状态,本函数会在最后,确定 dstimg 被成功的更换 // 为了新的数据以后,才会真正的将原来的图像数据释放掉。 if (dstimgCud->deviceId != curdevid) { // 对于数据存在 Host 或其他的 Device 上,则直接释放掉原来的数据空间。 reusedata = 0; dstimg->imgData = NULL; } else if (!(((srcimgCud->deviceId < 0 && srcimg->width == dstimg->width) || 
dstimgCud->pitchBytes == srcimgCud->pitchBytes) && srcimg->height == dstimg->height)) { // 对于数据存在于当前 Device 上,则需要检查数据的尺寸是否和源图像相匹 // 配。检查的标准包括:要求源图像的 Padding 后的行宽度和目标图像的相 // 同,源图像和目标图像的高度相同;如果源图像是存储在 Host 内存中的,则 // 仅要求源图像和目标图像的宽度相同即可。如果目标图像和源图像的尺寸不匹 // 配则仍旧需要释放目标图像原来的数据空间。 reusedata = 0; dstimg->imgData = NULL; } // 将目标图像的尺寸更改为源图像的尺寸。 dstimg->width = srcimg->width; dstimg->height = srcimg->height; // 将目标图像的 ROI 更改为源图像的 ROI。 dstimg->roiX1 = srcimg->roiX1; dstimg->roiY1 = srcimg->roiY1; dstimg->roiX2 = srcimg->roiX2; dstimg->roiY2 = srcimg->roiY2; // 更改目标图像的数据存储位置为当前 Device。 dstimgCud->deviceId = curdevid; // 将图像数据从源图像中拷贝到目标图像中。 if (srcimgCud->deviceId < 0) { // 如果源图像数据存储于 Host 内存,则使用 cudaMemcpy2D 进行 Padding 形 // 式的拷贝。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 如果目标图像的 imgData == NULL,说明目标图像原本要么是一个空图像,要 // 么目标图像原本的数据空间不合适,需要重新申请。这时,需要为目标图像重 // 新在当前 Device 上申请一个合适的数据空间。 if (dstimg->imgData == NULL) { cuerrcode = cudaMallocPitch((void **)(&dstimg->imgData), &dstimgCud->pitchBytes, dstimg->width * sizeof (unsigned char), dstimg->height); if (cuerrcode != cudaSuccess) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标图像数据 // 恢复到目标图像中,以保证系统接下的操作不至于混乱。 *dstimgCud = olddstimgCud; return CUDA_ERROR; } } // 使用 cudaMemcpy2D 进行 Padding 形式的拷贝。 cuerrcode = cudaMemcpy2D(dstimg->imgData, dstimgCud->pitchBytes, srcimg->imgData, srcimgCud->pitchBytes, srcimg->width * sizeof (unsigned char), srcimg->height, cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标图像数据恢复到目 // 标图像中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。 if (!reusedata) cudaFree(dstimg->imgData); *dstimgCud = olddstimgCud; return CUDA_ERROR; } } else { // 如果源图像数据存储于 Device 内存(无论是当前 Device 还是其他的 // Device),都是用端到端的拷贝。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 size_t datasize = srcimgCud->pitchBytes * srcimg->height; // 如果目标图像需要申请数据空间,则进行申请。 if (dstimg->imgData == NULL) { cuerrcode = cudaMalloc((void **)(&dstimg->imgData), datasize); if (cuerrcode != cudaSuccess) { // 如果发生错误,则需要首先恢复旧的图像数据,之后报错。恢复旧的 // 图像数据以防止系统进入混乱状态。 *dstimgCud = olddstimgCud; return CUDA_ERROR; } } // 更新目标图像的 Padding 尺寸与源图像相同。注意,因为源图像也存储在 // Device 上,在 Device 上的数据都是经过 Padding 的,又因为 // cudaMemcpyPeer 方法没有提供 Pitch 版本接口,所以,我们这里直接借用源 // 图像的 Padding 尺寸。 dstimgCud->pitchBytes = srcimgCud->pitchBytes; // 使用 cudaMemcpyPeer 实现两个 Device (可以为同一个 Device)间的数据 // 拷贝,将源图像在 Device 上的数据信息复制到目标图像中。 cuerrcode = cudaMemcpyPeer(dstimg->imgData, curdevid, srcimg->imgData, srcimgCud->deviceId, datasize); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标图像数据恢复到目 // 标图像中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。 if (!reusedata) cudaFree(dstimg->imgData); *dstimgCud = olddstimgCud; return CUDA_ERROR; } } // 到此步骤已经说明新的图像数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddstimgCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddstimgCud.imgMeta.imgData != NULL) { if (olddstimgCud.deviceId < 0) { // 如果旧数据空间是 Host 内存上的,则需要无条件释放。 delete[] olddstimgCud.imgMeta.imgData; } else if (olddstimgCud.deviceId != curdevid) { // 如果旧数据空间不是当前 Device 内存上的其他 Device 内存上的数据, // 则也需要无条件的释放。 cudaSetDevice(olddstimgCud.deviceId); cudaFree(olddstimgCud.imgMeta.imgData); cudaSetDevice(curdevid); } else if (!reusedata) { // 如果旧数据就在当前的 Device 内存上,则对于 reusedata 未置位的情 // 况进行释放,因为一旦置位,旧的数据空间就被用于承载新的数据,则不 // 能释放。 cudaFree(olddstimgCud.imgMeta.imgData); } } return NO_ERROR; } // Host 静态方法:copyToHost(将图像拷贝到 Host 内存上) __host__ int ImageBasicOp::copyToHost(Image *img) { // 检查图像是否为 NULL。 if (img == NULL) return NULL_POINTER; // 根据输入参数的 Image 型指针,得到对应的 ImageCuda 型数据。 ImageCuda *imgCud = IMAGE_CUDA(img); // 
检查图像所在的地址空间是否合法,如果图像所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (imgCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果图像是一个不包含数据的空图像,则报错。 if (img->imgData == NULL || img->width == 0 || img->height == 0 || imgCud->pitchBytes == 0) return UNMATCH_IMG; // 对于不同的情况,将图像数据拷贝到当前设备上。 if (imgCud->deviceId < 0) { // 如果图像位于 Host 内存上,则不需要进行任何操作。 return NO_ERROR; } else { // 如果图像的数据位于 Device 内存上,则需要在 Host 的内存空间上申请空 // 间,然后将数据消除 Padding 后拷贝到 Host 上。 unsigned char *hostptr; // 新的数据空间,在 Host 上。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在 Host 上申请空间。 hostptr = new unsigned char[img->width * img->height]; if (hostptr == NULL) return OUT_OF_MEM; // 将设备切换到数据所在的 Device 上。 cudaSetDevice(imgCud->deviceId); // 消除 Padding 并拷贝数据 cuerrcode = cudaMemcpy2D(hostptr, img->width, img->imgData, imgCud->pitchBytes, img->width, img->height, cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete[] hostptr; return CUDA_ERROR; } // 释放掉原来存储于 Device 内存上的图像数据。 cudaFree(img->imgData); // 对 Device 内存的操作完毕,将设备切换回当前 Device。 cudaSetDevice(curdevid); // 更新图像数据,把新的在当前 Device 上申请的数据和相关数据写入图像元数 // 据中。 img->imgData = hostptr; imgCud->deviceId = -1; imgCud->pitchBytes = img->width; // 操作完毕,返回。 return NO_ERROR; } // 程序永远也不会到达这个分支,因此如果到达这个分支,则说明系统紊乱。对于多 // 数编译器来说,会对此句报出不可达语句的 Warning,因此这里将其注释掉,以防 // 止不必要的 Warning。 //return UNKNOW_ERROR; } // Host 静态方法:copyToHost(将图像拷贝到 Host 内存上) __host__ int ImageBasicOp::copyToHost(Image *srcimg, Image *dstimg) { // 检查输入图像是否为 NULL。 if (srcimg == NULL) return NULL_POINTER; // 如果输出图像为 NULL 或者和输入图像同为一个图像,则调用对应的 In-place 版 // 本的函数。 if (dstimg == NULL || dstimg == srcimg) return copyToHost(srcimg); // 获取 srcimg 和 dstimg 对应的 ImageCuda 型指针。 ImageCuda *srcimgCud = IMAGE_CUDA(srcimg); ImageCuda *dstimgCud = IMAGE_CUDA(dstimg); // 用来存放旧的 dstimg 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 ImageCuda olddstimgCud = *dstimgCud; // 旧的 dstimg 数据 bool reusedata = true; // 记录是否重用了原来的图像数据空间。 // 该值为 true,则原来的数据空间被重 // 用,不需要在之后释放数据,否则需要 // 释放旧的空间。 // 如果源图像是一个空图像,则不进行任何操作,直接报错。 if (srcimg->imgData == NULL || srcimg->width == 0 || srcimg->height == 0 || srcimgCud->pitchBytes == 0) return INVALID_DATA; // 检查图像所在的地址空间是否合法,如果图像所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srcimgCud->deviceId >= devcnt || dstimgCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标图像中存在有数据,则需要根据情况,若原来的数据不存储在 Host 上, // 或者即使存储在 Host 上,但数据尺寸不匹配,则需要释放掉原来申请的空间,以 // 便重新申请合适的内存空间。此处不进行真正的释放操作,其目的在于当后续操作 // 出现错误时,可以很快的恢复 dstimg 中原来的信息,使得整个系统不会处于一个 // 混乱的状态,本函数会在最后,确定 dstimg 被成功的更换为了新的数据以后,才 // 会真正的将原来的图像数据释放掉。 if (dstimgCud->deviceId >= 0) { // 对于数据存在于 Device 上,则亦直接释放掉原来的数据空间。 reusedata = 0; dstimg->imgData = NULL; } else if (!(srcimg->width == dstimg->width && srcimg->height == dstimg->height)) { // 对于数据存在于 Host 上,则需要检查数据的尺寸是否和源图像相匹配。检查 // 的标准:源图像和目标图像的尺寸相同时,可重用原来的空间。 reusedata = 0; dstimg->imgData = NULL; } // 将目标图像的尺寸更改为源图像的尺寸。 dstimg->width = srcimg->width; dstimg->height = srcimg->height; // 将目标图像的 ROI 更改为源图像的 ROI。 dstimg->roiX1 = srcimg->roiX1; dstimg->roiY1 = srcimg->roiY1; dstimg->roiX2 = srcimg->roiX2; dstimg->roiY2 = srcimg->roiY2; // 更改目标图像的数据存储位置为 Host。 dstimgCud->deviceId = -1; // 由于 Host 内存上的数据不使用 Padding,因此设置 Padding 尺寸为图像的宽 // 度。 dstimgCud->pitchBytes = dstimg->width; // 如果目标图像的 imgData == NULL,说明目标图像原本要么是一个空图像,要么目 // 标图像原本的数据空间不合适,需要重新申请。这时,需要为目标图像重新在 // Host 
上申请一个合适的数据空间。 if (dstimg->imgData == NULL) { dstimg->imgData = new unsigned char[srcimg->width * srcimg->height]; if (dstimg->imgData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标图像数据 // 恢复到目标图像中,以保证系统接下的操作不至于混乱。 *dstimgCud = olddstimgCud; return OUT_OF_MEM; } } // 将图像数据从源图像中拷贝到目标图像中。 if (srcimgCud->deviceId < 0) { // 如果源图像数据存储于 Host 内存,则直接使用 C 标准支持库中的 memcpy // 完成拷贝。 // 将 srcimg 内的图像数据拷贝到 dstimg 中。memcpy 不返回错误,因此,没 // 有进行错误检查。 memcpy(dstimg->imgData, srcimg->imgData, srcimg->width * srcimg->height * sizeof (unsigned char)); } else { // 如果源图像数据存储于 Device 内存(无论是当前 Device 还是其他的 // Device),都是 2D 形式的拷贝,并消除 Padding。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 首先切换到 srcimg 图像数据所在的 Device,以方便进行内存操作。 cudaSetDevice(srcimgCud->deviceId); // 这里使用 cudaMemcpy2D 将 srcimg 中处于 Device 上的数据拷贝到 dstimg // 中位于 Host 的内存空间上面,该拷贝会同时消除 Padding。 cuerrcode = cudaMemcpy2D(dstimg->imgData, dstimgCud->pitchBytes, srcimg->imgData, srcimgCud->pitchBytes, srcimg->width, srcimg->height, cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标图像数据恢复到目 // 标图像中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。最后,还需要把 Device 切换回来,以免整个程序乱套。 if (!reusedata) delete[] dstimg->imgData; *dstimgCud = olddstimgCud; cudaSetDevice(curdevid); return CUDA_ERROR; } // 对内存操作完毕后,将设备切换回当前的 Device。 cudaSetDevice(curdevid); } // 到此步骤已经说明新的图像数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddstimgCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddstimgCud.imgMeta.imgData != NULL) { if (olddstimgCud.deviceId > 0) { // 如果旧数据是存储于 Device 内存上的数据,则需要无条件的释放。 cudaSetDevice(olddstimgCud.deviceId); cudaFree(olddstimgCud.imgMeta.imgData); cudaSetDevice(curdevid); } else if (!reusedata) { // 如果旧数据就在 Host 内存上,则对于 reusedata 未置位的情况进行释 // 放,因为一旦置位,旧的数据空间就被用于承载新的数据,则不能释放。 delete[] olddstimgCud.imgMeta.imgData; } } // 处理完毕,退出。 return NO_ERROR; }
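// A minimal host-side usage sketch of the ImageBasicOp interface defined above.  The file
// names and the kernel step are illustrative only; all calls and error codes (NO_ERROR,
// ErrorCode.h) are the ones used in this file.
int imageRoundTripDemo()
{
    Image *img = NULL;
    int err = ImageBasicOp::newImage(&img);                  // allocate empty image metadata
    if (err != NO_ERROR)
        return err;

    err = ImageBasicOp::readFromFile("input.bmp", img);      // load an 8-bit grayscale BMP into Host memory
    if (err == NO_ERROR)
        err = ImageBasicOp::copyToCurrentDevice(img);        // pitched upload to the current Device

    if (err == NO_ERROR) {
        // ... launch kernels on img->imgData, using IMAGE_CUDA(img)->pitchBytes as the row pitch ...
        err = ImageBasicOp::writeToFile("output.bmp", img);  // writeToFile copies the data back to Host itself
    }

    ImageBasicOp::deleteImage(img);                          // frees Host or Device pixel data plus metadata
    return err;
}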
#include <thrust/sort.h> //headers in local files #include "lidar_point_pillars/postprocess_cuda.h" __global__ void filter_kernel(const float* box_preds, const float* cls_preds, const float* dir_preds, const int* anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* filtered_box, float* filtered_score, int* filtered_dir, float* box_for_nms, int* filter_count, const float FLOAT_MIN, const float FLOAT_MAX, const float score_threshold, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE) { // boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r int tid = threadIdx.x + blockIdx.x * blockDim.x; //sigmoid funciton float score = 1/(1+expf(-cls_preds[tid])); if(anchor_mask[tid] == 1 && score > score_threshold) { int counter = atomicAdd(filter_count, 1); float za = dev_anchors_pz[tid] + dev_anchors_dz[tid]/2; //decode network output float diagonal = sqrtf(dev_anchors_dx[tid]*dev_anchors_dx[tid] + dev_anchors_dy[tid]*dev_anchors_dy[tid]); float box_px = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 0] * diagonal + dev_anchors_px[tid]; float box_py = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 1] * diagonal + dev_anchors_py[tid]; float box_pz = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 2] * dev_anchors_dz[tid] + za; float box_dx = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 3]) * dev_anchors_dx[tid]; float box_dy = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 4]) * dev_anchors_dy[tid]; float box_dz = expf(box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 5]) * dev_anchors_dz[tid]; float box_ro = box_preds[tid*NUM_OUTPUT_BOX_FEATURE + 6] + dev_anchors_ro[tid]; box_pz = box_pz - box_dz/2; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 0] = box_px; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 1] = box_py; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 2] = box_pz; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 3] = box_dx; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 4] = box_dy; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 5] = box_dz; filtered_box[counter*NUM_OUTPUT_BOX_FEATURE + 6] = box_ro; filtered_score[counter] = score; int direction_label; if(dir_preds[tid*2 + 0] < dir_preds[tid*2 + 1]) { direction_label = 1; } else { direction_label = 0; } filtered_dir[counter] = direction_label; //convrt normal box(normal boxes: x, y, z, w, l, h, r) to box(xmin, ymin, xmax, ymax) for nms calculation //First: dx, dy -> box(x0y0, x0y1, x1y0, x1y1) float corners[NUM_3D_BOX_CORNERS_MACRO] = {float(-0.5*box_dx), float(-0.5*box_dy), float(-0.5*box_dx), float( 0.5*box_dy), float( 0.5*box_dx), float( 0.5*box_dy), float( 0.5*box_dx), float(-0.5*box_dy)}; //Second: Rotate, Offset and convert to point(xmin. 
ymin, xmax, ymax) float rotated_corners[NUM_3D_BOX_CORNERS_MACRO]; float offset_corners[NUM_3D_BOX_CORNERS_MACRO]; float sin_yaw = sinf(box_ro); float cos_yaw = cosf(box_ro); float xmin = FLOAT_MAX; float ymin = FLOAT_MAX; float xmax = FLOAT_MIN; float ymax = FLOAT_MIN; for(size_t i = 0; i < NUM_BOX_CORNERS; i++) { rotated_corners[i*2 + 0] = cos_yaw*corners[i*2 + 0] - sin_yaw*corners[i*2 + 1]; rotated_corners[i*2 + 1] = sin_yaw*corners[i*2 + 0] + cos_yaw*corners[i*2 + 1]; offset_corners[i*2 + 0] = rotated_corners[i*2 + 0] + box_px; offset_corners[i*2 + 1] = rotated_corners[i*2 + 1] + box_py; xmin = fminf(xmin, offset_corners[i*2 + 0]); ymin = fminf(ymin, offset_corners[i*2 + 1]); xmax = fmaxf(xmin, offset_corners[i*2 + 0]); ymax = fmaxf(ymax, offset_corners[i*2 + 1]); } // box_for_nms(num_box, 4) box_for_nms[counter*NUM_BOX_CORNERS + 0] = xmin; box_for_nms[counter*NUM_BOX_CORNERS + 1] = ymin; box_for_nms[counter*NUM_BOX_CORNERS + 2] = xmax; box_for_nms[counter*NUM_BOX_CORNERS + 3] = ymax; } } __global__ void sort_boxes_by_indexes_kernel(float* filtered_box, int* filtered_dir, float* box_for_nms, int* indexes, int filter_count, float* sorted_filtered_boxes, int* sorted_filtered_dir, float* sorted_box_for_nms, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < filter_count) { int sort_index = indexes[tid]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 0] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 0]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 1] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 1]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 2] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 2]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 3] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 3]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 4] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 4]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 5] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 5]; sorted_filtered_boxes[tid*NUM_OUTPUT_BOX_FEATURE + 6] = filtered_box[sort_index*NUM_OUTPUT_BOX_FEATURE + 6]; sorted_filtered_dir[tid] = filtered_dir[sort_index]; sorted_box_for_nms[tid*NUM_BOX_CORNERS + 0] = box_for_nms[sort_index*NUM_BOX_CORNERS + 0]; sorted_box_for_nms[tid*NUM_BOX_CORNERS + 1] = box_for_nms[sort_index*NUM_BOX_CORNERS + 1]; sorted_box_for_nms[tid*NUM_BOX_CORNERS + 2] = box_for_nms[sort_index*NUM_BOX_CORNERS + 2]; sorted_box_for_nms[tid*NUM_BOX_CORNERS + 3] = box_for_nms[sort_index*NUM_BOX_CORNERS + 3]; } } PostprocessCuda::PostprocessCuda(const float FLOAT_MIN, const float FLOAT_MAX, const int NUM_ANCHOR_X_INDS, const int NUM_ANCHOR_Y_INDS, const int NUM_ANCHOR_R_INDS, const float score_threshold, const int NUM_THREADS, const float nms_overlap_threshold, const int NUM_BOX_CORNERS, const int NUM_OUTPUT_BOX_FEATURE): FLOAT_MIN_(FLOAT_MIN), FLOAT_MAX_(FLOAT_MAX), NUM_ANCHOR_X_INDS_(NUM_ANCHOR_X_INDS), NUM_ANCHOR_Y_INDS_(NUM_ANCHOR_Y_INDS), NUM_ANCHOR_R_INDS_(NUM_ANCHOR_R_INDS), score_threshold_(score_threshold), NUM_THREADS_(NUM_THREADS), nms_overlap_threshold_(nms_overlap_threshold), NUM_BOX_CORNERS_(NUM_BOX_CORNERS), NUM_OUTPUT_BOX_FEATURE_(NUM_OUTPUT_BOX_FEATURE) { nms_cuda_ptr_.reset(new NMSCuda( NUM_THREADS, NUM_BOX_CORNERS, nms_overlap_threshold)); } void PostprocessCuda::doPostprocessCuda(const float* rpn_box_output, const float* rpn_cls_output, const float* rpn_dir_output, int* dev_anchor_mask, const float* dev_anchors_px, const float* dev_anchors_py, const 
float* dev_anchors_pz, const float* dev_anchors_dx, const float* dev_anchors_dy, const float* dev_anchors_dz, const float* dev_anchors_ro, float* dev_filtered_box, float* dev_filtered_score, int* dev_filtered_dir, float* dev_box_for_nms, int* dev_filter_count, std::vector<float>& out_detection) { filter_kernel<<<NUM_ANCHOR_X_INDS_*NUM_ANCHOR_R_INDS_, NUM_ANCHOR_Y_INDS_>>> (rpn_box_output, rpn_cls_output, rpn_dir_output, dev_anchor_mask, dev_anchors_px, dev_anchors_py, dev_anchors_pz, dev_anchors_dx, dev_anchors_dy, dev_anchors_dz, dev_anchors_ro, dev_filtered_box, dev_filtered_score, dev_filtered_dir, dev_box_for_nms, dev_filter_count, FLOAT_MIN_, FLOAT_MAX_, score_threshold_, NUM_BOX_CORNERS_, NUM_OUTPUT_BOX_FEATURE_); int host_filter_count[1]; GPU_CHECK( cudaMemcpy(host_filter_count, dev_filter_count, sizeof(int), cudaMemcpyDeviceToHost ) ); if(host_filter_count[0] == 0) { return; } int* dev_indexes; float* dev_sorted_filtered_box, *dev_sorted_box_for_nms; int* dev_sorted_filtered_dir; GPU_CHECK(cudaMalloc((void**)&dev_indexes, host_filter_count[0]*sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_sorted_filtered_box, NUM_OUTPUT_BOX_FEATURE_*host_filter_count[0]*sizeof(float))); GPU_CHECK(cudaMalloc((void**)&dev_sorted_filtered_dir, host_filter_count[0]*sizeof(int))); GPU_CHECK(cudaMalloc((void**)&dev_sorted_box_for_nms, NUM_BOX_CORNERS_*host_filter_count[0]*sizeof(float))); thrust::sequence(thrust::device, dev_indexes, dev_indexes + host_filter_count[0]); thrust::sort_by_key(thrust::device, dev_filtered_score, dev_filtered_score + size_t(host_filter_count[0]), dev_indexes, thrust::greater<float>()); const int num_blocks = DIVUP(host_filter_count[0], NUM_THREADS_); sort_boxes_by_indexes_kernel<<<num_blocks, NUM_THREADS_>>>(dev_filtered_box, dev_filtered_dir, dev_box_for_nms, dev_indexes, host_filter_count[0], dev_sorted_filtered_box, dev_sorted_filtered_dir, dev_sorted_box_for_nms, NUM_BOX_CORNERS_, NUM_OUTPUT_BOX_FEATURE_); int keep_inds[host_filter_count[0]] = {0}; int out_num_objects = 0; nms_cuda_ptr_->doNMSCuda(host_filter_count[0], dev_sorted_box_for_nms, keep_inds, out_num_objects); float host_filtered_box[host_filter_count[0]*NUM_OUTPUT_BOX_FEATURE_]; int host_filtered_dir[host_filter_count[0]]; GPU_CHECK( cudaMemcpy(host_filtered_box, dev_sorted_filtered_box, NUM_OUTPUT_BOX_FEATURE_*host_filter_count[0] *sizeof(float), cudaMemcpyDeviceToHost ) ); GPU_CHECK( cudaMemcpy(host_filtered_dir, dev_sorted_filtered_dir, host_filter_count[0] *sizeof(int), cudaMemcpyDeviceToHost ) ); for (size_t i = 0; i < out_num_objects; i++) { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+0]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+1]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+2]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+3]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+4]); out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+5]); if(host_filtered_dir[keep_inds[i]] == 0) { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+6] + M_PI); } else { out_detection.push_back(host_filtered_box[keep_inds[i]*NUM_OUTPUT_BOX_FEATURE_+6]); } } GPU_CHECK(cudaFree(dev_indexes)); GPU_CHECK(cudaFree(dev_sorted_filtered_box)); GPU_CHECK(cudaFree(dev_sorted_filtered_dir)); GPU_CHECK(cudaFree(dev_sorted_box_for_nms)); }
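// For reference: the anchor decoding performed inside filter_kernel, restated as a small
// host-side helper.  Illustrative sketch only -- DecodedBox/decodeBox are not part of this
// file.  t[] is the 7-value regression output (x, y, z, dx, dy, dz, ro); a candidate is kept
// by the kernel only when anchor_mask is set and sigmoid(cls_pred) exceeds score_threshold.
#include <cmath>

struct DecodedBox { float x, y, z, dx, dy, dz, ro; };

static DecodedBox decodeBox(const float t[7],
                            float xa, float ya, float za,    // anchor center x/y and bottom z
                            float dxa, float dya, float dza, // anchor sizes
                            float roa)                       // anchor yaw
{
    const float zc   = za + dza * 0.5f;                      // kernel: za = anchors_pz + anchors_dz/2
    const float diag = std::sqrt(dxa * dxa + dya * dya);
    DecodedBox b;
    b.x  = t[0] * diag + xa;
    b.y  = t[1] * diag + ya;
    b.dx = std::exp(t[3]) * dxa;
    b.dy = std::exp(t[4]) * dya;
    b.dz = std::exp(t[5]) * dza;
    b.z  = t[2] * dza + zc - b.dz * 0.5f;                    // kernel computes box_pz, then box_pz -= box_dz/2
    b.ro = t[6] + roa;
    return b;
}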
#include "imageProcessingGPUUtils.hpp" #include "backend/common/imageOps.hpp" #include "gpu/image/sampling.hpp" #include "gpu/image/imageOps.hpp" #include "gpu/image/blur.hpp" #include "cuda/util.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #define INVALID_VALUE Image::RGBA::pack(1, 2, 3, 0) #define REDUCE_THREADS_PER_BLOCK 512 #define REGULARIZATION_TILE_WIDTH 16 #define KERNEL_SIZE 25 #define TILE_WIDTH 16 #define CUDABLOCKSIZE 512 namespace VideoStitch { namespace Util { // Output lab in [0..255] range instead of the original [0..100, -128..127, -128..127] inline __host__ __device__ uint32_t RGBandGradientToNormalizeLab(const uint32_t rgbAndGradient) { // Check if the pixel is without alpha if (rgbAndGradient == INVALID_VALUE) { return rgbAndGradient; } const float r = min(1.0f, float(Image::RGBA::r(rgbAndGradient)) / 255.0f); const float g = min(1.0f, float(Image::RGBA::g(rgbAndGradient)) / 255.0f); const float b = min(1.0f, float(Image::RGBA::b(rgbAndGradient)) / 255.0f); const float3 rgb = make_float3(r, g, b); const float3 lab = Image::rgbToLab(rgb); const uint32_t l_ui = uint32_t(lab.x * 2.55f); const uint32_t a_ui = uint32_t(lab.y + 128); const uint32_t b_ui = uint32_t(lab.z + 128); const uint32_t i_ui = Image::RGBA::a(rgbAndGradient); return Image::RGBA::pack(l_ui, a_ui, b_ui, i_ui); } inline __host__ __device__ uint32_t normalizeLabAndGradientToRGBA(const uint32_t normalizedLabAndGradient) { // Check if the pixel is without alpha if (normalizedLabAndGradient == INVALID_VALUE) { return 0; } const float l = float(Image::RGBA::r(normalizedLabAndGradient)) / 2.55f; const float a = float(Image::RGBA::g(normalizedLabAndGradient)) - 128.0f; const float b = float(Image::RGBA::b(normalizedLabAndGradient)) - 128.0f; const float3 lab = make_float3(l, a, b); const float3 rgb = Image::labToRGB(lab); const uint32_t red = uint32_t(min(1.0f, rgb.x) * 255.0f); const uint32_t green = uint32_t(min(1.0f, rgb.y) * 255.0f); const uint32_t blue = uint32_t(min(1.0f, rgb.z) * 255.0f); return Image::RGBA::pack(red, green, blue, 255); } __global__ void convertRGBAndGradientToNormalizedLABKernel(const int2 bufferSize, uint32_t* __restrict__ colorBuffer) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < bufferSize.x && y < bufferSize.y) { const unsigned index = y * bufferSize.x + x; colorBuffer[index] = RGBandGradientToNormalizeLab(colorBuffer[index]); } } Status ImageProcessingGPU::convertRGBandGradientToNormalizedLABandGradient(const int2 bufferSize, GPU::Buffer<uint32_t> colorBuffer, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1); convertRGBAndGradientToNormalizedLABKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(bufferSize, colorBuffer.get()); return CUDA_STATUS; } __global__ void convertNormalizedLABandGradientToRGBAKernel(const int2 bufferSize, uint32_t* __restrict__ colorBuffer) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < bufferSize.x && y < bufferSize.y) { const unsigned index = y * bufferSize.x + x; colorBuffer[index] = normalizeLabAndGradientToRGBA(colorBuffer[index]); } } Status ImageProcessingGPU::convertNormalizedLABandGradientToRGBA(const int2 bufferSize, GPU::Buffer<uint32_t> colorBuffer, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 
dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1); convertNormalizedLABandGradientToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(bufferSize, colorBuffer.get()); return CUDA_STATUS; } // http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/Data-Parallel_Algorithms.html#reduction /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) */ /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) */ __global__ void reduce6(const float* g_idata, float* g_odata, float* g_omask, unsigned int n) { extern __shared__ float sharedData[]; float* sdata = &sharedData[0]; float* smask = &sharedData[blockDim.x]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int gridSize = blockDim.x * 2 * gridDim.x; sdata[tid] = 0; smask[tid] = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridSize). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { sdata[tid] += g_idata[i]; smask[tid] += (g_idata[i] > 0 ? 1 : 0); if (i + blockDim.x < n) { sdata[tid] += g_idata[i + blockDim.x]; smask[tid] += (g_idata[i + blockDim.x] > 0 ? 1 : 0); } i += gridSize; } __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; smask[tid] += smask[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; smask[tid] += smask[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; smask[tid] += smask[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; smask[tid] += smask[tid + 32]; } if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; smask[tid] += smask[tid + 16]; } if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; smask[tid] += smask[tid + 8]; } if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; smask[tid] += smask[tid + 4]; } if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; smask[tid] += smask[tid + 2]; } if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; smask[tid] += smask[tid + 1]; } } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; g_omask[blockIdx.x] = smask[0]; } } Status ImageProcessingGPU::calculateSum(const int numElement, const GPU::Buffer<const float> buffer, const unsigned blockSize, GPU::Stream stream, float& output, float& mask) { int gridSize = (unsigned)Cuda::ceilDiv(numElement, blockSize); auto potOutBuffer = GPU::Buffer<float>::allocate(gridSize, "Merger Mask"); PROPAGATE_FAILURE_STATUS(potOutBuffer.status()); auto outBuffer = potOutBuffer.value(); auto potMaskBuffer = GPU::Buffer<float>::allocate(gridSize, "Merger Mask"); PROPAGATE_FAILURE_STATUS(potMaskBuffer.status()); auto maskBuffer = potMaskBuffer.value(); dim3 dimBlock(blockSize, 1, 1); dim3 dimGrid(gridSize, 1, 1); reduce6<<<dimGrid, dimBlock, blockSize * 2 * sizeof(float), stream.get()>>>(buffer.get(), outBuffer.get(), maskBuffer.get(), 
numElement); PROPAGATE_FAILURE_STATUS(stream.synchronize()); std::vector<float> h_odata(gridSize); std::vector<float> h_omask(gridSize); if (gridSize > 0) { cudaMemcpy(h_odata.data(), outBuffer.get(), gridSize * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_omask.data(), maskBuffer.get(), gridSize * sizeof(float), cudaMemcpyDeviceToHost); } output = 0; mask = 0; for (int i = 0; i < gridSize; i++) { output += h_odata[i]; mask += h_omask[i]; } PROPAGATE_FAILURE_STATUS(outBuffer.release()); PROPAGATE_FAILURE_STATUS(maskBuffer.release()); return CUDA_STATUS; } __constant__ float kernel[3][3] = {{1, 2, 1}, {0, 0, 0}, {-1, -2, -1}}; __global__ void convertRGB210ToRGBandGradientKernel(int2 size, const uint32_t* __restrict__ inputBuffer, uint32_t* __restrict__ colorBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { uint32_t oriInput = inputBuffer[y * size.x + x]; if (!Image::RGB210::a(oriInput)) { colorBuffer[y * size.x + x] = INVALID_VALUE; return; } float Gx = 0; float Gy = 0; for (int dy = -1; dy <= 1; dy++) for (int dx = -1; dx <= 1; dx++) { int2 localPos = make_int2(dx + x, dy + y); if (localPos.x >= 0 && localPos.x < size.x && localPos.y >= 0 && localPos.y < size.y) { uint32_t input = inputBuffer[localPos.y * size.x + localPos.x]; if (!Image::RGB210::a(input)) { colorBuffer[y * size.x + x] = Image::RGBA::pack(min(255, Image::RGB210::r(oriInput)), min(255, Image::RGB210::g(oriInput)), min(255, Image::RGB210::b(oriInput)), 0); return; } float c = (0.299f * float(Image::RGB210::r(input)) + 0.587f * float(Image::RGB210::g(input)) + 0.114f * float(Image::RGB210::b(input))) / 255.0f; float coefX = kernel[dx + 1][dy + 1]; float coefY = kernel[dy + 1][dx + 1]; Gx += coefX * c; Gy += coefY * c; } } uint32_t gradient = (unsigned char)(min(255.0f, sqrt(Gx * Gx + Gy * Gy) * 255.0f)); colorBuffer[y * size.x + x] = Image::RGBA::pack(min(255, Image::RGB210::r(oriInput)), min(255, Image::RGB210::g(oriInput)), min(255, Image::RGB210::b(oriInput)), gradient); } } Status ImageProcessingGPU::convertRGB210ToRGBandGradient(const int2 bufferSize, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<uint32_t> colorBuffer, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1); convertRGB210ToRGBandGradientKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(bufferSize, inputBuffer.get(), colorBuffer.get()); return CUDA_STATUS; } __global__ void convertRGB210ToRGBAKernel(int2 size, uint32_t* __restrict__ colorBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { uint32_t oriInput = colorBuffer[y * size.x + x]; colorBuffer[y * size.x + x] = Image::RGBA::pack(Image::RGB210::r(oriInput), Image::RGB210::g(oriInput), Image::RGB210::b(oriInput), Image::RGB210::a(oriInput)); } } Status ImageProcessingGPU::convertRGB210ToRGBA(const int2 bufferSize, GPU::Buffer<uint32_t> buffer, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1); convertRGB210ToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(bufferSize, buffer.get()); return CUDA_STATUS; } __global__ void extractChannelKernel(int2 bufferSize, const int channelIndex, const uint32_t* const __restrict__ colorBuffer, unsigned char* __restrict__ 
outputBuffer) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; if (x < bufferSize.x && y < bufferSize.y) { const unsigned index = y * bufferSize.x + x; const uint32_t color = colorBuffer[index]; if (channelIndex == 0) outputBuffer[index] = Image::RGBA::r(color); else if (channelIndex == 1) outputBuffer[index] = Image::RGBA::g(color); else if (channelIndex == 2) outputBuffer[index] = Image::RGBA::b(color); else if (channelIndex == 3) outputBuffer[index] = Image::RGBA::a(color); } } Status ImageProcessingGPU::extractChannel(const int2 bufferSize, const GPU::Buffer<const uint32_t> inputBuffer, const int channelIndex, GPU::Buffer<unsigned char> outputBuffer, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(bufferSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(bufferSize.y, dimBlock.y), 1); extractChannelKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(bufferSize, channelIndex, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void gradientKernel(int2 size, const uint32_t* inputBuffer, float* outputGradientBuffer) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { float Gx = 0; float Gy = 0; float wX = 0; float wY = 0; for (int dy = -1; dy <= 1; dy++) for (int dx = -1; dx <= 1; dx++) { int2 localPos = make_int2(dx + x, dy + y); if (inRange(localPos, size)) { uint32_t input = inputBuffer[localPos.y * size.x + localPos.x]; if (Image::RGBA::a(input) == 0) { outputGradientBuffer[y * size.x + x] = 0; return; } float c = (0.299f * float(Image::RGBA::r(input)) + 0.587f * float(Image::RGBA::g(input)) + 0.114f * float(Image::RGBA::b(input))) / 255.0f; float coefX = kernel[dx + 1][dy + 1]; float coefY = kernel[dy + 1][dx + 1]; Gx += coefX * c; Gy += coefY * c; wX += coefX; wY += coefY; } } outputGradientBuffer[y * size.x + x] = sqrt(Gx * Gx + Gy * Gy); } } Status ImageProcessingGPU::findGradient(const int2 size, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<float> outputGradientBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); gradientKernel<<<dimGrid, dimBlock, 0, stream>>>(size, inputBuffer.get(), outputGradientBuffer.get()); return CUDA_STATUS; } __global__ void luminanceKernel(const int2 size, const uint32_t* const inputBuffer, float* const outputLuminanceBuffer) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; uint32_t input = inputBuffer[y * size.x + x]; outputLuminanceBuffer[y * size.x + x] = (0.299f * float(Image::RGBA::r(input)) + 0.587f * float(Image::RGBA::g(input)) + 0.114f * float(Image::RGBA::b(input))) / 255.0f; } Status ImageProcessingGPU::findLuminance(const int2 size, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<float> outputLuminanceBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); luminanceKernel<<<dimGrid, dimBlock, 0, stream>>>(size, inputBuffer.get(), outputLuminanceBuffer.get()); return CUDA_STATUS; } // Output lab in [0..1] range instead of the original [0..100, -128..127, -128..127] inline __host__ __device__ 
uint32_t rgbToNormalizeLab(const uint32_t rgba) { const float r = float(Image::RGBA::r(rgba)) / 255; const float g = float(Image::RGBA::g(rgba)) / 255; const float b = float(Image::RGBA::b(rgba)) / 255; const float3 rgb = make_float3(r, g, b); const float3 lab = Image::rgbToLab(rgb); const uint32_t l_ui = uint32_t(lab.x * 2.55f); const uint32_t a_ui = uint32_t(lab.y + 128); const uint32_t b_ui = uint32_t(lab.z + 128); const uint32_t a = Image::RGBA::a(rgba); return Image::RGBA::pack(l_ui, a_ui, b_ui, a); } __global__ void rgbToNormalizeLabKernel(const int2 size, const uint32_t* const inputRGBBuffer, uint32_t* const outputNormalizedLABBuffer) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; uint32_t input = inputRGBBuffer[y * size.x + x]; outputNormalizedLABBuffer[y * size.x + x] = rgbToNormalizeLab(input); } Status ImageProcessingGPU::convertRGBToNormalizedLAB(const int2 size, const GPU::Buffer<const uint32_t> inputRGBBuffer, GPU::Buffer<uint32_t> outputNormalizedLABBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); rgbToNormalizeLabKernel<<<dimGrid, dimBlock, 0, stream>>>(size, inputRGBBuffer.get(), outputNormalizedLABBuffer.get()); return CUDA_STATUS; } __global__ void buffer2DRGBACompactBlendOffsetOperatorKernel(int offsetX, int offsetY, int width, int height, uint32_t* __restrict__ dst, float w0, int offsetX0, int offsetY0, int width0, int height0, const uint32_t* __restrict__ src0, float w1, int offsetX1, int offsetY1, int width1, int height1, const uint32_t* __restrict__ src1) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { int localIndex = width * y + x; float w = 0; float r = 0; float g = 0; float b = 0; float a = 0; dst[localIndex] = 0; if (offsetY + y - offsetY0 >= 0 && offsetY + y - offsetY0 < height0 && offsetX + x - offsetX0 >= 0 && offsetX + x - offsetX0 < width0) { int localIndex0 = (offsetY + y - offsetY0) * width0 + (offsetX + x - offsetX0); if (Image::RGBA::a(src0[localIndex0]) > 0) { w += w0; r += w0 * Image::RGBA::r(src0[localIndex0]); g += w0 * Image::RGBA::g(src0[localIndex0]); b += w0 * Image::RGBA::b(src0[localIndex0]); a += w0 * Image::RGBA::a(src0[localIndex0]); } } if (offsetY + y - offsetY1 >= 0 && offsetY + y - offsetY1 < height1 && offsetX + x - offsetX1 >= 0 && offsetX + x - offsetX1 < width1) { int localIndex1 = (offsetY + y - offsetY1) * width1 + (offsetX + x - offsetX1); if (Image::RGBA::a(src1[localIndex1]) > 0) { w += w1; r += w1 * Image::RGBA::r(src1[localIndex1]); g += w1 * Image::RGBA::g(src1[localIndex1]); b += w1 * Image::RGBA::b(src1[localIndex1]); a += w1 * Image::RGBA::a(src1[localIndex1]); } } if (w > 0) { dst[localIndex] = Image::RGBA::pack(r / w, g / w, b / w, 255); } else { dst[localIndex] = 0; } } } Status ImageProcessingGPU::buffer2DRGBACompactBlendOffsetOperator(const Core::Rect& dstRect, GPU::Buffer<uint32_t> dst, const float weight0, const Core::Rect& src0Rect, const GPU::Buffer<const uint32_t> src0, const float weight1, const Core::Rect& src1Rect, const GPU::Buffer<const uint32_t> src1, GPU::Stream gpuStream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(dstRect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv(dstRect.getHeight(), dimBlock.y), 1); cudaStream_t 
stream = gpuStream.get(); buffer2DRGBACompactBlendOffsetOperatorKernel<<<dimGrid, dimBlock, 0, stream>>>( (int)dstRect.left(), (int)dstRect.top(), (int)dstRect.getWidth(), (int)dstRect.getHeight(), dst.get(), weight0, (int)src0Rect.left(), (int)src0Rect.top(), (int)src0Rect.getWidth(), (int)src0Rect.getHeight(), src0.get(), weight1, (int)src1Rect.left(), (int)src1Rect.top(), (int)src1Rect.getWidth(), (int)src1Rect.getHeight(), src1.get()); return CUDA_STATUS; } __global__ void binarizeMaskKernel(const int2 size, const uint32_t* inputBuffer, uint32_t* binarizedBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { binarizedBuffer[y * size.x + x] = inputBuffer[y * size.x + x] > 0 ? 1 : 0; } } Status ImageProcessingGPU::binarizeMask(const int2 size, const GPU::Buffer<const uint32_t> inputMask, GPU::Buffer<uint32_t> binarizedMask, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); binarizeMaskKernel<<<dimGrid, dimBlock, 0, stream>>>(size, inputMask.get(), binarizedMask.get()); return CUDA_STATUS; } __global__ void onBothBufferOperatorKernel(const int warpWidth, const int input0OffsetX, const int input0OffsetY, const int input0Width, const int input0Height, const uint32_t* input0Buffer, const int input1OffsetX, const int input1OffsetY, const int input1Width, const int input1Height, const uint32_t* input1Buffer, const int outputOffsetX, const int outputOffsetY, const int outputWidth, const int outputHeight, uint32_t* outputMask) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < outputWidth && y < outputHeight) { uint32_t v = 0; const int outputX = x + outputOffsetX; const int outputY = y + outputOffsetY; const int input0X = (outputX + warpWidth - input0OffsetX) % warpWidth; const int input0Y = (outputY - input0OffsetY); const int input1X = (outputX + warpWidth - input1OffsetX) % warpWidth; const int input1Y = (outputY - input1OffsetY); if (input1X >= 0 && input1X < input1Width && input1Y >= 0 && input1Y < input1Height && input0X >= 0 && input0X < input0Width && input0Y >= 0 && input0Y < input0Height) { if (input0Buffer[input0Y * input0Width + input0X] > 0 && input1Buffer[input1Y * input1Width + input1X] > 0) { v = 1; } else { v = 0; } } outputMask[y * outputWidth + x] = v; } } Status ImageProcessingGPU::onBothBufferOperator(const int warpWidth, const Core::Rect boundingRect0, const GPU::Buffer<const uint32_t> buffer0, const Core::Rect boundingRect1, const GPU::Buffer<const uint32_t> buffer1, const Core::Rect boundingRectBuffer, GPU::Buffer<uint32_t> buffer, GPU::Stream gpuStream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(boundingRectBuffer.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv(boundingRectBuffer.getHeight(), dimBlock.y), 1); cudaStream_t stream = gpuStream.get(); onBothBufferOperatorKernel<<<dimGrid, dimBlock, 0, stream>>>( (int)warpWidth, (int)boundingRect0.left(), (int)boundingRect0.top(), (int)boundingRect0.getWidth(), (int)boundingRect0.getHeight(), buffer0.get(), (int)boundingRect1.left(), (int)boundingRect1.top(), (int)boundingRect1.getWidth(), (int)boundingRect1.getHeight(), buffer1.get(), (int)boundingRectBuffer.left(), 
(int)boundingRectBuffer.top(), (int)boundingRectBuffer.getWidth(), (int)boundingRectBuffer.getHeight(), buffer.get()); return CUDA_STATUS; } template <typename T> __global__ void constantBufferKernel(const int2 size, T* buffer, T value) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; buffer[y * size.x + x] = value; } template <typename T> Status ImageProcessingGPU::setConstantBuffer(const int2 size, GPU::Buffer<T> buffer, const T value, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size.y, TILE_WIDTH), 1); constantBufferKernel<T><<<dimGrid, dimBlock, 0, stream>>>(size, buffer.get(), value); return CUDA_STATUS; } template Status ImageProcessingGPU::setConstantBuffer(const int2 size, GPU::Buffer<float2> buffer, const float2 value, GPU::Stream gpuStream); template Status ImageProcessingGPU::setConstantBuffer(const int2 size, GPU::Buffer<uint32_t> buffer, const uint32_t value, GPU::Stream gpuStream); template <typename T> __global__ void packBufferKernel(const int wrapWidth, const T invalidValue, const int inputOffsetX, const int inputOffsetY, const int inputWidth, const int inputHeight, const T* input, const int packedOffsetX, const int packedOffsetY, const int packedWidth, const int packedHeight, T* output) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < packedWidth && y < packedHeight) { const int inputX = (x + packedOffsetX - inputOffsetX + wrapWidth) % wrapWidth; const int inputY = (y + packedOffsetY - inputOffsetY); if (inputX >= 0 && inputX < inputWidth && inputY >= 0 && inputY < inputHeight) { output[y * packedWidth + x] = input[inputY * inputWidth + inputX]; } else { output[y * packedWidth + x] = invalidValue; } } } template <typename T> Status ImageProcessingGPU::packBuffer(const int warpWidth, const T invalidValue, const Core::Rect inputRect, const GPU::Buffer<const T> inputBuffer, const Core::Rect outputRect, GPU::Buffer<T> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(outputRect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv(outputRect.getHeight(), dimBlock.y), 1); packBufferKernel<T><<<dimGrid, dimBlock, 0, stream>>>( warpWidth, invalidValue, (int)inputRect.left(), (int)inputRect.top(), (int)inputRect.getWidth(), (int)inputRect.getHeight(), inputBuffer.get(), (int)outputRect.left(), (int)outputRect.top(), (int)outputRect.getWidth(), (int)outputRect.getHeight(), outputBuffer.get()); return CUDA_STATUS; } template Status ImageProcessingGPU::packBuffer(const int warpWidth, const uint32_t invalidValue, const Core::Rect inputRect, const GPU::Buffer<const uint32_t> inputBuffer, const Core::Rect outputRect, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream); template Status ImageProcessingGPU::packBuffer(const int warpWidth, const float2 invalidValue, const Core::Rect inputRect, const GPU::Buffer<const float2> inputBuffer, const Core::Rect outputRect, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream); template <typename T> __global__ void thresholdingBufferKernel(const int2 size, const T thresholdValue, const T minBoundValue, const T maxBoundValue, T* buffer) { // calculate normalized texture coordinates const int x 
= blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const int index = y * size.x + x; const T value = buffer[index]; if (value <= thresholdValue) { buffer[index] = minBoundValue; } else { buffer[index] = maxBoundValue; } } } template <typename T> Status ImageProcessingGPU::thresholdingBuffer(const int2 size, const T thresholdValue, const T minBoundValue, const T maxBoundValue, GPU::Buffer<T> inputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); thresholdingBufferKernel<T> <<<dimGrid, dimBlock, 0, stream>>>(size, thresholdValue, minBoundValue, maxBoundValue, inputBuffer.get()); return CUDA_STATUS; } template Status ImageProcessingGPU::thresholdingBuffer(const int2 size, const unsigned char thresholdValue, const unsigned char minBoundValue, const unsigned char maxBoundValue, GPU::Buffer<unsigned char> inputBuffer, GPU::Stream gpuStream); } // namespace Util } // namespace VideoStitch
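// A usage sketch for ImageProcessingGPU::calculateSum (illustrative; sumAndCountPositives is
// not part of this file, and the using-directives assume the declarations included above).
// calculateSum returns both the sum of a device float buffer and the count of its strictly
// positive entries -- the two quantities reduce6 accumulates per block -- and it synchronizes
// the stream while combining the per-block partial results on the host.
using namespace VideoStitch;
using namespace VideoStitch::Util;

static Status sumAndCountPositives(const int numElement, const GPU::Buffer<const float> values,
                                   GPU::Stream stream, float& sum, float& positives) {
  // REDUCE_THREADS_PER_BLOCK (512) matches the block size used throughout this file.
  return ImageProcessingGPU::calculateSum(numElement, values, REDUCE_THREADS_PER_BLOCK, stream,
                                          sum, positives);
}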
#include <ops/declarable/helpers/BarnesHutTsne.h> namespace sd { namespace ops { namespace helpers { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // count rows kernel - count input pRows and pCols and put result onto pRowCounts // pRowCounts - array of ints, with length N // pRows - array of ints with length N, vals from 0 to N-1 // pCols - array of ints with length < N and vals between 0 and max(pRows) // static __global__ void countRowsKernel(int* pRowCounts, int const* pRows, int const* pCols, Nd4jLong N) { auto start = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int n = threadIdx.x + start; n < N; n += step) { int begin = pRows[n];//->e<int>(n); int end = pRows[n + 1];//rowP->e<int>(n + 1); for (int i = begin; i < end; i++) { bool present = false; // loop between near pRows for (int m = pRows[pCols[i]]; m < pRows[pCols[i] + 1]; m++) if (pCols[m] == n) { // mark index as existed with columns array present = true; break; } atomicAdd(&pRowCounts[n], 1); if (!present) // increment row counter for given index atomicAdd(&pRowCounts[pCols[i]], 1); } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // row counter caller Nd4jLong barnes_row_count(const NDArray* rowP, const NDArray* colP, Nd4jLong N, NDArray& rowCounts) { int* pRowCounts = reinterpret_cast<int*>(rowCounts.specialBuffer()); int const* pRows = reinterpret_cast<int const*>(rowP->specialBuffer()); int const* pCols = reinterpret_cast<int const*>(colP->specialBuffer()); auto stream = rowCounts.getContext()->getCudaStream(); countRowsKernel<<<1, 1, 128, *stream>>>(pRowCounts, pRows, pCols, N); NDArray numElementsArr = rowCounts.sumNumber(); //reduceAlongDimension(reduce::Sum, {}); //rowCounts.printBuffer("Row counts"); auto numElements = numElementsArr.e<Nd4jLong>(0); return numElements; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // extend symRowP with pRowCounts array vals // pRowCounts - int array with length N // symRowP - int array with length N+1 // N - given array length // static __global__ void fillUpsymRow(int const* pRowCounts, int* symRowP, int N) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int n = start; n < N + 1; n += step) { // to avoid race condition use shift only for given index symRowP[n] = 0; for (int i = 0; i < n; i++) atomicAdd(&symRowP[n], pRowCounts[i]); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // symmetrize routine kernel // pRows - rows buffer (ints) // pCols - column buffer (ints) with vals between 0 and max(pRows) // pVals - values vector (floats) // symRowP - ints, shifted pRows // symColP - ints, shifted pCols, // offset - ints, shitfs // pOutput - result matrix (floats) // N - pRows length // template <typename T> static __global__ void symmetrizeKernel(int const* pRows, int const* pCols, T const* pVals, int* symRowP, int* symColP, int* offset, T* pOutput, int N) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int n = start; n < N; n += step) { int begin = pRows[n]; int bound = pRows[n + 1]; for (int i = begin; i < bound; i++) { bool present = false; int colPI = pCols[i]; int start = pRows[colPI]; int end = pRows[colPI + 1]; for (int m = start; m < end; m++) { if (pCols[m] == 
n) { present = true; if (n <= colPI) { symColP[symRowP[n] + offset[n]] = colPI; symColP[symRowP[colPI] + offset[colPI]] = n; pOutput[symRowP[n] + offset[n]] = pVals[i] + pVals[m]; pOutput[symRowP[colPI] + offset[colPI]] = pVals[i] + pVals[m]; } } } // If (colP[i], n) is not present, there is no addition involved if (!present) { symColP[symRowP[n] + offset[n]] = colPI; symColP[symRowP[pCols[i]] + offset[colPI]] = n; pOutput[symRowP[n] + offset[n]] = pVals[i]; pOutput[symRowP[colPI] + offset[colPI]] = pVals[i]; } // Update offsets if (!present || (present && n <= colPI)) { atomicAdd(&offset[n], 1); if (colPI != n) atomicAdd(&offset[colPI], 1); } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // symmetrize algorithm itself // template <typename T> static void barnes_symmetrize_(const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts) { int const* pRows = reinterpret_cast<int const*>(rowP->specialBuffer()); int* symRowP = reinterpret_cast<int*>(outputRows->specialBuffer()); int* pRowCounts = reinterpret_cast<int*>(rowCounts->specialBuffer()); auto stream = outputCols->getContext()->getCudaStream(); // fill up syRowP array fillUpsymRow<<<1, N, 128, *stream>>>(pRowCounts, symRowP, N); outputRows->syncToHost(); // outputRows->printBuffer("output rows"); int* symColP = reinterpret_cast<int*>(outputCols->specialBuffer()); // outputRows->printBuffer("SymRows are"); int const* pCols = reinterpret_cast<int const*>(colP->specialBuffer()); T const* pVals = reinterpret_cast<T const*>(valP->specialBuffer()); T* pOutput = reinterpret_cast<T*>(outputVals->specialBuffer()); //std::vector<int> rowCountsV = rowCounts->getBufferAsVector<int>(); auto offsetArr = NDArrayFactory::create<int>('c', {N}); int* offset = reinterpret_cast<int*>(offsetArr.specialBuffer()); // symmetrize itself symmetrizeKernel<T><<<1, 1, 1024, *stream>>>(pRows, pCols, pVals, symRowP, symColP, offset, pOutput, N); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // symmetrize caller and adoption // void barnes_symmetrize(const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts) { BUILD_SINGLE_SELECTOR(valP->dataType(), barnes_symmetrize_, (rowP, colP, valP, N, outputRows, outputCols, outputVals, rowCounts), NUMERIC_TYPES); *outputVals /= 2.0; } BUILD_SINGLE_TEMPLATE(template void barnes_symmetrize_, (const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts), NUMERIC_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // edge forces implementation // template <typename T> static __global__ void edgeForcesKernel(int const* pRows, int const* pCols, T const* dataP, T const* vals, T* outputP, int N, int colCount, int rowSize) { // std::vector<T> buffer(colCount); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int n = start; n < N; n += step) { int start = pRows[n]; int end = pRows[n + 1]; int shift = n * colCount; for (int i = start; i < end; i++) { T const* thisSlice = dataP + pCols[i] * colCount; T res = 1; for (int k = 0; k < colCount; k++) { auto valTemp = dataP[shift + k] 
- thisSlice[k];//thisSlice[k]; res += valTemp * valTemp; // (dataP[shift + k] * dataP[shift + k] - 2 * dataP[shift + k] * thisSlice[k] + thisSlice[k] * thisSlice[k]) } res = vals[i] / res; for (int k = 0; k < colCount; k++) math::atomics::nd4j_atomicAdd(&outputP[shift + k], T((dataP[shift + k] - thisSlice[k]) * res)); } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // edge forces algorithm // template <typename T> static void barnes_edge_forces_(const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray const* data, NDArray* output) { NDArray::prepareSpecialUse({output}, {data, rowP, colP, valP, valP}); T const* dataP = reinterpret_cast<T const*>(data->specialBuffer()); T const* vals = reinterpret_cast<T const*>(valP->specialBuffer()); T* outputP = reinterpret_cast<T*>(output->specialBuffer()); int const* pRows = reinterpret_cast<int const*>(rowP->specialBuffer()); int const* pCols = reinterpret_cast<int const*>(colP->specialBuffer()); int colCount = data->columns(); //auto shift = 0; auto rowSize = sizeof(T) * colCount; auto stream = output->getContext()->getCudaStream(); edgeForcesKernel<T><<<1, 128, 1024, *stream>>>(pRows, pCols, dataP, vals, outputP, N, colCount, rowSize); NDArray::registerSpecialUse({output}, {rowP, colP, valP, data}); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // edge forces caller // void barnes_edge_forces(const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray* output, NDArray const& data) { // Loop over all edges in the graph BUILD_SINGLE_SELECTOR(output->dataType(), barnes_edge_forces_, (rowP, colP, valP, N, &data, output), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void barnes_edge_forces_, (const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray const* data, NDArray* output), FLOAT_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // gains - run a function T((x + 2.) * sd::math::nd4j_sign<T,T>(grad) != sd::math::nd4j_sign<T,T>(eps)) + T(x * 0.8 * sd::math::nd4j_sign<T,T>(grad) != sd::math::nd4j_sign<T,T>(eps)); // for all members in input and put all in output // template <typename T> void barnes_gains_(NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output) { auto gainsInternal = LAMBDA_TTT(x, grad, eps) { T res = sd::math::nd4j_sign<T,T>(grad) != sd::math::nd4j_sign<T,T>(eps) ? 
x + T(.2) : x * T(.8); if(res < .01) res = .01; return res; }; input->applyTriplewiseLambda(*gradX, *epsilon, gainsInternal, *output); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // gains caller void barnes_gains(NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), barnes_gains_, (input, gradX, epsilon, output), NUMERIC_TYPES); } BUILD_SINGLE_TEMPLATE(template void barnes_gains_, (NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output), NUMERIC_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // cell contains - check cells for given point // bool cell_contains(NDArray* corner, NDArray* width, NDArray* point, Nd4jLong dimension) { auto cornerMinusWidth = *corner - *width; auto cornerPlusWidth = *corner + *width; // executes on host side, so sync all to host memory cornerMinusWidth.syncToHost(); cornerPlusWidth.syncToHost(); for (Nd4jLong i = 0; i < dimension; i++) { if (cornerMinusWidth.e<double>(i) > point->e<double>(i)) return false; if (cornerPlusWidth.e<double>(i) < point->e<double>(i)) return false; } return true; } } } }
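////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of libnd4j): fillUpsymRow above rebuilds every prefix from scratch, so the thread
// handling index n performs n atomic additions and the launch does O(N^2) work overall. The same shifted row
// pointers can be produced with a single exclusive prefix sum. buildSymRowPointers is a hypothetical helper name;
// it assumes pRowCounts (length N) and symRowP (length N + 1) are plain int device buffers laid out exactly as in
// the kernels above, and it is a minimal illustration rather than the library's implementation.
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>

void buildSymRowPointers(const int* pRowCounts, int* symRowP, int N) {
    // symRowP[0] = 0 and symRowP[i] = pRowCounts[0] + ... + pRowCounts[i - 1]  (exclusive scan, N outputs)
    thrust::exclusive_scan(thrust::device, pRowCounts, pRowCounts + N, symRowP, 0);

    // the sentinel symRowP[N] (total number of symmetric entries) is the last offset plus the last count
    int lastOffset = 0, lastCount = 0;
    cudaMemcpy(&lastOffset, symRowP + N - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&lastCount, pRowCounts + N - 1, sizeof(int), cudaMemcpyDeviceToHost);
    const int total = lastOffset + lastCount;
    cudaMemcpy(symRowP + N, &total, sizeof(int), cudaMemcpyHostToDevice);
}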
#include <shrUtils.h> #include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h #include <cutil_math.h> texture<float, 2> tex; texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex; cudaArray* d_array, *d_tempArray; /* Perform a fast box filter using the sliding window method. As the kernel moves from left to right, we add in the contribution of the new sample on the right, and subtract the value of the exiting sample on the left. This only requires 2 adds and a mul per output value, independent of the filter radius. The box filter is separable, so to perform a 2D box filter we perform the filter in the x direction, followed by the same filter in the y direction. Applying multiple iterations of the box filter converges towards a Gaussian blur. Using CUDA, rows or columns of the image are processed in parallel. This version duplicates edge pixels. Note that the x (row) pass suffers from uncoalesced global memory reads, since each thread is reading from a different row. For this reason it is better to use texture lookups for the x pass. The y (column) pass is perfectly coalesced. Parameters id - pointer to input data in global memory od - pointer to output data in global memory w - image width h - image height r - filter radius e.g. for r = 2, w = 8: 0 1 2 3 4 5 6 7 x - - - x - - - - x - - - - x - - - - x - - - - x - - - - x - - - x */ // process row __device__ void d_boxfilter_x(float *id, float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); float t; // do left edge t = id[0] * r; for (int x = 0; x < (r + 1); x++) { t += id[x]; } od[0] = t * scale; for(int x = 1; x < (r + 1); x++) { t += id[x + r]; t -= id[0]; od[x] = t * scale; } // main loop for(int x = (r + 1); x < w - r; x++) { t += id[x + r]; t -= id[x - r - 1]; od[x] = t * scale; } // do right edge for (int x = w - r; x < w; x++) { t += id[w - 1]; t -= id[x - r - 1]; od[x] = t * scale; } } // process column __device__ void d_boxfilter_y(float *id, float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); float t; // do left edge t = id[0] * r; for (int y = 0; y < (r + 1); y++) { t += id[y * w]; } od[0] = t * scale; for(int y = 1; y < (r + 1); y++) { t += id[(y + r) * w]; t -= id[0]; od[y * w] = t * scale; } // main loop for(int y = (r + 1); y < (h - r); y++) { t += id[(y + r) * w]; t -= id[((y - r) * w) - w]; od[y * w] = t * scale; } // do right edge for (int y = h - r; y < h; y++) { t += id[(h-1) * w]; t -= id[((y - r) * w) - w]; od[y * w] = t * scale; } } __global__ void d_boxfilter_x_global(float *id, float *od, int w, int h, int r) { unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; d_boxfilter_x(&id[y * w], &od[y * w], w, h, r); } __global__ void d_boxfilter_y_global(float *id, float *od, int w, int h, int r) { unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; d_boxfilter_y(&id[x], &od[x], w, h, r); } // texture version // texture fetches automatically clamp to edge of image __global__ void d_boxfilter_x_tex(float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; float t = 0.0f; for(int x =- r; x <= r; x++) { t += tex2D(tex, x, y); } od[y * w] = t * scale; for(int x = 1; x < w; x++) { t += tex2D(tex, x + r, y); t -= tex2D(tex, x - r - 1, y); od[y * w + x] = t * scale; } } __global__ void d_boxfilter_y_tex(float *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; float t = 
0.0f; for(int y = -r; y <= r; y++) { t += tex2D(tex, x, y); } od[x] = t * scale; for(int y = 1; y < h; y++) { t += tex2D(tex, x, y + r); t -= tex2D(tex, x, y - r - 1); od[y * w + x] = t * scale; } } // RGBA version // reads from 32-bit uint array holding 8-bit RGBA // convert floating point rgba color to 32-bit integer __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f); } __device__ float4 rgbaIntToFloat(uint c) { float4 rgba; rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f; rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f; rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f; rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f; return rgba; } // row pass using texture lookups __global__ void d_boxfilter_rgba_x(uint *od, int w, int h, int r) { float scale = 1.0f / (float)((r << 1) + 1); unsigned int y = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; // as long as address is always less than height, we do work if (y < h) { float4 t = make_float4(0.0f); for(int x = -r; x <= r; x++) { t += tex2D(rgbaTex, x, y); } od[y * w] = rgbaFloatToInt(t * scale); for(int x = 1; x < w; x++) { t += tex2D(rgbaTex, x + r, y); t -= tex2D(rgbaTex, x - r - 1, y); od[y * w + x] = rgbaFloatToInt(t * scale); } } } // column pass using coalesced global memory reads __global__ void d_boxfilter_rgba_y(uint *id, uint *od, int w, int h, int r) { unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; id = &id[x]; od = &od[x]; float scale = 1.0f / (float)((r << 1) + 1); float4 t; // do left edge t = rgbaIntToFloat(id[0]) * r; for (int y = 0; y < (r + 1); y++) { t += rgbaIntToFloat(id[y*w]); } od[0] = rgbaFloatToInt(t * scale); for(int y = 1; y < (r + 1); y++) { t += rgbaIntToFloat(id[(y + r) * w]); t -= rgbaIntToFloat(id[0]); od[y * w] = rgbaFloatToInt(t * scale); } // main loop for(int y = (r + 1); y < (h - r); y++) { t += rgbaIntToFloat(id[(y + r) * w]); t -= rgbaIntToFloat(id[((y - r) * w) - w]); od[y * w] = rgbaFloatToInt(t * scale); } // do right edge for (int y = h - r; y < h; y++) { t += rgbaIntToFloat(id[(h - 1) * w]); t -= rgbaIntToFloat(id[((y - r) * w) - w]); od[y * w] = rgbaFloatToInt(t * scale); } } extern "C" void initTexture(int width, int height, void *pImage) { int size = width * height * sizeof(unsigned int); // copy image data to array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); cutilSafeCall( cudaMallocArray ( &d_array, &channelDesc, width, height )); cutilSafeCall( cudaMemcpyToArray( d_array, 0, 0, pImage, size, cudaMemcpyHostToDevice)); cutilSafeCall( cudaMallocArray ( &d_tempArray, &channelDesc, width, height )); // set texture parameters tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModePoint; tex.normalized = true; // Bind the array to the texture cutilSafeCall( cudaBindTextureToArray(tex, d_array, channelDesc) ); } extern "C" void freeTextures() { cutilSafeCall(cudaFreeArray(d_array)); cutilSafeCall(cudaFreeArray(d_tempArray)); } /* Perform 2D box filter on image using CUDA Parameters: d_src - pointer to input image in device memory d_temp - pointer to temporary storage in device memory d_dest - pointer to destination image in device memory width - image width height - image height radius - 
filter radius iterations - number of iterations */ extern "C" double boxFilter(float *d_src, float *d_temp, float *d_dest, int width, int height, int radius, int iterations, int nthreads) { // var for kernel timing double dKernelTime = 0.0; // sync host and start computation timer cutilSafeCall( cutilDeviceSynchronize() ); shrDeltaT(0); cutilSafeCall( cudaBindTextureToArray(tex, d_array) ); for(int i=0; i<iterations; i++) { // use texture for horizontal pass d_boxfilter_x_tex<<< height / nthreads, nthreads, 0 >>>( d_temp, width, height, radius); d_boxfilter_y_global<<< width / nthreads, nthreads, 0 >>>( d_temp, d_dest, width, height, radius); // sync host and stop computation timer cutilSafeCall( cutilDeviceSynchronize() ); dKernelTime += shrDeltaT(0); if (iterations > 1) { // copy result back from global memory to array cutilSafeCall( cudaMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float), cudaMemcpyDeviceToDevice)); cutilSafeCall( cudaBindTextureToArray(tex, d_tempArray) ); } } return (dKernelTime/(double)iterations); } // RGBA version extern "C" double boxFilterRGBA(uint *d_src, uint *d_temp, uint *d_dest, int width, int height, int radius, int iterations, int nthreads) { cutilSafeCall( cudaBindTextureToArray(rgbaTex, d_array) ); // var for kernel computation timing double dKernelTime; for(int i=0; i<iterations; i++) { // sync host and start kernel computation timer dKernelTime = 0.0; cutilSafeCall(cutilDeviceSynchronize()); shrDeltaT(0); // use texture for horizontal pass d_boxfilter_rgba_x<<< height / nthreads, nthreads, 0 >>>( d_temp, width, height, radius); d_boxfilter_rgba_y<<< width / nthreads, nthreads, 0 >>>( d_temp, d_dest, width, height, radius); // sync host and stop computation timer cutilSafeCall( cutilDeviceSynchronize() ); dKernelTime += shrDeltaT(0); if (iterations > 1) { // copy result back from global memory to array cutilSafeCall( cudaMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float), cudaMemcpyDeviceToDevice)); cutilSafeCall( cudaBindTextureToArray(rgbaTex, d_tempArray) ); } } return (dKernelTime/(double)iterations); } #endif // #ifndef _BOXFILTER_KERNEL_H_
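/*
    Illustrative host-side reference (hypothetical helper, not part of the SDK sample) for the sliding-window
    row filter above. It mirrors the edge-duplication behaviour of d_boxfilter_x, so it can be used to validate
    the kernel output on small images: one add for the entering sample and one subtract for the leaving sample
    per output pixel, independent of the filter radius r.
*/
#include <algorithm>

void boxfilter_x_reference(const float* in, float* out, int w, int r)
{
    const float scale = 1.0f / (float)(2 * r + 1);
    // prime the window: r duplicated copies of the left edge plus samples 0..r
    float t = in[0] * r;
    for (int x = 0; x <= r; x++) t += in[x];
    out[0] = t * scale;
    // slide the window across the row, clamping indices at both edges
    for (int x = 1; x < w; x++) {
        t += in[std::min(x + r, w - 1)];   // entering sample on the right
        t -= in[std::max(x - r - 1, 0)];   // leaving sample on the left
        out[x] = t * scale;
    }
}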
#pragma once #include <cub/cub.cuh> template<int NUM_WARPS> class BlockWorkAssignmentBase { protected: static constexpr int NUM_THREADS = NUM_WARPS * WARP_SIZE; public: typedef cub::BlockScan<int, NUM_THREADS> SimpleScanT; protected: __device__ static void computeOffsets(int* work_sum, SimpleScanT::TempStorage& sum_space, int thread_work_count) { int output, aggregate; SimpleScanT(sum_space).ExclusiveSum(thread_work_count, output, aggregate); work_sum[threadIdx.x + 1] = output; work_sum[0] = 0; work_sum[NUM_THREADS + 1] = aggregate; } __device__ static int assignWorkAllThreads(int* work_sum, int* work_offsets, SimpleScanT::TempStorage& sum_space, int& aggregate) { // clear work offsets work_offsets[threadIdx.x] = 0; __syncthreads(); // compute which thread should start with a given work element int set_offset = work_sum[threadIdx.x + 1]; // set the entry for the thread to the work id if (work_sum[threadIdx.x+2] != set_offset && set_offset < NUM_THREADS) work_offsets[set_offset] = threadIdx.x; __syncthreads(); // read my offset (can be the right offset or zero as only the first one will have the right per triangle) int element_offset = work_offsets[threadIdx.x]; SimpleScanT(sum_space). template InclusiveScan<cub::Max>(element_offset, element_offset, cub::Max(), aggregate); return element_offset; } __device__ static int computeLocalWorkOffset(int* work_sum, int element_offset, int tid, int& inelement_tid) { // every thread gets its element offset and computes its offset within the element //int within_element_forward = tid - work_sum[element_offset + 1]; // run from back to front so we can just decrese the count iif there are not enough warps for a triangle int within_element_backward = work_sum[element_offset + 2] - tid - 1; inelement_tid = tid - work_sum[element_offset + 1]; return within_element_backward; } __device__ static bool pullWorkInternal(int* work_sum, int* work_offsets, SimpleScanT::TempStorage& sum_space, int& element_offset, int& within_element_offset, int& numWork, int& firstOffset, int& in_element_tid) { int aggregate; element_offset = assignWorkAllThreads(work_sum, work_offsets, sum_space, aggregate); within_element_offset = computeLocalWorkOffset(work_sum, element_offset, threadIdx.x, in_element_tid); numWork = min(NUM_THREADS, work_sum[NUM_THREADS + 1]); firstOffset = work_offsets[0]; __syncthreads(); // update counts work_sum[threadIdx.x + 2] = max(0, work_sum[threadIdx.x + 2] - NUM_THREADS); __syncthreads(); // note that we might have more threads than active elements, then the within_element_offset is negative.. 
return within_element_offset >= 0; } }; template<int NUM_WARPS, bool PARTIAL_TAKEOUT> class BlockWorkAssignment; template<int NUM_WARPS> class BlockWorkAssignment<NUM_WARPS, false> : public BlockWorkAssignmentBase<NUM_WARPS> { public: struct SharedMemT { int work_sum[NUM_THREADS + 2]; }; struct SharedTempMemT { SimpleScanT::TempStorage tempstorage; int work_offsets[ NUM_THREADS]; }; __device__ static void prepare(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int thread_work_count) { computeOffsets(shared_memory.work_sum, shared_temp_memory.tempstorage, thread_work_count); } __device__ static int availableWork(SharedMemT& shared_memory) { return const_cast<volatile int*>(shared_memory.work_sum)[NUM_THREADS + 1]; } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset) { int unused, unused2, unused3; return pullWorkInternal(shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, unused, unused2, unused3); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& sumwork) { int unused, unused2; return pullWorkInternal(shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, sumwork, unused, unused2); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& sumwork, int& firstOffset) { int unused; return pullWorkInternal(shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, sumwork, firstOffset, unused); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& sumwork, int& firstOffset, int& inElementOffset) { return pullWorkInternal(shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, sumwork, firstOffset, inElementOffset); } }; template<int NUM_WARPS> class BlockWorkAssignment<NUM_WARPS, true> : public BlockWorkAssignmentBase<NUM_WARPS> { public: struct SharedMemT { int work_sum[NUM_THREADS + 2]; int work_offsets[NUM_THREADS]; int lastTaken; }; struct SharedTempMemT { SimpleScanT::TempStorage tempstorage; }; __device__ static void prepare(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int thread_work_count) { computeOffsets(shared_memory.work_sum, shared_temp_memory.tempstorage, thread_work_count); } __device__ static int availableWork(SharedMemT& shared_memory) { return const_cast<volatile int*>(shared_memory.work_sum)[NUM_THREADS + 1]; } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset) { int unused, unused2, unused3; return pullWorkInternal(shared_memory.work_sum, shared_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, unused, unused2, unused3); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& sumwork) { int unused, unused2; return pullWorkInternal(shared_memory.work_sum, shared_memory.work_offsets, shared_temp_memory.tempstorage, 
element_offset, within_element_offset, sumwork, unused, unused2); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& sumwork, int& firstOffset) { int unused; return pullWorkInternal(shared_memory.work_sum, shared_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, sumwork, firstOffset, unused); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& sumwork, int& firstOffset, int& inElementOffset) { return pullWorkInternal(shared_memory.work_sum, shared_memory.work_offsets, shared_temp_memory.tempstorage, element_offset, within_element_offset, sumwork, firstOffset, inElementOffset); } __device__ static bool prepareConsistentWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory) { int aggregate; int element_offset = assignWorkAllThreads(shared_memory.work_sum, shared_memory.work_offsets, shared_temp_memory.tempstorage, aggregate); __syncthreads(); shared_memory.work_offsets[threadIdx.x] = element_offset; shared_memory.lastTaken = 0; return aggregate > 0; } __device__ static bool takeOutConsistentWorkThreads(SharedMemT& shared_memory, int& element_offset, int& within_element_offset) { int unused; return takeOutConsistentWorkThreads(shared_memory, element_offset, within_element_offset, unused); } __device__ static bool takeOutConsistentWorkThreads(int id, int takeOut, SharedMemT& shared_memory, int& element_offset, int& within_element_offset, int& numwork) { __syncthreads(); int takeOutId = shared_memory.lastTaken + id; element_offset = shared_memory.work_offsets[takeOutId]; int unused; within_element_offset = computeLocalWorkOffset(shared_memory.work_sum, element_offset, takeOutId, unused); numwork = min(NUM_THREADS,shared_memory.work_sum[NUM_THREADS + 1]) - shared_memory.lastTaken; __syncthreads(); if (threadIdx.x == 0) shared_memory.lastTaken += takeOut; return within_element_offset >= 0; } __device__ static int hasTakenWork(SharedMemT& shared_memory) { return shared_memory.lastTaken; } __device__ static void removeTakenWorkThreads(int taken, SharedMemT& shared_memory) { // update counts int new_work_sum = max(0,shared_memory.work_sum[threadIdx.x + 2] - taken); shared_memory.work_sum[threadIdx.x + 2] = new_work_sum; __syncthreads(); } }; template<int NUM_THREADS> class BlockWorkAssignmentOld { static constexpr int NUM_WARPS = NUM_THREADS / 32; public: //static constexpr size_t SHARED_MEMORY = 2 * NUM_THREADS * sizeof(int); //static constexpr size_t SHARED_TEMP_MEMORY = 1 * NUM_THREADS * sizeof(int)+sizeof(SimpleScanT::TempStorage); typedef cub::BlockScan<int, NUM_THREADS> SimpleScanT; struct SharedMemT { int work_count[NUM_THREADS]; int work_sum[NUM_THREADS]; }; struct SharedTempMemT { SimpleScanT::TempStorage tempstorage; struct { int work_offsets[NUM_THREADS]; }; }; private: __device__ static void computeOffsets(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int thread_work_count) { int res; SimpleScanT(shared_temp_memory.tempstorage).InclusiveSum(thread_work_count, res); shared_memory.work_sum[threadIdx.x] = res; } __device__ static int2 assignWorkAllThreads(int* work_count, int* work_sum, int* work_offsets, SimpleScanT::TempStorage& sum_space) { // clear work offsets work_offsets[threadIdx.x] = 0; __syncthreads(); // compute which thread should start with a given work element int set_offset = 
work_sum[threadIdx.x] - work_count[threadIdx.x]; // set the entry for the thread to the work id if (work_count[threadIdx.x] > 0 && set_offset < NUM_THREADS) work_offsets[set_offset] = threadIdx.x; __syncthreads(); // read my offset (can be the right offset or zero as only the first one will have the right per triangle) int element_offset = work_offsets[threadIdx.x]; int agg; SimpleScanT(sum_space). template InclusiveScan<cub::Max>(element_offset, element_offset, cub::Max(), agg); // every thread gets its triangle offset and computes its offset within the triangle int my_element_start = work_sum[element_offset] - work_count[element_offset]; int within_element_forwardnum = threadIdx.x - my_element_start; return make_int2(element_offset, within_element_forwardnum); } __device__ static int2 assignWorkAllWarps(int* work_count, int* work_sum, int* work_offsets, SimpleScanT::TempStorage& sum_space) { int wip = threadIdx.x / 32; // clear work offsets work_offsets[wip] = 0; __syncthreads(); // compute which warp should start with a given work element int set_offset = work_sum[threadIdx.x] - work_count[threadIdx.x]; // set the entry for the warp to the work id if (work_count[threadIdx.x] > 0 && set_offset < NUM_WARPS) work_offsets[set_offset] = threadIdx.x; __syncthreads(); // read my offset (can be the right offset or zero as only the first one will have the right per element) int element_offset = work_offsets[wip]; int agg; SimpleScanT(sum_space). template InclusiveScan<cub::Max>(element_offset, element_offset, cub::Max(), agg); // every warp gets its offset and computes its offset within the element int my_element_start = work_sum[element_offset] - work_count[element_offset]; int within_element_forwardnum = wip - my_element_start; return make_int2(element_offset, within_element_forwardnum); } public: __device__ static void prepare(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int thread_work_count) { shared_memory.work_count[threadIdx.x] = thread_work_count; computeOffsets(shared_memory, shared_temp_memory, thread_work_count); } __device__ static bool isWorkAvailable(SharedMemT& shared_memory) { volatile int* vsum = const_cast<volatile int*>(shared_memory.work_sum); return vsum[NUM_THREADS - 1] > 0; } __device__ static int availableWork(SharedMemT& shared_memory) { volatile int* vsum = const_cast<volatile int*>(shared_memory.work_sum); return vsum[NUM_THREADS - 1]; } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset) { int unused; return pullWorkThreads(shared_memory, shared_temp_memory, element_offset, within_element_offset, unused); } __device__ static bool pullWorkThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& numWork) { int2 res = assignWorkAllThreads(shared_memory.work_count, shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage); element_offset = res.x; int within_element_forwardnum = res.y; // run from back to front so we can just decrese the count iif there are not enough warps for a triangle within_element_offset = shared_memory.work_count[element_offset] - within_element_forwardnum - 1; numWork = min(NUM_WARPS, shared_memory.work_sum[NUM_THREADS - 1]); __syncthreads(); // update counts int new_work_sum = shared_memory.work_sum[threadIdx.x] - NUM_THREADS; shared_memory.work_count[threadIdx.x] = min(shared_memory.work_count[threadIdx.x], new_work_sum); 
shared_memory.work_sum[threadIdx.x] = new_work_sum; __syncthreads(); // note that we might have more threads than active elements, then the within_element_offset is negative.. return within_element_offset >= 0; } __device__ static bool pullWorkWarps(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset) { int unused; return pullWorkWarps(shared_memory, shared_temp_memory, element_offset, within_element_offset, unused); } __device__ static bool pullWorkWarps(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, int& element_offset, int& within_element_offset, int& numWork) { int2 res = assignWorkAllWarps(shared_memory.work_count, shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage); element_offset = res.x; int within_element_forwardnum = res.y; // run from back to front so we can just decrese the count iif there are not enough warps for an element within_element_offset = shared_memory.work_count[element_offset] - within_element_forwardnum - 1; numWork = min(NUM_WARPS,shared_memory.work_sum[NUM_THREADS - 1]); __syncthreads(); // update counts int new_work_sum = shared_memory.work_sum[threadIdx.x] - NUM_WARPS; shared_memory.work_count[threadIdx.x] = min(shared_memory.work_count[threadIdx.x], new_work_sum); shared_memory.work_sum[threadIdx.x] = new_work_sum; __syncthreads(); // note that we might have more threads than active elements, then the within_element_offset is negative.. return within_element_offset >= 0; } template<typename F> __device__ static bool pullWorkSelectiveThreads(SharedMemT& shared_memory, SharedTempMemT& shared_temp_memory, F f, bool deliverreversed = true) { int2 assigned_work = assignWorkAllThreads(shared_memory.work_count, shared_memory.work_sum, shared_temp_memory.work_offsets, shared_temp_memory.tempstorage); if (deliverreversed) assigned_work.y = shared_memory.work_count[assigned_work.x] - assigned_work.y - 1; bool res = f(shared_memory.work_count, shared_memory.work_sum, assigned_work, assigned_work.y >= 0); __syncthreads(); computeOffsets(shared_memory, shared_temp_memory, shared_memory.work_count[threadIdx.x]); __syncthreads(); return res; } }; #endif // INCLUDED_WORK_ASSIGNMENT
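////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Self-contained demo (hypothetical, not part of this header) of the assignment trick used by assignWorkAllThreads:
// exclusive-sum the per-thread work counts, scatter each producer's thread id into the first output slot it owns,
// then propagate the ids with an inclusive max-scan. Afterwards every thread knows which producer its slot belongs
// to and its forward index inside that producer's work; the header above additionally runs the local index back to
// front so that surplus threads see a negative offset. Error checking is omitted for brevity.
#include <cub/cub.cuh>
#include <cstdio>

__global__ void demo_work_assignment(const int* counts, int* owner, int* local)
{
    constexpr int NUM_THREADS = 128;
    typedef cub::BlockScan<int, NUM_THREADS> BlockScan;
    __shared__ typename BlockScan::TempStorage temp;
    __shared__ int start[NUM_THREADS];       // first output slot of each producer
    __shared__ int slot_owner[NUM_THREADS];  // producer id scattered per slot

    int my_count = counts[threadIdx.x];
    int my_start, total;
    BlockScan(temp).ExclusiveSum(my_count, my_start, total);
    start[threadIdx.x] = my_start;
    slot_owner[threadIdx.x] = 0;
    __syncthreads();

    // scatter: a producer with work marks its first slot with its own thread id
    if (my_count > 0 && my_start < NUM_THREADS)
        slot_owner[my_start] = threadIdx.x;
    __syncthreads();

    // propagate the ids forward so every slot learns its producer
    int id = slot_owner[threadIdx.x], aggregate;
    __syncthreads();    // the scan temp storage is reused, so a barrier is required
    BlockScan(temp).InclusiveScan(id, id, cub::Max(), aggregate);

    if (threadIdx.x < total) {
        owner[threadIdx.x] = id;                       // which producer this slot serves
        local[threadIdx.x] = threadIdx.x - start[id];  // forward index within that producer's work
    }
}

int main()
{
    int h_counts[128] = { 0 };
    h_counts[0] = 5; h_counts[1] = 3; h_counts[7] = 200;   // arbitrary demo work load
    int *d_counts, *d_owner, *d_local;
    cudaMalloc(&d_counts, sizeof(h_counts));
    cudaMalloc(&d_owner, 128 * sizeof(int));
    cudaMalloc(&d_local, 128 * sizeof(int));
    cudaMemcpy(d_counts, h_counts, sizeof(h_counts), cudaMemcpyHostToDevice);
    demo_work_assignment<<<1, 128>>>(d_counts, d_owner, d_local);

    int h_owner[128], h_local[128];
    cudaMemcpy(h_owner, d_owner, sizeof(h_owner), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_local, d_local, sizeof(h_local), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 12; i++)
        printf("slot %d -> producer %d, local index %d\n", i, h_owner[i], h_local[i]);
    cudaFree(d_counts); cudaFree(d_owner); cudaFree(d_local);
    return 0;
}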
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "cublas_v2.h" #include "settings.h" #define MAX_SENTENCE_CHUNK 128 struct CudaStorageInner { cublasHandle_t* handle; cudaStream_t stream_data; }; void AssertCudaSuccess(cudaError_t stat, const char* message) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA ERROR:\n %s\n %s\n", message, cudaGetErrorString(stat)); exit(2); } } void AssertCudaSuccessLast(const char* message) { return AssertCudaSuccess(cudaGetLastError(), message); } void InitCudaStorage(CudaStorage* cust, size_t layer_size, size_t vocab_size, size_t maxent_hash_size, Real lnz, bool memory_efficient_maxent) { cust->layer_size = layer_size; cust->vocab_size = vocab_size; cust->lnz = lnz; cust->memory_efficient_maxent = memory_efficient_maxent; cust->maxent_hash_size = maxent_hash_size; cudaError_t stat; stat = cudaMalloc((void**)&(cust->sm_embedding), layer_size * vocab_size * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for sm_embedding"); if (maxent_hash_size) { if (memory_efficient_maxent) { cust->maxent_indices_all = NULL; cust->maxent_cpu = new Real[maxent_hash_size]; stat = cudaMalloc((void**)&(cust->maxent), MAX_NGRAM_ORDER * vocab_size * MAX_SENTENCE_CHUNK * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for maxent"); } else { cust->maxent_cpu = NULL; stat = cudaMalloc((void**)&(cust->maxent), maxent_hash_size * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for maxent"); stat = cudaMalloc((void**)&(cust->maxent_indices_all), MAX_SENTENCE_CHUNK * MAX_NGRAM_ORDER * sizeof(uint64_t)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for maxent_indices_all"); } stat = cudaMalloc((void**)&(cust->maxent_indices_count_all), MAX_SENTENCE_CHUNK * sizeof(int)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for maxent_indices_count_all"); } else { cust->maxent = NULL; cust->maxent_indices_all = NULL; cust->maxent_indices_count_all = NULL; } stat = cudaMalloc((void**)&(cust->hidden_layers), layer_size * MAX_SENTENCE_CHUNK * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for hidden_layers"); stat = cudaMalloc((void**)&(cust->logprobs), MAX_SENTENCE_CHUNK * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for logprobs"); stat = cudaMalloc((void**)&(cust->sen_shifted), MAX_SENTENCE_CHUNK * sizeof(WordIndex)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for sen"); stat = cudaMalloc((void**)&(cust->scores), vocab_size * MAX_SENTENCE_CHUNK * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for scores"); stat = cudaMalloc((void**)&(cust->target_scores), MAX_SENTENCE_CHUNK * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for target_scores"); cust->target_scores_host = new Real[MAX_SENTENCE_CHUNK]; { Real* tmp = new Real[vocab_size]; for (size_t i = 0; i < vocab_size; ++i) { tmp[i] = 1; } stat = cudaMalloc((void**)&(cust->vocab_ones), vocab_size * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for vocab_ones"); cudaMemcpy(cust->vocab_ones, tmp, vocab_size * sizeof(Real), cudaMemcpyHostToDevice); delete[] tmp; } stat = cudaMalloc((void**)&(cust->sententence_Z), MAX_SENTENCE_CHUNK * sizeof(Real)); AssertCudaSuccess(stat, "Failed to allocate cuda memory for sententence_Z"); cust->sententence_Z_host = new Real[MAX_SENTENCE_CHUNK]; cust->inner = new CudaStorageInner; cust->inner->handle = new cublasHandle_t; cublasStatus_t cu_stat = 
cublasCreate(cust->inner->handle); if (cu_stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "CUBLAS initialization failed\n"); exit(1); } stat = cudaStreamCreate(&cust->inner->stream_data); AssertCudaSuccess(stat, "Failed to create stream"); } void FreeCudaStorage(CudaStorage* cust) { cudaFree(cust->sm_embedding); if (cust->maxent_hash_size != 0) { delete[] cust->maxent_cpu; cudaFree(cust->maxent); if (!cust->memory_efficient_maxent) { cudaFree(cust->maxent_indices_all); } cudaFree(cust->maxent_indices_count_all); } cudaFree(cust->hidden_layers); cudaFree(cust->logprobs); cudaFree(cust->sen_shifted); cudaFree(cust->scores); cudaFree(cust->target_scores); delete[] cust->target_scores_host; cudaFree(cust->vocab_ones); cudaFree(cust->sententence_Z); delete[] cust->sententence_Z_host; cublasDestroy(*cust->inner->handle); delete cust->inner->handle; cudaStreamDestroy(cust->inner->stream_data); delete cust->inner; } void UploadNetWeights(CudaStorage* cust, const Real* sm_embedding_cpu, const Real* maxent_cpu) { cudaMemcpy(cust->sm_embedding, sm_embedding_cpu, cust->layer_size * cust->vocab_size * sizeof(Real), cudaMemcpyHostToDevice); if (cust->memory_efficient_maxent) { for (size_t i = 0; i < cust->maxent_hash_size; ++i) { cust->maxent_cpu[i] = maxent_cpu[i]; } } else { cudaMemcpy(cust->maxent, maxent_cpu, cust->maxent_hash_size * sizeof(Real), cudaMemcpyHostToDevice); } } __global__ void initialize_matrix(Real *a, int rows, int cols, Real value) { int ix = blockIdx.x * blockDim.x + threadIdx.x, iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix < rows && iy < cols) { a[ix * cols + iy] = value; } } __global__ void take_exp(Real *a, int rows, int cols) { int ix = blockIdx.x * blockDim.x + threadIdx.x, iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix < rows && iy < cols) { a[ix * cols + iy] = exp(a[ix * cols + iy]); } } __global__ void add_maxent(int sentence_length, int vocab_size, const WordIndex* sen_shifted, const Real* maxent, const uint64_t* maxent_indices_all, const int* maxent_indices_count_all, Real* scores) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if ((ix < sentence_length) && (iy < vocab_size)) { int maxent_indices_count = maxent_indices_count_all[ix]; Real s = scores[ix * vocab_size + iy]; for (int i = 0; i < maxent_indices_count; ++i) { uint64_t maxent_index = maxent_indices_all[ix * MAX_NGRAM_ORDER + i] + iy; s += maxent[maxent_index]; } scores[ix * vocab_size + iy] = exp(s); } } __global__ void add_prepared_maxent(int sentence_length, int vocab_size, const WordIndex* sen_shifted, const Real* maxent_prepared, const int* maxent_indices_count_all, int max_maxent_order, Real* scores) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; if((ix < sentence_length) && (iy < vocab_size)) { int maxent_indices_count = maxent_indices_count_all[ix]; Real s = scores[ix * vocab_size + iy]; for (int i = 0; i < maxent_indices_count; ++i) { uint64_t maxent_index = (max_maxent_order * ix + i) * vocab_size + iy; s += maxent_prepared[maxent_index]; } scores[ix * vocab_size + iy] = exp(s); } } __global__ void pick_target_scores(const Real* scores, const WordIndex* sen_shifted, size_t vocab_size, Real* output) { int word_id = threadIdx.x; output[word_id] = scores[word_id * vocab_size + sen_shifted[word_id]]; } void CublasMultiply_A_BT(cublasHandle_t* handle, Real beta, int rows_a, int rows_b, int cols, Real* dev_a, Real* dev_b, Real* dev_c) { if (cols == 0) return; // C <- A * B^T + C * beta const Real 
alpha = 1; #ifdef USE_DOUBLE cublasDgemm #else cublasSgemm #endif (*handle, CUBLAS_OP_T, CUBLAS_OP_N, rows_b, rows_a, cols, &alpha, dev_b, cols, dev_a, cols, &beta, dev_c, rows_b); } void CalculateSoftMax( CudaStorage* cust, const Real* hidden_layers, const uint64_t* maxent_indices_all, const int* maxent_indices_count_all, size_t sentence_length, const WordIndex* sen, Real* logprobs) { if (sentence_length > MAX_SENTENCE_CHUNK) { // process long sentences by chunks CalculateSoftMax( cust, hidden_layers, maxent_indices_all, maxent_indices_count_all, MAX_SENTENCE_CHUNK, sen, logprobs); CalculateSoftMax( cust, hidden_layers + cust->layer_size * MAX_SENTENCE_CHUNK, maxent_indices_all + MAX_SENTENCE_CHUNK * MAX_NGRAM_ORDER, maxent_indices_count_all + MAX_SENTENCE_CHUNK, sentence_length - MAX_SENTENCE_CHUNK, sen + MAX_SENTENCE_CHUNK, logprobs + MAX_SENTENCE_CHUNK); return; } cudaError_t stat; const size_t layer_size = cust->layer_size; const size_t vocab_size = cust->vocab_size; stat = cudaMemcpy(cust->hidden_layers, hidden_layers, layer_size * sentence_length * sizeof(Real), cudaMemcpyHostToDevice); AssertCudaSuccess(stat, "Failed to copy hidden layers to cuda"); // copy shifted version of sen to cuda, i.e sen_shifted[i] contains target at position i, // sen[i] contains input at position i stat = cudaMemcpy(cust->sen_shifted, sen + 1, sentence_length * sizeof(WordIndex), cudaMemcpyHostToDevice); AssertCudaSuccess(stat, "Failed to copy sentences to cuda"); if (cust->maxent_hash_size) { if (!cust->memory_efficient_maxent) { stat = cudaMemcpyAsync(cust->maxent_indices_all, maxent_indices_all, sentence_length * MAX_NGRAM_ORDER * sizeof(uint64_t), cudaMemcpyHostToDevice, cust->inner->stream_data); AssertCudaSuccess(stat, "Failed to copy maxent_indices_all to cuda"); } stat = cudaMemcpyAsync(cust->maxent_indices_count_all, maxent_indices_count_all, sentence_length * sizeof(int), cudaMemcpyHostToDevice, cust->inner->stream_data); AssertCudaSuccess(stat, "Failed to copy maxent_indices_count_all to cuda"); } { const size_t BLOCK_SIZE = 32; dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE), blocksPerGrid((sentence_length + threadsPerBlock.x - 1) / BLOCK_SIZE, (vocab_size + threadsPerBlock.y - 1) / BLOCK_SIZE); initialize_matrix<<< blocksPerGrid, threadsPerBlock >>>( cust->scores, sentence_length, vocab_size, -cust->lnz); CublasMultiply_A_BT( cust->inner->handle, 1, sentence_length, vocab_size, layer_size, cust->hidden_layers, cust->sm_embedding, cust->scores); if (cust->maxent_hash_size) { if (cust->memory_efficient_maxent) { int max_maxent_order = 0; for (int i = 0; i < sentence_length; ++i) { int maxent_order = maxent_indices_count_all[i]; max_maxent_order = (max_maxent_order < maxent_order) ? 
maxent_order : max_maxent_order; } for (int pos = 0; pos < sentence_length; ++pos) { for (int i = 0; i < maxent_indices_count_all[pos]; ++i) { uint64_t maxent_index = maxent_indices_all[pos * MAX_NGRAM_ORDER + i]; Real* dst = cust->maxent + (max_maxent_order * pos + i) * vocab_size; Real* src = cust->maxent_cpu + maxent_index; cudaMemcpyAsync(dst, src, vocab_size * sizeof(Real), cudaMemcpyHostToDevice, cust->inner->stream_data); } } cudaStreamSynchronize(cust->inner->stream_data); add_prepared_maxent<<< blocksPerGrid, threadsPerBlock >>>( sentence_length, vocab_size, cust->sen_shifted, cust->maxent, cust->maxent_indices_count_all, max_maxent_order, cust->scores); } else { cudaStreamSynchronize(cust->inner->stream_data); add_maxent<<< blocksPerGrid, threadsPerBlock >>>( sentence_length, vocab_size, cust->sen_shifted, cust->maxent, cust->maxent_indices_all, cust->maxent_indices_count_all, cust->scores); } } else { take_exp<<< blocksPerGrid, threadsPerBlock >>>( cust->scores, sentence_length, vocab_size); } } cudaDeviceSynchronize(); pick_target_scores<<< 1, sentence_length >>>( cust->scores, cust->sen_shifted, vocab_size, cust->target_scores); CublasMultiply_A_BT( cust->inner->handle, 0, sentence_length, 1, vocab_size, cust->scores, cust->vocab_ones, cust->sententence_Z); stat = cudaMemcpy(cust->target_scores_host, cust->target_scores, sentence_length * sizeof(Real), cudaMemcpyDeviceToHost); AssertCudaSuccess(stat, "Failed to copy target scores from cuda"); cudaMemcpy(cust->sententence_Z_host, cust->sententence_Z, sentence_length * sizeof(Real), cudaMemcpyDeviceToHost); AssertCudaSuccess(stat, "Failed to copy probabilities from cuda"); for (size_t i = 0; i < sentence_length; ++i) { Real target_score = cust->target_scores_host[i]; Real sum = cust->sententence_Z_host[i]; logprobs[i] = log10(target_score / sum); } }
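////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Standalone illustration (hypothetical test program, not part of this file) of why CublasMultiply_A_BT passes its
// operands in "swapped" order: cuBLAS works on column-major matrices, and the column-major views of row-major A and
// B (leading dimension `cols`) are their transposes, so requesting op(B^T_view) * A_view with m = rows_b, n = rows_a
// lands exactly the row-major matrix C = A * B^T that the rest of the code indexes as scores[row * vocab_size + col].
// The result is checked against a plain CPU loop; error checking is omitted for brevity.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <cmath>

int main()
{
    const int rows_a = 2, rows_b = 3, cols = 4;
    float A[rows_a * cols], B[rows_b * cols];               // row-major inputs
    for (int i = 0; i < rows_a * cols; ++i) A[i] = 0.1f * i;
    for (int i = 0; i < rows_b * cols; ++i) B[i] = 0.2f * i - 1.0f;

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(A));
    cudaMalloc(&dB, sizeof(B));
    cudaMalloc(&dC, rows_a * rows_b * sizeof(float));
    cudaMemcpy(dA, A, sizeof(A), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(B), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // same call shape as CublasMultiply_A_BT: m = rows_b, n = rows_a, k = cols
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, rows_b, rows_a, cols,
                &alpha, dB, cols, dA, cols, &beta, dC, rows_b);

    float C[rows_a * rows_b];
    cudaMemcpy(C, dC, sizeof(C), cudaMemcpyDeviceToHost);

    // CPU reference: C[i][j] = dot(row i of A, row j of B); the GPU result reads back row-major with ld = rows_b
    for (int i = 0; i < rows_a; ++i)
        for (int j = 0; j < rows_b; ++j) {
            float ref = 0.0f;
            for (int k = 0; k < cols; ++k) ref += A[i * cols + k] * B[j * cols + k];
            printf("C[%d][%d] gpu=%f cpu=%f %s\n", i, j, C[i * rows_b + j], ref,
                   std::fabs(C[i * rows_b + j] - ref) < 1e-4f ? "ok" : "MISMATCH");
        }

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}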
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <stdio.h> #include <float.h> #include <math.h> using namespace at; // TODO make it in a common file #define CUDA_KERNEL_LOOP_X(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_XTHREADS = 1024; const int CUDA_NUM_XYXTHREADS = 32; const int CUDA_NUM_XYYTHREADS = 32; const int kMaxGridNum = 65535; const int SDIM = 32; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_XTHREADS - 1) / CUDA_NUM_XTHREADS); } inline int GET_XBLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_XYXTHREADS - 1) / CUDA_NUM_XYXTHREADS); } inline int GET_YBLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_XYYTHREADS - 1) / CUDA_NUM_XYYTHREADS); } } template <typename scalar_t> __global__ void subtraction_gaussian_forward_kernel( const scalar_t* query, const scalar_t* key, scalar_t* output, int xthreads, int batch_size, int num_head, int q_len, int k_len, int input_channels ) { CUDA_KERNEL_LOOP_X(index, xthreads) { // printf("tid: %d \\n", index); const int b = index / num_head / q_len / k_len; const int h = (index / q_len / k_len) % num_head; const int p = (index / k_len) % q_len; const int q = index % k_len; // printf("b: %d, h: %d, p: %d, q: %d \\n", b, h, p, q); if (index < batch_size * num_head * q_len * k_len){ scalar_t sum = 0; for (int c = 0; c < input_channels; c++){ int query_offset = b * (num_head * q_len * input_channels) + h * (q_len * input_channels) + p * input_channels + c; int key_offset = b * (num_head * k_len * input_channels) + h * (k_len * input_channels) + c * k_len + q; scalar_t dis = query[query_offset] - key[key_offset]; // printf("query off: %d, key off: %d, query data: %f, key data: %f, dis: %f \\n", query_offset, key_offset, query[query_offset], query[key_offset], dis); sum += dis * dis; // printf("dis %f \\n", dis * dis); } // printf("sum: %f \\n", sum); output[index] = sum; } } } template <typename scalar_t> __global__ void subtraction_gaussian_backward_query_kernel( const scalar_t* const output_diff, const scalar_t* const query_data, const scalar_t* const key_data, scalar_t* query_diff, int xthreads, int batch_size, int num_head, int q_len, int k_len, int input_channels) { CUDA_KERNEL_LOOP_X(index, xthreads) { // printf("tid: %d \n", index); const int b = index / num_head / q_len / input_channels; const int h = (index / q_len / input_channels) % num_head; const int q = (index / input_channels) % q_len; const int c = index % input_channels; // printf("b: %d, h: %d, q: %d, c: %d \n", b, h, q, c); if (index < batch_size * num_head * q_len * input_channels){ scalar_t sum = 0; int query_offset = b * (num_head * q_len * input_channels) + h * (q_len * input_channels) + q * input_channels + c; scalar_t query = query_data[query_offset]; for (int k = 0; k < k_len; k++){ int output_offset = b * (num_head * k_len * q_len) + h * (k_len * q_len) + q * k_len + k; int key_offset = b * (num_head * k_len * input_channels) + h * (k_len * input_channels) + c * k_len + k; sum += 2 * output_diff[output_offset] * (query - key_data[key_offset]); // scalar_t output_ind= output_diff[output_offset]; // scalar_t key_ind= key_data[key_offset]; // printf("query off: %d, key off: %d, output off: %d, query data: %f, key data: %f, output data: %f\n", query_offset, key_offset, output_offset, query, key_ind, output_ind); } // printf("sum: %f \n", 
sum); query_diff[index] = sum; } } } template <typename scalar_t> __global__ void subtraction_gaussian_backward_key_kernel( const scalar_t* const output_diff, const scalar_t* const query_data, const scalar_t* const key_data, scalar_t* key_diff, int xthreads, int batch_size, int num_head, int q_len, int k_len, int input_channels) { CUDA_KERNEL_LOOP_X(index, xthreads) { // printf("tid: %d \n", index); const int b = index / num_head / input_channels / k_len; const int h = (index / input_channels / k_len) % num_head; const int c = (index / k_len) % input_channels; const int k = index % k_len; // printf("b: %d, h: %d, c: %d, k: %d \n", b, h, c, k); if (index < batch_size * num_head * k_len * input_channels){ scalar_t sum = 0; int key_offset = b * (num_head * k_len * input_channels) + h * (k_len * input_channels) + c * k_len + k; scalar_t key = key_data[key_offset]; for (int q = 0; q < q_len; q++){ int output_offset = b * (num_head * k_len * q_len) + h * (k_len * q_len) + q * k_len + k; int query_offset = b * (num_head * q_len * input_channels) + h * (q_len * input_channels) + q * input_channels + c; sum += 2 * output_diff[output_offset] * (query_data[query_offset] - key); // scalar_t output_ind= output_diff[output_offset]; // scalar_t query_ind= query_data[query_offset]; // printf("query off: %d, key off: %d, output off: %d, query data: %f, key data: %f, output data: %f\n", query_offset, key_offset, output_offset, query_ind, key, output_ind); } // printf("sum: %f \n", sum); key_diff[index] = -sum; } } } template <typename scalar_t> __global__ void subtraction_reduce_gaussian_forward_kernel( const scalar_t* query, const scalar_t* key, scalar_t* output, int xthreads, int batch_size, int num_head, int q_len, int k_len, int input_channels ) { CUDA_KERNEL_LOOP_X(index, xthreads) { // printf("tid: %d \\n", index); __shared__ scalar_t cdata[32][32]; const int b = index / num_head / q_len / k_len; const int h = (index / q_len / k_len) % num_head; const int p = (index / k_len) % q_len; const int q = index % k_len; // printf("b: %d, h: %d, p: %d, q: %d \\n", b, h, p, q); scalar_t sum = 0; if (index < batch_size * num_head * q_len * k_len){ for (int c = 0; c < input_channels; c++){ int query_offset = b * (num_head * q_len * input_channels) + h * (q_len * input_channels) + p * input_channels + c; int key_offset = b * (num_head * k_len * input_channels) + h * (k_len * input_channels) + c * k_len + q; scalar_t dis = query[query_offset] - key[key_offset]; // printf("query off: %d, key off: %d, query data: %f, key data: %f, dis: %f \\n", query_offset, key_offset, query[query_offset], query[key_offset], dis); cdata[index][c] = dis * dis; sum += dis * dis; // printf("dis %f \\n", dis * dis); } __syncthreads(); // printf("cdata0: %f ", cdata[0]); int ytid = threadIdx.y; // printf("ytid %d \n", ytid); if (ytid < 16){ // scalar_t *vcdata = cdata; // printf("32cdata0: %f, 32cdata16: %f\n", cdata[0], cdata[16]); __syncthreads(); cdata[index][ytid] += cdata[index][ytid + 16]; __syncthreads(); // printf("16cdata0: %f\n", cdata[0]); __syncthreads(); cdata[index][ytid] += cdata[index][ytid + 8]; __syncthreads(); // printf("8cdata0: %f\n", cdata[0]); cdata[index][ytid] += cdata[index][ytid + 4]; __syncthreads(); // printf("4cdata0: %f\n", cdata[0]); cdata[index][ytid] += cdata[index][ytid + 2]; __syncthreads(); // printf("2cdata0: %f\n", cdata[0]); cdata[index][ytid] += cdata[index][ytid + 1]; __syncthreads(); // printf("1cdata0: %f\n", cdata[0]); } if (ytid == 0) {output[index]= cdata[index][0];} } } } namespace SOFT { 
void subtraction_gaussian_forward_cuda( const at::Tensor query, const at::Tensor key, at::Tensor output, int batch_size, int num_head, int q_len, int k_len, int input_channels) { const int nx = batch_size * num_head * q_len * k_len; at::cuda::CUDAGuard device_guard(query.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( query.scalar_type(), "subtraction_gaussian_forward_gpu", ([&] { const scalar_t* query_ = query.data_ptr<scalar_t>(); const scalar_t* key_ = key.data_ptr<scalar_t>(); scalar_t* output_ = output.data_ptr<scalar_t>(); subtraction_gaussian_forward_kernel<<<GET_BLOCKS(nx), CUDA_NUM_XTHREADS, 0, stream>>>( query_, key_, output_, nx, batch_size, num_head, q_len, k_len, input_channels); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in subtraction_gaussian_forward_cuda: %s\n", cudaGetErrorString(err)); } } void subtraction_gaussian_backward_query_cuda( const at::Tensor output_diff, const at::Tensor query_data, const at::Tensor key_data, at::Tensor query_diff, int batch_size, int num_head, int q_len, int k_len, int input_channels) { const int nx = batch_size * num_head * q_len * input_channels; // printf("query nx: %d \n", nx); at::cuda::CUDAGuard device_guard(query_data.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( query_data.scalar_type(), "subtraction_gaussian_backward_query_gpu", ([&] { const scalar_t* output_diff_ = output_diff.data_ptr<scalar_t>(); const scalar_t* query_data_ = query_data.data_ptr<scalar_t>(); const scalar_t* key_data_ = key_data.data_ptr<scalar_t>(); scalar_t* query_diff_ = query_diff.data_ptr<scalar_t>(); subtraction_gaussian_backward_query_kernel<<<GET_BLOCKS(nx), CUDA_NUM_XTHREADS, 0, stream>>>( output_diff_, query_data_, key_data_, query_diff_, nx, batch_size, num_head, q_len, k_len, input_channels); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in subtraction_gaussian_backward_query_cuda: %s\n", cudaGetErrorString(err)); } } void subtraction_gaussian_backward_key_cuda( const at::Tensor output_diff, const at::Tensor query_data, const at::Tensor key_data, at::Tensor key_diff, int batch_size, int num_head, int q_len, int k_len, int input_channels) { const int nx = batch_size * num_head * k_len * input_channels; // printf("key nx: %d \n", nx); // printf("key len: %d \n", k_len); // printf("key channel: %d \n", input_channels); at::cuda::CUDAGuard device_guard(query_data.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( query_data.scalar_type(), "subtraction_gaussian_backward_key_gpu", ([&] { const scalar_t* output_diff_ = output_diff.data_ptr<scalar_t>(); const scalar_t* query_data_ = query_data.data_ptr<scalar_t>(); const scalar_t* key_data_ = key_data.data_ptr<scalar_t>(); scalar_t* key_diff_ = key_diff.data_ptr<scalar_t>(); subtraction_gaussian_backward_key_kernel<<<GET_BLOCKS(nx), CUDA_NUM_XTHREADS, 0, stream>>>( output_diff_, query_data_, key_data_, key_diff_, nx, batch_size, num_head, q_len, k_len, input_channels); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in subtraction_gaussian_backward_key_cuda: %s\n", cudaGetErrorString(err)); } } void subtraction_reduce_gaussian_forward_cuda( const at::Tensor query, const at::Tensor key, at::Tensor output, int batch_size, int num_head, int q_len, int k_len, int input_channels) { const int nx = batch_size * num_head * q_len * k_len; 
const int ny = input_channels; at::cuda::CUDAGuard device_guard(query.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( query.scalar_type(), "subtraction_reduce_gaussian_forward_gpu", ([&] { const scalar_t* query_ = query.data_ptr<scalar_t>(); const scalar_t* key_ = key.data_ptr<scalar_t>(); scalar_t* output_ = output.data_ptr<scalar_t>(); dim3 block(CUDA_NUM_XYXTHREADS, CUDA_NUM_XYYTHREADS); dim3 grid(GET_XBLOCKS(nx), GET_YBLOCKS(ny)); subtraction_reduce_gaussian_forward_kernel<<<grid, block, 0, stream>>>( query_, key_, output_, nx, batch_size, num_head, q_len, k_len, input_channels); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in subtraction_gaussian_forward_cuda: %s\n", cudaGetErrorString(err)); } } }
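// Host-side reference (hypothetical helper, not part of this extension) for subtraction_gaussian_forward_kernel.
// It reproduces the exact indexing used above: query is laid out as [batch, head, q_len, channels], key as
// [batch, head, channels, k_len], and the output [batch, head, q_len, k_len] holds the pairwise squared Euclidean
// distance between a query row and a key column. Useful for unit-testing the CUDA path on small float tensors.
#include <cstddef>

void subtraction_gaussian_forward_cpu(const float* query, const float* key, float* output,
                                      int batch_size, int num_head, int q_len, int k_len, int input_channels)
{
    for (int b = 0; b < batch_size; ++b)
        for (int h = 0; h < num_head; ++h)
            for (int p = 0; p < q_len; ++p)
                for (int q = 0; q < k_len; ++q) {
                    float sum = 0.0f;
                    for (int c = 0; c < input_channels; ++c) {
                        // same offsets as the kernel: query row-major in channels, key stored transposed
                        std::size_t query_offset = (((std::size_t)b * num_head + h) * q_len + p) * input_channels + c;
                        std::size_t key_offset   = (((std::size_t)b * num_head + h) * input_channels + c) * k_len + q;
                        float dis = query[query_offset] - key[key_offset];
                        sum += dis * dis;
                    }
                    output[(((std::size_t)b * num_head + h) * q_len + p) * k_len + q] = sum;
                }
}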
#include <cublas.h> #include <helper_cuda.h> #include <helper_timer.h> // uncomment if you do not use the viewer. //#define NOVIEWER #include "3dregistration.h" using namespace std; __global__ static void updateA(int rowsA, int colsA, int pitchA, const float* d_Xx, const float* d_Xy, const float* d_Xz, const float* d_Yx, const float* d_Yy, const float* d_Yz, const float* d_R, const float* d_t, float* d_A, float sigma_p2){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float XxShare[BLOCK_SIZE]; __shared__ float XyShare[BLOCK_SIZE]; __shared__ float XzShare[BLOCK_SIZE]; __shared__ float YxShare[BLOCK_SIZE]; __shared__ float YyShare[BLOCK_SIZE]; __shared__ float YzShare[BLOCK_SIZE]; __shared__ float RShare[9]; __shared__ float tShare[3]; if(threadIdx.x == 0 && threadIdx.y == 0) { for (int i = 0; i < 9; i++) RShare[i] = d_R[i]; for (int i = 0; i < 3; i++) tShare[i] = d_t[i]; } if(r < rowsA && c < colsA){ // check for only inside the matrix A if(threadIdx.x == 0){ XxShare[threadIdx.y] = d_Xx[c]; XyShare[threadIdx.y] = d_Xy[c]; XzShare[threadIdx.y] = d_Xz[c]; } if(threadIdx.y == 0){ YxShare[threadIdx.x] = d_Yx[r]; YyShare[threadIdx.x] = d_Yy[r]; YzShare[threadIdx.x] = d_Yz[r]; } __syncthreads(); #define Xx XxShare[threadIdx.y] #define Xy XyShare[threadIdx.y] #define Xz XzShare[threadIdx.y] #define Yx YxShare[threadIdx.x] #define Yy YyShare[threadIdx.x] #define Yz YzShare[threadIdx.x] #define R(i) RShare[i] #define t(i) tShare[i] // #define Euclid(a,b,c) ((a)*(a)+(b)*(b)+(c)*(c)) // float tmp = // Euclid(Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)), // Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)), // Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)) ); // tmp = expf(-tmp/sigma_p^2) float tmpX = Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)); float tmpY = Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)); float tmpZ = Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)); __syncthreads(); tmpX *= tmpX; tmpY *= tmpY; tmpZ *= tmpZ; tmpX += tmpY; tmpX += tmpZ; tmpX /= sigma_p2; tmpX = expf(-tmpX); //float *A = (float*)((char*)d_A + c * pitchMinBytes) + r; d_A[c * pitchA + r] = tmpX; } } __global__ static void normalizeRowsOfA(int rowsA, int colsA, int pitchA, float *d_A, const float *d_C ){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float d_CShare[BLOCK_SIZE]; if(r < rowsA && c < colsA){ // check for only inside the matrix A if(threadIdx.y == 0) d_CShare[threadIdx.x] = d_C[r]; __syncthreads(); if(d_CShare[threadIdx.x] > 10e-7f) // each element in A is normalized C, then squre-rooted d_A[c * pitchA + r] = sqrtf( d_A[c * pitchA + r] / d_CShare[threadIdx.x] ); else d_A[c * pitchA + r] = 1.0f/colsA; // ad_hoc code to avoid 0 division __syncthreads(); } } __global__ static void elementwiseDivision(int Xsize, float* d_Xx, float* d_Xy, float* d_Xz, const float* d_lambda){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < Xsize){ // check for only inside X float l_lambda = d_lambda[x]; d_Xx[x] /= l_lambda; d_Xy[x] /= l_lambda; d_Xz[x] /= l_lambda; } } __global__ static void elementwiseMultiplication(int Xsize, float* d_Xx, float* d_Xy, float* d_Xz, const float* d_lambda){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < Xsize){ // check for only inside X float l_lambda = d_lambda[x]; d_Xx[x] *= l_lambda; d_Xy[x] *= l_lambda; d_Xz[x] *= l_lambda; } } __global__ static void centeringXandY(int rowsA, const float* d_Xc, const float* d_Yc, const float* d_Xx, const float* d_Xy, const float* 
d_Xz, const float* d_Yx, const float* d_Yy, const float* d_Yz, float* d_XxCenterd, float* d_XyCenterd, float* d_XzCenterd, float* d_YxCenterd, float* d_YyCenterd, float* d_YzCenterd ){ // do for both X and Y at the same time int r = blockIdx.x * blockDim.x + threadIdx.x; // Shared memory __shared__ float Xc[3]; __shared__ float Yc[3]; if(threadIdx.x < 6) // assume blocksize >= 6 if(threadIdx.x < 3) Xc[threadIdx.x] = d_Xc[threadIdx.x]; else Yc[threadIdx.x - 3] = d_Yc[threadIdx.x - 3]; if(r < rowsA){ // check for only inside the vectors __syncthreads(); d_XxCenterd[r] = d_Xx[r] - Xc[0]; d_XyCenterd[r] = d_Xy[r] - Xc[1]; d_XzCenterd[r] = d_Xz[r] - Xc[2]; d_YxCenterd[r] = d_Yx[r] - Yc[0]; d_YyCenterd[r] = d_Yy[r] - Yc[1]; d_YzCenterd[r] = d_Yz[r] - Yc[2]; __syncthreads(); } } void emicp(const pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_target, const pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_source, float* h_R, float* h_t, const registrationParameters &param) { int Xsize, Ysize; float *h_X, *h_Y; cloud2data(cloud_target, &h_X, Xsize); cloud2data(cloud_source, &h_Y, Ysize); // // initialize paramters // float sigma_p2 = param.sigma_p2; float sigma_inf = param.sigma_inf; float sigma_factor = param.sigma_factor; float d_02 = param.d_02; // // initializing CUDA // findCudaDevice(param.argc, (const char**)param.argv); // // memory allocation // // example: memCUDA(Xx, Xsize); // declare d_Xx. no copy. #define memCUDA(var,num) \ float* d_ ## var; CUDA_SAFE_CALL(cudaMalloc((void**) &(d_ ## var), sizeof(float)*num)); // example: memHostToCUDA(Xx, Xsize); // declera d_Xx, then copy h_Xx to d_Xx. #define memHostToCUDA(var,num) \ float* d_ ## var; CUDA_SAFE_CALL(cudaMalloc((void**) &(d_ ## var), sizeof(float)*num)); \ CUDA_SAFE_CALL(cudaMemcpy(d_ ## var, h_ ## var, sizeof(float)*num, cudaMemcpyHostToDevice)); memHostToCUDA(X, Xsize*3); float* d_Xx = &d_X[Xsize*0]; float* d_Xy = &d_X[Xsize*1]; float* d_Xz = &d_X[Xsize*2]; memHostToCUDA(Y, Ysize*3); float* d_Yx = &d_Y[Ysize*0]; float* d_Yy = &d_Y[Ysize*1]; float* d_Yz = &d_Y[Ysize*2]; memCUDA(Xprime, Ysize*3); float *d_XprimeX = &d_Xprime[Ysize*0]; float *d_XprimeY = &d_Xprime[Ysize*1]; float *d_XprimeZ = &d_Xprime[Ysize*2]; float *d_XprimeCenterd = d_Xprime; float *d_XprimeCenterdX = &d_XprimeCenterd[Ysize*0]; float *d_XprimeCenterdY = &d_XprimeCenterd[Ysize*1]; float *d_XprimeCenterdZ = &d_XprimeCenterd[Ysize*2]; memCUDA(YCenterd, Ysize*3); float *d_YCenterdX = &d_YCenterd[Ysize*0]; float *d_YCenterdY = &d_YCenterd[Ysize*1]; float *d_YCenterdZ = &d_YCenterd[Ysize*2]; // center of X, Y float h_Xc[3], h_Yc[3]; memCUDA(Xc, 3); memCUDA(Yc, 3); // R, t memHostToCUDA(R, 3*3); memHostToCUDA(t, 3); CUDA_SAFE_CALL(cudaMemcpy(d_R, h_R, sizeof(float)*3*3, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_t, h_t, sizeof(float)*3, cudaMemcpyHostToDevice)); // S for finding R, t float h_S[9]; memCUDA(S, 9); // NOTE on matrix A // number of rows: Ysize, or rowsA // number of columns : Xsize, or colsA // // [0th in X] [1st] ... [(Xsize-1)] // [0th point in Y] [ A(0,0) A(0,1) ... A(0,Xsize-1) ] // [1st ] [ A(1,0) A(1,1) ... ] // ... [ ... ] // [(Ysize-1) ] [ A(Ysize-1, 0) ... A(Ysize-1,Xsize-1)] // // // CAUTION on matrix A // A is allcoated as a column-maijor format for the use of cublas. // This means that you must acces an element at row r and column c as: // A(r,c) = A[c * pitchA + r] int rowsA = Ysize; int colsA = Xsize; // pitchA: leading dimension of A, which is ideally equal to rowsA, // but actually larger than that. 
int pitchA = (rowsA / 4 + 1) * 4; memCUDA(A, pitchA*colsA); // a vector with all elements of 1.0f float* h_one = new float [max(Xsize,Ysize)]; for(int t = 0; t < max(Xsize,Ysize); t++) h_one[t] = 1.0f; memHostToCUDA(one, max(Xsize,Ysize)); memCUDA(sumOfMRow, rowsA); memCUDA(C, rowsA); // sum of a row in A memCUDA(lambda, rowsA); // weight of a row in A // // threads // // for 2D block dim3 dimBlockForA(BLOCK_SIZE, BLOCK_SIZE); // a block is (BLOCK_SIZE*BLOCK_SIZE) threads dim3 dimGridForA( (pitchA + dimBlockForA.x - 1) / dimBlockForA.x, (colsA + dimBlockForA.y - 1) / dimBlockForA.y); // for 1D block int threadsPerBlockForYsize = 512; // a block is 512 threads int blocksPerGridForYsize = (Ysize + threadsPerBlockForYsize - 1 ) / threadsPerBlockForYsize; // // timer // #define START_TIMER(timer) \ if(!param.notimer){ \ CUDA_SAFE_CALL( cudaThreadSynchronize() );\ CUT_SAFE_CALL(sdkStartTimer(&timer)); \ } #define STOP_TIMER(timer) \ if(!param.notimer){ \ CUDA_SAFE_CALL( cudaThreadSynchronize() );\ CUT_SAFE_CALL(sdkStopTimer(&timer)); \ } // timers StopWatchInterface *timerTotal, *timerUpdateA, *timerAfterSVD, *timerRT; if(!param.notimer){ CUT_SAFE_CALL(sdkCreateTimer(&timerUpdateA)); CUT_SAFE_CALL(sdkCreateTimer(&timerAfterSVD)); CUT_SAFE_CALL(sdkCreateTimer(&timerRT)); } CUT_SAFE_CALL(sdkCreateTimer(&timerTotal)); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(sdkStartTimer(&timerTotal)); // // initializing cublas // cublasInit(); // EM-ICP main loop int Titer = 1; while(sigma_p2 > sigma_inf){ fprintf(stderr, "%d iter. sigma_p2 %f ", Titer++, sigma_p2); fprintf(stderr, "time %.10f [s]\n", sdkGetTimerValue(&timerTotal) / 1000.0f); // // UpdateA // START_TIMER(timerUpdateA); updateA <<< dimGridForA, dimBlockForA >>> (rowsA, colsA, pitchA, d_Xx, d_Xy, d_Xz, d_Yx, d_Yy, d_Yz, d_R, d_t, d_A, sigma_p2); STOP_TIMER(timerUpdateA); // // Normalization of A // // cublasSgemv (char trans, int m, int n, float alpha, const float *A, int lda, // const float *x, int incx, float beta, float *y, int incy) // y = alpha * op(A) * x + beta * y, // A * one vector = vector with elements of row-wise sum // d_A * d_one => d_C //(rowsA*colsA) * (colsA*1) = (rowsA*1) cublasSgemv('n', // char trans rowsA, colsA, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_A, pitchA, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_C, 1); // float *y, int incy // void cublasSaxpy (int n, float alpha, const float *x, int incx, float *y, int incy) // alpha * x + y => y // exp(-d_0^2/sigma_p2) * d_one + d_C => d_C cublasSaxpy(rowsA, expf(-d_02/sigma_p2), d_one, 1, d_C, 1); normalizeRowsOfA <<< dimGridForA, dimBlockForA >>> (rowsA, colsA, pitchA, d_A, d_C); // // update R,T // ///////////////////////////////////////////////////////////////////////////////////// // compute lambda // A * one vector = vector with elements of row-wise sum // d_A * d_one => d_lambda //(rowsA*colsA) * (colsA*1) = (rowsA*1) cublasSgemv('n', // char trans rowsA, colsA, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_A, pitchA, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_lambda, 1); // float *y, int incy // float cublasSasum (int n, const float *x, int incx) float sumLambda = cublasSasum (rowsA, d_lambda, 1); ///////////////////////////////////////////////////////////////////////////////////// // compute X' // cublasSgemm (char transa, char transb, int m, int n, int k, float alpha, // const float *A, int lda, const float *B, int 
ldb, float beta, // float *C, int ldc) // C = alpha * op(A) * op(B) + beta * C, // // m number of rows of matrix op(A) and rows of matrix C // n number of columns of matrix op(B) and number of columns of C // k number of columns of matrix op(A) and number of rows of op(B) // A * X => X' // d_A * d_X => d_Xprime //(rowsA*colsA) * (colsA*3) = (rowsA*3) // m * k k * n m * n cublasSgemm('n', 'n', rowsA, 3, colsA, 1.0f, d_A, pitchA, d_X, colsA, 0.0f, d_Xprime, rowsA); // X' ./ lambda => X' elementwiseDivision <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (rowsA, d_XprimeX, d_XprimeY, d_XprimeZ, d_lambda); ///////////////////////////////////////////////////////////////////////////////////// // // centering X' and Y // ///////////////////////////////////////////////////////////////////////////////////// // find weighted center of X' and Y // d_Xprime^T * d_lambda => h_Xc // (3 * rowsA) (rowsA * 1) = (3 * 1) cublasSgemv('t', // char trans rowsA, 3, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_Xprime, rowsA, // const float *A, int lda d_lambda, 1, // const float *x, int incx 0.0f, // float beta d_Xc, 1); // float *y, int incy // d_Y^T * d_lambda => h_Yc // (3 * rowsA) (rowsA * 1) = (3 * 1) cublasSgemv('t', // char trans rowsA, 3, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_Y, rowsA, // const float *A, int lda d_lambda, 1, // const float *x, int incx 0.0f, // float beta d_Yc, 1); // float *y, int incy // void cublasSscal (int n, float alpha, float *x, int incx) // it replaces x[ix + i * incx] with alpha * x[ix + i * incx] cublasSscal (3, 1/sumLambda, d_Xc, 1); cublasSscal (3, 1/sumLambda, d_Yc, 1); CUDA_SAFE_CALL(cudaMemcpy(h_Xc, d_Xc, sizeof(float)*3, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(h_Yc, d_Yc, sizeof(float)*3, cudaMemcpyDeviceToHost)); ///////////////////////////////////////////////////////////////////////////////////// // centering X and Y // d_Xprime .- d_Xc => d_XprimeCenterd // d_Y .- d_Yc => d_YCenterd centeringXandY <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (rowsA, d_Xc, d_Yc, d_XprimeX, d_XprimeY, d_XprimeZ, d_Yx, d_Yy, d_Yz, d_XprimeCenterdX, d_XprimeCenterdY, d_XprimeCenterdZ, d_YCenterdX, d_YCenterdY, d_YCenterdZ); // XprimeCented .* d_lambda => XprimeCented elementwiseMultiplication <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (rowsA, d_XprimeCenterdX, d_XprimeCenterdY, d_XprimeCenterdZ, d_lambda); ///////////////////////////////////////////////////////////////////////////////////// // compute S // d_XprimeCented^T * d_YCenterd => d_S // (3*rowsA) * (rowsA*3) = (3*3) // m * k k * n m * n cublasSgemm('t', 'n', 3, 3, rowsA, 1.0f, d_XprimeCenterd, rowsA, d_YCenterd, rowsA, 0.0f, d_S, 3); CUDA_SAFE_CALL(cudaMemcpy(h_S, d_S, sizeof(float)*9, cudaMemcpyDeviceToHost)); ///////////////////////////////////////////////////////////////////////////////////// // find RT from S START_TIMER(timerAfterSVD); findRTfromS(h_Xc, h_Yc, h_S, h_R, h_t); STOP_TIMER(timerAfterSVD); ///////////////////////////////////////////////////////////////////////////////////// // copy R,t to device START_TIMER(timerRT); CUDA_SAFE_CALL(cudaMemcpy(d_R, h_R, sizeof(float)*3*3, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_t, h_t, sizeof(float)*3, cudaMemcpyHostToDevice)); STOP_TIMER(timerRT); ///////////////////////////////////////////////////////////////////////////////////// #ifndef NOVIEWER if(!param.noviewer){ Eigen::Matrix4f transformation; transformation << h_R[0], h_R[1], h_R[2], h_t[0], h_R[3], h_R[4], 
h_R[5], h_t[1], h_R[6], h_R[7], h_R[8], h_t[2], 0, 0, 0, 1; pcl::transformPointCloud ( *param.cloud_source, *param.cloud_source_trans, transformation ); param.viewer->updatePointCloud ( param.cloud_source_trans, *param.source_trans_color, "source trans" ); param.viewer->spinOnce(); } #endif sigma_p2 *= sigma_factor; } CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(sdkStopTimer(&timerTotal)); fprintf(stderr, "computing time: %.10f [s]\n", sdkGetTimerValue(&timerTotal) / 1000.0f); if(!param.notimer){ fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerUpdateA) / 1000.0f, "updateA"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerAfterSVD) / 1000.0f, "afterSVD"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerRT) / 1000.0f, "RT"); CUT_SAFE_CALL(sdkDeleteTimer(&timerTotal)); CUT_SAFE_CALL(sdkDeleteTimer(&timerUpdateA)); CUT_SAFE_CALL(sdkDeleteTimer(&timerAfterSVD)); CUT_SAFE_CALL(sdkDeleteTimer(&timerRT)); } cublasShutdown(); CUDA_SAFE_CALL(cudaFree(d_X)); CUDA_SAFE_CALL(cudaFree(d_Y)); CUDA_SAFE_CALL(cudaFree(d_Xprime)); CUDA_SAFE_CALL(cudaFree(d_YCenterd)); CUDA_SAFE_CALL(cudaFree(d_Xc)); CUDA_SAFE_CALL(cudaFree(d_Yc)); CUDA_SAFE_CALL(cudaFree(d_R)); CUDA_SAFE_CALL(cudaFree(d_t)); CUDA_SAFE_CALL(cudaFree(d_A)); CUDA_SAFE_CALL(cudaFree(d_S)); CUDA_SAFE_CALL(cudaFree(d_one)); CUDA_SAFE_CALL(cudaFree(d_sumOfMRow)); CUDA_SAFE_CALL(cudaFree(d_C)); CUDA_SAFE_CALL(cudaFree(d_lambda)); CUDA_SAFE_CALL( cudaThreadExit() ); delete [] h_one; }
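// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original emicp source). The "CAUTION
// on matrix A" note above says A is stored column-major with a padded leading
// dimension so cuBLAS can consume it: A(r,c) = A[c * pitchA + r]. The small
// host-only helper below, with hypothetical names, only demonstrates that
// indexing rule and the pitch rounding used in emicp().
#include <cstdio>
#include <vector>

static void pitched_column_major_demo(int rowsA, int colsA)
{
  // same rounding as emicp(): a multiple of 4 strictly larger than rowsA
  const int pitchA = (rowsA / 4 + 1) * 4;
  std::vector<float> A(static_cast<size_t>(pitchA) * colsA, 0.0f);

  // write A(r,c) through the column-major, pitched layout
  for (int c = 0; c < colsA; ++c)
    for (int r = 0; r < rowsA; ++r)
      A[c * pitchA + r] = static_cast<float>(r) + 0.01f * static_cast<float>(c);

  // rows r in [rowsA, pitchA) are padding and are never read back
  std::printf("pitchA = %d, A(0,%d) = %f\n",
              pitchA, colsA - 1, A[(colsA - 1) * pitchA + 0]);
}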
#include <thrust/sequence.h> #include "impl.cuh" namespace { using namespace manifold; constexpr uint32_t kNoCode = 0xFFFFFFFFu; struct Extrema : public thrust::binary_function<Halfedge, Halfedge, Halfedge> { __host__ __device__ void MakeForward(Halfedge& a) { if (!a.IsForward()) { int tmp = a.startVert; a.startVert = a.endVert; a.endVert = tmp; } } __host__ __device__ int MaxOrMinus(int a, int b) { return glm::min(a, b) < 0 ? -1 : glm::max(a, b); } __host__ __device__ Halfedge operator()(Halfedge a, Halfedge b) { MakeForward(a); MakeForward(b); a.startVert = glm::min(a.startVert, b.startVert); a.endVert = glm::max(a.endVert, b.endVert); a.face = MaxOrMinus(a.face, b.face); a.pairedHalfedge = MaxOrMinus(a.pairedHalfedge, b.pairedHalfedge); return a; } }; __host__ __device__ uint32_t SpreadBits3(uint32_t v) { v = 0xFF0000FFu & (v * 0x00010001u); v = 0x0F00F00Fu & (v * 0x00000101u); v = 0xC30C30C3u & (v * 0x00000011u); v = 0x49249249u & (v * 0x00000005u); return v; } __host__ __device__ uint32_t MortonCode(glm::vec3 position, Box bBox) { // Unreferenced vertices are marked NaN, and this will sort them to the end // (the Morton code only uses the first 30 of 32 bits). if (isnan(position.x)) return kNoCode; glm::vec3 xyz = (position - bBox.min) / (bBox.max - bBox.min); xyz = glm::min(glm::vec3(1023.0f), glm::max(glm::vec3(0.0f), 1024.0f * xyz)); uint32_t x = SpreadBits3(static_cast<uint32_t>(xyz.x)); uint32_t y = SpreadBits3(static_cast<uint32_t>(xyz.y)); uint32_t z = SpreadBits3(static_cast<uint32_t>(xyz.z)); return x * 4 + y * 2 + z; } struct Morton { const Box bBox; __host__ __device__ void operator()( thrust::tuple<uint32_t&, const glm::vec3&> inout) { glm::vec3 position = thrust::get<1>(inout); thrust::get<0>(inout) = MortonCode(position, bBox); } }; struct FaceMortonBox { const Halfedge* halfedge; const glm::vec3* vertPos; const Box bBox; __host__ __device__ void operator()( thrust::tuple<uint32_t&, Box&, int> inout) { uint32_t& mortonCode = thrust::get<0>(inout); Box& faceBox = thrust::get<1>(inout); int face = thrust::get<2>(inout); // Removed tris are marked by all halfedges having pairedHalfedge = -1, and // this will sort them to the end (the Morton code only uses the first 30 of // 32 bits). 
if (halfedge[3 * face].pairedHalfedge < 0) { mortonCode = kNoCode; return; } glm::vec3 center(0.0f); for (const int i : {0, 1, 2}) { const glm::vec3 pos = vertPos[halfedge[3 * face + i].startVert]; center += pos; faceBox.Union(pos); } center /= 3; mortonCode = MortonCode(center, bBox); } }; struct Reindex { const int* indexInv; __host__ __device__ void operator()(Halfedge& edge) { if (edge.startVert < 0) return; edge.startVert = indexInv[edge.startVert]; edge.endVert = indexInv[edge.endVert]; } }; template <typename T> void Permute(VecDH<T>& inOut, const VecDH<int>& new2Old) { VecDH<T> tmp(inOut); inOut.resize(new2Old.size()); thrust::gather(new2Old.beginD(), new2Old.endD(), tmp.beginD(), inOut.beginD()); } template void Permute<BaryRef>(VecDH<BaryRef>&, const VecDH<int>&); template void Permute<glm::vec3>(VecDH<glm::vec3>&, const VecDH<int>&); struct ReindexFace { Halfedge* halfedge; glm::vec4* halfedgeTangent; const Halfedge* oldHalfedge; const glm::vec4* oldHalfedgeTangent; const int* faceNew2Old; const int* faceOld2New; __host__ __device__ void operator()(int newFace) { const int oldFace = faceNew2Old[newFace]; for (const int i : {0, 1, 2}) { const int oldEdge = 3 * oldFace + i; Halfedge edge = oldHalfedge[oldEdge]; edge.face = newFace; const int pairedFace = edge.pairedHalfedge / 3; const int offset = edge.pairedHalfedge - 3 * pairedFace; edge.pairedHalfedge = 3 * faceOld2New[pairedFace] + offset; const int newEdge = 3 * newFace + i; halfedge[newEdge] = edge; if (oldHalfedgeTangent != nullptr) { halfedgeTangent[newEdge] = oldHalfedgeTangent[oldEdge]; } } } }; } // namespace namespace manifold { /** * Once halfedge_ has been filled in, this function can be called to create the * rest of the internal data structures. This function also removes the verts * and halfedges flagged for removal (NaN verts and -1 halfedges). */ void Manifold::Impl::Finish() { if (halfedge_.size() == 0) return; CalculateBBox(); SetPrecision(precision_); if (!bBox_.isFinite()) { vertPos_.resize(0); halfedge_.resize(0); faceNormal_.resize(0); return; } SortVerts(); VecDH<Box> faceBox; VecDH<uint32_t> faceMorton; GetFaceBoxMorton(faceBox, faceMorton); SortFaces(faceBox, faceMorton); if (halfedge_.size() == 0) return; ALWAYS_ASSERT(halfedge_.size() % 6 == 0, topologyErr, "Not an even number of faces after sorting faces!"); Halfedge extrema = {0, 0, 0, 0}; extrema = thrust::reduce(halfedge_.beginD(), halfedge_.endD(), extrema, Extrema()); ALWAYS_ASSERT(extrema.startVert >= 0, topologyErr, "Vertex index is negative!"); ALWAYS_ASSERT(extrema.endVert < NumVert(), topologyErr, "Vertex index exceeds number of verts!"); ALWAYS_ASSERT(extrema.face >= 0, topologyErr, "Face index is negative!"); ALWAYS_ASSERT(extrema.face < NumTri(), topologyErr, "Face index exceeds number of faces!"); ALWAYS_ASSERT(extrema.pairedHalfedge >= 0, topologyErr, "Halfedge index is negative!"); ALWAYS_ASSERT(extrema.pairedHalfedge < 2 * NumEdge(), topologyErr, "Halfedge index exceeds number of halfedges!"); CalculateNormals(); collider_ = Collider(faceBox, faceMorton); } /** * Sorts the vertices according to their Morton code. 
*/ void Manifold::Impl::SortVerts() { VecDH<uint32_t> vertMorton(NumVert()); thrust::for_each_n(zip(vertMorton.beginD(), vertPos_.cbeginD()), NumVert(), Morton({bBox_})); VecDH<int> vertNew2Old(NumVert()); thrust::sequence(vertNew2Old.beginD(), vertNew2Old.endD()); thrust::sort_by_key(vertMorton.beginD(), vertMorton.endD(), zip(vertPos_.beginD(), vertNew2Old.beginD())); ReindexVerts(vertNew2Old, NumVert()); // Verts were flagged for removal with NaNs and assigned kNoCode to sort them // to the end, which allows them to be removed. const int newNumVert = thrust::find(vertMorton.beginD(), vertMorton.endD(), kNoCode) - vertMorton.beginD(); vertPos_.resize(newNumVert); } /** * Updates the halfedges to point to new vert indices based on a mapping, * vertNew2Old. This may be a subset, so the total number of original verts is * also given. */ void Manifold::Impl::ReindexVerts(const VecDH<int>& vertNew2Old, int oldNumVert) { VecDH<int> vertOld2New(oldNumVert); thrust::scatter(countAt(0), countAt(NumVert()), vertNew2Old.beginD(), vertOld2New.beginD()); thrust::for_each(halfedge_.beginD(), halfedge_.endD(), Reindex({vertOld2New.cptrD()})); } /** * Fills the faceBox and faceMorton input with the bounding boxes and Morton * codes of the faces, respectively. The Morton code is based on the center of * the bounding box. */ void Manifold::Impl::GetFaceBoxMorton(VecDH<Box>& faceBox, VecDH<uint32_t>& faceMorton) const { faceBox.resize(NumTri()); faceMorton.resize(NumTri()); thrust::for_each_n( zip(faceMorton.beginD(), faceBox.beginD(), countAt(0)), NumTri(), FaceMortonBox({halfedge_.cptrD(), vertPos_.cptrD(), bBox_})); } /** * Sorts the faces of this manifold according to their input Morton code. The * bounding box and Morton code arrays are also sorted accordingly. */ void Manifold::Impl::SortFaces(VecDH<Box>& faceBox, VecDH<uint32_t>& faceMorton) { VecDH<int> faceNew2Old(NumTri()); thrust::sequence(faceNew2Old.beginD(), faceNew2Old.endD()); thrust::sort_by_key(faceMorton.beginD(), faceMorton.endD(), zip(faceBox.beginD(), faceNew2Old.beginD())); // Tris were flagged for removal with pairedHalfedge = -1 and assigned kNoCode // to sort them to the end, which allows them to be removed. const int newNumTri = thrust::find(faceMorton.beginD(), faceMorton.endD(), kNoCode) - faceMorton.beginD(); faceBox.resize(newNumTri); faceMorton.resize(newNumTri); faceNew2Old.resize(newNumTri); GatherFaces(faceNew2Old); } /** * Creates the halfedge_ vector for this manifold by copying a set of faces from * another manifold, given by oldHalfedge. Input faceNew2Old defines the old * faces to gather into this. 
*/ void Manifold::Impl::GatherFaces(const VecDH<int>& faceNew2Old) { const int numTri = faceNew2Old.size(); if (meshRelation_.triBary.size() == NumTri()) Permute(meshRelation_.triBary, faceNew2Old); if (faceNormal_.size() == NumTri()) Permute(faceNormal_, faceNew2Old); VecDH<Halfedge> oldHalfedge(halfedge_); VecDH<glm::vec4> oldHalfedgeTangent(halfedgeTangent_); VecDH<int> faceOld2New(oldHalfedge.size() / 3); thrust::scatter(countAt(0), countAt(numTri), faceNew2Old.beginD(), faceOld2New.beginD()); halfedge_.resize(3 * numTri); if (oldHalfedgeTangent.size() != 0) halfedgeTangent_.resize(3 * numTri); thrust::for_each_n( countAt(0), numTri, ReindexFace({halfedge_.ptrD(), halfedgeTangent_.ptrD(), oldHalfedge.cptrD(), oldHalfedgeTangent.cptrD(), faceNew2Old.cptrD(), faceOld2New.cptrD()})); } void Manifold::Impl::GatherFaces(const Impl& old, const VecDH<int>& faceNew2Old) { const int numTri = faceNew2Old.size(); meshRelation_.triBary.resize(numTri); thrust::gather(faceNew2Old.beginD(), faceNew2Old.endD(), old.meshRelation_.triBary.beginD(), meshRelation_.triBary.beginD()); meshRelation_.barycentric = old.meshRelation_.barycentric; DuplicateMeshIDs(); if (old.faceNormal_.size() == old.NumTri()) { faceNormal_.resize(numTri); thrust::gather(faceNew2Old.beginD(), faceNew2Old.endD(), old.faceNormal_.beginD(), faceNormal_.beginD()); } VecDH<int> faceOld2New(old.NumTri()); thrust::scatter(countAt(0), countAt(numTri), faceNew2Old.beginD(), faceOld2New.beginD()); halfedge_.resize(3 * numTri); if (old.halfedgeTangent_.size() != 0) halfedgeTangent_.resize(3 * numTri); thrust::for_each_n( countAt(0), numTri, ReindexFace({halfedge_.ptrD(), halfedgeTangent_.ptrD(), old.halfedge_.cptrD(), old.halfedgeTangent_.cptrD(), faceNew2Old.cptrD(), faceOld2New.cptrD()})); } } // namespace manifold
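// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original manifold source). SortVerts()
// and SortFaces() above order vertices/faces by a 30-bit Morton code: each
// coordinate is quantized to 10 bits, bit-spread with the masks in
// SpreadBits3(), and the three results are interleaved. The host-only
// functions below mirror that encoding without glm or Box, just to make the
// bit layout explicit; names are hypothetical.
#include <cstdint>
#include <cstdio>

static uint32_t spread_bits3_demo(uint32_t v) {
  v = 0xFF0000FFu & (v * 0x00010001u);
  v = 0x0F00F00Fu & (v * 0x00000101u);
  v = 0xC30C30C3u & (v * 0x00000011u);
  v = 0x49249249u & (v * 0x00000005u);
  return v;
}

static uint32_t morton30_demo(uint32_t x, uint32_t y, uint32_t z) {
  // x, y, z must already be quantized to [0, 1023] (10 bits each)
  return spread_bits3_demo(x) * 4 + spread_bits3_demo(y) * 2 + spread_bits3_demo(z);
}

static void morton_demo() {
  // the center of the bounding box quantizes to 512 on each axis
  std::printf("morton30(512, 512, 512) = 0x%08x\n",
              (unsigned)morton30_demo(512u, 512u, 512u));
}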
#pragma once #include <gunrock/app/problem_base.cuh> namespace gunrock { namespace app { namespace vn { /** * @brief Specifying parameters for VN Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_problem(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(gunrock::app::UseParameters_problem(parameters)); GUARD_CU(parameters.Use<bool>( "mark-pred", util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, false, "Whether to mark predecessor info.", __FILE__, __LINE__)); return retval; } /** * @brief VN Problem structure. * @tparam _GraphT Type of the graph * @tparam _LabelT Type of labels used in VN * @tparam _ValueT Type of per-vertex distance values * @tparam _FLAG Problem flags */ template <typename _GraphT, typename _LabelT = typename _GraphT::VertexT, typename _ValueT = typename _GraphT::ValueT, ProblemFlag _FLAG = Problem_None> struct Problem : ProblemBase<_GraphT, _FLAG> { typedef _GraphT GraphT; static const ProblemFlag FLAG = _FLAG; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::CsrT CsrT; typedef typename GraphT::GpT GpT; typedef _LabelT LabelT; typedef _ValueT ValueT; typedef ProblemBase<GraphT, FLAG> BaseProblem; typedef DataSliceBase<GraphT, FLAG> BaseDataSlice; // Helper structures /** * @brief Data structure containing VN-specific data on individual GPU. */ struct DataSlice : BaseDataSlice { // VN-specific storage arrays util::Array1D<SizeT, ValueT> distances; // source distance util::Array1D<SizeT, LabelT> labels; // labels marking the latest iteration in which the vertex was visited util::Array1D<SizeT, VertexT> preds; // predecessors of vertices util::Array1D<SizeT, VertexT> temp_preds; // predecessors of vertices /* * @brief Default constructor */ DataSlice() : BaseDataSlice() { distances.SetName("distances"); labels.SetName("labels"); preds.SetName("preds"); temp_preds.SetName("temp_preds"); } /* * @brief Default destructor */ virtual ~DataSlice() { Release(); } /* * @brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx)); GUARD_CU(distances.Release(target)); GUARD_CU(labels.Release(target)); GUARD_CU(preds.Release(target)); GUARD_CU(temp_preds.Release(target)); GUARD_CU(BaseDataSlice ::Release(target)); return retval; } /** * @brief Initializing VN-specific data on each GPU * @param sub_graph Sub graph on the GPU.
* @param[in] num_gpus Number of GPUs * @param[in] gpu_idx GPU device index * @param[in] target Targeting device location * @param[in] flag Problem flag containling options * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0, util::Location target = util::DEVICE, ProblemFlag flag = Problem_None) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag)); GUARD_CU(distances.Allocate(sub_graph.nodes, target)); GUARD_CU(labels.Allocate(sub_graph.nodes, target)); if (flag & Mark_Predecessors) { GUARD_CU(preds.Allocate(sub_graph.nodes, target)); GUARD_CU(temp_preds.Allocate(sub_graph.nodes, target)); } /*if (target & util::DEVICE) { GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this -> stream)); }*/ GUARD_CU(sub_graph.Move(util::HOST, target, this->stream)); return retval; } // Init /** * @brief Reset problem function. Must be called prior to each run. * @param[in] target Targeting device location * \return cudaError_t Error message(s), if any */ cudaError_t Reset(util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->sub_graph->nodes; // Ensure data are allocated GUARD_CU(distances.EnsureSize_(nodes, target)); GUARD_CU(labels.EnsureSize_(nodes, target)); if (this->flag & Mark_Predecessors) { GUARD_CU(preds.EnsureSize_(nodes, target)); GUARD_CU(temp_preds.EnsureSize_(nodes, target)); } // Reset data GUARD_CU(distances.ForEach( [] __host__ __device__(ValueT & distance) { distance = util::PreDefinedValues<ValueT>::MaxValue; }, nodes, target, this->stream)); GUARD_CU(labels.ForEach( [] __host__ __device__(LabelT & label) { label = util::PreDefinedValues<LabelT>::InvalidValue; }, nodes, target, this->stream)); if (this->flag & Mark_Predecessors) { GUARD_CU(preds.ForAll( [] __host__ __device__(VertexT * preds_, const SizeT &pos) { preds_[pos] = pos; }, nodes, target, this->stream)); GUARD_CU(temp_preds.ForAll( [] __host__ __device__(VertexT * preds_, const SizeT &pos) { preds_[pos] = pos; }, nodes, target, this->stream)); } return retval; } }; // DataSlice // Members // Set of data slices (one for each GPU) util::Array1D<SizeT, DataSlice> *data_slices; // Methods /** * @brief VNProblem default constructor */ Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None) : BaseProblem(_parameters, _flag), data_slices(NULL) {} /** * @brief VNProblem default destructor */ virtual ~Problem() { Release(); } /* * @brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (data_slices == NULL) return retval; for (int i = 0; i < this->num_gpus; i++) GUARD_CU(data_slices[i].Release(target)); if ((target & util::HOST) != 0 && data_slices[0].GetPointer(util::DEVICE) == NULL) { delete[] data_slices; data_slices = NULL; } GUARD_CU(BaseProblem::Release(target)); return retval; } /** * \addtogroup PublicInterface * @{ */ /** * @brief Copy result distancess computed on GPUs back to host-side arrays. * @param[out] h_distances Host array to store computed vertex distances from * the source. * @param[out] h_preds Host array to store computed vertex predecessors. 
* @param[in] target where the results are stored * \return cudaError_t Error message(s), if any */ cudaError_t Extract(ValueT *h_distances, VertexT *h_preds = NULL, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->org_graph->nodes; if (this->num_gpus == 1) { auto &data_slice = data_slices[0][0]; // Set device if (target == util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[0])); GUARD_CU( data_slice.distances.SetPointer(h_distances, nodes, util::HOST)); GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST)); if ((this->flag & Mark_Predecessors) == 0) return retval; GUARD_CU(data_slice.preds.SetPointer(h_preds, nodes, util::HOST)); GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST)); } else if (target == util::HOST) { GUARD_CU(data_slice.distances.ForEach( h_distances, [] __host__ __device__(const ValueT &distance, ValueT &h_distance) { h_distance = distance; }, nodes, util::HOST)); if (this->flag & Mark_Predecessors) GUARD_CU(data_slice.preds.ForEach( h_preds, [] __host__ __device__(const VertexT &pred, VertexT &h_pred) { h_pred = pred; }, nodes, util::HOST)); } } else { // num_gpus != 1 util::Array1D<SizeT, ValueT *> th_distances; util::Array1D<SizeT, VertexT *> th_preds; th_distances.SetName("bfs::Problem::Extract::th_distances"); th_preds.SetName("bfs::Problem::Extract::th_preds"); GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST)); GUARD_CU(th_preds.Allocate(this->num_gpus, util::HOST)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { auto &data_slice = data_slices[gpu][0]; if (target == util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST)); if (this->flag & Mark_Predecessors) GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST)); } th_distances[gpu] = data_slice.distances.GetPointer(util::HOST); th_preds[gpu] = data_slice.preds.GetPointer(util::HOST); } // end for(gpu) for (VertexT v = 0; v < nodes; v++) { int gpu = this->org_graph->GpT::partition_table[v]; VertexT v_ = v; if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) != 0) v_ = this->org_graph->GpT::convertion_table[v]; h_distances[v] = th_distances[gpu][v_]; if (this->flag & Mark_Predecessors) h_preds[v] = th_preds[gpu][v_]; } GUARD_CU(th_distances.Release()); GUARD_CU(th_preds.Release()); } // end if return retval; } /** * @brief initialization function. * @param graph The graph that VN processes on * @param[in] Location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseProblem::Init(graph, target)); data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus]; if (this->parameters.template Get<bool>("mark-pred")) this->flag = this->flag | Mark_Predecessors; for (int gpu = 0; gpu < this->num_gpus; gpu++) { data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]"); if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST)); auto &data_slice = data_slices[gpu][0]; GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus, this->gpu_idx[gpu], target, this->flag)); } // end for (gpu) return retval; } /** * @brief Reset problem function. Must be called prior to each run. * @param[in] src Source vertex to start. 
* @param[in] target Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Reset(VertexT *srcs, SizeT num_srcs, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; for (int gpu = 0; gpu < this->num_gpus; ++gpu) { // Set device if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu]->Reset(target)); GUARD_CU(data_slices[gpu].Move(util::HOST, target)); } // Fill in the initial input_queue for the VN problem int gpu; VertexT *srcs_; if (this->num_gpus <= 1) { gpu = 0; srcs_ = srcs; } else { // <TODO> finish multiGPU implementation // gpu = this -> org_graph -> partition_table[src]; // if (this -> flag & partitioner::Keep_Node_Num) // src_ = src; // else // src_ = this -> org_graph -> GpT::convertion_table[src]; // </TODO> } if (target & util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } // <TODO> Is there a preferable way to do this in parallel? for (SizeT i = 0; i < num_srcs; ++i) { VertexT src_ = srcs_[i]; ValueT src_distance = 0; if (target & util::HOST) { data_slices[gpu]->distances[src_] = src_distance; if (this->flag & Mark_Predecessors) data_slices[gpu]->preds[src_] = util::PreDefinedValues<VertexT>::InvalidValue; } if (target & util::DEVICE) { GUARD_CU2( cudaMemcpy( data_slices[gpu]->distances.GetPointer(util::DEVICE) + src_, &src_distance, sizeof(ValueT), cudaMemcpyHostToDevice), "VNProblem cudaMemcpy distances failed"); if (this->flag & Mark_Predecessors) { VertexT src_pred = util::PreDefinedValues<VertexT>::InvalidValue; GUARD_CU2(cudaMemcpy( data_slices[gpu]->preds.GetPointer(util::DEVICE) + src_, &src_pred, sizeof(VertexT), cudaMemcpyHostToDevice), "VNProblem cudaMemcpy preds failed"); } GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } } // </TODO> return retval; } /** @} */ }; } // namespace vn } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
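// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original Gunrock source). In the
// multi-GPU branch of Extract() above, each global vertex v is routed to the
// GPU that owns it via partition_table[v] and (when node numbers are kept) to
// its local index via convertion_table[v]. The plain std::vector version
// below is a hypothetical stand-in for the Gunrock arrays, shown only to make
// that gather explicit; it assumes the remapped-index case for brevity.
#include <cstddef>
#include <vector>

template <typename ValueT>
void gather_multi_gpu_results(
    const std::vector<int>& partition_table,                    // v -> owning GPU
    const std::vector<int>& convertion_table,                   // v -> local index
    const std::vector<std::vector<ValueT>>& per_gpu_distances,  // one array per GPU
    std::vector<ValueT>& h_distances)                           // merged host output
{
  const std::size_t nodes = partition_table.size();
  h_distances.resize(nodes);
  for (std::size_t v = 0; v < nodes; ++v) {
    const int gpu = partition_table[v];
    const int v_ = convertion_table[v];
    h_distances[v] = per_gpu_distances[gpu][v_];
  }
}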
#ifndef CPU_ONLY void Hamiltonian::initialize_gpu(Model& model) { n = model.number_of_atoms; max_neighbor = model.max_neighbor; energy_max = model.energy_max; grid_size = (model.number_of_atoms - 1) / BLOCK_SIZE + 1; CHECK(cudaMalloc((void**)&neighbor_number, sizeof(int) * n)); CHECK(cudaMalloc((void**)&neighbor_list, sizeof(int) * model.number_of_pairs)); CHECK(cudaMalloc((void**)&potential, sizeof(real) * n)); CHECK(cudaMalloc((void**)&hopping_real, sizeof(real) * model.number_of_pairs)); CHECK(cudaMalloc((void**)&hopping_imag, sizeof(real) * model.number_of_pairs)); CHECK(cudaMalloc((void**)&xx, sizeof(real) * model.number_of_pairs)); CHECK( cudaMemcpy(neighbor_number, model.neighbor_number, sizeof(int) * n, cudaMemcpyHostToDevice)); delete[] model.neighbor_number; CHECK(cudaMemcpy(potential, model.potential, sizeof(real) * n, cudaMemcpyHostToDevice)); delete[] model.potential; int* neighbor_list_new = new int[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { neighbor_list_new[m * n + i] = model.neighbor_list[i * max_neighbor + m]; } } delete[] model.neighbor_list; CHECK(cudaMemcpy( neighbor_list, neighbor_list_new, sizeof(int) * model.number_of_pairs, cudaMemcpyHostToDevice)); delete[] neighbor_list_new; real* hopping_real_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { hopping_real_new[m * n + i] = model.hopping_real[i * max_neighbor + m]; } } delete[] model.hopping_real; CHECK(cudaMemcpy( hopping_real, hopping_real_new, sizeof(real) * model.number_of_pairs, cudaMemcpyHostToDevice)); delete[] hopping_real_new; real* hopping_imag_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { hopping_imag_new[m * n + i] = model.hopping_imag[i * max_neighbor + m]; } } delete[] model.hopping_imag; CHECK(cudaMemcpy( hopping_imag, hopping_imag_new, sizeof(real) * model.number_of_pairs, cudaMemcpyHostToDevice)); delete[] hopping_imag_new; real* xx_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { xx_new[m * n + i] = model.xx[i * max_neighbor + m]; } } delete[] model.xx; CHECK(cudaMemcpy(xx, xx_new, sizeof(real) * model.number_of_pairs, cudaMemcpyHostToDevice)); delete[] xx_new; } #else void Hamiltonian::initialize_cpu(Model& model) { n = model.number_of_atoms; max_neighbor = model.max_neighbor; energy_max = model.energy_max; int number_of_pairs = model.number_of_pairs; neighbor_number = new int[n]; memcpy(neighbor_number, model.neighbor_number, sizeof(int) * n); delete[] model.neighbor_number; neighbor_list = new int[number_of_pairs]; memcpy(neighbor_list, model.neighbor_list, sizeof(int) * number_of_pairs); delete[] model.neighbor_list; potential = new real[n]; memcpy(potential, model.potential, sizeof(real) * n); delete[] model.potential; hopping_real = new real[number_of_pairs]; memcpy(hopping_real, model.hopping_real, sizeof(real) * number_of_pairs); delete[] model.hopping_real; hopping_imag = new real[number_of_pairs]; memcpy(hopping_imag, model.hopping_imag, sizeof(real) * number_of_pairs); delete[] model.hopping_imag; xx = new real[number_of_pairs]; memcpy(xx, model.xx, sizeof(real) * number_of_pairs); delete[] model.xx; } #endif Hamiltonian::Hamiltonian(Model& model) { #ifndef CPU_ONLY initialize_gpu(model); #else initialize_cpu(model); #endif } Hamiltonian::~Hamiltonian() { #ifndef CPU_ONLY CHECK(cudaFree(neighbor_number)); CHECK(cudaFree(neighbor_list)); 
CHECK(cudaFree(potential)); CHECK(cudaFree(hopping_real)); CHECK(cudaFree(hopping_imag)); CHECK(cudaFree(xx)); #else delete[] neighbor_number; delete[] neighbor_list; delete[] potential; delete[] hopping_real; delete[] hopping_imag; delete[] xx; #endif } #ifndef CPU_ONLY __global__ void gpu_apply_hamiltonian( const int number_of_atoms, const real energy_max, const int* __restrict g_neighbor_number, const int* __restrict g_neighbor_list, const real* __restrict g_potential, const real* __restrict g_hopping_real, const real* __restrict g_hopping_imag, const real* __restrict g_state_in_real, const real* __restrict g_state_in_imag, real* __restrict g_state_out_real, real* __restrict g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_in_real[n]; // on-site real temp_imag = g_potential[n] * g_state_in_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale g_state_out_real[n] = temp_real; g_state_out_imag[n] = temp_imag; } } #else void cpu_apply_hamiltonian( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_in_real[n]; // on-site real temp_imag = g_potential[n] * g_state_in_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale g_state_out_real[n] = temp_real; g_state_out_imag[n] = temp_imag; } } #endif // |output> = H |input> void Hamiltonian::apply(Vector& input, Vector& output) { #ifndef CPU_ONLY gpu_apply_hamiltonian<<<grid_size, BLOCK_SIZE>>>( n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(cudaGetLastError()); #else cpu_apply_hamiltonian( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_commutator( int number_of_atoms, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = 
g_state_in_imag[index_2]; real xx = g_xx[index_1]; temp_real -= (a * c - b * d) * xx; temp_imag -= (a * d + b * c) * xx; } g_state_out_real[n] = temp_real / energy_max; // scale g_state_out_imag[n] = temp_imag / energy_max; // scale } } #else void cpu_apply_commutator( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; real xx = g_xx[index_1]; temp_real -= (a * c - b * d) * xx; temp_imag -= (a * d + b * c) * xx; } g_state_out_real[n] = temp_real / energy_max; // scale g_state_out_imag[n] = temp_imag / energy_max; // scale } } #endif // |output> = [X, H] |input> void Hamiltonian::apply_commutator(Vector& input, Vector& output) { #ifndef CPU_ONLY gpu_apply_commutator<<<grid_size, BLOCK_SIZE>>>( n, energy_max, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(cudaGetLastError()); #else cpu_apply_commutator( n, max_neighbor, energy_max, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_current( const int number_of_atoms, const int* __restrict g_neighbor_number, const int* __restrict g_neighbor_list, const real* __restrict g_hopping_real, const real* __restrict g_hopping_imag, const real* __restrict g_xx, const real* __restrict g_state_in_real, const real* __restrict g_state_in_imag, real* __restrict g_state_out_real, real* __restrict g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += (a * c - b * d) * g_xx[index_1]; temp_imag += (a * d + b * c) * g_xx[index_1]; } g_state_out_real[n] = +temp_imag; g_state_out_imag[n] = -temp_real; } } #else void cpu_apply_current( int number_of_atoms, int max_neighbor, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += (a * c - b * d) * g_xx[index_1]; temp_imag += (a * d + b * c) * g_xx[index_1]; } g_state_out_real[n] = +temp_imag; g_state_out_imag[n] = -temp_real; } } #endif // |output> = V |input> void Hamiltonian::apply_current(Vector& input, Vector& output) { #ifndef CPU_ONLY gpu_apply_current<<<grid_size, BLOCK_SIZE>>>( n, 
neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(cudaGetLastError()); #else cpu_apply_current( n, max_neighbor, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } // Kernel which calculates the two first terms of time evolution as described by // Eq. (36) in [Comput. Phys. Commun.185, 28 (2014)]. #ifndef CPU_ONLY __global__ void gpu_chebyshev_01( const int number_of_atoms, const real* __restrict g_state_0_real, const real* __restrict g_state_0_imag, const real* __restrict g_state_1_real, const real* __restrict g_state_1_imag, real* __restrict g_state_real, real* __restrict g_state_imag, const real b0, const real b1, const int direction) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real bessel_0 = b0; real bessel_1 = b1 * direction; g_state_real[n] = bessel_0 * g_state_0_real[n] + bessel_1 * g_state_1_imag[n]; g_state_imag[n] = bessel_0 * g_state_0_imag[n] - bessel_1 * g_state_1_real[n]; } } #else void cpu_chebyshev_01( int number_of_atoms, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_real, real* g_state_imag, real b0, real b1, int direction) { for (int n = 0; n < number_of_atoms; ++n) { real bessel_0 = b0; real bessel_1 = b1 * direction; g_state_real[n] = bessel_0 * g_state_0_real[n] + bessel_1 * g_state_1_imag[n]; g_state_imag[n] = bessel_0 * g_state_0_imag[n] - bessel_1 * g_state_1_real[n]; } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_01( Vector& state_0, Vector& state_1, Vector& state, real bessel_0, real bessel_1, int direction) { #ifndef CPU_ONLY gpu_chebyshev_01<<<grid_size, BLOCK_SIZE>>>( n, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state.real_part, state.imag_part, bessel_0, bessel_1, direction); CHECK(cudaGetLastError()); #else cpu_chebyshev_01( n, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state.real_part, state.imag_part, bessel_0, bessel_1, direction); #endif } // Kernel for calculating further terms of Eq. (36) // in [Comput. Phys. Commun.185, 28 (2014)]. 
#ifndef CPU_ONLY __global__ void gpu_chebyshev_2( const int number_of_atoms, const real energy_max, const int* __restrict g_neighbor_number, const int* __restrict g_neighbor_list, const real* __restrict g_potential, const real* __restrict g_hopping_real, const real* __restrict g_hopping_imag, const real* __restrict g_state_0_real, const real* __restrict g_state_0_imag, const real* __restrict g_state_1_real, const real* __restrict g_state_1_imag, real* __restrict g_state_2_real, real* __restrict g_state_2_imag, real* __restrict g_state_real, real* __restrict g_state_imag, const real bessel_m, const int label) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; switch (label) { case 1: { g_state_real[n] += bessel_m * temp_real; g_state_imag[n] += bessel_m * temp_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_real; g_state_imag[n] -= bessel_m * temp_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_imag; g_state_imag[n] -= bessel_m * temp_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_imag; g_state_imag[n] += bessel_m * temp_real; break; } } g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #else void cpu_chebyshev_2( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_2_real, real* g_state_2_imag, real* g_state_real, real* g_state_imag, real bessel_m, int label) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; switch (label) { case 1: { g_state_real[n] += bessel_m * temp_real; g_state_imag[n] += bessel_m * temp_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_real; g_state_imag[n] -= bessel_m * temp_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_imag; g_state_imag[n] -= bessel_m * temp_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_imag; g_state_imag[n] += bessel_m * temp_real; break; } } g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_2( Vector& state_0, Vector& state_1, Vector& state_2, Vector& state, real bessel_m, int label) { #ifndef 
CPU_ONLY gpu_chebyshev_2<<<grid_size, BLOCK_SIZE>>>( n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part, state.real_part, state.imag_part, bessel_m, label); CHECK(cudaGetLastError()); #else cpu_chebyshev_2( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part, state.real_part, state.imag_part, bessel_m, label); #endif } // Kernel which calculates the two first terms of commutator [X, U(dt)] // Corresponds to Eq. (37) in [Comput. Phys. Commun.185, 28 (2014)]. #ifndef CPU_ONLY __global__ void gpu_chebyshev_1x( const int number_of_atoms, const real* __restrict g_state_1x_real, const real* __restrict g_state_1x_imag, real* __restrict g_state_real, real* __restrict g_state_imag, const real g_bessel_1) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real b1 = g_bessel_1; g_state_real[n] = +b1 * g_state_1x_imag[n]; g_state_imag[n] = -b1 * g_state_1x_real[n]; } } #else void cpu_chebyshev_1x( int number_of_atoms, real* g_state_1x_real, real* g_state_1x_imag, real* g_state_real, real* g_state_imag, real g_bessel_1) { for (int n = 0; n < number_of_atoms; ++n) { real b1 = g_bessel_1; g_state_real[n] = +b1 * g_state_1x_imag[n]; g_state_imag[n] = -b1 * g_state_1x_real[n]; } } #endif // Wrapper for kernel above void Hamiltonian::chebyshev_1x(Vector& input, Vector& output, real bessel_1) { #ifndef CPU_ONLY gpu_chebyshev_1x<<<grid_size, BLOCK_SIZE>>>( n, input.real_part, input.imag_part, output.real_part, output.imag_part, bessel_1); CHECK(cudaGetLastError()); #else cpu_chebyshev_1x( n, input.real_part, input.imag_part, output.real_part, output.imag_part, bessel_1); #endif } // Kernel which calculates the further terms of [X, U(dt)] #ifndef CPU_ONLY __global__ void gpu_chebyshev_2x( const int number_of_atoms, const real energy_max, const int* __restrict g_neighbor_number, const int* __restrict g_neighbor_list, const real* __restrict g_potential, const real* __restrict g_hopping_real, const real* __restrict g_hopping_imag, const real* __restrict g_xx, const real* __restrict g_state_0_real, const real* __restrict g_state_0_imag, const real* __restrict g_state_0x_real, const real* __restrict g_state_0x_imag, const real* __restrict g_state_1_real, const real* __restrict g_state_1_imag, const real* __restrict g_state_1x_real, const real* __restrict g_state_1x_imag, real* __restrict g_state_2_real, real* __restrict g_state_2_imag, real* __restrict g_state_2x_real, real* __restrict g_state_2x_imag, real* __restrict g_state_real, real* __restrict g_state_imag, const real g_bessel_m, const int g_label) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site real temp_x_real = g_potential[n] * g_state_1x_real[n]; // on-site real temp_x_imag = g_potential[n] * g_state_1x_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping real cx = 
g_state_1x_real[index_2]; real dx = g_state_1x_imag[index_2]; temp_x_real += a * cx - b * dx; // hopping temp_x_imag += a * dx + b * cx; // hopping real xx = g_xx[index_1]; temp_x_real -= (a * c - b * d) * xx; // hopping temp_x_imag -= (a * d + b * c) * xx; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; temp_x_real /= energy_max; // scale temp_x_imag /= energy_max; // scale temp_x_real = 2.0 * temp_x_real - g_state_0x_real[n]; temp_x_imag = 2.0 * temp_x_imag - g_state_0x_imag[n]; g_state_2x_real[n] = temp_x_real; g_state_2x_imag[n] = temp_x_imag; real bessel_m = g_bessel_m; switch (g_label) { case 1: { g_state_real[n] += bessel_m * temp_x_real; g_state_imag[n] += bessel_m * temp_x_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_x_real; g_state_imag[n] -= bessel_m * temp_x_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_x_imag; g_state_imag[n] -= bessel_m * temp_x_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_x_imag; g_state_imag[n] += bessel_m * temp_x_real; break; } } } } #else void cpu_chebyshev_2x( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_0_real, real* g_state_0_imag, real* g_state_0x_real, real* g_state_0x_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_1x_real, real* g_state_1x_imag, real* g_state_2_real, real* g_state_2_imag, real* g_state_2x_real, real* g_state_2x_imag, real* g_state_real, real* g_state_imag, real g_bessel_m, int g_label) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site real temp_x_real = g_potential[n] * g_state_1x_real[n]; // on-site real temp_x_imag = g_potential[n] * g_state_1x_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping real cx = g_state_1x_real[index_2]; real dx = g_state_1x_imag[index_2]; temp_x_real += a * cx - b * dx; // hopping temp_x_imag += a * dx + b * cx; // hopping real xx = g_xx[index_1]; temp_x_real -= (a * c - b * d) * xx; // hopping temp_x_imag -= (a * d + b * c) * xx; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; temp_x_real /= energy_max; // scale temp_x_imag /= energy_max; // scale temp_x_real = 2.0 * temp_x_real - g_state_0x_real[n]; temp_x_imag = 2.0 * temp_x_imag - g_state_0x_imag[n]; g_state_2x_real[n] = temp_x_real; g_state_2x_imag[n] = temp_x_imag; real bessel_m = g_bessel_m; switch (g_label) { case 1: { g_state_real[n] += bessel_m * temp_x_real; g_state_imag[n] += bessel_m * temp_x_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_x_real; g_state_imag[n] -= bessel_m * temp_x_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_x_imag; g_state_imag[n] -= bessel_m * temp_x_real; break; } case 4: { 
g_state_real[n] -= bessel_m * temp_x_imag; g_state_imag[n] += bessel_m * temp_x_real; break; } } } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_2x( Vector& state_0, Vector& state_0x, Vector& state_1, Vector& state_1x, Vector& state_2, Vector& state_2x, Vector& state, real bessel_m, int label) { #ifndef CPU_ONLY gpu_chebyshev_2x<<<grid_size, BLOCK_SIZE>>>( n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, xx, state_0.real_part, state_0.imag_part, state_0x.real_part, state_0x.imag_part, state_1.real_part, state_1.imag_part, state_1x.real_part, state_1x.imag_part, state_2.real_part, state_2.imag_part, state_2x.real_part, state_2x.imag_part, state.real_part, state.imag_part, bessel_m, label); CHECK(cudaGetLastError()); #else cpu_chebyshev_2x( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, xx, state_0.real_part, state_0.imag_part, state_0x.real_part, state_0x.imag_part, state_1.real_part, state_1.imag_part, state_1x.real_part, state_1x.imag_part, state_2.real_part, state_2.imag_part, state_2x.real_part, state_2x.imag_part, state.real_part, state.imag_part, bessel_m, label); #endif } // Kernel for doing the Chebyshev iteration phi_2 = 2 * H * phi_1 - phi_0. #ifndef CPU_ONLY __global__ void gpu_kernel_polynomial( const int number_of_atoms, const real energy_max, const int* __restrict g_neighbor_number, const int* __restrict g_neighbor_list, const real* __restrict g_potential, const real* __restrict g_hopping_real, const real* __restrict g_hopping_imag, const real* __restrict g_state_0_real, const real* __restrict g_state_0_imag, const real* __restrict g_state_1_real, const real* __restrict g_state_1_imag, real* __restrict g_state_2_real, real* __restrict g_state_2_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #else void cpu_kernel_polynomial( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_2_real, real* g_state_2_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; 
temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #endif // Wrapper for the Chebyshev iteration void Hamiltonian::kernel_polynomial(Vector& state_0, Vector& state_1, Vector& state_2) { #ifndef CPU_ONLY gpu_kernel_polynomial<<<grid_size, BLOCK_SIZE>>>( n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part); CHECK(cudaGetLastError()); #else cpu_kernel_polynomial( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part); #endif }
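// ---------------------------------------------------------------------------
// Illustrative sketch added by the editor (not part of the original source).
// gpu_kernel_polynomial / cpu_kernel_polynomial above implement the vector form
// of the Chebyshev three-term recurrence
//     phi_{m+1} = 2 * (H / E_max) * phi_m - phi_{m-1},
// with the scaled Hamiltonian applied through the on-site and hopping terms.
// The tiny host-side function below shows the same recurrence for the scalar
// Chebyshev polynomials T_m(x); the name chebyshev_t_scalar is hypothetical and
// only meant to make the roles of state_0 / state_1 / state_2 easier to follow.
#include <vector>

static std::vector<double> chebyshev_t_scalar(double x, int m_max)
{
  std::vector<double> t(static_cast<size_t>(m_max) + 1);
  t[0] = 1.0;                    // T_0(x) = 1   (plays the role of state_0)
  if (m_max >= 1) { t[1] = x; }  // T_1(x) = x   (plays the role of state_1)
  for (int m = 2; m <= m_max; ++m) {
    // Same update as the kernels above: new = 2 * x * current - previous.
    t[m] = 2.0 * x * t[m - 1] - t[m - 2];
  }
  return t;
}
// ---------------------------------------------------------------------------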
namespace TextEntity { void Objective::generate_labels( const EntityIdxType* const labels, const size_t num_labels, const size_t num_negative_labels, std::vector<EntityIdxType>* const instance_entities, RNG* const rng) const { PROFILE_FUNCTION(); CHECK(instance_entities->empty()); const size_t num_repeats = num_negative_labels + 1; const size_t total_labels = num_labels * num_repeats; instance_entities->resize(total_labels, 0); label_generator_->generate( labels, model_->entities_, num_labels, num_negative_labels, instance_entities, /* dst */ rng); DCHECK_EQ(instance_entities->size(), total_labels); CHECK_EQ(instance_entities->size() % num_repeats, 0); } Objective::ForwardResultType* Objective::compute_cost( const Batch& batch, RNG* const rng) const { PROFILE_FUNCTION(); const size_t num_random_entities = train_config_.num_random_entities(); std::unique_ptr<device_matrix<WordIdxType>> instance_words( device_matrix<WordIdxType>::create( model_->streams_->next(), batch.features_, /* begin */ batch.features_+ batch.num_instances_ * batch.window_size(), /* end */ 1, /* num_rows */ batch.num_instances_ * batch.window_size() /* num_cols */)); std::unique_ptr<device_matrix<FloatT>> instance_word_weights( device_matrix<FloatT>::create( model_->streams_->next(), batch.feature_weights_, /* begin */ batch.feature_weights_+ batch.num_instances_ * batch.window_size(), /* end */ 1, /* num_rows */ batch.num_instances_ * batch.window_size() /* num_cols */)); // Transfer instance weights to device. // // TODO(cvangysel): maybe these weights can live on the GPU? std::unique_ptr<device_matrix<FloatT>> instance_weights( device_matrix<FloatT>::create( model_->streams_->next(), batch.weights_, /* begin */ batch.weights_+ batch.num_instances_, /* end */ 1, /* num_rows */ batch.num_instances_ /* num_cols */)); // Figure out the entities in the sample. const size_t num_entities = batch.num_instances_; const size_t num_repeats = num_random_entities + 1; const size_t total_entity_ids = num_repeats * num_entities; std::unique_ptr<device_matrix<WordIdxType>> instance_entities( new device_matrix<WordIdxType>( 1, /* num_rows */ total_entity_ids /* num_cols */, model_->streams_->next())); // Generate identifiers for target and negative entities. 
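// (Editor's note, added for clarity: generate_labels expands every positive
// label into num_random_entities + 1 entity ids -- the target entity plus its
// negative samples -- so instance_entities holds one column group of size
// (num_random_entities + 1) per instance. The exact ordering inside a group is
// delegated to label_generator_, which is not shown in this file; the code
// below only relies on the positive entity occupying one fixed, periodically
// repeating column in every group.)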
{ std::vector<EntityIdxType> tmp_instance_entities; generate_labels(batch.labels_, num_entities, /* num_entities */ num_random_entities, /* num_negative_labels */ &tmp_instance_entities, rng); CHECK_EQ(tmp_instance_entities.size(), total_entity_ids); instance_entities->fillwith( instance_entities->getStream(), tmp_instance_entities); } std::unique_ptr<ForwardResultType> result( new ForwardResultType(instance_words.release(), instance_word_weights.release(), instance_entities.release(), batch.window_size(), num_random_entities, train_config_.regularization_lambda())); if (model_->desc_.l2_normalize_phrase_reprs()) { result->phrase_normalizer_.reset(new Normalizer<FloatT>( num_entities /* num_instances */)); } if (model_->desc_.l2_normalize_entity_reprs()) { result->entity_normalizer_.reset(new Normalizer<FloatT>( total_entity_ids /* num_instances */)); } if (model_->desc_.transform_desc().batch_normalization()) { result->batch_normalization_.reset( new BatchNormalization<FloatT>( model_->desc_.entity_repr_size(), /* num_features */ 0.1, /* momentum */ 1e-4, /* epsilon */ true /* cache_input */)); } DCHECK_EQ(instance_weights->size(), result->batch_size_); DCHECK_EQ(result->entity_ids_->size() / num_repeats, result->batch_size_); // // From word representations to projections in entity space. // // Get phrase representations. result->phrase_reprs_.reset( model_->get_phrase_representations(result->flattened_words_->getStream(), *result->flattened_words_, batch.window_size(), result->flattened_word_weights_.get())); CHECK_DIMENSIONS((*result->phrase_reprs_), model_->words_.size(), num_entities); CHECK_MATRIX(*result->phrase_reprs_); if (result->phrase_normalizer_ != nullptr) { result->phrase_normalizer_->forward( *result->phrase_reprs_, /* input */ result->phrase_reprs_.get() /* output */); CHECK_MATRIX(*result->phrase_reprs_); } // Project to entity space. result->word_projections_.reset( model_->transform_.transform(result->phrase_reprs_->getStream(), *result->phrase_reprs_, result->batch_normalization_.get())); CHECK_DIMENSIONS(*result->word_projections_, model_->entities_.size(), num_entities); CHECK_MATRIX(*result->word_projections_); // // From projections to NCE cost. // // Broadcast projections. result->broadcasted_word_projections_.reset( broadcast_columns(result->word_projections_->getStream(), *result->word_projections_, num_repeats)); // Fetch entity representations. result->entity_representations_.reset( model_->entities_.get_representations( model_->streams_->next(), *result->entity_ids_)); if (result->entity_normalizer_ != nullptr) { result->entity_normalizer_->forward( *result->entity_representations_, result->entity_representations_.get()); CHECK_MATRIX(*result->entity_representations_); } // Negate the representations belonging to negative instances; // this works for us both in the forward as in the backward passes. // // Forward pass: // sigmoid(-x) = 1.0 - sigmoid(x) // // and such, we can back propagate the errors without special casing // the negative instances. apply_except_every_Nth_column<thrust::negate<FloatT>>( thrust::cuda::par.on(result->entity_representations_->getStream()), num_random_entities + 1 /* col_idx */, result->entity_representations_.get()); CHECK_MATRIX(*result->entity_representations_); CHECK_MATRIX(*result->broadcasted_word_projections_); CHECK_DIMENSIONS_EQUAL(*result->entity_representations_, *result->broadcasted_word_projections_); // Hadamard product. 
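// (Editor's note: the dot product between each projected phrase and its
// (possibly negated) entity representation is computed in two steps below --
// an element-wise multiply into multiplied_representations, followed by a
// reduce_axis over the feature dimension -- yielding one similarity score per
// column, to which the clipped sigmoid is then applied.)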
device_matrix<FloatT> multiplied_representations( model_->entities_.size(), total_entity_ids, model_->streams_->next()); MAKE_MATRIX_NULL(multiplied_representations); CHECK_DIMENSIONS_EQUAL(multiplied_representations, *result->entity_representations_); // Merge streams. const cudaStream_t words_and_entities_stream = merge_streams( result->broadcasted_word_projections_->getStream(), merge_streams(result->entity_representations_->getStream(), multiplied_representations.getStream())); { thrust::transform( thrust::cuda::par.on(words_and_entities_stream), begin(*result->broadcasted_word_projections_), /* first op */ end(*result->broadcasted_word_projections_), begin(*result->entity_representations_), /* second op */ begin(multiplied_representations), /* dest */ thrust::multiplies<FloatT>()); CHECK_MATRIX(multiplied_representations); } // Compute the similarity probabilities (for every entity, either positive or negative). result->similarity_probs_.reset( new device_matrix<FloatT>(1, total_entity_ids, model_->streams_->next())); CHECK_EQ(total_entity_ids, multiplied_representations.getCols()); const cudaStream_t probs_and_words_and_entities_stream = merge_streams( result->similarity_probs_->getStream(), words_and_entities_stream); { // Aggregate multiplied representations (i.e. finish the dot product); // reduce_axis does not expect nulled output. reduce_axis( probs_and_words_and_entities_stream, FIRST_AXIS, multiplied_representations, result->similarity_probs_.get()); // Apply sigmoid, clipped between epsilon and 1.0 - epsilon. apply_elemwise<func::truncated_sigmoid<FloatT>>( thrust::cuda::par.on(probs_and_words_and_entities_stream), result->similarity_probs_.get(), func::truncated_sigmoid<FloatT>( model_->desc_.clip_sigmoid() ? 1e-7 : 0.0 /* epsilon */)); } // Make a copy for the log_probs. result->pointwise_mass_.reset( result->similarity_probs_->copy(probs_and_words_and_entities_stream)); // Convert to log-probs. apply_elemwise<func::log<FloatT> >( thrust::cuda::par.on(probs_and_words_and_entities_stream), result->pointwise_mass_.get()); // For every positive example, we sample one or more negative classes; // this introduces an artificial bias towards negative classes. // // Given that we sample an equal amount of instances from every document, // every class receives the same number of updates. However, as the introduced // bias prefers negative classes, we postulate that this causes all documents // to live very close near each other in a restricted area of the space. // // Most likely, if you train long enough with this bias enabled; the learning // process figures it out. if (!model_->desc_.bias_negative_samples() && num_random_entities > 1) { // Reweights everything such that the cost function remains const instance_weights->scale( instance_weights->getStream(), (static_cast<FloatT>(num_random_entities) + 1.0) / (2.0 * static_cast<FloatT>(num_random_entities))); } // Broadcast instance weights. result->broadcasted_instance_weights_.reset( broadcast_columns(instance_weights->getStream(), *instance_weights, num_repeats)); // Continuation of bias correction above. if (!model_->desc_.bias_negative_samples() && num_random_entities > 1) { // Upweights the positive instances; every positive instances becomes // equal to the number of negative instances. 
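// (Editor's note on the arithmetic: with k = num_random_entities, every weight
// was first scaled by (k + 1) / (2k) above, and the positive column is scaled
// by a further factor k below. Per instance group this gives the positive a
// weight of (k + 1) / 2 and the k negatives a combined weight of (k + 1) / 2 as
// well, so positives and negatives contribute equal mass while the group total
// stays k + 1 times the original instance weight.)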
apply_every_Nth_column( thrust::cuda::par.on(result->broadcasted_instance_weights_->getStream()), num_repeats /* col_idx */, result->broadcasted_instance_weights_.get(), func::scale_by_constant<FloatT>(num_random_entities)); } // Verify dimensions. CHECK_DIMENSIONS_EQUAL(*result->pointwise_mass_, *result->broadcasted_instance_weights_); const cudaStream_t predictions_and_weights_stream = merge_streams( probs_and_words_and_entities_stream, result->broadcasted_instance_weights_->getStream()); { // Again, Hadamard, but now between point-wise contributions and their weights. hadamard_product( thrust::cuda::par.on(predictions_and_weights_stream), *result->broadcasted_instance_weights_, result->pointwise_mass_.get()); } // Synchronize here, as the gradient phase has not been fully stream-lined (pun intended). // streams_->synchronize(); DCHECK(result->complete()); return result.release(); } Objective::GradientsType* Objective::compute_gradients(const ForwardResultType& result) { PROFILE_FUNCTION(); DCHECK(result.complete()); const cudaStream_t stream = model_->streams_->next(); std::unique_ptr<GradientsType> gradients(new SingleGradients<::Typedefs::FloatT>(&result)); // As we take the negative of the joint-log probability and consequently // subtract the derivative of this quantity w.r.t. the weights, we can // simply add the derivative (i.e. no need to negate, and we do gradient ascent). const size_t num_entities = result.entity_ids_->size(); // Broadcast the phrase representations. const size_t num_repeats = (result.num_random_entities_ + 1); CHECK_EQ(num_repeats * result.batch_size_, num_entities); // Keep track of multipliers for every entity instance. device_matrix<FloatT> instance_multipliers( 1 /* num rows */, num_entities /* num columns*/, model_->streams_->next()); const cudaStream_t instance_multipliers_stream = merge_streams( instance_multipliers.getStream(), merge_streams(result.broadcasted_instance_weights_->getStream(), result.similarity_probs_->getStream())); CHECK_DIMENSIONS_EQUAL( instance_multipliers, (*result.broadcasted_instance_weights_)); CHECK_DIMENSIONS_EQUAL( instance_multipliers, (*result.similarity_probs_)); // Multiply learning rate divided by batch size with // the per-instance weights. const FloatT batch_size_normalizer = exp(-log(result.batch_size_)); // Multiply with the complement of the probabilities. thrust::transform( thrust::cuda::par.on(instance_multipliers_stream), // Multiply in instance weights. begin(*result.broadcasted_instance_weights_), end(*result.broadcasted_instance_weights_), /* first op*/ // Multiply in batch normalization constant. make_scalar_multiplication_iterator( // Multiply in similarity probabilities derivatives. thrust::make_transform_iterator( begin(*result.similarity_probs_), func::sigmoid_to_log_sigmoid_deriv<FloatT>( model_->desc_.clip_sigmoid() ? 1e-6 : 0.0 /* epsilon */)), batch_size_normalizer), /* second op */ begin(instance_multipliers), /* result */ thrust::multiplies<FloatT>()); // Get pointers to intermediate results which we will use. const device_matrix<FloatT>* const entity_reprs = result.entity_representations_.get(); CHECK_EQ(entity_reprs->getCols(), num_entities); CHECK_DIMENSIONS((*result.broadcasted_word_projections_), model_->entities_.size(), num_entities); // d cost / d entity_reprs gradients->grad_entity_repr_.reset( result.broadcasted_word_projections_->copy( result.broadcasted_word_projections_->getStream())); // Verify dimensions. 
CHECK_DIMENSIONS_EQUAL((*gradients->grad_entity_repr_), (*result.broadcasted_word_projections_)); CHECK_EQ(gradients->grad_entity_repr_->getCols(), instance_multipliers.getCols()); // Merge in multipliers. apply_columnwise<thrust::multiplies<FloatT>>( thrust::cuda::par.on(stream), instance_multipliers, gradients->grad_entity_repr_.get()); // Negate the appropriate representations again. apply_except_every_Nth_column<thrust::negate<FloatT>>( thrust::cuda::par.on(stream), num_repeats /* col_idx */, gradients->grad_entity_repr_.get()); CHECK_MATRIX(*gradients->grad_entity_repr_); if (result.entity_normalizer_ != nullptr) { gradients->grad_entity_repr_.reset( result.entity_normalizer_->backward( *gradients->grad_entity_repr_)); CHECK_DIMENSIONS_EQUAL(*gradients->grad_entity_repr_, *result.broadcasted_word_projections_); } // // Back-propagate through projection: d cost / d projection. // const size_t src_num_cols = result.num_random_entities_ + 1; std::unique_ptr<device_matrix<FloatT>> grad_projection( fold_columns( entity_reprs->getStream(), *entity_reprs, src_num_cols, &instance_multipliers)); CHECK_DIMENSIONS(*grad_projection, model_->entities_.size(), result.batch_size_); gradients->grad_bias_.reset( new device_matrix<FloatT>(model_->entities_.size(), 1, stream)); gradients->grad_transform_matrix_.reset( new device_matrix<FloatT>(model_->entities_.size(), model_->words_.size(), stream)); // Back-propagate through transform layer. model_->transform_.backward( stream, result, *result.phrase_reprs_, *result.word_projections_, grad_projection.get(), gradients.get()); // d cost / d word_reprs // // (entity_repr_size by word_repr_size)^T X (entity_repr_size by batch_size_) gradients->grad_phrase_reprs_.reset( new device_matrix<FloatT>(model_->words_.size(), result.batch_size_, stream)); // TODO(cvangysel): this logic should also be moved to Transform::backward. matrix_mult(stream, model_->transform_.transform_, CUBLAS_OP_T, *grad_projection, CUBLAS_OP_N, gradients->grad_phrase_reprs_.get()); CHECK_DIMENSIONS(*gradients->grad_phrase_reprs_, model_->words_.size(), result.batch_size_); if (result.phrase_normalizer_ != nullptr) { gradients->grad_phrase_reprs_.reset( result.phrase_normalizer_->backward( *gradients->grad_phrase_reprs_)); CHECK_DIMENSIONS(*gradients->grad_phrase_reprs_, model_->words_.size(), result.batch_size_); } // Divide by window_size, as we took the average word representation in the forward pass. 
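// (Editor's note: exp(-log(result.window_size_)) used below evaluates to
// 1.0 / result.window_size_; the factor propagates, via the chain rule, the
// averaging over the window performed when building the phrase representation
// in the forward pass.)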
thrust::transform(thrust::cuda::par.on(stream), gradients->grad_phrase_reprs_->begin(), gradients->grad_phrase_reprs_->end(), gradients->grad_phrase_reprs_->begin(), func::scale_by_constant<FloatT>( exp(-log(result.window_size_)) /* result.window_size_^-1 */)); CHECK_MATRIX(*gradients->grad_phrase_reprs_); return gradients.release(); } } // namespace TextEntity namespace RepresentationSimilarity { Objective::ForwardResultType* Objective::compute_cost( const Batch& batch, RNG* const rng) const { PROFILE_FUNCTION(); const size_t num_instances = batch.num_instances_; const size_t total_ids = 2 * num_instances; std::unique_ptr<device_matrix<ObjectIdxType>> instance_ids( device_matrix<ObjectIdxType>::create( model_->streams_->next(), batch.features_, batch.features_ + total_ids, 1, /* num_rows */ total_ids /* num_cols */)); std::unique_ptr<device_matrix<FloatT>> instance_weights( device_matrix<FloatT>::create( model_->streams_->next(), batch.weights_, /* begin */ batch.weights_+ num_instances, /* end */ 1, /* num_rows */ num_instances /* num_cols */)); print_matrix(*instance_weights); std::unique_ptr<ForwardResultType> result( new ForwardResultType(param_id_, instance_ids.release(), instance_weights.release(), train_config_.regularization_lambda())); // Fetch representations. result->representations_.reset( get_representation_storage()->get_representations( model_->streams_->next(), *result->ids_)); CHECK_DIMENSIONS(*result->representations_, get_representation_storage()->size(), result->ids_->getCols()); std::unique_ptr<device_matrix<FloatT>> multiplied_representations( fold_columns<FloatT, thrust::multiplies<FloatT>>( result->representations_->getStream(), *result->representations_, 2 /* cluster_size */)); // Compute the similarity probabilities (for every representation, either positive or negative). result->similarity_probs_.reset( new device_matrix<FloatT>(1, num_instances, model_->streams_->next())); CHECK_EQ(num_instances, multiplied_representations->getCols()); reduce_axis(merge_streams( multiplied_representations->getStream(), result->similarity_probs_->getStream()), FIRST_AXIS, *multiplied_representations, result->similarity_probs_.get()); // Apply sigmoid, clipped between epsilon and 1.0 - epsilon. apply_elemwise<func::truncated_sigmoid<FloatT>>( thrust::cuda::par.on(result->similarity_probs_->getStream()), result->similarity_probs_.get(), func::truncated_sigmoid<FloatT>( model_->desc_.clip_sigmoid() ? 1e-7 : 0.0 /* epsilon */)); // Make a copy for the log_probs. result->pointwise_mass_.reset( result->similarity_probs_->copy(result->similarity_probs_->getStream())); // Convert to log-probabilities. apply_elemwise<func::log<FloatT> >( thrust::cuda::par.on(result->pointwise_mass_->getStream()), result->pointwise_mass_.get()); elemwise_binary( thrust::cuda::par.on(merge_streams( result->pointwise_mass_->getStream(), result->weights_->getStream())), *result->weights_, /* first op */ result->pointwise_mass_.get(), /* second op and dst */ thrust::multiplies<FloatT>()); // Synchronize here, as the gradient phase has not been fully stream-lined (pun intended). 
// streams_->synchronize(); DCHECK(result->complete()); return result.release(); } Objective::GradientsType* Objective::compute_gradients(const ForwardResultType& result) { PROFILE_FUNCTION(); DCHECK(result.complete()); const cudaStream_t stream = model_->streams_->next(); std::unique_ptr<GradientsType> gradients(new SingleGradients<::Typedefs::FloatT>(&result)); // As we take the negative of the joint-log probability and consequently // subtract the derivative of this quantity w.r.t. the weights, we can // simply add the derivative (i.e. no need to negate, and we do gradient ascent). const size_t num_instances = result.batch_size_; const size_t num_ids = 2 * num_instances; // Keep track of multipliers for every instance. std::unique_ptr<device_matrix<FloatT>> instance_multipliers( result.weights_->copy(result.weights_->getStream())); CHECK_DIMENSIONS(*instance_multipliers, 1 /* num_rows */, num_instances /* num_cols */); const cudaStream_t instance_multipliers_stream = merge_streams( instance_multipliers->getStream(), result.similarity_probs_->getStream()); CHECK_DIMENSIONS_EQUAL( *instance_multipliers, *result.similarity_probs_); // Multiply learning rate divided by batch size with // the per-instance weights. const FloatT batch_size_normalizer = exp(-log(result.batch_size_)); // Multiply with the complement of the probabilities. thrust::transform( thrust::cuda::par.on(instance_multipliers_stream), // Multiply in instance weights. begin(*instance_multipliers), // PLACEHOLDER FOR instance weights. end(*instance_multipliers), /* first op*/ // Multiply in batch normalization constant. make_scalar_multiplication_iterator( // Multiply in similarity probabilities derivatives. thrust::make_transform_iterator( begin(*result.similarity_probs_), func::sigmoid_to_log_sigmoid_deriv<FloatT>( model_->desc_.clip_sigmoid() ? 1e-6 : 0.0 /* epsilon */)), batch_size_normalizer), /* second op */ begin(*instance_multipliers), /* result */ thrust::multiplies<FloatT>()); // Get pointers to intermediate results that we will use. const device_matrix<FloatT>* const reprs = result.representations_.get(); CHECK_EQ(reprs->getCols(), num_ids); CHECK_DIMENSIONS(*result.representations_, get_representation_storage()->size(), num_ids); // d cost / d reprs // // TODO(cvangysel): remove this const_cast. device_matrix<FloatT>* const grad_reprs = const_cast<ForwardResultType*>(&result)->representations_.release(); reset_grad(gradients.get(), grad_reprs); // Flip adjacent columns. The matrix is organized in pairs of columns. // The gradient w.r.t. the first representation is the representation // of the second, and vice versa! flip_adjacent_columns(grad_reprs->getStream(), grad_reprs); std::unique_ptr<device_matrix<FloatT>> broadcasted_instance_multipliers( broadcast_columns(instance_multipliers->getStream(), *instance_multipliers, 2 /* num_repeats */)); // Verify dimensions. CHECK_EQ(grad_reprs->getCols(), broadcasted_instance_multipliers->getCols()); // Merge in multipliers. 
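// (Editor's note: for a pair of representations (r1, r2) the model scores
// sigmoid(r1 . r2), so d cost / d r1 is proportional to r2 and d cost / d r2
// to r1 -- which is exactly what the column flip above provides. The
// column-wise multiply below then supplies the per-instance factor
// (instance weight * sigmoid derivative * 1/batch_size) stored in
// broadcasted_instance_multipliers.)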
apply_columnwise<thrust::multiplies<FloatT>>( thrust::cuda::par.on(stream), *broadcasted_instance_multipliers, grad_reprs); return gradients.release(); } Representations<::Typedefs::FloatT, ::Typedefs::IdxType>* Objective::get_representation_storage() const { switch (param_id_) { case WORD_REPRS: return &model_->words_; case ENTITY_REPRS: return &model_->entities_; default: break; }; LOG(FATAL) << "Unable to return representations."; throw 0; } void Objective::reset_grad(GradientsType* const gradients, device_matrix<FloatT>* const grad_reprs) const { switch (param_id_) { case WORD_REPRS: gradients->grad_phrase_reprs_.reset(grad_reprs); return; case ENTITY_REPRS: gradients->grad_entity_repr_.reset(grad_reprs); return; default: break; }; LOG(FATAL) << "Unable to set gradient."; throw 0; } } // namespace RepresentationSimilarity namespace TextEntityEntityEntity { Objective::Objective( ::Typedefs::ModelBase* const model, const lse::TrainConfig& train_config) : ::Objective<BatchType, ForwardResultType, GradientsType>(model, train_config), text_entity_weight_(train_config.text_entity_weight()), entity_entity_weight_(train_config.entity_entity_weight()), text_entity_objective_(model, train_config), entity_entity_objective_(ENTITY_REPRS, model, train_config) { CHECK_NE(text_entity_weight_, 0.0); CHECK_NE(entity_entity_weight_, 0.0); } Objective::ForwardResultType* Objective::compute_cost( const BatchType& batch, RNG* const rng) const { PROFILE_FUNCTION(); return new ForwardResultType( std::make_tuple( std::make_pair(text_entity_objective_.compute_cost(std::get<0>(batch), rng), text_entity_weight_), std::make_pair(entity_entity_objective_.compute_cost(std::get<1>(batch), rng), entity_entity_weight_))); } Objective::GradientsType* Objective::compute_gradients(const ForwardResultType& result) { PROFILE_FUNCTION(); MergeGradientsFn<FloatT> merge_gradients_fn; std::unique_ptr<Gradients<FloatT>> text_entity_gradients( text_entity_objective_.compute_gradients(*std::get<0>(std::get<0>(result.forward_results_)))); FloatT text_entity_weight = std::get<1>(std::get<0>(result.forward_results_)); std::unique_ptr<Gradients<FloatT>> entity_entity_gradients( entity_entity_objective_.compute_gradients(*std::get<0>(std::get<1>(result.forward_results_)))); FloatT entity_entity_weight = std::get<1>(std::get<1>(result.forward_results_)); Objective::GradientsType* const merged_gradients = merge_gradients_fn({ {text_entity_gradients.release(), text_entity_weight}, {entity_entity_gradients.release(), entity_entity_weight}, }); return merged_gradients; } } // namespace TextEntityEntityEntity namespace TextEntityTermTerm { Objective::Objective( ::Typedefs::ModelBase* const model, const lse::TrainConfig& train_config) : ::Objective<BatchType, ForwardResultType, GradientsType>(model, train_config), text_entity_weight_(train_config.text_entity_weight()), term_term_weight_(train_config.term_term_weight()), text_entity_objective_(model, train_config), term_term_objective_(WORD_REPRS, model, train_config) { CHECK_NE(text_entity_weight_, 0.0); CHECK_NE(term_term_weight_, 0.0); } Objective::ForwardResultType* Objective::compute_cost( const BatchType& batch, RNG* const rng) const { PROFILE_FUNCTION(); return new ForwardResultType( std::make_tuple( std::make_pair(text_entity_objective_.compute_cost(std::get<0>(batch), rng), text_entity_weight_), std::make_pair(term_term_objective_.compute_cost(std::get<1>(batch), rng), term_term_weight_))); } Objective::GradientsType* Objective::compute_gradients(const ForwardResultType& result) { 
PROFILE_FUNCTION(); MergeGradientsFn<FloatT> merge_gradients_fn; std::unique_ptr<Gradients<FloatT>> text_entity_gradients( text_entity_objective_.compute_gradients(*std::get<0>(std::get<0>(result.forward_results_)))); FloatT text_entity_weight = std::get<1>(std::get<0>(result.forward_results_)); std::unique_ptr<Gradients<FloatT>> term_term_gradients( term_term_objective_.compute_gradients(*std::get<0>(std::get<1>(result.forward_results_)))); FloatT term_term_weight = std::get<1>(std::get<1>(result.forward_results_)); Objective::GradientsType* const merged_gradients = merge_gradients_fn({ {text_entity_gradients.release(), text_entity_weight}, {term_term_gradients.release(), term_term_weight}, }); return merged_gradients; } } // namespace TextEntityTermTerm
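// ---------------------------------------------------------------------------
// Illustrative sketch added by the editor (not part of the original source).
// The pointwise mass accumulated in TextEntity::Objective::compute_cost is,
// per instance, a weighted NCE-style log-likelihood
//     w_pos * log(sigmoid(s_pos)) + sum_k w_neg * log(sigmoid(-s_neg_k)),
// where s are dot products between the projected phrase and the positive or
// sampled negative entity representations (negatives enter with a negated
// sign, using sigmoid(-x) = 1 - sigmoid(x)).  The scalar function below
// mirrors that computation for one instance; all names are illustrative and
// the weights stand for the (optionally bias-corrected) broadcasted instance
// weights used above.
#include <cmath>
#include <vector>

static double nce_pointwise_mass(double positive_score,
                                 const std::vector<double>& negative_scores,
                                 double positive_weight,
                                 double negative_weight)
{
  // log(sigmoid(x)) written in a numerically friendlier form.
  auto log_sigmoid = [](double x) { return -std::log1p(std::exp(-x)); };
  double mass = positive_weight * log_sigmoid(positive_score);
  for (double s : negative_scores) {
    mass += negative_weight * log_sigmoid(-s);  // negation trick for negatives
  }
  return mass;
}
// ---------------------------------------------------------------------------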
#ifndef ONEFLOW_CORE_KERNEL_UTIL_NUMERICS_H #define ONEFLOW_CORE_KERNEL_UTIL_NUMERICS_H #pragma once #include <limits.h> #include <math.h> #include <float.h> #include <cstdlib> #include <assert.h> #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/util/numeric_limits.cuh" namespace oneflow { namespace detail { template<typename T> struct numerics {}; template<typename T> OF_NUMERICS_FUNC T powi(T a, T b) { assert(numerics<T>::ge(b, 0)); T result = 1; while (b) { if (b & 1) { result *= a; } b /= 2; a *= a; } return result; } template<> struct numerics<uint8_t> { OF_NUMERICS_FUNC uint8_t min() { return detail::numeric_limits<uint8_t>::lowest(); } OF_NUMERICS_FUNC uint8_t max() { return detail::numeric_limits<uint8_t>::max(); } OF_NUMERICS_FUNC uint8_t lower_bound() { return detail::numeric_limits<uint8_t>::lower_bound(); } OF_NUMERICS_FUNC uint8_t upper_bound() { return detail::numeric_limits<uint8_t>::upper_bound(); } OF_NUMERICS_FUNC bool lt(uint8_t a, uint8_t b) { return a < b; } OF_NUMERICS_FUNC bool le(uint8_t a, uint8_t b) { return a <= b; } OF_NUMERICS_FUNC bool gt(uint8_t a, uint8_t b) { return a > b; } OF_NUMERICS_FUNC bool ge(uint8_t a, uint8_t b) { return a >= b; } OF_NUMERICS_FUNC bool eq(uint8_t a, uint8_t b) { return a == b; } OF_NUMERICS_FUNC bool ne(uint8_t a, uint8_t b) { return a != b; } OF_NUMERICS_FUNC uint8_t add(uint8_t a, uint8_t b) { return a + b; } OF_NUMERICS_FUNC uint8_t mul(uint8_t a, uint8_t b) { return a * b; } OF_NUMERICS_FUNC uint8_t sub(uint8_t a, uint8_t b) { return a - b; } OF_NUMERICS_FUNC uint8_t div(uint8_t a, uint8_t b) { return a / b; } OF_NUMERICS_FUNC uint8_t pow(uint8_t a, uint8_t b) { return powi<uint8_t>(a, b); } OF_NUMERICS_FUNC bool isnan(uint8_t a) { return false; } OF_NUMERICS_FUNC bool isinf(uint8_t a) { return false; } }; #ifdef _MSC_VER // Suppress warning C4804: '/': unsafe use of type 'bool' in operation #pragma warning(push) #pragma warning(disable : 4804) #endif template<> struct numerics<bool> { OF_NUMERICS_FUNC bool min() { return detail::numeric_limits<bool>::lowest(); } OF_NUMERICS_FUNC bool max() { return detail::numeric_limits<bool>::max(); } OF_NUMERICS_FUNC bool lower_bound() { return detail::numeric_limits<bool>::lower_bound(); } OF_NUMERICS_FUNC bool upper_bound() { return detail::numeric_limits<bool>::upper_bound(); } OF_NUMERICS_FUNC bool lt(bool a, bool b) { return a < b; } OF_NUMERICS_FUNC bool le(bool a, bool b) { return a <= b; } OF_NUMERICS_FUNC bool gt(bool a, bool b) { return a > b; } OF_NUMERICS_FUNC bool ge(bool a, bool b) { return a >= b; } OF_NUMERICS_FUNC bool eq(bool a, bool b) { return a == b; } OF_NUMERICS_FUNC bool ne(bool a, bool b) { return a != b; } OF_NUMERICS_FUNC bool add(bool a, bool b) { return a + b; } OF_NUMERICS_FUNC bool mul(bool a, bool b) { return a && b; } OF_NUMERICS_FUNC bool sub(bool a, bool b) { return a - b; } OF_NUMERICS_FUNC bool div(bool a, bool b) { return a / b; } OF_NUMERICS_FUNC bool isnan(bool a) { return false; } OF_NUMERICS_FUNC bool isinf(bool a) { return false; } }; #ifdef _MSC_VER #pragma warning(pop) #endif template<> struct numerics<int8_t> { OF_NUMERICS_FUNC int8_t min() { return detail::numeric_limits<int8_t>::lowest(); } OF_NUMERICS_FUNC int8_t max() { return detail::numeric_limits<int8_t>::max(); } OF_NUMERICS_FUNC int8_t lower_bound() { return detail::numeric_limits<int8_t>::lower_bound(); } OF_NUMERICS_FUNC int8_t upper_bound() { return detail::numeric_limits<int8_t>::upper_bound(); } OF_NUMERICS_FUNC 
bool lt(int8_t a, int8_t b) { return a < b; } OF_NUMERICS_FUNC bool le(int8_t a, int8_t b) { return a <= b; } OF_NUMERICS_FUNC bool gt(int8_t a, int8_t b) { return a > b; } OF_NUMERICS_FUNC bool ge(int8_t a, int8_t b) { return a >= b; } OF_NUMERICS_FUNC bool eq(int8_t a, int8_t b) { return a == b; } OF_NUMERICS_FUNC bool ne(int8_t a, int8_t b) { return a != b; } OF_NUMERICS_FUNC int8_t add(int8_t a, int8_t b) { return a + b; } OF_NUMERICS_FUNC int8_t mul(int8_t a, int8_t b) { return a * b; } OF_NUMERICS_FUNC int8_t sub(int8_t a, int8_t b) { return a - b; } OF_NUMERICS_FUNC int8_t div(int8_t a, int8_t b) { return a / b; } OF_NUMERICS_FUNC int8_t pow(int8_t a, int8_t b) { return powi<int8_t>(a, b); } OF_NUMERICS_FUNC bool isnan(int8_t a) { return false; } OF_NUMERICS_FUNC bool isinf(int8_t a) { return false; } }; template<> struct numerics<int16_t> { OF_NUMERICS_FUNC int16_t min() { return detail::numeric_limits<int16_t>::lowest(); } OF_NUMERICS_FUNC int16_t max() { return detail::numeric_limits<int16_t>::max(); } OF_NUMERICS_FUNC int16_t lower_bound() { return detail::numeric_limits<int16_t>::lower_bound(); } OF_NUMERICS_FUNC int16_t upper_bound() { return detail::numeric_limits<int16_t>::upper_bound(); } OF_NUMERICS_FUNC bool lt(int16_t a, int16_t b) { return a < b; } OF_NUMERICS_FUNC bool le(int16_t a, int16_t b) { return a <= b; } OF_NUMERICS_FUNC bool gt(int16_t a, int16_t b) { return a > b; } OF_NUMERICS_FUNC bool ge(int16_t a, int16_t b) { return a >= b; } OF_NUMERICS_FUNC bool eq(int16_t a, int16_t b) { return a == b; } OF_NUMERICS_FUNC bool ne(int16_t a, int16_t b) { return a != b; } OF_NUMERICS_FUNC int16_t add(int16_t a, int16_t b) { return a + b; } OF_NUMERICS_FUNC int16_t mul(int16_t a, int16_t b) { return a * b; } OF_NUMERICS_FUNC int16_t sub(int16_t a, int16_t b) { return a - b; } OF_NUMERICS_FUNC int16_t div(int16_t a, int16_t b) { return a / b; } OF_NUMERICS_FUNC int16_t pow(int16_t a, int16_t b) { return powi<int16_t>(a, b); } OF_NUMERICS_FUNC bool isnan(int16_t a) { return false; } OF_NUMERICS_FUNC bool isinf(int16_t a) { return false; } }; template<> struct numerics<int32_t> { OF_NUMERICS_FUNC int32_t min() { return detail::numeric_limits<int32_t>::lowest(); } OF_NUMERICS_FUNC int32_t max() { return detail::numeric_limits<int32_t>::max(); } OF_NUMERICS_FUNC int32_t lower_bound() { return detail::numeric_limits<int32_t>::lower_bound(); } OF_NUMERICS_FUNC int32_t upper_bound() { return detail::numeric_limits<int32_t>::upper_bound(); } OF_NUMERICS_FUNC bool lt(int32_t a, int32_t b) { return a < b; } OF_NUMERICS_FUNC bool le(int32_t a, int32_t b) { return a <= b; } OF_NUMERICS_FUNC bool gt(int32_t a, int32_t b) { return a > b; } OF_NUMERICS_FUNC bool ge(int32_t a, int32_t b) { return a >= b; } OF_NUMERICS_FUNC bool eq(int32_t a, int32_t b) { return a == b; } OF_NUMERICS_FUNC bool ne(int32_t a, int32_t b) { return a != b; } OF_NUMERICS_FUNC int32_t add(int32_t a, int32_t b) { return a + b; } OF_NUMERICS_FUNC int32_t mul(int32_t a, int32_t b) { return a * b; } OF_NUMERICS_FUNC int32_t sub(int32_t a, int32_t b) { return a - b; } OF_NUMERICS_FUNC int32_t div(int32_t a, int32_t b) { return a / b; } OF_NUMERICS_FUNC int32_t pow(int32_t a, int32_t b) { return powi<int32_t>(a, b); } OF_NUMERICS_FUNC bool isnan(int32_t a) { return false; } OF_NUMERICS_FUNC bool isinf(int32_t a) { return false; } }; template<> struct numerics<int64_t> { OF_NUMERICS_FUNC int64_t min() { return detail::numeric_limits<int64_t>::lowest(); } OF_NUMERICS_FUNC int64_t max() { return 
detail::numeric_limits<int64_t>::max(); } OF_NUMERICS_FUNC int64_t lower_bound() { return detail::numeric_limits<int64_t>::lower_bound(); } OF_NUMERICS_FUNC int64_t upper_bound() { return detail::numeric_limits<int64_t>::upper_bound(); } OF_NUMERICS_FUNC bool lt(int64_t a, int64_t b) { return a < b; } OF_NUMERICS_FUNC bool le(int64_t a, int64_t b) { return a <= b; } OF_NUMERICS_FUNC bool gt(int64_t a, int64_t b) { return a > b; } OF_NUMERICS_FUNC bool ge(int64_t a, int64_t b) { return a >= b; } OF_NUMERICS_FUNC bool eq(int64_t a, int64_t b) { return a == b; } OF_NUMERICS_FUNC bool ne(int64_t a, int64_t b) { return a != b; } OF_NUMERICS_FUNC int64_t add(int64_t a, int64_t b) { return a + b; } OF_NUMERICS_FUNC int64_t mul(int64_t a, int64_t b) { return a * b; } OF_NUMERICS_FUNC int64_t sub(int64_t a, int64_t b) { return a - b; } OF_NUMERICS_FUNC int64_t div(int64_t a, int64_t b) { return a / b; }; OF_NUMERICS_FUNC int64_t pow(int64_t a, int64_t b) { return powi<int64_t>(a, b); } OF_NUMERICS_FUNC bool isnan(int64_t a) { return false; } OF_NUMERICS_FUNC bool isinf(int64_t a) { return false; } }; // DEPRECATED: use math functions from std and cuda math API (if needed) template<> struct numerics<float> { OF_NUMERICS_FUNC float min() { return detail::numeric_limits<float>::lowest(); } OF_NUMERICS_FUNC float max() { return detail::numeric_limits<float>::max(); } OF_NUMERICS_FUNC float lower_bound() { return detail::numeric_limits<float>::lower_bound(); } OF_NUMERICS_FUNC float upper_bound() { return detail::numeric_limits<float>::upper_bound(); } OF_NUMERICS_FUNC bool lt(float a, float b) { return a < b; } OF_NUMERICS_FUNC bool le(float a, float b) { return a <= b; } OF_NUMERICS_FUNC bool gt(float a, float b) { return a > b; } OF_NUMERICS_FUNC bool ge(float a, float b) { return a >= b; } OF_NUMERICS_FUNC bool eq(float a, float b) { return a == b; } OF_NUMERICS_FUNC bool ne(float a, float b) { return a != b; } OF_NUMERICS_FUNC float sqrt(float a) { return sqrtf(a); } OF_NUMERICS_FUNC float atan(float a) { return atanf(a); } OF_NUMERICS_FUNC float add(float a, float b) { return a + b; } OF_NUMERICS_FUNC float div(float a, float b) { return a / b; } OF_NUMERICS_FUNC float mul(float a, float b) { return a * b; } OF_NUMERICS_FUNC float sub(float a, float b) { return a - b; } OF_NUMERICS_FUNC float pow(float a, float b) { return powf(a, b); } OF_NUMERICS_FUNC bool isnan(float a) { return ::isnan(a); } OF_NUMERICS_FUNC bool isinf(float a) { return ::isinf(a); } }; template<> struct numerics<double> { OF_NUMERICS_FUNC double min() { return detail::numeric_limits<double>::lowest(); } OF_NUMERICS_FUNC double max() { return detail::numeric_limits<double>::max(); } OF_NUMERICS_FUNC double lower_bound() { return detail::numeric_limits<double>::lower_bound(); } OF_NUMERICS_FUNC double upper_bound() { return detail::numeric_limits<double>::upper_bound(); } OF_NUMERICS_FUNC bool lt(double a, double b) { return a < b; } OF_NUMERICS_FUNC bool le(double a, double b) { return a <= b; } OF_NUMERICS_FUNC bool gt(double a, double b) { return a > b; } OF_NUMERICS_FUNC bool ge(double a, double b) { return a >= b; } OF_NUMERICS_FUNC bool eq(double a, double b) { return a == b; } OF_NUMERICS_FUNC bool ne(double a, double b) { return a != b; } OF_NUMERICS_FUNC double sqrt(double a) { return ::sqrt(a); } OF_NUMERICS_FUNC double atan(double a) { return ::atan(a); } OF_NUMERICS_FUNC double add(double a, double b) { return a + b; } OF_NUMERICS_FUNC double div(double a, double b) { return a / b; } OF_NUMERICS_FUNC double 
mul(double a, double b) { return a * b; } OF_NUMERICS_FUNC double sub(double a, double b) { return a - b; } OF_NUMERICS_FUNC double pow(double a, double b) { return ::pow(a, b); } OF_NUMERICS_FUNC bool isnan(double a) { return ::isnan(a); } OF_NUMERICS_FUNC bool isinf(double a) { return ::isinf(a); } }; } // namespace detail } // namespace oneflow #endif // ONEFLOW_CORE_KERNEL_UTIL_NUMERICS_H
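// ---------------------------------------------------------------------------
// Illustrative sketch added by the editor (not part of the original header).
// numerics<T>::pow for the integral specializations above delegates to powi,
// which is plain exponentiation by squaring: the exponent is consumed bit by
// bit, squaring the base at every step and multiplying it into the result
// whenever the current bit is set.  The standalone copy below mirrors that
// loop; powi_sketch is a hypothetical name used only for illustration.
#include <cassert>
#include <cstdint>

static int64_t powi_sketch(int64_t a, int64_t b)
{
  assert(b >= 0);                // same precondition as powi: b must be >= 0
  int64_t result = 1;
  while (b) {
    if (b & 1) { result *= a; }  // current bit set: fold the current power in
    b /= 2;                      // move to the next bit of the exponent
    a *= a;                      // square the base
  }
  return result;
}
// Example: powi_sketch(3, 10) multiplies in the squared powers 9 and 6561 and
// returns 59049.
// ---------------------------------------------------------------------------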
#include "nnnormalizelp.hpp" #include "impl/dispatcher.hpp" #include <iostream> #include <algorithm> #include <cmath> #include <cassert> #include <cstring> using namespace vl ; using namespace vl::nn ; using namespace vl::impl ; template<vl::DeviceType deviceType, vl::DataType dataType> struct NormalizeLpForward ; template<vl::DeviceType deviceType, vl::DataType dataType> struct NormalizeLpForwardWithNorms ; template<vl::DeviceType deviceType, vl::DataType dataType> struct NormalizeLpBackward ; template<vl::DeviceType deviceType, vl::DataType dataType> struct NormalizeLpBackwardWithNorms ; // ------------------------------------------------------------------- // Helpers // ------------------------------------------------------------------- struct VisitPattern { std::vector<ptrdiff_t> steps ; std::vector<ptrdiff_t> stepPeriods ; size_t normsVolume ; size_t inputVolume ; } ; VisitPattern getVisitPatternForInput(NormalizeLp const & op, vl::Tensor input) { // Compute tensor geometry. int n = input.getNumDimensions() ; auto inputDimensions = std::vector<size_t>(input.getDimensions(), input.getDimensions() + n) ; assert(n <= 4) ; // Todo: relax (just extend the for loops below). size_t inputVolume = 1 ; size_t normsVolume = 1 ; auto steps = std::vector<ptrdiff_t>(n+1,0) ; auto stepPeriods = std::vector<ptrdiff_t>(n+1,0) ; // Find out how to traverse the reduced results as the input is // scanned from first to last element. for (int d = 0 ; d < n ; ++d) { stepPeriods[d] = inputVolume ; bool squashed = (find(op.selectedDimensions.begin(), op.selectedDimensions.end(), d) != op.selectedDimensions.end()) ; if (!squashed) { steps[d] += normsVolume ; normsVolume *= inputDimensions[d] ; steps[d+1] -= normsVolume ; } inputVolume *= inputDimensions[d] ; } steps[n] = 0 ; stepPeriods[n] = inputVolume ; // Simplify traversal. for (int d = 0 ; d < steps.size() - 2 ; ) { if (steps[d] == 0 && steps[d+1] == 0) { steps.erase(steps.begin() + d) ; stepPeriods.erase(stepPeriods.begin() + d+1) ; } else { ++ d ; } } // Make it suitable for more efficient loops. for (int d = steps.size()-1 ; d >= 1 ; --d) { stepPeriods[d] /= stepPeriods[d - 1] ; } for (int d = steps.size() ; d < 5 ; ++d) { steps.push_back(0) ; stepPeriods.push_back(1) ; } VisitPattern vp ; vp.steps = move(steps) ; vp.stepPeriods = move(stepPeriods) ; vp.inputVolume = inputVolume ; vp.normsVolume = normsVolume ; return vp ; } template<typename type> void computeNorms(NormalizeLp const & op, type * normsData, type const * inputData, VisitPattern vp) { // Clear norms. memset(normsData, 0, vp.normsVolume * sizeof(type)) ; // Accumulate norm. auto npt = normsData ; auto ipt = inputData ; for (ptrdiff_t i3 = 0 ; i3 < vp.stepPeriods[4] ; ++i3) { for (ptrdiff_t i2 = 0 ; i2 < vp.stepPeriods[3] ; ++i2) { for (ptrdiff_t i1 = 0 ; i1 < vp.stepPeriods[2] ; ++i1) { for (ptrdiff_t i0 = 0 ; i0 < vp.stepPeriods[1] ; ++i0) { *npt += pow(*ipt++, op.exponent) ; npt += vp.steps[0] ; } npt += vp.steps[1] ; } npt += vp.steps[2] ; } npt += vp.steps[3] ; } // Root norm. 
for (ptrdiff_t i = 0 ; i < vp.normsVolume ; ++i) { normsData[i] = pow(normsData[i] + op.epsilon, 1.0/op.exponent) ; } } // ------------------------------------------------------------------- // CPU forward // ------------------------------------------------------------------- template<bool givenNomrs> struct NormAgrument ; template<> struct NormAgrument<true> { typedef vl::Tensor const &type ; } ; template<> struct NormAgrument<false> { typedef vl::Tensor &type ; } ; template<vl::DataType dataType, bool givenNorms> struct NormalizeLpForwardCPU { vl::ErrorCode operator()(vl::nn::NormalizeLp & op, vl::Tensor &output, typename NormAgrument<givenNorms>::type norms, vl::Tensor const &input) { assert(norms || !givenNorms) ; typedef typename vl::DataTypeTraits<dataType>::type type ; auto vp = getVisitPatternForInput(op, input) ; type const * inputData = (type const*)input.getMemory() ; type * normsData ; bool normsDataIsOwner = false ; if (norms) { normsData = (type*)norms.getMemory() ; } else { normsData = new type [vp.normsVolume] ; normsDataIsOwner = true ; } // Compute norm if needed. if (!givenNorms) { computeNorms(op,normsData,inputData,vp) ; } // Divide norm. if (output) { auto npt = normsData ; type * outputData = (type*)output.getMemory() ; for (ptrdiff_t i3 = 0 ; i3 < vp.stepPeriods[4] ; ++i3) { for (ptrdiff_t i2 = 0 ; i2 < vp.stepPeriods[3] ; ++i2) { for (ptrdiff_t i1 = 0 ; i1 < vp.stepPeriods[2] ; ++i1) { for (ptrdiff_t i0 = 0 ; i0 < vp.stepPeriods[1] ; ++i0) { *outputData = *inputData / *npt ; inputData ++ ; outputData ++ ; npt += vp.steps[0] ; } npt += vp.steps[1] ; } npt += vp.steps[2] ; } npt += vp.steps[3] ; } } // Finish. if (normsData && normsDataIsOwner) { delete [] normsData ; } return vl::VLE_Success ; } } ; template<vl::DataType dataType> struct NormalizeLpForward<vl::VLDT_CPU, dataType> : public NormalizeLpForwardCPU<dataType,false> { } ; template<vl::DataType dataType> struct NormalizeLpForwardWithNorms<vl::VLDT_CPU, dataType> : public NormalizeLpForwardCPU<dataType,true> { } ; // ------------------------------------------------------------------- // CPU backward // ------------------------------------------------------------------- template<vl::DataType dataType, bool givenNorms> struct NormalizeLpBackwardCPU { vl::ErrorCode operator()(vl::nn::NormalizeLp &op, vl::Tensor &derInput, typename NormAgrument<givenNorms>::type norms, vl::Tensor const &input, vl::Tensor const& derOutput) { assert(norms || !givenNorms) ; // Compute tensor geometry. typedef typename vl::DataTypeTraits<dataType>::type type ; auto vp = getVisitPatternForInput(op, input) ; auto derInputData = (type*)derInput.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto derOutputData = (type const *)derOutput.getMemory() ; type * normsData ; bool normsDataIsOwner = false ; if (norms) { normsData = (type*)norms.getMemory() ; } else { normsData = new type [vp.normsVolume] ; normsDataIsOwner = true ; } // Compute norms if given. if (!givenNorms) { computeNorms(op,normsData,inputData,vp) ; } // Compute sum(derOutput .* input). 
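// (Editor's note: the scratch buffer filled below accumulates
//     s = sum(derOutput .* input)
// over each normalized group; the final loop then applies the gradient of the
// Lp normalization,
//     derInput = derOutput / n - s * input^(p-1) / n^(p+1),
// with n the group norm and p = op.exponent, matching the forward pass
// output = input / n.)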
type * scratchData = new type [vp.normsVolume] () ; // zeros { auto ipt = inputData ; auto dopt = derOutputData ; auto spt = scratchData ; for (ptrdiff_t i3 = 0 ; i3 < vp.stepPeriods[4] ; ++i3) { for (ptrdiff_t i2 = 0 ; i2 < vp.stepPeriods[3] ; ++i2) { for (ptrdiff_t i1 = 0 ; i1 < vp.stepPeriods[2] ; ++i1) { for (ptrdiff_t i0 = 0 ; i0 < vp.stepPeriods[1] ; ++i0) { *spt += (*ipt) * (*dopt) ; ipt ++ ; dopt ++ ; spt += vp.steps[0] ; } spt += vp.steps[1] ; } spt += vp.steps[2] ; } spt += vp.steps[3] ; } } // Compute derInputs. { auto dipt = derInputData ; auto npt = normsData ; auto ipt = inputData ; auto dopt = derOutputData ; auto spt = scratchData ; for (ptrdiff_t i3 = 0 ; i3 < vp.stepPeriods[4] ; ++i3) { for (ptrdiff_t i2 = 0 ; i2 < vp.stepPeriods[3] ; ++i2) { for (ptrdiff_t i1 = 0 ; i1 < vp.stepPeriods[2] ; ++i1) { for (ptrdiff_t i0 = 0 ; i0 < vp.stepPeriods[1] ; ++i0) { auto n = *npt ; *dipt = (*dopt) / n - (*spt) * pow(*ipt, op.exponent-1) / pow(n,op.exponent+1) ; dipt ++ ; ipt ++ ; dopt ++ ; npt += vp.steps[0] ; spt += vp.steps[0] ; } npt += vp.steps[1] ; spt += vp.steps[1] ; } npt += vp.steps[2] ; spt += vp.steps[2] ; } npt += vp.steps[3] ; spt += vp.steps[3] ; } } // Finish. if (normsData && normsDataIsOwner) { delete [] normsData ; } delete [] scratchData ; return vl::VLE_Success ; } } ; template<vl::DataType dataType> struct NormalizeLpBackward<vl::VLDT_CPU, dataType> : public NormalizeLpBackwardCPU<dataType,false> { } ; template<vl::DataType dataType> struct NormalizeLpBackwardWithNorms<vl::VLDT_CPU, dataType> : public NormalizeLpBackwardCPU<dataType,true> { } ; // ------------------------------------------------------------------- // Driver // ------------------------------------------------------------------- #if ENABLE_GPU #include "nnnormalizelp_gpu.cu" #endif NormalizeLp::NormalizeLp(vl::Context &context, std::vector<int> const& selectedDimensions, double exponent, double epsilon) : context(context), selectedDimensions(selectedDimensions), exponent(exponent), epsilon(epsilon) { } vl::TensorShape NormalizeLp::getNormsShapeForData(vl::Tensor const &data) { vl::TensorShape shape(data) ; int n = shape.getNumDimensions() ; for (int d = 0 ; d < n ; ++d) { bool squashed = (find(selectedDimensions.begin(), selectedDimensions.end(), d) != selectedDimensions.end()) ; if (squashed) { shape.setDimension(d, 1) ; } } return shape ; } vl::ErrorCode NormalizeLp::forward(vl::Tensor &output, vl::Tensor &norms, vl::Tensor const &data) { return dispatch<NormalizeLpForward>()(*this,output,norms,data) ; } vl::ErrorCode NormalizeLp::forwardWithNorms(vl::Tensor &output, vl::Tensor const &norms, vl::Tensor const &data) { return dispatch<NormalizeLpForwardWithNorms>()(*this,output,norms,data) ; } vl::ErrorCode NormalizeLp::backward(vl::Tensor &derData, vl::Tensor &norms, vl::Tensor const &data, vl::Tensor const &derOutput) { return dispatch<NormalizeLpBackward>()(*this,derData,norms,data,derOutput) ; } vl::ErrorCode NormalizeLp::backwardWithNorms(vl::Tensor &derData, vl::Tensor const &norms, vl::Tensor const &data, vl::Tensor const &derOutput) { return dispatch<NormalizeLpBackwardWithNorms>()(*this,derData,norms,data,derOutput) ; }
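// ---------------------------------------------------------------------------
// Illustrative sketch added by the editor (not part of the original source).
// It restates the forward computation of NormalizeLp for the simplest case of
// a single group: output = input / (sum_i input_i^p + epsilon)^(1/p).  This is
// a minimal scalar rendering of NormalizeLpForwardCPU; names are illustrative.
#include <cmath>
#include <vector>

static std::vector<double> normalize_lp_1d(const std::vector<double>& x,
                                           double exponent, double epsilon)
{
  double accum = 0.0;
  for (double v : x) { accum += std::pow(v, exponent); }           // sum v^p
  const double norm = std::pow(accum + epsilon, 1.0 / exponent);   // root norm
  std::vector<double> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) { y[i] = x[i] / norm; }    // divide
  return y;
}
// ---------------------------------------------------------------------------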
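// ---------------------------------------------------------------------------
// Illustrative sketch added by the editor (not part of the original source).
// The file below appears to price an option by Monte Carlo: generate_paths_kernel
// evolves the asset under geometric Brownian motion with one exact log-Euler
// step per timestep,
//     S_{t+dt} = S_t * exp((r - sigma^2/2) * dt + sigma * sqrt(dt) * Z),
// storing the payoff at expiry, and the later kernels build a small regression
// (QR followed by a 3x3 Jacobi SVD) over in-the-money paths.  The host-side
// function below mirrors a single path step; gbm_step is a hypothetical name
// used only for illustration.
#include <cmath>

static double gbm_step(double S, double r, double sigma, double dt, double z)
{
  // Same update as generate_paths_kernel: drift (r - sigma^2/2) * dt plus
  // diffusion sigma * sqrt(dt) * z, where z is a standard normal sample.
  return S * std::exp((r - 0.5 * sigma * sigma) * dt + sigma * std::sqrt(dt) * z);
}
// ---------------------------------------------------------------------------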
#ifdef WITH_FULL_W_MATRIX #define R_W_MATRICES_SMEM_SLOTS 15 #else #define R_W_MATRICES_SMEM_SLOTS 12 #endif #define CHECK_HIP(call) do { \ hipError_t status = call; \ if( status != hipSuccess ) { \ fprintf(stderr, "HIP Error at line %d in %s: %s\n", __LINE__, __FILE__, hipGetErrorString(status)); \ exit((int) status); \ } \ } while(0) #define HOST_DEVICE __host__ __device__ #define HOST_DEVICE_INLINE __host__ __device__ __forceinline__ struct PayoffCall { double m_K; HOST_DEVICE_INLINE PayoffCall(double K) : m_K(K) {} HOST_DEVICE_INLINE double operator()(double S) const { return fmax(S - m_K, 0.0); } HOST_DEVICE_INLINE int is_in_the_money(double S) const { return S > m_K; } }; struct PayoffPut { double m_K; HOST_DEVICE_INLINE PayoffPut(double K) : m_K(K) {} HOST_DEVICE_INLINE double operator()(double S) const { return fmax(m_K - S, 0.0); } HOST_DEVICE_INLINE int is_in_the_money(double S) const { return S < m_K; } }; template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void generate_paths_kernel(int num_timesteps, int num_paths, Payoff payoff, double dt, double S0, double r, double sigma, const double *__restrict samples, double *__restrict paths) { // The path generated by this thread. int path = blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x; // Early exit. if( path >= num_paths ) return; // Compute (r - sigma^2 / 2). const double r_min_half_sigma_sq_dt = (r - 0.5*sigma*sigma)*dt; // Compute sigma*sqrt(dt). const double sigma_sqrt_dt = sigma*sqrt(dt); // Keep the previous price. double S = S0; // The offset. int offset = path; // Each thread generates several timesteps. for( int timestep = 0 ; timestep < num_timesteps-1 ; ++timestep, offset += num_paths ) { S = S * exp(r_min_half_sigma_sq_dt + sigma_sqrt_dt*samples[offset]); paths[offset] = S; } // The asset price. S = S * exp(r_min_half_sigma_sq_dt + sigma_sqrt_dt*samples[offset]); // Store the payoff at expiry. paths[offset] = payoff(S); } static __device__ __forceinline__ void assemble_R(int m, double4 &sums, double *smem_svds) { // Assemble R. double x0 = smem_svds[0]; double x1 = smem_svds[1]; double x2 = smem_svds[2]; double x0_sq = x0 * x0; double sum1 = sums.x - x0; double sum2 = sums.y - x0_sq; double sum3 = sums.z - x0_sq*x0; double sum4 = sums.w - x0_sq*x0_sq; double m_as_dbl = (double) m; double sigma = m_as_dbl - 1.0; double mu = sqrt(m_as_dbl); double v0 = -sigma / (1.0 + mu); double v0_sq = v0*v0; double beta = 2.0 * v0_sq / (sigma + v0_sq); double inv_v0 = 1.0 / v0; double one_min_beta = 1.0 - beta; double beta_div_v0 = beta * inv_v0; smem_svds[0] = mu; smem_svds[1] = one_min_beta*x0 - beta_div_v0*sum1; smem_svds[2] = one_min_beta*x0_sq - beta_div_v0*sum2; // Rank update coefficients. double beta_div_v0_sq = beta_div_v0 * inv_v0; double c1 = beta_div_v0_sq*sum1 + beta_div_v0*x0; double c2 = beta_div_v0_sq*sum2 + beta_div_v0*x0_sq; // 2nd step of QR. double x1_sq = x1*x1; sum1 -= x1; sum2 -= x1_sq; sum3 -= x1_sq*x1; sum4 -= x1_sq*x1_sq; x0 = x1-c1; x0_sq = x0*x0; sigma = sum2 - 2.0*c1*sum1 + (m_as_dbl-2.0)*c1*c1; if( abs(sigma) < 1.0e-16 ) beta = 0.0; else { mu = sqrt(x0_sq + sigma); if( x0 <= 0.0 ) v0 = x0 - mu; else v0 = -sigma / (x0 + mu); v0_sq = v0*v0; beta = 2.0*v0_sq / (sigma + v0_sq); } inv_v0 = 1.0 / v0; beta_div_v0 = beta * inv_v0; // The coefficient to perform the rank update. 
double c3 = (sum3 - c1*sum2 - c2*sum1 + (m_as_dbl-2.0)*c1*c2)*beta_div_v0; double c4 = (x1_sq-c2)*beta_div_v0 + c3*inv_v0; double c5 = c1*c4 - c2; one_min_beta = 1.0 - beta; // Update R. smem_svds[3] = one_min_beta*x0 - beta_div_v0*sigma; smem_svds[4] = one_min_beta*(x1_sq-c2) - c3; // 3rd step of QR. double x2_sq = x2*x2; sum1 -= x2; sum2 -= x2_sq; sum3 -= x2_sq*x2; sum4 -= x2_sq*x2_sq; x0 = x2_sq-c4*x2+c5; sigma = sum4 - 2.0*c4*sum3 + (c4*c4 + 2.0*c5)*sum2 - 2.0*c4*c5*sum1 + (m_as_dbl-3.0)*c5*c5; if( abs(sigma) < 1.0e-12 ) beta = 0.0; else { mu = sqrt(x0*x0 + sigma); if( x0 <= 0.0 ) v0 = x0 - mu; else v0 = -sigma / (x0 + mu); v0_sq = v0*v0; beta = 2.0*v0_sq / (sigma + v0_sq); } // Update R. smem_svds[5] = (1.0-beta)*x0 - (beta/v0)*sigma; } static __device__ double off_diag_norm(double A01, double A02, double A12) { return sqrt(2.0 * (A01*A01 + A02*A02 + A12*A12)); } static __device__ __forceinline__ void swap(double &x, double &y) { double t = x; x = y; y = t; } static __device__ __forceinline__ void svd_3x3(int m, double4 &sums, double *smem_svds) { // Assemble the R matrix. assemble_R(m, sums, smem_svds); // The matrix R. double R00 = smem_svds[0]; double R01 = smem_svds[1]; double R02 = smem_svds[2]; double R11 = smem_svds[3]; double R12 = smem_svds[4]; double R22 = smem_svds[5]; // We compute the eigenvalues/eigenvectors of A = R^T R. double A00 = R00*R00; double A01 = R00*R01; double A02 = R00*R02; double A11 = R01*R01 + R11*R11; double A12 = R01*R02 + R11*R12; double A22 = R02*R02 + R12*R12 + R22*R22; // We keep track of V since A = Sigma^2 V. Each thread stores a row of V. double V00 = 1.0, V01 = 0.0, V02 = 0.0; double V10 = 0.0, V11 = 1.0, V12 = 0.0; double V20 = 0.0, V21 = 0.0, V22 = 1.0; // The Jacobi algorithm is iterative. We fix the max number of iter and the minimum tolerance. const int max_iters = 16; const double tolerance = 1.0e-12; // Iterate until we reach the max number of iters or the tolerance. for( int iter = 0 ; off_diag_norm(A01, A02, A12) >= tolerance && iter < max_iters ; ++iter ) { double c, s, B00, B01, B02, B10, B11, B12, B20, B21, B22; // Compute the Jacobi matrix for p=0 and q=1. c = 1.0, s = 0.0; if( A01 != 0.0 ) { double tau = (A11 - A00) / (2.0 * A01); double sgn = tau < 0.0 ? -1.0 : 1.0; double t = sgn / (sgn*tau + sqrt(1.0 + tau*tau)); c = 1.0 / sqrt(1.0 + t*t); s = t*c; } // Update A = J^T A J and V = V J. B00 = c*A00 - s*A01; B01 = s*A00 + c*A01; B10 = c*A01 - s*A11; B11 = s*A01 + c*A11; B02 = A02; A00 = c*B00 - s*B10; A01 = c*B01 - s*B11; A11 = s*B01 + c*B11; A02 = c*B02 - s*A12; A12 = s*B02 + c*A12; B00 = c*V00 - s*V01; V01 = s*V00 + c*V01; V00 = B00; B10 = c*V10 - s*V11; V11 = s*V10 + c*V11; V10 = B10; B20 = c*V20 - s*V21; V21 = s*V20 + c*V21; V20 = B20; // Compute the Jacobi matrix for p=0 and q=2. c = 1.0, s = 0.0; if( A02 != 0.0 ) { double tau = (A22 - A00) / (2.0 * A02); double sgn = tau < 0.0 ? -1.0 : 1.0; double t = sgn / (sgn*tau + sqrt(1.0 + tau*tau)); c = 1.0 / sqrt(1.0 + t*t); s = t*c; } // Update A = J^T A J and V = V J. B00 = c*A00 - s*A02; B01 = c*A01 - s*A12; B02 = s*A00 + c*A02; B20 = c*A02 - s*A22; B22 = s*A02 + c*A22; A00 = c*B00 - s*B20; A12 = s*A01 + c*A12; A02 = c*B02 - s*B22; A22 = s*B02 + c*B22; A01 = B01; B00 = c*V00 - s*V02; V02 = s*V00 + c*V02; V00 = B00; B10 = c*V10 - s*V12; V12 = s*V10 + c*V12; V10 = B10; B20 = c*V20 - s*V22; V22 = s*V20 + c*V22; V20 = B20; // Compute the Jacobi matrix for p=1 and q=2. c = 1.0, s = 0.0; if( A12 != 0.0 ) { double tau = (A22 - A11) / (2.0 * A12); double sgn = tau < 0.0 ? 
-1.0 : 1.0; double t = sgn / (sgn*tau + sqrt(1.0 + tau*tau)); c = 1.0 / sqrt(1.0 + t*t); s = t*c; } // Update A = J^T A J and V = V J. B02 = s*A01 + c*A02; B11 = c*A11 - s*A12; B12 = s*A11 + c*A12; B21 = c*A12 - s*A22; B22 = s*A12 + c*A22; A01 = c*A01 - s*A02; A02 = B02; A11 = c*B11 - s*B21; A12 = c*B12 - s*B22; A22 = s*B12 + c*B22; B01 = c*V01 - s*V02; V02 = s*V01 + c*V02; V01 = B01; B11 = c*V11 - s*V12; V12 = s*V11 + c*V12; V11 = B11; B21 = c*V21 - s*V22; V22 = s*V21 + c*V22; V21 = B21; } // Swap the columns to have S[0] >= S[1] >= S[2]. if( A00 < A11 ) { swap(A00, A11); swap(V00, V01); swap(V10, V11); swap(V20, V21); } if( A00 < A22 ) { swap(A00, A22); swap(V00, V02); swap(V10, V12); swap(V20, V22); } if( A11 < A22 ) { swap(A11, A22); swap(V01, V02); swap(V11, V12); swap(V21, V22); } //printf("timestep=%3d, svd0=%.8lf svd1=%.8lf svd2=%.8lf\n", blockIdx.x, sqrt(A00), sqrt(A11), sqrt(A22)); // Invert the diagonal terms and compute V*S^-1. double inv_S0 = abs(A00) < 1.0e-12 ? 0.0 : 1.0 / A00; double inv_S1 = abs(A11) < 1.0e-12 ? 0.0 : 1.0 / A11; double inv_S2 = abs(A22) < 1.0e-12 ? 0.0 : 1.0 / A22; // printf("SVD: timestep=%3d %12.8lf %12.8lf %12.8lf\n", blockIdx.x, sqrt(A00), sqrt(A11), sqrt(A22)); double U00 = V00 * inv_S0; double U01 = V01 * inv_S1; double U02 = V02 * inv_S2; double U10 = V10 * inv_S0; double U11 = V11 * inv_S1; double U12 = V12 * inv_S2; double U20 = V20 * inv_S0; double U21 = V21 * inv_S1; double U22 = V22 * inv_S2; // Compute V*S^-1*V^T*R^T. #ifdef WITH_FULL_W_MATRIX double B00 = U00*V00 + U01*V01 + U02*V02; double B01 = U00*V10 + U01*V11 + U02*V12; double B02 = U00*V20 + U01*V21 + U02*V22; double B10 = U10*V00 + U11*V01 + U12*V02; double B11 = U10*V10 + U11*V11 + U12*V12; double B12 = U10*V20 + U11*V21 + U12*V22; double B20 = U20*V00 + U21*V01 + U22*V02; double B21 = U20*V10 + U21*V11 + U22*V12; double B22 = U20*V20 + U21*V21 + U22*V22; smem_svds[ 6] = B00*R00 + B01*R01 + B02*R02; smem_svds[ 7] = B01*R11 + B02*R12; smem_svds[ 8] = B02*R22; smem_svds[ 9] = B10*R00 + B11*R01 + B12*R02; smem_svds[10] = B11*R11 + B12*R12; smem_svds[11] = B12*R22; smem_svds[12] = B20*R00 + B21*R01 + B22*R02; smem_svds[13] = B21*R11 + B22*R12; smem_svds[14] = B22*R22; #else double B00 = U00*V00 + U01*V01 + U02*V02; double B01 = U00*V10 + U01*V11 + U02*V12; double B02 = U00*V20 + U01*V21 + U02*V22; double B11 = U10*V10 + U11*V11 + U12*V12; double B12 = U10*V20 + U11*V21 + U12*V22; double B22 = U20*V20 + U21*V21 + U22*V22; smem_svds[ 6] = B00*R00 + B01*R01 + B02*R02; smem_svds[ 7] = B01*R11 + B02*R12; smem_svds[ 8] = B02*R22; smem_svds[ 9] = B11*R11 + B12*R12; smem_svds[10] = B12*R22; smem_svds[11] = B22*R22; #endif } template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK, 4) void prepare_svd_kernel(int num_paths, int min_in_the_money, Payoff payoff, const double *__restrict paths, int *__restrict all_out_of_the_money, double *__restrict svds) { // We need to perform a scan to find the first 3 stocks pay off. __shared__ int scan_input[NUM_THREADS_PER_BLOCK]; __shared__ int scan_output[1+NUM_THREADS_PER_BLOCK]; // sum reduction __shared__ double4 lsums; __shared__ int lsum; // Shared buffer for the ouput. __shared__ double smem_svds[R_W_MATRICES_SMEM_SLOTS]; // Each block works on a single timestep. const int timestep = blockIdx.x; // The timestep offset. const int offset = timestep * num_paths; // Sums. int m = 0; double4 sums = make_double4(0,0,0,0); // Initialize the shared memory. DBL_MAX is a marker to specify that the value is invalid. 
if( threadIdx.x < R_W_MATRICES_SMEM_SLOTS ) smem_svds[threadIdx.x] = 0.0; __syncthreads(); // Have we already found our 3 first paths which pay off. int found_paths = 0; // Iterate over the paths. for( int path = threadIdx.x ; path < num_paths ; path += NUM_THREADS_PER_BLOCK ) { // Load the asset price to determine if it pays off. double S = paths[offset + path]; // Check if it pays off. const int in_the_money = payoff.is_in_the_money(S); // Try to check if we have found the 3 first stocks. scan_input[threadIdx.x] = in_the_money; __syncthreads(); if (threadIdx.x == 0) { scan_output[0] = 0; for (int i = 1; i <= NUM_THREADS_PER_BLOCK; i++) scan_output[i] = scan_output[i-1]+scan_input[i-1]; } __syncthreads(); const int partial_sum = scan_output[threadIdx.x]; const int total_sum = scan_output[NUM_THREADS_PER_BLOCK]; if( found_paths < 3 ) { if( in_the_money && found_paths + partial_sum < 3 ) smem_svds[found_paths + partial_sum] = S; __syncthreads(); found_paths += total_sum; } // Early continue if no item pays off. if (threadIdx.x == 0) lsum = 0; __syncthreads(); atomicOr(&lsum, in_the_money); __syncthreads(); if (lsum == 0) continue; // Update the number of payoff items. m += in_the_money; // The "normalized" value. double x = 0.0, x_sq = 0.0; if( in_the_money ) { x = S; x_sq = S*S; } // Compute the 4 sums. sums.x += x; sums.y += x_sq; sums.z += x_sq*x; sums.w += x_sq*x_sq; } // Compute the final reductions. if (threadIdx.x == 0) lsum = 0; __syncthreads(); atomicAdd(&lsum, m); __syncthreads(); int not_enough_paths = 0; // Do we all exit? if (threadIdx.x == 0 && lsum < min_in_the_money) not_enough_paths = 1; // Early exit if no path is in the money. if( not_enough_paths ) { if( threadIdx.x == 0 ) all_out_of_the_money[blockIdx.x] = 1; } else { // Compute the final reductions. if (threadIdx.x == 0) lsums = make_double4(0,0,0,0); __syncthreads(); atomicAdd(&lsums.x, sums.x); atomicAdd(&lsums.y, sums.y); atomicAdd(&lsums.z, sums.z); atomicAdd(&lsums.w, sums.w); __syncthreads(); // The 1st thread has everything he needs to build R from the QR decomposition. if( threadIdx.x == 0 ) svd_3x3(lsum, lsums, smem_svds); __syncthreads(); // Store the final results. if( threadIdx.x < R_W_MATRICES_SMEM_SLOTS ) svds[16*blockIdx.x + threadIdx.x] = smem_svds[threadIdx.x]; } } template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK, 8) void compute_partial_beta_kernel(int num_paths, Payoff payoff, const double *__restrict svd, const double *__restrict paths, const double *__restrict cashflows, const int *__restrict all_out_of_the_money, double *__restrict partial_sums) { // The shared memory storage. __shared__ double3 lsums; // The shared memory to store the SVD. __shared__ double shared_svd[R_W_MATRICES_SMEM_SLOTS]; // Early exit if needed. if( *all_out_of_the_money ) return; // The number of threads per grid. const int NUM_THREADS_PER_GRID = NUM_THREADS_PER_BLOCK * gridDim.x; // The 1st threads loads the matrices SVD and R. if( threadIdx.x < R_W_MATRICES_SMEM_SLOTS ) shared_svd[threadIdx.x] = svd[threadIdx.x]; __syncthreads(); // Load the terms of R. const double R00 = shared_svd[ 0]; const double R01 = shared_svd[ 1]; const double R02 = shared_svd[ 2]; const double R11 = shared_svd[ 3]; const double R12 = shared_svd[ 4]; const double R22 = shared_svd[ 5]; // Load the elements of W. 
#ifdef WITH_FULL_W_MATRIX const double W00 = shared_svd[ 6]; const double W01 = shared_svd[ 7]; const double W02 = shared_svd[ 8]; const double W10 = shared_svd[ 9]; const double W11 = shared_svd[10]; const double W12 = shared_svd[11]; const double W20 = shared_svd[12]; const double W21 = shared_svd[13]; const double W22 = shared_svd[14]; #else const double W00 = shared_svd[ 6]; const double W01 = shared_svd[ 7]; const double W02 = shared_svd[ 8]; const double W11 = shared_svd[ 9]; const double W12 = shared_svd[10]; const double W22 = shared_svd[11]; #endif // Invert the diagonal of R. const double inv_R00 = R00 != 0.0 ? __drcp_rn(R00) : 0.0; const double inv_R11 = R11 != 0.0 ? __drcp_rn(R11) : 0.0; const double inv_R22 = R22 != 0.0 ? __drcp_rn(R22) : 0.0; // Precompute the R terms. const double inv_R01 = inv_R00*inv_R11*R01; const double inv_R02 = inv_R00*inv_R22*R02; const double inv_R12 = inv_R22*R12; // Precompute W00/R00. #ifdef WITH_FULL_W_MATRIX const double inv_W00 = W00*inv_R00; const double inv_W10 = W10*inv_R00; const double inv_W20 = W20*inv_R00; #else const double inv_W00 = W00*inv_R00; #endif // Each thread has 3 numbers to sum. double beta0 = 0.0, beta1 = 0.0, beta2 = 0.0; // Iterate over the paths. for( int path = blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x ; path < num_paths ; path += NUM_THREADS_PER_GRID ) { // Threads load the asset price to rebuild Q from the QR decomposition. double S = paths[path]; // Is the path in the money? const int in_the_money = payoff.is_in_the_money(S); // Compute Qis. The elements of the Q matrix in the QR decomposition. double Q1i = inv_R11*S - inv_R01; double Q2i = inv_R22*S*S - inv_R02 - Q1i*inv_R12; // Compute the ith row of the pseudo-inverse of [1 X X^2]. #ifdef WITH_FULL_W_MATRIX const double WI0 = inv_W00 + W01 * Q1i + W02 * Q2i; const double WI1 = inv_W10 + W11 * Q1i + W12 * Q2i; const double WI2 = inv_W20 + W21 * Q1i + W22 * Q2i; #else const double WI0 = inv_W00 + W01 * Q1i + W02 * Q2i; const double WI1 = W11 * Q1i + W12 * Q2i; const double WI2 = W22 * Q2i; #endif // Each thread loads its element from the Y vector. double cashflow = in_the_money ? cashflows[path] : 0.0; // Update beta. beta0 += WI0*cashflow; beta1 += WI1*cashflow; beta2 += WI2*cashflow; } // Compute the sum of the elements in the block. if( threadIdx.x == 0 ) lsums = make_double3(0,0,0); __syncthreads(); atomicAdd(&lsums.x, beta0); atomicAdd(&lsums.y, beta1); atomicAdd(&lsums.z, beta2); __syncthreads(); // The 1st thread stores the result to GMEM. if( threadIdx.x == 0 ) { partial_sums[0*NUM_THREADS_PER_BLOCK + blockIdx.x] = lsums.x; partial_sums[1*NUM_THREADS_PER_BLOCK + blockIdx.x] = lsums.y; partial_sums[2*NUM_THREADS_PER_BLOCK + blockIdx.x] = lsums.z; } } template< int NUM_THREADS_PER_BLOCK > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void compute_final_beta_kernel(const int *__restrict all_out_of_the_money, double *__restrict beta) { // The shared memory for the reduction. __shared__ double3 lsums; // Early exit if needed. if( *all_out_of_the_money ) { if( threadIdx.x < 3 ) beta[threadIdx.x] = 0.0; return; } // The final sums. double3 sums; // We load the elements. sums.x = beta[0*NUM_THREADS_PER_BLOCK + threadIdx.x]; sums.y = beta[1*NUM_THREADS_PER_BLOCK + threadIdx.x]; sums.z = beta[2*NUM_THREADS_PER_BLOCK + threadIdx.x]; // Compute the sums. if( threadIdx.x == 0 ) lsums = make_double3(0,0,0); __syncthreads(); atomicAdd(&lsums.x, sums.x); atomicAdd(&lsums.y, sums.y); atomicAdd(&lsums.z, sums.z); __syncthreads(); // Store beta. 
if( threadIdx.x == 0 ) { //printf("beta0=%.8lf beta1=%.8lf beta2=%.8lf\n", sums.x, sums.y, sums.z); beta[0] = lsums.x; beta[1] = lsums.y; beta[2] = lsums.z; } } // assumes beta has been built either by compute_final_beta_kernel or // by atomic operations at the end of compute_partial_beta_kernel. template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void update_cashflow_kernel(int num_paths, Payoff payoff_object, double exp_min_r_dt, const double *__restrict beta, const double *__restrict paths, const int *__restrict all_out_of_the_money, double *__restrict cashflows) { const int NUM_THREADS_PER_GRID = gridDim.x * NUM_THREADS_PER_BLOCK; // Are we going to skip the computations. const int skip_computations = *all_out_of_the_money; // Load the beta coefficients for the linear regression. const double beta0 = beta[0]; const double beta1 = beta[1]; const double beta2 = beta[2]; // Iterate over the paths. int path = blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x; for( ; path < num_paths ; path += NUM_THREADS_PER_GRID ) { // The cashflow. const double old_cashflow = exp_min_r_dt*cashflows[path]; if( skip_computations ) { cashflows[path] = old_cashflow; continue; } // Load the asset price. double S = paths[path]; double S2 = S*S; // The payoff. double payoff = payoff_object(S); // Compute the estimated payoff from continuing. double estimated_payoff = beta0 + beta1*S + beta2*S2; // Discount the payoff because we did not take it into account for beta. estimated_payoff *= exp_min_r_dt; // Update the payoff. if( payoff <= 1.0e-8 || payoff <= estimated_payoff ) payoff = old_cashflow; // Store the updated cashflow. cashflows[path] = payoff; } } template< int NUM_THREADS_PER_BLOCK > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void compute_partial_sums_kernel(int num_paths, const double *__restrict cashflows, double *__restrict sums) { // Shared memory to compute the final sum. __shared__ double lsum; // Each thread works on a single path. const int path = blockIdx.x * NUM_THREADS_PER_BLOCK + threadIdx.x; // Load the final sum. double sum = 0.0; if( path < num_paths ) sum = cashflows[path]; // Compute the sum over the block. if (threadIdx.x == 0) lsum = 0; __syncthreads(); atomicAdd(&lsum, sum); __syncthreads(); // The block leader writes the sum to GMEM. if( threadIdx.x == 0 ) sums[blockIdx.x] = lsum; } template< int NUM_THREADS_PER_BLOCK > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void compute_final_sum_kernel(int num_paths, int num_blocks, double exp_min_r_dt, double *__restrict sums) { // Shared memory to compute the final sum. __shared__ double lsum; // The sum. double sum = 0.0; for( int item = threadIdx.x ; item < num_blocks ; item += NUM_THREADS_PER_BLOCK ) sum += sums[item]; // Compute the sum over the block. if (threadIdx.x == 0) lsum = 0; __syncthreads(); atomicAdd(&lsum, sum); __syncthreads(); // The block leader writes the sum to GMEM. if( threadIdx.x == 0 ) { sums[0] = exp_min_r_dt * lsum / (double) num_paths; } } template< typename Payoff > static inline void do_run(double *h_samples, int num_timesteps, int num_paths, const Payoff &payoff, double dt, double S0, double r, double sigma, double *d_samples, double *d_paths, double *d_cashflows, double *d_svds, int *d_all_out_of_the_money, double *d_temp_storage, double *h_price) { CHECK_HIP(hipMemcpy(d_samples, h_samples, sizeof(double) * num_timesteps*num_paths, hipMemcpyHostToDevice)); // Generate asset prices. 
const int NUM_THREADS_PER_BLOCK0 = 256; int grid_dim = (num_paths + NUM_THREADS_PER_BLOCK0-1) / NUM_THREADS_PER_BLOCK0; hipLaunchKernelGGL(HIP_KERNEL_NAME(generate_paths_kernel<NUM_THREADS_PER_BLOCK0>), dim3(grid_dim), dim3(NUM_THREADS_PER_BLOCK0), 0, 0, num_timesteps, num_paths, payoff, dt, S0, r, sigma, d_samples, d_paths); CHECK_HIP(hipGetLastError()); // Reset the all_out_of_the_money array. CHECK_HIP(hipMemsetAsync(d_all_out_of_the_money, 0, num_timesteps*sizeof(int))); // Prepare the SVDs. const int NUM_THREADS_PER_BLOCK1 = 256; hipLaunchKernelGGL(HIP_KERNEL_NAME(prepare_svd_kernel<NUM_THREADS_PER_BLOCK1>), dim3(num_timesteps-1), dim3(NUM_THREADS_PER_BLOCK1), 0, 0, num_paths, 4, //1024, payoff, d_paths, d_all_out_of_the_money, d_svds); CHECK_HIP(hipGetLastError()); // The constant to discount the payoffs. const double exp_min_r_dt = std::exp(-r*dt); // Estimate the number of blocks in a wave of update_cashflow. hipDeviceProp_t properties; int device = 0; CHECK_HIP(hipGetDevice(&device)); CHECK_HIP(hipGetDeviceProperties(&properties, device)); // Number of threads per wave at fully occupancy. const int num_threads_per_wave_full_occupancy = 256 * 112; // Enable 8B mode for SMEM. const int NUM_THREADS_PER_BLOCK2 = 128; // Update the cashflows. grid_dim = (num_paths + NUM_THREADS_PER_BLOCK2-1) / NUM_THREADS_PER_BLOCK2; double num_waves = grid_dim*NUM_THREADS_PER_BLOCK2 / (double) num_threads_per_wave_full_occupancy; int update_cashflow_grid = grid_dim; if( num_waves < 10 && num_waves - (int) num_waves < 0.6 ) update_cashflow_grid = std::max(1, (int) num_waves) * num_threads_per_wave_full_occupancy / NUM_THREADS_PER_BLOCK2; // Run the main loop. for( int timestep = num_timesteps-2 ; timestep >= 0 ; --timestep ) { // Compute beta (two kernels) for that timestep. hipLaunchKernelGGL(HIP_KERNEL_NAME(compute_partial_beta_kernel<NUM_THREADS_PER_BLOCK2>), dim3(NUM_THREADS_PER_BLOCK2), dim3(NUM_THREADS_PER_BLOCK2), 0, 0, num_paths, payoff, d_svds + 16*timestep, d_paths + timestep*num_paths, d_cashflows, d_all_out_of_the_money + timestep, d_temp_storage); CHECK_HIP(hipGetLastError()); hipLaunchKernelGGL(HIP_KERNEL_NAME(compute_final_beta_kernel<NUM_THREADS_PER_BLOCK2>), dim3(1), dim3(NUM_THREADS_PER_BLOCK2), 0, 0, d_all_out_of_the_money + timestep, d_temp_storage); CHECK_HIP(hipGetLastError()); hipLaunchKernelGGL(HIP_KERNEL_NAME(update_cashflow_kernel<NUM_THREADS_PER_BLOCK2>), dim3(update_cashflow_grid), dim3(NUM_THREADS_PER_BLOCK2), 0, 0, num_paths, payoff, exp_min_r_dt, d_temp_storage, d_paths + timestep*num_paths, d_all_out_of_the_money + timestep, d_cashflows); CHECK_HIP(hipGetLastError()); } // Compute the final sum. const int NUM_THREADS_PER_BLOCK4 = 128; grid_dim = (num_paths + NUM_THREADS_PER_BLOCK4-1) / NUM_THREADS_PER_BLOCK4; hipLaunchKernelGGL(HIP_KERNEL_NAME(compute_partial_sums_kernel<NUM_THREADS_PER_BLOCK4>), dim3(grid_dim), dim3(NUM_THREADS_PER_BLOCK4), 0, 0, num_paths, d_cashflows, d_temp_storage); CHECK_HIP(hipGetLastError()); hipLaunchKernelGGL(HIP_KERNEL_NAME(compute_final_sum_kernel<NUM_THREADS_PER_BLOCK4>), dim3(1), dim3(NUM_THREADS_PER_BLOCK4), 0, 0, num_paths, grid_dim, exp_min_r_dt, d_temp_storage); CHECK_HIP(hipGetLastError()); // Copy the result to the host. 
CHECK_HIP(hipMemcpy(h_price, d_temp_storage, sizeof(double), hipMemcpyDeviceToHost)); } template< typename Payoff > static double binomial_tree(int num_timesteps, const Payoff &payoff, double dt, double S0, double r, double sigma) { double *tree = new double[num_timesteps+1]; double u = std::exp( sigma * std::sqrt(dt)); double d = std::exp(-sigma * std::sqrt(dt)); double a = std::exp( r * dt); double p = (a - d) / (u - d); double k = std::pow(d, num_timesteps); for( int t = 0 ; t <= num_timesteps ; ++t ) { tree[t] = payoff(S0*k); k *= u*u; } for( int t = num_timesteps-1 ; t >= 0 ; --t ) { k = std::pow(d, t); for( int i = 0 ; i <= t ; ++i ) { double expected = std::exp(-r*dt) * (p*tree[i+1] + (1.0 - p)*tree[i]); double earlyex = payoff(S0*k); tree[i] = std::max(earlyex, expected); k *= u*u; } } double f = tree[0]; delete[] tree; return f; } // Calculate the standard normal cumulative distribution function inline double normcdf (double x) { return (1.0 + erf(x / sqrt(2.0))) / 2.0; } static double black_scholes_merton_put(double T, double K, double S0, double r, double sigma) { double d1 = (std::log(S0 / K) + (r + 0.5*sigma*sigma)*T) / (sigma*std::sqrt(T)); double d2 = d1 - sigma*std::sqrt(T); return K*std::exp(-r*T)*normcdf(-d2) - S0*normcdf(-d1); } static double black_scholes_merton_call(double T, double K, double S0, double r, double sigma) { double d1 = (std::log(S0 / K) + (r + 0.5*sigma*sigma)*T) / (sigma*std::sqrt(T)); double d2 = d1 - sigma*std::sqrt(T); return S0*normcdf(d1) - K*std::exp(-r*T)*normcdf(d2); } int main(int argc, char **argv) { const int MAX_GRID_SIZE = 2048; // Simulation parameters. int num_timesteps = 100; int num_paths = 32; int num_runs = 1; // Option parameters. double T = 1.00; double K = 4.00; double S0 = 3.60; double r = 0.06; double sigma = 0.20; // Bool do we price a put or a call. bool price_put = true; // Read command-line options. for( int i = 1 ; i < argc ; ++i ) { if( !strcmp(argv[i], "-timesteps") ) num_timesteps = strtol(argv[++i], NULL, 10); else if( !strcmp(argv[i], "-paths") ) num_paths = strtol(argv[++i], NULL, 10); else if( !strcmp(argv[i], "-runs") ) num_runs = strtol(argv[++i], NULL, 10); else if( !strcmp(argv[i], "-T") ) T = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-S0") ) S0 = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-K") ) K = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-r") ) r = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-sigma") ) sigma = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-call") ) price_put = false; else { fprintf(stderr, "Unknown option %s. Aborting!!!\n", argv[i]); exit(1); } } // Print the arguments. printf("==============\n"); printf("Num Timesteps : %d\n", num_timesteps); printf("Num Paths : %dK\n", num_paths); printf("Num Runs : %d\n", num_runs); printf("T : %lf\n", T); printf("S0 : %lf\n", S0); printf("K : %lf\n", K); printf("r : %lf\n", r); printf("sigma : %lf\n", sigma); printf("Option Type : American %s\n", price_put ? "Put" : "Call"); // We want x1024 paths. num_paths *= 1024; // A timestep. double dt = T / num_timesteps; // Generate random samples on a host std::default_random_engine rng; std::normal_distribution<double> norm_dist(0.0, 1.0); double *h_samples = (double*) malloc (num_timesteps*num_paths*sizeof(double)); // Memory on the GPU to store normally distributed random numbers. double *d_samples = NULL; CHECK_HIP(hipMalloc((void**) &d_samples, num_timesteps*num_paths*sizeof(double))); // Memory on the GPU to store the asset price along the paths. 
The last column contains the discounted payoffs. double *d_paths = NULL; CHECK_HIP(hipMalloc((void**) &d_paths, num_timesteps*num_paths*sizeof(double))); // The discounted payoffs are the last column. double *d_cashflows = d_paths + (num_timesteps-1)*num_paths; // Storage to keep intermediate SVD matrices. double *d_svds = NULL; CHECK_HIP(hipMalloc((void**) &d_svds, 16*num_timesteps*sizeof(double))); // Memory on the GPU to flag timesteps where no path is in the money. int *d_all_out_of_the_money = NULL; CHECK_HIP(hipMalloc((void**) &d_all_out_of_the_money, num_timesteps*sizeof(int))); // Memory on the GPU to compute the reductions (beta and the option price). int max_temp_storage = 4*MAX_GRID_SIZE; double *d_temp_storage = NULL; CHECK_HIP(hipMalloc((void**) &d_temp_storage, max_temp_storage*sizeof(double))); // The price on the host. double h_price; // time the do_run function float total_elapsed_time = 0; for( int run = 0; run < num_runs; ++run ) { for (int i = 0; i < num_timesteps*num_paths; ++i) h_samples[i] = norm_dist(rng); auto start = std::chrono::high_resolution_clock::now(); if( price_put ) do_run(h_samples, num_timesteps, num_paths, PayoffPut(K), dt, S0, r, sigma, d_samples, d_paths, d_cashflows, d_svds, d_all_out_of_the_money, d_temp_storage, &h_price); else do_run(h_samples, num_timesteps, num_paths, PayoffCall(K), dt, S0, r, sigma, d_samples, d_paths, d_cashflows, d_svds, d_all_out_of_the_money, d_temp_storage, &h_price); auto end = std::chrono::high_resolution_clock::now(); const float elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(); total_elapsed_time += elapsed_time; } printf("==============\n"); printf("GPU Longstaff-Schwartz: %.8lf\n", h_price); double price = 0.0; if( price_put ) price = binomial_tree(num_timesteps, PayoffPut(K), dt, S0, r, sigma); else price = binomial_tree(num_timesteps, PayoffCall(K), dt, S0, r, sigma); printf("Binomial : %.8lf\n", price); if( price_put ) price = black_scholes_merton_put(T, K, S0, r, sigma); else price = black_scholes_merton_call(T, K, S0, r, sigma); printf("European Price : %.8lf\n", price); printf("==============\n"); printf("elapsed time for each run : %.3fms\n", total_elapsed_time / num_runs); printf("==============\n"); // Release memory free(h_samples); CHECK_HIP(hipFree(d_temp_storage)); CHECK_HIP(hipFree(d_all_out_of_the_money)); CHECK_HIP(hipFree(d_svds)); CHECK_HIP(hipFree(d_paths)); CHECK_HIP(hipFree(d_samples)); return 0; }
THC_API void THCTensor_(calculateMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position) { THAssert(THCTensor_(isContiguous)(state, input)); // Because the input is contiguous, we want to get a reference to the // location of the buffer at the innermost dimension that we are going // to calculate the mode for --> we do this by manually doing the stride // calculations to get an offset real *data = THCTensor_(data)(state, input); for (int i = 0; i < THLongStorage_size(position); ++i) { data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i); } long nElement = THCTensor_(size)(state, input, THCTensor_(nDimension)(state, input) - 1); THCThrustAllocator thrustAlloc(state); // Wrap input data, sortBuffer, in Thrust device vectors thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data); thrust::device_vector<real> iter(vecPtr, vecPtr + nElement); thrust::device_ptr<long> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer)); thrust::device_vector<long> seq(sbPtr, sbPtr + nElement); // Fill sortBuffer with [0, 1, 2, ... nElement - 1] thrust::sequence( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif seq.begin(), seq.end()); // Sort the input data. The original indices of the data are stored in seq thrust::sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), seq.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfLess() #endif ); // Count # of unique elements via an inner product between adjacent elements. // Add 1 if two neighboring element are not equal. 
int unique = 1 + thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(), #if defined(THC_REAL_IS_HALF) ThrustHalfNotEqualTo() #else thrust::not_equal_to<real>() #endif ); // Count frequency of each element thrust::device_vector<real> keys(unique); thrust::device_vector<int> counts(unique); thrust::reduce_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), thrust::constant_iterator<int>(1), keys.begin(), counts.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfEqualTo() #endif ); // Find index of maximum count thrust::device_vector<int>::iterator it = thrust::max_element( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif counts.begin(), counts.end()); real mode = keys[it - counts.begin()]; // Find first index within which it occurs #if defined(THC_REAL_IS_HALF) thrust::device_vector<real>::iterator positionIter = thrust::find_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode)); #else thrust::device_vector<real>::iterator positionIter = thrust::find( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), mode); #endif THAssert(positionIter != iter.end()); long index = TH_INDEX_BASE + seq[positionIter - iter.begin()]; // Place mode, index in output ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values); long indicesOffset = THCudaLongTensor_storageOffset(state, indices); for (int i = 0; i < THLongStorage_size(position); ++i) { long pos = THLongStorage_data(position)[i]; valuesOffset += THCTensor_(stride)(state, values, i) * pos; indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos; } THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode); THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index); } // this probably could be a loop, not a recursive algorithm THC_API void THCTensor_(dimApplyMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position, int curDim) { long ndim = THCTensor_(nDimension)(state, input); // Because we have transposed the Tensor, the data for the dimension we are mode'ing along // is always in the innermost dimension if (curDim == ndim - 1) { THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position); } else { // Loop through the values and recurse for (int i = 0; i < THCTensor_(size)(state, input, curDim); ++i) { position->data[curDim] = i; THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1); } } } #define MAX_GRID_SIZE 65535 #define MAX_BLOCK_SIZE 1024 THC_API void THCTensor_(mode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, int dimension, int keepdim) { THLongStorage *dim; THCTensor *transposed, *contiguous, *valuesTransposed; THLongStorage *position; THCudaLongStorage *sortBuffer; THCudaLongTensor *indicesTransposed; long ndim, sliceSize, slices; THAssert(THCTensor_(checkGPU)(state, 1, values)); // Verify they are 
asking for a valid dimension ndim = THCTensor_(nDimension)(state, input); THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds"); sliceSize = THCTensor_(size)(state, input, dimension); slices = THCTensor_(nElement)(state, input) / sliceSize; // Resize output value, index Tensors to appropriate sizes (i.e. the same as // the input Tensor, except at dim=dimension, the size is 1) dim = THCTensor_(newSizeOf)(state, input); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, values, dim, NULL); THCudaLongTensor_resize(state, indices, dim, NULL); THLongStorage_free(dim); // If sliceSize is 1, copy input to values and set indices if (sliceSize == 1) { THCTensor_(copy)(state, values, input); THCudaLongTensor_fill(state, indices, TH_INDEX_BASE); if (!keepdim) { THCTensor_(squeeze1d)(state, values, values, dimension); THCudaLongTensor_squeeze1d(state, indices, indices, dimension); } return; } // Requirements for fused kernel implementation: // // 1. sliceSize <= 2 * max threads per block // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for // a kernel launch // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed) if (sliceSize <= MAX_BLOCK_SIZE && slices <= MAX_GRID_SIZE && TensorUtils<THCTensor>::canUse32BitIndexMath(state, input)) { // Beginning our optimized implementation. First thing we want to do is to transpose // the input Tensor along the sort dimension, and then make it contiguous transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1); // Set-up TensorInfo structs for passing to kernel TensorInfo<real, unsigned int> tiValues = getTensorInfo<THCTensor, unsigned int>(state, valuesTransposed); TensorInfo<long, unsigned int> tiIndices = getTensorInfo<THCudaLongTensor, unsigned int>(state, indicesTransposed); // The number of blocks is the number of slices that we need to calculate the mode for. Each block // is responsible for computing a single mode dim3 grid; THC_getGridFromTiles(slices, grid); // The blocksize is two elements per thread, rounded up to the nearest power of 2 long ceilPowerOf2 = nextHighestPowerOf2(sliceSize); // Macro that calls kernel --> note that we set the block dimensions here, and // the amount of shared memory #define HANDLE_MODE(SIZE) \ { \ dim3 blockSize(SIZE / 2); \ \ int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ computeMode<real, SIZE> \ <<<grid, blockSize, memsize, THCState_getCurrentStream(state)>>>( \ THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \ } // Tradeoff between compilation time and the number of specializations. 
Ideally we would have // one HANDLE_MODE for each power of 2 switch(ceilPowerOf2) { case 2048: HANDLE_MODE(2048) break; case 1024: case 512: case 256: HANDLE_MODE(1024) break; case 128: case 64: HANDLE_MODE(128) break; case 32: case 16: case 8: case 4: case 2: HANDLE_MODE(32) break; case 1: default: assert(false); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, transposed); THCTensor_(free)(state, contiguous); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); } else { // Beginning our naive implementation: We don't want to mutate the input Tensor, but // we need to be able to sort the inputs along the dimension in order to calculate the // mode. Additionally, its ideal if the data along the dimension is contiguous. So // we transpose the dimension with the innermost dimension and make a new contiguous // version that we can use. transposed = THCTensor_(newClone)(state, input); THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); THCTensor_(free)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1); // Position is a Storage that will store the dimension values we are processing position = THLongStorage_newWithSize(ndim - 1); // Sort Buffer is a Storage that will be used in the internal sort required to calculate // the mode efficiently sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize); // Call mode THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0); THCTensor_(free)(state, contiguous); THLongStorage_free(position); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); THCudaLongStorage_free(state, sortBuffer); } if (!keepdim) { THCTensor_(squeeze1d)(state, values, values, dimension); THCudaLongTensor_squeeze1d(state, indices, indices, dimension); } } #undef MAX_GRID_SIZE #undef MAX_BLOCK_SIZE #endif
#include "kernels.h" #include <cooperative_groups.h> namespace cg = cooperative_groups; curandStatePhilox4_32_10_t *curandstate; /** * @brief element-wise activation function on device, like Relu, Gelu * * @tparam enum class ActivationType, kRelu, kGelu * @tparam input type * @param any shape of float and __half2 * @return same shape and type with input */ template <ActivationType, typename T> __forceinline__ __device__ T activation_kernel(T x); template <> __device__ float activation_kernel<ActivationType::kGelu, float>(float x) { float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x)))); return x * cdf; } template <> __device__ __half2 activation_kernel<ActivationType::kGelu, __half2>(__half2 val) { __half2 val_pow3 = __hmul2(val, __hmul2(val, val)); float2 tmp_pow = __half22float2(val_pow3); float2 tmp = __half22float2(val); tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x)))); tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y)))); return __hmul2(val, __float22half2_rn(tmp)); } template <> __device__ float activation_kernel<ActivationType::kRelu, float>(float x) { return fmaxf(x, 0); } template <> __device__ __half2 activation_kernel<ActivationType::kRelu, __half2>(__half2 x) { return __floats2half2_rn(fmaxf(0.f, __half2float(x.x)), fmaxf(0.f, __half2float(x.y))); } /** * @brief element-wise activation backward function on device * * @tparam enum class ActivationType * @tparam input type * @param any shape of float and __half2 * @return same shape of input */ template <ActivationType, typename T> __forceinline__ __device__ T activation_bwd_kernel(T grad, T x); template <> __device__ float activation_bwd_kernel<ActivationType::kGelu, float>(float grad, float x) { const float sqrt_param = 0.79788456080286535587989211986876f; const float mul_param = 0.044715; float x2mul = x * x * mul_param; float tan_h = tanhf(sqrt_param * (x + x * x2mul)); float dg1 = 0.5f * (1.0f + tan_h); float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); float dg3 = dg2 * 3 * x2mul; return grad * (dg1 + dg2 + dg3); } template <> __device__ __half activation_bwd_kernel<ActivationType::kGelu, __half>( __half grad, __half x_half) { float x = __half2float(x_half); const float sqrt_param = 0.79788456080286535587989211986876f; const float mul_param = 0.044715; float x2mul = x * x * mul_param; float tan_h = tanhf(sqrt_param * (x + x * x2mul)); float dg1 = 0.5f * (1.0f + tan_h); float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); float dg3 = dg2 * 3 * x2mul; return grad * __float2half(dg1 + dg2 + dg3); } template <> __device__ float activation_bwd_kernel<ActivationType::kRelu, float>(float grad, float x) { return x > 0.f ? grad : 0.f; } template <> __device__ __half activation_bwd_kernel<ActivationType::kRelu, __half>(__half grad, __half x) { const __half half_zero = __float2half(0.f); return x > half_zero ? grad : half_zero; } template <> __device__ __half2 activation_bwd_kernel<ActivationType::kRelu, __half2>( __half2 grad2, __half2 x_half2) { const __half half_zero = __float2half(0.f); return __floats2half2_rn(x_half2.x > half_zero ? grad2.x : half_zero, x_half2.y > half_zero ? 
grad2.y : half_zero); } /** * @brief init curand states in global memory * * @thread grid_dim * block*dim to suuport any size of states * @param state persistant curand states * @param seed seed to init states * @return void */ __global__ void curand_init_kernel(curandStatePhilox4_32_10_t *state, int seed) { /* Each thread gets same seed, a different sequence number, no offset */ int id = threadIdx.x + blockIdx.x * blockDim.x; curand_init(seed, id, 0, &state[id]); } void launch_curand_init(int total_count, int dim, cudaStream_t stream) { cudaMalloc(&curandstate, total_count * sizeof(curandStatePhilox4_32_10_t)); int grid_dim = total_count >> 9; curand_init_kernel<<<grid_dim, 512, 0, stream>>>( curandstate, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count()); } /** * @brief element-wise dropout, store dropped position in mask, it's not * in-place * * @thread * gridDim.x = total_count / 1024 * blockDim.x = 1024 * * @param total_count total elements * @param ratio drop ratio * @param out any size of float and __half * @param in same with out * @param mask uint8 type, same size with out * @param seed seed to curand * @return void */ __global__ void ls_dropout_kernel(const int total_count, const float ratio, float *__restrict__ out, const float *__restrict__ in, uint8_t *__restrict__ mask, const int seed) { const float scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 4 >= total_count) return; curandStatePhilox4_32_10_t state; curand_init(seed, i, 0, &state); uint8_t m[4]; float4 *out4 = reinterpret_cast<float4 *>(out); const float4 *data4 = reinterpret_cast<const float4 *>(in); uint32_t *mask4 = reinterpret_cast<uint32_t *>(mask); float4 rand = curand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); uint32_t *m4 = reinterpret_cast<uint32_t *>(m); mask4[i] = m4[0]; float4 input4 = data4[i]; float4 res4; res4.x = input4.x * scale * m[0]; res4.y = input4.y * scale * m[1]; res4.z = input4.z * scale * m[2]; res4.w = input4.w * scale * m[3]; out4[i] = res4; } __global__ void ls_dropout_kernel(const int total_count, const float ratio, __half *__restrict__ out, const __half *__restrict__ in, uint8_t *__restrict__ mask, const int seed) { const float scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 8 >= total_count) return; curandStatePhilox4_32_10_t state; curand_init(seed, i, 0, &state); const float4 *vals_float4 = reinterpret_cast<const float4 *>(in); float4 *outs_float4 = reinterpret_cast<float4 *>(out); uint64_t *mask8 = reinterpret_cast<uint64_t *>(mask); uint8_t m[8]; float4 rand = curand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); rand = curand_uniform4(&state); m[4] = (uint8_t)(rand.x > ratio); m[5] = (uint8_t)(rand.y > ratio); m[6] = (uint8_t)(rand.z > ratio); m[7] = (uint8_t)(rand.w > ratio); uint64_t *m8 = reinterpret_cast<uint64_t *>(m); mask8[i] = *m8; float4 val_float4 = vals_float4[i]; float4 out_float4; __half2 *val_half2 = reinterpret_cast<__half2 *>(&val_float4); __half2 *out_half2 = reinterpret_cast<__half2 *>(&out_float4); __half2 scale_mask_1 = __floats2half2_rn(scale * m[0], scale * m[1]); __half2 scale_mask_2 = __floats2half2_rn(scale * m[2], scale * m[3]); __half2 scale_mask_3 = __floats2half2_rn(scale * m[4], scale * m[5]); __half2 
scale_mask_4 = __floats2half2_rn(scale * m[6], scale * m[7]); out_half2[0] = __hmul2(val_half2[0], scale_mask_1); out_half2[1] = __hmul2(val_half2[1], scale_mask_2); out_half2[2] = __hmul2(val_half2[2], scale_mask_3); out_half2[3] = __hmul2(val_half2[3], scale_mask_4); outs_float4[i] = out_float4; } /** * @brief element-wise dropout backward with dropout mask, it's * not in-place * * @thread * gridDim.x = total_count / 1024 * blockDim.x = 1024 * * @param total_count total elements * @param ratio drop ratio * @param in any size of float and __half * @param mask uint8 type, same size with in * @return void */ __global__ void ls_dropout_bwd_kernel(const int total_count, const float ratio, float *out, const float *in, const uint8_t *__restrict__ mask) { const float scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 4 >= total_count) return; uint8_t m[4]; float4 *out4 = reinterpret_cast<float4 *>(out); const float4 *in4 = reinterpret_cast<const float4 *>(in); const uint32_t *mask4 = reinterpret_cast<const uint32_t *>(mask); uint32_t *m4 = reinterpret_cast<uint32_t *>(m); m4[0] = mask4[i]; float4 input4 = in4[i]; float4 res4; res4.x = input4.x * scale * static_cast<float>(m[0]); res4.y = input4.y * scale * static_cast<float>(m[1]); res4.z = input4.z * scale * static_cast<float>(m[2]); res4.w = input4.w * scale * static_cast<float>(m[3]); out4[i] = res4; } __global__ void ls_dropout_bwd_kernel(const int total_count, const float ratio, __half *out, const __half *in, const uint8_t *__restrict__ mask) { const __half scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 8 >= total_count) return; float4 *out4 = reinterpret_cast<float4 *>(out); const float4 *vals_float4 = reinterpret_cast<const float4 *>(in); const uint64_t *mask8 = reinterpret_cast<const uint64_t *>(mask); uint8_t m[8]; uint64_t *m8 = reinterpret_cast<uint64_t *>(m); m8[0] = mask8[i]; float4 val_float4 = vals_float4[i]; float4 out_float4; __half2 *val_half2 = reinterpret_cast<__half2 *>(&val_float4); __half2 *out_half2 = reinterpret_cast<__half2 *>(&out_float4); __half2 scale_mask_1 = __halves2half2(scale * __float2half(m[0]), scale * __float2half(m[1])); __half2 scale_mask_2 = __halves2half2(scale * __float2half(m[2]), scale * __float2half(m[3])); __half2 scale_mask_3 = __halves2half2(scale * __float2half(m[4]), scale * __float2half(m[5])); __half2 scale_mask_4 = __halves2half2(scale * __float2half(m[6]), scale * __float2half(m[7])); out_half2[0] = __hmul2(val_half2[0], scale_mask_1); out_half2[1] = __hmul2(val_half2[1], scale_mask_2); out_half2[2] = __hmul2(val_half2[2], scale_mask_3); out_half2[3] = __hmul2(val_half2[3], scale_mask_4); out4[i] = out_float4; } template <> void launch_ls_dropout<float>(float *out, const float *vals, uint8_t *mask, int total_count, float ratio, cudaStream_t stream, bool backward) { int grid_dim = total_count >> 12; if (!backward) { ls_dropout_kernel<<<grid_dim + 1, 1024, 0, stream>>>( total_count, ratio, out, vals, mask, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count()); } else { ls_dropout_bwd_kernel<<<grid_dim + 1, 1024, 0, stream>>>(total_count, ratio, out, vals, mask); } } template <> void launch_ls_dropout<__half>(__half *out, const __half *vals, uint8_t *mask, int total_count, float ratio, cudaStream_t stream, bool backward) { int grid_dim = total_count >> 13; if (!backward) { ls_dropout_kernel<<<grid_dim + 1, 1024, 0, stream>>>( total_count, ratio, out, vals, mask, 
std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count()); } else { ls_dropout_bwd_kernel<<<grid_dim + 1, 1024, 0, stream>>>(total_count, ratio, out, vals, mask); } } /** * @brief fused bias, dropout, and residual at the end of Attention and FFN, * store dropped position in mask, it's not in-place * * @thread * gridDim.x = total_count / 1024 * blockDim.x = 1024 * * @param total_count total elements * @param ratio drop ratio * @param out [batch_size, seq_len, hidden_size], float and __half * @param in [batch_size, seq_len, hidden_size], float and __half * @param mask [batch_size, seq_len, hidden_size], uint8 type * @param bias [hidden_size], ffn bias * @param residual [batch_size, seq_len, hidden_size], float and __half * @param seed seed to curand * @param hidden_size hidden size * @return void */ __global__ void ls_dropout_res_bias_kernel( const int total_count, const float ratio, float *__restrict__ out, const float *__restrict__ in, uint8_t *__restrict__ mask, const float *__restrict__ bias, const float *__restrict__ residual, const int seed, const int hidden_size) { const float scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 4 >= total_count) return; curandStatePhilox4_32_10_t state; curand_init(seed, i, 0, &state); uint8_t m[4]; float4 *out4 = reinterpret_cast<float4 *>(out); const float4 *data4 = reinterpret_cast<const float4 *>(in); const float4 *residual4 = reinterpret_cast<const float4 *>(residual); const float4 *bias4 = reinterpret_cast<const float4 *>(bias); uint32_t *mask4 = reinterpret_cast<uint32_t *>(mask); float4 rand = curand_uniform4(&state); m[0] = static_cast<uint8_t>(rand.x > ratio); m[1] = static_cast<uint8_t>(rand.y > ratio); m[2] = static_cast<uint8_t>(rand.z > ratio); m[3] = static_cast<uint8_t>(rand.w > ratio); int bias_i = i % (hidden_size >> 2); uint32_t *m4 = reinterpret_cast<uint32_t *>(m); mask4[i] = m4[0]; const float4 input4 = data4[i]; const float4 b4 = __ldg(&bias4[bias_i]); const float4 res4 = residual4[i]; float4 output4; output4.x = (input4.x + b4.x) * scale * m[0] + res4.x; output4.y = (input4.y + b4.y) * scale * m[1] + res4.y; output4.z = (input4.z + b4.z) * scale * m[2] + res4.z; output4.w = (input4.w + b4.w) * scale * m[3] + res4.w; out4[i] = output4; } __global__ void ls_dropout_res_bias_kernel( const int total_count, const float ratio, __half *__restrict__ out, const __half *__restrict__ in, uint8_t *__restrict__ mask, const __half *__restrict__ bias, const __half *__restrict__ residual, const int seed, const int hidden_size) { const __half scale = 1. / (1. 
- ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 8 >= total_count) return; curandStatePhilox4_32_10_t state; curand_init(seed, i, 0, &state); const float4 *vals_float4 = reinterpret_cast<const float4 *>(in); float4 *outs_float4 = reinterpret_cast<float4 *>(out); const float4 *residual4 = reinterpret_cast<const float4 *>(residual); const float4 *bias4 = reinterpret_cast<const float4 *>(bias); uint64_t *mask8 = reinterpret_cast<uint64_t *>(mask); uint8_t m[8]; float4 rand = curand_uniform4(&state); m[0] = static_cast<uint8_t>(rand.x > ratio); m[1] = static_cast<uint8_t>(rand.y > ratio); m[2] = static_cast<uint8_t>(rand.z > ratio); m[3] = static_cast<uint8_t>(rand.w > ratio); rand = curand_uniform4(&state); m[4] = static_cast<uint8_t>(rand.x > ratio); m[5] = static_cast<uint8_t>(rand.y > ratio); m[6] = static_cast<uint8_t>(rand.z > ratio); m[7] = static_cast<uint8_t>(rand.w > ratio); uint64_t *m8 = reinterpret_cast<uint64_t *>(m); mask8[i] = m8[0]; int bias_i = i % (hidden_size >> 3); float4 val_float4 = vals_float4[i]; const float4 b4 = __ldg(&bias4[bias_i]); const float4 res4 = residual4[i]; float4 out_float4; __half2 *val_half2 = reinterpret_cast<__half2 *>(&val_float4); __half2 *out_half2 = reinterpret_cast<__half2 *>(&out_float4); const __half2 *b_half2 = reinterpret_cast<const __half2 *>(&b4); const __half2 *res_half2 = reinterpret_cast<const __half2 *>(&res4); __half2 scale_mask_1 = __halves2half2(scale * __float2half(m[0]), scale * __float2half(m[1])); __half2 scale_mask_2 = __halves2half2(scale * __float2half(m[2]), scale * __float2half(m[3])); __half2 scale_mask_3 = __halves2half2(scale * __float2half(m[4]), scale * __float2half(m[5])); __half2 scale_mask_4 = __halves2half2(scale * __float2half(m[6]), scale * __float2half(m[7])); out_half2[0] = __hfma2(__hadd2(val_half2[0], b_half2[0]), scale_mask_1, res_half2[0]); out_half2[1] = __hfma2(__hadd2(val_half2[1], b_half2[1]), scale_mask_2, res_half2[1]); out_half2[2] = __hfma2(__hadd2(val_half2[2], b_half2[2]), scale_mask_3, res_half2[2]); out_half2[3] = __hfma2(__hadd2(val_half2[3], b_half2[3]), scale_mask_4, res_half2[3]); outs_float4[i] = out_float4; } template <> void launch_ls_dropout_res_bias<float>(float *out, const float *vals, uint8_t *mask, const float *bias, const float *residual, int total_count, int dim, float ratio, cudaStream_t stream) { int grid_dim = total_count >> 12; ls_dropout_res_bias_kernel<<<grid_dim + 1, 1024, 0, stream>>>( total_count, ratio, out, vals, mask, bias, residual, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(), dim); } template <> void launch_ls_dropout_res_bias<__half>(__half *out, const __half *vals, uint8_t *mask, const __half *bias, const __half *residual, int total_count, int dim, float ratio, cudaStream_t stream) { int grid_dim = total_count >> 13; ls_dropout_res_bias_kernel<<<grid_dim + 1, 1024, 0, stream>>>( total_count, ratio, out, vals, mask, bias, residual, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(), dim); } /** * @brief fused bias and dropout backward at the end of Attention and FFN * * @thread * gridDim.x = hidden_size / 8 * blockDim.x = 8 * blockDim.y = 1024 / 8 = 128 * * @param row_size batch_size * seq_len * @param ratio dropout ratio * @param in_grad [batch_size, seq_len, hidden_size], input grad * @param bias_grad [hidden_size], bias grad * @param out_grad [batch_size, seq_len, hidden_size], output grad * @param mask 
[batch_size, seq_len, hidden_size], dropout mask * @param hidden_size * @return void */ __global__ void ls_dropout_bias_bwd_kernel( const int row_size, const float ratio, float *__restrict__ in_grad, float *__restrict__ bias_grad, const float *__restrict__ out_grad, const uint8_t *__restrict__ mask, const int hidden_size) { const float scale = 1.f / (1.f - ratio); // every block generate 8 bias result __shared__ float tile[8][129]; cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); int col_idx = flat_2dim(blockIdx.x, threadIdx.x, 8); int stride = hidden_size * 128; float local_sum = 0; int idx = flat_2dim(threadIdx.y, col_idx, hidden_size); for (int r = threadIdx.y; r < row_size; r += 128) { float val = out_grad[idx]; val *= scale * static_cast<float>(mask[idx]); local_sum += val; in_grad[idx] = val; idx += stride; } tile[threadIdx.x][threadIdx.y] = local_sum; __syncthreads(); float sum = 0; int tid = threadIdx.y * blockDim.x + threadIdx.x; int x = tid >> 7; int y = tid & (127); if (y < 32) { #pragma unroll for (int i = 0; i < 4; i++) { sum += tile[x][y + i * 32]; } } __syncthreads(); for (int i = 1; i < 32; i <<= 1) sum += g.shfl_down(sum, i); if (y == 0) tile[0][x] = sum; __syncthreads(); if (threadIdx.x < 8) { int pos = flat_2dim(blockIdx.x, threadIdx.x, 8); bias_grad[pos] = tile[0][threadIdx.x]; } } __global__ void ls_dropout_bias_bwd_kernel( const int row_size, const float ratio, __half *__restrict__ in_grad, __half *__restrict__ bias_grad, const __half *__restrict__ out_grad, const uint8_t *__restrict__ mask, const int hidden_size) { const __half2 scale = __float2half2_rn(1.f / (1.f - ratio)); __shared__ __half2 tile[8][129]; cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); __half2 *in_grad2 = reinterpret_cast<__half2 *>(in_grad); const __half2 *out_grad2 = reinterpret_cast<const __half2 *>(out_grad); __half2 *bias_grad2 = reinterpret_cast<__half2 *>(bias_grad); int col_idx = flat_2dim(blockIdx.x, threadIdx.x, 8); int stride = hidden_size * 128; __half2 local_sum = __float2half2_rn(0.f); int idx = flat_2dim(threadIdx.y, col_idx, hidden_size); for (int r = threadIdx.y; r < row_size; r += 128) { __half2 val = out_grad2[idx]; __half2 m2 = __floats2half2_rn(mask[2 * idx], mask[2 * idx + 1]); val *= scale * m2; local_sum += val; in_grad2[idx] = val; idx += stride; } tile[threadIdx.x][threadIdx.y] = local_sum; __syncthreads(); __half2 sum = __float2half2_rn(0.f); int tid = threadIdx.y * blockDim.x + threadIdx.x; int x = tid >> 7; int y = tid & (127); if (y < 32) { #pragma unroll for (int i = 0; i < 4; i++) { sum += tile[x][y + i * 32]; } } __syncthreads(); for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_down(sum, i); if (y == 0) tile[0][x] = sum; __syncthreads(); if (threadIdx.x < 8) { int pos = flat_2dim(blockIdx.x, threadIdx.x, 8); bias_grad2[pos] = tile[0][threadIdx.x]; } } template <typename T> void launch_ls_dropout_bias_bwd(T *in_grad, T *bias_grad, const T *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream) { dim3 grid_dim((dim - 1) / 8 + 1); dim3 block_dim(8, 128); ls_dropout_bias_bwd_kernel<<<grid_dim, block_dim, 0, stream>>>( row_size, ratio, in_grad, bias_grad, out_grad, mask, dim); } template <> void launch_ls_dropout_bias_bwd(__half *in_grad, __half *bias_grad, const __half *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream) { dim >>= 1; dim3 grid_dim((dim - 1) / 8 
+ 1); dim3 block_dim(8, 128); ls_dropout_bias_bwd_kernel<<<grid_dim, block_dim, 0, stream>>>( row_size, ratio, in_grad, bias_grad, out_grad, mask, dim); } template void launch_ls_dropout_bias_bwd(float *in_grad, float *bias_grad, const float *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream); /** * @brief fused bias, activation, and dropout at the end of first ffn * * @thread * gridDim.x = hidden_size / 8 * blockDim.x = 8 * blockDim.y = 1024 / 8 = 128 * * @tparam act_type activation function, like kRelu, kGelu * @param total_count total elements * @param ratio drop ratio * @param out [batch_size, seq_len, hidden_size], float and __half * @param in [batch_size, seq_len, hidden_size], float and __half * @param mask [batch_size, seq_len, hidden_size], uint8 type * @param bias [hidden_size], ffn bias * @param seed seed to curand * @param hidden_size * @return void */ template <ActivationType act_type> __global__ void ls_dropout_act_bias_kernel( const int total_count, const float ratio, float *__restrict__ out, const float *__restrict__ in, uint8_t *__restrict__ mask, const float *__restrict__ bias, const int seed, const int hidden_size) { const float scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 4 >= total_count) return; curandStatePhilox4_32_10_t state; curand_init(seed, i, 0, &state); uint8_t m[4]; float4 *out4 = reinterpret_cast<float4 *>(out); const float4 *data4 = reinterpret_cast<const float4 *>(in); const float4 *bias4 = reinterpret_cast<const float4 *>(bias); uint32_t *mask4 = reinterpret_cast<uint32_t *>(mask); float4 rand = curand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); int bias_i = i % (hidden_size >> 2); uint32_t *m4 = reinterpret_cast<uint32_t *>(m); mask4[i] = m4[0]; const float4 input4 = data4[i]; const float4 b4 = __ldg(&bias4[bias_i]); float4 output4; output4.x = activation_kernel<act_type, float>(input4.x + b4.x) * scale * m[0]; output4.y = activation_kernel<act_type, float>(input4.y + b4.y) * scale * m[1]; output4.z = activation_kernel<act_type, float>(input4.z + b4.z) * scale * m[2]; output4.w = activation_kernel<act_type, float>(input4.w + b4.w) * scale * m[3]; out4[i] = output4; } template <ActivationType act_type> __global__ void ls_dropout_act_bias_kernel( const int total_count, const float ratio, __half *__restrict__ out, const __half *__restrict__ in, uint8_t *__restrict__ mask, const __half *__restrict__ bias, const int seed, const int hidden_size) { const float scale = 1.f / (1.f - ratio); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i * 8 >= total_count) return; curandStatePhilox4_32_10_t state; curand_init(seed, i, 0, &state); const float4 *vals_float4 = reinterpret_cast<const float4 *>(in); float4 *outs_float4 = reinterpret_cast<float4 *>(out); const float4 *bias4 = reinterpret_cast<const float4 *>(bias); uint64_t *mask8 = reinterpret_cast<uint64_t *>(mask); uint8_t m[8]; float4 rand = curand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); rand = curand_uniform4(&state); m[4] = (uint8_t)(rand.x > ratio); m[5] = (uint8_t)(rand.y > ratio); m[6] = (uint8_t)(rand.z > ratio); m[7] = (uint8_t)(rand.w > ratio); uint64_t *m8 = reinterpret_cast<uint64_t *>(m); mask8[i] = *m8; int bias_i = i % (hidden_size >> 3); float4 val_float4 = vals_float4[i]; const float4 b4 = 
__ldg(&bias4[bias_i]); float4 out_float4; __half2 *val_half2 = reinterpret_cast<__half2 *>(&val_float4); __half2 *out_half2 = reinterpret_cast<__half2 *>(&out_float4); const __half2 *b_half2 = reinterpret_cast<const __half2 *>(&b4); __half2 scale_mask_1 = __floats2half2_rn(scale * m[0], scale * m[1]); __half2 scale_mask_2 = __floats2half2_rn(scale * m[2], scale * m[3]); __half2 scale_mask_3 = __floats2half2_rn(scale * m[4], scale * m[5]); __half2 scale_mask_4 = __floats2half2_rn(scale * m[6], scale * m[7]); out_half2[0] = __hmul2( activation_kernel<act_type, __half2>(__hadd2(val_half2[0], b_half2[0])), scale_mask_1); out_half2[1] = __hmul2( activation_kernel<act_type, __half2>(__hadd2(val_half2[1], b_half2[1])), scale_mask_2); out_half2[2] = __hmul2( activation_kernel<act_type, __half2>(__hadd2(val_half2[2], b_half2[2])), scale_mask_3); out_half2[3] = __hmul2( activation_kernel<act_type, __half2>(__hadd2(val_half2[3], b_half2[3])), scale_mask_4); outs_float4[i] = out_float4; } template <> void launch_ls_dropout_act_bias<ActivationType::kGelu, float>( float *out, const float *vals, uint8_t *mask, const float *bias, int total_count, int dim, float ratio, cudaStream_t stream) { int grid_dim = total_count >> 10; ls_dropout_act_bias_kernel<ActivationType::kGelu> <<<grid_dim + 1, 256, 0, stream>>>( total_count, ratio, out, vals, mask, bias, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(), dim); } template <> void launch_ls_dropout_act_bias<ActivationType::kGelu, __half>( __half *out, const __half *vals, uint8_t *mask, const __half *bias, int total_count, int dim, float ratio, cudaStream_t stream) { int grid_dim = total_count >> 11; ls_dropout_act_bias_kernel<ActivationType::kGelu> <<<grid_dim + 1, 256, 0, stream>>>( total_count, ratio, out, vals, mask, bias, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(), dim); } template <> void launch_ls_dropout_act_bias<ActivationType::kRelu, float>( float *out, const float *vals, uint8_t *mask, const float *bias, int total_count, int dim, float ratio, cudaStream_t stream) { int grid_dim = total_count >> 10; ls_dropout_act_bias_kernel<ActivationType::kRelu> <<<grid_dim + 1, 256, 0, stream>>>( total_count, ratio, out, vals, mask, bias, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(), dim); } template <> void launch_ls_dropout_act_bias<ActivationType::kRelu, __half>( __half *out, const __half *vals, uint8_t *mask, const __half *bias, int total_count, int dim, float ratio, cudaStream_t stream) { int grid_dim = total_count >> 11; ls_dropout_act_bias_kernel<ActivationType::kRelu> <<<grid_dim + 1, 256, 0, stream>>>( total_count, ratio, out, vals, mask, bias, std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::system_clock::now().time_since_epoch()) .count(), dim); } /** * @brief fused bias, activation, and dropout backward * * @thread * gridDim.x = total_count / 1024 * blockDim.x = 1024 * * @tparam act_type kRelu * @param row_size batch_size * seq_len * @param ratio dropout ratio * @param in_grad [batch_size, seq_len, hidden_size], input grad * @param bias_grad [hidden_size], bias grad * @param out_grad [batch_size, seq_len, hidden_size], output grad * @param mask [batch_size, seq_len, hidden_size], dropout mask * @param hidden_size * @return void */ template <ActivationType act_type, typename T> __global__ void ls_dropout_act_bias_bwd_kernel( 
const int row_size, const float ratio, T *in_grad, T *__restrict__ bias_grad, const T *__restrict__ input, const T *__restrict__ bias, const T *out_grad, const uint8_t *__restrict__ mask, const int hidden_size) { const float scale = 1.f / (1.f - ratio); __shared__ float tile[WARP_SIZE][WARP_SIZE + 1]; cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); int col_idx = flat_2dim(blockIdx.x, threadIdx.x, WARP_SIZE); int stride = hidden_size * WARP_SIZE; float local_sum = 0; int idx = flat_2dim(threadIdx.y, col_idx, hidden_size); if (col_idx < hidden_size) { for (int r = threadIdx.y; r < row_size; r += WARP_SIZE) { float val = out_grad[idx]; float in = input[idx]; float b = bias[idx % hidden_size]; val = activation_bwd_kernel<act_type, float>( val * scale * static_cast<float>(mask[idx]), in + b); local_sum += val; in_grad[idx] = val; idx += stride; } } tile[threadIdx.x][threadIdx.y] = local_sum; __syncthreads(); float sum = tile[threadIdx.y][threadIdx.x]; __syncthreads(); for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_down(sum, i); if (threadIdx.x == 0) tile[0][threadIdx.y] = sum; __syncthreads(); if (threadIdx.y == 0) { int pos = flat_2dim(blockIdx.x, threadIdx.x, WARP_SIZE); bias_grad[pos] = tile[0][threadIdx.x]; } } // @brief fused bias, activation, and dropout backward // It is deprecated for precision reason. Keep it for future optimization. // // template <ActivationType act_type> // __global__ void ls_dropout_act_bias_bwd_kernel( // const int row_size, const float ratio, __half * in_grad, // __half *__restrict__ bias_grad, const __half *__restrict__ input, const // __half *__restrict__ bias, const __half * out_grad, const uint8_t // *__restrict__ mask, const int hidden_size) { // const __half2 scale = __float2half2_rn(1.f / (1.f - ratio)); // __shared__ __half2 tile[WARP_SIZE][WARP_SIZE + 1]; // cg::thread_block b = cg::this_thread_block(); // cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); // __half2 *in_grad2 = reinterpret_cast<__half2 *>(in_grad); // __half2 *bias_grad2 = reinterpret_cast<__half2 *>(bias_grad); // const __half2 *out_grad2 = reinterpret_cast<const __half2 *>(out_grad); // const __half2 *input2 = reinterpret_cast<const __half2 *>(input); // const __half2 *bias2 = reinterpret_cast<const __half2 *>(bias); // int col_idx = flat_2dim(blockIdx.x, threadIdx.x, WARP_SIZE); // int stride = hidden_size * WARP_SIZE; // __half2 local_sum = __float2half2_rn(0.f); // int idx = flat_2dim(threadIdx.y, col_idx, hidden_size); // if (col_idx < hidden_size) { // for (int r = threadIdx.y; r < row_size; r += WARP_SIZE) { // __half2 val = out_grad2[idx]; // __half2 in2 = input2[idx]; // __half2 b2 = bias2[idx % hidden_size ]; // __half2 m2 = __floats2half2_rn(mask[2 * idx], mask[2 * idx + 1]); // val = activation_bwd_kernel<ActivationType::kRelu, __half2>(val * scale // * // m2, // in2+b2); // local_sum += val; // in_grad2[idx] = val; // idx += stride; // } // } // tile[threadIdx.x][threadIdx.y] = local_sum; // __syncthreads(); // __half2 sum = tile[threadIdx.y][threadIdx.x]; // __syncthreads(); // for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_down(sum, i); // if (threadIdx.x == 0) tile[0][threadIdx.y] = sum; // __syncthreads(); // if (threadIdx.y == 0) { // int pos = flat_2dim(blockIdx.x, threadIdx.x, WARP_SIZE); // bias_grad2[pos] = tile[0][threadIdx.x]; // } // } template <ActivationType act_type, typename T> void launch_ls_dropout_act_bias_bwd(T *in_grad, T *bias_grad, const T *input, 
const T *bias, const T *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream) { dim3 grid_dim((dim - 1) / WARP_SIZE + 1); dim3 block_dim(WARP_SIZE, WARP_SIZE); ls_dropout_act_bias_bwd_kernel<act_type><<<grid_dim, block_dim, 0, stream>>>( row_size, ratio, in_grad, bias_grad, input, bias, out_grad, mask, dim); } // template <> // void launch_ls_dropout_act_bias_bwd<ActivationType::kRelu, __half>( // __half *in_grad, __half *bias_grad,const __half *input, const __half // *bias, const __half *out_grad, const uint8_t *mask, int row_size, int // dim, float ratio, cudaStream_t stream) { // dim >>= 1; // dim3 grid_dim((dim - 1) / WARP_SIZE + 1); // dim3 block_dim(WARP_SIZE, WARP_SIZE); // ls_dropout_act_bias_bwd_kernel<ActivationType::kRelu> // <<<grid_dim, block_dim, 0, stream>>>(row_size, ratio, in_grad, // bias_grad, // input, bias,out_grad, mask, dim); // } template void launch_ls_dropout_act_bias_bwd<ActivationType::kRelu, float>( float *in_grad, float *bias_grad, const float *input, const float *bias, const float *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream); template void launch_ls_dropout_act_bias_bwd<ActivationType::kRelu, __half>( __half *in_grad, __half *bias_grad, const __half *input, const __half *bias, const __half *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream); template void launch_ls_dropout_act_bias_bwd<ActivationType::kGelu, float>( float *in_grad, float *bias_grad, const float *input, const float *bias, const float *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream); template void launch_ls_dropout_act_bias_bwd<ActivationType::kGelu, __half>( __half *in_grad, __half *bias_grad, const __half *input, const __half *bias, const __half *out_grad, const uint8_t *mask, int row_size, int dim, float ratio, cudaStream_t stream);
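// Editor's note -- illustrative sketch, not part of the original source. Every forward
// kernel above follows the same inverted-dropout recipe: draw uniform randoms from a
// per-thread Philox stream, keep an element with probability (1 - ratio), record the
// keep/drop decision in a uint8 mask, and rescale survivors by 1 / (1 - ratio) so the
// expected value is unchanged; the backward kernels then reuse the stored mask. The
// minimal, scalar (non-vectorized) kernel below restates just that recipe. Kernel and
// variable names are the editor's own.
#include <cstdint>
#include <curand_kernel.h>

__global__ void dropout_fwd_sketch(float *out, uint8_t *mask, const float *in,
                                   int n, float ratio, int seed) {
  const float scale = 1.f / (1.f - ratio);
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  curandStatePhilox4_32_10_t state;
  curand_init(seed, i, 0, &state);       // independent counter-based stream per thread
  float r = curand_uniform(&state);      // r in (0, 1]
  uint8_t keep = (uint8_t)(r > ratio);   // kept with probability 1 - ratio
  mask[i] = keep;                        // saved so the backward pass can reuse it
  out[i] = in[i] * scale * keep;         // survivors are rescaled, dropped lanes are 0
}
// In the same spirit, the backward kernels above compute in_grad = out_grad * scale * mask
// before reducing the per-column sums into bias_grad.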
#include "nnbnorm.hpp" #include "datacu.hpp" #include "impl/cudnnhelper.hpp" #include "impl/copy.hpp" #include <cassert> using namespace std ; using namespace vl ; using namespace vl::nn ; using namespace vl::impl ; #define CHECK(x) \ { \ cudnnError = x ; \ if (cudnnError != CUDNN_STATUS_SUCCESS) { \ error = op.context.setError(op.context.getCudaHelper().catchCudnnError(cudnnError, \ STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \ goto done ; \ } } // ------------------------------------------------------------------- // Kernels // ------------------------------------------------------------------- template<typename T> __global__ void var_to_std(T * var, unsigned int num, T scale, T epsilon) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { var[idx] = sqrt(scale * var[idx] + epsilon) ; } } template<typename T> __global__ void std_to_var(T * var, T const * std, unsigned int num, T epsilon) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { var[idx] = std[idx]*std[idx] - epsilon ; } } template<typename T> __global__ void inverse(T * ivar, unsigned int num) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { ivar[idx] = ((T)1) / ivar[idx] ; } } template<typename T> __global__ void inverse(T * out, T * in, unsigned int num) { unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < num) { out[idx] = ((T)1) / in[idx] ; } } // ------------------------------------------------------------------- // Forward // ------------------------------------------------------------------- template<DataType dataType> struct BatchNormForwardWithMomentCudnn { vl::ErrorCode operator()(BatchNorm &op, Tensor &output, Tensor const &moment, // can be null Tensor const &input, Tensor const &multiplier, Tensor const &bias) { assert(output) ; assert(input) ; assert(multiplier) ; assert(bias) ; if (op.epsilon < CUDNN_BN_MIN_EPSILON) { return VLE_Unsupported ; } typedef typename DataTypeTraits<dataType>::type type ; size_t workspaceSize ; type * workspace ; cudnnTensorDescriptor_t dataDesc, momentDesc ; bool dataDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::dataType ; vl::DataType dynDataType = output.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(dataDesc, CUDNN_TENSOR_NCHW, cudnnDataType, input.getSize(), input.getDepth(), input.getWidth(), input.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, input.getDepth(), 1, 1)) ; // Allocate workspace. workspaceSize = input.getDepth() ; workspace = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ; // Run CuDNN batch normalization implementation. { type alpha = 1.0f ; type beta = 0.0f ; type * meanMemory = moment ? 
(type*)moment.getMemory() : workspace ; type * stdMemory = meanMemory + input.getDepth() ; type * varMemory = workspace ; size_t const blockSize = VL_CUDA_NUM_THREADS ; std_to_var<type> <<<divideAndRoundUp(input.getDepth(),blockSize),blockSize>>> (varMemory, stdMemory, input.getDepth(), CUDNN_BN_MIN_EPSILON) ; CHECK(cudnnBatchNormalizationForwardInference (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, dataDesc, input.getMemory(), dataDesc, output.getMemory(), momentDesc, multiplier.getMemory(), bias.getMemory(), meanMemory, varMemory, CUDNN_BN_MIN_EPSILON)) ; } // Finish. done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } return op.context.passError(error, __func__) ; } } ; template<DataType dataType> struct BatchNormForwardCudnn { vl::ErrorCode operator()(BatchNorm &op, Tensor &output, Tensor &moment, Tensor const &input, Tensor const &multiplier, Tensor const &bias) { assert(output) ; assert(input) ; assert(multiplier) ; assert(bias) ; if (op.epsilon < CUDNN_BN_MIN_EPSILON) { return VLE_Unsupported ; } typedef typename DataTypeTraits<dataType>::type type ; cudnnTensorDescriptor_t dataDesc, momentDesc ; bool dataDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::dataType ; vl::DataType dynDataType = output.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(dataDesc, CUDNN_TENSOR_NCHW, cudnnDataType, input.getSize(), input.getDepth(), input.getWidth(), input.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, input.getDepth(), 1, 1)) ; // Run CuDNN batch normalization implementation. { type alpha = 1.0f ; type beta = 0.0f ; type * meanMemory = NULL ; type * varMemory = NULL ; if (moment) { meanMemory = (type*)moment.getMemory() ; varMemory = meanMemory + input.getDepth() ; vl::impl::operations<vl::VLDT_GPU,type>::fill (meanMemory, 2 * input.getDepth() * sizeof(type), 0) ; } CHECK(cudnnBatchNormalizationForwardTraining (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, dataDesc, input.getMemory(), dataDesc, output.getMemory(), momentDesc, multiplier.getMemory(), bias.getMemory(), 0, NULL, NULL, op.epsilon, meanMemory, varMemory)) ; if (varMemory) { // CuDNN computes the variance without epsilon, whereas MCN // returns the standard deviation after adding epsilon. // Also, CuDNN returns the unbiased variance estimate, but it is // debatable that this is appropriate. // // We pick instead the caches, which are closer to the values we compute. // Also they do not need to be pre-initialized with zeros. size_t const blockSize = VL_CUDA_NUM_THREADS ; inverse<type> <<<divideAndRoundUp(input.getDepth(),blockSize),blockSize>>> (varMemory, input.getDepth()) ; } } // Finish. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } return op.context.passError(error, __func__) ; } } ; // ------------------------------------------------------------------- // Backward // ------------------------------------------------------------------- template<DataType dataType> struct BatchNormBackwardWithMomentCudnn { vl::ErrorCode operator()(BatchNorm &op, Tensor &derInput, Tensor &derMultiplier, Tensor &derBias, Tensor const &moment, Tensor const &input, Tensor const &multiplier, Tensor const &bias, Tensor const &derOutput) { assert(derInput) ; assert(derMultiplier) ; assert(derBias) ; assert(moment) ; assert(input) ; assert(multiplier) ; assert(bias) ; assert(derOutput) ; if (op.epsilon < CUDNN_BN_MIN_EPSILON) { return VLE_Unsupported ; } typedef typename DataTypeTraits<dataType>::type type ; size_t workspaceSize ; type * workspace ; cudnnTensorDescriptor_t derOutputDesc, dataDesc, momentDesc ; bool derOutputDescInitialized = false ; bool dataDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::dataType ; vl::DataType dynDataType = derOutput.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(derOutputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, derOutput.getSize(), // sizes derOutput.getDepth(), derOutput.getWidth(), derOutput.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(dataDesc, CUDNN_TENSOR_NCHW, cudnnDataType, input.getSize(), input.getDepth(), input.getWidth(), input.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, input.getDepth(), 1, 1)) ; // Scrarch space to provide moments in CuDNN format. workspaceSize = derInput.getDepth() ; workspace = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ; { type alpha = 1.0f ; type beta = 0.0f ; type * meanMemory = (type*)moment.getMemory() ; type * stdMemory = meanMemory + input.getDepth() ; type * istdMemory = workspace ; // The CuDNN manual describes the varMemory output above // as inverse variance, but it is the inverse standard deviation instead. size_t const blockSize = VL_CUDA_NUM_THREADS ; inverse<type> <<<divideAndRoundUp(input.getDepth(),blockSize),blockSize>>> (istdMemory, stdMemory, input.getDepth()) ; CHECK(cudnnBatchNormalizationBackward (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, // data &alpha, &beta, // params dataDesc, input.getMemory(), // input derOutputDesc, derOutput.getMemory(), // input dataDesc, derInput.getMemory(), // output momentDesc, multiplier.getMemory(), // input derMultiplier.getMemory(), // output derBias.getMemory(), // output op.epsilon, meanMemory, istdMemory)) ; } // Finish. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } return op.context.passError(error, __func__) ; } } ; template<DataType dataType> struct BatchNormBackwardCudnn { vl::ErrorCode operator()(BatchNorm &op, Tensor &derInput, Tensor &derMultiplier, Tensor &derBias, Tensor &moment, Tensor const &input, Tensor const &multiplier, Tensor const &bias, Tensor const &derOutput) { assert(derInput) ; assert(derMultiplier) ; assert(derBias) ; assert(input) ; assert(multiplier) ; assert(bias) ; assert(derOutput) ; if (op.epsilon < CUDNN_BN_MIN_EPSILON) { return VLE_Unsupported ; } typedef typename DataTypeTraits<dataType>::type type ; size_t workspaceSize ; type * workspace ; size_t volume ; cudnnTensorDescriptor_t derOutputDesc, momentDesc ; bool derOutputDescInitialized = false ; bool momentDescInitialized = false ; cudnnDataType_t cudnnDataType = DataTypeToCudnn<dataType>::dataType ; vl::DataType dynDataType = derOutput.getDataType() ; assert(dynDataType == dataType) ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN. CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs. CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(derOutputDesc, CUDNN_TENSOR_NCHW, cudnnDataType, derOutput.getSize(), // sizes derOutput.getDepth(), derOutput.getWidth(), derOutput.getHeight())) ; CHECK(cudnnCreateTensorDescriptor(&momentDesc)) ; momentDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(momentDesc, CUDNN_TENSOR_NCHW, cudnnDataType, 1, input.getDepth(), 1, 1)) ; // Compute moment using CuDNN. Unfortunately CuDNN does not expose // the values of the moment in the backward pass, so we need to run // the forward code to get them. volume = derInput.getNumElements() ; workspaceSize = (moment ? 0 : 2 * derInput.getDepth()) + volume ; workspace = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ; { type alpha = 1.0f ; type beta = 0.0f ; type * outMemory = workspace ; type * meanMemory = moment ? (type*)moment.getMemory() : workspace + volume ; type * varMemory = meanMemory + input.getDepth() ; CHECK(cudnnBatchNormalizationForwardTraining (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, derOutputDesc, input.getMemory(), derOutputDesc, outMemory, // will be discarded momentDesc, multiplier.getMemory(), bias.getMemory(), 1.0, // cumulative factor for moment NULL, NULL, op.epsilon, meanMemory, varMemory)) ; CHECK(cudnnBatchNormalizationBackward (handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta, // data &alpha, &beta, // params derOutputDesc, input.getMemory(), // input derOutputDesc, derOutput.getMemory(), // input derOutputDesc, derInput.getMemory(), // output momentDesc, multiplier.getMemory(), // input derMultiplier.getMemory(), // output derBias.getMemory(), // output op.epsilon, meanMemory, varMemory)) ; // The CuDNN manual describes the varMemory output above // as inverse variance, but it is the inverse standard deviation instead. size_t const blockSize = VL_CUDA_NUM_THREADS ; inverse<type> <<<divideAndRoundUp(input.getDepth(),blockSize),blockSize>>> (varMemory, input.getDepth()) ; } // Finish. 
done: if (momentDescInitialized) { cudnnDestroyTensorDescriptor(momentDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } return op.context.passError(error, __func__) ; } } ;
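// Editor's note -- illustrative sketch. The small var_to_std / std_to_var / inverse
// kernels above exist only to translate between the moment representation used here
// (standard deviation with epsilon folded in, sigma = sqrt(scale * var + epsilon)) and
// what cuDNN expects: a raw variance for forward inference and an inverse standard
// deviation for the backward call. The host helpers below restate that round trip with
// scale = 1 so the relationship is explicit; they are not part of the original file.
#include <cmath>

static inline float std_from_var_sketch(float var, float epsilon) {
  return std::sqrt(var + epsilon);      // var_to_std with scale = 1
}
static inline float var_from_std_sketch(float s, float epsilon) {
  return s * s - epsilon;               // std_to_var: undoes the line above
}
static inline float inv_std_from_std_sketch(float s) {
  return 1.0f / s;                      // inverse: what cudnnBatchNormalizationBackward expects
}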
//#define MIN(x,y) (((x)>(y))?(y):(x)) #define MAX(x,y) (((x)>(y))?(x):(y)) #include "ssids/gpu/kernels/dtrsv.h" #define TRSM_TR_NBX 256 #define TRSM_TR_NBY 32 #define TRSM_TR_THREADSX 32 #define TRSM_TR_THREADSY 4 #define REDUCING_D_SOLVE_THREADS_PER_BLOCK 256 #define SCATTER_NB 256 #define GEMV_NX 32 #define GEMV_NY 32 #define GEMV_THREADSX 32 #define GEMV_THREADSY 4 #define ASSEMBLE_NB 128 using namespace spral::ssids::gpu; namespace /* anon */ { /* Perform the assignment xdense(:) = xsparse( idx(:) ) */ template <int threadsx, int threadsy> void __device__ gather(const int n, const int *const idx, const double *const xsparse, volatile double *const xdense) { int tid = threadsx*threadIdx.y + threadIdx.x; for(int i=tid; i<n; i+=threadsx*threadsy) xdense[i] = xsparse[ idx[i] ]; } /***********************************************************************/ /***********************************************************************/ /***********************************************************************/ struct gemv_transpose_lookup { int m; // number of rows of L (cols of L^T) for block int n; // number of cols of L (rows of L^T) for block const double *a; int lda; // leading dimension of a const int *rlist; int yoffset; // offset into y for answer }; /* This subroutine performs a matrix-vector multiplication y = Ax where * x is a sparse vector indexed into by rlist. * The lookup[] array is indexed into by the block id and specifies which * part of the matrix we're working on. * * Requires max(maxm + maxn*threadsx) shared memory. * Requires threadsy to exactly divide maxn. */ template <int threadsx, int threadsy, int maxm, int maxn> __launch_bounds__(threadsx*threadsy, 6) void __global__ gemv_transpose_sps_rhs(struct gemv_transpose_lookup *lookup, double *x, double *y ) { // Reuse shmem for two different purposes __shared__ volatile double shmem[maxn*threadsx]; volatile double *const partSum = shmem; volatile double *const xlocal = shmem; double partSumReg[maxn / threadsy]; // Assumes neat division lookup += blockIdx.x; int m = lookup->m; int n = lookup->n; const double *a = lookup->a; const int *rlist = lookup->rlist; int lda = lookup->lda; y += lookup->yoffset; /* Read x(rlist(:)) into xlocal(:) */ gather <threadsx,threadsy> (m, rlist, x, xlocal); __syncthreads(); /* Perform matrix-vector multiply with answer y in register that is then stored in partSum for later reduction. */ if(m==maxm) { volatile double *const xl = xlocal + threadIdx.x; #pragma unroll for(int iLoop=0; iLoop<maxn/threadsy; iLoop++) { // row int i = iLoop * threadsy + threadIdx.y; partSumReg[iLoop] = 0; if (i < n) { const double *arow = a+i*lda+threadIdx.x; for(int j=0; j<maxm; j+=threadsx) partSumReg[iLoop] += xl[j] * arow[j]; } } } else { #pragma unroll for(int iLoop=0; iLoop<maxn/threadsy; iLoop++) { // row int i = iLoop * threadsy + threadIdx.y; partSumReg[iLoop] = 0; if (i < n) { const double *arow = a+i*lda; for(int j=threadIdx.x; j<m; j+=threadsx) partSumReg[iLoop] += xlocal[j] * arow[j]; } } } __syncthreads(); // Wait till done with xlocal=shmem before using partSum #pragma unroll for(int iLoop=0; iLoop<maxn/threadsy; iLoop++) { // row int i = iLoop * threadsy + threadIdx.y; if (i < n) { partSum[i*threadsx+threadIdx.x] = partSumReg[iLoop]; } } __syncthreads(); /* Reduce partSum across threads to get y contribution from this block */ if(threadIdx.y==0) { for(int i=threadIdx.x; i<n; i+=threadsx) { double val = 0; /* The offset avoids large bank conflicts. 
*/ for(int j=threadIdx.x; j<threadsx+threadIdx.x; j++) { int j2 = (j >= threadsx ? j - threadsx : j); val += partSum[i*threadsx+j2]; } y[i] = val; } } } /***********************************************************************/ /***********************************************************************/ /***********************************************************************/ struct reducing_d_solve_lookup { int first_idx; // Index of supernode for thread 0 of this block. int m; // Number of columns in upd to reduce. int n; // Number of rows THIS BLOCK is responisble for. int ldupd; // Leading dimension of upd. int updoffset; // Offset into upd for supernode. const double *d; const int *perm; // Offset into perm for supernode. }; /* This subroutine performs two unrelated tasks and subtracts the result of the * first from the second. * Task 1: Sum along the rows of the m x n matrix upd. (This is reducing the * result of a previous gemv operation). * Task 2: Peform the special matrix-vector multiplication D^-1 P x where * D is a block diagonal matrix with 1x1 and 2x2 blocks, and * P is a (partial) permutation matrix, given by the vector perm. * The result x_2-x_1 is returned replacing the first column of upd. */ template <int threadsx, bool DSOLVE> void __global__ reducing_d_solve(struct reducing_d_solve_lookup *lookup, double *upd, const double *x ) { /* Read details from lookup */ lookup += blockIdx.x; int idx = lookup->first_idx + threadIdx.x; int m = lookup->m; int n = lookup->n; int ldupd = lookup->ldupd; upd += lookup->updoffset; const double *d = lookup->d; const int *perm = lookup->perm; /* Don't do anything on threads past end of arrays */ if(threadIdx.x>=m) return; /* Task 1: Sum upd and negate */ double val = upd[idx]; for(int j=1; j<n; j++) val += upd[j*ldupd+idx]; val = -val; /* Task 2: D solve (note that D is actually stored as inverse already) */ if(DSOLVE) { int rp = perm[idx]; if(idx!=0 && d[2*idx-1] != 0) { /* second part of 2x2 */ int rp2 = perm[idx-1]; val += d[2*idx-1] * x[rp2] + d[2*idx] * x[rp]; } else if (d[2*idx+1] != 0) { /* first part of 2x2 */ int rp2 = perm[idx+1]; val += d[2*idx] * x[rp] + d[2*idx+1] * x[rp2]; } else { /* 1x1 */ val += x[rp]*d[2*idx]; } } else { int rp = perm[idx]; val += x[rp]; } /* Store result as first column of upd */ upd[idx] = val; } /* This subroutine only performs the solve with D. For best performance, use * reducing_d_solve() instead. * Peform the special matrix-vector multiplication D^-1 P x where * D is a block diagonal matrix with 1x1 and 2x2 blocks, and * P is a (partial) permutation matrix, given by the vector perm. * The result is not returned in-place due to 2x2 pivots potentially * split between blocks. 
*/ template <int threadsx> void __global__ d_solve(struct reducing_d_solve_lookup *lookup, const double *x, double *y) { /* Read details from lookup */ lookup += blockIdx.x; int idx = lookup->first_idx + threadIdx.x; int m = lookup->m; const double *d = lookup->d; const int *perm = lookup->perm; /* Don't do anything on threads past end of arrays */ if(threadIdx.x>=m) return; /* D solve (note that D is actually stored as inverse already) */ int rp = perm[idx]; double val; if(idx!=0 && d[2*idx-1] != 0) { /* second part of 2x2 */ int rp2 = perm[idx-1]; val = d[2*idx-1] * x[rp2] + d[2*idx] * x[rp]; } else if (d[2*idx+1] != 0) { /* first part of 2x2 */ int rp2 = perm[idx+1]; val = d[2*idx] * x[rp] + d[2*idx+1] * x[rp2]; } else { /* 1x1 */ val = x[rp]*d[2*idx]; } /* Store result in y[] */ y[rp] = val; } /***********************************************************************/ /***********************************************************************/ /***********************************************************************/ struct scatter_lookup { int n; int src_offset; const int *index; int dest_offset; }; /* This subroutine performs the scatter operation dest( index(:) ) = src(:) */ void __global__ scatter(struct scatter_lookup *lookup, const double *src, double *dest ) { lookup += blockIdx.x; if(threadIdx.x >= lookup->n) return; // Skip on out of range threads src += lookup->src_offset; const int *index = lookup->index; dest += lookup->dest_offset; int idx = index[threadIdx.x]; dest[idx] = src[threadIdx.x]; } /* This subroutine performs the scatter operation dest( index(:) ) += src(:) */ void __global__ scatter_sum(struct scatter_lookup *lookup, const double *src, double *dest ) { lookup += blockIdx.x; if(threadIdx.x >= lookup->n) return; // Skip on out of range threads src += lookup->src_offset; const int *index = lookup->index; dest += lookup->dest_offset; int idx = index[threadIdx.x]; dest[idx] += src[threadIdx.x]; } /***********************************************************************/ /***********************************************************************/ /***********************************************************************/ struct lookups_gpu_bwd { int ngemv; int nrds; int ntrsv; int nscatter; struct gemv_transpose_lookup *gemv; struct reducing_d_solve_lookup *rds; struct trsv_lookup *trsv; struct scatter_lookup *scatter; }; /* * Perform y = Ax * Result y actually output as array with leading dimn m that must be summed * externally. 
*/ template <int threadsx, int threadsy, int maxm, int maxn> void __global__ simple_gemv(int m, int n, const double *a, int lda, const double *x, double *y) { a += blockIdx.x*maxm + (blockIdx.y*maxn)*lda; x += blockIdx.y*maxn; y += m*blockIdx.y + maxm*blockIdx.x; __shared__ volatile double partSum[maxm*threadsy]; m = MIN(maxm, m-blockIdx.x*maxm); n = MIN(maxn, n-blockIdx.y*maxn); volatile double *const ps = partSum + maxm*threadIdx.y; for(int j=threadIdx.x; j<m; j+=threadsx) { ps[j] = 0; } for(int i=threadIdx.y; i<n; i+=threadsy) { double xv = x[i]; for(int j=threadIdx.x; j<m; j+=threadsx) { ps[j] += a[i*lda+j]*xv; } } __syncthreads(); if(threadIdx.y==0) { for(int j=threadIdx.x; j<m; j+=threadsx) { double val = ps[j]; for(int i=1; i<threadsy; i++) { val += ps[j+i*maxm]; } y[j] = val; } } } struct gemv_notrans_lookup { int m; int n; const double *a; int lda; int x_offset; int y_offset; }; template <int threadsx, int threadsy, int maxm, int maxn> void __global__ simple_gemv_lookup(const double *x, double *y, struct gemv_notrans_lookup *lookup) { lookup += blockIdx.x; int m = lookup->m; int n = lookup->n; double const* a = lookup->a; int lda = lookup->lda; x += lookup->x_offset; y += lookup->y_offset; __shared__ volatile double partSum[maxm*threadsy]; volatile double *const ps = partSum + maxm*threadIdx.y; // Templated parameters for shortcut if (maxm <= threadsx) { ps[threadIdx.x] = 0; } else { for(int j=threadIdx.x; j<m; j+=threadsx) { ps[j] = 0; } } for(int i=threadIdx.y; i<n; i+=threadsy) { double xv = x[i]; // Templated parameters for shortcut - this reads out of bounds so shouldn't be uncommented /*if (maxm <= threadsx) { ps[threadIdx.x] += a[i*lda+threadIdx.x]*xv; } else {*/ for(int j=threadIdx.x; j<m; j+=threadsx) { ps[j] += a[i*lda+j]*xv; } //} } __syncthreads(); if(threadIdx.y==0) { // Templated parameters for shortcut if (maxm <= threadsx) { if (threadIdx.x < m) { double val = ps[threadIdx.x]; for(int i=1; i<threadsy; i++) { val += ps[threadIdx.x+i*maxm]; } y[threadIdx.x] = val; } } else { for(int j=threadIdx.x; j<m; j+=threadsx) { double val = ps[j]; for(int i=1; i<threadsy; i++) { val += ps[j+i*maxm]; } y[j] = val; } } } } struct reduce_notrans_lookup { int m; int n; int src_offset; int ldsrc; int dest_idx; int dest_offset; }; void __global__ gemv_reduce_lookup(const double *src, double **dest, int numLookups, struct reduce_notrans_lookup *lookup) { int offset = blockIdx.x * blockDim.y + threadIdx.y; if (offset >= numLookups) return; lookup += offset; int m = lookup->m; if(threadIdx.x>=m) return; int n = lookup->n; src += lookup->src_offset + threadIdx.x; int ldsrc = lookup->ldsrc; double *d = dest[lookup->dest_idx] + lookup->dest_offset; double val = 0; for(int i=0; i<n; i++) val += src[i*ldsrc]; d[threadIdx.x] -= val; } // FIXME: move to common header? struct assemble_blk_type { int cp; int blk; }; struct assemble_lookup { int m; int xend; int const* list; int x_offset; int contrib_idx; int contrib_offset; int nchild; int const* clen; int * const* clists; int * const* clists_direct; int cvalues_offset; int first; // First index of node. 
Used to shortcut searching }; struct assemble_lookup2 { int m; int nelim; int x_offset; int *const* list; int cvparent; int cvchild; int sync_offset; int sync_waitfor; }; void __device__ wait_for_sync(const int tid, volatile int *const sync, const int target) { if(tid==0) { while(*sync < target) {} } __syncthreads(); } void __global__ assemble_lvl(struct assemble_lookup2 *lookup, struct assemble_blk_type *blkdata, double *xlocal, int *next_blk, volatile int *sync, double * const* cvalues) { __shared__ volatile int thisblk; if(threadIdx.x==0) thisblk = atomicAdd(next_blk, 1); __syncthreads(); blkdata += thisblk; lookup += blkdata->cp; int blk = blkdata->blk; int m = lookup->m; int nelim = lookup->nelim; double *xparent = cvalues[lookup->cvparent]; volatile const double *xchild = cvalues[lookup->cvchild]; const int * list = *(lookup->list); xlocal += lookup->x_offset; // Wait for previous children to complete wait_for_sync(threadIdx.x, &(sync[lookup->sync_offset]), lookup->sync_waitfor); // Add block increments m = MIN(ASSEMBLE_NB, m-blk*ASSEMBLE_NB); list += blk*ASSEMBLE_NB; xchild += blk*ASSEMBLE_NB; // Perform actual assembly for(int i=threadIdx.x; i<m; i+=blockDim.x) { int j = list[i]; if(j < nelim) { xlocal[j] += xchild[i]; } else { xparent[j-nelim] += xchild[i]; } } // Wait for all threads to complete, then increment sync object __threadfence(); __syncthreads(); if(threadIdx.x==0) { atomicAdd((int*)&(sync[lookup->sync_offset]), 1); } } void __global__ grabx(double *xlocal, double **xstack, const double *x, struct assemble_lookup *lookup) { lookup += blockIdx.x; if(threadIdx.x>=lookup->m) return; int xend = lookup->xend; double *contrib = (threadIdx.x>=xend) ? xstack[lookup->contrib_idx]+lookup->contrib_offset : NULL; xlocal += lookup->x_offset; int row = lookup->list[threadIdx.x]; if(threadIdx.x<xend) xlocal[threadIdx.x] = x[row]; else contrib[threadIdx.x] = 0.0; } struct lookups_gpu_fwd { int nassemble; int nasm_sync; int nassemble2; int nasmblk; int ntrsv; int ngemv; int nreduce; int nscatter; struct assemble_lookup *assemble; struct assemble_lookup2 *assemble2; struct assemble_blk_type *asmblk; struct trsv_lookup *trsv; struct gemv_notrans_lookup *gemv; struct reduce_notrans_lookup *reduce; struct scatter_lookup *scatter; }; struct lookup_contrib_fwd { int nscatter; struct scatter_lookup *scatter; }; } /* anon namespace */ /******************************************************************************* * Following routines are exported with C binding so can be called from Fortran ******************************************************************************/ extern "C" { void spral_ssids_run_fwd_solve_kernels(bool posdef, struct lookups_gpu_fwd const* gpu, double *xlocal_gpu, double **xstack_gpu, double *x_gpu, double ** cvalues_gpu, double *work_gpu, int nsync, int *sync, int nasm_sync, int *asm_sync, const cudaStream_t *stream) { if(nsync>0) { for(int i=0; i<nsync; i+=65535) trsv_init <<<MIN(65535,nsync-i), 1, 0, *stream>>> (sync+2*i); CudaCheckError(); } for(int i=0; i<gpu->nassemble; i+=65535) grabx <<<MIN(65535,gpu->nassemble-i), ASSEMBLE_NB, 0, *stream>>> (xlocal_gpu, xstack_gpu, x_gpu, gpu->assemble+i); cudaMemset(asm_sync, 0, (1+gpu->nasm_sync)*sizeof(int)); for(int i=0; i<gpu->nasmblk; i+=65535) assemble_lvl <<<MIN(65535,gpu->nasmblk-i), ASSEMBLE_NB, 0, *stream>>> (gpu->assemble2, gpu->asmblk, xlocal_gpu, &asm_sync[0], &asm_sync[1], cvalues_gpu); CudaCheckError(); if(gpu->ntrsv>0) { if(posdef) { for(int i=0; i<gpu->ntrsv; i+=65535) trsv_ln_exec 
<double,TRSV_NB_TASK,THREADSX_TASK,THREADSY_TASK,false> <<<MIN(65535,gpu->ntrsv-i), dim3(THREADSX_TASK,THREADSY_TASK), 0, *stream>>> (xlocal_gpu, sync, gpu->trsv+i); } else { for(int i=0; i<gpu->ntrsv; i+=65535) trsv_ln_exec <double,TRSV_NB_TASK,THREADSX_TASK,THREADSY_TASK,true> <<<MIN(65535,gpu->ntrsv-i), dim3(THREADSX_TASK,THREADSY_TASK), 0, *stream>>> (xlocal_gpu, sync, gpu->trsv+i); } CudaCheckError(); } if(gpu->ngemv>0) { for(int i=0; i<gpu->ngemv; i+=65535) simple_gemv_lookup <GEMV_THREADSX, GEMV_THREADSY, GEMV_NX, GEMV_NY> <<<MIN(65535,gpu->ngemv-i), dim3(GEMV_THREADSX,GEMV_THREADSY), 0, *stream>>> (xlocal_gpu, work_gpu, gpu->gemv+i); CudaCheckError(); } if(gpu->nreduce>0) { if((gpu->nreduce + 4 - 1) / 4 > 65535) printf("Unhandled error! fwd solve gemv_reduce_lookup()\n"); gemv_reduce_lookup <<<dim3((gpu->nreduce + 4 - 1) / 4), dim3(GEMV_NX, 4), 0, *stream>>> (work_gpu, cvalues_gpu, gpu->nreduce, gpu->reduce); CudaCheckError(); } for(int i=0; i<gpu->nscatter; i+=65535) scatter <<<MIN(65535,gpu->nscatter-i), SCATTER_NB, 0, *stream>>> (gpu->scatter+i, xlocal_gpu, x_gpu); CudaCheckError(); } void spral_ssids_run_d_solve_kernel(double *x_gpu, double *y_gpu, struct lookups_gpu_bwd *gpu, const cudaStream_t *stream) { if(gpu->nrds>0) { d_solve <REDUCING_D_SOLVE_THREADS_PER_BLOCK> <<<gpu->nrds, REDUCING_D_SOLVE_THREADS_PER_BLOCK, 0, *stream>>> (gpu->rds, x_gpu, y_gpu); CudaCheckError(); } } void spral_ssids_run_bwd_solve_kernels(bool dsolve, bool unit_diagonal, double *x_gpu, double *work_gpu, int nsync, int *sync_gpu, struct lookups_gpu_bwd *gpu, const cudaStream_t *stream) { /* === Kernel Launches === */ if(nsync>0) { for(int i=0; i<nsync; i+=65535) trsv_init <<<MIN(65535,nsync-i), 1, 0, *stream>>> (sync_gpu+2*i); CudaCheckError(); } if(gpu->ngemv>0) { for(int i=0; i<gpu->ngemv; i+=65535) gemv_transpose_sps_rhs <TRSM_TR_THREADSX, TRSM_TR_THREADSY, TRSM_TR_NBX, TRSM_TR_NBY> <<<MIN(65535,gpu->ngemv-i), dim3(TRSM_TR_THREADSX,TRSM_TR_THREADSY), 0, *stream>>> (gpu->gemv+i, x_gpu, work_gpu); CudaCheckError(); } if(gpu->nrds>0) { if(dsolve) { for(int i=0; i<gpu->nrds; i+=65535) reducing_d_solve <REDUCING_D_SOLVE_THREADS_PER_BLOCK, true> <<<MIN(65535,gpu->nrds-i), REDUCING_D_SOLVE_THREADS_PER_BLOCK, 0, *stream>>> (gpu->rds+i, work_gpu, x_gpu); } else { for(int i=0; i<gpu->nrds; i+=65535) reducing_d_solve <REDUCING_D_SOLVE_THREADS_PER_BLOCK, false> <<<MIN(65535,gpu->nrds-i), REDUCING_D_SOLVE_THREADS_PER_BLOCK, 0, *stream>>> (gpu->rds+i, work_gpu, x_gpu); } CudaCheckError(); } if(gpu->ntrsv>0) { if(unit_diagonal) { for(int i=0; i<gpu->ntrsv; i+=65535) trsv_lt_exec <double,TRSV_NB_TASK,THREADSX_TASK,THREADSY_TASK,true> <<<MIN(65535,gpu->ntrsv-i), dim3(THREADSX_TASK,THREADSY_TASK), 0, *stream>>> (gpu->trsv+i, work_gpu, sync_gpu); } else { for(int i=0; i<gpu->ntrsv; i+=65535) trsv_lt_exec <double,TRSV_NB_TASK,THREADSX_TASK,THREADSY_TASK,false> <<<MIN(65535,gpu->ntrsv-i), dim3(THREADSX_TASK,THREADSY_TASK), 0, *stream>>> (gpu->trsv+i, work_gpu, sync_gpu); } CudaCheckError(); } if(gpu->nscatter>0) { for(int i=0; i<gpu->nscatter; i+=65535) scatter <<<MIN(65535,gpu->nscatter-i), SCATTER_NB, 0, *stream>>> (gpu->scatter+i, work_gpu, x_gpu); CudaCheckError(); } } void spral_ssids_run_slv_contrib_fwd(struct lookup_contrib_fwd const* gpu, double* x_gpu, double const* xstack_gpu, const cudaStream_t *stream) { if(gpu->nscatter>0) { for(int i=0; i<gpu->nscatter; i+=65535) scatter_sum <<<MIN(65535,gpu->nscatter-i), SCATTER_NB, 0, *stream>>> (gpu->scatter+i, xstack_gpu, x_gpu); CudaCheckError(); } } } // end extern 
"C"
#pragma once #include <math.h> #include <time.h> #include <list> #include <random> #include <gunrock/graphio/utils.cuh> #include <gunrock/util/sort_omp.cuh> #include <gunrock/util/parameters.h> #include <gunrock/util/test_utils.h> namespace gunrock { namespace graphio { namespace rgg { typedef std::mt19937 Engine; typedef std::uniform_real_distribution<double> Distribution; template <typename T> inline T SqrtSum(T x, T y) { return sqrt(x * x + y * y); } template <typename T> T P2PDistance(T co_x0, T co_y0, T co_x1, T co_y1) { return SqrtSum(co_x0 - co_x1, co_y0 - co_y1); } class RggPoint { public: double x, y; long long node; RggPoint() {} RggPoint(double x, double y, long long node) { this->x = x; this->y = y; this->node = node; } }; // inline bool operator< (const RggPoint& lhs, const RggPoint& rhs) template <typename Point> bool XFirstPointCompare(Point lhs, Point rhs) { if (lhs.x < rhs.x) return true; if (lhs.x > rhs.x) return false; if (lhs.y < rhs.y) return true; return false; } template <typename T> bool PureTwoFactor(T x) { if (x < 3) return true; while (x > 0) { if ((x % 2) != 0) return false; x /= 2; } return true; } template <typename ParametersT> cudaError_t UseParameters(ParametersT &parameters, std::string graph_prefix = "") { cudaError_t retval = cudaSuccess; GUARD_CU(parameters.template Use<double>( graph_prefix + "rgg-thfactor", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 0.55, "Threshold-factor", __FILE__, __LINE__)); GUARD_CU(parameters.template Use<double>( graph_prefix + "rgg-threshold", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 0, "Threshold, default is thfactor * sqrt(log(#nodes) / #nodes)", __FILE__, __LINE__)); return retval; } /* * @brief Build random geometry graph (RGG). * * @tparam WITH_VALUES Whether or not associate with per edge weight values. * @tparam VertexT Vertex identifier. * @tparam Value Value type. * @tparam SizeT Graph size type. 
* * @param[in] nodes * @param[in] graph * @param[in] threshould * @param[in] undirected * @param[in] value_multipiler * @param[in] value_min * @param[in] seed */ template <typename GraphT> cudaError_t Build(util::Parameters &parameters, GraphT &graph, std::string graph_prefix = "") { cudaError_t retval = cudaSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::CsrT CsrT; bool quiet = parameters.Get<bool>("quiet"); std::string dataset = "rgg_"; // bool undirected = !parameters.Get<bool>(graph_prefix + "directed"); SizeT scale = parameters.Get<SizeT>(graph_prefix + "graph-scale"); SizeT num_nodes = 0; if (!parameters.UseDefault(graph_prefix + "graph-nodes")) { num_nodes = parameters.Get<SizeT>(graph_prefix + "graph-nodes"); dataset = dataset + std::to_string(num_nodes) + "_"; } else { num_nodes = 1 << scale; dataset = dataset + "n" + std::to_string(scale) + "_"; } double thfactor = parameters.Get<double>(graph_prefix + "rgg-thfactor"); double threshold = 0; if (!parameters.UseDefault(graph_prefix + "rgg-threshold")) { threshold = parameters.Get<double>(graph_prefix + "rgg-threshold"); dataset = dataset + "t" + std::to_string(threshold); } else { threshold = thfactor * sqrt(log(num_nodes) / num_nodes); dataset = dataset + std::to_string(threshold); } if (parameters.UseDefault("dataset")) parameters.Set<std::string>("dataset", dataset); bool random_edge_values = parameters.Get<bool>(graph_prefix + "random-edge-values"); double edge_value_range = parameters.Get<double>(graph_prefix + "edge-value-range"); double edge_value_min = parameters.Get<double>(graph_prefix + "edge-value-min"); int seed = time(NULL); if (parameters.UseDefault(graph_prefix + "graph-seed")) seed = parameters.Get<int>(graph_prefix + "graph-seed"); util::PrintMsg("Generating RGG " + graph_prefix + "graph, threshold = " + std::to_string(threshold) + ", seed = " + std::to_string(seed) + "...", !quiet); util::CpuTimer cpu_timer; cpu_timer.Start(); int reserved_size = 50; util::Location target = util::HOST; SizeT num_edges = 0; long long row_length = 1.0 / threshold + 1; long long reserved_factor2 = 8; long long initial_length = reserved_factor2 * num_nodes / row_length / row_length; util::Array1D<SizeT, RggPoint> points; util::Array1D<SizeT, SizeT> row_offsets; util::Array1D<SizeT, VertexT> col_index_; util::Array1D<SizeT, ValueT> values_; util::Array1D<SizeT, SizeT> offsets; util::Array1D<SizeT, VertexT *> blocks; util::Array1D<SizeT, SizeT> block_size; util::Array1D<SizeT, SizeT> block_length; points.SetName("graphio::rgg::points"); row_offsets.SetName("graphio::rgg::row_offsets"); col_index_.SetName("graphio::rgg::col_index_"); values_.SetName("graphio::rgg::values_"); offsets.SetName("graphio::rgg::offsets"); blocks.SetName("graphio::rgg::blocks"); block_size.SetName("graphio::rgg::block_size"); block_length.SetName("graphio::rgg::block_length"); GUARD_CU(points.Allocate(num_nodes + 1, target)); GUARD_CU(row_offsets.Allocate(num_nodes + 1, target)); GUARD_CU(col_index_.Allocate(reserved_size * num_nodes, target)); if (GraphT::FLAG & graph::HAS_EDGE_VALUES) GUARD_CU(values_.Allocate(reserved_size * num_nodes, target)); SizeT tLength = row_length * row_length + 1; GUARD_CU(blocks.Allocate(tLength, target)); GUARD_CU(block_size.Allocate(tLength, target)); GUARD_CU(block_length.Allocate(tLength, target)); if (initial_length < 4) initial_length = 4; GUARD_CU(block_size.ForEach( block_length, blocks, [] __host__ __device__(SizeT & 
size, SizeT & length, VertexT * &block) { size = 0; length = 0; block = NULL; }, tLength, target)); #pragma omp parallel do { int thread_num = omp_get_thread_num(); int num_threads = omp_get_num_threads(); SizeT node_start = (long long)(num_nodes)*thread_num / num_threads; SizeT node_end = (long long)(num_nodes) * (thread_num + 1) / num_threads; SizeT counter = 0; VertexT *col_index = col_index_ + reserved_size * node_start; ValueT *values = (GraphT::FLAG & graph::HAS_EDGE_VALUES) ? values_ + reserved_size * node_start : NULL; unsigned int seed_ = seed + 805 * thread_num; Engine engine(seed_); Distribution distribution(0.0, 1.0); #pragma omp single { retval = offsets.Allocate(num_threads + 1, target); } if (retval) break; for (VertexT node = node_start; node < node_end; node++) { points[node].x = distribution(engine); points[node].y = distribution(engine); points[node].node = node; } #pragma omp barrier #pragma omp single { std::stable_sort(points + 0, points + num_nodes, XFirstPointCompare<RggPoint>); } for (VertexT node = node_start; node < node_end; node++) { SizeT x_index = points[node].x / threshold; SizeT y_index = points[node].y / threshold; SizeT block_index = x_index * row_length + y_index; #pragma omp atomic block_size[block_index]++; } #pragma omp barrier #pragma omp single { for (SizeT i = 0; i < row_length * row_length; i++) if (block_size[i] != 0) blocks[i] = new VertexT[block_size[i]]; } for (VertexT node = node_start; node < node_end; node++) { double co_x0 = points[node].x; // co_x[node]; double co_y0 = points[node].y; // co_y[node]; // RggPoint point(co_x0, co_y0, node); SizeT x_index = co_x0 / threshold; SizeT y_index = co_y0 / threshold; SizeT block_index = x_index * row_length + y_index; SizeT pos = 0; #pragma omp atomic capture { pos = block_length[block_index]; block_length[block_index] += 1; } blocks[block_index][pos] = node; } #pragma omp barrier for (VertexT node = node_start; node < node_end; node++) { row_offsets[node] = counter; double co_x0 = points[node].x; double co_y0 = points[node].y; SizeT x_index = co_x0 / threshold; SizeT y_index = co_y0 / threshold; SizeT x_start = x_index < 2 ? 0 : x_index - 2; SizeT y_start = y_index < 2 ? 0 : y_index - 2; for (SizeT x1 = x_start; x1 <= x_index + 2; x1++) for (SizeT y1 = y_start; y1 <= y_index + 2; y1++) { if (x1 >= row_length || y1 >= row_length) continue; SizeT block_index = x1 * row_length + y1; VertexT *block = blocks[block_index]; for (SizeT i = 0; i < block_length[block_index]; i++) { VertexT peer = block[i]; if (node >= peer) continue; double co_x1 = points[peer].x; // co_x[peer]; double co_y1 = points[peer].y; // co_y[peer]; double dis_x = co_x0 - co_x1; double dis_y = co_y0 - co_y1; if (fabs(dis_x) > threshold || fabs(dis_y) > threshold) continue; double dis = SqrtSum(dis_x, dis_y); if (dis > threshold) continue; col_index[counter] = peer; if (GraphT::FLAG & graph::HAS_EDGE_VALUES) { if (random_edge_values) { values[counter] = distribution(engine) * edge_value_range + edge_value_min; } else { values[counter] = 1; } } counter++; } } } offsets[thread_num + 1] = counter; #pragma omp barrier #pragma omp single { offsets[0] = 0; for (int i = 0; i < num_threads; i++) offsets[i + 1] += offsets[i]; num_edges = offsets[num_threads]; retval = graph.CsrT::Allocate(num_nodes, num_edges, target); // coo = (EdgeTupleType*) malloc (sizeof(EdgeTupleType) * edges); } if (retval) break; SizeT offset = offsets[thread_num]; for (VertexT node = node_start; node < node_end; node++) { SizeT end_edge = (node != node_end - 1 ? 
row_offsets[node + 1] : counter); graph.CsrT::row_offsets[node] = offset + row_offsets[node]; for (SizeT edge = row_offsets[node]; edge < end_edge; edge++) { // VertexT peer = col_index[edge]; graph.CsrT::column_indices[offset + edge] = col_index[edge]; if (GraphT::FLAG & graph::HAS_EDGE_VALUES) graph.CsrT::edge_values[offset + edge] = values[edge]; } } col_index = NULL; values = NULL; } while (false); if (retval) return retval; graph.CsrT::row_offsets[num_nodes] = num_edges; cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); util::PrintMsg("RGG generated in " + std::to_string(elapsed) + " ms.", !quiet); SizeT counter = 0; for (SizeT i = 0; i < row_length * row_length; i++) if (block_size[i] != 0) { counter += block_length[i]; delete[] blocks[i]; blocks[i] = NULL; } GUARD_CU(row_offsets.Release()); GUARD_CU(offsets.Release()); GUARD_CU(points.Release()); GUARD_CU(blocks.Release()); GUARD_CU(block_size.Release()); GUARD_CU(block_length.Release()); GUARD_CU(col_index_.Release()); GUARD_CU(values_.Release()); return retval; } template <typename GraphT, bool CSR_SWITCH> struct CsrSwitch { static cudaError_t Load(util::Parameters &parameters, GraphT &graph, std::string graph_prefix = "") { cudaError_t retval = cudaSuccess; GUARD_CU(Build(parameters, graph, graph_prefix)); GUARD_CU(graph.FromCsr(graph, util::HOST, 0, parameters.Get<bool>("quiet"), true)); return retval; } }; template <typename GraphT> struct CsrSwitch<GraphT, false> { static cudaError_t Load(util::Parameters &parameters, GraphT &graph, std::string graph_prefix = "") { typedef graph::Csr<typename GraphT::VertexT, typename GraphT::SizeT, typename GraphT::ValueT, GraphT::FLAG | graph::HAS_CSR, GraphT::cudaHostRegisterFlag> CsrT; cudaError_t retval = cudaSuccess; CsrT csr; GUARD_CU(Build(parameters, csr, graph_prefix)); GUARD_CU(graph.FromCsr(csr, util::HOST, 0, parameters.Get<bool>("quiet"), false)); GUARD_CU(csr.Release()); return retval; } }; template <typename GraphT> cudaError_t Load(util::Parameters &parameters, GraphT &graph_, std::string graph_prefix = "") { return CsrSwitch<GraphT, (GraphT::FLAG & gunrock::graph::HAS_CSR) != 0>::Load( parameters, graph_, graph_prefix); } } // namespace rgg } // namespace graphio } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
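// Editor's note -- illustrative sketch. The generator above drops num_nodes points
// uniformly into the unit square and links two points whenever their Euclidean
// distance is at most the threshold, which defaults to thfactor * sqrt(log(n) / n)
// with thfactor = 0.55; the row_length x row_length bucketing (cells of side roughly
// one threshold) is only an acceleration so that each point inspects nearby cells
// instead of all points. The brute-force host reference below states the edge rule
// directly; helper names are the editor's own.
#include <cmath>

static inline double rgg_default_threshold_sketch(long long num_nodes,
                                                  double thfactor = 0.55) {
  return thfactor * std::sqrt(std::log((double)num_nodes) / (double)num_nodes);
}

static inline bool rgg_has_edge_sketch(double x0, double y0, double x1, double y1,
                                       double threshold) {
  double dx = x0 - x1, dy = y0 - y1;
  // cheap axis-aligned rejection, mirroring the fabs() checks in the loop above
  if (std::fabs(dx) > threshold || std::fabs(dy) > threshold) return false;
  return std::sqrt(dx * dx + dy * dy) <= threshold;
}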
/** \addtogroup cudpp_app * */ #include <cstdlib> #include <cstdio> #include <assert.h> #include "cuda_util.h" #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_plan.h" #include "cudpp_globals.h" #include "kernel/spmvmult_kernel.cuh" extern "C" void cudppSegmentedScanDispatch (void *d_out, const void *d_idata, const unsigned int *d_iflags, int numElements, const CUDPPSegmentedScanPlan *plan ); /** @name Sparse Matrix-Vector Multiply Functions * @{ */ /** @brief Perform matrix-vector multiply for sparse matrices and vectors of arbitrary size. * * This function performs the sparse matrix-vector multiply by executing four steps. * * 1. The sparseMatrixVectorFetchAndMultiply() kernel does an element-wise multiplication of a * each element e in CUDPPSparseMatrixVectorMultiplyPlan::m_d_A with the corresponding * (i.e. in the same row as the column index of e in CUDPPSparseMatrixVectorMultiplyPlan::m_d_A) * element in d_x and stores the product in CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod. It * also sets all elements of CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags to 0. * * 2. The sparseMatrixVectorSetFlags() kernel iterates over each element in * CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowIndex and sets * the corresponding position (indicated by CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowIndex) in * CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags to 1. * * 3. Perform a segmented scan on CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod with * CUDPPSparseMatrixVectorMultiplyPlan::m_d_flags as the flag vector. The output is * stored in CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod. * * 4. The yGather() kernel goes over each element in CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowFinalIndex * and picks the corresponding element (indicated by CUDPPSparseMatrixVectorMultiplyPlan::m_d_rowFinalIndex) * element from CUDPPSparseMatrixVectorMultiplyPlan::m_d_prod and stores it in d_y. 
* * @param[out] d_y The output array for the sparse matrix-vector multiply (y vector) * @param[in] d_x The input x vector * @param[in] plan Pointer to the CUDPPSparseMatrixVectorMultiplyPlan object which stores the * configuration and pointers to temporary buffers needed by this routine */ template <class T> void sparseMatrixVectorMultiply( T *d_y, const T *d_x, const CUDPPSparseMatrixVectorMultiplyPlan *plan ) { unsigned int numEltsBlocks = max(1, (int)ceil((double)plan->m_numNonZeroElements / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); bool fullBlock = (plan->m_numNonZeroElements == (numEltsBlocks * SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)); dim3 gridElts(max(1, numEltsBlocks), 1, 1); dim3 threads(SCAN_CTA_SIZE, 1, 1); if (fullBlock) sparseMatrixVectorFetchAndMultiply<T, true><<<gridElts, threads>>> (plan->m_d_flags, (T*)plan->m_d_prod, (T*)plan->m_d_A, d_x, plan->m_d_index, (unsigned)plan->m_numNonZeroElements); else sparseMatrixVectorFetchAndMultiply<T, false><<<gridElts, threads>>> (plan->m_d_flags, (T*)plan->m_d_prod, (T*)plan->m_d_A, d_x, plan->m_d_index, (unsigned)plan->m_numNonZeroElements); unsigned int numRowBlocks = max(1, (int)ceil((double)plan->m_numRows / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); dim3 gridRows(max(1, numRowBlocks), 1, 1); sparseMatrixVectorSetFlags<<<gridRows, threads>>> (plan->m_d_flags, plan->m_d_rowIndex, (unsigned)plan->m_numRows); cudppSegmentedScanDispatch ((T*)plan->m_d_prod, (const T*)plan->m_d_prod, plan->m_d_flags, (unsigned)plan->m_numNonZeroElements, plan->m_segmentedScanPlan); yGather<<<gridRows, threads>>> (d_y, (T*)plan->m_d_prod, plan->m_d_rowFinalIndex, (unsigned)plan->m_numRows); } #ifdef __cplusplus extern "C" { #endif // file scope /** @brief Allocate intermediate product, flags and rowFindx (index of the last * element of each row) array . 
* * @param[in] plan Pointer to CUDPPSparseMatrixVectorMultiplyPlan class containing sparse * matrix-vector multiply options, number of non-zero elements and number * of rows which is used to compute storage requirements * @param[in] A The matrix A * @param[in] rowindx The indices of elements in A which are the first element of their row * @param[in] indx The column number for each element in A */ void allocSparseMatrixVectorMultiplyStorage(CUDPPSparseMatrixVectorMultiplyPlan *plan, const void *A, const unsigned int *rowindx, const unsigned int *indx) { switch(plan->m_config.datatype) { case CUDPP_INT: CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_A, (int *)A, plan->m_numNonZeroElements * sizeof(int), cudaMemcpyHostToDevice) ); break; case CUDPP_UINT: CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_A, (unsigned int *)A, plan->m_numNonZeroElements * sizeof(unsigned int), cudaMemcpyHostToDevice) ); break; case CUDPP_FLOAT: CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_prod), plan->m_numNonZeroElements * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc(&(plan->m_d_A), plan->m_numNonZeroElements * sizeof(float))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_A, (float *)A, plan->m_numNonZeroElements * sizeof(float), cudaMemcpyHostToDevice) ); break; default: break; } CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_flags), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_index), plan->m_numNonZeroElements * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_rowFinalIndex), plan->m_numRows * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void **)&(plan->m_d_rowIndex), plan->m_numRows * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpy(plan->m_d_rowFinalIndex, plan->m_rowFinalIndex, plan->m_numRows * sizeof(unsigned int), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(plan->m_d_rowIndex, rowindx, plan->m_numRows * sizeof(unsigned int), cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaMemcpy(plan->m_d_index, indx, plan->m_numNonZeroElements * sizeof(unsigned int), cudaMemcpyHostToDevice) ); CUDA_CHECK_ERROR("allocSparseMatrixVectorMultiplyStorage"); } /** @brief Deallocate intermediate product, flags and rowFindx (index of the last * element of each row) array . * * These arrays must have been allocated by allocSparseMatrixVectorMultiplyStorage(), which is called * by the constructor of CUDPPSparseMatrixVectorMultiplyPlan. * * @param[in] plan Pointer to CUDPPSparseMatrixVectorMultiplyPlan plan initialized by its constructor. */ void freeSparseMatrixVectorMultiplyStorage(CUDPPSparseMatrixVectorMultiplyPlan *plan) { CUDA_CHECK_ERROR("freeSparseMatrixVectorMultiply"); cudaFree(plan->m_d_prod); cudaFree(plan->m_d_A); cudaFree((void*)plan->m_d_flags); cudaFree((void*)plan->m_d_index); cudaFree((void*)plan->m_d_rowFinalIndex); cudaFree((void*)plan->m_d_rowIndex); plan->m_d_prod = 0; plan->m_d_A = 0; plan->m_d_flags = 0; plan->m_d_index = 0; plan->m_d_rowFinalIndex = 0; plan->m_d_rowIndex = 0; plan->m_numNonZeroElements = 0; plan->m_numRows = 0; } /** @brief Dispatch function to perform a sparse matrix-vector multiply * with the specified configuration. 
* * This is the dispatch routine which calls sparseMatrixVectorMultiply() with * appropriate template parameters and arguments * * @param[out] d_y The output vector for y = A*x * @param[in] d_x The x vector for y = A*x * @param[in] plan The sparse matrix plan and data */ void cudppSparseMatrixVectorMultiplyDispatch ( void *d_y, const void *d_x, const CUDPPSparseMatrixVectorMultiplyPlan *plan ) { switch(plan->m_config.datatype) { case CUDPP_INT: sparseMatrixVectorMultiply<int>((int *)d_y, (int *)d_x, plan); break; case CUDPP_UINT: sparseMatrixVectorMultiply<unsigned int>((unsigned int *)d_y, (unsigned int *)d_x, plan); break; case CUDPP_FLOAT: sparseMatrixVectorMultiply<float>((float *)d_y, (float *)d_x, plan); break; default: break; } } #ifdef __cplusplus } #endif /** @} */ // end sparse matrix-vector multiply functions /** @} */ // end cudpp_app
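// Editorial note: the kernel sequence above (fetch-and-multiply, flag the
// first element of each row, segmented inclusive scan, gather at
// rowFinalIndex) is equivalent to the host-side reference below. The helper
// name spmvReference and its signature are illustrative only and are not part
// of the CUDPP API; this is a minimal sketch of the same computation,
// assuming every row has at least one non-zero element.
#include <cstddef>
#include <vector>

template <class T>
void spmvReference(std::vector<T> &y,
                   const std::vector<T> &a,                    // non-zero values
                   const std::vector<unsigned> &indx,          // column of each non-zero
                   const std::vector<unsigned> &rowIndex,      // first non-zero of each row
                   const std::vector<unsigned> &rowFinalIndex, // last non-zero of each row
                   const std::vector<T> &x)
{
    const std::size_t nnz = a.size();
    const std::size_t numRows = rowIndex.size();

    // Step 1: element-wise products, as in sparseMatrixVectorFetchAndMultiply.
    std::vector<T> prod(nnz);
    for (std::size_t k = 0; k < nnz; ++k)
        prod[k] = a[k] * x[indx[k]];

    // Step 2: segment flags at the start of each row, as in sparseMatrixVectorSetFlags.
    std::vector<unsigned> flags(nnz, 0);
    for (std::size_t r = 0; r < numRows; ++r)
        flags[rowIndex[r]] = 1;

    // Step 3: segmented inclusive sum (cudppSegmentedScanDispatch), restarting at each flag.
    for (std::size_t k = 1; k < nnz; ++k)
        if (!flags[k])
            prod[k] += prod[k - 1];

    // Step 4: the running sum at the last element of each row is y[r] (yGather).
    y.resize(numRows);
    for (std::size_t r = 0; r < numRows; ++r)
        y[r] = prod[rowFinalIndex[r]];
}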
#include <thrust/device_ptr.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include "gtn/cuda/cuda.h" #include "gtn/hd_span.h" #include "gtn/cuda/functions.h" using namespace gtn::detail; namespace gtn { namespace cuda { namespace detail { namespace { typedef Graph::SharedGraph GraphData; // A resource manager struct for gradient data. struct GradInfo { int* first; int* second; ~GradInfo() { CUDA_CHECK(cudaFree(first)); CUDA_CHECK(cudaFree(second)); }; }; struct ExploreNodeAndArcs { int2 arcPair; int2 nodePair; int nodeIdx; bool exploreBoth{false}; bool exploreFirst{false}; bool exploreSecond{false}; }; struct ExploreState { int first; int second; bool followFirst; bool followSecond; }; inline int divUp(int x, int y) { return (x + y - 1) / y; } __device__ inline size_t stateToIndex( const int first, const int second, int numFirst, bool followFirst, bool followSecond) { size_t offset = followFirst ? 1 : (followSecond ? 2 : 0); return 3 * (numFirst * second + first) + offset; } __device__ inline ExploreState indexToState(size_t n, int numFirst) { ExploreState state; auto offset = n % 3; state.followFirst = (offset == 1); state.followSecond = (offset == 2); n /= 3; state.first = n % numFirst; state.second = n / numFirst; return state; } bool checkAnyTrue(const HDSpan<bool>& flags) { thrust::device_ptr<const bool> tPtr(flags.data()); return thrust::any_of(tPtr, tPtr + flags.size(), thrust::identity<bool>()); } void setFalse(HDSpan<bool>& span) { if (span.size() != 0) { cuda::detail::fill(span.data(), false, span.size()); } } std::tuple<int*, int> prefixSumScan(const bool* input, size_t numElts) { const size_t scanNumElts = numElts + 1; HDSpan<int> output(scanNumElts, 0, Device::CUDA); thrust::device_ptr<const bool> iPtr(input); thrust::device_ptr<int> oPtr(output.data()); thrust::exclusive_scan(iPtr, iPtr + numElts, oPtr, (int) 0); int sum = 0; if (numElts > 0) { bool lastVal; CUDA_CHECK(cudaMemcpy((void*)(&sum), (void* )(&(output[scanNumElts-2])), sizeof(int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy((void*)(&lastVal), (void* )(&(input[scanNumElts-2])), sizeof(bool), cudaMemcpyDeviceToHost)); sum += lastVal; } CUDA_CHECK(cudaMemcpy((void*)(&(output[scanNumElts-1])),(void*)(&sum), sizeof(int), cudaMemcpyHostToDevice)); return std::make_tuple(output.data(), sum); } std::tuple<int*, int> prefixSumScan(const int* input, size_t numElts) { const size_t scanNumElts = numElts + 1; HDSpan<int> output(scanNumElts, 0, Device::CUDA); thrust::device_ptr<const int> iPtr(input); thrust::device_ptr<int> oPtr(output.data()); thrust::inclusive_scan(iPtr, iPtr + numElts, oPtr + 1); int sum = 0; CUDA_CHECK(cudaMemcpy((void *)(&sum), (void *)(&(output[scanNumElts-1])), sizeof(int), cudaMemcpyDeviceToHost)); return std::make_tuple(output.data(), sum); } __device__ size_t binarySearchBinIndex(const int* bins, int size, int tid) { size_t lIdx = 0; size_t rIdx = size - 1; while (lIdx <= rIdx) { size_t intervalIdx = (lIdx + rIdx) / 2; const int lVal = bins[intervalIdx]; const int rVal = bins[intervalIdx + 1]; if (tid >= rVal) { lIdx = intervalIdx + 1; } else if (tid < lVal) { assert(intervalIdx >= 1); rIdx = intervalIdx - 1; } else { return intervalIdx; } } assert(false); return 0; } __global__ void calculateArcCrossProductForwardKernel( const HDSpan<int> arcOffsets1, const HDSpan<int> arcOffsets2, const HDSpan<int> exploreIndices, int* arcCrossProductOffset, int numNodesFirst) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < exploreIndices.size()) { auto 
state = indexToState(exploreIndices[gTid], numNodesFirst); const int numArcsFirst = arcOffsets1[state.first + 1] - arcOffsets1[state.first]; const int numArcsSecond = arcOffsets2[state.second + 1] - arcOffsets2[state.second]; arcCrossProductOffset[gTid] = numArcsFirst * numArcsSecond; if (numArcsSecond == 0 && !state.followSecond) { arcCrossProductOffset[gTid] = numArcsFirst; } if (numArcsFirst == 0 && !state.followFirst) { arcCrossProductOffset[gTid] = numArcsSecond; } } } __global__ void calculateArcCrossProductBackwardKernel( const HDSpan<int> arcOffsets1, const HDSpan<int> arcOffsets2, const HDSpan<int> exploreIndices, int* arcCrossProductOffset, int numNodesFirst) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < exploreIndices.size()) { auto state = indexToState(exploreIndices[gTid], numNodesFirst); const int numArcsFirst = arcOffsets1[state.first + 1] - arcOffsets1[state.first]; const int numArcsSecond = arcOffsets2[state.second + 1] - arcOffsets2[state.second]; if (state.followFirst) { arcCrossProductOffset[gTid] = numArcsFirst; } else if (state.followSecond) { arcCrossProductOffset[gTid] = numArcsSecond; } else { arcCrossProductOffset[gTid] = numArcsFirst * numArcsSecond; } } } // Takes a pair of nodes, where each member of pair comes from a different // graph and calculate a vector of number of arcs in the cross product of // arcs outgoing from each pair. int* calculateArcCrossProductOffset( const HDSpan<int>& exploreIndices, const GraphData g1, const GraphData g2, bool inOrOutArc) { int numToExploreNodePair = exploreIndices.size(); int* arcCrossProductOffset; CUDA_CHECK(cudaMalloc((void **)(&(arcCrossProductOffset)), sizeof(int) * numToExploreNodePair)); const int NT = 128; const int gridSize = divUp(numToExploreNodePair, NT); if (inOrOutArc) { calculateArcCrossProductBackwardKernel<<<gridSize, NT, 0, 0>>>( g1.inArcOffset, g2.inArcOffset, exploreIndices, arcCrossProductOffset, g1.numNodes); } else { calculateArcCrossProductForwardKernel<<<gridSize, NT, 0, 0>>>( g1.outArcOffset, g2.outArcOffset, exploreIndices, arcCrossProductOffset, g1.numNodes); } return arcCrossProductOffset; } // This function needs to be thread safe since multiple threads can // can call it and they will overlap on curIdx and dstIdx __device__ void calculateNumArcsAndNodesToExplore( int curIdx, int dstIdx, const HDSpan<bool> reachable, HDSpan<bool> newNodes, bool* toExplore, int* numOutArcs, int* numInArcs) { if (reachable[dstIdx]) { if (!newNodes[dstIdx]) { newNodes[dstIdx] = true; toExplore[dstIdx] = true; } // These are atomic increments // numOutArcs[curIdx]++; // numInArcs[dstIdx]++; atomicAdd(&(numOutArcs[curIdx]), 1); atomicAdd(&(numInArcs[dstIdx]), 1); } } // This function needs to be thread safe since multiple threads can // can call it __device__ void generateCombinedGraphArcs( int dstIdx, int curIdx, const int2& arcPair, const HDSpan<bool> reachable, const int* newNodesOffset, int* gradInfoFirst, int* gradInfoSecond, GraphData newGraphDP, float* weights, int ilabel, int olabel, float weight) { if (reachable[dstIdx]) { // Both of these increments are atomic // int inArcIdx = newGraphDP.inArcOffset[newNodesOffset[dstIdx]]++; // int outArcIdx = newGraphDP.outArcOffset[newNodesOffset[curIdx]]++; int inArcIdx = atomicAdd(&(newGraphDP.inArcOffset[newNodesOffset[dstIdx]]), 1); int outArcIdx = atomicAdd(&(newGraphDP.outArcOffset[newNodesOffset[curIdx]]), 1); // outArcIdx is also the arc identifier newGraphDP.outArcs[outArcIdx] = outArcIdx; newGraphDP.inArcs[inArcIdx] = outArcIdx; // 
Fill in everything else for this arc newGraphDP.ilabels[outArcIdx] = ilabel; newGraphDP.olabels[outArcIdx] = olabel; newGraphDP.srcNodes[outArcIdx] = newNodesOffset[curIdx]; newGraphDP.dstNodes[outArcIdx] = newNodesOffset[dstIdx]; weights[outArcIdx] = weight; gradInfoFirst[outArcIdx] = arcPair.x; gradInfoSecond[outArcIdx] = arcPair.y; } } __global__ void findReachableKernel( const GraphData g1, const GraphData g2, const int* arcCrossProductOffset, const HDSpan<int> exploreIndices, int totalArcs, HDSpan<bool> toExplore, HDSpan<bool> reachable) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < totalArcs) { auto idx = binarySearchBinIndex(arcCrossProductOffset, exploreIndices.size(), gTid); auto state = indexToState(exploreIndices[idx], g1.numNodes); int localIdx = gTid - arcCrossProductOffset[idx]; assert(localIdx >= 0); int firstArcIdx, secondArcIdx; if (state.followFirst) { firstArcIdx = g1.inArcs[g1.inArcOffset[state.first] + localIdx]; } else if (state.followSecond) { secondArcIdx = g2.inArcs[g2.inArcOffset[state.second] + localIdx]; } else { auto numArcsFirst = g1.inArcOffset[state.first + 1] - g1.inArcOffset[state.first]; firstArcIdx = g1.inArcs[g1.inArcOffset[state.first] + (localIdx % numArcsFirst)]; secondArcIdx = g2.inArcs[g2.inArcOffset[state.second] + (localIdx / numArcsFirst)]; } if (!(state.followFirst || state.followSecond) && (g1.olabels[firstArcIdx] == g2.ilabels[secondArcIdx])) { const int idx = stateToIndex( g1.srcNodes[firstArcIdx], g2.srcNodes[secondArcIdx], g1.numNodes, false, false); if (!reachable[idx]) { reachable[idx] = true; toExplore[idx] = true; } if (g1.olabels[firstArcIdx] != epsilon) { if (!reachable[idx + 1]) { reachable[idx + 1] = true; toExplore[idx + 1] = true; } if (!reachable[idx + 2]) { reachable[idx + 2] = true; toExplore[idx + 2] = true; } } } else if (state.followFirst && (g1.olabels[firstArcIdx] == epsilon)) { const int idx = stateToIndex( g1.srcNodes[firstArcIdx], state.second, g1.numNodes, false, false); if (!reachable[idx]) { reachable[idx] = true; toExplore[idx] = true; } if (!reachable[idx + 1]) { reachable[idx + 1] = true; toExplore[idx + 1] = true; } } else if (state.followSecond && (g2.ilabels[secondArcIdx] == epsilon)) { const int idx = stateToIndex( state.first, g2.srcNodes[secondArcIdx], g1.numNodes, false, false); if (!reachable[idx]) { reachable[idx] = true; toExplore[idx] = true; } if (!reachable[idx + 2]) { reachable[idx + 2] = true; toExplore[idx + 2] = true; } } } } __device__ ExploreNodeAndArcs getArcPairAndExploreState( const GraphData& g1, const GraphData& g2, const int* arcCrossProductOffset, const HDSpan<int>& exploreIndices, int gTid) { ExploreNodeAndArcs res; auto idx = binarySearchBinIndex(arcCrossProductOffset, exploreIndices.size(), gTid); auto state = indexToState(exploreIndices[idx], g1.numNodes); res.nodePair = make_int2(state.first, state.second); int localIdx = gTid - arcCrossProductOffset[idx]; assert(localIdx >= 0); auto numArcsFirst = g1.outArcOffset[state.first + 1] - g1.outArcOffset[state.first]; auto numArcsSecond = g2.outArcOffset[state.second + 1] - g2.outArcOffset[state.second]; assert(numArcsFirst > 0 || numArcsSecond > 0); res.nodeIdx = exploreIndices[idx]; int firstArcIdx, secondArcIdx; if (numArcsFirst > 0 && numArcsSecond > 0 ) { // Explore everything res.exploreFirst = !state.followSecond && (localIdx / numArcsFirst) == 0; res.exploreSecond = !state.followFirst && (localIdx % numArcsFirst) == 0; firstArcIdx = g1.outArcs[g1.outArcOffset[state.first] + localIdx % numArcsFirst]; 
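// localIdx enumerates the cross product of out-arc pairs for this node pair:
// the remainder (above) selects the arc of the first graph, the quotient
// (below) selects the arc of the second graph.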
secondArcIdx = g2.outArcs[g2.outArcOffset[state.second] + localIdx / numArcsFirst]; res.exploreBoth = g1.olabels[firstArcIdx] == g2.ilabels[secondArcIdx] && (g1.olabels[firstArcIdx] != epsilon || !(state.followFirst || state.followSecond)); } else if (numArcsSecond == 0 && !state.followSecond) { // Explore first res.exploreFirst = true; firstArcIdx = g1.outArcs[g1.outArcOffset[state.first] + localIdx]; } else if (numArcsFirst == 0 && !state.followFirst) { // Explore second res.exploreSecond = true; secondArcIdx = g2.outArcs[g2.outArcOffset[state.second] + localIdx]; } if (res.exploreFirst) { res.exploreFirst &= (g1.olabels[firstArcIdx] == epsilon); } if (res.exploreSecond) { res.exploreSecond &= (g2.ilabels[secondArcIdx] == epsilon); } res.arcPair = make_int2(firstArcIdx, secondArcIdx); return res; } __global__ void computeValidNodeAndArcKernel( const GraphData g1, const GraphData g2, const int* arcCrossProductOffset, const HDSpan<int> exploreIndices, const HDSpan<bool> reachable, int totalArcs, HDSpan<bool> toExplore, HDSpan<bool> newNodes, int* numInArcs, int* numOutArcs) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < totalArcs) { auto res = getArcPairAndExploreState( g1, g2, arcCrossProductOffset, exploreIndices, gTid); const int firstArcIdx = res.arcPair.x; const int secondArcIdx = res.arcPair.y; if (res.exploreBoth) { const int dstIdx = stateToIndex( g1.dstNodes[firstArcIdx], g2.dstNodes[secondArcIdx], g1.numNodes, false, false); calculateNumArcsAndNodesToExplore( res.nodeIdx, dstIdx, reachable, newNodes, toExplore.data(), numOutArcs, numInArcs); } if (res.exploreFirst) { const int dstIdx = stateToIndex( g1.dstNodes[firstArcIdx], res.nodePair.y, g1.numNodes, true, false); calculateNumArcsAndNodesToExplore( res.nodeIdx, dstIdx, reachable, newNodes, toExplore.data(), numOutArcs, numInArcs); } if (res.exploreSecond) { const int dstIdx = stateToIndex( res.nodePair.x, g2.dstNodes[secondArcIdx], g1.numNodes, false, true); calculateNumArcsAndNodesToExplore( res.nodeIdx, dstIdx, reachable, newNodes, toExplore.data(), numOutArcs, numInArcs); } } } __global__ void generateNodeAndArcKernel( const GraphData g1, const GraphData g2, const float* weightsFirst, const float* weightsSecond, const int* arcCrossProductOffset, const HDSpan<int> exploreIndices, const HDSpan<bool> reachable, int totalArcs, GraphData newGraph, float* weights, int* gradInfoFirst, int* gradInfoSecond, int* newNodesOffset) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < totalArcs) { auto res = getArcPairAndExploreState( g1, g2, arcCrossProductOffset, exploreIndices, gTid); const int firstArcIdx = res.arcPair.x; const int secondArcIdx = res.arcPair.y; if (res.exploreBoth) { const int dstIdx = stateToIndex( g1.dstNodes[firstArcIdx], g2.dstNodes[secondArcIdx], g1.numNodes, false, false); generateCombinedGraphArcs( dstIdx, res.nodeIdx, make_int2(firstArcIdx, secondArcIdx), reachable, newNodesOffset, gradInfoFirst, gradInfoSecond, newGraph, weights, g1.ilabels[firstArcIdx], g2.olabels[secondArcIdx], weightsFirst[firstArcIdx] + weightsSecond[secondArcIdx]); } if (res.exploreFirst) { const int dstIdx = stateToIndex( g1.dstNodes[firstArcIdx], res.nodePair.y, g1.numNodes, true, false); generateCombinedGraphArcs( dstIdx, res.nodeIdx, make_int2(firstArcIdx, -1), reachable, newNodesOffset, gradInfoFirst, gradInfoSecond, newGraph, weights, g1.ilabels[firstArcIdx], epsilon, weightsFirst[firstArcIdx]); } if (res.exploreSecond) { const int dstIdx = stateToIndex( res.nodePair.x, 
g2.dstNodes[secondArcIdx], g1.numNodes, false, true); generateCombinedGraphArcs( dstIdx, res.nodeIdx, make_int2(-1, secondArcIdx), reachable, newNodesOffset, gradInfoFirst, gradInfoSecond, newGraph, weights, epsilon, g2.olabels[secondArcIdx], weightsSecond[secondArcIdx]); } } } __global__ void setStartAndAccept( const GraphData g1, const GraphData g2, const HDSpan<int> exploreIndices, GraphData newGraph) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < exploreIndices.size()) { auto state = indexToState(exploreIndices[gTid], g1.numNodes); newGraph.start[gTid] = g1.start[state.first] && g2.start[state.second] && !(state.followFirst || state.followSecond); newGraph.accept[gTid] = g1.accept[state.first] && g2.accept[state.second]; } } __global__ void calculateNumArcsKernel( const HDSpan<int> nodeIndices, const int* inputInArcs, const int* inputOutArcs, HDSpan<int> outputInArcs, HDSpan<int> outputOutArcs) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < nodeIndices.size()) { const int index = nodeIndices[gTid]; outputInArcs[gTid] = inputInArcs[index]; outputOutArcs[gTid] = inputOutArcs[index]; } } __global__ void findReachableInitKernel( const HDSpan<int> idsFirst, const HDSpan<int> idsSecond, HDSpan<bool> reachable, HDSpan<bool> toExplore, int numNodesFirst) { const int fid = blockIdx.x * blockDim.x + threadIdx.x; const int sid = blockIdx.y * blockDim.y + threadIdx.y; if (fid < idsFirst.size() && sid < idsSecond.size()) { auto idx = stateToIndex( idsFirst[fid], idsSecond[sid], numNodesFirst, false, false); toExplore[idx] = true; reachable[idx] = true; toExplore[idx + 1] = true; reachable[idx + 1] = true; toExplore[idx + 2] = true; reachable[idx + 2] = true; } } void findReachableInit( const GraphData& g1, const GraphData& g2, HDSpan<bool> reachable, HDSpan<bool> toExplore) { int NT = 16; auto blocks = dim3( divUp(g1.acceptIds.size(), NT), divUp(g2.acceptIds.size(), NT)); auto threads = dim3(NT, NT); findReachableInitKernel<<<blocks, threads>>>(g1.acceptIds, g2.acceptIds, reachable, toExplore, g1.numNodes); } __global__ void secondPassInitKernel( const HDSpan<int> idsFirst, const HDSpan<int> idsSecond, const HDSpan<bool> reachable, HDSpan<bool> toExplore, HDSpan<bool> newNodes, int numNodesFirst) { const int fid = blockIdx.x * blockDim.x + threadIdx.x; const int sid = blockIdx.y * blockDim.y + threadIdx.y; if (fid < idsFirst.size() && sid < idsSecond.size()) { auto idx = stateToIndex( idsFirst[fid], idsSecond[sid], numNodesFirst, false, false); if (reachable[idx]) { toExplore[idx] = true; newNodes[idx] = true; } } } void secondPassInit( const GraphData& g1, const GraphData& g2, const HDSpan<bool> reachable, HDSpan<bool> toExplore, HDSpan<bool> newNodes) { int NT = 16; auto blocks = dim3( divUp(g1.startIds.size(), NT), divUp(g2.startIds.size(), NT)); auto threads = dim3(NT, NT); secondPassInitKernel<<<blocks, threads>>>(g1.startIds, g2.startIds, reachable, toExplore, newNodes, g1.numNodes); } __global__ void gradKernel( int* arcIds, const float* deltas, float* grad, size_t numArcs) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < numArcs && arcIds[gTid] >= 0) { atomicAdd(grad + arcIds[gTid], deltas[gTid]); } } void calcGrad(Graph& g, int* arcIds, const Graph& deltas) { if (!g.calcGrad()) { return; } HDSpan<float> grad(g.numArcs(), 0.0, Device::CUDA); const int NT = 128; const int gridSize = divUp(deltas.numArcs(), NT); gradKernel<<<gridSize, NT, 0, 0>>>( arcIds, deltas.weights(), grad.data(), deltas.numArcs()); g.addGrad(grad.data()); 
grad.clear(); } __global__ void boolToIndicesKernel( HDSpan<int> ids, const int* counts, const HDSpan<bool> vals, size_t size) { const int gTid = blockIdx.x * blockDim.x + threadIdx.x; if (gTid < size && vals[gTid]) { ids[counts[gTid]] = gTid; } } auto boolToIndices(const HDSpan<bool>& vals) { int* counts; int numTrue; std::tie(counts, numTrue) = prefixSumScan(vals.data(), vals.size()); const int NT = 128; const int gridSize = divUp(vals.size(), NT); HDSpan<int> ids(numTrue, Device::CUDA); boolToIndicesKernel<<<gridSize, NT, 0, 0>>>(ids, counts, vals, vals.size()); CUDA_CHECK(cudaFree(counts)); return ids; } } // namespace Graph compose(const Graph& first, const Graph& second) { auto nGraph = Graph(nullptr, {first, second}); auto& nData = nGraph.getData(); auto g1 = first.getData(); auto g2 = second.getData(); const int numAllPairNodes = 3 * first.numNodes() * second.numNodes(); const int numNodesFirst = first.numNodes(); // Fixed number of CUDA threads and stream for all kernels const int NT = 128; ////////////////////////////////////////////////////////////////////////// // Step 1: Data parallel findReachable ////////////////////////////////////////////////////////////////////////// HDSpan<bool> reachable(numAllPairNodes, false, Device::CUDA); HDSpan<bool> toExplore(numAllPairNodes, false, Device::CUDA); findReachableInit(g1, g2, reachable, toExplore); // This is the outer control loop that would spawn DP kernels while(checkAnyTrue(toExplore)) { // Convert bits set in toExplore to indices auto exploreIndices = boolToIndices(toExplore); int* arcCrossProductIndex = calculateArcCrossProductOffset( exploreIndices, g1, g2, true); int* arcCrossProductOffset; int totalArcs; std::tie(arcCrossProductOffset, totalArcs) = prefixSumScan(arcCrossProductIndex, exploreIndices.size()); CUDA_CHECK(cudaFree(arcCrossProductIndex)); // Reset so pristine state for next frontier to explore setFalse(toExplore); if (totalArcs > 0) { const int gridSize = divUp(totalArcs, NT); findReachableKernel<<<gridSize, NT, 0, 0>>>( g1, g2, arcCrossProductOffset, exploreIndices, totalArcs, toExplore, reachable); } exploreIndices.clear(); CUDA_CHECK(cudaFree(arcCrossProductOffset)); } // end while for findReachable ////////////////////////////////////////////////////////////////////////// // Step 2: Compute a) valid nodes in combined graph // b) Number of in and out arcs in combined graph // This information is used to generate offsets for nodes and arcs // in the combined graph ////////////////////////////////////////////////////////////////////////// HDSpan<bool> newNodes(numAllPairNodes, 0.0, Device::CUDA); int* numOutArcs; int* numInArcs; CUDA_CHECK(cudaMalloc((void **)(&numOutArcs), sizeof(int) * numAllPairNodes)); CUDA_CHECK(cudaMalloc((void **)(&numInArcs), sizeof(int) * numAllPairNodes)); CUDA_CHECK(cudaMemset((void*)numOutArcs, 0, sizeof(int) * numAllPairNodes)); CUDA_CHECK(cudaMemset((void*)numInArcs, 0, sizeof(int) * numAllPairNodes)); setFalse(toExplore); secondPassInit(g1, g2, reachable, toExplore, newNodes); // This is the outer control loop that would spawn DP kernels while(checkAnyTrue(toExplore)) { // Convert bits set in toExplore to node pairs auto exploreIndices = boolToIndices(toExplore); int* arcCrossProductIndex = calculateArcCrossProductOffset( exploreIndices, g1, g2, false); int* arcCrossProductOffset; int totalArcs; std::tie(arcCrossProductOffset, totalArcs) = prefixSumScan(arcCrossProductIndex, exploreIndices.size()); CUDA_CHECK(cudaFree(arcCrossProductIndex)); // Reset so pristine state for next 
frontier to explore setFalse(toExplore); if (totalArcs > 0) { const int gridSize = divUp(totalArcs, NT); computeValidNodeAndArcKernel<<<gridSize, NT, 0, 0>>>(g1, g2, arcCrossProductOffset, exploreIndices, reachable, totalArcs, toExplore, newNodes, numInArcs, numOutArcs); } exploreIndices.clear(); CUDA_CHECK(cudaFree(arcCrossProductOffset)); } toExplore.clear(); reachable.clear(); ////////////////////////////////////////////////////////////////////////// // Step 3: Generate offsets for nodes and arcs in combined graph ////////////////////////////////////////////////////////////////////////// int totalNodes; int* newNodesOffset; std::tie(newNodesOffset, totalNodes) = prefixSumScan(newNodes.data(), numAllPairNodes); nData.numNodes = totalNodes; nData.start.resize(totalNodes); nData.accept.resize(totalNodes); nData.inArcOffset.resize(totalNodes + 1); nData.outArcOffset.resize(totalNodes + 1); // Convert bits to indices auto exploreIndices = boolToIndices(newNodes); // Generate offsets for nodes and arcs if (exploreIndices.size() > 0) { const int NT = 128; const int gridSize = divUp(exploreIndices.size(), NT); calculateNumArcsKernel<<<gridSize, NT, 0, 0>>>(exploreIndices, numInArcs, numOutArcs, nData.inArcOffset, nData.outArcOffset); } CUDA_CHECK(cudaFree(numOutArcs)); CUDA_CHECK(cudaFree(numInArcs)); int totalInArcs; int totalOutArcs; int* inArcOffsetGPU; int* outArcOffsetGPU; std::tie(inArcOffsetGPU, totalInArcs) = prefixSumScan(nData.inArcOffset.data(), totalNodes); std::tie(outArcOffsetGPU, totalOutArcs) = prefixSumScan(nData.outArcOffset.data(), totalNodes); assert(totalInArcs == totalOutArcs); nData.numArcs = totalOutArcs; nData.inArcs.resize(totalOutArcs); nData.outArcs.resize(totalOutArcs); nData.ilabels.resize(totalOutArcs); nData.olabels.resize(totalOutArcs); nData.srcNodes.resize(totalOutArcs); nData.dstNodes.resize(totalOutArcs); nGraph.getWeights().resize(totalOutArcs); nData.inArcOffset.copy(inArcOffsetGPU); nData.outArcOffset.copy(outArcOffsetGPU); auto gradInfo = std::make_shared<GradInfo>(); CUDA_CHECK(cudaMalloc((void **)(&gradInfo->first), sizeof(int) * totalOutArcs)); CUDA_CHECK(cudaMalloc((void **)(&gradInfo->second), sizeof(int) * totalOutArcs)); ////////////////////////////////////////////////////////////////////////// // Step 4: Generate nodes and arcs in combined graph ////////////////////////////////////////////////////////////////////////// int* arcCrossProductIndex = calculateArcCrossProductOffset( exploreIndices, g1, g2, false); int* arcCrossProductOffset; int totalArcs; std::tie(arcCrossProductOffset, totalArcs) = prefixSumScan(arcCrossProductIndex, exploreIndices.size()); CUDA_CHECK(cudaFree(arcCrossProductIndex)); if (exploreIndices.size() > 0) { setFalse(nData.start); setFalse(nData.accept); const int gridSize = divUp(exploreIndices.size(), NT); setStartAndAccept<<<gridSize, NT, 0, 0>>>(g1, g2, exploreIndices, nData); nData.startIds = boolToIndices(nData.start); nData.acceptIds = boolToIndices(nData.accept); } if (totalArcs > 0) { const int gridSize = divUp(totalArcs, NT); generateNodeAndArcKernel<<<gridSize, NT, 0, 0>>>(g1, g2, first.weights(), second.weights(), arcCrossProductOffset, exploreIndices, newNodes, totalArcs, nData, nGraph.weights(), gradInfo->first, gradInfo->second, newNodesOffset); } exploreIndices.clear(); CUDA_CHECK(cudaFree(arcCrossProductOffset)); // Reset incremented offsets to original value nData.inArcOffset.copy(inArcOffsetGPU); nData.outArcOffset.copy(outArcOffsetGPU); newNodes.clear(); CUDA_CHECK(cudaFree(newNodesOffset)); 
CUDA_CHECK(cudaFree(inArcOffsetGPU)); CUDA_CHECK(cudaFree(outArcOffsetGPU)); auto gradFunc = [gradInfo](std::vector<Graph>& inputs, Graph deltas) { calcGrad(inputs[0], gradInfo->first, deltas); calcGrad(inputs[1], gradInfo->second, deltas); }; nGraph.setGradFunc(std::move(gradFunc)); return nGraph; } } // namespace detail } // namespace cuda } // namespace gtn
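// Editorial note: compose() packs three exploration modes per node pair --
// a plain match state plus one epsilon-follow state per input graph --
// selected by the offset in stateToIndex/indexToState, which is why
// numAllPairNodes is 3 * numNodes(first) * numNodes(second). A minimal
// host-side sketch of that round trip follows; packState/unpackState are
// hypothetical names mirroring the device functions, not part of gtn.
#include <cassert>
#include <cstddef>

namespace {

struct PackedState {
  int first;         // node id in the first graph
  int second;        // node id in the second graph
  bool followFirst;  // following epsilon out-arcs of the first graph only
  bool followSecond; // following epsilon out-arcs of the second graph only
};

inline std::size_t packState(int first, int second, int numFirst,
                             bool followFirst, bool followSecond) {
  std::size_t offset = followFirst ? 1 : (followSecond ? 2 : 0);
  return 3 * (static_cast<std::size_t>(numFirst) * second + first) + offset;
}

inline PackedState unpackState(std::size_t n, int numFirst) {
  PackedState s;
  auto offset = n % 3;
  s.followFirst = (offset == 1);
  s.followSecond = (offset == 2);
  n /= 3;
  s.first = static_cast<int>(n % numFirst);
  s.second = static_cast<int>(n / numFirst);
  return s;
}

// Round-trip check: packing a state and unpacking its index recovers it.
inline void packStateRoundTripCheck() {
  const int numFirst = 4; // number of nodes in the first graph
  auto idx = packState(/*first=*/2, /*second=*/5, numFirst,
                       /*followFirst=*/false, /*followSecond=*/true);
  PackedState s = unpackState(idx, numFirst);
  assert(s.first == 2 && s.second == 5 && !s.followFirst && s.followSecond);
}

} // namespace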
#define RUN(TYPE, DIMS, REAL) \ THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; THArgCheck(getApplyGrid(state, totalElements, grid), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (TensorUtils<THCTensor>::overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, tensor) && TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<THCTensor, unsigned int>(state, src); TensorInfo<long, unsigned int> indexInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
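// The DIMS template parameter specializes the index-to-offset arithmetic for
// tensors of 1, 2 or 3 dimensions so the compiler can unroll it; -1 selects
// the generic variant that iterates over the dimensions at run time.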
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(cudaGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(cudaGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(cudaGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(cudaGetLastError()); break; } } else { TensorInfo<real, unsigned long> tensorInfo = getTensorInfo<THCTensor, unsigned long>(state, tensor); TensorInfo<real, unsigned long> srcInfo = getTensorInfo<THCTensor, unsigned long>(state, src); TensorInfo<long, unsigned long> indexInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, index); RUN(unsigned long, -1, real); THCudaCheck(cudaGetLastError()); } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimension)(state, src) == THCTensor_(nDimension)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); THLongStorage *indexDims = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, src, indexDims), 3, "Index tensor must have the same size as input tensor."); THLongStorage_free(indexDims); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 4, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; THArgCheck(getApplyGrid(state, totalElements, grid), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (TensorUtils<THCTensor>::overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, tensor) && TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<THCTensor, unsigned int>(state, src); TensorInfo<long, unsigned int> indexInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, unsigned long> tensorInfo = getTensorInfo<THCTensor, unsigned long>(state, tensor); TensorInfo<real, unsigned long> srcInfo = getTensorInfo<THCTensor, unsigned long>(state, src); TensorInfo<long, unsigned long> indexInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, index); RUN(unsigned long, -1, real) } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimension)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimension(state, index) == THCTensor_(nDimension)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimension)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimension)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; THArgCheck(getApplyGrid(state, totalElements, grid), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (TensorUtils<THCTensor>::overlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, tensor) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<THCTensor, unsigned int>(state, tensor); TensorInfo<long, unsigned int> indexInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, unsigned long> tensorInfo = getTensorInfo<THCTensor, unsigned long>(state, tensor); TensorInfo<long, unsigned long> indexInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, index); RUN(unsigned long, -1, real); } if (oldTensor) { TensorUtils<THCTensor>::copyIgnoringOverlaps(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #endif
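// Editorial note: as a reminder of the semantics the kernels above implement,
// gather along dim reads out[i][j] = src[i][index[i][j]] (for dim == 1),
// while scatter performs the inverse write out[i][index[i][j]] = src[i][j].
// The 2-D reference below is a minimal sketch with hypothetical helper names
// (gatherDim1/scatterDim1); it is not part of THC/cutorch.
#include <cassert>
#include <vector>

// out is rows x outCols, src is rows x srcCols, index is rows x outCols
// (index matches the shape of the output, as checked in THCTensor_(gather)).
inline void gatherDim1(std::vector<float> &out, int outCols,
                       const std::vector<float> &src, int srcCols,
                       const std::vector<long> &index, int rows) {
  for (int i = 0; i < rows; ++i)
    for (int j = 0; j < outCols; ++j) {
      long k = index[i * outCols + j];
      assert(k >= 0 && k < srcCols);
      out[i * outCols + j] = src[i * srcCols + k]; // out[i][j] = src[i][index[i][j]]
    }
}

// out is rows x outCols, src and index are rows x srcCols
// (index matches the shape of the source, as checked in THCTensor_(scatter)).
inline void scatterDim1(std::vector<float> &out, int outCols,
                        const std::vector<float> &src, int srcCols,
                        const std::vector<long> &index, int rows) {
  for (int i = 0; i < rows; ++i)
    for (int j = 0; j < srcCols; ++j) {
      long k = index[i * srcCols + j];
      assert(k >= 0 && k < outCols);
      out[i * outCols + k] = src[i * srcCols + j]; // out[i][index[i][j]] = src[i][j]
    }
}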
#include <stdio.h> #include <iomanip> #include <thrust/extrema.h> #include "cm.h" using namespace std; //thrust::device_vector<unsigned char> scratch; bool phase_copy = 0; map<string, unsigned int> cnt_counts; string curr_file; map<string,bool> min_max_eq; template<typename T> struct type_to_int64 { const T *source; long long int *dest; long long int *ad; type_to_int64(const T* _source, long long int *_dest, long long int *_ad): source(_source), dest(_dest), ad(_ad) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { dest[i] = (int_type)source[i] + ad[0]; } }; template<typename T> struct int64_to_type { __host__ __device__ unsigned int operator()(const int_type x) { return (T)x; } }; template<typename T> struct to_int64 { __host__ __device__ int_type operator()(const T x) { return (int_type)x; } }; struct compress_functor_int { const int_type * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_int(const int_type * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val = source[i] - start_val[0]; unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0]; dest[i] = val << shifted; } }; struct compress_functor_float { const long long int * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_float(const long long int * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val; unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; val = source[i] - start_val[0]; unsigned int z = i%fit_count; unsigned int shifted = int_sz - bits - z*bits; dest[i] = val << shifted; } }; struct decompress_functor_int { const unsigned long long int * source; int_type * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_int(const unsigned long long int * _source, int_type * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned long long int tmp = source[i/vals[1]] >> (vals[2] - vals[0] - (i%vals[1])*vals[0]); // set the rest of bits to 0 tmp = tmp << (vals[2] - vals[0]); tmp = tmp >> (vals[2] - vals[0]); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_str { const unsigned long long * source; unsigned int * dest; const unsigned int * vals; decompress_functor_str(const unsigned long long int * _source, unsigned int * _dest, const unsigned int * _vals): source(_source), dest(_dest), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = 64; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = ((fit_count-src_loc)-1)*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of 
bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp; } }; size_t pfor_decompress(void* destination, void* host, void* d_v, void* s_v, string colname) { unsigned int bit_count = 64; auto cnt = ((unsigned int*)host)[0]; auto orig_upper_val = ((long long int*)((char*)host +12))[0]; auto orig_recCount = ((unsigned int*)((char*)host + cnt))[7]; auto bits = ((unsigned int*)((char*)host + cnt))[8]; auto orig_lower_val = ((long long int*)((unsigned int*)((char*)host + cnt) + 9))[0]; auto fit_count = ((unsigned int*)((char*)host + cnt))[11]; auto start_val = ((long long int*)((unsigned int*)((char*)host + cnt) + 12))[0]; auto comp_type = ((unsigned int*)host)[5]; //cout << "Decomp Header " << orig_recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl; //cout << colname << " " << orig_lower_val << " " << orig_upper_val << endl; if(orig_lower_val == orig_upper_val) min_max_eq[colname] = 1; else min_max_eq[colname] = 0; if(scratch.size() < cnt) scratch.resize(cnt); cudaMemcpy(thrust::raw_pointer_cast(scratch.data()), (void*)((unsigned int*)host + 6), cnt, cudaMemcpyHostToDevice); thrust::device_ptr<int_type> d_int((int_type*)destination); if(comp_type == 1) { thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::counting_iterator<unsigned int> begin(0); decompress_functor_int ff1((const unsigned long long int *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); d_int[0] = start_val; thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int); } else { if(!phase_copy) { thrust::device_vector<int_type> ad(1); ad[0] = orig_lower_val; thrust::counting_iterator<unsigned int> begin(0); if(bits == 8) { type_to_int64<unsigned char> ff1((const unsigned char *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, thrust::raw_pointer_cast(ad.data())); thrust::for_each(begin, begin + orig_recCount, ff1); } else if(bits == 16) { type_to_int64<unsigned short int> ff1((const unsigned short int *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, thrust::raw_pointer_cast(ad.data())); thrust::for_each(begin, begin + orig_recCount, ff1); } else if(bits == 32) { type_to_int64<unsigned int> ff1((const unsigned int *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, thrust::raw_pointer_cast(ad.data())); thrust::for_each(begin, begin + orig_recCount, ff1); } else { type_to_int64<long long int> ff1((const long long int *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, thrust::raw_pointer_cast(ad.data())); thrust::for_each(begin, begin + orig_recCount, ff1); }; } else { cpy_bits[colname] = bits; cpy_init_val[colname] = orig_lower_val; if(bits == 8) { thrust::device_ptr<unsigned char> dest((unsigned char*)destination); thrust::copy(scratch.begin(), scratch.begin()+orig_recCount, dest); } else if(bits == 16) { thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(scratch.data())); thrust::device_ptr<unsigned short int> dest((unsigned short int*)destination); thrust::copy(src, src+orig_recCount, dest); } else if(bits == 32) { thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(scratch.data())); thrust::device_ptr<unsigned int> dest((unsigned int*)destination); 
thrust::copy(src, src+orig_recCount, dest); } else { thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::copy(src, src+orig_recCount, d_int); }; }; }; return orig_recCount; } template< typename T> void pfor_delta_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp) { long long int orig_lower_val, orig_upper_val, start_val, real_lower, real_upper; unsigned int bits, recCount; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 1; // FOR-DELTA if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; void* ss; CUDA_SAFE_CALL(cudaMalloc((void **) &ss, recCount*float_size)); if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); thrust::device_ptr<int_type> d_ss((int_type*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << file_name << " " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; } else { thrust::device_ptr<long long int> s((long long int*)source); thrust::device_ptr<long long int> d_ss((long long int*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << file_name << " " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; }; thrust::counting_iterator<unsigned int> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; //void* d; //CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)source); thrust::fill(dd, dd+source_len,0); //cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl; if (tp == 0) { compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source); // make an addition sequence thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss); thrust::constant_iterator<unsigned 
long long int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>()); unsigned int cnt = (recCount)/fit_count; if (recCount%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); //cout << file_name << " CNT " << cnt << " " << recCount << endl; cnt = cnt*8; cudaMemcpy( host.data(), (void *)raw_src, cnt, cudaMemcpyDeviceToHost); fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&real_lower, 8); binary_file.write((char *)&real_upper, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)host.data(),cnt); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; thrust::device_free(fin_seq); cudaFree(ss); cudaFree(d_v1); cudaFree(s_v1); } // non sorted compressed fields should have 1,2,4 or 8 byte values for direct operations on compressed values template< typename T> void pfor_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp) //void pfor_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T>& host, bool tp) { unsigned int recCount = source_len/int_size; long long int orig_lower_val; long long int orig_upper_val; unsigned int bits; unsigned int fit_count = 0; unsigned int comp_type = 0; // FOR long long int start_val = 0; bool sorted = 0; // check if sorted if(delta) { if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); sorted = thrust::is_sorted(s, s+recCount); } else { recCount = source_len/float_size; thrust::device_ptr<long long int> s((long long int*)source); sorted = thrust::is_sorted(s, s+recCount); }; //cout << "file " << file_name << " is sorted " << sorted << endl; if(sorted) { pfor_delta_compress(source, source_len, file_name, host, tp); return; }; }; //cout << "Recs " << recCount << endl; if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); } else { thrust::device_ptr<long long int> s((long long int*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); }; if (bits != 8 && bits != 16 && bits != 32 && bits != 64) { if(bits < 8) bits = 8; else if(bits < 16) bits = 16; else if(bits < 32) bits = 
32; else if(bits < 64) bits = 64; }; //cout << "We will really need " << bits << " for " << file_name << endl; unsigned int cnt; thrust::device_ptr<int_type> s((int_type*)source); thrust::constant_iterator<int_type> iter(orig_lower_val); thrust::transform(s, s+recCount, iter, s, thrust::minus<int_type>()); thrust::device_vector<int8_type> d_columns_int8; thrust::device_vector<int16_type> d_columns_int16; thrust::device_vector<int32_type> d_columns_int32; if(bits == 8) { d_columns_int8.resize(recCount); thrust::transform(s, s+recCount, d_columns_int8.begin(), int64_to_type<int8_type>()); cudaMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int8.data()), recCount, cudaMemcpyDeviceToHost); cnt = recCount; } else if(bits == 16) { d_columns_int16.resize(recCount); thrust::transform(s, s+recCount, d_columns_int16.begin(), int64_to_type<int16_type>()); cudaMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int16.data()), recCount*2, cudaMemcpyDeviceToHost); cnt = recCount*2; } else if(bits == 32) { d_columns_int32.resize(recCount); thrust::transform(s, s+recCount, d_columns_int32.begin(), int64_to_type<int32_type>()); cudaMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int32.data()), recCount*4, cudaMemcpyDeviceToHost); cnt = recCount*4; } else { cudaMemcpy( host.data(), (void*)source, recCount*8, cudaMemcpyDeviceToHost); cnt = recCount*8; }; fit_count = 64/bits; //cout << "comp Header " << file_name << " " << recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << " " << orig_upper_val << " " << start_val << endl; fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&orig_upper_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)host.data(),cnt); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; }
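// Editorial note: in the FOR-DELTA path (pfor_delta_compress, comp_type == 1)
// each delta is stored relative to the reference value in `bits` bits, with
// fit_count = 64 / bits values packed per 64-bit word, first value in the
// most significant position; any leftover low-order bits in a word are
// padding. The host-side sketch below reproduces the same pack/unpack
// arithmetic as compress_functor_int and decompress_functor_int (the GPU
// path sums the shifted values per word via reduce_by_key, which is
// equivalent to the bitwise OR used here). forPack/forUnpack are
// hypothetical names, not part of this code base.
#include <cstddef>
#include <cstdint>
#include <vector>

// Requires 0 <= value - reference < 2^bits and 1 <= bits <= 64.
inline std::vector<uint64_t> forPack(const std::vector<int64_t> &values,
                                     int64_t reference, unsigned bits) {
  const unsigned fitCount = 64 / bits;
  std::vector<uint64_t> packed((values.size() + fitCount - 1) / fitCount, 0);
  for (std::size_t i = 0; i < values.size(); ++i) {
    uint64_t delta = static_cast<uint64_t>(values[i] - reference);
    unsigned shift = 64u - bits - static_cast<unsigned>(i % fitCount) * bits;
    packed[i / fitCount] |= delta << shift;
  }
  return packed;
}

inline std::vector<int64_t> forUnpack(const std::vector<uint64_t> &packed,
                                      std::size_t count, int64_t reference,
                                      unsigned bits) {
  const unsigned fitCount = 64 / bits;
  std::vector<int64_t> values(count);
  for (std::size_t i = 0; i < count; ++i) {
    unsigned shift = 64u - bits - static_cast<unsigned>(i % fitCount) * bits;
    uint64_t tmp = packed[i / fitCount] >> shift;
    tmp <<= (64u - bits); // clear the high-order bits that belong to
    tmp >>= (64u - bits); // earlier values in the same word
    values[i] = static_cast<int64_t>(tmp) + reference;
  }
  return values;
}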
#include "../config.cuh" #include "../util_namespace.cuh" #include "dispatch/dispatch_merge_sort.cuh" CUB_NAMESPACE_BEGIN /** * @brief DeviceMergeSort provides device-wide, parallel operations for * computing a merge sort across a sequence of data items residing within * device-accessible memory. * * @ingroup SingleModule * * @par Overview * - DeviceMergeSort arranges items into ascending order using a comparison * functor with less-than semantics. Merge sort can handle arbitrary types (as * long as a value of these types is a model of [LessThan Comparable]) and * comparison functors, but is slower than DeviceRadixSort when sorting * arithmetic types into ascending/descending order. * - Another difference from RadixSort is the fact that DeviceMergeSort can * handle arbitrary random-access iterators, as shown below. * * @par A Simple Example * @par * The code snippet below illustrates a thrust reverse iterator usage. * @par * @code * #include <cub/cub.cuh> // or equivalently <cub/device/device_merge_sort.cuh> * * struct CustomLess * { * template <typename DataType> * __device__ bool operator()(const DataType &lhs, const DataType &rhs) * { * return lhs < rhs; * } * }; * * // Declare, allocate, and initialize device-accessible pointers * // for sorting data * thrust::device_vector<KeyType> d_keys(num_items); * thrust::device_vector<DataType> d_values(num_items); * // ... * * // Initialize iterator * using KeyIterator = typename thrust::device_vector<KeyType>::iterator; * thrust::reverse_iterator<KeyIterator> reverse_iter(d_keys.end()); * * // Determine temporary device storage requirements * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::SortPairs( * nullptr, * temp_storage_bytes, * reverse_iter, * thrust::raw_pointer_cast(d_values.data()), * num_items, * CustomLess()); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::SortPairs( * d_temp_storage, * temp_storage_bytes, * reverse_iter, * thrust::raw_pointer_cast(d_values.data()), * num_items, * CustomLess()); * @endcode * * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ struct DeviceMergeSort { /** * @brief Sorts items using a merge sorting method. * * @par * SortPairs is not guaranteed to be stable. That is, suppose that i and j are * equivalent: neither one is less than the other. It is not guaranteed * that the relative order of these two elements will be preserved by sort. * * @par Snippet * The code snippet below illustrates the sorting of a device vector of `int` * keys with associated vector of `int` values. * @par * @code * #include <cub/cub.cuh> * // or equivalently <cub/device/device_merge_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for * // sorting data * int num_items; // e.g., 7 * int *d_keys; // e.g., [8, 6, 6, 5, 3, 0, 9] * int *d_values; // e.g., [0, 1, 2, 3, 4, 5, 6] * ... 
* * // Initialize comparator * CustomOpT custom_op; * * // Determine temporary device storage requirements * void *d_temp_storage = nullptr; * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::SortPairs( * d_temp_storage, temp_storage_bytes, * d_keys, d_values, num_items, custom_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::SortPairs( * d_temp_storage, temp_storage_bytes, * d_keys, d_values, num_items, custom_op); * * // d_keys <-- [0, 3, 5, 6, 6, 8, 9] * // d_values <-- [5, 4, 3, 2, 1, 0, 6] * * @endcode * * @tparam KeyIteratorT * is a model of [Random Access Iterator]. `KeyIteratorT` is mutable, and * its `value_type` is a model of [LessThan Comparable]. This `value_type`'s * ordering relation is a *strict weak ordering* as defined in * the [LessThan Comparable] requirements. * * @tparam ValueIteratorT * is a model of [Random Access Iterator], and `ValueIteratorT` is mutable. * * @tparam OffsetT * is an integer type for global offsets. * * @tparam CompareOpT * is a type of callable object with the signature * `bool operator()(KeyT lhs, KeyT rhs)` that models * the [Strict Weak Ordering] concept. * * @param[in] d_temp_storage * Device-accessible allocation of temporary storage. When `nullptr`, the * required allocation size is written to `temp_storage_bytes` and no work * is done. * * @param[in,out] temp_storage_bytes * Reference to size in bytes of `d_temp_storage` allocation * * @param[in,out] d_keys * Pointer to the input sequence of unsorted input keys * * @param[in,out] d_items * Pointer to the input sequence of unsorted input values * * @param[in] num_items * Number of items to sort * * @param[in] compare_op * Comparison function object which returns true if the first argument is * ordered before the second * * @param[in] stream * **[optional]** CUDA stream to launch kernels within. Default is * stream<sub>0</sub>. * * @param[in] debug_synchronous * **[optional]** Whether or not to synchronize the stream after every * kernel launch to check for errors. Also causes launch configurations to * be printed to the console. Default is `false`. * * [Random Access Iterator]: https://en.cppreference.com/w/cpp/iterator/random_access_iterator * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ template <typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyIteratorT d_keys, ValueIteratorT d_items, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream = 0, bool debug_synchronous = false) { using DispatchMergeSortT = DispatchMergeSort<KeyIteratorT, ValueIteratorT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT>; return DispatchMergeSortT::Dispatch(d_temp_storage, temp_storage_bytes, d_keys, d_items, d_keys, d_items, num_items, compare_op, stream, debug_synchronous); } /** * @brief Sorts items using a merge sorting method. * * @par * - SortPairsCopy is not guaranteed to be stable. That is, suppose * that `i` and `j` are equivalent: neither one is less than the * other. It is not guaranteed that the relative order of these * two elements will be preserved by sort. * - Input arrays `d_input_keys` and `d_input_items` are not modified. 
* - Note that the behavior is undefined if the input and output ranges * overlap in any way. * * @par Snippet * The code snippet below illustrates the sorting of a device vector of * `int` keys with associated vector of `int` values. * @par * @code * #include <cub/cub.cuh> * // or equivalently <cub/device/device_merge_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers * // for sorting data * int num_items; // e.g., 7 * int *d_keys; // e.g., [8, 6, 6, 5, 3, 0, 9] * int *d_values; // e.g., [0, 1, 2, 3, 4, 5, 6] * ... * * // Initialize comparator * CustomOpT custom_op; * * // Determine temporary device storage requirements * void *d_temp_storage = nullptr; * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::SortPairsCopy( * d_temp_storage, temp_storage_bytes, * d_keys, d_values, num_items, custom_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::SortPairsCopy( * d_temp_storage, temp_storage_bytes, * d_keys, d_values, num_items, custom_op); * * // d_keys <-- [0, 3, 5, 6, 6, 8, 9] * // d_values <-- [5, 4, 3, 2, 1, 0, 6] * * @endcode * * @tparam KeyInputIteratorT * is a model of [Random Access Iterator]. Its `value_type` is a model of * [LessThan Comparable]. This `value_type`'s ordering relation is a * *strict weak ordering* as defined in the [LessThan Comparable] * requirements. * * @tparam ValueInputIteratorT * is a model of [Random Access Iterator]. * * @tparam KeyIteratorT * is a model of [Random Access Iterator]. `KeyIteratorT` is mutable, and * its `value_type` is a model of [LessThan Comparable]. This `value_type`'s * ordering relation is a *strict weak ordering* as defined in * the [LessThan Comparable] requirements. * * @tparam ValueIteratorT * is a model of [Random Access Iterator], and `ValueIteratorT` is mutable. * * @tparam OffsetT * is an integer type for global offsets. * * @tparam CompareOpT * is a type of callable object with the signature * `bool operator()(KeyT lhs, KeyT rhs)` that models * the [Strict Weak Ordering] concept. * * @param[in] d_temp_storage * Device-accessible allocation of temporary storage. When `nullptr`, the * required allocation size is written to `temp_storage_bytes` and no work * is done. * * @param[in,out] temp_storage_bytes * Reference to size in bytes of `d_temp_storage` allocation * * @param[in] d_input_keys * Pointer to the input sequence of unsorted input keys * * @param[in] d_input_items * Pointer to the input sequence of unsorted input values * * @param[out] d_output_keys * Pointer to the output sequence of sorted input keys * * @param[out] d_output_items * Pointer to the output sequence of sorted input values * * @param[in] num_items * Number of items to sort * * @param[in] compare_op * Comparison function object which returns `true` if the first argument is * ordered before the second * * @param[in] stream * **[optional]** CUDA stream to launch kernels within. Default is * stream<sub>0</sub>. * * @param[in] debug_synchronous * **[optional]** Whether or not to synchronize the stream after every * kernel launch to check for errors. Also causes launch configurations to * be printed to the console. Default is `false`. 
* * [Random Access Iterator]: https://en.cppreference.com/w/cpp/iterator/random_access_iterator * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ template <typename KeyInputIteratorT, typename ValueInputIteratorT, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsCopy(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyInputIteratorT d_input_keys, ValueInputIteratorT d_input_items, KeyIteratorT d_output_keys, ValueIteratorT d_output_items, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream = 0, bool debug_synchronous = false) { using DispatchMergeSortT = DispatchMergeSort<KeyInputIteratorT, ValueInputIteratorT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT>; return DispatchMergeSortT::Dispatch(d_temp_storage, temp_storage_bytes, d_input_keys, d_input_items, d_output_keys, d_output_items, num_items, compare_op, stream, debug_synchronous); } /** * @brief Sorts items using a merge sorting method. * * @par * SortKeys is not guaranteed to be stable. That is, suppose that `i` and `j` * are equivalent: neither one is less than the other. It is not guaranteed * that the relative order of these two elements will be preserved by sort. * * @par Snippet * The code snippet below illustrates the sorting of a device vector of `int` * keys. * @par * @code * #include <cub/cub.cuh> * // or equivalently <cub/device/device_merge_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers * // for sorting data * int num_items; // e.g., 7 * int *d_keys; // e.g., [8, 6, 7, 5, 3, 0, 9] * ... * * // Initialize comparator * CustomOpT custom_op; * * // Determine temporary device storage requirements * void *d_temp_storage = nullptr; * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::SortKeys( * d_temp_storage, temp_storage_bytes, * d_keys, num_items, custom_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::SortKeys( * d_temp_storage, temp_storage_bytes, * d_keys, num_items, custom_op); * * // d_keys <-- [0, 3, 5, 6, 7, 8, 9] * @endcode * * @tparam KeyIteratorT * is a model of [Random Access Iterator]. `KeyIteratorT` is mutable, and * its `value_type` is a model of [LessThan Comparable]. This `value_type`'s * ordering relation is a *strict weak ordering* as defined in * the [LessThan Comparable] requirements. * * @tparam OffsetT * is an integer type for global offsets. * * @tparam CompareOpT * is a type of callable object with the signature * `bool operator()(KeyT lhs, KeyT rhs)` that models * the [Strict Weak Ordering] concept. * * @param[in] d_temp_storage * Device-accessible allocation of temporary storage. When `nullptr`, the * required allocation size is written to `temp_storage_bytes` and no work * is done. * * @param[in,out] temp_storage_bytes * Reference to size in bytes of `d_temp_storage` allocation * * @param[in,out] d_keys * Pointer to the input sequence of unsorted input keys * * @param[in] num_items * Number of items to sort * * @param[in] compare_op * Comparison function object which returns true if the first argument is * ordered before the second * * @param[in] stream * **[optional]** CUDA stream to launch kernels within. Default is * stream<sub>0</sub>. 
* * @param[in] debug_synchronous * **[optional]** Whether or not to synchronize the stream after every * kernel launch to check for errors. Also causes launch configurations to * be printed to the console. Default is `false`. * * [Random Access Iterator]: https://en.cppreference.com/w/cpp/iterator/random_access_iterator * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ template <typename KeyIteratorT, typename OffsetT, typename CompareOpT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyIteratorT d_keys, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream = 0, bool debug_synchronous = false) { using DispatchMergeSortT = DispatchMergeSort<KeyIteratorT, NullType *, KeyIteratorT, NullType *, OffsetT, CompareOpT>; return DispatchMergeSortT::Dispatch(d_temp_storage, temp_storage_bytes, d_keys, static_cast<NullType *>(nullptr), d_keys, static_cast<NullType *>(nullptr), num_items, compare_op, stream, debug_synchronous); } /** * @brief Sorts items using a merge sorting method. * * @par * - SortKeysCopy is not guaranteed to be stable. That is, suppose that `i` * and `j` are equivalent: neither one is less than the other. It is not * guaranteed that the relative order of these two elements will be * preserved by sort. * - Input array d_input_keys is not modified. * - Note that the behavior is undefined if the input and output ranges * overlap in any way. * * @par Snippet * The code snippet below illustrates the sorting of a device vector of * `int` keys. * @par * @code * #include <cub/cub.cuh> * // or equivalently <cub/device/device_merge_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for * // sorting data * int num_items; // e.g., 7 * int *d_keys; // e.g., [8, 6, 7, 5, 3, 0, 9] * ... * * // Initialize comparator * CustomOpT custom_op; * * // Determine temporary device storage requirements * void *d_temp_storage = nullptr; * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::SortKeysCopy( * d_temp_storage, temp_storage_bytes, * d_keys, num_items, custom_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::SortKeysCopy( * d_temp_storage, temp_storage_bytes, * d_keys, num_items, custom_op); * * // d_keys <-- [0, 3, 5, 6, 7, 8, 9] * @endcode * * @tparam KeyInputIteratorT * is a model of [Random Access Iterator]. Its `value_type` is a model of * [LessThan Comparable]. This `value_type`'s ordering relation is a * *strict weak ordering* as defined in the [LessThan Comparable] * requirements. * * @tparam KeyIteratorT * is a model of [Random Access Iterator]. `KeyIteratorT` is mutable, and * its `value_type` is a model of [LessThan Comparable]. This `value_type`'s * ordering relation is a *strict weak ordering* as defined in * the [LessThan Comparable] requirements. * * @tparam OffsetT * is an integer type for global offsets. * * @tparam CompareOpT * is a type of callable object with the signature * `bool operator()(KeyT lhs, KeyT rhs)` that models * the [Strict Weak Ordering] concept. * * @param[in] d_temp_storage * Device-accessible allocation of temporary storage. When `nullptr`, the * required allocation size is written to `temp_storage_bytes` and no work * is done. 
* * @param[in,out] temp_storage_bytes * Reference to size in bytes of `d_temp_storage` allocation * * @param[in] d_input_keys * Pointer to the input sequence of unsorted input keys * * @param[out] d_output_keys * Pointer to the output sequence of sorted input keys * * @param[in] num_items * Number of items to sort * * @param[in] compare_op * Comparison function object which returns true if the first argument is * ordered before the second * * @param[in] stream * **[optional]** CUDA stream to launch kernels within. Default is * stream<sub>0</sub>. * * @param[in] debug_synchronous * **[optional]** Whether or not to synchronize the stream after every * kernel launch to check for errors. Also causes launch configurations to * be printed to the console. Default is `false`. * * [Random Access Iterator]: https://en.cppreference.com/w/cpp/iterator/random_access_iterator * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ template <typename KeyInputIteratorT, typename KeyIteratorT, typename OffsetT, typename CompareOpT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeysCopy(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyInputIteratorT d_input_keys, KeyIteratorT d_output_keys, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream = 0, bool debug_synchronous = false) { using DispatchMergeSortT = DispatchMergeSort<KeyInputIteratorT, NullType *, KeyIteratorT, NullType *, OffsetT, CompareOpT>; return DispatchMergeSortT::Dispatch(d_temp_storage, temp_storage_bytes, d_input_keys, static_cast<NullType *>(nullptr), d_output_keys, static_cast<NullType *>(nullptr), num_items, compare_op, stream, debug_synchronous); } /** * @brief Sorts items using a merge sorting method. * * @par * StableSortPairs is stable: it preserves the relative ordering of equivalent * elements. That is, if x and y are elements such that x precedes y, * and if the two elements are equivalent (neither x < y nor y < x) then * a postcondition of stable_sort is that x still precedes y. * * @par Snippet * The code snippet below illustrates the sorting of a device vector of `int` * keys with associated vector of `int` values. * @par * @code * #include <cub/cub.cuh> * // or equivalently <cub/device/device_merge_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for * // sorting data * int num_items; // e.g., 7 * int *d_keys; // e.g., [8, 6, 6, 5, 3, 0, 9] * int *d_values; // e.g., [0, 1, 2, 3, 4, 5, 6] * ... * * // Initialize comparator * CustomOpT custom_op; * * // Determine temporary device storage requirements * void *d_temp_storage = nullptr; * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::StableSortPairs( * d_temp_storage, temp_storage_bytes, * d_keys, d_values, num_items, custom_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::StableSortPairs( * d_temp_storage, temp_storage_bytes, * d_keys, d_values, num_items, custom_op); * * // d_keys <-- [0, 3, 5, 6, 6, 8, 9] * // d_values <-- [5, 4, 3, 1, 2, 0, 6] * @endcode * * @tparam KeyIteratorT * is a model of [Random Access Iterator]. `KeyIteratorT` is mutable, and * its `value_type` is a model of [LessThan Comparable]. This `value_type`'s * ordering relation is a *strict weak ordering* as defined in * the [LessThan Comparable] requirements. 
* * @tparam ValueIteratorT * is a model of [Random Access Iterator], and `ValueIteratorT` is mutable. * * @tparam OffsetT * is an integer type for global offsets. * * @tparam CompareOpT * is a type of callable object with the signature * `bool operator()(KeyT lhs, KeyT rhs)` that models * the [Strict Weak Ordering] concept. * * @param[in] d_temp_storage * Device-accessible allocation of temporary storage. When `nullptr`, the * required allocation size is written to `temp_storage_bytes` and no work * is done. * * @param[in,out] temp_storage_bytes * Reference to size in bytes of `d_temp_storage` allocation * * @param[in,out] d_keys * Pointer to the input sequence of unsorted input keys * * @param[in,out] d_items * Pointer to the input sequence of unsorted input values * * @param[in] num_items * Number of items to sort * * @param[in] compare_op * Comparison function object which returns true if the first argument is * ordered before the second * * @param[in] stream * **[optional]** CUDA stream to launch kernels within. Default is * stream<sub>0</sub>. * * @param[in] debug_synchronous * **[optional]** Whether or not to synchronize the stream after every * kernel launch to check for errors. Also causes launch configurations to * be printed to the console. Default is `false`. * * [Random Access Iterator]: https://en.cppreference.com/w/cpp/iterator/random_access_iterator * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ template <typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT> CUB_RUNTIME_FUNCTION static cudaError_t StableSortPairs(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyIteratorT d_keys, ValueIteratorT d_items, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream = 0, bool debug_synchronous = false) { return SortPairs<KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT>( d_temp_storage, temp_storage_bytes, d_keys, d_items, num_items, compare_op, stream, debug_synchronous); } /** * @brief Sorts items using a merge sorting method. * * @par * StableSortKeys is stable: it preserves the relative ordering of equivalent * elements. That is, if `x` and `y` are elements such that `x` precedes `y`, * and if the two elements are equivalent (neither `x < y` nor `y < x`) then * a postcondition of stable_sort is that `x` still precedes `y`. * * @par Snippet * The code snippet below illustrates the sorting of a device vector of `int` * keys. * \par * \code * #include <cub/cub.cuh> * // or equivalently <cub/device/device_merge_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for * // sorting data * int num_items; // e.g., 7 * int *d_keys; // e.g., [8, 6, 7, 5, 3, 0, 9] * ... * * // Initialize comparator * CustomOpT custom_op; * * // Determine temporary device storage requirements * void *d_temp_storage = nullptr; * std::size_t temp_storage_bytes = 0; * cub::DeviceMergeSort::StableSortKeys( * d_temp_storage, temp_storage_bytes, * d_keys, num_items, custom_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceMergeSort::StableSortKeys( * d_temp_storage, temp_storage_bytes, * d_keys, num_items, custom_op); * * // d_keys <-- [0, 3, 5, 6, 7, 8, 9] * @endcode * * @tparam KeyIteratorT * is a model of [Random Access Iterator]. `KeyIteratorT` is mutable, and * its `value_type` is a model of [LessThan Comparable]. 
This `value_type`'s * ordering relation is a *strict weak ordering* as defined in * the [LessThan Comparable] requirements. * * @tparam OffsetT * is an integer type for global offsets. * * @tparam CompareOpT * is a type of callable object with the signature * `bool operator()(KeyT lhs, KeyT rhs)` that models * the [Strict Weak Ordering] concept. * * @param[in] d_temp_storage * Device-accessible allocation of temporary storage. When `nullptr`, the * required allocation size is written to `temp_storage_bytes` and no work * is done. * * @param[in,out] temp_storage_bytes * Reference to size in bytes of `d_temp_storage` allocation * * @param[in,out] d_keys * Pointer to the input sequence of unsorted input keys * * @param[in] num_items * Number of items to sort * * @param[in] compare_op * Comparison function object which returns true if the first argument is * ordered before the second * * @param[in] stream * **[optional]** CUDA stream to launch kernels within. Default is * stream<sub>0</sub>. * * @param[in] debug_synchronous * **[optional]** Whether or not to synchronize the stream after every * kernel launch to check for errors. Also causes launch configurations to * be printed to the console. Default is `false`. * * [Random Access Iterator]: https://en.cppreference.com/w/cpp/iterator/random_access_iterator * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order * [LessThan Comparable]: https://en.cppreference.com/w/cpp/named_req/LessThanComparable */ template <typename KeyIteratorT, typename OffsetT, typename CompareOpT> CUB_RUNTIME_FUNCTION static cudaError_t StableSortKeys(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyIteratorT d_keys, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream = 0, bool debug_synchronous = false) { return SortKeys<KeyIteratorT, OffsetT, CompareOpT>(d_temp_storage, temp_storage_bytes, d_keys, num_items, compare_op, stream, debug_synchronous); } }; CUB_NAMESPACE_END
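// ---------------------------------------------------------------------------
// Illustrative usage sketch (an addition, not part of the CUB sources above):
// sorting device keys with cub::DeviceMergeSort::SortKeys and a custom
// less-than functor, following the two-phase temp-storage query pattern
// documented above. Key values are hypothetical and error checking is omitted.
// ---------------------------------------------------------------------------
#include <cub/device/device_merge_sort.cuh>
#include <thrust/device_vector.h>
#include <cstddef>

struct CustomLess
{
  template <typename DataType>
  __host__ __device__ bool operator()(const DataType &lhs, const DataType &rhs) const
  {
    return lhs < rhs;
  }
};

int main()
{
  const int num_items = 7;
  int h_keys[num_items] = {8, 6, 7, 5, 3, 0, 9};
  thrust::device_vector<int> d_keys(h_keys, h_keys + num_items);

  // First call: d_temp_storage == nullptr, so only temp_storage_bytes is written.
  void *d_temp_storage = nullptr;
  std::size_t temp_storage_bytes = 0;
  cub::DeviceMergeSort::SortKeys(d_temp_storage, temp_storage_bytes,
                                 d_keys.data().get(), num_items, CustomLess());

  cudaMalloc(&d_temp_storage, temp_storage_bytes);

  // Second call: sorts d_keys in place, yielding [0, 3, 5, 6, 7, 8, 9].
  cub::DeviceMergeSort::SortKeys(d_temp_storage, temp_storage_bytes,
                                 d_keys.data().get(), num_items, CustomLess());

  cudaFree(d_temp_storage);
  return 0;
}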
namespace vilib { static void harris_gpu_find_best_params(const int & cols, int & threadX, int & threadY) { int t_best = 128; for(int t=t_best,idle_threads_best = t_best;t>16;t>>=1) { int idle_threads_now = ((cols + t-1)/t)*t - cols; if(idle_threads_best > idle_threads_now) { t_best = t; idle_threads_best = idle_threads_now; } } threadX = t_best; threadY = 128/t_best; } static void harris_gpu_find_best_vector_params(const int & cols, const int & input_pitch, const int & output_pitch, int & v, int & cols_vectorized, int & input_pitch_vectorized, int & output_pitch_vectorized, int & threadX, int & threadY) { // Select the most efficient vectorized version v=4; for(;v>0;v>>=1) { int bitfield = v-1; if(((cols & bitfield) != 0) || ((input_pitch & bitfield) != 0) || ((output_pitch & bitfield) != 0)) { continue; } break; } cols_vectorized = cols/v; input_pitch_vectorized = input_pitch/v; output_pitch_vectorized = output_pitch/v; // Select a good thread block size that minimizes the number of idle threads int t_best = 128; for(int t=t_best,idle_threads_best = t_best;t>16;t>>=1) { int idle_threads_now = ((cols_vectorized + t-1)/t)*t - cols_vectorized; if(idle_threads_best > idle_threads_now) { t_best = t; idle_threads_best = idle_threads_now; } } threadX = t_best; threadY = 128/t_best; } template <int N> __global__ void array_multiply_kernel(const float * __restrict__ a, const float * __restrict__ b, float * result, const int cols, const int rows, const int common_pitch) { const int tx = blockIdx.x * blockDim.x + threadIdx.x; const int ty = blockIdx.y * blockDim.y + threadIdx.y; if(ty < rows && tx < cols) { const int idx = ty * common_pitch + tx; if(N==1) { result[idx] = a[idx] * b[idx]; } else if(N==2) { float2 * result_v = reinterpret_cast<float2*>(result); const float2 * a_v = reinterpret_cast<const float2*>(a); const float2 * b_v = reinterpret_cast<const float2*>(b); result_v[idx].x = a_v[idx].x * b_v[idx].x; result_v[idx].y = a_v[idx].y * b_v[idx].y; } else if(N==4) { float4 * result_v = reinterpret_cast<float4*>(result); const float4 * a_v = reinterpret_cast<const float4*>(a); const float4 * b_v = reinterpret_cast<const float4*>(b); result_v[idx].x = a_v[idx].x * b_v[idx].x; result_v[idx].y = a_v[idx].y * b_v[idx].y; result_v[idx].z = a_v[idx].z * b_v[idx].z; result_v[idx].w = a_v[idx].w * b_v[idx].w; } } } __host__ void harris_gpu_array_multiply(const float * d_input_a, const int input_a_pitch, const float * d_input_b, const int input_b_pitch, float * d_output, const int output_pitch, const int cols, const int rows, cudaStream_t stream) { if(input_a_pitch != input_b_pitch || input_a_pitch != output_pitch) { throw std::runtime_error("Currently only common pitch is supported."); } int v, cols_vectorized, common_pitch_vectorized; int threadX, threadY; harris_gpu_find_best_vector_params(cols, input_a_pitch, input_a_pitch, v, cols_vectorized, common_pitch_vectorized, common_pitch_vectorized, threadX, threadY); kernel_params_t p = cuda_gen_kernel_params_2d(cols_vectorized,rows,threadX,threadY); decltype(&array_multiply_kernel<1>) kernel; switch(v) { case 4: kernel = array_multiply_kernel<4>; break; case 2: kernel = array_multiply_kernel<2>; break; case 1: kernel = array_multiply_kernel<1>; break; default: assert(0); kernel = array_multiply_kernel<1>; break; } kernel <<< p.blocks_per_grid, p.threads_per_block,0,stream>>>( d_input_a, d_input_b, d_output, cols_vectorized, rows, common_pitch_vectorized); CUDA_KERNEL_CHECK(); } __inline__ __device__ void sum_neighbors(const float * __restrict__ d_dx2, 
const float * __restrict__ d_dy2, const float * __restrict__ d_dxdy, const int & pitch_elements, const int & col, const int & row, float & a, float & b, float & c) { a = 0.0f; b = 0.0f; c = 0.0f; const int start_offset = (row-1) * pitch_elements + (col-1); const int row_offset = (pitch_elements-3); const float * ptr_a = d_dx2 + start_offset; const float * ptr_b = d_dxdy + start_offset; const float * ptr_c = d_dy2 + start_offset; #pragma unroll 3 for(int i=-1; i<2; ++i) { #pragma unroll 3 for(int j=-1; j<2; ++j) { a += *ptr_a; b += *ptr_b; c += *ptr_c; ++ptr_a; ++ptr_b; ++ptr_c; } ptr_a += row_offset; ptr_b += row_offset; ptr_c += row_offset; } a *= 1.0f/9.0f; b *= 1.0f/9.0f; c *= 1.0f/9.0f; } template<bool use_harris> __global__ void harris_gpu_calc_corner_response_kernel( const float * __restrict__ d_dx2, const float * __restrict__ d_dy2, const float * __restrict__ d_dxdy, const int input_pitch, float * __restrict__ d_response, const int output_pitch, const int minX, const int maxX, const int minY, const int maxY, const float k) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(y <= maxY && x <= maxX && y >= minY && x >= minX) { /* * Notes to future self: * M = | dx2_int dxy_int | = | a b | * | dxy_int dy2_int | | b c | * * Run a 3x3 box filter */ float a, b, c; sum_neighbors(d_dx2, d_dy2, d_dxdy, input_pitch, x, y, a, b, c); float r; if(use_harris) { // Harris score r = a*c -b*b - k*(a+c)*(a+c); } else { // Shi-Tomasi score: min(l1,l2) /* * l1 + l2 = a + c -> l1 = a + c - l2 * l1 * l2 = a * c - b^2 -> 0 = l2^2 - (a+c)l2 + (a*c-b^2) * l2 = (a+c +- sqrt((a+c)^2 - 4(a*c-b^2)) / 2 * After simplification: * l2 = a/2 + c/2 +- sqrt((a/2 - c/2)^2 + b^2) * l1 = a/2 + c/2 -+ sqrt((a/2 - c/2)^2 + b^2) * * Remark: * We multiplied the response by 2 for less arithmetics: * This will introduce a 2x scale difference between a "true" * implementation, and this. */ r = (a+c) - sqrtf((a-c)*(a-c) + 4*b*b); } d_response[y * output_pitch + x] = r; } } __host__ void harris_gpu_calc_corner_response(const float * d_dx2, const int dx2_pitch, const float * d_dy2, const int dy2_pitch, const float * d_dxdy, const int dxdy_pitch, float * d_response, const int response_pitch, const int cols, const int rows, const conv_filter_border_type_t border_type, const bool use_harris, const float k, cudaStream_t stream) { if(dx2_pitch != dy2_pitch || dx2_pitch != dxdy_pitch) { throw std::runtime_error("Currently only common pitch is supported."); } int threadX,threadY; harris_gpu_find_best_params(cols,threadX,threadY); kernel_params_t p = cuda_gen_kernel_params_2d(cols,rows,threadX,threadY); const int minX = 1 + (border_type == conv_filter_border_type::BORDER_SKIP?1:0); const int maxX = (cols-2) - (border_type == conv_filter_border_type::BORDER_SKIP?1:0); const int minY = 1 + (border_type == conv_filter_border_type::BORDER_SKIP?1:0); const int maxY = (rows-2) - (border_type == conv_filter_border_type::BORDER_SKIP?1:0); decltype(&harris_gpu_calc_corner_response_kernel<false>) kernel; if(use_harris) { kernel = harris_gpu_calc_corner_response_kernel<true>; } else { kernel = harris_gpu_calc_corner_response_kernel<false>; } kernel <<< p.blocks_per_grid, p.threads_per_block,0,stream>>> ( d_dx2, d_dy2, d_dxdy, dx2_pitch, d_response, response_pitch, minX, maxX, minY, maxY, k); CUDA_KERNEL_CHECK(); } } // namespace vilib
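// ---------------------------------------------------------------------------
// Standalone host-side sketch (an addition, not part of vilib) of the per-pixel
// corner scores computed by harris_gpu_calc_corner_response_kernel above, where
// M = | a b ; b c | holds the box-filtered dx^2, dx*dy and dy^2 sums.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

// Harris score: det(M) - k * trace(M)^2
static float harris_score(float a, float b, float c, float k)
{
  return a * c - b * b - k * (a + c) * (a + c);
}

// Shi-Tomasi score as computed in the kernel: (a + c) - sqrt((a - c)^2 + 4 b^2),
// which equals 2 * lambda_min of M (hence the 2x scale noted in the kernel comment).
static float shi_tomasi_score_times_two(float a, float b, float c)
{
  return (a + c) - std::sqrt((a - c) * (a - c) + 4.0f * b * b);
}

int main()
{
  // Hypothetical filtered gradient sums for one pixel.
  const float a = 4.0f, b = 1.0f, c = 2.0f, k = 0.04f;
  std::printf("harris = %f, 2*min_eigenvalue = %f\n",
              harris_score(a, b, c, k),
              shi_tomasi_score_times_two(a, b, c));
  return 0;
}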
#include <gtest/gtest.h> #include "serac/serac_config.hpp" #include "serac/numerics/mesh_utils_base.hpp" #include "serac/physics/utilities/quadrature_data.hpp" #include "serac/physics/utilities/state_manager.hpp" #include "serac/physics/utilities/functional/functional.hpp" using namespace serac; template <typename T> __global__ void fill(QuadratureDataView<T> output, int num_elements, int num_quadrature_points) { int elem_id = threadIdx.x + blockIdx.x * blockDim.x; int quad_id = threadIdx.y; if (elem_id < num_elements && quad_id < num_quadrature_points) { output(elem_id, quad_id) = elem_id * elem_id + quad_id; } } template <typename T> __global__ void copy(QuadratureDataView<T> destination, QuadratureDataView<T> source, int num_elements, int num_quadrature_points) { int elem_id = threadIdx.x + blockIdx.x * blockDim.x; int quad_id = threadIdx.y; if (elem_id < num_elements && quad_id < num_quadrature_points) { destination(elem_id, quad_id) = source(elem_id, quad_id); } } TEST(QuadratureDataCUDA, basic_fill_and_copy) { constexpr auto mesh_file = SERAC_REPO_DIR "/data/meshes/star.mesh"; auto mesh = serac::mesh::refineAndDistribute(serac::buildMeshFromFile(mesh_file), 0, 0); constexpr int p = 1; constexpr int elements_per_block = 16; const int num_elements = mesh->GetNE(); // FIXME: This assumes a homogeneous mesh const int geom = mesh->GetElementBaseGeometry(0); const int num_quadrature_points = mfem::IntRules.Get(geom, p).GetNPoints(); QuadratureData<int> source(*mesh, p); QuadratureData<int> destination(*mesh, p); dim3 blocks{elements_per_block, static_cast<unsigned int>(num_quadrature_points)}; dim3 grids{static_cast<unsigned int>(num_elements / elements_per_block)}; fill<<<grids, blocks>>>(QuadratureDataView{source}, num_elements, num_quadrature_points); copy<<<grids, blocks>>>(QuadratureDataView{destination}, QuadratureDataView{source}, num_elements, num_quadrature_points); cudaDeviceSynchronize(); EXPECT_TRUE(std::equal(source.begin(), source.end(), destination.begin())); } struct State { double x; }; bool operator==(const State& lhs, const State& rhs) { return lhs.x == rhs.x; } class QuadratureDataGPUTest : public ::testing::Test { protected: void SetUp() override { constexpr auto mesh_file = SERAC_REPO_DIR "/data/meshes/star.mesh"; default_mesh = mesh::refineAndDistribute(buildMeshFromFile(mesh_file), 0, 0); resetWithNewMesh(*default_mesh); } void resetWithNewMesh(mfem::ParMesh& new_mesh) { mesh = &new_mesh; festate = std::make_unique<FiniteElementState>(*mesh); festate->gridFunc() = 0.0; residual = std::make_unique<Functional<test_space(trial_space), ExecutionSpace::GPU>>(&festate->space(), &festate->space()); } static constexpr int p = 1; static constexpr int dim = 2; using test_space = H1<p>; using trial_space = H1<p>; std::unique_ptr<mfem::ParMesh> default_mesh; mfem::ParMesh* mesh = nullptr; std::unique_ptr<FiniteElementState> festate; std::unique_ptr<Functional<test_space(trial_space), ExecutionSpace::GPU>> residual; }; struct basic_state_qfunction { template <typename x_t, typename field_t, typename state_t> __host__ __device__ auto operator()(x_t&& /* x */, field_t&& u, state_t&& state) { state.x += 0.1; return u; } }; TEST_F(QuadratureDataGPUTest, basic_integrals) { QuadratureData<State> qdata(*mesh, p); State init{0.1}; qdata = init; residual->AddDomainIntegral(Dimension<dim>{}, basic_state_qfunction{}, *mesh, qdata); // If we run through it one time... 
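// (evaluating the residual applies basic_state_qfunction at every quadrature point, so each state.x is incremented from 0.1 to 0.2)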
mfem::Vector U(festate->space().TrueVSize()); (*residual)(U); // Then each element of the state should have been incremented accordingly... State correct{0.2}; for (const auto& s : qdata) { EXPECT_EQ(s, correct); } } struct StateWithDefault { double x = 0.5; }; bool operator==(const StateWithDefault& lhs, const StateWithDefault& rhs) { return lhs.x == rhs.x; } struct basic_state_with_default_qfunction { template <typename x_t, typename field_t, typename state_t> __host__ __device__ auto operator()(x_t&& /* x */, field_t&& u, state_t&& state) { state.x += 0.1; return u; } }; TEST_F(QuadratureDataGPUTest, basic_integrals_default) { QuadratureData<StateWithDefault> qdata(*mesh, p); residual->AddDomainIntegral(Dimension<dim>{}, basic_state_with_default_qfunction{}, *mesh, qdata); // If we run through it one time... mfem::Vector U(festate->space().TrueVSize()); (*residual)(U); // Then each element of the state should have been incremented accordingly... StateWithDefault correct{0.6}; const auto& const_qdata = qdata; for (auto& s : const_qdata) { EXPECT_EQ(s, correct); } } struct StateWithMultiFields { double x = 0.5; double y = 0.3; }; bool almost_equal(double a, double b) { return std::abs(a - b) < 1e-10; } bool operator==(const StateWithMultiFields& lhs, const StateWithMultiFields& rhs) { return almost_equal(lhs.x, rhs.x) && almost_equal(lhs.y, rhs.y); } struct basic_state_with_multi_fields_qfunction { template <typename x_t, typename field_t, typename state_t> __host__ __device__ auto operator()(x_t&& /* x */, field_t&& u, state_t&& state) { state.x += 0.1; state.y += 0.7; return u; } }; TEST_F(QuadratureDataGPUTest, basic_integrals_multi_fields) { QuadratureData<StateWithMultiFields> qdata(*mesh, p); residual->AddDomainIntegral(Dimension<dim>{}, basic_state_with_multi_fields_qfunction{}, *mesh, qdata); // If we run through it one time... mfem::Vector U(festate->space().TrueVSize()); (*residual)(U); // Then each element of the state should have been incremented accordingly... StateWithMultiFields correct{0.6, 1.0}; for (const auto& s : qdata) { EXPECT_EQ(s, correct); } } TEST_F(QuadratureDataGPUTest, basic_integrals_multi_fields_bulk_assignment) { QuadratureData<StateWithMultiFields> qdata(*mesh, p); qdata = StateWithMultiFields{0.7, 0.2}; residual->AddDomainIntegral(Dimension<dim>{}, basic_state_with_multi_fields_qfunction{}, *mesh, qdata); // If we run through it one time... mfem::Vector U(festate->space().TrueVSize()); (*residual)(U); // Then each element of the state should have been incremented accordingly... 
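// (the bulk-assigned {0.7, 0.2} plus the qfunction's {+0.1, +0.7} increment gives {0.8, 0.9})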
StateWithMultiFields correct{0.8, 0.9}; for (const auto& s : qdata) { EXPECT_EQ(s, correct); } } template <typename T> class QuadratureDataGPUStateManagerTest : public QuadratureDataGPUTest { public: using value_type = typename T::value_type; static constexpr value_type initial_state = T::initial_state; __host__ __device__ static void mutate(value_type& v, double other = 0.0) { T::mutate(v, other); } }; struct MultiFieldWrapper { using value_type = StateWithMultiFields; static constexpr value_type initial_state = {}; __host__ __device__ static void mutate(value_type& v, double other = 0.0) { v.x += (0.1 + other); v.y += (0.7 + other); } }; struct IntWrapper { using value_type = int; static constexpr value_type initial_state = 0; __host__ __device__ static void mutate(value_type& v, double other = 0.0) { v += 4; v += static_cast<int>(other * 10); } }; struct ThreeBytes { char x[3] = {1, 2, 3}; }; bool operator==(const ThreeBytes& lhs, const ThreeBytes& rhs) { return std::equal(lhs.x, lhs.x + 3, rhs.x); } struct ThreeBytesWrapper { using value_type = ThreeBytes; static_assert(sizeof(value_type) == 3); static constexpr value_type initial_state = {}; __host__ __device__ static void mutate(value_type& v, double other = 0.0) { v.x[0] = static_cast<char>(v.x[0] + 3 + (other * 10)); v.x[1] = static_cast<char>(v.x[1] + 2 + (other * 10)); v.x[2] = static_cast<char>(v.x[2] + 1 + (other * 10)); } }; using StateTypes = ::testing::Types<MultiFieldWrapper, IntWrapper, ThreeBytesWrapper>; // NOTE: The extra comma is here due a clang issue where the variadic macro param is not provided // so instead, we leave it unspecified/empty TYPED_TEST_SUITE(QuadratureDataGPUStateManagerTest, StateTypes, ); template <typename wrapper_t> struct state_manager_qfunction { template <typename x_t, typename field_t, typename state_t> __host__ __device__ auto operator()(x_t&& /* x */, field_t&& u, state_t&& state) { wrapper_t::mutate(state); return u; } }; template <typename wrapper_t> struct state_manager_varying_qfunction { template <typename x_t, typename field_t, typename state_t> __host__ __device__ auto operator()(x_t&& x, field_t&& u, state_t&& state) { double norm = 0.0; for (int i = 0; i < x.first_dim; i++) { norm += x[i] * x[i]; } wrapper_t::mutate(state, norm); mutated_data[idx++] = state; return u; } DeviceArray<typename wrapper_t::value_type>& mutated_data; int idx = 0; }; TYPED_TEST(QuadratureDataGPUStateManagerTest, basic_integrals_state_manager) { constexpr int cycle = 0; const auto mutated_once = []() { typename TestFixture::value_type result = TestFixture::initial_state; TestFixture::mutate(result); return result; }(); const auto mutated_twice = []() { typename TestFixture::value_type result = TestFixture::initial_state; TestFixture::mutate(result); TestFixture::mutate(result); return result; }(); // First set up the Functional object, run it once to update the state once, // then save it { axom::sidre::DataStore datastore; serac::StateManager::initialize(datastore); // We need to use "this->" explicitly because we are in a derived class template serac::StateManager::setMesh(std::move(this->default_mesh)); // Can't use auto& here because we're in a template context serac::QuadratureData<typename TestFixture::value_type>& qdata = serac::StateManager::newQuadratureData<typename TestFixture::value_type>("test_data", this->p); qdata = TestFixture::initial_state; this->residual->AddDomainIntegral(Dimension<TestFixture::dim>{}, state_manager_qfunction<TestFixture>{}, *this->mesh, qdata); // If we run through it 
one time... mfem::Vector U(this->festate->space().TrueVSize()); (*this->residual)(U); for (const auto& s : qdata) { EXPECT_EQ(s, mutated_once); } serac::StateManager::save(0.0, cycle); serac::StateManager::reset(); } // Then reload the state to make sure it was synced correctly, and update it again before saving { axom::sidre::DataStore datastore; serac::StateManager::initialize(datastore, "serac", "", cycle); // Since the original mesh is dead, use the mesh recovered from the save file to build a new Functional this->resetWithNewMesh(serac::StateManager::mesh()); serac::QuadratureData<typename TestFixture::value_type>& qdata = serac::StateManager::newQuadratureData<typename TestFixture::value_type>("test_data", this->p); // Make sure the changes from the first increment were propagated through for (const auto& s : qdata) { EXPECT_EQ(s, mutated_once); } // Note that the mesh here has been recovered from the save file, // same for the qdata (or rather the underlying QuadratureFunction) this->residual->AddDomainIntegral(Dimension<TestFixture::dim>{}, state_manager_qfunction<TestFixture>{}, *this->mesh, qdata); // Then increment it for the second time mfem::Vector U(this->festate->space().TrueVSize()); (*this->residual)(U); // Before saving it again serac::StateManager::save(0.1, cycle + 1); serac::StateManager::reset(); } // Ordered quadrature point data that is unique (mutated with the point's distance from the origin) DeviceArray<typename TestFixture::value_type> origin_mutated_data; // Reload the state again to make sure the same synchronization still happens when the data // is read in from a restart { axom::sidre::DataStore datastore; serac::StateManager::initialize(datastore, "serac", "", cycle + 1); // Since the original mesh is dead, use the mesh recovered from the save file to build a new Functional this->resetWithNewMesh(serac::StateManager::mesh()); serac::QuadratureData<typename TestFixture::value_type>& qdata = serac::StateManager::newQuadratureData<typename TestFixture::value_type>("test_data", this->p); // Make sure the changes from the second increment were propagated through for (const auto& s : qdata) { EXPECT_EQ(s, mutated_twice); } origin_mutated_data.resize(std::distance(qdata.begin(), qdata.end())); // this->residual->AddDomainIntegral(Dimension<TestFixture::dim>{}, // state_manager_varying_qfunction<TestFixture>{origin_mutated_data}, *this->mesh, // qdata); // Then mutate it for the third time mfem::Vector U(this->festate->space().TrueVSize()); (*this->residual)(U); // Before saving it again serac::StateManager::save(0.1, cycle + 2); serac::StateManager::reset(); } // Reload the state one more time to make sure order is preserved when reloading - the previous mutation // included the distance of the quadrature point from the origin (which is unique) { axom::sidre::DataStore datastore; serac::StateManager::initialize(datastore, "serac", "", cycle + 2); serac::QuadratureData<typename TestFixture::value_type>& qdata = serac::StateManager::newQuadratureData<typename TestFixture::value_type>("test_data", this->p); // Make sure the changes from the distance-specified increment were propagated through and in the correct order std::size_t i = 0; for (const auto& s : qdata) { // EXPECT_EQ(s, origin_mutated_data[i]); i++; } serac::StateManager::reset(); } } //------------------------------------------------------------------------------ #include "axom/slic/core/SimpleLogger.hpp" int main(int argc, char* argv[]) { int result = 0; ::testing::InitGoogleTest(&argc, argv); 
MPI_Init(&argc, &argv); serac::accelerator::initializeDevice(); axom::slic::SimpleLogger logger; // create & initialize test logger, finalized when exiting main scope result = RUN_ALL_TESTS(); MPI_Finalize(); // why does this test need to call terminateDevice, // but none of the other CUDA tests do? serac::accelerator::terminateDevice(); return result; }
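// ---------------------------------------------------------------------------
// Minimal standalone CUDA sketch (an addition, not using serac) of the
// (element, quadrature point) indexing pattern used by the fill/copy kernels
// above: threadIdx.x/blockIdx.x span elements, threadIdx.y spans the
// quadrature points of one element. Sizes below are hypothetical.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill_plain(int* out, int num_elements, int num_qpts)
{
  int elem_id = threadIdx.x + blockIdx.x * blockDim.x;
  int quad_id = threadIdx.y;
  if (elem_id < num_elements && quad_id < num_qpts) {
    out[elem_id * num_qpts + quad_id] = elem_id * elem_id + quad_id;
  }
}

int main()
{
  const int num_elements = 64, num_qpts = 4, elements_per_block = 16;
  int* d_data = nullptr;
  cudaMalloc(&d_data, num_elements * num_qpts * sizeof(int));

  dim3 block(elements_per_block, num_qpts);
  dim3 grid(num_elements / elements_per_block);
  fill_plain<<<grid, block>>>(d_data, num_elements, num_qpts);
  cudaDeviceSynchronize();

  int h_elem0[num_qpts];
  cudaMemcpy(h_elem0, d_data, sizeof(h_elem0), cudaMemcpyDeviceToHost);
  std::printf("element 0: %d %d %d %d\n", h_elem0[0], h_elem0[1], h_elem0[2], h_elem0[3]);

  cudaFree(d_data);
  return 0;
}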
#include <gtest/gtest.h> #include <isce3/cuda/core/Interp1d.h> #include <isce3/cuda/core/Kernels.h> #include <isce3/cuda/except/Error.h> #include <isce3/except/Error.h> #include <isce3/math/Sinc.h> using namespace isce3::cuda::core; using namespace isce3::except; using thrust::complex; // Adapted from julia code at // https://github.jpl.nasa.gov/bhawkins/FIRInterp.jl/blob/master/test/common.jl class TestSignal { public: TestSignal(int n, double bw, unsigned seed = 12345) : _bw(bw) { if (bw < 0. or bw >= 1.) { throw DomainError(ISCE_SRCINFO(), "bandwidth must be in [0, 1)"); } int nt = 4 * n; _t.resize(nt); _w.resize(nt); std::mt19937 rng(seed); std::normal_distribution<double> normal(0., 1. * n / nt); std::uniform_real_distribution<double> uniform(0., n - 1.); std::generate(_t.begin(), _t.end(), [&]() { return uniform(rng); }); std::generate(_w.begin(), _w.end(), [&]() { return complex<double>(normal(rng), normal(rng)); }); } std::complex<double> eval(double t) { complex<double> z = {0., 0.}; auto n = static_cast<int>(_w.size()); for (int i = 0; i < n; ++i) { z += _w[i] * isce3::math::sinc(_bw * (t - _t[i])); } return z; } std::vector<complex<double>> eval(const std::vector<double>& times) { std::vector<complex<double>> z(times.size()); std::transform(times.begin(), times.end(), z.begin(), [&](double t) { return eval(t); }); return z; } private: double _bw; std::vector<double> _t; std::vector<complex<double>> _w; }; template<class Kernel> __global__ void interp(Kernel kernel, complex<double>* out, const double* times, size_t out_n, const complex<double>* signal, size_t in_n) { auto tid = size_t(blockIdx.x) * blockDim.x + threadIdx.x; if (tid >= out_n) { return; } out[tid] = interp1d(kernel, signal, in_n, 1, times[tid]); } template<class Kernel> std::vector<complex<double>> interpolate(const Kernel& kernel, const std::vector<complex<double>>& signal, const std::vector<double>& times) { // copy signal and interp times to the device thrust::device_vector<complex<double>> d_signal = signal; thrust::device_vector<double> d_times = times; // create device vector to store output thrust::device_vector<complex<double>> d_out(times.size()); using KV = typename Kernel::view_type; // interpolate signal on the device int block = 128; int grid = (times.size() + block - 1) / block; interp<KV><<<grid, block>>>(kernel, d_out.data().get(), d_times.data().get(), times.size(), d_signal.data().get(), signal.size()); // check for kernel launch/execution errors checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); // copy result to host std::vector<complex<double>> out(d_out.size()); thrust::copy(d_out.begin(), d_out.end(), out.begin()); return out; } template<typename T> void checkSameSize(const std::vector<T>& x, const std::vector<T>& y) { if (x.size() != y.size()) { throw LengthError(ISCE_SRCINFO(), "input vectors must have the same size"); } } // compute the magnitude of the complex correlation between the two input // signals double correlation(const std::vector<complex<double>>& x, const std::vector<complex<double>>& y) { checkSameSize(x, y); auto n = static_cast<int>(x.size()); double xy = 0., xx = 0., yy = 0.; for (int i = 0; i < n; ++i) { auto xi = x[i]; auto yi = y[i]; xy += abs(xi * conj(yi)); xx += (xi * conj(xi)).real(); yy += (yi * conj(yi)).real(); } return xy / std::sqrt(xx * yy); } // return the arithmetic mean of the input vector double mean(const std::vector<double>& v) { double sum = std::accumulate(v.begin(), v.end(), 0.); auto n = static_cast<double>(v.size()); 
return sum / n; } // estimate the sample standard deviation of the input vector double stddev(const std::vector<double>& v, int ddof = 1) { double mu = mean(v); double sse = 0.; std::for_each(v.begin(), v.end(), [&](double d) { sse += (d - mu) * (d - mu); }); auto n = static_cast<double>(v.size()); return std::sqrt(sse / (n - ddof)); } // estimate the standard deviation of the phase difference between the two // inputs, assuming no phase wrapping double phaseStddev(const std::vector<complex<double>>& x, const std::vector<complex<double>>& y) { checkSameSize(x, y); auto n = static_cast<int>(x.size()); std::vector<double> phi(n); std::transform(x.begin(), x.end(), y.begin(), phi.begin(), [](complex<double> lhs, complex<double> rhs) { return arg(lhs * conj(rhs)); }); return stddev(phi); } // convert linear amplitude to decibels double dB(double x) { return 20. * std::log10(x); } void powerBiasStddev(double* bias, double* spread, const std::vector<complex<double>>& x, const std::vector<complex<double>>& y, double minval) { checkSameSize(x, y); auto n = static_cast<int>(x.size()); std::vector<double> ratio; for (int i = 0; i < n; ++i) { auto ax = abs(x[i]); auto ay = abs(y[i]); if (ax >= minval and ay >= minval) { ratio.push_back(ax / ay); } } *bias = dB(mean(ratio)); auto m = static_cast<int>(ratio.size()); std::vector<double> dbr(m); std::transform(ratio.begin(), ratio.end(), dbr.begin(), [](double r) { return dB(r); }); *spread = stddev(dbr); } // convert radians to degrees constexpr double rad2deg(double phi) { return phi * 180. / M_PI; } struct Interp1dTest : public testing::Test { // Length of input signal const int n = 512; // signal bandwidth as a fraction of sample rate const double bw = 0.8; // Trick to get apples-to-apples comparisons even for different kernel // widths. See assertions for acceptable range of values const int pad = 8; // Amplitude mask for backscatter const double minval = 1e-6; // Generator of bandlimited test signal at arbitrary time samples TestSignal ts; // Realization of signal at integer time steps std::vector<complex<double>> signal; Interp1dTest() : ts(n, bw) { assert(pad > 0 and 2 * pad < n); // Generate signal at integer sample times. 
std::vector<double> times(n); std::iota(times.begin(), times.end(), 0.); signal = ts.eval(times); } template<class Kernel> void testFixedOffset(const Kernel& kernel, double off, double min_corr, double max_phs, double max_bias, double max_spread) { std::printf("Testing fixed offset = %g\n", off); std::vector<double> times(n); std::iota(times.begin(), times.end(), off); checkInterp(kernel, times, min_corr, max_phs, max_bias, max_spread); } template<class Kernel> void testRandomOffsets(const Kernel& kernel, double min_corr, double max_phs, double max_bias, double max_spread, unsigned seed = 12345) { std::printf("Testing random offsets.\n"); std::mt19937 rng(2 * seed); std::uniform_real_distribution<double> uniform(-0.5, 0.5); std::vector<double> times(n); for (int i = 0; i < n; ++i) { times[i] = i + uniform(rng); } checkInterp(kernel, times, min_corr, max_phs, max_bias, max_spread); } template<class Kernel> void checkInterp(const Kernel& kernel, const std::vector<double>& times, double min_corr, double max_phs, double max_bias, double max_spread) { // evaluate signal and interpolate signal at test times std::vector<complex<double>> ref = ts.eval(times); std::vector<complex<double>> out = interpolate(kernel, signal, times); // mask boundary values auto nt = static_cast<int>(times.size()); for (int i = 0; i < nt; ++i) { auto t = times[i]; if (t < pad or t > n - 1 - pad) { out[i] = ref[i] = 0.; } } // check results auto corr = correlation(ref, out); auto phs = rad2deg(phaseStddev(ref, out)); double bias, spread; powerBiasStddev(&bias, &spread, ref, out, minval); EXPECT_GE(corr, min_corr); EXPECT_LE(phs, max_phs); EXPECT_LE(bias, max_bias); EXPECT_LE(spread, max_spread); std::printf("min_corr %9.6f | corr %9.6f\n", min_corr, corr); std::printf("max_phs %9.6f | phs %9.6f\n", max_phs, phs); std::printf("max_bias %9.6f | bias %9.6f\n", max_bias, bias); std::printf("max_spread %9.6f | spread %9.6f\n", max_spread, spread); std::printf("\n"); } }; TEST_F(Interp1dTest, Linear) { auto kernel = LinearKernel<double>(); // offset = 0 should give back original data for this kernel testFixedOffset(kernel, 0., 0.999999, 0.001, 0.001, 0.001); testFixedOffset(kernel, -0.3, 0.95, 30., 5., 5.); testFixedOffset(kernel, 0.3, 0.95, 30., 5., 5.); testFixedOffset(kernel, -0.5, 0.95, 40., 5., 5.); testFixedOffset(kernel, 0.5, 0.95, 40., 5., 5.); testRandomOffsets(kernel, 0.95, 30., 3., 3.); } TEST_F(Interp1dTest, Knab) { auto kernel = KnabKernel<double>(9., 0.8); // offset = 0 should give back original data for this kernel testFixedOffset(kernel, 0., 0.999999, 0.001, 0.001, 0.001); testFixedOffset(kernel, -0.3, 0.998, 5., 1., 1.); testFixedOffset(kernel, 0.3, 0.998, 5., 1., 1.); testFixedOffset(kernel, -0.3, 0.998, 5., 1., 1.); testFixedOffset(kernel, 0.5, 0.998, 5., 1., 1.); testRandomOffsets(kernel, 0.998, 5., 0.5, 0.5); } TEST_F(Interp1dTest, TabulatedKnab) { auto knab = KnabKernel<double>(9., 0.8); auto kernel = TabulatedKernel<double>(knab, 2048); // offset = 0 should give back original data for this kernel testFixedOffset(kernel, 0., 0.999999, 0.001, 0.001, 0.001); testRandomOffsets(kernel, 0.998, 5.0, 0.5, 0.5); } TEST_F(Interp1dTest, ChebyKnab) { auto knab = KnabKernel<double>(9., 0.8); auto kernel = ChebyKernel<double>(knab, 16); // offset = 0 should give back original data for this kernel testFixedOffset(kernel, 0., 0.999999, 0.001, 0.001, 0.001); testRandomOffsets(kernel, 0.998, 5.0, 0.5, 0.5); } int main(int argc, char* argv[]) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
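// ---------------------------------------------------------------------------
// Generic host-side sketch (an addition; not the isce3 interp1d implementation)
// of the FIR interpolation idea these tests exercise: the value at a fractional
// index t is a weighted sum of nearby samples, with weights from a kernel
// evaluated at the offsets t - i (a truncated sinc is used here for simplicity).
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>
#include <vector>

static double sinc(double x)
{
  const double pi = 3.14159265358979323846;
  if (x == 0.0) return 1.0;
  return std::sin(pi * x) / (pi * x);
}

// Hypothetical odd kernel width; out-of-range taps are simply skipped.
static double interp1d_sketch(const std::vector<double>& signal, double t, int width = 9)
{
  const int center = static_cast<int>(std::floor(t));
  const int half = width / 2;
  double acc = 0.0;
  for (int i = center - half; i <= center + half; ++i) {
    if (i < 0 || i >= static_cast<int>(signal.size())) continue;
    acc += signal[i] * sinc(t - i);
  }
  return acc;
}

int main()
{
  // Interpolate halfway between samples 4 and 5 of a hypothetical signal.
  std::vector<double> x = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81};
  std::printf("x(4.5) ~ %f\n", interp1d_sketch(x, 4.5));
  return 0;
}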
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // // @author George A. Shulinok <sgazeos@gmail.com> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <ops/declarable/helpers/image_resize.h> namespace sd { namespace ops { namespace helpers { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // computeInterpolationWeights kernel // outSize - output length // inSize - input size // scale - input scale // interporationData - result // template <class Scaler> static SD_KERNEL void computeInterpolationWeights(sd::LongType outSize, sd::LongType inSize, double scale, sd::LongType channels, BilinearInterpolationData* interpolationData) { interpolationData[outSize].bottomIndex = 0; interpolationData[outSize].topIndex = 0; auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; Scaler scaler; for (sd::LongType i = outSize - tid; i >= 0; i -= step) { double in = scaler(i, scale); // interpolationData[i].bottomIndex = static_cast<sd::LongType>(in); // interpolationData[i].topIndex = sd::math::sd_min(interpolationData[i].bottomIndex + 1, inSize - 1); // interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex; double const in_f = sd::math::p_floor<double>(in); double const in_c = sd::math::p_ceil<double>(in); interpolationData[i].bottomIndex = sd::math::sd_max(static_cast<sd::LongType>(in_f), (sd::LongType)0LL); // static_cast<sd::LongType>(in); interpolationData[i].topIndex = sd::math::sd_min(static_cast<sd::LongType>(in_c), inSize - 1); interpolationData[i].interpolarValue = in - in_f; if (channels) { math::atomics::sd_atomicMul(&interpolationData[i].bottomIndex, channels); math::atomics::sd_atomicMul(&interpolationData[i].topIndex, channels); } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // resize image with bilinear interpolation algorithm // static void resizeImage(sd::LaunchContext* context, NDArray const* images, sd::LongType batchSize, sd::LongType inHeight, sd::LongType inWidth, sd::LongType outHeight, sd::LongType outWidth, sd::LongType channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // resize image with bilinear interpolation algorithm kernel // template <typename T, typename Z> static SD_KERNEL void resizeImageKernel(T const* input, sd::LongType const* inputShape, Z* outputYptr, sd::LongType const* outputShape, sd::LongType batchSize, sd::LongType outWidth, sd::LongType outHeight, sd::LongType channels, sd::LongType inRowSize, sd::LongType outRowSize, sd::LongType inBatchNumValues, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) { for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x) { // blockIdx.x as batch index auto pX = 
input + batch * inBatchNumValues; for (sd::LongType y = threadIdx.x; y < outHeight; y += blockDim.x) { const T* ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize; const T* ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize; double yVal = ys_[y].interpolarValue; auto pZ = outputYptr + (batch * outHeight + y) * outRowSize; for (sd::LongType x = 0; x < outWidth; x++) { auto xsBottom = xs_[x].bottomIndex; auto xsTop = xs_[x].topIndex; auto xVal = xs_[x].interpolarValue; // process interpolation for all channels for (int c = 0; c < channels; c++) { Z topLeft(ys_input_lower_ptr[xsBottom + c]); Z topRight(ys_input_lower_ptr[xsTop + c]); Z bottomLeft(ys_input_upper_ptr[xsBottom + c]); Z bottomRight(ys_input_upper_ptr[xsTop + c]); Z top = topLeft + (topRight - topLeft) * xVal; Z bottom = bottomLeft + (bottomRight - bottomLeft) * xVal; Z resVal = Z(top + (bottom - top) * yVal); pZ[x * channels + c] = resVal; } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // resize image with template <typename T, typename F> static void resizeImage_(sd::LaunchContext* context, NDArray const* images, sd::LongType batchSize, sd::LongType inHeight, sd::LongType inWidth, sd::LongType outHeight, sd::LongType outWidth, sd::LongType channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output) { sd::LongType inRowSize = inWidth * channels; sd::LongType inBatchNumValues = inHeight * inRowSize; sd::LongType outRowSize = outWidth * channels; auto stream = context->getCudaStream(); T const* pInput = images->getDataBuffer()->specialAsT<T>(); // reinterpret_cast<T const *>(images->specialBuffer()); // // this works only with 'c' direction F* pOutput = output->dataBuffer()->specialAsT<F>(); // reinterpret_cast<F *>(output->specialBuffer()); dim3 batchSizeBlock(batchSize, 1, 1); dim3 pictureBlock(outHeight, outWidth, channels); resizeImageKernel<T, F><<<256, 256, 256, *stream>>>(pInput, images->specialShapeInfo(), pOutput, output->specialShapeInfo(), batchSize, outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_); auto err = cudaStreamSynchronize(*stream); if (err != 0) { throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename F> static sd::Status resizeBilinearFunctor_(sd::LaunchContext* context, NDArray const* images, int const width, int const height, bool const alignCorners, bool const halfPixelCenter, NDArray* output) { const sd::LongType batchSize = images->sizeAt(0); const sd::LongType inHeight = images->sizeAt(1); const sd::LongType inWidth = images->sizeAt(2); const sd::LongType channels = images->sizeAt(3); const sd::LongType outHeight = output->sizeAt(1); const sd::LongType outWidth = output->sizeAt(2); // Handle no-op resizes efficiently. 
if (outHeight == inHeight && outWidth == inWidth) { output->assign(images); return sd::Status::OK; } float heightScale = ImageResizerState::calculateResizeScale(inHeight, outHeight, alignCorners); float widthScale = ImageResizerState::calculateResizeScale(inWidth, outWidth, alignCorners); BilinearInterpolationData* xs_; // = xs.data(); BilinearInterpolationData* ys_; // = xs.data(); cudaError_t err = cudaMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1)); if (err != 0) { throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err); } err = cudaMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1)); if (err != 0) { throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err); } auto stream = context->getCudaStream(); // Compute the cached interpolation weights on the x and y dimensions. if (halfPixelCenter) { computeInterpolationWeights<HalfPixelScaler><<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_); computeInterpolationWeights<HalfPixelScaler> <<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_); } else { computeInterpolationWeights<LegacyScaler><<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_); computeInterpolationWeights<LegacyScaler><<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_); } NDArray::prepareSpecialUse({output}, {images}); resizeImage_<T, F>(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output); err = cudaStreamSynchronize(*stream); NDArray::registerSpecialUse({output}, {images}); err = cudaFree(xs_); if (err != 0) { throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err); } err = cudaFree(ys_); if (err != 0) { throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontical parts rectangulars", err); } return sd::Status::OK; } typedef float (*MODE_FUNC)(float); SD_DEVICE MODE_FUNC mode_functions[4] = {sd::math::p_floor<float>, sd::math::p_round_prefer_floor<float>, sd::math::p_round_prefer_ceil<float>, sd::math::p_ceil<float>}; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // resize by interpolation nearest neighbor algorithm kernel // template <typename T, typename Scaler> static SD_KERNEL void resizeNeighborKernel(T const* input, sd::LongType const* inputShape, T* output, sd::LongType const* outputShape, sd::LongType batchSize, sd::LongType inWidth, sd::LongType inHeight, sd::LongType outWidth, sd::LongType outHeight, sd::LongType channels, double widthScale, double heightScale, NearestMode nearestMode) { constexpr bool halfPixelCenter = std::is_same<Scaler, HalfPixelScaler>::value || std::is_same<Scaler, HalfPixelScalerNN>::value; MODE_FUNC modeFunc; switch (nearestMode) { case NearestMode::FLOOR: modeFunc = mode_functions[0]; break; case NearestMode::ROUND_PREFER_FLOOR: modeFunc = mode_functions[1]; break; case NearestMode::ROUND_PREFER_CEIL: modeFunc = mode_functions[2]; break; case NearestMode::CEIL: modeFunc = mode_functions[3]; break; default: modeFunc = mode_functions[0]; } Scaler scaler; // if(threadIdx.x==0){ // } // for (int b = blockIdx.x; b < batchSize; b += gridDim.x) if (blockIdx.x < batchSize) { auto b = blockIdx.x; for (int y = threadIdx.x; y < outHeight; y += blockDim.x) { auto posY = static_cast<sd::LongType>(modeFunc(scaler(y, 
heightScale))); sd::LongType inY = sd::math::sd_min(posY, inHeight - 1); if (halfPixelCenter) { inY = sd::math::sd_max(0LL, inY); } for (int x = threadIdx.y; x < outWidth; x += blockDim.y) { auto posX = static_cast<sd::LongType>(modeFunc(scaler(x, widthScale))); sd::LongType inX = sd::math::sd_min(posX, inWidth - 1); if (halfPixelCenter) { inX = sd::math::sd_max(0LL, inX); } auto start = blockIdx.z * blockDim.z + threadIdx.z; auto step = blockDim.z * gridDim.z; for (sd::LongType e = start; e < channels; e += step) { sd::LongType posX[] = {b, inY, inX, e}; sd::LongType posZ[] = {b, y, x, e}; auto xIndex = shape::getOffset(inputShape, posX); auto zIndex = shape::getOffset(outputShape, posZ); output[zIndex] = input[xIndex]; } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // resizeNeighborFunctor - main algorithm by nearest neighbor // template <typename T> sd::Status resizeNeighborFunctor_(sd::LaunchContext* context, NDArray const* images, int const width, int const height, CoordinateTransformationMode coorMode, NearestMode nearestMode, bool alignCorner, NDArray* output) { const sd::LongType batchSize = images->sizeAt(0); const sd::LongType inHeight = images->sizeAt(1); const sd::LongType inWidth = images->sizeAt(2); const sd::LongType channels = images->sizeAt(3); const sd::LongType outHeight = output->sizeAt(1); const sd::LongType outWidth = output->sizeAt(2); // Handle no-op resizes efficiently. if (outHeight == inHeight && outWidth == inWidth) { output->assign(images); return sd::Status::OK; } float heightScale = ImageResizerState::calculateResizeScale(inHeight, outHeight, alignCorner); float widthScale = ImageResizerState::calculateResizeScale(inWidth, outWidth, alignCorner); auto imagesBuffer = images->getDataBuffer()->specialAsT<T>(); // reinterpret_cast<T const*>(images->specialBuffer()); auto outputBuffer = output->dataBuffer()->specialAsT<T>(); // reinterpret_cast<T*>(output->specialBuffer()); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {images}); switch (coorMode) { case ASYMMETRIC: resizeNeighborKernel<T, LegacyScaler><<<batchSize, outHeight * outWidth, 512, *stream>>>( imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(), batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, nearestMode); break; case HALF_PIXEL: resizeNeighborKernel<T, HalfPixelScaler><<<batchSize, outHeight * outWidth, 512, *stream>>>( imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(), batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, nearestMode); break; case HALF_PIXEL_NN: resizeNeighborKernel<T, HalfPixelScalerNN><<<batchSize, outHeight * outWidth, 512, *stream>>>( imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(), batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, nearestMode); break; default: resizeNeighborKernel<T, HalfPixelScaler><<<batchSize, outHeight * outWidth, 512, *stream>>>( imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(), batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, nearestMode); break; }; NDArray::registerSpecialUse({output}, {images}); return sd::Status::OK; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // resizeImage - resize bilinear 
algorithm caller // void resizeImage(sd::LaunchContext* context, NDArray const* images, sd::LongType batchSize, sd::LongType inHeight, sd::LongType inWidth, sd::LongType outHeight, sd::LongType outWidth, sd::LongType channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output) { BUILD_DOUBLE_SELECTOR( images->dataType(), output->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), SD_NUMERIC_TYPES, SD_FLOAT_TYPES); } BUILD_DOUBLE_TEMPLATE(template void resizeImage_, (sd::LaunchContext * context, NDArray const* images, sd::LongType batchSize, sd::LongType inHeight, sd::LongType inWidth, sd::LongType outHeight, sd::LongType outWidth, sd::LongType channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), SD_NUMERIC_TYPES, SD_FLOAT_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// sd::Status resizeBilinearFunctor(sd::LaunchContext* context, NDArray const* images, int width, int height, bool const alignCorners, bool const halfPixelCenter, NDArray* output) { BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(), return resizeBilinearFunctor_, (context, images, width, height, alignCorners, halfPixelCenter, output), SD_NUMERIC_TYPES, SD_FLOAT_TYPES); } // BUILD_SINGLE_TEMPLATE(template sd::Status resizeBilinearFunctor_, (sd::LaunchContext* context, // NDArray const* images, int const width, int const height, bool const alignCorners, // bool const halfPixelCenter, NDArray* output), SD_COMMON_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// sd::Status resizeNeighborFunctor(sd::LaunchContext* context, NDArray const* images, int const width, int const height, CoordinateTransformationMode coorMode, NearestMode nearestMode, bool alignCorner, NDArray* output) { BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, coorMode, nearestMode, alignCorner, output), SD_COMMON_TYPES); } // BUILD_SINGLE_TEMPLATE(template sd::Status Logger::logStatusMsg, (sd::LaunchContext* context, NDArray const* // images, // int width, int height, bool const alignCorners, bool const halfPixelCenter, NDArray* output), // SD_COMMON_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Bicubic interpolation //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// static SD_KERNEL void initCoefTableKernel(const float a, float* table, sd::LongType tableSize) { KeysCubicKernelFunc<float> kernel(a); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int i = start; i <= tableSize; i += step) { float x = i * 1.0 / tableSize; table[i * 2] = kernel.calc_less1pt0(x); x += 1.0; table[i * 2 + 1] = kernel.calc_less2pt0(x); } } float* initCoeffsTable(const double a, cudaStream_t* stream) { // Allocate and initialize coefficients table using Bicubic // convolution algorithm. 
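// Added note (interpretation, not from the original sources): the table cached here
// appears to sample the Keys cubic convolution kernel, usually written for a free
// parameter a as
//   W(x) = (a + 2)|x|^3 - (a + 3)|x|^2 + 1,        |x| <= 1
//   W(x) = a|x|^3 - 5a|x|^2 + 8a|x| - 4a,          1 < |x| < 2
//   W(x) = 0                                       otherwise
// with a = -0.5 (Keys, 1981) or a = -0.75 in several legacy resizers. In
// initCoefTableKernel above, table[2*i] holds the |x| <= 1 branch at x = i / kTableSize
// and table[2*i + 1] holds the 1 < |x| < 2 branch evaluated at x + 1.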
// https://en.wikipedia.org/wiki/Bicubic_interpolation float* coeffs_table; // = new float[(kTableSize + 1) * 2]; auto err = cudaMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2)); if (err != 0) { throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for vertical parts rectangulars", err); } initCoefTableKernel<<<128, 128, 128, *stream>>>(static_cast<float>(a), coeffs_table, kTableSize); err = cudaStreamSynchronize(*stream); if (err != 0) { throw cuda_exception::build("helpers::initCoeffsTable: Cannot syncronize kernel", err); } return coeffs_table; } // SD_HOST_DEVICE const float* getCoeffsTable(const bool use_keys_cubic) { // // Static so that we initialize it on first use // if (use_keys_cubic) { // // http://ieeexplore.ieee.org/document/1163711/ // // R. G. Keys. Cubic convolution interpolation for digital image // // processing. IEEE Transactions on Acoustics, Speech, and Signal // // Processing, 29(6):1153–1160, 1981. // //static const float* coeffs_table = initCoeffsTable(-0.5f, stream); // return sCoeffsTableHalf; // } else { // //static const float* coeffs_table = initCoeffsTable(-0.75f, stream); // return sCoeffsTableThreeFourth; // } // } static SD_KERNEL void accumulateChannelsKernel(WeightsAndIndices* pXWais, sd::LongType outWidth, sd::LongType channels) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto x = start; x < outWidth; x += step) { pXWais[x]._index0 *= channels; pXWais[x]._index1 *= channels; pXWais[x]._index2 *= channels; pXWais[x]._index3 *= channels; } } template <typename Scaler> static SD_KERNEL void advanceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, sd::LongType inWidth, float widthScale, sd::LongType outWidth, sd::LongType channels, bool exclude_outside) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (auto x = start; x < outWidth; x += step) { getWeightsAndIndices<Scaler>(cacheTable, widthScale, x, inWidth, pXWais + x, exclude_outside); } __syncthreads(); if (start == 0) { // update only in one thread for (auto i = 0; i < outWidth; i++) { pXWais[i]._advance = calc->Advance(pXWais[i]._index0, pXWais[i]._index1, pXWais[i]._index2, pXWais[i]._index3); } } } // resizerState and xWais are device allocated template <typename Scaler> static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState, WeightsAndIndices* pXWais, bool exclude_outside) { auto stream = resizerState.stream; auto outWidth = resizerState.outWidth; CachedInterpolationCalculator calc; // = new CachedInterpolationCalculator; CachedInterpolationCalculator* pCalcD; auto err = cudaMalloc(&pCalcD, sizeof(CachedInterpolationCalculator)); if (err != 0) { cuda_exception::build( "helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err); } err = cudaMemcpyAsync(pCalcD, &calc, sizeof(CachedInterpolationCalculator), cudaMemcpyHostToDevice, *stream); if (err != 0) { cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err); } advanceWeightsAndIndicesKernel<Scaler><<<128, 128, 128, *stream>>>(coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, exclude_outside); err = cudaFree(pCalcD); if (err != 0) { cuda_exception::build( "helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate 
calculator", err); } err = cudaStreamSynchronize(*stream); if (err != 0) { cuda_exception::build( "helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err); } // Scale the values so they can be used as offsets into buffers. accumulateChannelsKernel<<<128, 128, 512, *stream>>>(pXWais, outWidth, resizerState.wStride); err = cudaStreamSynchronize(*stream); if (err != 0) { cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err); } } template <typename T, typename Scaler> static SD_KERNEL void bicubicInterpolateWithCachingKernel(float const* cachedTable, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool exclude_outside, float* outputPtr) { // auto numChannels = pResizerState->channels; const auto batchStride = pResizerState->bStride; const auto hStride = pResizerState->hStride; const auto cStride = pResizerState->cStride; for (sd::LongType b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) { auto pInput = inputPtr + b * batchStride; float* cachedValue; for (sd::LongType y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) { if (threadIdx.x == 0) { extern __shared__ char sharedChar[]; cachedValue = reinterpret_cast<float*>(sharedChar); } auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels; auto pOutput = &outputPtr[pos]; struct WeightsAndIndices yWai; getWeightsAndIndices<Scaler>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai, exclude_outside); // Make pointers represent offsets of data in inputBPtr. const T* y_ptr_0 = pInput + yWai._index0 * hStride; const T* y_ptr_1 = pInput + yWai._index1 * hStride; const T* y_ptr_2 = pInput + yWai._index2 * hStride; const T* y_ptr_3 = pInput + yWai._index3 * hStride; if (pResizerState->channels == 100) { // Manually unroll case of 3 channels. float cached_value_0[4] = {0}; float cached_value_1[4] = {0}; float cached_value_2[4] = {0}; for (sd::LongType x = 0; x < pResizerState->outWidth; ++x) { const WeightsAndIndices& xWai = xWais[x]; // Shift values in cached_value_* to fill first '_advance' values. switch (xWai._advance) { case 3: cached_value_0[0] = cached_value_0[1]; cached_value_0[1] = cached_value_0[2]; cached_value_0[2] = cached_value_0[3]; cached_value_1[0] = cached_value_1[1]; cached_value_1[1] = cached_value_1[2]; cached_value_1[2] = cached_value_1[3]; cached_value_2[0] = cached_value_2[1]; cached_value_2[1] = cached_value_2[2]; cached_value_2[2] = cached_value_2[3]; break; case 2: cached_value_0[0] = cached_value_0[2]; cached_value_0[1] = cached_value_0[3]; cached_value_1[0] = cached_value_1[2]; cached_value_1[1] = cached_value_1[3]; cached_value_2[0] = cached_value_2[2]; cached_value_2[1] = cached_value_2[3]; break; case 1: { cached_value_0[0] = cached_value_0[3]; cached_value_1[0] = cached_value_1[3]; cached_value_2[0] = cached_value_2[3]; break; } } // Set the remaining '4-_advance' values by computing. 
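// Clarifying comment (added): the switch below, like the generic per-channel variant
// later in this kernel, relies on deliberate case fall-through. Entering at
// case '_advance' computes cached entries _advance .. 3, while entries
// 0 .. _advance - 1 were already filled by the shift logic just above, so only the
// values that could not be reused from the previous output column are recomputed.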
switch (xWai._advance) { case 0: cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_1[0] = computeYInterpolation(0, cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_2[0] = computeYInterpolation(0, 2 * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); case 1: cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_1[1] = computeYInterpolation(1, cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_2[1] = computeYInterpolation(1, 2 * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); case 2: cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_1[2] = computeYInterpolation(2, cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_2[2] = computeYInterpolation(2, 2 * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); case 3: cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_1[3] = computeYInterpolation(3, cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); cached_value_2[3] = computeYInterpolation(3, 2 * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); // break; } pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3); pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3); pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3); } } else { for (sd::LongType x = 0; x < pResizerState->outWidth; ++x) { const WeightsAndIndices& xWai = xWais[x]; // Shift values in cachedValue to fill first '_advance' values. switch (xWai._advance) { case 3: for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 0] = cachedValue[4 * c + 1]; cachedValue[4 * c + 1] = cachedValue[4 * c + 2]; cachedValue[4 * c + 2] = cachedValue[4 * c + 3]; } break; case 2: for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 0] = cachedValue[4 * c + 2]; cachedValue[4 * c + 1] = cachedValue[4 * c + 3]; } break; case 1: { for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 0] = cachedValue[4 * c + 3]; } break; } } // Set the remaining '4-_advance' values by computing. 
switch (xWai._advance) { case 0: for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 0] = computeYInterpolation(0, c * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); } case 1: for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 1] = computeYInterpolation(1, c * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); } case 2: for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 2] = computeYInterpolation(2, c * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); } case 3: for (sd::LongType c = 0; c < pResizerState->channels; ++c) { cachedValue[4 * c + 3] = computeYInterpolation(3, c * cStride, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai); } // break; } for (sd::LongType c = 0; c < pResizerState->channels; ++c) { auto res = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3); pOutput[x * pResizerState->channels + c] = res; } } } } } } template <typename T, typename Scaler> static void bicubicInterpolateWithCaching(NDArray const* image, const ImageResizerState& resizerState, const double coefficient, bool exclude_outside, NDArray* output) { const auto numChannels = resizerState.channels; auto stream = resizerState.stream; // output->getContext()->getCudaStream(); ImageResizerState* resizerStateD; auto err = cudaMalloc(&resizerStateD, sizeof(ImageResizerState)); if (err != 0) { throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err); } err = cudaMemcpyAsync(resizerStateD, &resizerState, sizeof(ImageResizerState), cudaMemcpyHostToDevice, *stream); if (err != 0) { throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err); } // float* cachedValue = nullptr; // size_t cachedSize = sizeof(float) * (numChannels == 3 ? 
0 : 4 * numChannels); // if (cachedSize) { // err = cudaMalloc(reinterpret_cast<void**>(&cachedValue), cachedSize); // if (err != 0) { // throw cuda_exception::build( // "helpers::bicubicInterpolateWithCaching: Cannot allocate memory for cached values", err); // } // err = cudaMemset(cachedValue, 0, cachedSize); // if (err != 0) { // throw cuda_exception::build( // "helpers::bicubicInterpolateWithCaching: Cannot set up memory for cached values", err); // } // } WeightsAndIndices* xWais; //(resizerState.outWidth); err = cudaMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth); if (err != 0) { throw cuda_exception::build( "helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err); } auto coeffsTable = initCoeffsTable( coefficient, stream); // halfPixelCenters?initCoeffsTable(-0.5, stream): initCoeffsTable(-0.75, stream); if (err != 0) { throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err); } computeXWeightsAndIndices<Scaler>(coeffsTable, resizerState, xWais, exclude_outside); err = cudaStreamQuery(*stream); if (err != 0) { throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err); } const T* pInput = image->getDataBuffer()->specialAsT<T>(); float* pOutput = output->dataBuffer()->specialAsT<float>(); //_data.data(); bicubicInterpolateWithCachingKernel<T, Scaler> <<<128, 1, 512, *stream>>>(coeffsTable, pInput, resizerStateD, xWais, exclude_outside, pOutput); err = cudaStreamSynchronize(*stream); if (err != 0) { throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err); } err = cudaFree(resizerStateD); if (err != 0) { throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err); } // if (cachedSize) // err = cudaFree(cachedValue); // if (err != 0) { // throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for cached // values", err); // } err = cudaFree(xWais); if (err != 0) { throw cuda_exception::build( "helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err); } err = cudaFree(coeffsTable); if (err != 0) { throw cuda_exception::build( "helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> sd::Status resizeBicubicFunctor_(sd::LaunchContext* context, NDArray const* image, int width, int height, bool preserveAspectRatio, bool antialias, NDArray* output) { return sd::Status::OK; } sd::Status resizeBicubicFunctor(sd::LaunchContext* context, NDArray const* image, int width, int height, bool preserveAspectRatio, bool antialias, NDArray* output) { BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image, width, height, preserveAspectRatio, antialias, output), SD_NUMERIC_TYPES); } BUILD_SINGLE_TEMPLATE(template sd::Status resizeBicubicFunctor_, (sd::LaunchContext * context, NDArray const* image, int width, int height, bool preserveAspectRatio, bool antialias, NDArray* output), SD_NUMERIC_TYPES); // ------------------------------------------------------------------------------------------------------------------ // static SD_KERNEL void fillInterpolationCache(CachedInterpolation* xCached, sd::LongType cacheLen, sd::LongType 
inWidth, float widthScale) { auto start = blockIdx.x * blockDim.x + threadIdx.x; auto increment = blockDim.x * gridDim.x; for (auto x = start; x < cacheLen; x += increment) { auto& xCache = xCached[x]; const float inX = x * widthScale; const float inX1 = (x + 1) * widthScale; sd::LongType v = math::sd_floor<float, sd::LongType>(inX); xCache.start = v; xCache.startScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f); v = math::sd_ceil<float, sd::LongType>(inX1); xCache.end = v--; xCache.endMinusOneScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f); xCache.needsBounding = bound(xCache.start, inWidth) != xCache.start || bound(xCache.end - 1, inWidth) != (xCache.end - 1); } } // ------------------------------------------------------------------------------------------------------------------ // template <typename T> static SD_KERNEL void resizeAreaKernel(ImageResizerState const* pSt, CachedInterpolation const* caches, float scale, T const* inputPtr, sd::LongType const* inputShape, float* outputPtr, sd::LongType const* outputShape, ScaleCache<T>* cachePool) { // batch * outWidth * outHeight for (auto batch = blockIdx.x; batch < pSt->batchSize; batch += gridDim.x) { for (auto y = threadIdx.x; y < pSt->outHeight; y += blockDim.x) { const float inY = y * pSt->heightScale; const float inY1 = (y + 1) * pSt->heightScale; // The start and end height indices of all the cells that could // contribute to the target cell. const sd::LongType yStart = math::sd_floor<float, sd::LongType>(inY); const sd::LongType yEnd = math::sd_ceil<float, sd::LongType>(inY1); auto scalesDim = yEnd - yStart; auto yScaleCache = cachePool + (batch * pSt->outHeight + y) * pSt->outWidth; // auto startPtr = sharedPtr + y * scalesDim * sizeof(float); // float* yScales = yScalesShare + y * sizeof(float) * scalesDim;//reinterpret_cast<float*>(startPtr); //shared + // y * scalesDim * y + scalesDim * sizeof(T const *) [scalesDim]; T const** yPtrs = yPtrsShare + y * sizeof(T // const*) * scalesDim; //[scalesDim]; yPtrs = reinterpret_cast<T const**>(sharedBuf); float* output = outputPtr + (batch * pSt->outHeight + y) * pSt->channels * pSt->outWidth; // int k = 0; for (sd::LongType i = yStart, k = 0; i < yEnd; ++i, ++k) { float scaleY; if (i < inY) { scaleY = (i + 1 > inY1 ? pSt->heightScale : i + 1 - inY); } else { scaleY = (i + 1 > inY1 ? inY1 - i : 1.0); } yScaleCache[k].yScale = scaleY; yScaleCache[k].yPtr = inputPtr + (batch * pSt->bStride + bound(i, pSt->inHeight) * pSt->hStride); } if (pSt->channels == 3) { for (sd::LongType x = 0; x < pSt->outWidth; ++x) { const CachedInterpolation& xCache = caches[x]; computePatchSumOf3Channels<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output); output += pSt->channels; } } else { for (sd::LongType x = 0; x < pSt->outWidth; ++x) { const CachedInterpolation& xCache = caches[x]; computePatchSum<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output); output += pSt->channels; } } } } } template <typename T> static void resizeArea(cudaStream_t* stream, ImageResizerState const& st, CachedInterpolation* cache, NDArray const* input, NDArray* output) { T const* inputPtr = reinterpret_cast<T const*>(input->specialBuffer()); // float* yScales; // T const** yPtrs; float scale = 1.f / (st.heightScale * st.widthScale); auto outputPtr = reinterpret_cast<float*>(output->specialBuffer()); // output is always float. 
TO DO: provide another float types // also with template <typename X, typename Z> declaration ImageResizerState* pSt; auto err = cudaMalloc(&pSt, sizeof(ImageResizerState)); if (err != 0) { throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for ImageResizerState", err); } err = cudaMemcpyAsync(pSt, &st, sizeof(ImageResizerState), cudaMemcpyHostToDevice, *stream); if (err != 0) { throw cuda_exception::build("helpers::resizeArea: Cannot copy to device memory", err); } ScaleCache<T>* cachePool; auto cachePoolSize = sizeof(ScaleCache<T>) * st.batchSize * st.outWidth * st.outHeight; err = cudaMalloc(&cachePool, cachePoolSize); if (err != 0) { throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for cache", err); } resizeAreaKernel<T><<<128, 128, 2048, *stream>>>(pSt, cache, scale, inputPtr, input->specialShapeInfo(), outputPtr, output->specialShapeInfo(), cachePool); err = cudaStreamSynchronize(*stream); if (err != 0) { throw cuda_exception::build("helpers::resizeArea: An error occured with kernel running", err); } err = cudaFree(cachePool); if (err != 0) { throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for cache", err); } err = cudaFree(pSt); if (err != 0) { throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for ImageResizeState", err); } } // ------------------------------------------------------------------------------------------------------------------ // template <typename T> sd::Status resizeAreaFunctor_(sd::LaunchContext* context, NDArray const* image, int const width, int const height, bool const alignCorners, NDArray* output) { ImageResizerState st(alignCorners, false); // Create resize info auto res = st.validateAndCalculateOutputSize(image, width, height); auto stream = context->getCudaStream(); if (sd::Status::OK == res) { CachedInterpolation* xCached; //(st.outWidth); auto err = cudaMalloc(&xCached, sizeof(CachedInterpolation) * st.outWidth); if (err != 0) { throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot allocate memory for cached interpolations", err); } NDArray::prepareSpecialUse({output}, {image}); fillInterpolationCache<<<128, 128, 256, *stream>>>(xCached, st.outWidth, st.inWidth, st.widthScale); resizeArea<T>(stream, st, xCached, image, output); err = cudaStreamSynchronize(*stream); if (err != 0) { throw cuda_exception::build("helpers::resizeAreaFunctor_: Error occured when kernel was running", err); } err = cudaFree(xCached); if (err != 0) { throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot deallocate memory for cached interpolations", err); } NDArray::registerSpecialUse({output}, {image}); } return res; } sd::Status resizeAreaFunctor(sd::LaunchContext* context, NDArray const* image, int const width, int const height, bool const alignCorners, NDArray* output) { BUILD_SINGLE_SELECTOR(image->dataType(), return resizeAreaFunctor_, (context, image, width, height, alignCorners, output), SD_NUMERIC_TYPES); } // ------------------------------------------------------------------------------------------------------------------ // // simplified bicubic resize without antialiasing // template <typename T> sd::Status resizeBicubicFunctorA_(sd::LaunchContext* context, NDArray const* image, int const width, int const height, bool const alignCorners, CoordinateTransformationMode coorMode, bool exclude_outside, double coefficient, NDArray* output) { ImageResizerState st(alignCorners, coorMode == HALF_PIXEL, context->getCudaStream()); // align_corners, 
half_pixel_align NDArray::prepareSpecialUse({output}, {image}); sd::Status res = st.validateAndCreateOutput(image, width, height); if (res == sd::Status::OK) { switch (coorMode) { case ASYMMETRIC: bicubicInterpolateWithCaching<T, LegacyScaler>(image, st, coefficient, exclude_outside, output); break; case HALF_PIXEL: bicubicInterpolateWithCaching<T, HalfPixelScaler>(image, st, coefficient, exclude_outside, output); break; case HALF_PIXEL_NN: bicubicInterpolateWithCaching<T, HalfPixelScalerNN>(image, st, coefficient, exclude_outside, output); break; default: break; } } NDArray::registerSpecialUse({output}, {image}); return res; } sd::Status resizeBicubicFunctorA(sd::LaunchContext* context, NDArray const* image, int const width, int const height, bool const alignCorners, CoordinateTransformationMode coorMode, bool exclude_outside, double coefficient, NDArray* output) { BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context, image, width, height, alignCorners, coorMode, exclude_outside, coefficient, output), SD_NUMERIC_TYPES); } // ------------------------------------------------------------------------------------------------------------------ // sd::Status resizeImagesFunctor(sd::LaunchContext* context, NDArray const* image, int const width, int const height, ImageResizeMethods method, bool alignCorners, NDArray* output) { switch (method) { case kResizeBilinear: return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output); case kResizeNearest: return resizeNeighborFunctor(context, image, width, height, CoordinateTransformationMode::ASYMMETRIC, alignCorners ? NearestMode::ROUND_PREFER_CEIL : NearestMode::FLOOR, alignCorners, output); case kResizeBicubic: return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output); case kResizeArea: return resizeAreaFunctor(context, image, width, height, alignCorners, output); default: throw std::runtime_error("helper::resizeImagesFunctor: Wrong resize method."); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // --------------------------------------------------------------------------------------------------------------- // // Crop and Resize helper implementation // -------------------------------------------------------------------------------------------------------------- // // cropAndResize kernel type of input(images) and output should be the same // template <typename T, typename Z, typename I> static SD_KERNEL void cropAndResizeKernel(T const* images, sd::LongType const* imagesShape, Z const* boxes, sd::LongType const* boxesShape, I const* indices, sd::LongType const* indexShape, I const* cropSize, sd::LongType const* cropShape, int method, double extrapolationVal, T* output, sd::LongType const* outputShape, int numBoxes, int cropHeight, int cropWidth, int batchSize, int imageHeight, int imageWidth, int depth) { for (int b = blockIdx.x; b < numBoxes; b += gridDim.x) { sd::LongType x1Pos[] = {b, 1}; sd::LongType y1Pos[] = {b, 0}; sd::LongType y2Pos[] = {b, 2}; sd::LongType x2Pos[] = {b, 3}; Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)]; //->t<T>(b, 0)]; Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)]; Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)]; Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)]; int bIn = indices[b]; if (bIn >= batchSize) { continue; } Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0); Z widthScale = (cropWidth > 1) ? 
(x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0); for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) { const float inY = (cropHeight > 1) ? y1 * (imageHeight - 1) + y * heightScale : 0.5 * (y1 + y2) * (imageHeight - 1); if (inY < 0 || inY > imageHeight - 1) { for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) { auto start = blockIdx.z * blockDim.x + threadIdx.z; auto step = blockDim.z * gridDim.z; for (int d = start; d < depth; d += step) { sd::LongType zPos[] = {b, y, x, d}; auto zIndex = shape::getOffset(outputShape, zPos); output[zIndex] = (Z)extrapolationVal; // crops->p(b, y, x, d, extrapolationVal); } } continue; } if (method == 0 /* bilinear */) { const int topYIndex = sd::math::p_floor(inY); const int bottomYIndex = sd::math::p_ceil(inY); const float y_lerp = inY - topYIndex; for (int x = 0; x < cropWidth; ++x) { const float in_x = (cropWidth > 1) ? x1 * (imageWidth - 1) + x * widthScale : 0.5 * (x1 + x2) * (imageWidth - 1); if (in_x < 0 || in_x > imageWidth - 1) { auto start = blockIdx.z * blockDim.x + threadIdx.z; auto step = blockDim.z * gridDim.z; for (int d = start; d < depth; d += step) { sd::LongType zPos[] = {b, y, x, d}; auto zIndex = shape::getOffset(outputShape, zPos); output[zIndex] = (Z)extrapolationVal; // crops->p(b, y, x, d, extrapolationVal); } continue; } int left_x_index = math::p_floor(in_x); int right_x_index = math::p_ceil(in_x); T x_lerp = in_x - left_x_index; auto start = blockIdx.z * blockDim.x + threadIdx.z; auto step = blockDim.z * gridDim.z; for (int d = start; d < depth; d += step) { sd::LongType topLeftPos[] = {bIn, topYIndex, left_x_index, d}; sd::LongType topRightPos[] = {bIn, topYIndex, right_x_index, d}; sd::LongType bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d}; sd::LongType bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d}; const T topLeft( images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d)); const T topRight( images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d)); const T bottomLeft(images[shape::getOffset( imagesShape, bottomLeftPos)]); //->e<float>(bIn, bottomYIndex, left_x_index, d)); const T bottomRight(images[shape::getOffset( imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d)); const T top = topLeft + (topRight - topLeft) * x_lerp; const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp; sd::LongType zPos[] = {b, y, x, d}; auto zIndex = shape::getOffset(outputShape, zPos); output[zIndex] = Z(top + (bottom - top) * y_lerp); } } } else { // method is "nearest neighbor" for (int x = 0; x < cropWidth; ++x) { const float inX = (cropWidth > 1) ? 
x1 * (imageWidth - 1) + x * widthScale : 0.5 * (x1 + x2) * (imageWidth - 1); if (inX < 0 || inX > imageWidth - 1) { auto start = blockIdx.z * blockDim.x + threadIdx.z; auto step = blockDim.z * gridDim.z; for (int d = start; d < depth; d += step) { sd::LongType zPos[] = {b, y, x, d}; auto zIndex = shape::getOffset(outputShape, zPos); output[zIndex] = (Z)extrapolationVal; } continue; } const int closestXIndex = roundf(inX); const int closestYIndex = roundf(inY); auto start = blockIdx.z * blockDim.x + threadIdx.z; auto step = blockDim.z * gridDim.z; for (int d = start; d < depth; d += step) { sd::LongType zPos[] = {b, y, x, d}; sd::LongType xPos[] = {bIn, closestYIndex, closestXIndex, d}; auto zIndex = shape::getOffset(outputShape, zPos); auto xIndex = shape::getOffset(imagesShape, xPos); output[zIndex] = images[xIndex]; } } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // cropAndResizeFunctor main algorithm // context - launch context // images - batch of images (4D tensor - [batch, width, height, pixels]) // boxes - 2D tensor with boxes for crop // indices - 2D int tensor with indices of boxes to crop // cropSize - 2D int tensor with crop box sizes // method - (one of 0 - bilinear, 1 - nearest) // extrapolationVal - double value of extrapolation // crops - output (4D tensor - [batch, outWidth, outHeight, pixels]) // template <typename T, typename Z, typename I> void cropAndResizeFunctor_(sd::LaunchContext* context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops) { const int batchSize = images->sizeAt(0); const int imageHeight = images->sizeAt(1); const int imageWidth = images->sizeAt(2); const int numBoxes = crops->sizeAt(0); const int cropHeight = crops->sizeAt(1); const int cropWidth = crops->sizeAt(2); const int depth = crops->sizeAt(3); auto stream = context->getCudaStream(); T const* imagesBuf = reinterpret_cast<T const*>(images->specialBuffer()); Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->specialBuffer()); I const* indexBuf = reinterpret_cast<I const*>(indices->specialBuffer()); I const* cropSizes = reinterpret_cast<I const*>(cropSize->specialBuffer()); T* outBuf = reinterpret_cast<T*>(crops->specialBuffer()); int threadsPerBlock = math::sd_max(imageHeight * imageWidth, cropHeight * cropWidth); if (threadsPerBlock > SD_MAX_NUM_THREADS / 4) threadsPerBlock = SD_MAX_NUM_THREADS / 4; NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize}); cropAndResizeKernel<T, Z, I><<<batchSize, threadsPerBlock, 256, *stream>>>( imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), indexBuf, indices->specialShapeInfo(), cropSizes, cropSize->specialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth); NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize}); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void cropAndResizeFunctor(sd::LaunchContext* context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops) { BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_, (context, images, boxes, indices, cropSize, method, extrapolationVal, crops), 
SD_NUMERIC_TYPES, SD_FLOAT_TYPES, SD_INTEGER_TYPES); // } BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_, (sd::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops), SD_NUMERIC_TYPES, SD_FLOAT_TYPES, SD_INTEGER_TYPES); } // namespace helpers } // namespace ops } // namespace sd
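////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added for exposition, not part of the file above): the bilinear
// path caches, per output coordinate, a bottom index, a top index and a fractional
// weight, then lerps twice per pixel. The small host-only program below reproduces
// that idea in 1-D. The names InterpWeight and makeWeight, and the choice of
// align_corners = false, are assumptions of this example, not library API.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct InterpWeight {   // analogue of BilinearInterpolationData
  long bottom, top;     // indices of the two contributing source samples
  float lerp;           // fractional distance towards `top`
};

// Asymmetric (legacy) mapping is out * scale; half-pixel maps pixel centers.
static InterpWeight makeWeight(long outIdx, long inSize, float scale, bool halfPixel) {
  float in = halfPixel ? (outIdx + 0.5f) * scale - 0.5f : outIdx * scale;
  in = std::max(0.0f, in);
  InterpWeight w;
  w.bottom = std::min(static_cast<long>(std::floor(in)), inSize - 1);
  w.top = std::min(w.bottom + 1, inSize - 1);
  w.lerp = in - static_cast<float>(w.bottom);
  return w;
}

int main() {
  // 1-D example: resize a 4-sample row to 8 samples (scale = inW / outW).
  std::vector<float> row = {0.f, 10.f, 20.f, 30.f};
  const long inW = 4, outW = 8;
  const float scale = static_cast<float>(inW) / static_cast<float>(outW);
  for (long x = 0; x < outW; ++x) {
    InterpWeight w = makeWeight(x, inW, scale, /*halfPixel=*/true);
    float v = row[w.bottom] + (row[w.top] - row[w.bottom]) * w.lerp;
    std::printf("out[%ld] = %.2f\n", x, v);
  }
  return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////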
// Copyright 2015-2019 Johns Hopkins University (author: Daniel Povey) // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. #include <cfloat> #include "chain/chain-kernels-ansi.h" #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 200 #error - Kaldi no longer supports CC1.x devices. Please use a newer GPU or \ configure with --use-cuda=no (this will disable the use of GPU). #endif #ifdef __CUDACC__ #if ( __CUDACC_VER_MAJOR__ >= 8 ) && ( !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 ) // native implementation available #else #if __CUDA_ARCH__ >= 600 #error using CAS implementation of double atomicAdd #endif __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif #endif template <typename Real> __device__ inline void atomic_add(Real* address, Real value) { atomicAdd(address, value); } template <typename Real> __device__ inline void atomic_add_thresholded(Real* address, Real value) { // This function uses a randomized algorithm to only do atomic adds for values // >=n a threshold, and if it's below the threshold, randomly add the // threshold itself with probability (value / threshold). This preserves // expectations. Note: we assume that value >= 0. // kThresholdingPowerOfTwo is defined in chain-datastruct.h; it defines // the threshold for randomized posterior pruning. const Real threshold = 1.0 / (1 << kThresholdingPowerOfTwo); if (value >= threshold) { atomic_add(address, value); } else { // The intention here is to do: // with probability(value / threshold), do: // atomic_add(address, threshold); // We use the least significant bits of the value as a source of // randomness. It would probably be more efficient to extract these // random bits directly from the float, but I don't want to have to // deal with endian-ness issues. // // below, x is a fixed-point representation of (value / threshold); it would // be 16777216 == 2^24 if value == threshold and 0 if value == 0. We choose // the power 24 because that's the number of binary digits in the mantissa // in IEEE single precision floating point. // Note: we parenthesize the expression like this so that the // denominator can be precomputed as a constant expression. int32_cuda x = value / (threshold / (1 << 24)); // in the line below, the expression (x >> 12) is a representation of (value / // threshold) between 0 and 4096, with 4096 representing (value / threshold == // 1), while (x & 4095) is treated as a pseudorandom number between 0 and 4095. if ((x >> 12) > (x & 4095)) atomic_add(address, threshold); } } // one iteration of the forward computation in the 'tombstone' CTC HMM computation. 
// The grid y determines which HMM-state we handle. [put this in the grid because // HMM-states don't all take the same amount of time in the backwards direction, and it's // better for scheduling to have them at the outer level.] // The block x and grid x determine which sequence (0 ... num_sequences - 1) we handle; // note that num_sequences == the number of elements in the minibatch, and we // insist they all have the same number of time steps. // note: 'probs' is indexed by sequence-index + (pdf-index * prob_stride). __global__ static void _cuda_chain_hmm_forward(const Int32Pair *backward_transitions, const DenominatorGraphTransition *transitions, int32_cuda num_sequences, int32_cuda num_hmm_states, const BaseFloat *probs, int32_cuda prob_stride, const BaseFloat *prev_alpha, BaseFloat *this_alpha) { // 'backward_transitions', indexed by hmm-state, consists of [start, end] // indexes into the 'transitions' array. This gives us the info for // transitions *into* this state. 'probs' contains the exponentiated neural // net outputs; it has dimension num-output-indexes by num_sequences and its // stride is 'prob_stride'. 'prev_alpha' and 'this_alpha', which are // extracted from a larger matrix, both have dimension num-history-states by // num-sequences. // s is the index of the sequence within the minibatch, // from 0 .. num-egs-in-this-minibatch - 1. // h is the hmm-state index. int32_cuda s = threadIdx.x + blockIdx.x * blockDim.x, h = blockIdx.y; if (s >= num_sequences) return; double this_tot_alpha = 0.0; const DenominatorGraphTransition *trans_iter = transitions + backward_transitions[h].first, *trans_end = transitions + backward_transitions[h].second; // Note: regarding this loop unrolling, I tried the automatic unrolling using // #pragma unroll 2 (after modifying the loop to have an integer index), but I // did not see any performance improvement, it was slightly slower. So the // compiler must be doing something different than what I'm doing here. const int loop_unroll = 2; // don't change this without changing the code // below. for (; trans_iter + loop_unroll <= trans_end; trans_iter += loop_unroll) { BaseFloat transition_prob0 = trans_iter[0].transition_prob; int32_cuda pdf_id0 = trans_iter[0].pdf_id, prev_hmm_state0 = trans_iter[0].hmm_state; BaseFloat transition_prob1 = trans_iter[1].transition_prob; int32_cuda pdf_id1 = trans_iter[1].pdf_id, prev_hmm_state1 = trans_iter[1].hmm_state; BaseFloat pseudo_loglike0 = probs[pdf_id0 * prob_stride + s], this_prev_alpha0 = prev_alpha[prev_hmm_state0 * num_sequences + s], pseudo_loglike1 = probs[pdf_id1 * prob_stride + s], this_prev_alpha1 = prev_alpha[prev_hmm_state1 * num_sequences + s]; this_tot_alpha += this_prev_alpha0 * transition_prob0 * pseudo_loglike0 + this_prev_alpha1 * transition_prob1 * pseudo_loglike1; } if (trans_iter != trans_end) { // mop up the odd transition. BaseFloat transition_prob0 = trans_iter[0].transition_prob; int32_cuda pdf_id0 = trans_iter[0].pdf_id, prev_hmm_state0 = trans_iter[0].hmm_state; BaseFloat pseudo_loglike0 = probs[pdf_id0 * prob_stride + s], this_prev_alpha0 = prev_alpha[prev_hmm_state0 * num_sequences + s]; this_tot_alpha += this_prev_alpha0 * transition_prob0 * pseudo_loglike0; } // Let arbitrary_scale be the inverse of the sum of all alpha values on-- the // previous frame this sum of all the alpha values is stored in the place that // we'd store the previous alpha for state-index equal to num_hmm_states // (i.e. one past the end). 
We multiply this into all the // transition-probabilities from the previous frame to this frame, in both the // forward and backward passes, in order to keep the alphas in a good numeric // range. This won't affect the posteriors, as it's just a constant factor // for each frame, but when computing the total likelihood we'll need to // compensate for it later on. BaseFloat arbitrary_scale = 1.0 / prev_alpha[num_hmm_states * num_sequences + s]; this_alpha[h * num_sequences + s] = this_tot_alpha * arbitrary_scale; } __global__ static void _cuda_chain_hmm_backward(const Int32Pair *forward_transitions, const DenominatorGraphTransition *transitions, int32_cuda num_sequences, int32_cuda num_hmm_states, const BaseFloat *probs, int32_cuda prob_stride, const BaseFloat *this_alpha, const BaseFloat *next_beta, BaseFloat *this_beta, BaseFloat *log_prob_deriv, int32_cuda log_prob_deriv_stride) { // 'forward_transitions', indexed by hmm-state, consists of [start, end] // indexes into the 'transition_info' array. This is about the transitions // *out of* this state. 'probs' contains the exponentiated neural net // outputs; it has dimension num-output-indexes by num_sequences, and contains // just the observation probabilities for this time index. Its stride is // prob_stride. // 'this_alpha', 'next_beta' and 'this_beta' all have dimension // num-history-states by num-sequences. // The beta probs are normalized in such a way (by multiplying by 1/(total-data-prob)) // that to get occupation counts we don't need to multiply by 1/total-data-prob. // deriv_scale is a factor (e.g. -1.0 or -0.99) that we multiply these derivs by // while accumulating them. // s is the index of the sequence within the minibatch, // from 0 .. num-egs-in-this-minibatch - 1. // h is the hmm-state index. int32_cuda s = threadIdx.x + blockIdx.x * blockDim.x, h = blockIdx.y; if (s >= num_sequences) return; // See where arbitrary_scale is defined in the forward computation above, for // more explanation of inv_arbitrary_scale. BaseFloat this_alpha_prob = this_alpha[h * num_sequences + s], inv_arbitrary_scale = this_alpha[num_hmm_states * num_sequences + s]; double tot_variable_factor = 0.0; BaseFloat occupation_factor = this_alpha_prob / inv_arbitrary_scale; const DenominatorGraphTransition *trans_iter = transitions + forward_transitions[h].first, *trans_end = transitions + forward_transitions[h].second; const int loop_unroll = 2; // don't change this without changing the code // below. 
for (; trans_iter + loop_unroll <= trans_end; trans_iter += loop_unroll) { BaseFloat transition_prob0 = trans_iter[0].transition_prob; int32_cuda pdf_id0 = trans_iter[0].pdf_id, next_hmm_state0 = trans_iter[0].hmm_state; BaseFloat transition_prob1 = trans_iter[1].transition_prob; int32_cuda pdf_id1 = trans_iter[1].pdf_id, next_hmm_state1 = trans_iter[1].hmm_state; BaseFloat variable_factor0 = transition_prob0 * next_beta[next_hmm_state0 * num_sequences + s] * probs[pdf_id0 * prob_stride + s], variable_factor1 = transition_prob1 * next_beta[next_hmm_state1 * num_sequences + s] * probs[pdf_id1 * prob_stride + s]; tot_variable_factor += variable_factor0 + variable_factor1; BaseFloat occupation_prob0 = variable_factor0 * occupation_factor; atomic_add_thresholded(log_prob_deriv + (pdf_id0 * log_prob_deriv_stride + s), occupation_prob0); BaseFloat occupation_prob1 = variable_factor1 * occupation_factor; atomic_add_thresholded(log_prob_deriv + (pdf_id1 * log_prob_deriv_stride + s), occupation_prob1); } if (trans_iter != trans_end) { // mop up the odd transition. BaseFloat transition_prob0 = trans_iter[0].transition_prob; int32_cuda pdf_id0 = trans_iter[0].pdf_id, next_hmm_state0 = trans_iter[0].hmm_state; BaseFloat variable_factor0 = transition_prob0 * next_beta[next_hmm_state0 * num_sequences + s] * probs[pdf_id0 * prob_stride + s]; tot_variable_factor += variable_factor0; BaseFloat occupation_prob0 = variable_factor0 * occupation_factor; atomic_add_thresholded(log_prob_deriv + (pdf_id0 * log_prob_deriv_stride + s), occupation_prob0); } BaseFloat beta = tot_variable_factor / inv_arbitrary_scale; this_beta[h * num_sequences + s] = beta; } void cuda_chain_hmm_forward(dim3 Gr, dim3 Bl, const Int32Pair *backward_transitions, const DenominatorGraphTransition *transitions, int32_cuda num_sequences, int32_cuda num_hmm_states, const BaseFloat *probs, int32_cuda prob_stride, const BaseFloat *prev_alpha, BaseFloat *this_alpha) { _cuda_chain_hmm_forward<<<Gr,Bl>>>(backward_transitions, transitions, num_sequences, num_hmm_states, probs, prob_stride, prev_alpha, this_alpha); } void cuda_chain_hmm_backward(dim3 Gr, dim3 Bl, const Int32Pair *forward_transitions, const DenominatorGraphTransition *transitions, int32_cuda num_sequences, int32_cuda num_hmm_states, const BaseFloat *probs, int32_cuda prob_stride, const BaseFloat *this_alpha, const BaseFloat *next_beta, BaseFloat *this_beta, BaseFloat *log_prob_deriv, int32_cuda log_prob_deriv_stride) { _cuda_chain_hmm_backward<<<Gr,Bl>>>(forward_transitions, transitions, num_sequences, num_hmm_states, probs, prob_stride, this_alpha, next_beta, this_beta, log_prob_deriv, log_prob_deriv_stride); } // See documentation for PenalizeOutOfRange() in chain-training.cc to see what // this is about. 
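// Added interpretation (not from the original comments): the update in the kernel
// below, out_deriv -= scale * (val - limit) for val > limit and
// out_deriv -= scale * (val + limit) for val < -limit, matches subtracting the
// gradient of a quadratic penalty 0.5 * scale * (|val| - limit)^2 that is active
// only outside the interval [-limit, limit].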
__global__ static void _penalize_out_of_range( BaseFloat limit, BaseFloat scale, const BaseFloat *in_data, MatrixDim dim, int out_stride, BaseFloat *out_deriv) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int in_index = i + j * dim.stride, out_index = i + j * out_stride; if (i < dim.cols && j < dim.rows) { BaseFloat val = in_data[in_index]; if (val < -limit) { out_deriv[out_index] -= scale * (val + limit); } else if (val > limit) { out_deriv[out_index] -= scale * (val - limit); } } } void cuda_penalize_out_of_range(dim3 Gr, dim3 Bl, BaseFloat limit, BaseFloat scale, const BaseFloat *in_data, MatrixDim dim, int out_stride, BaseFloat *out_deriv) { _penalize_out_of_range<<<Gr,Bl>>>(limit, scale, in_data, dim, out_stride, out_deriv); }
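// Illustrative sketch (added for exposition, not part of the Kaldi sources above):
// atomic_add_thresholded() keeps expectations unchanged by adding small values
// stochastically: a value below a threshold t is added as t with probability
// value / t, so the expected contribution is still `value`. The host-only check
// below demonstrates this with std::mt19937 instead of the kernel's trick of
// reusing low-order mantissa bits as a pseudorandom source; the threshold used
// here is only an example value.
#include <cstdio>
#include <random>

int main() {
  const double threshold = 1.0 / (1 << 12);  // stand-in for 1 / 2^kThresholdingPowerOfTwo
  const double value = 0.3 * threshold;      // a small posterior below the threshold
  std::mt19937 rng(0);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);

  double accumulated = 0.0;
  const int trials = 1000000;
  for (int i = 0; i < trials; ++i) {
    if (value >= threshold) {
      accumulated += value;                        // large values are added exactly
    } else if (uniform(rng) < value / threshold) {
      accumulated += threshold;                    // small values: add t with prob value / t
    }
  }
  std::printf("mean added = %g, exact value = %g\n", accumulated / trials, value);
  return 0;
}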
#include <cub/block/block_reduce.cuh> #include <cub/cub.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/conversions.h" #include "caffe2/utils/math.h" #if THRUST_VERSION >= 100800 #define THRUST_SUPPORTS_PER_THREAD #endif // THRUST_VERSION >= 100800 namespace caffe2 { namespace math { #define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \ __global__ \ void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ y[i] = function(x[i]); \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* x, T* y, \ CUDAContext* context) { \ _Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \ 0, context->cuda_stream()>>>( \ N, x, y); \ } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf); DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf); __device__ float cuda_sqrf(const float x) { return x * x; } DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf); #undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION #define DELEGATE_SINCOS_CUDA_FUNCTION(T) \ __global__ void _Kernel_##T##_##SinCos( \ const int N, const T* x, T* ys, T* yc) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ sincos(x[i], ys + i, yc + i); \ } \ } \ template <> \ void SinCos<T, CUDAContext>( \ const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \ _Kernel_##T##_##SinCos<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, x, ys, yc); \ } DELEGATE_SINCOS_CUDA_FUNCTION(float) DELEGATE_SINCOS_CUDA_FUNCTION(double) #undef DELEGATE_SINCOS_CUDA_FUNCTION #define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \ __global__ void _Kernel_##T##_##Funcname( \ const int N, const T* a, const T* b, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \ y[i] = convert::To<float, T>(r); \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* a, const T* b, T* y, CUDAContext* context) { \ _Kernel_##T##_##Funcname<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, a, b, y); \ } DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *); DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /); #undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION #define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \ __global__ void _Kernel_##T##_##Funcname( \ const int N, const T* a, const T* b, T* y) { \ CUDA_1D_KERNEL_LOOP(i, N) { \ float r = \ func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \ y[i] = convert::To<float, T>(r); \ } \ } \ template <> \ void Funcname<T, CUDAContext>( \ const int N, const T* a, const T* b, T* y, CUDAContext* context) { \ _Kernel_##T##_##Funcname<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, a, b, y); \ } 
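// For orientation (comment added, roughly paraphrasing the macro above): an
// instantiation such as DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf)
// generates a grid-stride kernel plus a host-side specialization, approximately:
//
//   __global__ void _Kernel_float_ElemwiseMax(const int N, const float* a,
//                                             const float* b, float* y) {
//     CUDA_1D_KERNEL_LOOP(i, N) { y[i] = fmaxf(a[i], b[i]); }
//   }
//   template <>
//   void ElemwiseMax<float, CUDAContext>(const int N, const float* a, const float* b,
//                                        float* y, CUDAContext* context) {
//     _Kernel_float_ElemwiseMax<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
//                                 context->cuda_stream()>>>(N, a, b, y);
//   }
//
// (The real expansion also routes operands through convert::To<T, float>, which is a
// no-op for float.)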
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf); #undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION #define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \ template <> \ void Funcname<T, CUDAContext>( \ const int N, \ const T* src, \ T* dst, \ Tensor<CUDAContext>* scratch_ptr, \ CUDAContext* context) { \ size_t memRequired = 0; \ cub::DeviceReduce::func( \ nullptr, memRequired, src, dst, N, context->cuda_stream()); \ auto buffer_size = \ static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \ scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \ cub::DeviceReduce::func( \ static_cast<void*>(scratch_ptr->mutable_data<T>()), \ memRequired, \ src, \ dst, \ N, \ context->cuda_stream()); \ } DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min) DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max) DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max) #undef DELEGATE_REDUCTION_FUNCTION // Caffe2 gemm provides a simpler interface to the gemm functions, with the // limitation that the data has to be contiguous in memory. template <> void Gemm<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE(cublasSgemm( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void Gemm<float16, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float16* A, const float16* B, const float beta, float16* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_CHECK(cublasSgemmEx( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &beta, C, CUDA_R_16F, N)); } else if (math_type == TensorProto_DataType_FLOAT16) { // convert alpha, beta from float -> __half auto alpha_fp16 = convert::floatToHalf(alpha); auto beta_fp16 = convert::floatToHalf(beta); // call cublasHgemm CUBLAS_CHECK(cublasHgemm( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_fp16, (const __half*)B, ldb, (const __half*)A, lda, &beta_fp16, (__half*)C, N)); } else { // fail CAFFE_THROW("Unsupported math type"); } } template <> void GemmBatched<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, Tensor<CUDAContext>* scratch, TensorProto::DataType math_type) { const int a_stride = M * K; const int b_stride = K * N; const int c_stride = M * N; #if __CUDACC_VER_MAJOR__ < 8 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { math::Gemm<float, CUDAContext>( TransA, TransB, M, N, K, alpha, A + a_stride * i, B + b_stride * i, beta, C + c_stride * i, context); } #else // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (TransA == CblasNoTrans) ? K : M; const int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE(cublasSgemmStridedBatched( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, b_stride, A, lda, a_stride, &beta, C, N, c_stride, batch_size)); #endif } namespace { __global__ void FloatToHalfKernel(const int N, const float* X, half* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = __float2half(X[i]); } } __global__ void HalfToFloatKernel(const int N, const half* X, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = __half2float(X[i]); } } }; template <> void GemmBatched<float16, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int batch_size, const int M, const int N, const int K, const float alpha, const float16* A, const float16* B, const float beta, float16* C, CUDAContext* context, Tensor<CUDAContext>* scratch, TensorProto::DataType math_type) { const int a_stride = M * K; const int b_stride = K * N; const int c_stride = M * N; #if __CUDACC_VER_MAJOR__ < 8 // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { math::Gemm<float16, CUDAContext>( TransA, TransB, M, N, K, alpha, A + a_stride * i, B + b_stride * i, beta, C + c_stride * i, context); } #else // 3 options: // 1) scratch != null = cast to fp32, SgemmStridedBatched, cast result to fp16 // 2) math_type == FLOAT, scratch == nullptr = looped SgemmEx // 3) math_type == FLOAT16, scratch == nullptr = batched Hgemm if (scratch != nullptr) { const int A_size = a_stride * batch_size; const int B_size = b_stride * batch_size; // cast, cublasSgemmStridedBatched, cast size_t in_elems = A_size + B_size; size_t out_elems = c_stride * batch_size; scratch->Resize(in_elems + out_elems); float* scratch_ptr = scratch->mutable_data<float>(); float* A_fp32 = scratch_ptr; float* B_fp32 = scratch_ptr + A_size; float* C_fp32 = scratch_ptr + A_size + B_size; // cast A, B into fp32 
HalfToFloatKernel<<<CAFFE_GET_BLOCKS(A_size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(A_size, (half*)A, A_fp32); HalfToFloatKernel<<<CAFFE_GET_BLOCKS(B_size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(B_size, (half*)B, B_fp32); // run fp32 batched Gemm GemmBatched<float, CUDAContext>( TransA, TransB, batch_size, M, N, K, alpha, A_fp32, B_fp32, beta, C_fp32, context); // cast result back to fp16 FloatToHalfKernel<<< CAFFE_GET_BLOCKS(batch_size * M * N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(batch_size * M * N, C_fp32, (half*)C); } else { if (math_type == TensorProto_DataType_FLOAT) { // loop over matrices in the batch for (int i = 0; i < batch_size; ++i) { math::Gemm<float16, CUDAContext>( TransA, TransB, M, N, K, alpha, A + a_stride * i, B + b_stride * i, beta, C + c_stride * i, context); } } else if (math_type == TensorProto_DataType_FLOAT16) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. const int lda = (TransA == CblasNoTrans) ? K : M; const int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // convert alpha, beta from float -> __half auto alpha_fp16 = convert::floatToHalf(alpha); auto beta_fp16 = convert::floatToHalf(beta); CUBLAS_ENFORCE(cublasHgemmStridedBatched( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_fp16, (const __half*)B, ldb, b_stride, (const __half*)A, lda, a_stride, &beta_fp16, (__half*)C, N, c_stride, batch_size)); } } #endif } #if CUDA_VERSION >= 9000 // No change, but required. Defer to default CUDA engine template <> void Gemm<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, TensorProto::DataType math_type) { return Gemm<float,CUDAContext>(TransA, TransB, M, N, K, alpha, A, B, beta, C, context, math_type); } template <> void Gemm<float16, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float16* A, const float16* B, const float beta, float16* C, CUDAContext* context, TensorProto::DataType math_type) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; // enable TensorCore for this call on this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE(cublasSetMathMode( context->cublas_handle(), CUBLAS_TENSOR_OP_MATH)); } CUBLAS_CHECK(cublasGemmEx( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &beta, C, CUDA_R_16F, N, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); // Now disable TensorCore math for subsequent calls to this handle if (TensorCoreAvailable()) { CUBLAS_ENFORCE(cublasSetMathMode( context->cublas_handle(), CUBLAS_DEFAULT_MATH)); } } template <> void GemmBatched<float, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int batch_size, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* context, Tensor<CUDAContext>* scratch, TensorProto::DataType math_type) { return GemmBatched<float, CUDAContext, DefaultEngine>( TransA, TransB, batch_size, M, N, K, alpha, A, B, beta, C, context, scratch, math_type); } template <> void GemmBatched<float16, CUDAContext, TensorCoreEngine>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int batch_size, const int M, const int N, const int K, const float alpha, const float16* A, const float16* B, const float beta, float16* C, CUDAContext* context, Tensor<CUDAContext>* scratch, TensorProto::DataType math_type) { return GemmBatched<float16, CUDAContext, DefaultEngine>( TransA, TransB, batch_size, M, N, K, alpha, A, B, beta, C, context, scratch, math_type); } #endif // CUDA_VERSION >= 9000 template <> void GemmEx<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* context) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_ENFORCE(cublasSgemm( context->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } template <> void Gemv<float, CUDAContext>( const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* context, TensorProto::DataType math_type) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_ENFORCE(cublasSgemv( context->cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } // Batched Add variants namespace { template <typename T> __global__ void AddStripedBatchKernel( const int N, const T* first, T* Y, const int stripe, const int batch) { for (int j = 0; j < batch; j++) { const T* x = first + j * stripe; CUDA_1D_KERNEL_LOOP(i, N) { float tmpY = convert::To<T, float>(Y[i]); tmpY += convert::To<T,float>(x[i]); Y[i] = convert::To<float,T>(tmpY); } } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \ template <> \ void AddStripedBatch<T, CUDAContext>( \ const int N, \ const T* first, \ T* Y, \ const int stripe, \ const int batch, \ CUDAContext* context) { \ AddStripedBatchKernel<T><<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, first, Y, stripe, batch); \ } CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float); CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16); #undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH template <> void Gemv<float16, CUDAContext>( const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float16* A, const float16* x, const float beta, float16* y, CUDAContext* context, TensorProto::DataType math_type) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; // sort out what we need to call cublasSgemmEx / cublasHgemm int m = (cuTransA == CUBLAS_OP_N) ? N : M; int k = (cuTransA == CUBLAS_OP_N) ? M : N; int LDA = (cuTransA == CUBLAS_OP_N) ? m : k; int LDC = m; if (math_type == TensorProto_DataType_FLOAT) { CUBLAS_CHECK(cublasSgemmEx( context->cublas_handle(), cuTransA, CUBLAS_OP_N, m, 1, k, &alpha, A, CUDA_R_16F, LDA, x, CUDA_R_16F, k, &beta, y, CUDA_R_16F, LDC)); } else if (math_type == TensorProto_DataType_FLOAT16) { auto alpha_fp16 = convert::floatToHalf(alpha); auto beta_fp16 = convert::floatToHalf(beta); CUBLAS_CHECK(cublasHgemm( context->cublas_handle(), cuTransA, CUBLAS_OP_N, m, 1, k, &alpha_fp16, (const __half*)A, LDA, (const __half*)x, k, &beta_fp16, (__half*)y, LDC)); } else { // fail CAFFE_THROW("Unsupported math type"); } } namespace { template <typename T> __global__ void SetKernel(const int N, const T alpha, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = alpha; } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_SET(T) \ template <> \ void Set<T, CUDAContext>( \ const size_t N, const T alpha, T* Y, CUDAContext* context) { \ SetKernel<<< \ CAFFE_GET_BLOCKS(N), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>(N, alpha, Y); \ } CAFFE2_SPECIALIZED_CUDA_SET(float); CAFFE2_SPECIALIZED_CUDA_SET(double); CAFFE2_SPECIALIZED_CUDA_SET(bool); CAFFE2_SPECIALIZED_CUDA_SET(int8_t); CAFFE2_SPECIALIZED_CUDA_SET(int16_t); CAFFE2_SPECIALIZED_CUDA_SET(float16); CAFFE2_SPECIALIZED_CUDA_SET(int); CAFFE2_SPECIALIZED_CUDA_SET(int64_t); CAFFE2_SPECIALIZED_CUDA_SET(char); CAFFE2_SPECIALIZED_CUDA_SET(uint8_t); CAFFE2_SPECIALIZED_CUDA_SET(uint16_t); #undef CAFFE2_SPECIALIZED_CUDA_SET namespace { template <typename T> __global__ void UniformShift(const size_t N, const float min, const float max, T* x) { float scale = max - min; CUDA_1D_KERNEL_LOOP(i, N) { x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min); } } __global__ void UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) { int* x_int = reinterpret_cast<int*>(x); int range = (max - min + 1); CUDA_1D_KERNEL_LOOP(i, N) { x_int[i] = min + static_cast<int>(x[i] % range); } } } // namespace template <> void 
RandUniform<float, CUDAContext>( const size_t n, const float min, const float max, float* r, CUDAContext* context) { CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n)); UniformShift<float> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); } template <> void RandUniform<double, CUDAContext>( const size_t n, const double min, const double max, double* r, CUDAContext* context) { CURAND_ENFORCE( curandGenerateUniformDouble(context->curand_generator(), r, n)); UniformShift<double> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, min, max, r); } template <> void RandUniform<int, CUDAContext>( const size_t n, const int min, const int max, int* r, CUDAContext* context) { CURAND_ENFORCE(curandGenerate( context->curand_generator(), reinterpret_cast<unsigned int*>(r), n)); UniformIntFit<<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, min, max, reinterpret_cast<unsigned int*>(r)); } template <typename T> size_t HandleOddLengthRandGaussian( const size_t n, const T mean, const T std, T* r, CUDAContext* context) { if (n % 2 == 1) { std::default_random_engine generator; std::normal_distribution<T> distribution(mean, std); const T random_value = distribution(generator); math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context); return n - 1; } return n; } template <> void RandGaussian<float, CUDAContext>( const size_t n, const float mean, const float std, float* r, CUDAContext* context) { // If n is odd, we add a random Gaussian value at the end manually // and generate n-1 random values using curandGenerateNormal. // curandGenerateNormal requires n to be even. const size_t even_n = HandleOddLengthRandGaussian<float>(n, mean, std, r, context); CURAND_ENFORCE( curandGenerateNormal(context->curand_generator(), r, even_n, mean, std)); } template <> void RandGaussian<double, CUDAContext>( const size_t n, const double mean, const double std, double* r, CUDAContext* context) { const size_t even_n = HandleOddLengthRandGaussian<double>(n, mean, std, r, context); CURAND_ENFORCE(curandGenerateNormalDouble( context->curand_generator(), r, even_n, mean, std)); } template <> void Dot<float, CUDAContext>( const int n, const float* a, const float* b, float* y, CUDAContext* context) { float result; CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result)); context->Copy<float, CPUContext, CUDAContext>(1, &result, y); } template <> void Dot<float16, CUDAContext>( const int n, const float16* a, const float16* b, float16* y, CUDAContext* context) { float16 result; // execute with 32-bit math CUBLAS_CHECK(cublasDotEx( context->cublas_handle(), n, a, CUDA_R_16F, 1, b, CUDA_R_16F, 1, &result, CUDA_R_16F, CUDA_R_32F)); context->Copy<float16, CPUContext, CUDAContext>(1, &result, y); } // A previous version of caffe2 used Thrust but it turns out that thrust // reduction has an implicit scratch space allocation and deallocation, which // may interfere with NCCL and create a deadlock. Hence we are using a custom // reduction here. #define SUM_KERNEL_NTHREADS 128 template <typename T> __global__ void SumKernel(const int N, const T* X, T* Y, bool square) { const int idx = threadIdx.x; __shared__ float reduction_buffer[SUM_KERNEL_NTHREADS]; reduction_buffer[idx] = 0; // A multilevel reduction. 
// N -> 128 if (!square) { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { reduction_buffer[idx] += convert::To<T, float>(X[i]); } } else { for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) { float Xi = convert::To<T, float>(X[i]); reduction_buffer[idx] += Xi * Xi; } } __syncthreads(); // 128 -> 32 if (idx < 32) { reduction_buffer[idx] += reduction_buffer[idx + 32] + reduction_buffer[idx + 64] + reduction_buffer[idx + 96]; } __syncthreads(); // 32 -> 1 if (idx == 0) { float tmp = 0; for (int i = 0; i < 32; ++i) { tmp += reduction_buffer[i]; } *Y = convert::To<float, T>(tmp); } } // According to the benchmarks script // caffe2/caffe2/experiments/python/device_reduce_sum_bench.py, // device reduce is slower for N <= 10000. #define DEVICE_REDUCE_SIZE_THRESHOLD 10000 namespace { template <typename T> __global__ void SumConvertKernel(float* sum, T* dest) { *dest = convert::To<float, T>(*sum); } template <typename FloatIterT> void SumFloatIter( const int N, FloatIterT it, float*& dest, CUDAContext* context, Tensor<CUDAContext>* scratch_ptr) { size_t memRequired = 0; cub::DeviceReduce::Sum( nullptr, memRequired, it, dest, N, context->cuda_stream()); auto buffer_size = static_cast<TIndex>((memRequired + sizeof(float) - 1) / sizeof(float)); if (!dest) { // allocate one more float at the end of scratch for dest scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1}); dest = scratch_ptr->template mutable_data<float>() + buffer_size; } else { scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); } cub::DeviceReduce::Sum( static_cast<void*>(scratch_ptr->template mutable_data<float>()), memRequired, it, dest, N, context->cuda_stream()); } } // namespace template <> void Sum<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor<CUDAContext>* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SumFloatIter(N, x, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, false); } } namespace { template <typename T> struct FloatTransform { inline __host__ __device__ float operator()(const T v) const { return convert::To<T, float>(v); } }; } // namespace #define CAFFE2_MATH_SUM_FUNC(T) \ template <> \ void Sum<T, CUDAContext>( \ const int N, \ const T* x, \ T* y, \ CUDAContext* context, \ Tensor<CUDAContext>* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> transform; \ cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \ x, transform); \ float* sum = nullptr; \ SumFloatIter(N, it, sum, context, scratch_ptr); \ SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \ } else { \ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \ N, x, y, false); \ } \ } CAFFE2_MATH_SUM_FUNC(float16) #undef CAFFE2_MATH_SUM_FUNC namespace { template <typename T> struct SqrTransform { inline __host__ __device__ T operator()(const T v) const { return v * v; } }; } // namespace template <> void SumSqr<float, CUDAContext>( const int N, const float* x, float* y, CUDAContext* context, Tensor<CUDAContext>* scratch_ptr) { if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { SqrTransform<float> transform; cub::TransformInputIterator<float, SqrTransform<float>, const float*> it( x, transform); SumFloatIter(N, it, y, context, scratch_ptr); } else { SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( N, x, y, true); } } #define CAFFE2_MATH_SUMSQR_FUNC(T) \ template <> \ void SumSqr<T, CUDAContext>( \ const int N, \ const T* x, \ 
T* y, \ CUDAContext* context, \ Tensor<CUDAContext>* scratch_ptr) { \ if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \ FloatTransform<T> float_transform; \ cub::TransformInputIterator<float, FloatTransform<T>, const T*> \ float_it(x, float_transform); \ SqrTransform<float> sqr_transform; \ cub::TransformInputIterator< \ float, \ SqrTransform<float>, \ decltype(float_it)> \ it(float_it, sqr_transform); \ float* sum = nullptr; \ SumFloatIter(N, it, sum, context, scratch_ptr); \ SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \ } else { \ SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \ N, x, y, true); \ } \ } CAFFE2_MATH_SUMSQR_FUNC(float16) #undef CAFFE2_MATH_SUMSQR_FUNC #undef DEVICE_REDUCE_SIZE_THRESHOLD namespace { template <typename T> __global__ void SelectKernel( const int N, const int D, const T* x, const int* idx, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = x[i * D + idx[i]]; } } } // namespace template <> void Select<float, CUDAContext>( const int N, const int D, const float* x, const int* idx, float* y, CUDAContext* context) { SelectKernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); } template <> void Select<float16, CUDAContext>( const int N, const int D, const float16* x, const int* idx, float16* y, CUDAContext* context) { SelectKernel<float16><<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, idx, y); } namespace { template <typename T> __global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { // y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha); y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha); } } template <typename T> __global__ void ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * (*alpha); } } template <typename T> __global__ void PowKernel(const int n, const T* x, const T exponent, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = powf(x[i], exponent); } } // fp16 specialization template <> __global__ void ScaleKernelDeviceAlpha( const int n, const float* alpha, const float16* x, float16* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = convert::To<float, float16>( convert::To<float16, float>(x[i]) * (*alpha)); } } } // namespace template <> void Powx<float, CUDAContext>( const int N, const float* a, const float b, float* y, CUDAContext* context) { PowKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, a, b, y); } template <> void Scale<float, CUDAContext>( const int n, const float alpha, const float* x, float* y, CUDAContext* context) { ScaleKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, x, y); } template <> void Scale<float16, CUDAContext>( const int n, const float alpha, const float16* x, float16* y, CUDAContext* context) { ScaleKernel<float16><<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, x, y); } template <> void Scale<float, CUDAContext>( const int n, const float* alpha, const float *x, float* y, CUDAContext* context) { ScaleKernelDeviceAlpha<float><<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( n, alpha, x, y); } template <> void Scale<float16, CUDAContext>( const int n, const float* alpha, const float16* x, float16* y, CUDAContext* context) { ScaleKernelDeviceAlpha<float16><<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, 
context->cuda_stream()>>>(n, alpha, x, y); } template <> void Axpy<float, CUDAContext>( const int N, const float alpha, const float* X, float* Y, CUDAContext* context) { CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void Axpy<double, CUDAContext>( const int N, const float alpha, const double* X, double* Y, CUDAContext* context) { double alpha_d{alpha}; CUBLAS_ENFORCE( cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1)); } template <> void Axpy<float16, CUDAContext>( const int N, const float alpha, const float16* X, float16* Y, CUDAContext* context) { CUBLAS_CHECK(cublasAxpyEx( context->cublas_handle(), N, &alpha, CUDA_R_16F, X, CUDA_R_16F, 1, Y, CUDA_R_16F, 1, CUDA_R_32F)); } namespace { template <typename T> __global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(index, n) { y[index] = convert::Get<T>( convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index])); } } } // namespace template <> void Axpy<float, CUDAContext>( const int n, const float* alpha, const float* X, float* Y, CUDAContext* context) { AxpyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, X, Y); } template <> void Axpy<float16, CUDAContext>( const int n, const float* alpha, const float16* X, float16* Y, CUDAContext* context) { AxpyKernel<float16><<< CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, alpha, X, Y); } namespace { template <typename T> __global__ void AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) { CUDA_1D_KERNEL_LOOP(index, n) { y[index] = x[index] * a + y[index] * b; } } } // namespace template <> void Axpby<float, CUDAContext>( const int n, const float a, const float* x, const float b, float* y, CUDAContext* context) { AxpbyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, a, x, b, y); } namespace { template <typename T> __global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_col) { CUDA_1D_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_t; int w_in = w_out * stride_w - pad_l; T* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const T* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h = h_in + i * dilation_h; int w = w_in + j * dilation_w; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? 
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += height_col * width_col; } } } } template <typename T> __global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int width_col, const int channels, T* data_col) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { int channel_in = index % channels; int w_out = index / channels % width_col; int h_out = index / channels / width_col; int h_in = h_out * stride_h - pad_t; int w_in = w_out * stride_w - pad_l; T* local_data_col = data_col + ((h_out * width_col) + w_out) * channels * kernel_h * kernel_w + channel_in; for (int i = 0; i < dkernel_h; i += dilation_h) { int h = h_in + i; for (int j = 0; j < dkernel_w; j += dilation_w) { int w = w_in + j; *local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ? data_im[(h * width + w) * channels + channel_in] : 0; local_data_col += channels; } } } } template <typename T> __global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col, const int height, const int width, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_im) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; int w = index % width + pad_l; int h = (index / width) % height + pad_t; int c = index / (width * height); // compute the start and end of the output int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = (h - h_col * stride_h); int w_k = (w - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } template <typename T> __global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col, const int width, const int channels, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int stride_h, const int stride_w, const int height_col, const int width_col, T* data_im) { const int dpatch_h = dilation_h * (patch_h - 1) + 1; const int dpatch_w = dilation_w * (patch_w - 1) + 1; CUDA_1D_KERNEL_LOOP(index, n) { T val = 0; int c = index % channels; int w = index / channels % width + pad_l; int h = index / channels / width + pad_t; // compute the start and end of the output int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1; int w_col_end = min(w / stride_w + 1, width_col); int h_col_start = (h < dpatch_h) ? 
0 : (h - dpatch_h) / stride_h + 1; int h_col_end = min(h / stride_h + 1, height_col); int channels_col = patch_h * patch_w * channels; for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_k = h - h_col * stride_h; int w_k = w - w_col * stride_w; if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int c_col = (h_k * patch_w + w_k) * channels + c; val += data_col[(h_col * width_col + w_col) * channels_col + c_col]; } } } data_im[index] = val; } } // Ported from caffe1 template <typename T, int num_axes> __global__ void im2col_nd_gpu_kernel( const int n, const T* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, T* data_col) { int d_offset[num_axes]; // NOLINT(runtime/arrays) int d_iter[num_axes]; // NOLINT(runtime/arrays) __shared__ int shared_dilation[num_axes]; __shared__ int shared_kernel_shape[num_axes]; __shared__ int shared_pad[num_axes]; __shared__ int shared_stride[num_axes]; __shared__ int shared_col_shape[num_axes + 1]; __shared__ int shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x]; shared_pad[threadIdx.x] = pad[threadIdx.x]; shared_stride[threadIdx.x] = stride[threadIdx.x]; } if (threadIdx.x < num_axes + 1) { shared_col_shape[threadIdx.x] = col_shape[threadIdx.x]; shared_im_shape[threadIdx.x] = im_shape[threadIdx.x]; } __syncthreads(); int i; int kernel_size = 1; for (i = 0; i < num_axes; ++i) { kernel_size *= shared_kernel_shape[i]; } CUDA_1D_KERNEL_LOOP(index, n) { if (index >= col_shape[0]) { break; } // Initialize offset, computed in the loop below, with intermediate // computations used to compute the spatial indices. 
int offset = index; for (i = num_axes - 1; i >= 0; --i) { if (i < num_axes - 1) { offset /= shared_kernel_shape[i + 1]; } d_offset[i] = offset % shared_kernel_shape[i]; } for (i = 0; i < num_axes; ++i) { d_iter[i] = 0; } bool incremented; do { int index_col = index; int index_im = index / kernel_size; bool in_range = true; for (i = 0; i < num_axes; ++i) { const int d = d_iter[i]; const int d_im = d * shared_stride[i] - shared_pad[i] + d_offset[i] * shared_dilation[i]; in_range &= (d_im >= 0 && d_im < shared_im_shape[i + 1]); index_col *= shared_col_shape[i + 1]; index_col += d; index_im *= shared_im_shape[i + 1]; index_im += d_im; } if (in_range) { // data_col[index_col] = 0; data_col[index_col] = data_im[index_im]; // T temp = data_im[index_im]; } else { data_col[index_col] = 0; } incremented = false; for (i = num_axes - 1; i >= 0; --i) { // const int d_max = shared_kernel_shape[i]; const int d_max = shared_col_shape[i + 1]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 ++d_iter[i]; incremented = true; break; } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); // do } // CUDA_KERNEL_LOOP(index, n) } template <typename T, int num_axes> __global__ void col2im_nd_gpu_kernel( const int n, const T* data_col, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, T* data_im) { int d_im[num_axes]; // NOLINT(runtime/arrays) int d_col_iter[num_axes]; // NOLINT(runtime/arrays) int d_col_start[num_axes]; // NOLINT(runtime/arrays) int d_col_end[num_axes]; // NOLINT(runtime/arrays) __shared__ int shared_dilation[num_axes]; __shared__ int shared_kernel_shape[num_axes]; __shared__ int shared_pad[num_axes]; __shared__ int shared_stride[num_axes]; __shared__ int shared_col_shape[num_axes + 1]; __shared__ int shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x]; shared_pad[threadIdx.x] = pad[threadIdx.x]; shared_stride[threadIdx.x] = stride[threadIdx.x]; } if (threadIdx.x < num_axes + 1) { shared_col_shape[threadIdx.x] = col_shape[threadIdx.x]; shared_im_shape[threadIdx.x] = im_shape[threadIdx.x]; } __syncthreads(); CUDA_1D_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int c_im = index; // Calculate d_im (image dimensions). for (int i = num_axes - 1; i >= 0; --i) { d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i]; c_im /= shared_im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { const int kernel_extent = shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1; d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent) ? 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1; d_col_end[i] = min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; done = true; break; // for (int i = 0; i < num_axes; ++i) } } if (done) { continue; // CUDA_KERNEL_LOOP(index, n) } // Loop over the col to compute the output val. T val = 0; bool incremented = true; bool skip = false; do { // Compute the final offset. 
int final_offset = 0; int kernel_shape_prod = 1; int kernel_index; for (int i = num_axes - 1; i >= 0; --i) { kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i]; if (kernel_index % shared_dilation[i]) { skip = true; break; } else { kernel_index /= shared_dilation[i]; final_offset += kernel_index * kernel_shape_prod; kernel_shape_prod *= shared_kernel_shape[i]; } } if (!skip) { final_offset += kernel_shape_prod * c_im; for (int i = 0; i < num_axes; ++i) { final_offset *= shared_col_shape[i + 1]; final_offset += d_col_iter[i]; } val += data_col[final_offset]; } skip = false; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { const int d_max = d_col_end[i]; if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; } else { // d_col_iter[i] < d_max - 1 ++d_col_iter[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } } // namespace template <> void Im2col<float, CUDAContext, StorageOrder::NCHW>( const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_col, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_col); } template <> void Im2col<float, CUDAContext, StorageOrder::NHWC>( const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_col, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; // We are going to launch height_col * width_col * channels kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = height_col * width_col * channels; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, width_col, channels, data_col); } template <> void Col2im<float, CUDAContext, StorageOrder::NCHW>( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_im, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. col2im_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_col, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im); } template <> void Col2im<float, CUDAContext, StorageOrder::NHWC>( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, float* data_im, CUDAContext* context) { const int dkernel_h = dilation_h * (kernel_h - 1) + 1; const int dkernel_w = dilation_w * (kernel_w - 1) + 1; int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1; int num_kernels = height * width * channels; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
col2im_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( num_kernels, data_col, width, channels, kernel_h, kernel_w, dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im); } template <> void Col2imNd<float, CUDAContext, StorageOrder::NCHW>( const float* data_col, const int* img_shape, const int* col_shape, const int img_size, const int col_size, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const int N, float* data_img, CUDAContext* context) { CAFFE_ENFORCE_LT( N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size."); #define COL2IM_ND_KERNEL(n) \ col2im_nd_gpu_kernel<float, n> /* NOLINT_NEXT_LINE(whitespace/operators) */ \ <<<CAFFE_GET_BLOCKS(img_size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>( \ img_size, \ data_col, \ img_shape, \ col_shape, \ kernel_shape, \ pad, \ stride, \ dilation, \ data_img) switch (N) { case 1: COL2IM_ND_KERNEL(1); break; case 2: COL2IM_ND_KERNEL(2); break; case 3: COL2IM_ND_KERNEL(3); break; case 4: COL2IM_ND_KERNEL(4); break; case 5: COL2IM_ND_KERNEL(5); break; default: CAFFE_THROW( "Col2imNd does not support computation with ", N, " spatial axes"); } } template <> void Im2colNd<float, CUDAContext, StorageOrder::NCHW>( const float* data_img, const int* img_shape, const int* col_shape, const int img_size, const int col_size, const int* kernel_shape, const int* stride, const int* dilation, const int* pad, const int N, float* data_col, CUDAContext* context, bool /*accumlate_output*/) { CAFFE_ENFORCE_LT( N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size."); #define IM2COL_ND_KERNEL(n) \ im2col_nd_gpu_kernel<float, n> /* NOLINT_NEXT_LINE(whitespace/operators) */ \ <<<CAFFE_GET_BLOCKS(col_size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>( \ col_size, \ data_img, \ img_shape, \ col_shape, \ kernel_shape, \ pad, \ stride, \ dilation, \ data_col) switch (N) { case 1: IM2COL_ND_KERNEL(1); break; case 2: IM2COL_ND_KERNEL(2); break; case 3: IM2COL_ND_KERNEL(3); break; case 4: IM2COL_ND_KERNEL(4); case 5: IM2COL_ND_KERNEL(5); break; default: CAFFE_THROW( "Im2colNd does not support computation with ", N, " spatial axes"); } } template <> void CopyMatrix<CUDAContext>( const size_t itemsize, const int M, const int N, const void* A, const int lda, void* B, const int ldb, CUDAContext* context, TypeMeta::TypedCopy copy) { CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context"); cudaMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M, cudaMemcpyDeviceToDevice, context->cuda_stream()); } template <> void CopyVector<float, CUDAContext>( const int N, const float* src, float* dst, CUDAContext* context) { if (src != dst && N > 0) { cudaMemcpyAsync( dst, src, sizeof(float) * N, cudaMemcpyDeviceToDevice, context->cuda_stream()); } } namespace { __global__ void rowwise_max_kernel( const int rows, const int cols, const float* data, float* out) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) { float maxval = -FLT_MAX; // NB: The memory accesses here are sequentialized; without unrolling // the loop, there will not be any ILP. However, because we are running // this kernel with a lot of threads, this should not be a big problem. 
// However, if we reduce the number of threads to take advantage of // warp-wide synchronization, this may become a problem again. for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) { maxval = max(data[rowIndex * cols + colIndex], maxval); } maxval = BlockReduce(temp_storage).Reduce(maxval, cub::Max()); if (threadIdx.x == 0) { out[rowIndex] = maxval; } __syncthreads(); } } __global__ void colwise_max_kernel( const int rows, const int cols, const float* data, float* out) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int colIndex = blockIdx.x; colIndex < cols; colIndex += gridDim.x) { float maxval = -FLT_MAX; for (int rowIndex = threadIdx.x; rowIndex < rows; rowIndex += blockDim.x) { maxval = max(data[rowIndex * cols + colIndex], maxval); } maxval = BlockReduce(temp_storage).Reduce(maxval, cub::Max()); if (threadIdx.x == 0) { out[colIndex] = maxval; } __syncthreads(); } } } // namespace template <> void RowwiseMax( const int N, const int D, const float* x, float* y, CUDAContext* context) { rowwise_max_kernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, y); } template <> void ColwiseMax( const int N, const int D, const float* x, float* y, CUDAContext* context) { colwise_max_kernel<<< std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, D, x, y); } namespace { __global__ void maximum_kernel(const int N, const float alpha, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = fmaxf(x[i], alpha); } } } // namespace template <> void Maximum( const int N, const float alpha, const float* x, float* y, CUDAContext* context) { maximum_kernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, alpha, x, y); } namespace { constexpr int kCompileTimeCUDAMaxTransposeDims = 8; __device__ void ComputeYStride( const int num_axes, const int* y_dims, const int* axes, int* y_strides) { int buff[kCompileTimeCUDAMaxTransposeDims]; int cur_stride = 1; for (int i = num_axes - 1; i >= 0; --i) { buff[i] = cur_stride; cur_stride *= y_dims[i]; } for (int i = 0; i < num_axes; ++i) { y_strides[axes[i]] = buff[i]; } } __device__ int GetYIndex( const int num_axes, const int* x_dims, const int* y_strides, int x_index) { int y_index = 0; for (int i = num_axes - 1; i >= 0 && x_index > 0; --i) { y_index += x_index % x_dims[i] * y_strides[i]; x_index /= x_dims[i]; } return y_index; } template <typename T> __global__ void TransposeCUDA( const int num_axes, const int* x_dims, const int* y_dims, const int* axes, const int data_size, const T* X, T* Y) { __shared__ int y_strides[kCompileTimeCUDAMaxTransposeDims]; ComputeYStride(num_axes, y_dims, axes, y_strides); __syncthreads(); CUDA_1D_KERNEL_LOOP(x_index, data_size) { const int y_index = GetYIndex(num_axes, x_dims, y_strides, x_index); #if __CUDA_ARCH__ >= 350 Y[y_index] = __ldg(X + x_index); #else Y[y_index] = X[x_index]; #endif } } } // namespace #define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \ template <> \ void Transpose<T, CUDAContext>( \ const int num_axes, \ const int* x_dims, \ const int* y_dims, \ const int* axes, \ const int data_size, \ const T* X, \ T* Y, \ CUDAContext* context) { \ CAFFE_ENFORCE( \ num_axes <= kCompileTimeCUDAMaxTransposeDims, \ "num_axes exceeds compile time max."); \ TransposeCUDA<T> \ <<<CAFFE_GET_BLOCKS(data_size), \ CAFFE_CUDA_NUM_THREADS, \ 0, \ context->cuda_stream()>>>( \ num_axes, 
x_dims, y_dims, axes, data_size, X, Y); \ } CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int) CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(long) #undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE } // namespace math } // namespace caffe2
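// Illustrative usage sketch (not part of the original file): how calling code might
// drive the GPU math wrappers defined above. The function name and the device
// pointers d_a, d_b, d_c, d_sum are hypothetical and exist only for illustration;
// a live CUDAContext and a scratch Tensor<CUDAContext> are assumed to be provided
// by the caller, and the default math_type of the Gemm declaration in math.h is
// relied upon.
#if 0 // sketch only, not built as part of this translation unit
void ExampleGemmThenSum(caffe2::CUDAContext* ctx,
                        caffe2::Tensor<caffe2::CUDAContext>* scratch,
                        const float* d_a, // M x K, row-major, on the device
                        const float* d_b, // K x N, row-major, on the device
                        float* d_c,       // M x N output, on the device
                        float* d_sum,     // single float result, on the device
                        int M, int N, int K) {
  // C = 1.0 * A * B + 0.0 * C via the cuBLAS-backed float specialization above.
  caffe2::math::Gemm<float, caffe2::CUDAContext>(
      CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, d_a, d_b, 0.0f, d_c, ctx);
  // Reduce all M * N outputs to one value; large sizes take the cub::DeviceReduce path.
  caffe2::math::Sum<float, caffe2::CUDAContext>(M * N, d_c, d_sum, ctx, scratch);
}
#endif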
* \brief Implements PME force gathering in CUDA. * * \author Aleksei Iupinov <a.yupinov@gmail.com> */ #include "gmxpre.h" #include <cassert> #include "gromacs/gpu_utils/cuda_kernel_utils.cuh" #include "gromacs/gpu_utils/typecasts.cuh" #include "pme.cuh" #include "pme_gpu_calculate_splines.cuh" #include "pme_grid.h" /*! \brief * An inline CUDA function: unroll the dynamic index accesses to the constant grid sizes to avoid local memory operations. */ __device__ __forceinline__ float read_grid_size(const float* realGridSizeFP, const int dimIndex) { switch (dimIndex) { case XX: return realGridSizeFP[XX]; case YY: return realGridSizeFP[YY]; case ZZ: return realGridSizeFP[ZZ]; } assert(false); return 0.0F; } /*! \brief Reduce the partial force contributions. * * \tparam order The PME order (must be 4). * \tparam atomDataSize The number of partial force contributions for each atom (currently * order^2 == 16) * \tparam blockSize The CUDA block size * * \param[out] sm_forces Shared memory array with the output forces (number of elements * is number of atoms per block) * \param[in] atomIndexLocal Local atom index * \param[in] splineIndex Spline index * \param[in] lineIndex Line index (same as threadLocalId) * \param[in] realGridSizeFP Local grid size constant * \param[in] fx Input force partial component X * \param[in] fy Input force partial component Y * \param[in] fz Input force partial component Z */ template<int order, int atomDataSize, int blockSize> __device__ __forceinline__ void reduce_atom_forces(float3* __restrict__ sm_forces, const int atomIndexLocal, const int splineIndex, const int lineIndex, const float* realGridSizeFP, float& fx, // NOLINT(google-runtime-references) float& fy, // NOLINT(google-runtime-references) float& fz) // NOLINT(google-runtime-references) { if (gmx::isPowerOfTwo(order)) // Only for orders of power of 2 { const unsigned int activeMask = c_fullWarpMask; // A tricky shuffle reduction inspired by reduce_force_j_warp_shfl // TODO: find out if this is the best in terms of transactions count static_assert(order == 4, "Only order of 4 is implemented"); static_assert(atomDataSize <= warp_size, "TODO: rework for atomDataSize > warp_size (order 8 or larger)"); const int width = atomDataSize; fx += __shfl_down_sync(activeMask, fx, 1, width); fy += __shfl_up_sync(activeMask, fy, 1, width); fz += __shfl_down_sync(activeMask, fz, 1, width); if (splineIndex & 1) { fx = fy; } fx += __shfl_down_sync(activeMask, fx, 2, width); fz += __shfl_up_sync(activeMask, fz, 2, width); if (splineIndex & 2) { fx = fz; } // By now fx contains intermediate quad sums of all 3 components: // splineIndex 0 1 2 and 3 4 5 6 and 7 8... // sum of... fx0 to fx3 fy0 to fy3 fz0 to fz3 fx4 to fx7 fy4 to fy7 fz4 to fz7 etc. 
// We have to just further reduce those groups of 4 for (int delta = 4; delta < atomDataSize; delta <<= 1) { fx += __shfl_down_sync(activeMask, fx, delta, width); } const int dimIndex = splineIndex; if (dimIndex < DIM) { const float n = read_grid_size(realGridSizeFP, dimIndex); float* __restrict__ sm_forcesAtomIndexOffset = reinterpret_cast<float*>(&sm_forces[atomIndexLocal]); sm_forcesAtomIndexOffset[dimIndex] = fx * n; } } else { // We use blockSize shared memory elements to read fx, or fy, or fz, and then reduce them to // fit into smemPerDim elements which are stored separately (first 2 dimensions only) const int smemPerDim = warp_size; const int smemReserved = (DIM)*smemPerDim; __shared__ float sm_forceReduction[smemReserved + blockSize]; __shared__ float* sm_forceTemp[DIM]; const int numWarps = blockSize / smemPerDim; const int minStride = max(1, atomDataSize / numWarps); // order 4: 128 threads => 4, 256 threads => 2, etc #pragma unroll for (int dimIndex = 0; dimIndex < DIM; dimIndex++) { int elementIndex = smemReserved + lineIndex; // Store input force contributions sm_forceReduction[elementIndex] = (dimIndex == XX) ? fx : (dimIndex == YY) ? fy : fz; // sync here because two warps write data that the first one consumes below __syncthreads(); // Reduce to fit into smemPerDim (warp size) #pragma unroll for (int redStride = atomDataSize / 2; redStride > minStride; redStride >>= 1) { if (splineIndex < redStride) { sm_forceReduction[elementIndex] += sm_forceReduction[elementIndex + redStride]; } } __syncthreads(); // Last iteration - packing everything to be nearby, storing convenience pointer sm_forceTemp[dimIndex] = sm_forceReduction + dimIndex * smemPerDim; int redStride = minStride; if (splineIndex < redStride) { const int packedIndex = atomIndexLocal * redStride + splineIndex; sm_forceTemp[dimIndex][packedIndex] = sm_forceReduction[elementIndex] + sm_forceReduction[elementIndex + redStride]; } __syncthreads(); } assert((blockSize / warp_size) >= DIM); // assert (atomsPerBlock <= warp_size); const int warpIndex = lineIndex / warp_size; const int dimIndex = warpIndex; // First 3 warps can now process 1 dimension each if (dimIndex < DIM) { int sourceIndex = lineIndex % warp_size; #pragma unroll for (int redStride = minStride / 2; redStride > 1; redStride >>= 1) { if (!(splineIndex & redStride)) { sm_forceTemp[dimIndex][sourceIndex] += sm_forceTemp[dimIndex][sourceIndex + redStride]; } } __syncwarp(); const float n = read_grid_size(realGridSizeFP, dimIndex); const int atomIndex = sourceIndex / minStride; if (sourceIndex == minStride * atomIndex) { float* __restrict__ sm_forcesAtomIndexOffset = reinterpret_cast<float*>(&sm_forces[atomIndex]); sm_forcesAtomIndexOffset[dimIndex] = (sm_forceTemp[dimIndex][sourceIndex] + sm_forceTemp[dimIndex][sourceIndex + 1]) * n; } } } } /*! \brief Calculate the sum of the force partial components (in X, Y and Z) * * \tparam order The PME order (must be 4). * \tparam atomsPerWarp The number of atoms per GPU warp. * \tparam wrapX Tells if the grid is wrapped in the X dimension. * \tparam wrapY Tells if the grid is wrapped in the Y dimension. * \param[out] fx The force partial component in the X dimension. * \param[out] fy The force partial component in the Y dimension. * \param[out] fz The force partial component in the Z dimension. * \param[in] ithyMin The thread minimum index in the Y dimension. * \param[in] ithyMax The thread maximum index in the Y dimension. * \param[in] ixBase The grid line index base value in the X dimension. 
* \param[in] iz The grid line index in the Z dimension. * \param[in] nx The grid real size in the X dimension. * \param[in] ny The grid real size in the Y dimension. * \param[in] pny The padded grid real size in the Y dimension. * \param[in] pnz The padded grid real size in the Z dimension. * \param[in] atomIndexLocal The atom index for this thread. * \param[in] splineIndexBase The base value of the spline parameter index. * \param[in] tdz The theta and dtheta in the Z dimension. * \param[in] sm_gridlineIndices Shared memory array of grid line indices. * \param[in] sm_theta Shared memory array of atom theta values. * \param[in] sm_dtheta Shared memory array of atom dtheta values. * \param[in] gm_grid Global memory array of the grid to use. */ template<int order, int atomsPerWarp, bool wrapX, bool wrapY> __device__ __forceinline__ void sumForceComponents(float* __restrict__ fx, float* __restrict__ fy, float* __restrict__ fz, const int ithyMin, const int ithyMax, const int ixBase, const int iz, const int nx, const int ny, const int pny, const int pnz, const int atomIndexLocal, const int splineIndexBase, const float2 tdz, const int* __restrict__ sm_gridlineIndices, const float* __restrict__ sm_theta, const float* __restrict__ sm_dtheta, const float* __restrict__ gm_grid) { #pragma unroll for (int ithy = ithyMin; ithy < ithyMax; ithy++) { const int splineIndexY = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, YY, ithy); const float2 tdy = make_float2(sm_theta[splineIndexY], sm_dtheta[splineIndexY]); int iy = sm_gridlineIndices[atomIndexLocal * DIM + YY] + ithy; if (wrapY & (iy >= ny)) { iy -= ny; } const int constOffset = iy * pnz + iz; #pragma unroll for (int ithx = 0; (ithx < order); ithx++) { int ix = ixBase + ithx; if (wrapX & (ix >= nx)) { ix -= nx; } const int gridIndexGlobal = ix * pny * pnz + constOffset; assert(gridIndexGlobal >= 0); const float gridValue = gm_grid[gridIndexGlobal]; assert(isfinite(gridValue)); const int splineIndexX = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, XX, ithx); const float2 tdx = make_float2(sm_theta[splineIndexX], sm_dtheta[splineIndexX]); const float fxy1 = tdz.x * gridValue; const float fz1 = tdz.y * gridValue; *fx += tdx.y * tdy.x * fxy1; *fy += tdx.x * tdy.y * fxy1; *fz += tdx.x * tdy.x * fz1; } } } /*! \brief Calculate the grid forces and store them in shared memory. * * \param[in,out] sm_forces Shared memory array with the output forces. * \param[in] forceIndexLocal The local (per thread) index in the sm_forces array. * \param[in] forceIndexGlobal The index of the thread in the gm_coefficients array. * \param[in] recipBox The reciprocal box. * \param[in] scale The scale to use when calculating the forces. For gm_coefficientsB * (when using multiple coefficients on a single grid) the scale will be (1.0 - scale). * \param[in] gm_coefficients Global memory array of the coefficients to use for an unperturbed * or FEP in state A if a single grid is used (\p multiCoefficientsSingleGrid == true).If two * separate grids are used this should be the coefficients of the grid in question. 
*/ __device__ __forceinline__ void calculateAndStoreGridForces(float3* __restrict__ sm_forces, const int forceIndexLocal, const int forceIndexGlobal, const float recipBox[DIM][DIM], const float scale, const float* __restrict__ gm_coefficients) { const float3 atomForces = sm_forces[forceIndexLocal]; float negCoefficient = -scale * gm_coefficients[forceIndexGlobal]; float3 result; result.x = negCoefficient * recipBox[XX][XX] * atomForces.x; result.y = negCoefficient * (recipBox[XX][YY] * atomForces.x + recipBox[YY][YY] * atomForces.y); result.z = negCoefficient * (recipBox[XX][ZZ] * atomForces.x + recipBox[YY][ZZ] * atomForces.y + recipBox[ZZ][ZZ] * atomForces.z); sm_forces[forceIndexLocal] = result; } /*! \brief * A CUDA kernel which gathers the atom forces from the grid. * The grid is assumed to be wrapped in dimension Z. * * \tparam order The PME order (must be 4 currently). * \tparam wrapX Tells if the grid is wrapped in the X dimension. * \tparam wrapY Tells if the grid is wrapped in the Y dimension. * \tparam numGrids The number of grids to use in the kernel. Can be 1 or 2. * \tparam readGlobal Tells if we should read spline values from global memory * \tparam threadsPerAtom How many threads work on each atom * * \param[in] kernelParams All the PME GPU data. */ template<int order, bool wrapX, bool wrapY, int numGrids, bool readGlobal, ThreadsPerAtom threadsPerAtom> __launch_bounds__(c_gatherMaxThreadsPerBlock, c_gatherMinBlocksPerMP) __global__ void pme_gather_kernel(const PmeGpuCudaKernelParams kernelParams) { assert(numGrids == 1 || numGrids == 2); /* Global memory pointers */ const float* __restrict__ gm_coefficientsA = kernelParams.atoms.d_coefficients[0]; const float* __restrict__ gm_coefficientsB = kernelParams.atoms.d_coefficients[1]; const float* __restrict__ gm_gridA = kernelParams.grid.d_realGrid[0]; const float* __restrict__ gm_gridB = kernelParams.grid.d_realGrid[1]; static_assert(sizeof(*kernelParams.atoms.d_forces) == 3 * sizeof(float)); float* __restrict__ gm_forces = reinterpret_cast<float*>(kernelParams.atoms.d_forces); /* Global memory pointers for readGlobal */ const float* __restrict__ gm_theta = kernelParams.atoms.d_theta; const float* __restrict__ gm_dtheta = kernelParams.atoms.d_dtheta; const int* __restrict__ gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices; float3 atomX; float atomCharge; const int blockIndex = blockIdx.y * gridDim.x + blockIdx.x; /* Number of data components and threads for a single atom */ const int threadsPerAtomValue = (threadsPerAtom == ThreadsPerAtom::Order) ? 
order : order * order; const int atomDataSize = threadsPerAtomValue; const int atomsPerBlock = c_gatherMaxThreadsPerBlock / atomDataSize; // Number of atoms processed by a single warp in spread and gather const int atomsPerWarp = warp_size / atomDataSize; const int blockSize = atomsPerBlock * atomDataSize; assert(blockSize == blockDim.x * blockDim.y * blockDim.z); /* These are the atom indices - for the shared and global memory */ const int atomIndexLocal = threadIdx.z; const int atomIndexOffset = blockIndex * atomsPerBlock; const int atomIndexGlobal = atomIndexOffset + atomIndexLocal; /* Early return for fully empty blocks at the end * (should only happen for billions of input atoms) */ if (atomIndexOffset >= kernelParams.atoms.nAtoms) { return; } // 4 warps per block, 8 atoms per warp *3 *4 const int splineParamsSize = atomsPerBlock * DIM * order; const int gridlineIndicesSize = atomsPerBlock * DIM; __shared__ int sm_gridlineIndices[gridlineIndicesSize]; __shared__ float sm_theta[splineParamsSize]; __shared__ float sm_dtheta[splineParamsSize]; /* Spline Z coordinates */ const int ithz = threadIdx.x; /* These are the spline contribution indices in shared memory */ const int splineIndex = threadIdx.y * blockDim.x + threadIdx.x; const int lineIndex = (threadIdx.z * (blockDim.x * blockDim.y)) + splineIndex; /* And to all the block's particles */ const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y)) + blockDim.x * threadIdx.y + threadIdx.x; const int threadLocalIdMax = blockDim.x * blockDim.y * blockDim.z; if (readGlobal) { /* Read splines */ const int localGridlineIndicesIndex = threadLocalId; const int globalGridlineIndicesIndex = blockIndex * gridlineIndicesSize + localGridlineIndicesIndex; if (localGridlineIndicesIndex < gridlineIndicesSize) { sm_gridlineIndices[localGridlineIndicesIndex] = gm_gridlineIndices[globalGridlineIndicesIndex]; assert(sm_gridlineIndices[localGridlineIndicesIndex] >= 0); } /* The loop needed for order threads per atom to make sure we load all data values, as each thread must load multiple values with order*order threads per atom, it is only required for each thread to load one data value */ const int iMin = 0; const int iMax = (threadsPerAtom == ThreadsPerAtom::Order) ? 
3 : 1; for (int i = iMin; i < iMax; i++) { int localSplineParamsIndex = threadLocalId + i * threadLocalIdMax; /* i will always be zero for order*order threads per atom */ int globalSplineParamsIndex = blockIndex * splineParamsSize + localSplineParamsIndex; if (localSplineParamsIndex < splineParamsSize) { sm_theta[localSplineParamsIndex] = gm_theta[globalSplineParamsIndex]; sm_dtheta[localSplineParamsIndex] = gm_dtheta[globalSplineParamsIndex]; assert(isfinite(sm_theta[localSplineParamsIndex])); assert(isfinite(sm_dtheta[localSplineParamsIndex])); } } __syncthreads(); } else { const float3* __restrict__ gm_coordinates = asFloat3(kernelParams.atoms.d_coordinates); /* Recalculate Splines */ if (c_useAtomDataPrefetch) { // charges __shared__ float sm_coefficients[atomsPerBlock]; // Coordinates __shared__ float3 sm_coordinates[atomsPerBlock]; /* Staging coefficients/charges */ pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(sm_coefficients, gm_coefficientsA); /* Staging coordinates */ pme_gpu_stage_atom_data<float3, atomsPerBlock, 1>(sm_coordinates, gm_coordinates); __syncthreads(); atomX = sm_coordinates[atomIndexLocal]; atomCharge = sm_coefficients[atomIndexLocal]; } else { atomX = gm_coordinates[atomIndexGlobal]; atomCharge = gm_coefficientsA[atomIndexGlobal]; } calculate_splines<order, atomsPerBlock, atomsPerWarp, true, false, numGrids>( kernelParams, atomIndexOffset, atomX, atomCharge, sm_theta, sm_dtheta, sm_gridlineIndices); __syncwarp(); } float fx = 0.0F; float fy = 0.0F; float fz = 0.0F; const int chargeCheck = pme_gpu_check_atom_charge(gm_coefficientsA[atomIndexGlobal]); const int nx = kernelParams.grid.realGridSize[XX]; const int ny = kernelParams.grid.realGridSize[YY]; const int nz = kernelParams.grid.realGridSize[ZZ]; const int pny = kernelParams.grid.realGridSizePadded[YY]; const int pnz = kernelParams.grid.realGridSizePadded[ZZ]; const int atomWarpIndex = atomIndexLocal % atomsPerWarp; const int warpIndex = atomIndexLocal / atomsPerWarp; const int splineIndexBase = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex); const int splineIndexZ = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, ZZ, ithz); const float2 tdz = make_float2(sm_theta[splineIndexZ], sm_dtheta[splineIndexZ]); int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] + ithz; const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX]; if (iz >= nz) { iz -= nz; } const int ithyMin = (threadsPerAtom == ThreadsPerAtom::Order) ? 0 : threadIdx.y; const int ithyMax = (threadsPerAtom == ThreadsPerAtom::Order) ? 
order : threadIdx.y + 1; if (chargeCheck) { sumForceComponents<order, atomsPerWarp, wrapX, wrapY>(&fx, &fy, &fz, ithyMin, ithyMax, ixBase, iz, nx, ny, pny, pnz, atomIndexLocal, splineIndexBase, tdz, sm_gridlineIndices, sm_theta, sm_dtheta, gm_gridA); } // Reduction of partial force contributions __shared__ float3 sm_forces[atomsPerBlock]; reduce_atom_forces<order, atomDataSize, blockSize>( sm_forces, atomIndexLocal, splineIndex, lineIndex, kernelParams.grid.realGridSizeFP, fx, fy, fz); __syncthreads(); /* Calculating the final forces with no component branching, atomsPerBlock threads */ const int forceIndexLocal = threadLocalId; const int forceIndexGlobal = atomIndexOffset + forceIndexLocal; const float scale = kernelParams.current.scale; if (forceIndexLocal < atomsPerBlock) { calculateAndStoreGridForces( sm_forces, forceIndexLocal, forceIndexGlobal, kernelParams.current.recipBox, scale, gm_coefficientsA); } __syncwarp(); assert(atomsPerBlock <= warp_size); /* Writing or adding the final forces component-wise, single warp */ const int blockForcesSize = atomsPerBlock * DIM; const int numIter = (blockForcesSize + warp_size - 1) / warp_size; const int iterThreads = blockForcesSize / numIter; if (threadLocalId < iterThreads) { #pragma unroll for (int i = 0; i < numIter; i++) { int outputIndexLocal = i * iterThreads + threadLocalId; int outputIndexGlobal = blockIndex * blockForcesSize + outputIndexLocal; float outputForceComponent = (reinterpret_cast<float*>(sm_forces)[outputIndexLocal]); gm_forces[outputIndexGlobal] = outputForceComponent; } } if (numGrids == 2) { /* We must sync here since the same shared memory is used as above. */ __syncthreads(); fx = 0.0F; fy = 0.0F; fz = 0.0F; const int chargeCheck = pme_gpu_check_atom_charge(gm_coefficientsB[atomIndexGlobal]); if (chargeCheck) { sumForceComponents<order, atomsPerWarp, wrapX, wrapY>(&fx, &fy, &fz, ithyMin, ithyMax, ixBase, iz, nx, ny, pny, pnz, atomIndexLocal, splineIndexBase, tdz, sm_gridlineIndices, sm_theta, sm_dtheta, gm_gridB); } // Reduction of partial force contributions reduce_atom_forces<order, atomDataSize, blockSize>( sm_forces, atomIndexLocal, splineIndex, lineIndex, kernelParams.grid.realGridSizeFP, fx, fy, fz); __syncthreads(); /* Calculating the final forces with no component branching, atomsPerBlock threads */ if (forceIndexLocal < atomsPerBlock) { calculateAndStoreGridForces(sm_forces, forceIndexLocal, forceIndexGlobal, kernelParams.current.recipBox, 1.0F - scale, gm_coefficientsB); } __syncwarp(); /* Writing or adding the final forces component-wise, single warp */ if (threadLocalId < iterThreads) { #pragma unroll for (int i = 0; i < numIter; i++) { int outputIndexLocal = i * iterThreads + threadLocalId; int outputIndexGlobal = blockIndex * blockForcesSize + outputIndexLocal; float outputForceComponent = (reinterpret_cast<float*>(sm_forces)[outputIndexLocal]); gm_forces[outputIndexGlobal] += outputForceComponent; } } } } //! 
Kernel instantiations // clang-format off template __global__ void pme_gather_kernel<4, true, true, 1, true, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 1, true, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 1, false, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 1, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 2, true, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 2, true, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 2, false, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); template __global__ void pme_gather_kernel<4, true, true, 2, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); // clang-format on
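/*
 * Illustrative sketch (not part of the GROMACS sources above): the gather kernel addresses the
 * real-space grid as a flat array in row-major (X, Y, Z) order with padded Y/Z extents, and
 * wraps X/Y indices back into the grid when wrapX/wrapY are enabled (Z is always wrapped).
 * The standalone helper below reproduces only that index arithmetic; the names nx/ny/nz/pny/pnz
 * mirror the kernel parameters, everything else here is hypothetical.
 */
#include <cassert>

static inline int wrappedGridIndex(int ix, int iy, int iz,
                                   int nx, int ny, int nz,
                                   int pny, int pnz)
{
    // Periodic wrapping of indices that ran past the real grid size.
    if (ix >= nx) { ix -= nx; }
    if (iy >= ny) { iy -= ny; }
    if (iz >= nz) { iz -= nz; }
    // Row-major layout with padded Y and Z dimensions, as in gridIndexGlobal above.
    return ix * pny * pnz + iy * pnz + iz;
}

static void wrappedGridIndexDemo()
{
    // A 4x4x4 grid padded to 4x6x8: the last in-range cell lands inside the padded block.
    assert(wrappedGridIndex(3, 3, 3, 4, 4, 4, 6, 8) == 3 * 6 * 8 + 3 * 8 + 3);
    // An X index one past the grid (e.g. ixBase + ithx overflowing) wraps back to 0.
    assert(wrappedGridIndex(4, 0, 0, 4, 4, 4, 6, 8) == 0);
}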
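/*
 * Illustrative usage sketch (not part of the SUNDIALS sources that follow): the file below
 * implements the CUDA N_Vector, and a caller might exercise it roughly as shown here. The
 * SUNContext is assumed to have been created elsewhere with the context API of the installed
 * SUNDIALS version; all constructors and operations used are the ones defined in this file.
 */
#include <nvector/nvector_cuda.h>

static realtype cudaVectorRoundTrip(SUNContext sunctx, sunindextype n)
{
    N_Vector x = N_VNew_Cuda(n, sunctx);   // allocates host and device buffers
    N_Vector y = N_VClone_Cuda(x);
    N_Vector z = N_VClone_Cuda(x);

    N_VConst_Cuda(RCONST(1.0), x);                          // x_i = 1
    N_VConst_Cuda(RCONST(2.0), y);                          // y_i = 2
    N_VLinearSum_Cuda(RCONST(3.0), x, RCONST(0.5), y, z);   // z_i = 3*1 + 0.5*2 = 4

    const realtype dot = N_VDotProd_Cuda(z, x);             // expected value: 4 * n

    N_VCopyFromDevice_Cuda(z);   // bring the result back if host access is needed

    N_VDestroy(x);
    N_VDestroy(y);
    N_VDestroy(z);
    return dot;
}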
#include <nvector/nvector_cuda.h> #include "VectorArrayKernels.cuh" #include "VectorKernels.cuh" #include "sundials_cuda.h" #include "sundials_debug.h" #define ZERO RCONST(0.0) #define HALF RCONST(0.5) extern "C" { using namespace sundials; using namespace sundials::cuda; using namespace sundials::nvector_cuda; /* * Macro definitions */ // Macros to access vector content #define NVEC_CUDA_CONTENT(x) ((N_VectorContent_Cuda)(x->content)) #define NVEC_CUDA_MEMSIZE(x) (NVEC_CUDA_CONTENT(x)->length * sizeof(realtype)) #define NVEC_CUDA_MEMHELP(x) (NVEC_CUDA_CONTENT(x)->mem_helper) #define NVEC_CUDA_HDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->host_data->ptr) #define NVEC_CUDA_DDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->device_data->ptr) #define NVEC_CUDA_STREAM(x) (NVEC_CUDA_CONTENT(x)->stream_exec_policy->stream()) // Macros to access vector private content #define NVEC_CUDA_PRIVATE(x) ((N_PrivateVectorContent_Cuda)(NVEC_CUDA_CONTENT(x)->priv)) #define NVEC_CUDA_HBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_host->ptr) #define NVEC_CUDA_DBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_dev->ptr) /* * Private structure definition */ struct _N_PrivateVectorContent_Cuda { booleantype use_managed_mem; /* do data pointers use managed memory */ // reduction workspace SUNMemory reduce_buffer_dev; // device memory for reductions SUNMemory reduce_buffer_host; // host memory for reductions size_t reduce_buffer_bytes; // current size of reduction buffers // fused op workspace SUNMemory fused_buffer_dev; // device memory for fused ops SUNMemory fused_buffer_host; // host memory for fused ops size_t fused_buffer_bytes; // current size of the buffers size_t fused_buffer_offset; // current offset into the buffer }; typedef struct _N_PrivateVectorContent_Cuda *N_PrivateVectorContent_Cuda; /* * Private function definitions */ // Allocate vector data static int AllocateData(N_Vector v); // Reduction buffer functions static int InitializeReductionBuffer(N_Vector v, const realtype* value, size_t n = 1); static void FreeReductionBuffer(N_Vector v); static int CopyReductionBufferFromDevice(N_Vector v, size_t n = 1); // Fused operation buffer functions static int FusedBuffer_Init(N_Vector v, int nreal, int nptr); static int FusedBuffer_CopyRealArray(N_Vector v, realtype *r_data, int nval, realtype **shortcut); static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut); static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut); static int FusedBuffer_CopyToDevice(N_Vector v); static int FusedBuffer_Free(N_Vector v); // Kernel launch parameters static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, cudaStream_t& stream, size_t n = 0); static void PostKernelLaunch(); N_Vector N_VNewEmpty_Cuda(SUNContext sunctx) { N_Vector v; /* Create vector */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Cuda; v->ops->nvclone = N_VClone_Cuda; v->ops->nvcloneempty = N_VCloneEmpty_Cuda; v->ops->nvdestroy = N_VDestroy_Cuda; v->ops->nvspace = N_VSpace_Cuda; v->ops->nvgetlength = N_VGetLength_Cuda; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Cuda; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Cuda; v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Cuda; /* standard vector operations */ v->ops->nvlinearsum = 
N_VLinearSum_Cuda; v->ops->nvconst = N_VConst_Cuda; v->ops->nvprod = N_VProd_Cuda; v->ops->nvdiv = N_VDiv_Cuda; v->ops->nvscale = N_VScale_Cuda; v->ops->nvabs = N_VAbs_Cuda; v->ops->nvinv = N_VInv_Cuda; v->ops->nvaddconst = N_VAddConst_Cuda; v->ops->nvdotprod = N_VDotProd_Cuda; v->ops->nvmaxnorm = N_VMaxNorm_Cuda; v->ops->nvmin = N_VMin_Cuda; v->ops->nvl1norm = N_VL1Norm_Cuda; v->ops->nvinvtest = N_VInvTest_Cuda; v->ops->nvconstrmask = N_VConstrMask_Cuda; v->ops->nvminquotient = N_VMinQuotient_Cuda; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Cuda; v->ops->nvwrmsnorm = N_VWrmsNorm_Cuda; v->ops->nvwl2norm = N_VWL2Norm_Cuda; v->ops->nvcompare = N_VCompare_Cuda; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_Cuda; v->ops->nvmaxnormlocal = N_VMaxNorm_Cuda; v->ops->nvminlocal = N_VMin_Cuda; v->ops->nvl1normlocal = N_VL1Norm_Cuda; v->ops->nvinvtestlocal = N_VInvTest_Cuda; v->ops->nvconstrmasklocal = N_VConstrMask_Cuda; v->ops->nvminquotientlocal = N_VMinQuotient_Cuda; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Cuda; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Cuda; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Cuda; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Cuda; v->ops->nvbufpack = N_VBufPack_Cuda; v->ops->nvbufunpack = N_VBufUnpack_Cuda; /* print operation for debugging */ v->ops->nvprint = N_VPrint_Cuda; v->ops->nvprintfile = N_VPrintFile_Cuda; /* Create content */ v->content = (N_VectorContent_Cuda) malloc(sizeof(_N_VectorContent_Cuda)); if (v->content == NULL) { N_VDestroy(v); return(NULL); } NVEC_CUDA_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Cuda)); if (NVEC_CUDA_CONTENT(v)->priv == NULL) { N_VDestroy(v); return(NULL); } // Initialize content NVEC_CUDA_CONTENT(v)->length = 0; NVEC_CUDA_CONTENT(v)->host_data = NULL; NVEC_CUDA_CONTENT(v)->device_data = NULL; NVEC_CUDA_CONTENT(v)->stream_exec_policy = NULL; NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NULL; NVEC_CUDA_CONTENT(v)->mem_helper = NULL; NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; // Initialize private content NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE; NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL; NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL; NVEC_CUDA_PRIVATE(v)->reduce_buffer_bytes = 0; NVEC_CUDA_PRIVATE(v)->fused_buffer_dev = NULL; NVEC_CUDA_PRIVATE(v)->fused_buffer_host = NULL; NVEC_CUDA_PRIVATE(v)->fused_buffer_bytes = 0; NVEC_CUDA_PRIVATE(v)->fused_buffer_offset = 0; return(v); } N_Vector N_VNew_Cuda(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(256); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(256); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewWithMemHelp_Cuda(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx) 
{ N_Vector v; if (helper == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper is NULL\n"); return(NULL); } if (!SUNMemoryHelper_ImplementsRequiredOps(helper)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper doesn't implement all required ops\n"); return(NULL); } v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->mem_helper = helper; NVEC_CUDA_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(256); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(256); NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = use_managed_mem; if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewManaged_Cuda(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(256); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(256); NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VMake_Cuda(sunindextype length, realtype *h_vdata, realtype *d_vdata, SUNContext sunctx) { N_Vector v; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST); NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE); NVEC_CUDA_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(256); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(256); NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_CUDA_CONTENT(v)->device_data == NULL || NVEC_CUDA_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VMakeManaged_Cuda(sunindextype length, realtype *vdata, SUNContext sunctx) { N_Vector v; if (vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM); NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data); NVEC_CUDA_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(256); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(256); NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); 
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_CUDA_CONTENT(v)->device_data == NULL || NVEC_CUDA_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } /* ---------------------------------------------------------------------------- * Set pointer to the raw host data. Does not free the existing pointer. */ void N_VSetHostArrayPointer_Cuda(realtype* h_vdata, N_Vector v) { if (N_VIsManagedMemory_Cuda(v)) { if (NVEC_CUDA_CONTENT(v)->host_data) { NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata; NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) h_vdata; } else { NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM); NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data); } } else { if (NVEC_CUDA_CONTENT(v)->host_data) { NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata; } else { NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST); } } } /* ---------------------------------------------------------------------------- * Set pointer to the raw device data */ void N_VSetDeviceArrayPointer_Cuda(realtype* d_vdata, N_Vector v) { if (N_VIsManagedMemory_Cuda(v)) { if (NVEC_CUDA_CONTENT(v)->device_data) { NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata; NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) d_vdata; } else { NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM); NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->device_data); } } else { if (NVEC_CUDA_CONTENT(v)->device_data) { NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata; } else { NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE); } } } /* ---------------------------------------------------------------------------- * Return a flag indicating if the memory for the vector data is managed */ booleantype N_VIsManagedMemory_Cuda(N_Vector x) { return NVEC_CUDA_PRIVATE(x)->use_managed_mem; } int N_VSetKernelExecPolicy_Cuda(N_Vector x, SUNCudaExecPolicy* stream_exec_policy, SUNCudaExecPolicy* reduce_exec_policy) { if (x == NULL || stream_exec_policy == NULL || reduce_exec_policy == NULL) return(-1); if (NVEC_CUDA_CONTENT(x)->own_exec) { delete NVEC_CUDA_CONTENT(x)->stream_exec_policy; delete NVEC_CUDA_CONTENT(x)->reduce_exec_policy; } NVEC_CUDA_CONTENT(x)->stream_exec_policy = stream_exec_policy; NVEC_CUDA_CONTENT(x)->reduce_exec_policy = reduce_exec_policy; NVEC_CUDA_CONTENT(x)->own_exec = SUNFALSE; return(0); } /* ---------------------------------------------------------------------------- * Copy vector data to the device */ void N_VCopyToDevice_Cuda(N_Vector x) { int copy_fail; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), NVEC_CUDA_CONTENT(x)->device_data, NVEC_CUDA_CONTENT(x)->host_data, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(x))); } /* 
---------------------------------------------------------------------------- * Copy vector data from the device to the host */ void N_VCopyFromDevice_Cuda(N_Vector x) { int copy_fail; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), NVEC_CUDA_CONTENT(x)->host_data, NVEC_CUDA_CONTENT(x)->device_data, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(x))); } /* ---------------------------------------------------------------------------- * Function to print the a CUDA-based vector to stdout */ void N_VPrint_Cuda(N_Vector x) { N_VPrintFile_Cuda(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a CUDA-based vector to outfile */ void N_VPrintFile_Cuda(N_Vector x, FILE *outfile) { sunindextype i; #ifdef SUNDIALS_DEBUG_PRINTVEC N_VCopyFromDevice_Cuda(x); #endif for (i = 0; i < NVEC_CUDA_CONTENT(x)->length; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%35.32Le\n", NVEC_CUDA_HDATAp(x)[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%19.16e\n", NVEC_CUDA_HDATAp(x)[i]); #else fprintf(outfile, "%11.8e\n", NVEC_CUDA_HDATAp(x)[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Cuda(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty_Cuda(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Set content */ NVEC_CUDA_CONTENT(v)->length = NVEC_CUDA_CONTENT(w)->length; NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = NVEC_CUDA_PRIVATE(w)->use_managed_mem; return(v); } N_Vector N_VClone_Cuda(N_Vector w) { N_Vector v; v = NULL; v = N_VCloneEmpty_Cuda(w); if (v == NULL) return(NULL); NVEC_CUDA_MEMHELP(v) = SUNMemoryHelper_Clone(NVEC_CUDA_MEMHELP(w)); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_CONTENT(v)->stream_exec_policy = NVEC_CUDA_CONTENT(w)->stream_exec_policy->clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NVEC_CUDA_CONTENT(w)->reduce_exec_policy->clone(); if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: SUNMemoryHelper_Clone returned NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } void N_VDestroy_Cuda(N_Vector v) { N_VectorContent_Cuda vc; N_PrivateVectorContent_Cuda vcp; if (v == NULL) return; /* free ops structure */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } /* extract content */ vc = NVEC_CUDA_CONTENT(v); if (vc == NULL) { free(v); v = NULL; return; } /* free private content */ vcp = (N_PrivateVectorContent_Cuda) vc->priv; if (vcp != NULL) { /* free items in private content */ FreeReductionBuffer(v); FusedBuffer_Free(v); free(vcp); vc->priv = NULL; } /* free items in content */ if (vc->own_exec) { delete vc->stream_exec_policy; vc->stream_exec_policy = NULL; delete vc->reduce_exec_policy; vc->reduce_exec_policy = NULL; } if (NVEC_CUDA_MEMHELP(v)) { SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), 
vc->host_data, nullptr); vc->host_data = NULL; SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->device_data, nullptr); vc->device_data = NULL; if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper); vc->mem_helper = NULL; } /* free content struct */ free(vc); /* free vector */ free(v); return; } void N_VSpace_Cuda(N_Vector X, sunindextype *lrw, sunindextype *liw) { *lrw = NVEC_CUDA_CONTENT(X)->length; *liw = 2; } void N_VConst_Cuda(realtype a, N_Vector X) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConst_Cuda: GetKernelParameters returned nonzero\n"); } setConstKernel<<<grid, block, shMemSize, stream>>> ( a, NVEC_CUDA_DDATAp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VLinearSum_Cuda(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSum_Cuda: GetKernelParameters returned nonzero\n"); } linearSumKernel<<<grid, block, shMemSize, stream>>> ( a, NVEC_CUDA_DDATAp(X), b, NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VProd_Cuda(N_Vector X, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VProd_Cuda: GetKernelParameters returned nonzero\n"); } prodKernel<<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VDiv_Cuda(N_Vector X, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDiv_Cuda: GetKernelParameters returned nonzero\n"); } divKernel<<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VScale_Cuda(realtype a, N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScale_Cuda: GetKernelParameters returned nonzero\n"); } scaleKernel<<<grid, block, shMemSize, stream>>> ( a, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VAbs_Cuda(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VAbs_Cuda: GetKernelParameters returned nonzero\n"); } absKernel<<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VInv_Cuda(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInv_Cuda: GetKernelParameters returned nonzero\n"); } invKernel<<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VAddConst_Cuda(N_Vector X, realtype b, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { 
SUNDIALS_DEBUG_PRINT("ERROR in N_VAddConst_Cuda: GetKernelParameters returned nonzero\n"); } addConstKernel<<<grid, block, shMemSize, stream>>> ( b, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } realtype N_VDotProd_Cuda(N_Vector X, N_Vector Y) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: GetKernelParameters returned nonzero\n"); } dotProdKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VMaxNorm_Cuda(N_Vector X) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Cuda: GetKernelParameters returned nonzero\n"); } maxNormKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Finish reduction on CPU if there are less than two blocks of data left. CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWSqrSumLocal_Cuda(N_Vector X, N_Vector W) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Cuda: GetKernelParameters returned nonzero\n"); } wL2NormSquareKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(W), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWrmsNorm_Cuda(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Cuda(X, W); return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length); } realtype N_VWSqrSumMaskLocal_Cuda(N_Vector X, N_Vector W, N_Vector Id) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Cuda: GetKernelParameters returned nonzero\n"); } wL2NormSquareMaskKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(W), NVEC_CUDA_DDATAp(Id), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWrmsNormMask_Cuda(N_Vector 
X, N_Vector W, N_Vector Id) { const realtype sum = N_VWSqrSumMaskLocal_Cuda(X, W, Id); return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length); } realtype N_VMin_Cuda(N_Vector X) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = std::numeric_limits<realtype>::max(); if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Cuda: GetKernelParameters returned nonzero\n"); } findMinKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( gpu_result, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWL2Norm_Cuda(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Cuda(X, W); return std::sqrt(sum); } realtype N_VL1Norm_Cuda(N_Vector X) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Cuda: GetKernelParameters returned nonzero\n"); } L1NormKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } void N_VCompare_Cuda(realtype c, N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCompare_Cuda: GetKernelParameters returned nonzero\n"); } compareKernel<<<grid, block, shMemSize, stream>>> ( c, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } booleantype N_VInvTest_Cuda(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Cuda: GetKernelParameters returned nonzero\n"); } invTestKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return (gpu_result < HALF); } booleantype N_VConstrMask_Cuda(N_Vector C, N_Vector X, N_Vector M) { size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (InitializeReductionBuffer(X, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(X, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Cuda: GetKernelParameters returned nonzero\n"); } constrMaskKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( NVEC_CUDA_DDATAp(C), NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(M), NVEC_CUDA_DBUFFERp(X), 
NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return (gpu_result < HALF); } realtype N_VMinQuotient_Cuda(N_Vector num, N_Vector denom) { // Starting value for min reduction size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = std::numeric_limits<realtype>::max();; if (InitializeReductionBuffer(num, &gpu_result)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (GetKernelParameters(num, true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Cuda: GetKernelParameters returned nonzero\n"); } minQuotientKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( gpu_result, NVEC_CUDA_DDATAp(num), NVEC_CUDA_DDATAp(denom), NVEC_CUDA_DBUFFERp(num), NVEC_CUDA_CONTENT(num)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(num); gpu_result = NVEC_CUDA_HBUFFERp(num)[0]; return gpu_result; } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector z) { // Fused op workspace shortcuts realtype* cdata = NULL; realtype** xdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(z, nvec, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(z, c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(z, X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(z)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters and launch size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: GetKernelParameters returned nonzero\n"); return -1; } linearCombinationKernel<<<grid, block, shMemSize, stream>>> ( nvec, cdata, xdata, NVEC_CUDA_DDATAp(z), NVEC_CUDA_CONTENT(z)->length ); PostKernelLaunch(); return 0; } int N_VScaleAddMulti_Cuda(int nvec, realtype* c, N_Vector x, N_Vector* Y, N_Vector* Z) { // Shortcuts to the fused op workspace realtype* cdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(x, nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(x, c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(x)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set 
kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(x, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: GetKernelParameters returned nonzero\n"); return -1; } scaleAddMultiKernel<<<grid, block, shMemSize, stream>>> ( nvec, cdata, NVEC_CUDA_DDATAp(x), ydata, zdata, NVEC_CUDA_CONTENT(x)->length ); PostKernelLaunch(); return 0; } int N_VDotProdMulti_Cuda(int nvec, N_Vector x, N_Vector* Y, realtype* dots) { // Fused op workspace shortcuts realtype** ydata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(x, 0, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(x)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Setup the reduction buffer for (int i = 0; i < nvec; ++i) { dots[i] = ZERO; } if (InitializeReductionBuffer(x, dots, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: InitializeReductionBuffer returned nonzero\n"); } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(x, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: GetKernelParameters returned nonzero\n"); return -1; } grid = nvec; dotProdMultiKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( nvec, NVEC_CUDA_DDATAp(x), ydata, NVEC_CUDA_DBUFFERp(x), NVEC_CUDA_CONTENT(x)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(x, nvec); for (int i = 0; i < nvec; ++i) { dots[i] = NVEC_CUDA_HBUFFERp(x)[i]; } return 0; } /* * ----------------------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------------------- */ int N_VLinearSumVectorArray_Cuda(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { // Shortcuts to the fused op workspace realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], 0, 3 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinaerSumVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } linearSumVectorArrayKernel<<<grid, block, shMemSize, stream>>> ( nvec, a, xdata, b, ydata, zdata, 
NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } int N_VScaleVectorArray_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { // Shortcuts to the fused op workspace arrays realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } scaleVectorArrayKernel<<<grid, block, shMemSize, stream>>> ( nvec, cdata, xdata, zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } int N_VConstVectorArray_Cuda(int nvec, realtype c, N_Vector* Z) { // Shortcuts to the fused op workspace arrays realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], 0, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } constVectorArrayKernel<<<grid, block, shMemSize, stream>>> ( nvec, c, zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } int N_VWrmsNormVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W, realtype* norms) { // Fused op workspace shortcuts realtype** xdata = NULL; realtype** wdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(W[0], 0, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], W, nvec, &wdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(W[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Setup the reduction buffer for (int i = 0; i < nvec; ++i) { norms[i] = 
ZERO; } if (InitializeReductionBuffer(W[0], norms, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: InitializeReductionBuffer returned nonzero\n"); } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(W[0], true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } grid = nvec; wL2NormSquareVectorArrayKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( nvec, xdata, wdata, NVEC_CUDA_DBUFFERp(W[0]), NVEC_CUDA_CONTENT(W[0])->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(W[0], nvec); for (int i = 0; i < nvec; ++i) { norms[i] = std::sqrt(NVEC_CUDA_HBUFFERp(W[0])[i] / NVEC_CUDA_CONTENT(W[0])->length); } return 0; } int N_VWrmsNormMaskVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* norms) { // Fused op workspace shortcuts realtype** xdata = NULL; realtype** wdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(W[0], 0, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], W, nvec, &wdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(W[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Setup the reduction buffer for (int i = 0; i < nvec; ++i) { norms[i] = ZERO; } if (InitializeReductionBuffer(W[0], norms, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: InitializeReductionBuffer returned nonzero\n"); } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(W[0], true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormMaskVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } grid = nvec; wL2NormSquareMaskVectorArrayKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>> ( nvec, xdata, wdata, NVEC_CUDA_DDATAp(id), NVEC_CUDA_DBUFFERp(W[0]), NVEC_CUDA_CONTENT(W[0])->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(W[0], nvec); for (int i = 0; i < nvec; ++i) { norms[i] = std::sqrt(NVEC_CUDA_HBUFFERp(W[0])[i] / NVEC_CUDA_CONTENT(W[0])->length); } return 0; } int N_VScaleAddMultiVectorArray_Cuda(int nvec, int nsum, realtype* c, N_Vector* X, N_Vector** Y, N_Vector** Z) { // Shortcuts to the fused op workspace realtype* cdata = NULL; realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(X[0], nsum, nvec + 2 * nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(X[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(X[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Y, nvec, nsum, &ydata)) { 
SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Z, nvec, nsum, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(X[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } scaleAddMultiVectorArrayKernel<<<grid, block, shMemSize, stream>>> ( nvec, nsum, cdata, xdata, ydata, zdata, NVEC_CUDA_CONTENT(X[0])->length ); PostKernelLaunch(); return 0; } int N_VLinearCombinationVectorArray_Cuda(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { // Shortcuts to the fused op workspace arrays realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], nsum, nvec + nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(Z[0], X, nvec, nsum, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } linearCombinationVectorArrayKernel<<<grid, block, shMemSize, stream>>> ( nvec, nsum, cdata, xdata, zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } /* * ----------------------------------------------------------------- * OPTIONAL XBraid interface operations * ----------------------------------------------------------------- */ int N_VBufSize_Cuda(N_Vector x, sunindextype *size) { if (x == NULL) return(-1); *size = (sunindextype)NVEC_CUDA_MEMSIZE(x); return(0); } int N_VBufPack_Cuda(N_Vector x, void *buf) { int copy_fail = 0; cudaError_t cuerr; if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), buf_mem, NVEC_CUDA_CONTENT(x)->device_data, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); /* we synchronize with respect to the host, but only in this stream */ cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(x)); SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem, nullptr); return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? 
-1 : 0); } int N_VBufUnpack_Cuda(N_Vector x, void *buf) { int copy_fail = 0; cudaError_t cuerr; if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), NVEC_CUDA_CONTENT(x)->device_data, buf_mem, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); /* we synchronize with respect to the host, but only in this stream */ cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(x)); SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem, nullptr); return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Cuda(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Cuda; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Cuda; v->ops->nvdotprodmulti = N_VDotProdMulti_Cuda; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Cuda; v->ops->nvscalevectorarray = N_VScaleVectorArray_Cuda; v->ops->nvconstvectorarray = N_VConstVectorArray_Cuda; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Cuda; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Cuda; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Cuda; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Cuda; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Cuda; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvlinearcombination = tf ? N_VLinearCombination_Cuda : NULL; return 0; } int N_VEnableScaleAddMulti_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_Cuda : NULL; return 0; } int N_VEnableDotProdMulti_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_Cuda : NULL; v->ops->nvdotprodmultilocal = tf ? N_VDotProdMulti_Cuda : NULL; return 0; } int N_VEnableLinearSumVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_Cuda : NULL; return 0; } int N_VEnableScaleVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvscalevectorarray = tf ? 
N_VScaleVectorArray_Cuda : NULL; return 0; } int N_VEnableConstVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_Cuda : NULL; return 0; } int N_VEnableWrmsNormVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_Cuda : NULL; return 0; } int N_VEnableWrmsNormMaskVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_Cuda : NULL; return 0; } int N_VEnableScaleAddMultiVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_Cuda : NULL; return 0; } int N_VEnableLinearCombinationVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_Cuda : NULL; return 0; } /* * Private helper functions. */ int AllocateData(N_Vector v) { int alloc_fail = 0; N_VectorContent_Cuda vc = NVEC_CUDA_CONTENT(v); N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); if (N_VGetLength_Cuda(v) == 0) return(0); if (vcp->use_managed_mem) { alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data), NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_UVM, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n"); } vc->host_data = SUNMemoryHelper_Alias(vc->device_data); } else { alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->host_data), NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_HOST, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); } alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data), NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_DEVICE, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); } } return(alloc_fail ? -1 : 0); } /* * Initializes the internal buffer used for reductions. * If the buffer is already allocated, it will only be reallocated * if it is no longer large enough. This may occur if the length * of the vector is increased. The buffer is initialized to the * value given. 
*/ int InitializeReductionBuffer(N_Vector v, const realtype* value, size_t n) { int alloc_fail = 0; int copy_fail = 0; booleantype alloc_mem = SUNFALSE; size_t bytes = n * sizeof(realtype); // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Wrap the initial value as SUNMemory object SUNMemory value_mem = SUNMemoryHelper_Wrap((void*) value, SUNMEMTYPE_HOST); // Check if the existing reduction memory is not large enough if (vcp->reduce_buffer_bytes < bytes) { FreeReductionBuffer(v); alloc_mem = SUNTRUE; } if (alloc_mem) { // Allocate pinned memory on the host alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->reduce_buffer_host), bytes, SUNMEMTYPE_PINNED, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); // If pinned alloc failed, allocate plain host memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->reduce_buffer_host), bytes, SUNMEMTYPE_HOST, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); } } // Allocate device memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->reduce_buffer_dev), bytes, SUNMEMTYPE_DEVICE, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); } } if (!alloc_fail) { // Store the size of the reduction memory buffer vcp->reduce_buffer_bytes = bytes; // Initialize the memory with the value copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_dev, value_mem, bytes, (void*) NVEC_CUDA_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_CopyAsync failed\n"); } } // Deallocate the wrapper SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), value_mem, nullptr); return((alloc_fail || copy_fail) ? -1 : 0); } /* Free the reduction buffer */ void FreeReductionBuffer(N_Vector v) { N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); if (vcp == NULL) return; // Free device mem if (vcp->reduce_buffer_dev != NULL) SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_dev, nullptr); vcp->reduce_buffer_dev = NULL; // Free host mem if (vcp->reduce_buffer_host != NULL) SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_host, nullptr); vcp->reduce_buffer_host = NULL; // Reset allocated memory size vcp->reduce_buffer_bytes = 0; } /* Copy the reduction buffer from the device to the host. */ int CopyReductionBufferFromDevice(N_Vector v, size_t n) { int copy_fail; cudaError_t cuerr; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v), NVEC_CUDA_PRIVATE(v)->reduce_buffer_host, NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev, n * sizeof(realtype), (void*) NVEC_CUDA_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in CopyReductionBufferFromDevice: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(v)); return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? 
-1 : 0); } static int FusedBuffer_Init(N_Vector v, int nreal, int nptr) { int alloc_fail = 0; booleantype alloc_mem = SUNFALSE; // pad buffer with single precision data #if defined(SUNDIALS_SINGLE_PRECISION) size_t bytes = nreal * 2 * sizeof(realtype) + nptr * sizeof(realtype*); #elif defined(SUNDIALS_DOUBLE_PRECISION) size_t bytes = nreal * sizeof(realtype) + nptr * sizeof(realtype*); #else #error Incompatible precision for CUDA #endif // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check if the existing memory is not large enough if (vcp->fused_buffer_bytes < bytes) { FusedBuffer_Free(v); alloc_mem = SUNTRUE; } if (alloc_mem) { // Allocate pinned memory on the host alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_PINNED, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); // If pinned alloc failed, allocate plain host memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_HOST, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); return -1; } } // Allocate device memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->fused_buffer_dev), bytes, SUNMEMTYPE_DEVICE, nullptr); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); return -1; } // Store the size of the fused op buffer vcp->fused_buffer_bytes = bytes; } // Reset the buffer offset vcp->fused_buffer_offset = 0; return 0; } static int FusedBuffer_CopyRealArray(N_Vector v, realtype *rdata, int nval, realtype **shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyRealArray: Buffer offset is exceedes the buffer size\n"); return -1; } realtype* h_buffer = (realtype*) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nval; j++) { h_buffer[j] = rdata[j]; } // Set shortcut to the device buffer and update offset *shortcut = (realtype*) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); // accounting for buffer padding #if defined(SUNDIALS_SINGLE_PRECISION) vcp->fused_buffer_offset += nval * 2 * sizeof(realtype); #elif defined(SUNDIALS_DOUBLE_PRECISION) vcp->fused_buffer_offset += nval * sizeof(realtype); #else #error Incompatible precision for CUDA #endif return 0; } static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray1D: Buffer offset is exceedes the buffer size\n"); return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { h_buffer[j] = NVEC_CUDA_DDATAp(X[j]); } // Set shortcut to the device buffer and update offset *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); vcp->fused_buffer_offset += nvec * 
sizeof(realtype*); return 0; } static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray2D: Buffer offset is exceedes the buffer size\n"); return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { for (int k = 0; k < nsum; k++) { h_buffer[j * nsum + k] = NVEC_CUDA_DDATAp(X[k][j]); } } // Set shortcut to the device buffer and update offset *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); // Update the offset vcp->fused_buffer_offset += nvec * nsum * sizeof(realtype*); return 0; } static int FusedBuffer_CopyToDevice(N_Vector v) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Copy the fused buffer to the device int copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v), vcp->fused_buffer_dev, vcp->fused_buffer_host, vcp->fused_buffer_offset, (void*) NVEC_CUDA_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyToDevice: SUNMemoryHelper_CopyAsync failed\n"); return -1; } // Synchronize with respect to the host, but only in this stream SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(v))); return 0; } static int FusedBuffer_Free(N_Vector v) { N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); if (vcp == NULL) return 0; if (vcp->fused_buffer_host) { SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->fused_buffer_host, nullptr); vcp->fused_buffer_host = NULL; } if (vcp->fused_buffer_dev) { SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->fused_buffer_dev, nullptr); vcp->fused_buffer_dev = NULL; } vcp->fused_buffer_bytes = 0; vcp->fused_buffer_offset = 0; return 0; } /* Get the kernel launch parameters based on the kernel type (reduction or not), * using the appropriate kernel execution policy. */ static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, cudaStream_t& stream, size_t n) { n = (n == 0) ? NVEC_CUDA_CONTENT(v)->length : n; if (reduction) { SUNCudaExecPolicy* reduce_exec_policy = NVEC_CUDA_CONTENT(v)->reduce_exec_policy; grid = reduce_exec_policy->gridSize(n); block = reduce_exec_policy->blockSize(); shMemSize = 0; stream = *(reduce_exec_policy->stream()); if (block % CUDA_WARP_SIZE) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the block size must be a multiple must be of CUDA warp size"); #endif return(-1); } } else { SUNCudaExecPolicy* stream_exec_policy = NVEC_CUDA_CONTENT(v)->stream_exec_policy; grid = stream_exec_policy->gridSize(n); block = stream_exec_policy->blockSize(); shMemSize = 0; stream = *(stream_exec_policy->stream()); } if (grid == 0) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the grid size must be > 0"); #endif return(-1); } if (block == 0) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the block size must be > 0"); #endif return(-1); } return(0); } /* Should be called after a kernel launch. * If SUNDIALS_DEBUG_CUDA_LASTERROR is not defined, then the function does nothing. * If it is defined, the function will synchronize and check the last CUDA error. 
 */
void PostKernelLaunch()
{
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
  cudaDeviceSynchronize();
  SUNDIALS_CUDA_VERIFY(cudaGetLastError());
#endif
}

} // extern "C"
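/*
 * A minimal usage sketch for the XBraid buffer routines above, assuming the
 * declarations from nvector/nvector_cuda.h are visible and that the N_Vector
 * `v` was created elsewhere with one of the nvector_cuda constructors and
 * already holds device data. The helper name is illustrative only: it packs
 * the vector into a plain host buffer and unpacks it again.
 */
#include <stdlib.h>

static int PackUnpackRoundTrip(N_Vector v)
{
  sunindextype nbytes = 0;
  if (N_VBufSize_Cuda(v, &nbytes)) return -1;

  void* buf = malloc((size_t) nbytes);
  if (buf == NULL) return -1;

  /* device data -> host buffer (synchronizes on the vector's stream) */
  if (N_VBufPack_Cuda(v, buf)) { free(buf); return -1; }

  /* host buffer -> device data */
  if (N_VBufUnpack_Cuda(v, buf)) { free(buf); return -1; }

  free(buf);
  return 0;
}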
#pragma once #include <iostream> #include <gunrock/app/problem_base.cuh> namespace gunrock { namespace app { namespace bc { /** * @brief Speciflying parameters for BC Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_problem(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(gunrock::app::UseParameters_problem(parameters)); return retval; } /** * @brief BetweennessCentrality Problem structure. * @tparam _GraphT Type of the graph * @tparam _ValueT Type of BC values, usually float or double * @tparam _FLAG Problem flags */ template <typename _GraphT, typename _ValueT = typename _GraphT::ValueT, ProblemFlag _FLAG = Problem_None> struct Problem : ProblemBase<_GraphT, _FLAG> { typedef _GraphT GraphT; static const ProblemFlag FLAG = _FLAG; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef _ValueT ValueT; typedef typename GraphT::CsrT CsrT; typedef typename GraphT::GpT GpT; typedef ProblemBase<GraphT, FLAG> BaseProblem; typedef DataSliceBase<GraphT, FLAG> BaseDataSlice; // Helper structures /** * @brief Data structure containing BC-specific data on indivual GPU. */ struct DataSlice : BaseDataSlice { // device storage arrays util::Array1D<SizeT, ValueT> bc_values; // Final BC values for each vertex util::Array1D<SizeT, ValueT> sigmas; // Accumulated sigma values for each vertex util::Array1D<SizeT, ValueT> deltas; // Accumulated delta values for each vertex VertexT src_node; // Source vertex ID util::Array1D<SizeT, VertexT> *forward_output; // Output vertex IDs by the forward pass std::vector<SizeT> *forward_queue_offsets; util::Array1D<SizeT, VertexT> original_vertex; util::Array1D<int, unsigned char> *barrier_markers; util::Array1D<SizeT, bool> first_backward_incoming; util::Array1D<SizeT, VertexT> local_vertices; util::Array1D<SizeT, bool> middle_event_set; util::Array1D<SizeT, cudaEvent_t> middle_events; VertexT middle_iteration; bool middle_finish; util::Array1D<SizeT, VertexT> preds; // predecessors of vertices util::Array1D<SizeT, VertexT> labels; // Source distance /* * @brief Default constructor */ DataSlice() : BaseDataSlice(), src_node(0), middle_iteration(0), middle_finish(false), forward_output(NULL), forward_queue_offsets(NULL), barrier_markers(NULL) { bc_values.SetName("bc_values"); sigmas.SetName("sigmas"); deltas.SetName("deltas"); original_vertex.SetName("original_vertex"); first_backward_incoming.SetName("first_backward_incoming"); local_vertices.SetName("local_vertices"); middle_event_set.SetName("middle_event_set"); middle_events.SetName("middle_events"); preds.SetName("preds"); labels.SetName("labels"); } /* * @brief Default destructor */ virtual ~DataSlice() { Release(); } /* * @brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx)); GUARD_CU(labels.Release(target)); GUARD_CU(preds.Release(target)); GUARD_CU(bc_values.Release(target)); GUARD_CU(sigmas.Release(target)); GUARD_CU(deltas.Release(target)); GUARD_CU(original_vertex.Release(target)); GUARD_CU(first_backward_incoming.Release(target)); GUARD_CU(local_vertices.Release(target)); GUARD_CU(middle_events.Release(target)); GUARD_CU(middle_event_set.Release(target)); for (int gpu = 0; gpu < 
this->num_gpus; gpu++) { GUARD_CU(forward_output[gpu].Release(target)); forward_queue_offsets[gpu].resize(0); } if (forward_output != NULL) { delete[] forward_output; forward_output = NULL; } if (forward_queue_offsets != NULL) { delete[] forward_queue_offsets; forward_queue_offsets = NULL; } barrier_markers = NULL; GUARD_CU(BaseDataSlice::Release(target)); return retval; } /** * @brief initializing bc-specific data on each gpu * @param sub_graph Sub graph on the GPU. * @param[in] gpu_idx GPU device index * @param[in] target Targeting device location * @param[in] flag Problem flag containling options * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0, util::Location target = util::DEVICE, ProblemFlag flag = Problem_None) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag)); GUARD_CU(labels.Allocate(sub_graph.nodes, target | util::HOST)); GUARD_CU(preds.Allocate(sub_graph.nodes, target)); GUARD_CU(bc_values.Allocate(sub_graph.nodes, target)); GUARD_CU(sigmas.Allocate(sub_graph.nodes, target | util::HOST)); GUARD_CU(deltas.Allocate(sub_graph.nodes, target)); GUARD_CU(bc_values.ForEach( [] __host__ __device__(ValueT & x) { x = (ValueT)0.0; }, sub_graph.nodes, target, this->stream)); forward_queue_offsets = new std::vector<SizeT>[num_gpus]; forward_output = new util::Array1D<SizeT, VertexT>[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { forward_queue_offsets[gpu].reserve(sub_graph.nodes); forward_queue_offsets[gpu].push_back(0); forward_output[gpu].SetName("forward_output[]"); GUARD_CU(forward_output[gpu].Allocate(sub_graph.nodes, target)); } if (target & util::DEVICE) { GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this->stream)); } return retval; } // Init /** * @brief Reset problem function. Must be called prior to each run. * @param[in] target Targeting device location * \return cudaError_t Error message(s), if any */ cudaError_t Reset(util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->sub_graph->nodes; // Ensure data are allocated GUARD_CU(labels.EnsureSize_(nodes, target)); GUARD_CU(preds.EnsureSize_(nodes, target)); GUARD_CU(bc_values.EnsureSize_(nodes, target)); GUARD_CU(sigmas.EnsureSize_(nodes, target)); GUARD_CU(deltas.EnsureSize_(nodes, target)); // Reset data GUARD_CU(labels.ForEach( [] __host__ __device__(VertexT & x) { x = util::PreDefinedValues<VertexT>::InvalidValue; //(VertexT)-1; }, nodes, target, this->stream)); GUARD_CU(preds.ForEach( [] __host__ __device__(VertexT & x) { x = util::PreDefinedValues<VertexT>::InvalidValue; //(VertexT)-2; }, nodes, target, this->stream)); // ?? Do I actually want to be resetting this? GUARD_CU(bc_values.ForEach( [] __host__ __device__(ValueT & x) { x = (ValueT)0.0; }, nodes, target, this->stream)); GUARD_CU(deltas.ForEach( [] __host__ __device__(ValueT & x) { x = (ValueT)0.0; }, nodes, target, this->stream)); GUARD_CU(sigmas.ForEach( [] __host__ __device__(ValueT & x) { x = (ValueT)0.0; }, nodes, target, this->stream)); // ?? 
Reset `src_node YC: in problem::Reset()` for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(forward_output[gpu].EnsureSize_(nodes, util::DEVICE)); forward_queue_offsets[gpu].clear(); forward_queue_offsets[gpu].reserve(nodes); forward_queue_offsets[gpu].push_back(0); if (this->num_gpus > 1) middle_event_set[gpu] = false; } middle_iteration = util::PreDefinedValues<VertexT>::InvalidValue; middle_finish = false; return retval; } }; // DataSlice // Members // Set of data slices (one for each GPU) util::Array1D<SizeT, DataSlice> *data_slices; // Methods /** * @brief BCProblem default constructor */ Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None) : BaseProblem(_parameters, _flag), data_slices(NULL) {} /** * @brief BCProblem default destructor */ virtual ~Problem() { Release(); } /* * @brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (data_slices == NULL) return retval; for (int gpu = 0; gpu < this->num_gpus; gpu++) GUARD_CU(data_slices[gpu].Release(target)); if ((target & util::HOST) != 0 && data_slices[0].GetPointer(util::DEVICE) == NULL) { delete[] data_slices; data_slices = NULL; } GUARD_CU(BaseProblem::Release(target)); return retval; } /** * \addtogroup PublicInterface * @{ */ /** * @brief Copy result distancess computed on GPUs back to host-side arrays. * @param[out] h_distances Host array to store computed vertex distances from * the source. \return cudaError_t Error message(s), if any */ cudaError_t Extract(ValueT *h_bc_values, ValueT *h_sigmas, VertexT *h_labels, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->org_graph->nodes; if (this->num_gpus == 1) { auto &data_slice = data_slices[0][0]; // Set device if (target == util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[0])); GUARD_CU( data_slice.bc_values.SetPointer(h_bc_values, nodes, util::HOST)); GUARD_CU(data_slice.bc_values.Move(util::DEVICE, util::HOST)); GUARD_CU(data_slice.sigmas.SetPointer(h_sigmas, nodes, util::HOST)); GUARD_CU(data_slice.sigmas.Move(util::DEVICE, util::HOST)); GUARD_CU(data_slice.labels.SetPointer(h_labels, nodes, util::HOST)); GUARD_CU(data_slice.labels.Move(util::DEVICE, util::HOST)); } else if (target == util::HOST) { GUARD_CU(data_slice.bc_values.ForEach( h_bc_values, [] __host__ __device__(const ValueT &x, ValueT &h_x) { h_x = x; }, nodes, util::HOST)); GUARD_CU(data_slice.sigmas.ForEach( h_sigmas, [] __host__ __device__(const ValueT &x, ValueT &h_x) { h_x = x; }, nodes, util::HOST)); GUARD_CU(data_slice.labels.ForEach( h_labels, [] __host__ __device__(const VertexT &x, VertexT &h_x) { h_x = x; }, nodes, util::HOST)); } } else { // TODO: extract the results from multiple GPUs, e.g.: } // Scale final results by 0.5 // YC: ? for (VertexT v = 0; v < nodes; ++v) { h_bc_values[v] *= (ValueT)0.5; } // Logging // for(VertexT v = 0; v < nodes; ++v) { // std::cout // << "v=" << v // << " | h_bc_values[v]=" << h_bc_values[v] // << std::endl; //} // for(VertexT v = 0; v < nodes; ++v) { // std::cout // << "v=" << v // << " | h_sigmas[v]=" << h_sigmas[v] // << std::endl; //} // for(VertexT v = 0; v < nodes; ++v) { // std::cout // << "v=" << v // << " | h_labels[v]=" << h_labels[v] // << std::endl; //} return retval; } /** * @brief initialization function. 
* @param graph The graph that BC processes on * @param[in] Location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseProblem::Init(graph, target)); data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]"); if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST)); auto &data_slice = data_slices[gpu][0]; GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus, this->gpu_idx[gpu], target, this->flag)); } // end for (gpu) for (int gpu = 0; gpu < this->num_gpus; gpu++) { if (target & util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU2(cudaStreamSynchronize(data_slices[gpu]->stream), "cudaStreamSynchronize failed"); } } return retval; } /** * @brief Reset problem function. Must be called prior to each run. * @param[in] src Source vertex to start. * @param[in] location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Reset(VertexT src, util::Location target = util::DEVICE) { std::cout << "Problem->Reset(" << src << ")" << std::endl; cudaError_t retval = cudaSuccess; for (int gpu = 0; gpu < this->num_gpus; ++gpu) { // Set device if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu]->Reset(target)); GUARD_CU(data_slices[gpu].Move(util::HOST, target)); } int gpu; VertexT tsrc; if (this->num_gpus <= 1) { gpu = 0; tsrc = src; } else { // TODO } GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); VertexT src_label = 0; VertexT src_pred = util::PreDefinedValues<VertexT>::InvalidValue; ValueT src_sigma = 1.0; data_slices[gpu]->src_node = tsrc; if (target & util::HOST) { data_slices[gpu]->labels[tsrc] = src_label; data_slices[gpu]->preds[tsrc] = src_pred; data_slices[gpu]->sigmas[tsrc] = src_sigma; } if (target & util::DEVICE) { GUARD_CU2( cudaMemcpy(data_slices[gpu]->labels.GetPointer(util::DEVICE) + tsrc, &src_label, sizeof(VertexT), cudaMemcpyHostToDevice), "BCProblem cudaMemcpy labels failed"); GUARD_CU2( cudaMemcpy(data_slices[gpu]->preds.GetPointer(util::DEVICE) + tsrc, &src_pred, sizeof(VertexT), cudaMemcpyHostToDevice), "BCProblem cudaMemcpy preds failed"); GUARD_CU2( cudaMemcpy(data_slices[gpu]->sigmas.GetPointer(util::DEVICE) + tsrc, &src_sigma, sizeof(ValueT), cudaMemcpyHostToDevice), "BCProblem cudaMemcpy sigmas failed"); } GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); return retval; } /** @} */ }; } // namespace bc } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
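// A hedged driver sketch for the Problem lifecycle defined above: Init on the
// device, Reset for a chosen source, run an enactor (omitted here), then
// Extract the per-vertex BC values, sigmas and labels back to host arrays.
// The function name and the single-GPU flow are illustrative only.
template <typename GraphT>
cudaError_t RunBCSketch(gunrock::util::Parameters &parameters, GraphT &graph,
                        typename GraphT::VertexT src,
                        typename GraphT::ValueT *h_bc_values,
                        typename GraphT::ValueT *h_sigmas,
                        typename GraphT::VertexT *h_labels) {
  cudaError_t retval = cudaSuccess;
  gunrock::app::bc::Problem<GraphT> problem(parameters);

  retval = problem.Init(graph, gunrock::util::DEVICE);
  if (retval != cudaSuccess) return retval;

  retval = problem.Reset(src, gunrock::util::DEVICE);
  if (retval != cudaSuccess) return retval;

  // ... a BC enactor would perform the forward/backward passes here ...

  // Copies results to host; the single-GPU path also scales bc_values by 0.5
  retval = problem.Extract(h_bc_values, h_sigmas, h_labels, gunrock::util::DEVICE);
  return retval;
}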
typedef unsigned int uint_t; /* native unsigned integer */ #define SKEIN_MODIFIER_WORDS ( 2) /* number of modifier (tweak) words */ #define SKEIN_256_STATE_WORDS ( 4) #define SKEIN_512_STATE_WORDS ( 8) #define SKEIN1024_STATE_WORDS (16) #define SKEIN_256_STATE_BYTES ( 8*SKEIN_256_STATE_WORDS) #define SKEIN_512_STATE_BYTES ( 8*SKEIN_512_STATE_WORDS) #define SKEIN1024_STATE_BYTES ( 8*SKEIN1024_STATE_WORDS) #define SKEIN_256_STATE_BITS (64*SKEIN_256_STATE_WORDS) #define SKEIN_512_STATE_BITS (64*SKEIN_512_STATE_WORDS) #define SKEIN1024_STATE_BITS (64*SKEIN1024_STATE_WORDS) #define SKEIN_256_BLOCK_BYTES ( 8*SKEIN_256_STATE_WORDS) #define SKEIN_512_BLOCK_BYTES ( 8*SKEIN_512_STATE_WORDS) #define SKEIN1024_BLOCK_BYTES ( 8*SKEIN1024_STATE_WORDS) #define SKEIN_MK_64(hi32,lo32) ((lo32) + (((uint64_t) (hi32)) << 32)) #define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA,0xA9FC1A22) #define SKEIN_T1_BIT(BIT) ((BIT) - 64) /* offset 64 because it's the second word */ #define SKEIN_T1_POS_FIRST SKEIN_T1_BIT(126) /* bits 126 : first block flag */ #define SKEIN_T1_POS_BIT_PAD SKEIN_T1_BIT(119) /* bit 119 : partial final input byte */ #define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* bit 127 : final block flag */ #define SKEIN_T1_POS_BLK_TYPE SKEIN_T1_BIT(120) /* bits 120..125: type field */ #define SKEIN_T1_FLAG_FIRST (((uint64_t) 1 ) << SKEIN_T1_POS_FIRST) #define SKEIN_T1_FLAG_BIT_PAD (((uint64_t) 1 ) << SKEIN_T1_POS_BIT_PAD) #define SKEIN_T1_FLAG_FINAL (((uint64_t) 1 ) << SKEIN_T1_POS_FINAL) #define SKEIN_BLK_TYPE_MSG (48) /* message processing */ #define SKEIN_BLK_TYPE_OUT (63) /* output stage */ #define SKEIN_T1_BLK_TYPE(T) (((uint64_t) (SKEIN_BLK_TYPE_##T)) << SKEIN_T1_POS_BLK_TYPE) #define SKEIN_T1_BLK_TYPE_MSG SKEIN_T1_BLK_TYPE(MSG) /* message processing */ #define SKEIN_T1_BLK_TYPE_OUT SKEIN_T1_BLK_TYPE(OUT) /* output stage */ #define SKEIN_T1_BLK_TYPE_OUT_FINAL (SKEIN_T1_BLK_TYPE_OUT | SKEIN_T1_FLAG_FINAL) #define Skein_Set_Tweak(ctxPtr,TWK_NUM,tVal) {(ctxPtr)->h.T[TWK_NUM] = (tVal);} #define Skein_Set_T0(ctxPtr,T0) Skein_Set_Tweak(ctxPtr,0,T0) #define Skein_Set_T1(ctxPtr,T1) Skein_Set_Tweak(ctxPtr,1,T1) #define Skein_Set_T0_T1(ctxPtr,T0,T1) { \ Skein_Set_T0(ctxPtr,(T0)); \ Skein_Set_T1(ctxPtr,(T1)); } #define Skein_Start_New_Type(ctxPtr,BLK_TYPE) \ { Skein_Set_T0_T1(ctxPtr,0,SKEIN_T1_FLAG_FIRST | SKEIN_T1_BLK_TYPE_##BLK_TYPE); (ctxPtr)->h.bCnt=0; } #define Skein_Set_Bit_Pad_Flag(hdr) { (hdr).T[1] |= SKEIN_T1_FLAG_BIT_PAD; } #define KW_TWK_BASE (0) #define KW_KEY_BASE (3) #define ks (kw + KW_KEY_BASE) #define ts (kw + KW_TWK_BASE) #define R512(p0,p1,p2,p3,p4,p5,p6,p7,R512ROT,rNum) \ X##p0 += X##p1; X##p1 = ROTL64(X##p1,R512ROT##_0); X##p1 ^= X##p0; \ X##p2 += X##p3; X##p3 = ROTL64(X##p3,R512ROT##_1); X##p3 ^= X##p2; \ X##p4 += X##p5; X##p5 = ROTL64(X##p5,R512ROT##_2); X##p5 ^= X##p4; \ X##p6 += X##p7; X##p7 = ROTL64(X##p7,R512ROT##_3); X##p7 ^= X##p6; #define I512(R) \ X0 += ks[((R)+1) % 9]; \ X1 += ks[((R)+2) % 9]; \ X2 += ks[((R)+3) % 9]; \ X3 += ks[((R)+4) % 9]; \ X4 += ks[((R)+5) % 9]; \ X5 += ks[((R)+6) % 9] + ts[((R)+1) % 3]; \ X6 += ks[((R)+7) % 9] + ts[((R)+2) % 3]; \ X7 += ks[((R)+8) % 9] + (R)+1; #define R512_8_rounds(R) \ R512(0,1,2,3,4,5,6,7,R_512_0,8*(R)+ 1); \ R512(2,1,4,7,6,5,0,3,R_512_1,8*(R)+ 2); \ R512(4,1,6,3,0,5,2,7,R_512_2,8*(R)+ 3); \ R512(6,1,0,7,2,5,4,3,R_512_3,8*(R)+ 4); \ I512(2*(R)); \ R512(0,1,2,3,4,5,6,7,R_512_4,8*(R)+ 5); \ R512(2,1,4,7,6,5,0,3,R_512_5,8*(R)+ 6); \ R512(4,1,6,3,0,5,2,7,R_512_6,8*(R)+ 7); \ R512(6,1,0,7,2,5,4,3,R_512_7,8*(R)+ 8); \ I512(2*(R)+1); typedef struct 
{ size_t hashBitLen; size_t bCnt; uint64_t T[SKEIN_MODIFIER_WORDS]; } Skein_Ctxt_Hdr_t; typedef struct { Skein_Ctxt_Hdr_t h; uint64_t X[SKEIN_256_STATE_WORDS]; uint8_t b[SKEIN_256_BLOCK_BYTES]; } Skein_256_Ctxt_t; typedef struct { Skein_Ctxt_Hdr_t h; uint64_t X[SKEIN_512_STATE_WORDS]; uint8_t b[SKEIN_512_BLOCK_BYTES]; } Skein_512_Ctxt_t; typedef struct { Skein_Ctxt_Hdr_t h; uint64_t X[SKEIN1024_STATE_WORDS]; uint8_t b[SKEIN1024_BLOCK_BYTES]; } Skein1024_Ctxt_t; typedef struct { uint_t statebits; union { Skein_Ctxt_Hdr_t h; Skein_256_Ctxt_t ctx_256; Skein_512_Ctxt_t ctx_512; Skein1024_Ctxt_t ctx1024; } u; } skeinHashState; __device__ void cn_skein_init(skeinHashState *state, size_t hashBitLen) { const uint64_t SKEIN_512_IV_256[] = { SKEIN_MK_64(0xCCD044A1,0x2FDB3E13), SKEIN_MK_64(0xE8359030,0x1A79A9EB), SKEIN_MK_64(0x55AEA061,0x4F816E6F), SKEIN_MK_64(0x2A2767A4,0xAE9B94DB), SKEIN_MK_64(0xEC06025E,0x74DD7683), SKEIN_MK_64(0xE7A436CD,0xC4746251), SKEIN_MK_64(0xC36FBAF9,0x393AD185), SKEIN_MK_64(0x3EEDBA18,0x33EDFC13) }; Skein_512_Ctxt_t *ctx = &state->u.ctx_512; ctx->h.hashBitLen = hashBitLen; memcpy(ctx->X, SKEIN_512_IV_256, sizeof(ctx->X)); Skein_Start_New_Type(ctx, MSG); } __device__ void cn_skein512_processblock(Skein_512_Ctxt_t * __restrict__ ctx, const uint8_t * __restrict__ blkPtr, size_t blkCnt, size_t byteCntAdd) { enum { R_512_0_0=46, R_512_0_1=36, R_512_0_2=19, R_512_0_3=37, R_512_1_0=33, R_512_1_1=27, R_512_1_2=14, R_512_1_3=42, R_512_2_0=17, R_512_2_1=49, R_512_2_2=36, R_512_2_3=39, R_512_3_0=44, R_512_3_1= 9, R_512_3_2=54, R_512_3_3=56, R_512_4_0=39, R_512_4_1=30, R_512_4_2=34, R_512_4_3=24, R_512_5_0=13, R_512_5_1=50, R_512_5_2=10, R_512_5_3=17, R_512_6_0=25, R_512_6_1=29, R_512_6_2=39, R_512_6_3=43, R_512_7_0= 8, R_512_7_1=35, R_512_7_2=56, R_512_7_3=22 }; uint64_t X0,X1,X2,X3,X4,X5,X6,X7; uint64_t w[SKEIN_512_STATE_WORDS]; uint64_t kw[SKEIN_512_STATE_WORDS+4]; ts[0] = ctx->h.T[0]; ts[1] = ctx->h.T[1]; do { ts[0] += byteCntAdd; ks[0] = ctx->X[0]; ks[1] = ctx->X[1]; ks[2] = ctx->X[2]; ks[3] = ctx->X[3]; ks[4] = ctx->X[4]; ks[5] = ctx->X[5]; ks[6] = ctx->X[6]; ks[7] = ctx->X[7]; ks[8] = ks[0] ^ ks[1] ^ ks[2] ^ ks[3] ^ ks[4] ^ ks[5] ^ ks[6] ^ ks[7] ^ SKEIN_KS_PARITY; ts[2] = ts[0] ^ ts[1]; memcpy(w, blkPtr, SKEIN_512_STATE_WORDS << 3); X0 = w[0] + ks[0]; X1 = w[1] + ks[1]; X2 = w[2] + ks[2]; X3 = w[3] + ks[3]; X4 = w[4] + ks[4]; X5 = w[5] + ks[5] + ts[0]; X6 = w[6] + ks[6] + ts[1]; X7 = w[7] + ks[7]; blkPtr += SKEIN_512_BLOCK_BYTES; R512_8_rounds( 0); R512_8_rounds( 1); R512_8_rounds( 2); R512_8_rounds( 3); R512_8_rounds( 4); R512_8_rounds( 5); R512_8_rounds( 6); R512_8_rounds( 7); R512_8_rounds( 8); ctx->X[0] = X0 ^ w[0]; ctx->X[1] = X1 ^ w[1]; ctx->X[2] = X2 ^ w[2]; ctx->X[3] = X3 ^ w[3]; ctx->X[4] = X4 ^ w[4]; ctx->X[5] = X5 ^ w[5]; ctx->X[6] = X6 ^ w[6]; ctx->X[7] = X7 ^ w[7]; ts[1] &= ~SKEIN_T1_FLAG_FIRST; } while (--blkCnt); ctx->h.T[0] = ts[0]; ctx->h.T[1] = ts[1]; } __device__ void cn_skein_final(skeinHashState * __restrict__ state, uint8_t * __restrict__ hashVal) { size_t i,n,byteCnt; uint64_t X[SKEIN_512_STATE_WORDS]; Skein_512_Ctxt_t *ctx = (Skein_512_Ctxt_t *)&state->u.ctx_512; //size_t tmp; //uint8_t *p8; //uint64_t *p64; ctx->h.T[1] |= SKEIN_T1_FLAG_FINAL; if (ctx->h.bCnt < SKEIN_512_BLOCK_BYTES) { memset(&ctx->b[ctx->h.bCnt],0,SKEIN_512_BLOCK_BYTES - ctx->h.bCnt); //p8 = &ctx->b[ctx->h.bCnt]; //tmp = SKEIN_512_BLOCK_BYTES - ctx->h.bCnt; //for( i = 0; i < tmp; i++ ) *(p8+i) = 0; } cn_skein512_processblock(ctx,ctx->b,1,ctx->h.bCnt); byteCnt = (ctx->h.hashBitLen + 7) 
>> 3; //uint8_t b[SKEIN_512_BLOCK_BYTES] == 64 memset(ctx->b,0,sizeof(ctx->b)); //p64 = (uint64_t *)ctx->b; //for( i = 0; i < 8; i++ ) *(p64+i) = 0; memcpy(X,ctx->X,sizeof(X)); for (i=0;i*SKEIN_512_BLOCK_BYTES < byteCnt;i++) { ((uint64_t *)ctx->b)[0]= (uint64_t)i; Skein_Start_New_Type(ctx,OUT_FINAL); cn_skein512_processblock(ctx,ctx->b,1,sizeof(uint64_t)); n = byteCnt - i*SKEIN_512_BLOCK_BYTES; if (n >= SKEIN_512_BLOCK_BYTES) n = SKEIN_512_BLOCK_BYTES; memcpy(hashVal+i*SKEIN_512_BLOCK_BYTES,ctx->X,n); memcpy(ctx->X,X,sizeof(X)); /* restore the counter mode key for next time */ } } __device__ void cn_skein512_update(Skein_512_Ctxt_t * __restrict__ ctx, const uint8_t * __restrict__ msg, size_t msgByteCnt) { size_t n; if (msgByteCnt + ctx->h.bCnt > SKEIN_512_BLOCK_BYTES) { if (ctx->h.bCnt) { n = SKEIN_512_BLOCK_BYTES - ctx->h.bCnt; if (n) { memcpy(&ctx->b[ctx->h.bCnt],msg,n); msgByteCnt -= n; msg += n; ctx->h.bCnt += n; } cn_skein512_processblock(ctx,ctx->b,1,SKEIN_512_BLOCK_BYTES); ctx->h.bCnt = 0; } if (msgByteCnt > SKEIN_512_BLOCK_BYTES) { n = (msgByteCnt-1) / SKEIN_512_BLOCK_BYTES; cn_skein512_processblock(ctx,msg,n,SKEIN_512_BLOCK_BYTES); msgByteCnt -= n * SKEIN_512_BLOCK_BYTES; msg += n * SKEIN_512_BLOCK_BYTES; } } if (msgByteCnt) { memcpy(&ctx->b[ctx->h.bCnt],msg,msgByteCnt); ctx->h.bCnt += msgByteCnt; } } __device__ void cn_skein_update(skeinHashState * __restrict__ state, const BitSequence * __restrict__ data, DataLength databitlen) { if ((databitlen & 7) == 0) { cn_skein512_update(&state->u.ctx_512,data,databitlen >> 3); } else { size_t bCnt = (databitlen >> 3) + 1; uint8_t b,mask; mask = (uint8_t) (1u << (7 - (databitlen & 7))); b = (uint8_t) ((data[bCnt-1] & (0-mask)) | mask); cn_skein512_update(&state->u.ctx_512,data,bCnt-1); cn_skein512_update(&state->u.ctx_512,&b , 1 ); Skein_Set_Bit_Pad_Flag(state->u.h); } } __device__ void cn_skein(const BitSequence * __restrict__ data, DataLength len, BitSequence * __restrict__ hashval) { int hashbitlen = 256; DataLength databitlen = len << 3; skeinHashState state; state.statebits = 64*SKEIN_512_STATE_WORDS; cn_skein_init(&state, hashbitlen); cn_skein_update(&state, data, databitlen); cn_skein_final(&state, hashval); }
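// A hedged batch-hashing sketch built on cn_skein() above: one thread per
// message, fixed-length inputs packed back to back, and 32-byte (256-bit)
// digests packed the same way. The kernel name and data layout are
// illustrative; BitSequence/DataLength are assumed to be the usual
// byte/length typedefs used by this code base.
__global__ void cn_skein_batch_sketch(const BitSequence * __restrict__ inputs,
                                      DataLength input_len,
                                      BitSequence * __restrict__ hashes,
                                      uint32_t count)
{
  uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= count) return;
  // cn_skein() is hard-wired to a 256-bit digest, i.e. 32 output bytes
  cn_skein(inputs + (size_t) tid * input_len, input_len,
           hashes + (size_t) tid * 32);
}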
#include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include "ssids/gpu/kernels/datatypes.h" #include "cuda/cuda_check.h" #define min(x,y) ((x) < (y) ? (x) : (y)) #define BLOCK_SIZE 8 #define MAX_CUDA_BLOCKS 65535 //#define SM_3X (__CUDA_ARCH__ == 300 || __CUDA_ARCH__ == 350 || __CUDA_ARCH__ == 370) //FIXME: Verify if the code for Keplers (sm_3x) is still correct for the later GPUs. #define SM_3X (__CUDA_ARCH__ >= 300) using namespace spral::ssids::gpu; namespace /* anon */ { template< typename ELEMENT_TYPE > __global__ void cu_copy_mc( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* mask ) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; if ( i < nrows && j < ncols && mask[j] > 0 ) b[i + ldb*j] = a[i + lda*j]; } template< typename ELEMENT_TYPE > __global__ void cu_copy_ic( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* ind ) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; if ( i < nrows && j < ncols && ind[j] > 0 ) b[i + ldb*(ind[j] - 1)] = a[i + lda*j]; } template< typename ELEMENT_TYPE > __global__ void cu_swap_ni2D_ic( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* index ) // swaps columns of non-intersecting 2D arrays a(1:n,index(1:m)) and b(1:n,1:m) // index is one-based { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int k; double s; if ( i < nrows && j < ncols && (k = index[j] - 1) > -1 ) { s = a[i + lda*k]; a[i + lda*k] = b[i + ldb*j]; b[i + ldb*j] = s; } } template< typename ELEMENT_TYPE > __global__ void cu_swap_ni2D_ir( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* index ) // swaps rows of non-intersecting 2D arrays a(index(1:n),1:m) and b(1:n,1:m) // index is one-based { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int k; double s; if ( i < nrows && j < ncols && (k = index[i] - 1) > -1 ) { s = a[k + lda*j]; a[k + lda*j] = b[i + ldb*j]; b[i + ldb*j] = s; } } struct multiswap_type { int nrows; int ncols; int k; double *lcol; int lda; int off; }; template< typename ELEMENT_TYPE > __global__ void cu_multiswap_ni2D_c( struct multiswap_type *swapdata ) // swaps non-intersecting rows or cols of a 2D multiarray a { swapdata += blockIdx.x; int nrows = swapdata->nrows; if ( blockIdx.y*blockDim.x >= nrows ) return; int k = swapdata->k; ELEMENT_TYPE *a = swapdata->lcol; int lda = swapdata->lda; int off = lda*swapdata->off; ELEMENT_TYPE s; for ( int i = threadIdx.x + blockIdx.y*blockDim.x; i < nrows; i += blockDim.x*gridDim.y ) for ( int j = threadIdx.y; j < k; j += blockDim.y ) { s = a[i + lda*j]; a[i + lda*j] = a[off + i + lda*j]; a[off + i + lda*j] = s; } } template< typename ELEMENT_TYPE > __global__ void cu_multiswap_ni2D_r( struct multiswap_type *swapdata ) // swaps non-intersecting rows or cols of a 2D multiarray a { swapdata += blockIdx.x; int ncols = swapdata->ncols; if ( blockIdx.y*blockDim.y >= ncols ) return; int k = swapdata->k; ELEMENT_TYPE *a = swapdata->lcol; int lda = swapdata->lda; int off = swapdata->off; ELEMENT_TYPE s; for ( int i = threadIdx.x; i < k; i += blockDim.x ) for ( int j = threadIdx.y + blockIdx.y*blockDim.y; j < ncols; j += blockDim.y*gridDim.y ) { s = a[i + lda*j]; a[i + lda*j] = a[off + i + lda*j]; a[off + i + lda*j] = s; } } template< typename ELEMENT_TYPE > __global__ void cu_reorder_rows( int nrows, int ncols, 
ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* index ) { int x; int y = threadIdx.y + blockIdx.y*blockDim.y; for ( x = threadIdx.x; x < nrows; x += blockDim.x ) if ( y < ncols ) b[index[x] - 1 + ldb*y] = a[x + lda*y]; __syncthreads(); for ( x = threadIdx.x; x < nrows; x += blockDim.x ) if ( y < ncols ) a[x + lda*y] = b[x + ldb*y]; } template< typename ELEMENT_TYPE, unsigned int SIZE_X, unsigned int SIZE_Y > __global__ void cu_reorder_cols2( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* index, int mode ) { int ix = threadIdx.x + blockIdx.x*blockDim.x; __shared__ volatile ELEMENT_TYPE work[SIZE_X*SIZE_Y]; if ( blockIdx.y ) { if ( mode > 0 ) { if ( ix < nrows && threadIdx.y < ncols ) work[threadIdx.x + (index[threadIdx.y] - 1)*SIZE_X] = a[ix + lda*threadIdx.y]; } else { if ( ix < nrows && threadIdx.y < ncols ) work[threadIdx.x + threadIdx.y*SIZE_X] = a[ix + lda*(index[threadIdx.y] - 1)]; } __syncthreads(); if ( ix < nrows && threadIdx.y < ncols ) a[ix + lda*threadIdx.y] = work[threadIdx.x + threadIdx.y*SIZE_X]; } else { if ( mode > 0 ) { if ( ix < nrows && threadIdx.y < ncols ) work[threadIdx.x + (index[threadIdx.y] - 1)*SIZE_X] = b[ix + ldb*threadIdx.y]; } else { if ( ix < nrows && threadIdx.y < ncols ) work[threadIdx.x + threadIdx.y*SIZE_X] = b[ix + ldb*(index[threadIdx.y] - 1)]; } __syncthreads(); if ( ix < nrows && threadIdx.y < ncols ) b[ix + ldb*threadIdx.y] = work[threadIdx.x + threadIdx.y*SIZE_X]; } } template< typename ELEMENT_TYPE, unsigned int SIZE_X, unsigned int SIZE_Y > __global__ void cu_reorder_rows2( int nrows, int ncols, ELEMENT_TYPE* a, int lda, ELEMENT_TYPE* b, int ldb, int* index, int mode ) { int iy = threadIdx.y + blockIdx.x*blockDim.y; __shared__ volatile ELEMENT_TYPE work[SIZE_X*SIZE_Y]; if ( blockIdx.y ) { if ( mode > 0 ) { if ( threadIdx.x < nrows && iy < ncols ) work[index[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = a[threadIdx.x + lda*iy]; } else { if ( threadIdx.x < nrows && iy < ncols ) work[threadIdx.x + threadIdx.y*SIZE_X] = a[index[threadIdx.x] - 1 + lda*iy]; } __syncthreads(); if ( threadIdx.x < nrows && iy < ncols ) a[threadIdx.x + lda*iy] = work[threadIdx.x + threadIdx.y*SIZE_X]; } else { if ( mode > 0 ) { if ( threadIdx.x < nrows && iy < ncols ) work[index[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = b[threadIdx.x + ldb*iy]; } else { if ( threadIdx.x < nrows && iy < ncols ) work[threadIdx.x + threadIdx.y*SIZE_X] = b[index[threadIdx.x] - 1 + ldb*iy]; } __syncthreads(); if ( threadIdx.x < nrows && iy < ncols ) b[threadIdx.x + ldb*iy] = work[threadIdx.x + threadIdx.y*SIZE_X]; } } /* * Copies new L factors back to A array without any permutation */ template< typename ELEMENT_TYPE, int NTX > __device__ void __forceinline__ // Required to avoid errors about reg counts compiling with -G copy_L_LD_no_perm( int nblk, int bidx, int tid, int nrows, int ncols, ELEMENT_TYPE *dest, int ldd, const ELEMENT_TYPE *src, int lds ) { int tx = tid % NTX; int ty = tid / NTX; src += NTX*bidx; dest += NTX*bidx; nrows -= NTX*bidx; if ( ty < ncols ) { for ( int x = tx; x < nrows; x += NTX*nblk ) dest[x + ldd*ty] = src[x + lds*ty]; } } /* Shuffles the permutation vector using shared memory [in case it overlaps itself] */ template < int SIZE_X > __device__ void shuffle_perm_shmem( int n, volatile const int *const indr, int *perm ) { // Update permutation __shared__ volatile int iwork[SIZE_X]; if ( threadIdx.x < n && threadIdx.y == 0 ) iwork[indr[threadIdx.x] - 1] = perm[threadIdx.x]; __syncthreads(); if ( threadIdx.x < n && threadIdx.y == 0 ) 
perm[threadIdx.x] = iwork[threadIdx.x]; } /* * Copies new L factors back to A array and applies permutation to rows and cols * This version uses shared memory and is designed for the case when the new * and old location of columns and rows overlap. */ template< typename ELEMENT_TYPE, unsigned int SIZE_X, unsigned int SIZE_Y > __device__ void __forceinline__ // Required to avoid errors about reg counts compiling with -G copy_L_LD_perm_shmem( int block, int nblocks, int done, int pivoted, int delayed, int nrows, int ncols, int ib, int jb, int offc, int offp, int ld, volatile int *const indr, double *a, double *b, const double *c, int *perm ) { __shared__ volatile ELEMENT_TYPE work1[SIZE_X*SIZE_Y]; __shared__ volatile ELEMENT_TYPE work2[SIZE_X*SIZE_Y]; #if (!SM_3X) __shared__ volatile ELEMENT_TYPE work3[SIZE_X*SIZE_Y]; __shared__ volatile ELEMENT_TYPE work4[SIZE_X*SIZE_Y]; #endif // Extend permutation array to cover non-pivoted columns if ( threadIdx.x == 0 && threadIdx.y == 0 ) { int i = 0; int j = pivoted; for ( ; i < delayed; i++ ) indr[i] = ++j; for ( ; i < delayed + jb - ib + 1; i++ ) if ( !indr[i] ) indr[i] = ++j; } int off = done*ld; // We handle the (done-jb) x (done-jb) block that requires both // row and column permutations seperately using the first block. // All remaining rows and columns are handlded by the remaining blocks. // Note that while we do not need to perumute "above" the pivoted columns, // we do need to permute to the "left" of the pivoted rows! if ( block ) { // Swap columns of A and copy in L, but avoiding rows that need // permuted // Also, swap cols of LD but avoiding rows that need permuted int baseStep = blockDim.x*(nblocks - 1); #if (SM_3X) for ( int i = jb + blockDim.x*(block - 1); i < nrows; i += baseStep ) { #else for ( int i = jb + blockDim.x*(block - 1); i < nrows + baseStep; i += baseStep * 2 ) { #endif int ix = i + threadIdx.x; #if (!SM_3X) int ix2 = ix + baseStep; #endif __syncthreads(); if (threadIdx.y < jb - done) { #if (!SM_3X) if ( ix2 < nrows ) { if ( indr[threadIdx.y] > pivoted ) { work1[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = a[off + ix + ld*threadIdx.y]; work3[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = a[off + ix2 + ld*threadIdx.y]; } else { work1[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = c[offc + ix + ld*(threadIdx.y - delayed)]; work3[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = c[offc + ix2 + ld*(threadIdx.y - delayed)]; } work2[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = b[off + ix + ld*threadIdx.y]; work4[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = b[off + ix2 + ld*threadIdx.y]; } else #endif if ( ix < nrows ) { if ( indr[threadIdx.y] > pivoted ) work1[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = a[off + ix + ld*threadIdx.y]; else work1[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = c[offc + ix + ld*(threadIdx.y - delayed)]; work2[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = b[off + ix + ld*threadIdx.y]; } } __syncthreads(); if (threadIdx.y < jb - done) { #if (!SM_3X) if ( ix2 < nrows) { a[off + ix + ld*threadIdx.y] = work1[threadIdx.x + threadIdx.y*SIZE_X]; a[off + ix2 + ld*threadIdx.y] = work3[threadIdx.x + threadIdx.y*SIZE_X]; b[off + ix + ld*threadIdx.y] = work2[threadIdx.x + threadIdx.y*SIZE_X]; b[off + ix2 + ld*threadIdx.y] = work4[threadIdx.x + threadIdx.y*SIZE_X]; } else #endif if ( ix < nrows) { a[off + ix + ld*threadIdx.y] = work1[threadIdx.x + threadIdx.y*SIZE_X]; b[off + ix + ld*threadIdx.y] = work2[threadIdx.x + threadIdx.y*SIZE_X]; } } } if ( (block - 1)*blockDim.y >= ncols ) return; // 
Block not needed for y direction (Note that n <= m always) off -= done*ld; off += done; // Swap rows of A baseStep = blockDim.y*(nblocks - 1); #if (SM_3X) for ( int i = blockDim.y*(block - 1); i < ncols; i += baseStep ) { #else for ( int i = blockDim.y*(block - 1); i < ncols + baseStep; i += baseStep * 2 ) { #endif int iy = i + threadIdx.y; #if (!SM_3X) int iy2 = iy + baseStep; #endif __syncthreads(); if ( !(iy >= done && iy < jb) && iy < ncols && threadIdx.x < jb - done ) { work1[indr[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = a[off + threadIdx.x + ld*iy]; work2[indr[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = b[off + threadIdx.x + ld*iy]; } #if (!SM_3X) if ( !(iy2 >= done && iy2 < jb) && iy2 < ncols && threadIdx.x < jb - done ) { work3[indr[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = a[off + threadIdx.x + ld*iy2]; work4[indr[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = b[off + threadIdx.x + ld*iy2]; } #endif __syncthreads(); if ( !(iy >= done && iy < jb) && iy < ncols && threadIdx.x < jb - done ) { a[off + threadIdx.x + ld*iy] = work1[threadIdx.x + threadIdx.y*SIZE_X]; b[off + threadIdx.x + ld*iy] = work2[threadIdx.x + threadIdx.y*SIZE_X]; } #if (!SM_3X) if ( !(iy2 >= done && iy2 < jb) && iy2 < ncols && threadIdx.x < jb - done ) { a[off + threadIdx.x + ld*iy2] = work3[threadIdx.x + threadIdx.y*SIZE_X]; b[off + threadIdx.x + ld*iy2] = work4[threadIdx.x + threadIdx.y*SIZE_X]; } #endif } } else { // Handle (jb-done) x (jb-done) block that needs both // row /and/ column permutations. shuffle_perm_shmem< SIZE_X > ( delayed + jb - ib + 1, indr, &perm[offp + done] ); int pass = threadIdx.x < jb - done && threadIdx.y < jb - done; // Handle L and LD if ( pass ) { // Column permtuations + copy from c[] if ( indr[threadIdx.y] > pivoted ) work1[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = a[off + done + threadIdx.x + ld*threadIdx.y]; else work1[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = c[offc + done + threadIdx.x + ld*(threadIdx.y - delayed)]; work2[threadIdx.x + (indr[threadIdx.y] - 1)*SIZE_X] = b[off + done + threadIdx.x + ld*threadIdx.y]; } __syncthreads(); // Row permutations if ( pass ) { a[off + done + threadIdx.x + ld*threadIdx.y] = work1[threadIdx.x + threadIdx.y*SIZE_X]; b[off + done + threadIdx.x + ld*threadIdx.y] = work2[threadIdx.x + threadIdx.y*SIZE_X]; off -= done*nrows; off += done; } __syncthreads(); if ( pass ) { work1[indr[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = a[off + threadIdx.x + ld*(done + threadIdx.y)]; work2[indr[threadIdx.x] - 1 + threadIdx.y*SIZE_X] = b[off + threadIdx.x + ld*(done + threadIdx.y)]; } __syncthreads(); if ( pass ) { a[off + threadIdx.x + ld*(done + threadIdx.y)] = work1[threadIdx.x + threadIdx.y*SIZE_X]; b[off + threadIdx.x + ld*(done + threadIdx.y)] = work2[threadIdx.x + threadIdx.y*SIZE_X]; } } } /* * Copies new L factors back to A array and applies permutation to rows and cols * This version does this directly in global memory and is designed for the case * when the new and old location of columns and rows DO NOT overlap. 
*/ template< typename ELEMENT_TYPE, unsigned int SIZE_X, unsigned int SIZE_Y > __device__ void __forceinline__ // Required to avoid errors about reg counts compiling with -G copy_L_LD_perm_noshmem( int node, int block, int nblocks, int done, int pivoted, int delayed, int nrows, int ncols, int ib, int jb, int offc, int offp, int ld, const int *ind, const volatile int *const indf, double *a, double *b, const double *c, int *perm ) { int off1 = done; int off2 = ib - 1; int offi = node*SIZE_Y/2; // We handle the two pivoted x pivoted blocks where row and columns cross // over seperately using the first block. // The other blocks just exclude these rows/cols as appropriate // All remaining rows and columns are handlded by the remaining blocks. if ( block ) { // Handle parts of matrix that require EITHER row OR col shuffle int tx = (threadIdx.y < SIZE_Y/2) ? threadIdx.x : threadIdx.x + blockDim.x; int ty = (threadIdx.y < SIZE_Y/2) ? threadIdx.y : threadIdx.y - SIZE_Y/2; // Swap a[:,done:done+pivoted] and a[:,ib:jb] pulling in c[] as we go for ( int x = tx + 2*blockDim.x*(block - 1); x < nrows && ty < jb - ib + 1; x += 2*blockDim.x*(nblocks - 1) ) { int y = ind[offi + ty] - 1; if ( (x >= done && x < done + jb - ib + 1) || (x >= ib - 1 && x < jb) || y < 0 ) continue; // handled separately a[x + ld*(off2 + ty)] = a[x + ld*(off1 + y)]; a[x + ld*(off1 + y)] = c[offc + x + ld*ty]; } // Swap b[:,done:done+pivoted] and b[:,ib:jb] for ( int x = tx + 2*blockDim.x*(block - 1); x < nrows && ty < jb - ib + 1; x += 2*blockDim.x*(nblocks - 1) ) { int y = ind[offi + ty] - 1; if ( ( x >= done && x < done + jb - ib + 1 ) || ( x >= ib - 1 && x < jb ) || y < 0) continue; // handled separately ELEMENT_TYPE s = b[x + ld*(off1 + y)]; b[x + ld*(off1 + y)] = b[x + ld*(off2 + ty)]; b[x + ld*(off2 + ty)] = s; } if ( (block - 1)*blockDim.y >= ncols ) return; // swap a[done:done+pivoted,:] and a[ib:jb,:] for ( int y = threadIdx.y + blockDim.y*(block - 1); y < ncols && threadIdx.x < jb - ib + 1; y += blockDim.y*(nblocks - 1) ) { int x = ind[offi + threadIdx.x] - 1; if ( (y >= done && y < done + jb - ib + 1) || (y >= ib - 1 && y < jb) || x < 0 ) continue; // handled separately ELEMENT_TYPE s = a[off1 + x + ld*y]; a[off1 + x + ld*y] = a[off2 + threadIdx.x + ld*y]; a[off2 + threadIdx.x + ld*y] = s; } // swap b[done:done+pivoted,:] and b[ib:jb,:] for ( int y = threadIdx.y + blockDim.y*(block - 1); y < ncols && threadIdx.x < jb - ib + 1; y += blockDim.y*(nblocks - 1) ) { int x = ind[offi + threadIdx.x] - 1; if ( (y >= done && y < done + jb - ib + 1) || (y >= ib - 1 && y < jb) || x < 0) continue; // handled separately ELEMENT_TYPE s = b[off1 + x + ld*y]; b[off1 + x + ld*y] = b[off2 + threadIdx.x + ld*y]; b[off2 + threadIdx.x + ld*y] = s; } } else { // Handle part of matrix that requires BOTH row AND col shuffle if ( threadIdx.x < jb - ib + 1 && threadIdx.y == 0 ) { // Update permutation int i = indf[threadIdx.x] - 1; if ( i >= 0 ) { int s = perm[offp + ib - 1 + threadIdx.x]; perm[offp + ib - 1 + threadIdx.x] = perm[offp + done + i]; perm[offp + done + i] = s; } } // Swaps with L // FIXME: This might be sped up by doing 1.5 swaps instead of 3.5. // Swap a[done:done+pivoted,done:done+pivoted] and // a[done:done+pivoted,ib:jb] // pulling in new cols from c[] as we go. 
int x = done + threadIdx.x; int y = ind[offi + threadIdx.y] - 1; if ( x < done + jb - ib + 1 && threadIdx.y < jb - ib + 1 && y >= 0 ) { a[x + ld*(off2 + threadIdx.y)] = a[x + ld*(off1 + y)]; a[x + ld*(off1 + y)] = c[offc + x + ld*threadIdx.y]; } // Swap a[ib:jb,done:done+pivoted] and a[ib:jb,ib:jb] // pulling in new cols from c[] as we go. x = ib - 1 + threadIdx.x; y = ind[offi + threadIdx.y] - 1; if ( x < jb && threadIdx.y < jb - ib + 1 && y >= 0 ) { a[x + ld*(off2 + threadIdx.y)] = a[x + ld*(off1 + y)]; a[x + ld*(off1 + y)] = c[offc + x + ld*threadIdx.y]; } __syncthreads(); // wait for a[] to be correct // Swap a[done:done+pivoted,done:done+pivoted] and // a[ib:jb,done:done+pivoted] x = ind[offi + threadIdx.x] - 1; y = done + threadIdx.y; if ( threadIdx.x < jb - ib + 1 && y < done + jb - ib + 1 && x >= 0 ) { ELEMENT_TYPE s = a[off1 + x + ld*y]; a[off1 + x + ld*y] = a[off2 + threadIdx.x + ld*y]; a[off2 + threadIdx.x + ld*y] = s; } // Swap a[done:done+pivoted,ib:jb] and a[ib:jb,ib:jb] x = ind[offi + threadIdx.x] - 1; y = ib - 1 + threadIdx.y; if ( threadIdx.x < jb - ib + 1 && y < jb && x >= 0 ) { ELEMENT_TYPE s = a[off1 + x + ld*y]; a[off1 + x + ld*y] = a[off2 + threadIdx.x + ld*y]; a[off2 + threadIdx.x + ld*y] = s; } // Swaps with LD // Swap a[done:done+pivoted,done:done+pivoted] and // a[done:done+pivoted,ib:jb] x = done + threadIdx.x; y = ind[offi + threadIdx.y] - 1; if ( x < done + jb - ib + 1 && threadIdx.y < jb - ib + 1 && y >= 0 ) { ELEMENT_TYPE s = b[x + ld*(off1 + y)]; b[x + ld*(off1 + y)] = b[x + ld*(off2 + threadIdx.y)]; b[x + ld*(off2 + threadIdx.y)] = s; } // Swap a[ib:jb,done:done+pivoted] and a[ib:jb,ib:jb] x = ib - 1 + threadIdx.x; y = ind[offi + threadIdx.y] - 1; if ( x < jb && threadIdx.y < jb - ib + 1 && y >= 0 ) { ELEMENT_TYPE s = b[x + ld*(off1 + y)]; b[x + ld*(off1 + y)] = b[x + ld*(off2 + threadIdx.y)]; b[x + ld*(off2 + threadIdx.y)] = s; } __syncthreads(); // Swap a[done:done+pivoted,done:done+pivoted] and // a[ib:jb,done:done+pivoted] x = ind[offi + threadIdx.x] - 1; y = done + threadIdx.y; if ( threadIdx.x < jb - ib + 1 && y < done + jb - ib + 1 && x >= 0 ) { ELEMENT_TYPE s = b[off1 + x + ld*y]; b[off1 + x + ld*y] = b[off2 + threadIdx.x + ld*y]; b[off2 + threadIdx.x + ld*y] = s; } // Swap a[done:done+pivoted,ib:jb] and a[ib:jb,ib:jb] x = ind[offi + threadIdx.x] - 1; y = ib - 1 + threadIdx.y; if ( threadIdx.x < jb - ib + 1 && y < jb && x >= 0 ) { ELEMENT_TYPE s = b[off1 + x + ld*y]; b[off1 + x + ld*y] = b[off2 + threadIdx.x + ld*y]; b[off2 + threadIdx.x + ld*y] = s; } } } struct multireorder_data { int node; int block; int nblocks; }; template< typename ELEMENT_TYPE, unsigned int SIZE_X, unsigned int SIZE_Y > #if (SM_3X) __launch_bounds__(256, 8) #else __launch_bounds__(256, 4) #endif __global__ void cu_multireorder( const struct multinode_fact_type *ndata, const struct multireorder_data* rdata, const ELEMENT_TYPE* c, const int* stat, const int* ind, int* perm, int* ncb) { __shared__ volatile int indf[SIZE_X]; // index from node_fact __shared__ volatile int indr[SIZE_X]; // reorder index __shared__ volatile int simple; // Reset ncb ready for next call of muliblock_fact_setup() if ( blockIdx.x == 0 && threadIdx.x == 0 && threadIdx.y == 0 ) { ncb[0] = 0; ncb[1] = 0; } // Load data on block rdata += blockIdx.x; int node = rdata->node; ndata += node; int ib = ndata->ib; int jb = ndata->jb; if ( jb < ib ) return; int pivoted = stat[node]; if ( pivoted < 1 ) return; int nrows = ndata->nrows; int bidx = rdata->block; if ( bidx > 1 && (bidx - 1)*blockDim.x >= nrows ) 
return; int done = ndata->done; int ld = nrows; int delayed = ib - done - 1; // Number delayed before most recent factor if ( threadIdx.x == 0 && threadIdx.y == 0 ) simple = (delayed == 0); // true if we don't need to offset __syncthreads(); int next; if ( threadIdx.x < jb - ib + 1 && threadIdx.y == 0 ) { next = ind[node*SIZE_Y/2 + threadIdx.x]; // SIZE_Y=2*BLOCK_SIZE indf[threadIdx.x] = next; if ( jb - ib + 1 > delayed ) indr[delayed + threadIdx.x] = next; if ( indf[threadIdx.x] != threadIdx.x + 1 ) atomicMin((int*)&simple, 0); } __syncthreads(); ELEMENT_TYPE *a = ndata->lval; ELEMENT_TYPE *b = ndata->ldval; int offc = ndata->lbuf; int nblk = rdata->nblocks; if ( simple ) { // Copy successful columns from workspace c to factors a without an // offset or permutation. copy_L_LD_no_perm< ELEMENT_TYPE, SIZE_X*2 > ( nblk, bidx, threadIdx.x + blockDim.x*threadIdx.y, nrows, pivoted, &a[ld*done], ld, &c[offc], ld ); } else { // We need a permutation int ncols = ndata->ncols; int offp = ndata->offp; if ( jb - ib + 1 > delayed ) { // Can't just shuffle along, as pivoted columns overlap with where they // need to be. However, we know that pivoted+delayed < 2*BLOCK_SIZE, so // we can do a shuffle via shmem. copy_L_LD_perm_shmem< ELEMENT_TYPE, SIZE_X, SIZE_Y > ( bidx, nblk, done, pivoted, delayed, nrows, ncols, ib, jb, offc, offp, ld, indr, a, b, c, perm ); } else { // Pivoted columns don't overlap where they need to be, so can just // shuffle in global memory a[] and b[]. copy_L_LD_perm_noshmem< ELEMENT_TYPE, SIZE_X, SIZE_Y > ( node, bidx, nblk, done, pivoted, delayed, nrows, ncols, ib, jb, offc, offp, ld, ind, indf, a, b, c, perm ); } } } template< typename ELEMENT_TYPE, unsigned int SIZE_X, unsigned int SIZE_Y > __global__ void cu_multicopy( const struct multinode_fact_type *ndata, const struct multireorder_data* rdata, ELEMENT_TYPE* b, int* stat, int* ncb ) { if ( blockIdx.x == 0 && threadIdx.x == 0 && threadIdx.y == 0 ) { ncb[0] = 0; ncb[1] = 0; } rdata += blockIdx.x; int node = rdata->node; ndata += node; int ib = ndata->ib; int jb = ndata->jb; if ( jb < ib ) return; int pivoted = stat[node]; if ( pivoted < 1 ) return; int nrows = ndata->nrows; int block = rdata->block; int nblocks = rdata->nblocks; if ( block > 1 && (block - 1)*blockDim.x >= nrows ) return; int done = ndata->done; ELEMENT_TYPE *a = ndata->lval; int offb = ndata->lbuf; for ( int x = threadIdx.x + blockDim.x*block; x < nrows && threadIdx.y < pivoted; x += blockDim.x*nblocks ) { a[x + nrows*(done + threadIdx.y)] = b[offb + x + nrows*threadIdx.y]; } } struct multisymm_type { double *lcol; int ncols; int nrows; }; /* * Symmetrically fills the upper triangles of the upper square blocks of * matrices continuously packed in a * Note: modifed data is pointed to by component of *msdata */ template< typename ELEMENT_TYPE > __global__ void cu_multisymm( const struct multisymm_type* msdata ) { msdata += blockIdx.x; ELEMENT_TYPE *a = msdata->lcol; int ncols = msdata->ncols; int nrows = msdata->nrows; for ( int i = threadIdx.x + blockDim.x*blockIdx.y; i < ncols; i += blockDim.x*gridDim.y ) for ( int j = threadIdx.y + blockDim.y*blockIdx.z; j < i; j += blockDim.y*gridDim.z ) a[j + i*nrows] = a[i + j*nrows]; } } /* anon namespace */ /******************************************************************************* * Following routines are exported with C binding so can be called from Fortran ******************************************************************************/ extern "C" { void spral_ssids_copy_ic(cudaStream_t *stream, int nrows, int 
ncols, double* a, int lda, double* b, int ldb, int* ind) { int rb = (nrows - 1)/BLOCK_SIZE + 1; int cb = (ncols - 1)/BLOCK_SIZE + 1; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(rb, cb); cu_copy_ic< double > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, ind ); } void spral_ssids_copy_mc(cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* b, int ldb, int* mask) { int rb = (nrows - 1)/BLOCK_SIZE + 1; int cb = (ncols - 1)/BLOCK_SIZE + 1; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(rb, cb); cu_copy_mc< double > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, mask ); } void spral_ssids_multisymm(cudaStream_t *stream, int nblocks, const struct multisymm_type* msdata) { dim3 threads(BLOCK_SIZE, BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); dim3 grid(nb,4,4); cu_multisymm< double ><<< grid, threads, 0, *stream >>>( msdata + i ); } } void spral_ssids_multicopy(cudaStream_t *stream, int nblocks, const struct multinode_fact_type *ndata, const struct multireorder_data *rdata, double* a, double* b, int* stat, int* ncb) { dim3 threads(BLOCK_SIZE, BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); cu_multicopy< double, BLOCK_SIZE, BLOCK_SIZE > <<< nb, threads, 0, *stream >>> ( ndata, rdata + i, b, stat, ncb ); } } void spral_ssids_multireorder(cudaStream_t *stream, int nblocks, const struct multinode_fact_type *ndata, const struct multireorder_data *rdata, double* c, int* stat, int* ind, int* index, int* ncb) { dim3 threads(2*BLOCK_SIZE, 2*BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); dim3 grid(nb,1); cu_multireorder< double, 2*BLOCK_SIZE, 2*BLOCK_SIZE > <<< grid, threads, 0, *stream >>> ( ndata, rdata + i, c, stat, ind, index, ncb ); } } // ncols <= 2*BLOCK_SIZE void spral_ssids_reorder_cols2(cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* b, int ldb, int* index, int mode ) { int rb = (nrows - 1)/BLOCK_SIZE + 1; dim3 grid(rb, 2); if ( ncols <= BLOCK_SIZE ) { dim3 threads(BLOCK_SIZE, BLOCK_SIZE); cu_reorder_cols2< double, BLOCK_SIZE, BLOCK_SIZE > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index, mode ); } else if ( ncols <= 2*BLOCK_SIZE ) { dim3 threads(BLOCK_SIZE, 2*BLOCK_SIZE); cu_reorder_cols2< double, BLOCK_SIZE, 2*BLOCK_SIZE > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index, mode ); } } void spral_ssids_reorder_rows(cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* b, int ldb, int* index) { int cb = (ncols - 1)/BLOCK_SIZE + 1; dim3 grid(1, cb); int tx = min(nrows, 1024/BLOCK_SIZE); dim3 threads(tx, BLOCK_SIZE); cu_reorder_rows< double > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index ); } // nrows <= 2*BLOCK_SIZE void spral_ssids_reorder_rows2(cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* b, int ldb, int* index, int mode ) { int cb = (ncols - 1)/BLOCK_SIZE + 1; dim3 grid(cb, 2); if ( nrows <= BLOCK_SIZE ) { dim3 threads(BLOCK_SIZE, BLOCK_SIZE); cu_reorder_rows2< double, BLOCK_SIZE, BLOCK_SIZE > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index, mode ); } else if ( nrows <= 2*BLOCK_SIZE ) { dim3 threads(2*BLOCK_SIZE, BLOCK_SIZE); cu_reorder_rows2< double, 2*BLOCK_SIZE, BLOCK_SIZE > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index, mode ); } } void 
spral_ssids_swap_ni2Dm(cudaStream_t *stream, int nblocks, struct multiswap_type *swapdata) { dim3 threads(BLOCK_SIZE, BLOCK_SIZE); for ( int i = 0; i < nblocks; i += MAX_CUDA_BLOCKS ) { int nb = min(MAX_CUDA_BLOCKS, nblocks - i); dim3 grid(nb,8); cu_multiswap_ni2D_c < double > <<< grid, threads, 0, *stream >>> ( swapdata + i ); cu_multiswap_ni2D_r < double > <<< grid, threads, 0, *stream >>> ( swapdata + i ); } } void spral_ssids_swap_ni2D_ic(cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* b, int ldb, int* index) { int rb = (nrows - 1)/BLOCK_SIZE + 1; int cb = (ncols - 1)/BLOCK_SIZE + 1; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(rb, cb); cu_swap_ni2D_ic< double > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index ); } void spral_ssids_swap_ni2D_ir(cudaStream_t *stream, int nrows, int ncols, double* a, int lda, double* b, int ldb, int* index) { int rb = (nrows - 1)/BLOCK_SIZE + 1; int cb = (ncols - 1)/BLOCK_SIZE + 1; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(rb, cb); cu_swap_ni2D_ir< double > <<< grid, threads, 0, *stream >>> ( nrows, ncols, a, lda, b, ldb, index ); } } // end extern "C"
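// The C-binding wrappers above all follow the same launch pattern: tile an nrows x ncols panel
// with BLOCK_SIZE x BLOCK_SIZE thread blocks and forward an index array that drives the
// copy/permutation. The kernel below is only a minimal illustrative sketch of that pattern --
// idx_copy_cols, IDX_BLOCK and the "1-based index, non-positive means skip" convention are
// assumptions for illustration, not the actual cu_copy_ic kernel or its semantics.
#define IDX_BLOCK 8  // assumed tile edge, standing in for BLOCK_SIZE
template <typename T>
__global__ void idx_copy_cols(int nrows, int ncols, const T *a, int lda, T *b, int ldb, const int *ind)
{
   int i = blockIdx.x*blockDim.x + threadIdx.x; // row within the panel
   int j = blockIdx.y*blockDim.y + threadIdx.y; // destination column
   if ( i >= nrows || j >= ncols ) return;
   int src = ind[j];                            // assumed 1-based source column
   if ( src <= 0 ) return;                      // non-positive entry: nothing to copy
   b[i + (size_t)ldb*j] = a[i + (size_t)lda*(src - 1)];
}
// Host-side launch, mirroring the grid/block computation used by the wrappers:
//    dim3 threads(IDX_BLOCK, IDX_BLOCK);
//    dim3 grid((nrows - 1)/IDX_BLOCK + 1, (ncols - 1)/IDX_BLOCK + 1);
//    idx_copy_cols< double ><<< grid, threads, 0, stream >>>( nrows, ncols, a, lda, b, ldb, ind );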
#ifndef DALI_KERNELS_REDUCE_MEAN_STDDEV_GPU_IMPL_CUH_ #define DALI_KERNELS_REDUCE_MEAN_STDDEV_GPU_IMPL_CUH_ /** * @file * * This file contains the classes needed to implement reductions with pre- * and postprocessing: mean, root mean square, standard deviation (and its reciprocal). */ #include "dali/kernels/reduce/reduce_gpu_impl.cuh" #include "dali/kernels/reduce/reduce_drop_dims.h" namespace dali { namespace kernels { namespace reduce_impl { template <typename T> using scale_t = std::conditional_t<std::is_same<T, double>::value, double, float>; template <typename Out, typename Scale = scale_t<Out>> struct ScaleAndConvert { using scale_t = Scale; scale_t scale = 1; template <typename T> DALI_HOST_DEV Out operator()(T x) const { return ConvertSat<Out>(x * scale); } }; template <typename Out, typename Scale = scale_t<Out>> struct ScaleSqrtConvert { using scale_t = Scale; scale_t scale = 1; template <typename T> DALI_HOST_DEV Out operator()(T x) const { return ConvertSat<Out>(sqrt(x * scale)); } }; template <typename Out, typename In, typename Actual, typename Postprocessor = ScaleAndConvert<Out>> class MeanImplBase { public: Actual &This() { return static_cast<Actual&>(*this); } const Actual &This() const { return static_cast<const Actual&>(*this); } using postprocessor_t = Postprocessor; using scale_t = typename Postprocessor::scale_t; Postprocessor GetPostprocessorImpl(int sample_index, bool reduce_batch) const { int64_t reduced_elems = reduce_batch ? This().TotalReducedElements() : This().ReducedElements(sample_index); return GetPostprocessorImpl(reduced_elems, 0); } Postprocessor GetPostprocessorImpl(int64_t reduced_elems, int ddof) const { DALI_ENFORCE(reduced_elems > 0, "Cannot calculate a mean from 0 elements"); auto denominator = reduced_elems - ddof; return { denominator > 0 ? 
scale_t(1.0 / denominator) : 0 }; } }; template <typename Out, typename In, typename Actual> using RootMeanImplBase = MeanImplBase<Out, In, Actual, ScaleSqrtConvert<Out>>; /** * @brief Implements mean reduction */ template <typename Out, typename In, typename Acc = default_sum_acc_t<Out, In>> class MeanImplGPU : public ReduceImplGPU<Out, In, Acc, MeanImplGPU<Out, In, Acc>>, public MeanImplBase<Out, In, MeanImplGPU<Out, In, Acc>> { public: reductions::sum GetReduction() const { return {}; } }; /** * @brief Implements mean square reduction */ template <typename Out, typename In, typename Acc = default_sum_acc_t<Out, decltype(reductions::square()(In()))>> class MeanSquareImplGPU : public ReduceImplGPU<Out, In, Acc, MeanSquareImplGPU<Out, In, Acc>> , public MeanImplBase<Out, In, MeanSquareImplGPU<Out, In, Acc>> { public: using Preprocessor = reductions::square; template <int non_reduced_dims> using PreprocessorBank = UniformPreprocessorBank<non_reduced_dims, Preprocessor>; Preprocessor GetPreprocessorImpl(int sample_idx, bool batch) const { return {}; } template <int non_reduced_dims> PreprocessorBank<non_reduced_dims> * GetPreprocessorBanksImpl(WorkArea &wa, int axis, int_const<non_reduced_dims>) const { return nullptr; } reductions::sum GetReduction() const { return {}; } }; /** * @brief Implements root mean square reduction */ template <typename Out, typename In, typename Acc = default_sum_acc_t<Out, decltype(reductions::square()(In()))>> class RootMeanSquareImplGPU : public ReduceImplGPU<Out, In, Acc, RootMeanSquareImplGPU<Out, In, Acc>> , public RootMeanImplBase<Out, In, RootMeanSquareImplGPU<Out, In, Acc>> { public: using Preprocessor = reductions::square; template <int non_reduced_dims> using PreprocessorBank = UniformPreprocessorBank<non_reduced_dims, Preprocessor>; Preprocessor GetPreprocessorImpl(int sample_idx, bool batch) const { return {}; } template <int non_reduced_dims> PreprocessorBank<non_reduced_dims> * GetPreprocessorBanksImpl(WorkArea &wa, int axis, int_const<non_reduced_dims>) const { return nullptr; } reductions::sum GetReduction() const { return {}; } }; /** * @brief Subtracts a mean value stored in specified memory location and squares the difference * * This postprocessor is necessary because regular `variance` would require gathering means * for all samples, which may be scattered in non-contiguous device memory. 
*/ template <class Mean> struct VarianceIndirect { const Mean *__restrict__ mean = nullptr; template <typename T> DALI_HOST_DEV DALI_FORCEINLINE auto operator()(const T &x) const noexcept { #ifdef __CUDA_ARCH__ auto d = x - __ldg(mean); #else auto d = x - *mean; #endif return d * d; } }; /** * @brief A preprocessor bank which returns a `reduce::variance` functor with * mean value taken from a tensor */ template <int non_reduced_ndim, typename Mean> struct VariancePreprocessorBank; template <typename Mean> struct VariancePreprocessorBank<1, Mean> { const Mean *__restrict__ mean; i64vec<1> stride; DALI_HOST_DEV DALI_FORCEINLINE reductions::variance<Mean> Get(const i64vec<1> &pos) const { auto offset = dot(pos, stride); #ifdef __CUDA_ARCH__ Mean m = __ldg(mean + offset); #else Mean m = mean[offset]; #endif return { m }; } }; template <typename Mean> struct VariancePreprocessorBank<2, Mean> { const Mean *mean; i64vec<2> stride; /// Calculates the fully reduced inner offset based on non-reduced `pos[1]` DropDims<3> inner_dims; DALI_HOST_DEV DALI_FORCEINLINE reductions::variance<Mean> Get(const i64vec<2> &pos) const { auto offset = dot(i64vec2(pos[0], inner_dims.reindex(pos[1])), stride); #ifdef __CUDA_ARCH__ Mean m = __ldg(mean + offset); #else Mean m = mean[offset]; #endif return { m }; } }; template <typename Out, typename In, typename Mean, typename Actual> class VarianceImplBase { public: Actual &This() { return static_cast<Actual&>(*this); } const Actual &This() const { return static_cast<const Actual&>(*this); } void SetMean(const InListGPU<Mean> &mean, cudaStream_t stream) { mean_ = mean; mean_.reshape(This().SimplifiedOutputShape()); } InListGPU<Mean> mean_; using Preprocessor = VarianceIndirect<Mean>; static_assert(sizeof(Preprocessor) == sizeof(Mean*), "A variance functor must carry only a pointer to the mean"); template <int non_reduced_dims> using PreprocessorBank = VariancePreprocessorBank<non_reduced_dims, Mean>; void InitMean(const InListGPU<Mean> &mean) { mean_ = reshape(mean, This().SimplifiedOutputShape(), true); } Preprocessor GetPreprocessorImpl(int sample_index, bool batch) const { assert(sample_index < This().SimplifiedOutputShape().num_samples()); return Preprocessor { mean_.data[sample_index] }; } PreprocessorBank<1> * GetPreprocessorBanks(WorkArea &wa, int axis, int_const<1>) const { using Bank = PreprocessorBank<1>; int n = This().SimplifiedInputShape().num_samples(); Bank *banks = wa.ParamBuffer<Bank>(n); for (int i = 0; i < n; i++) { int o = This().ReduceBatch() ? 0 : i; auto shape = This().SimplifiedOutputShape().tensor_shape_span(o); auto &bank = banks[i]; bank.mean = mean_.data[o]; bank.stride[0] = volume(shape.begin() + axis, shape.end()); // outer stride } return banks; } PreprocessorBank<2> * GetPreprocessorBanks(WorkArea &wa, int axis, int_const<2>) const { using Bank = PreprocessorBank<2>; int n = This().SimplifiedInputShape().num_samples(); Bank *banks = wa.ParamBuffer<Bank>(n); SmallVector<int, 6> remaining_axes; for (int a : This().SimplifiedAxes()) if (a > axis) remaining_axes.push_back(a - axis - 1); int mask = to_bit_mask(remaining_axes); for (int i = 0; i < n; i++) { int o = This().ReduceBatch() ? 
0 : i; auto &bank = banks[i]; auto in_shape = This().SimplifiedInputShape().tensor_shape_span(i); auto out_shape = This().SimplifiedOutputShape().tensor_shape_span(o); auto inner_shape = span<const int64_t>(in_shape.begin() + axis + 1, in_shape.end()); bank.mean = mean_.data[o]; bank.stride[0] = volume(out_shape.begin() + axis, out_shape.end()); // outer stride bank.stride[1] = 1; // inner stride, always 1? bank.inner_dims = DropDims<3>(inner_shape, mask); // reindexing, if necessary } return banks; } template <int non_reduced_dims> PreprocessorBank<non_reduced_dims> * GetPreprocessorBanksImpl(WorkArea &wa, int axis, int_const<non_reduced_dims> nrd) const { return GetPreprocessorBanks(wa, axis, nrd); } }; /** * @brief Implements variance with externally provided mean */ template <typename Out, typename In, typename Mean = Out, typename Acc = Out> class VarianceImplGPU : public ReduceImplGPU<Out, In, Acc, VarianceImplGPU<Out, In, Mean, Acc>>, public VarianceImplBase<Out, In, Mean, VarianceImplGPU<Out, In, Mean, Acc>>, public MeanImplBase<Out, In, VarianceImplGPU<Out, In, Mean, Acc>> { public: using ReduceBase = ReduceImplGPU<Out, In, Acc, VarianceImplGPU<Out, In, Mean, Acc>>; using MeanBase = MeanImplBase<Out, In, VarianceImplGPU<Out, In, Mean, Acc>>; reductions::sum GetReduction() const { return {}; } typename MeanBase::postprocessor_t GetPostprocessorImpl(int sample_index, bool reduce_batch) const { int64_t reduced_elems = reduce_batch ? this->TotalReducedElements() : this->ReducedElements(sample_index); return MeanBase::GetPostprocessorImpl(reduced_elems, ddof_); } void Run(KernelContext &kctx, const OutListGPU<Out> &out, const InListGPU<In> &in, const InListGPU<Mean> &mean, int ddof = 0) { ddof_ = ddof; this->InitMean(mean); ReduceBase::Run(kctx, out, in); } private: int ddof_ = 0; }; /** * @brief Implements standard deviation with externally provided mean */ template <typename Out, typename In, typename Mean = Out, typename Acc = Out> class StdDevImplGPU : public ReduceImplGPU<Out, In, Acc, StdDevImplGPU<Out, In, Mean, Acc>>, public VarianceImplBase<Out, In, Mean, StdDevImplGPU<Out, In, Mean, Acc>>, public RootMeanImplBase<Out, In, StdDevImplGPU<Out, In, Mean, Acc>> { public: using ReduceBase = ReduceImplGPU<Out, In, Acc, StdDevImplGPU<Out, In, Mean, Acc>>; using RMSBase = RootMeanImplBase<Out, In, StdDevImplGPU<Out, In, Mean, Acc>>; reductions::sum GetReduction() const { return {}; } typename RMSBase::postprocessor_t GetPostprocessorImpl(int sample_index, bool reduce_batch) const { int64_t reduced_elems = reduce_batch ? this->TotalReducedElements() : this->ReducedElements(sample_index); return RMSBase::GetPostprocessorImpl(reduced_elems, ddof_); } void Run(KernelContext &kctx, const OutListGPU<Out> &out, const InListGPU<In> &in, const InListGPU<Mean> &mean, int ddof = 0) { ddof_ = ddof; this->InitMean(mean); ReduceBase::Run(kctx, out, in); } private: int ddof_ = 0; }; template <typename Out, typename ScaleAndReg> struct RegularizedInvSqrt { ScaleAndReg scale = 1, reg = 0; template <typename T> DALI_HOST_DEV Out operator()(T x) const { float s = scale * x + reg; return s ? 
ConvertSat<Out>(rsqrt(s)) : Out(0); } }; template <typename Out, typename In, typename Actual> class RegularizedInvRMS { public: Actual &This() { return static_cast<Actual&>(*this); } const Actual &This() const { return static_cast<const Actual&>(*this); } using param_t = std::conditional_t<std::is_same<Out, double>::value, double, float>; using Postprocessor = RegularizedInvSqrt<Out, param_t>; void SetStdDevParams(int ddof, param_t epsilon) { if (!(epsilon >= 0)) // >= 0 and not NaN throw std::range_error("The regularizing term must be a non-negative number."); if (ddof < 0) throw std::range_error("Delta Degrees of Freedom must be a non-negative number."); regularization_ = epsilon; ddof_ = ddof; } param_t regularization_ = 0.0f; int ddof_ = 0; Postprocessor GetPostprocessorImpl(int sample_index, bool reduce_batch) const { int64_t reduced_elems = reduce_batch ? This().TotalReducedElements() : This().ReducedElements(sample_index); DALI_ENFORCE(reduced_elems > 0, "Cannot calculate a mean from 0 elements"); param_t scale = reduced_elems > ddof_ ? param_t(1.0 / (reduced_elems - ddof_)) : 0; return { scale, regularization_ }; } }; /** * @brief Implements regularized inverse standard deviation reduction with externally provided mean */ template <typename Out, typename In, typename Mean = Out, typename Acc = Out> class InvStdDevImplGPU : public ReduceImplGPU<Out, In, Acc, InvStdDevImplGPU<Out, In, Mean, Acc>>, public VarianceImplBase<Out, In, Mean, InvStdDevImplGPU<Out, In, Mean, Acc>>, public RegularizedInvRMS<Out, In, InvStdDevImplGPU<Out, In, Mean, Acc>> { public: using ReduceBase = ReduceImplGPU<Out, In, Acc, InvStdDevImplGPU<Out, In, Mean, Acc>>; reductions::sum GetReduction() const { return {}; } /** * */ void Run(KernelContext &kctx, const OutListGPU<Out> &out, const InListGPU<In> &in, const InListGPU<Mean> &mean, int ddof = 0, float epsilon = 0.0f) { this->InitMean(mean); this->SetStdDevParams(ddof, epsilon); ReduceBase::Run(kctx, out, in); } }; } // namespace reduce_impl } // namespace kernels } // namespace dali #endif // DALI_KERNELS_REDUCE_MEAN_STDDEV_GPU_IMPL_CUH_
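// Host-side sketch of the arithmetic that the classes above distribute over the GPU reduction
// stages: a plain sum as the reduction, a 1/(n - ddof) scaling (optionally followed by sqrt) as
// the postprocessor, and -- for the stddev variants -- a (x - mean)^2 preprocessor with an
// externally provided mean. The helpers naive_mean/naive_stddev are illustrative only and are
// not part of the DALI API; they are placed after the header purely for reference.
#include <cmath>
#include <cstddef>
#include <vector>

inline double naive_mean(const std::vector<float> &x) {
  if (x.empty()) return 0;            // the GPU path DALI_ENFORCEs a non-empty reduction instead
  double sum = 0;                     // reduction stage: reductions::sum
  for (float v : x) sum += v;
  return sum / x.size();              // postprocessor: scale by 1/n (ScaleAndConvert)
}

inline double naive_stddev(const std::vector<float> &x, int ddof = 0) {
  double mean = naive_mean(x);        // externally provided mean, as in StdDevImplGPU::Run
  double sum_sq = 0;
  for (float v : x) {                 // preprocessor: (x - mean)^2 (VarianceIndirect)
    double d = v - mean;
    sum_sq += d * d;
  }
  std::ptrdiff_t denom = static_cast<std::ptrdiff_t>(x.size()) - ddof;
  if (denom <= 0) return 0;           // mirrors the "denominator > 0 ? scale : 0" guard above
  return std::sqrt(sum_sq / denom);   // postprocessor: ScaleSqrtConvert with scale = 1/(n - ddof)
}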
#include <df/util/cudaHelpers.h> #include <df/util/macros.h> #include <df/voxel/color.h> #include <df/voxel/probability.h> #include <df/voxel/compositeVoxel.h> #include <df/voxel/tsdf.h> // TODO #include <Eigen/Geometry> namespace df { // TODO: maybe one thread per vertex, do a full-bandwidth read, // compute normal with every third thread, // broadcast, then do a full-bandwidth write? template <typename Scalar> __global__ void computeTriangularFaceNormalsKernel(const Tensor<2,Scalar,DeviceResident> vertices, Tensor<2,Scalar,DeviceResident> normals) { typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3; const uint x = threadIdx.x + blockDim.x * blockIdx.x; if ( x < vertices.dimensionSize(1) / 3) { const Eigen::Map<const Vec3> v1(&vertices(0,3*x)); const Eigen::Map<const Vec3> v2(&vertices(0,3*x+1)); const Eigen::Map<const Vec3> v3(&vertices(0,3*x+2)); Vec3 normal = (v3-v1).cross(v2-v1); normal.normalize(); Eigen::Map<Vec3> n1(&normals(0,3*x)); Eigen::Map<Vec3> n2(&normals(0,3*x+1)); Eigen::Map<Vec3> n3(&normals(0,3*x+2)); n1 = normal; n2 = normal; n3 = normal; } } template <typename Scalar> void computeTriangularFaceNormals(const Tensor<2,Scalar,DeviceResident> & vertices, Tensor<2,Scalar,DeviceResident> & normals) { assert(vertices.dimensionSize(0) == 3); assert(normals.dimensionSize(0) == 3); const int nVertices = vertices.dimensionSize(1); assert(normals.dimensionSize(1) == nVertices); const uint nFaces = nVertices / 3; const dim3 block(512,1,1); // TODO const dim3 grid(intDivideAndCeil(nFaces,block.x),1,1); computeTriangularFaceNormalsKernel<<<grid,block>>>(vertices,normals); } #define COMPUTE_TRIANGULAR_FACE_NORMALS_EXPLICIT_INSTANTIATION(type) \ template void computeTriangularFaceNormals(const Tensor<2,type,DeviceResident> &, \ Tensor<2,type,DeviceResident> &) ALL_TYPES_INSTANTIATION(COMPUTE_TRIANGULAR_FACE_NORMALS_EXPLICIT_INSTANTIATION); template <typename Scalar, typename ... NormalizerT> inline __device__ Eigen::Matrix<Scalar,3,1> normalize(const Eigen::Matrix<Scalar,3,1> & vec, NormalizerT ... normalizer) { return vec.normalized(); } template <typename Scalar> inline __device__ Eigen::Matrix<Scalar,3,1> normalize(const Eigen::Matrix<Scalar,3,1> & vec, const Eigen::Matrix<Scalar,3,1> & normalizer) { return vec.cwiseProduct(normalizer).normalized(); } template <typename Scalar, typename VoxelT, typename ... NormalizerT> __global__ void computeSignedDistanceGradientNormalsKernel(const Tensor<2,Scalar,DeviceResident> vertices, Tensor<2,Scalar,DeviceResident> normals, Tensor<3,VoxelT,DeviceResident> voxelGrid, NormalizerT ... 
normalizer) { typedef Eigen::Matrix<Scalar,3,1> Vec3; const uint i = threadIdx.x + blockDim.x * blockIdx.x; if (i < vertices.dimensionSize(1)) { Eigen::Map<Vec3> normalMap(&normals(0,i)); if (voxelGrid.inBounds(vertices(0,i),vertices(1,i),vertices(2,i),1.f)) { if (vertices(0,i) != floor(vertices(0,i))) { normalMap = normalize(voxelGrid.transformBackwardGradientValidOnly(SignedDistanceValueExtractor<Scalar,VoxelT>(), SignedDistanceValidExtractor<Scalar,VoxelT>(), vertices(0,i),(int)vertices(1,i),(int)vertices(2,i)), normalizer...); if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),vertices(0,i),(int)vertices(1,i),(int)vertices(2,i))) { printf("whoops!\n"); } // if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),vertices(0,i)-1,(int)vertices(1,i),(int)vertices(2,i))) { // printf("whoops!\n"); // } // if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),vertices(0,i),(int)vertices(1,i)-1,(int)vertices(2,i))) { // printf("whoops!\n"); // } // if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),vertices(0,i),(int)vertices(1,i),(int)vertices(2,i)-1)) { // printf("whoops!\n"); // } } else if (vertices(1,i) != floor(vertices(1,i))) { normalMap = normalize(voxelGrid.transformBackwardGradientValidOnly(SignedDistanceValueExtractor<Scalar,VoxelT>(), SignedDistanceValidExtractor<Scalar,VoxelT>(), (int)vertices(0,i),vertices(1,i),(int)vertices(2,i)), normalizer...); if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),(int)vertices(0,i),vertices(1,i),(int)vertices(2,i))) { printf("whoops!\n"); } } else if (vertices(2,i) != floor(vertices(2,i))) { normalMap = normalize(voxelGrid.transformBackwardGradientValidOnly(SignedDistanceValueExtractor<Scalar,VoxelT>(), SignedDistanceValidExtractor<Scalar,VoxelT>(), (int)vertices(0,i),(int)vertices(1,i),vertices(2,i)), normalizer...); if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),(int)vertices(0,i),(int)vertices(1,i),vertices(2,i))) { printf("whoops!\n"); } } else { normalMap = normalize(voxelGrid.transformBackwardGradientValidOnly(SignedDistanceValueExtractor<Scalar,VoxelT>(), SignedDistanceValidExtractor<Scalar,VoxelT>(), (int)vertices(0,i),(int)vertices(1,i),(int)vertices(2,i)), normalizer...); if (!voxelGrid.validForInterpolation(SignedDistanceValidExtractor<Scalar,VoxelT>(),(int)vertices(0,i),(int)vertices(1,i),(int)vertices(2,i))) { printf("whoops!\n"); } } } else { normalMap = Vec3(0,0,0); } } } template <typename Scalar, typename VoxelT> void computeSignedDistanceGradientNormals(const Tensor<2,Scalar,DeviceResident> & vertices, Tensor<2,Scalar,DeviceResident> & normals, VoxelGrid<Scalar,VoxelT,DeviceResident> & voxelGrid) { typedef Eigen::Matrix<Scalar,3,1> Vec3; assert(vertices.dimensionSize(0) == 3); assert(normals.dimensionSize(0) == 3); const int numVertices = vertices.dimensionSize(1); assert(normals.dimensionSize(1) == numVertices); if (!numVertices) { // there are no points return; } const dim3 block(1024); const dim3 grid(intDivideAndCeil((uint)numVertices,block.x)); // std::cout << normalizer.transpose() << std::endl; // std::cout << std::abs(normalizer(0) - normalizer(1)) << std::endl; // std::cout << std::abs(normalizer(0) - normalizer(2)) << std::endl; // std::cout << std::numeric_limits<Scalar>::epsilon() << std::endl; const Vec3 boundingBoxExtent = voxelGrid.boundingBox().max() - voxelGrid.boundingBox().min(); const Vec3 normalizer = 
voxelGrid.worldToGridScale(); const Vec3 voxelSize = boundingBoxExtent.cwiseProduct(normalizer); // std::cout << std::abs(boundingBoxExtent(0) - boundingBoxExtent(1)) << std::endl; // std::cout << std::abs(boundingBoxExtent(0) - boundingBoxExtent(2)) << std::endl; if ( (std::abs(voxelSize(0) - voxelSize(1)) < std::numeric_limits<Scalar>::epsilon()) && (std::abs(voxelSize(0) - voxelSize(2)) < std::numeric_limits<Scalar>::epsilon())) { std::cout << "computing isotropic normals" << std::endl; computeSignedDistanceGradientNormalsKernel<<<grid,block>>>(vertices,normals,voxelGrid.grid()); } else { std::cout << "computing anisotropic normals" << std::endl; computeSignedDistanceGradientNormalsKernel<<<grid,block>>>(vertices,normals,voxelGrid.grid(), normalizer); } } // TODO: do these really need separate explicit instantiations? can we condense these somehow? template void computeSignedDistanceGradientNormals(const Tensor<2,float,DeviceResident> &, Tensor<2,float,DeviceResident> &, VoxelGrid<float,CompositeVoxel<float,TsdfVoxel>,DeviceResident> &); template void computeSignedDistanceGradientNormals(const Tensor<2,float,DeviceResident> &, Tensor<2,float,DeviceResident> &, VoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ColorVoxel>,DeviceResident> &); template void computeSignedDistanceGradientNormals(const Tensor<2,float,DeviceResident> &, Tensor<2,float,DeviceResident> &, VoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>,DeviceResident> &); template <typename Scalar, int D> __global__ void computeVertMapNormalsKernel(const DeviceTensor2<Eigen::Matrix<Scalar,D,1,Eigen::DontAlign> > vertMap, DeviceTensor2<Eigen::Matrix<Scalar,D,1,Eigen::DontAlign> > normMap) { typedef Eigen::Matrix<Scalar,D,1,Eigen::DontAlign> VecD; const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; if ( x < vertMap.dimensionSize(0) && y < vertMap.dimensionSize(1)) { const VecD & center = vertMap(x,y); if ( (x == 0) || (vertMap(x-1,y)(2) <= Scalar(0)) ) { const VecD & right = vertMap(x+1,y); if (right(2) <= Scalar(0)) { normMap(x,y) = VecD::Zero(); } else if ( (y == 0) || (vertMap(x,y-1)(2) <= Scalar(0)) ) { const VecD & up = vertMap(x,y+1); if (up(2) <= Scalar(0)) { normMap(x,y) = VecD::Zero(); } else { normMap(x,y).template head<3>() = (right.template head<3>() - center.template head<3>()).cross (up.template head<3>() - center.template head<3>()).normalized(); } } else { const VecD & down = vertMap(x,y-1); normMap(x,y).template head<3>() = (right.template head<3>() - center.template head<3>()).cross (center.template head<3>() - down.template head<3>()).normalized(); } } else { const VecD & left = vertMap(x-1,y); if ( (y == 0) || (vertMap(x,y-1)(2) <= Scalar(0)) ) { const VecD & up = vertMap(x,y+1); if (up(2) <= Scalar(0)) { normMap(x,y) = VecD::Zero(); } else { normMap(x,y).template head<3>() = (center.template head<3>() - left.template head<3>()).cross (up.template head<3>() - center.template head<3>()).normalized(); } } else { const VecD & down = vertMap(x,y-1); normMap(x,y).template head<3>() = (center.template head<3>() - left.template head<3>()).cross (center.template head<3>() - down.template head<3>()).normalized(); } } normMap(x,y) *= Scalar(-1); } } template <typename Scalar, int D> void computeVertMapNormals(const DeviceTensor2<Eigen::Matrix<Scalar,D,1,Eigen::DontAlign> > & vertMap, DeviceTensor2<Eigen::Matrix<Scalar,D,1,Eigen::DontAlign> > & normMap) { const dim3 block(16,8); const dim3 grid(intDivideAndCeil(vertMap.dimensionSize(0),block.x), 
intDivideAndCeil(vertMap.dimensionSize(1),block.y)); computeVertMapNormalsKernel<<<grid,block>>>(vertMap,normMap); cudaDeviceSynchronize(); CheckCudaDieOnError(); } template void computeVertMapNormals(const DeviceTensor2<Eigen::Matrix<float,3,1,Eigen::DontAlign> > & vertMap, DeviceTensor2<Eigen::Matrix<float,3,1,Eigen::DontAlign> > & normMap); template void computeVertMapNormals(const DeviceTensor2<Eigen::Matrix<float,4,1,Eigen::DontAlign> > & vertMap, DeviceTensor2<Eigen::Matrix<float,4,1,Eigen::DontAlign> > & normMap); } // namespace df
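// Host-side reference for the per-face normals computed by computeTriangularFaceNormalsKernel
// above: every consecutive vertex triple (v1, v2, v3) gets the normalized cross product
// (v3 - v1) x (v2 - v1), broadcast to all three of its vertices. Purely an illustrative sketch
// for checking the kernel's arithmetic; faceNormalsHost is not part of the df namespace.
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <cstddef>
#include <vector>

inline std::vector<Eigen::Vector3f>
faceNormalsHost(const std::vector<Eigen::Vector3f> & vertices) {
    std::vector<Eigen::Vector3f> normals(vertices.size(), Eigen::Vector3f::Zero());
    for (std::size_t f = 0; f + 2 < vertices.size(); f += 3) {
        const Eigen::Vector3f & v1 = vertices[f];
        const Eigen::Vector3f & v2 = vertices[f + 1];
        const Eigen::Vector3f & v3 = vertices[f + 2];
        Eigen::Vector3f normal = (v3 - v1).cross(v2 - v1);
        normal.normalize();                                // no special handling of degenerate faces
        normals[f] = normals[f + 1] = normals[f + 2] = normal; // broadcast to the whole face
    }
    return normals;
}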
#include "SmallestDirRect.h" #include "CoordiSet.h" #include <cmath> #include <iostream> #include <stdio.h> using namespace std; // 宏:SDR_BLOCKSIZE // 定义了核函数线程块的大小。 #define DEF_BLOCK_1D 512 // 宏:SDR_LARGE_ENOUGH // 定义了一个足够大的正整数,该整数在使用过程中被认为是无穷大。 #define SDR_LARGE_ENOUGH ((1 << 30) - 1) // 宏:SDR_DEBUG_KERNEL_PRINT(Kernel 调试打印开关) // 打开该开关则会在 Kernel 运行时打印相关的信息,以参考调试程序;如果注释掉该 // 宏,则 Kernel 不会打印这些信息,但这会有助于程序更快速的运行。 //#define SDR_DEBUG_KERNEL_PRINT // Kernel 函数: _sdrComputeBoundInfoKer(计算凸壳点集中每相邻两点的旋转矩阵 // 信息,进而计算新坐标系下凸壳的有向外接矩形的边界信息) // 根据输入的凸壳点,计算顺时针相邻两点的构成的直线与 x 轴的角度,同时计算 // 旋转矩阵信息。在此基础上,计算新坐标系下各点的坐标。从而计算每个有向外接 // 矩形的边界点的坐标信息。 static __global__ void // Kernel 函数无返回值。 _sdrComputeBoundInfoKer( CoordiSet convexcst, // 输入凸壳点集。 RotationInfo rotateinfo[], // 输出,旋转矩阵信息数组。 BoundBox bbox[] // 输出,找出的包围矩形的边界坐标信息数组。 ); // Kernel 函数: _sdrComputeSDRKer(计算包围矩形中面积最小的) // 根据输入的目前的每个包围矩形的长短边长度,计算最小有向外接矩形的标号索引。 static __global__ void // Kernel 函数无返回值。 _sdrComputeSDRKer( int cstcnt, // 输入,点集中点的数量。 BoundBox bbox[], // 输入,找出的包围矩形的边界坐标信息。 int *index // 输出,计算出的最小有向外接矩形的标号索引。 ); // Kernel 函数: _sdrComputeBoundInfoKer(计算凸壳点集中每相邻两点的旋转矩阵 // 信息,进而计算新坐标系下凸壳的有向外接矩形的边界信息) static __global__ void _sdrComputeBoundInfoKer( CoordiSet convexcst, RotationInfo rotateinfo[], BoundBox bbox[]) { // 当前 block 的索引,在 x 上 block 的索引表示各个凸壳点的索引。 int r = blockIdx.x; // 检查索引值是否越界。如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (r >= convexcst.count) return; // 当前凸壳点的下一个点的索引。 int nextidx; // 当前点与下一点的 x、y 坐标差值。 float deltax, deltay; // 当前点与下一点间距离。 float sidelength; // 旋转角度的余弦值和正弦值。 float cosalpha, sinalpha; // 声明 Shared Memory,并分配各个指针。 extern __shared__ float shdmem[]; float *shdcos = shdmem; float *shdsin = shdcos + 1; float *shdradian = shdsin + 1; if (threadIdx.x == 0) { // 当前索引值加 1,求得下一点索引值。 nextidx = r + 1; // 若当前点为点集中最后一点,则下一点为起始点。 if (nextidx == convexcst.count) nextidx = 0; // 计算当前点和下一点 x, y 坐标差值。 deltax = convexcst.tplData[nextidx * 2] - convexcst.tplData[r * 2]; deltay = convexcst.tplData[nextidx * 2 + 1] - convexcst.tplData[r * 2 + 1]; // 如果解的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (deltax < 0) { deltax = -deltax; deltay = -deltay; } // 计算当前点和下一点间距离。 sidelength = sqrtf(deltax * deltax + deltay * deltay); // 计算旋转角度的余弦、正弦值。 cosalpha = deltax / sidelength; sinalpha = deltay / sidelength; // 根据计算得到的正弦值计算角度,将旋转矩阵信息存入到 Shared Memory // 和 Global Memory 参数 rotateinfo[r].cos = shdcos[0] = cosalpha; rotateinfo[r].sin = shdsin[0] = sinalpha; rotateinfo[r].radian = shdradian[0] = asin(sinalpha); } // 同步所有线程,使初始化 Shared Memory 的结果对所有线程可见。 __syncthreads(); // 计算当前块内线程的下标。在 x 维上该 Kernel 计算边界值点, // 必须以单 Block 运行,避免跨 block 同步引发的同步问题。 int c = threadIdx.x; // 声明包围矩形。 BoundBox tmpbbox; // 当前 Thread 处理的若干个点中找到的局部极值点。初始化。 tmpbbox.left = tmpbbox.bottom = SDR_LARGE_ENOUGH; tmpbbox.right = tmpbbox.top = -SDR_LARGE_ENOUGH; // 当前点在新坐标系下的新坐标。 float curx, cury; // 迭代处理该线程所要处理的所有坐标点,这些坐标点是间隔 blockDim.x // 个的各个坐标点。 while (c < convexcst.count) { // 从 Global Memory 中读取坐标值,从 Shared Memory 读取旋转信息值, // 并计算当前点在新坐标系下的新坐标。 curx = convexcst.tplData[2 * c] * shdcos[0] + convexcst.tplData[2 * c + 1] * shdsin[0]; cury = convexcst.tplData[2 * c] * (-shdsin[0]) + convexcst.tplData[2 * c + 1] * shdcos[0]; // 判断该坐标值的大小,和已经找到的极值做比较,更新极值。 tmpbbox.left = min(tmpbbox.left, curx); tmpbbox.right = max(tmpbbox.right, curx); tmpbbox.bottom = min(tmpbbox.bottom, cury); tmpbbox.top = max(tmpbbox.top, cury); // 更新 idx,在下一轮迭代时计算下一个点。 c += blockDim.x; } // 至此,所有 Thread 都得到了自己的局部极值,现在需要将极值放入 // Shared Memory 中,以便下一步进行归约处理。 // 分配 Shared Memory 给各个指针。 float *shdbboxleft = shdradian + 1; float *shdbboxright = 
shdbboxleft + blockDim.x; float *shdbboxbottom = shdbboxright + blockDim.x; float *shdbboxtop = shdbboxbottom + blockDim.x; // 将局部结果拷贝到 Shared Memory 中。 c = threadIdx.x; shdbboxleft[c] = tmpbbox.left; shdbboxright[c] = tmpbbox.right; shdbboxbottom[c] = tmpbbox.bottom; shdbboxtop[c] = tmpbbox.top; // 同步所有线程,使初始化Shared Memory 的结果对所有线程可见。 __syncthreads(); // 下面进行折半归约迭代。这里要求 blockDim.x 必须为 2 的整数次幂。 int currdsize = blockDim.x / 2; // 和当前线程间隔 currdsize 位置处的索引。 int inidx; for (/*currdsize*/; currdsize >= 1; currdsize /= 2) { if (c < currdsize) { inidx = c + currdsize; // 将两个局部结果归约成一个局部结果。 shdbboxleft[c] = min(shdbboxleft[c], shdbboxleft[inidx]); shdbboxright[c] = max(shdbboxright[c], shdbboxright[inidx]); shdbboxbottom[c] = min(shdbboxbottom[c], shdbboxbottom[inidx]); shdbboxtop[c] = max(shdbboxtop[c], shdbboxtop[inidx]); } // 同步线程,使本轮迭代归约的结果对所有线程可见。 __syncthreads(); } // 打印当前的最值点,检查中间结果。 if (c == 0) // 调试打印。 #ifdef SDR_DEBUG_KERNEL_PRINT printf("Kernel[computeBdInf]:(%3d, %3d) LRBT (%7.3f,%7.3f,%7.3f,%7.3f)\n", r, c, shdbboxleft[c], shdbboxright[c], shdbboxbottom[c], shdbboxtop[c]); #endif // 将边界值传递给 Global Memory 参数,每个线程块的第一个线程会进行这个操作。 if (c == 0) { bbox[r].left = shdbboxleft[c]; bbox[r].right = shdbboxright[c]; bbox[r].bottom = shdbboxbottom[c]; bbox[r].top = shdbboxtop[c]; } } // Host 成员方法:sdrComputeBoundInfo(计算新坐标系下凸壳的有向外接矩形的边界信 // 息) __host__ int SmallestDirRect::sdrComputeBoundInfo( CoordiSet *convexcst, RotationInfo rotateinfo[], BoundBox bbox[]) { // 检查坐标集和旋转矩阵是否为空,若为空则直接返回。 if (convexcst == NULL || rotateinfo == NULL || bbox == NULL) return NULL_POINTER; // 如果输入点集中不含有任何的坐标点,则直接退出。 if (convexcst->count < 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将 convexcst 拷贝到 Device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) return errcode; // 计算启动 Kernel 函数所需要的 Block 尺寸与数量。 size_t blocksize; blocksize = DEF_BLOCK_1D; size_t gridsize; gridsize = DEF_BLOCK_1D; // 分配共享内存大小。 int sharedmemsize = (4 * DEF_BLOCK_1D + 3) * sizeof (float); // 启动 Kernel 函数,完成计算。 _sdrComputeBoundInfoKer<<<gridsize, blocksize, sharedmemsize>>>( *convexcst, rotateinfo, bbox); // 检查 Kernel 函数执行是否正确。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 运行完毕退出。 return NO_ERROR; } // Host 成员方法:sdrComputeBoundInfoCpu(计算新坐标系下凸壳的有向外接矩形的边界 // 信息) __host__ int SmallestDirRect::sdrComputeBoundInfoCpu( CoordiSet *convexcst, RotationInfo rotateinfo[], BoundBox bbox[]) { // 检查坐标集和旋转矩阵是否为空,若为空则直接返回。 if (convexcst == NULL || rotateinfo == NULL || bbox == NULL) return NULL_POINTER; // 如果输入点集中不含有任何的坐标点,则直接退出。 if (convexcst->count < 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 将 convexcst 拷贝到 Host 端。 errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) return errcode; int idx; int cstidx; // 当前凸壳点的下一个点的索引。 int nextidx; // 当前点与下一点的 x、y 坐标差值。 float deltax, deltay; // 当前点与下一点间距离。 float sidelength; // 旋转角度的余弦值和正弦值。 float cosalpha, sinalpha; // 声明包围矩形。 BoundBox tmpbbox; // 当前点在新坐标系下的新坐标。 float curx, cury; for (idx = 0; idx < convexcst->count; idx++) { // 当前索引值加 1,求得下一点索引值。 nextidx = idx + 1; // 若当前点为点集中最后一点,则下一点为起始点。 if (nextidx == convexcst->count) nextidx = 0; // 计算当前点和下一点 x, y 坐标差值。 deltax = convexcst->tplData[nextidx * 2] - convexcst->tplData[idx * 2]; deltay = convexcst->tplData[nextidx * 2 + 1] - convexcst->tplData[idx * 2 + 1]; // 如果解的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (deltax < 0) { deltax = -deltax; deltay = -deltay; } // 计算当前点和下一点间距离。 sidelength = sqrtf(deltax * deltax + deltay * deltay); // 
计算旋转角度的余弦、正弦值。 cosalpha = deltax / sidelength; sinalpha = deltay / sidelength; // 根据计算得到的正弦值计算角度,将旋转矩阵信息存入到参数 rotateinfo[idx].cos = cosalpha; rotateinfo[idx].sin = sinalpha; rotateinfo[idx].radian = asin(sinalpha); // 每次均初始化。 tmpbbox.left = tmpbbox.bottom = SDR_LARGE_ENOUGH; tmpbbox.right = tmpbbox.top = -SDR_LARGE_ENOUGH; for (cstidx = 0; cstidx < convexcst->count; cstidx++) { // 读取坐标值和旋转信息值, // 并计算当前点在新坐标系下的新坐标。 curx = convexcst->tplData[2 * cstidx] * rotateinfo[idx].cos + convexcst->tplData[2 * cstidx + 1] * rotateinfo[idx].sin; cury = convexcst->tplData[2 * cstidx] * (-rotateinfo[idx].sin) + convexcst->tplData[2 * cstidx + 1] * rotateinfo[idx].cos; // 判断该坐标值的大小,和已经找到的极值做比较,更新极值。 tmpbbox.left = min(tmpbbox.left, curx); tmpbbox.right = max(tmpbbox.right, curx); tmpbbox.bottom = min(tmpbbox.bottom, cury); tmpbbox.top = max(tmpbbox.top, cury); } // 最值赋值 bbox[idx].left = tmpbbox.left; bbox[idx].right = tmpbbox.right; bbox[idx].bottom = tmpbbox.bottom; bbox[idx].top = tmpbbox.top; } // 运行完毕退出。 return NO_ERROR; } // Kernel 函数: _sdrComputeSDRKer(计算包围矩形中面积最小的) static __global__ void _sdrComputeSDRKer( int cstcnt, BoundBox bbox[], int *index) { // 计算当前线程的下标,该 Kernel 必须以单 Block 运行,因此不涉及到 Block 相 // 关的变量。 int idx = threadIdx.x; // 当前 Thread 处理的若干个矩形中找到的最小的矩形面积。 float cursdrarea = SDR_LARGE_ENOUGH; // 当前线程计算得到的矩形面积。 float curarea; // 当前线程记录的最小矩形面积的索引,初始化为 idx。 int cursdrindex = idx; // 当前线程对应的点计算得到的长宽。 float length1, length2; // 迭代处理该线程所要处理的所有矩形,这些矩形是间隔 blockDim.x 个索引的各个 // 矩形。 while (idx < cstcnt) { // 从 Global Memory 中读取极值,计算长宽。 length1 = bbox[idx].right - bbox[idx].left; length2 = bbox[idx].top - bbox[idx].bottom; // 计算当前的矩形面积。 curarea = length1 * length2; // 判断该面积的大小,和已经找到的最小面积做比较,更新最小面积及索引。 cursdrindex = (curarea <= cursdrarea) ? idx : cursdrindex; cursdrarea = min(curarea, cursdrarea); // 更新 idx,在下一轮迭代时计算下一个点。 idx += blockDim.x; } // 至此,所有 Thread 都得到了自己的局部最小面积及索引,现在需要将这些点放入 // Shared Memory 中,以便下一步进行归约处理。 // 声明 Shared Memory,并分配各个指针。 extern __shared__ float shdmem[]; float *shdarea = shdmem; int *shdidx = (int *)(shdarea + blockDim.x); // 将局部结果拷贝到 Shared Memory 中。 idx = threadIdx.x; shdarea[idx] = cursdrarea; shdidx[idx] = cursdrindex; // 同步所有线程,使初始化Shared Memory 的结果对所有线程可见。 __syncthreads(); // 下面进行折半归约迭代。这里要求 blockDim.x 必须为 2 的整数次幂。 int currdsize = blockDim.x / 2; // 和当前线程间隔 currdsize 位置处的索引。 int inidx; for (/* currdsize */; currdsize > 0; currdsize >>= 1) { if (idx < currdsize) { inidx = idx + currdsize; // 将两个局部结果归约成一个局部结果。 shdidx[idx] = (shdarea[idx] <= shdarea[inidx]) ? 
shdidx[idx] : shdidx[inidx]; shdarea[idx] = min(shdarea[idx], shdarea[inidx]); // Print the partial results for verification. #ifdef SDR_DEBUG_KERNEL_PRINT printf("Kernel[computeSDR]: ReduceSize %3d," "(%3d) CurSdrArea %7.3f CurSdrId %3d\n" "(%3d) CurReSdrArea %7.3f CurReSdrId %3d\n", currdsize, idx, shdarea[idx], shdidx[idx], inidx, shdarea[inidx], shdidx[inidx]); #endif } // Synchronize so that this round's reduction result is visible to all threads. __syncthreads(); } // Write the index of the minimum area to the Global Memory output; only the first thread does this. if (idx == 0) { index[0] = shdidx[idx]; // Debug print. #ifdef SDR_DEBUG_KERNEL_PRINT printf("Kernel[computeSDR]: SDR index %5d\n", index[0]); #endif } } // Host member method: sdrComputeSDR (find the smallest-area directed bounding rectangle) __host__ int SmallestDirRect::sdrComputeSDR( int cstcnt, BoundBox bbox[], int *index) { // Check the input and output pointers; return immediately if any is NULL. if (bbox == NULL || index == NULL) return NULL_POINTER; // If the number of points is less than 1, exit immediately. if (cstcnt < 1) return INVALID_DATA; // Compute the block and grid sizes needed to launch the kernel. size_t blocksize = DEF_BLOCK_1D; size_t gridsize = 1; // Shared memory size. int shdmemsize = DEF_BLOCK_1D * (sizeof (float) + sizeof (int)); // Launch the kernel to do the computation. _sdrComputeSDRKer<<<gridsize, blocksize, shdmemsize>>>( cstcnt, bbox, index); // Check whether the kernel executed correctly. if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // Done; return. return NO_ERROR; } // Host member method: sdrComputeSDRCpu (find the smallest-area directed bounding rectangle) __host__ int SmallestDirRect::sdrComputeSDRCpu( int cstcnt, BoundBox bbox[], int *index) { // Check the input and output pointers; return immediately if any is NULL. if (bbox == NULL || index == NULL) return NULL_POINTER; // If the number of points is less than 1, exit immediately. if (cstcnt < 1) return INVALID_DATA; // Loop index, initialized to 0. int idx = 0; // Smallest rectangle area found so far. float cursdrarea = SDR_LARGE_ENOUGH; // Area of the current rectangle. float curarea; // Index of the smallest rectangle area found so far, initialized to idx. int cursdrindex = idx; // Side lengths of the current rectangle. float length1, length2; for (idx = 0; idx < cstcnt; idx++) { // Read the extrema and compute the side lengths. length1 = bbox[idx].right - bbox[idx].left; length2 = bbox[idx].top - bbox[idx].bottom; // Compute the current rectangle area. curarea = length1 * length2; // Compare against the smallest area found so far and update the minimum and its index. cursdrindex = (curarea <= cursdrarea) ?
idx : cursdrindex; cursdrarea = min(curarea, cursdrarea); } // 输出赋值 index[0] = cursdrindex; // 运行完毕退出。 return NO_ERROR; } // 宏:FAIL_SDRPARAMONCVX_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_SDRPARAMONCVX_FREE do { \ if (devtemp != NULL) \ cudaFree(devtemp); \ } while (0) // Host 成员方法:sdrParamOnConvex(求凸壳点集的最小有向外接矩形的参数) __host__ int SmallestDirRect::sdrParamOnConvex( CoordiSet *convexcst, BoundBox *bbox, RotationInfo *rotinfo) { // 检查输入,输出是否为空。 if (convexcst == NULL || bbox == NULL || rotinfo == NULL) return NULL_POINTER; // 如果输入点集中不含有任何的坐标点,则直接退出。 if (convexcst->count < 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 cudaError_t cuerrcode; int errcode; // 用来记录最小有向外接矩形在整个结果中的索引。 int index = 0; // 中间变量的设备端数组。存放旋转矩阵信息,包围盒顶点,索引。 RotationInfo *rotateinfoDev = NULL; BoundBox *bboxDev = NULL; int *indexDev = NULL; // 中间变量申请 Device 内存空间,并将这些空间分配给各个中间变量。 float *devtemp = NULL; size_t datasize = (sizeof (RotationInfo) + sizeof (BoundBox)) * convexcst->count + sizeof (int); cuerrcode = cudaMalloc((void **)&devtemp, datasize); if (cuerrcode != cudaSuccess) { FAIL_SDRPARAMONCVX_FREE; return CUDA_ERROR; } // 为各个中间变量分配内存空间,采用这种一次申请一个大空间的做法是为了减少申 // 请内存的开销,同时也减少因内存对齐导致的内存浪费。 rotateinfoDev = (RotationInfo *)(devtemp); bboxDev = (BoundBox *)(rotateinfoDev + convexcst->count); indexDev = (int *)(bboxDev + convexcst->count); // 将输入坐标集拷贝到 device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) { FAIL_SDRPARAMONCVX_FREE; return errcode; } // 调用计算凸壳点集中每相邻两点的旋转矩阵信息,进而计算新坐标系下 // 凸壳的有向外接矩形的边界信息的函数。 errcode = this->sdrComputeBoundInfo(convexcst, rotateinfoDev, bboxDev); if (errcode != NO_ERROR) { FAIL_SDRPARAMONCVX_FREE; return errcode; } // 调用计算最小有向外接矩形的函数。 errcode = this->sdrComputeSDR(convexcst->count, bboxDev, indexDev); if (errcode != NO_ERROR) { FAIL_SDRPARAMONCVX_FREE; return errcode; } // 将最小有向外接矩形在所有结果中的索引,拷贝到 host 端。 cuerrcode = cudaMemcpy(&index, indexDev, sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_SDRPARAMONCVX_FREE; return CUDA_ERROR; } // 将最小有向外接矩形的四个顶点,拷贝到主存端。 cuerrcode = cudaMemcpy(bbox, &bboxDev[index], sizeof (BoundBox), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_SDRPARAMONCVX_FREE; return CUDA_ERROR; } // 将最小有向外接矩形的旋转信息,拷贝到主存端。 cuerrcode = cudaMemcpy(rotinfo, &rotateinfoDev[index], sizeof (RotationInfo), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_SDRPARAMONCVX_FREE; return CUDA_ERROR; } // 释放内存 cudaFree(devtemp); // 退出。 return NO_ERROR; } #undef FAIL_SDRPARAMONCVX_FREE // 宏:FAIL_SDRPARAMONCVXCPU_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_SDRPARAMONCVXCPU_FREE do { \ if (temp != NULL) \ delete temp; \ } while (0) // Host 成员方法:sdrParamOnConvexCpu(求凸壳点集的最小有向外接矩形的参数) __host__ int SmallestDirRect::sdrParamOnConvexCpu( CoordiSet *convexcst, BoundBox *bbox, RotationInfo *rotinfo) { // 检查输入,输出是否为空。 if (convexcst == NULL || bbox == NULL || rotinfo == NULL) return NULL_POINTER; // 如果输入点集中不含有任何的坐标点,则直接退出。 if (convexcst->count < 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 用来记录最小有向外接矩形在整个结果中的索引。 int index = 0; // 中间变量的设备端数组。存放旋转矩阵信息,包围盒顶点,索引。 RotationInfo *rotateinfoHost = NULL; BoundBox *bboxHost = NULL; int *indexHost = NULL; // 中间变量申请 Host 内存空间,并将这些空间分配给各个中间变量。 float *temp = NULL; size_t datasize = (sizeof (RotationInfo) + sizeof (BoundBox)) * convexcst->count + sizeof (int); // liuyao debug temp = new float[datasize]; if (temp == NULL) { return OUT_OF_MEM;; } // 为各个中间变量分配内存空间,采用这种一次申请一个大空间的做法是为了减少申 // 请内存的开销,同时也减少因内存对齐导致的内存浪费。 rotateinfoHost = 
(RotationInfo *)(temp); bboxHost = (BoundBox *)(rotateinfoHost + convexcst->count); indexHost = (int *)(bboxHost + convexcst->count); // 将输入坐标集拷贝到 Host 端。 errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) { FAIL_SDRPARAMONCVXCPU_FREE; return errcode; } // 调用计算凸壳点集中每相邻两点的旋转矩阵信息,进而计算新坐标系下 // 凸壳的有向外接矩形的边界信息的函数。 errcode = this->sdrComputeBoundInfoCpu(convexcst, rotateinfoHost, bboxHost); if (errcode != NO_ERROR) { FAIL_SDRPARAMONCVXCPU_FREE; return errcode; } // 调用计算最小有向外接矩形的函数。 errcode = this->sdrComputeSDRCpu(convexcst->count, bboxHost, indexHost); if (errcode != NO_ERROR) { FAIL_SDRPARAMONCVXCPU_FREE; return errcode; } // 输出赋值 index = indexHost[0]; bbox[0] = bboxHost[index]; rotinfo[0] = rotateinfoHost[index]; // 释放内存 delete temp; // 退出。 return NO_ERROR; } #undef FAIL_SDRPARAMONCVXCPU_FREE // 宏:FAIL_SDRONCVX_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_SDRONCVX_FREE do { \ if (!hostrect && recthost != NULL) \ delete [] recthost; \ } while (0) // Host 成员方法:smallestDirRectOnConvex(求凸壳点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectCpuOnConvex( CoordiSet *convexcst, Quadrangle *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (convexcst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (convexcst->count <= 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 cudaError_t cuerrcode; int errcode; // 定义 Host 端的输出数组指针,这里计算量比较小,所以统一采取在 host 端 // 根据最小有向包围矩形的顶点和旋转矩阵信息计算输出的包围矩形各个参数。 Quadrangle *recthost = NULL; // 判断输出矩形是否存储在 Device 端。若不是,则需要在 Host 端为输出矩形 // 申请一段空间;若该数组是在 Host 端,则直接使用。 if (hostrect) { // 如果在 Host 端,则将指针传给对应的 Host 端统一指针。 recthost = outrect; } else { // 为输入数组在 Host 端申请内存。 recthost = new Quadrangle[1]; // 出错则报错返回。 if (recthost == NULL) { return OUT_OF_MEM; } // 将输出数组拷贝到 Host 端内存。 cuerrcode = cudaMemcpy(recthost, outrect, sizeof (Quadrangle), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 如果输入凸壳点集中只含有 2 个坐标点,则特殊处理。 if (convexcst->count == 2) { // 四个顶点赋值为两个坐标点的坐标。四个顶点有两对重合。 recthost->points[0][0] = recthost->points[1][0] = convexcst->tplData[0]; recthost->points[0][1] = recthost->points[1][1] = convexcst->tplData[1]; recthost->points[2][0] = recthost->points[3][0] = convexcst->tplData[2]; recthost->points[2][1] = recthost->points[3][1] = convexcst->tplData[3]; // 计算两点坐标差值。 int deltax = convexcst->tplData[0] - convexcst->tplData[2]; int deltay = convexcst->tplData[1] - convexcst->tplData[3]; // 如果解的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (deltax < 0) { deltax = -deltax; deltay = -deltay; } // 计算当前点和下一点间距离。 float sidelength = sqrtf(deltax * deltax + deltay * deltay); // 计算旋转角度的正弦值。 float sinalpha = deltay / sidelength; // 该角度的弧度值。从而计算最小有向外接矩形的角度。 float radian = asin(sinalpha); recthost->angle = RECT_RAD_TO_DEG(radian); // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (Quadrangle), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 特殊情况,不用计算下列步骤,退出。 return NO_ERROR; } // 局部变量,用来记录面积最小的有向外接矩形和对应的旋转信息。 BoundBox bbox; RotationInfo rotinfo; // 将输入坐标集拷贝到 Host 端。 errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) return errcode; // 调用求凸壳点集的最小有向外接矩形参数的函数。 errcode = this->sdrParamOnConvexCpu(convexcst, &bbox, &rotinfo); if (errcode != NO_ERROR) return errcode; // 计算最小有向外接矩形的角度。 recthost->angle = RECT_RAD_TO_DEG(rotinfo.radian); // 
计算最小有向外接矩形的边界点值。 float points[4][2]; points[0][0] = bbox.left; points[0][1] = bbox.top; points[1][0] = bbox.right; points[1][1] = bbox.top; points[2][0] = bbox.right; points[2][1] = bbox.bottom; points[3][0] = bbox.left; points[3][1] = bbox.bottom; // 打印临时顶点信息。 #ifdef SDR_DEBUG_KERNEL_PRINT cout << "temprect info: " << endl; cout << points[0][0] << "," << points[0][1] << endl; cout << points[1][0] << "," << points[1][1] << endl; cout << points[2][0] << "," << points[2][1] << endl; cout << points[3][0] << "," << points[3][1] << endl; #endif // 临时存放的四个顶点值。 float tempvertex[4][2]; // 计算旋转后最小有向外接矩形的四个顶点值。 RECT_ROTATE_POINT(points[0], tempvertex[0], rotinfo); RECT_ROTATE_POINT(points[1], tempvertex[1], rotinfo); RECT_ROTATE_POINT(points[2], tempvertex[2], rotinfo); RECT_ROTATE_POINT(points[3], tempvertex[3], rotinfo); // 求中心坐标。 float boxcenter[2]; boxcenter[0] = (tempvertex[0][0] + tempvertex[1][0] + tempvertex[2][0] + tempvertex[3][0]) / 4.0f; boxcenter[1] = (tempvertex[0][1] + tempvertex[1][1] + tempvertex[2][1] + tempvertex[3][1]) / 4.0f; // 计算所得的包围盒的四个顶点逆时针排列,寻找右上点的索引值。 int rightupidx; // 如果是垂直于坐标轴的菱形,也就是对角的 x 坐标相等,需要特殊处理。 // 如果第 0 个和第 2 个点的 x 坐标相等。 if (tempvertex[0][0] == tempvertex[2][0]) { // 如果第 0 个的 y 坐标更大。 if (tempvertex[0][1] > boxcenter[1]) // 右上点的索引值为 0。 rightupidx = 0; // 如果第 2 个的 y 坐标更大。 else // 右上点的索引值为 2。 rightupidx = 2; // 如果第 1 个和第 3 个点的 x 坐标相等。 } else if (tempvertex[1][0] == tempvertex[3][0]) { // 如果第 1 个的 y 坐标更大。 if (tempvertex[1][1] > boxcenter[1]) // 右上点的索引值为 1。 rightupidx = 1; // 如果第 3 个的 y 坐标更大。 else // 右上点的索引值为 3。 rightupidx = 3; // 如果没有 x 或者 y 坐标相等的特殊情况。 } else { // 如果第 0 个点的 x,y 坐标均大于中心点坐标。 if (tempvertex[0][0] > boxcenter[0] && tempvertex[0][1] > boxcenter[1]) // 右上点的索引值为 0。 rightupidx = 0; // 如果第 1 个点的 x,y 坐标均大于中心点坐标。 else if (tempvertex[1][0] > boxcenter[0] && tempvertex[1][1] > boxcenter[1]) // 右上点的索引值为 1。 rightupidx = 1; // 如果第 2 个点的 x,y 坐标均大于中心点坐标。 else if (tempvertex[2][0] > boxcenter[0] && tempvertex[2][1] > boxcenter[1]) // 右上点的索引值为 2。 rightupidx = 2; // 如果第 3 个点的 x,y 坐标均大于中心点坐标。 else // 右上点的索引值为 3。 rightupidx = 3; } // 按照算得的右上点索引值,对四个顶点的 x,y 坐标进行分别的向下向上取整处理 // 右上点,x 向上取整,y 向上取整。 recthost->points[rightupidx][0] = (int)ceil(tempvertex[rightupidx][0]); recthost->points[rightupidx][1] = (int)ceil(tempvertex[rightupidx][1]); // 右下点,x 向上取整,y 向下取整。 recthost->points[(rightupidx + 1) % 4][0] = (int)ceil(tempvertex[(rightupidx + 1) % 4][0]); recthost->points[(rightupidx + 1) % 4][1] = (int)floor(tempvertex[(rightupidx + 1) % 4][1]); // 左下点,x 向下取整,y 向下取整。 recthost->points[(rightupidx + 2) % 4][0] = (int)floor(tempvertex[(rightupidx + 2) % 4][0]); recthost->points[(rightupidx + 2) % 4][1] = (int)floor(tempvertex[(rightupidx + 2) % 4][1]); // 左上点,x 向下取整,y 向上取整。 recthost->points[(rightupidx + 3) % 4][0] = (int)ceil(tempvertex[(rightupidx + 3) % 4][0]); recthost->points[(rightupidx + 3) % 4][1] = (int)floor(tempvertex[(rightupidx + 3) % 4][1]); // 计算矩形的长宽。 float length1 = bbox.right - bbox.left; float length2 = bbox.top - bbox.bottom; // 角度是跟 length1 边平行的,需求为角度的方向平行于长边。当 length1 不是 // 长边时,做出调整。 if (length1 < length2) { // 旋转角度为负时,加上 90 度。 if (recthost->angle < 0.0f) recthost->angle += 90.0f; // 旋转角度为正时,减去 90 度。 else recthost->angle -= 90.0f; } // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (Quadrangle), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 退出。 
return NO_ERROR; } // Host 成员方法:smallestDirRectOnConvex(求凸壳点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectOnConvex( CoordiSet *convexcst, Quadrangle *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (convexcst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (convexcst->count <= 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 cudaError_t cuerrcode; int errcode; // 定义 Host 端的输出数组指针,这里计算量比较小,所以统一采取在 host 端 // 根据最小有向包围矩形的顶点和旋转矩阵信息计算输出的包围矩形各个参数。 Quadrangle *recthost = NULL; // 判断输出矩形是否存储在 Device 端。若不是,则需要在 Host 端为输出矩形 // 申请一段空间;若该数组是在 Host 端,则直接使用。 if (hostrect) { // 如果在 Host 端,则将指针传给对应的 Host 端统一指针。 recthost = outrect; } else { // 为输入数组在 Host 端申请内存。 recthost = new Quadrangle[1]; // 出错则报错返回。 if (recthost == NULL) { return OUT_OF_MEM; } // 将输出数组拷贝到 Host 端内存。 cuerrcode = cudaMemcpy(recthost, outrect, sizeof (Quadrangle), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 如果输入凸壳点集中只含有 2 个坐标点,则特殊处理。 if (convexcst->count == 2) { // 四个顶点赋值为两个坐标点的坐标。四个顶点有两对重合。 recthost->points[0][0] = recthost->points[1][0] = convexcst->tplData[0]; recthost->points[0][1] = recthost->points[1][1] = convexcst->tplData[1]; recthost->points[2][0] = recthost->points[3][0] = convexcst->tplData[2]; recthost->points[2][1] = recthost->points[3][1] = convexcst->tplData[3]; // 计算两点坐标差值。 int deltax = convexcst->tplData[0] - convexcst->tplData[2]; int deltay = convexcst->tplData[1] - convexcst->tplData[3]; // 如果解的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (deltax < 0) { deltax = -deltax; deltay = -deltay; } // 计算当前点和下一点间距离。 float sidelength = sqrtf(deltax * deltax + deltay * deltay); // 计算旋转角度的正弦值。 float sinalpha = deltay / sidelength; // 该角度的弧度值。从而计算最小有向外接矩形的角度。 float radian = asin(sinalpha); recthost->angle = RECT_RAD_TO_DEG(radian); // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (Quadrangle), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 特殊情况,不用计算下列步骤,退出。 return NO_ERROR; } // 局部变量,用来记录面积最小的有向外接矩形和对应的旋转信息。 BoundBox bbox; RotationInfo rotinfo; // 将输入坐标集拷贝到 device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) return errcode; // 调用求凸壳点集的最小有向外接矩形参数的函数。 errcode = this->sdrParamOnConvex(convexcst, &bbox, &rotinfo); if (errcode != NO_ERROR) return errcode; // 计算最小有向外接矩形的角度。 recthost->angle = RECT_RAD_TO_DEG(rotinfo.radian); // 计算最小有向外接矩形的边界点值。 float points[4][2]; points[0][0] = bbox.left; points[0][1] = bbox.top; points[1][0] = bbox.right; points[1][1] = bbox.top; points[2][0] = bbox.right; points[2][1] = bbox.bottom; points[3][0] = bbox.left; points[3][1] = bbox.bottom; // 打印临时顶点信息。 #ifdef SDR_DEBUG_KERNEL_PRINT cout << "temprect info: " << endl; cout << points[0][0] << "," << points[0][1] << endl; cout << points[1][0] << "," << points[1][1] << endl; cout << points[2][0] << "," << points[2][1] << endl; cout << points[3][0] << "," << points[3][1] << endl; #endif // 临时存放的四个顶点值。 float tempvertex[4][2]; // 计算旋转后最小有向外接矩形的四个顶点值。 RECT_ROTATE_POINT(points[0], tempvertex[0], rotinfo); RECT_ROTATE_POINT(points[1], tempvertex[1], rotinfo); RECT_ROTATE_POINT(points[2], tempvertex[2], rotinfo); RECT_ROTATE_POINT(points[3], tempvertex[3], rotinfo); // 求中心坐标。 float boxcenter[2]; boxcenter[0] = (tempvertex[0][0] + tempvertex[1][0] + tempvertex[2][0] + tempvertex[3][0]) / 4.0f; 
boxcenter[1] = (tempvertex[0][1] + tempvertex[1][1] + tempvertex[2][1] + tempvertex[3][1]) / 4.0f; // 计算所得的包围盒的四个顶点逆时针排列,寻找右上点的索引值。 int rightupidx; // 如果是垂直于坐标轴的菱形,也就是对角的 x 坐标相等,需要特殊处理。 // 如果第 0 个和第 2 个点的 x 坐标相等。 if (tempvertex[0][0] == tempvertex[2][0]) { // 如果第 0 个的 y 坐标更大。 if (tempvertex[0][1] > boxcenter[1]) // 右上点的索引值为 0。 rightupidx = 0; // 如果第 2 个的 y 坐标更大。 else // 右上点的索引值为 2。 rightupidx = 2; // 如果第 1 个和第 3 个点的 x 坐标相等。 } else if (tempvertex[1][0] == tempvertex[3][0]) { // 如果第 1 个的 y 坐标更大。 if (tempvertex[1][1] > boxcenter[1]) // 右上点的索引值为 1。 rightupidx = 1; // 如果第 3 个的 y 坐标更大。 else // 右上点的索引值为 3。 rightupidx = 3; // 如果没有 x 或者 y 坐标相等的特殊情况。 } else { // 如果第 0 个点的 x,y 坐标均大于中心点坐标。 if (tempvertex[0][0] > boxcenter[0] && tempvertex[0][1] > boxcenter[1]) // 右上点的索引值为 0。 rightupidx = 0; // 如果第 1 个点的 x,y 坐标均大于中心点坐标。 else if (tempvertex[1][0] > boxcenter[0] && tempvertex[1][1] > boxcenter[1]) // 右上点的索引值为 1。 rightupidx = 1; // 如果第 2 个点的 x,y 坐标均大于中心点坐标。 else if (tempvertex[2][0] > boxcenter[0] && tempvertex[2][1] > boxcenter[1]) // 右上点的索引值为 2。 rightupidx = 2; // 如果第 3 个点的 x,y 坐标均大于中心点坐标。 else // 右上点的索引值为 3。 rightupidx = 3; } // 按照算得的右上点索引值,对四个顶点的 x,y 坐标进行分别的向下向上取整处理 // 右上点,x 向上取整,y 向上取整。 recthost->points[rightupidx][0] = (int)ceil(tempvertex[rightupidx][0]); recthost->points[rightupidx][1] = (int)ceil(tempvertex[rightupidx][1]); // 右下点,x 向上取整,y 向下取整。 recthost->points[(rightupidx + 1) % 4][0] = (int)ceil(tempvertex[(rightupidx + 1) % 4][0]); recthost->points[(rightupidx + 1) % 4][1] = (int)floor(tempvertex[(rightupidx + 1) % 4][1]); // 左下点,x 向下取整,y 向下取整。 recthost->points[(rightupidx + 2) % 4][0] = (int)floor(tempvertex[(rightupidx + 2) % 4][0]); recthost->points[(rightupidx + 2) % 4][1] = (int)floor(tempvertex[(rightupidx + 2) % 4][1]); // 左上点,x 向下取整,y 向上取整。 recthost->points[(rightupidx + 3) % 4][0] = (int)ceil(tempvertex[(rightupidx + 3) % 4][0]); recthost->points[(rightupidx + 3) % 4][1] = (int)floor(tempvertex[(rightupidx + 3) % 4][1]); // 计算矩形的长宽。 float length1 = bbox.right - bbox.left; float length2 = bbox.top - bbox.bottom; // 角度是跟 length1 边平行的,需求为角度的方向平行于长边。当 length1 不是 // 长边时,做出调整。 if (length1 < length2) { // 旋转角度为负时,加上 90 度。 if (recthost->angle < 0.0f) recthost->angle += 90.0f; // 旋转角度为正时,减去 90 度。 else recthost->angle -= 90.0f; } // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (Quadrangle), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRectCpuOnConvex(求凸壳点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectCpuOnConvex( CoordiSet *convexcst, DirectedRect *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (convexcst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (convexcst->count <= 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 cudaError_t cuerrcode; int errcode; // 定义 Host 端的输出数组指针,这里计算量比较小,所以统一采取在 host 端 // 根据最小有向包围矩形的顶点和旋转矩阵信息计算输出的包围矩形各个参数。 DirectedRect *recthost = NULL; // 判断输出矩形是否存储在 Device 端。若不是,则需要在 Host 端为输出矩形 // 申请一段空间;若该数组是在 Host 端,则直接使用。 if (hostrect) { // 如果在 Host 端,则将指针传给对应的 Host 端统一指针。 recthost = outrect; } else { // 为输入数组在 Host 端申请内存。 recthost = new DirectedRect[1]; // 出错则报错返回。 if (recthost == NULL) { return OUT_OF_MEM; } // 将输出数组拷贝到 Host 端内存。 cuerrcode = cudaMemcpy(recthost, outrect, sizeof (DirectedRect), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) 
{ // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 如果输入凸壳点集中只含有 2 个坐标点,则特殊处理。 if (convexcst->count == 2) { // 中心点坐标为两个点的中点。 recthost->centerPoint[0] = convexcst->tplData[0] + convexcst->tplData[2]; recthost->centerPoint[1] = convexcst->tplData[1] + convexcst->tplData[3]; // 计算两点坐标差值。 int deltax = convexcst->tplData[0] - convexcst->tplData[2]; int deltay = convexcst->tplData[1] - convexcst->tplData[3]; // 如果解的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (deltax < 0) { deltax = -deltax; deltay = -deltay; } // 计算当前点和下一点间距离。 float sidelength = sqrtf(deltax * deltax + deltay * deltay); // 计算旋转角度的正弦值。 float sinalpha = deltay / sidelength; // 该角度的弧度值。从而计算最小有向外接矩形的角度。 float radian = asin(sinalpha); recthost->angle = RECT_RAD_TO_DEG(radian); // 该包围矩形的边长。 recthost->length1 = (int)sidelength; recthost->length2 = 0; // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (DirectedRect), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 特殊情况,不用计算下列步骤,退出。 return NO_ERROR; } // 局部变量,用来记录面积最小的有向外接矩形和对应的旋转信息。 BoundBox bbox; RotationInfo rotinfo; // 将输入坐标集拷贝到 Host 端。 errcode = CoordiSetBasicOp::copyToHost(convexcst); if (errcode != NO_ERROR) return errcode; // 调用求凸壳点集的最小有向外接矩形参数的函数。 errcode = this->sdrParamOnConvexCpu(convexcst, &bbox, &rotinfo); if (errcode != NO_ERROR) return errcode; // 计算最小有向外接矩形的角度。 recthost->angle = RECT_RAD_TO_DEG(rotinfo.radian); // 计算中心坐标。 float boxcenter[2]; boxcenter[0] = (bbox.left + bbox.right) / 2.0f; boxcenter[1] = (bbox.top + bbox.bottom) / 2.0f; RECT_ROTATE_POINT(boxcenter, recthost->centerPoint, rotinfo); // 计算矩形的长宽。 recthost->length1 = (int)(bbox.right - bbox.left); recthost->length2 = (int)(bbox.top - bbox.bottom); // 选择长的作为矩形的长。 if (recthost->length1 < recthost->length2) { // 长短边进行交换。 int length_temp; length_temp = recthost->length1; recthost->length1 = recthost->length2; recthost->length2 = length_temp; // 角度是跟 length1 边平行的,需求为角度的方向平行于长边。当 length1 // 不是长边时,做出调整。 // 旋转角度为负时,加上 90 度。 if (recthost->angle < 0.0f) recthost->angle += 90.0f; // 旋转角度为正时,减去 90 度。 else recthost->angle -= 90.0f; } // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (DirectedRect), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRectOnConvex(求凸壳点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectOnConvex( CoordiSet *convexcst, DirectedRect *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (convexcst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (convexcst->count <= 1 || convexcst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 cudaError_t cuerrcode; int errcode; // 定义 Host 端的输出数组指针,这里计算量比较小,所以统一采取在 host 端 // 根据最小有向包围矩形的顶点和旋转矩阵信息计算输出的包围矩形各个参数。 DirectedRect *recthost = NULL; // 判断输出矩形是否存储在 Device 端。若不是,则需要在 Host 端为输出矩形 // 申请一段空间;若该数组是在 Host 端,则直接使用。 if (hostrect) { // 如果在 Host 端,则将指针传给对应的 Host 端统一指针。 recthost = outrect; } else { // 为输入数组在 Host 端申请内存。 recthost = new DirectedRect[1]; // 出错则报错返回。 if (recthost == NULL) { return OUT_OF_MEM; } // 将输出数组拷贝到 Host 端内存。 cuerrcode = cudaMemcpy(recthost, outrect, sizeof (DirectedRect), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 
FAIL_SDRONCVX_FREE; return cuerrcode; } } // 如果输入凸壳点集中只含有 2 个坐标点,则特殊处理。 if (convexcst->count == 2) { // 中心点坐标为两个点的中点。 recthost->centerPoint[0] = convexcst->tplData[0] + convexcst->tplData[2]; recthost->centerPoint[1] = convexcst->tplData[1] + convexcst->tplData[3]; // 计算两点坐标差值。 int deltax = convexcst->tplData[0] - convexcst->tplData[2]; int deltay = convexcst->tplData[1] - convexcst->tplData[3]; // 如果解的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (deltax < 0) { deltax = -deltax; deltay = -deltay; } // 计算当前点和下一点间距离。 float sidelength = sqrtf(deltax * deltax + deltay * deltay); // 计算旋转角度的正弦值。 float sinalpha = deltay / sidelength; // 该角度的弧度值。从而计算最小有向外接矩形的角度。 float radian = asin(sinalpha); recthost->angle = RECT_RAD_TO_DEG(radian); // 该包围矩形的边长。 recthost->length1 = (int)sidelength; recthost->length2 = 0; // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (DirectedRect), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 特殊情况,不用计算下列步骤,退出。 return NO_ERROR; } // 局部变量,用来记录面积最小的有向外接矩形和对应的旋转信息。 BoundBox bbox; RotationInfo rotinfo; // 将输入坐标集拷贝到 device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(convexcst); if (errcode != NO_ERROR) return errcode; // 调用求凸壳点集的最小有向外接矩形参数的函数。 errcode = this->sdrParamOnConvex(convexcst, &bbox, &rotinfo); if (errcode != NO_ERROR) return errcode; // 计算最小有向外接矩形的角度。 recthost->angle = RECT_RAD_TO_DEG(rotinfo.radian); // 计算中心坐标。 float boxcenter[2]; boxcenter[0] = (bbox.left + bbox.right) / 2.0f; boxcenter[1] = (bbox.top + bbox.bottom) / 2.0f; RECT_ROTATE_POINT(boxcenter, recthost->centerPoint, rotinfo); // 计算矩形的长宽。 recthost->length1 = (int)(bbox.right - bbox.left); recthost->length2 = (int)(bbox.top - bbox.bottom); // 选择长的作为矩形的长。 if (recthost->length1 < recthost->length2) { // 长短边进行交换。 int length_temp; length_temp = recthost->length1; recthost->length1 = recthost->length2; recthost->length2 = length_temp; // 角度是跟 length1 边平行的,需求为角度的方向平行于长边。当 length1 // 不是长边时,做出调整。 // 旋转角度为负时,加上 90 度。 if (recthost->angle < 0.0f) recthost->angle += 90.0f; // 旋转角度为正时,减去 90 度。 else recthost->angle -= 90.0f; } // 如果输出矩形在 Device 端,将结果拷贝到输出。 if (!hostrect) { // 将结果从 Host 端内存拷贝到输出。 cuerrcode = cudaMemcpy(outrect, recthost, sizeof (DirectedRect), cudaMemcpyHostToDevice); // 出错则释放之前申请的内存。 if (cuerrcode != cudaSuccess) { // 释放之前申请的内存。 FAIL_SDRONCVX_FREE; return cuerrcode; } } // 释放之前申请的 Host 端内存。需要判断输出参数是否在 host 端。 FAIL_SDRONCVX_FREE; // 退出。 return NO_ERROR; } #undef FAIL_SDRONCVX_FREE // 宏:FAIL_SDRONCST_FREE // 该宏用于完成下面函数运行出现错误退出前的内存清理工作。 #define FAIL_SDRONCST_FREE do { \ if (convexcst != NULL) \ CoordiSetBasicOp::deleteCoordiSet(convexcst); \ } while (0) // Host 成员方法:smallestDirRectCpu(求给定点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectCpu( CoordiSet *cst, Quadrangle *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (cst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (cst->count <= 1 || cst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 凸壳点集。 CoordiSet *convexcst; // 创建凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 宏:SDR_USE_CPU_CONVEXHULL // 该开关宏用于指示是否在后续步骤中使用 CPU 版本的 ConvexHull 函数。 #define SDR_USE_CPU_CONVEXHULL // 初始化 LABEL 数组。 #ifdef SDR_USE_CPU_CONVEXHULL // 给该凸壳点集开辟合适的内存空间。 //errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); errcode 
= CoordiSetBasicOp::makeAtHost(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。 errcode = this->cvHull.convexHullCpu(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #else // 给该凸壳点集开辟合适的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。GPU 版本 errcode = this->cvHull.convexHull(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #endif #undef SDR_USE_CPU_CONVEXHULL // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRectCpuOnConvex(convexcst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 清除凸壳点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(convexcst); // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRect(求给定点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRect( CoordiSet *cst, Quadrangle *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (cst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (cst->count <= 1 || cst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 凸壳点集。 CoordiSet *convexcst; // 创建凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 宏:SDR_USE_CPU_CONVEXHULL // 该开关宏用于指示是否在后续步骤中使用 CPU 版本的 ConvexHull 函数。 //#define SDR_USE_CPU_CONVEXHULL // 初始化 LABEL 数组。 #ifdef SDR_USE_CPU_CONVEXHULL // 给该凸壳点集开辟合适的内存空间。 //errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); errcode = CoordiSetBasicOp::makeAtHost(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。 errcode = this->cvHull.convexHullCpu(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #else // 给该凸壳点集开辟合适的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。GPU 版本 errcode = this->cvHull.convexHull(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #endif #undef SDR_USE_CPU_CONVEXHULL // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRectOnConvex(convexcst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 清除凸壳点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(convexcst); // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRectCpu(求给定点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectCpu( CoordiSet *cst, DirectedRect *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (cst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (cst->count <= 1 || cst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 凸壳点集。 CoordiSet *convexcst; // 创建凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 宏:SDR_USE_CPU_CONVEXHULL // 该开关宏用于指示是否在后续步骤中使用 CPU 版本的 ConvexHull 函数。 //#define SDR_USE_CPU_CONVEXHULL // 初始化 LABEL 数组。 #ifdef SDR_USE_CPU_CONVEXHULL // 给该凸壳点集开辟合适的内存空间。 //errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); errcode = CoordiSetBasicOp::makeAtHost(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。 errcode = this->cvHull.convexHullCpu(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #else // 给该凸壳点集开辟合适的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); 
//errcode = CoordiSetBasicOp::makeAtHost(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。 errcode = this->cvHull.convexHull(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #endif #undef SDR_USE_CPU_CONVEXHULL // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRectCpuOnConvex(convexcst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 清除凸壳点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(convexcst); // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRect(求给定点集的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRect( CoordiSet *cst, DirectedRect *outrect, bool hostrect) { // 检查输入,输出是否为空。 if (cst == NULL || outrect == NULL) return NULL_POINTER; // 如果输入点集中不包含任何点或者只含 1 个坐标点,则报错退出。 if (cst->count <= 1 || cst->tplData == NULL) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 凸壳点集。 CoordiSet *convexcst; // 创建凸壳点集。 errcode = CoordiSetBasicOp::newCoordiSet(&convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 宏:SDR_USE_CPU_CONVEXHULL // 该开关宏用于指示是否在后续步骤中使用 CPU 版本的 ConvexHull 函数。 //#define SDR_USE_CPU_CONVEXHULL // 初始化 LABEL 数组。 #ifdef SDR_USE_CPU_CONVEXHULL // 给该凸壳点集开辟合适的内存空间。 //errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); errcode = CoordiSetBasicOp::makeAtHost(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。 errcode = this->cvHull.convexHullCpu(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #else // 给该凸壳点集开辟合适的内存空间。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(convexcst, cst->count); //errcode = CoordiSetBasicOp::makeAtHost(convexcst, cst->count); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 调用求凸壳的函数。 errcode = this->cvHull.convexHull(cst, convexcst); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } #endif #undef SDR_USE_CPU_CONVEXHULL // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRectOnConvex(convexcst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONCST_FREE; return errcode; } // 清除凸壳点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(convexcst); // 退出。 return NO_ERROR; } #undef FAIL_SDRONCST_FREE // 宏:FAIL_SDRONIMG_FREE // 该宏用于完成下面函数运行出现错误退出前的内存清理工作。 #define FAIL_SDRONIMG_FREE do { \ if (cst != NULL) \ CoordiSetBasicOp::deleteCoordiSet(cst); \ } while (0) // Host 成员方法:smallestDirRectCpu(求像素值给定的对象的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectCpu( Image *inimg, Quadrangle *outrect, bool hostrect) { // 检查输入图像和输出包围矩形是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outrect == NULL) return NULL_POINTER; // 局部变量,错误码。 int errcode; // 新建点集。 CoordiSet *cst; // 构造点集。 errcode = CoordiSetBasicOp::newCoordiSet(&cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用图像转点集的函数。 errcode = this->imgCvt.imgConvertToCst(inimg, cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRectCpu(cst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 清除点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(cst); // 退出。 return NO_ERROR; } __host__ int SmallestDirRect::smallestDirRect( Image *inimg, Quadrangle *outrect, bool hostrect) { // 检查输入图像和输出包围矩形是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outrect == NULL) return NULL_POINTER; // 局部变量,错误码。 int errcode; // 新建点集。 CoordiSet *cst; // 构造点集。 errcode = CoordiSetBasicOp::newCoordiSet(&cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 
调用图像转点集的函数。 errcode = this->imgCvt.imgConvertToCst(inimg, cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRect(cst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 清除点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(cst); // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRectCpu(求像素值给定的对象的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRectCpu( Image *inimg, DirectedRect *outrect, bool hostrect) { // 检查输入图像和输出包围矩形是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outrect == NULL) return NULL_POINTER; // 局部变量,错误码。 int errcode; // 新建点集。 CoordiSet *cst; // 构造点集。 errcode = CoordiSetBasicOp::newCoordiSet(&cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用图像转点集的函数。 errcode = this->imgCvt.imgConvertToCst(inimg, cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRectCpu(cst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 清除点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(cst); // 退出。 return NO_ERROR; } // Host 成员方法:smallestDirRect(求像素值给定的对象的最小有向外接矩形) __host__ int SmallestDirRect::smallestDirRect( Image *inimg, DirectedRect *outrect, bool hostrect) { // 检查输入图像和输出包围矩形是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outrect == NULL) return NULL_POINTER; // 局部变量,错误码。 int errcode; // 新建点集。 CoordiSet *cst; // 构造点集。 errcode = CoordiSetBasicOp::newCoordiSet(&cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用图像转点集的函数。 errcode = this->imgCvt.imgConvertToCst(inimg, cst); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 调用求给定凸壳点集的最小有向外接矩形的函数。 errcode = smallestDirRect(cst, outrect, hostrect); if (errcode != NO_ERROR) { FAIL_SDRONIMG_FREE; return errcode; } // 清除点集所占用的内容空间。 CoordiSetBasicOp::deleteCoordiSet(cst); // 退出。 return NO_ERROR; } #undef FAIL_SDRONIMG_FREE
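// ---------------------------------------------------------------------------
// Note on the corner rounding above: the per-corner ceil/floor convention described in the
// comments (top-right: ceil x / ceil y, bottom-right: ceil x / floor y, bottom-left: floor x /
// floor y, top-left: floor x / ceil y) amounts to snapping every float corner to the integer
// pixel grid away from the box center, so the integer quadrangle never shrinks the box.
// A minimal standalone sketch of that idea (hypothetical helper, not the class's code; the
// math functions are assumed available through this file's existing headers, since ceil and
// floor are already used above). It skips the special handling of axis-aligned corners.
static void roundCornersOutwardSketch(const float corner[4][2], const float center[2],
                                      int out[4][2])
{
    for (int i = 0; i < 4; i++) {
        // x: ceil on the right side of the center, floor on the left side.
        out[i][0] = (corner[i][0] > center[0]) ?
                    (int)ceil(corner[i][0]) : (int)floor(corner[i][0]);
        // y: ceil above the center, floor below it.
        out[i][1] = (corner[i][1] > center[1]) ?
                    (int)ceil(corner[i][1]) : (int)floor(corner[i][1]);
    }
}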
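// ---------------------------------------------------------------------------
// The two-point special case handled above reduces to plain segment geometry. The sketch
// below is a hypothetical standalone version of that computation, following the comments
// above: the center is the true midpoint (coordinate sum divided by two), the angle is the
// segment's inclination after mirroring the direction into the right half-plane, length1 is
// the segment length and length2 degenerates to zero. DirectedRectSketch only mimics the
// fields used above; it is not the framework's DirectedRect, and the two points are assumed
// to be distinct.
struct DirectedRectSketch { float centerPoint[2]; float angle; int length1; int length2; };

static void twoPointDirectedRectSketch(const int p0[2], const int p1[2],
                                       DirectedRectSketch *out)
{
    // Midpoint of the two points.
    out->centerPoint[0] = (p0[0] + p1[0]) / 2.0f;
    out->centerPoint[1] = (p0[1] + p1[1]) / 2.0f;

    // Mirror the direction into the right half-plane so the angle stays within [-90, 90].
    int deltax = p0[0] - p1[0];
    int deltay = p0[1] - p1[1];
    if (deltax < 0) { deltax = -deltax; deltay = -deltay; }

    // Segment length and its inclination against the x axis, converted to degrees
    // (presumably what the RECT_RAD_TO_DEG macro used above does).
    float sidelength = sqrtf((float)(deltax * deltax + deltay * deltay));
    float radian = asinf(deltay / sidelength);
    out->angle = radian * 180.0f / 3.14159265f;

    // The long side is the segment itself; the short side collapses to zero.
    out->length1 = (int)sidelength;
    out->length2 = 0;
}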
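// ---------------------------------------------------------------------------
// Hypothetical call sequence for the two image entry points defined above (a default
// SmallestDirRect constructor and the NO_ERROR code are assumed from the surrounding
// framework; hostrect = true means the output structure lives in host memory, false means
// it lives in device memory and is copied as shown above).
static int smallestDirRectUsageSketch(Image *inimg, DirectedRect *dirrect, Quadrangle *quad)
{
    SmallestDirRect sdr;

    // Result reported as (center, angle, length1 >= length2).
    int errcode = sdr.smallestDirRect(inimg, dirrect, true /* host pointer */);
    if (errcode != NO_ERROR)
        return errcode;

    // The same rectangle reported as four integer corner points.
    return sdr.smallestDirRect(inimg, quad, true /* host pointer */);
}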
#include <stdio.h> #include <map> #include <algorithm> #include <unistd.h> // usleep #include <ctype.h> // tolower #include "cuda_helper.h" #include "salsa_kernel.h" #include "nv_kernel2.h" #include "titan_kernel.h" #include "nv_kernel.h" #include "kepler_kernel.h" #include "fermi_kernel.h" #include "test_kernel.h" #include "miner.h" #if defined(_WIN64) || defined(__x86_64__) || defined(__64BIT__) #define MAXMEM 0x300000000ULL // 12 GB (the largest Kepler) #else #define MAXMEM 0xFFFFFFFFULL // nearly 4 GB (32 bit limitations) #endif // require CUDA 5.5 driver API #define DMAJ 5 #define DMIN 5 // define some error checking macros #define DELIMITER '/' #define __FILENAME__ ( strrchr(__FILE__, DELIMITER) != NULL ? strrchr(__FILE__, DELIMITER)+1 : __FILE__ ) #undef checkCudaErrors #define checkCudaErrors(x) \ { \ cudaGetLastError(); \ x; \ cudaError_t err = cudaGetLastError(); \ if (err != cudaSuccess && !abort_flag) \ applog(LOG_ERR, "GPU #%d: Err %d: %s (%s:%d)", device_map[thr_id], err, cudaGetErrorString(err), __FILENAME__, __LINE__); \ } // some globals containing pointers to device memory (for chunked allocation) // [MAX_GPUS] indexes up to MAX_GPUS threads (0...MAX_GPUS-1) int MAXWARPS[MAX_GPUS]; uint32_t* h_V[MAX_GPUS][TOTAL_WARP_LIMIT*64]; // NOTE: the *64 prevents buffer overflow for --keccak uint32_t h_V_extra[MAX_GPUS][TOTAL_WARP_LIMIT*64]; // with really large kernel launch configurations KernelInterface *Best_Kernel_Heuristics(cudaDeviceProp *props) { KernelInterface *kernel = NULL; uint64_t N = 1UL << (opt_nfactor+1); if (IS_SCRYPT() || (IS_SCRYPT_JANE() && N <= 8192)) { // high register count kernels (scrypt, low N-factor scrypt-jane) if (props->major > 3 || (props->major == 3 && props->minor >= 5)) kernel = new NV2Kernel(); // we don't want this for Keccak though else if (props->major == 3 && props->minor == 0) kernel = new NVKernel(); else kernel = new FermiKernel(); } else { // high N-factor scrypt-jane = low registers count kernels if (props->major > 3 || (props->major == 3 && props->minor >= 5)) kernel = new TitanKernel(); else if (props->major == 3 && props->minor == 0) kernel = new KeplerKernel(); else kernel = new TestKernel(); } return kernel; } bool validate_config(char *config, int &b, int &w, KernelInterface **kernel = NULL, cudaDeviceProp *props = NULL) { bool success = false; char kernelid = ' '; if (config != NULL) { if (config[0] == 'T' || config[0] == 'K' || config[0] == 'F' || config[0] == 'L' || config[0] == 't' || config[0] == 'k' || config[0] == 'f' || config[0] == 'Z' || config[0] == 'Y' || config[0] == 'X') { kernelid = config[0]; config++; } if (config[0] >= '0' && config[0] <= '9') if (sscanf(config, "%dx%d", &b, &w) == 2) success = true; if (success && kernel != NULL) { switch (kernelid) { case 'T': case 'Z': *kernel = new NV2Kernel(); break; case 't': *kernel = new TitanKernel(); break; case 'K': case 'Y': *kernel = new NVKernel(); break; case 'k': *kernel = new KeplerKernel(); break; case 'F': case 'L': *kernel = new FermiKernel(); break; case 'f': case 'X': *kernel = new TestKernel(); break; case ' ': // choose based on device architecture *kernel = Best_Kernel_Heuristics(props); break; } } } return success; } std::map<int, int> context_blocks; std::map<int, int> context_wpb; std::map<int, bool> context_concurrent; std::map<int, KernelInterface *> context_kernel; std::map<int, uint32_t *> context_idata[2]; std::map<int, uint32_t *> context_odata[2]; std::map<int, cudaStream_t> context_streams[2]; std::map<int, uint32_t *> context_X[2]; std::map<int, 
uint32_t *> context_H[2]; std::map<int, cudaEvent_t> context_serialize[2]; // for SHA256 hashing on GPU std::map<int, uint32_t *> context_tstate[2]; std::map<int, uint32_t *> context_ostate[2]; std::map<int, uint32_t *> context_hash[2]; int find_optimal_blockcount(int thr_id, KernelInterface* &kernel, bool &concurrent, int &wpb); int cuda_throughput(int thr_id) { int GRID_BLOCKS, WARPS_PER_BLOCK; if (context_blocks.find(thr_id) == context_blocks.end()) { #if 0 CUcontext ctx; cuCtxCreate( &ctx, CU_CTX_SCHED_YIELD, device_map[thr_id] ); cuCtxSetCurrent(ctx); #else checkCudaErrors(cudaSetDevice(device_map[thr_id])); checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleYield)); #endif KernelInterface *kernel; bool concurrent; GRID_BLOCKS = find_optimal_blockcount(thr_id, kernel, concurrent, WARPS_PER_BLOCK); if(GRID_BLOCKS == 0) return 0; unsigned int THREADS_PER_WU = kernel->threads_per_wu(); unsigned int mem_size = WU_PER_LAUNCH * sizeof(uint32_t) * 32; unsigned int state_size = WU_PER_LAUNCH * sizeof(uint32_t) * 8; // allocate device memory for scrypt_core inputs and outputs uint32_t *tmp; checkCudaErrors(cudaMalloc((void **) &tmp, mem_size)); context_idata[0][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, mem_size)); context_idata[1][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, mem_size)); context_odata[0][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, mem_size)); context_odata[1][thr_id] = tmp; // allocate pinned host memory for scrypt hashes checkCudaErrors(cudaHostAlloc((void **) &tmp, state_size, cudaHostAllocDefault)); context_H[0][thr_id] = tmp; checkCudaErrors(cudaHostAlloc((void **) &tmp, state_size, cudaHostAllocDefault)); context_H[1][thr_id] = tmp; if (IS_SCRYPT()) { if (parallel < 2) { // allocate pinned host memory for scrypt_core input/output checkCudaErrors(cudaHostAlloc((void **) &tmp, mem_size, cudaHostAllocDefault)); context_X[0][thr_id] = tmp; checkCudaErrors(cudaHostAlloc((void **) &tmp, mem_size, cudaHostAllocDefault)); context_X[1][thr_id] = tmp; } else { // allocate tstate, ostate, scrypt hash device memory checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_tstate[0][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_tstate[1][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_ostate[0][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_ostate[1][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_hash[0][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_hash[1][thr_id] = tmp; } } else /* if (IS_SCRYPT_JANE()) */ { // allocate pinned host memory for scrypt_core input/output checkCudaErrors(cudaHostAlloc((void **) &tmp, mem_size, cudaHostAllocDefault)); context_X[0][thr_id] = tmp; checkCudaErrors(cudaHostAlloc((void **) &tmp, mem_size, cudaHostAllocDefault)); context_X[1][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_hash[0][thr_id] = tmp; checkCudaErrors(cudaMalloc((void **) &tmp, state_size)); context_hash[1][thr_id] = tmp; } // create two CUDA streams cudaStream_t tmp2; checkCudaErrors( cudaStreamCreate(&tmp2) ); context_streams[0][thr_id] = tmp2; checkCudaErrors( cudaStreamCreate(&tmp2) ); context_streams[1][thr_id] = tmp2; // events used to serialize the kernel launches (we don't want any overlapping of kernels) cudaEvent_t tmp4; checkCudaErrors(cudaEventCreateWithFlags(&tmp4, cudaEventDisableTiming)); context_serialize[0][thr_id] = tmp4; 
checkCudaErrors(cudaEventCreateWithFlags(&tmp4, cudaEventDisableTiming)); context_serialize[1][thr_id] = tmp4; checkCudaErrors(cudaEventRecord(context_serialize[1][thr_id])); context_kernel[thr_id] = kernel; context_concurrent[thr_id] = concurrent; context_blocks[thr_id] = GRID_BLOCKS; context_wpb[thr_id] = WARPS_PER_BLOCK; } GRID_BLOCKS = context_blocks[thr_id]; WARPS_PER_BLOCK = context_wpb[thr_id]; unsigned int THREADS_PER_WU = context_kernel[thr_id]->threads_per_wu(); return WU_PER_LAUNCH; } // Beginning of GPU Architecture definitions inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM) typedef struct { int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class { 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class { 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class { 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class - GK104 = 1536 cores / 8 SMs { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class { 0x50, 128 }, // Maxwell First Generation (SM 5.0) GTX750/750Ti { 0x52, 128 }, // Maxwell Second Generation (SM 5.2) GTX980 = 2048 cores / 16 SMs - GTX970 1664 cores / 13 SMs { 0x61, 128 }, // Pascal GeForce (SM 6.1) { -1, -1 }, }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If the SM version is not found in the table, default to 128 cores per SM applog(LOG_WARNING, "MapSMtoCores for SM %d.%d is undefined. 
Default to use %d Cores/SM", major, minor, 128); return 128; } #ifdef WIN32 #include <windows.h> static int console_width() { CONSOLE_SCREEN_BUFFER_INFO csbi; GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi); return csbi.srWindow.Right - csbi.srWindow.Left + 1; } #else static inline int console_width() { return 999; } #endif int find_optimal_blockcount(int thr_id, KernelInterface* &kernel, bool &concurrent, int &WARPS_PER_BLOCK) { int cw = console_width(); int optimal_blocks = 0; cudaDeviceProp props; checkCudaErrors(cudaGetDeviceProperties(&props, device_map[thr_id])); concurrent = (props.concurrentKernels > 0); WARPS_PER_BLOCK = -1; // if not specified, use interactive mode for devices that have the watchdog timer enabled if (device_interactive[thr_id] == -1) device_interactive[thr_id] = props.kernelExecTimeoutEnabled; // turn off texture cache if not otherwise specified if (device_texturecache[thr_id] == -1) device_texturecache[thr_id] = 0; // if not otherwise specified or required, turn single memory allocations off as they reduce // the amount of memory that we can allocate on Windows Vista, 7 and 8 (WDDM driver model issue) if (device_singlememory[thr_id] == -1) device_singlememory[thr_id] = 0; // figure out which kernel implementation to use if (!validate_config(device_config[thr_id], optimal_blocks, WARPS_PER_BLOCK, &kernel, &props)) { kernel = NULL; if (device_config[thr_id] != NULL) { if (device_config[thr_id][0] == 'T' || device_config[thr_id][0] == 'Z') kernel = new NV2Kernel(); else if (device_config[thr_id][0] == 't') kernel = new TitanKernel(); else if (device_config[thr_id][0] == 'K' || device_config[thr_id][0] == 'Y') kernel = new NVKernel(); else if (device_config[thr_id][0] == 'k') kernel = new KeplerKernel(); else if (device_config[thr_id][0] == 'F' || device_config[thr_id][0] == 'L') kernel = new FermiKernel(); else if (device_config[thr_id][0] == 'f' || device_config[thr_id][0] == 'X') kernel = new TestKernel(); } if (kernel == NULL) kernel = Best_Kernel_Heuristics(&props); } if (kernel->get_major_version() > props.major || kernel->get_major_version() == props.major && kernel->get_minor_version() > props.minor) { applog(LOG_ERR, "GPU #%d: FATAL: the '%c' kernel requires %d.%d capability!", device_map[thr_id], kernel->get_identifier(), kernel->get_major_version(), kernel->get_minor_version()); return 0; } // set whatever cache configuration and shared memory bank mode the kernel prefers checkCudaErrors(cudaDeviceSetCacheConfig(kernel->cache_config())); checkCudaErrors(cudaDeviceSetSharedMemConfig(kernel->shared_mem_config())); // some kernels (e.g. 
Titan) do not support the texture cache if (kernel->no_textures() && device_texturecache[thr_id]) { applog(LOG_WARNING, "GPU #%d: the '%c' kernel ignores the texture cache argument", device_map[thr_id], kernel->get_identifier()); device_texturecache[thr_id] = 0; } // Texture caching only works with single memory allocation if (device_texturecache[thr_id]) device_singlememory[thr_id] = 1; if (kernel->single_memory() && !device_singlememory[thr_id]) { applog(LOG_WARNING, "GPU #%d: the '%c' kernel requires single memory allocation", device_map[thr_id], kernel->get_identifier()); device_singlememory[thr_id] = 1; } if (device_lookup_gap[thr_id] == 0) device_lookup_gap[thr_id] = 1; if (!kernel->support_lookup_gap() && device_lookup_gap[thr_id] > 1) { applog(LOG_WARNING, "GPU #%d: the '%c' kernel does not support a lookup gap", device_map[thr_id], kernel->get_identifier()); device_lookup_gap[thr_id] = 1; } if (opt_debug) { applog(LOG_INFO, "GPU #%d: interactive: %d, tex-cache: %d%s, single-alloc: %d", device_map[thr_id], (device_interactive[thr_id] != 0) ? 1 : 0, (device_texturecache[thr_id] != 0) ? device_texturecache[thr_id] : 0, (device_texturecache[thr_id] != 0) ? "D" : "", (device_singlememory[thr_id] != 0) ? 1 : 0 ); } // number of threads collaborating on one work unit (hash) unsigned int THREADS_PER_WU = kernel->threads_per_wu(); unsigned int LOOKUP_GAP = device_lookup_gap[thr_id]; unsigned int BACKOFF = device_backoff[thr_id]; unsigned int N = (1 << (opt_nfactor+1)); double szPerWarp = (double)(SCRATCH * WU_PER_WARP * sizeof(uint32_t)); //applog(LOG_INFO, "WU_PER_WARP=%u, THREADS_PER_WU=%u, LOOKUP_GAP=%u, BACKOFF=%u, SCRATCH=%u", WU_PER_WARP, THREADS_PER_WU, LOOKUP_GAP, BACKOFF, SCRATCH); applog(LOG_INFO, "GPU #%d: %d hashes / %.1f MB per warp.", device_map[thr_id], WU_PER_WARP, szPerWarp / (1024.0 * 1024.0)); // compute highest MAXWARPS numbers for kernels allowing cudaBindTexture to succeed int MW_1D_4 = 134217728 / (SCRATCH * WU_PER_WARP / 4); // for uint4_t textures int MW_1D_2 = 134217728 / (SCRATCH * WU_PER_WARP / 2); // for uint2_t textures int MW_1D = kernel->get_texel_width() == 2 ? MW_1D_2 : MW_1D_4; uint32_t *d_V = NULL; if (device_singlememory[thr_id]) { // if no launch config was specified, we simply // allocate the single largest memory chunk on the device that we can get if (validate_config(device_config[thr_id], optimal_blocks, WARPS_PER_BLOCK)) { MAXWARPS[thr_id] = optimal_blocks * WARPS_PER_BLOCK; } else { // compute no. of warps to allocate the largest number producing a single memory block // PROBLEM: one some devices, ALL allocations will fail if the first one failed. This sucks. 
size_t MEM_LIMIT = (size_t)min((unsigned long long)MAXMEM, (unsigned long long)props.totalGlobalMem); int warpmax = (int)min((unsigned long long)TOTAL_WARP_LIMIT, (unsigned long long)(MEM_LIMIT / szPerWarp)); // run a bisection algorithm for memory allocation (way more reliable than the previous approach) int best = 0; int warp = (warpmax+1)/2; int interval = (warpmax+1)/2; while (interval > 0) { cudaGetLastError(); // clear the error state cudaMalloc((void **)&d_V, (size_t)(szPerWarp * warp)); if (cudaGetLastError() == cudaSuccess) { checkCudaErrors(cudaFree(d_V)); d_V = NULL; if (warp > best) best = warp; if (warp == warpmax) break; interval = (interval+1)/2; warp += interval; if (warp > warpmax) warp = warpmax; } else { interval = interval/2; warp -= interval; if (warp < 1) warp = 1; } } // back off a bit from the largest possible allocation size MAXWARPS[thr_id] = ((100-BACKOFF)*best+50)/100; } // now allocate a buffer for determined MAXWARPS setting cudaGetLastError(); // clear the error state cudaMalloc((void **)&d_V, (size_t)SCRATCH * WU_PER_WARP * MAXWARPS[thr_id] * sizeof(uint32_t)); if (cudaGetLastError() == cudaSuccess) { for (int i=0; i < MAXWARPS[thr_id]; ++i) h_V[thr_id][i] = d_V + SCRATCH * WU_PER_WARP * i; if (device_texturecache[thr_id] == 1) { if (validate_config(device_config[thr_id], optimal_blocks, WARPS_PER_BLOCK)) { if ( optimal_blocks * WARPS_PER_BLOCK > MW_1D ) { applog(LOG_ERR, "GPU #%d: '%s' exceeds limits for 1D cache. Using 2D cache instead.", device_map[thr_id], device_config[thr_id]); device_texturecache[thr_id] = 2; } } // bind linear memory to a 1D texture reference if (kernel->get_texel_width() == 2) kernel->bindtexture_1D(d_V, SCRATCH * WU_PER_WARP * min(MAXWARPS[thr_id],MW_1D_2) * sizeof(uint32_t)); else kernel->bindtexture_1D(d_V, SCRATCH * WU_PER_WARP * min(MAXWARPS[thr_id],MW_1D_4) * sizeof(uint32_t)); } else if (device_texturecache[thr_id] == 2) { // bind pitch linear memory to a 2D texture reference if (kernel->get_texel_width() == 2) kernel->bindtexture_2D(d_V, SCRATCH/2, WU_PER_WARP * MAXWARPS[thr_id], SCRATCH*sizeof(uint32_t)); else kernel->bindtexture_2D(d_V, SCRATCH/4, WU_PER_WARP * MAXWARPS[thr_id], SCRATCH*sizeof(uint32_t)); } } else { applog(LOG_ERR, "GPU #%d: FATAL: Launch config '%s' requires too much memory!", device_map[thr_id], device_config[thr_id]); return 0; } } else { if (validate_config(device_config[thr_id], optimal_blocks, WARPS_PER_BLOCK)) MAXWARPS[thr_id] = optimal_blocks * WARPS_PER_BLOCK; else MAXWARPS[thr_id] = TOTAL_WARP_LIMIT; // chunked memory allocation up to device limits int warp; for (warp = 0; warp < MAXWARPS[thr_id]; ++warp) { // work around partition camping problems by adding a random start address offset to each allocation h_V_extra[thr_id][warp] = (props.major == 1) ? 
(16 * (rand()%(16384/16))) : 0; cudaGetLastError(); // clear the error state cudaMalloc((void **) &h_V[thr_id][warp], (SCRATCH * WU_PER_WARP + h_V_extra[thr_id][warp])*sizeof(uint32_t)); if (cudaGetLastError() == cudaSuccess) h_V[thr_id][warp] += h_V_extra[thr_id][warp]; else { h_V_extra[thr_id][warp] = 0; // back off by several warp allocations to have some breathing room int remove = (BACKOFF*warp+50)/100; for (int i=0; warp > 0 && i < remove; ++i) { warp--; checkCudaErrors(cudaFree(h_V[thr_id][warp]-h_V_extra[thr_id][warp])); h_V[thr_id][warp] = NULL; h_V_extra[thr_id][warp] = 0; } break; } } MAXWARPS[thr_id] = warp; } kernel->set_scratchbuf_constants(MAXWARPS[thr_id], h_V[thr_id]); if (validate_config(device_config[thr_id], optimal_blocks, WARPS_PER_BLOCK)) { if (optimal_blocks * WARPS_PER_BLOCK > MAXWARPS[thr_id]) { applog(LOG_ERR, "GPU #%d: FATAL: Given launch config '%s' requires too much memory.", device_map[thr_id], device_config[thr_id]); return 0; } if (WARPS_PER_BLOCK > kernel->max_warps_per_block()) { applog(LOG_ERR, "GPU #%d: FATAL: Given launch config '%s' exceeds warp limit for '%c' kernel.", device_map[thr_id], device_config[thr_id], kernel->get_identifier()); return 0; } } else { if (device_config[thr_id] != NULL && strcasecmp("auto", device_config[thr_id])) applog(LOG_WARNING, "GPU #%d: Given launch config '%s' does not validate.", device_map[thr_id], device_config[thr_id]); if (opt_autotune) { applog(LOG_INFO, "GPU #%d: Performing auto-tuning, please wait 2 minutes...", device_map[thr_id]); // allocate device memory uint32_t *d_idata = NULL, *d_odata = NULL; unsigned int mem_size = MAXWARPS[thr_id] * WU_PER_WARP * sizeof(uint32_t) * 32; checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size)); checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size)); // pre-initialize some device memory uint32_t *h_idata = (uint32_t*)malloc(mem_size); for (unsigned int i=0; i < mem_size/sizeof(uint32_t); ++i) h_idata[i] = i*2654435761UL; // knuth's method checkCudaErrors(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice)); free(h_idata); double best_hash_sec = 0.0; int best_wpb = 0; // auto-tuning loop { // we want to have enough total warps for half the multiprocessors at least // compute highest MAXWARPS number that we can support based on texture cache mode int MINTW = props.multiProcessorCount / 2; int MAXTW = (device_texturecache[thr_id] == 1) ? 
min(MAXWARPS[thr_id],MW_1D) : MAXWARPS[thr_id]; // we want to have blocks for half the multiprocessors at least int MINB = props.multiProcessorCount / 2; int MAXB = MAXTW; double tmin = 0.05; applog(LOG_INFO, "GPU #%d: maximum total warps (BxW): %d", (int) device_map[thr_id], MAXTW); for (int GRID_BLOCKS = MINB; !abort_flag && GRID_BLOCKS <= MAXB; ++GRID_BLOCKS) { double Hash[32+1] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; for (WARPS_PER_BLOCK = 1; !abort_flag && WARPS_PER_BLOCK <= kernel->max_warps_per_block(); ++WARPS_PER_BLOCK) { double hash_sec = 0; if (GRID_BLOCKS * WARPS_PER_BLOCK >= MINTW && GRID_BLOCKS * WARPS_PER_BLOCK <= MAXTW) { // setup execution parameters dim3 grid(WU_PER_LAUNCH/WU_PER_BLOCK, 1, 1); dim3 threads(THREADS_PER_WU*WU_PER_BLOCK, 1, 1); struct timeval tv_start, tv_end; double tdelta = 0; checkCudaErrors(cudaDeviceSynchronize()); gettimeofday(&tv_start, NULL); int repeat = 0; do // average several measurements for better exactness { kernel->run_kernel( grid, threads, WARPS_PER_BLOCK, thr_id, NULL, d_idata, d_odata, N, LOOKUP_GAP, device_interactive[thr_id], true, device_texturecache[thr_id] ); if(cudaDeviceSynchronize() != cudaSuccess) break; ++repeat; gettimeofday(&tv_end, NULL); // for a better result averaging, measure for at least 50ms (10ms for Keccak) } while ((tdelta=(1e-6 * (tv_end.tv_usec-tv_start.tv_usec) + (tv_end.tv_sec-tv_start.tv_sec))) < tmin); if (cudaGetLastError() != cudaSuccess) continue; tdelta /= repeat; // BUGFIX: this averaging over multiple measurements was missing // for scrypt: in interactive mode only find launch configs where kernel launch times are short enough // TODO: instead we could reduce the batchsize parameter to meet the launch time requirement. if (IS_SCRYPT() && device_interactive[thr_id] && GRID_BLOCKS > 2*props.multiProcessorCount && tdelta > 1.0/30) { if (WARPS_PER_BLOCK == 1) goto skip; else goto skip2; } hash_sec = (double)WU_PER_LAUNCH / tdelta; Hash[WARPS_PER_BLOCK] = hash_sec; if (hash_sec > best_hash_sec) { optimal_blocks = GRID_BLOCKS; best_hash_sec = hash_sec; best_wpb = WARPS_PER_BLOCK; } } } skip2: if (opt_debug) { if (GRID_BLOCKS == MINB) { char line[512] = " "; for (int i=1; i<=kernel->max_warps_per_block(); ++i) { char tmp[16]; sprintf(tmp, i < 10 ? 
" x%-2d" : " x%-2d ", i); strcat(line, tmp); if (cw == 80 && (i % 8 == 0 && i != kernel->max_warps_per_block())) strcat(line, "\n "); } applog(LOG_DEBUG, line); } char kMGT = ' '; bool flag; for (int j=0; j < 4; ++j) { flag=false; for (int i=1; i<=kernel->max_warps_per_block(); flag|=Hash[i] >= 1000, i++); if (flag) for (int i=1; i<=kernel->max_warps_per_block(); Hash[i] /= 1000, i++); else break; if (kMGT == ' ') kMGT = 'k'; else if (kMGT == 'k') kMGT = 'M'; else if (kMGT == 'M') kMGT = 'G'; else if (kMGT == 'G') kMGT = 'T'; } const char *format = "%5.4f%c"; flag = false; for (int i=1; i<=kernel->max_warps_per_block(); flag|=Hash[i] >= 1, i++); if (flag) format = "%5.3f%c"; flag = false; for (int i=1; i<=kernel->max_warps_per_block(); flag|=Hash[i] >= 10, i++); if (flag) format = "%5.2f%c"; flag = false; for (int i=1; i<=kernel->max_warps_per_block(); flag|=Hash[i] >= 100, i++); if (flag) format = "%5.1f%c"; char line[512]; sprintf(line, "%3d:", GRID_BLOCKS); for (int i=1; i<=kernel->max_warps_per_block(); ++i) { char tmp[16]; if (Hash[i]>0) sprintf(tmp, format, Hash[i], (i<kernel->max_warps_per_block())?'|':' '); else sprintf(tmp, " %c", (i<kernel->max_warps_per_block())?'|':' '); strcat(line, tmp); if (cw == 80 && (i % 8 == 0 && i != kernel->max_warps_per_block())) strcat(line, "\n "); } int n = strlen(line)-1; line[n++] = '|'; line[n++] = ' '; line[n++] = kMGT; line[n++] = '\0'; strcat(line, "H/s"); applog(LOG_DEBUG, line); } } skip: ; } checkCudaErrors(cudaFree(d_odata)); checkCudaErrors(cudaFree(d_idata)); WARPS_PER_BLOCK = best_wpb; applog(LOG_INFO, "GPU #%d: %7.2f hash/s with configuration %c%dx%d", device_map[thr_id], best_hash_sec, kernel->get_identifier(), optimal_blocks, WARPS_PER_BLOCK); } else { // Heuristics to find a good kernel launch configuration // base the initial block estimate on the number of multiprocessors int device_cores = props.multiProcessorCount * _ConvertSMVer2Cores(props.major, props.minor); // defaults, in case nothing else is chosen below optimal_blocks = 4 * device_cores / WU_PER_WARP; WARPS_PER_BLOCK = 2; // Based on compute capability, pick a known good block x warp configuration. if (props.major >= 3) { if (props.major == 3 && props.minor == 5) // GK110 (Tesla K20X, K20, GeForce GTX TITAN) { // TODO: what to do with Titan and Tesla K20(X)? // for now, do the same as for GTX 660Ti (2GB) optimal_blocks = (int)(optimal_blocks * 0.8809524); WARPS_PER_BLOCK = 2; } else // GK104, GK106, GK107 ... { if (MAXWARPS[thr_id] > (int)(optimal_blocks * 1.7261905) * 2) { // this results in 290x2 configuration on GTX 660Ti (3GB) // but it requires 3GB memory on the card! optimal_blocks = (int)(optimal_blocks * 1.7261905); WARPS_PER_BLOCK = 2; } else { // this results in 148x2 configuration on GTX 660Ti (2GB) optimal_blocks = (int)(optimal_blocks * 0.8809524); WARPS_PER_BLOCK = 2; } } } // 1st generation Fermi (compute 2.0) GF100, GF110 else if (props.major == 2 && props.minor == 0) { // this results in a 60x4 configuration on GTX 570 optimal_blocks = 4 * device_cores / WU_PER_WARP; WARPS_PER_BLOCK = 4; } // 2nd generation Fermi (compute 2.1) GF104,106,108,114,116 else if (props.major == 2 && props.minor == 1) { // this results in a 56x2 configuration on GTX 460 optimal_blocks = props.multiProcessorCount * 8; WARPS_PER_BLOCK = 2; } // in case we run out of memory with the automatically chosen configuration, // first back off with WARPS_PER_BLOCK, then reduce optimal_blocks. 
if (WARPS_PER_BLOCK==3 && optimal_blocks * WARPS_PER_BLOCK > MAXWARPS[thr_id]) WARPS_PER_BLOCK = 2; while (optimal_blocks > 0 && optimal_blocks * WARPS_PER_BLOCK > MAXWARPS[thr_id]) optimal_blocks--; } } applog(LOG_INFO, "GPU #%d: using launch configuration %c%dx%d", device_map[thr_id], kernel->get_identifier(), optimal_blocks, WARPS_PER_BLOCK); if (device_singlememory[thr_id]) { if (MAXWARPS[thr_id] != optimal_blocks * WARPS_PER_BLOCK) { MAXWARPS[thr_id] = optimal_blocks * WARPS_PER_BLOCK; if (device_texturecache[thr_id] == 1) kernel->unbindtexture_1D(); else if (device_texturecache[thr_id] == 2) kernel->unbindtexture_2D(); checkCudaErrors(cudaFree(d_V)); d_V = NULL; cudaGetLastError(); // clear the error state cudaMalloc((void **)&d_V, (size_t)SCRATCH * WU_PER_WARP * MAXWARPS[thr_id] * sizeof(uint32_t)); if (cudaGetLastError() == cudaSuccess) { for (int i=0; i < MAXWARPS[thr_id]; ++i) h_V[thr_id][i] = d_V + SCRATCH * WU_PER_WARP * i; if (device_texturecache[thr_id] == 1) { // bind linear memory to a 1D texture reference if (kernel->get_texel_width() == 2) kernel->bindtexture_1D(d_V, SCRATCH * WU_PER_WARP * MAXWARPS[thr_id] * sizeof(uint32_t)); else kernel->bindtexture_1D(d_V, SCRATCH * WU_PER_WARP * MAXWARPS[thr_id] * sizeof(uint32_t)); } else if (device_texturecache[thr_id] == 2) { // bind pitch linear memory to a 2D texture reference if (kernel->get_texel_width() == 2) kernel->bindtexture_2D(d_V, SCRATCH/2, WU_PER_WARP * MAXWARPS[thr_id], SCRATCH*sizeof(uint32_t)); else kernel->bindtexture_2D(d_V, SCRATCH/4, WU_PER_WARP * MAXWARPS[thr_id], SCRATCH*sizeof(uint32_t)); } // update pointers to scratch buffer in constant memory after reallocation kernel->set_scratchbuf_constants(MAXWARPS[thr_id], h_V[thr_id]); } else { applog(LOG_ERR, "GPU #%d: Unable to allocate enough memory for launch config '%s'.", device_map[thr_id], device_config[thr_id]); } } } else { // back off unnecessary memory allocations to have some breathing room while (MAXWARPS[thr_id] > 0 && MAXWARPS[thr_id] > optimal_blocks * WARPS_PER_BLOCK) { (MAXWARPS[thr_id])--; checkCudaErrors(cudaFree(h_V[thr_id][MAXWARPS[thr_id]]-h_V_extra[thr_id][MAXWARPS[thr_id]])); h_V[thr_id][MAXWARPS[thr_id]] = NULL; h_V_extra[thr_id][MAXWARPS[thr_id]] = 0; } } return optimal_blocks; } void cuda_scrypt_HtoD(int thr_id, uint32_t *X, int stream) { unsigned int GRID_BLOCKS = context_blocks[thr_id]; unsigned int WARPS_PER_BLOCK = context_wpb[thr_id]; unsigned int THREADS_PER_WU = context_kernel[thr_id]->threads_per_wu(); unsigned int mem_size = WU_PER_LAUNCH * sizeof(uint32_t) * 32; // copy host memory to device cudaMemcpyAsync(context_idata[stream][thr_id], X, mem_size, cudaMemcpyHostToDevice, context_streams[stream][thr_id]); } void cuda_scrypt_serialize(int thr_id, int stream) { // if the device can concurrently execute multiple kernels, then we must // wait for the serialization event recorded by the other stream if (context_concurrent[thr_id] || device_interactive[thr_id]) cudaStreamWaitEvent(context_streams[stream][thr_id], context_serialize[(stream+1)&1][thr_id], 0); } void cuda_scrypt_done(int thr_id, int stream) { // record the serialization event in the current stream cudaEventRecord(context_serialize[stream][thr_id], context_streams[stream][thr_id]); } void cuda_scrypt_flush(int thr_id, int stream) { // flush the work queue (required for WDDM drivers) cudaStreamSynchronize(context_streams[stream][thr_id]); } void cuda_scrypt_core(int thr_id, int stream, unsigned int N) { unsigned int GRID_BLOCKS = context_blocks[thr_id]; unsigned int 
WARPS_PER_BLOCK = context_wpb[thr_id]; unsigned int THREADS_PER_WU = context_kernel[thr_id]->threads_per_wu(); unsigned int LOOKUP_GAP = device_lookup_gap[thr_id]; // setup execution parameters dim3 grid(WU_PER_LAUNCH/WU_PER_BLOCK, 1, 1); dim3 threads(THREADS_PER_WU*WU_PER_BLOCK, 1, 1); context_kernel[thr_id]->run_kernel(grid, threads, WARPS_PER_BLOCK, thr_id, context_streams[stream][thr_id], context_idata[stream][thr_id], context_odata[stream][thr_id], N, LOOKUP_GAP, device_interactive[thr_id], opt_benchmark, device_texturecache[thr_id] ); } void cuda_scrypt_DtoH(int thr_id, uint32_t *X, int stream, bool postSHA) { unsigned int GRID_BLOCKS = context_blocks[thr_id]; unsigned int WARPS_PER_BLOCK = context_wpb[thr_id]; unsigned int THREADS_PER_WU = context_kernel[thr_id]->threads_per_wu(); unsigned int mem_size = WU_PER_LAUNCH * sizeof(uint32_t) * (postSHA ? 8 : 32); // copy result from device to host (asynchronously) checkCudaErrors(cudaMemcpyAsync(X, postSHA ? context_hash[stream][thr_id] : context_odata[stream][thr_id], mem_size, cudaMemcpyDeviceToHost, context_streams[stream][thr_id])); } bool cuda_scrypt_sync(int thr_id, int stream) { cudaError_t err; uint32_t wait_us = 0; if (device_interactive[thr_id] && !opt_benchmark) { // For devices that also do desktop rendering or compositing, we want to free up some time slots. // That requires making a pause in work submission when there is no active task on the GPU, // and Device Synchronize ensures that. // this call was replaced by the loop below to workaround the high CPU usage issue //err = cudaDeviceSynchronize(); while((err = cudaStreamQuery(context_streams[0][thr_id])) == cudaErrorNotReady || (err == cudaSuccess && (err = cudaStreamQuery(context_streams[1][thr_id])) == cudaErrorNotReady)) { usleep(50); wait_us+=50; } usleep(50); wait_us+=50; } else { // this call was replaced by the loop below to workaround the high CPU usage issue //err = cudaStreamSynchronize(context_streams[stream][thr_id]); while((err = cudaStreamQuery(context_streams[stream][thr_id])) == cudaErrorNotReady) { usleep(50); wait_us+=50; } } if (err != cudaSuccess) { if (!abort_flag) applog(LOG_ERR, "GPU #%d: CUDA error `%s` while waiting the kernel.", device_map[thr_id], cudaGetErrorString(err)); return false; } //if (opt_debug) { // applog(LOG_DEBUG, "GPU #%d: %s %u us", device_map[thr_id], __FUNCTION__, wait_us); //} return true; } uint32_t* cuda_transferbuffer(int thr_id, int stream) { return context_X[stream][thr_id]; } uint32_t* cuda_hashbuffer(int thr_id, int stream) { return context_H[stream][thr_id]; }
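// ---------------------------------------------------------------------------
// The launch-config strings consumed by validate_config() above are an optional kernel
// letter followed by "<blocks>x<warps_per_block>", e.g. "K30x8" or "120x4". A minimal
// standalone parser sketch (hypothetical helper, not the function above; unlike
// validate_config() it accepts any leading letter instead of only the kernel letters
// listed above):
static bool parse_launch_config_sketch(const char *cfg, char *kernel_id, int *blocks, int *warps)
{
    if (cfg == NULL) return false;
    *kernel_id = ' ';                       // ' ' means "choose a kernel heuristically"
    if (isalpha((unsigned char)cfg[0])) {   // optional kernel selector letter
        *kernel_id = cfg[0];
        cfg++;
    }
    return sscanf(cfg, "%dx%d", blocks, warps) == 2;
}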
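// ---------------------------------------------------------------------------
// The single-allocation path above sizes the scratch buffer with a bisection search: probe
// an allocation, free it and step up on success, halve the step and back off on failure.
// A compact standalone sketch of that probe (hypothetical helper; bytes_per_warp and
// warp_max play the roles of szPerWarp and warpmax above):
static int probe_max_warps_sketch(size_t bytes_per_warp, int warp_max)
{
    int best = 0;
    int warp = (warp_max + 1) / 2;
    int step = (warp_max + 1) / 2;
    while (step > 0) {
        void *p = NULL;
        cudaGetLastError();                 // clear any sticky error state
        if (cudaMalloc(&p, bytes_per_warp * (size_t)warp) == cudaSuccess) {
            cudaFree(p);
            if (warp > best) best = warp;
            if (warp == warp_max) break;
            step = (step + 1) / 2;          // probe a larger size next
            warp += step;
            if (warp > warp_max) warp = warp_max;
        } else {
            step = step / 2;                // probe a smaller size next
            warp -= step;
            if (warp < 1) warp = 1;
        }
    }
    return best;                            // largest warp count that allocated successfully
}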
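// ---------------------------------------------------------------------------
// cuda_scrypt_sync() above avoids cudaStreamSynchronize()/cudaDeviceSynchronize() and
// instead polls with cudaStreamQuery() plus usleep(), trading a little latency for much
// lower CPU usage while the GPU is busy. The same pattern as a standalone sketch
// (hypothetical helper; <unistd.h> is already included above):
static cudaError_t lazy_stream_sync_sketch(cudaStream_t stream, unsigned int poll_us)
{
    cudaError_t err;
    while ((err = cudaStreamQuery(stream)) == cudaErrorNotReady)
        usleep(poll_us);    // yield the CPU instead of spinning in the driver
    return err;             // cudaSuccess, or the first real error on the stream
}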
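// ---------------------------------------------------------------------------
// The auto-tuner above repeats each candidate launch until at least tmin seconds have
// elapsed and then divides by the repeat count, so very short kernels still produce a
// stable hash/s estimate. A standalone sketch of that measurement loop (hypothetical
// helper; run_once stands in for the kernel launch plus cudaDeviceSynchronize(), and
// gettimeofday() is assumed reachable through the existing includes since it is already
// used above):
static double measure_avg_seconds_sketch(void (*run_once)(void), double tmin_seconds)
{
    struct timeval tv_start, tv_end;
    double elapsed = 0.0;
    int repeat = 0;
    gettimeofday(&tv_start, NULL);
    do {
        run_once();
        ++repeat;
        gettimeofday(&tv_end, NULL);
        elapsed = 1e-6 * (tv_end.tv_usec - tv_start.tv_usec)
                + (tv_end.tv_sec - tv_start.tv_sec);
    } while (elapsed < tmin_seconds);
    return elapsed / repeat;    // average wall time of a single run
}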
#include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> #include <cfloat> #include <iostream> using std::cout; using std::endl; #define BLOCKSIZE 512 // TODO: // at::numeric_limits<scalar_t>::lowest; // implement like pytorch-softmax: two kernels: one is for inner size to be 1, and the other is for spatial. Besides, in the spatial kernel method, we should use threadIdx.x and threadIdx.y for dimsize and inner size parallelization // define spatial kernel block like this: /* * inline dim3 SpatialSoftMax_getBlockSize( * uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) { * uint32_t inner_threads = inner_size; const int max_threads = 1024; * inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads)); * uint32_t dim_threads = 1; * if (inner_threads <= 64 && dim_size >= 64) { * while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size) * dim_threads *= 2; * dim_threads /= 2; * } * return dim3(dim_threads, inner_threads); * } * */ // consider max_active_blocks when assign grid blocks, the total number of blocks should not be greater than max_active_blocks which is multiProcessCount namespace large_margin_space { template<typename scalar_t> __forceinline__ __device__ void reduce_max(scalar_t* sdata, int tid) { __syncthreads(); for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s]; } __syncthreads(); } } template<typename scalar_t> __forceinline__ __device__ void reduce_sum(scalar_t* sdata, int tid) { __syncthreads(); for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } } template<typename scalar_t> __forceinline__ __device__ void compute_reduce_values( const scalar_t* logits, scalar_t* sdata, const int dimsize, const int m_size, int n_idx, int m_idx, int64_t lb, int tid) { // b is max logits without target // b+1 is max logits with target // b+2 is sum of exp without target // b+3 is sum of exp with target // compute max with and without label index const scalar_t zero(0.); __syncthreads(); sdata[tid] = scalar_t(-10000.); __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > sdata[tid]) sdata[tid] = val; } reduce_max(sdata, tid); if (tid == 0) { sdata[blockDim.x] = sdata[0]; sdata[blockDim.x + 1] = sdata[0]; int idx = n_idx * dimsize * m_size + lb * m_size + m_idx; scalar_t val = logits[idx]; if (val > sdata[0]) sdata[blockDim.x + 1] = val; } __syncthreads(); // compute sum of exp with and without label index sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += exp(val - sdata[blockDim.x]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) sdata[blockDim.x + 2] = sdata[0]; __syncthreads(); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += exp(val - sdata[blockDim.x + 1]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) sdata[blockDim.x + 3] = sdata[0]; } template<typename scalar_t> __forceinline__ __device__ void compute_sum_of_qx( const scalar_t* logits, scalar_t* sdata, const int dimsize, const int m_size, int n_idx, int 
m_idx, int64_t lb, int tid) { // compute sum of q * x to sdata[blockDim.x + 5] const scalar_t zero(0.); __syncthreads(); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j += blockDim.x) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sdata[tid] += val * exp(val - sdata[blockDim.x]); } reduce_sum<scalar_t>(sdata, tid); if (tid == 0) { sdata[blockDim.x + 5] = sdata[0] / sdata[blockDim.x + 2]; } } } // kernel function for forward and backward template<typename scalar_t> __global__ void LMarginLossForward(const int n_size, const int dimsize, const int m_size, const scalar_t *logits, const int64_t *labels, scalar_t *losses, const int64_t ignore_index, const float lam) { // shared memory // b+4 is coeff of 1/(dimsize - 1) extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + (blockDim.x + 8) * threadIdx.y; scalar_t zero(0.f); int tid = threadIdx.x; int sample_id = blockIdx.x * blockDim.y + threadIdx.y; int sample_offset = gridDim.x * blockDim.y; if (tid == 0) { sdata[blockDim.x + 4] = scalar_t(1.) / (dimsize - 1); } int samplesize = n_size * m_size; for (int i{sample_id}; i < samplesize; i += sample_offset) { int64_t lb = labels[i]; if (lb == ignore_index) { if (tid == 0) losses[i] = zero; continue; } int n_idx = i / m_size; int m_idx = i % m_size; large_margin_space::compute_reduce_values<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); sdata[tid] = zero; __syncthreads(); for (int j{tid}; j < dimsize; j+=blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t dval = logits[idx]; scalar_t term(0); if (j == lb) { term = -(dval - sdata[blockDim.x + 1]); term += log(sdata[blockDim.x + 3]); } else { dval -= sdata[blockDim.x]; term = exp(dval) / sdata[blockDim.x + 2]; term -= sdata[blockDim.x + 4]; term *= (dval - log(sdata[blockDim.x + 2])); term *= scalar_t(lam / 2.f); } sdata[tid] += term; } large_margin_space::reduce_sum<scalar_t>(sdata, tid); if (tid == 0) losses[i] = sdata[0]; } } template<typename scalar_t> __global__ void LMarginLossBackward(const int n_size, const int dimsize, const int m_size, scalar_t *grad_logits, const scalar_t *logits, const int64_t *labels, const int64_t ignore_index, const float lam) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + (blockDim.x + 8) * threadIdx.y; scalar_t zero(0.f); int tid = threadIdx.x; int sample_id = blockIdx.x * blockDim.y + threadIdx.y; int sample_offset = gridDim.x * blockDim.y; if (tid == 0) { sdata[blockDim.x + 4] = 1. 
/ (dimsize - 1); } int samplesize = n_size * m_size; for (int i{sample_id}; i < samplesize; i += sample_offset) { int64_t lb = labels[i]; int n_idx = i / m_size; int m_idx = i % m_size; if (lb == ignore_index) { for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; grad_logits[idx] = zero; } continue; } large_margin_space::compute_reduce_values<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); large_margin_space::compute_sum_of_qx<scalar_t>(logits, sdata, dimsize, m_size, n_idx, m_idx, lb, tid); const scalar_t one(1.f); for (int j{tid}; j < dimsize; j += blockDim.x) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; scalar_t pc = exp(val - sdata[blockDim.x + 1]) / sdata[blockDim.x + 3]; scalar_t gval; if (j == lb) { gval = pc - one; } else { gval = val - sdata[blockDim.x + 5] + one; gval *= exp(val - sdata[blockDim.x]) / sdata[blockDim.x + 2]; gval = pc + (gval - sdata[blockDim.x + 4]) * scalar_t(lam / 2.); } grad_logits[idx] = gval; } } } template<typename scalar_t> __global__ void SpatialLMarginLossForward(const int n_size, const int dimsize, const int m_size, const scalar_t *logits, const int64_t *labels, scalar_t *losses, const int64_t ignore_index, const float lam) { // shared memory __shared__ int sdata[BLOCKSIZE]; sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid sdata[1] = n_size * m_size; // samplesize sdata[2] = gridDim.x * blockDim.x; // sample_offset for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) { int lb = static_cast<int>(labels[i]); if (lb == ignore_index) { losses[i] = scalar_t(0.f); continue; } int n_idx = i / m_size; int m_idx = i % m_size; // compute max scalar_t max_with_lb(-10000.f); scalar_t max_no_lb(-10000.f); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > max_with_lb) max_with_lb = val; if (j == lb) continue; if (val > max_no_lb) max_no_lb = val; } // compute sum of exp scalar_t sum_with_lb(0.); scalar_t sum_no_lb(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_with_lb += exp(val - max_with_lb); if (j == lb) continue; sum_no_lb += exp(val - max_no_lb); } // compute loss scalar_t loss_val(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (j == lb) { loss_val += - (val - max_with_lb) + log(sum_with_lb); } else { loss_val += scalar_t(lam / 2.) * (exp(val - max_no_lb) / sum_no_lb - (scalar_t(1.) 
/ (dimsize - 1))) * (val - max_no_lb - log(sum_no_lb)); } } losses[i] = loss_val; } } template<typename scalar_t> __global__ void SpatialLMarginLossBackward(const int n_size, const int dimsize, const int m_size, scalar_t *grad_logits, const scalar_t *logits, const int64_t *labels, const int64_t ignore_index, const float lam) { // shared memory __shared__ int sdata[BLOCKSIZE]; sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid sdata[1] = n_size * m_size; // samplesize sdata[2] = gridDim.x * blockDim.x; // sample_offset const scalar_t one(1.); for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) { int lb = static_cast<int>(labels[i]); int n_idx = i / m_size; int m_idx = i % m_size; if (lb == ignore_index) { for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; grad_logits[idx] = scalar_t(0.f); } continue; } // compute max scalar_t max_with_lb(-10000.); scalar_t max_no_lb(-10000.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (val > max_with_lb) max_with_lb = val; if (j == lb) continue; if (val > max_no_lb) max_no_lb = val; } // compute sum of exp scalar_t sum_with_lb(0.); scalar_t sum_no_lb(0.); for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_with_lb += exp(val - max_with_lb); if (j == lb) continue; sum_no_lb += exp(val - max_no_lb); } // compute sum of qx scalar_t sum_qx(0.); for (int j{0}; j < dimsize; ++j) { if (j == lb) continue; int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; sum_qx += val * exp(val - max_no_lb) / sum_no_lb; } // compute grads for (int j{0}; j < dimsize; ++j) { int idx = n_idx * dimsize * m_size + j * m_size + m_idx; scalar_t val = logits[idx]; if (lb == j) { grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb - one; } else { grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb + scalar_t(lam / 2.) 
* ((val + one - sum_qx) * exp(val - max_no_lb) / sum_no_lb - (one / (dimsize - 1))); } } } } // cuda forward and backward at::Tensor large_margin_forward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index, const float lam) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto losses = torch::empty_like(labels, logits.options()); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } // call kernel if (dimsize < 32 && samplesize > 4096) { int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] { int shm_size = BLOCKSIZE * sizeof(scalar_t); SpatialLMarginLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), losses.contiguous().data_ptr<scalar_t>(), ignore_index, lam ); }); } else { int blockx = 32; while (blockx < dimsize) blockx *= 2; blockx = std::max(std::min(BLOCKSIZE, blockx / 2), 32); int blocky = std::max(std::min(samplesize, BLOCKSIZE / blockx), 1); int gridx = std::max(std::min(4096, samplesize / blocky), 1); int n_shm = (blockx + 8) * blocky; dim3 block(blockx, blocky); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] { int shm_size = n_shm * sizeof(scalar_t); LMarginLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), losses.contiguous().data_ptr<scalar_t>(), ignore_index, lam ); }); } THCudaCheck(cudaGetLastError()); return losses; } at::Tensor large_margin_backward_cuda(const at::Tensor &logits, const at::Tensor &labels, const int64_t ignore_index, const float lam) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int n_size = logits.size(0); const int dimsize = logits.size(1); const int m_size = logits.numel() / (n_size * dimsize); const int samplesize = labels.numel(); // allocate memory and cuda grid/block auto grad_logits = torch::empty_like(logits); if (grad_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_logits; } if (dimsize < 32 && samplesize > 4096) { int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1); dim3 block(BLOCKSIZE); dim3 grid(gridx); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] { int shm_size = BLOCKSIZE * sizeof(scalar_t); SpatialLMarginLossBackward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, grad_logits.contiguous().data_ptr<scalar_t>(), logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, lam ); }); } else { int blockx = 32; while (blockx < dimsize) blockx *= 2; blockx = std::max(std::min(BLOCKSIZE, blockx / 2), 32); int blocky = std::max(std::min(samplesize, BLOCKSIZE / blockx), 1); int gridx = 
std::max(std::min(4096, samplesize / blocky), 1); int n_shm = (blockx + 8) * blocky; dim3 block(blockx, blocky); dim3 grid(gridx); // call kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] { int shm_size = n_shm * sizeof(scalar_t); LMarginLossBackward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>( n_size, dimsize, m_size, grad_logits.contiguous().data_ptr<scalar_t>(), logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<int64_t>(), ignore_index, lam ); }); } THCudaCheck(cudaGetLastError()); return grad_logits; } // python inferface at::Tensor large_margin_forward(const at::Tensor &logits, const at::Tensor &labels, const float lam, const int64_t ignore_index) { if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this large margin loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return large_margin_forward_cuda(logits, labels, ignore_index, lam); } at::Tensor large_margin_backward(const at::Tensor &logits, const at::Tensor &labels, const float lam, const int64_t ignore_index) { // TODO: try AT_ASSERTM if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this large margin loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return large_margin_backward_cuda(logits, labels, ignore_index, lam); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("l_margin_forward", &large_margin_forward, "large margin forward"); m.def("l_margin_backward", &large_margin_backward, "large margin backward"); }
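// A minimal host-side C++ sketch of the per-sample loss that the spatial forward
// kernel above computes: the usual cross-entropy term over all classes plus a
// lam/2-weighted margin term over the non-label classes. The function name and
// the std::vector interface are illustrative only and are not part of the
// extension's API.
#include <algorithm>
#include <cmath>
#include <vector>

double reference_large_margin_loss(const std::vector<double>& logits,
                                   int label, double lam) {
    const int dimsize = static_cast<int>(logits.size());  // assumes dimsize >= 2
    // max over all classes, and over the non-label classes, for numerical stability
    double max_with_lb = -1e30, max_no_lb = -1e30;
    for (int j = 0; j < dimsize; ++j) {
        max_with_lb = std::max(max_with_lb, logits[j]);
        if (j != label) max_no_lb = std::max(max_no_lb, logits[j]);
    }
    double sum_with_lb = 0.0, sum_no_lb = 0.0;
    for (int j = 0; j < dimsize; ++j) {
        sum_with_lb += std::exp(logits[j] - max_with_lb);
        if (j != label) sum_no_lb += std::exp(logits[j] - max_no_lb);
    }
    // cross-entropy part (label class)
    double loss = -(logits[label] - max_with_lb) + std::log(sum_with_lb);
    // margin part (all other classes)
    for (int j = 0; j < dimsize; ++j) {
        if (j == label) continue;
        double q = std::exp(logits[j] - max_no_lb) / sum_no_lb;  // softmax without the label
        loss += (lam / 2.0) * (q - 1.0 / (dimsize - 1)) *
                (logits[j] - max_no_lb - std::log(sum_no_lb));
    }
    return loss;
}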
namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename Dtype> __global__ void topk_avg_pooling_kernel_by_row_improve( Dtype *output_data, const Dtype *input, const int *gpu_input_offset_l, const int *gpu_input_offset_r, const int row_max, const int col_max, const int topk_size, const int *topks, const int feat_map_num) { int row = gpu_input_offset_l[blockIdx.x + 1] - gpu_input_offset_l[blockIdx.x]; // 8 int col = gpu_input_offset_r[blockIdx.x + 1] - gpu_input_offset_r[blockIdx.x]; // 30 int max_k = topks[topk_size - 1]; max_k = max_k < col ? max_k : col; extern __shared__ Dtype smem[]; // H*W const Dtype *fm_row_in_data = input + blockIdx.x * row_max * feat_map_num * col_max + blockIdx.y * row_max * col_max; for (int i = threadIdx.x; i < row * col_max; i += blockDim.x) { smem[i] = fm_row_in_data[i]; } __syncthreads(); for (int idx = threadIdx.x; idx < row; idx += blockDim.x) { Dtype *fm_row_out_data = output_data + (gpu_input_offset_l[blockIdx.x] + idx) * feat_map_num * topk_size + blockIdx.y * topk_size; for (int i = 0; i < topk_size; ++i) { fm_row_out_data[i] = 0; } Dtype *smem_start_col = smem + idx * col_max; int counter = max_k; // topk_size; Dtype last_max_val = -20000.0; while (counter) { Dtype max_val = -10000.0; int max_pos = 0; // -1; int m = 0; for (; m < col; m++) { Dtype cur_data = smem_start_col[m]; if (cur_data > max_val) { max_val = cur_data; max_pos = m; last_max_val = max_val; } } if (max_val < -9999.0) { // == -10000.0 max_val = last_max_val; } smem_start_col[max_pos] = -10000000.0; int i = max_k - counter; for (int c = 0; c < topk_size; c++) { if (i <= topks[c] - 1) { fm_row_out_data[c] += max_val; } } counter--; } __syncthreads(); // compute avg for (int i = 0; i < topk_size; i++) { fm_row_out_data[i] = fm_row_out_data[i] / topks[i]; } } } template <typename Dtype> __global__ void topk_avg_pooling_kernel_for_big_data( Dtype *output_data, const Dtype *input_data, const int *gpu_input_offset_l, const int *gpu_input_offset_r, const int row_max, const int col_max, const int topk_size, const int *topks, const int feat_map_num, const int actual_row_in_shared_mem) { int row = gpu_input_offset_l[blockIdx.x + 1] - gpu_input_offset_l[blockIdx.x]; // 75 int col = gpu_input_offset_r[blockIdx.x + 1] - gpu_input_offset_r[blockIdx.x]; // 300 int max_k = topks[topk_size - 1]; max_k = max_k < col ? max_k : col; extern __shared__ Dtype smem[]; // H1*W or H2*W ... 
int filled_z = row / actual_row_in_shared_mem; int remain_row = row - filled_z * actual_row_in_shared_mem; if (blockIdx.z > filled_z || (blockIdx.z == filled_z && remain_row == 0)) { return; } const Dtype *fm_row_in_data = input_data + blockIdx.x * row_max * feat_map_num * col_max + blockIdx.y * row_max * col_max + blockIdx.z * actual_row_in_shared_mem * col_max; if (blockIdx.z == filled_z) { for (int i = threadIdx.x; i < remain_row * col_max; i += blockDim.x) { smem[i] = fm_row_in_data[i]; } } else { for (int i = threadIdx.x; i < actual_row_in_shared_mem * col_max; i += blockDim.x) { smem[i] = fm_row_in_data[i]; } } __syncthreads(); int cur_row; if (blockIdx.z == filled_z) { cur_row = remain_row; } else { cur_row = actual_row_in_shared_mem; } for (int idx = threadIdx.x; idx < cur_row; idx += blockDim.x) { Dtype *fm_row_out_data = output_data + (gpu_input_offset_l[blockIdx.x] + blockIdx.z * actual_row_in_shared_mem + idx) * feat_map_num * topk_size + blockIdx.y * topk_size; for (int i = 0; i < topk_size; ++i) { fm_row_out_data[i] = 0; } Dtype *smem_start_col = smem + idx * col_max; int counter = max_k; // topk_size; Dtype last_max_val = -20000.0; while (counter) { Dtype max_val = -10000.0; int max_pos = 0; // -1; int m = 0; for (; m < col; m++) { Dtype cur_data = smem_start_col[m]; if (cur_data > max_val) { max_val = cur_data; max_pos = m; last_max_val = max_val; } } if (max_val < -9999.0) { // == -10000.0 max_val = last_max_val; } smem_start_col[max_pos] = -10000000.0; int i = max_k - counter; for (int c = 0; c < topk_size; c++) { if (i <= topks[c] - 1) { fm_row_out_data[c] += max_val; } } counter--; } __syncthreads(); // compute avg for (int i = 0; i < topk_size; i++) { fm_row_out_data[i] = fm_row_out_data[i] / topks[i]; } } } template <typename T> void SequenceTopkAvgPoolingCompute<T>::PrepareForRun() { int device_id; cudaGetDevice(&device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); _shared_mem_size = deviceProp.sharedMemPerBlock; } template <typename T> void SequenceTopkAvgPoolingCompute<T>::Run() { auto &param = this->Param<param_t>(); auto &ctx = this->ctx_->template As<CUDAContext>(); auto cuda_stream = ctx.exec_stream(); CHECK(param.X->lod().size() > 0 && param.X->lod()[0].size() > 0) << "X sequence offset is not valid"; CHECK(param.ROW->lod().size() > 0 && param.ROW->lod()[0].size() > 0) << "ROW sequence offset is not valid"; int width_offset_len = param.X->lod()[0].size(); lite::DDim width_offset_shape(std::vector<int64_t>{width_offset_len}); _width_offset.Resize(width_offset_shape); std::vector<int> width_lod_0(width_offset_len, 0); for (size_t i = 0; i < param.X->lod()[0].size(); ++i) { width_lod_0[i] = static_cast<int>(param.X->lod()[0][i]); } cudaMemcpyAsync(_width_offset.mutable_data<int>(TARGET(kCUDA)), &width_lod_0[0], sizeof(int) * width_offset_len, cudaMemcpyHostToDevice, cuda_stream); int height_offset_len = param.ROW->lod()[0].size(); lite::DDim height_offset_shape(std::vector<int64_t>{height_offset_len}); _height_offset.Resize(height_offset_shape); std::vector<int> height_lod_0(height_offset_len, 0); for (size_t i = 0; i < param.ROW->lod()[0].size(); ++i) { height_lod_0[i] = static_cast<int>(param.ROW->lod()[0][i]); } cudaMemcpyAsync(_height_offset.mutable_data<int>(TARGET(kCUDA)), &height_lod_0[0], sizeof(int) * height_offset_len, cudaMemcpyHostToDevice, cuda_stream); const Tensor *x_tensor = param.X; Tensor *out_tensor = param.Out; const T *in_data = x_tensor->data<T>(); T *out_data = out_tensor->mutable_data<T>(TARGET(kCUDA)); 
int topk_num = param.topks.size(); lite::DDim top_ks_shape(std::vector<int64_t>{topk_num, 1, 1, 1}); _top_ks.Resize(top_ks_shape); cudaMemcpyAsync(_top_ks.mutable_data<int>(TARGET(kCUDA)), &param.topks[0], sizeof(int) * topk_num, cudaMemcpyHostToDevice, cuda_stream); int num = param.X->dims()[0]; int channel = param.X->dims()[1]; int height = param.X->dims()[2]; int width = param.X->dims()[3]; const int *height_offset = _height_offset.data<int>(); const int *width_offset = _width_offset.data<int>(); int feat_map_size = height * width; if (feat_map_size * sizeof(T) <= _shared_mem_size) { dim3 blocks(num, channel); dim3 threads(32, 1); topk_avg_pooling_kernel_by_row_improve< T><<<blocks, threads, feat_map_size * sizeof(T), cuda_stream>>>( out_data, in_data, height_offset, width_offset, height, width, param.topks.size(), _top_ks.data<int>(), param.channel_num); } else { int actual_row = _shared_mem_size / width / sizeof(T); int num_z = (height + actual_row - 1) / actual_row; dim3 blocks(num, channel, num_z); dim3 threads(32, 1); topk_avg_pooling_kernel_for_big_data< T><<<blocks, threads, actual_row * width * sizeof(T), cuda_stream>>>( out_data, in_data, height_offset, width_offset, height, width, param.topks.size(), _top_ks.data<int>(), param.channel_num, actual_row); } } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL( sequence_topk_avg_pooling, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::SequenceTopkAvgPoolingCompute<float>, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("ROW", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("COLUMN", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("pos", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .Finalize();
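// A small host-side C++ sketch of what the two pooling kernels above compute for a
// single row of `col` match scores: for every k in `topks`, the sum of the top
// min(k, col) scores divided by k. Function and variable names are illustrative and
// are not part of the Paddle Lite kernel; the sketch also ignores the kernels'
// sentinel handling for scores below -9999.
#include <algorithm>
#include <functional>
#include <vector>

std::vector<float> topk_avg_pool_row(std::vector<float> row,
                                     const std::vector<int>& topks) {
    std::sort(row.begin(), row.end(), std::greater<float>());
    std::vector<float> out(topks.size(), 0.f);
    for (size_t c = 0; c < topks.size(); ++c) {
        int k = std::min<int>(topks[c], static_cast<int>(row.size()));
        float sum = 0.f;
        for (int i = 0; i < k; ++i) sum += row[i];
        out[c] = sum / topks[c];  // divide by the requested k, as the kernels do
    }
    return out;
}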
#include <glog/logging.h> #include <cmath> #include <iostream> #include <vector> namespace dietgpu { template <FloatType FT, int Threads> struct JoinFloatNonAligned { static __device__ void join( const typename FloatTypeInfo<FT>::CompT* __restrict__ compIn, const typename FloatTypeInfo<FT>::NonCompT* __restrict__ nonCompIn, uint32_t size, typename FloatTypeInfo<FT>::WordT* __restrict__ out) { for (uint32_t i = blockIdx.x * Threads + threadIdx.x; i < size; i += gridDim.x * Threads) { out[i] = FloatTypeInfo<FT>::join(compIn[i], nonCompIn[i]); } } }; template <int Threads> struct JoinFloatNonAligned<FloatType::kFloat32, Threads> { static __device__ void join( const typename FloatTypeInfo< FloatType::kFloat32>::CompT* __restrict__ compIn, const typename FloatTypeInfo< FloatType::kFloat32>::NonCompT* __restrict__ nonCompIn, uint32_t size, typename FloatTypeInfo<FloatType::kFloat32>::WordT* __restrict__ out) { using FTI = FloatTypeInfo<FloatType::kFloat32>; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; // Where the low order 2 bytes are read uint16_t* nonComp2In = (uint16_t*)nonCompIn; // Where the high order byte is read uint8_t* nonComp1In = (uint8_t*)(nonComp2In + roundUp(size, 8)); for (uint32_t i = blockIdx.x * Threads + threadIdx.x; i < size; i += gridDim.x * Threads) { uint32_t nc = (uint32_t(nonComp1In[i]) * 65536U) + uint32_t(nonComp2In[i]); out[i] = FTI::join(compIn[i], nc); } } }; template <FloatType FT, int Threads> struct JoinFloatAligned16 { static __device__ void join( const typename FloatTypeInfo<FT>::CompT* __restrict__ compIn, const typename FloatTypeInfo<FT>::NonCompT* __restrict__ nonCompIn, uint32_t size, typename FloatTypeInfo<FT>::WordT* __restrict__ out) { using FTI = FloatTypeInfo<FT>; using WordT = typename FTI::WordT; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; using VecT = typename FTI::VecT; using CompVecT = typename FTI::CompVecT; using NonCompVecT = typename FTI::NonCompVecT; constexpr int kOuterUnroll = 2; constexpr int kInnerUnroll = sizeof(VecT) / sizeof(WordT); const CompVecT* compInV = (const CompVecT*)compIn; const NonCompVecT* nonCompInV = (const NonCompVecT*)nonCompIn; VecT* outV = (VecT*)out; // Each block handles Threads * kOuterUnroll * kInnerUnroll inputs/outputs // at a time, or Threads * kOuterUnroll 16-byte words at a time constexpr int kWordsPerBlock = Threads * kOuterUnroll; constexpr int kFloatsPerBlock = kWordsPerBlock * kInnerUnroll; uint32_t fullBlocks = divDown(size, kFloatsPerBlock); // Handle by block uint32_t startBlock = blockIdx.x * kWordsPerBlock; compInV += startBlock + threadIdx.x; nonCompInV += startBlock + threadIdx.x; outV += startBlock + threadIdx.x; for (uint32_t b = blockIdx.x; b < fullBlocks; b += gridDim.x, compInV += gridDim.x * kWordsPerBlock, nonCompInV += gridDim.x * kWordsPerBlock, outV += gridDim.x * kWordsPerBlock) { CompVecT comp[kOuterUnroll]; NonCompVecT nonComp[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { comp[i] = compInV[i * Threads]; nonComp[i] = nonCompInV[i * Threads]; } VecT v[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { #pragma unroll for (int j = 0; j < kInnerUnroll; ++j) { v[i].x[j] = FTI::join(comp[i].x[j], nonComp[i].x[j]); } } #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { outV[i * Threads] = v[i]; } } // Handle last (partial) block for (uint32_t i = fullBlocks * kFloatsPerBlock + blockIdx.x * Threads + threadIdx.x; i < size; i += blockDim.x) { out[i] = 
FTI::join(compIn[i], nonCompIn[i]); } } }; // float32 specialization template <int Threads> struct JoinFloatAligned16<FloatType::kFloat32, Threads> { static __device__ void join( const typename FloatTypeInfo< FloatType::kFloat32>::CompT* __restrict__ compIn, const typename FloatTypeInfo< FloatType::kFloat32>::NonCompT* __restrict__ nonCompIn, uint32_t size, typename FloatTypeInfo<FloatType::kFloat32>::WordT* __restrict__ out) { using FTI = FloatTypeInfo<FloatType::kFloat32>; using WordT = typename FTI::WordT; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; constexpr int kOuterUnroll = 1; constexpr int kInnerUnroll = sizeof(uint32x4) / sizeof(uint32_t); auto compInV = (const uint8x4*)compIn; auto nonCompIn2 = (const uint16_t*)nonCompIn; auto nonCompIn1 = (const uint8_t*)(nonCompIn2 + roundUp(size, 8)); auto nonCompInV2 = (uint16x4*)nonCompIn2; auto nonCompInV1 = (uint8x4*)nonCompIn1; auto outV = (uint32x4*)out; // Each block handles Threads * kOuterUnroll * kInnerUnroll inputs/outputs // at a time, or Threads * kOuterUnroll 16-byte words at a time constexpr int kWordsPerBlock = Threads * kOuterUnroll; constexpr int kFloatsPerBlock = kWordsPerBlock * kInnerUnroll; uint32_t fullBlocks = divDown(size, kFloatsPerBlock); // Handle by block uint32_t startBlock = blockIdx.x * kWordsPerBlock; compInV += startBlock + threadIdx.x; nonCompInV2 += startBlock + threadIdx.x; nonCompInV1 += startBlock + threadIdx.x; outV += startBlock + threadIdx.x; for (uint32_t b = blockIdx.x; b < fullBlocks; b += gridDim.x, compInV += gridDim.x * kWordsPerBlock, nonCompInV2 += gridDim.x * kWordsPerBlock, nonCompInV1 += gridDim.x * kWordsPerBlock, outV += gridDim.x * kWordsPerBlock) { uint8x4 comp[kOuterUnroll]; uint16x4 nonComp2[kOuterUnroll]; uint8x4 nonComp1[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { comp[i] = compInV[i * Threads]; nonComp2[i] = nonCompInV2[i * Threads]; nonComp1[i] = nonCompInV1[i * Threads]; } uint32x4 nonComp[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { #pragma unroll for (int j = 0; j < kInnerUnroll; ++j) { nonComp[i].x[j] = nonComp1[i].x[j] * 65536U + nonComp2[i].x[j]; } } uint32x4 v[kOuterUnroll]; #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { #pragma unroll for (int j = 0; j < kInnerUnroll; ++j) { v[i].x[j] = FTI::join(comp[i].x[j], nonComp[i].x[j]); } } #pragma unroll for (uint32_t i = 0; i < kOuterUnroll; ++i) { outV[i * Threads] = v[i]; } } // Handle last (partial) block for (uint32_t i = fullBlocks * kFloatsPerBlock + blockIdx.x * Threads + threadIdx.x; i < size; i += blockDim.x) { uint32_t nc2 = nonCompIn2[i]; uint32_t nc1 = nonCompIn1[i]; uint32_t nc = nc1 * 65536U + nc2; out[i] = FTI::join(compIn[i], nc); } } }; template <FloatType FT, int Threads> struct JoinFloatImpl { static __device__ void join( const typename FloatTypeInfo<FT>::CompT* compIn, const typename FloatTypeInfo<FT>::NonCompT* nonCompIn, uint32_t size, typename FloatTypeInfo<FT>::WordT* out) { // compIn should always be aligned, as we decompress into temporary memory auto compUnalignedBytes = getAlignmentRoundUp<sizeof(uint4)>(compIn); auto nonCompUnalignedBytes = getAlignmentRoundUp<sizeof(uint4)>(nonCompIn); auto outUnalignedBytes = getAlignmentRoundUp<sizeof(uint4)>(out); if (compUnalignedBytes || nonCompUnalignedBytes || outUnalignedBytes) { JoinFloatNonAligned<FT, Threads>::join(compIn, nonCompIn, size, out); } else { JoinFloatAligned16<FT, Threads>::join(compIn, nonCompIn, size, out); } } }; template <int 
Threads> struct JoinFloatImpl<FloatType::kFloat32, Threads> { static __device__ void join( const typename FloatTypeInfo<FloatType::kFloat32>::CompT* compIn, const typename FloatTypeInfo<FloatType::kFloat32>::NonCompT* nonCompIn, uint32_t size, typename FloatTypeInfo<FloatType::kFloat32>::WordT* out) { // FIXME: implement vectorization JoinFloatNonAligned<FloatType::kFloat32, Threads>::join( compIn, nonCompIn, size, out); } }; template < typename InProviderComp, typename InProviderNonComp, typename OutProvider, FloatType FT, int Threads> __global__ void joinFloat( InProviderComp inProviderComp, InProviderNonComp inProviderNonComp, OutProvider outProvider, uint8_t* __restrict__ outSuccess, uint32_t* __restrict__ outSize) { using FTI = FloatTypeInfo<FT>; using WordT = typename FTI::WordT; using CompT = typename FTI::CompT; using NonCompT = typename FTI::NonCompT; int batch = blockIdx.y; auto curCompIn = (const CompT*)inProviderComp.getBatchStart(batch); auto curHeaderIn = (const GpuFloatHeader*)inProviderNonComp.getBatchStart(batch); auto curOut = (WordT*)outProvider.getBatchStart(batch); // FIXME: test out capacity if (outSuccess && !outSuccess[batch]) { // ANS decompression failed, so nothing for us to do return; } // Get size as a header GpuFloatHeader h = *curHeaderIn; assert(h.magic == kGpuFloatHeaderMagic); auto curSize = h.size; if (outSize && (curSize != outSize[batch])) { // Reported size mismatch between ANS decompression and fp unpacking assert(false); return; } auto curNonCompIn = (const NonCompT*)(curHeaderIn + 1); JoinFloatImpl<FT, Threads>::join(curCompIn, curNonCompIn, curSize, curOut); } template <FloatType FT, typename InProvider> struct FloatANSProvider { using FTI = FloatTypeInfo<FT>; __host__ FloatANSProvider(InProvider& provider) : inProvider_(provider) {} __device__ void* getBatchStart(uint32_t batch) { uint8_t* p = (uint8_t*)inProvider_.getBatchStart(batch); GpuFloatHeader h = *((GpuFloatHeader*)p); assert(h.magic == kGpuFloatHeaderMagic); // Increment the pointer to past the floating point data return p + sizeof(GpuFloatHeader) + FTI::getUncompDataSize(h.size); } __device__ const void* getBatchStart(uint32_t batch) const { const uint8_t* p = (const uint8_t*)inProvider_.getBatchStart(batch); GpuFloatHeader h = *((const GpuFloatHeader*)p); assert(h.magic == kGpuFloatHeaderMagic); // Increment the pointer to past the floating point data return p + sizeof(GpuFloatHeader) + FTI::getUncompDataSize(h.size); } InProvider inProvider_; }; template <FloatType FT, int N> struct FloatANSProviderInline { using FTI = FloatTypeInfo<FT>; __host__ FloatANSProviderInline(int num, const void** in) { CHECK_LE(num, N); for (int i = 0; i < num; ++i) { in_[i] = in[i]; } } __device__ void* getBatchStart(uint32_t batch) { uint8_t* p = (uint8_t*)in_[batch]; GpuFloatHeader h = *((GpuFloatHeader*)p); assert(h.magic == kGpuFloatHeaderMagic); // Increment the pointer to past the floating point data return p + sizeof(GpuFloatHeader) + FTI::getUncompDataSize(h.size); } __device__ const void* getBatchStart(uint32_t batch) const { const uint8_t* p = (const uint8_t*)in_[batch]; GpuFloatHeader h = *((const GpuFloatHeader*)p); assert(h.magic == kGpuFloatHeaderMagic); // Increment the pointer to past the floating point data return p + sizeof(GpuFloatHeader) + FTI::getUncompDataSize(h.size); } const void* in_[N]; }; template <FloatType FT, uint32_t BlockSize> struct JoinFloatWriter { using FTI = FloatTypeInfo<FT>; __host__ __device__ JoinFloatWriter( uint32_t size, typename FTI::WordT* out, const typename 
FTI::NonCompT* nonComp) : out_(out), nonComp_(nonComp), outBlock_(nullptr), nonCompBlock_(nullptr) {} __host__ __device__ void setBlock(uint32_t block) { outBlock_ = out_ + block * BlockSize; nonCompBlock_ = nonComp_ + block * BlockSize; } __device__ void write(uint32_t offset, uint8_t sym) { auto nonComp = nonCompBlock_[offset]; outBlock_[offset] = FTI::join(sym, nonComp); } // // The preload is an offset of a NonCompVec4 // __device__ void preload(uint32_t offset) { // // We can preload this before decompressing all of the ANS compressed // data // // to hide memory latency // preload_ = ((typename FTI::NonCompVec4*)nonCompBlock_)[offset]; // } // __device__ void writeVec(uint32_t offset, ANSDecodedTx4 symV) { // typename FTI::Vec4 outV; // #pragma unroll // // We always receive 4 decoded values each iteration // // FIXME: this is hacky // for (int i = 0; i < 4; ++i) { // outV.x[i] = JoinFloat<FT>::join(symV.x[i], preload_.x[i]); // } // ((typename FTI::Vec4*)outBlock_)[offset] = outV; // } // typename FTI::NonCompVec4 preload_; typename FTI::WordT* out_; const typename FTI::NonCompT* nonComp_; typename FTI::WordT* outBlock_; const typename FTI::NonCompT* nonCompBlock_; }; template <uint32_t BlockSize> struct JoinFloatWriter<FloatType::kFloat32, BlockSize> { static constexpr bool kVectorize = false; using FTI = FloatTypeInfo<FloatType::kFloat32>; __host__ __device__ JoinFloatWriter( uint32_t size, typename FTI::WordT* out, const typename FTI::NonCompT* nonComp) : size_(size), out_(out), nonComp_(nonComp), outBlock_(nullptr), nonCompBlock2_(nullptr), nonCompBlock1_(nullptr) {} __host__ __device__ void setBlock(uint32_t block) { nonCompBlock2_ = (const uint16_t*)nonComp_ + block * BlockSize; nonCompBlock1_ = (const uint8_t*)((const uint16_t*)nonComp_ + roundUp(size_, 8U)) + block * BlockSize; outBlock_ = out_ + block * BlockSize; } __device__ void write(uint32_t offset, uint8_t sym) { uint32_t nc = uint32_t(nonCompBlock1_[offset]) * 65536U + uint32_t(nonCompBlock2_[offset]); outBlock_[offset] = FTI::join(sym, nc); } // // This implementation does not preload // __device__ void preload(uint32_t offset) { // } // // This implementation does not vectorize // __device__ void writeVec(uint32_t offset, ANSDecodedTx4 symV) { // } uint32_t size_; typename FTI::WordT* out_; const typename FTI::NonCompT* nonComp_; typename FTI::WordT* outBlock_; const uint16_t* nonCompBlock2_; const uint8_t* nonCompBlock1_; }; template < typename InProvider, typename OutProvider, FloatType FT, uint32_t BlockSize> struct FloatOutProvider { using Writer = JoinFloatWriter<FT, BlockSize>; using FTI = FloatTypeInfo<FT>; __host__ FloatOutProvider(InProvider& inProvider, OutProvider& outProvider) : inProvider_(inProvider), outProvider_(outProvider) {} __device__ Writer getWriter(uint32_t batch) { // Get float header auto h = (const GpuFloatHeader*)inProvider_.getBatchStart(batch); return Writer( h->size, (typename FTI::WordT*)outProvider_.getBatchStart(batch), // advance past the header (const typename FTI::NonCompT*)(h + 1)); } __device__ uint32_t getBatchSize(uint32_t batch) { return outProvider_.getBatchSize(batch); } InProvider inProvider_; OutProvider outProvider_; }; template <int N, FloatType FT, uint32_t BlockSize> struct FloatOutProviderInline { using FTI = FloatTypeInfo<FT>; using Writer = JoinFloatWriter<FT, BlockSize>; __host__ FloatOutProviderInline( int num, const void** in, void** out, const uint32_t* outCapacity) { CHECK_LE(num, N); for (int i = 0; i < num; ++i) { in_[i] = in[i]; out_[i] = out[i]; 
outCapacity_[i] = outCapacity[i]; } } __device__ Writer getWriter(uint32_t batch) { // Get float header auto h = (const GpuFloatHeader*)in_[batch]; return Writer( h->size, (typename FTI::WordT*)out_[batch], // advance past the header (const typename FTI::NonCompT*)(h + 1)); } __device__ uint32_t getBatchSize(uint32_t batch) { return outCapacity_[batch]; } const void* in_[N]; void* out_[N]; uint32_t outCapacity_[N]; }; template <typename InProvider, typename OutProvider> void floatDecompressDevice( StackDeviceMemory& res, const FloatDecompressConfig& config, uint32_t numInBatch, InProvider& inProvider, OutProvider& outProvider, uint32_t maxCapacity, uint8_t* outSuccess_dev, uint32_t* outSize_dev, cudaStream_t stream) { // We can perform decoding in a single pass if all input data is 16 byte // aligned if (config.is16ByteAligned) { // // Fused kernel: perform decompression in a single pass // #define RUN_FUSED(FT) \ do { \ auto inProviderANS = FloatANSProvider<FT, InProvider>(inProvider); \ auto outProviderANS = \ FloatOutProvider<InProvider, OutProvider, FT, kDefaultBlockSize>( \ inProvider, outProvider); \ \ ansDecodeBatch( \ res, \ config.ansConfig, \ numInBatch, \ inProviderANS, \ outProviderANS, \ outSuccess_dev, \ outSize_dev, \ stream); \ } while (false) switch (config.floatType) { case kFloat16: RUN_FUSED(FloatType::kFloat16); break; case kBFloat16: RUN_FUSED(FloatType::kBFloat16); break; case kFloat32: RUN_FUSED(FloatType::kFloat32); break; default: CHECK(false); break; } #undef RUN_FUSED } else { // // Two pass kernel: decompress the ANS compressed data, then rejoin with // uncompressed data // // Temporary space for the decompressed exponents // We need to ensure 16 byte alignment for the decompressed data due to // vectorization uint32_t maxCapacityAligned = roundUp(maxCapacity, sizeof(uint4)); auto exp_dev = res.alloc<uint8_t>(stream, numInBatch * maxCapacityAligned); #define RUN_DECODE(FT) \ do { \ using InProviderANS = FloatANSProvider<FT, InProvider>; \ auto inProviderANS = InProviderANS(inProvider); \ \ using OutProviderANS = BatchProviderStride; \ auto outProviderANS = OutProviderANS( \ exp_dev.data(), maxCapacityAligned, maxCapacityAligned); \ \ ansDecodeBatch( \ res, \ config.ansConfig, \ numInBatch, \ inProviderANS, \ outProviderANS, \ outSuccess_dev, \ outSize_dev, \ stream); \ \ constexpr int kThreads = 256; \ auto& props = getCurrentDeviceProperties(); \ int maxBlocksPerSM = 0; \ CUDA_VERIFY(cudaOccupancyMaxActiveBlocksPerMultiprocessor( \ &maxBlocksPerSM, \ joinFloat<OutProviderANS, InProvider, OutProvider, FT, kThreads>, \ kThreads, \ 0)); \ uint32_t maxGrid = maxBlocksPerSM * props.multiProcessorCount; \ uint32_t perBatchGrid = divUp(maxGrid, numInBatch); \ if ((perBatchGrid * numInBatch > maxGrid) && perBatchGrid > 1) { \ perBatchGrid -= 1; \ } \ auto grid = dim3(perBatchGrid, numInBatch); \ \ joinFloat<OutProviderANS, InProvider, OutProvider, FT, kThreads> \ <<<grid, kThreads, 0, stream>>>( \ outProviderANS, \ inProvider, \ outProvider, \ outSuccess_dev, \ outSize_dev); \ } while (false) switch (config.floatType) { case kFloat16: RUN_DECODE(FloatType::kFloat16); break; case kBFloat16: RUN_DECODE(FloatType::kBFloat16); break; case kFloat32: RUN_DECODE(FloatType::kFloat32); break; default: CHECK(false); break; } #undef RUN_DECODE } CUDA_TEST_ERROR(); } } // namespace dietgpu
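// A host-side C++ sketch of the float32 non-compressed byte layout the kFloat32
// specializations above assume: the low 16 bits of every value are stored as a
// contiguous uint16_t plane, followed by a uint8_t plane holding the high byte,
// with the second plane starting at an offset rounded up to a multiple of 8
// elements (the roundUp(size, 8) in the kernels). How the resulting 24-bit value
// is merged with the ANS-decoded byte is defined by FloatTypeInfo<kFloat32>::join
// in the dietgpu headers and is not reproduced here; round_up8 and
// gather_noncomp24 are illustrative names.
#include <cstdint>

static inline uint32_t round_up8(uint32_t n) { return (n + 7u) / 8u * 8u; }

// Rebuild the 24-bit non-compressed part of element i from the two packed planes.
uint32_t gather_noncomp24(const uint8_t* nonComp, uint32_t size, uint32_t i) {
    const uint16_t* low16 = reinterpret_cast<const uint16_t*>(nonComp);
    const uint8_t*  high8 = reinterpret_cast<const uint8_t*>(low16 + round_up8(size));
    return uint32_t(high8[i]) * 65536u + uint32_t(low16[i]);
}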
* This sample implements a conjugate gradient solver on GPU using * Multi Block Cooperative Groups, also uses Unified Memory. * */ // includes, system #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda_runtime.h> // Utilities and system includes #include <helper_cuda.h> // helper function CUDA error checking and initialization #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <cooperative_groups.h> #include <cooperative_groups/reduce.h> namespace cg = cooperative_groups; const char *sSDKname = "conjugateGradientMultiBlockCG"; #define ENABLE_CPU_DEBUG_CODE 0 #define THREADS_PER_BLOCK 512 /* genTridiag: generate a random tridiagonal symmetric matrix */ void genTridiag(int *I, int *J, float *val, int N, int nz) { I[0] = 0, J[0] = 0, J[1] = 1; val[0] = static_cast<float>(rand()) / RAND_MAX + 10.0f; val[1] = static_cast<float>(rand()) / RAND_MAX; int start; for (int i = 1; i < N; i++) { if (i > 1) { I[i] = I[i - 1] + 3; } else { I[1] = 2; } start = (i - 1) * 3 + 2; J[start] = i - 1; J[start + 1] = i; if (i < N - 1) { J[start + 2] = i + 1; } val[start] = val[start - 1]; val[start + 1] = static_cast<float>(rand()) / RAND_MAX + 10.0f; if (i < N - 1) { val[start + 2] = static_cast<float>(rand()) / RAND_MAX; } } I[N] = nz; } // I - contains location of the given non-zero element in the row of the matrix // J - contains location of the given non-zero element in the column of the // matrix val - contains values of the given non-zero elements of the matrix // inputVecX - input vector to be multiplied // outputVecY - resultant vector void cpuSpMV(int *I, int *J, float *val, int nnz, int num_rows, float alpha, float *inputVecX, float *outputVecY) { for (int i = 0; i < num_rows; i++) { int num_elems_this_row = I[i + 1] - I[i]; float output = 0.0; for (int j = 0; j < num_elems_this_row; j++) { output += alpha * val[I[i] + j] * inputVecX[J[I[i] + j]]; } outputVecY[i] = output; } return; } double dotProduct(float *vecA, float *vecB, int size) { double result = 0.0; for (int i = 0; i < size; i++) { result = result + (vecA[i] * vecB[i]); } return result; } void scaleVector(float *vec, float alpha, int size) { for (int i = 0; i < size; i++) { vec[i] = alpha * vec[i]; } } void saxpy(float *x, float *y, float a, int size) { for (int i = 0; i < size; i++) { y[i] = a * x[i] + y[i]; } } void cpuConjugateGrad(int *I, int *J, float *val, float *x, float *Ax, float *p, float *r, int nnz, int N, float tol) { int max_iter = 10000; float alpha = 1.0; float alpham1 = -1.0; float r0 = 0.0, b, a, na; cpuSpMV(I, J, val, nnz, N, alpha, x, Ax); saxpy(Ax, r, alpham1, N); float r1 = dotProduct(r, r, N); int k = 1; while (r1 > tol * tol && k <= max_iter) { if (k > 1) { b = r1 / r0; scaleVector(p, b, N); saxpy(r, p, alpha, N); } else { for (int i = 0; i < N; i++) p[i] = r[i]; } cpuSpMV(I, J, val, nnz, N, alpha, p, Ax); float dot = dotProduct(p, Ax, N); a = r1 / dot; saxpy(p, x, a, N); na = -a; saxpy(Ax, r, na, N); r0 = r1; r1 = dotProduct(r, r, N); printf("\nCPU code iteration = %3d, residual = %e\n", k, sqrt(r1)); k++; } } __device__ void gpuSpMV(int *I, int *J, float *val, int nnz, int num_rows, float alpha, float *inputVecX, float *outputVecY, cg::thread_block &cta, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < num_rows; i += grid.size()) { int row_elem = I[i]; int next_row_elem = I[i + 1]; int num_elems_this_row = next_row_elem - row_elem; float output = 0.0; for (int j = 0; j < num_elems_this_row; j++) { // I or J or val arrays - can be put 
in shared memory // as the access is random and reused in next calls of gpuSpMV function. output += alpha * val[row_elem + j] * inputVecX[J[row_elem + j]]; } outputVecY[i] = output; } } __device__ void gpuSaxpy(float *x, float *y, float a, int size, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < size; i += grid.size()) { y[i] = a * x[i] + y[i]; } } __device__ void gpuDotProduct(float *vecA, float *vecB, double *result, int size, const cg::thread_block &cta, const cg::grid_group &grid) { extern __shared__ double tmp[]; double temp_sum = 0.0; for (int i = grid.thread_rank(); i < size; i += grid.size()) { temp_sum += static_cast<double>(vecA[i] * vecB[i]); } cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>()); if (tile32.thread_rank() == 0) { tmp[tile32.meta_group_rank()] = temp_sum; } cg::sync(cta); if (tile32.meta_group_rank() == 0) { temp_sum = tile32.thread_rank() < tile32.meta_group_size() ? tmp[tile32.thread_rank()] : 0.0; temp_sum = cg::reduce(tile32, temp_sum, cg::plus<double>()); if (tile32.thread_rank() == 0) { atomicAdd(result, temp_sum); } } } __device__ void gpuCopyVector(float *srcA, float *destB, int size, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < size; i += grid.size()) { destB[i] = srcA[i]; } } __device__ void gpuScaleVectorAndSaxpy(const float *x, float *y, float a, float scale, int size, const cg::grid_group &grid) { for (int i = grid.thread_rank(); i < size; i += grid.size()) { y[i] = a * x[i] + scale * y[i]; } } extern "C" __global__ void gpuConjugateGradient(int *I, int *J, float *val, float *x, float *Ax, float *p, float *r, double *dot_result, int nnz, int N, float tol) { cg::thread_block cta = cg::this_thread_block(); cg::grid_group grid = cg::this_grid(); int max_iter = 10000; float alpha = 1.0; float alpham1 = -1.0; float r0 = 0.0, r1, b, a, na; gpuSpMV(I, J, val, nnz, N, alpha, x, Ax, cta, grid); cg::sync(grid); gpuSaxpy(Ax, r, alpham1, N, grid); cg::sync(grid); gpuDotProduct(r, r, dot_result, N, cta, grid); cg::sync(grid); r1 = *dot_result; int k = 1; while (r1 > tol * tol && k <= max_iter) { if (k > 1) { b = r1 / r0; gpuScaleVectorAndSaxpy(r, p, alpha, b, N, grid); } else { gpuCopyVector(r, p, N, grid); } cg::sync(grid); gpuSpMV(I, J, val, nnz, N, alpha, p, Ax, cta, grid); if (threadIdx.x == 0 && blockIdx.x == 0) *dot_result = 0.0; cg::sync(grid); gpuDotProduct(p, Ax, dot_result, N, cta, grid); cg::sync(grid); a = r1 / *dot_result; gpuSaxpy(p, x, a, N, grid); na = -a; gpuSaxpy(Ax, r, na, N, grid); r0 = r1; cg::sync(grid); if (threadIdx.x == 0 && blockIdx.x == 0) *dot_result = 0.0; cg::sync(grid); gpuDotProduct(r, r, dot_result, N, cta, grid); cg::sync(grid); r1 = *dot_result; k++; } } bool areAlmostEqual(float a, float b, float maxRelDiff) { float diff = fabsf(a - b); float abs_a = fabsf(a); float abs_b = fabsf(b); float largest = abs_a > abs_b ? 
abs_a : abs_b; if (diff <= largest * maxRelDiff) { return true; } else { printf("maxRelDiff = %.8e\n", maxRelDiff); printf( "diff %.8e > largest * maxRelDiff %.8e therefore %.8e and %.8e are not " "same\n", diff, largest * maxRelDiff, a, b); return false; } } int main(int argc, char **argv) { int N = 0, nz = 0, *I = NULL, *J = NULL; float *val = NULL; const float tol = 1e-5f; float *x; float *rhs; float r1; float *r, *p, *Ax; cudaEvent_t start, stop; printf("Starting [%s]...\n", sSDKname); // This will pick the best possible CUDA capable device cudaDeviceProp deviceProp; int devID = findCudaDevice(argc, (const char **)argv); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID)); if (!deviceProp.managedMemory) { // This sample requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } // This sample requires being run on a device that supports Cooperative Kernel // Launch if (!deviceProp.cooperativeLaunch) { printf( "\nSelected GPU (%d) does not support Cooperative Kernel Launch, " "Waiving the run\n", devID); exit(EXIT_WAIVED); } // Statistics about the GPU device printf( "> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n", deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor); /* Generate a random tridiagonal symmetric matrix in CSR format */ N = 1048576; nz = (N - 2) * 3 + 4; cudaMallocManaged(reinterpret_cast<void **>(&I), sizeof(int) * (N + 1)); cudaMallocManaged(reinterpret_cast<void **>(&J), sizeof(int) * nz); cudaMallocManaged(reinterpret_cast<void **>(&val), sizeof(float) * nz); genTridiag(I, J, val, N, nz); cudaMallocManaged(reinterpret_cast<void **>(&x), sizeof(float) * N); cudaMallocManaged(reinterpret_cast<void **>(&rhs), sizeof(float) * N); double *dot_result; cudaMallocManaged(reinterpret_cast<void **>(&dot_result), sizeof(double)); *dot_result = 0.0; // temp memory for CG checkCudaErrors( cudaMallocManaged(reinterpret_cast<void **>(&r), N * sizeof(float))); checkCudaErrors( cudaMallocManaged(reinterpret_cast<void **>(&p), N * sizeof(float))); checkCudaErrors( cudaMallocManaged(reinterpret_cast<void **>(&Ax), N * sizeof(float))); cudaDeviceSynchronize(); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); #if ENABLE_CPU_DEBUG_CODE float *Ax_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N)); float *r_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N)); float *p_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N)); float *x_cpu = reinterpret_cast<float *>(malloc(sizeof(float) * N)); for (int i = 0; i < N; i++) { r_cpu[i] = 1.0; Ax_cpu[i] = x_cpu[i] = 0.0; } #endif for (int i = 0; i < N; i++) { r[i] = rhs[i] = 1.0; x[i] = 0.0; } void *kernelArgs[] = { (void *)&I, (void *)&J, (void *)&val, (void *)&x, (void *)&Ax, (void *)&p, (void *)&r, (void *)&dot_result, (void *)&nz, (void *)&N, (void *)&tol, }; int sMemSize = sizeof(double) * ((THREADS_PER_BLOCK/32) + 1); int numBlocksPerSm = 0; int numThreads = THREADS_PER_BLOCK; checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocksPerSm, gpuConjugateGradient, numThreads, sMemSize)); int numSms = deviceProp.multiProcessorCount; dim3 dimGrid(numSms * numBlocksPerSm, 1, 1), dimBlock(THREADS_PER_BLOCK, 1, 1); checkCudaErrors(cudaEventRecord(start, 0)); checkCudaErrors(cudaLaunchCooperativeKernel((void *)gpuConjugateGradient, dimGrid, dimBlock, kernelArgs, sMemSize, NULL)); checkCudaErrors(cudaEventRecord(stop, 0)); 
checkCudaErrors(cudaDeviceSynchronize()); float time; checkCudaErrors(cudaEventElapsedTime(&time, start, stop)); r1 = *dot_result; printf("GPU Final, residual = %e, kernel execution time = %f ms\n", sqrt(r1), time); #if ENABLE_CPU_DEBUG_CODE cpuConjugateGrad(I, J, val, x_cpu, Ax_cpu, p_cpu, r_cpu, nz, N, tol); #endif float rsum, diff, err = 0.0; for (int i = 0; i < N; i++) { rsum = 0.0; for (int j = I[i]; j < I[i + 1]; j++) { rsum += val[j] * x[J[j]]; } diff = fabs(rsum - rhs[i]); if (diff > err) { err = diff; } } checkCudaErrors(cudaFree(I)); checkCudaErrors(cudaFree(J)); checkCudaErrors(cudaFree(val)); checkCudaErrors(cudaFree(x)); checkCudaErrors(cudaFree(rhs)); checkCudaErrors(cudaFree(r)); checkCudaErrors(cudaFree(p)); checkCudaErrors(cudaFree(Ax)); checkCudaErrors(cudaFree(dot_result)); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); #if ENABLE_CPU_DEBUG_CODE free(Ax_cpu); free(r_cpu); free(p_cpu); free(x_cpu); #endif printf("Test Summary: Error amount = %f \n", err); fprintf(stdout, "&&&& conjugateGradientMultiBlockCG %s\n", (sqrt(r1) < tol) ? "PASSED" : "FAILED"); exit((sqrt(r1) < tol) ? EXIT_SUCCESS : EXIT_FAILURE); }
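// A small host-side C++ sketch of the CSR row-pointer layout genTridiag above
// produces for an N x N symmetric tridiagonal matrix: two nonzeros in the first
// and last rows and three in every interior row, so nz = (N - 2) * 3 + 4 = 3N - 2,
// matching the value main() passes in. The function name is illustrative only.
#include <cassert>
#include <vector>

std::vector<int> tridiag_row_pointers(int N) {
    assert(N >= 2);  // genTridiag itself assumes at least a 2 x 2 matrix
    std::vector<int> I(N + 1, 0);
    for (int row = 0; row < N; ++row) {
        int in_row = (row == 0 || row == N - 1) ? 2 : 3;
        I[row + 1] = I[row] + in_row;
    }
    assert(I[N] == 3 * N - 2);
    return I;
}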
#define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "../Observers/ColorHelpers.cu" extern "C" { __device__ float activationProbability(float x, float sigma) { return 1.0 / (1.0 + expf(-sigma * x)); } __device__ float activateRandomly(float probability, float random) { return random < probability; } // RBM Kernels //////////////////////////////////////////////////////////////// __global__ void RBMInputForwardKernel( float *inputPtr, float *outputPtr, float *biasPtr, bool applyBias, int thisLayerSize ) { // i: current neuron id int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (i < thisLayerSize) { float result = inputPtr[i]; if (applyBias) result += biasPtr[i]; outputPtr[i] = inputPtr[i]; } } __global__ void RBMInputForwardAndStoreKernel( float *inputPtr, float *outputPtr, float *biasPtr, float *storePtr, bool applyBias, int thisLayerSize ) { // i: current neuron id int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (i < thisLayerSize) { float result = inputPtr[i]; if (applyBias) result += biasPtr[i]; outputPtr[i] = result; storePtr[i] = result; } } __global__ void RBMSamplePositiveKernel( float *inputPtr, float *outputPtr, float *positivePtr, int thisLayerSize, // = outputPtr size int weightCount // = prevLayerSize * thisLayerSize ) { int weightIndex = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (weightIndex < weightCount) { // i: prev. layer neuron id int i = weightIndex / thisLayerSize; // j: current layer neuron id int j = weightIndex % thisLayerSize; positivePtr[weightIndex] = inputPtr[i] * outputPtr[j]; } } __global__ void RBMRandomActivationKernel( float *outputPtr, float *randomPtr, int size ) { int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (i < size) { outputPtr[i] = activateRandomly(outputPtr[i], randomPtr[i]); } } __global__ void RBMForwardKernel( float *inputPtr, float *outputPtr, float *weightPtr, float *biasPtr, float sigma, int prevLayerSize, int thisLayerSize, bool useDropoutMask, bool useDropout, float dropoutRate, float *dropoutMask ) { // i: prev. layer neuron id // j: current layer neuron id int i; int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (j < thisLayerSize) { // dropout this neuron if (useDropout && useDropoutMask && !dropoutMask[j]) { outputPtr[j] = 0; } else { float sum = 0.0; int index = j; for (i = 0; i < prevLayerSize; i++) { sum += weightPtr[index] * inputPtr[i]; index += thisLayerSize; } // add bias sum += biasPtr[j]; float result = activationProbability(sum, sigma); // only used for reconstruction forward calls if (useDropout && !useDropoutMask && dropoutRate < 1) result /= dropoutRate; // set output value outputPtr[j] = result; } } } // This is the same as Forward, only stores output to another memory block in addition... 
__global__ void RBMForwardAndStoreKernel( float *inputPtr, float *outputPtr, float *weightPtr, float *biasPtr, float *storedOutputPtr, float sigma, int prevLayerSize, int thisLayerSize, bool useDropout, float *dropoutMask ) { // i: prev. layer neuron id // j: current layer neuron id int i; int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (j < thisLayerSize) { // dropout this neuron if (useDropout && !dropoutMask[j]) { outputPtr[j] = 0; storedOutputPtr[j] = 0; } else { float sum = 0.0; int index = j; for (i = 0; i < prevLayerSize; i++) { sum += weightPtr[index] * inputPtr[i]; index += thisLayerSize; } // add bias sum += biasPtr[j]; float result = activationProbability(sum, sigma); // set output value outputPtr[j] = result; // store output value storedOutputPtr[j] = result; } } } __global__ void RBMBackwardKernel( float *inputPtr, // output of layer x+1 == this layer float *outputPtr, // output of layer x (we are going backwards) = input of layer x+1 float *weightPtr, float *biasPtr, // biases of the layer x == previous layer float sigma, int prevLayerSize, int thisLayerSize ) { // i: prev. layer neuron id // j: current layer neuron id int j; int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (i < prevLayerSize) { float sum = 0.0; int index = i * thisLayerSize; for (j = 0; j < thisLayerSize; j++) sum += weightPtr[index + j] * inputPtr[j]; sum += biasPtr[i]; // set output value outputPtr[i] = activationProbability(sum, sigma); } } __global__ void RBMUpdateBiasesKernel( float *biasPtr, float *positivePtr, // previous output of this layer == created by Forw&Store kernel float *negativePtr, // current output of this layer == outputPtr float *previousDeltaPtr, float *energyPtr, float learningRate, float momentum, float weightDecay, int thisLayerSize, bool storeEnergy ) { int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (i < thisLayerSize) { float difference = positivePtr[i] - negativePtr[i]; float delta = // gradient descent learningRate * difference + // momentum momentum * previousDeltaPtr[i] - // weight decay weightDecay * biasPtr[i] * learningRate; previousDeltaPtr[i] = delta; biasPtr[i] += delta; if (storeEnergy) atomicAdd(energyPtr, difference * difference); } } __global__ void RBMUpdateWeightsKernel( float *inputPtr, float *outputPtr, float *weightPtr, float *positivePtr, float *previousDeltaPtr, float *energyPtr, float learningRate, float momentum, float weightDecay, int thisLayerSize, // = outputPtr size int weightCount, // = prevLayerSize * thisLayerSize bool storeEnergy ) { int weightIndex = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (weightIndex < weightCount) { // first, compute negative (computed exactly as positive): // i: prev. 
layer neuron id int i = weightIndex / thisLayerSize; // j: current layer neuron id int j = weightIndex % thisLayerSize; float negative = inputPtr[i] * outputPtr[j]; float difference = positivePtr[weightIndex] - negative; float delta = // gradient descent learningRate * (difference) + // momentum momentum * previousDeltaPtr[weightIndex] - // weight decay weightDecay * weightPtr[weightIndex] * learningRate; previousDeltaPtr[weightIndex] = delta; weightPtr[weightIndex] += delta; if (storeEnergy) atomicAdd(energyPtr, difference * difference); } } __global__ void RBMCopyFilterKernel( float *weightPtr, float *filterPtr, int weightCount, int i, int thisLayerSize ) { int weightIndex = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (weightIndex < weightCount) { filterPtr[weightIndex] = weightPtr[i + weightIndex * thisLayerSize]; } } __global__ void RBMFilterObserver(float* refVectors, float* activations, int patchCount, int patchWidth, int patchHeight, float minValue, float maxValue, int textureWidth, int textureHeight, unsigned int* pixels) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; int size = textureWidth * textureHeight; int patchSize = patchWidth * patchHeight; int patchIndex = threadId % patchCount; int pixelIndex = threadId / patchCount; int pixX = pixelIndex % patchWidth; int pixY = pixelIndex / patchWidth; int patchesInRow = textureWidth / patchWidth; int patchX = patchIndex % patchesInRow; int patchY = patchIndex / patchesInRow; if (threadId < size) { float activation = activations[patchIndex]; float hue = (1 - activation) * 0.2f; float saturation = 0.8 * (activation > 0.05f); float value = scale_to_interval(refVectors[threadId], minValue, maxValue); pixels[(patchY * patchHeight + pixY) * textureWidth + patchX * patchWidth + pixX] = hsva_to_uint_rgba(hue, saturation, value, 1); } } __global__ void RBMDropoutMaskKernel( float *maskPtr, float dropout, int thisLayerSize ) { int index = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (index < thisLayerSize) { maskPtr[index] = dropout < maskPtr[index]; } } }
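// A minimal host-side C++ sketch of the single-weight update rule applied by
// RBMUpdateWeightsKernel above: a contrastive-divergence gradient step with
// momentum and L2 weight decay, where positive = v0 * h0 comes from the data
// phase and negative = v1 * h1 from the reconstruction phase. The struct and
// function names are illustrative and not part of the kernel interface.
struct RBMWeightState {
    float weight;
    float prev_delta;
};

void cd_weight_update(RBMWeightState& s, float positive, float negative,
                      float learning_rate, float momentum, float weight_decay) {
    float difference = positive - negative;
    float delta = learning_rate * difference                // gradient descent
                + momentum * s.prev_delta                   // momentum
                - weight_decay * s.weight * learning_rate;  // weight decay
    s.prev_delta = delta;
    s.weight += delta;
}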
#if defined(THC_REAL_IS_HALF) #define _REAL(val) THC_float2half(val) #else #define _REAL(val) (val) #endif static int nn_(StepGRU_updateOutput)(lua_State *L) { THCState *state = getCudaState(L); THCTensor *weight = (THCTensor *)luaT_checkudata(L, 1, torch_Tensor); THCTensor *bias = (THCTensor *)luaT_checkudata(L, 2, torch_Tensor); THCTensor *gates = (THCTensor *)luaT_checkudata(L, 3, torch_Tensor); THCTensor *cur_x = (THCTensor *)luaT_checkudata(L, 4, torch_Tensor); THCTensor *prev_h = (THCTensor *)luaT_checkudata(L, 5, torch_Tensor); int inputsize = luaL_checkinteger(L, 6); int outputsize = luaL_checkinteger(L, 7); THCTensor *next_h = (THCTensor *)luaT_checkudata(L, 8, torch_Tensor); int batchsize = THCTensor_(size)(state, cur_x, 0); if (THCTensor_(size)(state, cur_x, 1) != inputsize) return LUA_HANDLE_ERROR_STR(L, "expected input[1]:size(2) == inputsize"); THLongStorage* size = THLongStorage_newWithSize2(1, 3 * outputsize); THCTensor *buffer = THCTensor_(newView)(state, bias, size); buffer->stride[0] = 0; buffer->size[0] = batchsize; THCTensor_(resize2d)(state, next_h, batchsize, outputsize); long nElement = THCTensor_(nElement)(state, gates); THCTensor_(resize2d)(state, gates, batchsize, 3 * outputsize); if (nElement != batchsize * 3 * outputsize) THCTensor_(fill)(state, gates, _REAL(0)); THCTensor *Wx = THCTensor_(newNarrow)(state, weight, 0, 0, inputsize); THCTensor *Wh = THCTensor_(newNarrow)(state, weight, 0, inputsize, outputsize); THCTensor *sub_gates = THCTensor_(newNarrow)(state, gates, 1, 0, 2 * outputsize); THCTensor *sub_Wh = THCTensor_(newNarrow)(state, Wh, 1, 0, 2 * outputsize); // r = sig(Wx * x + Wh * prev_h + b) THCTensor *reset_gate = THCTensor_(newNarrow)(state, gates, 1, 0, outputsize); // u = sig(Wx * x + Wh * prev_h + b) THCTensor *update_gate = THCTensor_(newNarrow)(state, gates, 1, outputsize, outputsize); // hc = tanh(Wx * x + Wh * r . prev_h + b) THCTensor *hidden_candidate = THCTensor_(newNarrow)(state, gates, 1, 2*outputsize, outputsize); // forward THCTensor_(addmm)(state, gates, _REAL(1), buffer, _REAL(1), cur_x, Wx); THCTensor_(addmm)(state, sub_gates, _REAL(1), sub_gates, _REAL(1), prev_h, sub_Wh); THCTensor_(sigmoid)(state, sub_gates, sub_gates); // temporary buffer : r . prev_h THCTensor_(cmul)(state, next_h, reset_gate, prev_h); THCTensor_(narrow)(state, sub_Wh, Wh, 1, 2 * outputsize, outputsize); // hc += Wh * r . prev_h THCTensor_(addmm)(state, hidden_candidate, _REAL(1), hidden_candidate, _REAL(1), next_h, sub_Wh); // hc = tanh(Wx * x + Wh * r . prev_h + b) THCTensor_(tanh)(state, hidden_candidate, hidden_candidate); // (1-u) . hc = hc - (u . hc) THCTensor_(addcmul)(state, next_h, hidden_candidate, _REAL(-1), update_gate, hidden_candidate); // next_h = (1-u) . hc + u . 
prev_h THCTensor_(addcmul)(state, next_h, next_h, _REAL(1), update_gate, prev_h); THCTensor_(free)(state, Wx); THCTensor_(free)(state, Wh); THCTensor_(free)(state, buffer); THCTensor_(free)(state, reset_gate); THCTensor_(free)(state, update_gate); THCTensor_(free)(state, hidden_candidate); THCTensor_(free)(state, sub_gates); THCTensor_(free)(state, sub_Wh); THLongStorage_free(size); return 1; } static int nn_(StepGRU_backward)(lua_State *L) { THCState *state = getCudaState(L); THCTensor *weight = (THCTensor *)luaT_checkudata(L, 1, torch_Tensor); THCTensor *gates = (THCTensor *)luaT_checkudata(L, 2, torch_Tensor); THCTensor *gradWeight = (THCTensor *)luaT_checkudata(L, 3, torch_Tensor); THCTensor *grad_b = (THCTensor *)luaT_checkudata(L, 4, torch_Tensor); THCTensor *grad_gates = (THCTensor *)luaT_checkudata(L, 5, torch_Tensor); THCTensor *buffer = (THCTensor *)luaT_checkudata(L, 6, torch_Tensor); THCTensor *cur_x = (THCTensor *)luaT_checkudata(L, 7, torch_Tensor); THCTensor *prev_h = (THCTensor *)luaT_checkudata(L, 8, torch_Tensor); THCTensor *grad_next_h = (THCTensor *)luaT_checkudata(L, 9, torch_Tensor); lua_Number scale = luaL_checknumber(L, 10); int inputsize = luaL_checkinteger(L, 11); int outputsize = luaL_checkinteger(L, 12); THCTensor *grad_cur_x = (THCTensor *)luaT_checkudata(L, 13, torch_Tensor); THCTensor *grad_prev_h = (THCTensor *)luaT_checkudata(L, 14, torch_Tensor); int batchsize = THCTensor_(size)(state, cur_x, 0); if (THCTensor_(size)(state, cur_x, 1) != inputsize) return LUA_HANDLE_ERROR_STR(L, "expected input[1]:size(2) == inputsize"); if (THCTensor_(size)(state, grad_next_h, 1) != outputsize) return LUA_HANDLE_ERROR_STR(L, "expected gradOutput[1]:size(2) == outputsize"); THCTensor_(resize2d)(state, grad_cur_x, batchsize, inputsize); THCTensor_(resize2d)(state, grad_prev_h, batchsize, outputsize); THCTensor_(resize2d)(state, grad_gates, batchsize, 3 * outputsize); THCTensor *Wx = THCTensor_(newNarrow)(state, weight, 0, 0, inputsize); THCTensor *Wh = THCTensor_(newNarrow)(state, weight, 0, inputsize, outputsize); THCTensor *reset_gate = THCTensor_(newNarrow)(state, gates, 1, 0, outputsize); THCTensor *update_gate = THCTensor_(newNarrow)(state, gates, 1, outputsize, outputsize); THCTensor *hidden_candidate = THCTensor_(newNarrow)(state, gates, 1, 2*outputsize, outputsize); THCTensor *grad_Wx = THCTensor_(newNarrow)(state, gradWeight, 0, 0, inputsize); THCTensor *grad_Wh = THCTensor_(newNarrow)(state, gradWeight, 0, inputsize, outputsize); THCTensor *grad_reset_gate = THCTensor_(newNarrow)(state, grad_gates, 1, 0, outputsize); THCTensor *grad_update_gate = THCTensor_(newNarrow)(state, grad_gates, 1, outputsize, outputsize); THCTensor *grad_hidden_candidate = THCTensor_(newNarrow)(state, grad_gates, 1, 2*outputsize, outputsize); THCTensor *sub_Wh = THCTensor_(newNarrow)(state, Wh, 1, 2 * outputsize, outputsize); THCTensor *sub_Wh_t = THCTensor_(newTranspose)(state, sub_Wh, 0, 1); THCTensor *Wx_t = THCTensor_(newTranspose)(state, Wx, 0, 1); THCTensor *cur_x_t = THCTensor_(newTranspose)(state, cur_x, 0, 1); THCTensor *sub_grad_gates = THCTensor_(newNarrow)(state, grad_gates, 1, 0, 2 * outputsize); THCTensor *sub_grad_Wh = THCTensor_(newNarrow)(state, grad_Wh, 1, 0, 2 * outputsize); THCTensor *prev_h_t = THCTensor_(newTranspose)(state, prev_h, 0, 1); // use grad_update_gate as temporary buffer to compute grad_hidden_candidate and grad_reset_gate THCTensor_(fill)(state, grad_update_gate, _REAL(0)); THCTensor_(addcmul)(state, grad_update_gate, grad_next_h, _REAL(-1), update_gate, 
grad_next_h); THCTensor_(fill)(state, grad_hidden_candidate, _REAL(1)); THCTensor_(addcmul)(state, grad_hidden_candidate, grad_hidden_candidate, _REAL(-1), hidden_candidate, hidden_candidate); THCTensor_(cmul)(state, grad_hidden_candidate, grad_hidden_candidate, grad_update_gate); THCTensor_(fill)(state, grad_update_gate, _REAL(0)); THCTensor_(addmm)(state, grad_update_gate, _REAL(1), grad_update_gate, _REAL(1), grad_hidden_candidate, sub_Wh_t); THCTensor_(cmul)(state, grad_update_gate, grad_update_gate, prev_h); THCTensor_(fill)(state, grad_reset_gate, _REAL(1)); THCTensor_(cadd)(state, grad_reset_gate, grad_reset_gate, _REAL(-1), reset_gate); THCTensor_(cmul)(state, grad_reset_gate, grad_reset_gate, reset_gate); THCTensor_(cmul)(state, grad_reset_gate, grad_reset_gate, grad_update_gate); THCTensor_(cadd)(state, buffer, prev_h, _REAL(-1), hidden_candidate); THCTensor_(fill)(state, grad_update_gate, _REAL(1)); THCTensor_(cadd)(state, grad_update_gate, grad_update_gate, _REAL(-1), update_gate); THCTensor_(cmul)(state, grad_update_gate, grad_update_gate, update_gate); THCTensor_(cmul)(state, grad_update_gate, grad_update_gate, buffer); THCTensor_(cmul)(state, grad_update_gate, grad_update_gate, grad_next_h); THCTensor_(addmm)(state, grad_cur_x, _REAL(0), grad_cur_x, _REAL(1), grad_gates, Wx_t); THCTensor_(addmm)(state, grad_Wx, _REAL(scale), grad_Wx, _REAL(1), cur_x_t, grad_gates); THCTensor_(addmm)(state, sub_grad_Wh, _REAL(scale), sub_grad_Wh, _REAL(1), prev_h_t, sub_grad_gates); THCTensor_(resize1d)(state, buffer, outputsize); THCTensor_(sum)(state, buffer, grad_gates, 0, 0); THCTensor_(cadd)(state, grad_b, grad_b, _REAL(scale), buffer); THCTensor_(cmul)(state, buffer, prev_h, reset_gate); THCTensor_(narrow)(state, sub_grad_Wh, grad_Wh, 1, 2 * outputsize, outputsize); THCTensor_(transpose)(state, cur_x_t, buffer, 0, 1); // reuse cur_x_t as buffer_t THCTensor_(addmm)(state, sub_grad_Wh, _REAL(scale), sub_grad_Wh, _REAL(1), cur_x_t, grad_hidden_candidate); THCTensor_(cmul)(state, grad_prev_h, grad_next_h, update_gate); THCTensor_(narrow)(state, sub_Wh, Wh, 1, 0, 2 * outputsize); THCTensor_(transpose)(state, cur_x_t, sub_Wh, 0, 1); // reuse cur_x_t as sub_Wh_t THCTensor_(addmm)(state, grad_prev_h, _REAL(1), grad_prev_h, _REAL(1), sub_grad_gates, cur_x_t); THCTensor_(addmm)(state, buffer, _REAL(0), buffer, _REAL(1), grad_hidden_candidate, sub_Wh_t); THCTensor_(cmul)(state, buffer, buffer, reset_gate); THCTensor_(cadd)(state, grad_prev_h, grad_prev_h, _REAL(1), buffer); THCTensor_(free)(state, Wx); THCTensor_(free)(state, Wh); THCTensor_(free)(state, reset_gate); THCTensor_(free)(state, update_gate); THCTensor_(free)(state, hidden_candidate); THCTensor_(free)(state, grad_Wx); THCTensor_(free)(state, grad_Wh); THCTensor_(free)(state, grad_reset_gate); THCTensor_(free)(state, grad_update_gate); THCTensor_(free)(state, grad_hidden_candidate); THCTensor_(free)(state, sub_Wh); THCTensor_(free)(state, sub_Wh_t); THCTensor_(free)(state, Wx_t); THCTensor_(free)(state, cur_x_t); THCTensor_(free)(state, sub_grad_gates); THCTensor_(free)(state, sub_grad_Wh); THCTensor_(free)(state, prev_h_t); return 2; } static const struct luaL_Reg nn_(StepGRU__) [] = { {"StepGRU_updateOutput", nn_(StepGRU_updateOutput)}, {"StepGRU_backward", nn_(StepGRU_backward)}, {NULL, NULL} }; static void nn_(StepGRU_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, nn_(StepGRU__), "nn"); lua_pop(L,1); } #undef _REAL #endif
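// Reference sketch (CPU, one sample, illustrative only -- not part of the module above):
// the single-timestep GRU update that StepGRU_updateOutput/StepGRU_backward realize with
// batched tensor ops. Layout assumptions, matching the newNarrow calls above: weight is
// row-major [(inputsize + outputsize) x 3*outputsize] with columns ordered
// [reset | update | candidate], bias has 3*outputsize entries, and
//   r = sigmoid(x*Wx_r + h_prev*Wh_r + b_r)
//   u = sigmoid(x*Wx_u + h_prev*Wh_u + b_u)
//   c = tanh  (x*Wx_c + (r .* h_prev)*Wh_c + b_c)
//   h = u .* h_prev + (1 - u) .* c
// The function names below are placeholders introduced for this sketch.
#include <math.h>
#include <string.h>

static float gru_sigmoid_ref(float v) { return 1.0f / (1.0f + expf(-v)); }

// gates: caller-provided scratch of size 3*outputsize; h_next: output of size outputsize
static void gru_step_reference(const float *x, const float *h_prev,
                               const float *weight, const float *bias,
                               int inputsize, int outputsize,
                               float *gates, float *h_next)
{
  const int G = 3 * outputsize;
  int i, j, g, k;
  memcpy(gates, bias, G * sizeof(float));                  // pre-activations start at the bias
  for (i = 0; i < inputsize; i++)                          // x * Wx feeds all three gates
    for (g = 0; g < G; g++) gates[g] += x[i] * weight[i * G + g];
  for (j = 0; j < outputsize; j++)                         // h_prev * Wh feeds reset and update
    for (g = 0; g < 2 * outputsize; g++)
      gates[g] += h_prev[j] * weight[(inputsize + j) * G + g];
  for (g = 0; g < 2 * outputsize; g++) gates[g] = gru_sigmoid_ref(gates[g]);
  for (j = 0; j < outputsize; j++) {                       // (r .* h_prev) * Wh_c feeds the candidate
    const float rh = gates[j] * h_prev[j];
    for (k = 0; k < outputsize; k++)
      gates[2 * outputsize + k] += rh * weight[(inputsize + j) * G + 2 * outputsize + k];
  }
  for (k = 0; k < outputsize; k++) {
    const float u = gates[outputsize + k];
    const float c = tanhf(gates[2 * outputsize + k]);
    h_next[k] = u * h_prev[k] + (1.0f - u) * c;            // next_h = u*prev_h + (1-u)*candidate
  }
}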
#define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8 // #define DEBUG const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p){ //params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef 
DEBUG printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++){ #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box2d(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++){ printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return 
s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b){ return; } const float * cur_box_a = boxes_a + a_idx * 7; const float * cur_box_b = boxes_b + b_idx * 7; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b){ return; } const float * cur_box_a = boxes_a + a_idx * 7; const float * cur_box_b = boxes_b + b_idx * 7; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask){ //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __global__ void tmpfunc(const int boxes_num, const float nms_overlap_thresh, const float *reg, const float* height, const float* dim, const float* rot, const int* indexs, unsigned long long *mask,float* block_boxes) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const 
int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); if (row_start + col_start == 0 && threadIdx.x < col_size) { const int col_actual_idx = indexs[THREADS_PER_BLOCK_NMS * col_start + threadIdx.x]; block_boxes[threadIdx.x * 7 + 0] = reg[col_actual_idx ]; block_boxes[threadIdx.x * 7 + 1] = reg[OUTPUT_H * OUTPUT_W + col_actual_idx]; block_boxes[threadIdx.x * 7 + 2] = height[col_actual_idx]; block_boxes[threadIdx.x * 7 + 3] = dim[col_actual_idx]; block_boxes[threadIdx.x * 7 + 4] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H]; block_boxes[threadIdx.x * 7 + 5] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H * 2]; float theta = atan2f(rot[col_actual_idx], rot[col_actual_idx + OUTPUT_W * OUTPUT_H]); block_boxes[threadIdx.x * 7 + 6] = theta; } } __global__ void raw_nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *reg, const float* height, const float* dim, const float* rot, const int* indexs, unsigned long long *mask){ //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { const int col_actual_idx = indexs[THREADS_PER_BLOCK_NMS * col_start + threadIdx.x]; const int xIdx = col_actual_idx % OUTPUT_W; const int yIdx = col_actual_idx / OUTPUT_W; //encode boxs according kitti format : (N, 7) [x, y, z, dy, dx, dz, heading] block_boxes[threadIdx.x * 7 + 0] = (reg[col_actual_idx ]+xIdx)*OUT_SIZE_FACTOR*X_STEP + X_MIN; block_boxes[threadIdx.x * 7 + 1] = (reg[OUTPUT_H * OUTPUT_W + col_actual_idx] + yIdx ) * OUT_SIZE_FACTOR*Y_STEP + Y_MIN; block_boxes[threadIdx.x * 7 + 2] = height[col_actual_idx]; block_boxes[threadIdx.x * 7 + 4] = dim[col_actual_idx]; block_boxes[threadIdx.x * 7 + 3] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H]; block_boxes[threadIdx.x * 7 + 5] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H * 2]; float theta = atan2f(rot[col_actual_idx], rot[col_actual_idx + OUTPUT_W * OUTPUT_H]); theta = -theta - 3.1415926/2; block_boxes[threadIdx.x * 7 + 6] = theta; } __syncthreads(); if (threadIdx.x < row_size) { const int row_actual_idx = indexs[THREADS_PER_BLOCK_NMS * row_start + threadIdx.x]; const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const int xIdx = row_actual_idx % OUTPUT_W; const int yIdx = row_actual_idx / OUTPUT_W; //encode boxs according kitti format : (N, 7) [x, y, z, dy, dx, dz, heading] float cur_box[7]; cur_box[0] = (reg[row_actual_idx ]+xIdx)*OUT_SIZE_FACTOR*X_STEP + X_MIN; cur_box[1] = (reg[OUTPUT_H * OUTPUT_W + row_actual_idx] + yIdx ) * OUT_SIZE_FACTOR*Y_STEP + Y_MIN; cur_box[2] = height[row_actual_idx]; cur_box[4] = dim[row_actual_idx]; cur_box[3] = dim[row_actual_idx + OUTPUT_W * OUTPUT_H]; cur_box[5] = dim[row_actual_idx + OUTPUT_W * OUTPUT_H * 2]; float theta = atan2f(rot[row_actual_idx], rot[row_actual_idx + OUTPUT_W * OUTPUT_H]); theta = -theta - 3.1415926/2; cur_box[6] = theta; // const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); // assume 
cur_box_idx = 21, col_start = 0, row_start = 0 , threadIdx = 21, mark 21 th box and top 64 boxes mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const * const a, float const * const b) { //params: a: [x, y, z, dx, dy, dz, heading] //params: b: [x, y, z, dx, dy, dz, heading] float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[4]; float Sb = b[3] * b[4]; return interS / fmaxf(Sa + Sb - interS, EPS); } /////////////////////////////////////////////////////////////////////////////////////BEGIN//////////////////////////////////////////////////////////////////////////////////////////// __global__ void boxAssignKernel(float* reg, float* height , float* dim, float*rot,float* boxes, float*score, int* label, float* out_score, int*out_label, int* validIndexs , int output_h , int output_w) { int boxId = blockIdx.x; int channel = threadIdx.x; int idx = validIndexs[boxId]; if (channel ==0 ) boxes[boxId * 7 + 0] = reg[idx ]; else if (channel == 1) boxes[boxId * 7 + 1] = reg[idx + output_w * output_h]; else if (channel == 2) boxes[boxId * 7 + 2] = height[idx]; else if (channel == 3) boxes[boxId * 7 + 3] = dim[idx]; else if (channel == 4) boxes[boxId * 7 + 4] = dim[idx + output_h * output_w]; else if (channel == 5) boxes[boxId * 7 + 5] = dim[idx + 2 * output_w * output_h]; else if (channel == 6){ float theta = atan2f(rot[0*output_h*output_w + idx], rot[1*output_h*output_w + idx]); theta = -theta - 3.1415926/2; boxes[boxId * 7 + 6] = theta; } // else if(channel == 7) // out_score[boxId] = score[idx]; else if(channel == 8) out_label[boxId] = label[idx]; } void _box_assign_launcher(float* reg, float* height , float* dim, float*rot, float* boxes, float*score, int* label, float* out_score, int*out_label, int* validIndexs ,int boxSize, int output_h, int output_w) { boxAssignKernel<<< boxSize, 9 >>> (reg, height , dim ,rot, boxes, score, label, out_score, out_label, validIndexs, output_h, output_w); } __global__ void indexAssign(int* indexs) { int yIdx = blockIdx.x; int xIdx = threadIdx.x; int idx = yIdx * blockDim.x + xIdx; indexs[idx] = idx; } void _index_assign_launcher(int* indexs, int output_h, int output_w) { indexAssign<<<output_h, output_w>>>(indexs); } // compute how many scores are valid struct is_greater{ is_greater(float thre) : _thre(thre) { } __host__ __device__ bool operator()(const float &x) { return x>= _thre; } float _thre; }; struct is_odd { __host__ __device__ bool operator()(const int &x) { return true ; } }; __global__ void _find_valid_score_numKernel_(float* score, float* thre, float* N) { int yIdx = blockIdx.x; int xIdx = threadIdx.x; int idx = yIdx * blockDim.x + xIdx; if (score[idx] >= 0.1) atomicAdd(N, 1.0); } int _find_valid_score_num(float* score, float thre, int output_h, int output_w) { // thrust::device_vector<float> score_vec(score,score + output_h * output_w); return thrust::count_if(thrust::device, score, score + output_h * output_w, is_greater(thre)); // return thrust::count_if(thrust::device, score_vec.begin(),score_vec.end(),is_greater(thre)); } void _sort_by_key(float* keys, int* values,int size) { thrust::sequence(thrust::device, values, values+size); // size = OUTPUT_H * OUTPUT_W; thrust::sort_by_key(thrust::device, keys, keys + size, values, 
thrust::greater<float>()); } void _gather_all(float* host_boxes, int* host_label, float* reg, float* height, float* dim, float* rot, float* sorted_score, int32_t* label, int* dev_indexs, long* host_keep_indexs, int boxSizeBef, int boxSizeAft) { // copy keep_indexs from host to device // int* tmp_keep_indexs = static_cast<int*>(host_keep_indexs); thrust::device_vector<long> dev_keep_indexs(host_keep_indexs, host_keep_indexs + boxSizeAft); // thrust::host_vector<long> host_keep_indexs_vec(host_keep_indexs,host_keep_indexs+boxSizeAft); // // thrust::copy(host_keep_indexs,host_keep_indexs+boxSizeAft, dev_keep_indexs.begin()); // thrust::copy(host_keep_indexs_vec.begin(), host_keep_indexs_vec.end(), dev_keep_indexs.begin()); // gather keeped indexs after nms thrust::device_vector<int> dev_indexs_bef(dev_indexs, dev_indexs + boxSizeBef); thrust::device_vector<int> dev_indexs_aft(boxSizeAft); thrust::gather(dev_keep_indexs.begin(), dev_keep_indexs.end(), dev_indexs_bef.begin(), dev_indexs_aft.begin()); // gather boxes, score, label thrust::device_vector<float> tmp_boxes(boxSizeAft * 9); thrust::device_vector<int> tmp_label(boxSizeAft); // gather x, y thrust::device_vector<float> reg_vec(reg,reg+OUTPUT_H * OUTPUT_W * 2); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), reg_vec.begin(),tmp_boxes.begin()); thrust::gather(dev_indexs_aft.begin(), dev_indexs_aft.end(), reg_vec.begin() + OUTPUT_W * OUTPUT_H, tmp_boxes.begin() + boxSizeAft); // gather height thrust::device_vector<float> height_vec(height, height + OUTPUT_H * OUTPUT_W); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), height_vec.begin(),tmp_boxes.begin() + 2 * boxSizeAft); // gather dim thrust::device_vector<float> dim_vec(dim, dim + 3 * OUTPUT_H * OUTPUT_W); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), dim_vec.begin() + OUTPUT_W * OUTPUT_H * 0,tmp_boxes.begin() + 3 * boxSizeAft); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), dim_vec.begin() + OUTPUT_W * OUTPUT_H * 1,tmp_boxes.begin() + 4 * boxSizeAft); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), dim_vec.begin() + OUTPUT_W * OUTPUT_H * 2,tmp_boxes.begin() + 5 * boxSizeAft); // gather rotation thrust::device_vector<float> rot_vec(rot, rot + 2 * OUTPUT_H * OUTPUT_W); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), rot_vec.begin() + OUTPUT_W * OUTPUT_H * 0,tmp_boxes.begin() + 6 * boxSizeAft); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), rot_vec.begin() + OUTPUT_W * OUTPUT_H * 1,tmp_boxes.begin() + 7 * boxSizeAft); // gather score thrust::device_vector<float> sorted_score_vec(sorted_score, sorted_score + 1 * OUTPUT_H * OUTPUT_W); thrust::gather(dev_keep_indexs.begin(),dev_keep_indexs.end(), sorted_score_vec.begin() + OUTPUT_W * OUTPUT_H * 0,tmp_boxes.begin() + 8 * boxSizeAft); // gather label thrust::device_vector<int> label_vec(label, label + 1 * OUTPUT_H * OUTPUT_W); thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), label_vec.begin() + OUTPUT_W * OUTPUT_H * 0, tmp_label.begin()); // copy values from device => host // host_boxes = tmp_boxes; // host_label = tmp_label; thrust::copy(tmp_boxes.begin(), tmp_boxes.end(), host_boxes); thrust::copy(tmp_label.begin(),tmp_label.end(), host_label); } ///////////////////////////////////////////////////////////////////////////////////END////////////////////////////////////////////////////////////////////////////////////////// __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float* boxes, unsigned long long 
*mask){ //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_iou); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask); } void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask); } void rawNmsLauncher(const float *reg, const float* height, const float* dim, const float* rot, const int* indexs, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, 
THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); raw_nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, reg,height,dim,rot, indexs, mask); } int _raw_nms_gpu(const float* reg, const float* height, const float* dim , const float* rot, const int* indexs, long* host_keep_data,unsigned long long* mask_cpu, unsigned long long* remv_cpu, int boxes_num, float nms_overlap_thresh){ // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] // params keep: (N) // int boxes_num = boxes.size(0); // const float * boxes_data = boxes.data<float>(); // long * keep_data = keep.data<long>(); const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); unsigned long long *mask_data = NULL; cudaMalloc((void**)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long)); rawNmsLauncher(reg, height, dim, rot, indexs, mask_data, boxes_num, nms_overlap_thresh); // unsigned long long mask_cpu[boxes_num * col_blocks]; // unsigned long long *mask_cpu = new unsigned long long [boxes_num * col_blocks]; // std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks); // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); cudaMemcpy(mask_cpu, mask_data, boxes_num * col_blocks * sizeof(unsigned long long), cudaMemcpyDeviceToHost); // TODO : CUT HERE ! ! ! cudaFree(mask_data); // unsigned long long remv_cpu[col_blocks]; // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); memset(remv_cpu, 0 , col_blocks * sizeof(unsigned long long )); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++){ int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))){ host_keep_data[num_to_keep++] = i; for (int j = nblock; j < col_blocks; j++){ remv_cpu[j] |= mask_cpu[ i * col_blocks + j]; } } } if ( cudaSuccess != cudaGetLastError() ) printf( "Error!\n" ); return num_to_keep; }
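// Usage sketch (illustrative, appended for clarity): driving the plain nms_kernel above
// through nmsLauncher and decoding its per-block bitmask on the host. This mirrors the
// decode loop already shown in _raw_nms_gpu, but for boxes that are already laid out as
// (N, 7) [x, y, z, dx, dy, dz, heading] on the device. The function name nms_gpu_example
// is a placeholder; boxes are expected to be pre-sorted by score in descending order, and
// THREADS_PER_BLOCK_NMS is expected to equal the bit width of unsigned long long, per the
// comment near the top of this file.
#include <cuda_runtime.h>
#include <vector>

int nms_gpu_example(const float* boxes_dev, int boxes_num, float nms_overlap_thresh,
                    int* keep_indices /* host buffer, >= boxes_num entries */) {
  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

  unsigned long long* mask_dev = NULL;
  cudaMalloc((void**)&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long));
  nmsLauncher(boxes_dev, mask_dev, boxes_num, nms_overlap_thresh);  // launches nms_kernel above

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  cudaMemcpy(mask_host.data(), mask_dev,
             boxes_num * col_blocks * sizeof(unsigned long long),
             cudaMemcpyDeviceToHost);
  cudaFree(mask_dev);

  // greedy pass: keep box i unless a previously kept box has already suppressed it
  std::vector<unsigned long long> remv(col_blocks, 0ULL);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    const int nblock = i / THREADS_PER_BLOCK_NMS;
    const int inblock = i % THREADS_PER_BLOCK_NMS;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_indices[num_to_keep++] = i;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= mask_host[i * col_blocks + j];
      }
    }
  }
  return num_to_keep;
}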
#include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) convert::To<AccT>(__ldg(x + i)) template <typename T, typename AccT> __global__ void _AvgPool2dNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; int hend = min(hstart + kernel_h, H + pad_h); int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, H); wend = min(wend, W); const T* offset_x = x + (n * C + c) * H * W; AccT val = AccT(0); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { val += LDG(offset_x, h * W + w); } } y[yi] = convert::To<T>(val / area); } } template <typename T, typename AccT> __global__ void _AvgPool2dNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_w / out_h; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; int hend = min(hstart + kernel_h, H + pad_h); int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, H); wend = min(wend, W); const T* offset_x = x + n * H * W * C + c; AccT val = AccT(0); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { val += LDG(offset_x, (h * W + w) * C); } } y[yi] = convert::To<T>(val / area); } } template <typename T, typename AccT> __global__ void _AvgPool2dGradNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int w = xi % W + pad_w; const int h = (xi / W) % H + pad_h; const int c = (xi / W / H) % C; const int n = xi / W / H / C; const int out_hstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int out_wstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int out_hend = min(h / stride_h + 1, out_h); const int out_wend = min(w / stride_w + 1, out_w); const T* offset_dy = dy + (n * C + c) * out_h * out_w; AccT val = AccT(0); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { const int hstart = h_out * stride_h - pad_h; const int hend = min(hstart + kernel_h, H + pad_h); for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int wstart = w_out * stride_w - pad_w; const int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (hend - hstart) * (wend - wstart); val += LDG(offset_dy, h_out * out_w + w_out) / area; } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _AvgPool2dGradNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int c = xi % C; const int w = (xi / C) % W + pad_w; const int h = (xi / C / W) % H + pad_h; const int n = xi / C / W / H; const int out_hstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int out_wstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int out_hend = min(h / stride_h + 1, out_h); const int out_wend = min(w / stride_w + 1, out_w); const T* offset_dy = dy + n * out_h * out_w * C + c; AccT val = AccT(0); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { const int hstart = h_out * stride_h - pad_h; const int hend = min(hstart + kernel_h, H + pad_h); for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int wstart = w_out * stride_w - pad_w; const int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (hend - hstart) * (wend - wstart); val += LDG(offset_dy, (h_out * out_w + w_out) * C) / area; } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _AvgPool3dNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / out_w; const int w_out = yi % out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; tmp /= out_d; const int c = tmp % C; const int n = tmp / C; int dstart = d_out * stride_d - pad_d; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; int dend = min(dstart + kernel_d, D + pad_d); int hend = min(hstart + kernel_h, H + pad_h); int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (dend - dstart) * (hend - hstart) * (wend - wstart); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); dend = min(dend, D); hend = min(hend, H); wend = min(wend, W); const T* offset_x = x + (n * C + c) * D * H * W; AccT val = AccT(0); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { val += LDG(offset_x, (d * H + h) * W + w); } } } y[yi] = convert::To<T>(val / area); } } template <typename T, typename AccT> __global__ void _AvgPool3dNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, 
const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / C; const int c = yi % C; const int w_out = tmp % out_w; tmp /= out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; const int n = tmp / out_d; int dstart = d_out * stride_d - pad_d; int hstart = h_out * stride_h - pad_h; int wstart = w_out * stride_w - pad_w; int dend = min(dstart + kernel_d, D + pad_d); int hend = min(hstart + kernel_h, H + pad_h); int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (dend - dstart) * (hend - hstart) * (wend - wstart); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); dend = min(dend, D); hend = min(hend, H); wend = min(wend, W); const T* offset_x = x + n * D * H * W * C + c; AccT val = AccT(0); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { val += LDG(offset_x, ((d * H + h) * W + w) * C); } } } y[yi] = convert::To<T>(val / area); } } template <typename T, typename AccT> __global__ void _AvgPool3dGradNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { int tmp = xi / W; const int w = xi % W + pad_w; const int h = tmp % H + pad_h; tmp /= H; const int d = tmp % D + pad_d; tmp /= D; const int c = tmp % C; const int n = tmp / C; const int out_dstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int out_hstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int out_wstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int out_dend = min(d / stride_d + 1, out_d); const int out_hend = min(h / stride_h + 1, out_h); const int out_wend = min(w / stride_w + 1, out_w); const T* offset_dy = dy + (n * C + c) * out_d * out_h * out_w; AccT val = AccT(0); for (int d_out = out_dstart; d_out < out_dend; ++d_out) { const int dstart = d_out * stride_d - pad_d; const int dend = min(dstart + kernel_d, D + pad_d); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { const int hstart = h_out * stride_h - pad_h; const int hend = min(hstart + kernel_h, H + pad_h); for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int wstart = w_out * stride_w - pad_w; const int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (dend - dstart) * (hend - hstart) * (wend - wstart); val += LDG(offset_dy, (d_out * out_h + h_out) * out_w + w_out) / area; } } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _AvgPool3dGradNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(xi, nthreads) { int tmp = xi / C; const int c = xi % C; const int w = tmp % W + pad_w; tmp /= W; const int h = tmp % H + pad_h; tmp /= H; const int d = tmp % D + pad_d; const int n = tmp / D; const int out_dstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int out_hstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int out_wstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int out_dend = min(d / stride_d + 1, out_d); const int out_hend = min(h / stride_h + 1, out_h); const int out_wend = min(w / stride_w + 1, out_w); const T* offset_dy = dy + n * out_d * out_h * out_w * C + c; AccT val = AccT(0); for (int d_out = out_dstart; d_out < out_dend; ++d_out) { const int dstart = d_out * stride_d - pad_d; const int dend = min(dstart + kernel_d, D + pad_d); for (int h_out = out_hstart; h_out < out_hend; ++h_out) { const int hstart = h_out * stride_h - pad_h; const int hend = min(hstart + kernel_h, H + pad_h); for (int w_out = out_wstart; w_out < out_wend; ++w_out) { const int wstart = w_out * stride_w - pad_w; const int wend = min(wstart + kernel_w, W + pad_w); const AccT area = (dend - dstart) * (hend - hstart) * (wend - wstart); val += LDG(offset_dy, ((d_out * out_h + h_out) * out_w + w_out) * C) / area; } } } dx[xi] = convert::To<T>(val); } } #undef LDG } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_POOL_KERNEL(name, T, AccT, kBlocks, kThreads, ...) \ if (data_format == "NCHW") { \ name##NCHW<T, AccT> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (data_format == "NHWC") { \ name##NHWC<T, AccT> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(name, T, out_dim) \ template <> \ void name<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const string& data_format, \ const T* x, \ T* y, \ CUDAContext* ctx) { \ const int nthreads = N * C * out_dim; \ DISPATCH_POOL_KERNEL( \ _##name, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(AvgPool2d, float16, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(AvgPool2d, float, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(AvgPool2d, double, (out_h * out_w)); DEFINE_KERNEL_LAUNCHER(AvgPool2dGrad, float16, (H * W)); // AvgPool2dGrad DEFINE_KERNEL_LAUNCHER(AvgPool2dGrad, float, (H * W)); // AvgPool2dGrad DEFINE_KERNEL_LAUNCHER(AvgPool2dGrad, double, (H * W)); // AvgPool2dGrad #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(name, T, out_dim) \ template <> \ void name<T, CUDAContext>( \ const int N, \ const int C, \ const int D, \ const int H, \ const int W, \ const int out_d, \ const int out_h, \ const int out_w, \ const int kernel_d, \ const int kernel_h, \ const int kernel_w, \ const int stride_d, \ const int stride_h, \ const int stride_w, \ const int pad_d, \ const int pad_h, \ const int pad_w, \ const string& data_format, \ const T* x, \ T* y, \ CUDAContext* ctx) { \ const int nthreads = N * C * out_dim; \ DISPATCH_POOL_KERNEL( \ _##name, \ math::ScalarType<T>::type, \ math::AccmulatorType<T>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ D, \ H, \ W, \ out_d, \ out_h, \ out_w, \ kernel_d, \ kernel_h, \ kernel_w, \ stride_d, \ stride_h, \ stride_w, \ pad_d, \ pad_h, \ pad_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } 
DEFINE_KERNEL_LAUNCHER(AvgPool3d, float16, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(AvgPool3d, float, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(AvgPool3d, double, (out_d * out_h * out_w)); DEFINE_KERNEL_LAUNCHER(AvgPool3dGrad, float16, (D * H * W)); // AvgPool3dGrad DEFINE_KERNEL_LAUNCHER(AvgPool3dGrad, float, (D * H * W)); // AvgPool3dGrad DEFINE_KERNEL_LAUNCHER(AvgPool3dGrad, double, (D * H * W)); // AvgPool3dGrad #undef DEFINE_KERNEL_LAUNCHER #undef DISPATCH_POOL_KERNEL } // namespace kernels } // namespace dragon #endif // USE_CUDA
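// Reference sketch (CPU, NCHW, illustrative only): the pooling rule implemented by
// _AvgPool2dNCHW above. The kernel window is clipped to the padded extent
// [-pad_h, H+pad_h) x [-pad_w, W+pad_w) before the area is taken, so zero padding is
// counted in the divisor, while the inner sum only visits real input cells. The function
// name avg_pool2d_nchw_reference is a placeholder introduced for this sketch.
#include <algorithm>
#include <vector>

void avg_pool2d_nchw_reference(const std::vector<float>& x, int N, int C, int H, int W,
                               int out_h, int out_w, int kernel_h, int kernel_w,
                               int stride_h, int stride_w, int pad_h, int pad_w,
                               std::vector<float>& y) {
  y.assign(static_cast<size_t>(N) * C * out_h * out_w, 0.0f);
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int ho = 0; ho < out_h; ++ho)
        for (int wo = 0; wo < out_w; ++wo) {
          int hstart = ho * stride_h - pad_h;
          int wstart = wo * stride_w - pad_w;
          int hend = std::min(hstart + kernel_h, H + pad_h);
          int wend = std::min(wstart + kernel_w, W + pad_w);
          const float area = static_cast<float>((hend - hstart) * (wend - wstart));
          hstart = std::max(hstart, 0);   // the sum itself stays inside the real input
          wstart = std::max(wstart, 0);
          hend = std::min(hend, H);
          wend = std::min(wend, W);
          const float* xp = x.data() + (static_cast<size_t>(n) * C + c) * H * W;
          float val = 0.0f;
          for (int h = hstart; h < hend; ++h)
            for (int w = wstart; w < wend; ++w) val += xp[h * W + w];
          y[((static_cast<size_t>(n) * C + c) * out_h + ho) * out_w + wo] = val / area;
        }
}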
// external functions and GPU-related set-up #ifdef __CUDACC__ #include "CUAPI.h" #include "CUFLU_Shared_FluUtility.cu" #include "CUDA_ConstMemory.h" #endif // #ifdef __CUDACC__ // local function prototypes #ifndef __CUDACC__ void Src_SetAuxArray_User_Template( double [], int [] ); void Src_SetConstMemory_User_Template( const double AuxArray_Flt[], const int AuxArray_Int[], double *&DevPtr_Flt, int *&DevPtr_Int ); void Src_SetFunc_User_Template( SrcFunc_t & ); void Src_WorkBeforeMajorFunc_User_Template( const int lv, const double TimeNew, const double TimeOld, const double dt, double AuxArray_Flt[], int AuxArray_Int[] ); void Src_End_User_Template(); #endif /******************************************************** 1. Template of a user-defined source term --> Enabled by the runtime option "SRC_USER" 2. This file is shared by both CPU and GPU CUSRC_Src_User_Template.cu -> CPU_Src_User_Template.cpp 3. Four steps are required to implement a source term I. Set auxiliary arrays II. Implement the source-term function III. [Optional] Add the work to be done every time before calling the major source-term function IV. Set initialization functions 4. The source-term function must be thread-safe and not use any global variable ********************************************************/ // ======================= // I. Set auxiliary arrays // ======================= //------------------------------------------------------------------------------------------------------- // Function : Src_SetAuxArray_User_Template // Description : Set the auxiliary arrays AuxArray_Flt/Int[] // // Note : 1. Invoked by Src_Init_User_Template() // 2. AuxArray_Flt/Int[] have the size of SRC_NAUX_USER defined in Macro.h (default = 10) // 3. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : AuxArray_Flt/Int : Floating-point/Integer arrays to be filled up // // Return : AuxArray_Flt/Int[] //------------------------------------------------------------------------------------------------------- #ifndef __CUDACC__ void Src_SetAuxArray_User_Template( double AuxArray_Flt[], int AuxArray_Int[] ) { /* AuxArray_Flt[0] = ...; AuxArray_Flt[1] = ...; AuxArray_Int[0] = ...; AuxArray_Int[1] = ...; */ } // FUNCTION : Src_SetAuxArray_User_Template #endif // #ifndef __CUDACC__ // ====================================== // II. Implement the source-term function // ====================================== //------------------------------------------------------------------------------------------------------- // Function : Src_User_Template // Description : Major source-term function // // Note : 1. Invoked by CPU/GPU_SrcSolver_IterateAllCells() // 2. See Src_SetAuxArray_User_Template() for the values stored in AuxArray_Flt/Int[] // 3. 
Shared by both CPU and GPU // // Parameter : fluid : Fluid array storing both the input and updated values // --> Including both active and passive variables // B : Cell-centered magnetic field // SrcTerms : Structure storing all source-term variables // dt : Time interval to advance solution // dh : Grid size // x/y/z : Target physical coordinates // TimeNew : Target physical time to reach // TimeOld : Physical time before update // --> This function updates physical time from TimeOld to TimeNew // MinDens/Pres/Eint : Density, pressure, and internal energy floors // EoS : EoS object // AuxArray_* : Auxiliary arrays (see the Note above) // // Return : fluid[] //----------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static void Src_User_Template( real fluid[], const real B[], const SrcTerms_t *SrcTerms, const real dt, const real dh, const double x, const double y, const double z, const double TimeNew, const double TimeOld, const real MinDens, const real MinPres, const real MinEint, const EoS_t *EoS, const double AuxArray_Flt[], const int AuxArray_Int[] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Int == NULL ) printf( "ERROR : AuxArray_Int == NULL in %s !!\n", __FUNCTION__ ); # endif // example /* const bool CheckMinEint_Yes = true; const real CoolingRate = (real)AuxArray_Flt[0]; real Eint, Enth, Emag; # ifdef MHD Emag = (real)0.5*( SQR(B[MAGX]) + SQR(B[MAGY]) + SQR(B[MAGZ]) ); # else Emag = (real)0.0; # endif Eint = Hydro_Con2Eint( fluid[DENS], fluid[MOMX], fluid[MOMY], fluid[MOMZ], fluid[ENGY], CheckMinEint_Yes, MinEint, Emag ); Enth = fluid[ENGY] - Eint; Eint -= fluid[DENS]*CoolingRate*dt; fluid[ENGY] = Enth + Eint; */ } // FUNCTION : Src_User_Template // ================================================== // III. [Optional] Add the work to be done every time // before calling the major source-term function // ================================================== //------------------------------------------------------------------------------------------------------- // Function : Src_WorkBeforeMajorFunc_User_Template // Description : Specify work to be done every time before calling the major source-term function // // Note : 1. Invoked by Src_WorkBeforeMajorFunc() // --> By linking to "Src_WorkBeforeMajorFunc_User_Ptr" in Src_Init_User_Template() // 2. 
Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : lv : Target refinement level // TimeNew : Target physical time to reach // TimeOld : Physical time before update // --> The major source-term function will update the system from TimeOld to TimeNew // dt : Time interval to advance solution // --> Physical coordinates : TimeNew - TimeOld == dt // Comoving coordinates : TimeNew - TimeOld == delta(scale factor) != dt // AuxArray_Flt/Int : Auxiliary arrays // --> Can be used and/or modified here // --> Must call Src_SetConstMemory_User_Template() after modification // // Return : AuxArray_Flt/Int[] //------------------------------------------------------------------------------------------------------- #ifndef __CUDACC__ void Src_WorkBeforeMajorFunc_User_Template( const int lv, const double TimeNew, const double TimeOld, const double dt, double AuxArray_Flt[], int AuxArray_Int[] ) { // uncomment the following lines if the auxiliary arrays have been modified //# ifdef GPU // Src_SetConstMemory_User_Template( AuxArray_Flt, AuxArray_Int, // SrcTerms.User_AuxArrayDevPtr_Flt, SrcTerms.User_AuxArrayDevPtr_Int ); //# endif } // FUNCTION : Src_WorkBeforeMajorFunc_User_Template #endif // ================================ // IV. Set initialization functions // ================================ #ifdef __CUDACC__ # define FUNC_SPACE __device__ static #else # define FUNC_SPACE static #endif FUNC_SPACE SrcFunc_t SrcFunc_Ptr = Src_User_Template; //----------------------------------------------------------------------------------------- // Function : Src_SetFunc_User_Template // Description : Return the function pointer of the CPU/GPU source-term function // // Note : 1. Invoked by Src_Init_User_Template() // 2. Call-by-reference // 3. Use either CPU or GPU but not both of them // // Parameter : SrcFunc_CPU/GPUPtr : CPU/GPU function pointer to be set // // Return : SrcFunc_CPU/GPUPtr //----------------------------------------------------------------------------------------- #ifdef __CUDACC__ __host__ void Src_SetFunc_User_Template( SrcFunc_t &SrcFunc_GPUPtr ) { CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &SrcFunc_GPUPtr, SrcFunc_Ptr, sizeof(SrcFunc_t) ) ); } #elif ( !defined GPU ) void Src_SetFunc_User_Template( SrcFunc_t &SrcFunc_CPUPtr ) { SrcFunc_CPUPtr = SrcFunc_Ptr; } #endif // #ifdef __CUDACC__ ... elif ... #ifdef __CUDACC__ //------------------------------------------------------------------------------------------------------- // Function : Src_SetConstMemory_User_Template // Description : Set the constant memory variables on GPU // // Note : 1. Adopt the suggested approach for CUDA version >= 5.0 // 2. Invoked by Src_Init_User_Template() and, if necessary, Src_WorkBeforeMajorFunc_User_Template() // 3. 
SRC_NAUX_USER is defined in Macro.h // // Parameter : AuxArray_Flt/Int : Auxiliary arrays to be copied to the constant memory // DevPtr_Flt/Int : Pointers to store the addresses of constant memory arrays // // Return : c_Src_User_AuxArray_Flt[], c_Src_User_AuxArray_Int[], DevPtr_Flt, DevPtr_Int //--------------------------------------------------------------------------------------------------- void Src_SetConstMemory_User_Template( const double AuxArray_Flt[], const int AuxArray_Int[], double *&DevPtr_Flt, int *&DevPtr_Int ) { // copy data to constant memory CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_Src_User_AuxArray_Flt, AuxArray_Flt, SRC_NAUX_USER*sizeof(double) ) ); CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_Src_User_AuxArray_Int, AuxArray_Int, SRC_NAUX_USER*sizeof(int ) ) ); // obtain the constant-memory pointers CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&DevPtr_Flt, c_Src_User_AuxArray_Flt) ); CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&DevPtr_Int, c_Src_User_AuxArray_Int) ); } // FUNCTION : Src_SetConstMemory_User_Template #endif // #ifdef __CUDACC__ #ifndef __CUDACC__ // function pointer extern void (*Src_WorkBeforeMajorFunc_User_Ptr)( const int lv, const double TimeNew, const double TimeOld, const double dt, double AuxArray_Flt[], int AuxArray_Int[] ); extern void (*Src_End_User_Ptr)(); //----------------------------------------------------------------------------------------- // Function : Src_Init_User_Template // Description : Initialize a user-specified source term // // Note : 1. Set auxiliary arrays by invoking Src_SetAuxArray_*() // --> Copy to the GPU constant memory and store the associated addresses // 2. Set the source-term function by invoking Src_SetFunc_*() // --> Unlike other modules (e.g., EoS), here we use either CPU or GPU but not // both of them // 3. Set the function pointers "Src_WorkBeforeMajorFunc_User_Ptr" and "Src_End_User_Ptr" // 4. Invoked by Src_Init() // --> Enable it by linking to the function pointer "Src_Init_User_Ptr" // 5. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : None // // Return : None //----------------------------------------------------------------------------------------- void Src_Init_User_Template() { // set the auxiliary arrays Src_SetAuxArray_User_Template( Src_User_AuxArray_Flt, Src_User_AuxArray_Int ); // copy the auxiliary arrays to the GPU constant memory and store the associated addresses # ifdef GPU Src_SetConstMemory_User_Template( Src_User_AuxArray_Flt, Src_User_AuxArray_Int, SrcTerms.User_AuxArrayDevPtr_Flt, SrcTerms.User_AuxArrayDevPtr_Int ); # else SrcTerms.User_AuxArrayDevPtr_Flt = Src_User_AuxArray_Flt; SrcTerms.User_AuxArrayDevPtr_Int = Src_User_AuxArray_Int; # endif // set the major source-term function Src_SetFunc_User_Template( SrcTerms.User_FuncPtr ); // set the auxiliary functions Src_WorkBeforeMajorFunc_User_Ptr = Src_WorkBeforeMajorFunc_User_Template; Src_End_User_Ptr = Src_End_User_Template; } // FUNCTION : Src_Init_User_Template //----------------------------------------------------------------------------------------- // Function : Src_End_User_Template // Description : Free the resources used by a user-specified source term // // Note : 1. Invoked by Src_End() // --> Enable it by linking to the function pointer "Src_End_User_Ptr" // 2. 
Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : None // // Return : None //----------------------------------------------------------------------------------------- void Src_End_User_Template() { } // FUNCTION : Src_End_User_Template #endif // #ifndef __CUDACC__
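// Illustrative sketch (appended for clarity; a real setup would edit the *_User_Template
// functions above rather than add new ones): how steps I and II of the template fit
// together for the simple cooling term shown in the commented example inside
// Src_User_Template(). The names Src_SetAuxArray_Cooling_Sketch / Src_Cooling_Sketch and
// the rate 1.0e-2 (in code units) are placeholders, not GAMER defaults.

#ifndef __CUDACC__
static void Src_SetAuxArray_Cooling_Sketch( double AuxArray_Flt[], int AuxArray_Int[] )
{
   AuxArray_Flt[0] = 1.0e-2;   // constant cooling rate, read back as CoolingRate below
}
#endif

GPU_DEVICE_NOINLINE
static void Src_Cooling_Sketch( real fluid[], const real B[], const SrcTerms_t *SrcTerms,
                                const real dt, const real dh,
                                const double x, const double y, const double z,
                                const double TimeNew, const double TimeOld,
                                const real MinDens, const real MinPres, const real MinEint,
                                const EoS_t *EoS,
                                const double AuxArray_Flt[], const int AuxArray_Int[] )
{
   const bool CheckMinEint_Yes = true;
   const real CoolingRate      = (real)AuxArray_Flt[0];

   real Eint, Enth, Emag;
#  ifdef MHD
   Emag = (real)0.5*( SQR(B[MAGX]) + SQR(B[MAGY]) + SQR(B[MAGZ]) );
#  else
   Emag = (real)0.0;
#  endif

// separate the internal energy to be cooled from the rest of the total energy
   Eint = Hydro_Con2Eint( fluid[DENS], fluid[MOMX], fluid[MOMY], fluid[MOMZ], fluid[ENGY],
                          CheckMinEint_Yes, MinEint, Emag );
   Enth = fluid[ENGY] - Eint;

// dEint/dt = -Dens*CoolingRate, applied over one source-term step
   Eint        -= fluid[DENS]*CoolingRate*dt;
   fluid[ENGY]  = Enth + Eint;
} // FUNCTION : Src_Cooling_Sketch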
namespace apollo { namespace perception { namespace inference { // Decode bbox. // boxes dims: [num_box, 4], deltas dims: [N, num_box, C, 4], // out_boxes dims: [N, num_box, C, 4] // nthreads = N * num_box * C __global__ void bbox_transform_inv_kernel( const int nthreads, const float *boxes, const float *deltas, const int num_box, const int num_channel, float *out_boxes) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= nthreads) { return; } int box_id = (index / num_channel) % num_box; float x_min = boxes[box_id * 4]; float y_min = boxes[box_id * 4 + 1]; float x_max = boxes[box_id * 4 + 2]; float y_max = boxes[box_id * 4 + 3]; float w = x_max - x_min + 1; float h = y_max - y_min + 1; float x_ctr = x_min + 0.5 * (w - 1); float y_ctr = y_min + 0.5 * (h - 1); float dx = deltas[index * 4]; float dy = deltas[index * 4 + 1]; float dw = deltas[index * 4 + 2]; float dh = deltas[index * 4 + 3]; float pred_x_ctr = dx * w + x_ctr; float pred_y_ctr = dy * h + y_ctr; float pred_w = std::exp(dw) * w; float pred_h = std::exp(dh) * h; out_boxes[index * 4] = pred_x_ctr - 0.5 * (pred_w - 1); // pred x_min out_boxes[index * 4 + 1] = pred_y_ctr - 0.5 * (pred_h - 1); // pred y_min out_boxes[index * 4 + 2] = pred_x_ctr + 0.5 * (pred_w - 1); // pred x_max out_boxes[index * 4 + 3] = pred_y_ctr + 0.5 * (pred_h - 1); // pred y_max } // boxes dim: [N, num_box, 4], nthreads = N * num_box * 4 __global__ void clip_boxes_kernel(const int nthreads, float *boxes, const float height, const float width) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= nthreads) { return; } // refine x_min, x_max to be in [0, img_width) if (index % 4 == 0 || index % 4 == 2) { if (boxes[index] < 0) { boxes[index] = 0; } else if (boxes[index] > width - 1) { boxes[index] = width - 1; } } else { // refine y_min, y_max to be in [0, img_height) if (boxes[index] < 0) { boxes[index] = 0; } else if (boxes[index] > height - 1) { boxes[index] = height - 1; } } } // boxes dims: [N, num_box, num_channel, 4], // filtered_boxes dims: [N, num_box, 4] // scores dims: [N, num_box, num_class], filtered_scores dims: [N, num_box] // all_probs dims: [N, num_box, num_prob], // filtered_all_probs dims: [N, num_box, num_prob] // filtered_count dims: [N] __global__ void filter_boxes_kernel( const int nthreads, const float *boxes, const float *scores, const float *all_probs, const int num_box, const int num_channel, const int num_class, const int num_prob, const int filter_channel, const int filter_class, const int min_size_mode, const float min_size_h, const float min_size_w, const float threshold_score, float *filtered_boxes, float *filtered_scores, float *filtered_all_probs, int *filtered_count) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= nthreads) { return; } int batch_id = index / num_box; if (scores[index * num_class + filter_class] > threshold_score) { bool keep = true; int box_id = index * num_channel + filter_channel; float w = boxes[box_id * 4 + 2] - boxes[box_id * 4] + 1; float h = boxes[box_id * 4 + 3] - boxes[box_id * 4 + 1] + 1; if (min_size_mode == 0) { // filter boxes with minimum size of height & width if (h < min_size_h || w < min_size_w) { keep = false; } } else if (min_size_mode == 1) { // filter boxes with minimum size of height or width if (h < min_size_h && w < min_size_w) { keep = false; } } if (keep) { int counter = atomicAdd(&filtered_count[batch_id], 1); for (int i = 0; i < 4; ++i) { filtered_boxes[batch_id * num_box * 4 + counter * 4 + i] = boxes[box_id * 4 + i]; } 
filtered_scores[batch_id * num_box + counter] = scores[index * num_class + filter_class]; if (all_probs != nullptr && filtered_all_probs != nullptr) { for (int i = 0; i < num_prob; ++i) { filtered_all_probs[batch_id * num_box * num_prob + counter * num_prob + i] = all_probs[index * num_prob + i]; } } } } } // Gather boxes by indexes and keep top N boxes. // boxes dims: [N, num_box, 4], scores dims: [N, num_box], // all_probs dims: [N, num_box, num_prob] // indexes dims: [N, num_box], count dims: [N] // out_boxes dims: [N, topN, 4], out_scores dims: [N, topN] // out_all_probs dims: [N, topN, num_prob] // nthreads = N * max_num_box __global__ void keep_topN_boxes_kernel( const int nthreads, const float *boxes, const float *scores, const float *all_probs, const int *indexes, const int *count, const bool keep_score, const int num_box, const int num_prob, const int topN, float *out_boxes, float *out_scores, float *out_all_probs) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= nthreads) { return; } int batch_id = index / topN; int box_id = index % topN; if (box_id < count[batch_id]) { int in_box_id = indexes[batch_id * num_box + box_id]; for (int i = 0; i < 4; ++i) { out_boxes[index * 4 + i] = boxes[batch_id * num_box * 4 + in_box_id * 4 + i]; } if (keep_score) { out_scores[index] = scores[batch_id * num_box + in_box_id]; for (int i = 0; i < num_prob; i++) { out_all_probs[index * num_prob + i] = all_probs[batch_id * num_box * num_prob + in_box_id * num_prob + i]; } } } } __global__ void repeatedly_add_kernel(const int nthreads, const float *in_data, float *out_data, const float *add_vec, int add_vec_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { out_data[index] = in_data[index] + add_vec[index % add_vec_size]; } } __global__ void repeatedly_mul_kernel(const int nthreads, const float *in_data, float *out_data, const float *mul_vec, int mul_vec_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < nthreads) { out_data[index] = in_data[index] * mul_vec[index % mul_vec_size]; } } // input dims: [N, C], output dims: [N, C_sliced] __global__ void slice2d_kernel(const int nthreads, const float *in_data, float *out_data, const int *slice_axises, int slice_axis_num, int input_axis_size) { int out_index = threadIdx.x + blockIdx.x * blockDim.x; if (out_index < nthreads) { int id = out_index / slice_axis_num; int slice_axis_id = out_index % slice_axis_num; int in_index = slice_axises[slice_axis_id] + id * input_axis_size; out_data[out_index] = in_data[in_index]; } } void bbox_transform_inv_cuda(int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, const float *boxes, const float *deltas, const int num_box, const int num_channel, float *out_boxes) { bbox_transform_inv_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, boxes, deltas, num_box, num_channel, out_boxes); } void clip_boxes_cuda(int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, float *boxes, const float height, const float width) { clip_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, boxes, height, width); } void filter_boxes_cuda( int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, const float *boxes, const float *scores, const float *all_probs, const int num_box, const int num_channel, const int num_class, const int num_prob, const int filter_channel, const int filter_class, const int min_size_mode, const float min_size_h, 
const float min_size_w, const float threshold_score, float *filtered_boxes, float *filtered_scores, float *filtered_all_probs, int *filtered_count) { filter_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, boxes, scores, all_probs, num_box, num_channel, num_class, num_prob, filter_channel, filter_class, min_size_mode, min_size_h, min_size_w, threshold_score, filtered_boxes, filtered_scores, filtered_all_probs, filtered_count); } void keep_topN_boxes_cuda(int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, const float *boxes, const float *scores, const float *all_probs, const int *indexes, const int *count, const bool keep_score, const int num_box, const int num_prob, const int topN, float *out_boxes, float *out_scores, float *out_all_probs) { keep_topN_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, boxes, scores, all_probs, indexes, count, keep_score, num_box, num_prob, topN, out_boxes, out_scores, out_all_probs); } void repeatedly_add_cuda(int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, const float *in_data, float *out_data, const float *add_vec, int add_vec_size) { repeatedly_add_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, in_data, out_data, add_vec, add_vec_size); } void repeatedly_mul_cuda(int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, const float *in_data, float *out_data, const float *mul_vec, int mul_vec_size) { repeatedly_mul_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, in_data, out_data, mul_vec, mul_vec_size); } void slice2d_cuda(int block_size, int thread_size, int shared_mem, cudaStream_t stream, const int nthreads, const float *in_data, float *out_data, const int *slice_axises, int slice_axis_num, int input_axis_size) { slice2d_kernel<<<block_size, thread_size, shared_mem, stream>>>( nthreads, in_data, out_data, slice_axises, slice_axis_num, input_axis_size); } } // namespace inference } // namespace perception } // namespace apollo
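// ---------------------------------------------------------------------------
// Usage sketch (not part of the original file): a minimal host-side driver for
// the launch wrappers above. The buffer names (d_boxes, d_deltas, d_decoded),
// the 512-thread block size and the image-size parameters are illustrative
// assumptions; only the nthreads formulas follow the kernel comments.
static void decode_and_clip_example(const float *d_boxes, const float *d_deltas,
                                    float *d_decoded, int N, int num_box,
                                    int num_channel, float img_h, float img_w,
                                    cudaStream_t stream) {
  const int thread_size = 512;
  // bbox_transform_inv_kernel: one thread per (batch, box, channel) triple.
  const int decode_threads = N * num_box * num_channel;
  const int decode_blocks = (decode_threads + thread_size - 1) / thread_size;
  apollo::perception::inference::bbox_transform_inv_cuda(
      decode_blocks, thread_size, 0, stream, decode_threads, d_boxes, d_deltas,
      num_box, num_channel, d_decoded);
  // clip_boxes_kernel: one thread per box coordinate. It only assumes a
  // trailing dimension of 4, so here it is applied to the full
  // [N, num_box, C, 4] decoded tensor.
  const int clip_threads = decode_threads * 4;
  const int clip_blocks = (clip_threads + thread_size - 1) / thread_size;
  apollo::perception::inference::clip_boxes_cuda(
      clip_blocks, thread_size, 0, stream, clip_threads, d_decoded, img_h, img_w);
}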
#include <stdio.h> #include <iostream> #include <vector> #include <time.h> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) const int block_num = 512; #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) const int threadsPerBlock = sizeof(unsigned long long) * 8; __global__ void points_inside_boxes(const int n, const int npoint, const float *points, const float* anchors, int* points_sample_mask){ // n: boxes_num, npoint: points_num, points: points_num x 3, anchors: boxes_num, 6 // return: points_sample_mask: boxes_num x npoint for (int batch_idx=blockIdx.x; batch_idx < n; batch_idx += gridDim.x){ // xmin, ymin, zmin, xmax, ymax, zmax const float* cur_anchors = anchors + batch_idx * 6; int *cur_points_sample_mask = points_sample_mask + batch_idx * npoint; int x_index = threadIdx.x; int x_stride = blockDim.x; const float cur_anchors_xmin = cur_anchors[0] - cur_anchors[3] / 2.; const float cur_anchors_ymin = cur_anchors[1] - cur_anchors[4]; const float cur_anchors_zmin = cur_anchors[2] - cur_anchors[5] / 2.; const float cur_anchors_xmax = cur_anchors[0] + cur_anchors[3] / 2.; const float cur_anchors_ymax = cur_anchors[1]; const float cur_anchors_zmax = cur_anchors[2] + cur_anchors[5] / 2.; for (int points_idx = x_index; points_idx < npoint; points_idx += x_stride){ const float* cur_points = points + points_idx * 3; const float cur_points_x = cur_points[0]; const float cur_points_y = cur_points[1]; const float cur_points_z = cur_points[2]; int _x = (cur_points_x >= cur_anchors_xmin) * (cur_points_x <= cur_anchors_xmax); int _y = (cur_points_y >= cur_anchors_ymin) * (cur_points_y <= cur_anchors_ymax); int _z = (cur_points_z >= cur_anchors_zmin) * (cur_points_z <= cur_anchors_zmax); cur_points_sample_mask[points_idx] = _x * _y * _z; } } } __global__ void points_iou_kernel(const int n, const int npoint, const int* points_sample_mask, float* iou_matrix){ // points_sample_mask, [n, npoint], 0/1 // iou_matrix, [n, n] for (int x_num_idx=blockIdx.x; x_num_idx<n; x_num_idx+=gridDim.x){ for(int y_num_idx=blockIdx.y; y_num_idx<n; y_num_idx+=gridDim.y){ const int* x_points_sample_mask = points_sample_mask + x_num_idx * npoint; const int* y_points_sample_mask = points_sample_mask + y_num_idx * npoint; int x_index = threadIdx.x; int x_stride = blockDim.x; __shared__ float intersect_list[threadsPerBlock]; __shared__ float union_list[threadsPerBlock]; // first initialize intersect_list and union_list by zero intersect_list[x_index] = 0; union_list[x_index] = 0; __syncthreads(); for(int i_x=x_index; i_x<npoint; i_x+= x_stride){ intersect_list[x_index] = intersect_list[x_index] + float(x_points_sample_mask[i_x] && y_points_sample_mask[i_x]); union_list[x_index] = union_list[x_index] + float(x_points_sample_mask[i_x] || y_points_sample_mask[i_x]); } __syncthreads(); // after calc the intersect // then get the sum __shared__ float intersect_sum; __shared__ float union_sum; intersect_sum = 0; union_sum = 0; __syncthreads(); atomicAdd(&intersect_sum, intersect_list[x_index]); atomicAdd(&union_sum, union_list[x_index]); __syncthreads(); float iou = intersect_sum / max(union_sum, 1.); iou_matrix[x_num_idx * n + y_num_idx] = iou; } } } __device__ inline float devIou(const int *a, const int *b, int npoint) { // a:[npoint], b[npoint], then calc the iou float intersect = 0; float union_sect = 0; for (int i = 0; i < npoint; i ++){ intersect += 
a[i] && b[i]; union_sect += a[i] || b[i]; } return intersect / union_sect; } __global__ void points_nms_block_kernel(const int n, const int npoint, const int merge_function, const float iou_thresh, const int*points_sample, unsigned long long *keep_inds, int *nmsed_points_sample){ const int row_start = blockIdx.y; const int col_start = blockIdx.x; const int row_size = min(n - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n - col_start * threadsPerBlock, threadsPerBlock); const int* col_points_sample = points_sample + (threadsPerBlock * col_start) * npoint; if (threadIdx.x < row_size){ const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const int *cur_points_sample = points_sample + cur_box_idx * npoint; int *cur_nmsed_points_sample = nmsed_points_sample + cur_box_idx * npoint; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start){ start = threadIdx.x + 1; } for (i = start; i < col_size; i ++){ if (devIou(cur_points_sample, col_points_sample + i * npoint, npoint) > iou_thresh) { // merge the nmsed_points_sample const int *merged_col_points_sample = col_points_sample + i * npoint; if (merge_function == 0){ for (int j = 0; j < npoint; j++){ atomicOr(&cur_nmsed_points_sample[j], merged_col_points_sample[j]); } } else if (merge_function == 1){ for (int j = 0; j < npoint; j++){ atomicAnd(&cur_nmsed_points_sample[j], merged_col_points_sample[j]); } } t |= 1ULL << i; } } const int col_blocks = DIVUP(n, threadsPerBlock); // keep_inds, [col_blocks, threadsPerBlock] keep_inds[cur_box_idx * col_blocks + col_start] = t; } } __global__ void points_nms_kernel(const int n, const int npoint, const int merge_function, float iou_thresh, const float *iou_matrix, const int *points_sample, int *keep_inds, int *nmsed_points_sample) { // nmsed_points_sample [n, npoint] for (int x_num_idx=blockIdx.x; x_num_idx<n; x_num_idx+=gridDim.x){ for(int y_num_idx=blockIdx.y; y_num_idx<n; y_num_idx+=gridDim.y){ if (x_num_idx == y_num_idx) continue; // const int* x_points_sample = points_sample + x_num_idx * npoint; const int* y_points_sample = points_sample + y_num_idx * npoint; const float* x_iou_matrix = iou_matrix + x_num_idx * n; int *x_keep_inds = keep_inds + x_num_idx * n; int* x_nmsed_points_sample = nmsed_points_sample + x_num_idx * npoint; int index = threadIdx.x; int stride = blockDim.x; float cur_iou = x_iou_matrix[y_num_idx]; if (cur_iou > iou_thresh){ // merge them togethor x_keep_inds[y_num_idx] = 1; for (int i=index;i<npoint;i+=stride){ // merge the result if (merge_function == 0){ // union the two vector atomicOr(&x_nmsed_points_sample[i], y_points_sample[i]); } else if(merge_function == 1){ atomicAnd(&x_nmsed_points_sample[i], y_points_sample[i]); } else{ continue; } } } } } } __global__ void points_nms_sample(const int n, const int npoint, int merge_function, int* nmsed_points_sample_media, int* nmsed_points_sample){ for (int num_idx=blockIdx.x; num_idx<n; num_idx+=gridDim.x){ int *batch_nmsed_points_sample_media = nmsed_points_sample_media + num_idx * n *npoint; int *batch_nmsed_points_sample = nmsed_points_sample + num_idx * npoint; int index = threadIdx.x; int stride = blockDim.x; for (int i=index; i<n; i+=stride){ for(int j=0; j < npoint; j++){ if (merge_function == 0 || merge_function == 2){ // union or keep the origin atomicOr(&batch_nmsed_points_sample[j], batch_nmsed_points_sample_media[i * npoint + j]); // batch_nmsed_points_sample[j] = batch_nmsed_points_sample[j] + batch_nmsed_points_sample_media[i * npoint + j]; } else if 
(merge_function == 1){ atomicAnd(&batch_nmsed_points_sample[j], batch_nmsed_points_sample_media[i * npoint + j]); // batch_nmsed_points_sample[j] = batch_nmsed_points_sample[j] && batch_nmsed_points_sample_media[i * npoint + j]; } } } } } void points_iou_gpu(const int n, const int npoint, const int* points_sample_mask, float* iou_matrix){ dim3 blocks(512, 512); points_iou_kernel<<<blocks, threadsPerBlock>>>(n, npoint, points_sample_mask, iou_matrix); // std::cout << "Iou Caluculating Done!!" << std::endl; } void points_inside_boxes_gpu(const int n, const int npoint, const float *points, const float* anchors, int* points_sample_mask){ CUDA_CHECK(cudaMemset(points_sample_mask, 1, n * npoint * sizeof(int))); points_inside_boxes<<<512, threadsPerBlock>>>(n, npoint, points, anchors, points_sample_mask); } void points_nms_block_gpu(const int n, const int npoint, const int merge_function, const float iou_thresh, const int num_to_keep, const int *points_sample, int *keep_inds, int *nmsed_points_sample){ unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(n, threadsPerBlock); CUDA_CHECK(cudaMalloc(&mask_dev, n * col_blocks * sizeof(unsigned long long))); CUDA_CHECK(cudaMemcpy(nmsed_points_sample, points_sample, sizeof(int) * n * npoint, cudaMemcpyDeviceToDevice)); time_t c_start, c_end; c_start = clock(); cudaEvent_t start, stop; // variables that holds 2 events float time; // Variable that will hold the time cudaEventCreate(&start); // creating the event 1 cudaEventCreate(&stop); // creating the event 2 cudaEventRecord(start, 0); // start measuring the time dim3 blocks(DIVUP(n, threadsPerBlock), DIVUP(n, threadsPerBlock)); dim3 threads(threadsPerBlock); points_nms_block_kernel<<<blocks, threads>>>(n, npoint, merge_function, iou_thresh, points_sample, mask_dev, nmsed_points_sample); c_end = clock(); cudaEventRecord(stop, 0); // Stop time measuring cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::cout << difftime(c_end,c_start) << std::endl; std::cout << time << std::endl; std::cout << "Finished main working !!!" << std::endl; c_start = clock(); std::vector<unsigned long long> mask_host(n * col_blocks); cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * n * col_blocks, cudaMemcpyDeviceToHost); c_end = clock(); std::cout << difftime(c_end,c_start) << std::endl; std::cout << "Finished copying" << std::endl; c_start = clock(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); std::vector<int> cpu_keep_inds(n); memset(&cpu_keep_inds[0], -1, sizeof(int) * num_to_keep); std::cout << "setting the output to -1" << std::endl; int keeping_num = 0; for (int i=0; i < n; i ++){ int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))){ cpu_keep_inds[keeping_num++] = i; if (keeping_num >= num_to_keep) break; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j ++){ remv[j] |= p[j]; } } } c_end = clock(); std::cout << difftime(c_end,c_start) << std::endl; CUDA_CHECK(cudaFree(mask_dev)); CUDA_CHECK(cudaMemcpy(keep_inds, &cpu_keep_inds[0], sizeof(int) * num_to_keep, cudaMemcpyHostToDevice)); std::cout << "Finished!!!" << std::endl; } void points_nms_gpu(const int n, const int npoint, const int merge_function, float iou_thresh, const float *iou_matrix, const int *points_sample, int *keep_inds, int *nmsed_points_sample) { // std::cout << "Beginning points nms !!!" 
<< std::endl; int *remove_inds = NULL; CUDA_CHECK(cudaMalloc(&remove_inds, n * n * sizeof(int))); CUDA_CHECK(cudaMemset(remove_inds, 0, n * n * sizeof(int))); std::vector<int> cpu_keep_inds(n, 1); // First initialize the nmsed_points_sample by the points_sample CUDA_CHECK(cudaMemcpy(nmsed_points_sample, points_sample, sizeof(int) * n * npoint, cudaMemcpyDeviceToDevice)); dim3 blocks(block_num, block_num); points_nms_kernel<<<blocks, threadsPerBlock>>>(n, npoint, merge_function, iou_thresh, iou_matrix, points_sample, remove_inds, nmsed_points_sample); // Using for Debug // std::vector<int> debug(n * npoint); // CUDA_CHECK(cudaMemcpy(&debug[0], media_nmsed_points_sample, sizeof(int) * n * npoint, cudaMemcpyDeviceToHost)); // for (int i=0; i<n; i++){ // for (int j=0; j< npoint; j++) // std::cout << debug[i * npoint + j] << " "; // std::cout << std::endl; // } // std::cout << std::endl; std::vector<int> cpu_remove_inds(n * n); CUDA_CHECK(cudaMemcpy(&cpu_remove_inds[0], remove_inds, sizeof(int) * n * n, cudaMemcpyDeviceToHost)); // std::cout << "points nms_remove inds Done !!!" << std::endl; // finally get the keep_inds for (int i=0; i<n; i++){ // std::cout << 1 << std::endl; if (cpu_keep_inds[i] == 0){ continue; } for(int j=i+1; j<n; j++){ if (cpu_remove_inds[i * n + j] == 1){ // remove this point cpu_keep_inds[j] = 0; } } } // at last, make it back CUDA_CHECK(cudaMemcpy(keep_inds, &cpu_keep_inds[0], sizeof(int) * n, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaFree(remove_inds)); // std::cout << "points nms Done !!!" << std::endl; }
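// ---------------------------------------------------------------------------
// CPU reference sketch (not part of the original file): the same mask IoU that
// points_iou_kernel computes on the GPU, convenient for unit-testing. It keeps
// the max(union, 1) guard used there; note that devIou above omits that guard.
static float mask_iou_cpu(const int *a, const int *b, int npoint) {
  float intersect = 0.f, uni = 0.f;
  for (int i = 0; i < npoint; ++i) {
    intersect += (a[i] && b[i]) ? 1.f : 0.f;
    uni       += (a[i] || b[i]) ? 1.f : 0.f;
  }
  return intersect / (uni > 1.f ? uni : 1.f);
}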
* \file * cub::BlockHistogramTilesSort implements a stateful abstraction of CUDA thread blocks for histogramming multiple tiles as part of device-wide histogram using local sorting */ #pragma once #include <iterator> #include "../../../block/block_radix_sort.cuh" #include "../../../block/block_discontinuity.cuh" #include "../../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * BlockHistogramTilesSort implements a stateful abstraction of CUDA thread blocks for histogramming multiple tiles as part of device-wide histogram using local sorting */ template < typename BlockHistogramTilesPolicy, ///< Tuning policy int BINS, ///< Number of histogram bins per channel int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of active channels being histogrammed) int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that can be cast as an integer in the range [0..BINS-1] typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin typename SizeT> ///< Integer type for offsets struct BlockHistogramTilesSort { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Sample type typedef typename std::iterator_traits<InputIteratorRA>::value_type SampleT; // Constants enum { BLOCK_THREADS = BlockHistogramTilesPolicy::BLOCK_THREADS, ITEMS_PER_THREAD = BlockHistogramTilesPolicy::ITEMS_PER_THREAD, TILE_CHANNEL_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, TILE_ITEMS = TILE_CHANNEL_ITEMS * CHANNELS, STRIPED_COUNTERS_PER_THREAD = (BINS + BLOCK_THREADS - 1) / BLOCK_THREADS, }; // Parameterize BlockRadixSort type for our thread block typedef BlockRadixSort<SampleT, BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT; // Parameterize BlockDiscontinuity type for our thread block typedef BlockDiscontinuity<SampleT, BLOCK_THREADS> BlockDiscontinuityT; /// Shared memory type required by this thread block union _TempStorage { // Storage for sorting bin values typename BlockRadixSortT::TempStorage sort; struct { // Storage for detecting discontinuities in the tile of sorted bin values typename BlockDiscontinuityT::TempStorage flag; // Storage for noting begin/end offsets of bin runs in the tile of sorted bin values int run_begin[BLOCK_THREADS * STRIPED_COUNTERS_PER_THREAD]; int run_end[BLOCK_THREADS * STRIPED_COUNTERS_PER_THREAD]; }; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; // Discontinuity functor struct DiscontinuityOp { // Reference to temp_storage _TempStorage &temp_storage; // Constructor __device__ __forceinline__ DiscontinuityOp(_TempStorage &temp_storage) : temp_storage(temp_storage) {} // Discontinuity predicate __device__ __forceinline__ bool operator()(const SampleT &a, const SampleT &b, int b_index) { if (a != b) { // Note the begin/end offsets in shared storage temp_storage.run_begin[b] = b_index; temp_storage.run_end[a] = b_index; return true; } else { return false; } } }; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- /// Reference to temp_storage _TempStorage &temp_storage; /// Histogram counters striped across threads HistoCounter 
thread_counters[ACTIVE_CHANNELS][STRIPED_COUNTERS_PER_THREAD]; /// Reference to output histograms HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS]; /// Input data to reduce InputIteratorRA d_in; //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ BlockHistogramTilesSort( TempStorage &temp_storage, ///< Reference to temp_storage InputIteratorRA d_in, ///< Input data to reduce HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS]) ///< Reference to output histograms : temp_storage(temp_storage.Alias()), d_in(d_in), d_out_histograms(d_out_histograms) { // Initialize histogram counters striped across threads #pragma unroll for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) { #pragma unroll for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER) { thread_counters[CHANNEL][COUNTER] = 0; } } } /** * Composite a tile of input items */ __device__ __forceinline__ void Composite( SampleT (&items)[ITEMS_PER_THREAD], ///< Tile of samples HistoCounter thread_counters[STRIPED_COUNTERS_PER_THREAD]) ///< Histogram counters striped across threads { // Sort bytes in blocked arrangement BlockRadixSortT(temp_storage.sort).Sort(items); __syncthreads(); // Initialize the shared memory's run_begin and run_end for each bin #pragma unroll for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER) { temp_storage.run_begin[(COUNTER * BLOCK_THREADS) + threadIdx.x] = TILE_CHANNEL_ITEMS; temp_storage.run_end[(COUNTER * BLOCK_THREADS) + threadIdx.x] = TILE_CHANNEL_ITEMS; } __syncthreads(); // Note the begin/end run offsets of bin runs in the sorted tile int flags[ITEMS_PER_THREAD]; // unused DiscontinuityOp flag_op(temp_storage); BlockDiscontinuityT(temp_storage.flag).FlagHeads(flags, items, flag_op); // Update begin for first item if (threadIdx.x == 0) temp_storage.run_begin[items[0]] = 0; __syncthreads(); // Composite into histogram // Initialize the shared memory's run_begin and run_end for each bin #pragma unroll for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER) { int bin = (COUNTER * BLOCK_THREADS) + threadIdx.x; HistoCounter run_length = temp_storage.run_end[bin] - temp_storage.run_begin[bin]; thread_counters[COUNTER] += run_length; } } /** * Process one channel within a tile. */ template <bool FULL_TILE> __device__ __forceinline__ void ConsumeTileChannel( int channel, SizeT block_offset, int valid_items) { // Load items in striped fashion if (FULL_TILE) { // Full tile of samples to read and composite SampleT items[ITEMS_PER_THREAD]; // Unguarded loads #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { items[ITEM] = d_in[channel + block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS)]; } // Composite our histogram data Composite(items, thread_counters[channel]); } else { // Only a partially-full tile of samples to read and composite SampleT items[ITEMS_PER_THREAD]; // Assign our tid as the bin for out-of-bounds items (to give an even distribution), and keep track of how oob items to subtract out later int bounds = (valid_items - (threadIdx.x * CHANNELS)); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { items[ITEM] = ((ITEM * BLOCK_THREADS * CHANNELS) < bounds) ? 
d_in[channel + block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS)] : 0; } // Composite our histogram data Composite(items, thread_counters[channel]); __syncthreads(); // Correct the overcounting in the zero-bin from invalid (out-of-bounds) items if (threadIdx.x == 0) { int extra = (TILE_ITEMS - valid_items) / CHANNELS; thread_counters[channel][0] -= extra; } } } /** * Template iteration over channels (to silence not-unrolled warnings for SM10-13). Inductive step. */ template <bool FULL_TILE, int CHANNEL, int END> struct IterateChannels { /** * Process one channel within a tile. */ static __device__ __forceinline__ void ConsumeTileChannel( BlockHistogramTilesSort *cta, SizeT block_offset, int valid_items) { __syncthreads(); cta->ConsumeTileChannel<FULL_TILE>(CHANNEL, block_offset, valid_items); IterateChannels<FULL_TILE, CHANNEL + 1, END>::ConsumeTileChannel(cta, block_offset, valid_items); } }; /** * Template iteration over channels (to silence not-unrolled warnings for SM10-13). Base step. */ template <bool FULL_TILE, int END> struct IterateChannels<FULL_TILE, END, END> { static __device__ __forceinline__ void ConsumeTileChannel(BlockHistogramTilesSort *cta, SizeT block_offset, int valid_items) {} }; /** * Process a single tile of input */ template <bool FULL_TILE> __device__ __forceinline__ void ConsumeTile( SizeT block_offset, ///< The offset the tile to consume int valid_items = TILE_ITEMS) ///< The number of valid items in the tile { // First channel ConsumeTileChannel<FULL_TILE>(0, block_offset, valid_items); // Iterate through remaining channels IterateChannels<FULL_TILE, 1, ACTIVE_CHANNELS>::ConsumeTileChannel(this, block_offset, valid_items); } /** * Aggregate results into output */ __device__ __forceinline__ void AggregateOutput() { // Copy counters striped across threads into the histogram output #pragma unroll for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) { int channel_offset = (blockIdx.x * BINS); #pragma unroll for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER) { int bin = (COUNTER * BLOCK_THREADS) + threadIdx.x; if ((STRIPED_COUNTERS_PER_THREAD * BLOCK_THREADS == BINS) || (bin < BINS)) { d_out_histograms[CHANNEL][channel_offset + bin] = thread_counters[CHANNEL][COUNTER]; } } } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
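// ---------------------------------------------------------------------------
// Illustration only (not part of CUB): the run-length idea behind Composite().
// After sorting a tile, each bin's count is simply the length of its contiguous
// run, which the block-wide code derives from the run_begin/run_end offsets
// recorded at discontinuities. Sample values are assumed to lie in [0, bins).
#include <algorithm>
#include <vector>
static void histogram_by_sorting(std::vector<int> tile, int bins,
                                 std::vector<int> &hist) {
  hist.assign(bins, 0);
  std::sort(tile.begin(), tile.end());
  std::size_t i = 0;
  while (i < tile.size()) {
    const std::size_t run_begin = i;
    while (i < tile.size() && tile[i] == tile[run_begin]) ++i; // end of run
    hist[tile[run_begin]] += static_cast<int>(i - run_begin);  // run length = bin count
  }
}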
#include <nvidia/helper_cuda.h> #include <cudaPcl/cudaSphereHelpers.h> #define BLOCK_WIDTH 16 #define BLOCK_SIZE BLOCK_WIDTH*BLOCK_WIDTH // step size of the normals // for PointXYZI #define X_STEP 8 #define X_OFFSET 0 // for PointXYZ //#define X_STEP 4 //#define X_OFFSET 0 // TODO: try to copy the points in the tangent space out of the memory // and process them on CPU __global__ void meanInTpS2(float *d_p, float *d_q, unsigned short *z, float *mu_karch, int w, int h) //, float *N) { __shared__ float p[3*6]; // one J per column; BLOCK_SIZE columns; per column first 3 first col of J, // second 3 columns second cols of J // forth row is number of associated points __shared__ float mu[BLOCK_SIZE*4*6]; //__shared__ float Ni[BLOCK_SIZE*6]; //const int tid = threadIdx.x; const int tid = threadIdx.x + blockDim.x * threadIdx.y; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idy = threadIdx.y + blockDim.y * blockIdx.y; // caching if(tid < 3*6) p[tid] = d_p[tid]; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 0.0f; } //#pragma unroll // for(int s=0; s<6; ++s) { // Ni[tid+BLOCK_SIZE*s] = 0.0f; // } __syncthreads(); // make sure that ys have been cached for(uint32_t ix=0; ix<8; ++ix) for(uint32_t iy=0; iy<4; ++iy) { int id = idx+ix*w/8 + (idy+iy*h/4)*w; if (id<w*h) { uint16_t zi = z[id]; if(zi<6){ // if point is good float q[3], x[3]; q[0] = d_q[id*X_STEP+X_OFFSET+0]; q[1] = d_q[id*X_STEP+X_OFFSET+1]; q[2] = d_q[id*X_STEP+X_OFFSET+2]; Log_p(p+zi*3,q,x); // float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1] // + q[2]*p[zi*3+2])); // float theta = acosf(dot); // float sinc; // if(theta < 1.e-8) // sinc = 1.0f; // else // sinc = theta/sinf(theta); // float x[3]; // x[0] = (q[0]-p[zi*3+0]*dot)*sinc; // x[1] = (q[1]-p[zi*3+1]*dot)*sinc; // x[2] = (q[2]-p[zi*3+2]*dot)*sinc; mu[tid+(zi*4+0)*BLOCK_SIZE] += x[0]; mu[tid+(zi*4+1)*BLOCK_SIZE] += x[1]; mu[tid+(zi*4+2)*BLOCK_SIZE] += x[2]; mu[tid+(zi*4+3)*BLOCK_SIZE] += 1.0f; } } } __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) { if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; } } __syncthreads(); } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]); } } __global__ void meanInTpS2(float *d_p, float *d_q, unsigned short *z, float* d_weights, float *mu_karch, int w, int h) //, float *N) { __shared__ float p[3*6]; // one J per column; BLOCK_SIZE columns; per column first 3 first col of J, // second 3 columns second cols of J // forth row is number of associated points __shared__ float mu[BLOCK_SIZE*4*6]; //__shared__ float Ni[BLOCK_SIZE*6]; //const int tid = threadIdx.x; const int tid = threadIdx.x + blockDim.x * threadIdx.y; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idy = threadIdx.y + blockDim.y * blockIdx.y; // caching if(tid < 3*6) p[tid] = d_p[tid]; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 0.0f; } //#pragma unroll // for(int s=0; s<6; ++s) { // Ni[tid+BLOCK_SIZE*s] = 0.0f; // } __syncthreads(); // make sure that ys have been cached for(uint32_t ix=0; ix<8; ++ix) for(uint32_t iy=0; iy<4; ++iy) { int id = idx+ix*w/8 + (idy+iy*h/4)*w; if (id<w*h) { uint16_t zi = z[id]; float wi = d_weights[id]; if(zi<6){ // if point is good float q[3],x[3]; q[0] = d_q[id*X_STEP+X_OFFSET+0]; q[1] = d_q[id*X_STEP+X_OFFSET+1]; q[2] = 
d_q[id*X_STEP+X_OFFSET+2]; Log_p(p+zi*3,q,x); // float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1] // + q[2]*p[zi*3+2])); // float theta = acosf(dot); // float sinc; // if(theta < 1.e-8) // sinc = 1.0f; // else // sinc = theta/sinf(theta); // float x[3]; // x[0] = (q[0]-p[zi*3+0]*dot)*sinc; // x[1] = (q[1]-p[zi*3+1]*dot)*sinc; // x[2] = (q[2]-p[zi*3+2]*dot)*sinc; mu[tid+(zi*4+0)*BLOCK_SIZE] += wi*x[0]; mu[tid+(zi*4+1)*BLOCK_SIZE] += wi*x[1]; mu[tid+(zi*4+2)*BLOCK_SIZE] += wi*x[2]; mu[tid+(zi*4+3)*BLOCK_SIZE] += wi; } } } __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) { if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; } } __syncthreads(); } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]); } } extern "C" void meanInTpS2GPU(float *h_p, float *d_p, float *h_mu_karch, float *d_mu_karch, float *d_q, uint16_t *d_z, float* d_weights ,int w, int h) { for(uint32_t i=0; i<4*6; ++i) h_mu_karch[i] =0.0f; checkCudaErrors(cudaMemcpy(d_mu_karch, h_mu_karch, 6*4* sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, 6*3* sizeof(float), cudaMemcpyHostToDevice)); dim3 threads(BLOCK_WIDTH,BLOCK_WIDTH,1); // this way for 640x480 there is no remainders //dim3 blocks(w/128+(w%128>0?1:0), h/32+(h%32>0?1:0),1); // this still seems to be fastest dim3 blocks(w/128+(w%128>0?1:0), h/64+(h%64>0?1:0),1); //printf("%d x %d",w/32+(w%32>0?1:0),h/16+(h%16>0?1:0)); if(d_weights == NULL) meanInTpS2<<<blocks,threads>>>(d_p,d_q, d_z, d_mu_karch,w,h); else meanInTpS2<<<blocks,threads>>>(d_p,d_q, d_z, d_weights, d_mu_karch,w,h); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_mu_karch, d_mu_karch, 6*4*sizeof(float), cudaMemcpyDeviceToHost)); }; __global__ void sufficientStatisticsOnTpS2( float *d_p, float *Rnorths, float *d_q, unsigned short *z, int w, int h, float *SSs ) //, float *N) { __shared__ float p[3*6]; // sufficient statistics for whole blocksize // 2 (x in TpS @north) + 1 (count) + 4 (outer product in TpS @north) // all fo that times 6 for the different axes __shared__ float xSSs[BLOCK_SIZE*(2+1+4)*6]; __shared__ float sRnorths[6*6]; //const int tid = threadIdx.x; const int tid = threadIdx.x + blockDim.x * threadIdx.y; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idy = threadIdx.y + blockDim.y * blockIdx.y; // caching if(tid < 3*6) p[tid] = d_p[tid]; if(3*6 <= tid && tid <3*6+6*6) sRnorths[tid-3*6] = Rnorths[tid-3*6]; #pragma unroll for(int s=0; s<6*7; ++s) { // this is almost certainly bad ordering xSSs[tid+BLOCK_SIZE*s] = 0.0f; } //#pragma unroll // for(int s=0; s<6; ++s) { // Ni[tid+BLOCK_SIZE*s] = 0.0f; // } __syncthreads(); // make sure that ys have been cached for(uint32_t ix=0; ix<8; ++ix) for(uint32_t iy=0; iy<4; ++iy) { int id = idx+ix*w/8 + (idy+iy*h/4)*w; if (id<w*h) { uint16_t zi = z[id]; if(zi<6){ // if point is good // copy q into local memory float q[3]; q[0] = d_q[id*X_STEP+X_OFFSET+0]; q[1] = d_q[id*X_STEP+X_OFFSET+1]; q[2] = d_q[id*X_STEP+X_OFFSET+2]; // transform to TpS^2 float dot = min(1.0f,max(-1.0f,q[0]*p[zi*3+0] + q[1]*p[zi*3+1] + q[2]*p[zi*3+2])); float theta = acosf(dot); float sinc; if(theta < 1.e-8) sinc = 1.0f; else sinc = theta/sinf(theta); float x[3]; x[0] = (q[0]-p[zi*3+0]*dot)*sinc; x[1] = (q[1]-p[zi*3+1]*dot)*sinc; x[2] = (q[2]-p[zi*3+2]*dot)*sinc; // rotate up to north pole float xNorth[2]; xNorth[0] = sRnorths[zi*6+0]*x[0] + 
sRnorths[zi*6+1]*x[1] + sRnorths[zi*6+2]*x[2]; xNorth[1] = sRnorths[zi*6+3]*x[0] + sRnorths[zi*6+4]*x[1] + sRnorths[zi*6+5]*x[2]; // input sufficient statistics xSSs[tid+(zi*7+0)*BLOCK_SIZE] += xNorth[0]; xSSs[tid+(zi*7+1)*BLOCK_SIZE] += xNorth[1]; xSSs[tid+(zi*7+2)*BLOCK_SIZE] += xNorth[0]*xNorth[0]; xSSs[tid+(zi*7+3)*BLOCK_SIZE] += xNorth[1]*xNorth[0]; xSSs[tid+(zi*7+4)*BLOCK_SIZE] += xNorth[0]*xNorth[1]; xSSs[tid+(zi*7+5)*BLOCK_SIZE] += xNorth[1]*xNorth[1]; xSSs[tid+(zi*7+6)*BLOCK_SIZE] += 1.0f; } } } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) { if(tid < s) { #pragma unroll for( int k=0; k<6*7; ++k) { int tidk = k*BLOCK_SIZE+tid; xSSs[tidk] += xSSs[tidk + s]; } } __syncthreads(); } if(tid < 6*7) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd(&SSs[tid],xSSs[tid*BLOCK_SIZE]+xSSs[tid*BLOCK_SIZE+1]); } } extern "C" void sufficientStatisticsOnTpS2GPU(float *h_p, float *d_p, float *h_Rnorths, float *d_Rnorths, float *d_q, uint16_t *d_z ,int w, int h, float *h_SSs, float *d_SSs) { for(uint32_t i=0; i<7*6; ++i) h_SSs[i] =0.0f; checkCudaErrors(cudaMemcpy(d_SSs, h_SSs, 6*7* sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, 6*3* sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_Rnorths, h_Rnorths, 6*6* sizeof(float), cudaMemcpyHostToDevice)); dim3 threads(16,16,1); dim3 blocks(w/128+(w%128>0?1:0), h/64+(h%64>0?1:0),1); sufficientStatisticsOnTpS2<<<blocks,threads>>>(d_p,d_Rnorths, d_q, d_z,w,h,d_SSs); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_SSs, d_SSs, 6*7*sizeof(float), cudaMemcpyDeviceToHost)); };
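// ---------------------------------------------------------------------------
// Host-side sketch (assumption, not part of the original file): finalizing one
// Karcher-mean step from the buffer filled by meanInTpS2GPU. As accumulated by
// the kernels above, h_mu_karch holds, for each of the 6 clusters k, the
// tangent-space sums and the (possibly weighted) point count laid out as
// { sum_x, sum_y, sum_z, count } at indices k*4 .. k*4+3.
static void finalize_karcher_step(const float h_mu_karch[6 * 4],
                                  float mean_tangent[6][3]) {
  for (int k = 0; k < 6; ++k) {
    const float count = h_mu_karch[k * 4 + 3];
    for (int c = 0; c < 3; ++c) {
      // Empty clusters keep a zero tangent vector instead of dividing by zero.
      mean_tangent[k][c] = (count > 0.f) ? h_mu_karch[k * 4 + c] / count : 0.f;
    }
  }
}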
#define LBANN_BATCH_NORMALIZATION_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/regularizers/batch_normalization.hpp" #include "lbann/weights/weights_helpers.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** Functor for adding arrays. */ template <typename T, size_t N> struct array_sum { using ArrayType = gpu_lib::array<T,N>; __device__ __forceinline__ ArrayType operator()(const ArrayType& x, const ArrayType& y) { ArrayType sum; #pragma unroll for (size_t i = 0; i < N; ++i) { sum[i] = x[i] + y[i]; } return sum; } }; /** Accumulate sums and sums of squares for each channel. * * On input, sums and sqsums are assumed to be filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (channel_size / bsize) x num_channels x 1 */ template <typename TensorDataType, int bdimx> __global__ void fp_sums_kernel( int mini_batch_size, int num_channels, int channel_size, const TensorDataType * __restrict__ data, int data_ldim, TensorDataType * __restrict__ sums, TensorDataType * __restrict__ sqsums) { // Indices and dimensions constexpr int bdimy = 1; constexpr int bdimz = 1; const auto& tid = threadIdx.x; const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x; const auto& gidy = blockIdx.y; const auto& nthreadsx = blockDim.x * gridDim.x; const auto& nthreadsy = gridDim.y; for (int channel = gidy; channel < num_channels; channel += nthreadsy) { // Accumulate sums and perform block-wide reduction using array_t = gpu_lib::array<TensorDataType,2>; using array_sum_t = array_sum<TensorDataType,2>; array_t sum_sqsum; sum_sqsum[0] = TensorDataType(0); sum_sqsum[1] = TensorDataType(0); for (int i = gidx; i < channel_size; i += nthreadsx) { for (int j = 0; j < mini_batch_size; ++j) { const auto& x = data[i + channel*channel_size + j*data_ldim]; sum_sqsum[0] += x; sum_sqsum[1] += x * x; } } sum_sqsum = gpu_lib::block_reduce<bdimx,bdimy,bdimz,array_t,array_sum_t>(sum_sqsum); // Output result to global memory if (tid == 0) { gpu_lib::atomic_add(&sums[channel], sum_sqsum[0]); gpu_lib::atomic_add(&sqsums[channel], sum_sqsum[1]); } } } /** Compute statistics for each channel. * * On input, global_mean and global_var are assumed to contain sums * and squares of sums, respectively. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (num_channels / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void fp_statistics_kernel( int num_sums, int num_per_sum, TensorDataType epsilon, TensorDataType decay, TensorDataType * __restrict__ global_mean, TensorDataType * __restrict__ global_var, TensorDataType * __restrict__ global_running_mean, TensorDataType * __restrict__ global_running_var) { const auto& gid = threadIdx.x + blockIdx.x * blockDim.x; const auto& num_threads = blockDim.x * gridDim.x; for (auto i = gid; i < num_sums; i += num_threads) { TensorDataType num_per_sum_dt = TensorDataType(num_per_sum); // Compute mean and variance const auto& mean = global_mean[i] / num_per_sum_dt; const auto& sqmean = global_var[i] / num_per_sum_dt; auto var = num_per_sum_dt * (sqmean - mean * mean) / TensorDataType(num_per_sum - 1); var = var > epsilon ? var : epsilon; global_mean[gid] = mean; global_var[gid] = var; // Compute running statistics auto& running_mean = global_running_mean[gid]; auto& running_var = global_running_var[gid]; running_mean = decay * running_mean + (TensorDataType(1.0) - decay) * mean; running_var = decay * running_var + (TensorDataType(1.0) - decay) * var; } } /** Compute outputs. 
* * y_i = (x_i - mean) / sqrt(var + epsilon) * * Block dimensions: bdimx x bdimy x bdimz * * Grid dimensions: (channel_size / bdimx) x (mini_batch_size / bdimy) x (num_channels / bdimz) * */ template <typename TensorDataType> __global__ void fp_output_kernel( int mini_batch_size, int num_channels, int channel_size, const TensorDataType * __restrict__ global_input, int input_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, const TensorDataType * __restrict__ global_bias, TensorDataType * __restrict__ global_output, int output_ldim) { // Indices and dimensions const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x; const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y; const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z; const auto& nthreadsx = blockDim.x * gridDim.x; const auto& nthreadsy = blockDim.y * gridDim.y; const auto& nthreadsz = blockDim.z * gridDim.z; for (auto k = gidz; k < num_channels; k += nthreadsz) { const auto& mean = global_mean[k]; const auto& var = global_var[k]; const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon); const auto& scale = global_scale[k]; const auto& bias = global_bias[k]; for (auto j = gidy; j < mini_batch_size; j += nthreadsy) { for (auto i = gidx; i < channel_size; i += nthreadsx) { const auto& x = global_input[i + k*channel_size + j*input_ldim]; const auto& xhat = (x - mean) * inv_stdev; const auto& y = scale * xhat + bias; global_output[i + k*channel_size + j*output_ldim] = y; } } } } /** Compute gradients w.r.t. statistics and affine transform. * * dL/dscale = sum(dL/dy_i * xhat_i) * * dL/dbias = sum(dL/dy_i) * * dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon) * * dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2 * * On input, means_grad and vars_grad are filled with zeros. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (channel_size / bsize) x num_channels x 1 */ template <typename TensorDataType, int bdimx> __global__ void bp_statistics_grad_kernel( int mini_batch_size, int num_channels, int channel_size, const TensorDataType * __restrict__ global_input, int input_ldim, const TensorDataType * __restrict__ global_gradient_wrt_output, int gradient_wrt_output_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, TensorDataType * __restrict__ global_dscale, TensorDataType * __restrict__ global_dbias, TensorDataType * __restrict__ global_dmean, TensorDataType * __restrict__ global_dvar) { // Indices and dimensions constexpr int bdimy = 1; constexpr int bdimz = 1; const auto& tid = threadIdx.x; const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x; const auto& gidy = blockIdx.y; const auto& nthreadsx = blockDim.x * gridDim.x; const auto& nthreadsy = gridDim.y; for (int channel = gidy; channel < num_channels; channel += nthreadsy) { // Copy batch normalization parameters to private memory const auto& mean = global_mean[channel]; const auto& var = global_var[channel]; const auto& scale = global_scale[channel]; // Compute useful constants const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon); const auto& dvar_factor = inv_stdev * inv_stdev * inv_stdev * TensorDataType(0.5); // Accumulate sums and perform block-wide reduction using array_t = gpu_lib::array<TensorDataType,4>; using array_sum_t = array_sum<TensorDataType,4>; array_t sums; sums[0] = TensorDataType(0); sums[1] = TensorDataType(0); sums[2] = TensorDataType(0); sums[3] = TensorDataType(0); for (int i = gidx; i < channel_size; i += nthreadsx) { for (int j = 0; j < mini_batch_size; ++j) { const auto& x = global_input[i + channel*channel_size + j*input_ldim]; const auto& xhat = (x - mean) * inv_stdev; const auto& dy = global_gradient_wrt_output[i + channel*channel_size + j*gradient_wrt_output_ldim]; sums[0] += dy * xhat; sums[1] += dy; const auto& dxhat = dy * scale; sums[2] -= dxhat * inv_stdev; sums[3] -= dxhat * (x - mean) * dvar_factor; } } sums = gpu_lib::block_reduce<bdimx,bdimy,bdimz,array_t,array_sum_t>(sums); // Output result to global memory if (tid == 0) { gpu_lib::atomic_add(&global_dscale[channel], sums[0]); gpu_lib::atomic_add(&global_dbias[channel], sums[1]); gpu_lib::atomic_add(&global_dmean[channel], sums[2]); gpu_lib::atomic_add(&global_dvar[channel], sums[3]); } } } /** Compute gradients w.r.t. input. 
* * dL/dx_i = ( dL/dxhat_i / sqrt(var+epsilon) * + dL/dmean / n * + dL/dvar * (x_i - mean) * 2/(n-1) ) * * Block dimensions: bdimx x bdimy x bdimz * * Grid dimensions: (channel_size / bdimx) x (mini_batch_size / bdimy) x (num_channels / bdimz) */ template <typename TensorDataType> __global__ void bp_input_grad_kernel( int mini_batch_size, int num_channels, int channel_size, int num_per_sum, const TensorDataType * __restrict__ global_input, int input_ldim, const TensorDataType * __restrict__ global_gradient_wrt_output, int gradient_wrt_output_ldim, const TensorDataType * __restrict__ global_mean, const TensorDataType * __restrict__ global_var, TensorDataType epsilon, const TensorDataType * __restrict__ global_scale, const TensorDataType * __restrict__ global_dmean, const TensorDataType * __restrict__ global_dvar, TensorDataType * __restrict__ global_gradient_wrt_input, int gradient_wrt_input_ldim) { // Indices and dimensions const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x; const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y; const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z; const auto& nthreadsx = blockDim.x * gridDim.x; const auto& nthreadsy = blockDim.y * gridDim.y; const auto& nthreadsz = blockDim.z * gridDim.z; for (auto k = gidz; k < num_channels; k += nthreadsz) { const auto& mean = global_mean[k]; const auto& var = global_var[k]; const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon); const auto& scale = global_scale[k]; const auto& dmean = global_dmean[k]; const auto& dvar = global_dvar[k]; const auto& dmean_term = dmean / TensorDataType(num_per_sum); const auto& dvar_term = dvar * TensorDataType(2) / TensorDataType(num_per_sum - 1); for (auto j = gidy; j < mini_batch_size; j += nthreadsy) { for (auto i = gidx; i < channel_size; i += nthreadsx) { const auto& x = global_input[i + k*channel_size + j*input_ldim]; const auto& dy = global_gradient_wrt_output[i + k*channel_size + j*gradient_wrt_output_ldim]; const auto& dxhat = dy * scale; auto& dx = global_gradient_wrt_input[i + k*channel_size + j*gradient_wrt_input_ldim]; dx = dxhat * inv_stdev + dmean_term + dvar_term * (x - mean); } } } } } // namespace #ifdef LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_distconv_adapter<TensorDataType, T_layout, Dev>::fp_compute() { assert_always(Dev == El::Device::GPU); assert_always(T_layout == data_layout::DATA_PARALLEL); using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; auto &l = dynamic_cast<batch_normalization_layer< TensorDataType, T_layout, Dev>&>(this->layer()); const bool is_training = l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; auto& local_running_mean = ValuesGetter::mutable_values(l.get_weights(2)).Matrix(); auto& local_running_var = ValuesGetter::mutable_values(l.get_weights(3)).Matrix(); assert0(dc::tensor::View( m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); assert0(dc::tensor::View( m_bias, l.weights_values(1).LockedMatrix().LockedBuffer())); assert0(dc::tensor::View( m_running_mean, local_running_mean.Buffer())); assert0(dc::tensor::View( m_running_var, local_running_var.Buffer())); m_bn->forward_stage1(this->get_prev_activations(), m_mean, m_var, is_training); if (l.m_statistics_group_size == 0) { l.get_comm()->allreduce(*l.m_mean_and_var, l.m_mean_and_var->RedundantComm(), El::mpi::SUM); } else if (l.m_statistics_group_size == 1) { // Local aggregation } else { LBANN_ERROR("statics_group_size must be either 0 
or 1 for now."); } m_bn->forward_stage2(this->get_prev_activations(), m_mean, m_var, m_running_mean, m_running_var, m_scale, m_bias, this->get_activations(), is_training); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_distconv_adapter<TensorDataType, T_layout, Dev>::bp_compute() { assert_always(Dev == El::Device::GPU); assert_always(T_layout == data_layout::DATA_PARALLEL); auto &l = dynamic_cast<batch_normalization_layer< TensorDataType, T_layout, Dev>&>(this->layer()); // Check execution mode const bool is_training = l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; assert_always(is_training); assert0(dc::tensor::View( m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); m_bn->backward_stage1(this->get_prev_activations(), this->get_prev_error_signals(), m_mean, m_var, m_scale, m_scale_gradient, m_bias_gradient, m_mean_gradient, m_var_gradient); // Verbatim copy from bp_compute_gpu // Accumulate gradients if (is_training) { if (l.m_statistics_group_size == 0) { l.get_comm()->allreduce(*l.m_mean_and_var_gradient, l.m_mean_and_var_gradient->RedundantComm(), El::mpi::SUM); } } else { Zero(*l.m_mean_and_var_gradient); } auto* scale_optimizer = l.get_weights(0).get_optimizer(); if (scale_optimizer != nullptr) { scale_optimizer->add_to_gradient(*l.m_scale_gradient, TensorDataType{1.f}, true); } auto* bias_optimizer = l.get_weights(1).get_optimizer(); if (bias_optimizer != nullptr) { bias_optimizer->add_to_gradient(*l.m_bias_gradient, TensorDataType{1.f}, true); } m_bn->backward_stage2(this->get_prev_activations(), this->get_prev_error_signals(), m_mean, m_var, m_scale, m_mean_gradient, m_var_gradient, this->get_error_signals()); } #endif // LBANN_HAS_DISTCONV template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_layer<TensorDataType, T_layout, Dev>::fp_compute() { #ifdef LBANN_HAS_DISTCONV if (this->distconv_enabled()) { get_distconv_adapter().fp_compute(); return; } #endif // LBANN_HAS_DISTCONV const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; // Matrices const auto& input = this->get_prev_activations(); const auto& local_input = input.LockedMatrix(); auto& local_output = this->get_local_activations(); // Matrix parameters const auto& width = input.Width(); const auto& local_width = local_input.Width(); const auto& output_dims = this->get_output_dims(); const auto& num_channels = output_dims[0]; const auto& channel_size = this->get_output_size() / num_channels; // Compute statistics if (is_training) { using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; // Local matrices auto& local_mean = this->m_mean_v->Matrix(); auto& local_var = this->m_var_v->Matrix(); auto& local_running_mean = ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); auto& local_running_var = ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); // Compute sums and sums of squares El::Zero(local_mean); El::Zero(local_var); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_mean), gpu::get_sync_info(local_var), gpu::get_sync_info(local_input)); const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_sums_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, 
multisync, local_width, num_channels, channel_size, local_input.LockedBuffer(), local_input.LDim(), local_mean.Buffer(), local_var.Buffer()); } int num_per_sum; if (this->m_statistics_group_size == 0) { // Global statistics aggregation; allreduce on fused buffer. this->get_comm()->allreduce(*this->m_mean_and_var, this->m_mean_and_var->RedundantComm(), El::mpi::SUM); num_per_sum = channel_size * width; } else if (this->m_statistics_group_size == 1) { // Local aggregation, no allreduce needed. num_per_sum = channel_size * local_width; } else { // Grouped batchnorm. Allreduce on fused buffer. this->get_comm()->allreduce( *this->m_mean_and_var, this->get_comm()->get_packed_group_comm(this->m_statistics_group_size), El::mpi::SUM); if (this->m_num_per_sum_cache.count(width) == 0) { num_per_sum = channel_size * local_width; num_per_sum = this->get_comm()->allreduce( num_per_sum, this->get_comm()->get_packed_group_comm(this->m_statistics_group_size)); this->m_num_per_sum_cache[width] = num_per_sum; } else { num_per_sum = this->m_num_per_sum_cache[width]; } } // Compute minibatch statistics if (num_per_sum <= 1) { El::Fill(local_var, TensorDataType(1.0)); } else if (num_channels > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_running_mean), gpu::get_sync_info(local_running_var), gpu::get_sync_info(local_mean), gpu::get_sync_info(local_var)); constexpr size_t block_dim = 256; dim3 grid_dims((num_channels + block_dim - 1) / block_dim, 1, 1); gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_statistics_kernel<TensorDataType>, grid_dims, block_dim, 0, multisync, num_channels, num_per_sum, this->m_epsilon, this->m_decay, local_mean.Buffer(), local_var.Buffer(), local_running_mean.Buffer(), local_running_var.Buffer()); } } // Apply batch normalization const auto& local_scale = this->weights_values(0).LockedMatrix(); const auto& local_bias = this->weights_values(1).LockedMatrix(); const auto& local_mean = (is_training ? this->m_mean_v->LockedMatrix() : this->weights_values(2).LockedMatrix()); const auto& local_var = (is_training ? this->m_var_v->LockedMatrix() : this->weights_values(3).LockedMatrix()); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_scale), gpu::get_sync_info(local_bias), gpu::get_sync_info(local_var), gpu::get_sync_info(local_mean), gpu::get_sync_info(local_input)); const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = local_width; grid_dims.z = num_channels; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_width, num_channels, channel_size, local_input.LockedBuffer(), local_input.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_bias.LockedBuffer(), local_output.Buffer(), local_output.LDim()); } } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void batch_normalization_layer<TensorDataType, T_layout, Dev>::bp_compute() { #ifdef LBANN_HAS_DISTCONV if (this->distconv_enabled()) { get_distconv_adapter().bp_compute(); return; } #endif // LBANN_HAS_DISTCONV const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; // Matrices const auto& local_scale = this->weights_values(0).LockedMatrix(); const auto& local_mean = (is_training ? 
this->m_mean_v->LockedMatrix() : this->weights_values(2).LockedMatrix()); const auto& local_var = (is_training ? this->m_var_v->LockedMatrix() : this->weights_values(3).LockedMatrix()); const auto& input = this->get_prev_activations(); const auto& local_input = input.LockedMatrix(); const auto& local_gradient_wrt_output = this->get_local_prev_error_signals(); auto& local_gradient_wrt_input = this->get_local_error_signals(); auto& local_mean_gradient = this->m_mean_gradient_v->Matrix(); auto& local_var_gradient = this->m_var_gradient_v->Matrix(); auto& local_scale_gradient = this->m_scale_gradient->Matrix(); auto& local_bias_gradient = this->m_bias_gradient->Matrix(); // Matrix parameters const auto& width = input.Width(); const auto& local_width = local_input.Width(); const auto& output_dims = this->get_output_dims(); const auto& num_channels = output_dims[0]; const auto& channel_size = this->get_output_size() / num_channels; // Compute local gradients // Compute gradients w.r.t. batch norm parameters El::Zero(local_scale_gradient); El::Zero(local_bias_gradient); El::Zero(local_mean_gradient); El::Zero(local_var_gradient); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_mean_gradient), gpu::get_sync_info(local_var_gradient), gpu::get_sync_info(local_scale_gradient), gpu::get_sync_info(local_bias_gradient)); if (!local_input.IsEmpty()) { constexpr int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( bp_statistics_grad_kernel<TensorDataType,block_size>, grid_dims, block_dims, 0, multisync, local_width, num_channels, channel_size, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_scale_gradient.Buffer(), local_bias_gradient.Buffer(), local_mean_gradient.Buffer(), local_var_gradient.Buffer()); } // Accumulate gradients if (is_training) { if (this->m_statistics_group_size == 0) { // Global aggregation; allreduce on fused buffer. this->get_comm()->allreduce(*this->m_mean_and_var_gradient, this->m_mean_and_var_gradient->RedundantComm(), El::mpi::SUM); } else if (this->m_statistics_group_size > 1) { // Grouped batchnorm; allreduce on fused buffer. this->get_comm()->allreduce(*this->m_mean_and_var_gradient, this->get_comm()->get_packed_group_comm(this->m_statistics_group_size), El::mpi::SUM); } } else { // Zero fused buffer. El::Zero(*this->m_mean_and_var_gradient); } auto* scale_optimizer = this->get_weights(0).get_optimizer(); if (scale_optimizer != nullptr) { scale_optimizer->add_to_gradient(*this->m_scale_gradient, TensorDataType(1.0), true); } auto* bias_optimizer = this->get_weights(1).get_optimizer(); if (bias_optimizer != nullptr) { bias_optimizer->add_to_gradient(*this->m_bias_gradient, TensorDataType(1.0), true); } // Compute error signal int num_per_sum; if (this->m_statistics_group_size == 0) { // Global statistics aggregation. num_per_sum = channel_size * width; } else if (this->m_statistics_group_size == 1) { // Local aggregation. num_per_sum = channel_size * local_width; } else { // Grouped batchnorm. num_per_sum = this->m_num_per_sum_cache[width]; // This was computed in FP. 
} if (num_per_sum <= 1) { El::Zero(local_gradient_wrt_input); } else if (!local_input.IsEmpty()) { constexpr int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = local_width; grid_dims.z = num_channels; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( bp_input_grad_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_width, num_channels, channel_size, num_per_sum, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, local_scale.LockedBuffer(), local_mean_gradient.LockedBuffer(), local_var_gradient.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } #define PROTO(T) \ template class batch_normalization_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU> #include "lbann/macros/instantiate.hpp" } // namespace lbann
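// ---------------------------------------------------------------------------
// A minimal standalone sketch (separate from the LBANN file above) of the
// grid-sizing pattern used by every kernel launch in this layer:
// x = ceil(channel_size / 256), y = local mini-batch width, z = channel count.
// It uses plain dim3 instead of LBANN's hydrogen/gpu_lib wrappers; make_bn_grid
// and the sample sizes below are illustrative only, not part of the layer code.
#include <cuda_runtime.h>
#include <cstdio>

// Ceiling division used for the x-dimension of the launches above.
static inline unsigned ceil_div(unsigned a, unsigned b) { return (a + b - 1) / b; }

// Mirrors grid_dims = {ceil(channel_size / block), local_width, num_channels}.
static dim3 make_bn_grid(unsigned channel_size, unsigned local_width,
                         unsigned num_channels, unsigned block_size = 256)
{
  return dim3(ceil_div(channel_size, block_size), local_width, num_channels);
}

int main()
{
  // Example: 64 channels of 28x28 activations, local mini-batch width of 32.
  dim3 grid = make_bn_grid(28 * 28, 32, 64);
  std::printf("grid = (%u, %u, %u)\n", grid.x, grid.y, grid.z); // prints (4, 32, 64)
  return 0;
}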
#include "cuda_helper.h" uint32_t *d_gnounce[MAX_GPUS]; uint32_t *d_GNonce[MAX_GPUS]; //__constant__ uint32_t pTarget[8]; #define C32e(x) \ ((SPH_C32(x) >> 24) \ | ((SPH_C32(x) >> 8) & SPH_C32(0x0000FF00)) \ | ((SPH_C32(x) << 8) & SPH_C32(0x00FF0000)) \ | ((SPH_C32(x) << 24) & SPH_C32(0xFF000000))) #define PC32up(j, r) ((uint32_t)((j) + (r))) #define PC32dn(j, r) 0 #define QC32up(j, r) 0xFFFFFFFF #define QC32dn(j, r) ((((uint32_t)(r)^ ~((uint32_t)(j))) << 24) ^0xffffff) #define B32_0(x) (x&0xff) //#define B32_0(x) __byte_perm(x, 0, 0x4440) #define B32_1(x) __byte_perm(x, 0, 0x4441) #define B32_2(x) __byte_perm(x, 0, 0x4442) #define B32_3(x) __byte_perm(x, 0, 0x4443) #define USE_SHARED 1 // Maxwell and Fermi cards get the best speed with SHARED access it seems. #if USE_SHARED #define T0up(x) (*(mixtabs + ( (x)))) #define T0dn(x) (*(mixtabs + (256+(x)))) #define T1up(x) (*(mixtabs + (512+(x)))) #define T1dn(x) (*(mixtabs + (768+(x)))) #define T2up(x) (*(mixtabs + (1024+(x)))) #define T2dn(x) (*(mixtabs + (1280+(x)))) #define T3up(x) (*(mixtabs + (1536+(x)))) #define T3dn(x) (*(mixtabs + (1792+(x)))) #else #define T0up(x) tex1Dfetch(t0up2, x) #define T0dn(x) tex1Dfetch(t0dn2, x) #define T1up(x) tex1Dfetch(t1up2, x) #define T1dn(x) tex1Dfetch(t1dn2, x) #define T2up(x) tex1Dfetch(t2up2, x) #define T2dn(x) tex1Dfetch(t2dn2, x) #define T3up(x) tex1Dfetch(t3up2, x) #define T3dn(x) tex1Dfetch(t3dn2, x) #endif texture<uint32_t, 1, cudaReadModeElementType> t0up2; texture<uint32_t, 1, cudaReadModeElementType> t0dn2; texture<uint32_t, 1, cudaReadModeElementType> t1up2; texture<uint32_t, 1, cudaReadModeElementType> t1dn2; texture<uint32_t, 1, cudaReadModeElementType> t2up2; texture<uint32_t, 1, cudaReadModeElementType> t2dn2; texture<uint32_t, 1, cudaReadModeElementType> t3up2; texture<uint32_t, 1, cudaReadModeElementType> t3dn2; #define RSTT(d0, d1, a, b0, b1, b2, b3, b4, b5, b6, b7) do { \ t[d0] = T0up(B32_0(a[b0])) \ ^ T1up(B32_1(a[b1])) \ ^ T2up(B32_2(a[b2])) \ ^ T3up(B32_3(a[b3])) \ ^ T0dn(B32_0(a[b4])) \ ^ T1dn(B32_1(a[b5])) \ ^ T2dn(B32_2(a[b6])) \ ^ T3dn(B32_3(a[b7])); \ t[d1] = T0dn(B32_0(a[b0])) \ ^ T1dn(B32_1(a[b1])) \ ^ T2dn(B32_2(a[b2])) \ ^ T3dn(B32_3(a[b3])) \ ^ T0up(B32_0(a[b4])) \ ^ T1up(B32_1(a[b5])) \ ^ T2up(B32_2(a[b6])) \ ^ T3up(B32_3(a[b7])); \ } while (0) extern uint32_t T0up_cpu[]; extern uint32_t T0dn_cpu[]; extern uint32_t T1up_cpu[]; extern uint32_t T1dn_cpu[]; extern uint32_t T2up_cpu[]; extern uint32_t T2dn_cpu[]; extern uint32_t T3up_cpu[]; extern uint32_t T3dn_cpu[]; __device__ __forceinline__ void groestl256_perm_P(uint32_t thread, uint32_t *a, uint32_t *mixtabs) { uint32_t t[16]; #pragma unroll for (int r = 0; r<10; r++) { a[0x0] ^= PC32up(0x00, r); a[0x2] ^= PC32up(0x10, r); a[0x4] ^= PC32up(0x20, r); a[0x6] ^= PC32up(0x30, r); a[0x8] ^= PC32up(0x40, r); a[0xA] ^= PC32up(0x50, r); a[0xC] ^= PC32up(0x60, r); a[0xE] ^= PC32up(0x70, r); RSTT(0x0, 0x1, a, 0x0, 0x2, 0x4, 0x6, 0x9, 0xB, 0xD, 0xF); RSTT(0x2, 0x3, a, 0x2, 0x4, 0x6, 0x8, 0xB, 0xD, 0xF, 0x1); RSTT(0x4, 0x5, a, 0x4, 0x6, 0x8, 0xA, 0xD, 0xF, 0x1, 0x3); RSTT(0x6, 0x7, a, 0x6, 0x8, 0xA, 0xC, 0xF, 0x1, 0x3, 0x5); RSTT(0x8, 0x9, a, 0x8, 0xA, 0xC, 0xE, 0x1, 0x3, 0x5, 0x7); RSTT(0xA, 0xB, a, 0xA, 0xC, 0xE, 0x0, 0x3, 0x5, 0x7, 0x9); RSTT(0xC, 0xD, a, 0xC, 0xE, 0x0, 0x2, 0x5, 0x7, 0x9, 0xB); RSTT(0xE, 0xF, a, 0xE, 0x0, 0x2, 0x4, 0x7, 0x9, 0xB, 0xD); #pragma unroll 16 for (int k = 0; k<16; k++) a[k] = t[k]; } } __device__ __forceinline__ void groestl256_perm_Q(uint32_t thread, uint32_t *a, uint32_t *mixtabs) { uint32_t 
t[16]; #pragma unroll 2 for (int r = 0; r<10; r++) { a[0x0] ^= QC32up(0x00, r); a[0x1] ^= QC32dn(0x00, r); a[0x2] ^= QC32up(0x10, r); a[0x3] ^= QC32dn(0x10, r); a[0x4] ^= QC32up(0x20, r); a[0x5] ^= QC32dn(0x20, r); a[0x6] ^= QC32up(0x30, r); a[0x7] ^= QC32dn(0x30, r); a[0x8] ^= QC32up(0x40, r); a[0x9] ^= QC32dn(0x40, r); a[0xA] ^= QC32up(0x50, r); a[0xB] ^= QC32dn(0x50, r); a[0xC] ^= QC32up(0x60, r); a[0xD] ^= QC32dn(0x60, r); a[0xE] ^= QC32up(0x70, r); a[0xF] ^= QC32dn(0x70, r); RSTT(0x0, 0x1, a, 0x2, 0x6, 0xA, 0xE, 0x1, 0x5, 0x9, 0xD); RSTT(0x2, 0x3, a, 0x4, 0x8, 0xC, 0x0, 0x3, 0x7, 0xB, 0xF); RSTT(0x4, 0x5, a, 0x6, 0xA, 0xE, 0x2, 0x5, 0x9, 0xD, 0x1); RSTT(0x6, 0x7, a, 0x8, 0xC, 0x0, 0x4, 0x7, 0xB, 0xF, 0x3); RSTT(0x8, 0x9, a, 0xA, 0xE, 0x2, 0x6, 0x9, 0xD, 0x1, 0x5); RSTT(0xA, 0xB, a, 0xC, 0x0, 0x4, 0x8, 0xB, 0xF, 0x3, 0x7); RSTT(0xC, 0xD, a, 0xE, 0x2, 0x6, 0xA, 0xD, 0x1, 0x5, 0x9); RSTT(0xE, 0xF, a, 0x0, 0x4, 0x8, 0xC, 0xF, 0x3, 0x7, 0xB); #pragma unroll for (int k = 0; k<16; k++) a[k] = t[k]; } } __global__ __launch_bounds__(256,2) void groestl256_gpu_hash32(uint32_t threads, uint32_t startNounce, uint64_t *const __restrict__ outputHash, uint32_t *const __restrict__ nonceVector, uint32_t target) { #if USE_SHARED __shared__ uint32_t mixtabs[2048]; uint32_t backup = target; /* if (threadIdx.x < 256) { *(mixtabs + (threadIdx.x)) = tex1Dfetch(t0up2, threadIdx.x); *(mixtabs + (256 + threadIdx.x)) = tex1Dfetch(t0dn2, threadIdx.x); *(mixtabs + (512 + threadIdx.x)) = tex1Dfetch(t1up2, threadIdx.x); *(mixtabs + (768 + threadIdx.x)) = tex1Dfetch(t1dn2, threadIdx.x); *(mixtabs + (1024 + threadIdx.x)) = tex1Dfetch(t2up2, threadIdx.x); *(mixtabs + (1280 + threadIdx.x)) = tex1Dfetch(t2dn2, threadIdx.x); *(mixtabs + (1536 + threadIdx.x)) = tex1Dfetch(t3up2, threadIdx.x); *(mixtabs + (1792 + threadIdx.x)) = tex1Dfetch(t3dn2, threadIdx.x); } */ if (threadIdx.x < 128) { *(mixtabs + (threadIdx.x)) = tex1Dfetch(t0up2, threadIdx.x); *(mixtabs + (128 + threadIdx.x)) = tex1Dfetch(t0up2, threadIdx.x+128); *(mixtabs + (256 + threadIdx.x)) = tex1Dfetch(t0dn2, threadIdx.x); *(mixtabs + (128 + 256 + threadIdx.x)) = tex1Dfetch(t0dn2, threadIdx.x+128); *(mixtabs + (512 + threadIdx.x)) = tex1Dfetch(t1up2, threadIdx.x); *(mixtabs + (128 + 512 + threadIdx.x)) = tex1Dfetch(t1up2, threadIdx.x + 128); *(mixtabs + (768 + threadIdx.x)) = tex1Dfetch(t1dn2, threadIdx.x); *(mixtabs + (128 + 768 + threadIdx.x)) = tex1Dfetch(t1dn2, threadIdx.x + 128); *(mixtabs + (1024 + threadIdx.x)) = tex1Dfetch(t2up2, threadIdx.x); *(mixtabs + (128 + 1024 + threadIdx.x)) = tex1Dfetch(t2up2, threadIdx.x + 128); *(mixtabs + (1280 + threadIdx.x)) = tex1Dfetch(t2dn2, threadIdx.x); *(mixtabs + (128 + 1280 + threadIdx.x)) = tex1Dfetch(t2dn2, threadIdx.x + 128); *(mixtabs + (1536 + threadIdx.x)) = tex1Dfetch(t3up2, threadIdx.x); *(mixtabs + (128 + 1536 + threadIdx.x)) = tex1Dfetch(t3up2, threadIdx.x + 128); *(mixtabs + (1792 + threadIdx.x)) = tex1Dfetch(t3dn2, threadIdx.x); *(mixtabs + (128 + 1792 + threadIdx.x)) = tex1Dfetch(t3dn2, threadIdx.x + 128); } #endif uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t nonce = (startNounce + thread); // GROESTL uint32_t message[16]; uint32_t state[16]; #pragma unroll for (int k = 0; k<4; k++) LOHI(message[2*k], message[2*k+1], outputHash[k*threads+thread]); #pragma unroll for (int k = 9; k<15; k++) message[k] = 0; message[8] = 0x80; message[15] = 0x01000000; #pragma unroll 16 for (int u = 0; u<16; u++) state[u] = message[u]; state[15] ^= 0x10000; // Perm #if 
USE_SHARED groestl256_perm_P(thread, state, mixtabs); state[15] ^= 0x10000; groestl256_perm_Q(thread, message, mixtabs); #else groestl256_perm_P(thread, state, NULL); state[15] ^= 0x10000; groestl256_perm_Q(thread, message, NULL); #endif #pragma unroll 16 for (int u = 0; u<16; u++) state[u] ^= message[u]; #pragma unroll 16 for (int u = 0; u<16; u++) message[u] = state[u]; #if USE_SHARED groestl256_perm_P(thread, message, mixtabs); #else groestl256_perm_P(thread, message, NULL); #endif state[15] ^= message[15]; if (state[15] <= backup) { uint32_t tmp = atomicCAS(nonceVector, 0xffffffff, nonce); if (tmp != 0xffffffff) nonceVector[1] = nonce; } } } #define texDef(texname, texmem, texsource, texsize) \ uint32_t *texmem; \ cudaMalloc(&texmem, texsize); \ cudaMemcpy(texmem, texsource, texsize, cudaMemcpyHostToDevice); \ texname.normalized = 0; \ texname.filterMode = cudaFilterModePoint; \ texname.addressMode[0] = cudaAddressModeClamp; \ { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uint32_t>(); \ cudaBindTexture(NULL, &texname, texmem, &channelDesc, texsize ); } \ __host__ void groestl256_cpu_init(int thr_id, uint32_t threads) { // Initialize the textures using the macro above texDef(t0up2, d_T0up, T0up_cpu, sizeof(uint32_t) * 256); texDef(t0dn2, d_T0dn, T0dn_cpu, sizeof(uint32_t) * 256); texDef(t1up2, d_T1up, T1up_cpu, sizeof(uint32_t) * 256); texDef(t1dn2, d_T1dn, T1dn_cpu, sizeof(uint32_t) * 256); texDef(t2up2, d_T2up, T2up_cpu, sizeof(uint32_t) * 256); texDef(t2dn2, d_T2dn, T2dn_cpu, sizeof(uint32_t) * 256); texDef(t3up2, d_T3up, T3up_cpu, sizeof(uint32_t) * 256); texDef(t3dn2, d_T3dn, T3dn_cpu, sizeof(uint32_t) * 256); cudaMalloc(&d_GNonce[thr_id], 2*sizeof(uint32_t)); cudaMallocHost(&d_gnounce[thr_id], 2*sizeof(uint32_t)); } __host__ void groestl256_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *d_outputHash, uint32_t *resultnonces, uint32_t target) { cudaMemset(d_GNonce[thr_id], 0xffffffff, 2 * sizeof(uint32_t)); const uint32_t threadsperblock = 128; // compute how many thread blocks we need dim3 grid((threads + threadsperblock-1)/threadsperblock); dim3 block(threadsperblock); groestl256_gpu_hash32<<<grid, block>>>(threads, startNounce, d_outputHash, d_GNonce[thr_id],target); cudaMemcpy(d_gnounce[thr_id], d_GNonce[thr_id], 2*sizeof(uint32_t), cudaMemcpyDeviceToHost); resultnonces[0] = *(d_gnounce[thr_id]); resultnonces[1] = *(d_gnounce[thr_id] + 1); } /* __host__ void groestl256_setTarget(const void *pTargetIn) { cudaMemcpyToSymbol(pTarget, pTargetIn, 8 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice); } */
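// ---------------------------------------------------------------------------
// A small self-contained sketch (separate from the miner file above) of the
// result convention used by groestl256_gpu_hash32: the nonce buffer is
// pre-filled with 0xffffffff, the first hit is claimed via atomicCAS into
// slot 0, and any later hit overwrites slot 1, so the host reads back two
// slots and treats 0xffffffff as "no hit". collect_candidates() and the
// sample values are illustrative, not part of the code above.
#include <cstdint>
#include <cstdio>

// Copy valid candidate nonces out of the two-slot result buffer.
static int collect_candidates(const uint32_t resultnonces[2], uint32_t out[2])
{
  int n = 0;
  for (int i = 0; i < 2; ++i)
    if (resultnonces[i] != UINT32_MAX)  // 0xffffffff == empty slot
      out[n++] = resultnonces[i];
  return n;  // candidates still need to be re-validated on the CPU
}

int main()
{
  uint32_t resultnonces[2] = { 0x12345678u, UINT32_MAX };  // made-up launch result
  uint32_t found[2];
  int n = collect_candidates(resultnonces, found);
  std::printf("%d candidate(s), first = 0x%08x\n", n, n ? found[0] : 0u);
  return 0;
}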
#include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void PoolingLayer<Dtype, MItype, MOtype>::GenerateProgram() { this->device_program_ = this->device_->CreateProgram(); stringstream ss; ss << this->device_program_->setup(); ss << this->device_program_->template define_type<Dtype>("Dtype"); ss << this->device_program_->template define_type<MItype>("MItype"); ss << this->device_program_->template define_type<MOtype>("MOtype"); #ifdef USE_HALF if (std::is_same<MItype, half_fp>::value) { ss << "#define DTYPE_MAX HALF_MAX" << std::endl; ss << "#define DTYPE_MIN HALF_MIN" << std::endl; } else if (std::is_same<MItype, float>::value || std::is_same<MItype, double>::value) { #endif ss << "#define DTYPE_MAX FLT_MAX" << std::endl; ss << "#define DTYPE_MIN FLT_MIN" << std::endl; #ifdef USE_HALF } else { ss << "#define DTYPE_MAX " << 0 << std::endl; ss << "#define DTYPE_MIN " << 0 << std::endl; } #endif { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "mask", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_mask", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("MaxPoolForwardSK", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "int_tp pw = index % pooled_width;" << std::endl; ss << "int_tp ph = (index / pooled_width) % pooled_height;" 
<< std::endl; ss << "int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "int_tp hstart = ph * stride_h - pad_h;" << std::endl; ss << "int_tp wstart = pw * stride_w - pad_w;" << std::endl; ss << "int_tp hend = min((int_tpc) (hstart + ext_kernel_h)," << " (int_tpc) height);" << std::endl; ss << "int_tp wend = min((int_tpc) (wstart + ext_kernel_w)," << " (int_tpc) width);" << std::endl; ss << "while (hstart < 0) {" << std::endl; ss << "hstart += dilation_h;" << std::endl; ss << "}" << std::endl; ss << "while (wstart < 0) {" << std::endl; ss << "wstart += dilation_w;" << std::endl; ss << "}" << std::endl; ss << "Dtype maxval = -DTYPE_MAX;" << std::endl; ss << "int_tp maxidx = -1;" << std::endl; ss << "bottom_data += (n * channels + c) * height * width;" << std::endl; ss << "for (int_tp h = hstart; h < hend; h += dilation_h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; w += dilation_w) {" << std::endl; ss << "if (bottom_data[h * width + w] > maxval) {" << std::endl; ss << "maxidx = h * width + w;" << std::endl; ss << "maxval = bottom_data[maxidx];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "top_data[index] = maxval;" << std::endl; ss << "if (mask) {" << std::endl; ss << "mask[index] = maxidx;" << std::endl; ss << "} else {" << std::endl; ss << "top_mask[index] = maxidx;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); 
args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("AvePoolForwardSK", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "int_tp pool_size = 0;" << std::endl; ss << "int_tp pw = index % pooled_width;" << std::endl; ss << "int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "int_tp hstart = ph * stride_h - pad_h;" << std::endl; ss << "int_tp wstart = pw * stride_w - pad_w;" << std::endl; ss << "int_tp hend = hstart + ext_kernel_h;" << std::endl; ss << "int_tp wend = wstart + ext_kernel_w;" << std::endl; // Overspill over the image + pad does // not contribute to pool size ss << "while (hend > height + pad_h) {" << std::endl; ss << "hend -= dilation_h;" << std::endl; ss << "}" << std::endl; ss << "while (wend > width + pad_w) {" << std::endl; ss << "wend -= dilation_w;" << std::endl; ss << "}" << std::endl; ss << "Dtype aveval = 0;" << std::endl; ss << "bottom_data += (n * channels + c) * height * width;" << std::endl; ss << "for (int_tp h = hstart; h < hend; h += dilation_h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; w += dilation_w) {" << std::endl; ss << "if (h >= 0 && h < height && w >= 0 && w < width) {" << std::endl; ss << "aveval += bottom_data[h * width + w];" << std::endl; ss << "}" << std::endl; ss << "++pool_size;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "top_data[index] = aveval / ((Dtype)pool_size);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_w", KERNEL_ARG_CONST)); 
args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "rand_idx", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("StoPoolForwardTrainSK", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "int_tp pw = index % pooled_width;" << std::endl; ss << "int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "int_tp hstart = ph * stride_h;" << std::endl; ss << "int_tp hend = min((int_tpc) (hstart + ext_kernel_h)," " (int_tpc) height);" << std::endl; ss << "int_tp wstart = pw * stride_w;" << std::endl; ss << "int_tp wend = min((int_tpc) (wstart + ext_kernel_w)," << " (int_tpc) width);" << std::endl; ss << "Dtype cumsum = 0.;" << std::endl; ss << "bottom_data += (n * channels + c) * height * width;" << std::endl; // First pass: get sum ss << "for (int_tp h = hstart; h < hend; h += dilation_h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; w += dilation_w) {" << std::endl; ss << "cumsum += bottom_data[h * width + w];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "Dtype thres = rand_idx[index] * cumsum;" << std::endl; // Second pass: get value, and set index. ss << "cumsum = 0;" << std::endl; ss << "for (int_tp h = hstart; h < hend; h += dilation_h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; w += dilation_w) {" << std::endl; ss << "cumsum += bottom_data[h * width + w];" << std::endl; ss << "if (cumsum >= thres) {" << std::endl; ss << "rand_idx[index] = ((n * channels + c) * height + h) * width + w;" << std::endl; ss << "top_data[index] = bottom_data[h * width + w];" << std::endl; ss << "return;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template 
create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("StoPoolForwardTestSK", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "int_tp pw = index % pooled_width;" << std::endl; ss << "int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "int_tp hstart = ph * stride_h;" << std::endl; ss << "int_tp hend = min((int_tpc) (hstart + ext_kernel_h)," << " (int_tpc) height);" << std::endl; ss << "int_tp wstart = pw * stride_w;" << std::endl; ss << "int_tp wend = min((int_tpc) (wstart + ext_kernel_w)," << " (int_tpc) width);" << std::endl; // We set cumsum to be 0 to avoid divide-by-zero problems ss << "Dtype cumsum = DTYPE_MIN;" << std::endl; ss << "Dtype cumvalues = 0.;" << std::endl; ss << "bottom_data += (n * channels + c) * height * width;" << std::endl; // First pass: get sum ss << "for (int_tp h = hstart; h < hend; h += dilation_h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; w += dilation_w) {" << std::endl; ss << "cumsum += bottom_data[h * width + w];" << std::endl; ss << "cumvalues += bottom_data[h * width + w]" << " * bottom_data[h * width + w];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "top_data[index] = cumvalues / cumsum;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); 
args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("MaxPoolBackwardSK", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); // find out the local index // find out the local offset ss << "int_tp w = index % width;" << std::endl; ss << "int_tp h = (index / width) % height;" << std::endl; ss << "int_tp c = (index / width / height) % channels;" << std::endl; ss << "int_tp n = index / width / height / channels;" << std::endl; ss << "int_tp phstart =" << " (h + pad_h < ext_kernel_h) ? 0 : (h + pad_h - ext_kernel_h)" << " / stride_h + 1;" << std::endl; ss << "int_tp phend = min((int_tpc) ((h + pad_h) / stride_h + 1L)," << " (int_tpc) pooled_height);" << std::endl; ss << "int_tp pwstart =" << " (w + pad_w < ext_kernel_w) ? 0 : (w + pad_w - ext_kernel_w)" << " / stride_w + 1;" << std::endl; ss << "int_tp pwend = min((int_tpc) ((w + pad_w) / stride_w + 1L)," " (int_tpc) pooled_width);" << std::endl; ss << "Dtype gradient = 0.0;" << std::endl; ss << "int_tp offset = (n * channels + c) * pooled_height * pooled_width;" << std::endl; ss << "top_diff += offset;" << std::endl; ss << "if (mask) {" << std::endl; ss << "mask += offset;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; ss << "if (mask[ph * pooled_width + pw] == h * width + w) {" << std::endl; ss << "gradient += top_diff[ph * pooled_width + pw];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "} else {" << std::endl; ss << "top_mask += offset;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; ss << "if (top_mask[ph * pooled_width + pw] == (Dtype)(h * width + w)) {" << std::endl; ss << "gradient += top_diff[ph * pooled_width + pw];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "bottom_diff[index] = gradient;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template 
create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("AvePoolBackwardSK", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); // find out the local index // find out the local offset ss << "const int_tp w = index % width;" << std::endl; ss << "const int_tp h = (index / width) % height;" << std::endl; ss << "const int_tp c = (index / width / height) % channels;" << std::endl; ss << "const int_tp n = index / width / height / channels;" << std::endl; ss << "int_tp phstart = " << "(h + pad_h < ext_kernel_h) ? 0 :" << "(h + pad_h - ext_kernel_h) / stride_h + 1;" << std::endl; ss << "int_tp phend = min(((h + pad_h) / stride_h + 1), pooled_height);" << std::endl; ss << "int_tp pwstart = " << "(w + pad_w < ext_kernel_w) ? 
0 :" << "(w + pad_w - ext_kernel_w) / stride_w + 1;" << std::endl; ss << "int_tp pwend = min(((w + pad_w) / stride_w + 1), pooled_width);" << std::endl; ss << "Dtype gradient = 0.0;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "top_diff_slice") << " = top_diff + (n * channels + c) * pooled_height * pooled_width;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; // figure out the pooling size ss << "int_tp hstart = ph * stride_h - pad_h;" << std::endl; ss << "int_tp wstart = pw * stride_w - pad_w;" << std::endl; ss << "int_tp hend = min(hstart + ext_kernel_h, height + pad_h);" << std::endl; ss << "int_tp wend = min(wstart + ext_kernel_w, width + pad_w);" << std::endl; ss << "int_tp pool_size =" << "((hend - hstart - 1) / dilation_h + 1) *" << "((wend - wstart - 1) / dilation_w + 1);" << std::endl; ss << "if (h >= hstart && h < hend &&" << "(h - hstart) % dilation_h == 0 &&" << "w >= wstart && w < wend &&" << "(w - wstart) % dilation_w == 0) {" << std::endl; ss << "gradient += top_diff_slice[ph * pooled_width + pw]" << " / ((Dtype)pool_size);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "bottom_diff[index] = gradient;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "mask", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_mask", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("MaxPoolForward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "const int_tp pw = index % pooled_width;" << std::endl; ss << "const int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "const int_tp c = (index / pooled_width / 
pooled_height) % channels;" << std::endl; ss << "const int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "int_tp hstart = ph * stride_h - pad_h;" << std::endl; ss << "int_tp wstart = pw * stride_w - pad_w;" << std::endl; ss << "const int_tp hend = min((int_tpc) (hstart + kernel_h)," << " (int_tpc) height);" << std::endl; ss << "const int_tp wend = min((int_tpc) (wstart + kernel_w)," << " (int_tpc) width);" << std::endl; ss << "hstart = max((int_tpc) (hstart), (int_tpc) (0));" << std::endl; ss << "wstart = max((int_tpc) (wstart), (int_tpc) (0));" << std::endl; ss << "Dtype maxval = -DTYPE_MAX;" << std::endl; ss << "int_tp maxidx = -1;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "bottom_slice") << " = bottom_data + (n * channels + c) * height * width;" << std::endl; ss << "for (int_tp h = hstart; h < hend; ++h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; ++w) {" << std::endl; ss << "if (bottom_slice[h * width + w] > maxval) {" << std::endl; ss << "maxidx = h * width + w;" << std::endl; ss << "maxval = bottom_slice[maxidx];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "top_data[index] = maxval;" << std::endl; ss << "if (mask) {" << std::endl; ss << "mask[index] = maxidx;" << std::endl; ss << "} else {" << std::endl; ss << "top_mask[index] = maxidx;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("AvePoolForward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "const int_tp pw = index % pooled_width;" << std::endl; ss << "const int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "const int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "const int_tp n = index / pooled_width / pooled_height / channels;" << 
std::endl; ss << "int_tp hstart = ph * stride_h - pad_h;" << std::endl; ss << "int_tp wstart = pw * stride_w - pad_w;" << std::endl; ss << "int_tp hend = min((int_tpc) (hstart + kernel_h)," << " (int_tpc) (height + pad_h));" << std::endl; ss << "int_tp wend = min((int_tpc) (wstart + kernel_w)," << " (int_tpc) (width + pad_w));" << std::endl; ss << "const int_tp pool_size = (hend - hstart) * (wend - wstart);" << std::endl; ss << "hstart = max((int_tpc) (hstart), (int_tpc) (0));" << std::endl; ss << "wstart = max((int_tpc) (wstart), (int_tpc) (0));" << std::endl; ss << "hend = min((int_tpc) (hend), (int_tpc) (height));" << std::endl; ss << "wend = min((int_tpc) (wend), (int_tpc) (width));" << std::endl; ss << "Dtype aveval = 0;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "bottom_slice") << " = bottom_data + (n * channels + c) * height * width;" << std::endl; ss << "for (int_tp h = hstart; h < hend; ++h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; ++w) {" << std::endl; ss << "aveval += bottom_slice[h * width + w];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "top_data[index] = aveval / ((Dtype)pool_size);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "rand_idx", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("StoPoolForwardTrain", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "const int_tp pw = index % pooled_width;" << std::endl; ss << "const int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "const int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "const int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "const int_tp hstart = ph * stride_h;" << std::endl; ss << "const int_tp hend = min((int_tpc) (hstart + kernel_h)," << " (int_tpc) height);" << std::endl; ss << "const int_tp wstart = pw * stride_w;" << std::endl; ss << "const int_tp wend = min((int_tpc) (wstart + kernel_w)," << " (int_tpc) width);" 
<< std::endl; ss << "Dtype cumsum = 0.;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "bottom_slice") << " = bottom_data + (n * channels + c) * height * width;" << std::endl; // First pass: get sum ss << "for (int_tp h = hstart; h < hend; ++h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; ++w) {" << std::endl; ss << "cumsum += bottom_slice[h * width + w];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "const float thres = rand_idx[index] * cumsum;" << std::endl; // Second pass: get value, and set index. ss << "cumsum = 0;" << std::endl; ss << "for (int_tp h = hstart; h < hend; ++h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; ++w) {" << std::endl; ss << "cumsum += bottom_slice[h * width + w];" << std::endl; ss << "if (cumsum >= ((Dtype)thres)) {" << std::endl; ss << "rand_idx[index] = ((n * channels + c) * height + h) * width + w;" << std::endl; ss << "top_data[index] = bottom_slice[h * width + w];" << std::endl; ss << "return;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("StoPoolForwardTest", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "const int_tp pw = index % pooled_width;" << std::endl; ss << "const int_tp ph = (index / pooled_width) % pooled_height;" << std::endl; ss << "const int_tp c = (index / pooled_width / pooled_height) % channels;" << std::endl; ss << "const int_tp n = index / pooled_width / pooled_height / channels;" << std::endl; ss << "const int_tp hstart = ph * stride_h;" << std::endl; ss << "const int_tp hend = min((int_tpc) (hstart + kernel_h)," << " (int_tpc) height);" << std::endl; ss << "const int_tp wstart = pw * stride_w;" << std::endl; ss << "const int_tp wend = min((int_tpc) (wstart + kernel_w)," << " (int_tpc) width);" << std::endl; // We set cumsum to be 0 to avoid divide-by-zero problems ss << "Dtype cumsum = 0.;" << std::endl; ss << "Dtype cumvalues = 0.;" << std::endl; ss << this->device_program_->global_ptr("const 
Dtype", "bottom_slice") << " = bottom_data + (n * channels + c) * height * width;" << std::endl; // First pass: get sum ss << "for (int_tp h = hstart; h < hend; ++h) {" << std::endl; ss << "for (int_tp w = wstart; w < wend; ++w) {" << std::endl; ss << "cumsum += bottom_slice[h * width + w];" << std::endl; ss << "cumvalues += bottom_slice[h * width + w]" << " * bottom_slice[h * width + w];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "top_data[index] = (cumsum > (Dtype)(0.0)) ? " << "cumvalues / cumsum : (Dtype)(0.0);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("MaxPoolBackward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); // find out the local index // find out the local offset ss << "const int_tp w = index % width;" << std::endl; ss << "const int_tp h = (index / width) % height;" << std::endl; ss << "const int_tp c = (index / width / height) % channels;" << std::endl; ss << "const int_tp n = index / width / height / channels;" << std::endl; ss << "const int_tp phstart =" << "(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;" << std::endl; ss << "const int_tp phend = min((int_tpc) ((h + pad_h) / stride_h + 1L)," << "(int_tpc) pooled_height);" << std::endl; ss << "const int_tp pwstart =" << "(w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1;" << std::endl; ss << "const int_tp pwend = min((int_tpc) ((w + pad_w) / stride_w + 1L)," << " (int_tpc) pooled_width);" << std::endl; ss << "Dtype gradient = 0;" << std::endl; ss << "const int_tp offset = (n * channels + c)" << " * pooled_height * pooled_width;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "top_diff_slice") << " = top_diff + offset;" << std::endl; ss << "if (mask) {" << std::endl; ss << this->device_program_->global_ptr("const int_tp", "mask_slice") << " = mask + offset;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; ss << "if (mask_slice[ph * pooled_width + pw] == h * width + w) {" << std::endl; ss << "gradient += top_diff_slice[ph * pooled_width + pw];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "} else {" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "top_mask_slice") << " = top_mask + offset;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; ss << "if (top_mask_slice[ph * pooled_width + pw] == " << "(Dtype)(h * width + w)) {" << std::endl; ss << "gradient += top_diff_slice[ph * pooled_width + pw];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "bottom_diff[index] = gradient;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("AvePoolBackward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); // find out the local index // find out the local offset ss << "const int_tp w = index % width + pad_w;" << std::endl; ss << "const int_tp h = (index / width) % height + pad_h;" << std::endl; 
ss << "const int_tp c = (index / width / height) % channels;" << std::endl; ss << "const int_tp n = index / width / height / channels;" << std::endl; ss << "const int_tp phstart = (h < kernel_h) ? 0" << " : (h - kernel_h) / stride_h + 1;" << std::endl; ss << "const int_tp phend = min((int_tpc) (h / stride_h + 1)," << " (int_tpc) (pooled_height));" << std::endl; ss << "const int_tp pwstart = (w < kernel_w) ? 0" << " : (w - kernel_w) / stride_w + 1;" << std::endl; ss << "const int_tp pwend = min((int_tpc) (w / stride_w + 1)," << " (int_tpc) (pooled_width));" << std::endl; ss << "Dtype gradient = 0;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "top_diff_slice") << " = top_diff + (n * channels + c) * pooled_height * pooled_width;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; // figure out the pooling size ss << "int_tp hstart = ph * stride_h - pad_h;" << std::endl; ss << "int_tp wstart = pw * stride_w - pad_w;" << std::endl; ss << "int_tp hend = min((int_tpc) (hstart + kernel_h)," << " (int_tpc) (height + pad_h));" << std::endl; ss << "int_tp wend = min((int_tpc) (wstart + kernel_w)," << " (int_tpc) (width + pad_w));" << std::endl; ss << "int_tp pool_size = (hend - hstart) * (wend - wstart);" << std::endl; ss << "gradient += top_diff_slice[ph * pooled_width + pw]" << " / ((Dtype)pool_size);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "bottom_diff[index] = gradient;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "nthreads", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "rand_idx", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_height", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_width", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_h", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride_w", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("StoPoolBackward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); // find out the local index // find out the local offset ss << "const int_tp w = index % width;" << std::endl; ss << "const int_tp h = (index / width) % height;" << std::endl; ss << "const int_tp c = (index / width / height) % channels;" << std::endl; ss 
<< "const int_tp n = index / width / height / channels;" << std::endl; ss << "const int_tp phstart = (h < kernel_h) ? 0" << " : (h - kernel_h) / stride_h + 1;" << std::endl; ss << "const int_tp phend = min((int_tpc) (h / stride_h + 1)," << " (int_tpc) pooled_height);" << std::endl; ss << "const int_tp pwstart = (w < kernel_w) ? 0" << " : (w - kernel_w) / stride_w + 1;" << std::endl; ss << "const int_tp pwend = min((int_tpc) (w / stride_w + 1)," << " (int_tpc) pooled_width);" << std::endl; ss << "Dtype gradient = 0;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "rand_idx_slice") << " = rand_idx + (n * channels + c) * pooled_height * pooled_width;" << std::endl; ss << this->device_program_->global_ptr("const Dtype", "top_diff_slice") << " = top_diff + (n * channels + c) * pooled_height * pooled_width;" << std::endl; ss << "for (int_tp ph = phstart; ph < phend; ++ph) {" << std::endl; ss << "for (int_tp pw = pwstart; pw < pwend; ++pw) {" << std::endl; ss << "gradient += top_diff_slice[ph * pooled_width + pw]" << " * (index == (int_tpc)(rand_idx_slice[ph * pooled_width + pw]) ?" << " (Dtype)1.0 : (Dtype)0.0);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "bottom_diff[index] = gradient;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "n", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num_axes", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_data", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "mask", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_mask", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("MaxPoolForwardND", args); ss << "int_tp d_idx[6];" << std::endl; ss << "int_tp d_start[6];" << std::endl; ss << "int_tp d_end[6];" << std::endl; ss << "int_tp d_iter[6];" << std::endl; ss << "int_tp i;" << std::endl; ss << this->device_program_->kernel_loop("uint_tp", "index", "n"); ss << "int_tp offset = 1;" << std::endl; ss << "int_tp num = index;" << std::endl; ss << "for (i = num_axes - 1; i >= 0; --i) {" << std::endl; ss << "d_idx[i] = num % pooled_size[i];" << std::endl; ss << "d_start[i] = 
d_idx[i] * stride[i] - pad[i];" << std::endl; ss << "d_end[i] = min((int_tpc) (d_start[i] + ext_kernel_size[i])," << " (int_tpc) (size[i]));" << std::endl; ss << "while (d_start[i] < 0) {" << std::endl; ss << "d_start[i] += dilation[i];" << std::endl; ss << "}" << std::endl; ss << "num /= pooled_size[i];" << std::endl; ss << "offset *= size[i];" << std::endl; ss << "d_iter[i] = d_start[i];" << std::endl; ss << "if (d_start[i] >= d_end[i]) {" << std::endl; ss << "top_data[index] = -DTYPE_MAX;" << std::endl; ss << "if (mask) {" << std::endl; ss << "mask[index] = -1;" << std::endl; ss << "} else {" << std::endl; ss << "top_mask[index] = -1;" << std::endl; ss << "}" << std::endl; ss << "return;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "int_tp chan = num % channels;" << std::endl; ss << "num /= channels;" << std::endl; ss << "offset *= (num * channels + chan);" << std::endl; ss << "Dtype maxval = -DTYPE_MAX;" << std::endl; ss << "int_tp maxidx = -1;" << std::endl; ss << "int_tp final_offset = 0;" << std::endl; ss << "bool incremented;" << std::endl; ss << "do {" << std::endl; ss << "final_offset = 0;" << std::endl; ss << "int_tp size_prod = 1;" << std::endl; ss << "for (i = num_axes - 1; i >= 0; --i) {" << std::endl; ss << "final_offset += d_iter[i] * size_prod;" << std::endl; ss << "size_prod *= size[i];" << std::endl; ss << "}" << std::endl; ss << "if (bottom_data[final_offset + offset] > maxval) {" << std::endl; ss << "maxidx = final_offset;" << std::endl; ss << "maxval = bottom_data[offset + final_offset];" << std::endl; ss << "}" << std::endl; ss << "incremented = false;" << std::endl; ss << "for (i = num_axes - 1; i >= 0; --i) {" << std::endl; ss << "if (d_iter[i] >= d_end[i] - dilation[i]) {" << std::endl; ss << "d_iter[i] = d_start[i];" << std::endl; ss << "} else {" << std::endl; ss << "d_iter[i] += dilation[i];" << std::endl; ss << "incremented = true;" << std::endl; ss << "break;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "} while (incremented);" << std::endl; ss << "top_data[index] = maxval;" << std::endl; ss << "if (mask) {" << std::endl; ss << "mask[index] = maxidx;" << std::endl; ss << "} else {" << std::endl; ss << "top_mask[index] = maxidx;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "n", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num_axes", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top_mask", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pooled_size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "kernel_size", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ext_kernel_size", KERNEL_ARG_CONST | 
KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "stride", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dilation", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "pad", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("MaxPoolBackwardND", args); ss << "int_tp d_idx[6];" << std::endl; ss << "int_tp d_start[6];" << std::endl; ss << "int_tp d_end[6];" << std::endl; ss << "int_tp d_iter[6];" << std::endl; ss << "int_tp i;" << std::endl; ss << this->device_program_->kernel_loop("uint_tp", "index", "n"); // find out the local index // find out the local offset ss << "int_tp offset = 1;" << std::endl; ss << "int_tp num = index;" << std::endl; ss << "for (i = num_axes - 1; i >= 0; --i) {" << std::endl; ss << "d_idx[i] = num % size[i];" << std::endl; ss << "d_start[i] =" << " (d_idx[i] + pad[i] < ext_kernel_size[i]) ?" << " 0L : (d_idx[i] + pad[i] - ext_kernel_size[i]) / stride[i] + 1L;" << std::endl; ss << "d_end[i] = min((int_tpc) ((d_idx[i] + pad[i]) / stride[i])," << " (int_tpc) (pooled_size[i] - 1L));" << std::endl; ss << "num /= size[i];" << std::endl; ss << "offset *= pooled_size[i];" << std::endl; ss << "d_iter[i] = d_start[i];" << std::endl; ss << "if (d_start[i] > d_end[i]) {" << std::endl; ss << "bottom_diff[index] = 0;" << std::endl; ss << "return;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "int_tp chan = num % channels;" << std::endl; ss << "num /= channels;" << std::endl; ss << "offset *= (num * channels + chan);" << std::endl; ss << "Dtype gradient = 0.0;" << std::endl; ss << "int_tp final_offset = 0;" << std::endl; ss << "int_tp im_offset = 0;" << std::endl; ss << "bool incremented;" << std::endl; ss << "do {" << std::endl; ss << "final_offset = offset;" << std::endl; ss << "im_offset = 0;" << std::endl; ss << "int_tp size_prod = 1;" << std::endl; ss << "int_tp pooled_size_prod = 1;" << std::endl; ss << "for (i = num_axes - 1; i >= 0; --i) {" << std::endl; ss << "final_offset += d_iter[i] * pooled_size_prod;" << std::endl; ss << "im_offset += d_idx[i] * size_prod;" << std::endl; ss << "size_prod *= size[i];" << std::endl; ss << "pooled_size_prod *= pooled_size[i];" << std::endl; ss << "}" << std::endl; ss << "if (mask) {" << std::endl; ss << "if (mask[final_offset] == im_offset) {" << std::endl; ss << "gradient += top_diff[final_offset];" << std::endl; ss << "}" << std::endl; ss << "} else {" << std::endl; ss << "if (top_mask[final_offset] == (Dtype)im_offset) {" << std::endl; ss << "gradient += top_diff[final_offset];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "incremented = false;" << std::endl; ss << "for (i = num_axes - 1; i >= 0; --i) {" << std::endl; ss << "if (d_iter[i] >= d_end[i]) {" << std::endl; ss << "d_iter[i] = d_start[i];" << std::endl; ss << "} else {" << std::endl; ss << "++d_iter[i];" << std::endl; ss << "incremented = true;" << std::endl; ss << "break;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "} while (incremented);" << std::endl; ss << "bottom_diff[index] = gradient;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } this->device_program_->set_source(ss.str()); this->device_program_->Compile(true, true); } template<typename Dtype, 
typename MItype, typename MOtype> void PoolingLayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { vptr<const Dtype> bottom_data = bottom[0]->gpu_data(); vptr<Dtype> top_data = top[0]->mutable_gpu_data(); uint_tp count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; vptr<int_tp> mask; vptr<Dtype> top_mask; if (num_spatial_axes_ == 2) { int_tp kernel_h = kernel_shape_.cpu_data()[0]; int_tp kernel_w = kernel_shape_.cpu_data()[1]; int_tp stride_h = stride_.cpu_data()[0]; int_tp stride_w = stride_.cpu_data()[1]; int_tp pad_h = pad_.cpu_data()[0]; int_tp pad_w = pad_.cpu_data()[1]; int_tp dilation_h = dilation_.cpu_data()[0]; int_tp dilation_w = dilation_.cpu_data()[1]; int_tp num = bottom[0]->shape(0); int_tp height = size_.cpu_data()[0]; int_tp width = size_.cpu_data()[1]; int_tp pooled_height = pooled_size_.cpu_data()[0]; int_tp pooled_width = pooled_size_.cpu_data()[1]; int_tp ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; int_tp ext_kernel_w = ext_kernel_shape_.cpu_data()[1]; // 2D case if (use_skernel_) { // 2D-SK case switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("MaxPoolForwardSK"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&ext_kernel_h); kernel->add_arg(&ext_kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&dilation_h); kernel->add_arg(&dilation_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&top_data); kernel->add_arg(&mask); kernel->add_arg(&top_mask); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_AVE: { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("AvePoolForwardSK"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&ext_kernel_h); kernel->add_arg(&ext_kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&dilation_h); kernel->add_arg(&dilation_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_STOCHASTIC: { if (this->phase_ == caffe::TRAIN) { // We need to create the random index as well. 
this->device_->template rng_uniform<Dtype>(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("StoPoolForwardTrainSK"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&ext_kernel_h); kernel->add_arg(&ext_kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&dilation_h); kernel->add_arg(&dilation_w); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } else { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("StoPoolForwardTestSK"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&ext_kernel_h); kernel->add_arg(&ext_kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&dilation_h); kernel->add_arg(&dilation_w); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } break; } default: { LOG(FATAL)<< "Unknown pooling method."; } } } else { // 2D case switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("MaxPoolForward"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&top_data); kernel->add_arg(&mask); kernel->add_arg(&top_mask); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_AVE: { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("AvePoolForward"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_STOCHASTIC: { if (this->phase_ == TRAIN) { // We need to create the random index as well. 
this->device_->template rng_uniform<Dtype>(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); vptr<Dtype> rand_idx_data = rand_idx_.mutable_gpu_data(); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("StoPoolForwardTrain"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&rand_idx_data); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } else { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("StoPoolForwardTest"); kernel->add_arg(&count); kernel->add_arg(&bottom_data); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&top_data); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } break; } default: { LOG(FATAL)<< "Unknown pooling method."; } } } } else { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } vptr<const int_tp> size_data = size_.gpu_data(); vptr<const int_tp> pooled_size_data = pooled_size_.gpu_data(); vptr<const int_tp> kernel_shape_data = kernel_shape_.gpu_data(); vptr<const int_tp> ext_kernel_shape_data = ext_kernel_shape_.gpu_data(); vptr<const int_tp> stride_data = stride_.gpu_data(); vptr<const int_tp> dilation_data = dilation_.gpu_data(); vptr<const int_tp> pad_data = pad_.gpu_data(); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("MaxPoolForwardND"); kernel->add_arg(&count); kernel->add_arg(&num_spatial_axes_); kernel->add_arg(&bottom_data); kernel->add_arg(&channels_); kernel->add_arg(&size_data); kernel->add_arg(&pooled_size_data); kernel->add_arg(&kernel_shape_data); kernel->add_arg(&ext_kernel_shape_data); kernel->add_arg(&stride_data); kernel->add_arg(&dilation_data); kernel->add_arg(&pad_data); kernel->add_arg(&top_data); kernel->add_arg(&mask); kernel->add_arg(&top_mask); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } default: { LOG(FATAL)<< "Unknown pooling method."; } } } } template<typename Dtype, typename MItype, typename MOtype> void PoolingLayer<Dtype, MItype, MOtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { vptr<const Dtype> top_diff = top[0]->gpu_diff(); vptr<Dtype> bottom_diff = bottom[0]->mutable_gpu_diff(); const int_tp count = bottom[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; vptr<const int_tp> mask; vptr<const Dtype> top_mask; this->device_->set(count, Dtype(0.), bottom_diff); if (num_spatial_axes_ == 2) { int_tp kernel_h = kernel_shape_.cpu_data()[0]; int_tp kernel_w = kernel_shape_.cpu_data()[1]; int_tp stride_h = stride_.cpu_data()[0]; int_tp stride_w = stride_.cpu_data()[1]; int_tp pad_h = pad_.cpu_data()[0]; int_tp pad_w = pad_.cpu_data()[1]; int_tp dilation_h = dilation_.cpu_data()[0]; int_tp dilation_w = dilation_.cpu_data()[1]; int_tp num = top[0]->shape(0); int_tp height = size_.cpu_data()[0]; int_tp width = size_.cpu_data()[1]; int_tp pooled_height = pooled_size_.cpu_data()[0]; int_tp pooled_width = pooled_size_.cpu_data()[1]; int_tp ext_kernel_h = ext_kernel_shape_.cpu_data()[0]; int_tp ext_kernel_w = ext_kernel_shape_.cpu_data()[1]; if (use_skernel_) { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("MaxPoolBackwardSK"); kernel->add_arg(&count); kernel->add_arg(&top_diff); kernel->add_arg(&mask); kernel->add_arg(&top_mask); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&ext_kernel_h); kernel->add_arg(&ext_kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&dilation_h); kernel->add_arg(&dilation_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_AVE: { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("AvePoolBackwardSK"); kernel->add_arg(&count); kernel->add_arg(&top_diff); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&ext_kernel_h); kernel->add_arg(&ext_kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&dilation_h); kernel->add_arg(&dilation_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } default: { LOG(FATAL)<< "Unknown or unsupported pooling method in Backward_gpu()."; } } } else { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("MaxPoolBackward"); kernel->add_arg(&count); kernel->add_arg(&top_diff); kernel->add_arg(&mask); kernel->add_arg(&top_mask); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&pad_h); 
kernel->add_arg(&pad_w); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_AVE: { shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("AvePoolBackward"); kernel->add_arg(&count); kernel->add_arg(&top_diff); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&pad_h); kernel->add_arg(&pad_w); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } case PoolingParameter_PoolMethod_STOCHASTIC: { vptr<const Dtype> rand_idx_data = rand_idx_.gpu_data(); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("StoPoolBackward"); kernel->add_arg(&count); kernel->add_arg(&rand_idx_data); kernel->add_arg(&top_diff); kernel->add_arg(&num); kernel->add_arg(&channels_); kernel->add_arg(&height); kernel->add_arg(&width); kernel->add_arg(&pooled_height); kernel->add_arg(&pooled_width); kernel->add_arg(&kernel_h); kernel->add_arg(&kernel_w); kernel->add_arg(&stride_h); kernel->add_arg(&stride_w); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } default: { LOG(FATAL) << "Unknown pooling method."; } } } } else { switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: { if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } vptr<const int_tp> size_data = size_.gpu_data(); vptr<const int_tp> pooled_size_data = pooled_size_.gpu_data(); vptr<const int_tp> kernel_shape_data = kernel_shape_.gpu_data(); vptr<const int_tp> ext_kernel_shape_data = ext_kernel_shape_.gpu_data(); vptr<const int_tp> stride_data = stride_.gpu_data(); vptr<const int_tp> dilation_data = dilation_.gpu_data(); vptr<const int_tp> pad_data = pad_.gpu_data(); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("MaxPoolBackwardND"); kernel->add_arg(&count); kernel->add_arg(&num_spatial_axes_); kernel->add_arg(&top_diff); kernel->add_arg(&mask); kernel->add_arg(&top_mask); kernel->add_arg(&channels_); kernel->add_arg(&size_data); kernel->add_arg(&pooled_size_data); kernel->add_arg(&kernel_shape_data); kernel->add_arg(&ext_kernel_shape_data); kernel->add_arg(&stride_data); kernel->add_arg(&dilation_data); kernel->add_arg(&pad_data); kernel->add_arg(&bottom_diff); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); break; } default: { LOG(FATAL) << "Unknown or unsupported pooling method in Backward_gpu()."; } } } } INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (half_fp), (half_fp), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (float), (float), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (double), (double), PROTO_TYPES); 
INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (uint8_t), (uint8_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (uint16_t), (uint16_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (uint32_t), (uint32_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, GenerateProgram, (uint64_t), (uint64_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (half_fp), (half_fp), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (float), (float), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (double), (double), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (uint8_t), (uint8_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (uint16_t), (uint16_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (uint32_t), (uint32_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Forward_gpu, (uint64_t), (uint64_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (half_fp), (half_fp), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (float), (float), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (double), (double), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (uint8_t), (uint8_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (uint16_t), (uint16_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (uint32_t), (uint32_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PoolingLayer, Backward_gpu, (uint64_t), (uint64_t), PROTO_TYPES); } // namespace caffe
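// --- Hedged reference sketch (added for clarity; not part of the Caffe source above) ---
// The AvePoolBackward kernel is emitted above as a string, which makes its index
// arithmetic hard to follow. The host-side function below mirrors that arithmetic
// one-to-one for the plain 2D case (no dilation): each bottom element (n, c, h, w)
// gathers top_diff from every pooled window that covers it, scaled by that window's
// pool_size. Function and parameter names are illustrative only.
#include <algorithm>

static void ave_pool_backward_cpu_sketch(
    const float* top_diff, float* bottom_diff,
    int num, int channels, int height, int width,
    int pooled_height, int pooled_width,
    int kernel_h, int kernel_w, int stride_h, int stride_w,
    int pad_h, int pad_w) {
  const int count = num * channels * height * width;
  for (int index = 0; index < count; ++index) {
    // Decompose the flat index exactly as the generated kernel does,
    // shifting h and w into padded coordinates.
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of pooled outputs whose window contains (h, w).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = std::min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = std::min(w / stride_w + 1, pooled_width);
    float gradient = 0;
    const float* top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // pool_size counts padded positions, matching the forward average.
        const int hstart = ph * stride_h - pad_h;
        const int wstart = pw * stride_w - pad_w;
        const int hend = std::min(hstart + kernel_h, height + pad_h);
        const int wend = std::min(wstart + kernel_w, width + pad_w);
        const int pool_size = (hend - hstart) * (wend - wstart);
        gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
      }
    }
    bottom_diff[index] = gradient;
  }
}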
#include "common.h" #include "polish_E.cu" #include "kernel_functions.cu" /* * CUDA macros, constants and functions */ const int subset_size = 5; const unsigned long long seed = 1234; #define CudaErrorCheck(ans) {__CudaErrorCheck((ans), __FILE__, __LINE__);} void __CudaErrorCheck(cudaError_t code, const char* file, int line) { if (code != cudaSuccess) { std::cout << "CUDA Error (" << file << ":" << line << "): " << cudaGetErrorString(code) << std::endl; exit(code); } } /* * Decompose essential matrix into angles */ at::Tensor EssentialMatrixDecompose( at::Tensor Emat) { double *E_ptr = Emat.data_ptr<double>(); Ematrix E; double parameter[5]; memcpy(E, E_ptr, 3 * 3 * sizeof(double)); Edecomp(E, parameter); at::Tensor parameters = at::empty(5, Emat.options()); double *parameter_ptr = parameters.data_ptr<double>(); memcpy(parameter_ptr, parameter, 5 * sizeof(double)); return parameters; } /* * Decompose essential matrix into UI^VT, doing SVD. */ std::tuple<at::Tensor, at::Tensor> EssentialMatrixDecomposeUV( at::Tensor Emat) { double *E_ptr = Emat.data_ptr<double>(); Ematrix E; Ematrix U; Ematrix V; memcpy(E, E_ptr, 3 * 3 * sizeof(double)); Edecomp(E, U, V); at::Tensor Umat = at::empty(3 * 3, Emat.options()); at::Tensor Vmat = at::empty(3 * 3, Emat.options()); double *U_ptr = Umat.data_ptr<double>(); memcpy(U_ptr, U, 3 * 3 * sizeof(double)); double *V_ptr = Vmat.data_ptr<double>(); memcpy(V_ptr, V, 3 * 3 * sizeof(double)); Umat.resize_({3,3}); Vmat.resize_({3,3}); auto t = std::make_tuple(Umat, Vmat); return t; } /* * Five point algorithm cuda optimization using robust cost functions */ at::Tensor EssentialMatrixOptimise( at::Tensor input1, // input 1 has size nx2, type double at::Tensor input2, at::Tensor initial_essential_matrix, const double delta, const double alpha, const int max_iterations) { auto clock_begin = std::chrono::steady_clock::now(); const int num_points = input1.size(0); // Input data pointers double *input1_ptr = input1.data_ptr<double>(); double *input2_ptr = input2.data_ptr<double>(); double *essential_matrix_ptr = initial_essential_matrix.data_ptr<double>(); Ematrix E_in; memcpy(E_in, essential_matrix_ptr, 3 * 3 * sizeof(double)); polish_E_robust_parametric(E_in, input1_ptr, input2_ptr, num_points, delta, alpha, max_iterations); at::Tensor E_out = at::empty(3 * 3, initial_essential_matrix.options()); double* outptr = E_out.data_ptr<double>(); memcpy(outptr, E_in, 3 * 3 * sizeof(double)); E_out.resize_({3,3}); // std::cout << "Runtime (Optimise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl; return E_out; //E_optimised } /* * Five point algorithm cuda initialization */ at::Tensor EssentialMatrixInitialise( at::Tensor input1, // input 1 has size nx2, type double at::Tensor input2, const int num_test_points, // 10 const int num_ransac_test_points, // 1000 const int num_ransac_iterations, // number of iterations to run RANSAC const double inlier_threshold) { auto clock_begin = std::chrono::steady_clock::now(); const int num_points = input1.size(0); const int num_threads_per_block = 64; const int num_blocks = 8; const int num_threads = num_blocks * num_threads_per_block; // CUDA Setup // Set GPU to use // int device = 0; cudaSetDevice(input1.get_device()); // cudaSetDevice(input1.get_device()); // Input data pointer (on GPU) double *input1_ptr = input1.data_ptr<double>(); double *input2_ptr = input2.data_ptr<double>(); int *num_inliers; double (*essential_matrices)[3][3]; curandState* state; 
CudaErrorCheck(cudaMallocManaged((void **) &num_inliers, num_threads * sizeof(int))); CudaErrorCheck(cudaMallocManaged((void **) &essential_matrices, num_threads * 3 * 3 * sizeof(double))); CudaErrorCheck(cudaMallocManaged((void **) &state, num_threads * sizeof(curandState))); // Copy constants to device constant memory CudaErrorCheck(cudaMemcpyToSymbol(c_num_points, &num_points, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_test_points, &num_ransac_test_points, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_iterations, &num_ransac_iterations, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double))); // Generate random states, one for each thread SetupRandomState<<<num_blocks, num_threads_per_block>>>(seed, state); auto clock_begin_kernel = std::chrono::steady_clock::now(); EstimateEssentialMatrix<subset_size><<<num_blocks, num_threads_per_block>>>( input1_ptr, // Two sets of matching points input2_ptr, // (flattened 2D arrays) state, // Random number generator state num_inliers, // Number of inliers per thread essential_matrices); // Essential matrices per thread CudaErrorCheck(cudaPeekAtLastError()); // Check for kernel launch error CudaErrorCheck(cudaDeviceSynchronize()); // Check for kernel execution error // std::cout << "Runtime (Initialise, Kernel Only): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin_kernel).count() << "s" << std::endl; int ind_max = distance(num_inliers, max_element(num_inliers, num_inliers + num_threads)); // cout << "The largest element is " << ind_max << '\n'; cout << "The number of inliers: " << num_inliers[ind_max] << '\n'; at::Tensor E_out = at::empty(3 * 3, input1.options()); double* dataptr = E_out.data_ptr<double>(); CudaErrorCheck(cudaMemcpy(dataptr, &essential_matrices[ind_max], sizeof(essential_matrices[ind_max]), cudaMemcpyDeviceToDevice)); CudaErrorCheck(cudaFree(num_inliers)); CudaErrorCheck(cudaFree(essential_matrices)); CudaErrorCheck(cudaFree(state)); E_out.resize_({3, 3}); // std::cout << "Runtime (Initialise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl; return E_out; } /* * Five point algorithm cuda initialization */ std::tuple<at::Tensor, at::Tensor,int> ProjectionMatrixRansac( at::Tensor input1, // input 1 has size nx2, type double at::Tensor input2, const int num_test_points, // 10 const int num_ransac_test_points, // 1000 const int num_ransac_iterations, // number of iterations to run RANSAC const double inlier_threshold) { // auto clock_begin = std::chrono::steady_clock::now(); const int num_points = input1.size(0); const int num_threads_per_block = 64; const int num_blocks = 8; const int num_threads = num_blocks * num_threads_per_block; // CUDA Setup // Set GPU to use // int device = 0; // CudaErrorCheck(cudaSetDevice(device)); cudaSetDevice(input1.get_device()); // cudaSetDevice(input1.get_device()); // Input data pointer (on GPU) double *input1_ptr = input1.data_ptr<double>(); double *input2_ptr = input2.data_ptr<double>(); int *num_inliers; double (*essential_matrices)[3][3]; double (*projection_matrices)[3][4]; curandState* state; CudaErrorCheck(cudaMallocManaged((void **) &num_inliers, num_threads * sizeof(int))); CudaErrorCheck(cudaMallocManaged((void **) &essential_matrices, num_threads * 3 * 3 * sizeof(double))); CudaErrorCheck(cudaMallocManaged((void **) 
&projection_matrices, num_threads * 3 * 4 * sizeof(double))); CudaErrorCheck(cudaMallocManaged((void **) &state, num_threads * sizeof(curandState))); // Copy constants to device constant memory CudaErrorCheck(cudaMemcpyToSymbol(c_num_points, &num_points, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_test_points, &num_ransac_test_points, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_iterations, &num_ransac_iterations, sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double))); // Generate random states, one for each thread SetupRandomState<<<num_blocks, num_threads_per_block>>>(seed, state); // auto clock_begin_kernel = std::chrono::steady_clock::now(); EstimateProjectionMatrix<subset_size><<<num_blocks, num_threads_per_block>>>( input1_ptr, // Two sets of matching points input2_ptr, // (flattened 2D arrays) state, // Random number generator state num_inliers, // Number of inliers per thread essential_matrices, projection_matrices); // Essential matrices per thread CudaErrorCheck(cudaPeekAtLastError()); // Check for kernel launch error CudaErrorCheck(cudaDeviceSynchronize()); // Check for kernel execution error // std::cout << "Runtime (Initialise, Kernel Only): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin_kernel).count() << "s" << std::endl; int ind_max = distance(num_inliers, max_element(num_inliers, num_inliers + num_threads)); // cout << "The largest element is " << ind_max << '\n'; // cout << "The number of inliers: " << num_inliers[ind_max] << '\n'; at::Tensor E_out = at::empty(3 * 3, input1.options()); at::Tensor P_out = at::empty(3 * 4, input1.options()); double* dataptr = E_out.data_ptr<double>(); double* dataptr_p = P_out.data_ptr<double>(); CudaErrorCheck(cudaMemcpy(dataptr, &essential_matrices[ind_max], sizeof(essential_matrices[ind_max]), cudaMemcpyDeviceToDevice)); CudaErrorCheck(cudaMemcpy(dataptr_p, &projection_matrices[ind_max], sizeof(projection_matrices[ind_max]), cudaMemcpyDeviceToDevice)); const int Max_inlier = num_inliers[ind_max]; E_out.resize_({3, 3}); P_out.resize_({3, 4}); CudaErrorCheck(cudaFree(num_inliers)); CudaErrorCheck(cudaFree(essential_matrices)); CudaErrorCheck(cudaFree(projection_matrices)); CudaErrorCheck(cudaFree(state)); // std::cout << "Runtime (Initialise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl; auto t = std::make_tuple(E_out, P_out, Max_inlier); return t; }
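// --- Hedged binding sketch (illustrative; the project may already register these elsewhere) ---
// The functions above take and return at::Tensor, so they can be exposed to Python as a
// standard PyTorch C++/CUDA extension. The registration below is one possible way to do it;
// the Python-side names are assumptions, and TORCH_EXTENSION_NAME is supplied by the build.
#include <torch/extension.h>

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("essential_matrix_initialise", &EssentialMatrixInitialise,
        "Five-point RANSAC initialisation of E (CUDA)");
  m.def("essential_matrix_optimise", &EssentialMatrixOptimise,
        "Robust refinement of an initial E (CUDA)");
  m.def("essential_matrix_decompose", &EssentialMatrixDecompose,
        "Decompose E into its five parameters");
  m.def("essential_matrix_decompose_uv", &EssentialMatrixDecomposeUV,
        "SVD-style decomposition of E into U and V");
  m.def("projection_matrix_ransac", &ProjectionMatrixRansac,
        "RANSAC returning E, P and the inlier count (CUDA)");
}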
extern "C" { #include "../cwc.h" #include "../cwc_internal.h" } #include "../../inc/ccv_convnet_internal.h" template <int input_per_thread> __global__ static void _cwc_kern_max_pool_forward_propagate(const int strides, const int border, const int size, const int batch, float* input, const int rows, const int cols, const int channels, float* out, const int out_rows, const int out_cols) { assert(gridDim.x == out_cols); assert(gridDim.y == out_rows); assert(gridDim.z == channels); extern __shared__ float shared[]; float* shared_input = &shared[0]; assert(blockDim.x == batch); const int thidx = threadIdx.x; int i, x, y; input += blockIdx.z * rows * cols * batch + (blockIdx.y * strides * cols + blockIdx.x * strides) * batch; float prod[input_per_thread]; const int input_y = blockIdx.y * strides - border; const int input_x = blockIdx.x * strides - border; const int input_start_y = max(input_y, 0); const int input_start_x = max(input_x, 0); const int input_end_y = min(input_y + size, rows); const int input_end_x = min(input_x + size, cols); const int size_start_y = input_start_y - input_y - border; const int size_start_x = input_start_x - input_x - border; const int size_end_y = size - border + (input_end_y - (input_y + size)); const int size_end_x = size - border + (input_end_x - (input_x + size)); // this is equal to iterating over 0 to size, and then compute the input origin by blockIdx.y * strides - border + y #pragma unroll for (y = size_start_y; y < size_end_y; y++) #pragma unroll for (x = size_start_x; x < size_end_x; x++) { shared_input[thidx] = input[(y * cols + x) * batch + thidx]; __syncthreads(); if (x == size_start_x && y == size_start_y) #pragma unroll for (i = 0; i < input_per_thread; i++) prod[i] = shared_input[i + threadIdx.x * input_per_thread]; else #pragma unroll for (i = 0; i < input_per_thread; i++) prod[i] = max(prod[i], shared_input[i + threadIdx.x * input_per_thread]); __syncthreads(); } out += blockIdx.z * out_rows * out_cols * batch + (blockIdx.y * out_cols + blockIdx.x) * batch; #pragma unroll for (i = 0; i < input_per_thread; i++) out[i + threadIdx.x * input_per_thread] = prod[i]; } void cwc_convnet_max_pool_forward_propagate(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const cudaStream_t& stream) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, rows, cols, &out_rows, &out_cols, &out_partition); dim3 num_blocks(out_cols, out_rows, layer->input.matrix.channels); dim3 threads_per_block(batch); assert(threads_per_block.x <= 1024); int shared_memory_size = sizeof(float) * batch; _cwc_kern_max_pool_forward_propagate <1> <<<num_blocks, threads_per_block, shared_memory_size, stream>>> (layer->net.pool.strides, layer->net.pool.border, layer->net.pool.size, batch, a, rows, cols, layer->input.matrix.channels, b, out_rows, out_cols); } template <int input_per_thread> __global__ static void _cwc_kern_average_pool_forward_propagate(const int strides, const int border, const int size, const int batch, float* input, const int rows, const int cols, const int channels, float* out, const int out_rows, const int out_cols) { assert(gridDim.x == out_rows); assert(gridDim.y == out_cols); assert(gridDim.z == channels); extern __shared__ float shared[]; float* shared_input = &shared[0]; const int thcnt = blockDim.x; const int thidx = threadIdx.x; assert(thcnt >= batch); int i, x, y; input += blockIdx.z * rows * cols * batch + (blockIdx.x * strides * cols + blockIdx.y * strides) * batch; float prod[input_per_thread]; #pragma unroll for 
(i = 0; i < input_per_thread; i++) prod[i] = 0; const int input_y = blockIdx.x * strides - border; const int input_x = blockIdx.y * strides - border; const int input_start_y = max(input_y, 0); const int input_start_x = max(input_x, 0); const int input_end_y = min(input_y + size, rows); const int input_end_x = min(input_x + size, cols); const int size_start_y = input_start_y - input_y - border; const int size_start_x = input_start_x - input_x - border; const int size_end_y = size - border + (input_end_y - (input_y + size)); const int size_end_x = size - border + (input_end_x - (input_x + size)); // this is equal to iterating over 0 to size, and then compute the input origin by blockIdx.x * strides - border + y #pragma unroll for (y = size_start_y; y < size_end_y; y++) #pragma unroll for (x = size_start_x; x < size_end_x; x++) { if (thidx < batch) shared_input[thidx] = input[(y * cols + x) * batch + thidx]; __syncthreads(); #pragma unroll for (i = 0; i < input_per_thread; i++) prod[i] += shared_input[i + threadIdx.x * input_per_thread]; __syncthreads(); } float inv_size = 1.0 / ((input_end_y - input_start_y) * (input_end_x - input_start_x)); out += blockIdx.z * out_rows * out_cols * batch + (blockIdx.x * out_cols + blockIdx.y) * batch; #pragma unroll for (i = 0; i < input_per_thread; i++) out[i + threadIdx.x * input_per_thread] = prod[i] * inv_size; } void cwc_convnet_average_pool_forward_propagate(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const cudaStream_t& stream) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, rows, cols, &out_rows, &out_cols, &out_partition); dim3 num_blocks(out_rows, out_cols, layer->input.matrix.channels); dim3 threads_per_block(batch); int shared_memory_size = sizeof(float) * batch; _cwc_kern_average_pool_forward_propagate <1> <<<num_blocks, threads_per_block, shared_memory_size, stream>>> (layer->net.pool.strides, layer->net.pool.border, layer->net.pool.size, batch, a, rows, cols, layer->input.matrix.channels, b, out_rows, out_cols); } template <int input_per_thread> __global__ static void _cwc_kern_max_pool_backward_propagate(const int strides, const int border, const int size, const int batch, float* input, float* input_grad, const int rows, const int cols, const int channels, float* out, float* out_grad, const int out_rows, int out_cols) { assert(gridDim.x == cols); assert(gridDim.y == rows); assert(gridDim.z == channels); extern __shared__ float shared[]; float* shared_input = &shared[0]; float* shared_out = &shared[batch]; float* shared_grad = &shared[batch * 2]; assert(blockDim.x == batch); const int thidx = threadIdx.x; float prod[input_per_thread]; int i, x, y; #pragma unroll for (i = 0; i < input_per_thread; i++) prod[i] = 0; const int ycnt = (size - 1 - (blockIdx.y + border) % strides) / strides + 1; const int xcnt = (size - 1 - (blockIdx.x + border) % strides) / strides + 1; const int out_y = (blockIdx.y + border) / strides - ycnt + 1; const int out_x = (blockIdx.x + border) / strides - xcnt + 1; const int out_start_y = max(out_y, 0); const int out_start_x = max(out_x, 0); out += (blockIdx.z * out_rows * out_cols + out_start_y * out_cols) * batch; out_grad += (blockIdx.z * out_rows * out_cols + out_start_y * out_cols) * batch; const int out_end_y = min(out_y + ycnt, out_rows); const int out_end_x = min(out_x + xcnt, out_cols); input += (blockIdx.z * rows * cols + blockIdx.y * cols + blockIdx.x) * batch; if (thidx < batch) shared_input[thidx] = input[thidx]; for (y = out_start_y; y < out_end_y; 
y++) { for (x = out_start_x; x < out_end_x; x++) { shared_out[thidx] = out[x * batch + thidx], shared_grad[thidx] = out_grad[x * batch + thidx]; __syncthreads(); #pragma unroll for (i = 0; i < input_per_thread; i++) // we have to do direct comparison otherwise it will contribute to too many cells // and the propagation won't work. But CPU will have different result comparing with GPU if (shared_out[i + threadIdx.x * input_per_thread] == shared_input[i + threadIdx.x * input_per_thread]) prod[i] += shared_grad[i + threadIdx.x * input_per_thread]; __syncthreads(); } out += out_cols * batch; out_grad += out_cols * batch; } input_grad += (blockIdx.z * rows * cols + blockIdx.y * cols + blockIdx.x) * batch; #pragma unroll for (i = 0; i < input_per_thread; i++) input_grad[i + threadIdx.x * input_per_thread] = prod[i]; } void cwc_convnet_max_pool_backward_propagate(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, const cudaStream_t& stream) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); dim3 num_blocks(layer->input.matrix.cols, layer->input.matrix.rows, layer->input.matrix.channels); dim3 threads_per_block(batch); int shared_memory_size = sizeof(float) * batch * 3; _cwc_kern_max_pool_backward_propagate <1> <<<num_blocks, threads_per_block, shared_memory_size, stream>>> (layer->net.pool.strides, layer->net.pool.border, layer->net.pool.size, batch, m, b, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, n, a, out_rows, out_cols); } template <int input_per_thread> __global__ static void _cwc_kern_average_pool_backward_propagate(const int strides, const int border, const int size, const int batch, float* input_grad, const int rows, const int cols, const int channels, float* out_grad, const int out_rows, int out_cols) { assert(gridDim.x == cols); assert(gridDim.y == rows); assert(gridDim.z == channels); extern __shared__ float shared[]; float* shared_grad = &shared[0]; const int thcnt = blockDim.x; const int thidx = threadIdx.x; assert(thcnt >= batch); float prod[input_per_thread]; int i, x, y; #pragma unroll for (i = 0; i < input_per_thread; i++) prod[i] = 0; const int ycnt = (size - 1 - (blockIdx.y + border) % strides) / strides + 1; const int xcnt = (size - 1 - (blockIdx.x + border) % strides) / strides + 1; const int out_y = (blockIdx.y + border) / strides - ycnt + 1; const int out_x = (blockIdx.x + border) / strides - xcnt + 1; const int out_start_y = max(out_y, 0); const int out_start_x = max(out_x, 0); out_grad += (blockIdx.z * out_rows * out_cols + out_start_y * out_cols) * batch; const int out_end_y = min(out_y + ycnt, out_rows); const int out_end_x = min(out_x + xcnt, out_cols); for (y = out_start_y; y < out_end_y; y++) { for (x = out_start_x; x < out_end_x; x++) { if (thidx < batch) shared_grad[thidx] = out_grad[x * batch + thidx]; __syncthreads(); float inv_size = 1.0 / ((min(y * strides + size - border, rows) - max(y * strides - border, 0)) * (min(x * strides + size - border, cols) - max(x * strides - border, 0))); #pragma unroll for (i = 0; i < input_per_thread; i++) prod[i] += shared_grad[i + threadIdx.x * input_per_thread] * inv_size; __syncthreads(); } out_grad += out_cols * batch; } input_grad += (blockIdx.z * rows * cols + blockIdx.y * cols + blockIdx.x) * batch; #pragma unroll for (i = 0; i < input_per_thread; i++) input_grad[i + threadIdx.x * input_per_thread] = prod[i]; } void 
cwc_convnet_average_pool_backward_propagate(ccv_convnet_layer_t* layer, int batch, float* a, float* b, const cudaStream_t& stream) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); dim3 num_blocks(layer->input.matrix.cols, layer->input.matrix.rows, layer->input.matrix.channels); dim3 threads_per_block(batch); assert(threads_per_block.x <= 1024); int shared_memory_size = sizeof(float) * batch; _cwc_kern_average_pool_backward_propagate <1> <<<num_blocks, threads_per_block, shared_memory_size, stream>>> (layer->net.pool.strides, layer->net.pool.border, layer->net.pool.size, batch, b, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, a, out_rows, out_cols); }
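// --- Hedged reference sketch (illustrative; not part of ccv) ---
// The cwc pooling kernels above use a batch-innermost layout:
//   input[((channel * rows + y) * cols + x) * batch + sample].
// The CPU function below reproduces the effective window of
// _cwc_kern_max_pool_forward_propagate (a size x size window at stride `strides`,
// shifted by -border and clamped to the image), which is convenient for checking
// the GPU result on small inputs. Names are illustrative only.
#include <cfloat>

static void cwc_max_pool_forward_cpu_sketch(
    int strides, int border, int size, int batch,
    const float* input, int rows, int cols, int channels,
    float* out, int out_rows, int out_cols) {
  for (int ch = 0; ch < channels; ch++)
    for (int oy = 0; oy < out_rows; oy++)
      for (int ox = 0; ox < out_cols; ox++)
        for (int k = 0; k < batch; k++) {
          // Window [y0, y0 + size) x [x0, x0 + size), clamped to the input extent.
          const int y0 = oy * strides - border;
          const int x0 = ox * strides - border;
          const int y_start = y0 > 0 ? y0 : 0;
          const int x_start = x0 > 0 ? x0 : 0;
          const int y_end = y0 + size < rows ? y0 + size : rows;
          const int x_end = x0 + size < cols ? x0 + size : cols;
          float m = -FLT_MAX;
          for (int y = y_start; y < y_end; y++)
            for (int x = x_start; x < x_end; x++) {
              const float v = input[((ch * rows + y) * cols + x) * batch + k];
              if (v > m) m = v;
            }
          out[((ch * out_rows + oy) * out_cols + ox) * batch + k] = m;
        }
}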
#include <thrust/count.h> #include <thrust/inner_product.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/extrema.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust_wrapper.h> #include <solvers/block_common_solver.h> namespace amgx { namespace classical { /////////////////////////////////////////////////////////////////////////////////////////////////// /************************************************************************* * Generate uniform random vector ************************************************************************/ struct prg { float a, b; __host__ __device__ prg(float _a = 0.f, float _b = 1.f) : a(_a), b(_b) {}; __host__ __device__ float operator()(const unsigned int n) const { thrust::default_random_engine rng; thrust::uniform_real_distribution<float> dist(a, b); rng.discard(n); return dist(rng); } }; // Initialization of test error vector with random entries template <class Vector> void initRandom(Vector &vec) { const unsigned int size = vec.size(); thrust::counting_iterator<unsigned int> index_sequence_begin(0); thrust::transform(index_sequence_begin, index_sequence_begin + size, vec.begin(), prg(0.f, 1.f)); } // Zero out the entries of a vector corresponding to the coarse points template <typename ValueType> __global__ void zero_coarse_points_kernel(const int *cf_map, const int numRows, ValueType *vec) { for ( int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < numRows; tIdx += blockDim.x * gridDim.x ) { // if point is coarse, zero out the corresponding entry of vec if (cf_map[tIdx] == COARSE) { vec[tIdx] = 0.; } } } // Create a vector which extracts fine points from the error vector and is // zero at the coarse points template <typename ValueType> __global__ void get_fine_error_kernel( const int *cf_map, const int numRows, const ValueType *v_err, ValueType *vec ) { for ( int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < numRows; tIdx += blockDim.x * gridDim.x ) { // if point is coarse, zero out the corresponding entry of vec if (cf_map[tIdx] == COARSE) { vec[tIdx] = 0.; } else { vec[tIdx] = v_err[tIdx]; } } } // Update only the fine points of the error vector (v_err) based on input vec. template <typename ValueType> __global__ void update_fine_zero_coarse_error_kernel(const int *cf_map, const int numRows, ValueType *v_err, const ValueType *vec) { for ( int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < numRows; tIdx += blockDim.x * gridDim.x ) { // if point is coarse, zero out the corresponding entry of vec if (cf_map[tIdx] == FINE) { v_err[tIdx] = vec[tIdx]; } else { v_err[tIdx] = (ValueType) 0; } } } // Update coarse and fine points from the matrix coloring taking the first // levels colors to form the independent set. template <typename IndexType> __global__ void update_cf_map_kernel(IndexType *cf_map, const int numRows, const int levels, const IndexType *A_row_colors) { //coarse = [coarse; find(independent)]; for ( int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < numRows; tIdx += blockDim.x * gridDim.x ) { // go over each row if (A_row_colors[tIdx] <= levels) // double check that this is what we want { cf_map[tIdx] = COARSE; } } } #define EXPERIMENTAL_CR #ifdef EXPERIMENTAL_CR // Update only the fine points of the error vector (v_err) based on input vec. 
template <typename IndexType, typename ValueType> __global__ void compute_Asc_nnzPerRow_kernel2(const IndexType *ArowOffsets, const IndexType *AcolInd, const ValueType *Avalues, const ValueType *AdiagValues, const IndexType AnumRows, const IndexType *cf_map, IndexType *Asc_nnzPerRow, ValueType *row_thresh, const ValueType thresh) { for (int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < AnumRows; tIdx += blockDim.x * gridDim.x) { // each thread works on one "fine" row of A at a time const int ArowBegin = ArowOffsets[tIdx]; const int ArowEnd = ArowOffsets[tIdx + 1]; if (cf_map[tIdx] == FINE) { // work with fine pts only // Matlab pseudo-code: // Asc(:,i) = A(:,i)/A(i,i)<-thresh; // but this thresh is dynamic ValueType fine_avg_thresh = 0.; int fine_count = 0; for (int j = ArowBegin; j < ArowEnd; j++) { int AjColIndex = AcolInd[j]; if ((AjColIndex != tIdx) && (cf_map[AjColIndex] == FINE)) { fine_avg_thresh += Avalues[j]; fine_count ++; } } fine_avg_thresh *= thresh / fine_count; row_thresh[tIdx] = fine_avg_thresh; for (int j = ArowBegin; j < ArowEnd; j++) { int AjColIndex = AcolInd[j]; if (AjColIndex == tIdx) { /*Asc_nnzPerRow[tIdx]++;*/ continue; } if (cf_map[AjColIndex] == FINE) { if (Avalues[j] / AdiagValues[tIdx] < fine_avg_thresh / AdiagValues[tIdx] || Avalues[j] / AdiagValues[AjColIndex] < fine_avg_thresh / AdiagValues[AjColIndex]) { Asc_nnzPerRow[tIdx]++; } } } } } } // Update only the fine points of the error vector (v_err) based on input vec. template <typename IndexType, typename ValueType> __global__ void compute_AscColInd_kernel(const IndexType *ArowOffsets, const IndexType *AcolInd, const ValueType *Avalues, const ValueType *AdiagValues, const IndexType AnumRows, const IndexType *cf_map, const IndexType *AscRowOffsets, IndexType *AscColInd, const ValueType *row_thresh) { for (int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < AnumRows; tIdx += blockDim.x * gridDim.x) { // each thread works on one "coarse" row of A at a time const int ArowBegin = ArowOffsets[tIdx]; const int ArowEnd = ArowOffsets[tIdx + 1]; const int AscRowEnd = AscRowOffsets[tIdx + 1]; ValueType rowMax(0); if (cf_map[tIdx] == FINE) // tIdx = j (in matlab) i.e. row of A { // work with fine pts only // Matlab pseudo-code: // Asc(:,i) = A(:,i)/A(i,i)<-thresh; int AscEntryNum = AscRowOffsets[tIdx]; // starting location of the current column of P // calculate max(-rowentries) for (int j = ArowBegin; j < ArowEnd && AscEntryNum < AscRowEnd; j++) { rowMax = max( rowMax, -Avalues[j]); } for (int j = ArowBegin; j < ArowEnd && AscEntryNum < AscRowEnd; j++) { // go thru each "fine" column of A int AjColIndex = AcolInd[j]; if (AjColIndex == tIdx) { /*AscColInd[AscEntryNum] = AjColIndex; AscEntryNum++;*/ continue; } if (cf_map[AjColIndex] == FINE) { if (-Avalues[j] >= row_thresh[tIdx]*rowMax) { AscColInd[AscEntryNum] = AjColIndex; AscEntryNum++; } } } } } } #else // Update only the fine points of the error vector (v_err) based on input vec. 
template <typename IndexType, typename ValueType> __global__ void compute_Asc_nnzPerRow_kernel(const IndexType *ArowOffsets, const IndexType *AcolInd, const ValueType *Avalues, const ValueType *AdiagValues, const IndexType AnumRows, const IndexType *cf_map, IndexType *Asc_nnzPerRow, const ValueType thresh) { for (int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < AnumRows; tIdx += blockDim.x * gridDim.x) { // each thread works on one "fine" row of A at a time const int ArowBegin = ArowOffsets[tIdx]; const int ArowEnd = ArowOffsets[tIdx + 1]; if (cf_map[tIdx] == FINE) { // work with fine pts only // Matlab pseudo-code: // Asc(:,i) = A(:,i)/A(i,i)<-thresh; for (int j = ArowBegin; j < ArowEnd; j++) { // go thru each "fine" column of A int AjColIndex = AcolInd[j]; if (AjColIndex == tIdx) { /*Asc_nnzPerRow[tIdx]++;*/ continue; } if (cf_map[AjColIndex] == FINE) { if (Avalues[j] / AdiagValues[tIdx] < -thresh || Avalues[j] / AdiagValues[AjColIndex] < -thresh) { Asc_nnzPerRow[tIdx]++; } } } } } } // Update only the fine points of the error vector (v_err) based on input vec. template <typename IndexType, typename ValueType> __global__ void compute_AscColInd_kernel(const IndexType *ArowOffsets, const IndexType *AcolInd, const ValueType *Avalues, const ValueType *AdiagValues, const IndexType AnumRows, const IndexType *cf_map, const IndexType *AscRowOffsets, IndexType *AscColInd, const ValueType thresh) { for (int tIdx = threadIdx.x + blockDim.x * blockIdx.x; tIdx < AnumRows; tIdx += blockDim.x * gridDim.x) { // each thread works on one "coarse" row of A at a time const int ArowBegin = ArowOffsets[tIdx]; const int ArowEnd = ArowOffsets[tIdx + 1]; const int AscRowEnd = AscRowOffsets[tIdx + 1]; if (cf_map[tIdx] == FINE) // tIdx = j (in matlab) i.e. row of A { // work with fine pts only // Matlab pseudo-code: // Asc(:,i) = A(:,i)/A(i,i)<-thresh; int AscEntryNum = AscRowOffsets[tIdx]; // starting location of the current column of P for (int j = ArowBegin; j < ArowEnd && AscEntryNum < AscRowEnd; j++) { // go thru each "fine" column of A int AjColIndex = AcolInd[j]; if (AjColIndex == tIdx) { /*AscColInd[AscEntryNum] = AjColIndex; AscEntryNum++;*/ continue; } if (cf_map[AjColIndex] == FINE) { if ((Avalues[j] / AdiagValues[tIdx] < -thresh) || (Avalues[j] / AdiagValues[AjColIndex] < -thresh)) { AscColInd[AscEntryNum] = AjColIndex; AscEntryNum++; } } } } } } #endif /////////////////////////////////////////////////////////////////////////////////////////////////// template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> CR_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: CR_Selector() : CR_SelectorBase<TConfig_d>(), m_smoother(0) {} template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> CR_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: ~CR_Selector() { if (m_smoother) { delete m_smoother; } } // Private functions for the implementation on device // Presmooth error template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void CR_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::presmoothFineError( Matrix_d &A, const VVector &AdiagValues, const IVector &cf_map, Vector_d &v_u, Vector_d &v_tmp, Vector_d &v_z, Solver<TConfig_d> *smoother, ValueType &norm0, const ValueType rho_thresh, const int pre ) { typedef typename Matrix_d::index_type IndexType; typedef typename Vector_d::value_type ValueTypeB; // Matrix A const 
IndexType AnumRows = (IndexType) A.get_num_rows(); // Coarse/Fine map const int *cf_map_ptr = cf_map.raw(); // Vector with the initial 'fine' error ValueTypeB *v_u_ptr = v_u.raw(); // choose blocksize. Using 1 thread / row for now const int blocksize = 64; const int numBlocks = min (AMGX_GRID_MAX_SIZE, (int) (AnumRows / blocksize + 1)); // Compute 'energy norm' of v_u (normalize error before smoothing) // v_tmp = diag(Aff).*v_u thrust::transform(AdiagValues.begin(), AdiagValues.end(), v_u.begin(), v_tmp.begin(), thrust::multiplies<ValueTypeB>()); cudaCheckError(); // norm0 = sqrt(v_tmp' * v_u) norm0 = sqrt( thrust::inner_product(v_u.begin(), v_u.end(), v_tmp.begin(), ValueTypeB(0.)) ); cudaCheckError(); // normalize: v_u = v_u / (norm0 + 1e-12); thrust::transform(v_u.begin(), v_u.end(), thrust::make_constant_iterator(norm0 + 1.0e-12), v_u.begin(), thrust::divides<ValueTypeB>()); cudaCheckError(); // ---------------------------- begin error presmoothing -------------------------- for (int k = 1; k <= 5; k++) { // Presmooth with Symmetric Gauss-Seidel (or ilu) for pre number of sm(ooth) s(teps) // WARNING: zeroing out technique used here is not equivalent to the fine matrix pre-smoothing! for (int sms = 1; sms <= pre; sms++) { smoother->solve_iteration( v_z, v_u, false ); zero_coarse_points_kernel<ValueTypeB> <<< numBlocks, blocksize>>>(cf_map_ptr, AnumRows, v_u_ptr); cudaCheckError(); } // -------------------- compute 'energy norm' of v_u ------------------------------ // v_tmp = diag(Aff).*v_u thrust::transform(AdiagValues.begin(), AdiagValues.end(), v_u.begin(), v_tmp.begin(), thrust::multiplies<ValueTypeB>()); cudaCheckError(); // norm0 = sqrt(v_tmp' * v_u) norm0 = sqrt( thrust::inner_product(v_u.begin(), v_u.end(), v_tmp.begin(), ValueTypeB(0.)) ); cudaCheckError(); // -------------------- done with 'energy norm' of v_u ---------------------------- // normalize: v_u = v_u / (norm0 + 1e-12); thrust::transform(v_u.begin(), v_u.end(), thrust::make_constant_iterator(norm0 + 1.0e-12), v_u.begin(), thrust::divides<ValueTypeB>()); cudaCheckError(); // norm0 := rho if (norm0 > 5 || norm0 < rho_thresh) { break; } } // ---------------------------- end error presmoothing ------------------------------ } // A single iteration of CR while loop. template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void CR_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::CR_iteration( Matrix_d &A, IVector &cf_map, int &numFine, Vector_d &v_err, ValueType &norm0, const ValueType maxit ) { typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; typedef typename Vector_d::value_type ValueTypeB; // Matrix A const IndexType AnumRows = (IndexType) A.get_num_rows(); // Coarse/Fine map IndexType *cf_map_ptr = cf_map.raw(); // Raw ptr for the row colors from the multi-color smoother const IndexType *A_row_colors_ptr; // choose blocksize. 
Using 1 thread / row for now const int blocksize = 64; const int numBlocks = min (AMGX_GRID_MAX_SIZE, (int) (AnumRows / blocksize + 1)); const int pre = 10; const ValueType alpha = 0.7; const ValueType rho_thresh = 1.0e-2; const IndexType *ArowOffsets_ptr = A.row_offsets.raw(); const IndexType *AcolInd_ptr = A.col_indices.raw(); const ValueType *Avalues_ptr = A.values.raw(); // temporary vectors - diagonal and non-zero offsets VVector AdiagValues(AnumRows, 0.0); // Extract the diagonal of A find_diag_kernel_indexed_dia <<< numBlocks, blocksize>>>(AnumRows, A.diag.raw(), Avalues_ptr, AdiagValues.raw()); cudaCheckError(); // ----------------------------------- define smoother ------------------------------------------ // Set the smoother to Gauss-Seidel (json config format) AMG_Config cfg; std::string cfg_string = "{ \"config_version\": 2"; cfg_string += ", \"solver\": { \"scope\": \"presmoothError\", \"solver\": \"MULTICOLOR_GS\""; cfg_string += " ,\"matrix_coloring_scheme\": \"PARALLEL_GREEDY\""; cfg_string += " } }"; cfg.parseParameterString(cfg_string.c_str()); // Allocate smoother m_smoother = SolverFactory<TConfig_d>::allocate(cfg, "default", "solver"); // --------------------------------- end define smoother ---------------------------------------- Vector_d v_z(AnumRows, 0.0); Vector_d v_u(AnumRows); Vector_d v_tmp(v_u.size()); ValueTypeB *v_err_ptr = v_err.raw(); ValueTypeB *v_u_ptr = v_u.raw(); // Store the number of non-zeros in each column of the interpolation matrix P IVector Asc_nnzPerRow(AnumRows, 0); #ifdef EXPERIMENTAL_CR VVector Asc_thresh_per_row(AnumRows, 0); ValueTypeB CR_P_trunc = 0.9; // truncation coefficient for CR P matrix, [0.0 ... 1.0] ValueTypeB Asc_thresh = (CR_P_trunc < epsilon(CR_P_trunc)) ? (-1. / epsilon(CR_P_trunc)) : (-1. 
/ CR_P_trunc + 1); compute_Asc_nnzPerRow_kernel2<IndexType, ValueType> <<< numBlocks, blocksize>>>(ArowOffsets_ptr, AcolInd_ptr, Avalues_ptr, AdiagValues.raw(), AnumRows, cf_map_ptr, Asc_nnzPerRow.raw(), Asc_thresh_per_row.raw(), Asc_thresh); #else compute_Asc_nnzPerRow_kernel<IndexType, ValueType> <<< numBlocks, blocksize>>>(ArowOffsets_ptr, AcolInd_ptr, Avalues_ptr, AdiagValues.raw(), AnumRows, cf_map_ptr, Asc_nnzPerRow.raw(), 0.1); #endif cudaCheckError(); // get the offsets in Asc with an inclusive scan thrust_wrapper::inclusive_scan(Asc_nnzPerRow.begin(), Asc_nnzPerRow.end(), Asc_nnzPerRow.begin()); cudaCheckError(); // get total num of non-zeros in P const int Asc_nnz = Asc_nnzPerRow[AnumRows - 1]; // Declare Asc (strong connections of A) Matrix_d Asc; // resize Asc Asc.resize(0, 0, 0, 1); Asc.addProps(CSR); Asc.resize(AnumRows, AnumRows, Asc_nnz, 1); Asc.row_offsets[0] = 0; // set P offsets (P column offsets or P^T row offsets) thrust::copy(Asc_nnzPerRow.begin(), Asc_nnzPerRow.end(), Asc.row_offsets.begin() + 1); cudaCheckError(); #ifdef EXPERIMENTAL_CR compute_AscColInd_kernel<IndexType, ValueType> <<< numBlocks, blocksize>>>(ArowOffsets_ptr, AcolInd_ptr, Avalues_ptr, AdiagValues.raw(), AnumRows, cf_map_ptr, Asc.row_offsets.raw(), Asc.col_indices.raw(), Asc_thresh_per_row.raw()); #else compute_AscColInd_kernel<IndexType, ValueType> <<< numBlocks, blocksize>>>(ArowOffsets_ptr, AcolInd_ptr, Avalues_ptr, AdiagValues.raw(), AnumRows, cf_map_ptr, Asc.row_offsets.raw(), Asc.col_indices.raw(), 0.25); #endif cudaCheckError(); Asc.set_initialized(0); Asc.colorMatrix(cfg, "presmoothError"); Asc.set_initialized(1); // get the coloring of matrix A_row_colors_ptr = Asc.getMatrixColoring().getRowColors().raw(); m_smoother->setup(A, false); int levels = 1; // number of CR iterations while (levels < 5) { // Here, we normally (i.e. as is done in Brannick's matlab code) we need to smooth using Aff, // however, since we are using Gauss-Seidel (symmetric or not) we can instead use A and // simply zero out v_err at the coarse points of each iteration. // v_u = v_err(fine) get_fine_error_kernel<ValueTypeB> <<< numBlocks, blocksize>>>(cf_map_ptr, AnumRows, v_err_ptr, v_u_ptr); cudaCheckError(); // Presmooth error at the current FINE points. presmoothFineError( A, AdiagValues, cf_map, v_u, v_tmp, v_z, m_smoother, norm0, rho_thresh, pre ); // check for convergence and break if it's fast enough if (norm0 <= alpha && levels > 1) { break; } // v_err(fine) = v_u; update_fine_zero_coarse_error_kernel<ValueTypeB> <<< numBlocks, blocksize>>>(cf_map_ptr, AnumRows, v_err_ptr, v_u_ptr); cudaCheckError(); // TODO: add aggressive coarsening here. // if (AggressiveCoarsening) { A2 = Al*Al; } // else { A2 = Al; } // Add nodes with lowest colors (<=levels) to the coarse set. update_cf_map_kernel<IndexType> <<< numBlocks, blocksize>>>(cf_map_ptr, AnumRows, levels - 1, A_row_colors_ptr); cudaCheckError(); // Get the new FINE points count. 
numFine = (int) thrust::count(cf_map.begin(), cf_map.end(), (int)FINE); cudaCheckError(); if (numFine == 0) { break; } // no fine points left, just exit levels++; } // end while (levels < maxit) } // end CR_iteration() /************************************************************************* * Implementing the CR algorithm ************************************************************************/ // ---------------------------- // specialization for device // ---------------------------- template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void CR_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::markCoarseFinePoints_1x1( Matrix_d &A, IVector &cf_map) { if (A.hasProps(DIAG)) { FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED); } typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; // Dimension of A. const int AnumRows = (int) A.get_num_rows(); if (AnumRows == 0) { return; } // Choose blocksize. Using 1 thread / row for now const int blocksize = 256; const int numBlocks = min(AMGX_GRID_MAX_SIZE, (int) ((AnumRows + blocksize - 1) / blocksize)); const int maxit = 5; // Max num of CR iterations. ValueType norm0; // Stores the energy norm of the smoothed error in CR iteration. int *cf_map_ptr = cf_map.raw(); // Initially all points are set to FINE - this is done in the level // Initial number of fine points (all points are fine). int numFine = AnumRows; // Randomly initialize error vector v_err: AnumRows x 1 vector. Vector_d v_err(AnumRows); initRandom(v_err); // Perform Compatible Relaxation (CR) iteration. CR_iteration(A, cf_map, numFine, v_err, norm0, maxit); int numCoarse = AnumRows - numFine; printf("CR: numrows = %d, numCoarse = %d\n", AnumRows, numCoarse); } // end markCoarseFinePoints_1x1 (device specialization) template <class T_Config> void CR_SelectorBase<T_Config>::markCoarseFinePoints( Matrix<TConfig> &A, FVector &weights, const BVector &s_con, IVector &cf_map, IVector &scratch, int cf_map_init) { ViewType oldView = A.currentView(); A.setView(OWNED); if (A.get_block_size() == 1) { markCoarseFinePoints_1x1(A, cf_map); } else { FatalError("Unsupported block size CR selector", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } A.setView(oldView); } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class CR_SelectorBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class CR_Selector<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace classical } // namespace amgx
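/*
 * Illustrative host-side sketch of the compatible-relaxation (CR) test that
 * presmoothFineError()/CR_iteration() above implement on the GPU.  This is
 * not AMGX code: it uses plain std::vector, a damped-Jacobi sweep as a
 * stand-in for the multicolor Gauss-Seidel smoother, and a CSR matrix passed
 * as raw arrays; the enum and function names here are hypothetical.  The point
 * is the bookkeeping: relax the homogeneous problem A e = 0 only at FINE
 * points (COARSE entries stay pinned to zero), after first normalizing e in
 * the diagonal energy norm, and read the post-smoothing norm as the CR
 * convergence factor rho; rho > alpha means more points must become COARSE.
 */
#include <cmath>
#include <vector>

enum CRPointType { CR_FINE = 0, CR_COARSE = 1 };

// One damped-Jacobi sweep on A e = 0, restricted to FINE points.
static void relax_fine(const std::vector<int> &rowp, const std::vector<int> &col,
                       const std::vector<double> &val, const std::vector<int> &cf_map,
                       std::vector<double> &e, double omega = 0.7)
{
    const int n = (int)cf_map.size();
    std::vector<double> enew(n, 0.0);            // coarse entries stay zero, as in zero_coarse_points_kernel
    for (int i = 0; i < n; i++) {
        if (cf_map[i] == CR_COARSE) continue;
        double diag = 1.0, Ae = 0.0;
        for (int k = rowp[i]; k < rowp[i + 1]; k++) {
            if (col[k] == i) diag = val[k];
            Ae += val[k] * e[col[k]];
        }
        enew[i] = e[i] - omega * Ae / diag;      // damped Jacobi on the residual of A e = 0
    }
    e = enew;
}

// sqrt(e' * diag(A) * e), the energy norm computed with thrust above.
static double d_energy_norm(const std::vector<double> &diag, const std::vector<double> &e)
{
    double s = 0.0;
    for (int i = 0; i < (int)e.size(); i++) s += diag[i] * e[i] * e[i];
    return std::sqrt(s);
}

// Normalize, smooth `pre` times at the fine points, and return rho = ||e||_D afterwards.
static double cr_convergence_factor(const std::vector<int> &rowp, const std::vector<int> &col,
                                    const std::vector<double> &val, const std::vector<double> &diag,
                                    const std::vector<int> &cf_map, std::vector<double> e, int pre = 10)
{
    double n0 = d_energy_norm(diag, e) + 1.0e-12;
    for (int i = 0; i < (int)e.size(); i++) e[i] /= n0;
    for (int sweep = 0; sweep < pre; sweep++) relax_fine(rowp, col, val, cf_map, e);
    return d_energy_norm(diag, e);
}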
__global__ void BlochKernelCESTGPU(float Gyro, double *d_CS, float *d_Rho, float *d_T1, float *d_T2, float *d_K, float *d_Mz, float *d_My, float *d_Mx, float *d_Buffer, float *d_dB0, float *d_dWRnd, float *d_Gzgrid, float *d_Gygrid, float *d_Gxgrid, float *d_TxCoilmg, float *d_TxCoilpe, float *d_RxCoilx, float *d_RxCoily, float *d_Sig, float RxCoilDefault, float TxCoilDefault, float *d_Sx, float *d_Sy, float rfRef, int SignalLen, int SBufferLen, int RunMode, int utsi, float *d_b_Mz, float *d_b_My, float *d_b_Mx, int SpinMxX, int SpinMxY, int SpinMxZ, int SpinNum, int TypeNum, int TxCoilNum, int RxCoilNum, int SeqLen) { /* CUDA index */ unsigned tid = blockIdx.x * blockDim.y + threadIdx.y; /* thread id in one slice */ unsigned id = threadIdx.y; /* thread id in one block */ /* sequence buffer in shared memory */ float *g_d_Sig; extern __shared__ float s_d_Sig[]; int i; if (SBufferLen !=0){ for (i=0; i< (int)floor((float)(SeqLen*(5 + 3 * TxCoilNum))/(float)blockDim.y); i++){ s_d_Sig[blockDim.y*i+id] = d_Sig[blockDim.y*i+id]; } if (blockDim.y*i+id < SeqLen*(5 + 3 * TxCoilNum)){ s_d_Sig[blockDim.y*i+id] = d_Sig[blockDim.y*i+id]; } __syncthreads(); g_d_Sig = s_d_Sig; }else{ g_d_Sig = d_Sig; } /* matrix dim */ int SpinMxNum = SpinMxX * SpinMxY; int SpinMxAllNum = SpinMxX * SpinMxY * SpinMxZ; /* signal counter*/ int Signalptr; /* dt buffer */ float dt; float ExpdtT2; float ExpdtT1; float M0dtT1; float ExpdtK; float rffreq; float ExpdtT1b; float M0dtT1b; float ExpdtKb; float ExpdtKout; float ExpdtKxin; float ExpdtKyin; float ExpdtKzin; /* matrix pointers */ float *p_d_Mz; float *p_d_Mzb; float *p_d_My; float *p_d_Mx; float *p_d_Buffer; float *p_d_dWRnd; float *p_d_Rho; float *p_d_T1; float *p_d_T2; float *p_d_K; float *p_d_K12; float *p_d_K21; float *p_d_Gzgrid; float *p_d_Gygrid; float *p_d_Gxgrid; float *p_d_dB0; float *p_d_TxCoilmg; float *p_d_TxCoilpe; float *p_d_RxCoilx; float *p_d_RxCoily; float *p_d_Sx; float *p_d_Sy; float *p_d_rfAmp; float *p_d_rfPhase; float *p_d_rfFreq; float *p_d_GzAmp; float *p_d_GyAmp; float *p_d_GxAmp; float *p_d_dt; float *p_d_ADC; float *p_d_b_Mx; float *p_d_b_My; float *p_d_b_Mz; /* multi-Tx variables */ float rfAmpSum; float rfAmp; float rfPhase; float rfFreq; float t2b; float buffer1; float buffer2; float buffer3; float buffer4; /* spin variables */ float Mx, My, Mz; float T1, T2, Rho; float Gzgrid, Gygrid, Gxgrid, dB0, dWRnd; float Mzb, T1b, T2b, Rhob; float K12, K21; float Mxc, Myc, Mzc; float T1c, T2c, Rhoc; /* temporary variables */ float dW, sinAlpha, sinBeta, sinPhi, cosAlpha, cosBeta, cosPhi, Alpha, Beta; float bufferMz, bufferMy, bufferMx; float Mzbuffer, Mzbbuffer; float u, n, G1, W; /* loop through slice <- spins <- species */ p_d_Buffer = d_Buffer + tid; /* buffer for tempMx, tempMy, tempMz, ExpdtT2, ExpdtT1, M0dtT1 in the same order */ rffreq = g_d_Sig[3] + 1; /* rffreq != rfFreq at start-up */ t2b = 0; /* flag for calculating lineshape */ for (int s=0; s < SpinNum; s++){ for (int k=0; k < SpinMxZ; k++){ /* free pool */ p_d_Rho = d_Rho + k * SpinMxNum + tid + 0 * SpinMxAllNum; p_d_T1 = d_T1 + k * SpinMxNum + tid + 0 * SpinMxAllNum; p_d_T2 = d_T2 + k * SpinMxNum + tid + 0 * SpinMxAllNum; if (*p_d_T2==0 || *p_d_T1==0 || *p_d_Rho==0) continue; /* avoid background 23%*/ /* free pool */ p_d_Mz = d_Mz + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_My = d_My + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Mx = d_Mx + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; Rho = 
*p_d_Rho; T1 = *p_d_T1; T2 = *p_d_T2; Mx = *p_d_Mx; My = *p_d_My; Mz = *p_d_Mz; Mzbuffer = Mz; /* bound pool */ p_d_Rho = d_Rho + k * SpinMxNum + tid + 1 * SpinMxAllNum; p_d_T1 = d_T1 + k * SpinMxNum + tid + 1 * SpinMxAllNum; p_d_T2 = d_T2 + k * SpinMxNum + tid + 1 * SpinMxAllNum; p_d_Mzb = d_Mz + k * SpinMxNum + tid + 1 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; Rhob = *p_d_Rho; T1b = *p_d_T1; T2b = *p_d_T2; Mzb = *p_d_Mzb; Mzbbuffer = Mzb; /* MT exchange term */ p_d_K12 = d_K + k * SpinMxNum + tid + 0 * SpinMxAllNum; p_d_K21 = d_K + k * SpinMxNum + tid + (TypeNum-1) * SpinMxAllNum; K12 = *p_d_K12; K21 = *p_d_K21; /* field term */ p_d_Gzgrid = d_Gzgrid + k * SpinMxNum + tid ; p_d_Gygrid = d_Gygrid + k * SpinMxNum + tid ; p_d_Gxgrid = d_Gxgrid + k * SpinMxNum + tid ; p_d_dB0 = d_dB0 + k * SpinMxNum + tid ; p_d_TxCoilmg = d_TxCoilmg + k * SpinMxNum + tid ; p_d_TxCoilpe = d_TxCoilpe + k * SpinMxNum + tid ; p_d_RxCoilx = d_RxCoilx + k * SpinMxNum + tid ; p_d_RxCoily = d_RxCoily + k * SpinMxNum + tid ; Gzgrid = *p_d_Gzgrid; Gygrid = *p_d_Gygrid; Gxgrid = *p_d_Gxgrid; dB0 = *p_d_dB0; Signalptr = 0; dt = 0; for (int q=0; q< SeqLen; q++){ if (RunMode == 1){ /* free pool */ p_d_b_Mz = d_b_Mz + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_b_My = d_b_My + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_b_Mx = d_b_Mx + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; *p_d_b_Mz = Mz; *p_d_b_My = My; *p_d_b_Mx = Mx; /* bound pool */ p_d_b_Mz = d_b_Mz + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + 1 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; *p_d_b_Mz = Mzb; /* CEST pool */ for (int t=2; t < TypeNum; t++){ p_d_Mz = d_Mz + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_My = d_My + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Mx = d_Mx + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_b_Mz = d_b_Mz + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_b_My = d_b_My + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_b_Mx = d_b_Mx + (utsi - SeqLen + q) * SpinMxAllNum * SpinNum * TypeNum + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; *p_d_b_Mz = *p_d_Mz; *p_d_b_My = *p_d_My; *p_d_b_Mx = *p_d_Mx; } } p_d_dt = g_d_Sig + q * (5 + 3 * TxCoilNum); if (*p_d_dt<= 0) continue; p_d_rfAmp = g_d_Sig + q * (5 + 3 * TxCoilNum) + 1; p_d_rfPhase = g_d_Sig + q * (5 + 3 * TxCoilNum) + 2; p_d_rfFreq = g_d_Sig + q * (5 + 3 * TxCoilNum) + 3; p_d_GzAmp = g_d_Sig + q * (5 + 3 * TxCoilNum) + 3 * TxCoilNum + 1; p_d_GyAmp = g_d_Sig + q * (5 + 3 * TxCoilNum) + 3 * TxCoilNum + 2; p_d_GxAmp = g_d_Sig + q * (5 + 3 * TxCoilNum) + 3 * TxCoilNum + 3; p_d_ADC = g_d_Sig + q * (5 + 3 * TxCoilNum) + 3 * TxCoilNum + 4; /* signal acquisition */ if (*p_d_ADC == 1) { for (int c = 0; c < RxCoilNum; c++){ /* signal acquisition per Rx coil */ /* RxCoil sensitivity */ if (RxCoilDefault ==0){ buffer1 = Mx * (* (p_d_RxCoilx + c * SpinMxAllNum)) +My * (* (p_d_RxCoily + c * SpinMxAllNum)); buffer2 = -Mx * (* (p_d_RxCoily + c * SpinMxAllNum)) +My * (* (p_d_RxCoilx + c * SpinMxAllNum)); buffer3 = 
buffer1; buffer4 = buffer2; }else{ buffer1 = Mx; buffer2 = My; buffer3 = buffer1; buffer4 = buffer2; } /* rfRef for demodulating rf Phase */ if (rfRef!=0){ buffer1 = cos(-rfRef) * buffer1; buffer2 = -sin(-rfRef) * buffer2; buffer3 = sin(-rfRef) * buffer3; buffer4 = cos(-rfRef) * buffer4; buffer1 = buffer1 + buffer2; buffer3 = buffer3 + buffer4; }else{ buffer3 = buffer4; } /* signal buffer pointer */ /* ? is it right for only collecting signal for free pool ? */ p_d_Sx = d_Sx + tid + 0 * (SpinMxNum * SignalLen * RxCoilNum) + c * (SpinMxNum * SignalLen) + Signalptr * SpinMxNum; p_d_Sy = d_Sy + tid + 0 * (SpinMxNum * SignalLen * RxCoilNum) + c * (SpinMxNum * SignalLen) + Signalptr * SpinMxNum; /* update signal buffer */ *p_d_Sx += buffer1; *p_d_Sy += buffer3; } Signalptr++; } /* buffer Mx, My, Mz for CEST exchange */ for (int t=2; t < TypeNum; t++){ p_d_Mx = d_Mx + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_My = d_My + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Mz = d_Mz + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Buffer[(0 * TypeNum + t) * SpinMxNum] = *p_d_Mx; p_d_Buffer[(1 * TypeNum + t) * SpinMxNum] = *p_d_My; p_d_Buffer[(2 * TypeNum + t) * SpinMxNum] = *p_d_Mz; } p_d_Buffer[(0 * TypeNum + 0) * SpinMxNum] = Mx; p_d_Buffer[(1 * TypeNum + 0) * SpinMxNum] = My; p_d_Buffer[(2 * TypeNum + 0) * SpinMxNum] = Mz; /*********************************** free pool spin precession ********************************/ p_d_dWRnd = d_dWRnd + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; dWRnd = *p_d_dWRnd; dW = dB0 * Gyro + dWRnd + 2 * PI * (float)d_CS[0] + Gzgrid * (*p_d_GzAmp) * Gyro + Gygrid * (*p_d_GyAmp) * Gyro + Gxgrid * (*p_d_GxAmp) * Gyro; rfAmpSum = 0; for (int c = 0; c<TxCoilNum; c++){ rfAmpSum+=fabs(p_d_rfAmp[c*3]); } if (rfAmpSum != 0){ if (TxCoilNum == 1) { /* single-Tx */ rfAmp = p_d_rfAmp[0]; rfPhase = p_d_rfPhase[0]; rfFreq = p_d_rfFreq[0]; /* note rfFreq is defined as fB0-frf */ dW += 2 * PI * rfFreq; buffer1 = *p_d_TxCoilmg * rfAmp; buffer2 = *p_d_TxCoilpe + rfPhase; Alpha = sqrt(pow(dW,2) + pow(buffer1,2) * pow(Gyro,2)) * (*p_d_dt); /* calculate alpha */ Beta = atan(dW/(buffer1 * Gyro)); /* calculate beta */ sinAlpha = sin(Alpha); sinBeta = sin(Beta); cosAlpha = cos(Alpha); cosBeta = cos(Beta); cosPhi = cos(-buffer2); sinPhi = sin(-buffer2); } else{ buffer3 = 0; buffer4 = 0; for (int c = 0; c<TxCoilNum; c++){ /* multi-Tx, sum all (B1+ * rf) */ rfAmp = p_d_rfAmp[c*3]; rfPhase = p_d_rfPhase[c*3]; rfFreq = p_d_rfFreq[c*3]; /* note rfFreq is defined as fB0-frf */ if (rfAmp !=0 ){ dW += 2 * PI * rfFreq; buffer1 = *(p_d_TxCoilmg + c * SpinMxAllNum) * rfAmp; buffer2 = *(p_d_TxCoilpe + c * SpinMxAllNum) + rfPhase; buffer3 += buffer1 * cos(buffer2); buffer4 += buffer1 * sin(buffer2); } } buffer1 = sqrt(pow(buffer3, 2) + pow(buffer4,2)); buffer2 = atan2(buffer4, buffer3); Alpha = sqrt(pow(dW,2) + pow(buffer1,2) * pow(Gyro,2)) * (*p_d_dt); /* calculate alpha */ Beta = atan(dW/(buffer1 * Gyro)); /* calculate beta */ sinAlpha = sin(Alpha); sinBeta = sin(Beta); cosAlpha = cos(Alpha); cosBeta = cos(Beta); cosPhi = cos(-buffer2); sinPhi = sin(-buffer2); } buffer1 = pow(cosBeta,2)*cosPhi - sinBeta*(sinAlpha*sinPhi - cosAlpha*cosPhi*sinBeta); buffer2 = sinPhi*pow(cosBeta,2) + sinBeta*(cosPhi*sinAlpha + cosAlpha*sinBeta*sinPhi); bufferMx = Mx * (cosPhi*buffer1 + sinPhi*(cosAlpha*sinPhi + cosPhi*sinAlpha*sinBeta)) -My * (sinPhi*buffer1 - cosPhi*(cosAlpha*sinPhi + cosPhi*sinAlpha*sinBeta)) 
+Mz * (cosBeta*(sinAlpha*sinPhi - cosAlpha*cosPhi*sinBeta) + cosBeta*cosPhi*sinBeta); /*Calculate Mx */ bufferMy = My * (sinPhi*buffer2 + cosPhi*(cosAlpha*cosPhi - sinAlpha*sinBeta*sinPhi)) -Mx * (cosPhi*buffer2 - sinPhi*(cosAlpha*cosPhi - sinAlpha*sinBeta*sinPhi)) +Mz * (cosBeta*(cosPhi*sinAlpha + cosAlpha*sinBeta*sinPhi) - cosBeta*sinBeta*sinPhi); /*Calculate My */ bufferMz = Mx * (cosPhi*(cosBeta*sinBeta - cosAlpha*cosBeta*sinBeta) - cosBeta*sinAlpha*sinPhi) -My * (sinPhi*(cosBeta*sinBeta - cosAlpha*cosBeta*sinBeta) + cosBeta*cosPhi*sinAlpha) +Mz * (cosAlpha*pow(cosBeta,2) + pow(sinBeta,2)); /*Calculate Mz */ } else{ Alpha = dW * (*p_d_dt); /* calculate alpha */ sinAlpha = sin(Alpha); cosAlpha = cos(Alpha); bufferMx = Mx * cosAlpha + My * sinAlpha; /* calculate Mx */ bufferMy = My * cosAlpha - Mx * sinAlpha; /* calculate My */ bufferMz = Mz ; /* calculate Mz */ } /* relax & MT exchange*/ if (dt != *p_d_dt){ /* exp & division is very time consuming */ ExpdtT2 = exp(-*p_d_dt/T2); ExpdtT1 = exp(-*p_d_dt/T1 - *p_d_dt*K12); M0dtT1 = (Rho*(1-exp(-*p_d_dt/T1)))/SpinNum; ExpdtK = 1-exp(-*p_d_dt*K21); } /* CEST exchange for free pool */ ExpdtKout = 1; ExpdtKxin = 0; ExpdtKyin = 0; ExpdtKzin = 0; for (int t=2; t< TypeNum; t++){ /* go away to CEST pool */ p_d_K = d_K + k * SpinMxNum + tid + (t - 1) * SpinMxAllNum; if (*p_d_K != 0 ){ ExpdtKout *= exp(-(*p_d_dt) * (*p_d_K)); } /* come in from CEST pool */ p_d_K = d_K + k * SpinMxNum + tid + (t + TypeNum - 2) * SpinMxAllNum; if (*p_d_K != 0 ){ ExpdtKxin += (1-exp(-(*p_d_dt) * (*p_d_K))) * p_d_Buffer[(0 * TypeNum + t) * SpinMxNum]; ExpdtKyin += (1-exp(-(*p_d_dt) * (*p_d_K))) * p_d_Buffer[(1 * TypeNum + t) * SpinMxNum]; ExpdtKzin += (1-exp(-(*p_d_dt) * (*p_d_K))) * p_d_Buffer[(2 * TypeNum + t) * SpinMxNum]; } } Mx = bufferMx * ExpdtT2 * ExpdtKout + ExpdtKxin; My = bufferMy * ExpdtT2 * ExpdtKout + ExpdtKyin; Mz = bufferMz * ExpdtT1 * ExpdtKout + ExpdtKzin + Mzbbuffer * ExpdtK + M0dtT1; /*********************************** CEST pool spin evolution *******************************/ for (int t=2; t < TypeNum; t++){ p_d_Mx = d_Mx + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_My = d_My + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Mz = d_Mz + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_dWRnd = d_dWRnd + k * SpinMxNum + tid + t * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Rho = d_Rho + k * SpinMxNum + tid + t * SpinMxAllNum; p_d_T1 = d_T1 + k * SpinMxNum + tid + t * SpinMxAllNum; p_d_T2 = d_T2 + k * SpinMxNum + tid + t * SpinMxAllNum; Mxc = *p_d_Mx; Myc = *p_d_My; Mzc = *p_d_Mz; T1c = *p_d_T1; T2c = *p_d_T2; Rhoc = *p_d_Rho; dWRnd = *p_d_dWRnd; dW = dB0 * Gyro + dWRnd + 2 * PI * (float)d_CS[t] + Gzgrid * (*p_d_GzAmp) * Gyro + Gygrid * (*p_d_GyAmp) * Gyro + Gxgrid * (*p_d_GxAmp) * Gyro; if (rfAmpSum != 0){ if (TxCoilNum == 1) { /* single-Tx */ rfAmp = p_d_rfAmp[0]; rfPhase = p_d_rfPhase[0]; rfFreq = p_d_rfFreq[0]; /* note rfFreq is defined as fB0-frf */ dW += 2 * PI * rfFreq; buffer1 = *p_d_TxCoilmg * rfAmp; buffer2 = *p_d_TxCoilpe + rfPhase; Alpha = sqrt(pow(dW,2) + pow(buffer1,2) * pow(Gyro,2)) * (*p_d_dt); /* calculate alpha */ Beta = atan(dW/(buffer1 * Gyro)); /* calculate beta */ sinAlpha = sin(Alpha); sinBeta = sin(Beta); cosAlpha = cos(Alpha); cosBeta = cos(Beta); cosPhi = cos(-buffer2); sinPhi = sin(-buffer2); } else{ buffer3 = 0; buffer4 = 0; for (int c = 0; c<TxCoilNum; c++){ /* multi-Tx, sum all (B1+ * rf) */ rfAmp = p_d_rfAmp[c*3]; rfPhase = 
p_d_rfPhase[c*3]; rfFreq = p_d_rfFreq[c*3]; /* note rfFreq is defined as fB0-frf */ if (rfAmp !=0 ){ dW += 2 * PI * rfFreq; buffer1 = *(p_d_TxCoilmg + c * SpinMxAllNum) * rfAmp; buffer2 = *(p_d_TxCoilpe + c * SpinMxAllNum) + rfPhase; buffer3 += buffer1 * cos(buffer2); buffer4 += buffer1 * sin(buffer2); } } buffer1 = sqrt(pow(buffer3, 2) + pow(buffer4,2)); buffer2 = atan2(buffer4, buffer3); Alpha = sqrt(pow(dW,2) + pow(buffer1,2) * pow(Gyro,2)) * (*p_d_dt); /* calculate alpha */ Beta = atan(dW/(buffer1 * Gyro)); /* calculate beta */ sinAlpha = sin(Alpha); sinBeta = sin(Beta); cosAlpha = cos(Alpha); cosBeta = cos(Beta); cosPhi = cos(-buffer2); sinPhi = sin(-buffer2); } buffer1 = pow(cosBeta,2)*cosPhi - sinBeta*(sinAlpha*sinPhi - cosAlpha*cosPhi*sinBeta); buffer2 = sinPhi*pow(cosBeta,2) + sinBeta*(cosPhi*sinAlpha + cosAlpha*sinBeta*sinPhi); bufferMx = Mxc * (cosPhi*buffer1 + sinPhi*(cosAlpha*sinPhi + cosPhi*sinAlpha*sinBeta)) -Myc * (sinPhi*buffer1 - cosPhi*(cosAlpha*sinPhi + cosPhi*sinAlpha*sinBeta)) +Mzc * (cosBeta*(sinAlpha*sinPhi - cosAlpha*cosPhi*sinBeta) + cosBeta*cosPhi*sinBeta); /*Calculate Mx */ bufferMy = Myc * (sinPhi*buffer2 + cosPhi*(cosAlpha*cosPhi - sinAlpha*sinBeta*sinPhi)) -Mxc * (cosPhi*buffer2 - sinPhi*(cosAlpha*cosPhi - sinAlpha*sinBeta*sinPhi)) +Mzc * (cosBeta*(cosPhi*sinAlpha + cosAlpha*sinBeta*sinPhi) - cosBeta*sinBeta*sinPhi); /*Calculate My */ bufferMz = Mxc * (cosPhi*(cosBeta*sinBeta - cosAlpha*cosBeta*sinBeta) - cosBeta*sinAlpha*sinPhi) -Myc * (sinPhi*(cosBeta*sinBeta - cosAlpha*cosBeta*sinBeta) + cosBeta*cosPhi*sinAlpha) +Mzc * (cosAlpha*pow(cosBeta,2) + pow(sinBeta,2)); /*Calculate Mz */ } else{ Alpha = dW * (*p_d_dt); /* calculate alpha */ sinAlpha = sin(Alpha); cosAlpha = cos(Alpha); bufferMx = Mxc * cosAlpha + Myc * sinAlpha; /* calculate Mx */ bufferMy = Myc * cosAlpha - Mxc * sinAlpha; /* calculate My */ bufferMz = Mzc ; /* calculate Mz */ } /* relax */ if (dt != *p_d_dt){ /* exp & division is very time consuming */ p_d_Buffer[(3 * TypeNum + t) * SpinMxNum] = exp(-(*p_d_dt)/(T2c)); p_d_Buffer[(4 * TypeNum + t) * SpinMxNum] = exp(-(*p_d_dt)/(T1c)); p_d_Buffer[(5 * TypeNum + t) * SpinMxNum] = (Rhoc*(1 - p_d_Buffer[(4 * TypeNum + t) * SpinMxNum]))/SpinNum; } bufferMx *= p_d_Buffer[(3 * TypeNum + t) * SpinMxNum]; bufferMy *= p_d_Buffer[(3 * TypeNum + t) * SpinMxNum]; bufferMz *= p_d_Buffer[(4 * TypeNum + t) * SpinMxNum]; ExpdtKout = 1; ExpdtKxin = 0; ExpdtKyin = 0; ExpdtKzin = 0; /* go away to free pool */ p_d_K = d_K + k * SpinMxNum + tid + (t + TypeNum - 2) * SpinMxAllNum; if (*p_d_K != 0 ){ ExpdtKout = exp(-(*p_d_dt) * (*p_d_K)); } /* come in from free pool */ p_d_K = d_K + k * SpinMxNum + tid + (t - 1) * SpinMxAllNum; if (*p_d_K != 0 ){ ExpdtKxin = (1-exp(-(*p_d_dt) * (*p_d_K))) * p_d_Buffer[(0 * TypeNum + 0) * SpinMxNum]; ExpdtKyin = (1-exp(-(*p_d_dt) * (*p_d_K))) * p_d_Buffer[(1 * TypeNum + 0) * SpinMxNum]; ExpdtKzin = (1-exp(-(*p_d_dt) * (*p_d_K))) * p_d_Buffer[(2 * TypeNum + 0) * SpinMxNum]; } Mxc = bufferMx * ExpdtKout + ExpdtKxin; Myc = bufferMy * ExpdtKout + ExpdtKyin; Mzc = bufferMz * ExpdtKout + ExpdtKzin + p_d_Buffer[(5 * TypeNum + t) * SpinMxNum]; *p_d_Mx = Mxc; *p_d_My = Myc; *p_d_Mz = Mzc; } /****************************************bound pool spin evolution ******************************/ if (rfAmpSum != 0){ if (TxCoilNum == 1) { /* single-Tx */ rfAmp = fabs(p_d_rfAmp[0]); rfFreq = p_d_rfFreq[0]; /* note rfFreq is defined as fB0-frf */ buffer1 = *p_d_TxCoilmg * rfAmp; /* bound pool saturation */ /* deal with on-resonance singularity if (rfFreq == 
0){ rfFreq = 1; } */ if (rffreq != rfFreq || t2b != T2b){ /* SuperLorentian lineshape -- this is very time consuming */ rffreq = rfFreq; t2b = T2b; u=0; G1=0; for (int t=0; t < 1000; t++){ buffer2 = ((2*PI*rfFreq+dB0*Gyro)*T2b)/(3*pow(u,2)-1); G1+=(sqrt(2/PI)*(T2b/fabs(3*pow(u,2)-1))*exp(-2*pow(buffer2,2)))*0.001; u+=0.001; } } W = PI * pow(buffer1 * Gyro, 2) * G1; } else{ n=0; W=0; for (int c=0; c<TxCoilNum; c++){ /* multi-Tx, sum all (B1+ * rf) */ rfAmp = p_d_rfAmp[c*3]; rfFreq = p_d_rfFreq[c*3]; /* note rfFreq is defined as fB0-frf*/ if (rfAmp !=0 ){ rfAmp=fabs(rfAmp); buffer1= *(p_d_TxCoilmg + c * SpinMxAllNum) * rfAmp; /* bound pool saturation */ /* deal with on-resonance singularity if (rfFreq == 0){ rfFreq = 1; } */ if (rffreq != rfFreq || t2b != T2b){ /* SuperLorentian lineshape -- this is very time consuming */ rffreq = rfFreq; t2b = T2b; u=0; G1=0; for (int t=0; t < 1000; t++){ buffer2 = ((2*PI*rfFreq+dB0*Gyro)*T2b)/(3*pow(u,2)-1); G1+=(sqrt(2/PI)*(T2b/fabs(3*pow(u,2)-1))*exp(-2*pow(buffer2,2)))*0.001; u+=0.001; } } W += PI * pow(buffer1 * Gyro, 2) * G1; n++; } } W = W / n; } bufferMz = Mzb * exp(-W*(*p_d_dt)); /*calculate Mz */ } else{ bufferMz = Mzb; /* calculate Mz */ } /* relax & exchange*/ if (dt != *p_d_dt){ /* exp & division is very time consuming */ ExpdtT1b = exp(-*p_d_dt/T1b - *p_d_dt*K21); M0dtT1b = (Rhob*(1-exp(-*p_d_dt/T1b)))/SpinNum; ExpdtKb = 1-exp(-*p_d_dt*K12); } Mzb = bufferMz * ExpdtT1b + M0dtT1b + Mzbuffer * ExpdtKb; /* update Mz buffer */ Mzbuffer = Mz; Mzbbuffer = Mzb; dt = *p_d_dt; } p_d_Mz = d_Mz + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_My = d_My + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; p_d_Mx = d_Mx + k * SpinMxNum + tid + 0 * (SpinMxAllNum * SpinNum) + s * SpinMxAllNum; *p_d_Mx = Mx; *p_d_My = My; *p_d_Mz = Mz; *p_d_Mzb = Mzb; } } } #endif
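/*
 * Host-side reference for the bound-pool (MT) saturation used in the kernel
 * above.  The kernel evaluates a super-Lorentzian absorption lineshape G by a
 * simple Riemann sum over u in [0,1) with du = 0.001 (1000 terms), then
 * applies the saturation rate W = pi * (gamma*B1)^2 * G to Mzb over one time
 * step dt.  This is an illustrative sketch only: the function names and the
 * use of double precision are choices made here, not part of the simulator.
 */
#include <math.h>

#ifndef PI
#define PI 3.14159265358979323846
#endif

/* G(dw) for offset dw [rad/s] and bound-pool T2b [s], matching the kernel's sum. */
static double super_lorentzian_G(double dw, double T2b)
{
    double u = 0.0, G = 0.0;
    for (int t = 0; t < 1000; t++) {
        double s = 3.0 * u * u - 1.0;              /* 3u^2 - 1 */
        double arg = (dw * T2b) / s;
        G += sqrt(2.0 / PI) * (T2b / fabs(s)) * exp(-2.0 * arg * arg) * 0.001;
        u += 0.001;
    }
    return G;
}

/* One step of bound-pool saturation: Mzb <- Mzb * exp(-W*dt). */
static double saturate_bound_pool(double Mzb, double B1, double Gyro,
                                  double rfFreq, double dB0, double T2b, double dt)
{
    double dw = 2.0 * PI * rfFreq + dB0 * Gyro;    /* same off-resonance term as in the kernel */
    double W  = PI * pow(B1 * Gyro, 2.0) * super_lorentzian_G(dw, T2b);
    return Mzb * exp(-W * dt);
}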
typedef float (*fntype)(float); typedef float (*optype)(float,float); __device__ float link_linear(float a) {return a;} __device__ float link_logistic(float a) {return log(a/(1.0f - a));} __device__ float mean_linear(float a) {return a;} __device__ float mean_logistic(float a) { if (a > 20.0f) { return 1.0f; } else if (a < -80.0f) { return 0.0f; } else { return 1.0f/(1.0f + exp(-a)); } } __device__ float deriv_linear(float a, float b) {return b-a;} __device__ float deriv_logistic(float a, float b) {return b-a;} __device__ float deriv_maxp(float p, float t) {return (2.0f*t - 1.0f)*p*(1.0f-p);} __device__ float deriv_svm(float p, float t) { float tt = 2 * t - 1; return (p * tt < 1.0f) ? tt : 0.0f; } #define EPS 1.0e-10f __device__ float ll_linear(float a, float t) {return (t-a)*(a-t);} __device__ float ll_logistic(float a, float b) {return log(a * b + (1.0f - a) * (1.0f - b) + EPS);} __device__ float ll_maxp(float a, float t) {return a * t + (1.0f - a) * (1.0f - t) - 1.0f;} __device__ float ll_svm(float p, float t) { float tt = 2 * t - 1; return min(0.0f, tt * p - 1); } __device__ const fntype linkfns[] = { link_linear, link_logistic, link_logistic, link_linear}; __device__ const fntype meanfns[] = { mean_linear, mean_logistic, mean_logistic, mean_linear}; __device__ const optype derivfns[] = { deriv_linear, deriv_logistic, deriv_maxp, deriv_svm}; __device__ const optype llfns[] = { ll_linear, ll_logistic, ll_maxp, ll_svm}; typedef double (*dfntype)(double); typedef double (*doptype)(double,double); __device__ double dlink_linear(double a) {return a;} __device__ double dlink_logistic(double a) {return log(a/(1.0 - a));} __device__ double dmean_linear(double a) {return a;} __device__ double dmean_logistic(double a) { double tmp; if (a > 0) { tmp = exp(-a); return 1.0/(1.0 + tmp); } else { tmp = exp(a); return tmp/(1.0 + tmp); } } __device__ double dderiv_linear(double a, double b) {return b-a;} __device__ double dderiv_logistic(double a, double b) {return b-a;} __device__ double dderiv_maxp(double p, double t) {return (2.0*t - 1.0f)*p*(1.0-p);} __device__ double dderiv_svm(double p, double t) { double tt = 2 * t - 1; return (p * tt < 1.0) ? 
tt : 0.0; } __device__ double dll_linear(double a, double t) {return (t-a)*(a-t);} __device__ double dll_logistic(double a, double b) {return log(a * b + (1.0 - a) * (1.0 - b) + EPS);} __device__ double dll_maxp(double a, double t) {return a * t + (1.0 - a) * (1.0 - t) - 1.0;} __device__ double dll_svm(double p, double t) { double tt = 2 * t - 1; return min(0.0, tt * p - 1); } __device__ const dfntype dlinkfns[] = { dlink_linear, dlink_logistic, dlink_logistic, dlink_linear}; __device__ const dfntype dmeanfns[] = { dmean_linear, dmean_logistic, dmean_logistic, dmean_linear}; __device__ const doptype dderivfns[] = { dderiv_linear, dderiv_logistic, dderiv_maxp, dderiv_svm}; __device__ const doptype dllfns[] = { dll_linear, dll_logistic, dll_maxp, dll_svm}; void setsizes(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; // int version; // version = getDeviceVersion(); // if (version == 320) threads_per_block = 512; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = 2*nblocks; } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } void setsizesLean(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; // int version; // version = getDeviceVersion(); // if (version == 320) threads_per_block = 512; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = max(nblocks, 1 + (int)((N-1)/nthreads)); } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } __global__ void __apply_preds(float *A, int *L, float *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { fntype fn = meanfns[L[i % nrows]]; C[i] = fn(A[i]); } } int apply_preds(float *A, int *L, float *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_preds<<<griddims,nthreads>>>(A, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_links(float *A, int *L, float *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { fntype fn = linkfns[L[i % nrows]]; C[i] = fn(A[i]); } } int apply_links(float *A, int *L, float *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_links<<<griddims,nthreads>>>(A, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { optype op = llfns[L[i % nrows]]; C[i] = op(A[i],B[i]); } } int apply_lls(float *A, float *B, int *L, float *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_lls<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t 
err = cudaGetLastError(); return err; } __global__ void __apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { optype op = derivfns[L[i % nrows]]; C[i] = op(A[i],B[i]); } } int apply_derivs(float *A, float *B, int *L, float *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_derivs<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { dfntype fn = dmeanfns[L[i % nrows]]; C[i] = fn(A[i]); } } int apply_dpreds(double *A, int *L, double *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_dpreds<<<griddims,nthreads>>>(A, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { dfntype fn = dlinkfns[L[i % nrows]]; C[i] = fn(A[i]); } } int apply_dlinks(double *A, int *L, double *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_dlinks<<<griddims,nthreads>>>(A, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { doptype op = dllfns[L[i % nrows]]; C[i] = op(A[i],B[i]); } } int apply_dlls(double *A, double *B, int *L, double *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_dlls<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { doptype op = dderivfns[L[i % nrows]]; C[i] = op(A[i],B[i]); } } int apply_dderivs(double *A, double *B, int *L, double *C, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLean(nrows*ncols, &griddims, &nthreads); __apply_dderivs<<<griddims,nthreads>>>(A, B, L, C, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { float lr, ve, te, pve, ste, ngrad, ssq, ssqnew; ssq = Sumsq[ihere]; ssqnew = hypotf(grad,ssq); atomicAdd(&Sumsq[ihere], ssqnew - ssq); ssq = ssqnew * sqrtf(istep); if (addgrad) { lr = (lrlen > 1) ? 
lrate[i] : lrate[0]; ve = (vexplen > 1) ? vexp[i] : vexp[0]; te = (texplen > 1) ? texp[i] : texp[0]; pve = (ve == 0.5f) ? ssq : ((ve == 0) ? 1.0f : pow(ssq, 2*ve)); ste = pow(istep, te); ngrad = grad * lr * ste / pve; atomicAdd(&MM[ihere], ngrad); } if (Mask != NULL) { if (maskrows > 1) { if (Mask[ihere] == 0) MM[ihere] = 0; } else { if (Mask[jhere] == 0) MM[ihere] = 0; } } } /* __forceinline__ __device__ void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) { float lr, ve, te, pve, ste, ngrad; Sumsq[ihere] += grad * grad + epsilon; if (addgrad) { lr = (lrlen > 1) ? lrate[i] : lrate[0]; ve = (vexplen > 1) ? vexp[i] : vexp[0]; te = (texplen > 1) ? texp[i] : texp[0]; pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve); ste = pow(istep, te); ngrad = grad * lr * ste / pve; atomicAdd(&MM[ihere], ngrad); } if (Mask != NULL) { if (maskrows > 1) { if (Mask[ihere] == 0) MM[ihere] = 0; } else { if (Mask[jhere] == 0) MM[ihere] = 0; } } } */ __global__ void __multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) { float aval, grad; int i, j, ihere, jhere; int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; if (biasv > 0) { for (i = threadIdx.x; i < nrows; i += blockDim.x) { aval = 0; for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; grad = aval; ihere = i + nrows * nbr; jhere = nbr; __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } grad = aval * Bdata[j]; ihere = i + nrows * Bir[j]; jhere = Bir[j]; __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } else { for (i = threadIdx.x; i < nrows; i += blockDim.x) { aval = 0; for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; } grad = aval * Bdata[j]; ihere = i + nrows * Bir[j]; jhere = Bir[j]; __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } } __global__ void __multADAGradx(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) { float aval, grad; int i, j, ihere, jhere; int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; i = threadIdx.x; aval = 0; if (biasv > 0) { for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; grad = aval; ihere = i + nrows * nbr; jhere = nbr; __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } grad = aval * Bdata[j]; ihere = i + nrows * Bir[j]; jhere = Bir[j]; __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, 
addgrad, epsilon); } } else { for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; } grad = aval * Bdata[j]; ihere = i + nrows * Bir[j]; jhere = Bir[j]; __gupdate(grad, i, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } int multADAGrad(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(256, max(1, 1 + (ncols-1)/nt)); __multADAGradx<<<nblocks,threadDim>>>(nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr); } else { int nthreads = min(1024, 32*(1+(nrows-1)/32)); int nblocks = min(128, ncols); __multADAGrad<<<nblocks,nthreads>>>(nrows, ncols, nnz, A, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __multADAGradTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) { float aval, grad; int i, j, ihere, jhere; int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; if (biasv > 0) { for (i = threadIdx.x; i < nrows; i += blockDim.x) { aval = 0; for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + y + lda * Bic[j]]; grad = aval; ihere = i + nrows * nbr; jhere = nbr; __gupdate(grad, i+y, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } grad = aval * Bdata[j]; jhere = Bir[j] - x; if (jhere >= 0 && jhere < ncols) { ihere = i + nrows * jhere; __gupdate(grad, i+y, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } } else { for (i = threadIdx.x; i < nrows; i += blockDim.x) { aval = 0; for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + y + lda * Bic[j]]; } grad = aval * Bdata[j]; jhere = Bir[j] - x; if (jhere >= 0 && jhere < ncols) { ihere = i + nrows * jhere; __gupdate(grad, i+y, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } } } __global__ void __multADAGradxTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) { float aval, grad; int i, j, ihere, jhere; int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; i = threadIdx.x; aval = 0; if (biasv > 0) { for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + y + lda * Bic[j]]; grad = aval; jhere = nbr 
- x; ihere = i + nrows * jhere; __gupdate(grad, i+y, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } grad = aval * Bdata[j]; jhere = Bir[j] - x; if (jhere >= 0 && jhere < ncols) { ihere = i + nrows * jhere; __gupdate(grad, i+y, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } else { for (j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + y + lda * Bic[j]]; } grad = aval * Bdata[j]; jhere = Bir[j] - x; if (jhere >= 0 && jhere < ncols) { ihere = i + nrows * jhere; __gupdate(grad, i+y, ihere, jhere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon); } } } } int multADAGradTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen, float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon, int biasv, int nbr) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(256, max(1, 1 + (ncols-1)/nt)); __multADAGradxTile<<<nblocks,threadDim>>>(nrows, ncols, y, x, nnz, A, lda, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr); } else { int nthreads = min(1024, 32*(1+(nrows-1)/32)); int nblocks = min(128, ncols); __multADAGradTile<<<nblocks,nthreads>>>(nrows, ncols, y, x, nnz, A, lda, Bdata, Bir, Bic, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon, biasv, nbr); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __forceinline__ __device__ void __kupdate(float grad, int i, int ihere, int jhere, float *MM, float *Mask, int maskrows, float *lrate, int lrlen, float limit) { float lr, ngrad; lr = (lrlen > 1) ? 
lrate[i] : lrate[0];
  ngrad = grad * lr;
  if (limit > 0) ngrad = max(-limit, min(limit, ngrad));
  atomicAdd(&MM[ihere], ngrad);
  if (Mask != NULL) {
    if (maskrows > 1) {
      if (Mask[ihere] == 0) MM[ihere] = 0;
    } else {
      if (Mask[jhere] == 0) MM[ihere] = 0;
    }
  }
}

/* Gradient update for a (y, x)-offset tile of MM: for each nonzero B(Bir[j], Bic[j]),
   add clip(lrate * A(i+y, Bic[j]) * Bdata[j]) to MM(i, Bir[j]-x); each block handles a
   contiguous slice of the nonzeros and threads stride over the rows i. */
__global__ void __multGradTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic,
                               float *MM, float *Mask, int maskrows, float *lrate, int lrlen, float limit, int biasv, int nbr) {
  float aval, grad;
  int i, j, ihere, jhere;
  int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
  int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
  if (biasv > 0) {
    for (i = threadIdx.x; i < nrows; i += blockDim.x) {
      aval = 0;
      for (j = jstart; j < jend; j++) {
        if (j == jstart || Bic[j-1] != Bic[j]) {
          aval = A[i + y + lda * Bic[j]];
          grad = aval;
          ihere = i + nrows * nbr;
          jhere = nbr;
          __kupdate(grad, i+y, ihere, jhere, MM, Mask, maskrows, lrate, lrlen, limit);
        }
        grad = aval * Bdata[j];
        jhere = Bir[j] - x;
        if (jhere >= 0 && jhere < ncols) {
          ihere = i + nrows * jhere;
          __kupdate(grad, i+y, ihere, jhere, MM, Mask, maskrows, lrate, lrlen, limit);
        }
      }
    }
  } else {
    for (i = threadIdx.x; i < nrows; i += blockDim.x) {
      aval = 0;
      for (j = jstart; j < jend; j++) {
        if (j == jstart || Bic[j-1] != Bic[j]) {
          aval = A[i + y + lda * Bic[j]];
        }
        grad = aval * Bdata[j];
        jhere = Bir[j] - x;
        if (jhere >= 0 && jhere < ncols) {
          ihere = i + nrows * jhere;
          __kupdate(grad, i+y, ihere, jhere, MM, Mask, maskrows, lrate, lrlen, limit);
        }
      }
    }
  }
}

/* Variant for small nrows: each thread owns one row i = threadIdx.x and the
   (blockIdx.x, threadIdx.y) pair owns a slice of the nonzeros. */
__global__ void __multGradxTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic,
                                float *MM, float *Mask, int maskrows, float *lrate, int lrlen, float limit, int biasv, int nbr) {
  float aval, grad;
  int i, j, ihere, jhere;
  int bid = threadIdx.y + blockDim.y * blockIdx.x;
  int nb = blockDim.y * gridDim.x;
  int jstart = ((long long)bid) * nnz / nb;
  int jend = ((long long)(bid + 1)) * nnz / nb;
  i = threadIdx.x;
  aval = 0;
  if (biasv > 0) {
    for (j = jstart; j < jend; j++) {
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + y + lda * Bic[j]];
        grad = aval;
        jhere = nbr - x;
        ihere = i + nrows * jhere;
        __kupdate(grad, i+y, ihere, jhere, MM, Mask, maskrows, lrate, lrlen, limit);
      }
      grad = aval * Bdata[j];
      jhere = Bir[j] - x;
      if (jhere >= 0 && jhere < ncols) {
        ihere = i + nrows * jhere;
        __kupdate(grad, i+y, ihere, jhere, MM, Mask, maskrows, lrate, lrlen, limit);
      }
    }
  } else {
    for (j = jstart; j < jend; j++) {
      if (j == jstart || Bic[j-1] != Bic[j]) {
        aval = A[i + y + lda * Bic[j]];   /* tiled A: offset by y, leading dimension lda */
      }
      grad = aval * Bdata[j];
      jhere = Bir[j] - x;
      if (jhere >= 0 && jhere < ncols) {
        ihere = i + nrows * jhere;
        __kupdate(grad, i+y, ihere, jhere, MM, Mask, maskrows, lrate, lrlen, limit);
      }
    }
  }
}

int multGradTile(int nrows, int ncols, int y, int x, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic,
                 float *MM, float *Mask, int maskrows, float *lrate, int lrlen, float limit, int biasv, int nbr) {
  if (nrows < 128) {
    int nt = max(1, min(ncols/2, 256/nrows));
    dim3 threadDim(nrows, nt, 1);
    int nblocks = min(256, max(1, 1 + (ncols-1)/nt));
    __multGradxTile<<<nblocks,threadDim>>>(nrows, ncols, y, x, nnz, A, lda, Bdata, Bir, Bic, MM, Mask, maskrows, lrate, lrlen, limit, biasv, nbr);
  } else {
    int nthreads = min(1024, 32*(1+(nrows-1)/32));
    int nblocks = min(128, ncols);
    __multGradTile<<<nblocks,nthreads>>>(nrows, ncols, y, x, nnz, A, lda, Bdata, Bir, Bic, MM, Mask, maskrows, lrate, lrlen, limit, biasv, nbr);
  }
  cudaStreamSynchronize(SYNC_STREAM);
  cudaError_t err = cudaGetLastError();
  return err;
} __global__ void __nrandinit(curandState *rstates) { int id = threadIdx.x + blockDim.x * blockIdx.x; curand_init(1234, id, 0, &rstates[id]); } __global__ void __ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, curandState *rstates) { int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int nthreads = blockDim.x * gridDim.x * gridDim.y; int i, irow, icol; float mmval, umval, sqrtss, sqrtnewss, veval, tsval, lrval, denom, grad; float sqrtnw = sqrtf(nw); float sqrt1mnw = sqrtf(1-nw); float sqrteps = sqrt(eps); curandState *prstate = &rstates[ithread]; for (i = ithread; i < nrows*ncols; i += nthreads) { icol = i / nrows; irow = i - icol * nrows; umval = um[i]; sqrtss = ssq[i]; // newsumsq = (nw * umval * umval) + (1 - nw) * sumsq; sqrtnewss = hypotf(sqrtnw * umval, sqrt1mnw * sqrtss); ssq[i] = sqrtnewss; if (doupdate) { mmval = mm[i]; veval = (nve > 1) ? ve[irow] : ve[0]; tsval = (nts > 1) ? ts[irow] : ts[0]; lrval = (nlr > 1) ? lr[irow] : lr[0]; sqrtnewss = hypotf(sqrtnewss, sqrteps); denom = (veval == 0.5f) ? sqrtnewss : powf(sqrtnewss, veval*2); grad = (umval / denom); if (langevin > 0) grad += curand_normal(prstate) * langevin; mmval += grad * lrval * tsval; if (maskr > 0) { if (maskr > 1) { mmval *= mask[i]; } else { mmval *= mask[icol]; } } mm[i] = mmval; } } } // ADAGRAD with standard momentum __global__ void __ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, curandState *rstates) { int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int nthreads = blockDim.x * gridDim.x * gridDim.y; int i, irow, icol; float mmval, umval, sqrtss, sqrtnewss, veval, tsval, lrval, denom, grad; float sqrtnw = sqrtf(nw); float sqrt1mnw = sqrtf(1-nw); float sqrteps = sqrt(eps); curandState *prstate = &rstates[ithread]; for (i = ithread; i < nrows*ncols; i += nthreads) { icol = i / nrows; irow = i - icol * nrows; umval = um[i]; sqrtss = ssq[i]; // newss = (nw * umval * umval) + (1 - nw) * sqval; sqrtnewss = hypotf(sqrtnw * umval, sqrt1mnw * sqrtss); ssq[i] = sqrtnewss; if (doupdate) { mmval = mm[i]; veval = (nve > 1) ? ve[irow] : ve[0]; tsval = (nts > 1) ? ts[irow] : ts[0]; lrval = (nlr > 1) ? lr[irow] : lr[0]; sqrtnewss = hypotf(sqrtnewss, sqrteps); denom = (veval == 0.5f) ? 
sqrtnewss : powf(sqrtnewss, veval*2); grad = (umval / denom); if (langevin > 0) grad += curand_normal(prstate) * langevin; grad = grad * lrval * tsval; // Normal gradient grad = grad + mu * momentum[i]; // Gradient with momentum momentum[i] = grad; // Save it mmval += grad; // Add the new gradient if (maskr > 0) { if (maskr > 1) { mmval *= mask[i]; } else { mmval *= mask[icol]; } } mm[i] = mmval; } } } // ADAGRAD with Nesterov momentum __global__ void __ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts, float *lr, int nlr, float langevin, float eps, int doupdate, curandState *rstates) { int ithread = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int nthreads = blockDim.x * gridDim.x * gridDim.y; int i, irow, icol; float mmval, umval, sqrtss, sqrtnewss, veval, tsval, lrval, denom, grad, oldmom, newmom; float sqrtnw = sqrtf(nw); float sqrt1mnw = sqrtf(1-nw); float sqrteps = sqrt(eps); curandState *prstate = &rstates[ithread]; for (i = ithread; i < nrows*ncols; i += nthreads) { icol = i / nrows; irow = i - icol * nrows; umval = um[i]; sqrtss = ssq[i]; // newss = (nw * umval * umval) + (1 - nw) * sqval; sqrtnewss = hypotf(sqrtnw * umval, sqrt1mnw * sqrtss); ssq[i] = sqrtnewss; if (doupdate) { mmval = mm[i]; veval = (nve > 1) ? ve[irow] : ve[0]; tsval = (nts > 1) ? ts[irow] : ts[0]; lrval = (nlr > 1) ? lr[irow] : lr[0]; sqrtnewss = hypotf(sqrtnewss, sqrteps); denom = (veval == 0.5f) ? sqrtnewss : powf(sqrtnewss, veval*2); grad = (umval / denom); if (langevin > 0) grad += curand_normal(prstate) * langevin; grad = grad * lrval * tsval; // Normal gradient oldmom = momentum[i]; // Momentum newmom = grad + mu * oldmom; // Compute new momentum momentum[i] = newmom; // Save new momentum mmval += newmom + mu * (newmom - oldmom); // x_t = x_t-1 + p_t + mu(p_t - p_t-1) if (maskr > 0) { if (maskr > 1) { mmval *= mask[i]; } else { mmval *= mask[icol]; } } mm[i] = mmval; } } } int ADAGrad(int nrows, int ncols, float *mm, float *um, float *ssq, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts, float *lrate, int nlrate, float langevin, float eps, int doupdate) { int nthreads; dim3 griddims; int basesize; if (langevin > 0) { basesize = max(32, nrows * ncols / 32); } else { basesize = max(32, nrows * ncols); } setsizesLean(basesize, &griddims, &nthreads); int ntt = nthreads * griddims.x * griddims.y; curandState *rstates = NULL; if (langevin > 0) { cudaError_t err = cudaMalloc(( void **)& rstates , ntt * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaStreamSynchronize(SYNC_STREAM); __nrandinit<<<griddims,nthreads>>>(rstates); cudaStreamSynchronize(SYNC_STREAM); } __ADAGrad<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates); cudaStreamSynchronize(SYNC_STREAM); if (langevin > 0) cudaFree(rstates); cudaError_t err = cudaGetLastError(); return err; } int ADAGradm(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts, float *lrate, int nlrate, float langevin, float eps, int doupdate) { int nthreads; dim3 griddims; int basesize; if (langevin > 0) { basesize = max(32, nrows * ncols / 32); } else { basesize = max(32, nrows * ncols); } setsizesLean(basesize, &griddims, &nthreads); int ntt = nthreads * griddims.x * griddims.y; 
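  // Langevin noise needs per-thread RNG state: allocate one curandState per
  // launched thread and seed it with __nrandinit before invoking the kernel.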
curandState *rstates = NULL; if (langevin > 0) { cudaError_t err = cudaMalloc(( void **)& rstates , ntt * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaStreamSynchronize(SYNC_STREAM); __nrandinit<<<griddims,nthreads>>>(rstates); cudaStreamSynchronize(SYNC_STREAM); } __ADAGradm<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, momentum, mu, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates); cudaStreamSynchronize(SYNC_STREAM); if (langevin > 0) cudaFree(rstates); cudaError_t err = cudaGetLastError(); return err; } int ADAGradn(int nrows, int ncols, float *mm, float *um, float *ssq, float *momentum, float mu, float *mask, int maskr, float nw, float *ve, int nve, float *ts, int nts, float *lrate, int nlrate, float langevin, float eps, int doupdate) { int nthreads; dim3 griddims; int basesize; if (langevin > 0) { basesize = max(32, nrows * ncols / 32); } else { basesize = max(32, nrows * ncols); } setsizesLean(basesize, &griddims, &nthreads); int ntt = nthreads * griddims.x * griddims.y; curandState *rstates = NULL; if (langevin > 0) { cudaError_t err = cudaMalloc(( void **)& rstates , ntt * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaStreamSynchronize(SYNC_STREAM); __nrandinit<<<griddims,nthreads>>>(rstates); cudaStreamSynchronize(SYNC_STREAM); } __ADAGradn<<<griddims,nthreads>>>(nrows, ncols, mm, um, ssq, momentum, mu, mask, maskr, nw, ve, nve, ts, nts, lrate, nlrate, langevin, eps, doupdate, rstates); cudaStreamSynchronize(SYNC_STREAM); if (langevin > 0) cudaFree(rstates); cudaError_t err = cudaGetLastError(); return err; }
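// The following is a minimal host-side scalar sketch of the per-element update
// performed by the __ADAGrad kernel above (the momentum/Nesterov variants and
// the mask/Langevin handling are omitted). Note that ssq stores the *square
// root* of the decayed sum of squared gradients, which is why hypotf is used
// to fold in the new gradient. Function and parameter names are illustrative,
// not part of the library.
#include <cmath>

static inline void adagrad_update_scalar(float &model, float &ssq, float grad,
                                         float nw, float ve, float ts,
                                         float lr, float eps) {
    // newss = nw * grad^2 + (1 - nw) * ssq^2, kept in square-root form
    float sqrtnewss = hypotf(std::sqrt(nw) * grad, std::sqrt(1.0f - nw) * ssq);
    ssq = sqrtnewss;
    // denominator = (newss + eps)^ve; ve = 0.5 gives the usual sqrt denominator
    float base  = hypotf(sqrtnewss, std::sqrt(eps));
    float denom = (ve == 0.5f) ? base : std::pow(base, 2.0f * ve);
    // apply the per-row learning rate (lr) and extra scale (ts), as in the kernel
    model += (grad / denom) * lr * ts;
}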
// ----------------------------------------------------------------------------------------- // NVEnc by rigaya // ----------------------------------------------------------------------------------------- // // The MIT License // // Copyright (c) 2014-2016 rigaya // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // // ------------------------------------------------------------------------------------------ #include <map> #include <array> #include <cmath> #pragma warning (push) #pragma warning (disable: 4819) #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <vector_types.h> #pragma warning (pop) #include "convert_csp.h" #include "NVEncFilterAfs.h" #include "NVEncParam.h" #define SYN_BLOCK_INT_X (32) //work groupサイズ(x) = スレッド数/work group #define SYN_BLOCK_Y (8) //work groupサイズ(y) = スレッド数/work group #define SYN_BLOCK_LOOP_Y (1) //work groupのy方向反復数 #define u8x4(x) (((uint32_t)x) | (((uint32_t)x) << 8) | (((uint32_t)x) << 16) | (((uint32_t)x) << 24)) template <typename T> __device__ __inline__ T lerp(T v0, T v1, T t) { return fma(t, v1, fma(-t, v0, v0)); } __device__ __inline__ uint32_t blend(uint32_t a_if_0, uint32_t b_if_1, uint32_t mask) { return (a_if_0 & (~mask)) | (b_if_1 & (mask)); } // 後方フィールド判定 __device__ __inline__ int is_latter_field(int pos_y, int tb_order) { return (((pos_y + tb_order + 1) & 1)); } __device__ __inline__ uint32_t deint(int src1, int src3, int src4, int src5, int src7, uint32_t flag, uint32_t mask, int max) { const int tmp2 = src1 + src7; const int tmp3 = src3 + src5; //const int tmp = (tmp3 - ((tmp2 - tmp3) >> 3) + 1) >> 1; const int tmp = clamp(__rhadd(tmp3, (tmp3 - tmp2) >> 3), 0, max); return (uint32_t)(((flag & mask) == 0) ? tmp : src4); } __device__ __inline__ float deint(float src1, float src3, float src4, float src5, float src7, uint32_t flag, uint32_t mask) { //const float tmp2 = src1 + src7; //const float tmp3 = src3 + src5; //const float tmp = (tmp3 - ((tmp2 - tmp3) * 0.125f)) * 0.5f; const float tmp = (src3 + src5) * 0.5625f - (src1 + src7) * 0.0625f; return (((flag & mask) == 0) ? tmp : src4); } __device__ __inline__ uint32_t blend(int src1, int src2, int src3, uint32_t flag, uint32_t mask) { int tmp = (src1 + src3 + src2 + src2 + 2) >> 2; return (uint32_t)(((flag & mask) == 0) ? tmp : src2); } __device__ __inline__ float blend(float src1, float src2, float src3, uint32_t flag, uint32_t mask) { float tmp = (src1 + src3 + 2.0f * src2) * 0.25f; return ((flag & mask) == 0) ? 
tmp : src2; } __device__ __inline__ uint32_t mie_inter(int src1, int src2, int src3, int src4) { return (uint32_t)((src1 + src2 + src3 + src4 + 2) >> 2); } __device__ __inline__ float mie_inter(float src1, float src2, float src3, float src4) { return (src1 + src2 + src3 + src4) * 0.25f; } __device__ __inline__ uint32_t mie_spot(int src1, int src2, int src3, int src4, int src_spot) { return __urhadd(mie_inter(src1, src2, src3, src4), src_spot); } __device__ __inline__ float mie_spot(float src1, float src2, float src3, float src4, float src_spot) { return (mie_inter(src1, src2, src3, src4) + src_spot) * 0.5f; } __device__ __inline__ uint32_t deint4(uint32_t src1, uint32_t src3, uint32_t src4, uint32_t src5, uint32_t src7, uint32_t flag, uint32_t mask) { uint32_t p0 = deint((int)(src1 & 0x000000ff), (int)(src3 & 0x000000ff), (int)(src4 & 0x000000ff), (int)(src5 & 0x000000ff), (int)(src7 & 0x000000ff), flag & 0x000000ff, mask, 0xff); uint32_t p1 = deint((int)(src1 & 0x0000ff00) >> 8, (int)(src3 & 0x0000ff00) >> 8, (int)(src4 & 0x0000ff00) >> 8, (int)(src5 & 0x0000ff00) >> 8, (int)(src7 & 0x0000ff00) >> 8, flag & 0x0000ff00, mask, 0xff) << 8; uint32_t p2 = deint((int)(src1 & 0x00ff0000) >> 16, (int)(src3 & 0x00ff0000) >> 16, (int)(src4 & 0x00ff0000) >> 16, (int)(src5 & 0x00ff0000) >> 16, (int)(src7 & 0x00ff0000) >> 16, flag & 0x00ff0000, mask, 0xff) << 16; uint32_t p3 = deint((int)(src1 >> 24), (int)(src3 >> 24), (int)(src4 >> 24), (int)(src5 >> 24), (int)(src7 >> 24), flag >> 24, mask, 0xff) << 24; return p0 | p1 | p2 | p3; } __device__ __inline__ uint32_t deint2(uint32_t src1, uint32_t src3, uint32_t src4, uint32_t src5, uint32_t src7, uint32_t flag, uint32_t mask) { uint32_t p0 = deint((int)(src1 & 0x0000ffff), (int)(src3 & 0x0000ffff), (int)(src4 & 0x0000ffff), (int)(src5 & 0x0000ffff), (int)(src7 & 0x0000ffff), flag & 0x000000ff, mask, 0x0000ffff); uint32_t p1 = deint((int)(src1 >> 16), (int)(src3 >> 16), (int)(src4 >> 16), (int)(src5 >> 16), (int)(src7 >> 16), flag & 0x0000ff00, mask, 0x0000ffff) << 16; return p0 | p1; } __device__ __inline__ uint32_t blend4(uint32_t src1, uint32_t src2, uint32_t src3, uint32_t flag, uint32_t mask) { uint32_t p0 = blend((int)(src1 & 0x000000ff), (int)(src2 & 0x000000ff), (int)(src3 & 0x000000ff), flag & 0x000000ff, mask); uint32_t p1 = blend((int)(src1 & 0x0000ff00), (int)(src2 & 0x0000ff00), (int)(src3 & 0x0000ff00), flag & 0x0000ff00, mask) & 0x0000ff00; uint32_t p2 = blend((int)(src1 & 0x00ff0000), (int)(src2 & 0x00ff0000), (int)(src3 & 0x00ff0000), flag & 0x00ff0000, mask) & 0x00ff0000; uint32_t p3 = blend((int)(src1 >> 24), (int)(src2 >> 24), (int)(src3 >> 24), flag >> 24, mask) << 24; return p0 | p1 | p2 | p3; } __device__ __inline__ uint32_t blend2(uint32_t src1, uint32_t src2, uint32_t src3, uint32_t flag, uint32_t mask) { uint32_t p0 = blend((int)(src1 & 0x0000ffff), (int)(src2 & 0x0000ffff), (int)(src3 & 0x0000ffff), flag & 0x0000ffff, mask); uint32_t p1 = blend((int)(src1 >> 16), (int)(src2 >> 16), (int)(src3 >> 16), flag & 0x0000ff00, mask) << 16; return p0 | p1; } __device__ __inline__ uint32_t mie_inter4(uint32_t src1, uint32_t src2, uint32_t src3, uint32_t src4) { uint32_t p0 = mie_inter((int)(src1 & 0x000000ff), (int)(src2 & 0x000000ff), (int)(src3 & 0x000000ff), (int)(src4 & 0x000000ff)); uint32_t p1 = mie_inter((int)(src1 & 0x0000ff00), (int)(src2 & 0x0000ff00), (int)(src3 & 0x0000ff00), (int)(src4 & 0x0000ff00)) & 0x0000ff00; uint32_t p2 = mie_inter((int)(src1 & 0x00ff0000), (int)(src2 & 0x00ff0000), (int)(src3 & 0x00ff0000), (int)(src4 
& 0x00ff0000)) & 0x00ff0000; uint32_t p3 = mie_inter((int)(src1 >> 24), (int)(src2 >> 24), (int)(src3 >> 24), (int)(src4 >> 24) ) << 24; return p0 | p1 | p2 | p3; } __device__ __inline__ uint32_t mie_inter2(uint32_t src1, uint32_t src2, uint32_t src3, uint32_t src4) { uint32_t p0 = mie_inter((int)(src1 & 0x0000ffff), (int)(src2 & 0x0000ffff), (int)(src3 & 0x0000ffff), (int)(src4 & 0x0000ffff)); uint32_t p1 = mie_inter((int)(src1 >> 16), (int)(src2 >> 16), (int)(src3 >> 16), (int)(src4 >> 16)) << 16; return p0 | p1; } __device__ __inline__ uint32_t mie_spot4(uint32_t src1, uint32_t src2, uint32_t src3, uint32_t src4, uint32_t src_spot) { uint32_t p0 = mie_spot((int)(src1 & 0x000000ff), (int)(src2 & 0x000000ff), (int)(src3 & 0x000000ff), (int)(src4 & 0x000000ff), (int)(src_spot & 0x000000ff)); uint32_t p1 = mie_spot((int)(src1 & 0x0000ff00), (int)(src2 & 0x0000ff00), (int)(src3 & 0x0000ff00), (int)(src4 & 0x0000ff00), (int)(src_spot & 0x0000ff00)) & 0x0000ff00; uint32_t p2 = mie_spot((int)(src1 & 0x00ff0000), (int)(src2 & 0x00ff0000), (int)(src3 & 0x00ff0000), (int)(src4 & 0x00ff0000), (int)(src_spot & 0x00ff0000)) & 0x00ff0000; uint32_t p3 = mie_spot((int)(src1 >> 24), (int)(src2 >> 24), (int)(src3 >> 24), (int)(src4 >> 24), (int)(src_spot >> 24) ) << 24; return p0 | p1 | p2 | p3; } __device__ __inline__ uint32_t mie_spot2(uint32_t src1, uint32_t src2, uint32_t src3, uint32_t src4, uint32_t src_spot) { uint32_t p0 = mie_spot((int)(src1 & 0x0000ffff), (int)(src2 & 0x0000ffff), (int)(src3 & 0x0000ffff), (int)(src4 & 0x0000ffff), (int)(src_spot & 0x0000ffff)); uint32_t p1 = mie_spot((int)(src1 >> 16), (int)(src2 >> 16), (int)(src3 >> 16), (int)(src4 >> 16), (int)(src_spot >> 16)) << 16; return p0 | p1; } __device__ __inline__ uint2 deint8(uint2 src1, uint2 src3, uint2 src4, uint2 src5, uint2 src7, uint2 flag, uint32_t mask) { uint2 pout; pout.x = deint4(src1.x, src3.x, src4.x, src5.x, src7.x, flag.x, mask); pout.y = deint4(src1.y, src3.y, src4.y, src5.y, src7.y, flag.y, mask); return pout; } __device__ __inline__ uint4 deint8(uint4 src1, uint4 src3, uint4 src4, uint4 src5, uint4 src7, uint2 flag, uint32_t mask) { uint4 pout; pout.x = deint2(src1.x, src3.x, src4.x, src5.x, src7.x, flag.x >> 0, mask); pout.y = deint2(src1.y, src3.y, src4.y, src5.y, src7.y, flag.x >> 16, mask); pout.z = deint2(src1.z, src3.z, src4.z, src5.z, src7.z, flag.y >> 0, mask); pout.w = deint2(src1.w, src3.w, src4.w, src5.w, src7.w, flag.y >> 16, mask); return pout; } __device__ __inline__ uint2 blend8(uint2 src1, uint2 src2, uint2 src3, uint2 flag, uint32_t mask) { uint2 pout; pout.x = blend4(src1.x, src2.x, src3.x, flag.x, mask); pout.y = blend4(src1.y, src2.y, src3.y, flag.y, mask); return pout; } __device__ __inline__ uint4 blend8(uint4 src1, uint4 src2, uint4 src3, uint2 flag, uint32_t mask) { uint4 pout; pout.x = blend2(src1.x, src2.x, src3.x, flag.x >> 0, mask); pout.y = blend2(src1.y, src2.y, src3.y, flag.x >> 16, mask); pout.z = blend2(src1.z, src2.z, src3.z, flag.y >> 0, mask); pout.w = blend2(src1.w, src2.w, src3.w, flag.y >> 16, mask); return pout; } __device__ __inline__ uint2 mie_inter8(uint2 src1, uint2 src2, uint2 src3, uint2 src4) { uint2 pout; pout.x = mie_inter4(src1.x, src2.x, src3.x, src4.x); pout.y = mie_inter4(src1.y, src2.y, src3.y, src4.y); return pout; } __device__ __inline__ uint4 mie_inter8(uint4 src1, uint4 src2, uint4 src3, uint4 src4) { uint4 pout; pout.x = mie_inter2(src1.x, src2.x, src3.x, src4.x); pout.y = mie_inter2(src1.y, src2.y, src3.y, src4.y); pout.z = mie_inter2(src1.z, src2.z, 
src3.z, src4.z); pout.w = mie_inter2(src1.w, src2.w, src3.w, src4.w); return pout; } __device__ __inline__ uint2 mie_spot8(uint2 src1, uint2 src2, uint2 src3, uint2 src4, uint2 src_spot) { uint2 pout; pout.x = mie_spot4(src1.x, src2.x, src3.x, src4.x, src_spot.x); pout.y = mie_spot4(src1.y, src2.y, src3.y, src4.y, src_spot.y); return pout; } __device__ __inline__ uint4 mie_spot8(uint4 src1, uint4 src2, uint4 src3, uint4 src4, uint4 src_spot) { uint4 pout; pout.x = mie_spot2(src1.x, src2.x, src3.x, src4.x, src_spot.x); pout.y = mie_spot2(src1.y, src2.y, src3.y, src4.y, src_spot.y); pout.z = mie_spot2(src1.z, src2.z, src3.z, src4.z, src_spot.z); pout.w = mie_spot2(src1.z, src2.w, src3.w, src4.w, src_spot.w); return pout; } template<typename Type8, int plane, int line> __device__ __inline__ Type8 piny( const uint8_t *__restrict__ p0, const uint8_t *__restrict__ p1, int y_h1_pos, int y_h2_pos, int y_h3_pos, int y_h4_pos, int y_h5_pos, int y_h6_pos, int y_h7_pos) { const uint8_t *ptr = (plane) ? p1 : p0; switch (line) { case 1: ptr += y_h1_pos; break; case 2: ptr += y_h2_pos; break; case 3: ptr += y_h3_pos; break; case 4: ptr += y_h4_pos; break; case 5: ptr += y_h5_pos; break; case 6: ptr += y_h6_pos; break; case 7: ptr += y_h7_pos; break; default: break; } return *(Type8 *)ptr; } template<typename Type8, int mode> __device__ __inline__ void proc_y( uint8_t *__restrict__ dst, const uint8_t *__restrict__ p0, const uint8_t *__restrict__ p1, const uint8_t *__restrict__ sip, const int tb_order, const uint8_t status, int y_h1_pos, int y_h2_pos, int y_h3_pos, int y_h4_pos, int y_h5_pos, int y_h6_pos, int y_h7_pos ) { static_assert(-1 <= mode && mode <= 4, "mode should be -1 - 4"); #define pin(plane, line) piny<Type8, plane, line>(p0, p1, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos) Type8 pout; if (mode == 1) { if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(0, tb_order)) { pout = mie_inter8(pin(0, 2), pin(1, 1), pin(1, 2), pin(1, 3)); } else { pout = mie_spot8(pin(0, 1), pin(0, 3), pin(1, 1), pin(1, 3), pin(1, 2)); } } else { if (is_latter_field(0, tb_order)) { pout = mie_inter8(pin(0, 1), pin(0, 2), pin(0, 3), pin(1, 2)); } else { pout = mie_spot8(pin(0, 1), pin(0, 3), pin(1, 1), pin(1, 3), pin(0, 2)); } } } else if (mode == 2) { if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(0, tb_order)) { pout = blend8(pin(1,1), pin(0,2), pin(1,3), *(uint2 *)sip, 0x02020202); } else { pout = blend8(pin(0,1), pin(1,2), pin(0,3), *(uint2 *)sip, 0x02020202); } } else { pout = blend8(pin(0,1), pin(0,2), pin(0,3), *(uint2 *)sip, 0x01010101); } } else if (mode == 3) { if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(0, tb_order)) { pout = blend8(pin(1, 1), pin(0, 2), pin(1, 3), *(uint2 *)sip, 0x06060606); } else { pout = blend8(pin(0, 1), pin(1, 2), pin(0, 3), *(uint2 *)sip, 0x06060606); } } else { pout = blend8(pin(0, 1), pin(0, 2), pin(0, 3), *(uint2 *)sip, 0x05050505); } } else if (mode == 4) { if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(0, tb_order)) { pout = deint8(pin(1,1), pin(1,3), pin(0,4), pin(1,5), pin(1,7), *(uint2 *)sip, 0x06060606); } else { pout = pin(1, 4); } } else { if (is_latter_field(0, tb_order)) { pout = deint8(pin(0,1), pin(0,3), pin(0,4), pin(0,5), pin(0,7), *(uint2 *)sip, 0x05050505); } else { pout = pin(0, 4); } } } *(Type8 *)dst = pout; #undef pin } __inline__ __device__ float get_uv(cudaTextureObject_t src_p0_0, cudaTextureObject_t src_p0_1, float ifx, int iy) { //static const float WEIGHT[4] = { // 7.0f / 8.0f, // 5.0f / 8.0f, // 3.0f / 8.0f, 
// 1.0f / 8.0f //}; //const float ifytex = ify + WEIGHT[iy & 3]; const float ify = ((iy - 2) >> 2) + 0.5f; const float ifytex = ify + (3.5f - (float)(iy & 3)) * 0.25f; return tex2D<float>((iy & 1) ? src_p0_1 : src_p0_0, ifx, ifytex); } template<typename Type, typename Type4, int mode> __device__ __inline__ void proc_uv( Type4 *__restrict__ dst, cudaTextureObject_t src_p0_0, cudaTextureObject_t src_p0_1, cudaTextureObject_t src_p1_0, cudaTextureObject_t src_p1_1, const uint8_t *__restrict__ sip, const int sip_pitch, const int src_width, const int src_height, const int imgx, //グローバルスレッドID x const int imgy, //グローバルスレッドID y const int lx, const int ly, //ローカルスレッドID const int tb_order, const uint8_t status ) { static_assert(-1 <= mode && mode <= 4, "mode should be -1 - 4"); #define PREREAD_Y ((mode == 4) ? 6 : 2) //必要になる前後の行のぶん #define SHARED_C_Y (SYN_BLOCK_Y * 2 + PREREAD_Y) #define SHARED_C_XY (SHARED_C_Y * SYN_BLOCK_INT_X) #define SOFFSET(x,y,depth) ((depth) * SHARED_C_XY + (y) * SYN_BLOCK_INT_X + (x)) __shared__ float s_tmp[3][SHARED_C_Y][SYN_BLOCK_INT_X]; __shared__ Type s_out[SYN_BLOCK_Y][SYN_BLOCK_INT_X * 4]; float *pSharedX = (float *)&s_tmp[0][0][0] + SOFFSET(lx, 0, 0); Type *psOut = &s_out[ly][lx]; int ix = blockIdx.x * SYN_BLOCK_INT_X * 4 + threadIdx.x; //YUV422ベースの色差インデックス 1スレッドは横方向に4つの色差pixelを担当 int iy = blockIdx.y * SYN_BLOCK_Y * 2 + threadIdx.y; //YUV422ベースの色差インデックス 1スレッドは縦方向に2つの色差pixelを担当 (出力時はYUV420なので1つの色差pixelを担当) float ifx = (float)ix + 0.5f; //この関数内でsipだけはYUV444のデータであることに注意 sip += iy * sip_pitch + ix * 4/*4画素/スレッド*/ * 2/*YUV444->YUV420*/ * sizeof(uint8_t); //sharedメモリ上に、YUV422相当のデータ(32x(16+PREREAD))を縦方向のテクスチャ補間で作ってから、 //blendを実行して、YUV422相当の合成データ(32x16)を作り、 //その後YUV420相当のデータ(32x8)をs_outに出力する //横方向に4回ループを回して、32pixel x4の出力結果をs_out(横:128pixel)に格納する for (int i = 0; i < 4; i++, ifx += SYN_BLOCK_INT_X, psOut += SYN_BLOCK_INT_X, sip += 2/*YUV444->YUV420*/) { //shredメモリに値をロード //縦方向のテクスチャ補間を使って、YUV422相当のデータとしてロード //横方向には補間しない if (ly < PREREAD_Y) { float *pShared = pSharedX + SOFFSET(0, ly, 0); pShared[SOFFSET(0,0,0)] = get_uv(src_p0_0, src_p0_1, ifx, iy - (PREREAD_Y >> 1)); pShared[SOFFSET(0,0,1)] = get_uv(src_p1_0, src_p1_1, ifx, iy - (PREREAD_Y >> 1)); } float *pShared = pSharedX + SOFFSET(0, ly + PREREAD_Y, 0); #pragma unroll for (int j = 0; j < 2; j++) { pShared[SOFFSET(0,j*SYN_BLOCK_Y,0)] = get_uv(src_p0_0, src_p0_1, ifx, iy + (PREREAD_Y >> 1) + j * SYN_BLOCK_Y); pShared[SOFFSET(0,j*SYN_BLOCK_Y,1)] = get_uv(src_p1_0, src_p1_1, ifx, iy + (PREREAD_Y >> 1) + j * SYN_BLOCK_Y); } __syncthreads(); #pragma unroll for (int j = 0; j < 2; j++) { //sipのy (境界チェックに必要) const int iy_sip = iy + j * SYN_BLOCK_Y; const uint8_t *psip = sip + SYN_BLOCK_Y * sip_pitch; // -1するのは、pinのlineは最小値が1だから float *pShared = pSharedX + SOFFSET(0, ly-1+j*SYN_BLOCK_Y, 0); #define pin(plane, line) (pShared[SOFFSET(0,line,plane)]) float pout; if (mode == 1) { if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(ly, tb_order)) { pout = mie_inter(pin(0, 2), pin(1, 1), pin(1, 2), pin(1, 3)); } else { pout = mie_spot(pin(0, 1), pin(0, 3), pin(1, 1), pin(1, 3), pin(1, 2)); } } else { if (is_latter_field(ly, tb_order)) { pout = mie_inter(pin(0, 1), pin(0, 2), pin(0, 3), pin(1, 2)); } else { pout = mie_spot(pin(0, 1), pin(0, 3), pin(1, 1), pin(1, 3), pin(0, 2)); } } } else if (mode == 2) { const uint8_t sip0 = (iy_sip < src_height) ? 
*psip : 0; if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(ly, tb_order)) { pout = blend(pin(1, 1), pin(0, 2), pin(1, 3), sip0, 0x02020202); } else { pout = blend(pin(0, 1), pin(1, 2), pin(0, 3), sip0, 0x02020202); } } else { pout = blend(pin(0, 1), pin(0, 2), pin(0, 3), sip0, 0x01010101); } } else if (mode == 3) { const uint8_t sip0 = (iy_sip < src_height) ? *psip : 0; if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(ly, tb_order)) { pout = blend(pin(1, 1), pin(0, 2), pin(1, 3), sip0, 0x06060606); } else { pout = blend(pin(0, 1), pin(1, 2), pin(0, 3), sip0, 0x06060606); } } else { pout = blend(pin(0, 1), pin(0, 2), pin(0, 3), sip0, 0x05050505); } } else if (mode == 4) { const uint8_t sip0 = (iy_sip < src_height) ? *psip : 0; if (status & AFS_FLAG_SHIFT0) { if (!is_latter_field(ly, tb_order)) { pout = deint(pin(1, 1), pin(1, 3), pin(0, 4), pin(1, 5), pin(1, 7), sip0, 0x06060606); } else { pout = pin(1, 4); } } else { if (is_latter_field(ly, tb_order)) { pout = deint(pin(0, 1), pin(0, 3), pin(0, 4), pin(0, 5), pin(0, 7), sip0, 0x05050505); } else { pout = pin(0, 4); } } } pSharedX[SOFFSET(0, ly+j*SYN_BLOCK_Y, 2)] = pout; } __syncthreads(); //sharedメモリ内でYUV422->YUV420 const int sy = (ly << 1) - (ly & 1); pShared = pSharedX + SOFFSET(0,sy,2); psOut[0] = (Type)(lerp(pShared[SOFFSET(0,0,0)], pShared[SOFFSET(0,2,0)], (ly & 1) ? 0.75f : 0.25f) * (float)(1<<(8*sizeof(Type))) + 0.5f); } __syncthreads(); //s_outに出力したものをメモリに書き出す if (imgx < (src_width >> 1) && imgy < (src_height >> 1)) { *dst = *(Type4 *)(&s_out[ly][lx << 2]); } #undef SHARED_C_Y #undef SHARED_C_XY #undef SOFFSET #undef PREREAD_Y } template<typename Type8, int mode> __inline__ __device__ void set_y_h_pos(const int imgx, const int y_h_center, int height, const int src_pitch, int& y_h1_pos, int& y_h2_pos, int& y_h3_pos, int& y_h4_pos, int& y_h5_pos, int& y_h6_pos, int& y_h7_pos, int& y_h8_pos) { if (mode == 4) { y_h4_pos = y_h_center * src_pitch + imgx * sizeof(Type8); y_h3_pos = y_h4_pos + ((y_h_center - 1 >= 0) ? -src_pitch : src_pitch); y_h2_pos = y_h3_pos + ((y_h_center - 2 >= 0) ? -src_pitch : src_pitch); y_h1_pos = y_h2_pos + ((y_h_center - 3 >= 0) ? -src_pitch : src_pitch); y_h5_pos = y_h4_pos + ((y_h_center < height - 1) ? src_pitch : -src_pitch); y_h6_pos = y_h5_pos + ((y_h_center < height - 2) ? src_pitch : -src_pitch); y_h7_pos = y_h6_pos + ((y_h_center < height - 3) ? src_pitch : -src_pitch); y_h8_pos = y_h7_pos + ((y_h_center < height - 4) ? src_pitch : -src_pitch); } else { y_h2_pos = y_h_center * src_pitch + imgx * sizeof(Type8); y_h1_pos = y_h2_pos + ((y_h_center - 1 >= 0) ? -src_pitch : src_pitch); y_h3_pos = y_h2_pos + ((y_h_center < height - 1) ? src_pitch : -src_pitch); y_h4_pos = y_h3_pos + ((y_h_center < height - 2) ? 
src_pitch : -src_pitch); } } template<typename Type, typename Type4, typename Type8, int mode> __global__ void kernel_synthesize_mode_1234_yuv420( uint8_t *__restrict__ dstY, uint8_t *__restrict__ dstU, uint8_t *__restrict__ dstV, const uint8_t *__restrict__ p0, const uint8_t *__restrict__ p1, const uint8_t *__restrict__ sip, cudaTextureObject_t src_u0_0, cudaTextureObject_t src_u0_1, cudaTextureObject_t src_u1_0, cudaTextureObject_t src_u1_1, cudaTextureObject_t src_v0_0, cudaTextureObject_t src_v0_1, cudaTextureObject_t src_v1_0, cudaTextureObject_t src_v1_1, const int width, const int height, const int src_pitch_y, const int dst_pitch_y, const int dst_pitch_uv, const int sip_pitch, const int tb_order, const uint8_t status) { const int lx = threadIdx.x; //スレッド数=SYN_BLOCK_INT_X const int ly = threadIdx.y; //スレッド数=SYN_BLOCK_Y const int imgx = blockIdx.x * SYN_BLOCK_INT_X /*blockDim.x*/ + lx; //グローバルスレッドID x const int imgy = blockIdx.y * SYN_BLOCK_Y /*blockDim.y*/ + ly; //グローバルスレッドID y if (imgx * 8 < width && imgy < (height >> 1)) { //y const int y_h_center = imgy << 1; int y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos; set_y_h_pos<Type8, mode>(imgx, y_h_center, height, src_pitch_y, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos); uint8_t *dst_y = dstY + y_h_center * dst_pitch_y + imgx * sizeof(Type8); const uint8_t *sip_y = sip + (y_h_center * sip_pitch + imgx * sizeof(uint8_t) * 8); proc_y<Type8, mode>(dst_y + 0, p0, p1, sip_y + 0, tb_order + 0, status, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos); proc_y<Type8, mode>(dst_y + dst_pitch_y, p0, p1, sip_y + sip_pitch, tb_order + 1, status, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos); } { //u const int uv_pos_dst = imgy * dst_pitch_uv + imgx * sizeof(Type4); Type4 *dst_u = (Type4 *)(dstU + uv_pos_dst); proc_uv<Type, Type4, mode>(dst_u, src_u0_0, src_u0_1, src_u1_0, src_u1_1, sip, sip_pitch, width, height, imgx, imgy, lx, ly, tb_order, status); //v Type4 *dst_v = (Type4 *)((uint8_t *)dstV + uv_pos_dst); proc_uv<Type, Type4, mode>(dst_v, src_v0_0, src_v0_1, src_v1_0, src_v1_1, sip, sip_pitch, width, height, imgx, imgy, lx, ly, tb_order, status); } } template<typename Type, typename Type4, typename Type8, int mode> __global__ void kernel_synthesize_mode_1234_yuv444( uint8_t *__restrict__ dstY, uint8_t *__restrict__ dstU, uint8_t *__restrict__ dstV, const uint8_t *__restrict__ p0Y, const uint8_t *__restrict__ p0U, const uint8_t *__restrict__ p0V, const uint8_t *__restrict__ p1Y, const uint8_t *__restrict__ p1U, const uint8_t *__restrict__ p1V, const uint8_t *__restrict__ sip, const int width, const int height, const int src_pitch, const int dst_pitch, const int sip_pitch, const int tb_order, const uint8_t status) { const int lx = threadIdx.x; //スレッド数=SYN_BLOCK_INT_X const int ly = threadIdx.y; //スレッド数=SYN_BLOCK_Y const int imgx = blockIdx.x * SYN_BLOCK_INT_X /*blockDim.x*/ + lx; //グローバルスレッドID x const int imgy = blockIdx.y * SYN_BLOCK_Y /*blockDim.y*/ + ly; //グローバルスレッドID y if (imgx * 8 < width && imgy < (height >> 1)) { //y const int y_h_center = imgy << 1; int y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos; set_y_h_pos<Type8, mode>(imgx, y_h_center, height, dst_pitch, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos); const int pix_offset = y_h_center * dst_pitch + imgx * sizeof(Type8); uint8_t *dst_y = dstY + pix_offset; uint8_t *dst_u = dstU + pix_offset; uint8_t *dst_v 
= dstV + pix_offset; const uint8_t *sip_y = sip + (y_h_center * sip_pitch + imgx * sizeof(uint8_t) * 8); proc_y<Type8, mode>(dst_y + 0, p0Y, p1Y, sip_y + 0, tb_order + 0, status, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos); proc_y<Type8, mode>(dst_y + dst_pitch, p0Y, p1Y, sip_y + sip_pitch, tb_order + 1, status, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos); proc_y<Type8, mode>(dst_u + 0, p0U, p1U, sip_y + 0, tb_order + 0, status, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos); proc_y<Type8, mode>(dst_u + dst_pitch, p0U, p1U, sip_y + sip_pitch, tb_order + 1, status, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos); proc_y<Type8, mode>(dst_v + 0, p0V, p1V, sip_y + 0, tb_order + 0, status, y_h1_pos, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos); proc_y<Type8, mode>(dst_v + dst_pitch, p0V, p1V, sip_y + sip_pitch, tb_order + 1, status, y_h2_pos, y_h3_pos, y_h4_pos, y_h5_pos, y_h6_pos, y_h7_pos, y_h8_pos); } } template<typename Type4, typename Type8, bool yuv420> __global__ void kernel_synthesize_mode_0( uint8_t *__restrict__ dstY, uint8_t *__restrict__ dstU, uint8_t *__restrict__ dstV, const uint8_t *__restrict__ p0Y, const uint8_t *__restrict__ p0U, const uint8_t *__restrict__ p0V, const uint8_t *__restrict__ p1Y, const uint8_t *__restrict__ p1U, const uint8_t *__restrict__ p1V, const int width, const int height, const int dst_pitch_y, const int dst_pitch_uv, const int src_pitch_y, const int src_pitch_uv, const int tb_order, const uint8_t status) { const int lx = threadIdx.x; //スレッド数=SYN_BLOCK_INT_X const int ly = threadIdx.y; //スレッド数=SYN_BLOCK_Y const int imgx = blockIdx.x * SYN_BLOCK_INT_X /*blockDim.x*/ + lx; const int imgy = blockIdx.y * SYN_BLOCK_Y /*blockDim.y*/ + ly; if (imgx * 8 < width) { const uint8_t *srcY = (is_latter_field(ly, tb_order) & (status & AFS_FLAG_SHIFT0)) ? p1Y : p0Y; const uint8_t *srcU = (is_latter_field(ly, tb_order) & (status & AFS_FLAG_SHIFT0)) ? p1U : p0U; const uint8_t *srcV = (is_latter_field(ly, tb_order) & (status & AFS_FLAG_SHIFT0)) ? 
p1V : p0V; { //y const int y_line = (blockIdx.y * SYN_BLOCK_Y * 2) + ly; Type8 *dst_y = ( Type8 *)(dstY + y_line * dst_pitch_y + imgx * sizeof(Type8)); const Type8 *src_y = (const Type8 *)(srcY + y_line * src_pitch_y + imgx * sizeof(Type8)); Type8 *dst_u, *dst_v; const Type8 *src_u, *src_v; if (y_line < height) { dst_y[0] = src_y[0]; if (!yuv420) { dst_u = (Type8 *)(dstU + y_line * dst_pitch_uv + imgx * sizeof(Type8)); dst_v = (Type8 *)(dstV + y_line * dst_pitch_uv + imgx * sizeof(Type8)); src_u = (const Type8 *)(srcU + y_line * src_pitch_uv + imgx * sizeof(Type8)); src_v = (const Type8 *)(srcV + y_line * src_pitch_uv + imgx * sizeof(Type8)); dst_u[0] = src_u[0]; dst_v[0] = src_v[0]; } } if (y_line + SYN_BLOCK_Y < height) { dst_y = ( Type8 *)(( uint8_t *)dst_y + dst_pitch_y * SYN_BLOCK_Y); src_y = (const Type8 *)((const uint8_t *)src_y + src_pitch_y * SYN_BLOCK_Y); dst_y[0] = src_y[0]; if (!yuv420) { dst_u = ( Type8 *)(( uint8_t *)dst_u + dst_pitch_uv * SYN_BLOCK_Y); dst_v = ( Type8 *)(( uint8_t *)dst_v + dst_pitch_uv * SYN_BLOCK_Y); src_u = (const Type8 *)((const uint8_t *)src_u + src_pitch_uv * SYN_BLOCK_Y); src_v = (const Type8 *)((const uint8_t *)src_v + src_pitch_uv * SYN_BLOCK_Y); dst_u[0] = src_u[0]; dst_v[0] = src_v[0]; } } } if (yuv420 && ((imgy << 1) < height)) { //u const int uv_pos_dst = imgy * dst_pitch_uv + imgx * sizeof(Type4); const int uv_pos_src = imgy * src_pitch_uv + imgx * sizeof(Type4); Type4 *dst_u = (Type4 *)(dstU + uv_pos_dst); Type4 *dst_v = (Type4 *)(dstV + uv_pos_dst); const Type4 *src_u = (const Type4 *)(srcU + uv_pos_src); const Type4 *src_v = (const Type4 *)(srcV + uv_pos_dst); dst_u[0] = src_u[0]; dst_v[0] = src_v[0]; } } } enum { TUNE_COLOR_BLACK = 0, TUNE_COLOR_GREY, TUNE_COLOR_BLUE, TUNE_COLOR_LIGHT_BLUE, }; __device__ __inline__ int synthesize_mode_tune_select_color(const uint8_t sip, const uint8_t status) { int ret = 0; if (status & AFS_FLAG_SHIFT0) { if (!(sip & 0x06)) ret = TUNE_COLOR_LIGHT_BLUE; else if (~sip & 0x02) ret = TUNE_COLOR_GREY; else if (~sip & 0x04) ret = TUNE_COLOR_BLUE; else ret = TUNE_COLOR_BLACK; } else { if (!(sip & 0x05)) ret = TUNE_COLOR_LIGHT_BLUE; else if (~sip & 0x01) ret = TUNE_COLOR_GREY; else if (~sip & 0x04) ret = TUNE_COLOR_BLUE; else ret = TUNE_COLOR_BLACK; } return ret; } template<typename Type, typename Type2, bool yuv420> __global__ void kernel_synthesize_mode_tune( uint8_t *__restrict__ dstY, uint8_t *__restrict__ dstU, uint8_t *__restrict__ dstV, const uint8_t *__restrict__ sip, const int width, const int height, const int dst_pitch_y, const int dst_pitch_uv, const int sip_pitch, const int bit_depth, const int tb_order, const uint8_t status) { const int lx = threadIdx.x; //スレッド数=SYN_BLOCK_INT_X const int ly = threadIdx.y; //スレッド数=SYN_BLOCK_Y const int imgc_x = blockIdx.x * blockDim.x + lx; const int imgc_y = blockIdx.y * blockDim.y + ly; const int imgy_x = imgc_x << 1; const int imgy_y = imgc_y << 1; static const int YUY2_COLOR[4][3] = { { 16, 128, 128 }, { 98, 128, 128 }, { 41, 240, 110 }, { 169, 166, 16 } }; if (imgy_x < width && imgy_y < height) { sip += imgy_y * sip_pitch + imgy_x * sizeof(uint8_t); uint8_t *dst_y = dstY + imgy_y * dst_pitch_y + imgy_x * sizeof(Type); uchar2 sip2 = *(uchar2 *)sip; const int c00 = synthesize_mode_tune_select_color(sip2.x, status); const int c01 = synthesize_mode_tune_select_color(sip2.y, status); sip2 = *(uchar2 *)(sip + sip_pitch); const int c10 = synthesize_mode_tune_select_color(sip2.x, status); const int c11 = synthesize_mode_tune_select_color(sip2.y, status); Type2 dst_y2; 
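    // Write a 2x2 block of luma samples (two per row via Type2), shifting the
    // 8-bit tuning palette up to the output bit depth.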
dst_y2.x = (Type)(YUY2_COLOR[c00][0] << (bit_depth - 8)); dst_y2.y = (Type)(YUY2_COLOR[c01][0] << (bit_depth - 8)); *(Type2 *)dst_y = dst_y2; dst_y2.x = (Type)(YUY2_COLOR[c10][0] << (bit_depth - 8)); dst_y2.y = (Type)(YUY2_COLOR[c11][0] << (bit_depth - 8)); *(Type2 *)(dst_y + dst_pitch_y) = dst_y2; if (yuv420) { uint8_t *dst_u = dstU + imgc_y * dst_pitch_uv + imgc_x * sizeof(Type); uint8_t *dst_v = dstV + imgc_y * dst_pitch_uv + imgc_x * sizeof(Type); *(Type *)dst_u = (Type)(((YUY2_COLOR[c00][1] + YUY2_COLOR[c01][1] + YUY2_COLOR[c10][1] + YUY2_COLOR[c11][1] + 2) << (bit_depth - 8)) >> 2); *(Type *)dst_v = (Type)(((YUY2_COLOR[c00][2] + YUY2_COLOR[c01][2] + YUY2_COLOR[c10][2] + YUY2_COLOR[c11][2] + 2) << (bit_depth - 8)) >> 2); } else { uint8_t *dst_u = dstU + imgy_y * dst_pitch_uv + imgy_x * sizeof(Type); uint8_t *dst_v = dstV + imgy_y * dst_pitch_uv + imgy_x * sizeof(Type); Type2 dst_u2; dst_u2.x = (Type)(YUY2_COLOR[c00][1] << (bit_depth - 8)); dst_u2.y = (Type)(YUY2_COLOR[c01][1] << (bit_depth - 8)); *(Type2 *)dst_u = dst_u2; dst_u2.x = (Type)(YUY2_COLOR[c10][1] << (bit_depth - 8)); dst_u2.y = (Type)(YUY2_COLOR[c11][1] << (bit_depth - 8)); *(Type2 *)(dst_u + dst_pitch_uv) = dst_u2; Type2 dst_v2; dst_v2.x = (Type)(YUY2_COLOR[c00][2] << (bit_depth - 8)); dst_v2.y = (Type)(YUY2_COLOR[c01][2] << (bit_depth - 8)); *(Type2 *)dst_v = dst_v2; dst_v2.x = (Type)(YUY2_COLOR[c10][2] << (bit_depth - 8)); dst_v2.y = (Type)(YUY2_COLOR[c11][2] << (bit_depth - 8)); *(Type2 *)(dst_v + dst_pitch_uv) = dst_v2; } } } template<typename Type> cudaError_t textureCreateSynthesize(cudaTextureObject_t& tex, cudaTextureFilterMode filterMode, cudaTextureReadMode readMode, uint8_t *ptr, int pitch, int width, int height) { cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = ptr; resDesc.res.pitch2D.pitchInBytes = pitch; resDesc.res.pitch2D.width = width; resDesc.res.pitch2D.height = height; resDesc.res.pitch2D.desc = cudaCreateChannelDesc<Type>(); cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.filterMode = filterMode; texDesc.readMode = readMode; texDesc.normalizedCoords = 0; return cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr); } template<typename Type, typename Type2, typename Type4, typename Type8, int mode, bool yuv420> cudaError_t run_synthesize(RGYFrameInfo *pFrameOut, const RGYFrameInfo *pFrame0, const RGYFrameInfo *pFrame1, uint8_t *sip, const int sipPitch, const int tb_order, const uint8_t status, const RGY_CSP csp, cudaStream_t stream) { auto cudaerr = cudaSuccess; auto pDstY = getPlane(pFrameOut, RGY_PLANE_Y); auto pDstU = getPlane(pFrameOut, RGY_PLANE_U); auto pDstV = getPlane(pFrameOut, RGY_PLANE_V); const auto p0Y = getPlane(pFrame0, RGY_PLANE_Y); const auto p0U = getPlane(pFrame0, RGY_PLANE_U); const auto p0V = getPlane(pFrame0, RGY_PLANE_V); const auto p1Y = getPlane(pFrame1, RGY_PLANE_Y); const auto p1U = getPlane(pFrame1, RGY_PLANE_U); const auto p1V = getPlane(pFrame1, RGY_PLANE_V); if ( p0Y.width != p1Y.width || p0Y.height != p1Y.height || p0U.width != p1U.width || p0U.height != p1U.height || p0V.width != p1V.width || p0V.height != p1V.height || p0Y.pitch != p1Y.pitch || p0U.pitch != p1U.pitch) { return cudaErrorUnknown; } if ( pDstU.pitch != pDstV.pitch || p0U.pitch != p0V.pitch || p1U.pitch != p1V.pitch) { return cudaErrorUnknown; } if (!yuv420) { if ( pDstY.pitch != pDstU.pitch || 
p0Y.pitch != p0U.pitch || p1Y.pitch != p1U.pitch) { return cudaErrorUnknown; } } if (mode < 0) { const dim3 blockSize(SYN_BLOCK_INT_X, SYN_BLOCK_Y); const dim3 gridSize(divCeil(pDstY.width, blockSize.x * 2), divCeil(pDstY.height, blockSize.y * 2)); kernel_synthesize_mode_tune<Type, Type2, yuv420><<<gridSize, blockSize, 0, stream>>>( pDstY.ptr, pDstU.ptr, pDstV.ptr, sip, pDstY.width, pDstY.height, pDstY.pitch, pDstU.pitch, sipPitch, RGY_CSP_BIT_DEPTH[csp], tb_order, status); } else if (mode == 0) { const dim3 blockSize(SYN_BLOCK_INT_X, SYN_BLOCK_Y); const dim3 gridSize(divCeil(pDstY.width, blockSize.x * 8), divCeil(pDstY.height, blockSize.y * 2)); kernel_synthesize_mode_0<Type4, Type8, yuv420><<<gridSize, blockSize, 0, stream>>>( pDstY.ptr, pDstU.ptr, pDstV.ptr, p0Y.ptr, p0U.ptr, p0V.ptr, p1Y.ptr, p1U.ptr, p1V.ptr, pDstY.width, pDstY.height, pDstY.pitch, pDstU.pitch, p0Y.pitch, p0U.pitch, tb_order, status); } else { const dim3 blockSize(SYN_BLOCK_INT_X, SYN_BLOCK_Y); const dim3 gridSize(divCeil(pDstY.width, blockSize.x * 8), divCeil(pDstY.height, blockSize.y * 2)); if (yuv420) { cudaTextureObject_t texP0U0, texP0U1, texP0V0, texP0V1, texP1U0, texP1U1, texP1V0,texP1V1; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP0U0, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p0U.ptr + p0U.pitch * 0, p0U.pitch * 2, p0U.width, p0U.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP0U1, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p0U.ptr + p0U.pitch * 1, p0U.pitch * 2, p0U.width, p0U.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP0V0, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p0V.ptr + p0V.pitch * 0, p0V.pitch * 2, p0V.width, p0V.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP0V1, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p0V.ptr + p0V.pitch * 1, p0V.pitch * 2, p0V.width, p0V.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP1U0, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p1U.ptr + p1U.pitch * 0, p1U.pitch * 2, p1U.width, p1U.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP1U1, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p1U.ptr + p1U.pitch * 1, p1U.pitch * 2, p1U.width, p1U.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP1V0, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p1V.ptr + p1V.pitch * 0, p1V.pitch * 2, p1V.width, p1V.height >> 1))) return cudaerr; if (cudaSuccess != (cudaerr = textureCreateSynthesize<Type>(texP1V1, cudaFilterModeLinear, cudaReadModeNormalizedFloat, p1V.ptr + p1V.pitch * 1, p1V.pitch * 2, p1V.width, p1V.height >> 1))) return cudaerr; kernel_synthesize_mode_1234_yuv420<Type, Type4, Type8, mode><<<gridSize, blockSize, 0, stream>>>( pDstY.ptr, pDstU.ptr, pDstV.ptr, p0Y.ptr, p1Y.ptr, sip, texP0U0, texP0U1, texP1U0, texP1U1, texP0V0, texP0V1, texP1V0, texP1V1, p0Y.width, p0Y.height, p0Y.pitch, pDstY.pitch, pDstU.pitch, sipPitch, tb_order, status); cudaerr = cudaGetLastError(); if (cudaerr != cudaSuccess) { return cudaerr; } cudaDestroyTextureObject(texP0U0); cudaDestroyTextureObject(texP0U1); cudaDestroyTextureObject(texP0V0); cudaDestroyTextureObject(texP0V1); cudaDestroyTextureObject(texP1U0); cudaDestroyTextureObject(texP1U1); cudaDestroyTextureObject(texP1V0); cudaDestroyTextureObject(texP1V1); } else { 
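            // YUV444: chroma planes share the luma geometry, so U and V go through the
            // same per-line path as luma with no texture-based chroma interpolation.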
kernel_synthesize_mode_1234_yuv444<Type, Type4, Type8, mode><<<gridSize, blockSize, 0, stream>>>( pDstY.ptr, pDstU.ptr, pDstV.ptr, p0Y.ptr, p0U.ptr, p0V.ptr, p1Y.ptr, p1U.ptr, p1V.ptr, sip, p0Y.width, p0Y.height, p0Y.pitch, pDstY.pitch, sipPitch, tb_order, status); } } return cudaGetLastError(); } cudaError_t NVEncFilterAfs::synthesize(int iframe, CUFrameBuf *pOut, CUFrameBuf *p0, CUFrameBuf *p1, AFS_STRIPE_DATA *sip, const NVEncFilterParamAfs *pAfsPrm, cudaStream_t stream) { struct synthesize_func { decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, 3, true>)* func[6]; synthesize_func( decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, -1, true>)* mode_tune, decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, 0, true>)* mode_0, decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, 1, true>)* mode_1, decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, 2, true>)* mode_2, decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, 3, true>)* mode_3, decltype(run_synthesize<uint8_t, uchar2, uint32_t, uint2, 4, true>)* mode_4) { func[0] = mode_tune; func[1] = mode_0; func[2] = mode_1; func[3] = mode_2; func[4] = mode_3; func[5] = mode_4; }; }; static const std::map<RGY_CSP, synthesize_func> synthesize_func_list = { { RGY_CSP_YV12, synthesize_func( run_synthesize<uint8_t, uchar2, uint32_t, uint2, -1, true>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 0, true>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 1, true>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 2, true>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 3, true>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 4, true>) }, { RGY_CSP_YV12_16, synthesize_func( run_synthesize<uint16_t, ushort2, uint64_t, uint4, -1, true>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 0, true>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 1, true>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 2, true>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 3, true>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 4, true>) }, { RGY_CSP_YUV444, synthesize_func( run_synthesize<uint8_t, uchar2, uint32_t, uint2, -1, false>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 0, false>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 1, false>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 2, false>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 3, false>, run_synthesize<uint8_t, uchar2, uint32_t, uint2, 4, false>) }, { RGY_CSP_YUV444_16, synthesize_func( run_synthesize<uint16_t, ushort2, uint64_t, uint4, -1, false>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 0, false>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 1, false>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 2, false>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 3, false>, run_synthesize<uint16_t, ushort2, uint64_t, uint4, 4, false>) } }; if (synthesize_func_list.count(pAfsPrm->frameIn.csp) == 0) { AddMessage(RGY_LOG_ERROR, _T("unsupported csp for afs_synthesize: %s\n"), RGY_CSP_NAMES[pAfsPrm->frameIn.csp]); return cudaErrorNotSupported; } int mode = pAfsPrm->afs.analyze; if (pAfsPrm->afs.tune) { mode = -1; } auto cudaerr = synthesize_func_list.at(pAfsPrm->frameIn.csp).func[mode+1]( &pOut->frame, &p0->frame, &p1->frame, sip->map.frame.ptr, sip->map.frame.pitch, pAfsPrm->afs.tb_order, m_status[iframe], pOut->frame.csp, stream); if (cudaerr != cudaSuccess) { return cudaerr; } return cudaSuccess; }
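// A minimal host-side sketch of the per-sample arithmetic in deint() above,
// assuming integer samples (maxval is 0xff for the packed 8-bit path, 0xffff
// for the 16-bit path). Both the integer and float variants implement the same
// 4-tap vertical filter (-1, 9, 9, -1)/16 over the surrounding lines; the
// stripe flag selects between the interpolated value and the untouched centre
// sample src4. The helper name is illustrative, not part of NVEnc.
#include <algorithm>
#include <cstdint>

static inline int deint_scalar(int s1, int s3, int s4, int s5, int s7,
                               uint32_t flag, uint32_t mask, int maxval) {
    const int sum_near = s3 + s5;   // the two nearest neighbouring lines
    const int sum_far  = s1 + s7;   // the two outer neighbouring lines
    // (9*(s3+s5) - (s1+s7)) / 16 with rounding, matching the __rhadd form on device
    const int interp = (sum_near + ((sum_near - sum_far) >> 3) + 1) >> 1;
    const int tmp = std::min(std::max(interp, 0), maxval);
    return ((flag & mask) == 0) ? tmp : s4;   // keep the centre sample where flagged
}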
#include "ew_op_gpu.h" #include <stdio.h> template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM_Forward( T* C_next, T* H_next, const T* __restrict__ C_prev, const T* __restrict__ H_prev, float forget_bias, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V c = load(C_prev, z, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V sig_i = ew_sig(i); V tan_u = ew_tanh(u); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, z, b); store(H_next, h_nxt, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM_Bias_Forward( T* C_next, T* H_next, const T* __restrict__ C_prev, const T* __restrict__ H_prev, const V* __restrict__ Bias, float forget_bias, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V ib = load(Bias, k4, b); V ub = load(Bias, k4 + K4, b); V fb = load(Bias, k4 + K4*2, b); V ob = load(Bias, k4 + K4*2 + K4, b); V c = load(C_prev, z, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V sig_i = ew_sig(ew_add(i, ib)); V sig_f = ew_sig(ew_add(ew_add(f, fb), forget_bias)); V sig_o = ew_sig(ew_add(o, ob)); V tan_u = ew_tanh(ew_add(u, ub)); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, z, b); store(H_next, h_nxt, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM4_Forward( T* C_next, T* H_next, const T* __restrict__ C, const T* __restrict__ I, const T* __restrict__ F, const T* __restrict__ O, const T* __restrict__ U, float forget_bias, int size) { int tid = threadIdx.x; int x = blockIdx.x*32 + tid; bool b = x < size; V c = load(C, x, b); V i = load(I, x, b); V f = load(F, x, b); V o = load(O, x, b); V u = load(U, x, b); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, x, b); store(H_next, h_nxt, x, b); } template <typename B, typename F, typename V> __global__ void __launch_bounds__(32) LSTM_Backward( B* DC, B* DH, const B* __restrict__ EC, const B* __restrict__ EH, const F* __restrict__ C_prev, const F* __restrict__ H_prev, int K, int K4, int ec_valid, float forget_bias) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V c = load(C_prev, z, b); V eh = load(EH, z, b); V ec = load(EC, z, b && ec_valid); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = 
ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, z, b); store(DH, dI, x0, b); store(DH, dU, x1, b); store(DH, dF, x2, b); store(DH, dO, x3, b); } template <typename B, typename F, typename V> __global__ void __launch_bounds__(32) LSTM_Bias_Backward( B* DC, B* DH, const B* __restrict__ EC, const B* __restrict__ EH, const F* __restrict__ C_prev, const F* __restrict__ H_prev, const V* __restrict__ Bias, int K, int K4, int ec_valid, float forget_bias) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V ib = load(Bias, k4, b); V ub = load(Bias, k4 + K4, b); V fb = load(Bias, k4 + K4*2, b); V ob = load(Bias, k4 + K4*2 + K4, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V c = load(C_prev, z, b); V eh = load(EH, z, b); V ec = load(EC, z, b && ec_valid); V sig_i = ew_sig(ew_add(i, ib)); V sig_f = ew_sig(ew_add(ew_add(f, fb), forget_bias)); V sig_o = ew_sig(ew_add(o, ob)); V tan_u = ew_tanh(ew_add(u, ub)); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, z, b); store(DH, dI, x0, b); store(DH, dU, x1, b); store(DH, dF, x2, b); store(DH, dO, x3, b); } template <typename B, typename A, typename V> __global__ void __launch_bounds__(32) LSTM4_Backward( B* DC, B* DI, B* DF, B* DO, B* DU, const B* __restrict__ EC, const B* __restrict__ EH, const A* __restrict__ C, const A* __restrict__ I, const A* __restrict__ F, const A* __restrict__ O, const A* __restrict__ U, int size, int ec_valid, float forget_bias) { int tid = threadIdx.x; int x = blockIdx.x*32 + tid; bool b = x < size; V c = load(C, x, b); V i = load(I, x, b); V f = load(F, x, b); V o = load(O, x, b); V u = load(U, x, b); V eh = load(EH, x, b); V ec = load(EC, x, b && ec_valid); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, x, b); store(DI, dI, x, b); store(DF, dF, x, b); store(DO, dO, x, b); store(DU, dU, x, b); } template <typename T, typename V> bool LSTM_Gates_Forward(CUstream stream, T* c_next, T* h_next, const T* c_prev, const T* h_prev, const float* bias, float forget_bias, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* C_next = (V*)c_next; V* H_next = (V*)h_next; const V* C_prev = (const V*)c_prev; const V* H_prev = (const V*)h_prev; const float4* Bias = (const float4*)bias; if (bias == NULL) LSTM_Forward<V,float4><<<grid,32,0,stream>>>(C_next, H_next, 
C_prev, H_prev, forget_bias, K, K4); else LSTM_Bias_Forward<V,float4><<<grid,32,0,stream>>>(C_next, H_next, C_prev, H_prev, Bias, forget_bias, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); if (bias == NULL) LSTM_Forward<T,float><<<grid,32,0,stream>>>(c_next, h_next, c_prev, h_prev, forget_bias, K, K4); else LSTM_Bias_Forward<T,float><<<grid,32,0,stream>>>(c_next, h_next, c_prev, h_prev, bias, forget_bias, K, K4); } return true; } template <typename T, typename V> bool LSTM4_Gates_Forward(CUstream stream, T* c_next, T* h_next, const T* c, const T* i, const T* f, const T* o, const T* u, float forget_bias, int N, int K) { int size = N * K; if ((size & 3) == 0) { size >>= 2; // use vector loads int grid = (size >> 5) + ((size & 31) != 0); V* C_next = (V*)c_next; V* H_next = (V*)h_next; const V* C = (const V*)c; const V* I = (const V*)i; const V* F = (const V*)f; const V* O = (const V*)o; const V* U = (const V*)u; LSTM4_Forward<V,float4><<<grid,32,0,stream>>>(C_next, H_next, C, I, F, O, U, forget_bias, size); } else { int grid = (size >> 5) + ((size & 31) != 0); LSTM4_Forward<T,float ><<<grid,32,0,stream>>>(c_next, h_next, c, i, f, o, u, forget_bias, size); } return true; } template <typename B, typename F, typename VB, typename VF> bool LSTM_Gates_Backward(CUstream stream, B* dc, B* dh, const B* ec, const B* eh, const F* c_prev, const F* h_prev, const float* bias, int N, int K, float forget_bias) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); VB* DC = ( VB*)dc; VB* DH = ( VB*)dh; const VB* EC = (const VB*)ec; const VB* EH = (const VB*)eh; const VF* C_prev = (const VF*)c_prev; const VF* H_prev = (const VF*)h_prev; const float4* Bias = (const float4*)bias; if (bias == NULL) LSTM_Backward<VB,VF,float4><<<grid,32,0,stream>>>(DC, DH, EC, EH, C_prev, H_prev, K, K4, ec != 0, forget_bias); else LSTM_Bias_Backward<VB,VF,float4><<<grid,32,0,stream>>>(DC, DH, EC, EH, C_prev, H_prev, Bias, K, K4, ec != 0, forget_bias); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); if (bias == NULL) LSTM_Backward< B, F,float ><<<grid,32,0,stream>>>(dc, dh, ec, eh, c_prev, h_prev, K, K4, ec != 0, forget_bias); else LSTM_Bias_Backward< B, F,float ><<<grid,32,0,stream>>>(dc, dh, ec, eh, c_prev, h_prev, bias, K, K4, ec != 0, forget_bias); } return true; } template <typename B, typename A, typename VB, typename VA> bool LSTM4_Gates_Backward(CUstream stream, B* dc, B* di, B* df, B* doo, B* du, const B* ec, const B* eh, const A* c, const A* i, const A* f, const A* o, const A* u, int N, int K, float forget_bias) { int size = N * K; if ((size & 3) == 0) { size >>= 2; // use vector loads int grid = (size >> 5) + ((size & 31) != 0); VB* DC = ( VB*)dc; VB* DI = ( VB*)di; VB* DF = ( VB*)df; VB* DO = ( VB*)doo; VB* DU = ( VB*)du; const VB* EC = (const VB*)ec; const VB* EH = (const VB*)eh; const VA* C = (const VA*)c; const VA* I = (const VA*)i; const VA* F = (const VA*)f; const VA* O = (const VA*)o; const VA* U = (const VA*)u; LSTM4_Backward<VB,VA,float4><<<grid,32,0,stream>>>(DC, DI, DF, DO, DU, EC, EH, C, I, F, O, U, size, ec != 0, forget_bias); } else { int grid = (size >> 5) + ((size & 31) != 0); LSTM4_Backward< B, A,float ><<<grid,32,0,stream>>>(dc, di, df, doo, du, ec, eh, c, i, f, o, u, size, ec != 0, forget_bias); } return true; } template bool LSTM_Gates_Forward <float,float4>(CUstream stream, float* c_next, float* h_next, const float* c_prev, const float* h_prev, const float* bias, float forget_bias, int N, int K); template bool 
LSTM_Gates_Forward <ehalf,ehalf4>(CUstream stream, ehalf* c_next, ehalf* h_next, const ehalf* c_prev, const ehalf* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Forward <bhalf,bhalf4>(CUstream stream, bhalf* c_next, bhalf* h_next, const bhalf* c_prev, const bhalf* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Backward<float,float,float4,float4>(CUstream stream, float* dc, float* dh, const float* ec, const float* eh, const float* c_prev, const float* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM_Gates_Backward<ehalf,ehalf,ehalf4,ehalf4>(CUstream stream, ehalf* dc, ehalf* dh, const ehalf* ec, const ehalf* eh, const ehalf* c_prev, const ehalf* h_prev, const float* bias, int N, int K, float forget_bias); //template bool LSTM_Gates_Backward<float,ehalf,float4,ehalf4>(CUstream stream, float* dc, float* dh, const float* ec, const float* eh, const ehalf* c_prev, const ehalf* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM_Gates_Backward<bhalf,bhalf,bhalf4,bhalf4>(CUstream stream, bhalf* dc, bhalf* dh, const bhalf* ec, const bhalf* eh, const bhalf* c_prev, const bhalf* h_prev, const float* bias, int N, int K, float forget_bias); //template bool LSTM_Gates_Backward<float,bhalf,float4,bhalf4>(CUstream stream, float* dc, float* dh, const float* ec, const float* eh, const bhalf* c_prev, const bhalf* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM4_Gates_Forward <float,float4>(CUstream stream, float* c_next, float* h_next, const float* c, const float* i, const float* f, const float* o, const float* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Forward <ehalf,ehalf4>(CUstream stream, ehalf* c_next, ehalf* h_next, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Forward <bhalf,bhalf4>(CUstream stream, bhalf* c_next, bhalf* h_next, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Backward<float,float,float4,float4>(CUstream stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const float* c, const float* i, const float* f, const float* o, const float* u, int N, int K, float forget_bias); template bool LSTM4_Gates_Backward<ehalf,ehalf,ehalf4,ehalf4>(CUstream stream, ehalf* dc, ehalf* di, ehalf* df, ehalf* doo, ehalf* du, const ehalf* ec, const ehalf* eh, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, int N, int K, float forget_bias); //template bool LSTM4_Gates_Backward<float,ehalf,float4,ehalf4>(CUstream stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, int N, int K, float forget_bias); template bool LSTM4_Gates_Backward<bhalf,bhalf,bhalf4,bhalf4>(CUstream stream, bhalf* dc, bhalf* di, bhalf* df, bhalf* doo, bhalf* du, const bhalf* ec, const bhalf* eh, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, int N, int K, float forget_bias); //template bool LSTM4_Gates_Backward<float,bhalf,float4,bhalf4>(CUstream stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, int N, int K, float 
template <typename T, typename V>
__global__ void __launch_bounds__(32) Split4(
    T* Z0, T* Z1, T* Z2, T* Z3,
    const T* __restrict__ X,
    int K, int K4)
{
    int tid = threadIdx.x;
    int k   = blockIdx.x;
    int n   = blockIdx.y;

    int k4 = k*32 + tid;
    int i0 = n*K + k4;
    int i1 = i0 + K4;
    int i2 = i1 + K4;
    int i3 = i2 + K4;
    int z  = n*K4 + k4;

    bool b = k4 < K4;

    V x0 = load(X, i0, b);
    V x1 = load(X, i1, b);
    V x2 = load(X, i2, b);
    V x3 = load(X, i3, b);

    store(Z0, x0, z, b);
    store(Z1, x1, z, b);
    store(Z2, x2, z, b);
    store(Z3, x3, z, b);
}

template <typename T, typename V>
__global__ void __launch_bounds__(32) Concat4(
    T* DX,
    const T* __restrict__ DZ0, const T* __restrict__ DZ1, const T* __restrict__ DZ2, const T* __restrict__ DZ3,
    int K, int K4)
{
    int tid = threadIdx.x;
    int k   = blockIdx.x;
    int n   = blockIdx.y;

    int k4 = k*32 + tid;
    int i0 = n*K + k4;
    int i1 = i0 + K4;
    int i2 = i1 + K4;
    int i3 = i2 + K4;
    int z  = n*K4 + k4;

    bool b = k4 < K4;

    V dx0 = load(DZ0, z, b);
    V dx1 = load(DZ1, z, b);
    V dx2 = load(DZ2, z, b);
    V dx3 = load(DZ3, z, b);

    store(DX, dx0, i0, b);
    store(DX, dx1, i1, b);
    store(DX, dx2, i2, b);
    store(DX, dx3, i3, b);
}

template <typename T, typename V>
bool Split4_Forward(CUstream stream, T* z0, T* z1, T* z2, T* z3, const T* x, int N, int K)
{
    int K4 = K >> 2;
    if ((K4 & 3) == 0)
    {
        K  >>= 2; // use vector loads
        K4 >>= 2;
        dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N);
        V* Z0 = (V*)z0;
        V* Z1 = (V*)z1;
        V* Z2 = (V*)z2;
        V* Z3 = (V*)z3;
        const V* X = (const V*)x;
        Split4<V,float4><<<grid,32,0,stream>>>(Z0, Z1, Z2, Z3, X, K, K4);
    }
    else
    {
        dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N);
        Split4<T,float ><<<grid,32,0,stream>>>(z0, z1, z2, z3, x, K, K4);
    }
    return true;
}

template <typename T, typename V>
bool Concat4_Forward(CUstream stream, T* dx, const T* z0, const T* z1, const T* z2, const T* z3, int N, int K)
{
    int K4 = K >> 2;
    if ((K4 & 3) == 0)
    {
        K  >>= 2; // use vector loads
        K4 >>= 2;
        dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N);
        V* DX = (V*)dx;
        const V* Z0 = (const V*)z0;
        const V* Z1 = (const V*)z1;
        const V* Z2 = (const V*)z2;
        const V* Z3 = (const V*)z3;
        Concat4<V,float4><<<grid,32,0,stream>>>(DX, Z0, Z1, Z2, Z3, K, K4);
    }
    else
    {
        dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N);
        Concat4<T,float ><<<grid,32,0,stream>>>(dx, z0, z1, z2, z3, K, K4);
    }
    return true;
}

template bool Split4_Forward <float,float4>(CUstream stream, float* z0, float* z1, float* z2, float* z3, const float* x, int N, int K);
template bool Split4_Forward <ehalf,ehalf4>(CUstream stream, ehalf* z0, ehalf* z1, ehalf* z2, ehalf* z3, const ehalf* x, int N, int K);
template bool Split4_Forward <bhalf,bhalf4>(CUstream stream, bhalf* z0, bhalf* z1, bhalf* z2, bhalf* z3, const bhalf* x, int N, int K);

template bool Concat4_Forward<float,float4>(CUstream stream, float* dx, const float* z0, const float* z1, const float* z2, const float* z3, int N, int K);
template bool Concat4_Forward<ehalf,ehalf4>(CUstream stream, ehalf* dx, const ehalf* z0, const ehalf* z1, const ehalf* z2, const ehalf* z3, int N, int K);
template bool Concat4_Forward<bhalf,bhalf4>(CUstream stream, bhalf* dx, const bhalf* z0, const bhalf* z1, const bhalf* z2, const bhalf* z3, int N, int K);
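// CPU reference for the Split4 / Concat4 layout above (illustrative sketch added
// for documentation; it is not used by the kernels): X is a row-major [N, K] tensor
// with K = 4*K4.  Split4 copies the j-th K4-wide slice of every row into Zj, and
// Concat4 writes the four slices back, so concat4(split4(X)) == X.  The vectorized
// kernel path performs the same copy four elements at a time.
#include <string.h>
static void split4_reference(float* z[4], const float* x, int N, int K4)
{
    for (int n = 0; n < N; ++n)
        for (int j = 0; j < 4; ++j)
            memcpy(z[j] + n*K4, x + (n*4 + j)*K4, K4 * sizeof(float));
}
static void concat4_reference(float* x, const float* const z[4], int N, int K4)
{
    for (int n = 0; n < N; ++n)
        for (int j = 0; j < 4; ++j)
            memcpy(x + (n*4 + j)*K4, z[j] + n*K4, K4 * sizeof(float));
}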
// mean = mean(x, axis=1)
// std  = std(x, axis=1)
// cutoff = mean + alpha*std
// y = fmaxf(x, cutoff) - cutoff;
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) sparse_relu_forward(
    T* Y, const T* __restrict__ X, float alpha, uint K, float rcpK)
{
    int tid = threadIdx.x;
    int n   = blockIdx.x;

    int offset = n*K + tid;

    // Mean
    const T* X1 = X + offset;
    V v_mean1, v_mean2;
    ew_zero(v_mean1);
    ew_zero(v_mean2);
    #pragma unroll 4
    for (int k = tid; k < K; k += THREADS)
    {
        V x = load(X1);
        v_mean1 = ew_add(v_mean1, x);
        v_mean2 = ew_add(v_mean2, ew_sqr(x));
        X1 += THREADS;
    }
    float2 mean;
    mean.x = ew_sum(v_mean1) * rcpK;
    mean.y = ew_sum(v_mean2) * rcpK;

    // reduce within warp
    for (int i = 16; i > 0; i >>= 1)
    {
        mean.x += shfl_xor(mean.x, i);
        mean.y += shfl_xor(mean.y, i);
    }
    // if using more than 1 warp, further reduce with shared memory
    if (THREADS > 32)
    {
        __shared__ float2 Share[32];

        // first thread of each warp stores to shared
        if ((tid & 31) == 0)
            Share[tid/32] = mean;
        __syncthreads();

        if (tid < 32)
        {
            // first warp loads all prior reductions
            mean = Share[tid];
            // reduce within this first warp
            for (int i = THREADS/64; i > 0; i >>= 1)
            {
                mean.x += shfl_xor(mean.x, i);
                mean.y += shfl_xor(mean.y, i);
            }
            // output final reduction to shared
            Share[tid] = mean;
        }
        __syncthreads();

        // broadcast result to all threads
        mean = Share[0];
    }
    // var = avg(x**2) - avg(x)**2
    // std = sqrt(var)
    float std = sqrtf(precise_sub(mean.y, mean.x*mean.x));

    // Apply cutoff: y = relu(max(x, cutoff) - cutoff)
    X += offset;
    Y += offset;
    for (int k = tid; k < K; k += THREADS)
    {
        float cutoff = mean.x + alpha*std;
        V x = load(X);
        V y = ew_sub(ew_maximum(x, cutoff), cutoff);
        store(Y, ew_relu(y), 0, true);
        X += THREADS;
        Y += THREADS;
    }
}

template <typename T, typename V>
bool SparseReluForward(CUstream stream, T* y, const T* x, float alpha, uint K, uint N)
{
    dim3 grid(N, 1, 1);
    float rcpK = 1.0f / (float)K;

    if ((K & 3) == 0)
    {
        K >>= 2; // use vector loads
        V* Y = (V*)y;
        const V* X = (const V*)x;
        // if (K >= 1024)
        //     sparse_relu_forward<V,float4,1024><<<grid,1024,0,stream>>>(Y, X, alpha, K, rcpK);
        if (K >= 256)
            sparse_relu_forward<V,float4, 256><<<grid, 256,0,stream>>>(Y, X, alpha, K, rcpK);
        else
            sparse_relu_forward<V,float4,  64><<<grid,  64,0,stream>>>(Y, X, alpha, K, rcpK);
    }
    else
    {
        // if (K >= 1024)
        //     sparse_relu_forward<T,float ,1024><<<grid,1024,0,stream>>>(y, x, alpha, K, rcpK);
        if (K >= 256)
            sparse_relu_forward<T,float , 256><<<grid, 256,0,stream>>>(y, x, alpha, K, rcpK);
        else
            sparse_relu_forward<T,float ,  64><<<grid,  64,0,stream>>>(y, x, alpha, K, rcpK);
    }
    return true; // TODO
}

template bool SparseReluForward<float,float4>(CUstream stream, float* y, const float* x, float alpha, uint K, uint N);
template bool SparseReluForward<ehalf,ehalf4>(CUstream stream, ehalf* y, const ehalf* x, float alpha, uint K, uint N);
template bool SparseReluForward<bhalf,bhalf4>(CUstream stream, bhalf* y, const bhalf* x, float alpha, uint K, uint N);

#endif
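// CPU reference for sparse_relu_forward above (illustrative sketch added for
// documentation; plain host-side C/C++ independent of the guarded CUDA code, and
// not called by the kernels).  Per row: mean and population std over K elements,
// a cutoff of mean + alpha*std, then y = relu(fmaxf(x, cutoff) - cutoff), matching
// the comment block that documents the kernel.
#include <math.h>
static void sparse_relu_reference(float* y, const float* x, float alpha, int K, int N)
{
    for (int n = 0; n < N; ++n)
    {
        const float* xr = x + n*K;
        float m1 = 0.0f, m2 = 0.0f;
        for (int k = 0; k < K; ++k)
        {
            m1 += xr[k];
            m2 += xr[k] * xr[k];
        }
        m1 /= (float)K;
        m2 /= (float)K;
        float std    = sqrtf(m2 - m1*m1);  // var = avg(x**2) - avg(x)**2
        float cutoff = m1 + alpha*std;
        for (int k = 0; k < K; ++k)
        {
            float v = fmaxf(xr[k], cutoff) - cutoff;
            y[n*K + k] = fmaxf(v, 0.0f);   // relu is a no-op here since v >= 0
        }
    }
}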