Columns: text (string, lengths 2.5k to 6.39M) · kind (string, 3 classes)
template<typename Dtype> __global__ static void _fix_neuron_v1(const int N, const Dtype* src, const Dtype* fragpos, Dtype* dst, int val_max, int keep_scale, int method){ NNDCT_KERNEL_LOOP(index, N){ //method: //1: dummy //2: for CNN feature map //3: for weights and bias //4: for RNN feature map int result_ = 0; Dtype val_amp = pow(2, *fragpos); _fix_neuron_v2_device(src[index], result_, val_max, val_amp, method); if(0 != keep_scale) dst[index] = Dtype(result_) * (1 / val_amp); else dst[index] = result_; } } template<typename Dtype> __global__ static void _fix_neuron_v2(const int N, const Dtype* src, Dtype* dst, int val_max, Dtype val_amp, int keep_scale, int method){ NNDCT_KERNEL_LOOP(index, N){ //method: //1: dummy //2: for CNN feature map //3: for weights and bias //4: for RNN feature map int result_ = 0; _fix_neuron_v2_device(src[index], result_, val_max, val_amp, method); if(0 != keep_scale) dst[index] = Dtype(result_) * (1 / val_amp); else dst[index] = result_; } } template<typename Dtype> void cuda_fix_neuron_v1(const int N, const Dtype* src, const Dtype* fragpos, Dtype* dst, int val_max, int keep_scale, int method){ _fix_neuron_v1<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>( N, src, fragpos, dst, val_max, keep_scale, method); } template void cuda_fix_neuron_v1<float>(const int N, const float* src, const float* fragpos, float* dst, int val_max, int keep_scale, int method); template void cuda_fix_neuron_v1<double>(const int N, const double* src, const double* fragpos, double* dst, int val_max, int keep_scale, int method); template<typename Dtype> void cuda_fix_neuron_v2(const int N, const Dtype* src, Dtype* dst, int val_max, Dtype val_amp, int keep_scale, int method){ _fix_neuron_v2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>( N, src, dst, val_max, val_amp, keep_scale, method); } template void cuda_fix_neuron_v2<float>(const int N, const float* src, float* dst, int val_max, float val_amp, int keep_scale, int method); template void cuda_fix_neuron_v2<double>(const int N, const double* src, double* dst, int val_max, double val_amp, int keep_scale, int method); template<typename Dtype> void cuda_diff_S(const int N, const Dtype* src, Dtype* buffer, Dtype* output, int bitwidth, int range, int method){ // Calc search range for scale int max_scale; Dtype fix_lb = -pow(2, bitwidth - 1) - 0.5; Dtype fix_ub = pow(2, bitwidth - 1) - 0.5; Dtype x_max, x_min; cuda_max(N, src, buffer); cudaMemcpy(&x_max, buffer, sizeof(Dtype), cudaMemcpyDeviceToHost); cuda_min(N, src, buffer); cudaMemcpy(&x_min, buffer, sizeof(Dtype), cudaMemcpyDeviceToHost); // Find max_scale Dtype step = std::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) { max_scale = 18; } else { max_scale = floor(log2(1 / step)); } #if 0 printf( "$$$$$$$$$$$ bw: %d range: %d method: %d\n", bitwidth, range, method ); printf( "$$$$$$$$$$$ max: %g min: %g\n", x_max, x_min ); printf( "$$$$$$$$$$$ overflow scale is %d\n", max_scale ); #endif // Find fix pos in range [max_scale + range , max_scale] Dtype final_scale = max_scale; Dtype fixed_diff_min = FLT_MAX; for (int scale = max_scale; scale < max_scale + range; scale++) { cuda_fix_neuron_v2(N, src, buffer, 1<<(bitwidth-1), Dtype(pow(2, scale)), 1, method); cuda_sub(N, src, buffer); cuda_pow(N, buffer, Dtype(2)); Dtype fixed_diff; cuda_sum_inplace(N, buffer); cudaMemcpy(&fixed_diff, buffer, sizeof(Dtype), cudaMemcpyDeviceToHost); if (fixed_diff < fixed_diff_min) { final_scale = scale; fixed_diff_min = fixed_diff; } } final_scale = final_scale > 12 ? 
12: final_scale; cuda_set(1, output, final_scale); #if 0 printf( "$$$$$$$$$$$ diffs scale is %g, setting to %p...\n", final_scale, output ); fflush(stdout); #endif } template void cuda_diff_S<float>(const int N, const float* src, float* buffer, float* output, int bitwidth, int range, int method); template void cuda_diff_S<double>(const int N, const double* src, double* buffer, double* output, int bitwidth, int range, int method); template<typename Dtype> __global__ static void _sigmoid_table_lookup(const int N, const int fragpos, const Dtype scale, const Dtype fuzz, const Dtype* input, const Dtype* table, Dtype* output) { NNDCT_KERNEL_LOOP(i, N){ if (input[i] >= 8.0) output[i] = 1.0 - fuzz; else if (input[i] < -8.0) output[i] = 0.0; else { int x = int(input[i] * scale); int pos = 0; if (x >= 0) { if (fragpos >= 7) pos = (x >> (fragpos - 7)) % 1024; else pos = (x << (7 - fragpos)) % 1024; output[i] = table[pos + 1024] * fuzz; } else { //if (fragpos >= 7) // pos = (abs(x) >> (fragpos - 7)) % 1024; //else // pos = (x << (7 - fragpos)) % 1024; pos = abs(int(floor(x / pow(2.0, (fragpos - 7))))) % 1024; if (x >> fragpos == -8 && pos == 0) output[i] = table[pos] * fuzz; else output[i] = table[1024 - pos] * fuzz; } } } } template<typename Dtype> void cuda_sigmoid_table_lookup(const int N, const Dtype* input, const Dtype* table, Dtype* output, int fragpos) { Dtype scale = pow(2.0, fragpos); Dtype fuzz = 1.0 / 32768; _sigmoid_table_lookup<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>( N, fragpos, scale, fuzz, input, table, output); } template void cuda_sigmoid_table_lookup<float>(const int N, const float* input, const float* table, float* output, int fragpos); template void cuda_sigmoid_table_lookup<double>(const int N, const double* input, const double* table, double* output, int fragpos); template<typename Dtype> __global__ static void _tanh_table_lookup(const int N, const int fragpos, const Dtype scale, const Dtype fuzz, const Dtype* input, const Dtype* table, Dtype* output) { NNDCT_KERNEL_LOOP(i, N){ if (input[i] >= 4.0) output[i] = 1.0 - fuzz; else if (input[i] < -4.0) output[i] = -1.0; else { int x = int(input[i] * scale); int pos = 0; if (x >= 0) { if (fragpos >= 8) pos = (x >> (fragpos - 8)) % 1024; else pos = (x << (8 - fragpos)) % 1024; output[i] = table[pos + 1024] * fuzz; } else { //if (fragpos >= 8) // pos = (abs(x) >> (fragpos - 8)) % 1024; //else // pos = (abs(x) << (8 - fragpos)) % 1024; pos = abs(int(floor(x / pow(2.0, (fragpos - 8))))) % 1024; if (x >> fragpos == -4 && pos == 0) output[i] = table[pos] * fuzz; else output[i] = table[1024 - pos] * fuzz; } } } } template<typename Dtype> void cuda_tanh_table_lookup(const int N, const Dtype* input, const Dtype* table, Dtype* output, int fragpos) { Dtype scale = pow(2.0, fragpos); Dtype fuzz = 1.0 / 32768; _tanh_table_lookup<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>( N, fragpos, scale, fuzz, input, table, output); } template void cuda_tanh_table_lookup<float>(const int N, const float* input, const float* table, float* output, int fragpos); template void cuda_tanh_table_lookup<double>(const int N, const double* input, const double* table, double* output, int fragpos);
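The kernels above quantize with val_amp = 2^fragpos, bound the integer code by val_max, and optionally rescale back when keep_scale is set; cuda_diff_S then searches fragpos by minimizing the squared quantization error. The device helper _fix_neuron_v2_device is not shown in this sample, so the host-side reference below is only a minimal sketch that assumes round-to-nearest with symmetric saturation; the per-method rounding variants (CNN feature map, weights/bias, RNN) may differ.

// Minimal CPU reference for the fixed-point mapping (rounding mode assumed).
#include <algorithm>
#include <cmath>
#include <cstdio>

static float fix_neuron_ref(float x, int bitwidth, int fragpos, bool keep_scale) {
  const int   val_max = 1 << (bitwidth - 1);       // e.g. 128 for 8-bit
  const float val_amp = std::pow(2.0f, fragpos);   // scale = 2^fragpos
  // Scale, round to nearest (assumption), saturate to [-val_max, val_max - 1].
  int q = static_cast<int>(std::nearbyint(x * val_amp));
  q = std::min(std::max(q, -val_max), val_max - 1);
  // keep_scale returns the dequantized float, otherwise the raw integer code.
  return keep_scale ? q / val_amp : static_cast<float>(q);
}

int main() {
  // 8-bit with 5 fractional bits: step 1/32, representable range [-4, 4).
  for (float x : {0.1f, 1.234f, -3.999f, 10.0f})
    std::printf("%8.4f -> %8.4f\n", x, fix_neuron_ref(x, 8, 5, true));
  return 0;
}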
the_stack
namespace SparseOperationKit { struct IdenticalHash { using result_type = uint32_t; IdenticalHash() = default; template <typename TKey> static __device__ result_type compute(TKey const &key) { return static_cast<result_type>(key); } }; /*It will dispatcher keys based on key % GPU_NUM */ template <typename KeyType, typename Hasher> __global__ void selectKernel(KeyType const *input_keys, size_t num_keys, KeyType *output_keys, uint32_t *output_indices, size_t chunks, size_t max_chunk_size, uint32_t *chunk_sizes, const size_t ITEMS_PER_GPU_PER_WARP) { // set indices const size_t thread_cnt = blockDim.x * blockDim.y; const size_t stride_size = thread_cnt * gridDim.x; const size_t items_per_warp = chunks * ITEMS_PER_GPU_PER_WARP; const size_t items_per_block = KEY_WARPS_PER_BLOCK * items_per_warp; const size_t gpu_cnt_by_warps_cnt = chunks * KEY_WARPS_PER_BLOCK; int thread_idx = threadIdx.x + blockDim.x * threadIdx.y; // set ptrs in smem extern __shared__ char smem[]; KeyType *key_smem = (KeyType *)smem; uint32_t *idx_smem = (uint32_t *)(key_smem + items_per_block); uint32_t *cnt_smem = idx_smem + items_per_block; if (thread_idx < gpu_cnt_by_warps_cnt) { cnt_smem[thread_idx] = 0; } // if (thread_idx + blockIdx.x * thread_cnt < chunks) { // chunk_sizes[thread_idx] = 0; // } __syncthreads(); // do offset KeyType *curr_warp_key_smem = key_smem + threadIdx.y * items_per_warp; uint32_t *curr_warp_idx_smem = idx_smem + threadIdx.y * items_per_warp; uint32_t *curr_warp_cnt_smem = cnt_smem + threadIdx.y * chunks; uint32_t padded_input_size = (num_keys + warpSize - 1) / warpSize * warpSize; // loop on input_keys for (size_t idx = thread_idx + blockIdx.x * thread_cnt; idx < padded_input_size; idx += stride_size) { KeyType key = 0; size_t chunk_id = 0; uint32_t curr_local_idx = 0; uint32_t offset = 0; uint32_t is_full = 0; if (idx < num_keys) { key = input_keys[idx]; chunk_id = Hasher::compute(key) % chunks; curr_local_idx = atomicAdd(curr_warp_cnt_smem + chunk_id, 1); offset = chunk_id * ITEMS_PER_GPU_PER_WARP + curr_local_idx; curr_warp_key_smem[offset] = key; curr_warp_idx_smem[offset] = idx; } is_full = (curr_local_idx == ITEMS_PER_GPU_PER_WARP - warpSize); uint32_t ballot_val = __ballot_sync(0xffffffff, is_full); // __syncwarp(); int leading_zeros = __clz(ballot_val); while (leading_zeros < warpSize) { uint32_t full_gpu_idx = __shfl_sync(0xffffffff, chunk_id, warpSize - leading_zeros - 1); ballot_val &= (((uint32_t)0xffffffff) >> (leading_zeros + 1)); leading_zeros = __clz(ballot_val); uint32_t curr_global_idx = 0; if (threadIdx.x == 0) { curr_global_idx = atomicAdd(chunk_sizes + full_gpu_idx, curr_warp_cnt_smem[full_gpu_idx]); } curr_global_idx = __shfl_sync(0xffffffff, curr_global_idx, 0); // __syncwarp(); for (size_t output_idx = threadIdx.x; output_idx < curr_warp_cnt_smem[full_gpu_idx]; output_idx += warpSize) { output_keys[full_gpu_idx * max_chunk_size + curr_global_idx + output_idx] = curr_warp_key_smem[full_gpu_idx * ITEMS_PER_GPU_PER_WARP + output_idx]; output_indices[full_gpu_idx * max_chunk_size + curr_global_idx + output_idx] = curr_warp_idx_smem[full_gpu_idx * ITEMS_PER_GPU_PER_WARP + output_idx]; } // __syncwarp(); } __syncwarp(); if (is_full) { curr_warp_cnt_smem[chunk_id] = 0; } __syncwarp(); } // tail for (size_t has_gpu_idx = 0; has_gpu_idx < chunks; ++has_gpu_idx) { uint32_t curr_gpu_items = curr_warp_cnt_smem[has_gpu_idx]; if (curr_gpu_items == 0) { continue; } uint32_t curr_global_idx = 0; if (threadIdx.x == 0) { curr_global_idx = atomicAdd(chunk_sizes + has_gpu_idx, 
curr_warp_cnt_smem[has_gpu_idx]); } curr_global_idx = __shfl_sync(0xffffffff, curr_global_idx, 0); for (size_t output_idx = threadIdx.x; output_idx < curr_warp_cnt_smem[has_gpu_idx]; output_idx += warpSize) { output_keys[has_gpu_idx * max_chunk_size + curr_global_idx + output_idx] = curr_warp_key_smem[has_gpu_idx * ITEMS_PER_GPU_PER_WARP + output_idx]; output_indices[has_gpu_idx * max_chunk_size + curr_global_idx + output_idx] = curr_warp_idx_smem[has_gpu_idx * ITEMS_PER_GPU_PER_WARP + output_idx]; } __syncwarp(); } } template <typename KeyType, typename ValueType> class All2AllInputDispatcher : public Dispatcher { public: explicit All2AllInputDispatcher(ConstructionContext_t context) : Dispatcher(context), resource_mgr_(base_context()->get_resource_mgr()), num_keys_per_rank_(base_context()->get_replica_batch_size() * base_context()->get_slot_num() * base_context()->get_nnz_per_slot()), ITEMS_PER_GPU_PER_WARP_(0) { const size_t local_gpu_count = resource_mgr_->get_local_gpu_count(); selected_keys_buf_.reserve(local_gpu_count); selected_indices_buf_.reserve(local_gpu_count); num_selected_keys_.reserve(local_gpu_count); num_exchanged_keys_.reserve(local_gpu_count); h_num_selected_keys_.reserve(local_gpu_count); h_num_exchanged_keys_.reserve(local_gpu_count); exchanged_keys_buf_.reserve(local_gpu_count); h_recv_chunk_offsets_.reserve(local_gpu_count); const size_t max_smem_size = resource_mgr_->get_local_gpu(0)->get_max_smem_size_per_sm(); const size_t global_gpu_count = resource_mgr_->get_global_gpu_count(); ITEMS_PER_GPU_PER_WARP_ = max_smem_size - (sizeof(uint32_t) * KEY_WARPS_PER_BLOCK * global_gpu_count); ITEMS_PER_GPU_PER_WARP_ /= (global_gpu_count * KEY_WARPS_PER_BLOCK * (sizeof(KeyType) + sizeof(uint32_t))); ITEMS_PER_GPU_PER_WARP_ = (ITEMS_PER_GPU_PER_WARP_ / 16) * 16; if (ITEMS_PER_GPU_PER_WARP_ <= 33) { MESSAGE("[WARNING]: the performance in this device is not good enough.."); } } void allocate_forward_spaces() override { const size_t local_gpu_count = resource_mgr_->get_local_gpu_count(); const size_t global_gpu_count = resource_mgr_->get_global_gpu_count(); const size_t embedding_vec_size = base_context()->get_param()->get_embedding_vec_size(); for (size_t dev_id = 0; dev_id < local_gpu_count; ++dev_id) { auto &buffer = base_context()->get_buffer(dev_id); auto &host_buffer = base_context()->get_host_buffer(dev_id); { Tensor2<KeyType> tensor; buffer->reserve({global_gpu_count, num_keys_per_rank_}, &tensor); selected_keys_buf_.push_back(tensor); } { Tensor2<uint32_t> tensor; buffer->reserve({global_gpu_count, num_keys_per_rank_}, &tensor); selected_indices_buf_.push_back(tensor); } { Tensor2<uint32_t> tensor; buffer->reserve({global_gpu_count}, &tensor); num_selected_keys_.push_back(tensor); } { Tensor2<uint32_t> tensor; buffer->reserve({global_gpu_count}, &tensor); num_exchanged_keys_.push_back(tensor); } { Tensor2<uint32_t> tensor; host_buffer->reserve({global_gpu_count}, &tensor); h_num_selected_keys_.push_back(tensor); } { Tensor2<uint32_t> tensor; host_buffer->reserve({global_gpu_count}, &tensor); h_num_exchanged_keys_.push_back(tensor); } { Tensor2<KeyType> tensor; buffer->reserve({global_gpu_count, num_keys_per_rank_}, &tensor); exchanged_keys_buf_.push_back(tensor); } { Tensor2<uint32_t> tensor; host_buffer->reserve({global_gpu_count + 1}, &tensor); h_recv_chunk_offsets_.push_back(tensor); } } // for dev_id in local_gpu_count } void allocate_backward_spaces() override {} void forward(const Context_t &replica_context, const bool training) override { const size_t 
global_gpu_count = resource_mgr_->get_global_gpu_count(); const size_t global_replica_id = replica_context->get_global_replica_id(); const size_t local_replica_id = resource_mgr_->cal_local_id_from_global_id(global_replica_id); const auto &local_gpu = resource_mgr_->get_local_gpu(local_replica_id); // step 1: reset count spaces. CK_CUDA(cudaMemsetAsync(num_selected_keys_[local_replica_id].get_ptr(), 0, num_selected_keys_[local_replica_id].get_size_in_bytes(), local_gpu->get_stream())); std::memset(h_recv_chunk_offsets_[local_replica_id].get_ptr(), 0, h_recv_chunk_offsets_[local_replica_id].get_size_in_bytes()); // step 2: select keys for each GPU (rank) const auto &input_keys = replica_context->input("replica_values"); { const size_t smem_size = local_gpu->get_max_smem_size_per_sm(); CK_CUDA(cudaFuncSetAttribute(selectKernel<KeyType, IdenticalHash>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size)); size_t const grid_dim = local_gpu->get_sm_count(); dim3 const block_dim(local_gpu->get_warp_size(), KEY_WARPS_PER_BLOCK); selectKernel<KeyType, IdenticalHash> <<<grid_dim, block_dim, smem_size, local_gpu->get_stream()>>>( /*input_keys=*/input_keys->GetPtrWithType<KeyType>(), /*num_keys=*/input_keys->get_num_elements(), /*output_keys=*/selected_keys_buf_[local_replica_id].get_ptr(), /*output_indices=*/selected_indices_buf_[local_replica_id].get_ptr(), /*chunks=*/global_gpu_count, /*max_chunk_size=*/num_keys_per_rank_, /*chunk_sizes=*/num_selected_keys_[local_replica_id].get_ptr(), /*ITEMS_PER_GPU_PER_WARP=*/ITEMS_PER_GPU_PER_WARP_); CK_CUDA(cudaGetLastError()); } // step 3: exchange selected keys count among all GPUs CK_NCCL(ncclGroupStart()); for (size_t dev_id = 0; dev_id < global_gpu_count; dev_id++) { CK_NCCL(ncclSend(num_selected_keys_[local_replica_id].get_ptr() + dev_id, 1, ncclUint32, /*peer=*/dev_id, local_gpu->get_nccl(), local_gpu->get_stream())); CK_NCCL(ncclRecv(num_exchanged_keys_[local_replica_id].get_ptr() + dev_id, 1, ncclUint32, /*peer=*/dev_id, local_gpu->get_nccl(), local_gpu->get_stream())); } // for dev_id in global_gpu_count CK_NCCL(ncclGroupEnd()); // step 4: copy count from GPU to CPU and calculate count offsets CK_CUDA(cudaMemcpyAsync(h_num_selected_keys_[local_replica_id].get_ptr(), num_selected_keys_[local_replica_id].get_ptr(), num_selected_keys_[local_replica_id].get_size_in_bytes(), cudaMemcpyDeviceToHost, local_gpu->get_stream())); CK_CUDA(cudaMemcpyAsync(h_num_exchanged_keys_[local_replica_id].get_ptr(), num_exchanged_keys_[local_replica_id].get_ptr(), num_exchanged_keys_[local_replica_id].get_size_in_bytes(), cudaMemcpyDeviceToHost, local_gpu->get_stream())); CK_CUDA(cudaStreamSynchronize(local_gpu->get_stream())); for (size_t dev_id = 0; dev_id < global_gpu_count; dev_id++) { h_recv_chunk_offsets_[local_replica_id].get_ptr()[dev_id + 1] = h_recv_chunk_offsets_[local_replica_id].get_ptr()[dev_id] + h_num_exchanged_keys_[local_replica_id].get_ptr()[dev_id]; } // for dev_id in global_gpu_count // step 5: exchange selected keys among all GPUs CK_NCCL(ncclGroupStart()); for (size_t dev_id = 0; dev_id < global_gpu_count; dev_id++) { CK_NCCL(ncclSend(selected_keys_buf_[local_replica_id].get_ptr() + dev_id * num_keys_per_rank_, h_num_selected_keys_[local_replica_id].get_ptr()[dev_id], GetNCCLType<KeyType>(), /*peer=*/dev_id, local_gpu->get_nccl(), local_gpu->get_stream())); CK_NCCL(ncclRecv(exchanged_keys_buf_[local_replica_id].get_ptr() + h_recv_chunk_offsets_[local_replica_id].get_ptr()[dev_id], h_num_exchanged_keys_[local_replica_id].get_ptr()[dev_id], 
GetNCCLType<KeyType>(), /*peer=*/dev_id, local_gpu->get_nccl(), local_gpu->get_stream())); } // for dev_id in global_gpu_count CK_NCCL(ncclGroupEnd()); // set output of this dispatcher replica_context->set_output("replica_exchanged_keys", exchanged_keys_buf_[local_replica_id]); replica_context->set_output("replica_h_recv_chunk_offsets", h_recv_chunk_offsets_[local_replica_id]); replica_context->set_output("replica_h_num_exchanged_keys", h_num_exchanged_keys_[local_replica_id]); replica_context->set_output("replica_h_num_selected_keys", h_num_selected_keys_[local_replica_id]); replica_context->set_output("replica_num_selected_keys", num_selected_keys_[local_replica_id]); replica_context->set_output("replica_selected_indices_buf", selected_indices_buf_[local_replica_id]); } void backward(const Context_t &replica_context) override {} private: const std::shared_ptr<ResourcesManager> resource_mgr_; const size_t num_keys_per_rank_; size_t ITEMS_PER_GPU_PER_WARP_; // forward spaces Tensors2<KeyType> selected_keys_buf_; Tensors2<uint32_t> selected_indices_buf_; Tensors2<uint32_t> num_selected_keys_; Tensors2<uint32_t> num_exchanged_keys_; Tensors2<uint32_t> h_num_selected_keys_; Tensors2<uint32_t> h_num_exchanged_keys_; Tensors2<KeyType> exchanged_keys_buf_; Tensors2<uint32_t> h_recv_chunk_offsets_; }; REGISTER_INPUT_DISPATCHER_BUILDER("All2AllInput", DataType::Int64, DataType::Float32, All2AllInputDispatcher<int64_t, float>); REGISTER_INPUT_DISPATCHER_BUILDER("All2AllInput", DataType::Int64, DataType::Float16, All2AllInputDispatcher<int64_t, __half>); REGISTER_INPUT_DISPATCHER_BUILDER("All2AllInput", DataType::Uint32, DataType::Float32, All2AllInputDispatcher<uint32_t, float>); REGISTER_INPUT_DISPATCHER_BUILDER("All2AllInput", DataType::Uint32, DataType::Float16, All2AllInputDispatcher<uint32_t, __half>); } // namespace SparseOperationKit
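selectKernel routes each key to the chunk (GPU) given by Hasher::compute(key) % chunks, buffering per-warp in shared memory before flushing to the per-GPU output slots; the NCCL loops then exchange the per-GPU counts and keys. The snippet below is a host-side sketch of just that routing rule with IdenticalHash (key % global_gpu_count); the buckets stand in for (output_keys, chunk_sizes) and are not SparseOperationKit API.

// Host-side sketch of the key-routing rule used by selectKernel.
#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t identical_hash(int64_t key) { return static_cast<uint32_t>(key); }

int main() {
  const size_t global_gpu_count = 4;
  const std::vector<int64_t> keys = {0, 1, 5, 8, 13, 42};

  // Per-GPU buckets mirror the (output_keys, chunk_sizes) pair the kernel fills.
  std::vector<std::vector<int64_t>> buckets(global_gpu_count);
  for (int64_t k : keys)
    buckets[identical_hash(k) % global_gpu_count].push_back(k);

  for (size_t gpu = 0; gpu < global_gpu_count; ++gpu) {
    std::printf("GPU %zu owns %zu key(s):", gpu, buckets[gpu].size());
    for (int64_t k : buckets[gpu]) std::printf(" %lld", static_cast<long long>(k));
    std::printf("\n");
  }
  return 0;
}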
the_stack
#include <nbla/cuda/function/kernel/sync_batch_normalization.cuh> namespace nbla { static bool can_use_int_as_index_t(const Size_t size0, const Size_t size1, const Size_t size2) { return size0 * size1 * size2 < std::numeric_limits<int>::max(); } template <typename T> void forward_collect_statistics(const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *local_mean, Variable *local_invstd, const float epsilon, Context &ctx) { using input_scalar_t = T; using stat_accscalar_t = typename CudaTypeForceFloat<T>::type; const auto *x_ptr = x->get_data_pointer<input_scalar_t>(ctx); auto *local_mean_ptr = local_mean->cast_data_and_get_pointer<stat_accscalar_t>(ctx); auto *local_invstd_ptr = local_invstd->cast_data_and_get_pointer<stat_accscalar_t>(ctx); dim3 blocks(size1); int tf = getNumThreads(size2); dim3 threads(tf, SYNC_BN_MAX_BLOCK_SIZE / tf); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_collect_statistics_kernel< InvStd, input_scalar_t, stat_accscalar_t, index_t><<<blocks, threads>>>( x_ptr, epsilon, local_mean_ptr, local_invstd_ptr, size0, size1, size2); } else { using index_t = Size_t; batch_norm_collect_statistics_kernel< InvStd, input_scalar_t, stat_accscalar_t, index_t><<<blocks, threads>>>( x_ptr, epsilon, local_mean_ptr, local_invstd_ptr, size0, size1, size2); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T> void forward_collect_statistics_channels_last( const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *local_mean, Variable *local_invstd, Variable *staging_data, Variable *semaphores, const float epsilon, Context &ctx) { using scalar_t = T; using accscalar_t = typename CudaTypeForceFloat<T>::type; const Size_t reduction_size = size0; const Size_t stride = size1; dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); if (grid.y > 1) { staging_data->reshape({4 * stride * grid.y}, true); semaphores->reshape({grid.x}, true); } const auto *x_ptr = x->get_data_pointer<scalar_t>(ctx); auto *local_mean_ptr = local_mean->cast_data_and_get_pointer<accscalar_t>(ctx); auto *local_invstd_ptr = local_invstd->cast_data_and_get_pointer<accscalar_t>(ctx); auto *staging_data_ptr = grid.y > 1 ? staging_data->cast_data_and_get_pointer<accscalar_t>(ctx) : nullptr; int *semaphores_ptr = grid.y > 1 ? 
semaphores->cast_data_and_get_pointer<int>(ctx) : nullptr; if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_collect_statistics_channels_last_kernel< InvStd, scalar_t, accscalar_t, index_t, SYNC_BN_ELEMENTS_PER_ITER><<<grid, block>>>( x_ptr, local_mean_ptr, local_invstd_ptr, staging_data_ptr, semaphores_ptr, reduction_size, stride, epsilon); } else { using index_t = Size_t; batch_norm_collect_statistics_channels_last_kernel< InvStd, scalar_t, accscalar_t, index_t, SYNC_BN_ELEMENTS_PER_ITER><<<grid, block>>>( x_ptr, local_mean_ptr, local_invstd_ptr, staging_data_ptr, semaphores_ptr, reduction_size, stride, epsilon); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T> void forward_reduce_statistics(const Size_t size0, const Size_t size1, const Size_t size2, Variable *all_mean, Variable *all_invstd, Variable *all_count, Variable *global_mean, Variable *global_var, Variable *r_mean, Variable *r_var, const float epsilon, const float decay_rate, Context &ctx, const int n_workers) { using scalar_t = T; using accscalar_t = typename CudaTypeForceFloat<T>::type; const accscalar_t *all_mean_ptr = all_mean->get_data_pointer<accscalar_t>(ctx); const accscalar_t *all_invstd_ptr = all_invstd->get_data_pointer<accscalar_t>(ctx); auto *global_mean_ptr = global_mean->cast_data_and_get_pointer<accscalar_t>(ctx); auto *global_var_ptr = global_var->cast_data_and_get_pointer<accscalar_t>(ctx); auto *r_mean_ptr = r_mean ? r_mean->cast_data_and_get_pointer<scalar_t>(ctx) : nullptr; auto *r_var_ptr = r_var ? r_var->cast_data_and_get_pointer<scalar_t>(ctx) : nullptr; const auto *all_count_ptr = all_count->get_data_pointer<scalar_t>(ctx); const int feature_size = size1; int block = getNumThreads(feature_size); int grid = std::max<int>(1, feature_size / block); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_reduce_statistics_kernel<scalar_t, accscalar_t, index_t><<<grid, block>>>( all_mean_ptr, all_invstd_ptr, global_mean_ptr, global_var_ptr, r_mean_ptr, r_var_ptr, epsilon, decay_rate, all_count_ptr, feature_size, n_workers); } else { using index_t = Size_t; batch_norm_reduce_statistics_kernel<scalar_t, accscalar_t, index_t><<<grid, block>>>( all_mean_ptr, all_invstd_ptr, global_mean_ptr, global_var_ptr, r_mean_ptr, r_var_ptr, epsilon, decay_rate, all_count_ptr, feature_size, n_workers); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T> void forward_normalization(const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *y, Variable *global_mean, Variable *global_var, Variable *beta, Variable *gamma, const float epsilon, Context &ctx) { using input_scalar_t = T; using stat_scalar_t = T; using stat_accscalar_t = typename CudaTypeForceFloat<T>::type; const auto *x_ptr = x->get_data_pointer<input_scalar_t>(ctx); auto *y_ptr = y->cast_data_and_get_pointer<input_scalar_t>(ctx); const auto *global_mean_ptr = global_mean->get_data_pointer<stat_accscalar_t>(ctx); const auto *global_var_ptr = global_var->get_data_pointer<stat_accscalar_t>(ctx); const auto *weight_ptr = gamma->get_data_pointer<stat_scalar_t>(ctx); const auto *bias_ptr = beta->get_data_pointer<stat_scalar_t>(ctx); // (Following comments are quoted from PyTorch.) // The input_transform kernel is pointwise, but we need to balance reading // parameters (save_var/mean, weight/bias) - which we only do once and have a // for loop afterwards - with having many threads and blocks and good // occupancy. Quiet likely, we could go with even more blocks than 1024. 
// The various planes are independent, so we use blocks for them. const Size_t tf = std::max<int>(getNumThreads(size2 / 4), std::min<int>(getNumThreads(size1), 64)); const Size_t tb = std::max<int>(64 / tf, 1); dim3 blocks_trans(size1, std::max<int>(1, std::min<int>((256 * 1024) / size1, (size0 + tb - 1) / tb))); blocks_trans.y = std::min(blocks_trans.y, SYNC_BN_MAX_GRID_SIZE); dim3 threads_trans(tf, tb); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_transform_input_kernel<input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t><<<blocks_trans, threads_trans>>>( x_ptr, y_ptr, global_mean_ptr, global_var_ptr, weight_ptr, bias_ptr, epsilon, size0, size1, size2); } else { using index_t = Size_t; batch_norm_transform_input_kernel<input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t><<<blocks_trans, threads_trans>>>( x_ptr, y_ptr, global_mean_ptr, global_var_ptr, weight_ptr, bias_ptr, epsilon, size0, size1, size2); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T> void forward_normalization_channel_last(const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *global_mean, Variable *global_var, Variable *beta, Variable *gamma, Variable *y, const float epsilon, Context &ctx) { using scalar_t = T; using layerscalar_t = T; using accscalar_t = typename CudaTypeForceFloat<T>::type; const Size_t reduction_size = size0; const Size_t stride = size1; const scalar_t *x_ptr = x->get_data_pointer<scalar_t>(ctx); const accscalar_t *global_mean_ptr = global_mean->get_data_pointer<accscalar_t>(ctx); const accscalar_t *global_var_ptr = global_var->get_data_pointer<accscalar_t>(ctx); const layerscalar_t *gamma_ptr = gamma->get_data_pointer<layerscalar_t>(ctx); const layerscalar_t *beta_ptr = beta->get_data_pointer<layerscalar_t>(ctx); scalar_t *y_ptr = y->cast_data_and_get_pointer<scalar_t>(ctx); dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_transform_input_channels_last_kernel< scalar_t, accscalar_t, layerscalar_t, index_t, SYNC_BN_ELEMENTS_PER_ITER><<<grid, block>>>( x_ptr, global_mean_ptr, global_var_ptr, gamma_ptr, beta_ptr, y_ptr, epsilon, reduction_size, stride); } else { using index_t = Size_t; batch_norm_transform_input_channels_last_kernel< scalar_t, accscalar_t, layerscalar_t, index_t, SYNC_BN_ELEMENTS_PER_ITER><<<grid, block>>>( x_ptr, global_mean_ptr, global_var_ptr, gamma_ptr, beta_ptr, y_ptr, epsilon, reduction_size, stride); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T> void backward_reduce(const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *y, Variable *global_mean, Variable *global_var, Variable *sum_dy, Variable *sum_dy_xmu, Variable *beta, Variable *gamma, const float epsilon, Context &ctx) { using input_scalar_t = T; using stat_scalar_t = T; using stat_accscalar_t = typename CudaTypeForceFloat<T>::type; const auto *x_ptr = x->get_data_pointer<input_scalar_t>(ctx); const auto *dy_ptr = y->get_grad_pointer<input_scalar_t>(ctx); const auto *global_mean_ptr = global_mean->get_data_pointer<stat_accscalar_t>(ctx); const auto *global_var_ptr = global_var->get_data_pointer<stat_accscalar_t>(ctx); auto *sum_dy_ptr = sum_dy->cast_data_and_get_pointer<stat_accscalar_t>(ctx, true); auto *sum_dy_xmu_ptr = sum_dy_xmu->cast_data_and_get_pointer<stat_accscalar_t>(ctx, true); auto *grad_weight_ptr = gamma->cast_data_and_get_pointer<stat_scalar_t>(ctx, true); auto *grad_bias_ptr = 
beta->cast_data_and_get_pointer<stat_scalar_t>(ctx, true); auto batch_size = size0; auto n_input = size1; auto feature_size = size2; int block_y = std::min<int>(lastPow2(batch_size), SYNC_BN_MAX_BLOCK_SIZE / CUDA_WARP_SIZE); // We want block_x to be at least a warp width int block_x = std::min<int>(std::max<int>(getNumThreads(feature_size), CUDA_WARP_SIZE), SYNC_BN_MAX_BLOCK_SIZE / block_y); const dim3 grid(n_input); const dim3 block(block_x, block_y); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_backward_reduce_kernel<input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t><<<grid, block>>>( x_ptr, dy_ptr, global_mean_ptr, global_var_ptr, sum_dy_ptr, sum_dy_xmu_ptr, grad_weight_ptr, grad_bias_ptr, epsilon, size0, size1, size2); } else { using index_t = Size_t; batch_norm_backward_reduce_kernel<input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t><<<grid, block>>>( x_ptr, dy_ptr, global_mean_ptr, global_var_ptr, sum_dy_ptr, sum_dy_xmu_ptr, grad_weight_ptr, grad_bias_ptr, epsilon, size0, size1, size2); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T> void backward_reduce_channels_last(const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *y, Variable *batch_mean, Variable *batch_var, Variable *sum_dy_o, Variable *sum_dy_xmu_o, Variable *beta, Variable *gamma, Variable *staging_data, Variable *semaphores, const float epsilon, Context &ctx) { using scalar_t = T; using layerscalar_t = T; using accscalar_t = typename CudaTypeForceFloat<T>::type; const Size_t reduction_size = size0; const Size_t stride = size1; dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); if (grid.y > 1) { staging_data->reshape({2 * stride * grid.y}, true); semaphores->reshape({grid.x}, true); } const auto *x_ptr = x->get_data_pointer<scalar_t>(ctx); const auto *dy_ptr = y->get_grad_pointer<scalar_t>(ctx); const auto *mean_ptr = batch_mean->get_data_pointer<accscalar_t>(ctx); const auto *var_ptr = batch_var->get_data_pointer<accscalar_t>(ctx); auto *sum_dy_o_ptr = sum_dy_o->cast_data_and_get_pointer<accscalar_t>(ctx); auto *sum_dy_xmu_o_ptr = sum_dy_xmu_o->cast_data_and_get_pointer<accscalar_t>(ctx); auto *grad_weight_ptr = gamma->cast_data_and_get_pointer<layerscalar_t>(ctx); auto *grad_bias_ptr = beta->cast_data_and_get_pointer<layerscalar_t>(ctx); auto *staging_data_ptr = staging_data->cast_data_and_get_pointer<accscalar_t>(ctx); int *semaphores_ptr = semaphores->cast_data_and_get_pointer<int>(ctx); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_backward_reduce_channels_last_kernel< SYNC_BN_ELEMENTS_PER_ITER, scalar_t, accscalar_t, layerscalar_t, index_t><<<grid, block>>>( x_ptr, dy_ptr, mean_ptr, var_ptr, sum_dy_o_ptr, sum_dy_xmu_o_ptr, grad_weight_ptr, grad_bias_ptr, staging_data_ptr, semaphores_ptr, reduction_size, stride, epsilon); } else { using index_t = Size_t; batch_norm_backward_reduce_channels_last_kernel< SYNC_BN_ELEMENTS_PER_ITER, scalar_t, accscalar_t, layerscalar_t, index_t><<<grid, block>>>( x_ptr, dy_ptr, mean_ptr, var_ptr, sum_dy_o_ptr, sum_dy_xmu_o_ptr, grad_weight_ptr, grad_bias_ptr, staging_data_ptr, semaphores_ptr, reduction_size, stride, epsilon); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T, bool accum> void backward_dx_post(const Size_t size0, const Size_t size1, const Size_t size2, Variable *x, Variable *y, Variable *global_mean, Variable *global_var, Variable *sum_dy, Variable *sum_dy_xmu, Variable *gamma, Variable *all_count, const bool 
output_stat, const float epsilon, Context &ctx) { using input_scalar_t = T; using stat_scalar_t = T; using stat_accscalar_t = typename CudaTypeForceFloat<T>::type; const auto *x_ptr = x->get_data_pointer<input_scalar_t>(ctx); const auto *dy_ptr = y->get_grad_pointer<input_scalar_t>(ctx); const auto *global_mean_ptr = global_mean->get_data_pointer<stat_accscalar_t>(ctx); const auto *global_var_ptr = global_var->get_data_pointer<stat_accscalar_t>(ctx); const auto *dmean_ptr = output_stat ? global_mean->get_grad_pointer<stat_accscalar_t>(ctx) : nullptr; const auto *dvar_ptr = output_stat ? global_var->get_grad_pointer<stat_accscalar_t>(ctx) : nullptr; const auto *weight_ptr = gamma->get_data_pointer<stat_scalar_t>(ctx); const auto *sum_dy_ptr = sum_dy->get_data_pointer<stat_accscalar_t>(ctx); const auto *sum_dy_xmu_ptr = sum_dy_xmu->get_data_pointer<stat_accscalar_t>(ctx); auto *dx_ptr = x->cast_grad_and_get_pointer<input_scalar_t>(ctx, false); const int *all_count_ptr = all_count->get_data_pointer<int>(ctx); const Size_t all_count_numel = all_count->size(); const Size_t tf = std::max<int>(getNumThreads(size2 / 4), std::min<int>(getNumThreads(size2), 64)); const Size_t tb = std::max<int>(64 / tf, 1); dim3 blocks_trans(size1, std::max<int>(1, std::min<int>((256 * 1024) / size1, (size0 + tb - 1) / tb))); blocks_trans.y = std::min(blocks_trans.y, SYNC_BN_MAX_GRID_SIZE); dim3 threads_trans(tf, tb); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_backward_elemt_kernel<accum, input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t><<<blocks_trans, threads_trans>>>( x_ptr, dy_ptr, global_mean_ptr, global_var_ptr, dmean_ptr, dvar_ptr, weight_ptr, sum_dy_ptr, sum_dy_xmu_ptr, dx_ptr, epsilon, all_count_ptr, all_count_numel, size0, size1, size2); } else { using index_t = Size_t; batch_norm_backward_elemt_kernel<accum, input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t><<<blocks_trans, threads_trans>>>( x_ptr, dy_ptr, global_mean_ptr, global_var_ptr, dmean_ptr, dvar_ptr, weight_ptr, sum_dy_ptr, sum_dy_xmu_ptr, dx_ptr, epsilon, all_count_ptr, all_count_numel, size0, size1, size2); } NBLA_CUDA_KERNEL_CHECK(); } template <typename T, bool accum> void backward_dx_post_channels_last(const Size_t size0, const Size_t size1, const Size_t size2, Variable *y, Variable *x, Variable *batch_mean, Variable *batch_var, Variable *gamma, Variable *sum_dy_o, Variable *sum_dy_xmu_o, Variable *count, const bool output_stat, const float epsilon, Context &ctx) { using scalar_t = T; using layerscalar_t = T; using accscalar_t = typename CudaTypeForceFloat<T>::type; const Size_t reduction_size = size0; const Size_t stride = size1; dim3 block; dim3 grid; flexible_launch_configs(reduction_size, stride, block, grid, true); const auto *dy_ptr = y->get_grad_pointer<scalar_t>(ctx); const auto *x_ptr = x->get_data_pointer<scalar_t>(ctx); const auto *mean_ptr = batch_mean->get_data_pointer<accscalar_t>(ctx); const auto *var_ptr = batch_var->get_data_pointer<accscalar_t>(ctx); const auto *dmean_ptr = output_stat ? batch_mean->get_grad_pointer<accscalar_t>(ctx) : nullptr; const auto *dvar_ptr = output_stat ? 
batch_var->get_grad_pointer<accscalar_t>(ctx) : nullptr; const auto *weight_ptr = gamma->get_data_pointer<layerscalar_t>(ctx); const auto *sum_dy_ptr = sum_dy_o->get_data_pointer<accscalar_t>(ctx); const auto *sum_dy_xmu_ptr = sum_dy_xmu_o->get_data_pointer<accscalar_t>(ctx); const auto *numel_ptr = count->get_data_pointer<int>(ctx); auto *dx_ptr = x->cast_grad_and_get_pointer<scalar_t>(ctx); const Size_t world_size = count->size(); if (can_use_int_as_index_t(size0, size1, size2)) { using index_t = int; batch_norm_backward_elemt_channels_last_kernel< SYNC_BN_ELEMENTS_PER_ITER, accum, scalar_t, accscalar_t, layerscalar_t, index_t><<<grid, block>>>(dy_ptr, x_ptr, mean_ptr, var_ptr, dmean_ptr, dvar_ptr, weight_ptr, sum_dy_ptr, sum_dy_xmu_ptr, numel_ptr, dx_ptr, world_size, reduction_size, stride, epsilon); } else { using index_t = Size_t; batch_norm_backward_elemt_channels_last_kernel< SYNC_BN_ELEMENTS_PER_ITER, accum, scalar_t, accscalar_t, layerscalar_t, index_t><<<grid, block>>>(dy_ptr, x_ptr, mean_ptr, var_ptr, dmean_ptr, dvar_ptr, weight_ptr, sum_dy_ptr, sum_dy_xmu_ptr, numel_ptr, dx_ptr, world_size, reduction_size, stride, epsilon); } NBLA_CUDA_KERNEL_CHECK(); } }
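forward_reduce_statistics merges the per-worker mean/invstd/count tensors into global statistics. The kernel body lives in sync_batch_normalization.cuh and is not shown in this sample, so the host sketch below only illustrates the underlying parallel-variance (Chan) combination such a reduction corresponds to, working with variances rather than inverse standard deviations.

// Sketch of combining per-worker batch statistics for one channel (assumed math).
#include <cstdio>
#include <vector>

struct Stats { double mean; double var; double count; };

static Stats merge(const std::vector<Stats>& parts) {
  Stats g{0.0, 0.0, 0.0};
  for (const Stats& p : parts) {
    const double n     = g.count + p.count;
    const double delta = p.mean - g.mean;
    // Combine means weighted by count; combine variances via M2 = var * count.
    const double m2 = g.var * g.count + p.var * p.count +
                      delta * delta * g.count * p.count / (n > 0 ? n : 1.0);
    g.mean  = (g.mean * g.count + p.mean * p.count) / (n > 0 ? n : 1.0);
    g.var   = m2 / (n > 0 ? n : 1.0);  // biased variance, as batch norm uses
    g.count = n;
  }
  return g;
}

int main() {
  // Two workers with different batch statistics for one channel.
  std::vector<Stats> per_gpu = {{1.0, 0.25, 128.0}, {3.0, 1.00, 128.0}};
  Stats g = merge(per_gpu);
  std::printf("global mean=%.4f var=%.4f count=%.0f\n", g.mean, g.var, g.count);
  return 0;
}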
the_stack
* Test evaluation for caching allocator of device memory ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <cub/util_allocator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>]" "[--bytes=<timing bytes>]" "[--i=<timing iterations>]" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Get number of GPUs and current GPU int num_gpus; int initial_gpu; int timing_iterations = 10000; int timing_bytes = 1024 * 1024; if (CubDebug(cudaGetDeviceCount(&num_gpus))) exit(1); if (CubDebug(cudaGetDevice(&initial_gpu))) exit(1); args.GetCmdLineArgument("i", timing_iterations); args.GetCmdLineArgument("bytes", timing_bytes); // Create default allocator (caches up to 6MB in device allocations per GPU) CachingDeviceAllocator allocator; allocator.debug = true; printf("Running single-gpu tests...\n"); fflush(stdout); // // Test0 // // Create a new stream cudaStream_t other_stream; CubDebugExit(cudaStreamCreate(&other_stream)); // Allocate 999 bytes on the current gpu in stream0 char *d_999B_stream0_a; char *d_999B_stream0_b; CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0)); // Run some big kernel in stream 0 EmptyKernel<void><<<32000, 512, 1024 * 8, 0>>>(); // Free d_999B_stream0_a CubDebugExit(allocator.DeviceFree(d_999B_stream0_a)); // Allocate another 999 bytes in stream 0 CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0)); // Check that that we have 1 live block on the initial GPU AssertEquals(allocator.live_blocks.size(), 1); // Check that that we have no cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 0); // Run some big kernel in stream 0 EmptyKernel<void><<<32000, 512, 1024 * 8, 0>>>(); // Free d_999B_stream0_b CubDebugExit(allocator.DeviceFree(d_999B_stream0_b)); // Allocate 999 bytes on the current gpu in other_stream char *d_999B_stream_other_a; char *d_999B_stream_other_b; allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream); // Check that that we have 1 live blocks on the initial GPU (that we allocated a new one because d_999B_stream0_b is only available for stream 0 until it becomes idle) AssertEquals(allocator.live_blocks.size(), 1); // Check that that we have one cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 1); // Run some big kernel in other_stream EmptyKernel<void><<<32000, 512, 1024 * 8, other_stream>>>(); // Free d_999B_stream_other CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a)); // Check that we can now use both allocations in stream 0 after synchronizing the device CubDebugExit(cudaDeviceSynchronize()); CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0)); CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0)); // Check that that we have 2 live blocks on the initial GPU AssertEquals(allocator.live_blocks.size(), 2); // Check that that we have no cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 0); // Free d_999B_stream0_a and d_999B_stream0_b 
CubDebugExit(allocator.DeviceFree(d_999B_stream0_a)); CubDebugExit(allocator.DeviceFree(d_999B_stream0_b)); // Check that we can now use both allocations in other_stream CubDebugExit(cudaDeviceSynchronize()); CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream)); CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_b, 999, other_stream)); // Check that that we have 2 live blocks on the initial GPU AssertEquals(allocator.live_blocks.size(), 2); // Check that that we have no cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 0); // Run some big kernel in other_stream EmptyKernel<void><<<32000, 512, 1024 * 8, other_stream>>>(); // Free d_999B_stream_other_a and d_999B_stream_other_b CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a)); CubDebugExit(allocator.DeviceFree(d_999B_stream_other_b)); // Check that we can now use both allocations in stream 0 after synchronizing the device and destroying the other stream CubDebugExit(cudaDeviceSynchronize()); CubDebugExit(cudaStreamDestroy(other_stream)); CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0)); CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0)); // Check that that we have 2 live blocks on the initial GPU AssertEquals(allocator.live_blocks.size(), 2); // Check that that we have no cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 0); // Free d_999B_stream0_a and d_999B_stream0_b CubDebugExit(allocator.DeviceFree(d_999B_stream0_a)); CubDebugExit(allocator.DeviceFree(d_999B_stream0_b)); // Free all cached CubDebugExit(allocator.FreeAllCached()); // // Test1 // // Allocate 5 bytes on the current gpu char *d_5B; CubDebugExit(allocator.DeviceAllocate((void **) &d_5B, 5)); // Check that that we have zero free bytes cached on the initial GPU AssertEquals(allocator.cached_bytes[initial_gpu].free, 0); // Check that that we have 1 live block on the initial GPU AssertEquals(allocator.live_blocks.size(), 1); // // Test2 // // Allocate 4096 bytes on the current gpu char *d_4096B; CubDebugExit(allocator.DeviceAllocate((void **) &d_4096B, 4096)); // Check that that we have 2 live blocks on the initial GPU AssertEquals(allocator.live_blocks.size(), 2); // // Test3 // // DeviceFree d_5B CubDebugExit(allocator.DeviceFree(d_5B)); // Check that that we have min_bin_bytes free bytes cached on the initial gpu AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes); // Check that that we have 1 live block on the initial GPU AssertEquals(allocator.live_blocks.size(), 1); // Check that that we have 1 cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 1); // // Test4 // // DeviceFree d_4096B CubDebugExit(allocator.DeviceFree(d_4096B)); // Check that that we have the 4096 + min_bin free bytes cached on the initial gpu AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes + 4096); // Check that that we have 0 live block on the initial GPU AssertEquals(allocator.live_blocks.size(), 0); // Check that that we have 2 cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 2); // // Test5 // // Allocate 768 bytes on the current gpu char *d_768B; CubDebugExit(allocator.DeviceAllocate((void **) &d_768B, 768)); // Check that that we have the min_bin free bytes cached on the initial gpu (4096 was reused) AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes); // Check that that we have 1 live block 
on the initial GPU AssertEquals(allocator.live_blocks.size(), 1); // Check that that we have 1 cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 1); // // Test6 // // Allocate max_cached_bytes on the current gpu char *d_max_cached; CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached, allocator.max_cached_bytes)); // DeviceFree d_max_cached CubDebugExit(allocator.DeviceFree(d_max_cached)); // Check that that we have the min_bin free bytes cached on the initial gpu (max cached was not returned because we went over) AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes); // Check that that we have 1 live block on the initial GPU AssertEquals(allocator.live_blocks.size(), 1); // Check that that we still have 1 cached block on the initial GPU AssertEquals(allocator.cached_blocks.size(), 1); // // Test7 // // Free all cached blocks on all GPUs CubDebugExit(allocator.FreeAllCached()); // Check that that we have 0 bytes cached on the initial GPU AssertEquals(allocator.cached_bytes[initial_gpu].free, 0); // Check that that we have 0 cached blocks across all GPUs AssertEquals(allocator.cached_blocks.size(), 0); // Check that that still we have 1 live block across all GPUs AssertEquals(allocator.live_blocks.size(), 1); // // Test8 // // Allocate max cached bytes + 1 on the current gpu char *d_max_cached_plus; CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached_plus, allocator.max_cached_bytes + 1)); // DeviceFree max cached bytes CubDebugExit(allocator.DeviceFree(d_max_cached_plus)); // DeviceFree d_768B CubDebugExit(allocator.DeviceFree(d_768B)); unsigned int power; size_t rounded_bytes; allocator.NearestPowerOf(power, rounded_bytes, allocator.bin_growth, 768); // Check that that we have 4096 free bytes cached on the initial gpu AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes); // Check that that we have 1 cached blocks across all GPUs AssertEquals(allocator.cached_blocks.size(), 1); // Check that that still we have 0 live block across all GPUs AssertEquals(allocator.live_blocks.size(), 0); #ifndef CUB_CDP // BUG: find out why these tests fail when one GPU is CDP compliant and the other is not if (num_gpus > 1) { printf("\nRunning multi-gpu tests...\n"); fflush(stdout); // // Test9 // // Allocate 768 bytes on the next gpu int next_gpu = (initial_gpu + 1) % num_gpus; char *d_768B_2; CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768)); // DeviceFree d_768B on the next gpu CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2)); // Re-allocate 768 bytes on the next gpu CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768)); // Re-free d_768B on the next gpu CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2)); // Check that that we have 4096 free bytes cached on the initial gpu AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes); // Check that that we have 4096 free bytes cached on the second gpu AssertEquals(allocator.cached_bytes[next_gpu].free, rounded_bytes); // Check that that we have 2 cached blocks across all GPUs AssertEquals(allocator.cached_blocks.size(), 2); // Check that that still we have 0 live block across all GPUs AssertEquals(allocator.live_blocks.size(), 0); } #endif // CUB_CDP // // Performance // printf("\nCPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes); fflush(stdout); fflush(stderr); // CPU performance comparisons vs cached. 
Allocate and free a 1MB block 2000 times CpuTimer cpu_timer; char *d_1024MB = NULL; allocator.debug = false; // Prime the caching allocator and the kernel CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes)); CubDebugExit(allocator.DeviceFree(d_1024MB)); cub::EmptyKernel<void><<<1, 32>>>(); // CUDA cpu_timer.Start(); for (int i = 0; i < timing_iterations; ++i) { CubDebugExit(cudaMalloc((void **) &d_1024MB, timing_bytes)); CubDebugExit(cudaFree(d_1024MB)); } cpu_timer.Stop(); float cuda_malloc_elapsed_millis = cpu_timer.ElapsedMillis(); // CUB cpu_timer.Start(); for (int i = 0; i < timing_iterations; ++i) { CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes)); CubDebugExit(allocator.DeviceFree(d_1024MB)); } cpu_timer.Stop(); float cub_calloc_elapsed_millis = cpu_timer.ElapsedMillis(); printf("\t CUB CachingDeviceAllocator allocation CPU speedup: %.2f (avg cudaMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n", cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis, cuda_malloc_elapsed_millis / timing_iterations, cub_calloc_elapsed_millis / timing_iterations); // GPU performance comparisons. Allocate and free a 1MB block 2000 times GpuTimer gpu_timer; printf("\nGPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes); fflush(stdout); fflush(stderr); // Kernel-only gpu_timer.Start(); for (int i = 0; i < timing_iterations; ++i) { cub::EmptyKernel<void><<<1, 32>>>(); } gpu_timer.Stop(); float cuda_empty_elapsed_millis = gpu_timer.ElapsedMillis(); // CUDA gpu_timer.Start(); for (int i = 0; i < timing_iterations; ++i) { CubDebugExit(cudaMalloc((void **) &d_1024MB, timing_bytes)); cub::EmptyKernel<void><<<1, 32>>>(); CubDebugExit(cudaFree(d_1024MB)); } gpu_timer.Stop(); cuda_malloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis; // CUB gpu_timer.Start(); for (int i = 0; i < timing_iterations; ++i) { CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes)); cub::EmptyKernel<void><<<1, 32>>>(); CubDebugExit(allocator.DeviceFree(d_1024MB)); } gpu_timer.Stop(); cub_calloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis; printf("\t CUB CachingDeviceAllocator allocation GPU speedup: %.2f (avg cudaMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n", cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis, cuda_malloc_elapsed_millis / timing_iterations, cub_calloc_elapsed_millis / timing_iterations); printf("Success\n"); return 0; }
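The test above drives cub::CachingDeviceAllocator through stream-scoped allocate/free/reuse checks and then times it against raw cudaMalloc/cudaFree. A minimal standalone use of the same calls (DeviceAllocate, DeviceFree, FreeAllCached) looks like this; error handling is reduced to a single check macro.

// Minimal usage sketch of cub::CachingDeviceAllocator.
#include <cub/util_allocator.cuh>
#include <cuda_runtime.h>
#include <cstdio>

#define CHECK(call) do { cudaError_t e = (call); \
  if (e != cudaSuccess) { std::printf("CUDA error: %s\n", cudaGetErrorString(e)); return 1; } } while (0)

int main() {
  cub::CachingDeviceAllocator allocator;   // default configuration

  void *d_buf = nullptr;
  CHECK(allocator.DeviceAllocate(&d_buf, 1 << 20, 0));   // 1 MB on stream 0
  CHECK(cudaMemsetAsync(d_buf, 0, 1 << 20, 0));
  CHECK(allocator.DeviceFree(d_buf));      // block goes back to the cache, not cudaFree'd

  // A second request of a similar size on the same stream is served from the cache.
  CHECK(allocator.DeviceAllocate(&d_buf, 1 << 20, 0));
  CHECK(allocator.DeviceFree(d_buf));

  CHECK(allocator.FreeAllCached());        // actually release cached device memory
  std::printf("done\n");
  return 0;
}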
the_stack
#include <Environment.h> #include <loops/transform.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> template <typename T> __device__ void transformGeneric( int opNum, Nd4jLong n, T *dy, Nd4jLong incy, T *params, T *result, Nd4jLong resultStride, int *allocationPointer, T *reductionPointer) { functions::transform::Transform<T>::transformCuda( opNum, n, dy, incy, params, result, resultStride, allocationPointer, reductionPointer, nullptr); } template <typename T, typename OpClass> __device__ void transformSimpleGeneric( Nd4jLong n, T *dy, Nd4jLong incy, T *params, T *result, Nd4jLong resultStride, int *allocationPointer, T *reductionPointer) { functions::transform::Transform<T>::template transformCuda<OpClass>( n, dy, incy, params, result, resultStride, allocationPointer, reductionPointer, nullptr); } template <typename T> __device__ void transformGeneric( int opNum, T *dy, Nd4jLong *xShapeInfo, int xRank, T *params, T *result,Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, T *reductionPointer) { functions::transform::Transform<T>::transformCuda( opNum, dy, xShapeInfo, params, result, resultShapeInfo, allocationPointer, reductionPointer, nullptr); } template <typename T, typename OpClass> __device__ void transformSimpleGeneric( T *dy, Nd4jLong *xShapeInfo, int xRank, T *params, T *result, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, T *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank); } __syncthreads(); functions::transform::Transform<T>::template transformCuda<OpClass>( dy, xShapeInfo, params, result, resultShapeInfo, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); } // transform strided DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, float, INPUT(Nd4jLong n, float *x, Nd4jLong xStride, float *extraParams, float *z, Nd4jLong zStride, int *allocationPointer, float *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, double, INPUT(Nd4jLong n, double *x, Nd4jLong xStride, double *extraParams, double *z, Nd4jLong zStride, int *allocationPointer, double *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, float16, INPUT(Nd4jLong n, float16 *x, Nd4jLong xStride, float16 *extraParams, float16 *z, Nd4jLong zStride, int *allocationPointer, float16 *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) // transform shaped DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, float, INPUT(float *x, Nd4jLong *xShape, int xRank, float *extraParams, float *z, Nd4jLong *zShape, int zRank, int *allocationPointer, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, double, INPUT(double *x, Nd4jLong *xShape, int xRank, 
double *extraParams, double *z, Nd4jLong *zShape, int zRank, int *allocationPointer, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, float16, INPUT(float16 *x, Nd4jLong *xShape, int xRank, float16 *extraParams, float16 *z, Nd4jLong *zShape, int zRank, int *allocationPointer, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS)) namespace functions { namespace transform { template <> _CUDA_H void Transform<float>::executeTransformStrided(dim3 launchDims, cudaStream_t *stream, int opNum, Nd4jLong n, float *x, Nd4jLong xStride, float *extraParams, float *z, Nd4jLong zStride, int *allocationPointer, float *reductionPointer) { DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); }; template <> _CUDA_H void Transform<double>::executeTransformStrided(dim3 launchDims, cudaStream_t *stream, int opNum, Nd4jLong n, double *x, Nd4jLong xStride, double *extraParams, double *z, Nd4jLong zStride, int *allocationPointer, double *reductionPointer) { DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); }; template <> _CUDA_H void Transform<float16>::executeTransformStrided(dim3 launchDims, cudaStream_t *stream, int opNum, Nd4jLong n, float16 *x, Nd4jLong xStride, float16 *extraParams, float16 *z, Nd4jLong zStride, int *allocationPointer, float16 *reductionPointer) { DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); }; template <> _CUDA_H void Transform<float>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, float *x, Nd4jLong *xShape, int xRank, float *extraParams, float *z, Nd4jLong *zShape, int zRank, int *allocationPointer, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_SIMPLE(transformShaped, float, PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void Transform<float16>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, float16 *x, Nd4jLong *xShape, int xRank, float16 *extraParams, float16 *z, Nd4jLong *zShape, int zRank, int *allocationPointer, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_SIMPLE(transformShaped, float16, PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS)) if (nd4j::Environment::getInstance()->isDebug()) checkCudaErrors(cudaStreamSynchronize(*stream)); } template <> _CUDA_H void Transform<double>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, double *x, Nd4jLong *xShape, int xRank, double *extraParams, double *z, Nd4jLong *zShape, int zRank, int *allocationPointer, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { 
DISPATCH_SIMPLE(transformShaped, double, PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template <typename T> template <typename OpType> __device__ void Transform<T>::transformCuda( T *dy, Nd4jLong *shapeInfo, T *params, T *result, Nd4jLong *resultShapeInfo, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if(OpType::requiresSpecial) { OpType::execSpecialCuda(dy,shapeInfo,result,resultShapeInfo,params, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); return; } else { auto xShape = shape::shapeOf(shapeInfo); auto xStride = shape::stride(shapeInfo); auto xOrder = shape::order(shapeInfo); auto resultOrder = shape::order(resultShapeInfo); auto xRank = shape::rank(shapeInfo); auto xElementWiseStride = shape::elementWiseStride(shapeInfo); auto resultElementWiseStride = shape::elementWiseStride(resultShapeInfo); auto tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ Nd4jLong length; if(threadIdx.x == 0) length = shape::length(shapeInfo); __syncthreads(); if(xElementWiseStride >= 1 && resultElementWiseStride >= 1 && xOrder == resultOrder) { transformCuda<OpType>( length, dy, xElementWiseStride, params, result, resultElementWiseStride, allocationPointer, reductionPointer, manager); } else { Nd4jLong xCoord[MAX_RANK]; for (Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) { shape::ind2sub(xRank,shape::shapeOf(shapeInfo),i, xCoord); auto xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto resultOffset2 = shape::getOffset(0,xShape,shape::stride(resultShapeInfo),xCoord,xRank); result[resultOffset2] = OpType::op(dy[xOffset2], params); } } } }; template <typename T> template <typename OpType> __device__ void Transform<T>::transformCuda( Nd4jLong n, T *dy, Nd4jLong incy, T *params, T *result, Nd4jLong resultStride, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) { int totalThreads = gridDim.x * blockDim.x; Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; if(incy == 1 && resultStride == 1) { /* equal, positive, non-unit increments. 
*/ for (; i < n; i += totalThreads) { result[i] = OpType::op(dy[i], params); } } else { for (; i < n; i += totalThreads) { result[i * resultStride] = OpType::op(dy[i * incy], params); } } } template <typename T> __device__ void Transform<T>::transformCuda( const int opNum, T *dy, Nd4jLong *shapeInfo, T *params, T *result, Nd4jLong *resultShapeInfo, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM(transformCuda, PARAMS(dy, shapeInfo, params, result, resultShapeInfo, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets), TRANSFORM_OPS); } template <typename T> __device__ void Transform<T>::transformCuda( const int opNum, Nd4jLong n, T *dy, Nd4jLong incy, T *params, T *result, Nd4jLong resultStride, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) { DISPATCH_BY_OPNUM(transformCuda, PARAMS(n, dy, incy, params, result, resultStride, allocationPointer, reductionPointer, manager), TRANSFORM_OPS); } //template class ND4J_EXPORT Transform<float>; //template class ND4J_EXPORT Transform<float16>; //template class ND4J_EXPORT Transform<double>; BUILD_CALL_1(template __device__ void Transform<float>::transformCuda, float, (float*, Nd4jLong*, float*, float*,Nd4jLong*, int*,float*, UnifiedSharedMemory*, Nd4jLong*, Nd4jLong*), TRANSFORM_OPS) BUILD_CALL_1(template __device__ void Transform<float16>::transformCuda, float16, (float16*, Nd4jLong*, float16*, float16*,Nd4jLong*, int*, float16*, UnifiedSharedMemory*, Nd4jLong*, Nd4jLong*), TRANSFORM_OPS) BUILD_CALL_1(template __device__ void Transform<double>::transformCuda, double, (double*, Nd4jLong*, double*, double*,Nd4jLong*, int*, double*, UnifiedSharedMemory*, Nd4jLong*, Nd4jLong*), TRANSFORM_OPS) } }
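// ---------------------------------------------------------------------------
// Illustrative sketch (not part of libnd4j): the specializations above all
// funnel into the same strided apply pattern, a grid-stride loop in which each
// thread applies OpType::op to elements spaced incx/incz apart. The
// self-contained CUDA program below shows that pattern with a made-up Square
// op; the kernel and functor names here are assumptions for illustration only.
// Compile with, e.g., `nvcc -o transform_sketch transform_sketch.cu`.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

// Stand-in op with the same static-op shape that TRANSFORM_OPS instantiates.
struct Square {
    __device__ static float op(float x, float * /*params*/) { return x * x; }
};

// Grid-stride strided transform, mirroring Transform<T>::transformCuda(n, dy, incy, ...):
// each thread starts at its global index and advances by the total thread count.
template <typename OpType>
__global__ void transformStridedSketch(long long n, const float *x, long long incx,
                                       float *z, long long incz, float *params) {
    long long tid = blockIdx.x * blockDim.x + threadIdx.x;
    long long total = (long long)gridDim.x * blockDim.x;
    for (long long i = tid; i < n; i += total)
        z[i * incz] = OpType::op(x[i * incx], params);
}

int main() {
    const long long n = 1 << 20;
    float *x = nullptr, *z = nullptr;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&z, n * sizeof(float));
    for (long long i = 0; i < n; ++i) x[i] = float(i % 7);

    // Unit strides here correspond to the incy == 1 && resultStride == 1 fast path above.
    transformStridedSketch<Square><<<256, 256>>>(n, x, 1, z, 1, nullptr);
    cudaDeviceSynchronize();
    printf("z[10] = %f (expected %f)\n", z[10], x[10] * x[10]);

    cudaFree(x);
    cudaFree(z);
    return 0;
}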
// CurveTracing // 实现的曲线跟踪 #include "CurveTracing.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; // 宏:CURVE_VALUE(曲线最大数目) // 设置图像能获得的曲线最大数目 #define CURVE_VALUE 1000 // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel 函数:_traverseKer(并行遍历图像得到端点数组和交点数组,并且得到去掉 // 交点后的输出图像) // 遍历图像,得到曲线的所有端点坐标和交点坐标,并且得到去掉交点后的输出图像, // 对每个像素点取其周围八领域像素点,如果八领域像素点的个数为 1,则这个为端点, // 若八领域像素点的个数为大于等于 3,则认为这个点作为伪交点,存储起来,这些伪 // 交点中有部分是真正的交点,后面计算需要从一堆伪交点中得到真正的交点。 static __global__ void // Kernel 函数无返回值 _traverseKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 去掉交点后的输出图像 int *array1_dev, // 存储端点的数组 int *array2_dev, // 存储交点的数组 Template boxtpl // 3 * 3 领域模板 ); // Kernel 函数:_traverseKerNew(遍历图像,得到图像上所有的像素点) // 遍历图像,保存图像上所有灰度值不为 0 的像素点,主要用于 CPU 串行代码中第二次 // 遍历的实现 static __global__ void // Kernel 函数无返回值 _traverseKerNew( ImageCuda inimg, // 输入图像 int *array1_dev // 存储端点的数组 ); // Host 函数:traverse(遍历图像,得到端点坐标、交点坐标及去掉交点后的图像) // 遍历图像,得到曲线的所有端点坐标和交点坐标,并且得到去掉交点后的输出图像, // 对每个像素点取其周围八领域像素点,如果八领域像素点的个数为 1,则这个为端点, // 若八领域像素点的个数为大于等于 3,则认为这个点作为伪交点,存储起来,这些伪 // 交点中有部分是真正的交点,后面计算需要从一堆伪交点中得到真正的交点。主要是 // 用于 CPU 串行代码的实现中处理 static __host__ void // 无返回值 traverse( DynamicArrays &Vertex, // 存储端点的动态数组 DynamicArrays &Intersect, // 存储伪交点的动态数组 Image *inimg, // 输入图像 Image *outimg, // 输出图像 int *tpl // 八领域模板 ); // Host 函数:traverseNew(遍历图像,得到图像上所有的像素点) // 遍历图像,保存图像上所有灰度值不为 0 的像素点,主要用于 CPU 串行代码中第二次 // 遍历的实现 static __host__ void // 无返回值 traverseNew( DynamicArrays &array, //存储点的坐标 Image *inimg // 输入图像 ); // Host 函数:getCurve(得到去掉交点后的所有曲线段) // 递归调用函数,得到去掉交点后的所有曲线段,并且这些曲线段都是非闭合曲线 static __host__ void // 无返回值 getCurve( DynamicArrays *pcurve, // 存储所有提取到的非闭合曲线 int &test, // 检测某端点开始的曲线是否已提取过 int count, // 曲线条数 Image *img, // 输入图像,待提取曲线的图像 int *mark, // 标志数组,大小为图像大小,表示像素点是否 // 访问,初始都为 0,如果访问则对应位置设为 1 int *tpl, // 八领域模板 int Vx, // 提取的曲线起点 x 坐标 int Vy // 提取的曲线起点 y 坐标 ); // Host 函数:insectClassify(从得到的一堆交点中,进行分类,确定交点个数) // 递归调用函数,实现原先得到的一堆交点进行分类,每一类是一部分点集,并且同一类 // 的点集是连通的,这些点集中可以找到一个合适的交点,同时根据分类的结果可以得到 // 交点的个数,有多少类就有多少交点 static __host__ void // 无返回值 insectClassify( int x, // 点的 x 坐标 int y, // 点的 y 坐标 DynamicArrays &Intersect, // 存储交点的动态数组 DynamicArrays *insect, // 存储分类的结果 int sectnum, // 交点个数,即分类的类数 int *tpl // 八领域模板 ); // Host 函数:makeCur(根据两点坐标得到一条曲线) // 根据两点坐标得到一条曲线,两个点的连线方式为从第一个点开始,先从对角线往 // 第二个点移动,如果第二个点的 x 或者 y 坐标的值与对角线 45° 移动的对应坐标值 // 一样,则沿着 x 或者 y 坐标移动直到重合,从而得到一条简短曲线 static __host__ void // 无返回值 makeCur( DynamicArrays &cur, // 存储得到的曲线 int dx1, // 曲线第一个点的 x 坐标 int dy1, // 曲线第一个点的 y 坐标 int dx2, // 曲线第一个点的 x 坐标 int dy2 // 曲线第一个点的 y 坐标 ); // Host 函数:interAssemble(交点曲线与原先得到的曲线进行重组,得到重组后曲线) // 根据得到的交点扩散出的曲线和原先得到的非闭合曲线进行重组,得到重组后曲线, // 以便之后的曲线还原 static __host__ void // 无返回值 interAssemble( DynamicArrays *pcurve, // 非闭合曲线集 int count, // 曲线条数 DynamicArrays *insect, // 交点分类的结果 int sectnum, // 交点曲线条数 DynamicArrays realsect, // 真正的交点数组 int *tpl // 八领域模板 ); // Host 函数:bpConnect(根据用户输入的半径得到近域点集) // 根据用户输入的半径得到近域点集,并且更新端点动态数组 static __host__ void bpConnect( DynamicArrays *pcurve, // 输入的曲线集 int count, // 输入的曲线集条数 int radius, // 半径大小参数 DynamicArrays *psect, // 得到新增加的近域点集 int *pcount, // 得到新增加的交点个数 DynamicArrays &Vertex // 存储端点的动态数组 ); // Host 函数:AidNorm(判断两个端点之间距离是否在领域大小内,若在则添加到点集中) // 判断两个端点之间距离是否在领域大小内,如果在领域半径大小内,则把找到的端点加 // 入到新增加的近域点集 static __host__ bool // 返回值为 bool 型,如果表示相同就返回 true, // 否则返回 false AidNorm( DynamicArrays *pcurve, // 输入的曲线集 int i, // 从编号为 i 的曲线往后搜索 int count, // 输入的曲线集条数 DynamicArrays *psect, // 新增加的近域点集 int pcount, // 新增加的交点个数 int radius, // 半径大小参数 DynamicArrays &Vertex, // 
存储端点的动态数组 int x, int y // 曲线的端点坐标 ); // Host 函数:pcurveAcord(根据坐标得到曲线的编号) // 根据曲线的端点坐标查找曲线的编号,遍历所有曲线的端点,查找是否存在和给定的坐 // 标相等的点,则得到相应的返回结果。 static __host__ int // 返回值,如果找到的是曲线首部返回 0, // 如果找到的是曲线尾部则返回 1,否则返回 -1。 pcurveAcord( DynamicArrays *pcurve, // 输入的曲线集 int count, // 输入的曲线集条数 int &location, // 得到曲线的编号 int x, int y // 端点坐标 ); // Host 函数:verAssemble(断点重组,根据近域点集重组曲线集) // 根据近域点集重组曲线集,根据近域的每个集合里的那些点,进行计算,得到其中 // 最合适的点作为中心点,这个中心点也即是一个新产生的交点,然后发散出去多条曲线, // 把这些曲线更新到原来的曲线集中。更抽象成层的含义,断点的重组,根据用户输入的 // 半径进行曲线端点组合,如果两个端点离得太近就变成一段连续的曲线, // 达到的端点的连接性。 static __host__ void verAssemble( DynamicArrays *pcurve, // 曲线集 int count, // 曲线集的条数 DynamicArrays *psect, // 近域点集 int pcount, // 近域交点个数 DynamicArrays &realsect // 更新交点集合 ); // Host 函数:IsFindPoint(判断坐标是不是坐标集动态数组里的点) static __host__ bool // 返回值为 bool 型,如果表示相同就返回 true, // 否则返回 false IsFindPoint( DynamicArrays &array, // 判断该坐标是不是动态数组里的点集 int x, int y // 坐标 ); // Host 函数:makeNode(根据曲线的起点和终点,以及边的情况,得到曲线的编号) // 根据曲线的起点和终点,以及边的情况,从而得到曲线的编号,并且编号的起点和终点 // 是唯一的,也不会和边的编号重复,为之后构图提供方便 static __host__ void // 无返回值 makeNode( DynamicArrays *pcurve, // 输入的曲线集 int count, // 曲线的条数 DynamicArrays *pcurno // 存储曲线的编号 ); // Host 函数:openCurvePath(得到非闭合曲线编号序列) // 根据图的搜索得到非闭合曲线对应的编号序列,用于得到从 start 到 end 的所有路径 static __host__ void // 无返回值 openCurvePath( DynamicArrays *opencurnode, // 存储非闭合曲线编号集 int *openNum, // 得到非闭合曲线的条数 Graph *G, // 曲线构建的图 int start, // 搜索的起点 int end // 搜索的终点 ); // Host 函数:closeCurvePath(得到闭合曲线编号序列) // 根据图的搜索得到闭合曲线对应的编号序列,用于得到从 start 到 end 的所有路径 static __host__ void // 无返回值 closeCurvePath( DynamicArrays *closecurnode, // 存储闭合曲线编号集 int *closeNum, // 得到闭合曲线的条数 Graph *G, // 曲线构建的图 int insect // 搜索的起点,闭合曲线起点和终点一样 ); // Host 函数:IsArrayEqual(判断两个动态数组表示的曲线是否表示同一条曲线) // 判断两个动态数组表示的曲线是否表示同一条曲线,首先得到的是曲线编号,且不会 // 出现编号顺序一致的数组,可能会出现数量和编号一样但是顺序不一样的数组,排序后 // 比较结果,主要用于闭合曲线的提取,由于闭合曲线头尾编号一样,排序比较的时候 // 不算最后编号数 static __host__ bool // 返回值为 bool 型,如果表示相同就返回 true, // 否则返回 false IsArrayEqual( DynamicArrays object1, // 动态数组1 DynamicArrays object2 // 动态数组2 ); // Host 函数:getPointNo(根据坐标对得到数组内对应编号) // 通过得到的曲线序列,及首尾编号,得到点坐标对的数组对应编号 static __host__ void // 无返回值 getPointNo( DynamicArrays *pcurve, // 提取的曲线序列 int count, // 曲线数目 DynamicArrays *pcurno, // 与曲线序列相对应的首尾编号 DynamicArrays &array, // 点坐标对数组 DynamicArrays &arrayno // 存储得到的对应编号 ); // Host 函数:getCurveNonFormat(得到非格式化输出曲线数据有序序列) // 通过曲线编号集合和首尾编号集得到非格式化输出曲线数据有序序列 static __host__ void // 无返回值 getCurveNonFormat( DynamicArrays *curnode, // 曲线编号集 DynamicArrays *pcurve, // 提取的曲线序列 int count, // 提取的曲线序列的数量 DynamicArrays *pcurno, // 与曲线序列相对应的首尾编号 DynamicArrays *cur, // 最终得到的曲线非格式输出数据 int num, // 曲线的数量 bool close = false // 标志闭合还是非闭合曲线,默认为非闭合 ); // Host 函数:traverse(遍历图像,得到端点坐标、交点坐标及去掉交点后的图像) static __host__ void traverse(DynamicArrays &Vertex, DynamicArrays &Intersect, Image *inimg, Image *outimg,int *tpl) { // 定义临时变量,用于循环 int i, j, k; // 定义临时变量,存储八领域的值 int dx, dy; // 对每一个像素值不为 0 的像素点进行八领域处理 for (i = 0; i < inimg->height; i++) { for(j = 0; j < inimg->width; j++) { // 如果该像素点为 0 则扫描下一个像素点 if (inimg->imgData[i * inimg->width + j] == 0) { outimg->imgData[i * inimg->width + j] = 0; continue; } // 定义变量并且初始化为 0,用于取八领域下标 int m = 0; // 定义变量并且初始化为 0,用于得到八领域内有多少个像素值不为 0 的点 int flag = 0; for(k = 0; k < 8; k++) { dx = j + tpl[m++]; dy = i + tpl[m++]; // 符合要求的八领域内的点的像素值如果不为 0,就累加到 flag 中 if (dx >= 0 && dx < inimg->width && dy >= 0 && dy < inimg->height) { if (inimg->imgData[dy * inimg->width + dx] != 0) { flag++; } } } // 如果 flag 为 0,表示该像素八领域没有不为 0 的像素点,则该点是 // 孤立点,则给对应输出图像在该处赋值为 0 if (flag == 0) { outimg->imgData[i * inimg->width + j] = 0; // 如果 flag 为 
1,表示该像素八领域有一个不为 0 的像素点,则该点是 // 曲线端点,并给对应输出图像在该处赋值原图像对应点像素值 } else if (flag == 1) { Vertex.addElem(j); Vertex.addElem(i); outimg->imgData[i * inimg->width + j] = inimg->imgData[i * inimg->width + j]; // 如果 flag 大于等于 3,表示该像素点作为曲线交点,则给对应输出图像 // 在该处赋值为 0 } else if (flag >= 3) { Intersect.addElem(j); Intersect.addElem(i); outimg->imgData[i * inimg->width + j] = 0; // 否则flag则为 2,表示该像素点作为曲线上的点,并给对应输出图像在该处 // 赋值原图像对应点像素值 } else { outimg->imgData[i * inimg->width + j] = inimg->imgData[i * inimg->width + j]; } } } } // Host 函数:traverseNew(遍历图像,得到图像上所有的像素点) static __host__ void traverseNew(DynamicArrays &array, Image *inimg) { // 定义临时变量,用于循环 int i, j; // 对每一个像素值不为 0 的像素点进行八领域处理 for (i = 0; i < inimg->height; i++) { for(j = 0; j < inimg->width; j++) { // 如果该像素点不为 0 则保存 if (inimg->imgData[i * inimg->width + j] != 0) { // 得到所有灰度值不为 0 的像素点 array.addElem(j); array.addElem(i); } } } } // Host 函数:getCurve(得到去掉交点后的所有曲线段) static __host__ void getCurve(DynamicArrays *pcurve, int &test, int count, Image *img, int *mark, int *tpl, int Vx, int Vy) { // 标志点是否已经访问过,如果访问过,test 加 1,并且退出,主要是判断该端点 // 是否和另一个端点是同一条曲线,如果是就不需要再重复提取 if (mark[Vy * img->width + Vx] == 1) { test++; return; } // 定义临时变量,存储八领域的值 int dx, dy; int j = 0; // 定义变量,用于循环 // 定义标志,表示八领域是否还有像素值不为 0 的点 int flag = 0; // 把该点的坐标值加入第 count 条曲线中,并且设置标志该点已经访问过 pcurve[count].addElem(Vx); pcurve[count].addElem(Vy); mark[Vy * img->width + Vx] = 1; // 按顺时针访问八领域的像素点 for(int i = 0; i < 8; i++) { dx = Vx + tpl[j++]; dy = Vy + tpl[j++]; // 得到第一个不为 0 并且没有访问过的像素点就退出循环,并且标志 flag 为 1 if (img->imgData[dy * img->width + dx] != 0 && mark[dy * img->width + dx] != 1) { flag = 1; break; } } // 如果 flag 为 1,说明找到了一个曲线上的点,以该点递归调用函数 if (flag == 1) { getCurve(pcurve, test, count, img, mark, tpl, dx, dy); } // 如果找不到了,说明已经全部搜索完,退出 return; } // Host 函数:insectClassify(从得到的一堆交点中,进行分类,确定交点个数) static __host__ void insectClassify(int x, int y, DynamicArrays &Intersect, DynamicArrays *insect, int sectnum, int *tpl) { // 把 x,y 坐标加入交点曲线中 insect[sectnum - 1].addElem(x); insect[sectnum - 1].addElem(y); // 加入完后就删除交点数组中的 x,y 坐标 Intersect.delElem(x, y); // if (Intersect.getSize() == 0) return; // 定义临时变量,存储八领域的坐标点 int dx, dy; for(int i = 0; i < 16; i += 2) { dx = x + tpl[i]; dy = y + tpl[i + 1]; // 寻找到交点中是否有和八领域一样的坐标点,若有,则递归调用函数 for(int j = 0; j < Intersect.getSize(); j += 2) { if (dx == Intersect[j] && dy == Intersect[j + 1]) { insectClassify(dx, dy, Intersect, insect, sectnum, tpl); } } } // 返回 return; } // Host 函数:makeCur(根据两点坐标得到一条曲线) static __host__ void makeCur(DynamicArrays &cur, int dx1, int dy1, int dx2, int dy2) { // 定义临时变量,存储坐标值 int x, y; // 首先把起始点加入临时曲线中 cur.addElem(dx1); cur.addElem(dy1); // 如果两坐标值一样,则返回,无须后续步骤 if (dx1 == dx2 && dy1 == dy2) return; // 分别计算两坐标值的差 int m = dx1 - dx2, n = dy1 - dy2; // 设置起始点 x = dx1; y = dy1; // 通过差值开始给交点曲线赋值,首先通过差值相对可以分成四个象限,第一、 // 第二、第三、第四象限,并且以第一个点为中心开始。 // 如果 m >= 0 并且 n >= 0,则表示第二个点相对第一个点在第一象限或者坐标轴 if (m >= 0 && n >= 0) { // 计算坐标差值的差 int d = m - n; // 根据差值的差给交点曲线赋值 if (d >= 0) { for (int c = 0; c < n; c++) { x--; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x--; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x--; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y--; cur.addElem(x); cur.addElem(y); } } // 如果 m >= 0 并且 n < 0,则表示第二个点相对第一个点在第四象限或者坐标轴 } else if (m >= 0 && n < 0) { n = -n; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x--; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x--; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; 
c++) { x--; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y++; cur.addElem(x); cur.addElem(y); } } // 如果 m < 0 并且 n >= 0,则表示第二个点相对第一个点在第二象限或者坐标轴 } else if (m < 0 && n >= 0) { m = -m; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x++; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x++; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x++; y--; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y--; cur.addElem(x); cur.addElem(y); } } // 否则 m < 0 并且 n < 0,则表示第二个点相对第一个点在第三象限 } else { m = -m; n = -n; int d = m - n; if (d >= 0) { for (int c = 0; c < n; c++) { x++; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < d; c++) { x++; cur.addElem(x); cur.addElem(y); } } else { for (int c = 0; c < m; c++) { x++; y++; cur.addElem(x); cur.addElem(y); } for (int c = 0; c < -d; c++) { y++; cur.addElem(x); cur.addElem(y); } } } } // Host 函数:interAssemble(交点曲线与原先得到的曲线进行重组,得到重组后曲线) static __host__ void interAssemble(DynamicArrays *pcurve, int count, DynamicArrays *insect, int sectnum, DynamicArrays realsect, int *tpl) { // 如果没有交点则直接返回 if (realsect.getSize() == 0) return; // 定义临时变量 int i, j, k, x1, y1, x2, y2, dx1, dy1, dx2, dy2, num, num2; int mark1, mark2, flag1, flag2; // 对每一条得到的曲线,先得到其首尾端点,进行八领域寻找交点曲线的尾端点, // 如果找到就把交点曲线添加到原曲线中,实现交点曲线与原先得到的曲线重组 for(i = 0; i < count; i++) { // 初始化首尾都没有找到交点曲线的尾端点 flag1 = 0; flag2 = 0; // 初始化找到的交点曲线的曲线下标为 -1 mark1 = -1; mark2 = -1; // 得到原曲线动态数组的大小 num = pcurve[i].getSize(); // 得到原曲线的首尾端点坐标 x1 = pcurve[i][0]; y1 = pcurve[i][1]; x2 = pcurve[i][num - 2]; y2 = pcurve[i][num - 1]; // 首先对原曲线的首端点开始进行查找 for (j = 0; j < 16; j += 2) { // 得到八领域的坐标 dx1 = x1 + tpl[j]; dy1 = y1 + tpl[j + 1]; // 进行查找,找到退出循环 for (k = 0; k < sectnum; k++) { // 得到交点曲线的动态数组的大小 num2 = insect[k].getSize(); // 找到就相应赋值,并且退出循环 for (int m = 0; m < num2; m += 2) { if (dx1 == insect[k][m] && dy1 == insect[k][m + 1]) { mark1 = k; flag1 += 1; break; } } // 找到退出循环 if (flag1) { break; } } // 找到退出循环 if (flag1) { break; } } // 对原曲线的尾端点开始进行查找 for (j = 0; j < 16; j += 2) { // 得到八领域的坐标 dx2 = x2 + tpl[j]; dy2 = y2 + tpl[j + 1]; // 进行查找,找到退出循环 for (k = 0; k < sectnum; k++) { // 得到交点曲线的动态数组的大小 num2 = insect[k].getSize(); // 找到就相应赋值,并且退出循环 for (int m = 0; m < num2; m += 2) { if (dx2 == insect[k][m] && dy2 == insect[k][m + 1]) { mark2 = k; flag2 += 1; break; } } // 找到退出循环 if (flag2) { break; } } // 找到退出循环 if (flag2) { break; } } // 如果没有找到可以组合的交点曲线,则进行下一个循环 if (mark1 < 0 && mark2 < 0) { continue; } // 如果首部找到了可以组合的交点曲线,尾部没有,则原曲线反转,然后把交点 // 曲线添加到反转后的曲线后边 if (mark1 >= 0 && mark2 < 0) { // 曲线反转 pcurve[i].reverse(); // 构造曲线加入到当前曲线中 DynamicArrays temp; makeCur(temp, dx1, dy1, realsect[2 * mark1], realsect[2 * mark1 + 1]); pcurve[i].addArray(temp); // 如果尾部找到了可以组合的交点曲线,首部没有,直接把交点曲线添加到原来 // 曲线后边 } else if (mark1 < 0 && mark2 >= 0) { // 构造曲线加入到当前曲线中 DynamicArrays temp; makeCur(temp, dx2, dy2, realsect[2 * mark2], realsect[2 * mark2 + 1]); pcurve[i].addArray(temp); // 如果首部和尾部都找到了可以组合的交点曲线,先把尾部找到的交点曲线添加 // 到原来曲线后边,然后反转曲线,然后把首部找到的交点曲线添加到反转后的 // 曲线后边 } else { // 构造曲线加入到当前曲线中 DynamicArrays temp; makeCur(temp, dx2, dy2, realsect[2 * mark2], realsect[2 * mark2 + 1]); pcurve[i].addArray(temp); // 清空得到的曲线 temp.clear(); // 曲线反转 pcurve[i].reverse(); // 构造曲线加入到当前曲线中 makeCur(temp, dx1, dy1, realsect[2 * mark1], realsect[2 * mark1 + 1]); pcurve[i].addArray(temp); } } } // Host 函数:pcurveAcord(根据坐标得到曲线的编号) static __host__ int pcurveAcord(DynamicArrays *pcurve, int count, int &location, int x, int y) { // 定义临时变量 int i, dx1, dy1, dx2, dy2; // 
根据输入坐标查找曲线集中对应的曲线编号 location for (i = 0; i < count; i++) { // 得到曲线的两个端点 dx1 = pcurve[i][0]; dy1 = pcurve[i][1]; dx2 = pcurve[i][pcurve[i].getSize() - 2]; dy2 = pcurve[i][pcurve[i].getSize() - 1]; // 根据端点查找对应的曲线,如果找到则返回首尾情况,表示端点是曲线的首部 // 还是尾部, 0 表示曲线首部,1 表示尾部 if ((dx1 == x) && (dy1 == y)) { location = i; return 0; } if ((dx2 == x) && (dy2 == y)) { location = i; return 1; } } // 如果没有找到则返回 -1 return -1; } // Host 函数:verAssemble(根据近域点集重组曲线集) static __host__ void verAssemble(DynamicArrays *pcurve, int count, DynamicArrays *psect, int pcount, DynamicArrays &realsect) { // 定义临时变量 int i, j, dx, dy, mark, location; int cen_x, cen_y; // 计算得到每个点集中的最中心点,加入到交点集合中 for (i = 0; i < pcount; i++) { cen_x = 0; cen_y = 0; for (j = 0; j < psect[i].getSize();) { cen_x += psect[i][j++]; cen_y += psect[i][j++]; } // 得到最中心点 cen_x = cen_x * 2 / j; cen_y = cen_y * 2 / j; realsect.addTail(cen_x, cen_y); // 组合曲线,更新曲线集合和交点动态数组 for (j = 0; j < psect[i].getSize();) { dx = psect[i][j++]; dy = psect[i][j++]; if ((mark = pcurveAcord(pcurve, count, location, dx, dy)) != -1) { if(!mark) { pcurve[location].reverse(); } DynamicArrays temp; makeCur(temp, dx, dy, cen_x, cen_y); temp.delElemXY(dx, dy); pcurve[location].addArray(temp); } } } } // Host 函数:IsFindPoint(判断坐标是不是坐标集动态数组里的点) static __host__ bool IsFindPoint(DynamicArrays &array, int x, int y) { // 遍历动态数组里的点 for (int i = 0; i < array.getSize(); i += 2) { // 找到就返回 true if (array[i] == x && array[i + 1] == y) return true; } // 没有找到则返回 false return false; } // Host 函数:AidNorm(判断两个端点之间距离是否在领域大小内,若在则添加到点集中) static __host__ bool AidNorm(DynamicArrays *pcurve, int i, int count, DynamicArrays *psect, int pcount, int radius, DynamicArrays &Vertex, int x, int y) { // 定义临时变量 int j, dx1, dy1, dx2, dy2; int dis1, dis2; bool mark1, mark2; bool find = false; // 查找编号 i 之后的曲线端点是否存在距离小于半径的端点 for (j = i + 1; j < count; j++) { // 得到曲线的两个端点坐标 dx1 = pcurve[j][0]; dy1 = pcurve[j][1]; dx2 = pcurve[j][pcurve[j].getSize() - 2]; dy2 = pcurve[j][pcurve[j].getSize() - 1]; mark1 = false; mark2 = false; // 查找第一个端点到曲线 i 端点的距离是否小于 radius if (IsFindPoint(Vertex, dx1, dy1)) { // 得到两点之间的距离并且向上取整 dis1 = (int)floor(sqrt((dx1 - x) * (dx1 - x) + (dy1 - y) * (dy1 - y))); if (dis1 <= radius) { mark1 = true; } } // 查找第二个端点到曲线 i 端点的距离是否小于 radius if(IsFindPoint(Vertex, dx2, dy2)) { // 得到两点之间的距离并且向上取整 dis2 = (int)floor(sqrt((dx2 - x) * (dx2 - x) + (dy2 - y) * (dy2 - y))); if (dis2 <= radius) { mark2 = true; } } // 找到两个端点中到到曲线 i 端点的距离最小的端点进行处理 if (mark1 && mark2) { if (dis1 <= dis2) { psect[pcount].addTail(dx1, dy1); Vertex.delElem(dx1, dy1); } else { psect[pcount].addTail(dx2, dy2); Vertex.delElem(dx2, dy2); } find = true; } else if (mark1 && !mark2) { psect[pcount].addTail(dx1, dy1); Vertex.delElem(dx1, dy1); find = true; } else if (!mark1 && mark2) { psect[pcount].addTail(dx2, dy2); Vertex.delElem(dx2, dy2); find = true; } } // 返回值 find return find; } // Host 函数:bpConnect(断点的重组,根据用户输入的半径进行曲线端点组合) static __host__ void bpConnect(DynamicArrays *pcurve, int count, int radius, DynamicArrays *psect, int *pcount, DynamicArrays &Vertex) { // 定义临时变量 int i, num; int x1, y1, x2, y2; bool find; // 初始化为新增加的交点数为 0 *pcount = 0; // 循环遍历每条曲线的两个端点 for (i = 0; i < count - 1; i++) { num = pcurve[i].getSize(); // 得到曲线的端点坐标 x1 = pcurve[i][0]; y1 = pcurve[i][1]; x2 = pcurve[i][num - 2]; y2 = pcurve[i][num - 1]; find = false; // 判断原先是不是从端点点集得到的端点 if (IsFindPoint(Vertex, x1, y1)) { // 从编号往后的曲线中找到符合条件的端点 find = AidNorm(pcurve, i, count, psect, *pcount, radius, Vertex, x1, y1); // 如果找到,从端点数组中删除这个端点,增加到编号为 *pcount 的 // 近域点集中 if (find) { 
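            // AidNorm has already appended the partner endpoint(s) it found within
            // `radius` to psect[*pcount]; the probe endpoint (x1, y1) now joins the
            // same group, leaves the Vertex list, and the group counter advances.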
Vertex.delElem(x1, y1); psect[*pcount].addTail(x1, y1); *pcount += 1; } } find = false; // 判断原先是不是从端点点集得到的端点 if (IsFindPoint(Vertex, x2, y2)) { // 从编号往后的曲线中找到符合条件的端点 find = AidNorm(pcurve, i, count, psect, *pcount, radius, Vertex, x2, y2); // 如果找到,从端点数组中删除这个端点,增加到编号为 *pcount 的 // 近域点集中 if (find) { Vertex.delElem(x2, y2); psect[*pcount].addTail(x2, y2); *pcount += 1; } } } } // Host 函数:makeNode(根据曲线的起点和终点,以及边的情况,得到曲线的编号) static __host__ void makeNode(DynamicArrays *pcurve, int count, DynamicArrays *pcurno) { // 定义临时变量 int num1 = 0, num2 = 1, num = 0; int i, j, size1, size2; int x1, y1, x2, y2; // 定义 bool 型变量,表示查找是否之前相同的端点出现过 bool find1, find2; // 给第一条曲线,添加首尾端点编号为 0 1 pcurno[0].addTail(0, 1); // 接下来的端点编号从 2 开始 num = 2; // 循环给剩下的曲线端点编号,并且编号不能重复 for (i = 1; i < count; i++) { // 初始化没有找到 find1 = find2 = false; // 得到当前曲线的动态数组长度 size2 = pcurve[i].getSize(); // 查找之前的曲线端点 for (j = i - 1; j >= 0; j--) { // 得到当前曲线的动态数组长度 size1 = pcurve[j].getSize(); // 得到当前曲线的首尾端点坐标 x1 = pcurve[j][0]; y1 = pcurve[j][1]; x2 = pcurve[j][size1 - 2]; y2 = pcurve[j][size1 - 1]; // 如果找到了首端点编号,得到当前编号值 if (pcurve[i][0] == x1 && pcurve[i][1] == y1) { num1 = pcurno[j][0]; find1 = true; } else if (pcurve[i][0] == x2 && pcurve[i][1] == y2) { num1 = pcurno[j][1]; find1 = true; } // 如果找到了尾端点编号,得到当前编号值 if (pcurve[i][size2 - 2] == x1 && pcurve[i][size2 - 1] == y1) { num2 = pcurno[j][0]; find2 = true; } else if (pcurve[i][size2 - 2] == x2 && pcurve[i][size2 - 1] == y2) { num2 = pcurno[j][1]; find2 = true; } } // 如果首尾端点都找到了,则把之前得到的编号赋给当前曲线 if (find1 && find2) { pcurno[i].addTail(num1, num2); // 如果仅仅首端点找到了,则把之前得到的编号赋给当前曲线 } else if (find1 && !find2) { pcurno[i].addTail(num1, num); num++; // 如果仅仅尾端点找到了,则把之前得到的编号赋给当前曲线 } else if (!find1 && find2) { pcurno[i].addTail(num, num2); num++; // 如果首尾端点都没有找到,则重新累加赋值 } else { pcurno[i].addTail(num, num + 1); num += 2; } } // 曲线端点编号结束后,给曲线的边赋值,也不会重复 for (i = 0; i < count; i++) { pcurno[i].addElem(num++); } } // Host 函数:openCurvePath(得到非闭合曲线编号序列) static __host__ void openCurvePath(DynamicArrays *opencurnode, int *openNum, Graph *G, int start, int end) { // 定义动态数组变量,表示边栈和点栈 DynamicArrays edgestack, vexstack; // 定义点栈顶和边栈顶数,并且初始化 int vtop = -1, etop = -1; // 定义点栈和边栈的大小 int vstacksize, estacksize; // 定义临时变量 int curnode; // 定义临时边指针 Edge *cur; // 首端点入栈 vexstack.addElem(start); // 复位所有当前要访问的边 G->resetCurrent(); // 循环,用于得到从起点到终点的所有路径 while (vexstack.getSize() != 0) { // 得到当前栈的大小 vstacksize = vexstack.getSize(); estacksize = edgestack.getSize(); // 如果栈顶的值为终点 if (vexstack[vstacksize - 1] == end) { // 得到一条从起点到终点的路径并且保存。即添加端点编号和边编号 for (int i = 0; i < estacksize; i++) { opencurnode[*openNum].addTail(vexstack[i], edgestack[i]); } // 添加终点编号 opencurnode[*openNum].addElem(end); // 曲线条数增加 1 *openNum += 1; // 删除点栈顶和边栈顶的端点,搜索下一条可能的路径 vexstack.delTail(vtop); edgestack.delTail(etop); // 如果栈顶的值不是终点,则继续搜索可能的路径 } else { // 得到当前栈顶的值 curnode = vexstack[vstacksize - 1]; // 得到图的当前点要访问的边 cur = G->vertexlist[curnode].current; // 如果当前要访问的边不为空 if (cur != NULL) { // 得到当前边的另一个顶点,如果该顶点不在点栈中,当前边也不在边 // 栈中,则把当前点和边分别入栈,把当前要访问的边指针指向下一条 // 边。判断是为了确保路径的点和边不能重复 if (!edgestack.findElem(cur->eno) && !vexstack.findElem(cur->jvex)) { vexstack.addElem(cur->jvex); edgestack.addElem(cur->eno); } G->vertexlist[curnode].current = cur->link; // 如果当前要访问的边为空,则当前点连接的边都访问过,删除点栈顶和 // 边栈顶的端点,重新设置当前栈顶端点的当前要访问的边 } else { vexstack.delTail(vtop); edgestack.delTail(etop); // 如果点栈顶的值等于起始点,则退出循环 if (vtop == start) break; // 设置当前栈顶端点的当前要访问的边为第一条边 G->vertexlist[vtop].current = G->vertexlist[vtop].firstedge; } } } } // Host 函数:closeCurvePath(得到闭合曲线编号序列) static 
__host__ void closeCurvePath(DynamicArrays *closecurnode, int *closeNum, Graph *G, int insect) { // 定义动态数组变量,表示边栈和点栈 DynamicArrays edgestack, vexstack; // 定义点栈顶和边栈顶数,并且初始化 int vtop = -1, etop = -1; // 定义点栈和边栈的大小 int vstacksize, estacksize; // 定义临时变量 int curnode; // 是否找到一样的路径,尽管顺序不一样 bool isFind; // 定义临时边指针 Edge *cur; // 路径起始端点入栈 vexstack.addElem(insect); // 闭合曲线数量 int num = *closeNum; // 复位所有当前要访问的边 G->resetCurrent(); while (vexstack.getSize() != 0) { // 得到当前栈的大小 vstacksize = vexstack.getSize(); estacksize = edgestack.getSize(); // 初始化 isFind 为 false isFind = false; // 当边栈不为空,且点栈栈顶元素值为起点,则保存一条得到的闭合路径 if (estacksize != 0 && vexstack[vstacksize - 1] == insect) { for (int i = 0; i < estacksize; i++) { closecurnode[num].addTail(vexstack[i], edgestack[i]); } closecurnode[num].addElem(insect); // 查找是否和之前得到的路径表示是同一条路径 for (int j = 0; j < num; j++) { if (IsArrayEqual(closecurnode[j], closecurnode[num])) { isFind = true; break; } } // 如果找到了一样的路径,就清空当前得到的闭合路径 if (isFind) { closecurnode[num].clear(); // 如果没有找到,则保存当前得到的闭合路径,并且路径数量加 1 } else { num++; } // 删除点栈顶和边栈顶的端点,搜索下一条可能的路径 vexstack.delTail(vtop); edgestack.delTail(etop); // 栈顶不是起点,则继续搜索可能的路径 } else { // 得到当前栈顶的值 curnode = vexstack[vstacksize - 1]; // 得到图的当前点要访问的边 cur = G->vertexlist[curnode].current; // 如果当前要访问的边不为空 if (cur != NULL) { // 得到当前边的另一个顶点,如果当前边不在边栈中,则把当前点和边 // 分别入栈,把当前要访问的边指针指向下一条边。 if (!edgestack.findElem(cur->eno)) { if ((cur->jvex == insect)|| !vexstack.findElem(cur->jvex)) { vexstack.addElem(cur->jvex); edgestack.addElem(cur->eno); } } G->vertexlist[curnode].current = cur->link; // 如果当前要访问的边为空,则当前点连接的边都访问过,删除点栈顶和 // 边栈顶的端点,重新设置当前栈顶端点的当前要访问的边 } else { vexstack.delTail(vtop); edgestack.delTail(etop); // 如果点栈顶的值等于起始点,则退出循环 if (vtop == insect) break; // 设置设置当前栈顶端点的当前要访问的边为第一条边 G->vertexlist[vtop].current = G->vertexlist[vtop].firstedge; } } } // 得到闭合曲线的数量 *closeNum = num; } // Host 函数:IsArrayEqual(判断两个动态数组表示的曲线是否表示同一条曲线) static __host__ bool IsArrayEqual(DynamicArrays object1, DynamicArrays object2) { // 两个动态数组的大小不一致,则直接返回 false if (object1.getSize() != object2.getSize()) { return false; // 否则看排序后结果是否一样,如果一样,则返回 true,否则返回 false // 由于处理的是闭合曲线编号,头尾是一致的,则排序比较不包括最后一个编号 } else { // 得到曲线编号动态数组大小 int size = object1.getSize(); // 定义临时指针变量,得到第一个动态数组的整型指针 int *p = object1.getCrvDatap(); // 定义临时变量,用于交换数据 int temp; // 临时变量 int min; // 排序第一个动态数组的数据 for (int i = 0; i < size - 2; i++) { min = i; for (int j = i + 1; j < size - 1; j++) { if (p[j] < p[min]) { min = j; } } // 如果找到其他最小的则交换 if (min != i) { temp = p[i]; p[i] = p[min]; p[min] = temp; } } // 定义临时指针变量,得到第二个动态数组的整型指针 int *q = object2.getCrvDatap(); // 排序第二个动态数组的数据 for (int i = 0; i < size - 2; i++) { min = i; for (int j = i + 1; j < size - 1; j++) { if (q[j] < q[min]) { min = j; } } // 如果找到其他最小的则交换 if (min != i) { temp = q[i]; q[i] = q[min]; q[min] = temp; } } // 排序结果如果不一样,则返回 false for (int i = 0; i < size - 1; i++) { if (p[i] != q[i]) { return false; } } // 表示同一条路径,返回 true return true; } } // Host 函数:getPointNo(根据坐标对得到数组内对应编号) static __host__ void getPointNo(DynamicArrays *pcurve, int count, DynamicArrays *pcurno, DynamicArrays &array, DynamicArrays &arrayno) { // 临时变量,用于循环计数 int i, j; // 定义临时变量,存储坐标 int dx, dy; // 循环得到数组内坐标对编号 for (i = 0; i < array.getSize();) { // 得到数组的 x,y 坐标 dx = array[i++]; dy = array[i++]; // 根据得到的曲线头尾坐标得到相应编号 for (j = 0; j < count; j++) { // 如果为曲线首部 if (dx == pcurve[j][0] && dy == pcurve[j][1]) { arrayno.addElem(pcurno[j][0]); break; // 如果为曲线尾部 } else if (dx == pcurve[j][pcurve[j].getSize() - 2] && dy == pcurve[j][pcurve[j].getSize() - 1]) { 
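            // (dx, dy) matches the tail of curve j, so this coordinate pair maps to
            // that curve's tail node number pcurno[j][1].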
arrayno.addElem(pcurno[j][1]); break; } } } } // Host 函数:getCurveNonFormat(得到非格式化输出曲线数据有序序列) static __host__ void getCurveNonFormat(DynamicArrays *curnode, DynamicArrays *pcurve, int count, DynamicArrays *pcurno, DynamicArrays *cur, int num, bool close) { // 临时变量,存储曲线编号数组的大小 int nodesize; // 临时变量,存储得到的端点编号值 int inode; // 临时变量,得到点的数目 int vnum = pcurno[count - 1][2] - count + 1; // 临时变量,存储得到的曲线下标 int icur; // 定义循环计数变量 int i, j; // 临时变量,作为函数参数得到曲线的末尾坐标 int xtop, ytop; // 根据得到的曲线编号集获得对应曲线 for (i = 0; i < num; i++) { // 得到曲线编号数组的大小 nodesize = curnode[i].getSize(); // 循环得到曲线端点和边编号并且得到组合曲线 for (j = 0; j < nodesize;) { // 得到点编号 inode = curnode[i][j++]; // 如果超过大小,则推出循环 if (j >= nodesize) break; // 根据边编号得到曲线下标 icur = curnode[i][j++] - vnum; // 点编号和曲线下标,得到组合曲线 if (inode == pcurno[icur][0]) { cur[i].addArray(pcurve[icur]); if (j != nodesize - 1) { cur[i].delTail(ytop); cur[i].delTail(xtop); } } else if (inode == pcurno[icur][1]) { pcurve[icur].reverse(); cur[i].addArray(pcurve[icur]); if (j != nodesize - 1) { cur[i].delTail(ytop); cur[i].delTail(xtop); } pcurve[icur].reverse(); } } // 如果为闭合曲线就删除末尾坐标 if (close) { // 由于末尾坐标和起始一样,删除末尾坐标 cur[i].delTail(ytop); cur[i].delTail(xtop); } } } // Host 函数:freeCurve(释放曲线申请的空间) void freeCurve(Curve ***curveList, int count) { if (curveList == NULL) return; // 循环释放空间 for (int i = 0; i < count; i++) { CurveBasicOp::deleteCurve((*curveList)[i]); } delete [](*curveList); } // Kernel 函数:_traverseKer(并行遍历图像得到端点数组和交点数组,并且得到去掉 // 交点后的输出图像) static __global__ void _traverseKer(ImageCuda inimg, ImageCuda outimg, int *array1_dev, int *array2_dev, Template boxtpl) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算输出坐标点对应的图像数据数组下标。 int outidx = r * inimg.imgMeta.width + c; // 如果当前像素点为 0,则输出图像对应位置零,并且返回。 if (inimg.imgMeta.imgData[inidx] == 0) { outimg.imgMeta.imgData[inidx] = 0; return; } int tmpidx; // 临时变量,存储模板其他点的图像数据数组下标 int count = 0; // 临时变量,存储灰度不为 0 的个数 int dx, dy; // 临时变量,存储模板坐标 int *p = boxtpl.tplData; // 临时变量,得到模板指针 // 扫描该点模版范围内有多少个灰度值不为 0 的点 for (int i = 0; i < boxtpl.count; i++) { // 计算当模版位置所在像素的 x 和 y 分量,模版使用相邻的两个下标的 // 数组表示一个点,所以使用当前模版位置的指针加一操作 dx = c + *(p++); dy = r + *(p++); // 如果是当前点则理下一个点 if (dx == c && dy == r) continue; // 计算坐标点对应的图像数据数组下标。 tmpidx = dy * inimg.pitchBytes + dx; // 得到当前点 8 领域内的非零像素点个数 if (inimg.imgMeta.imgData[tmpidx] != 0) { count++; } } // 如果 count 为 0,表示该像素八领域没有不为 0 的像素点,则该点是 // 孤立点,则给对应输出图像在该处赋值为 0 if (count == 0) { outimg.imgMeta.imgData[inidx] = 0; return; // 如果 flag 大于等于 3,表示该像素点作为曲线交点,则给对应输出图像 // 在该处赋值为 0 } else if (count >= 3) { array2_dev[2 * outidx] = c; array2_dev[2 * outidx + 1] = r; outimg.imgMeta.imgData[inidx] = 0; // 如果 count 为 1,表示该像素八领域有一个不为 0 的像素点,则该点是 // 曲线端点,并给对应输出图像在该处赋值原图像对应点像素值 } else if (count == 1) { array1_dev[2 * outidx] = c; array1_dev[2 * outidx + 1] = r; outimg.imgMeta.imgData[inidx] = inimg.imgMeta.imgData[inidx]; // 否则flag则为 2,表示该像素点作为曲线上的点,并给对应输出图像在该处 // 赋值原图像对应点像素值 } else { outimg.imgMeta.imgData[inidx] = inimg.imgMeta.imgData[inidx]; } } // Kernel 函数:_traverseKerNew(遍历图像,得到图像上所有的像素点) static __global__ void _traverseKerNew(ImageCuda inimg, int *array1_dev) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 
表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算输出坐标点对应的图像数据数组下标。 int outidx = r * inimg.imgMeta.width + c; // 如果当前像素点不为 0,则得到该坐标 if (inimg.imgMeta.imgData[inidx] != 0) { array1_dev[2 * outidx] = c; array1_dev[2 * outidx + 1] = r; } } // 宏:FAIL_CURVETRACING_FREE // 当下面函数运行出错时,使用该宏清除内存,防止内存泄漏。 #define FAIL_CURVETRACING_FREE do { \ if (outimg1 != NULL) { \ ImageBasicOp::deleteImage(outimg1); \ outimg1 = NULL; \ } \ if (outimg2 != NULL) { \ ImageBasicOp::deleteImage(outimg2); \ outimg2 = NULL; \ } \ if (tmpdev != NULL) { \ cudaFree(tmpdev); \ tmpdev = NULL; \ } \ if (array1 != NULL) { \ delete []array1; \ array1 = NULL; \ } \ if (array2 != NULL) { \ delete []array2; \ array2 = NULL; \ } \ if (boxtpl != NULL) { \ TemplateFactory::putTemplate(boxtpl); \ boxtpl = NULL; \ } \ if (mark != NULL) { \ delete []mark; \ mark = NULL; \ } \ if (pcurve != NULL) { \ delete []pcurve; \ pcurve = NULL; \ } \ if (insect != NULL) { \ delete []insect; \ insect = NULL; \ } \ if (psect != NULL) { \ delete []psect; \ psect = NULL; \ } \ if (pcurno != NULL) { \ delete []pcurno; \ pcurno = NULL; \ } \ if (opencur != NULL) { \ delete []opencur; \ opencur = NULL; \ } \ if (closecur != NULL) { \ delete []closecur; \ closecur = NULL; \ } \ if (G != NULL) { \ delete G; \ G = NULL; \ } \ } while (0) // Host 成员方法:curveTracing(曲线跟踪) // 对图像进行曲线跟踪,得到非闭合曲线和闭合曲线的有序序列 __host__ int CurveTracing::curveTracing(Image *inimg, Curve ***curveList, int *openNum, int *closeNum) { // 如果输入图像指针为空或者输出的曲线集指针为空,错误返回 if (inimg == NULL || curveList == NULL) return NULL_POINTER; // 定义错误码变量 int errcode; cudaError_t cuerrcode; // 定义输出图像 1 和 2 Image *outimg1 = NULL; Image *outimg2 = NULL; // 定义指针 tmpdev 给设备端端点数组和交点数组创建存储空间 int *tmpdev = NULL; // 定义 CPU 端端点数组和交点数组 int *array1 = NULL; int *array2 = NULL; // 定义模板 boxtpl 用于获取模板 Template *boxtpl = NULL; // 定义标志数组,标志图像上非零点的访问情况 int *mark = NULL; // 定义曲线数组,存储得到的曲线 DynamicArrays *pcurve = NULL; // 定义交点分类的动态数组,存储分类的结果 DynamicArrays *insect = NULL; // 定义近域点集动态数组,用于断点连续的处理 DynamicArrays *psect = NULL; // 定义变量,存储曲线的编号; DynamicArrays *pcurno = NULL; // 定义非闭合曲线 DynamicArrays *opencur = NULL; // 定义闭合曲线 DynamicArrays *closecur = NULL; // 定义图类的指针变量 Graph *G = NULL; // 给输出图像构建空间 ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); ImageBasicOp::newImage(&outimg2); ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height); // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } errcode = ImageBasicOp::copyToCurrentDevice(outimg2); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 提取输出图像 1 的 ROI 子图像。 ImageCuda outsubimgCud1; errcode = ImageBasicOp::roiSubImage(outimg1, &outsubimgCud1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 提取输出图像 2的 ROI 子图像。 ImageCuda outsubimgCud2; errcode = ImageBasicOp::roiSubImage(outimg2, 
&outsubimgCud2); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 定义八领域模板 int tpl[16] = { -1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0 }; // 定义变量,用于循环 int i, j, k; // 定义临时变量,得到第一次遍历得到的端点和交点动态数组大小 int num1 = 0, num2 = 0; // 定义临时变量存储坐标值 int dx, dy; // 计算数据尺寸。 int arraysize = inimg->width * inimg->height * 2; int datasize = arraysize * 2 * sizeof(int); // 在当前设备上申请坐标数据的空间。 cuerrcode = cudaMalloc((void **)(&tmpdev), datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 给该空间内容全部赋值为 -1 cuerrcode = cudaMemset(tmpdev, -1, datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义设备端端点数组和交点数组 int *array1_dev = tmpdev; int *array2_dev = tmpdev + arraysize; // 定义模板的尺寸 dim3 boxsize(3, 3, 1); // 通过模板工厂得到圆形领域模板 errcode = TemplateFactory::getTemplate(&boxtpl, TF_SHAPE_BOX, boxsize, NULL); // 检查模板是否为 NULL,如果为 NULL 直接报错返回。 if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 将模板拷贝到 Device 内存中 errcode = TemplateBasicOp::copyToCurrentDevice(boxtpl); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 计算调用第一个 Kernel 所需要的线程块尺寸。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 调用第一个 Kernel 生成图像标志位数组。 _traverseKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud1, array1_dev, array2_dev, *boxtpl); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 给 CPU 端端点数组和交点数组申请空间 array1 = new int[arraysize]; array2 = new int[arraysize]; // 把两个数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } cuerrcode = cudaMemcpy(array2, array2_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义端点动态数组和交点动态数组 DynamicArrays Vertex, Intersect; // 把得到的端点和交点数组的非 -1 值赋值给端点动态数组和交点动态数组 for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex.addElem(array1[i]); } } for (i = 0; i < arraysize; i++) { if (array2[i] != -1) { Intersect.addElem(array2[i]); } } // 得到第一次遍历得到的端点和交点动态数组大小 num1 = Vertex.getSize(); num2 = Intersect.getSize(); // 如果图像上曲线有端点和交点时,说明有曲线相交,可能有闭合和非闭合曲线, // 如果图像上曲线有端点没有交点时,但是经过断续连接有可能产生闭合和 // 非闭合曲线 if ((num1 && num2) || (num1 && !num2)) { // 重新给该空间内容全部赋值为 -1,用于第二次遍历 cuerrcode = cudaMemset(tmpdev, -1, datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 第二次并行遍历 _traverseKer<<<gridsize, blocksize>>>(outsubimgCud1, outsubimgCud2, array1_dev, array2_dev, *boxtpl); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 把端点数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义第二次遍历要得到的端点动态数组 DynamicArrays Vertex1; for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex1.addElem(array1[i]); } } // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 申请标志数组的空间,大小和图像一样 mark = new int[arraysize / 2]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * arraysize / 2); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 
申请曲线数组空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 定义变量,存储交点的个数 int sectnum = 0; // 申请交点分类的动态数组空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 定义近域点集的个数,得到新产生的交点个数 int pcount = 0; // 给近域点集申请最大空间 psect = new DynamicArrays[Vertex.getSize() / 2]; // 根据用户输入的半径得到近域点集,并且更新端点动态数组 bpConnect(pcurve, count, radius, psect, &pcount, Vertex); // 断点重组,根据用户输入的半径进行曲线断点组合,更新交点动态数组 verAssemble(pcurve, count, psect, pcount, realsect); // 存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 给图申请空间,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,分为非闭合曲线和闭合曲线 DynamicArrays opencurnode[CURVE_VALUE], closecurnode[CURVE_VALUE]; // 定义端点编号数组和交点编号数组,分别得到端点和交点的坐标对应的编号数 DynamicArrays vertexno; DynamicArrays intersectno; // 调用函数得到数组端点的编号 getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 调用函数得到数组交点的编号 if (realsect.getSize() > 0) getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到非闭合曲线的路径编号 for (i = 0; i < vertexno.getSize(); i++) { // 定义起始点 int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // 调用函数,得到非闭合曲线编号序列集 openCurvePath(opencurnode, openNum, G, start, end); } } // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 申请非闭合曲线空间 opencur = new DynamicArrays[*openNum]; // 申请闭合曲线空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的非闭合曲线 getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出非闭合曲线 for (i = 0; i < *openNum; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != 
NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM; } // 得到曲线长度 curveLength = (size_t)(opencur[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } // 循环得到输出闭合曲线 for (; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // 如果图像上没有端点只有交点时候,说明是闭合曲线相交 else if (!num1 && num2) { // 重新给该空间内容全部赋值为 -1,用于第二次遍历 cuerrcode = cudaMemset(tmpdev, -1, datasize); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 第二次并行遍历 _traverseKer<<<gridsize, blocksize>>>(outsubimgCud1, outsubimgCud2, array1_dev, array2_dev, *boxtpl); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 把端点数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义第二次遍历要得到的端点动态数组 DynamicArrays Vertex1; for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { Vertex1.addElem(array1[i]); } } // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 申请标志数组的空间,大小和图像一样 mark = new int[arraysize / 2]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * arraysize / 2); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 申请曲线数组空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 定义变量,存储交点的个数 int sectnum = 0; // 申请交点分类的动态数组空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; 
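                // Keep the candidate with the most 8-neighbourhood hits within this
                // class; it becomes the representative intersection appended to realsect.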
insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 申请曲线编号大小,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,只有闭合曲线 DynamicArrays closecurnode[CURVE_VALUE]; // 定义交点编号数组,得到端点坐标对应的编号数 DynamicArrays intersectno; // 调用函数得到数组交点的编号 getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 申请闭合曲线空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // 否则只有闭合曲线,且闭合曲线之间没有相交 else { // 重新给该空间内容全部赋值为 -1,用于第二次遍历 cuerrcode = cudaMemset(array1_dev, -1, arraysize * sizeof (int)); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 第二次并行遍历,得到曲线上所有点集 _traverseKerNew<<<gridsize, blocksize>>>(outsubimgCud1, array1_dev); if (cudaGetLastError() != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 把端点数组拷贝到 Host 端 cuerrcode = cudaMemcpy(array1, array1_dev, arraysize * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { FAIL_CURVETRACING_FREE; return CUDA_ERROR; } // 定义第二次遍历要得到的点集 DynamicArrays point; // 把得到的端点和交点数组的非 -1 值赋值给端点动态数组和交点动态数组 for (i = 0; i < arraysize; i++) { if (array1[i] != -1) { point.addElem(array1[i]); } } // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToHost(outimg1); if (errcode != NO_ERROR) { FAIL_CURVETRACING_FREE; return errcode; } // 申请标志数组的空间,大小和图像一样 mark = new int[arraysize / 2]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * arraysize / 2); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 申请曲线数组空间,曲线最多数目是端点的个数 pcurve = new DynamicArrays [point.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < point.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, point[i], point[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; *closeNum = count; // 定义曲线总数 int total = count; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int 
*crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(pcurve[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = pcurve[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE; return errcode; } } } // 释放动态申请的空间 FAIL_CURVETRACING_FREE; // 函数执行完毕,返回 return NO_ERROR; } // 宏:FAIL_CURVETRACING_FREE_CPU // 当下面函数运行出错时,使用该宏清除内存,防止内存泄漏。 #define FAIL_CURVETRACING_FREE_CPU do { \ if (outimg1 != NULL) { \ ImageBasicOp::deleteImage(outimg1); \ outimg1 = NULL; \ } \ if (outimg2 != NULL) { \ ImageBasicOp::deleteImage(outimg2); \ outimg2 = NULL; \ } \ if (mark != NULL) { \ delete []mark; \ mark = NULL; \ } \ if (pcurve != NULL) { \ delete []pcurve; \ pcurve = NULL; \ } \ if (insect != NULL) { \ delete []insect; \ insect = NULL; \ } \ if (pcurno != NULL) { \ delete []pcurno; \ pcurno = NULL; \ } \ if (opencur != NULL) { \ delete []opencur; \ opencur = NULL; \ } \ if (closecur != NULL) { \ delete []closecur; \ closecur = NULL; \ } \ if (G != NULL) { \ delete G; \ G = NULL; \ } \ } while (0) // Host 成员方法:curveTracingCPU(曲线跟踪) // 对图像进行曲线跟踪,得到非闭合曲线和闭合曲线的有序序列 __host__ int CurveTracing::curveTracingCPU(Image *inimg, Curve ***curveList, int *openNum, int *closeNum) { // 如果输入图像指针为空或者输出的曲线集指针为空,错误返回 if (inimg == NULL || curveList == NULL) return NULL_POINTER; // 定义错误码变量 int errcode; // 定义输出图像 1 和 2 Image *outimg1 = NULL; Image *outimg2 = NULL; // 定义标志数组,标志图像上非零点的访问情况 int *mark = NULL; // 定义曲线数组,存储得到的曲线 DynamicArrays *pcurve = NULL; // 定义交点分类的动态数组,存储分类的结果 DynamicArrays *insect = NULL; // 定义变量,存储曲线的编号; DynamicArrays *pcurno = NULL; // 定义非闭合曲线 DynamicArrays *opencur = NULL; // 定义闭合曲线 DynamicArrays *closecur = NULL; // 定义图类的指针变量 Graph *G = NULL; // 构建输出图像 1 和 2 ImageBasicOp::newImage(&outimg1); ImageBasicOp::makeAtHost(outimg1, inimg->width, inimg->height); ImageBasicOp::newImage(&outimg2); ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height); // 定义八领域模板 int tpl[16] = { -1, -1, 0, -1, 1, -1, 1, 0, 1, 1, 0, 1, -1, 1, -1, 0 }; // 定义临时变量,得到第一次遍历得到的端点和交点动态数组大小 int num1 = 0, num2 = 0; // 定义第一次遍历要得到的端点动态数组和交点动态数组 DynamicArrays Vertex; DynamicArrays Intersect; // 定义变量,用于循环 int i, j, k; // 定义临时变量存储坐标值 int dx, dy; // 遍历图像,得到端点和交点的动态数组 traverse(Vertex, Intersect, inimg, outimg1, tpl); // 得到第一次遍历得到的端点和交点动态数组大小 num1 = Vertex.getSize(); num2 = Intersect.getSize(); // 如果图像上曲线有端点和交点时,说明有曲线相交,可能有闭合和非闭合曲线 if (num1 && num2) { // 定义第二次遍历要得到的端点动态数组和交点动态数组 DynamicArrays Vertex1, Intersect1; // 第二次遍历图像,得到端点和交点的动态数组 traverse(Vertex1, Intersect1, outimg1, outimg2, tpl); // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 申请标志数组的空间 mark = new int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 DynamicArrays *pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 
定义变量,存储交点的个数 int sectnum = 0; // 定义交点分类的动态数组,存储分类的结果,并且申请空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 定义变量,存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 定义图的指针变量,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,分为非闭合曲线和闭合曲线 DynamicArrays opencurnode[CURVE_VALUE], closecurnode[CURVE_VALUE]; // 定义端点编号数组和交点编号数组,分别得到顶点和端点的坐标对应的编号数 DynamicArrays vertexno; DynamicArrays intersectno; // 调用函数得到数组端点的编号 getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 调用函数得到数组交点的编号 getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到非闭合曲线的路径编号 for (i = 0; i < vertexno.getSize(); i++) { // 定义起始点 int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // 调用函数,得到非闭合曲线编号序列集 openCurvePath(opencurnode, openNum, G, start, end); } } // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 定义非闭合曲线,并且申请空间 opencur = new DynamicArrays[*openNum]; // 定义闭合曲线,并且申请大小空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的非闭合曲线 getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出非闭合曲线 for (i = 0; i < *openNum; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM; } // 得到曲线长度 curveLength = (size_t)(opencur[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } // 循环得到输出闭合曲线 for (; i < total; i++) { // 申请曲线空间 errcode = 
CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 如果图像上没有端点只有交点时候,说明是闭合曲线相交 else if (num1 && !num2) { // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 定义标志数组,并且申请和图像大小的空间 mark = new int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 DynamicArrays *pcurve = new DynamicArrays [Vertex.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex[i], Vertex[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义变量,存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 定义图的指针变量,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,只有非闭合曲线 DynamicArrays opencurnode[CURVE_VALUE]; // 定义端点编号数组和交点编号数组,分别得到顶点和端点的坐标对应的编号数 DynamicArrays vertexno; // 调用函数得到数组端点的编号 getPointNo(pcurve, count, pcurno, Vertex, vertexno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到非闭合曲线的路径编号 for (i = 0; i < vertexno.getSize(); i++) { // 定义起始点 int start, end; start = vertexno[i]; for (j = i + 1; j < vertexno.getSize(); j++) { end = vertexno[j]; // 调用函数,得到非闭合曲线编号序列集 openCurvePath(opencurnode, openNum, G, start, end); } } // 定义非闭合曲线,并且申请空间 opencur = new DynamicArrays[*openNum]; // 调用函数得到非格式输出的非闭合曲线 getCurveNonFormat(opencurnode, pcurve, count, pcurno, opencur, *openNum, false); // 定义曲线总数 int total = *openNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出非闭合曲线 for (i = 0; i < *openNum; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM; } // 得到曲线长度 curveLength = (size_t)(opencur[i].getSize() / 2); // 得到动态数组里的整型指针 crvData = opencur[i].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 如果图像上没有端点只有交点时候,说明是闭合曲线相交 else if (!num1 && num2) { // 定义第二次遍历要得到的端点动态数组和交点动态数组 DynamicArrays Vertex1, Intersect1; // 第二次遍历图像,得到端点和交点的动态数组 traverse(Vertex1, Intersect1, outimg1, outimg2, tpl); // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 定义标志数组,并且申请和图像大小的空间 mark = new int[maxnum]; // 初始化标志数组的值为 0 memset(mark, 0, sizeof(int) * maxnum); // 定义变量 count 表示得到的曲线输量 int count = 0; // 标志曲线跟踪的端点是否已经在曲线中,用于 
getCurve 函数调用 int test = 0; // 定义曲线数组,并且申请空间,曲线最多数目是端点的个数 DynamicArrays *pcurve = new DynamicArrays [Vertex1.getSize() / 2]; // 循环调用 getCurve 函数得到非闭合曲线的有序序列 for(i = 0; i < Vertex1.getSize(); i += 2) { getCurve(pcurve, test, count, outimg1, mark, tpl, Vertex1[i], Vertex1[i + 1]); // 如果 test 不为 0,则 count 不加 1,继续循环,否则曲线数目加 1 if (test) { test = 0; continue; } count++; } // 定义临时变量存储坐标值 int x, y; // 定义变量,存储交点的个数 int sectnum = 0; // 定义交点分类的动态数组,存储分类的结果,并且申请空间 insect = new DynamicArrays [num2 / 2]; // 循环得到交点分类动态数组值 while (Intersect.getSize()) { x = Intersect[0]; y = Intersect[1]; sectnum++; insectClassify(x, y, Intersect, insect, sectnum, tpl); } // 定义真正的交点数组,得到的是唯一确定的交点,与交点曲线方向动态数组集 // 相对应,大小其实为交点个数。从分类的交点集中取领域数最大的点作为交点 DynamicArrays realsect; // 循环得到交点曲线方向动态数组集和真正的交点数组 for (i = 0; i < sectnum; i++) { // 定义变量,存储领域数最大的点标记值,并初始化为 0 int maxvalue = 0; // 定义变量,存储坐标值,初始化为第一条曲线第一个点的坐标值 int insect_x = insect[i][0], insect_y = insect[i][1]; // 根据之前的分类结果,循环得到交点曲线方向动态数组 for (j = 0; j < insect[i].getSize(); j += 2) { x = insect[i][j]; y = insect[i][j + 1]; // 定义临时变量,存储分类集合中的点的八领域内有多少个点 int value = 0; for (k = 0; k < 16; k += 2) { dx = x + tpl[k]; dy = y + tpl[k + 1]; // 遍历点周围有多少个点 for (int s = 0; s < insect[i].getSize(); s += 2) { if (dx == insect[i][s] && dy == insect[i][s + 1]) { value++; } } } // 找到最中心的交点 if (value > maxvalue) { maxvalue = value; insect_x = x; insect_y = y; } } // 得到交点坐标值 realsect.addElem(insect_x); realsect.addElem(insect_y); } // 调用函数得到重组后的曲线,还是存储于 pcurve 中 interAssemble(pcurve, count, insect, sectnum, realsect, tpl); // 定义变量,存储曲线的编号,空间大小和之前提取的曲线一样 pcurno = new DynamicArrays[count]; // 调用函数得到曲线编号集合 makeNode(pcurve, count, pcurno); // 定义变量,存储图的边数,并且赋值 int edgenum = count; // 定义变量,存储图的点数,并且赋值 int vexnum = pcurno[count - 1][2] - edgenum + 1; // 定义图的指针变量,根据边数和点数,初始化图 G = new Graph(vexnum, edgenum); // 根据曲线编号集,给图设置相应的边 for (i = 0; i < count; i++) { G->setEdge(pcurno[i][0], pcurno[i][1], pcurno[i][2]); } // 定义曲线编号集数组,只有闭合曲线 DynamicArrays closecurnode[CURVE_VALUE]; // 定义交点编号数组,得到端点坐标对应的编号数 DynamicArrays intersectno; // 调用函数得到数组交点的编号 getPointNo(pcurve, count, pcurno, realsect, intersectno); // 起始闭合和非闭合曲线的数目都设置为 0 *openNum = 0; *closeNum = 0; // 循环得到闭合曲线的路径编号 for (i = 0; i < intersectno.getSize(); i++) { // 调用函数,得到闭合曲线编号序列集 closeCurvePath(closecurnode, closeNum, G, intersectno[i]); } // 定义闭合曲线,并且申请大小空间 closecur = new DynamicArrays[*closeNum]; // 调用函数得到非格式输出的闭合曲线 getCurveNonFormat(closecurnode, pcurve, count, pcurno, closecur, *closeNum, true); // 定义曲线总数 int total = *openNum + *closeNum; // 给输出结果赋值,首先申请空间大小 *curveList = new Curve *[total]; // 定义变量,表示曲线长度 size_t curveLength; // 定义变量,表示动态数组里的整型指针 int *crvData; // 循环得到输出闭合曲线 for (i = 0; i < total; i++) { // 申请曲线空间 errcode = CurveBasicOp::newCurve(&((*curveList)[i])); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i); FAIL_CURVETRACING_FREE_CPU; return OUT_OF_MEM;; } // 得到曲线长度 curveLength = (size_t)(closecur[i - *openNum].getSize() / 2); // 得到动态数组里的整型指针 crvData = closecur[i - *openNum].getCrvDatap(); if (crvData == NULL) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return NULL_POINTER; } // 在 CPU 端构建曲线值 errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData); if (errcode != NO_ERROR) { // 释放动态申请的空间 freeCurve(curveList, i + 1); FAIL_CURVETRACING_FREE_CPU; return errcode; } } } // 否则只有闭合曲线,且闭合曲线之间没有相交 else { // 定义第二次遍历要得到的点集 DynamicArrays point; // 第二次遍历图像,得到端点和交点的动态数组 traverseNew(point, outimg1); // 定义变量得到输入图像的像素点数目 int maxnum = inimg->width * inimg->height; // 定义标志数组,并且申请和图像大小的空间 mark = new 
int[maxnum];
    // Initialize the mark array to 0.
    memset(mark, 0, sizeof(int) * maxnum);
    // count records the number of curves obtained.
    int count = 0;
    // Flags whether the tracing start point is already part of a curve; used by getCurve.
    int test = 0;
    // Allocate the curve array; the number of curves is at most the number of points.
    pcurve = new DynamicArrays [point.getSize()];
    // Call getCurve repeatedly to obtain the ordered point sequence of each curve.
    for (i = 0; i < point.getSize(); i += 2) {
        getCurve(pcurve, test, count, outimg1, mark, tpl, point[i], point[i + 1]);
        // If test is non-zero, do not increment count and continue; otherwise add one curve.
        if (test) {
            test = 0;
            continue;
        }
        count++;
    }
    // Initialize the numbers of open and closed curves to 0.
    *openNum = 0;
    *closeNum = 0;
    *closeNum = count;
    // Total number of curves.
    int total = *openNum + *closeNum;
    // Allocate the output curve list.
    *curveList = new Curve *[total];
    // Curve length.
    size_t curveLength;
    // Integer data pointer inside the dynamic array.
    int *crvData;
    // Loop to produce the output closed curves.
    for (i = 0; i < total; i++) {
        // Allocate a curve.
        errcode = CurveBasicOp::newCurve(&((*curveList)[i]));
        if (errcode != NO_ERROR) {
            // Free the dynamically allocated memory.
            freeCurve(curveList, i);
            FAIL_CURVETRACING_FREE_CPU;
            return OUT_OF_MEM;
        }
        // Get the curve length.
        curveLength = (size_t)(pcurve[i].getSize() / 2);
        // Get the integer data pointer of the dynamic array.
        crvData = pcurve[i].getCrvDatap();
        if (crvData == NULL) {
            // Free the dynamically allocated memory.
            freeCurve(curveList, i + 1);
            FAIL_CURVETRACING_FREE_CPU;
            return NULL_POINTER;
        }
        // Build the curve data on the CPU side.
        errcode = CurveBasicOp::makeAtHost((*curveList)[i], curveLength, crvData);
        if (errcode != NO_ERROR) {
            // Free the dynamically allocated memory.
            freeCurve(curveList, i + 1);
            FAIL_CURVETRACING_FREE_CPU;
            return errcode;
        }
    }
}
// Free the dynamically allocated memory.
FAIL_CURVETRACING_FREE_CPU;
// Done; return.
return NO_ERROR;
}
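// --- Illustrative sketch (not part of the original source) -----------------
// The intersection-classification step above picks, for each cluster produced
// by insectClassify, the pixel whose 8-neighborhood contains the most other
// cluster pixels (the "most central" point pushed into realsect). The
// standalone sketch below reproduces that rule with plain std::vector instead
// of DynamicArrays; tpl8, pickCenter and the sample cluster are illustrative
// names, not APIs of the original code.
#include <cstdio>
#include <vector>

static const int tpl8[16] = { -1,-1,  0,-1,  1,-1, -1,0,
                               1, 0, -1, 1,  0, 1,  1,1 };

// Returns the index (into the flat (x, y) pair array) of the chosen center point.
static int pickCenter(const std::vector<int> &cluster)
{
    int best = 0, bestCount = -1;
    for (size_t i = 0; i + 1 < cluster.size(); i += 2) {
        int count = 0;
        for (int k = 0; k < 16; k += 2) {
            int dx = cluster[i]     + tpl8[k];
            int dy = cluster[i + 1] + tpl8[k + 1];
            for (size_t s = 0; s + 1 < cluster.size(); s += 2)
                if (dx == cluster[s] && dy == cluster[s + 1])
                    ++count;
        }
        if (count > bestCount) { bestCount = count; best = (int)i; }
    }
    return best;
}

int main()
{
    // A small cross-shaped cluster: the middle pixel (5, 5) should win.
    std::vector<int> cluster = { 5,4,  4,5,  5,5,  6,5,  5,6 };
    int i = pickCenter(cluster);
    std::printf("center = (%d, %d)\n", cluster[i], cluster[i + 1]);
    return 0;
}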
* @file * Operations for reading linear tiles of data into the CUDA warp. */ #pragma once #include <iterator> #include <type_traits> #include <cub/block/block_load.cuh> #include <cub/config.cuh> #include <cub/iterator/cache_modified_input_iterator.cuh> #include <cub/util_ptx.cuh> #include <cub/util_type.cuh> #include <cub/warp/warp_exchange.cuh> CUB_NAMESPACE_BEGIN /** * @brief cub::WarpLoadAlgorithm enumerates alternative algorithms for * cub::WarpLoad to read a linear segment of data from memory into a * a CUDA warp. */ enum WarpLoadAlgorithm { /** * @par Overview * * A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is read * directly from memory. * * @par Performance Considerations * The utilization of memory transactions (coalescing) decreases as the * access stride between threads increases (i.e., the number items per thread). */ WARP_LOAD_DIRECT, /** * @par Overview * * A [<em>striped arrangement</em>](index.html#sec5sec3) of data is read * directly from memory. * * @par Performance Considerations * The utilization of memory transactions (coalescing) doesn't depend on * the number of items per thread. */ WARP_LOAD_STRIPED, /** * @par Overview * * A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is read * from memory using CUDA's built-in vectorized loads as a coalescing optimization. * For example, <tt>ld.global.v4.s32</tt> instructions will be generated * when @p T = @p int and @p ITEMS_PER_THREAD % 4 == 0. * * @par Performance Considerations * - The utilization of memory transactions (coalescing) remains high until the the * access stride between threads (i.e., the number items per thread) exceeds the * maximum vector load width (typically 4 items or 64B, whichever is lower). * - The following conditions will prevent vectorization and loading will fall * back to cub::WARP_LOAD_DIRECT: * - @p ITEMS_PER_THREAD is odd * - The @p InputIteratorT is not a simple pointer type * - The block input offset is not quadword-aligned * - The data type @p T is not a built-in primitive or CUDA vector type * (e.g., @p short, @p int2, @p double, @p float2, etc.) */ WARP_LOAD_VECTORIZE, /** * @par Overview * * A [<em>striped arrangement</em>](index.html#sec5sec3) of data is read * efficiently from memory and then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). * * @par Performance Considerations * - The utilization of memory transactions (coalescing) remains high * regardless of items loaded per thread. * - The local reordering incurs slightly longer latencies and throughput than * the direct cub::WARP_LOAD_DIRECT and cub::WARP_LOAD_VECTORIZE * alternatives. */ WARP_LOAD_TRANSPOSE }; /** * @brief The WarpLoad class provides [<em>collective</em>](index.html#sec0) * data movement methods for loading a linear segment of items from * memory into a [<em>blocked arrangement</em>](index.html#sec5sec3) * across a CUDA thread block. * @ingroup WarpModule * @ingroup UtilIo * * @tparam InputT * The data type to read into (which must be convertible from the input * iterator's value type). * * @tparam ITEMS_PER_THREAD * The number of consecutive items partitioned onto each thread. * * @tparam ALGORITHM * <b>[optional]</b> cub::WarpLoadAlgorithm tuning policy. * default: cub::WARP_LOAD_DIRECT. * * @tparam LOGICAL_WARP_THREADS * <b>[optional]</b> The number of threads per "logical" warp (may be less * than the number of hardware warp threads). Default is the warp size of the * targeted CUDA compute-capability (e.g., 32 threads for SM86). 
Must be a * power of two. * * @tparam PTX_ARCH * <b>[optional]</b> \ptxversion * * @par Overview * - The WarpLoad class provides a single data movement abstraction that can be * specialized to implement different cub::WarpLoadAlgorithm strategies. This * facilitates different performance policies for different architectures, data * types, granularity sizes, etc. * - WarpLoad can be optionally specialized by different data movement strategies: * -# <b>cub::WARP_LOAD_DIRECT</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * of data is read directly from memory. [More...](@ref cub::WarpLoadAlgorithm) * -# <b>cub::WARP_LOAD_STRIPED,</b>. A [<em>striped arrangement</em>](index.html#sec5sec3) * of data is read directly from memory. [More...](@ref cub::WarpLoadAlgorithm) * -# <b>cub::WARP_LOAD_VECTORIZE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * of data is read directly from memory using CUDA's built-in vectorized * loads as a coalescing optimization. [More...](@ref cub::WarpLoadAlgorithm) * -# <b>cub::WARP_LOAD_TRANSPOSE</b>. A [<em>striped arrangement</em>](index.html#sec5sec3) * of data is read directly from memory and is then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). [More...](@ref cub::WarpLoadAlgorithm) * * @par A Simple Example * @par * The code snippet below illustrates the loading of a linear segment of 64 * integers into a "blocked" arrangement across 16 threads where each thread * owns 4 consecutive items. The load is specialized for @p WARP_LOAD_TRANSPOSE, * meaning memory references are efficiently coalesced using a warp-striped access * pattern (after which items are locally reordered among threads). * @par * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_load.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * * // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each * using WarpLoadT = WarpLoad<int, * items_per_thread, * cub::WARP_LOAD_TRANSPOSE, * warp_threads>; * * constexpr int warps_in_block = block_threads / warp_threads; * constexpr int tile_size = items_per_thread * warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Allocate shared memory for WarpLoad * __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[items_per_thread]; * WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, * thread_data); * @endcode * @par * Suppose the input @p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>. * The set of @p thread_data across the first logical warp of threads in those * threads will be: * <tt>{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }</tt>. 
*/ template <typename InputT, int ITEMS_PER_THREAD, WarpLoadAlgorithm ALGORITHM = WARP_LOAD_DIRECT, int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, int PTX_ARCH = CUB_PTX_ARCH> class WarpLoad { constexpr static bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH); static_assert(PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE, "LOGICAL_WARP_THREADS must be a power of two"); private: /***************************************************************************** * Algorithmic variants ****************************************************************************/ /// Load helper template <WarpLoadAlgorithm _POLICY, int DUMMY> struct LoadInternal; template <int DUMMY> struct LoadInternal<WARP_LOAD_DIRECT, DUMMY> { using TempStorage = NullType; int linear_tid; __device__ __forceinline__ LoadInternal(TempStorage & /*temp_storage*/, int linear_tid) : linear_tid(linear_tid) {} template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) { LoadDirectBlocked(linear_tid, block_itr, items); } template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) { LoadDirectBlocked(linear_tid, block_itr, items, valid_items); } template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) { LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); } }; template <int DUMMY> struct LoadInternal<WARP_LOAD_STRIPED, DUMMY> { using TempStorage = NullType; int linear_tid; __device__ __forceinline__ LoadInternal(TempStorage & /*temp_storage*/, int linear_tid) : linear_tid(linear_tid) {} template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) { LoadDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items); } template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) { LoadDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items, valid_items); } template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) { LoadDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items, valid_items, oob_default); } }; template <int DUMMY> struct LoadInternal<WARP_LOAD_VECTORIZE, DUMMY> { using TempStorage = NullType; int linear_tid; __device__ __forceinline__ LoadInternal(TempStorage &/*temp_storage*/, int linear_tid) : linear_tid(linear_tid) {} template <typename InputIteratorT> __device__ __forceinline__ void Load( InputT *block_ptr, InputT (&items)[ITEMS_PER_THREAD]) { InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items); } template <typename InputIteratorT> __device__ __forceinline__ void Load( const InputT *block_ptr, InputT (&items)[ITEMS_PER_THREAD]) { InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items); } template < CacheLoadModifier MODIFIER, typename ValueType, typename OffsetT> __device__ __forceinline__ void Load( CacheModifiedInputIterator<MODIFIER, ValueType, OffsetT> block_itr, InputT (&items)[ITEMS_PER_THREAD]) { InternalLoadDirectBlockedVectorized<MODIFIER>(linear_tid, block_itr.ptr, items); } template <typename _InputIteratorT> 
__device__ __forceinline__ void Load( _InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) { LoadDirectBlocked(linear_tid, block_itr, items); } template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) { LoadDirectBlocked(linear_tid, block_itr, items, valid_items); } template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) { LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); } }; template <int DUMMY> struct LoadInternal<WARP_LOAD_TRANSPOSE, DUMMY> { using WarpExchangeT = WarpExchange<InputT, ITEMS_PER_THREAD, LOGICAL_WARP_THREADS, PTX_ARCH>; struct _TempStorage : WarpExchangeT::TempStorage {}; struct TempStorage : Uninitialized<_TempStorage> {}; _TempStorage &temp_storage; int linear_tid; __device__ __forceinline__ LoadInternal( TempStorage &temp_storage, int linear_tid) : temp_storage(temp_storage.Alias()), linear_tid(linear_tid) {} template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) { LoadDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items); WarpExchangeT(temp_storage).StripedToBlocked(items, items); } template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) { LoadDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items, valid_items); WarpExchangeT(temp_storage).StripedToBlocked(items, items); } template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) { LoadDirectStriped<LOGICAL_WARP_THREADS>(linear_tid, block_itr, items, valid_items, oob_default); WarpExchangeT(temp_storage).StripedToBlocked(items, items); } }; /***************************************************************************** * Type definitions ****************************************************************************/ /// Internal load implementation to use using InternalLoad = LoadInternal<ALGORITHM, 0>; /// Shared memory storage layout type using _TempStorage = typename InternalLoad::TempStorage; /***************************************************************************** * Utility methods ****************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /***************************************************************************** * Thread fields ****************************************************************************/ /// Thread reference to shared storage _TempStorage &temp_storage; /// Linear thread-id int linear_tid; public: /// @smemstorage{WarpLoad} struct TempStorage : Uninitialized<_TempStorage> {}; /*************************************************************************//** * @name Collective constructors ****************************************************************************/ //@{ /** * @brief Collective constructor using a private static allocation of * shared memory as temporary storage. */ __device__ __forceinline__ WarpLoad() : temp_storage(PrivateStorage()) , linear_tid(IS_ARCH_WARP ? 
LaneId() : (LaneId() % LOGICAL_WARP_THREADS)) {} /** * @brief Collective constructor using the specified memory allocation as * temporary storage. */ __device__ __forceinline__ WarpLoad(TempStorage &temp_storage) : temp_storage(temp_storage.Alias()) , linear_tid(IS_ARCH_WARP ? LaneId() : (LaneId() % LOGICAL_WARP_THREADS)) {} //@} end member group /*************************************************************************//** * @name Data movement ****************************************************************************/ //@{ /** * @brief Load a linear segment of items from memory. * * @par * \smemreuse * * @par Snippet * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_load.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * * // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each * using WarpLoadT = WarpLoad<int, * items_per_thread, * cub::WARP_LOAD_TRANSPOSE, * warp_threads>; * * constexpr int warps_in_block = block_threads / warp_threads; * constexpr int tile_size = items_per_thread * warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Allocate shared memory for WarpLoad * __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[items_per_thread]; * WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, * thread_data); * @endcode * @par * Suppose the input @p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>. * The set of @p thread_data across the first logical warp of threads in those * threads will be: * <tt>{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }</tt>. * * @param[in] block_itr The thread block's base input iterator for loading from * @param[out] items Data to load */ template <typename InputIteratorT> __device__ __forceinline__ void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) { InternalLoad(temp_storage, linear_tid).Load(block_itr, items); } /** * @brief Load a linear segment of items from memory, guarded by range. * * @par * \smemreuse * * @par Snippet * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_load.cuh> * * __global__ void ExampleKernel(int *d_data, int valid_items, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * * // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each * using WarpLoadT = WarpLoad<int, * items_per_thread, * cub::WARP_LOAD_TRANSPOSE, * warp_threads>; * * constexpr int warps_in_block = block_threads / warp_threads; * constexpr int tile_size = items_per_thread * warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Allocate shared memory for WarpLoad * __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[items_per_thread]; * WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, * thread_data, * valid_items); * @endcod * @par * Suppose the input @p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt> and @p valid_items * is @p 5. * The set of @p thread_data across the first logical warp of threads in those * threads will be: * <tt>{ [0,1,2,3], [4,?,?,?], ..., [?,?,?,?] 
}</tt> with only the first * two threads being unmasked to load portions of valid data (and other items * remaining unassigned). * * @param[in] block_itr The thread block's base input iterator for loading from * @param[out] items Data to load * @param[in] valid_items Number of valid items to load */ template <typename InputIteratorT> __device__ __forceinline__ void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) { InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items); } /** * @brief Load a linear segment of items from memory, guarded by range. * * @par * \smemreuse * * @par Snippet * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_load.cuh> * * __global__ void ExampleKernel(int *d_data, int valid_items, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * * // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each * using WarpLoadT = WarpLoad<int, * items_per_thread, * cub::WARP_LOAD_TRANSPOSE, * warp_threads>; * * constexpr int warps_in_block = block_threads / warp_threads; * constexpr int tile_size = items_per_thread * warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Allocate shared memory for WarpLoad * __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[items_per_thread]; * WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, * thread_data, * valid_items, * -1); * @endcod * @par * Suppose the input @p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>, @p valid_items * is @p 5, and the out-of-bounds default is @p -1. * The set of @p thread_data across the first logical warp of threads in those * threads will be: * <tt>{ [0,1,2,3], [4,-1,-1,-1], ..., [-1,-1,-1,-1] }</tt> with only the first * two threads being unmasked to load portions of valid data (and other items * are assigned @p -1). * * @param[in] block_itr The thread block's base input iterator for loading from * @param[out] items Data to load * @param[in] valid_items Number of valid items to load * @param[in] oob_default Default value to assign out-of-bound items */ template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) { InternalLoad(temp_storage, linear_tid) .Load(block_itr, items, valid_items, oob_default); } //@} end member group }; CUB_NAMESPACE_END
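// --- Illustrative sketch (not part of CUB) ---------------------------------
// A minimal, self-contained program exercising the guarded Load() overload
// documented above: 16-thread logical warps, 4 items per thread,
// WARP_LOAD_TRANSPOSE, out-of-bounds items filled with -1. The kernel and
// buffer names are illustrative; it only assumes CUB is on the include path.
#include <cstdio>
#include <cub/warp/warp_load.cuh>

constexpr int warp_threads     = 16;
constexpr int block_threads    = 64;
constexpr int items_per_thread = 4;
constexpr int tile_size        = warp_threads * items_per_thread;

__global__ void GuardedLoadKernel(const int *d_in, int *d_out, int valid_items)
{
  using WarpLoadT = cub::WarpLoad<int,
                                  items_per_thread,
                                  cub::WARP_LOAD_TRANSPOSE,
                                  warp_threads>;

  constexpr int warps_in_block = block_threads / warp_threads;
  __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block];

  const int warp_id = static_cast<int>(threadIdx.x) / warp_threads;
  const int lane    = static_cast<int>(threadIdx.x) % warp_threads;

  // Guarded load: items past valid_items (per logical warp) become -1.
  int thread_data[items_per_thread];
  WarpLoadT(temp_storage[warp_id]).Load(d_in + warp_id * tile_size,
                                        thread_data,
                                        valid_items,
                                        -1);

  // Write the blocked arrangement back out so the host can inspect it.
  for (int i = 0; i < items_per_thread; ++i)
    d_out[warp_id * tile_size + lane * items_per_thread + i] = thread_data[i];
}

int main()
{
  const int n = block_threads * items_per_thread;
  int h_in[n], h_out[n];
  for (int i = 0; i < n; ++i) h_in[i] = i;

  int *d_in, *d_out;
  cudaMalloc((void **)&d_in, n * sizeof(int));
  cudaMalloc((void **)&d_out, n * sizeof(int));
  cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

  GuardedLoadKernel<<<1, block_threads>>>(d_in, d_out, /*valid_items=*/5);
  cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);

  // First logical warp prints 0 1 2 3 4 -1 -1 ..., matching the documentation.
  for (int i = 0; i < tile_size; ++i) std::printf("%d ", h_out[i]);
  std::printf("\n");

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}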
#define BLOCK_SIZE_0 256 #define BLOCK_SIZE_1_X 16 #define BLOCK_SIZE_1_Y 16 long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) +tv.tv_usec; } // create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 void create_matrix(float *m, int size){ int i,j; float lamda = -0.01; float coe[2*size-1]; float coe_i =0.0; for (i=0; i < size; i++) { coe_i = 10*exp(lamda*i); j=size-1+i; coe[j]=coe_i; j=size-1-i; coe[j]=coe_i; } for (i=0; i < size; i++) { for (j=0; j < size; j++) { m[i*size+j]=coe[size-1-i+j]; } } } // reference implementation for verification void gaussian_reference(float *a, float *b, float *m, float* finalVec, int size) { for (int t=0; t<(size-1); t++) { for (int i = 0; i < size-1-t; i++) { m[size * (i + t + 1)+t] = a[size * (i + t + 1) + t] / a[size * t + t]; } for (int x = 0; x < size-1-t; x++) { for (int y = 0; y < size-t; y++) { a[size * (x + t + 1)+y+t] -= m[size * (x + t + 1) + t] * a[size * t + y + t]; if (y == 0) b[x+1+t] -= m[size*(x+1+t)+(y+t)] * b[t]; } } } BackSub(a,b,finalVec,size); } int main(int argc, char *argv[]) { printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", BLOCK_SIZE_0, BLOCK_SIZE_1_X, BLOCK_SIZE_1_Y); float *a=NULL, *b=NULL, *finalVec=NULL; float *m=NULL; int size = -1; FILE *fp; // args char filename[200]; int quiet=0,timing=0; // parse command line if (parseCommandline(argc, argv, filename, &quiet, &timing, &size)) { printUsage(); return 0; } if(size < 1) { fp = fopen(filename, "r"); fscanf(fp, "%d", &size); a = (float *) malloc(size * size * sizeof(float)); InitMat(fp,size, a, size, size); b = (float *) malloc(size * sizeof(float)); InitAry(fp, b, size); fclose(fp); } else { printf("create input internally before create, size = %d \n", size); a = (float *) malloc(size * size * sizeof(float)); create_matrix(a, size); b = (float *) malloc(size * sizeof(float)); for (int i =0; i< size; i++) b[i]=1.0; } if (!quiet) { printf("The input matrix a is:\n"); PrintMat(a, size, size, size); printf("The input array b is:\n"); PrintAry(b, size); } // create the solution matrix m = (float *) malloc(size * size * sizeof(float)); InitPerRun(size,m); // create a new vector to hold the final answer finalVec = (float *) malloc(size * sizeof(float)); // verification float* a_host = (float *) malloc(size * size * sizeof(float)); memcpy(a_host, a, size * size * sizeof(float)); float* b_host = (float *) malloc(size * sizeof(float)); memcpy(b_host, b, size*sizeof(float)); float* m_host = (float *) malloc(size * size * sizeof(float)); memcpy(m_host, m, size*size*sizeof(float)); float* finalVec_host = (float *) malloc(size * sizeof(float)); // Compute the reference on a host gaussian_reference(a_host, b_host, m_host, finalVec_host, size); // Compute the forward phase on a device long long offload_start = get_time(); ForwardSub(a,b,m,size,timing); long long offload_end = get_time(); if (timing) { printf("Device offloading time %lld (us)\n\n",offload_end - offload_start); } // Compute the backward phase on a host BackSub(a,b,finalVec,size); if (!quiet) { printf("The result of array a is after forwardsub: \n"); PrintMat(a, size, size, size); printf("The result of array b is after forwardsub: \n"); PrintAry(b, size); printf("The result of matrix m is after forwardsub: \n"); PrintMat(m, size, size, size); printf("The final solution is: \n"); PrintAry(finalVec,size); } // verification printf("Checking the results..\n"); for (int i = 0; i < size; i++) { if (fabsf(finalVec[i] - finalVec_host[i]) > 1e-3) { 
printf("Result mismatch at index %d: %f(device) %f(host)\n", i, finalVec[i], finalVec_host[i]); } } free(m); free(a); free(b); free(finalVec); // verification free(a_host); free(m_host); free(b_host); free(finalVec_host); return 0; } __global__ void fan1 (const float* a, float* m, const int size, const int t) { int globalId = blockDim.x * blockIdx.x + threadIdx.x; if (globalId < size-1-t) { m[size * (globalId + t + 1)+t] = a[size * (globalId + t + 1) + t] / a[size * t + t]; } } __global__ void fan2 (float* a, float* b, float* m, const int size, const int t) { int globalIdy = blockDim.x * blockIdx.x + threadIdx.x; int globalIdx = blockDim.y * blockIdx.y + threadIdx.y; if (globalIdx < size-1-t && globalIdy < size-t) { a[size*(globalIdx+1+t)+(globalIdy+t)] -= m[size*(globalIdx+1+t)+t] * a[size*t+(globalIdy+t)]; if(globalIdy == 0){ b[globalIdx+1+t] -= m[size*(globalIdx+1+t)+(globalIdy+t)] * b[t]; } } } /*------------------------------------------------------ ** ForwardSub() -- Forward substitution of Gaussian ** elimination. **------------------------------------------------------ */ void ForwardSub(float *a, float *b, float *m, int size, int timing){ dim3 blockDim_fan1 (BLOCK_SIZE_0); dim3 gridDim_fan1 ((size + BLOCK_SIZE_0 - 1) / BLOCK_SIZE_0); dim3 blockDim_fan2 (BLOCK_SIZE_1_Y, BLOCK_SIZE_1_X); dim3 gridDim_fan2 ((size + BLOCK_SIZE_1_Y - 1) / BLOCK_SIZE_1_Y, (size + BLOCK_SIZE_1_X - 1) / BLOCK_SIZE_1_X); float *d_a, *d_b, *d_m; hipMalloc((void**)&d_a, size*size*sizeof(float)); hipMalloc((void**)&d_b, size*sizeof(float)); hipMalloc((void**)&d_m, size*size*sizeof(float)); hipMemcpy(d_a, a, size*size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_m, m, size*size*sizeof(float), hipMemcpyHostToDevice); for (int t=0; t<(size-1); t++) { hipLaunchKernelGGL(fan1, gridDim_fan1, blockDim_fan1, 0, 0, d_a, d_m, size, t); hipLaunchKernelGGL(fan2, gridDim_fan2, blockDim_fan2, 0, 0, d_a, d_b, d_m, size, t); } hipMemcpy(a, d_a, size*size*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(b, d_b, size*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(m, d_m, size*size*sizeof(float), hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_m); } // Ke Wang add a function to generate input internally int parseCommandline(int argc, char *argv[], char* filename, int *q, int *t, int *size){ int i; if (argc < 2) return 1; // error // strncpy(filename,argv[1],100); char flag; for(i=1;i<argc;i++) { if (argv[i][0]=='-') {// flag flag = argv[i][1]; switch (flag) { case 's': // matrix size i++; *size = atoi(argv[i]); printf("Create matrix internally in parse, size = %d \n", *size); break; case 'f': // file name i++; strncpy(filename,argv[i],100); printf("Read file from %s \n", filename); break; case 'h': // help return 1; case 'q': // quiet *q = 1; break; case 't': // timing *t = 1; break; } } } return 0; } void printUsage(){ printf("Gaussian Elimination Usage\n"); printf("\n"); printf("gaussianElimination -f [filename] [-hqt]\n"); printf("\n"); printf("example:\n"); printf("$ ./gaussianElimination matrix4.txt\n"); printf("\n"); printf("filename the filename that holds the matrix data\n"); printf("\n"); printf("-h Display the help file\n"); printf("-q Quiet mode. Suppress all text output.\n"); printf("-t Print timing information.\n"); printf("-s Specifiy the matrix size when the path to a matrix data file is not set.\n"); printf("\n"); printf("\n"); printf("Notes: 1. The filename is required as the first parameter.\n"); printf(" 2. 
If you declare either the device or the platform,\n"); printf(" you must declare both.\n\n"); } /*------------------------------------------------------ ** InitPerRun() -- Initialize the contents of the ** multipier matrix **m **------------------------------------------------------ */ void InitPerRun(int size,float *m) { int i; for (i=0; i<size*size; i++) *(m+i) = 0.0; } void BackSub(float *a, float *b, float *finalVec, int size) { // solve "bottom up" int i,j; for(i=0;i<size;i++){ finalVec[size-i-1]=b[size-i-1]; for(j=0;j<i;j++) { finalVec[size-i-1]-=*(a+size*(size-i-1)+(size-j-1)) * finalVec[size-j-1]; } finalVec[size-i-1]=finalVec[size-i-1]/ *(a+size*(size-i-1)+(size-i-1)); } } void InitMat(FILE *fp, int size, float *ary, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { fscanf(fp, "%f", ary+size*i+j); } } } /*------------------------------------------------------ ** InitAry() -- Initialize the array (vector) by reading ** data from the data file **------------------------------------------------------ */ void InitAry(FILE *fp, float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { fscanf(fp, "%f", &ary[i]); } } /*------------------------------------------------------ ** PrintMat() -- Print the contents of the matrix **------------------------------------------------------ */ void PrintMat(float *ary, int size, int nrow, int ncol) { int i, j; for (i=0; i<nrow; i++) { for (j=0; j<ncol; j++) { printf("%8.2e ", *(ary+size*i+j)); } printf("\n"); } printf("\n"); } /*------------------------------------------------------ ** PrintAry() -- Print the contents of the array (vector) **------------------------------------------------------ */ void PrintAry(float *ary, int ary_size) { int i; for (i=0; i<ary_size; i++) { printf("%.2e ", ary[i]); } printf("\n\n"); }
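// --- Illustrative sketch (not part of the benchmark) ------------------------
// Host-only walk-through of the same forward elimination + back substitution
// that gaussian_reference() and BackSub() perform, on a hard-coded 3x3 system
// whose solution is (1, 1, 1), so the index arithmetic mirrored by the
// fan1/fan2 kernels is easy to check by hand. All names here are local to the
// sketch.
#include <cstdio>

int main()
{
  const int size = 3;
  float a[size * size] = { 2, 1, 1,
                           4, 3, 3,
                           8, 7, 9 };
  float b[size] = { 4, 10, 24 };
  float m[size * size] = { 0 };
  float x[size];

  // Forward elimination: same index pattern as gaussian_reference()/fan1/fan2.
  for (int t = 0; t < size - 1; t++) {
    for (int i = 0; i < size - 1 - t; i++)
      m[size * (i + t + 1) + t] = a[size * (i + t + 1) + t] / a[size * t + t];
    for (int r = 0; r < size - 1 - t; r++)
      for (int c = 0; c < size - t; c++) {
        a[size * (r + t + 1) + c + t] -= m[size * (r + t + 1) + t] * a[size * t + c + t];
        if (c == 0)
          b[r + 1 + t] -= m[size * (r + 1 + t) + t] * b[t];
      }
  }

  // Back substitution: same as BackSub().
  for (int i = 0; i < size; i++) {
    x[size - i - 1] = b[size - i - 1];
    for (int j = 0; j < i; j++)
      x[size - i - 1] -= a[size * (size - i - 1) + (size - j - 1)] * x[size - j - 1];
    x[size - i - 1] /= a[size * (size - i - 1) + (size - i - 1)];
  }

  // Expected output: x = (1, 1, 1).
  printf("x = (%g, %g, %g)\n", x[0], x[1], x[2]);
  return 0;
}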
#include "nnroipooling.hpp" #include "impl/dispatcher.hpp" #include <limits> #include <cassert> #include <cstring> #include <cmath> #include <iostream> using namespace vl ; using namespace vl::nn ; using namespace vl::impl ; template<DeviceType deviceType, DataType dataType> struct ROIPoolingForward ; template<DeviceType deviceType, DataType dataType> struct ROIPoolingBackward ; // ------------------------------------------------------------------- // Helpers // ------------------------------------------------------------------- template <typename type> struct acc_max { inline acc_max(int poolHeight, int poolWidth, type derOutput = 0) : value(-std::numeric_limits<type>::infinity()), derOutput(derOutput), derDataActivePt(NULL) { } inline void accumulate_forward(type x) { value = std::max(value, x) ; } inline void accumulate_backward(type const* data, type* derDataPt) { type x = *data ; if (x > value) { value = x ; derDataActivePt = derDataPt ; } } inline type done_forward() const { return value ; } inline void done_backward() const { if (derDataActivePt) { *derDataActivePt += derOutput ; } } type value ; type derOutput ; type* derDataActivePt ; } ; template <typename type> struct acc_sum { inline acc_sum(int poolHeight, int poolWidth, type derOutput = 0) : value(0), scale(type(1)/type(poolHeight*poolWidth)), derOutput(derOutput) { } inline void accumulate_forward(type x) { value += x ; } inline void accumulate_backward(type const* data, type* derDataPt) { *derDataPt += derOutput * scale ; } inline type done_forward() const { return value * scale ; } inline void done_backward() const { } type value ; type derOutput ; type scale; } ; // ------------------------------------------------------------------- // Forward // ------------------------------------------------------------------- template<DataType dataType, class Accumulator> struct ROIPoolingForwardCPU { vl::ErrorCode operator()(ROIPooling &op, Tensor &pooled, Tensor const &input, Tensor const &rois) { typedef typename vl::DataTypeTraits<dataType>::type type ; auto numROIs = rois.getNumElements() / 5 ; auto height = input.getHeight() ; auto width = input.getWidth() ; auto depth = input.getDepth() ; auto size = input.getSize() ; auto roisData = (type const*)rois.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto pooledData = (type*)pooled.getMemory() ; // For each ROI R = [t x1 y1 x2 y2]. for (int roi = 0; roi < numROIs; ++roi) { // Apply scale and offset to each ROI coordinate. type u1_ = roisData[5 * roi + 1] ; type v1_ = roisData[5 * roi + 2] ; type u2_ = roisData[5 * roi + 3] ; type v2_ = roisData[5 * roi + 4] ; type u1 = op.transform[0] * u1_ + op.transform[2] * v1_ + op.transform[4] ; type v1 = op.transform[1] * u1_ + op.transform[3] * v1_ + op.transform[5] ; type u2 = op.transform[0] * u2_ + op.transform[2] * v2_ + op.transform[4] ; type v2 = op.transform[1] * u2_ + op.transform[3] * v2_ + op.transform[5] ; // First and last pixel of each ROI (rounded // for compatibility with the Caffe definition). 
int roi_image = (int)roisData[5 * roi + 0]; int roi_start_h = (int)::round(v1) - 1 ; int roi_start_w = (int)::round(u1) - 1 ; int roi_end_h = (int)::round(v2) - 1 ; int roi_end_w = (int)::round(u2) - 1 ; int roi_height = std::max(roi_end_h - roi_start_h + 1, 1) ; int roi_width = std::max(roi_end_w - roi_start_w + 1, 1) ; roi_image = std::min(std::max(roi_image - 1,0), (int)size - 1) ; type const * data_offset = inputData + (roi_image * depth) * (width*height) ; type bin_size_h = (double)roi_height / op.subdivisions[0] ; type bin_size_w = (double)roi_width / op.subdivisions[1] ; // For each feature channel. for (int z = 0; z < depth; ++z) { // For each column of tiles. for (int pw = 0; pw < op.subdivisions[1]; ++pw) { int wstart = (int)floor(((type)pw) * bin_size_w) ; int wend = (int)ceil(((type)(pw + 1)) * bin_size_w) ; wstart = std::min(std::max(wstart + roi_start_w, 0), (int)width) ; wend = std::min(std::max(wend + roi_start_w, 0), (int)width) ; // For each tile in a column. for (int ph = 0; ph < op.subdivisions[0]; ++ph) { int hstart = (int)floor(((type)ph) * bin_size_h) ; int hend = (int)ceil(((type)(ph + 1)) * bin_size_h) ; hstart = std::min(std::max(hstart + roi_start_h, 0), (int)height) ; hend = std::min(std::max(hend + roi_start_h, 0), (int)height) ; bool is_empty = (hend <= hstart) || (wend <= wstart); if (is_empty) { *pooledData++ = 0 ; } else { Accumulator acc(hend - hstart, wend - wstart) ; for (int w = wstart ; w < wend; ++w) { for (int h = hstart ; h < hend; ++h) { const int index = w * height + h ; acc.accumulate_forward(data_offset[index]) ; } } *pooledData++ = acc.done_forward() ; } } // end of ph } // end of pw data_offset += width*height; } // end of z } // end of n return VLE_Success ; } } ; template<DataType dataType> struct ROIPoolingForward<VLDT_CPU,dataType> { vl::ErrorCode operator()(ROIPooling &op, Tensor &pooled, Tensor const &input, Tensor const &rois) { switch (op.method) { case ROIPooling::Max: return ROIPoolingForwardCPU<dataType,acc_max<typename vl::DataTypeTraits<dataType>::type> > ()(op,pooled,input,rois) ; case ROIPooling::Average: return ROIPoolingForwardCPU<dataType,acc_sum<typename vl::DataTypeTraits<dataType>::type> > ()(op,pooled,input,rois) ; default: return VLE_IllegalArgument ; } } } ; // ------------------------------------------------------------------- // Backward // ------------------------------------------------------------------- template<DataType dataType, class Accumulator> struct ROIPoolingBackwardCPU { vl::ErrorCode operator()(ROIPooling &op, Tensor &derInput, Tensor const &input, Tensor const &rois, Tensor const &derOutput) { typedef typename vl::DataTypeTraits<dataType>::type type ; auto numROIs = rois.getNumElements() / 5 ; auto height = input.getHeight() ; auto width = input.getWidth() ; auto depth = input.getDepth() ; auto size = input.getSize() ; auto derInputData = (type*)derInput.getMemory() ; auto roisData = (type const*)rois.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto derOutputData = (type const*)derOutput.getMemory() ; memset(derInputData, 0, derInput.getNumElements() * sizeof(type)) ; // For each ROI R = [t x1 y1 x2 y2]. for (size_t roi = 0; roi < numROIs ; ++roi) { // Apply sacle and offset to each ROI coordinate. 
type u1_ = roisData[5 * roi + 1] ; type v1_ = roisData[5 * roi + 2] ; type u2_ = roisData[5 * roi + 3] ; type v2_ = roisData[5 * roi + 4] ; type u1 = op.transform[0] * u1_ + op.transform[2] * v1_ + op.transform[4] ; type v1 = op.transform[1] * u1_ + op.transform[3] * v1_ + op.transform[5] ; type u2 = op.transform[0] * u2_ + op.transform[2] * v2_ + op.transform[4] ; type v2 = op.transform[1] * u2_ + op.transform[3] * v2_ + op.transform[5] ; // First and last pixel of each ROI (rounded // for compatibility with the Caffe definition). int roi_image = (int)roisData[5 * roi + 0]; int roi_start_h = (int)::round(v1) - 1 ; int roi_start_w = (int)::round(u1) - 1 ; int roi_end_h = (int)::round(v2) - 1 ; int roi_end_w = (int)::round(u2) - 1 ; int roi_height = std::max(roi_end_h - roi_start_h + 1, 1) ; int roi_width = std::max(roi_end_w - roi_start_w + 1, 1) ; roi_image = std::min(std::max(roi_image - 1,0), (int)size - 1) ; type const * data_offset = inputData + (roi_image * depth) * (width*height); type * derInputData_offset = derInputData + (roi_image * depth) * (width*height); const type bin_size_h = (double)roi_height / op.subdivisions[0] ; const type bin_size_w = (double)roi_width / op.subdivisions[1] ; // For each feature channel. for (int z = 0; z < depth; ++z) { // For each column of tiles. for (int pw = 0; pw < op.subdivisions[1]; ++pw) { int wstart = (int)floor(((type)pw) * bin_size_w) ; int wend = (int)ceil(((type)(pw + 1)) * bin_size_w) ; wstart = std::min(std::max(wstart + roi_start_w, 0), (int)width) ; wend = std::min(std::max(wend + roi_start_w, 0), (int)width) ; // For each tile in a column. for (int ph = 0; ph < op.subdivisions[0]; ++ph) { int hstart = (int)floor(((type)ph) * bin_size_h) ; int hend = (int)ceil(((type)(ph + 1)) * bin_size_h) ; hstart = std::min(std::max(hstart + roi_start_h, 0), (int)height) ; hend = std::min(std::max(hend + roi_start_h, 0), (int)height) ; Accumulator acc(hend - hstart, wend - wstart, *derOutputData++) ; for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { const int index = w * height + h ; acc.accumulate_backward(&data_offset[index], &derInputData_offset[index]) ; } } acc.done_backward() ; } // end of pw } // end of ph data_offset += width*height ; derInputData_offset += width*height ; } // end of z } // end of n return VLE_Success ; } } ; template<DataType dataType> struct ROIPoolingBackward<VLDT_CPU,dataType> { vl::ErrorCode operator()(ROIPooling &op, Tensor &derInput, Tensor const &input, Tensor const &rois, Tensor const &derOutput) { switch (op.method) { case ROIPooling::Max: return ROIPoolingBackwardCPU<dataType,acc_max<typename vl::DataTypeTraits<dataType>::type> > ()(op,derInput,input,rois,derOutput) ; case ROIPooling::Average: return ROIPoolingBackwardCPU<dataType,acc_sum<typename vl::DataTypeTraits<dataType>::type> > ()(op,derInput,input,rois,derOutput) ; default: return VLE_IllegalArgument ; } } } ; // ------------------------------------------------------------------- // Driver // ------------------------------------------------------------------- #if ENABLE_GPU #include "nnroipooling_gpu.cu" #endif ROIPooling::ROIPooling(Context &context, std::array<int,2> subdivisions, std::array<double,6> transform, Method method) : context(context), subdivisions(subdivisions), transform(transform), method(method) { } vl::ErrorCode ROIPooling::forward(Tensor &output, Tensor const &input, Tensor const &rois) { return dispatch<ROIPoolingForward>()(*this,output,input,rois) ; } vl::ErrorCode ROIPooling::backward(Tensor &derInput, 
Tensor const &input, Tensor const &rois, Tensor const &derOutput) { return dispatch<ROIPoolingBackward>()(*this,derInput,input,rois,derOutput) ; }
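// --- Illustrative sketch (not part of the original source) ------------------
// Standalone demonstration of the ROI coordinate handling shared by the
// forward and backward CPU paths above: a corner (x, y) is mapped through
// op.transform as u = t[0]*x + t[2]*y + t[4], v = t[1]*x + t[3]*y + t[5],
// then rounded to 0-based pixel indices for compatibility with the Caffe
// definition. The 1/16 scaling used below is only an example value, not taken
// from the original code; RoiTransform and mapCorner are illustrative names.
#include <cmath>
#include <cstdio>

struct RoiTransform { double t[6]; }; // {t0, t1, t2, t3, t4, t5} as in op.transform

static void mapCorner(const RoiTransform &T, double x, double y,
                      int &col, int &row)
{
  double u = T.t[0] * x + T.t[2] * y + T.t[4];
  double v = T.t[1] * x + T.t[3] * y + T.t[5];
  col = (int)std::round(u) - 1;   // 1-based ROI coordinates -> 0-based pixels
  row = (int)std::round(v) - 1;
}

int main()
{
  RoiTransform T = { {1.0 / 16, 0, 0, 1.0 / 16, 0, 0} };
  int c1, r1, c2, r2;
  mapCorner(T, 32, 48, c1, r1);    // ROI top-left corner in image coordinates
  mapCorner(T, 160, 128, c2, r2);  // ROI bottom-right corner
  int roi_width  = (c2 - c1 + 1 > 1) ? c2 - c1 + 1 : 1;  // clamped like roi_width above
  int roi_height = (r2 - r1 + 1 > 1) ? r2 - r1 + 1 : 1;
  std::printf("bin grid covers %d x %d feature-map pixels\n",
              roi_height, roi_width);
  return 0;
}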
#define FUNC1(i) \ { \ Ajreg = Ajlocal[i]; \ Axreg = Axlocal[i]; \ if(Ajreg > 0) \ { \ unsigned int idxi = Ajreg >> 16; \ unsigned int idxj = Ajreg - (idxi << 16); \ atomicAdd(&s_Ax[idxi], Axreg * s_x[idxj]); \ atomicAdd(&s_Ax[idxj], Axreg * s_x[idxi]); \ } \ else \ { \ goto gotolabel; \ } \ } \ #define FUNC2(i) \ { \ Ajreg = Ajlocal[i]; \ Axreg = Axlocal[i]; \ if(Ajreg > 0) \ { \ unsigned int idxi = Ajreg >> 16; \ unsigned int idxj = Ajreg - (idxi << 16); \ atomicAdd(&s_Ax[idxi], Axreg * s_x[idxj]); \ atomicAdd(&s_Ax[idxj], Axreg * s_x[idxi]); \ } \ else \ { \ goto gotolabel2; \ } \ } \ #define LOOP10_FUNC1() { FUNC1(0) FUNC1(1) FUNC1(2) FUNC1(3) FUNC1(4) FUNC1(5) FUNC1(6) FUNC1(7) FUNC1(8) FUNC1(9)} #define LOOP20_FUNC1() {LOOP10_FUNC1() FUNC1(10) FUNC1(11) FUNC1(12) FUNC1(13) FUNC1(14) FUNC1(15) FUNC1(16) FUNC1(17) FUNC1(18) FUNC1(19)} #define LOOP30_FUNC1() {LOOP20_FUNC1() FUNC1(20) FUNC1(21) FUNC1(22) FUNC1(23) FUNC1(24) FUNC1(25) FUNC1(26) FUNC1(27) FUNC1(28) FUNC1(29)} #define LOOP40_FUNC1() {LOOP30_FUNC1() FUNC1(30) FUNC1(31) FUNC1(32) FUNC1(33) FUNC1(34) FUNC1(35) FUNC1(36) FUNC1(37) FUNC1(38) FUNC1(39)} #define LOOP50_FUNC1() {LOOP40_FUNC1() FUNC1(40) FUNC1(41) FUNC1(42) FUNC1(43) FUNC1(44) FUNC1(45) FUNC1(46) FUNC1(47) FUNC1(48) FUNC1(49)} #define LOOP60_FUNC1() {LOOP50_FUNC1() FUNC1(50) FUNC1(51) FUNC1(52) FUNC1(53) FUNC1(54) FUNC1(55) FUNC1(56) FUNC1(57) FUNC1(58) FUNC1(59)} #define LOOP70_FUNC1() {LOOP60_FUNC1() FUNC1(60) FUNC1(61) FUNC1(62) FUNC1(63) FUNC1(64) FUNC1(65) FUNC1(66) FUNC1(67) FUNC1(68) FUNC1(69)} #define LOOP10_FUNC2() { FUNC2(0) FUNC2(1) FUNC2(2) FUNC2(3) FUNC2(4) FUNC2(5) FUNC2(6) FUNC2(7) FUNC2(8) FUNC2(9)} #define LOOP20_FUNC2() {LOOP10_FUNC2() FUNC2(10) FUNC2(11) FUNC2(12) FUNC2(13) FUNC2(14) FUNC2(15) FUNC2(16) FUNC2(17) FUNC2(18) FUNC2(19)} #define LOOP30_FUNC2() {LOOP20_FUNC2() FUNC2(20) FUNC2(21) FUNC2(22) FUNC2(23) FUNC2(24) FUNC2(25) FUNC2(26) FUNC2(27) FUNC2(28) FUNC2(29)} #define LOOP40_FUNC2() {LOOP30_FUNC2() FUNC2(30) FUNC2(31) FUNC2(32) FUNC2(33) FUNC2(34) FUNC2(35) FUNC2(36) FUNC2(37) FUNC2(38) FUNC2(39)} #define LOOP50_FUNC2() {LOOP40_FUNC2() FUNC2(40) FUNC2(41) FUNC2(42) FUNC2(43) FUNC2(44) FUNC2(45) FUNC2(46) FUNC2(47) FUNC2(48) FUNC2(49)} #define LOOP60_FUNC2() {LOOP50_FUNC2() FUNC2(50) FUNC2(51) FUNC2(52) FUNC2(53) FUNC2(54) FUNC2(55) FUNC2(56) FUNC2(57) FUNC2(58) FUNC2(59)} #define LOOP70_FUNC2() {LOOP60_FUNC2() FUNC2(60) FUNC2(61) FUNC2(62) FUNC2(63) FUNC2(64) FUNC2(65) FUNC2(66) FUNC2(67) FUNC2(68) FUNC2(69)} /*************************************** * Source Definitions ***************************************/ template <class Matrix, class Vector> gauss_seidel<Matrix, Vector>::gauss_seidel(double smoothWeight, int preInnerIters, int postInnerIters, int postRelaxes, const Matrix_d& Ainit) { cusp::detail::extract_diagonal(Ainit, this->diag); smootherWeight_ = smoothWeight; nPreInnerIter_ = preInnerIters; nPostInnerIter_ = postInnerIters; post_relaxes_ = postRelaxes; } template<> void gauss_seidel<Matrix_d, Vector_d>::find_diag(const Matrix_ell_d& A) { typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; const size_t THREADS_PER_BLOCK = 256; const size_t NUM_BLOCKS = min(65535, (int)ceil((double)A.num_rows / (double)THREADS_PER_BLOCK)); diag.resize(A.num_rows); find_diag_kernel<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> > (A.num_rows, A.num_cols, A.column_indices.num_cols, A.column_indices.pitch, thrust::raw_pointer_cast(&A.column_indices.values[0]), 
thrust::raw_pointer_cast(&A.values.values[0]), thrust::raw_pointer_cast(&diag[0])); } template<typename IndexType, typename ValueType> __global__ void GS_smooth_kernel(const IndexType num_rows, const IndexType * Ap, const IndexType * Aj, const ValueType * Ax, const ValueType * diag, const ValueType * b, const double weight, ValueType * x) { IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x; for(int ridx = tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x) { IndexType row_start = Ap[ridx]; IndexType row_end = Ap[ridx + 1]; ValueType Axi = 0.0; for(int j = row_start; j < row_end; j++) { Axi += Ax[j] * x[Aj[j]]; } ValueType tmp = x[ridx] + weight * (b[ridx] - Axi) / diag[ridx]; x[ridx] = tmp; } } template<> void gauss_seidel<Matrix_d, Vector_d>::smooth(const Matrix_d &A, const Vector_d &b, Vector_d &x) { if(diag.empty()) find_diag(A); typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; const size_t THREADS_PER_BLOCK = 256; const size_t NUM_BLOCKS = min(65535, (int)ceil((double)A.num_rows / (double)THREADS_PER_BLOCK)); GS_smooth_kernel<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> > (A.num_rows, thrust::raw_pointer_cast(&A.row_offsets[0]), thrust::raw_pointer_cast(&A.column_indices[0]), thrust::raw_pointer_cast(&A.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0])); } template<typename IndexType, typename ValueType> __global__ void permutation_kernel1(const int n, const IndexType* permutation, ValueType* x, ValueType* xout) { int thread_id = blockIdx.x * blockDim.x + threadIdx.x; for(int i = thread_id; i < n; i += blockDim.x * gridDim.x) xout[i] = x[permutation[i]]; } template<typename IndexType, typename ValueType> __global__ void permutation_kernel2(const int n, const IndexType* permutation, ValueType* x, ValueType* xout) { int thread_id = blockIdx.x * blockDim.x + threadIdx.x; for(int i = thread_id; i < n; i += blockDim.x * gridDim.x) xout[permutation[i]] = x[i]; } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void preRRCsr_kernel(const IndexType num_rows, const IndexType* offsets, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; short Ajlocal[NUMPERROW]; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType row = thread_id + blockstart; IndexType rowstart = offsets[row]; IndexType rowend = offsets[row + 1]; IndexType num_cols_per_row = rowend - rowstart; __shared__ ValueType s_x[1024]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); if(row < blockend) { //load in matrix A to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { Axlocal[n] = Ax[rowstart + n]; Ajlocal[n] = (short)(Aj[rowstart + n] - blockstart); } } } ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { sum += Axlocal[n] * s_x[Ajlocal[n]]; } } 
s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; //compute Ax for residual sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { sum += Axlocal[n] * s_x[Ajlocal[n]]; } } //use s_x to temperarily store the residual*P residual[row] = brow - sum; } } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void preRRCsrShared_kernel(const IndexType num_rows, const IndexType* offsets, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; extern __shared__ char s_mem[]; ValueType* s_x = (ValueType*)s_mem; unsigned short* s_Ajlocal = (unsigned short*)& s_x[blockDim.x]; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType colidxstart = offsets[blockstart]; IndexType row = thread_id + blockstart; IndexType rowstart = offsets[row]; IndexType rowend = offsets[row + 1]; IndexType num_cols_per_row = rowend - rowstart; ValueType brow, drow; if(row < blockend) { //load in matrix Aj to shared mem for(int n = 0; n < num_cols_per_row; n++) { s_Ajlocal[rowstart + n - colidxstart] = (short)(Aj[rowstart + n] - blockstart); } brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; //load in matrix Ax to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { Axlocal[n] = Ax[rowstart + n]; } } } __syncthreads(); ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { sum += Axlocal[n] * s_x[s_Ajlocal[rowstart + n - colidxstart]]; } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; //compute Ax for residual sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { sum += Axlocal[n] * s_x[s_Ajlocal[rowstart + n - colidxstart]]; } } //use s_x to temperarily store the residual*P residual[row] = brow - sum; } } template<> void gauss_seidel<Matrix_d, Vector_d>::preRRRFullCsr(const cusp::csr_matrix<IndexType, ValueType, MemorySpace>& AinCsr, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutCoo, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& restrictor, const cusp::array1d<IndexType, MemorySpace>& permutation, cusp::array1d<ValueType, MemorySpace>& b, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& bc, int level_id, int largestblksize, int largestnumentries, int largestnumperrow) { typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; //(int)ceil((double)AinEll.num_rows / (double)THREADS_PER_BLOCK); if(NUM_BLOCKS > 65535) std::cout << "Block 
number larger than 65535!!" << std::endl; const size_t SHAREDSIZE = THREADS_PER_BLOCK * sizeof(ValueType)+largestnumentries * sizeof(unsigned short); const bool useshared = (SHAREDSIZE <= 48 * 1024); const size_t NUMPERROW = largestnumperrow; cusp::array1d<ValueType, MemorySpace> residual(x.size(), 0.0); cusp::array1d<ValueType, MemorySpace> bout(b.size()); if(level_id != 0) { permutation_kernel1<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(b.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&bout[0])); b.swap(bout); } if(SHAREDSIZE <= 16 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferL1); } else if(SHAREDSIZE <= 48 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferShared); } if(NUMPERROW < 10) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 15) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 20) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), 
thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 25) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 30) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 35) { if(useshared) { 
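// note: each NUMPERROW branch instantiates the CSR kernels with a
// compile-time bound (34 for this branch) so Axlocal[] and the
// #pragma unroll loops are sized at compile time; the shared-memory
// variant is selected only when SHAREDSIZE = THREADS_PER_BLOCK*sizeof(ValueType)
// + largestnumentries*sizeof(unsigned short) fits within 48 KB (useshared).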
preRRCsrShared_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 40) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 45) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, 
thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 50) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 55) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 60) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), 
thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 65) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 70) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 76) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 75 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 75 > << 
<NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 80) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 86) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 221) { if(useshared) { preRRCsrShared_kernel<IndexType, ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), 
thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } else { preRRCsr_kernel<IndexType, ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "preRRRFullCsr num_per_row is equal or larger than 221!!" << std::endl; exit(0); } cudaThreadSetCacheConfig(cudaFuncCachePreferL1); cusp::array1d<ValueType, MemorySpace> Ax_buffer(x.size()); cusp::multiply(AoutCoo, x, Ax_buffer); cusp::blas::axpby(residual, Ax_buffer, residual, ValueType(1.0), ValueType(-1.0)); cusp::multiply(restrictor, residual, bc); } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRR_kernel(const IndexType num_rows, const IndexType num_cols, const IndexType num_cols_per_row, const IndexType pitch, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; short Ajlocal[NUMPERROW]; const short invalid_index = cusp::ell_matrix<short, ValueType, cusp::device_memory>::invalid_index; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; ValueType brow, drow; IndexType tmpidx; if(row < blockend) { brow = b[row]; drow = diag[row]; if(fabs(drow) < 1e-9) printf("drow is zero!!"); s_x[thread_id] = weight * brow / drow; } __syncthreads(); if(row < blockend) { IndexType offset = row; //load in matrix A to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { Axlocal[n] = Ax[offset]; Ajlocal[n] = invalid_index; if((tmpidx = Aj[offset]) != (IndexType)invalid_index) Ajlocal[n] = tmpidx - blockstart; offset += pitch; } } } ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { if(Ajlocal[n] != invalid_index) { sum += Axlocal[n] * s_x[Ajlocal[n]]; } } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; //compute Ax for residual sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { if(Ajlocal[n] != invalid_index) { sum += Axlocal[n] * s_x[Ajlocal[n]]; } } } //use s_x to temperarily store the residual*P residual[row] = brow - sum; } } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void preRRShared_kernel(const IndexType num_rows, const 
IndexType num_cols, const IndexType num_cols_per_row, const IndexType pitch, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; extern __shared__ char s_mem[]; ValueType* s_x = (ValueType*)s_mem; short* s_Ajlocal = (short*)&s_x[blockDim.x]; const short invalid_index = cusp::ell_matrix<short, ValueType, cusp::device_memory>::invalid_index; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType row = thread_id + blockstart; ValueType brow, drow; IndexType tmpidx; if(row < blockend) { brow = b[row]; drow = diag[row]; if(fabs(drow) < 1e-9) printf("drow is zero!!"); s_x[thread_id] = weight * brow / drow; } __syncthreads(); if(row < blockend) { IndexType offset = row; //load in matrix A to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { IndexType Ajidx = thread_id * num_cols_per_row + n; Axlocal[n] = Ax[offset]; s_Ajlocal[Ajidx] = invalid_index; if((tmpidx = Aj[offset]) != (IndexType)invalid_index) s_Ajlocal[Ajidx] = (short)(tmpidx - blockstart); offset += pitch; } } } ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { IndexType Ajidx = thread_id * num_cols_per_row + n; if(s_Ajlocal[Ajidx] != invalid_index) { sum += Axlocal[n] * s_x[s_Ajlocal[Ajidx]]; } } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; //compute Ax for residual sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { IndexType Ajidx = thread_id * num_cols_per_row + n; if(s_Ajlocal[Ajidx] != invalid_index) { sum += Axlocal[n] * s_x[s_Ajlocal[Ajidx]]; } } } //use s_x to temperarily store the residual*P residual[row] = brow - sum; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSym_kernel1(const IndexType num_rows, const IndexType num_entries, const IndexType* __restrict__ Aj, const ValueType* __restrict__ Ax, const ValueType* __restrict__ diag, const IndexType* __restrict__ aggregateIdx, const IndexType* __restrict__ partitionIdx, const IndexType* __restrict__ permutation, const IndexType* __restrict__ AinBlockIdx, const ValueType* __restrict__ b, const double weight, ValueType* __restrict__ x, ValueType* __restrict__ residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; // __shared__ ValueType 
s_b[SHAREDSIZE] = {0}; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP10_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP10_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSym_kernel2(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const IndexType* AinBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP20_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP20_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSym_kernel3(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const IndexType* AinBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is 
not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP30_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP30_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSym_kernel4(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const IndexType* AinBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP40_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP40_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void 
preRRSym_kernel5(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const IndexType* AinBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax LOOP50_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax for residual LOOP50_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSym_kernel6(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const IndexType* AinBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax LOOP60_FUNC1(); 
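// LOOP60_FUNC1/LOOP60_FUNC2 (like the other LOOP10..LOOP70 pairs) are
// assumed to be macros defined elsewhere in this file that expand the
// per-thread accumulation of the packed symmetric COO entries into s_Ax,
// presumably jumping to gotolabel/gotolabel2 once a thread's entries are
// exhausted; each Ajlocal word appears to pack two 16-bit local indices
// (see the explicit idxi/idxj unpacking in preRRSymSync_kernel below).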
gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax for residual LOOP60_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSym_kernel7(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const IndexType* AinBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax LOOP70_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax for residual LOOP70_FUNC2(); gotolabel2: __syncthreads(); if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preAout_kernel(const IndexType num_rows, const IndexType num_entries, const ValueType* __restrict__ x, ValueType* __restrict__ r, const IndexType* __restrict__ Aouti, const IndexType* __restrict__ Aoutj, const ValueType* __restrict__ Aoutv, const IndexType* __restrict__ AoutBlockIdx, const IndexType* aggregateIdx, const IndexType* partitionIdx) { __shared__ ValueType s_r[SHAREDSIZE]; IndexType tid = threadIdx.x; s_r[tid] = 0.0; __syncthreads(); //compute AoutX IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; for(int i = AoutBlockstart + tid; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_r[idxi - 
blockstart], v * x[idxj]); //assuming ValueType is float } __syncthreads(); if(tid < blockend - blockstart) { r[blockstart + tid] -= s_r[tid]; } } template<> void gauss_seidel<Matrix_d, Vector_d>::preRRRFullSymmetric(const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AinSysCoo, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutSysCoo, const cusp::array1d<IndexType, MemorySpace>& AinBlockIdx, const cusp::array1d<IndexType, MemorySpace>& AoutBlockIdx, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& restrictor, const cusp::array1d<IndexType, MemorySpace>& permutation, cusp::array1d<ValueType, MemorySpace>& b, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& bc, int level_id, int largestblksize, int largestnumentries, bool verbose) { typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" << std::endl; const size_t num_entries_per_thread = ceil((double)largestnumentries / THREADS_PER_BLOCK); if (verbose) { std::cout << "In preRRRFullSymmetric : "; std::cout << "THREADS_PER_BLOCK = " << THREADS_PER_BLOCK; std::cout << ", NUM_BLOCKS = " << NUM_BLOCKS; std::cout << std::endl; } cusp::array1d<ValueType, MemorySpace> residual(x.size(), 0.0); cusp::array1d<ValueType, MemorySpace> bout(b.size()); if(level_id != 0) { permutation_kernel1<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(b.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&bout[0])); b.swap(bout); } const int shared_size = 1024; if(largestblksize > shared_size) { std::cout << "largest block size is larger than shared size!!!" 
<< std::endl; exit(0); } cudaThreadSetCacheConfig(cudaFuncCachePreferL1); if(num_entries_per_thread < 11) { preRRSym_kernel1<IndexType, ValueType, 10, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 21) { preRRSym_kernel2<IndexType, ValueType, 20, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 31) { preRRSym_kernel3<IndexType, ValueType, 30, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 41) { preRRSym_kernel4<IndexType, ValueType, 40, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 51) { preRRSym_kernel5<IndexType, ValueType, 50, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); 
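// dispatch note: num_entries_per_thread = ceil(largestnumentries /
// THREADS_PER_BLOCK), and each branch instantiates preRRSym_kernelN with a
// matching NUMPERROW bound (10, 20, ..., 70; kernel7 is reused for the 80
// and 90 buckets) so that a thread's slice of the block's COO entries fits
// in the thread-local Ajlocal/Axlocal arrays.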
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 61) { preRRSym_kernel6<IndexType, ValueType, 60, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 71) { preRRSym_kernel7<IndexType, ValueType, 70, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 81) { preRRSym_kernel7<IndexType, ValueType, 80, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 91) { preRRSym_kernel7<IndexType, ValueType, 90, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "preRRRFullSymmetric num_entries_per_thread is larger than 90!!" 
<< std::endl; exit(0); } preAout_kernel<IndexType, ValueType, 90, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> > (AoutSysCoo.num_rows, AoutSysCoo.num_entries, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0])); cusp::multiply(restrictor, residual, bc); } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE, int NUMITERS> __global__ void preRRSymSync_kernel(const IndexType num_rows, const IndexType num_entries, const IndexType* __restrict__ Aj, const ValueType* __restrict__ Ax, const ValueType* __restrict__ diag, const IndexType* __restrict__ aggregateIdx, const IndexType* __restrict__ partitionIdx, const IndexType* __restrict__ permutation, const IndexType* __restrict__ AinBlockIdx, const ValueType* __restrict__ b, const IndexType* __restrict__ segSyncIdx, const IndexType* __restrict__ partSyncIdx, const double weight, ValueType* __restrict__ x, ValueType* __restrict__ residual, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; // assuming that 0 is not valid means (0,0) is not in this array const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; const IndexType cooblockstart = AinBlockIdx[blockIdx.x]; const IndexType cooblockend = AinBlockIdx[blockIdx.x + 1]; IndexType row = thread_id + blockstart; __shared__ ValueType s_x[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType brow, drow; if(row < blockend) { brow = b[row]; drow = diag[row]; s_x[thread_id] = weight * brow / drow; } __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = cooblockstart + thread_id + i * blockDim.x; if(entryidx < cooblockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; unsigned int idxi; unsigned int idxj; IndexType partSyncStart = partSyncIdx[blockIdx.x]; IndexType partSyncEnd = partSyncIdx[blockIdx.x + 1]; IndexType nseg = partSyncEnd - partSyncStart; IndexType cooidx; int n; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax n = 0; #pragma unroll for(int segIdx = 0; segIdx < nseg; segIdx++) { IndexType segSyncStart = segSyncIdx[partSyncStart + segIdx]; IndexType segSyncEnd = segSyncIdx[partSyncStart + segIdx + 1]; bool inside = false; cooidx = cooblockstart + n * blockDim.x + threadIdx.x; inside = (cooidx >= segSyncStart && cooidx < segSyncEnd); if(inside) { Ajreg = Ajlocal[n]; Axreg = Axlocal[n]; idxi = Ajreg >> 16; idxj = Ajreg - (idxi << 16); s_Ax[idxi] += Axreg * s_x[idxj]; } __syncthreads(); if(inside) { s_Ax[idxj] += Axreg * s_x[idxi]; n++; } __syncthreads(); } if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } __syncthreads(); if(row < blockend) { //update glocal mem x values x[row] = s_x[thread_id]; } if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax for residual n = 0; #pragma unroll for(int segIdx = 0; segIdx < nseg; segIdx++) { IndexType segSyncStart = 
segSyncIdx[partSyncStart + segIdx]; IndexType segSyncEnd = segSyncIdx[partSyncStart + segIdx + 1]; bool inside = false; cooidx = cooblockstart + n * blockDim.x + threadIdx.x; inside = (cooidx >= segSyncStart && cooidx < segSyncEnd); if(inside) { Ajreg = Ajlocal[n]; Axreg = Axlocal[n]; idxi = Ajreg >> 16; idxj = Ajreg - (idxi << 16); s_Ax[idxi] += Axreg * s_x[idxj]; } __syncthreads(); if(inside) { s_Ax[idxj] += Axreg * s_x[idxi]; n++; } __syncthreads(); } if(row < blockend) { residual[row] = brow - s_Ax[thread_id] - drow * s_x[thread_id]; } } template<> void gauss_seidel<Matrix_d, Vector_d>::preRRRFullSymmetricSync(const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AinSysCoo, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutSysCoo, const cusp::array1d<IndexType, MemorySpace>& AinBlockIdx, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& restrictor, const cusp::array1d<IndexType, MemorySpace>& permutation, cusp::array1d<ValueType, MemorySpace>& b, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& bc, const cusp::array1d<IndexType, MemorySpace>& segSyncIdx, const cusp::array1d<IndexType, MemorySpace>& partSyncIdx, int level_id, int largestblksize, int largestnumentries) { typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" << std::endl; const size_t num_entries_per_thread = ceil((double)largestnumentries / THREADS_PER_BLOCK); cusp::array1d<ValueType, MemorySpace> residual(x.size(), 0.0); cusp::array1d<ValueType, MemorySpace> bout(b.size()); if(level_id != 0) { permutation_kernel1<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(b.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&bout[0])); b.swap(bout); } const int shared_size = 1024; if(largestblksize > shared_size) { std::cout << "largest block size is larger than shared size!!!" 
<< std::endl; exit(0); } if(num_entries_per_thread < 11) { preRRSymSync_kernel<IndexType, ValueType, 10, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 21) { preRRSymSync_kernel<IndexType, ValueType, 20, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 31) { preRRSymSync_kernel<IndexType, ValueType, 30, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 41) { preRRSymSync_kernel<IndexType, ValueType, 40, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 51) { preRRSymSync_kernel<IndexType, ValueType, 50, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), 
thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 61) { preRRSymSync_kernel<IndexType, ValueType, 60, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 71) { preRRSymSync_kernel<IndexType, ValueType, 70, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "preRRRFullSymmetricSync num_entries_per_thread is larger than 70!!" << std::endl; exit(0); } cusp::array1d<ValueType, MemorySpace> Ax_buffer(x.size()); cusp::multiply(AoutSysCoo, x, Ax_buffer); cusp::blas::axpby(residual, Ax_buffer, residual, ValueType(1.0), ValueType(-1.0)); cusp::multiply(restrictor, residual, bc); } template<> void gauss_seidel<Matrix_d, Vector_d>::preRRRFull(const cusp::ell_matrix<IndexType, ValueType, MemorySpace>& AinEll, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutCoo, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& restrictor, const cusp::array1d<IndexType, MemorySpace>& permutation, cusp::array1d<ValueType, MemorySpace>& b, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& bc, int level_id, int largestblksize) { typedef typename Matrix_d::index_type IndexType; typedef typename Matrix_d::value_type ValueType; const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" 
<< std::endl; const size_t NUMPERROW = AinEll.column_indices.num_cols; const size_t SHAREDSIZE = THREADS_PER_BLOCK * sizeof(ValueType) + NUMPERROW * THREADS_PER_BLOCK * sizeof(short); bool useshared = (SHAREDSIZE <= 48 * 1024); if(SHAREDSIZE <= 16 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferL1); } else if(SHAREDSIZE <= 48 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferShared); } cusp::array1d<ValueType, MemorySpace> residual(x.size(), 0.0); cusp::array1d<ValueType, MemorySpace> bout(b.size()); if(level_id != 0) { permutation_kernel1<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(b.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&bout[0])); b.swap(bout); } const int shared_size = 1024; if(largestblksize > shared_size) { std::cout << "largest block size is larger than shared size!!!" << std::endl; exit(0); } if(NUMPERROW < 10) { if(useshared) preRRShared_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 9, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 15) { if(useshared) preRRShared_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 14, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), 
__FILE__, __LINE__); } else if(NUMPERROW < 20) { if(useshared) preRRShared_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 19, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 25) { if(useshared) preRRShared_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 24, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 30) { if(useshared) preRRShared_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 29, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, 
thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 35) { if(useshared) preRRShared_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 34, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 40) { if(useshared) preRRShared_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 39, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 45) { if(useshared) preRRShared_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), 
thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 44, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 50) { if(useshared) preRRShared_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 49, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 55) { if(useshared) preRRShared_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 54, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), 
nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 60) { if(useshared) preRRShared_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 59, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 65) { if(useshared) preRRShared_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 64, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 70) { if(useshared) preRRShared_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 69, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, 
AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 76) { if(useshared) preRRShared_kernel<IndexType, ValueType, 75 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 75, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 80) { if(useshared) preRRShared_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 79, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 86) { if(useshared) preRRShared_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), 
thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 85, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 221) { if(useshared) preRRShared_kernel<IndexType, ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); else preRR_kernel<IndexType, ValueType, 220, shared_size, 10 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&residual[0]), nPreInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "preRRRFull num_per_row is equal or larger than 221!!" 
<< std::endl; exit(0); } cudaThreadSetCacheConfig(cudaFuncCachePreferL1); cusp::array1d<ValueType, MemorySpace> Ax_buffer(x.size()); cusp::multiply(AoutCoo, x, Ax_buffer); cusp::blas::axpby(residual, Ax_buffer, residual, ValueType(1.0), ValueType(-1.0)); cusp::multiply(restrictor, residual, bc); } template<typename IndexType, typename ValueType, int NUMPERROW, int NUMITERS, int SHAREDSIZE> __global__ void postPC_kernel(const IndexType num_rows, const IndexType num_cols, const IndexType num_cols_per_row, const IndexType pitch, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const ValueType* p, const ValueType* b, const double weight, ValueType* x, ValueType* xc) { const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::device_memory>::invalid_index; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; __shared__ ValueType s_array[SHAREDSIZE]; ValueType* s_x = &s_array[0]; ValueType* s_p = &s_array[SHAREDSIZE / 2]; IndexType row = thread_id + blockstart; if(row < blockend) { s_p[thread_id] = p[row]; s_x[thread_id] = x[row]; } __syncthreads(); //correction unsigned short num_aggregates = aggrend - aggrstart; if(thread_id < num_aggregates) { unsigned short vstart = aggregateIdx[aggrstart + thread_id]; unsigned short vend = aggregateIdx[aggrstart + thread_id + 1]; ValueType xctmp = xc[aggrstart + thread_id]; for(int i = vstart; i < vend; i++) { s_x[i - blockstart] += xctmp * s_p[i - blockstart]; } } __syncthreads(); //write out the corrected x if(row < blockend) { x[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void postRelax_kernel(const IndexType num_rows, const IndexType num_cols, const IndexType num_cols_per_row, const IndexType pitch, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType num_entries, const IndexType* AoutBlockIdx, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; short Ajlocal[NUMPERROW]; const short invalid_index = cusp::ell_matrix<short, ValueType, cusp::device_memory>::invalid_index; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; __shared__ ValueType s_array[1024]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; IndexType tmpIdx; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); if(row < blockend) { IndexType offset = row; //load in matrix A to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { Axlocal[n] = Ax[offset]; Ajlocal[n] = invalid_index; if((tmpIdx = Aj[offset]) != (IndexType)invalid_index) Ajlocal[n] 
= (short)(tmpIdx - blockstart); offset += pitch; } } } ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { if(Ajlocal[n] != invalid_index) { sum += Axlocal[n] * s_x[Ajlocal[n]]; } } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void postRelaxShared_kernel(const IndexType num_rows, const IndexType num_cols, const IndexType num_cols_per_row, const IndexType pitch, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType num_entries, const IndexType* AoutBlockIdx, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { extern char s_mem[]; ValueType Axlocal[NUMPERROW]; const short invalid_index = cusp::ell_matrix<short, ValueType, cusp::device_memory>::invalid_index; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; ValueType* s_b = (ValueType*)s_mem; IndexType row = thread_id + blockstart; IndexType tmpIdx; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = (ValueType*)s_mem; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); short* s_Ajlocal = (short*)&s_x[blockDim.x]; if(row < blockend) { IndexType offset = row; //load in matrix A to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { IndexType Ajidx = thread_id * num_cols_per_row + n; Axlocal[n] = Ax[offset]; s_Ajlocal[Ajidx] = invalid_index; if((tmpIdx = Aj[offset]) != (IndexType)invalid_index) s_Ajlocal[Ajidx] = (short)(tmpIdx - blockstart); offset += pitch; } } } ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { IndexType Ajidx = thread_id * num_cols_per_row + n; if(s_Ajlocal[Ajidx] != invalid_index) { sum += Axlocal[n] * s_x[s_Ajlocal[Ajidx]]; } } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<> void gauss_seidel<Matrix_d, Vector_d>::postPCR(const cusp::ell_matrix<IndexType, ValueType, MemorySpace>& AinEll, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutCoo, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::array1d<ValueType, MemorySpace>& P, const cusp::array1d<ValueType, MemorySpace>& b, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& xc) { } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel1(const IndexType num_rows, const IndexType num_entries, const IndexType* 
__restrict__ Aj, const ValueType* __restrict__ Ax, const IndexType* __restrict__ Aouti, const IndexType* __restrict__ Aoutj, const ValueType* __restrict__ Aoutv, const IndexType Aout_num_entries, const ValueType* __restrict__ diag, const IndexType* __restrict__ aggregateIdx, const IndexType* __restrict__ partitionIdx, const IndexType* __restrict__ AinBlockIdx, const IndexType* __restrict__ AoutBlockIdx, const ValueType* __restrict__ b, const double weight, ValueType* __restrict__ x, ValueType* __restrict__ xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP10_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel2(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType Aout_num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* AinBlockIdx, const IndexType* AoutBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* 
s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP20_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel3(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType Aout_num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* AinBlockIdx, const IndexType* AoutBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP30_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } 
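    // The loop above performs nInnerIter damped Jacobi sweeps restricted to this
    // partition: LOOP30_FUNC1() (presumably a macro defined earlier in this file)
    // accumulates the off-diagonal part of A*x into s_Ax, so the update
    // s_x += weight * (brow - s_Ax - drow * s_x) / drow is the usual
    // x_i += weight * (b_i - (A*x)_i) / A_ii. The relaxed values are written back
    // to global memory below.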
if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel4(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType Aout_num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* AinBlockIdx, const IndexType* AoutBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block // for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP40_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel5(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType Aout_num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* AinBlockIdx, const IndexType* AoutBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = 
AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP50_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel6(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType Aout_num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* AinBlockIdx, const IndexType* AoutBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP60_FUNC1(); gotolabel: 
__syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE> __global__ void postRelaxSym_kernel7(const IndexType num_rows, const IndexType num_entries, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType Aout_num_entries, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* AinBlockIdx, const IndexType* AoutBlockIdx, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; unsigned int Ajlocal[NUMPERROW] = {0}; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; IndexType AoutBlockstart = AoutBlockIdx[blockIdx.x]; IndexType AoutBlockend = AoutBlockIdx[blockIdx.x + 1]; IndexType AinBlockstart = AinBlockIdx[blockIdx.x]; IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1]; __shared__ ValueType s_array[SHAREDSIZE]; __shared__ ValueType s_Ax[SHAREDSIZE]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); //add values to b out of this block // for(int i = AoutBlockstart + thread_id; i < AoutBlockend; i += blockDim.x) { IndexType idxi = Aouti[i]; IndexType idxj = Aoutj[i]; ValueType v = Aoutv[i]; atomicAdd(&s_b[idxi - blockstart], -v * x[idxj]); //assuming ValueType is float } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); #pragma unroll for(int i = 0; i < NUMPERROW; i++) { int entryidx = AinBlockstart + thread_id + i * blockDim.x; if(entryidx < AinBlockend) { Ajlocal[i] = Aj[entryidx]; Axlocal[i] = Ax[entryidx]; } } unsigned int Ajreg; ValueType Axreg; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); LOOP70_FUNC1(); gotolabel: __syncthreads(); if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<> void gauss_seidel<Matrix_d, Vector_d>::postPCRFullSymmetric(const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AinSysCoo, const cusp::array1d<IndexType, MemorySpace>& AinBlockIdx, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutSysCoo, const cusp::array1d<IndexType, MemorySpace>& AoutBlockIdx, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& prolongator, const cusp::array1d<IndexType, MemorySpace>& permutation, const cusp::array1d<ValueType, MemorySpace>& b, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& xc, int level_id, int largestblksize, int largestnumentries) { Vector_d deltax(x.size()); cusp::multiply(prolongator, xc, deltax); // e = P * x cusp::blas::axpby(x, deltax, x, ValueType(1.0), ValueType(1.0)); // x = x + e const size_t 
THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; const size_t num_entries_per_thread = ceil((double)largestnumentries / THREADS_PER_BLOCK); if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" << std::endl; const size_t shared_size = 1024; cudaThreadSetCacheConfig(cudaFuncCachePreferL1); Vector_d xout(x.size()); for(int i = 0; i < post_relaxes_; i++) { if(num_entries_per_thread < 11) { postRelaxSym_kernel1<IndexType, ValueType, 10, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 21) { postRelaxSym_kernel2<IndexType, ValueType, 20, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 31) { postRelaxSym_kernel3<IndexType, ValueType, 30, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 41) { postRelaxSym_kernel4<IndexType, ValueType, 40, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, 
thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 51) { postRelaxSym_kernel5<IndexType, ValueType, 50, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 61) { postRelaxSym_kernel6<IndexType, ValueType, 60, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 71) { postRelaxSym_kernel7<IndexType, ValueType, 70, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 81) { postRelaxSym_kernel7<IndexType, ValueType, 80, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, 
          thrust::raw_pointer_cast(&diag[0]),
          thrust::raw_pointer_cast(&aggregateIdx[0]),
          thrust::raw_pointer_cast(&partitionIdx[0]),
          thrust::raw_pointer_cast(&AinBlockIdx[0]),
          thrust::raw_pointer_cast(&AoutBlockIdx[0]),
          thrust::raw_pointer_cast(&b[0]),
          smootherWeight_,
          thrust::raw_pointer_cast(&x[0]),
          thrust::raw_pointer_cast(&xout[0]),
          nPostInnerIter_);
      AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    } else if(num_entries_per_thread < 91) {
      postRelaxSym_kernel7<IndexType, ValueType, 90, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(
          AinSysCoo.num_rows,
          AinSysCoo.num_entries,
          thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]),
          thrust::raw_pointer_cast(&AinSysCoo.values[0]),
          thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]),
          thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]),
          thrust::raw_pointer_cast(&AoutSysCoo.values[0]),
          AoutSysCoo.num_entries,
          thrust::raw_pointer_cast(&diag[0]),
          thrust::raw_pointer_cast(&aggregateIdx[0]),
          thrust::raw_pointer_cast(&partitionIdx[0]),
          thrust::raw_pointer_cast(&AinBlockIdx[0]),
          thrust::raw_pointer_cast(&AoutBlockIdx[0]),
          thrust::raw_pointer_cast(&b[0]),
          smootherWeight_,
          thrust::raw_pointer_cast(&x[0]),
          thrust::raw_pointer_cast(&xout[0]),
          nPostInnerIter_);
      AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    } else {
      std::cout << "In postPCRFullSymmetric num_entries_per_thread is larger than 90!!" << std::endl;
      exit(0);
    }
    x.swap(xout);
  }
  if(level_id != 0) {
    permutation_kernel2<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(
        x.size(),
        thrust::raw_pointer_cast(&permutation[0]),
        thrust::raw_pointer_cast(&x[0]),
        thrust::raw_pointer_cast(&xout[0]));
    x.swap(xout);
  }
}

template<typename IndexType, typename ValueType, int NUMPERROW, int SHAREDSIZE>
__global__ void postRelaxSymSync_kernel(const IndexType num_rows,
    const IndexType num_entries,
    const IndexType* __restrict__ Aj,
    const ValueType* __restrict__ Ax,
    const IndexType* __restrict__ Aouti,
    const IndexType* __restrict__ Aoutj,
    const ValueType* __restrict__ Aoutv,
    const IndexType Aout_num_entries,
    const ValueType* __restrict__ diag,
    const IndexType* __restrict__ aggregateIdx,
    const IndexType* __restrict__ partitionIdx,
    const IndexType* __restrict__ AinBlockIdx,
    const IndexType* __restrict__ AoutBlockIdx,
    const ValueType* __restrict__ b,
    const IndexType* __restrict__ segSyncIdx,
    const IndexType* __restrict__ partSyncIdx,
    const double weight,
    ValueType* __restrict__ x,
    ValueType* __restrict__ xout,
    int nInnerIter) {
  ValueType Axlocal[NUMPERROW];
  unsigned int Ajlocal[NUMPERROW] = {0};
  const IndexType thread_id = threadIdx.x;
  IndexType aggrstart = partitionIdx[blockIdx.x];
  IndexType aggrend = partitionIdx[blockIdx.x + 1];
  const IndexType blockstart = aggregateIdx[aggrstart];
  const IndexType blockend = aggregateIdx[aggrend];
  IndexType AinBlockstart = AinBlockIdx[blockIdx.x];
  IndexType AinBlockend = AinBlockIdx[blockIdx.x + 1];
  __shared__ ValueType s_array[SHAREDSIZE];
  __shared__ ValueType s_Ax[SHAREDSIZE];
  ValueType* s_b = &s_array[0];
  IndexType row = thread_id + blockstart;
  if(row < blockend) {
    s_b[thread_id] = b[row];
  }
  __syncthreads();
  ValueType brow, drow;
  if(row < blockend) {
    brow = s_b[thread_id];
    drow = diag[row];
  }
  //load x to shared memory
  ValueType* s_x = &s_array[0];
  if(row < blockend) s_x[thread_id] = x[row];
  __syncthreads();
#pragma unroll
  for(int i = 0; i < NUMPERROW; i++) {
    int entryidx = AinBlockstart + thread_id + i * blockDim.x;
    if(entryidx < AinBlockend) {
      Ajlocal[i] = Aj[entryidx];
      Axlocal[i] = Ax[entryidx];
    }
  }
  unsigned int Ajreg;
  ValueType Axreg;
  unsigned int idxi;
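  // In the synchronized sweep below, each 32-bit entry of Ajlocal appears to pack
  // the local row index in its high 16 bits and the local column index in its low
  // 16 bits (idxi = Ajreg >> 16, idxj = Ajreg - (idxi << 16)). Every stored entry
  // is applied twice, to s_Ax[idxi] and to s_Ax[idxj], with a __syncthreads()
  // between the two scatters; segSyncIdx / partSyncIdx presumably delimit segments
  // of entries that can be applied concurrently without two threads writing the
  // same shared-memory slot.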
unsigned int idxj; IndexType partSyncStart = partSyncIdx[blockIdx.x]; IndexType partSyncEnd = partSyncIdx[blockIdx.x + 1]; IndexType nseg = partSyncEnd - partSyncStart; IndexType cooidx; int n; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { s_Ax[thread_id] = 0.0; } __syncthreads(); //compute Ax n = 0; #pragma unroll for(int segIdx = 0; segIdx < nseg; segIdx++) { IndexType segSyncStart = segSyncIdx[partSyncStart + segIdx]; IndexType segSyncEnd = segSyncIdx[partSyncStart + segIdx + 1]; bool inside = false; cooidx = AinBlockstart + n * blockDim.x + threadIdx.x; inside = (cooidx >= segSyncStart && cooidx < segSyncEnd); if(inside) { Ajreg = Ajlocal[n]; Axreg = Axlocal[n]; idxi = Ajreg >> 16; idxj = Ajreg - (idxi << 16); s_Ax[idxi] += Axreg * s_x[idxj]; } __syncthreads(); if(inside) { s_Ax[idxj] += Axreg * s_x[idxi]; n++; } __syncthreads(); } if(row < blockend) { s_x[thread_id] += weight * (brow - s_Ax[thread_id] - drow * s_x[thread_id]) / drow; } } __syncthreads(); if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<> void gauss_seidel<Matrix_d, Vector_d>::postPCRFullSymmetricSync(const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AinSysCoo, const cusp::array1d<IndexType, MemorySpace>& AinBlockIdx, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutSysCoo, const cusp::array1d<IndexType, MemorySpace>& AoutBlockIdx, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& prolongator, const cusp::array1d<IndexType, MemorySpace>& permutation, const cusp::array1d<ValueType, MemorySpace>& origb, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& xc, const cusp::array1d<IndexType, MemorySpace>& segSyncIdx, const cusp::array1d<IndexType, MemorySpace>& partSyncIdx, int level_id, int largestblksize, int largestnumentries) { Vector_d deltax(x.size()); cusp::multiply(prolongator, xc, deltax); cusp::blas::axpby(x, deltax, x, ValueType(1.0), ValueType(1.0)); cusp::multiply(AoutSysCoo, x, deltax); // b' = Aout * x cusp::array1d<ValueType, MemorySpace> b(x.size()); cusp::blas::axpby(origb, deltax, b, ValueType(1.0), ValueType(-1.0)); // b = b - b' const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; const size_t num_entries_per_thread = ceil((double)largestnumentries / THREADS_PER_BLOCK); if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" 
<< std::endl; const size_t shared_size = 1024; Vector_d xout(x.size()); for(int i = 0; i < post_relaxes_; i++) { if(num_entries_per_thread < 11) { postRelaxSymSync_kernel<IndexType, ValueType, 10, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 21) { postRelaxSymSync_kernel<IndexType, ValueType, 20, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 31) { postRelaxSymSync_kernel<IndexType, ValueType, 30, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 41) { postRelaxSymSync_kernel<IndexType, ValueType, 40, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), 
thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 51) { postRelaxSymSync_kernel<IndexType, ValueType, 50, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 61) { postRelaxSymSync_kernel<IndexType, ValueType, 60, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(num_entries_per_thread < 71) { postRelaxSymSync_kernel<IndexType, ValueType, 70, shared_size> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinSysCoo.num_rows, AinSysCoo.num_entries, thrust::raw_pointer_cast(&AinSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AinSysCoo.values[0]), thrust::raw_pointer_cast(&AoutSysCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutSysCoo.values[0]), AoutSysCoo.num_entries, thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&AinBlockIdx[0]), thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&b[0]), thrust::raw_pointer_cast(&segSyncIdx[0]), thrust::raw_pointer_cast(&partSyncIdx[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "In postPCRFullSymmetricSync num_per_row larger than 70!!"
<< std::endl; exit(0); } x.swap(xout); } if(level_id != 0) { permutation_kernel2<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(x.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0])); x.swap(xout); } } template<> void gauss_seidel<Matrix_d, Vector_d>::postPCRFull(const cusp::ell_matrix<IndexType, ValueType, MemorySpace>& AinEll, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutCoo, const cusp::array1d<IndexType, MemorySpace>& AoutBlockIdx, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& prolongator, const cusp::array1d<IndexType, MemorySpace>& permutation, const cusp::array1d<ValueType, MemorySpace>& origb, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& xc, int level_id, int largestblksize) { Vector_d deltax(x.size()); cusp::multiply(prolongator, xc, deltax); cusp::blas::axpby(x, deltax, x, ValueType(1.0), ValueType(1.0)); cusp::multiply(AoutCoo, x, deltax); // b' = Aout * x cusp::array1d<ValueType, MemorySpace> b(x.size()); cusp::blas::axpby(origb, deltax, b, ValueType(1.0), ValueType(-1.0)); // b = b - b' const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" << std::endl; const size_t NUMPERROW = AinEll.column_indices.num_cols; const size_t SHAREDSIZE = THREADS_PER_BLOCK * sizeof(ValueType) + NUMPERROW * THREADS_PER_BLOCK * sizeof(short); bool useshared = (SHAREDSIZE <= 48 * 1024); if(SHAREDSIZE <= 16 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferL1); } else if(SHAREDSIZE <= 48 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferShared); } Vector_d xout(x.size()); for(int i = 0; i < post_relaxes_; i++) { if(NUMPERROW < 10) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), 
thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 15) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 20) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), 
nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 25) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 30) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
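// Editorial note (added comment, not in the original source): the branches of this
// if/else ladder differ only in the NUMPERROW template argument, which (as in the other
// postRelax* kernels in this file) fixes the size of the per-thread arrays holding one
// row's ELL entries, so the host selects the smallest specialization whose capacity
// covers AinEll.column_indices.num_cols. A compile-time constant is needed here,
// presumably because local arrays sized by a runtime value cannot be kept in registers
// in device code; the explicit ladder of thresholds 10, 15, 20, ... mapping to
// specializations 9, 14, 19, ... is the price of that.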
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 35) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 40) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 45) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 50) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
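// Editorial note (added comment): each bucket launches either postRelaxShared_kernel,
// which stages the block's column indices in dynamically allocated shared memory (the
// SHAREDSIZE byte count passed as the third launch parameter, computed above as
// THREADS_PER_BLOCK * sizeof(ValueType) + NUMPERROW * THREADS_PER_BLOCK * sizeof(short)),
// or postRelax_kernel, which as its launch configuration suggests does not use that
// dynamically sized buffer. useshared is true only when the footprint fits the 48 KB
// shared-memory budget this code checks for, and the cache is configured accordingly
// (PreferL1 when <= 16 KB, PreferShared otherwise).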
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 55) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 60) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 65) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 70) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 76) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 75 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 75 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 80) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 86) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 221) { if(useshared) postRelaxShared_kernel<IndexType, ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); else postRelax_kernel<IndexType, ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinEll.num_rows, AinEll.num_cols, AinEll.column_indices.num_cols, AinEll.column_indices.pitch, thrust::raw_pointer_cast(&AinEll.column_indices.values[0]), thrust::raw_pointer_cast(&AinEll.values.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); 
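// Editorial note (added comment): the ELL buckets grow in steps of roughly five entries
// per row up to 85 and then jump to a single catch-all specialization at 220. Judging
// from the kernels visible in this file (postRelaxSymSync_kernel, postRelaxCsr_kernel),
// every postRelax* variant performs nPostInnerIter_ sweeps of weighted (damped) Jacobi
// on the rows of one partition, keeping the local solution in shared memory:
//   x <- x + smootherWeight_ * (b - Ain * x) / diag
// (the symmetric variant handles the diagonal term explicitly), where b has already been
// reduced on the host to b = origb - Aout * x, i.e. the coupling to rows outside the
// partition is frozen for the duration of the sweeps. The outer for(i < post_relaxes_)
// loop repeats the whole pass, swapping x and xout between passes.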
AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "In postPCRFull num_per_row equal to or larger than 221!!" << std::endl; exit(0); } x.swap(xout); } cudaThreadSetCacheConfig(cudaFuncCachePreferL1); if(level_id != 0) { permutation_kernel2<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(x.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0])); x.swap(xout); } } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void postRelaxCsr_kernel(const IndexType num_rows, const IndexType* offsets, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType num_entries, const IndexType* AoutBlockIdx, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { ValueType Axlocal[NUMPERROW]; short Ajlocal[NUMPERROW]; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; __shared__ ValueType s_array[1024]; ValueType* s_b = &s_array[0]; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); IndexType rowstart = offsets[row]; IndexType rowend = offsets[row + 1]; IndexType num_cols_per_row = rowend - rowstart; if(row < blockend) { //load in matrix A to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { Axlocal[n] = Ax[rowstart + n]; Ajlocal[n] = (short)(Aj[rowstart + n] - blockstart); } } } ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow = diag[row]; } //load x to shared memory ValueType* s_x = &s_array[0]; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { sum += Axlocal[n] * s_x[Ajlocal[n]]; } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update global mem x values xout[row] = s_x[thread_id]; } } template<typename IndexType, typename ValueType, int NUMPERROW> __global__ void postRelaxCsrShared_kernel(const IndexType num_rows, const IndexType* offsets, const IndexType* Aj, const ValueType* Ax, const IndexType* Aouti, const IndexType* Aoutj, const ValueType* Aoutv, const IndexType num_entries, const IndexType* AoutBlockIdx, const ValueType* diag, const IndexType* aggregateIdx, const IndexType* partitionIdx, const IndexType* permutation, const ValueType* b, const double weight, ValueType* x, ValueType* xout, int nInnerIter) { extern __shared__ char s_mem[]; ValueType Axlocal[NUMPERROW]; unsigned short* s_Ajlocal; const IndexType thread_id = threadIdx.x; IndexType aggrstart = partitionIdx[blockIdx.x]; IndexType aggrend = partitionIdx[blockIdx.x + 1]; const IndexType blockstart = aggregateIdx[aggrstart]; const IndexType blockend = aggregateIdx[aggrend]; ValueType* s_b = (ValueType*)s_mem; IndexType row = thread_id + blockstart; if(row < blockend) { s_b[thread_id] = b[row]; } __syncthreads(); ValueType brow, drow; if(row < blockend) { brow = s_b[thread_id]; drow =
diag[row]; } //load x to shared memory ValueType* s_x = (ValueType*)s_mem; if(row < blockend) s_x[thread_id] = x[row]; __syncthreads(); s_Ajlocal = (unsigned short*)& s_x[blockDim.x]; IndexType rowstart = offsets[row]; IndexType rowend = offsets[row + 1]; IndexType num_cols_per_row = rowend - rowstart; const IndexType colidxstart = offsets[blockstart]; if(row < blockend) { //load in matrix Aj to shared mem for(int n = 0; n < num_cols_per_row; n++) { s_Ajlocal[rowstart + n - colidxstart] = (short)(Aj[rowstart + n] - blockstart); } //load in matrix Ax to registers #pragma unroll for(int n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { Axlocal[n] = Ax[rowstart + n]; } } } ValueType sum; //inner iteration #pragma unroll for(int iter = 0; iter < nInnerIter; iter++) { if(row < blockend) { //compute Ax sum = 0.0; #pragma unroll for(unsigned short n = 0; n < NUMPERROW; n++) { if(n < num_cols_per_row) { sum += Axlocal[n] * s_x[s_Ajlocal[rowstart + n - colidxstart]]; } } s_x[thread_id] = s_x[thread_id] + weight * (brow - sum) / drow; } __syncthreads(); } if(row < blockend) { //update glocal mem x values xout[row] = s_x[thread_id]; } } template<> void gauss_seidel<Matrix_d, Vector_d>::postPCRFullCsr(const cusp::csr_matrix<IndexType, ValueType, MemorySpace>& AinCsr, const cusp::coo_matrix<IndexType, ValueType, MemorySpace>& AoutCoo, const cusp::array1d<IndexType, MemorySpace>& AoutBlockIdx, const cusp::array1d<IndexType, MemorySpace>& aggregateIdx, const cusp::array1d<IndexType, MemorySpace>& partitionIdx, const cusp::hyb_matrix<IndexType, ValueType, MemorySpace>& prolongator, const cusp::array1d<IndexType, MemorySpace>& permutation, const cusp::array1d<ValueType, MemorySpace>& origb, cusp::array1d<ValueType, MemorySpace>& x, cusp::array1d<ValueType, MemorySpace>& xc, int level_id, int largestblksize, int largestnumentries, int largestnumperrow) { Vector_d deltax(x.size()); cusp::multiply(prolongator, xc, deltax); cusp::blas::axpby(x, deltax, x, ValueType(1.0), ValueType(1.0)); cusp::multiply(AoutCoo, x, deltax); // b' = Aout * x cusp::array1d<ValueType, MemorySpace> b(x.size()); cusp::blas::axpby(origb, deltax, b, ValueType(1.0), ValueType(-1.0)); // b = b - b' const size_t THREADS_PER_BLOCK = largestblksize; const size_t NUM_BLOCKS = partitionIdx.size() - 1; if(NUM_BLOCKS > 65535) std::cout << "Block number larger than 65535!!" 
<< std::endl; const size_t SHAREDSIZE = THREADS_PER_BLOCK * sizeof(ValueType)+largestnumentries * sizeof(unsigned short); const size_t NUMPERROW = largestnumperrow; const bool useshared = (SHAREDSIZE <= 48 * 1024); if(SHAREDSIZE <= 16 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferL1); } else if(SHAREDSIZE <= 48 * 1024) { cudaThreadSetCacheConfig(cudaFuncCachePreferShared); } Vector_d xout(x.size()); for(int i = 0; i < post_relaxes_; i++) { if(NUMPERROW < 10) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 9 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 15) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 14 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), 
thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 20) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 19 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 25) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 24 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), 
thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 30) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 29 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 35) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 34 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 40) { if(useshared) { 
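// Editorial note (added comment): in the shared-memory CSR path chosen here,
// postRelaxCsrShared_kernel stores the block's column indices as 16-bit block-local
// offsets in the extern __shared__ buffer (s_Ajlocal, indexed relative to
// offsets[blockstart]) while the matrix values stay in per-thread registers; this is why
// SHAREDSIZE above is THREADS_PER_BLOCK * sizeof(ValueType) + largestnumentries *
// sizeof(unsigned short), and why this path is taken only when that total fits in 48 KB.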
postRelaxCsrShared_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 39 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 45) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 44 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 50) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), 
thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 49 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 55) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 54 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 60) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), 
thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 59 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 65) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 64 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 70) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), 
thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 69 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 76) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 75 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 75 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 80) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), 
thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 79 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 86) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, ValueType, 85 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else if(NUMPERROW < 221) { if(useshared) { postRelaxCsrShared_kernel<IndexType, ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK, SHAREDSIZE >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } else { postRelaxCsr_kernel<IndexType, 
ValueType, 220 > << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(AinCsr.num_rows, thrust::raw_pointer_cast(&AinCsr.row_offsets[0]), thrust::raw_pointer_cast(&AinCsr.column_indices[0]), thrust::raw_pointer_cast(&AinCsr.values[0]), thrust::raw_pointer_cast(&AoutCoo.row_indices[0]), thrust::raw_pointer_cast(&AoutCoo.column_indices[0]), thrust::raw_pointer_cast(&AoutCoo.values[0]), AoutCoo.num_entries, thrust::raw_pointer_cast(&AoutBlockIdx[0]), thrust::raw_pointer_cast(&diag[0]), thrust::raw_pointer_cast(&aggregateIdx[0]), thrust::raw_pointer_cast(&partitionIdx[0]), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&b[0]), smootherWeight_, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0]), nPostInnerIter_); } AggMIS::CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__); } else { std::cout << "In postPCRFull num_per_row is equal to or larger than 221!" << std::endl; exit(0); } x.swap(xout); } cudaThreadSetCacheConfig(cudaFuncCachePreferL1); if(level_id != 0) { permutation_kernel2<IndexType, ValueType> << <NUM_BLOCKS, THREADS_PER_BLOCK >> >(x.size(), thrust::raw_pointer_cast(&permutation[0]), thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&xout[0])); x.swap(xout); } } /**************************************** * Explicit instantiations ***************************************/ template class gauss_seidel<Matrix_d, Vector_d>;
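// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source). The long if/else
// cascade above maps the runtime NUMPERROW onto one of a fixed set of
// compile-time bounds (19, 24, ..., 220), presumably so that
// postRelaxCsr_kernel / postRelaxCsrShared_kernel can size their per-row
// scratch storage statically. The same runtime-to-compile-time mapping can be
// expressed once with a small variadic dispatcher. All names below
// (dummy_relax_kernel, dispatch_by_bound) are hypothetical and only
// demonstrate the pattern; this is not a drop-in replacement for the launches
// in this file.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

// Stand-in kernel whose template parameter plays the role of the
// "max entries per row" bound used by the real relaxation kernels.
template <int MAX_PER_ROW>
__global__ void dummy_relax_kernel(int n)
{
  if (blockIdx.x * blockDim.x + threadIdx.x == 0 && n > 0)
    printf("launched with compile-time bound %d\n", MAX_PER_ROW);
}

// Base case: a single candidate bound.
template <int BOUND>
bool dispatch_by_bound(int num_per_row, int n)
{
  if (num_per_row > BOUND) return false; // no bound large enough
  dummy_relax_kernel<BOUND><<<1, 32>>>(n);
  return true;
}

// Recursive case: try the smallest bound first, then the rest.
template <int BOUND, int NEXT, int... REST>
bool dispatch_by_bound(int num_per_row, int n)
{
  if (num_per_row <= BOUND) {
    dummy_relax_kernel<BOUND><<<1, 32>>>(n);
    return true;
  }
  return dispatch_by_bound<NEXT, REST...>(num_per_row, n);
}

int main()
{
  // Same thresholds as the cascade above; NUMPERROW = 42 selects the <44>
  // instantiation, and values above 220 report an error like the else branch.
  if (!dispatch_by_bound<19, 24, 29, 34, 39, 44, 49, 54, 59, 64, 69, 75, 79, 85, 220>(42, 1))
    std::printf("num_per_row too large\n");
  cudaDeviceSynchronize();
  return 0;
}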
#include <nvbio-test/alignment_test_utils.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cached_iterator.h> #include <nvbio/basic/packedstream.h> #include <nvbio/basic/packedstream_loader.h> #include <nvbio/basic/vector_view.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/shared_pointer.h> #include <nvbio/basic/dna.h> #include <nvbio/alignment/alignment.h> #include <nvbio/alignment/batched.h> #include <nvbio/alignment/sink.h> #include <thrust/device_vector.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> using namespace nvbio; namespace nvbio { namespace aln { enum { CACHE_SIZE = 32 }; typedef nvbio::lmem_cache_tag<CACHE_SIZE> lmem_cache_tag_type; typedef nvbio::uncached_tag uncached_tag_type; // // An alignment stream class to be used in conjunction with the BatchAlignmentScore class // template <typename t_aligner_type, uint32 M, uint32 N, typename cache_type = lmem_cache_tag_type> struct AlignmentStream { typedef t_aligner_type aligner_type; typedef nvbio::cuda::ldg_pointer<uint32> storage_iterator; typedef nvbio::PackedStringLoader<storage_iterator,4,false,cache_type> pattern_loader_type; typedef typename pattern_loader_type::input_iterator uncached_pattern_iterator; typedef typename pattern_loader_type::iterator pattern_iterator; typedef nvbio::vector_view<pattern_iterator> pattern_string; typedef nvbio::PackedStringLoader<storage_iterator,2,false,cache_type> text_loader_type; typedef typename text_loader_type::input_iterator uncached_text_iterator; typedef typename text_loader_type::iterator text_iterator; typedef nvbio::vector_view<text_iterator> text_string; // an alignment context struct context_type { int32 min_score; aln::BestSink<int32> sink; }; // a container for the strings to be aligned struct strings_type { pattern_loader_type pattern_loader; text_loader_type text_loader; pattern_string pattern; trivial_quality_string quals; text_string text; }; // constructor AlignmentStream( aligner_type _aligner, const uint32 _count, const uint32* _patterns, const uint32* _text, int16* _scores) : m_aligner( _aligner ), m_count(_count), m_patterns(storage_iterator(_patterns)), m_text(storage_iterator(_text)), m_scores(_scores) {} // get the aligner NVBIO_FORCEINLINE NVBIO_HOST_DEVICE const aligner_type& aligner() const { return m_aligner; }; // return the maximum pattern length NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 max_pattern_length() const { return M; } // return the maximum text length NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 max_text_length() const { return N; } // return the stream size NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 size() const { return m_count; } // return the i-th pattern's length NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 pattern_length(const uint32 i, context_type* context) const { return M; } // return the i-th text's length NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint32 text_length(const uint32 i, context_type* context) const { return N; } // initialize the i-th context NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool init_context( const uint32 i, context_type* context) const { context->min_score = Field_traits<int32>::min(); return true; } // initialize the i-th context NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void load_strings( const uint32 i, const uint32 window_begin, const uint32 window_end, const context_type* context, strings_type* strings) const { strings->pattern = pattern_string( M, strings->pattern_loader.load( m_patterns + i * M, M, make_uint2( 
window_begin, window_end ), false ) ); strings->text = text_string( N, strings->text_loader.load( m_text + i * N, N ) ); } // handle the output NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void output( const uint32 i, const context_type* context) const { // copy the output score m_scores[i] = context->sink.score; } aligner_type m_aligner; uint32 m_count; uncached_pattern_iterator m_patterns; uncached_text_iterator m_text; int16* m_scores; }; // A simple kernel to test the speed of alignment without the possible overheads of the BatchAlignmentScore interface // template <uint32 BLOCKDIM, uint32 MAX_REF_LEN, typename aligner_type, typename score_type> __global__ void alignment_test_kernel(const aligner_type aligner, const uint32 N_probs, const uint32 M, const uint32 N, const uint32* strptr, const uint32* refptr, score_type* score) { const uint32 tid = blockIdx.x * BLOCKDIM + threadIdx.x; typedef lmem_cache_tag_type lmem_cache_type; typedef nvbio::cuda::ldg_pointer<uint32> storage_iterator; typedef nvbio::PackedStringLoader<storage_iterator,4,false,lmem_cache_type> pattern_loader_type; typedef typename pattern_loader_type::input_iterator uncached_pattern_iterator; typedef typename pattern_loader_type::iterator pattern_iterator; typedef nvbio::vector_view<pattern_iterator> pattern_string; typedef nvbio::PackedStringLoader<storage_iterator,2,false,lmem_cache_type> text_loader_type; typedef typename text_loader_type::input_iterator uncached_text_iterator; typedef typename text_loader_type::iterator text_iterator; typedef nvbio::vector_view<text_iterator> text_string; pattern_loader_type pattern_loader; pattern_string pattern = pattern_string( M, pattern_loader.load( uncached_pattern_iterator( strptr ) + tid * M, tid < N_probs ? M : 0u ) ); text_loader_type text_loader; text_string text = text_string( N, text_loader.load( uncached_text_iterator( refptr ) + tid * N, tid < N_probs ? 
N : 0u ) ); aln::BestSink<int32> sink; aln::alignment_score<MAX_REF_LEN>( aligner, pattern, aln::trivial_quality_string(), text, Field_traits<int32>::min(), sink ); score[tid] = sink.score; } // // A class for making a single alignment test, testing both scoring and traceback // struct SingleTest { thrust::host_vector<uint8> str_hvec; thrust::host_vector<uint8> ref_hvec; thrust::device_vector<uint8> str_dvec; thrust::device_vector<uint8> ref_dvec; thrust::device_vector<float> temp_dvec; thrust::device_vector<float> score_dvec; thrust::device_vector<uint2> sink_dvec; // test full DP alignment // // \param test test name // \param aligner alignment algorithm // \param ref_alignment reference alignment string // template <uint32 BLOCKDIM, uint32 N, uint32 M, typename aligner_type> void full(const char* test, const aligner_type aligner, const char* ref_alignment) { NVBIO_VAR_UNUSED const uint32 CHECKPOINTS = 32u; typedef ScoreMatrices<N,M,typename aligner_type::aligner_tag> SWMatrices; SharedPointer<SWMatrices> mat = SharedPointer<SWMatrices>( new SWMatrices() ); const uint8* str_hptr = nvbio::raw_pointer( str_hvec ); const uint8* ref_hptr = nvbio::raw_pointer( ref_hvec ); typename column_storage_type<aligner_type>::type column[N]; const int32 ref_score = ref_sw<M,N>( str_hptr, ref_hptr, aligner, mat.get() ); aln::BestSink<int32> sink; aln::alignment_score( aligner, vector_view<const uint8*>( M, str_hptr ), trivial_quality_string(), vector_view<const uint8*>( N, ref_hptr ), -1000, sink, column ); const int32 cpu_score = sink.score; if (cpu_score != ref_score) { log_error(stderr, " expected %s score %d, got: %d\n", test, ref_score, cpu_score); exit(1); } TestBacktracker backtracker; backtracker.clear(); const Alignment<int32> aln = aln::alignment_traceback<1024u,1024u,CHECKPOINTS>( aligner, vector_view<const uint8*>( M, str_hptr ), trivial_quality_string(), vector_view<const uint8*>( N, ref_hptr ), -1000, backtracker ); const int32 aln_score = backtracker.score( aligner, aln.source.x, str_hptr, ref_hptr ); const std::string aln_string = rle( backtracker.aln ).c_str(); if (aln_score != ref_score) { log_error(stderr, " expected %s backtracking score %d, got %d\n", test, ref_score, aln_score); log_error(stderr, " %s - %d - [%u, %u] x [%u, %u]\n", aln_string.c_str(), aln.score, aln.source.x, aln.sink.x, aln.source.y, aln.sink.y); //mat->print(); exit(1); } fprintf(stderr, " %15s : ", test); fprintf(stderr, "%d - %s - [%u:%u] x [%u:%u]\n", aln.score, aln_string.c_str(), aln.source.x, aln.sink.x, aln.source.y, aln.sink.y); if (strcmp( ref_alignment, aln_string.c_str() ) != 0) { log_error(stderr, " expected %s, got %s\n", ref_alignment, aln_string.c_str()); exit(1); } } // test banded alignment // // \param test test name // \param aligner alignment algorithm // \param ref_alignment reference alignment string // template <uint32 BLOCKDIM, uint32 BAND_LEN, const uint32 N, const uint32 M, typename aligner_type> void banded(const char* test, const aligner_type aligner, const char* ref_alignment) { NVBIO_VAR_UNUSED const uint32 CHECKPOINTS = 32u; const uint8* str_hptr = nvbio::raw_pointer( str_hvec ); const uint8* ref_hptr = nvbio::raw_pointer( ref_hvec ); const int32 ref_score = ref_banded_sw<M,N,BAND_LEN>( str_hptr, ref_hptr, 0u, aligner ); aln::BestSink<int32> sink; aln::banded_alignment_score<BAND_LEN>( aligner, vector_view<const uint8*>( M, str_hptr ), trivial_quality_string(), vector_view<const uint8*>( N, ref_hptr ), -1000, sink ); const int32 cpu_score = sink.score; if (cpu_score != ref_score) { 
log_error(stderr, " expected %s score %d, got: %d\n", test, ref_score, cpu_score); exit(1); } TestBacktracker backtracker; backtracker.clear(); const Alignment<int32> aln = aln::banded_alignment_traceback<BAND_LEN,1024u,CHECKPOINTS>( aligner, vector_view<const uint8*>( M, str_hptr ), trivial_quality_string(), vector_view<const uint8*>( N, ref_hptr ), -1000, backtracker ); const int32 aln_score = backtracker.score( aligner, aln.source.x, str_hptr, ref_hptr ); const std::string aln_string = rle( backtracker.aln ).c_str(); if (aln_score != ref_score) { log_error(stderr, " expected %s backtracking score %d, got %d\n", ref_score, aln_score); log_error(stderr, " %s - %d - [%u, %u] x [%u, %u]\n", aln_string.c_str(), aln.score, aln.source.x, aln.sink.x, aln.source.y, aln.sink.y); exit(1); } fprintf(stderr, " %15s : ", test); fprintf(stderr, "%d - %s - [%u:%u] x [%u:%u]\n", aln.score, aln_string.c_str(), aln.source.x, aln.sink.x, aln.source.y, aln.sink.y); if (strcmp( ref_alignment, aln_string.c_str() ) != 0) { log_error(stderr, " expected %s, got %s\n", ref_alignment, aln_string.c_str()); exit(1); } } }; // execute a given batch alignment type on a given stream // // \tparam batch_type a \ref BatchAlignment "Batch Alignment" // \tparam stream_type a stream compatible to the given batch_type // // \return average time // template <typename batch_type, typename stream_type> float enact_batch( batch_type& batch, const stream_type& stream, const uint32 n_tests, const uint32 n_tasks) { // alloc all the needed temporary storage const uint64 temp_size = batch_type::max_temp_storage( stream.max_pattern_length(), stream.max_text_length(), stream.size() ); thrust::device_vector<uint8> temp_dvec( temp_size ); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) { // enact the batch batch.enact( stream, temp_size, nvbio::raw_pointer( temp_dvec ) ); cudaDeviceSynchronize(); } timer.stop(); return timer.seconds() / float(n_tests); } // execute and time a batch of full DP alignments using BatchAlignmentScore // template <bool supported, typename scheduler_type, uint32 N, uint32 M, typename stream_type> struct batch_score_profile_dispatch { static void run( const stream_type stream, const uint32 n_tests, const uint32 n_tasks) {} }; // execute and time a batch of full DP alignments using BatchAlignmentScore // template <typename scheduler_type, uint32 N, uint32 M, typename stream_type> struct batch_score_profile_dispatch<true,scheduler_type,N,M,stream_type> { static void run( const stream_type stream, const uint32 n_tests, const uint32 n_tasks) { typedef aln::BatchedAlignmentScore<stream_type, scheduler_type> batch_type; // our batch type // setup a batch batch_type batch; const float time = enact_batch( batch, stream, n_tests, n_tasks ); fprintf(stderr," %5.1f", 1.0e-9f * float(n_tasks*uint64(N*M))/time ); } }; // execute and time a batch of full DP alignments using BatchAlignmentScore // template <typename scheduler_type, uint32 N, uint32 M, typename stream_type> void batch_score_profile( const stream_type stream, const uint32 n_tests, const uint32 n_tasks) { NVBIO_VAR_UNUSED const bool is_supported = aln::supports_scheduler<typename stream_type::aligner_type,scheduler_type>::pred; batch_score_profile_dispatch<is_supported,scheduler_type,N,M,stream_type>::run( stream, n_tests, n_tasks ); } // execute and time the batch_score<scheduler> algorithm for all possible schedulers // template <uint32 N, uint32 M, typename aligner_type> void batch_score_profile_all( const aligner_type aligner, const uint32 
n_tests, const uint32 n_tasks, thrust::device_vector<uint32>& pattern_dvec, thrust::device_vector<uint32>& text_dvec, thrust::device_vector<int16>& score_dvec) { { typedef AlignmentStream<aligner_type,M,N> stream_type; // create a stream stream_type stream( aligner, n_tasks, nvbio::raw_pointer( pattern_dvec ), nvbio::raw_pointer( text_dvec ), nvbio::raw_pointer( score_dvec ) ); // test the DeviceThreadScheduler batch_score_profile<DeviceThreadScheduler,N,M>( stream, n_tests, n_tasks ); // test the DeviceStagedThreadScheduler batch_score_profile<DeviceStagedThreadScheduler,N,M>( stream, n_tests, n_tasks ); } { typedef AlignmentStream<aligner_type,M,N,uncached_tag_type> stream_type; // create a stream stream_type stream( aligner, n_tasks, nvbio::raw_pointer( pattern_dvec ), nvbio::raw_pointer( text_dvec ), nvbio::raw_pointer( score_dvec ) ); // test the DeviceWarpScheduler batch_score_profile<DeviceWarpScheduler,N,M>( stream, n_tests, n_tasks ); } { const uint32 BLOCKDIM = 128; const uint32 N_BLOCKS = (n_tasks + BLOCKDIM-1) / BLOCKDIM; Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) { // enact the batch alignment_test_kernel<BLOCKDIM,N> <<<N_BLOCKS,BLOCKDIM>>>( aligner, n_tasks, M, N, nvbio::raw_pointer( pattern_dvec ), nvbio::raw_pointer( text_dvec ), nvbio::raw_pointer( score_dvec ) ); cudaDeviceSynchronize(); } timer.stop(); const float time = timer.seconds(); fprintf(stderr," %5.1f", 1.0e-9f * float(n_tasks*uint64(N*M))*(float(n_tests)/time) ); } fprintf(stderr, " GCUPS\n"); } // execute and time a batch of banded alignments using BatchBandedAlignmentScore // template <uint32 BAND_LEN, typename scheduler_type, uint32 N, uint32 M, typename stream_type> void batch_banded_score_profile( const stream_type stream, const uint32 n_tests, const uint32 n_tasks) { typedef aln::BatchedBandedAlignmentScore<BAND_LEN,stream_type, scheduler_type> batch_type; // our batch type // setup a batch batch_type batch; const float time = enact_batch( batch, stream, n_tests, n_tasks ); fprintf(stderr," %5.1f", 1.0e-9f * float(n_tasks*uint64(BAND_LEN*M))*(float(n_tests)/time) ); } // execute and time the batch_banded_score<scheduler> algorithm for all possible schedulers // template <uint32 BAND_LEN, uint32 N, uint32 M, typename aligner_type> void batch_banded_score_profile_all( const aligner_type aligner, const uint32 n_tests, const uint32 n_tasks, thrust::device_vector<uint32>& pattern_dvec, thrust::device_vector<uint32>& text_dvec, thrust::device_vector<int16>& score_dvec) { typedef AlignmentStream<aligner_type,M,N> stream_type; // create a stream stream_type stream( aligner, n_tasks, nvbio::raw_pointer( pattern_dvec ), nvbio::raw_pointer( text_dvec ), nvbio::raw_pointer( score_dvec ) ); // test the DeviceThreadScheduler batch_banded_score_profile<BAND_LEN,DeviceThreadScheduler,N,M>( stream, n_tests, n_tasks ); // test the DeviceStagedThreadScheduler batch_banded_score_profile<BAND_LEN,DeviceStagedThreadScheduler,N,M>( stream, n_tests, n_tasks ); // TODO: test DeviceWarpScheduler fprintf(stderr, " GCUPS\n"); } // a simple banded edit distance test // template <typename string_type> void banded_edit_distance_test( const uint32 test_id, const string_type pattern, const string_type text, const int32 ref_score) { const int32 ed = banded_alignment_score<5>( make_edit_distance_aligner<aln::SEMI_GLOBAL>(), pattern, text, -255 ); if (ed != ref_score) { log_error(stderr, " synthetic Edit Distance test %u... 
failed\n", test_id); log_error(stderr, " expected %d, got: %d - pattern: %s text: %s\n", ref_score, ed, pattern.begin(), text.begin()); exit(1); } else fprintf(stderr, " synthetic Edit Distance test %u... passed!\n", test_id); } void test(int argc, char* argv[]) { uint32 n_tests = 1; NVBIO_VAR_UNUSED uint32 N_WARP_TASKS = 4096; uint32 N_THREAD_TASKS = 128*1024; uint32 TEST_MASK = 0xFFFFFFFFu; for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-N-thread-tasks" ) == 0) N_THREAD_TASKS = atoi( argv[++i] ); else if (strcmp( argv[i], "-N-warp-tasks" ) == 0) N_WARP_TASKS = atoi( argv[++i] ); else if (strcmp( argv[i], "-N-tests" ) == 0) n_tests = atoi( argv[++i] ); else if (strcmp( argv[i], "-tests" ) == 0) { const std::string tests_string( argv[++i] ); char temp[256]; const char* begin = tests_string.c_str(); const char* end = begin; TEST_MASK = 0u; while (1) { while (*end != ':' && *end != '\0') { temp[end - begin] = *end; end++; } temp[end - begin] = '\0'; if (strcmp( temp, "functional" ) == 0) TEST_MASK |= FUNCTIONAL; else if (strcmp( temp, "ed" ) == 0) TEST_MASK |= ED; else if (strcmp( temp, "ed-banded" ) == 0) TEST_MASK |= ED_BANDED; else if (strcmp( temp, "sw" ) == 0) TEST_MASK |= SW; else if (strcmp( temp, "sw-banded" ) == 0) TEST_MASK |= SW_BANDED; else if (strcmp( temp, "sw-warp" ) == 0) TEST_MASK |= SW_WARP; else if (strcmp( temp, "sw-striped" ) == 0) TEST_MASK |= SW_STRIPED; else if (strcmp( temp, "gotoh" ) == 0) TEST_MASK |= GOTOH; else if (strcmp( temp, "gotoh-banded" ) == 0) TEST_MASK |= GOTOH_BANDED; if (*end == '\0') break; ++end; begin = end; } } } fprintf(stderr,"testing alignment... started\n"); if (TEST_MASK & FUNCTIONAL) { typedef vector_view<const char*> const_string; // right aligned, no gaps { const_string text = make_string("AAAAGGGTGCTCAA"); const_string pattern = make_string("GGGTGCTCAA"); banded_edit_distance_test( 1u, // test id pattern, // pattern text, // text 0 ); // expected score } // right aligned, 2 insertions { const_string text = make_string("AAAAGGGTGCTCAA"); const_string pattern = make_string("GGGTAAGCTC"); banded_edit_distance_test( 2u, // test id pattern, // pattern text, // text -2 ); // expected score } // right aligned, 2 deletions { const_string text = make_string("AAAAGGGTGCAATC"); const_string pattern = make_string("AAGGGTGCTC"); banded_edit_distance_test( 3u, // test id pattern, // pattern text, // text -2 ); // expected score } // left aligned, zero gaps { const_string text = make_string("AAAAGGGTGCTCAA"); const_string pattern = make_string("AAAAGGGTGC"); banded_edit_distance_test( 4u, // test id pattern, // pattern text, // text 0 ); // expected score } // left aligned, 2 deletions { const_string text = make_string("AAAAGGAAGTGCTC"); const_string pattern = make_string("AAAAGGGTG"); banded_edit_distance_test( 5u, // test id pattern, // pattern text, // text -2 ); // expected score } // centrally aligned, 2 insertions { const_string text = make_string("AACAGGGTGCTC"); const_string pattern = make_string("CACCGGGT"); banded_edit_distance_test( 6u, // test id pattern, // pattern text, // text -2 ); // expected score } } if (TEST_MASK & FUNCTIONAL) { NVBIO_VAR_UNUSED const uint32 BLOCKDIM = 128; const uint32 M = 7; const uint32 N = 20; thrust::host_vector<uint8> str_hvec( M ); thrust::host_vector<uint8> ref_hvec( N ); uint8* str_hptr = nvbio::raw_pointer( str_hvec ); uint8* ref_hptr = nvbio::raw_pointer( ref_hvec ); string_to_dna("ACAACTA", str_hptr); string_to_dna("AAACACCCTAACACACTAAA", ref_hptr); SingleTest test; 
nvbio::cuda::thrust_copy_vector(test.str_hvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_hvec, ref_hvec); nvbio::cuda::thrust_copy_vector(test.str_dvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_dvec, ref_hvec); { fprintf(stderr," testing Smith-Waterman scoring...\n"); aln::SimpleSmithWatermanScheme scoring; scoring.m_match = 2; scoring.m_mismatch = -1; scoring.m_deletion = -1; scoring.m_insertion = -1; test.full<BLOCKDIM,N,M>( "global", make_smith_waterman_aligner<aln::GLOBAL>( scoring ), "1M2D3M1D3M10D" ); test.full<BLOCKDIM,N,M>( "local", make_smith_waterman_aligner<aln::LOCAL>( scoring ), "4M1D3M" ); test.full<BLOCKDIM,N,M>( "semi-global", make_smith_waterman_aligner<aln::SEMI_GLOBAL>( scoring ), "4M1D3M" ); } { fprintf(stderr," testing Gotoh scoring...\n"); aln::SimpleGotohScheme scoring; scoring.m_match = 2; scoring.m_mismatch = -1; scoring.m_gap_open = -1; scoring.m_gap_ext = -1; test.full<BLOCKDIM,N,M>( "global", make_gotoh_aligner<aln::GLOBAL>( scoring ), "1M2D3M1D3M10D" ); test.full<BLOCKDIM,N,M>( "local", make_gotoh_aligner<aln::LOCAL>( scoring ), "4M1D3M" ); test.full<BLOCKDIM,N,M>( "semi-global", make_gotoh_aligner<aln::SEMI_GLOBAL>( scoring ), "4M1D3M" ); test.banded<BLOCKDIM, 7u, N, M>( "banded-semi-global", make_gotoh_aligner<aln::SEMI_GLOBAL>( scoring ), "4M1D3M" ); } } if (TEST_MASK & FUNCTIONAL) { fprintf(stderr," testing real banded Gotoh problem...\n"); NVBIO_VAR_UNUSED const uint32 BLOCKDIM = 128; NVBIO_VAR_UNUSED const uint32 BAND_LEN = 31; NVBIO_VAR_UNUSED const uint32 M = 150; NVBIO_VAR_UNUSED const uint32 N = 150 + 31; thrust::host_vector<uint8> str_hvec( M ); thrust::host_vector<uint8> ref_hvec( N ); uint8* str_hptr = nvbio::raw_pointer( str_hvec ); uint8* ref_hptr = nvbio::raw_pointer( ref_hvec ); string_to_dna("TTATGTAGGTGGTCTGGTTTTTGCCTTTTAAGCTTCTGCAAAAAACAACAACAAACTTGTGGTATTACACTGACTCTACAGATCAATTTGGGGACAACTTCCATGTGTTCCACCACCAATACTGAATCTTTCAATCGACTGACGTGGTAT", str_hptr); string_to_dna("ATCGGATTCTTTCTTACTTGTAGGTGGTCTGGTTTTTGCCTTTTAAGCTTCTGCAAAAAACAACAACAAACTTGTGGTATTACACTGACTCTACAGATCAATTTGGGGACAACTTCCATGTGTTCCACCACCAATACTGAATCTTTCAATCGACTGACGTGGTATCTCTCTCTCCATCTAT", ref_hptr); aln::SimpleGotohScheme scoring; scoring.m_match = 0; scoring.m_mismatch = -5; scoring.m_gap_open = -8; scoring.m_gap_ext = -3; SingleTest test; nvbio::cuda::thrust_copy_vector(test.str_hvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_hvec, ref_hvec); nvbio::cuda::thrust_copy_vector(test.str_dvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_dvec, ref_hvec); test.banded<BLOCKDIM, BAND_LEN, N, M>( "banded-semi-global", make_gotoh_aligner<aln::SEMI_GLOBAL>( scoring ), "147M2D3M" ); } // This code is for debugging purposes, useful to plug-in and analyze real problems coming from an app if (TEST_MASK & FUNCTIONAL) { fprintf(stderr," testing real full-matrix Gotoh problem...\n"); NVBIO_VAR_UNUSED const uint32 BLOCKDIM = 128; NVBIO_VAR_UNUSED const uint32 M = 144; NVBIO_VAR_UNUSED const uint32 N = 500; thrust::host_vector<uint8> str_hvec( M ); thrust::host_vector<uint8> ref_hvec( N ); uint8* str_hptr = nvbio::raw_pointer( str_hvec ); uint8* ref_hptr = nvbio::raw_pointer( ref_hvec ); const char* str_ascii = "TAGGAGGTAACATGTATGGAGCATTTACCATAGGCCAAGCACTGTTCTAAGAACTTCGGACATGTTATCTCACTTGTATAAGTACTTAGGTGCCTACAACATAAGCAGCACCTGGTAAATTAAGTATTGAAAAAATGCAGATCG"; const char* ref_ascii = "CAGCACTGACCGGTGAGCATAAACCCTGGGGATGCCCAGAGCTGGTACAGCCAGGAGCTCCAGAAGCGTGGGATTCTCAGAGGGAAGTGGAGCTCACTGCTCTACAGGTCCTATTCAAGTTAGAAAGTAAGATACAATGCACACAAAGCCAAATTGTC" 
"ATCATTCAGCTCCTATTACAGGGGAACTAAGAGCTGCATTGAAAATTATTTGCAAAGCTTGTAAGTGGTTCTGCCACTTATTAGCCGTGTGAACCTTAGCAAATTACCTAGCGTCTCTGAGTTTCAACTTCCTCATCTACAAAATAGAAATGATAATAAT" "AACCGCATCGCAAGAGTTGTTGGAAAAATGAAAATGAGGTATCATAGGAGGTAACATGTATGGAGCATTTACCATAGGCCAAGCACTGTTCTAAGAACTTCGGACATGTTATCTCACTTGTATAAGTACTTAGGTGCCTACAACATAAACAGCACCTGGT" "AAATTAAGTATTGAAAAAATGC"; string_to_dna( str_ascii, str_hptr ); string_to_dna( ref_ascii, ref_hptr ); aln::SimpleGotohScheme scoring; scoring.m_match = 0; scoring.m_mismatch = -5; scoring.m_gap_open = -8; scoring.m_gap_ext = -3; aln::GotohAligner<aln::SEMI_GLOBAL, aln::SimpleGotohScheme> aligner( scoring ); SingleTest test; nvbio::cuda::thrust_copy_vector(test.str_hvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_hvec, ref_hvec); nvbio::cuda::thrust_copy_vector(test.str_dvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_dvec, ref_hvec); test.full<BLOCKDIM,N,M>( "semi-global", aligner, "6I138M" ); } // This code is for debugging purposes, useful to plug-in and analyze real problems coming from an app if (TEST_MASK & FUNCTIONAL) { fprintf(stderr," testing real full-matrix Edit Distance problem...\n"); NVBIO_VAR_UNUSED const uint32 BLOCKDIM = 128; NVBIO_VAR_UNUSED const uint32 M = 144; NVBIO_VAR_UNUSED const uint32 N = 500; thrust::host_vector<uint8> str_hvec( M ); thrust::host_vector<uint8> ref_hvec( N ); uint8* str_hptr = nvbio::raw_pointer( str_hvec ); uint8* ref_hptr = nvbio::raw_pointer( ref_hvec ); const char* str_ascii = "TAGGAGGTAACATGTATGGAGCATTTACCATAGGCCAAGCACTGTTCTAAGAACTTCGGACATGTTATCTCACTTGTATAAGTACTTAGGTGCCTACAACATAAGCAGCACCTGGTAAATTAAGTATTGAAAAAATGCAGATCG"; const char* ref_ascii = "CAGCACTGACCGGTGAGCATAAACCCTGGGGATGCCCAGAGCTGGTACAGCCAGGAGCTCCAGAAGCGTGGGATTCTCAGAGGGAAGTGGAGCTCACTGCTCTACAGGTCCTATTCAAGTTAGAAAGTAAGATACAATGCACACAAAGCCAAATTGTC" "ATCATTCAGCTCCTATTACAGGGGAACTAAGAGCTGCATTGAAAATTATTTGCAAAGCTTGTAAGTGGTTCTGCCACTTATTAGCCGTGTGAACCTTAGCAAATTACCTAGCGTCTCTGAGTTTCAACTTCCTCATCTACAAAATAGAAATGATAATAAT" "AACCGCATCGCAAGAGTTGTTGGAAAAATGAAAATGAGGTATCATAGGAGGTAACATGTATGGAGCATTTACCATAGGCCAAGCACTGTTCTAAGAACTTCGGACATGTTATCTCACTTGTATAAGTACTTAGGTGCCTACAACATAAACAGCACCTGGT" "AAATTAAGTATTGAAAAAATGC"; string_to_dna( str_ascii, str_hptr ); string_to_dna( ref_ascii, ref_hptr ); aln::EditDistanceAligner<aln::SEMI_GLOBAL> aligner; SingleTest test; nvbio::cuda::thrust_copy_vector(test.str_hvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_hvec, ref_hvec); nvbio::cuda::thrust_copy_vector(test.str_dvec, str_hvec); nvbio::cuda::thrust_copy_vector(test.ref_dvec, ref_hvec); test.full<BLOCKDIM,N,M>( "semi-global", aligner, "1I1M2I1M3I136M" ); } // do a larger speed test of the Gotoh alignment if (TEST_MASK & (ED | SW | GOTOH)) { const uint32 N_TASKS = N_THREAD_TASKS; const uint32 M = 150; const uint32 N = 500; const uint32 M_WORDS = (M + 7) >> 3; const uint32 N_WORDS = (N + 15) >> 4; thrust::host_vector<uint32> str( M_WORDS * N_TASKS ); thrust::host_vector<uint32> ref( N_WORDS * N_TASKS ); LCG_random rand; fill_packed_stream<4u>( rand, 4u, M * N_TASKS, nvbio::raw_pointer( str ) ); fill_packed_stream<2u>( rand, 4u, N * N_TASKS, nvbio::raw_pointer( ref ) ); thrust::device_vector<uint32> str_dvec( str ); thrust::device_vector<uint32> ref_dvec( ref ); thrust::device_vector<int16> score_dvec( N_TASKS ); if (TEST_MASK & ED) { fprintf(stderr," testing Edit Distance scoring speed...\n"); fprintf(stderr," %15s : ", "global"); { batch_score_profile_all<N,M>( make_edit_distance_aligner<aln::GLOBAL>(), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," 
%15s : ", "semi-global"); { batch_score_profile_all<N,M>( make_edit_distance_aligner<aln::SEMI_GLOBAL>(), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "local"); { batch_score_profile_all<N,M>( make_edit_distance_aligner<aln::LOCAL>(), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } if (TEST_MASK & ED) { aln::SimpleSmithWatermanScheme scoring; scoring.m_match = 2; scoring.m_mismatch = -1; fprintf(stderr," testing Hamming Distance scoring speed...\n"); fprintf(stderr," %15s : ", "semi-global"); { batch_score_profile_all<N,M>( make_hamming_distance_aligner<aln::SEMI_GLOBAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "local"); { batch_score_profile_all<N,M>( make_hamming_distance_aligner<aln::LOCAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } if (TEST_MASK & SW) { aln::SimpleSmithWatermanScheme scoring; scoring.m_match = 2; scoring.m_mismatch = -1; scoring.m_deletion = -1; scoring.m_insertion = -1; fprintf(stderr," testing Smith-Waterman scoring speed...\n"); fprintf(stderr," %15s : ", "global"); { batch_score_profile_all<N,M>( make_smith_waterman_aligner<aln::GLOBAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "semi-global"); { batch_score_profile_all<N,M>( make_smith_waterman_aligner<aln::SEMI_GLOBAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "local"); { batch_score_profile_all<N,M>( make_smith_waterman_aligner<aln::LOCAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } if (TEST_MASK & GOTOH) { aln::SimpleGotohScheme scoring; scoring.m_match = 2; scoring.m_mismatch = -1; scoring.m_gap_open = -1; scoring.m_gap_ext = -1; fprintf(stderr," testing Gotoh scoring speed...\n"); fprintf(stderr," %15s : ", "global"); { batch_score_profile_all<N,M>( make_gotoh_aligner<aln::GLOBAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "semi-global"); { batch_score_profile_all<N,M>( make_gotoh_aligner<aln::SEMI_GLOBAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "local"); { batch_score_profile_all<N,M>( make_gotoh_aligner<aln::LOCAL>( scoring ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } } // do a larger speed test of the banded SW alignment if (TEST_MASK & (ED_BANDED | SW_BANDED | GOTOH_BANDED)) { const uint32 BAND_LEN = 15u; const uint32 N_TASKS = N_THREAD_TASKS; const uint32 M = 150; const uint32 N = M+BAND_LEN; const uint32 M_WORDS = (M + 7) >> 3; const uint32 N_WORDS = (N + 15) >> 4; thrust::host_vector<uint32> str( M_WORDS * N_TASKS ); thrust::host_vector<uint32> ref( N_WORDS * N_TASKS ); LCG_random rand; fill_packed_stream<4u>( rand, 4u, M * N_TASKS, nvbio::raw_pointer( str ) ); fill_packed_stream<2u>( rand, 4u, N * N_TASKS, nvbio::raw_pointer( ref ) ); thrust::device_vector<uint32> str_dvec( str ); thrust::device_vector<uint32> ref_dvec( ref ); thrust::device_vector<int16> score_dvec( N_TASKS ); if (TEST_MASK & ED_BANDED) { fprintf(stderr," testing banded Edit Distance scoring speed...\n"); fprintf(stderr," %15s : ", "global"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_edit_distance_aligner<aln::GLOBAL>(), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "semi-global"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_edit_distance_aligner<aln::SEMI_GLOBAL>(), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } 
fprintf(stderr," %15s : ", "local"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_edit_distance_aligner<aln::LOCAL>(), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } if (TEST_MASK & SW_BANDED) { fprintf(stderr," testing banded Smith-Waterman scoring speed...\n"); fprintf(stderr," %15s : ", "global"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_smith_waterman_aligner<aln::GLOBAL>( aln::SimpleSmithWatermanScheme(2,-1,-1,-1) ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "semi-global"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_smith_waterman_aligner<aln::SEMI_GLOBAL>( aln::SimpleSmithWatermanScheme(2,-1,-1,-1) ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "local"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_smith_waterman_aligner<aln::LOCAL>( aln::SimpleSmithWatermanScheme(2,-1,-1,-1) ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } if (TEST_MASK & GOTOH_BANDED) { fprintf(stderr," testing banded Gotoh scoring speed...\n"); fprintf(stderr," %15s : ", "global"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_gotoh_aligner<aln::GLOBAL>( aln::SimpleGotohScheme(2,-1,-1,-1) ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "semi-global"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_gotoh_aligner<aln::SEMI_GLOBAL>( aln::SimpleGotohScheme(2,-1,-1,-1) ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } fprintf(stderr," %15s : ", "local"); { batch_banded_score_profile_all<BAND_LEN,N,M>( make_gotoh_aligner<aln::LOCAL>( aln::SimpleGotohScheme(2,-1,-1,-1) ), n_tests, N_TASKS, str_dvec, ref_dvec, score_dvec ); } } } fprintf(stderr,"testing alignment... done\n"); } } // namespace sw } // namespace nvbio
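// ---------------------------------------------------------------------------
// Illustrative note (not part of the original source). The profiling routines
// above report throughput in GCUPS (giga cell updates per second): the number
// of dynamic-programming cells evaluated per second, divided by 1e9. For a
// batch of n_tasks alignments over M*N cells (or BAND_LEN*M for the banded
// variants) repeated n_tests times in `seconds`, that is
// n_tasks * cells_per_alignment * n_tests / seconds * 1e-9; the score-profile
// path uses the same formula with the average time per test. The helper below
// is a hypothetical standalone restatement of that formula.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>

// cells_per_alignment is M*N for full DP, or BAND_LEN*M for banded alignment.
inline float gcups(std::uint64_t n_tasks, std::uint64_t cells_per_alignment,
                   std::uint32_t n_tests, float seconds)
{
  return 1.0e-9f * float(n_tasks * cells_per_alignment) * (float(n_tests) / seconds);
}

int main()
{
  // e.g. 128*1024 tasks of a 150 x 500 full DP matrix, 1 repetition, 0.5 s
  std::printf("%.1f GCUPS\n", gcups(128u * 1024u, 150ull * 500ull, 1u, 0.5f));
  return 0;
}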
#include <fast_gicp/cuda/vector3_hash.cuh> namespace fast_gicp { namespace cuda { // point coord -> voxel coord conversion struct voxel_coord_kernel { voxel_coord_kernel(const thrust::device_ptr<const VoxelMapInfo>& info) : voxelmap_info_ptr(info) {} __host__ __device__ Eigen::Vector3i operator()(const Eigen::Vector3f& x) const { const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr); return calc_voxel_coord(x, info.voxel_resolution); } const thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr; }; // assign voxel indices to buckets struct voxel_bucket_assignment_kernel { voxel_bucket_assignment_kernel( const thrust::device_ptr<const VoxelMapInfo>& voxelmap_info, const thrust::device_vector<Eigen::Vector3i>& point_coords, thrust::device_vector<thrust::pair<int, int>>& index_buckets, thrust::device_vector<int>& voxels_failures) : voxelmap_info_ptr(voxelmap_info), point_coords_ptr(point_coords.data()), index_buckets_ptr(index_buckets.data()), voxels_failures_ptr(voxels_failures.data()) {} __device__ void operator()(int point_index) const { const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr); const Eigen::Vector3i* coords = thrust::raw_pointer_cast(point_coords_ptr); uint64_t hash = vector3i_hash(coords[point_index]); for (int i = 0; i < info.max_bucket_scan_count; i++) { uint64_t bucket_index = (hash + i) % info.num_buckets; thrust::pair<int, int>* index_bucket = thrust::raw_pointer_cast(index_buckets_ptr) + bucket_index; int old = atomicCAS(&index_bucket->first, -1, point_index); if (old < 0) { index_bucket->second = atomicAdd(thrust::raw_pointer_cast(voxels_failures_ptr), 1); return; } if (equal(coords[point_index], coords[old])) { return; } } atomicAdd(thrust::raw_pointer_cast(voxels_failures_ptr) + 1, 1); } thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr; thrust::device_ptr<const Eigen::Vector3i> point_coords_ptr; thrust::device_ptr<thrust::pair<int, int>> index_buckets_ptr; thrust::device_ptr<int> voxels_failures_ptr; }; // pair<point index, bucket index> to pair<voxel coord, bucket index> struct voxel_coord_select_kernel { voxel_coord_select_kernel(const thrust::device_vector<Eigen::Vector3i>& point_coords) : point_coords_ptr(point_coords.data()) {} __device__ thrust::pair<Eigen::Vector3i, int> operator()(const thrust::pair<int, int>& index_bucket) const { if (index_bucket.first < 0) { return thrust::make_pair(Eigen::Vector3i(0, 0, 0), -1); } return thrust::make_pair(thrust::raw_pointer_cast(point_coords_ptr)[index_bucket.first], index_bucket.second); } thrust::device_ptr<const Eigen::Vector3i> point_coords_ptr; }; // accumulate points and covs struct accumulate_points_kernel { accumulate_points_kernel( const thrust::device_ptr<VoxelMapInfo>& voxelmap_info_ptr, const thrust::device_vector<thrust::pair<Eigen::Vector3i, int>>& buckets, thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs) : voxelmap_info_ptr(voxelmap_info_ptr), buckets_ptr(buckets.data()), num_points_ptr(num_points.data()), voxel_means_ptr(voxel_means.data()), voxel_covs_ptr(voxel_covs.data()) {} __device__ void operator()(const thrust::tuple<Eigen::Vector3f, Eigen::Matrix3f>& input) const { const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr); const auto& mean = thrust::get<0>(input); const auto& cov = thrust::get<1>(input); const Eigen::Vector3i coord = calc_voxel_coord(mean, info.voxel_resolution); uint64_t hash = vector3i_hash(coord); for (int i = 0; i < info.max_bucket_scan_count; 
i++) { uint64_t bucket_index = (hash + i) % info.num_buckets; const thrust::pair<Eigen::Vector3i, int>& bucket = thrust::raw_pointer_cast(buckets_ptr)[bucket_index]; if (equal(bucket.first, coord)) { int& num_points = thrust::raw_pointer_cast(num_points_ptr)[bucket.second]; Eigen::Vector3f& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[bucket.second]; Eigen::Matrix3f& voxel_cov = thrust::raw_pointer_cast(voxel_covs_ptr)[bucket.second]; atomicAdd(&num_points, 1); for (int j = 0; j < 3; j++) { atomicAdd(voxel_mean.data() + j, mean[j]); } for (int j = 0; j < 9; j++) { atomicAdd(voxel_cov.data() + j, cov.data()[j]); } } } } __device__ void operator()(const Eigen::Vector3f& mean) const { const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr); const Eigen::Vector3i coord = calc_voxel_coord(mean, info.voxel_resolution); uint64_t hash = vector3i_hash(coord); for (int i = 0; i < info.max_bucket_scan_count; i++) { uint64_t bucket_index = (hash + i) % info.num_buckets; const thrust::pair<Eigen::Vector3i, int>& bucket = thrust::raw_pointer_cast(buckets_ptr)[bucket_index]; if (equal(bucket.first, coord)) { int& num_points = thrust::raw_pointer_cast(num_points_ptr)[bucket.second]; Eigen::Vector3f& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[bucket.second]; Eigen::Matrix3f& voxel_cov = thrust::raw_pointer_cast(voxel_covs_ptr)[bucket.second]; Eigen::Matrix3f cov = mean * mean.transpose(); atomicAdd(&num_points, 1); for (int j = 0; j < 3; j++) { atomicAdd(voxel_mean.data() + j, mean[j]); } for (int j = 0; j < 9; j++) { atomicAdd(voxel_cov.data() + j, cov.data()[j]); } } } } thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr; thrust::device_ptr<const thrust::pair<Eigen::Vector3i, int>> buckets_ptr; thrust::device_ptr<int> num_points_ptr; thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr; thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr; }; struct finalize_voxels_kernel { finalize_voxels_kernel(thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs) : num_points_ptr(num_points.data()), voxel_means_ptr(voxel_means.data()), voxel_covs_ptr(voxel_covs.data()) {} __host__ __device__ void operator()(int i) const { int num_points = thrust::raw_pointer_cast(num_points_ptr)[i]; auto& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[i]; auto& voxel_covs = thrust::raw_pointer_cast(voxel_covs_ptr)[i]; voxel_mean /= num_points; voxel_covs /= num_points; } thrust::device_ptr<int> num_points_ptr; thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr; thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr; }; struct ndt_finalize_voxels_kernel { ndt_finalize_voxels_kernel(thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs) : num_points_ptr(num_points.data()), voxel_means_ptr(voxel_means.data()), voxel_covs_ptr(voxel_covs.data()) {} __host__ __device__ void operator()(int i) const { int num_points = thrust::raw_pointer_cast(num_points_ptr)[i]; auto& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[i]; auto& voxel_covs = thrust::raw_pointer_cast(voxel_covs_ptr)[i]; Eigen::Vector3f sum_pts = voxel_mean; voxel_mean /= num_points; voxel_covs = (voxel_covs - voxel_mean * sum_pts.transpose()) / num_points; } thrust::device_ptr<int> num_points_ptr; thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr; thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr; }; GaussianVoxelMap::GaussianVoxelMap(float resolution, int 
init_num_buckets, int max_bucket_scan_count) : init_num_buckets(init_num_buckets) {
  voxelmap_info.num_voxels = 0;
  voxelmap_info.num_buckets = init_num_buckets;
  voxelmap_info.max_bucket_scan_count = max_bucket_scan_count;
  voxelmap_info.voxel_resolution = resolution;
  voxelmap_info_ptr.resize(1);
  voxelmap_info_ptr[0] = voxelmap_info;
}

void GaussianVoxelMap::create_voxelmap(const thrust::device_vector<Eigen::Vector3f>& points) {
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
  create_bucket_table(stream, points);

  num_points.resize(voxelmap_info.num_voxels);
  voxel_means.resize(voxelmap_info.num_voxels);
  voxel_covs.resize(voxelmap_info.num_voxels);

  thrust::fill(thrust::cuda::par.on(stream), num_points.begin(), num_points.end(), 0);
  thrust::fill(thrust::cuda::par.on(stream), voxel_means.begin(), voxel_means.end(), Eigen::Vector3f::Zero().eval());
  thrust::fill(thrust::cuda::par.on(stream), voxel_covs.begin(), voxel_covs.end(), Eigen::Matrix3f::Zero().eval());

  thrust::for_each(thrust::cuda::par.on(stream), points.begin(), points.end(), accumulate_points_kernel(voxelmap_info_ptr.data(), buckets, num_points, voxel_means, voxel_covs));
  thrust::for_each(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(voxelmap_info.num_voxels), ndt_finalize_voxels_kernel(num_points, voxel_means, voxel_covs));

  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
}

void GaussianVoxelMap::create_voxelmap(const thrust::device_vector<Eigen::Vector3f>& points, const thrust::device_vector<Eigen::Matrix3f>& covariances) {
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
  create_bucket_table(stream, points);

  num_points.resize(voxelmap_info.num_voxels);
  voxel_means.resize(voxelmap_info.num_voxels);
  voxel_covs.resize(voxelmap_info.num_voxels);

  thrust::fill(thrust::cuda::par.on(stream), num_points.begin(), num_points.end(), 0);
  thrust::fill(thrust::cuda::par.on(stream), voxel_means.begin(), voxel_means.end(), Eigen::Vector3f::Zero().eval());
  thrust::fill(thrust::cuda::par.on(stream), voxel_covs.begin(), voxel_covs.end(), Eigen::Matrix3f::Zero().eval());

  thrust::for_each(
    thrust::cuda::par.on(stream),
    thrust::make_zip_iterator(thrust::make_tuple(points.begin(), covariances.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(points.end(), covariances.end())),
    accumulate_points_kernel(voxelmap_info_ptr.data(), buckets, num_points, voxel_means, voxel_covs));
  thrust::for_each(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(voxelmap_info.num_voxels), finalize_voxels_kernel(num_points, voxel_means, voxel_covs));

  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
}

void GaussianVoxelMap::create_bucket_table(cudaStream_t stream, const thrust::device_vector<Eigen::Vector3f>& points) {
  thrust::device_vector<Eigen::Vector3i> coords(points.size());
  thrust::transform(thrust::cuda::par.on(stream), points.begin(), points.end(), coords.begin(), voxel_coord_kernel(voxelmap_info_ptr.data()));

  thrust::device_vector<thrust::pair<int, int>> index_buckets;
  thrust::device_vector<int> voxels_failures(2, 0);

  // Grow the bucket table, doubling its size, and stop early once fewer than
  // 1% of the points fail to be assigned to a bucket.
  for (int num_buckets = init_num_buckets; num_buckets < init_num_buckets * 4; num_buckets *= 2) {
    voxelmap_info.num_buckets = num_buckets;
    voxelmap_info_ptr[0] = voxelmap_info;

    index_buckets.resize(num_buckets);
    thrust::fill(thrust::cuda::par.on(stream), index_buckets.begin(), index_buckets.end(), thrust::make_pair(-1,
-1)); thrust::fill(thrust::cuda::par.on(stream), voxels_failures.begin(), voxels_failures.end(), 0); thrust::for_each( thrust::cuda::par.on(stream), thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(points.size()), voxel_bucket_assignment_kernel(voxelmap_info_ptr.data(), coords, index_buckets, voxels_failures)); thrust::host_vector<int> h_voxels_failures = voxels_failures; if (static_cast<double>(h_voxels_failures[1]) / points.size() < 0.01) { voxelmap_info.num_voxels = h_voxels_failures[0]; voxelmap_info_ptr[0] = voxelmap_info; break; } } buckets.resize(index_buckets.size()); thrust::transform(thrust::cuda::par.on(stream), index_buckets.begin(), index_buckets.end(), buckets.begin(), voxel_coord_select_kernel(coords)); } } // namespace cuda } // namespace fast_gicp
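// ----------------------------------------------------------------------------
// Editor's note (not part of the original file): the voxel kernels above use a
// two-pass scheme -- atomically accumulate per-voxel sums of points and of
// outer products, then divide in a finalize pass. The host-only sketch below
// reproduces that math for a single voxel to show why
// ndt_finalize_voxels_kernel computes cov = (sum(p p^T) - mean * sum(p)^T) / N,
// i.e. the sample covariance E[p p^T] - mean mean^T. Names are illustrative.
// ----------------------------------------------------------------------------
#include <iostream>
#include <Eigen/Core>

int voxel_accumulate_finalize_demo() {
  const Eigen::Vector3f pts[3] = {Eigen::Vector3f(1, 0, 0), Eigen::Vector3f(0, 1, 0), Eigen::Vector3f(0, 0, 1)};

  int num_points = 0;
  Eigen::Vector3f sum_pts = Eigen::Vector3f::Zero();    // what voxel_means accumulates
  Eigen::Matrix3f sum_outer = Eigen::Matrix3f::Zero();  // what voxel_covs accumulates

  // accumulate pass (done per component with atomicAdd on the device)
  for (const auto& p : pts) {
    num_points += 1;
    sum_pts += p;
    sum_outer += p * p.transpose();
  }

  // finalize pass, mirroring ndt_finalize_voxels_kernel
  const Eigen::Vector3f mean = sum_pts / num_points;
  const Eigen::Matrix3f cov = (sum_outer - mean * sum_pts.transpose()) / num_points;

  std::cout << "mean:\n" << mean << "\ncov:\n" << cov << std::endl;
  return num_points;
}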
the_stack
using namespace torch; #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <iostream> // Cuda tensor accessor definitions // restrict pointer traits piroritize speed over memory consumption #define TensorAcc4R PackedTensorAccessor32<scalar_t,4,RestrictPtrTraits> #define TensorAcc5R PackedTensorAccessor32<scalar_t,5,RestrictPtrTraits> #define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W) #define THREADS_FORWARD 32 #define THREADS_BACKWARD 5 namespace corr { template <typename scalar_t> __global__ void correlation_cuda_forward_kernel( const TensorAcc4R rInput1, const TensorAcc4R rInput2, TensorAcc5R output, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilationH, int dilationW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int iH = rInput1.size(1); const int iW = rInput1.size(2); const int C = rInput1.size(3); const int n = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int thread = threadIdx.x; const int start_i = -padH + h * dH; const int start_j = -padW + w * dW; const int patchRadH = dilation_patchH * (patchH - 1) / 2; const int patchRadW = dilation_patchW * (patchW - 1) / 2; __shared__ scalar_t prod_sum[THREADS_FORWARD]; for(int ph = 0; ph < patchH; ++ph){ int ph_dilated = ph * dilation_patchH - patchRadH; for(int pw = 0; pw < patchW; ++pw){ int pw_dilated = pw * dilation_patchW - patchRadW; prod_sum[thread] = 0; for (int i=0; i<kH; ++i){ int i1 = start_i + i * dilationH; int i2 = i1 + ph_dilated; if WITHIN_BOUNDS(i1, i2, iH, iH){ for (int j=0; j<kW; ++j){ int j1 = start_j + j * dilationW; int j2 = j1 + pw_dilated; if WITHIN_BOUNDS(j1, j2, iW, iW){ for (int c=thread; c<C; c += THREADS_FORWARD){ scalar_t v1 = rInput1[n][i1][j1][c]; scalar_t v2 = rInput2[n][i2][j2][c]; prod_sum[thread] += v1 * v2; } } } } } // accumulate __syncthreads(); if (thread == 0) { scalar_t reduce_sum = 0; for (int index = 0; index < THREADS_FORWARD; ++index) { reduce_sum += prod_sum[index]; } output[n][ph][pw][h][w] = reduce_sum; } } } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input1( const TensorAcc5R gradOutput, const TensorAcc4R input2, TensorAcc4R gradInput1, const int kH, const int kW, const int patchH, const int patchW, const int padH, const int padW, const int dilationH, const int dilationW, const int dilation_patchH, const int dilation_patchW, const int dH, const int dW, const int batch) { const int iH = input2.size(2); const int iW = input2.size(3); const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; const int h_2 = h + padH; const int w_2 = w + padW; const int min_h = h_2 - kH * dilationH; const int min_w = w_2 - kW * dilationW; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h + dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w + dilation_patchW * (pw - patchRadW); if (WITHIN_BOUNDS(i1, j1, iH, iW)){ scalar_t val = input2[n][c][i1][j1]; for(int h_3 = h_2; h_3 > min_h; h_3 -= dilationH) { int i2 = (h_3)/dH; if (i2 * dH != h_3) continue; for(int w_3 = w_2; w_3 > min_w; w_3 -= dilationW) { int j2 = (w_3) / dW; if(j2 * dW != w_3) continue; if 
WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput1[n][c][h][w] = reduce_sum; } } template <typename scalar_t> __global__ void correlation_cuda_backward_kernel_input2( const TensorAcc5R gradOutput, const TensorAcc4R input1, TensorAcc4R gradInput2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilationH, int dilationW, int dilation_patchH, int dilation_patchW, int dH, int dW, int batch) { const int iH = input1.size(2); const int iW = input1.size(3); const int patchRadH = (patchH - 1) / 2; const int patchRadW = (patchW - 1) / 2; const int H = gradOutput.size(3); const int W = gradOutput.size(4); const int dilatedKH = kH * dilationH; const int dilatedKW = kW * dilationW; const int n = batch; const int c = blockIdx.x; const int h = blockIdx.y; const int w = blockIdx.z; const int ph_off = threadIdx.x; const int pw_off = threadIdx.y; __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; prod_sum[ph_off][pw_off] = 0; for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { int i1 = h - dilation_patchH * (ph - patchRadH); for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { int j1 = w - dilation_patchW * (pw - patchRadW); if WITHIN_BOUNDS(i1, j1, iH, iW) { scalar_t val = input1[n][c][i1][j1]; const int h_2 = i1 + padH; const int w_2 = j1 + padW; const int min_h = h_2 - dilatedKH; const int min_w = w_2 - dilatedKW; for(int h_3 = h_2; h_3 > min_h; h_3 -= dilationH) { int i2 = (h_3)/dH; if (i2 * dH != h_3) continue; for(int w_3 = w_2; w_3 > min_w; w_3 -= dilationW) { int j2 = (w_3) / dW; if(j2 * dW != w_3) continue; if WITHIN_BOUNDS(i2, j2, H, W) { prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val; } } } } } } __syncthreads(); if (ph_off == 0 && pw_off == 0){ scalar_t reduce_sum =0; for (int ph = 0; ph < THREADS_BACKWARD; ++ph){ for (int pw = 0; pw < THREADS_BACKWARD; ++pw){ reduce_sum += prod_sum[ph][pw]; } } gradInput2[n][c][h][w] = reduce_sum; } } } // namsepace corr torch::Tensor correlation_cuda_forward( torch::Tensor input1, torch::Tensor input2, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilationH, int dilationW, int dilation_patchH, int dilation_patchW, int dH, int dW) { const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const int dilatedKH = (kH - 1) * dilationH + 1; const int dilatedKW = (kW - 1) * dilationW + 1; const auto oH = (iH + 2 * padH - dilatedKH) / dH + 1; const auto oW = (iW + 2 * padW - dilatedKW) / dW + 1; auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options()); auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous(); auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous(); const int threads = THREADS_FORWARD; const dim3 blocks(batch_size, oH, oW); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "correlation_forward_cuda", ([&] { TensorAcc4R trInput1_acc = trInput1.packed_accessor32<scalar_t,4,RestrictPtrTraits>(); TensorAcc4R trInput2_acc = trInput2.packed_accessor32<scalar_t,4,RestrictPtrTraits>(); TensorAcc5R output_acc = output.packed_accessor32<scalar_t,5,RestrictPtrTraits>(); corr::correlation_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( trInput1_acc, trInput2_acc, output_acc, kH, kW, patchH, patchW, padH, padW, dilationH, dilationW, 
dilation_patchH, dilation_patchW, dH, dW); })); return output; } std::vector<torch::Tensor> correlation_cuda_backward( torch::Tensor input1, torch::Tensor input2, torch::Tensor gradOutput, int kH, int kW, int patchH, int patchW, int padH, int padW, int dilationH, int dilationW, int dilation_patchH, int dilation_patchW, int dH, int dW) { auto gradInput1 = torch::zeros_like(input1); auto gradInput2 = torch::zeros_like(input2); const int batch_size = input1.size(0); const int iH = input1.size(2); const int iW = input1.size(3); const int C = input1.size(1); const dim3 blocks(C, iH, iW); const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "correlation_backward_cuda", ([&] { TensorAcc4R input1_acc = input1.packed_accessor32<scalar_t,4,RestrictPtrTraits>(); TensorAcc4R input2_acc = input2.packed_accessor32<scalar_t,4,RestrictPtrTraits>(); TensorAcc4R gradInput1_acc = gradInput1.packed_accessor32<scalar_t,4,RestrictPtrTraits>(); TensorAcc4R gradInput2_acc = gradInput2.packed_accessor32<scalar_t,4,RestrictPtrTraits>(); TensorAcc5R gradOutput_acc = gradOutput.packed_accessor32<scalar_t,5,RestrictPtrTraits>(); for (int n = 0; n < batch_size; ++n){ corr::correlation_cuda_backward_kernel_input1<scalar_t><<<blocks, threads>>>( gradOutput_acc, input2_acc, gradInput1_acc, kH, kW, patchH, patchW, padH, padW, dilationH, dilationW, dilation_patchH, dilation_patchW, dH, dW, n); } for (int n = 0; n < batch_size; ++n){ corr::correlation_cuda_backward_kernel_input2<scalar_t><<<blocks, threads>>>( gradOutput_acc, input1_acc, gradInput2_acc, kH, kW, patchH, patchW, padH, padW, dilationH, dilationW, dilation_patchH, dilation_patchW, dH, dW, n); } })); return {gradInput1, gradInput2}; }
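// ----------------------------------------------------------------------------
// Editor's note (not part of the original extension): a minimal usage sketch
// for correlation_cuda_forward defined above. Shapes and the demo function
// name are illustrative and error checking is elided. The output has shape
//   [B, patchH, patchW, oH, oW],  oH = (iH + 2*padH - ((kH-1)*dilationH + 1)) / dH + 1
// and symmetrically for oW.
// ----------------------------------------------------------------------------
#include <torch/torch.h>  // only needed if this sketch is read stand-alone

bool correlation_forward_demo() {
  auto opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
  auto a = torch::randn({2, 16, 32, 32}, opts);
  auto b = torch::randn({2, 16, 32, 32}, opts);

  // 1x1 kernel, 9x9 displacement patch, stride 1, no padding or dilation
  auto out = correlation_cuda_forward(a, b,
                                      /*kH=*/1, /*kW=*/1,
                                      /*patchH=*/9, /*patchW=*/9,
                                      /*padH=*/0, /*padW=*/0,
                                      /*dilationH=*/1, /*dilationW=*/1,
                                      /*dilation_patchH=*/1, /*dilation_patchW=*/1,
                                      /*dH=*/1, /*dW=*/1);

  // expected shape: [2, 9, 9, 32, 32]
  return out.size(1) == 9 && out.size(2) == 9 && out.size(3) == 32 && out.size(4) == 32;
}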
the_stack
// redistribute between group-cyclic distributions with different cycles // c0 <= c1, n-dimensional version __global__ void gpu_b2c_pack_kernel_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, const cuda_cpx_t *local_data, cuda_cpx_t *send_data ) { HIP_DYNAMIC_SHARED( int, nidx_shared) // index of local component unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // do not read beyond end of array if (idx >= local_size) return; // determine local coordinate tuple int *nidx = &nidx_shared[threadIdx.x*ndim]; int tmp = idx; for (int i = ndim-1; i >= 0; --i) { int embed = d_embed[i]; nidx[i] = tmp % embed; tmp /= embed; int l = d_length[i]; if (nidx[i] >= l) return; } // determine index of packet and index in packet int lidx = 0; int size_tot = 1; tmp = 1; int packet_idx = 0; for (int i = 0; i <ndim; ++i) { int c0 = d_c0[i]; int c1 = d_c1[i]; int ratio = c1/c0; int l = d_length[i]; int size = ((l/ratio > 1) ? (l/ratio) : 1); lidx *= size; lidx += nidx[i]/ratio; // index in packet in column-major int num_packets = l/size; if (!row_m) { packet_idx *= num_packets; packet_idx += (nidx[i] % ratio); } else { packet_idx += tmp*(nidx[i] % ratio); tmp *= num_packets; } size_tot *= size; } send_data[packet_idx*size_tot+lidx] = local_data[idx]; } // redistribute between group-cyclic distributions with different cycles // c0 <= c1, n-dimensional version, unpack kernel __global__ void gpu_b2c_unpack_kernel_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, const cuda_cpx_t *recv_data, cuda_cpx_t *local_data ) { // index of local component unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // do not read beyond end of array if (idx >= local_size) return; int packet_idx = 0; int tmp = 1; int tmp_packet = 1; // index in packet int lidx = 0; int size = 1; // packet size int j = idx; for (int i = ndim-1; i>= 0; --i) { int l = d_length[i]; int c0 = d_c0[i]; int c1 = d_c1[i]; int ratio = c1/c0; int embed = d_embed[i]; // determine local index in current dimension int j1 = j % embed; j /= embed; if (j1 >= l) return; // do not fill outer embedding layer // determine packet idx along current dimension // and index in packet int lpidx; int num_packets; int lidxi; // index in packet int sizei; if (l >= ratio) { num_packets = ratio; sizei = l/ratio; lidxi = j1 % sizei; lpidx = j1 / sizei; } else { lpidx = j1; num_packets = l; sizei = 1; lidxi = 0; } if (!row_m) { /* packets in column major order */ packet_idx += tmp*lpidx; tmp *= num_packets; } else { /* packets in row-major order */ packet_idx *= num_packets; packet_idx += lpidx; } // inside packet: column-major lidx += tmp_packet*lidxi; tmp_packet *= sizei; size *= sizei; } local_data[idx] = recv_data[packet_idx*size + lidx]; } void gpu_b2c_pack_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, const cuda_cpx_t *local_data, cuda_cpx_t *send_data ) { unsigned int block_size =512; unsigned int n_blocks = local_size/block_size; if (local_size % block_size) n_blocks++; int shared_size = int(ndim*block_size*sizeof(int)); hipLaunchKernelGGL(gpu_b2c_pack_kernel_nd, dim3(n_blocks), dim3(block_size), shared_size, 0, local_size, d_c0, d_c1, ndim, d_embed, d_length, row_m, local_data, send_data); } void gpu_b2c_unpack_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, const cuda_cpx_t *recv_data, cuda_cpx_t *local_data ) { unsigned int block_size =512; unsigned int 
n_blocks = local_size/block_size; if (local_size % block_size) n_blocks++; hipLaunchKernelGGL(gpu_b2c_unpack_kernel_nd, dim3(n_blocks), dim3(block_size), 0, 0, local_size, d_c0, d_c1, ndim, d_embed, d_length, row_m, recv_data, local_data); } __device__ unsigned int bit_reverse(unsigned int in, unsigned int pow_of_two) { unsigned int rev = 0; unsigned int pow = pow_of_two; for (unsigned int i = 0; pow > 1; i++) { pow /= 2; rev *= 2; rev += ((in & (1 << i)) ? 1 : 0); } return rev; } // redistribute between group-cyclic distributions with different cycles // c0 >= c1, n-dimensional version __global__ void gpu_c2b_pack_kernel_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, int *d_pdim, int *d_rev_j1, int *d_rev, const cuda_cpx_t *local_data, cuda_cpx_t *send_data ) { HIP_DYNAMIC_SHARED( int, nidx_shared) // index of local component unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // do not read beyond end of array if (idx >= local_size) return; // determine local coordinate tuple int *nidx = &nidx_shared[threadIdx.x*ndim]; int tmp = idx; for (int i = ndim-1; i >= 0; --i) { int embed = d_embed[i]; nidx[i] = tmp % embed; tmp /= embed; int l = d_length[i]; if (nidx[i] >= l) return; } // determine index of packet and index in packet int lidx = 0; int size_tot = 1; tmp = 1; int packet_idx = 0; for (int i = 0; i <ndim; ++i) { int c0 = d_c0[i]; int c1 = d_c1[i]; int ratio = c0/c1; int l = d_length[i]; int size; int j1 = nidx[i]; int lpidx; int num_packets; int rev_j1 = d_rev_j1[i]; int rev_global = d_rev[i]; if (rev_j1) j1= bit_reverse(j1, l); if (! rev_global) { size = ((l/ratio > 1) ? (l/ratio) : 1); lidx *= size; lidx += (j1%size); // index in packet in column-major num_packets = l/size; lpidx = j1/size; } else { // global bitreversal int p = d_pdim[i]; if (p/c1 > c0) { size = ((p/c1 <= l*c0) ? (l*c0*c1/p) : 1); num_packets = l/size; // inside packet: column major lidx *= size; int lidxi = bit_reverse(j1/num_packets,size); lidx += lidxi; lpidx = bit_reverse(j1 %num_packets,num_packets); } else { size = ((l*p/c1 >= c0) ? 
l*p/c1/c0 : 1); num_packets = l/size; int lidxi = bit_reverse(j1%size,size); lidx *= size; lidx += lidxi; lpidx = bit_reverse(j1 / size,num_packets); } } if (!row_m) { packet_idx *= num_packets; packet_idx += lpidx; } else { packet_idx += tmp*lpidx; tmp *= num_packets; } size_tot *= size; } send_data[packet_idx*size_tot+lidx] = local_data[idx]; } // redistribute between group-cyclic distributions with different cycles // c0 >= c1, n-dimensional version, unpack kernel __global__ void gpu_c2b_unpack_kernel_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, int *d_pdim, int *d_rev, int *d_rev_partial, const cuda_cpx_t *recv_data, cuda_cpx_t *local_data ) { // index of local component unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // do not read beyond end of array if (idx >= local_size) return; int packet_idx = 0; int tmp = 1; int tmp_packet = 1; // index in packet int lidx = 0; int size = 1; // packet size int j = idx; for (int i = ndim-1; i>= 0; --i) { int l = d_length[i]; int c0 = d_c0[i]; int c1 = d_c1[i]; int ratio = c0/c1; int embed = d_embed[i]; // determine local index in current dimension int j1 = j % embed; j /= embed; if (j1 >= l) return; // do not fill outer embedding layer // determine packet idx along current dimension // and index in packet int lpidx; int num_packets; int lidxi; // index in packet int sizei; int rev = d_rev[i]; if (!rev) { if (l >= ratio) { num_packets = ratio; sizei = l/ratio; lidxi = j1 /ratio; lpidx = (j1 % ratio); } else { lpidx = j1; num_packets = l; sizei = 1; lidxi = 0; } } else { // global bit reversal int p = d_pdim[i]; if (c0 < p/c1) { // this section is usually not called during a dfft sizei = ((p/c1 <= l*c0) ? (l*c0*c1/p) : 1); num_packets = l/sizei; lidxi = j1 / num_packets; lpidx = bit_reverse(j1 % num_packets,num_packets); } else { sizei = ((l*p/c1 >= c0) ? 
l*p/c1/c0 : 1); num_packets = l/sizei; lidxi = j1 % sizei; int rev_partial = d_rev_partial[i]; if (rev_partial) lpidx = j1 / sizei; else lpidx = bit_reverse(j1 / sizei, num_packets); } } if (!row_m) { /* packets in column major order */ packet_idx += tmp*lpidx; tmp *= num_packets; } else { /* packets in row-major order */ packet_idx *= num_packets; packet_idx += lpidx; } // inside packet: column-major lidx += tmp_packet*lidxi; tmp_packet *= sizei; size *= sizei; } local_data[idx] = recv_data[packet_idx*size + lidx]; } void gpu_c2b_pack_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, int *d_pdim, int *d_rev_j1, int *d_rev, const cuda_cpx_t *local_data, cuda_cpx_t *send_data ) { unsigned int block_size =512; unsigned int n_blocks = local_size/block_size; if (local_size % block_size) n_blocks++; int shared_size = int(ndim*block_size*sizeof(int)); hipLaunchKernelGGL(gpu_c2b_pack_kernel_nd, dim3(n_blocks), dim3(block_size), shared_size, 0, local_size, d_c0, d_c1, ndim, d_embed, d_length, row_m, d_pdim, d_rev_j1, d_rev, local_data, send_data); } void gpu_c2b_unpack_nd(unsigned int local_size, int *d_c0, int *d_c1, int ndim, int *d_embed, int *d_length, int row_m, int *d_pdim, int *d_rev, int *d_rev_partial, const cuda_cpx_t *recv_data, cuda_cpx_t *local_data ) { unsigned int block_size =512; unsigned int n_blocks = local_size/block_size; if (local_size % block_size) n_blocks++; hipLaunchKernelGGL(gpu_c2b_unpack_kernel_nd, dim3(n_blocks), dim3(block_size), 0, 0, local_size, d_c0, d_c1, ndim, d_embed, d_length, row_m, d_pdim, d_rev, d_rev_partial, recv_data, local_data); } // redistribute between group-cyclic distributions with different cycles // c0 <= c1 __global__ void gpu_b2c_pack_kernel(unsigned int local_size, unsigned int ratio, unsigned int size, unsigned int npackets, unsigned int stride, cuda_cpx_t *local_data, cuda_cpx_t *send_data ) { // index of local component unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // do not read beyond end of array if (idx >= local_size) return; unsigned int j = (idx/stride) % npackets; // packet number unsigned int r = (idx/stride - j)/ratio; // index in packet unsigned int offset = j*size; send_data[offset + r*stride + (idx%stride)] = local_data[idx]; } void gpu_b2c_pack(unsigned int local_size, unsigned int ratio, unsigned int size, unsigned int npackets, unsigned int stride, cuda_cpx_t *local_data, cuda_cpx_t *send_data) { unsigned int block_size =512; unsigned int n_blocks = local_size/block_size; if (local_size % block_size) n_blocks++; hipLaunchKernelGGL(gpu_b2c_pack_kernel, dim3(n_blocks), dim3(block_size), 0, 0, local_size, ratio, size, npackets, stride, local_data, send_data); } // apply twiddle factors __global__ void gpu_twiddle_kernel(unsigned int local_size, const unsigned int length, const unsigned int stride, float alpha, cuda_cpx_t *d_in, cuda_cpx_t *d_out, int inv) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= local_size) return; int j = idx/stride; if (j >= length) return; float theta = -2.0f * float(M_PI) * alpha/(float) length; cuda_cpx_t w; CUDA_RE(w) = cosf((float)j*theta); CUDA_IM(w) = sinf((float)j*theta); cuda_cpx_t in = d_in[idx]; cuda_cpx_t out; float sign = inv ? 
-1.0f : 1.0f; w.y *= sign; CUDA_RE(out) = CUDA_RE(in) * CUDA_RE(w) - CUDA_IM(in) * CUDA_IM(w); CUDA_IM(out) = CUDA_RE(in) * CUDA_IM(w) + CUDA_IM(in) * CUDA_RE(w); d_out[idx] = out; } // apply twiddle factors (n-dimensional version) __global__ void gpu_twiddle_kernel_nd(unsigned int local_size, int ndim, int *d_embed, int *d_length, float *d_alpha, cuda_cpx_t *d_in, cuda_cpx_t *d_out, int inv) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= local_size) return; // complex-multiply twiddle factors for all dimensions int tmp = idx; float theta = 0.0f; for (int i = ndim-1; i>=0;--i) { int embed = d_embed[i]; int length = d_length[i]; int j = tmp % embed; if (j >= length) return; tmp /= embed; float alpha = d_alpha[i]; theta -= (float)j*2.0f * float(M_PI) * alpha/(float) length; } cuda_cpx_t w; CUDA_RE(w) = cosf(theta); CUDA_IM(w) = sinf(theta); cuda_cpx_t out; float sign = inv ? -1.0f : 1.0f; w.y *= sign; cuda_cpx_t in = d_in[idx]; CUDA_RE(out) = CUDA_RE(in) * CUDA_RE(w) - CUDA_IM(in) * CUDA_IM(w); CUDA_IM(out) = CUDA_RE(in) * CUDA_IM(w) + CUDA_IM(in) * CUDA_RE(w); d_out[idx] = out; } void gpu_twiddle(unsigned int local_size, const unsigned int length, const unsigned int stride, float alpha, cuda_cpx_t *d_in, cuda_cpx_t *d_out, int inv) { unsigned int block_size =512; unsigned int n_block = local_size/block_size; if (local_size % block_size ) n_block++; hipLaunchKernelGGL(gpu_twiddle_kernel, dim3(n_block), dim3(block_size), 0, 0, local_size, length, stride, alpha, d_in, d_out, inv); } void gpu_twiddle_nd(unsigned int local_size, int ndim, int *d_embed, int *d_length, float *d_alpha, cuda_cpx_t *d_in, cuda_cpx_t *d_out, int inv) { unsigned int block_size =512; unsigned int n_block = local_size/block_size; if (local_size % block_size ) n_block++; hipLaunchKernelGGL(gpu_twiddle_kernel_nd, dim3(n_block), dim3(block_size), 0, 0, local_size, ndim, d_embed, d_length, d_alpha, d_in, d_out, inv); } __global__ void gpu_c2b_unpack_kernel(const unsigned int local_size, const unsigned int length, const unsigned int c0, const unsigned int c1, const unsigned int size, const unsigned int j0, const unsigned int stride, int rev, cuda_cpx_t *d_local_data, const cuda_cpx_t *d_scratch) { unsigned int idx = blockDim.x*blockIdx.x+threadIdx.x; if (idx >= local_size) return; // source processor int r = idx/size; // packet index int j1, j1_offset, del; int j0_remote = j0 + r*c1; if (rev && (length >= c0)) { j1_offset = j0_remote*length/c0; del = 1; } else { j1_offset = j0_remote/c1; del = c0/c1; } // local index j1 = j1_offset + ((idx%size)/stride)*del; d_local_data[j1*stride+idx%stride] = d_scratch[idx]; } void gpu_c2b_unpack(const unsigned int local_size, const unsigned int length, const unsigned int c0, const unsigned int c1, const unsigned int size, const unsigned int j0, const unsigned int stride, const int rev, cuda_cpx_t *d_local_data, const cuda_cpx_t *d_scratch) { unsigned int block_size =512; unsigned int n_block = local_size/block_size; if (local_size % block_size ) n_block++; hipLaunchKernelGGL(gpu_c2b_unpack_kernel, dim3(n_block), dim3(block_size), 0, 0, local_size, length, c0, c1, size, j0, stride, rev, d_local_data, d_scratch); } __global__ void gpu_transpose_kernel(const unsigned int size, const unsigned int length, const unsigned int stride, const unsigned int embed, const cuda_cpx_t *in, cuda_cpx_t *out) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx >= size) return; int i = idx / stride; if (i >= length) return; int j = idx % stride; out[j*embed + i] = in[idx]; } #define 
TILE_DIM 16 #define BLOCK_ROWS 16 __global__ void transpose_sdk(cuda_cpx_t *odata, const cuda_cpx_t *idata, int width, int height, int embed) { __shared__ cuda_cpx_t tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; int xIndex_new = blockIdx.y * TILE_DIM + threadIdx.x; int yIndex_new = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex_new + (yIndex_new)*embed; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if ((xIndex < width) && ((i+yIndex) <height)) tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex_new< height && ((yIndex_new+i) <width)) odata[index_out+i*embed] = tile[threadIdx.x][threadIdx.y+i]; } } void gpu_transpose(const unsigned int size, const unsigned int length, const unsigned int stride, const unsigned int embed, const cuda_cpx_t *in, cuda_cpx_t *out) { unsigned int block_size =512; unsigned int n_block = size/block_size; if (size % block_size ) n_block++; // hipLaunchKernelGGL(gpu_transpose_kernel, dim3(n_block), dim3(block_size), 0, 0, size, length, stride, embed, in, out); int size_x = stride; int size_y = length; int nblocks_x = size_x/TILE_DIM; if (size_x%TILE_DIM) nblocks_x++; int nblocks_y = size_y/TILE_DIM; if (size_y%TILE_DIM) nblocks_y++; dim3 grid(nblocks_x, nblocks_y), threads(TILE_DIM,BLOCK_ROWS); if (stride == 1 || length ==1 ) hipMemcpy(out,in,sizeof(cuda_cpx_t)*stride*length,hipMemcpyDefault); else hipLaunchKernelGGL(transpose_sdk, dim3(grid), dim3(threads), 0, 0, out,in, size_x, size_y,embed); }
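// ----------------------------------------------------------------------------
// Editor's note (not part of the library): the c2b pack/unpack kernels above
// rely on the __device__ bit_reverse() helper, which reverses the low
// log2(pow_of_two) bits of an index. Below is a host-side re-implementation of
// the same loop plus a few spot checks, useful for unit-testing the index math
// on the CPU. Names are illustrative.
// ----------------------------------------------------------------------------
#include <cassert>

static unsigned int bit_reverse_host(unsigned int in, unsigned int pow_of_two) {
  unsigned int rev = 0;
  for (unsigned int pow = pow_of_two, i = 0; pow > 1; pow /= 2, ++i) {
    rev = rev * 2 + ((in >> i) & 1u);  // peel off input bit i, append it as the next output bit
  }
  return rev;
}

static void bit_reverse_demo() {
  assert(bit_reverse_host(1, 8) == 4);    // 001b -> 100b
  assert(bit_reverse_host(6, 8) == 3);    // 110b -> 011b
  assert(bit_reverse_host(5, 16) == 10);  // 0101b -> 1010b
}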
the_stack
#ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif #ifndef MIN #define MIN(x,y) ((x < y) ? x : y) #endif #ifndef MAX #define MAX(x,y) ((x > y) ? x : y) #endif #include "cublas.h" #ifdef __cplusplus extern "C" { #endif struct cudamat { float* data_host; float* data_device; int on_device; int on_host; int size[2]; int is_trans; // 0 or 1 int owns_data; cudaTextureObject_t tex_obj; }; struct cudamat_double { double* data_host; double* data_device; int on_device; int on_host; int size[2]; int is_trans; // 0 or 1 int owns_data; cudaTextureObject_t tex_obj; }; struct rnd_struct { unsigned int* dev_mults; unsigned long long* dev_words; }; // bounding boxes. struct bbox { int *seg; // array of length 'size' + 1. int *labels; // labels[seg[i]:seg[i+1]] are the labels for image i. int *boxes; // boxes[4*seg[i]:4*seg[i+1]] are bounding boxes for image i. }; struct cudamat_bbox { bbox data_host; bbox data_device; int on_device; int on_host; int size; // Number of images in the (mini)batch. int numboxes; // Total number of boxes over all images in the (mini)batch. }; struct sparse_data { int *indices, *indptr; float* data; }; struct cudamat_sparse { sparse_data data_host; sparse_data data_device; int on_device; int on_host; int size[2]; int is_trans; // 0 or 1 int owns_data; int nnz; }; typedef struct Shape4D { int shape[4]; } Shape4D; typedef struct ConvDesc { int num_input_channels; int num_output_channels; int kernel_size_y; int kernel_size_x; int kernel_size_t; int stride_y; int stride_x; int stride_t; int padding_y; int padding_x; int padding_t; int input_channel_begin; int input_channel_end; int output_channel_begin; int output_channel_end; int num_groups; } ConvDesc; const char* get_last_cuda_error(); int cuda_record_event(cudaEvent_t* t); int cuda_synchronize_event(cudaEvent_t* t); int cuda_create_event(cudaEvent_t* t); int cublas_init(); int cublas_shutdown(); bool cuda_is_fermi(int deviceId); int cuda_set_device(int deviceId); int cuda_set_P2P(int gpu1, int gpu2); int init_random(rnd_struct* rnd_state, int seed); int get_rnd_state(rnd_struct* rnd_state, unsigned long long* host_words_out, int *size_out); int get_leading_dimension(cudamat* mat); int get_nonleading_dimension(cudamat* mat); void set_transpose(cudamat* mat, int is_trans); void cuda_sync_threads(); int allocate_device_memory(cudamat* mat); int allocate_device_memory_bbox(cudamat_bbox* mat); int allocate_device_memory_sparse(cudamat_sparse* mat); int destroy_tex(cudamat* mat); int write_at(cudamat* mat, int row, int col, float val); float read_from(cudamat* mat, int row, int col, int* err_code); int copy_to_host(cudamat* mat); int copy_to_host_slice(cudamat* mat, size_t start, size_t end); int copy_bbox_to_host(cudamat_bbox* mat); int copy_to_device(cudamat* mat); int copy_to_device_slice(cudamat* mat, size_t start, size_t end); int copy_bbox_to_device(cudamat_bbox* mat); int copy_sparse_to_device(cudamat_sparse* mat); int copy_on_device(cudamat* mat1, cudamat* mat2); int copy_on_device_p2p_async(cudamat* src, cudamat* dst, int src_dev, int dst_dev); int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end); int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end); int copy_transpose(cudamat* source, cudamat* target); int copy_transpose_big_matrix(cudamat* source, cudamat* target); int free_device_memory(cudamat* mat); int free_device_memory_bbox(cudamat_bbox* mat); int set_shape(cudamat* mat, unsigned int m, unsigned int n); int set_shape4d(Shape4D* 
shape, unsigned int s1, unsigned int s2, unsigned s3, unsigned s4); int reshape(cudamat* mat, int m, int n); int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col); int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind); void init_from_array(cudamat* mat, float* data, int m, int n); void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz); void set_on_device(cudamat* mat); int init_empty(cudamat* mat, int m, int n); int fill_with_rand(rnd_struct* rnd_state, cudamat* mat); int fill_with_randn(rnd_struct* rnd_state, cudamat* mat); int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target); int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target); int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target); int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult); int sample_vmf(rnd_struct* rnd_state, cudamat* kappa, cudamat* target, int num_dims, float tiny); int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target); int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target); int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val, float scale); int gaussian_dropout(rnd_struct* rnd_state, cudamat* mat, float scale); int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target); int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult); int add_to_each_pixel(cudamat* mat1, cudamat* mat2, cudamat* target, float mult); int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target); int add_diagonal_scalar(cudamat* mat, float val, cudamat* target); int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target); int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target); int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult); int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target); int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target); int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target, float scale_targets); int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target); int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target); int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target); int less_than(cudamat* mat1, cudamat* mat2, cudamat* target); int less_than_eq_scalar(cudamat* mat, float val, cudamat* target); int less_than_scalar(cudamat* mat, float val, cudamat* target); int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target); int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target); int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target); int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target); int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target); int greater_than_scalar(cudamat* mat, float val, cudamat* target); int upper_bound_scalar(cudamat* mat, float val, cudamat* target); int lower_bound_scalar(cudamat* mat, float val, cudamat* target); int upper_bound_mod_scalar(cudamat* mat, float val, cudamat* target); int max_by_axis(cudamat* mat, cudamat* target, int axis); int choose_max_and_accumulate(cudamat* mat, cudamat* acc); int choose_max_by_axis(cudamat* mat, cudamat* target, int axis); int argmax_by_axis(cudamat* mat, cudamat* target, int axis); int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p); int sum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, 
float p); float sum_all(cudamat* mat, int* err_code); int normlimit_by_axis(cudamat* mat, cudamat* target, int axis, float norm, int constraint); int norm_bprop_rowwise(cudamat* deriv, cudamat* input, cudamat* target); int normalize_by_axis(cudamat* mat, cudamat* target, int axis); int sign(cudamat* mat, cudamat* target); int apply_cos(cudamat* mat, cudamat* target); int apply_sin(cudamat* mat, cudamat* target); int apply_sigmoid(cudamat* mat, cudamat* target); int apply_tanh(cudamat* mat, cudamat* target); int apply_abs(cudamat* mat, cudamat* target); int apply_log_1_plus_exp(cudamat* mat, cudamat* target); int apply_relu_squash(cudamat* mat, cudamat* target, float lambda); int apply_log(cudamat* mat, cudamat* target, float tiny); int apply_exp(cudamat* mat, cudamat* target); int apply_ceil(cudamat* mat, cudamat* target); int apply_floor(cudamat* mat, cudamat* target); int apply_sqrt(cudamat* mat, cudamat* target); int apply_pow(cudamat* mat, float pow, cudamat* target); int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target); int compute_cross_entropy(cudamat* mat, cudamat* pow, cudamat* target, float tiny); int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny); int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff); int reciprocal(cudamat* mat, cudamat* target); int bessel_ratio_activation(cudamat* mat, cudamat* target); int bessel_ratio_activation_continued_fraction(cudamat* mat, cudamat* target, float order, int num_steps); int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha); int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha); float vdot(cudamat* mat1, cudamat* mat2, int* err_code); int add_mult(cudamat* mat1, cudamat* mat2, float alpha); int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult); int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target); int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target); int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target); int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target, float scale_targets); int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target); int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target); int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target); int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target); int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target); int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target); int assign_scalar(cudamat* mat, float alpha); int mult_by_scalar(cudamat* mat, float alpha, cudamat* target, float scale_targets); int divide_by_scalar(cudamat* mat, float alpha, cudamat* target); int add_scalar(cudamat* mat, float alpha, cudamat* target); float euclid_norm(cudamat* mat, int* err_code); int selectRows(cudamat* source, cudamat* target, cudamat* indices); int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2); int shuffleColumns(cudamat* source, cudamat* rand_perm_indices); int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices); int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels); int blockify(cudamat* source, cudamat* target, int blocksize); int softmax(cudamat* mat, cudamat* target); int softmax_overwrite(cudamat* mat); int softmax_row_major(cudamat* mat, 
cudamat* target); int softmax_row_major_multi(cudamat* mat, int numslices, cudamat* target); int apply_logistic_grad(cudamat* mat1, cudamat* mat2, cudamat* out_grad); int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target); int apply_softmax_grad_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target); int apply_softmax_grad_row_major(cudamat* mat, cudamat* labels, cudamat* target); int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target); int get_softmax_correct_row_major(cudamat* mat, cudamat* labels, cudamat* target); int get_softmax_correct_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target); int hinge_loss_row_major(cudamat* mat, cudamat* labels, cudamat* target, int quadratic, float margin); int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg); int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny); int get_softmax_cross_entropy_row_major(cudamat* mat, cudamat* labels, cudamat* target, float tiny); int expand(cudamat* source, cudamat* indices, cudamat* target); int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult); int extract_patches(cudamat* images, cudamat* patches, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int img_width, int img_height, int patch_width, int patch_height); int extract_patches_3(cudamat* images, cudamat* patches, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int img_width, int img_height, int patch_width, int patch_height); int capsulify(cudamat* images, cudamat* output, int image_size, int crop_size); int rectify_bounding_boxes(cudamat* boxes, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int patch_width, int patch_height); int adagrad(cudamat* history, cudamat* grad, float delta); int rms_prop(cudamat* history, cudamat* grad, float factor); int apply_grad_bbox( cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height, int loss_function); int get_softmax_correct_row_major_bbox( cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height); int get_logistic_correct_row_major_bbox( cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height, float cutoff); int get_logistic_correct_normalized(cudamat* mat1, cudamat* mat2, cudamat* out); // LSTMs int lstm_fprop(cudamat* s_in, cudamat* s_out, cudamat* w_dense, cudamat* w_diag, cudamat* b, bool init, bool use_relu); int lstm_bprop(cudamat* s_in, cudamat* s_out, cudamat* d_in, cudamat* d_out, cudamat* w_dense, cudamat* w_diag, bool init, bool use_relu); int lstm_outp(cudamat* s_in, cudamat* s_out, cudamat* d_out, cudamat* dw_dense, cudamat* dw_diag, cudamat* db, bool init); // Batch Normalization int bn_bprop(cudamat* deriv, cudamat* input, cudamat* gamma, cudamat* mu, cudamat* sigma, cudamat* target, float scale_targets); int bn_grad(cudamat* deriv, cudamat* input, cudamat* mu, cudamat* sigma, cudamat* dgamma, cudamat* dbeta); int bn_bprop_inplace(cudamat* deriv, cudamat* act, cudamat* dgamma); // New LSTM kernels with data layout: cols = lstms, rows = batchsize, then timesteps. 
int lstm_fprop2(cudamat* gates, cudamat* cell_prev, cudamat* cell, cudamat* output, cudamat* w); int lstm_fprop2_init(cudamat* gates, cudamat* cell, cudamat* output, cudamat* w); int lstm_bprop2(cudamat* gates, cudamat* gates_deriv, cudamat* cell_prev, cudamat* cell_prev_deriv, cudamat* cell, cudamat* cell_deriv, cudamat* output_deriv, cudamat* w); int lstm_bprop2_init(cudamat* gates, cudamat* gates_deriv, cudamat* cell, cudamat* cell_deriv, cudamat* output_deriv, cudamat* w); int capsule_activation(cudamat* h_in, cudamat* h_out, cudamat* length, cudamat* bessel_ratio); int capsule_derivative_of_activation(cudamat* d_out, cudamat* h_out, cudamat* length, cudamat* bessel_ratio, cudamat* d_in, float sparsity_cost, float sparsity_scale); #ifdef __cplusplus } #endif #endif
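// ----------------------------------------------------------------------------
// Editor's note (not part of this header): a minimal sketch of how the entry
// points declared above are typically driven. cudamat stores matrices
// column-major and most calls return an int error code; the exact flow here
// (wrap host buffer, upload, run an op, download) is an assumption to verify
// against the implementation, and error handling is elided.
// ----------------------------------------------------------------------------
inline int cudamat_usage_demo() {
  cublas_init();                    // initialize cuBLAS once per process

  float host[4 * 3];                // 4x3 matrix, column-major storage
  for (int i = 0; i < 4 * 3; ++i) host[i] = 0.5f;

  cudamat A;
  init_from_array(&A, host, 4, 3);  // wrap the existing host buffer
  copy_to_device(&A);               // allocate device memory and upload

  apply_sigmoid(&A, &A);            // elementwise sigmoid, in place on the GPU
  copy_to_host(&A);                 // download the result back into 'host'

  free_device_memory(&A);
  return cublas_shutdown();
}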
the_stack
namespace cub = hipcub; #endif #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T, framework::DataLayout layout> static __global__ void BNForwardInference( const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const int C, const int N, const int HxW, const double epsilon, T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int num = N * C * HxW; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> x_sub_mean = static_cast<BatchNormParamType<T>>(x[i]) - mean[c]; BatchNormParamType<T> inv_var = 1 / sqrt(variance[c] + epsilon); y[i] = static_cast<T>(scale[c] * x_sub_mean * inv_var + bias[c]); } } template <typename T, int BlockDim, framework::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining( const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const int C, const int N, const int HxW, const double epsilon, double exponentialAverageFactor, T *y, BatchNormParamType<T> *mean, BatchNormParamType<T> *variance, BatchNormParamType<T> *save_mean, BatchNormParamType<T> *save_inv_variance) { int outer_size = C; int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage mean_storage; __shared__ typename BlockReduce::TempStorage variance_storeage; __shared__ BatchNormParamType<T> mean_val; __shared__ BatchNormParamType<T> variance_val; __shared__ BatchNormParamType<T> inv_var_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum()); x_square_sum = BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum()); if (threadIdx.x == 0) { mean_val = x_sum / inner_size; variance_val = x_square_sum / inner_size - mean_val * mean_val; inv_var_val = 1 / sqrt(variance_val + epsilon); if (save_mean && save_inv_variance) { save_mean[i] = mean_val; save_inv_variance[i] = inv_var_val; } mean[i] = (1 - exponentialAverageFactor) * mean_val + exponentialAverageFactor * mean[i]; variance[i] = (1 - exponentialAverageFactor) * variance_val + exponentialAverageFactor * variance[i]; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_sub_mean = static_cast<BatchNormParamType<T>>(x[index]) - mean_val; y[index] = scale[i] * x_sub_mean * inv_var_val + bias[i]; } } } template <typename T> class BatchNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); const bool is_test = ctx.Attr<bool>("is_test"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const bool trainable_stats = ctx.Attr<bool>("trainable_statistics"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_stats); // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] const auto *x = ctx.Input<Tensor>("X"); const auto &x_dims = x->dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, platform::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5" "But received: the size of input's dimensions is [%d]", x_dims.size())); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); auto dtype = platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; // TODO(wangran16): wait for MIOpen to improve the performance of BN // HIP do not support compute format of NHWC // auto compute_format = DataLayout::kNCHW; #else const bool fast_nhwc_batch_norm = test_mode || (dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; #endif Tensor transformed_x(x->type()); Tensor transformed_y(y->type()); if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y, &transformed_y); } else { transformed_x.ShareDataWith(*x); transformed_y.ShareDataWith(*y); } // ------------------- cudnn descriptors --------------------- #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // miopenTensorDescriptor_t data_desc_; // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // mode_ = miopenBNSpatial; #elif CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #endif // CUDNN_VERSION_MIN(7, 0, 1) VLOG(3) << "Setting descriptors."; std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * D * C, 1, W * D * C, D * C, C}; } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType<T>::type, // x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()), // const_cast<int *>(strides.data()))); // Note: PERSISTENT not implemented for inference // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor( // bn_param_desc_, data_desc_, test_mode ? miopenBNSpatial : mode_)); #else PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); // Note: PERSISTENT not implemented for inference PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, test_mode ? 
CUDNN_BATCHNORM_SPATIAL : mode_)); #endif const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Now, depending on whether we are running test or not, we have two paths. // It is training mode when it's not reference AND not using pre-trained // model. bool training = !test_mode && !use_global_stats; if (!training) { // only when test we use input to do computation. const auto *est_mean = ctx.Input<Tensor>("Mean"); const auto *est_var = ctx.Input<Tensor>("Variance"); // Run inference mode. PADDLE_ENFORCE_EQ( est_mean->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of mean's dimensions must equal to 1." "But received: the size of mean's dimensions mean is [%d]," "the dimensions of mean is [%s].", est_mean->dims().size(), est_mean->dims())); PADDLE_ENFORCE_EQ( est_var->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of variance's dimensions must equal to 1." "But received: the size of variance's dimensions is [%d]," "the dimensions of variance is [%s].", est_var->dims().size(), est_var->dims())); PADDLE_ENFORCE_EQ( est_mean->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of mean must equal to the number of " "Channels, which is [%d]. But received: the first dimension" "of mean is [%d], the dimensions of mean is [%s].", C, est_mean->dims()[0], est_mean->dims())); PADDLE_ENFORCE_EQ( est_var->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of variance must equal to the number" "of Channels, which is [%d]. But received: the first dimension of" "variance is [%d], the dimensions of variance is [%s].", C, est_var->dims()[0], est_var->dims())); #ifdef PADDLE_WITH_HIP const int block_size = 256; const int grid_size = (N * C * H * W * D + block_size - 1) / block_size; if (compute_format == DataLayout::kNCHW) { BNForwardInference< T, DataLayout::kNCHW><<<grid_size, block_size, 0, dev_ctx.stream()>>>( transformed_x.template data<T>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), C, N, H * W * D, epsilon, transformed_y.template data<T>()); } else { BNForwardInference< T, DataLayout::kNHWC><<<grid_size, block_size, 0, dev_ctx.stream()>>>( transformed_x.template data<T>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), C, N, H * W * D, epsilon, transformed_y.template data<T>()); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenBatchNormalizationForwardInference( // handle, miopenBNSpatial, // const_cast<void *>( // static_cast<const void *>(CudnnDataType<T>::kOne())), // const_cast<void *>( // static_cast<const void *>(CudnnDataType<T>::kZero())), // data_desc_, // static_cast<const void *>(transformed_x.template data<T>()), // data_desc_, // static_cast<void *>( // transformed_y.template mutable_data<T>(ctx.GetPlace())), // bn_param_desc_, // const_cast<void *>(static_cast<const void *>( // scale->template data<BatchNormParamType<T>>())), // const_cast<void *>(static_cast<const void *>( // bias->template data<BatchNormParamType<T>>())), // const_cast<void *>(static_cast<const void *>( // est_mean->template 
data<BatchNormParamType<T>>())), // const_cast<void *>(static_cast<const void *>( // est_var->template data<BatchNormParamType<T>>())), // epsilon)); #else PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardInference( handle, // Note: PERSISTENT not implemented for inference CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_y.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), epsilon)); #endif } else { // if MomentumTensor is set, use MomentumTensor value, momentum // is only used in this training branch if (ctx.HasInput("MomentumTensor")) { const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor"); Tensor mom_cpu; TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu); momentum = mom_cpu.data<float>()[0]; } // Run training mode. // obtain running mean and running inv var, and there is no need // to initialize them. auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); if ((N * H * W * D) == 1) { // Only 1 element in normalization dimension, // skip the batch norm calculation, let y = x. framework::TensorCopy(*x, ctx.GetPlace(), y); } else { double this_factor = 1. - momentum; bool called = false; #if CUDNN_VERSION_MIN(7, 4, 1) called = true; size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; Tensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. 
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*zDesc=*/nullptr, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*activationDesc=*/nullptr, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space_ptr = reserve_space->mutable_data( ctx.GetPlace(), transformed_x.type(), reserve_space_size); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, CUDNN_BATCHNORM_OPS_BN, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), nullptr, nullptr, data_desc_, transformed_y.template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), nullptr, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); #endif // CUDNN_VERSION_MIN(7, 4, 1) if (!called) { #ifdef PADDLE_WITH_HIP const int num = transformed_x.numel(); const int block = 256; const int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); const int grid = std::min(C, max_blocks); if (compute_format == DataLayout::kNCHW) { BNForwardTraining< T, block, DataLayout::kNCHW><<<grid, block, 0, dev_ctx.stream()>>>( transformed_x.template data<T>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), C, N, H * W * D, epsilon, this_factor, transformed_y.template data<T>(), mean_out->template data<BatchNormParamType<T>>(), variance_out->template data<BatchNormParamType<T>>(), saved_mean->template data<BatchNormParamType<T>>(), saved_variance->template data<BatchNormParamType<T>>()); } else { BNForwardTraining< T, block, DataLayout::kNHWC><<<grid, block, 0, dev_ctx.stream()>>>( transformed_x.template data<T>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), C, N, H * W * D, epsilon, this_factor, transformed_y.template data<T>(), mean_out->template data<BatchNormParamType<T>>(), variance_out->template data<BatchNormParamType<T>>(), saved_mean->template data<BatchNormParamType<T>>(), saved_variance->template data<BatchNormParamType<T>>()); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenBatchNormalizationForwardTraining( // handle, mode_, const_cast<void *>(static_cast<const void *>( // CudnnDataType<T>::kOne())), // 
const_cast<void *>( // static_cast<const void *>(CudnnDataType<T>::kZero())), // data_desc_, // static_cast<const void *>(transformed_x.template data<T>()), // data_desc_, // static_cast<void *>( // transformed_y.template mutable_data<T>(ctx.GetPlace())), // bn_param_desc_, // const_cast<void *>(static_cast<const void *>( // scale->template data<BatchNormParamType<T>>())), // const_cast<void *>(static_cast<const void *>( // bias->template data<BatchNormParamType<T>>())), // this_factor, // static_cast<void *>( // mean_out->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace())), // static_cast<void *>(variance_out->template mutable_data< // BatchNormParamType<T>>(ctx.GetPlace())), // epsilon, // static_cast<void *>( // saved_mean->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace())), // static_cast<void *>(saved_variance->template mutable_data< // BatchNormParamType<T>>(ctx.GetPlace())))); #else PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTraining( handle, mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_y.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()))); #endif } } } if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_y, y); } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); #endif } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, framework::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? (i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const framework::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const gpuStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); KeBNRestoreData<<<grid2, block, 0, stream>>>( layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *saved_mean, const BatchNormParamType<T> *saved_inv_variance, const int C, const int N, const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; __shared__ typename BlockReduce::TempStorage mean_storage; __shared__ typename BlockReduce::TempStorage variance_storeage; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; __shared__ BatchNormParamType<T> dscale_val; __shared__ BatchNormParamType<T> dbias_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); if (saved_mean && saved_inv_variance) { if (threadIdx.x == 0) { inv_var_val = 
saved_inv_variance[i]; mean_val = saved_mean[i]; } } else { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum()); x_square_sum = BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum()); if (threadIdx.x == 0) { mean_val = x_sum / inner_size; inv_var_val = 1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon); } } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale_val = ds_sum * inv_var_val; dbias_val = db_sum; dscale[i] = dscale_val; dbias[i] = dbias_val; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T, int BlockDim, framework::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData( const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, cub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T> class BatchNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); // batch_norm with inplace as false will take X as grad input, which // is same as cuDNN batch_norm backward calculation, batch_norm // with inplace as true only take Y as input and X should be calculate // by inverse operation of batch_norm on Y const Tensor *x; bool is_inplace; if (ctx.HasInput("Y")) { x = ctx.Input<Tensor>("Y"); is_inplace = true; if (d_x) { PADDLE_ENFORCE_EQ(d_x, d_y, platform::errors::InvalidArgument( "X@GRAD and Y@GRAD not inplace in inplace mode")); } } else { x = ctx.Input<Tensor>("X"); is_inplace = false; if (d_x) { PADDLE_ENFORCE_NE( d_x, d_y, platform::errors::InvalidArgument( "X@GRAD and Y@GRAD inplaced in non-inplace mode")); } } const bool is_test = ctx.Attr<bool>("is_test"); use_global_stats = is_test || use_global_stats; const auto &x_dims = x->dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, platform::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output if (d_x) { d_x->mutable_data<T>(ctx.GetPlace()); } if (d_scale && d_bias) { d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); } PADDLE_ENFORCE_EQ( scale->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale->dims().size(), scale->dims())); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. But " "received: the first dimension of scale is [%d]", C, scale->dims()[0])); auto dtype = platform::CudnnDataType<T>::type; const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace"); #ifdef PADDLE_WITH_HIP auto compute_format = data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; // TODO(wangran16): wait for MIOpen to improve the performance of BN // HIP do not support compute format of NHWC // auto compute_format = DataLayout::kNCHW; #else const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && reserve_space != nullptr; auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; #endif Tensor transformed_x(x->type()); Tensor transformed_d_y(d_y->type()); Tensor transformed_d_x; if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y, &transformed_d_y); if (d_x) { ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x, &transformed_d_x); } } else { transformed_x.ShareDataWith(*x); transformed_d_y.ShareDataWith(*d_y); if (d_x) { transformed_d_x.ShareDataWith(*d_x); } } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const int num = transformed_x.numel(); #ifdef HIPCC const int block = 256; #else const int block = 512; #endif int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = std::min(C, max_blocks); auto stream = dev_ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { if (d_x) { framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); } math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // miopenTensorDescriptor_t data_desc_; // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. 
Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // mode_ = miopenBNSpatial; #elif CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #endif // CUDNN_VERSION_MIN(7, 0, 1) #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType<T>::type, // x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()), // const_cast<int *>(strides.data()))); // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_, // data_desc_, mode_)); #else PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); #endif const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_x && d_scale && d_bias) { bool called = false; #if CUDNN_VERSION_MIN(7, 4, 1) called = true; size_t workspace_size = 0; void *workspace_ptr = nullptr; Tensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/transformed_d_x.template mutable_data<T>( ctx.GetPlace()), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, 
/*dBnScaleData=*/d_scale ->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), /*dBnBiasData=*/d_bias ->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>( reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #endif // CUDNN_VERSION_MIN(7, 4, 1) if (!called) { #ifdef PADDLE_WITH_HIP if (compute_format == DataLayout::kNCHW) { BNBackward< T, block, DataLayout::kNCHW><<<grid2, block, 0, dev_ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale->template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), d_scale->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace())); } else { BNBackward< T, block, DataLayout::kNHWC><<<grid2, block, 0, dev_ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale->template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), d_scale->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace())); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenBatchNormalizationBackward( // dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), data_desc_, // transformed_x.template data<T>(), data_desc_, // transformed_d_y.template data<T>(), data_desc_, // transformed_d_x.template mutable_data<T>(ctx.GetPlace()), // bn_param_desc_, scale->template data<BatchNormParamType<T>>(), // d_scale->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // d_bias->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // epsilon, saved_mean_data, saved_var_data)); #else PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, transformed_d_x.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), d_scale->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean_data, saved_var_data)); #endif } if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { BNBackwardData<T, block, framework::DataLayout::kNCHW><<< grid2, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias< T, block, framework::DataLayout::kNCHW><<<grid2, block, 0, stream>>>( 
d_y->data<T>(), x->data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { BNBackwardData<T, block, framework::DataLayout::kNHWC><<< grid2, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias< T, block, framework::DataLayout::kNHWC><<<grid2, block, 0, stream>>>( d_y->data<T>(), x->data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); // PADDLE_ENFORCE_CUDA_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); #endif } else { const auto *running_mean = ctx.Input<Tensor>("Mean"); const auto *running_var = ctx.Input<Tensor>("Variance"); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = *x; inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x->data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { KeBNBackwardData< T, framework::DataLayout::kNCHW><<<grid1, block, 0, stream>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias< T, block, framework::DataLayout::kNCHW><<<grid2, block, 0, stream>>>( d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { KeBNBackwardData< T, framework::DataLayout::kNHWC><<<grid1, block, 0, stream>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias< T, block, framework::DataLayout::kNHWC><<<grid2, block, 0, stream>>>( d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } }; template <typename T> class BatchNormDoubleGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *X = ctx.Input<Tensor>("X"); const auto *Scale = ctx.Input<Tensor>("Scale"); const auto *dY = ctx.Input<Tensor>("DY"); const auto *Saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *Saved_variance = ctx.Input<Tensor>("SavedVariance"); const double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const bool is_test = ctx.Attr<bool>("is_test"); PADDLE_ENFORCE_EQ( 
is_test, false, platform::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *ddX = ctx.Input<Tensor>("DDX"); const auto *ddScale = ctx.Input<Tensor>("DDScale"); const auto *ddBias = ctx.Input<Tensor>("DDBias"); auto *dX = ctx.Output<Tensor>("DX"); auto *dScale = ctx.Output<Tensor>("DScale"); auto *ddY = ctx.Output<Tensor>("DDY"); NormDoubleGradFunctor<platform::CUDADeviceContext, T>( ctx, data_layout, X, Scale, dY, Saved_mean, Saved_variance, epsilon, use_global_stats, ddX, ddScale, ddBias, dX, dScale, ddY); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>, ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad_grad, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>); #else REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>, ops::BatchNormKernel<plat::CUDADeviceContext, double>, ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormGradKernel<plat::CUDADeviceContext, double>, ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad_grad, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, double>); #endif
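/*
 * Illustrative sketch (not part of the original sources): the cuDNN/MIOpen
 * inference call used above and the BNForwardInference fallback kernel apply
 * the same per-channel transform, y = scale * (x - mean) / sqrt(var + eps) + bias,
 * using the running statistics passed in as "Mean" and "Variance". The
 * standalone kernel below is a minimal sketch of that math for NCHW data,
 * assuming the statistics are already computed; the kernel name
 * (bn_inference_sketch) and the grid-stride launch are hypothetical, not the
 * operator's actual entry points.
 */
#include <cuda_runtime.h>
#include <math.h>

// One thread per element; the channel index is recovered from the flat NCHW offset.
__global__ void bn_inference_sketch(const float *x, const float *mean,
                                    const float *var, const float *scale,
                                    const float *bias, float *y, int C,
                                    int spatial /* H * W * D */, int num,
                                    double epsilon) {
  int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += stride) {
    int c = (i / spatial) % C;                        // channel of element i in NCHW
    float inv_std = rsqrtf(var[c] + (float)epsilon);  // 1 / sqrt(var + eps)
    y[i] = scale[c] * (x[i] - mean[c]) * inv_std + bias[c];
  }
}

/*
 * A typical launch for N*C*H*W*D elements would mirror the grid used by the
 * HIP fallback path above, e.g.
 *   bn_inference_sketch<<<(num + 255) / 256, 256>>>(x, mean, var, scale, bias,
 *                                                   y, C, H * W * D, num, eps);
 * The training branch differs only in that mean/var are reduced from the batch
 * and the running statistics are updated with the momentum factor.
 */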
#define BYDIMF 5 #define CDIM 10 #define BYDIMB 5 #if __CUDA_ARCH__ >= 300 /* * Positive kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { const int nwindow = 2*SKIP+1; int iwords[nwindow]; float aa[NREPS]; float daa[NREPS]; float bb[NREPS][nwindow]; float dbb[NREPS][nwindow]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v, ascale, bscale; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); float inr = 1.0f / nrows; #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 >= 0) { iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address } else { iwords[i] = -1; } good = (iwords[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the B vector for this word indx = tid + j * dxy; if (good && indx < nrows) { bb[j][i] = B[indx + iwords[i]]; } else { bb[j][i] = 0; } dbb[j][i] = 0; } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide iwords down iwords[i] = iwords[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { bb[j][i] = bb[j][i+1]; // slide data down dbb[j][i] = dbb[j][i+1]; // slide deriv down } } good = (icol + SKIP < ncols); if (good) { iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address } else { iwords[nwindow - 1] = -1; } good = good && iwords[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new B column indx = tid + j * dxy; if (good && indx < nrows) { bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]]; } else { bb[j][nwindow - 1] = 0; } dbb[j][nwindow-1] = 0; if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column aa[j] = A[indx + iwords[SKIP]]; } else { aa[j] = 0; } } lb = LB[icol]; ub = UB[icol]; __syncthreads(); if (iwords[SKIP] >= 0) { #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols prod = 0; if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { v = __shfl_down(prod, k); // Reduce within warp prod += v; } if (threadIdx.x == 0) { CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] += CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = 1.0f - v; // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { daa[j] = 0; } ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp); #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) { bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp); v = lrate * CC[i - SKIP - lb]; #pragma unroll for (j = 0; j < NREPS; j++) { daa[j] 
+= ascale * v * bb[j][i]; // Update A's derivative dbb[j][i] += bscale * v * aa[j]; // Update B's derivative } } } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { // Save the A column atomicAdd(&A[tid + j * dxy + iwords[SKIP]], daa[j]); } } if (iwords[0] >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { // Save the B column atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]); } } } __syncthreads(); } } #pragma unroll for (i = 1; i < nwindow; i++) { // Clear out the derivative queue if (iwords[i] >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Save the B column if (tid + j * dxy < nrows) { atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]); } } } } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) { const int nwindow = 2*SKIP+1; int iwords[nwindow]; float aa[NREPS]; float bb[NREPS][nwindow]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; double sum = 0; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 >= 0) { iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word } else { iwords[i] = -1; } good = (iwords[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the B vector for this word indx = tid + j * dxy; if (good && indx < nrows) { bb[j][i] = B[indx + iwords[i]]; } else { bb[j][i] = 0; } } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide iwords down iwords[i] = iwords[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { bb[j][i] = bb[j][i+1]; // slide data down } } good = (icol + SKIP < ncols); if (good) { iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word } else { iwords[nwindow - 1] = -1; } good = good && iwords[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new B column indx = tid + j * dxy; if (good && indx < nrows) { bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]]; } else { bb[j][nwindow - 1] = 0; } if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column aa[j] = A[indx + iwords[SKIP]]; } else { aa[j] = 0; } } lb = LB[icol]; ub = UB[icol]; __syncthreads(); #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols if (i >= SKIP + lb && i <= SKIP + ub) { if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss) prod = 20.0f; } else { prod = 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { v = __shfl_down(prod, k); // Reduce within warp prod += v; } } if (threadIdx.x == 0) { CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] += CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= 
ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(v, 1.0e-20f)); // Compute the loss } __syncthreads(); for (i = 1; i <= ub - lb; i = i + i) { if ((tid & (i-1)) == 0 && tid + i <= ub - lb) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } if (tid == 0) { atomicAdd(&Retval[0], (float)sum); } } template<int NSKIP, int BYDIM> __global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { __shared__ float CC[NSKIP*2*BYDIM]; float aa; int ib[NSKIP*2]; float prods[NSKIP*2]; float bscale[NSKIP*2]; int ia, iword, lb, ub; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol, jcol; float bb, db, dv, v, ascale, tmp; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns ia = nrows * W[icol]; if (ia >= 0) { // Load lb and ub values lb = LB[icol]; ub = UB[icol]; jcol = threadIdx.x - NSKIP; iword = -1; if (jcol >= lb && jcol <= ub) { // Load words in the window iword = W[icol + jcol]; } #pragma unroll for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods ib[i] = nrows * __shfl(iword, i); ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1); prods[i] = 0; prods[i+NSKIP] = 0; } for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP*2; j++) { if (ib[j] >= 0) { bb = B[i + ib[j]]; prods[j] += aa * bb; } } } #pragma unroll for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp #pragma unroll for (k = 1; k < 32; k = k+k) { tmp = __shfl_down(prods[j], k); prods[j] += tmp; } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (j = 0; j < 2*NSKIP; j++) { CC[j + NSKIP * 2 * threadIdx.y] = prods[j]; } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps __syncthreads(); for (j = tid; j < NSKIP * 2; j += dxy) { CC[j] += CC[j + i * NSKIP * 2]; } } __syncthreads(); for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1 - v); // All these pairs have label 1 } __syncthreads(); // Now do scaled gradients ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A for (j = 0; j < NSKIP * 2; j++) { // Load B data if (ib[j] >= 0) { bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B } else { bscale[j] = 0; } prods[j] = CC[j]; } __syncthreads(); dv = 0; for (i = tid; i < nrows; i += dxy) { // Update vecs with derivatives aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP * 2; j++) { // Load B data if (ib[j] >= 0) { bb = B[i + ib[j]]; dv += ascale * prods[j] * bb; db = bscale[j] * prods[j] * aa; atomicAdd(&B[i + ib[j]], db); // Update B } } atomicAdd(&A[i + ia], dv); // Update A } __syncthreads(); } } } template<int NSKIP, int BYDIM> __global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) { __shared__ float CC[NSKIP*2*BYDIM]; float aa; float prods[NSKIP*2]; int ia, iword, lb, ub; int ib[NSKIP*2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = 
blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol, jcol; float bb, v, tmp, sum; sum = 0; for (icol = istart; icol < iend; icol++) { // Iterate over columns ia = nrows * W[icol]; if (ia >= 0) { // Load lb and ub values lb = LB[icol]; ub = UB[icol]; jcol = threadIdx.x - NSKIP; iword = -1; if (jcol >= lb && jcol <= ub) { // Load words in the window iword = W[icol + jcol]; } #pragma unroll for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods ib[i] = nrows * __shfl(iword, i); ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1); prods[i] = 0; prods[i+NSKIP] = 0; } for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words aa = A[i + ia]; #pragma unroll for (j = 0; j < NSKIP*2; j++) { if (ib[j] >= 0) { bb = B[i + ib[j]]; prods[j] += aa * bb; } } } #pragma unroll for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp #pragma unroll for (k = 1; k < 32; k = k+k) { tmp = __shfl_down(prods[j], k); prods[j] += tmp; } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (j = 0; j < 2*NSKIP; j++) { CC[j + NSKIP * 2 * threadIdx.y] = prods[j]; } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps __syncthreads(); for (j = tid; j < NSKIP * 2; j += dxy) { CC[j] += CC[j + i * NSKIP * 2]; } } __syncthreads(); for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1 } __syncthreads(); // Now sum likelihood over window for (i = 1; i < 2 * NSKIP; i = i + i) { if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) { CC[tid] += CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } } if (tid == 0) { atomicAdd(&retval[0], (float)sum); } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int BYDIM> __global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float aa[NWA]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; int ib[NWB]; float bscale[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol; float dv, v, ascale; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + ib[j]]; } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these elements v = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 
32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWA; j++) { // Load A data aa[j] = A[i + ia[j]]; } #pragma unroll for (k = 0; k < NWB; k++) { // Load B data bb[k] = B[i + ib[k]]; bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp); prods[0][k] = 0; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the products ascale = pow(max(0, ia[j])*inr + 1.0f, vexp); dv = 0; #pragma unroll for (k = 0; k < NWB; k++) { v = CC[j + k * NWA]; dv += ascale * v * bb[k]; prods[0][k] += bscale[k] * v * aa[j]; } atomicAdd(&A[i + ia[j]], dv); // Update A } #pragma unroll for (k = 0; k < NWB; k++) { atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B } } __syncthreads(); } } template<int NWA, int NWB, int BYDIM> __global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float aa[NWA]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; int ib[NWB]; float bscale[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol, tmpi; float dv, v, ascale; float inr = 1.0f / nrows; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { tmpi = WA[i + icol * NWA]; // Fill the A word matrix if (tmpi < nwords) { tmpi = nrows * tmpi; } else { tmpi = -1; } ia[i] = tmpi; #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { tmpi = WB[i + icol * NWB]; // Fill the B word matrix if (tmpi < nwords) { tmpi = nrows * tmpi; } else { tmpi = -1; } ib[i] = tmpi; } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B if (ib[j] >= 0) { bb[j] = B[i + ib[j]]; } else { bb[j] = 0; } } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these elements if (ia[j] >= 0) { v = A[i + ia[j]]; } else { v = 0; } #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { 
__syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWA; j++) { // Load A data if (ia[j] >= 0) { aa[j] = A[i + ia[j]]; } else { aa[j] = 0; } } #pragma unroll for (k = 0; k < NWB; k++) { // Load B data if (ib[k] >= 0) { bb[k] = B[i + ib[k]]; } else { bb[k] = 0; } bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp); prods[0][k] = 0; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the products ascale = pow(max(0, ia[j])*inr + 1.0f, vexp); dv = 0; #pragma unroll for (k = 0; k < NWB; k++) { v = CC[j + k * NWA]; dv += ascale * v * bb[k]; prods[0][k] += bscale[k] * v * aa[j]; } if (ia[j] >= 0) { atomicAdd(&A[i + ia[j]], dv); // Update A } } #pragma unroll for (k = 0; k < NWB; k++) { if (ib[k] >= 0) { atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B } } } __syncthreads(); } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int BYDIM> __global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float bb[NWB]; float prods[NWA][NWB]; int ia[NWA]; int ib[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); int i, j, k, icol; float v; double sum = 0; for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + ib[j]]; } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the products of these elements v = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += v * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else if (v < -16.0f) { v = 0.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 0 } for (i = 1; i < NWA*NWB; i = i + i) { if ((tid & (i-1)) == 0 && tid + i < NWA*NWB) { CC[tid] += 
CC[tid + i]; } __syncthreads(); } sum += CC[0]; __syncthreads(); } if (tid == 0) { atomicAdd(&Retval[0], (float)sum); } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; float aa[NREPS]; float da[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, icol, dxy, lb, ub, iword, cword; float bb, db, prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns iword = nrows * W[icol]; // Get the current word __syncthreads(); lb = LB[icol]; ub = UB[icol]; if (iword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get A aa[j] = A[tid + j * dxy + iword]; } else { aa[j] = 0; } } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols __syncthreads(); cword = nrows * W[icol + i]; // Get the current word prod = 0; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; prod += aa[j] * bb; // Compute the product between current A, B cols } } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } } if (threadIdx.x == 0) { CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } __syncthreads(); for (j = 1; j < blockDim.y; j++) { // Reduce across warps for (i = tid; i < ub - lb; i += dxy) { CC[i] += CC[i + j * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i < ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1.0f - v); // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { da[j] = 0; } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols cword = nrows * W[icol + i]; // Get the context word v = CC[i - lb]; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; da[j] += v * bb; db = v * aa[j]; atomicAdd(&B[tid + j * dxy + cword], db); } } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + iword], da[j]); } } } } } /* * Combined forward-backward word2vec kernel */ template<int NWA, int NWB, int MAXD, int BYDIM> __global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BYDIM]; float dd[MAXD]; float prods[NWA][NWB]; float aa, v, sum; int ia[NWA]; int ib[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int i, j, k, icol; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma 
unroll for (i = 0; i < NWB; i++) { ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B if (ib[j] >= 0) { dd[j] = B[i + ib[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWA; j++) { // Compute the inner products of these elements if (ia[j] >= 0) { aa = A[i + ia[j]]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += aa * dd[k]; } } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = - lrate * v; // All these pairs have label 0 } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWB; j++) { // Load B data if (ib[j] >= 0) { dd[j] = B[i + ib[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the product if (ia[j] >= 0) { sum = 0; #pragma unroll for (k = 0; k < NWB; k++) { float xx = CC[j + k * NWA]; sum += xx * dd[k]; } atomicAdd(&A[i + ia[j]], sum); } } #pragma unroll for (j = 0; j < NWA; j++) { // Load A data if (ia[j] >= 0) { dd[j] = A[i + ia[j]]; } else { dd[j] = 0; } } #pragma unroll for (j = 0; j < NWB; j++) { // Now do the product if (ib[j] >= 0) { sum = 0; #pragma unroll for (k = 0; k < NWA; k++) { float xx = CC[k + j * NWA]; sum += xx * dd[k]; } atomicAdd(&B[i + ib[j]], sum); } } } __syncthreads(); } } /* * * Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B. * The column indices are specified by two "word" matrices. The inner products are computed as an outer product * of the word matrices. * * NWA is the number of words per column in WA * NWB is the number of words per column in WB * * Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B. 
* */ template<int NWA, int NWB, int BDIM> __global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) { const int NWAB = NWA*NWB; __shared__ float CC[NWA*NWB*BDIM]; float aa; float bb[NWB]; float prods[NWA][NWB]; int wa[NWA]; int wb[NWB]; int tid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int i, j, k, icol; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < NWA; i++) { wa[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix #pragma unroll for (j = 0; j < NWB; j++) { // clear the products matrix prods[i][j] = 0; } } #pragma unroll for (i = 0; i < NWB; i++) { wb[i] = WB[i + icol * NWB]; // Fill the B word matrix } for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block #pragma unroll for (j = 0; j < NWB ; j++) { // Read B bb[j] = B[i + wb[j] * nrows]; } #pragma unroll for (j = 0; j < NWA; j++) { // Computes the products of these elements aa = A[i + wa[j] * nrows]; #pragma unroll for (k = 0; k < NWB; k++) { prods[j][k] += aa * bb[k]; } } } // Finished the entire block #pragma unroll for (i = 0; i < NWA; i++) { // Reduce the products within each warp #pragma unroll for (j = 0; j < NWB; j++) { #pragma unroll for (k = 1; k < 32; k = k+k) { float tmp = __shfl_down(prods[i][j], k); prods[i][j] += tmp; } } } __syncthreads(); if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp) #pragma unroll for (i = 0; i < NWA; i++) { #pragma unroll for (j = 0; j < NWB; j++) { CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j]; } } } __syncthreads(); for (i = 1; i < blockDim.y; i++) { __syncthreads(); #pragma unroll for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps CC[j] += CC[j + i * NWAB]; } } __syncthreads(); for (i = tid; i < NWAB; i += dxy) { // Save to main memory C[i + icol * NWAB] = CC[i]; //atomicAdd(&C[i + icol * NWAB], CC[i]); } __syncthreads(); } } /* * * Simple backward kernel for word2vec. * Computes the gradient for A given B or vice-versa, and does an SGD update. 
* * NWA is the number of words per column in WA * NWB is the number of words per column in WB * */ template<int NWA, int NWB, int MAXDIM> __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) { const int NWAB = NWA * NWB; float dd[MAXDIM]; int wa[NWA]; int wb[NWB]; __shared__ float cc[NWA*NWB]; int tid = threadIdx.x; int fid = threadIdx.x + blockDim.x * threadIdx.y; int dxy = blockDim.x * blockDim.y; int icol, i, j, k; float sum; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // iterate in columns #pragma unroll for (j = 0; j < NWA; j++) { wa[j] = WA[j + icol * NWA]; // Load the A word matrix } __syncthreads(); #pragma unroll for (j = 0; j < NWB; j++) { wb[j] = WB[j + icol * NWB]; // Load the B word matrix } for (i = fid; i < NWAB; i += dxy) { cc[i] = C[i + icol * NWAB]; } __syncthreads(); for (i = tid; i < nrows; i += dxy) { #pragma unroll for (j = 0; j < NWB; j++) { // Load the data dd[j] = B[i + wb[j] * nrows]; } #pragma unroll for (j = 0; j < NWA; j++) { // Now do the product sum = 0; #pragma unroll for (k = 0; k < NWB; k++) { float xx = cc[j + k * NWA]; sum += xx * dd[k]; } atomicAdd(&A[i + wa[j] * nrows], sum * lrate); } #pragma unroll for (j = 0; j < NWA; j++) { // Load the data dd[j] = A[i + wa[j] * nrows]; } #pragma unroll for (j = 0; j < NWB; j++) { // Now do the product sum = 0; #pragma unroll for (k = 0; k < NWA; k++) { float xx = cc[k + j * NWA]; sum += xx * dd[k]; } atomicAdd(&B[i + wb[j] * nrows], sum * lrate); } } } } #else template<int SKIP, int BYDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {} template<int NWA, int NWB, int BYDIM> __global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {} template<int NWA, int NWB, int BYDIM> __global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {} template<int SKIP, int BYDIM, int NREPS> __global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {} template<int NWA, int NWB, int BYDIM> __global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {} template<int NWA, int NWB, int BDIM> __global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {} template<int NWA, int NWB, int MAXDIM> __global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {} #endif int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) { dim3 threads(32, CDIM, 1); int nblocks = min(64, ncols); if (nrows <= 320) { switch(skip) { case 5 : __word2vecPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 3 : __word2vecPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 2 : __word2vecPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; default : printf("word2vecPos unsupport size %d\n", skip); return 1; } } else if (nrows <= 640) { switch(skip) { case 5 : __word2vecPos<5, CDIM, 20/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 3 : __word2vecPos<3, 
CDIM, 20/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 2 : __word2vecPos<2, CDIM, 20/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; default : printf("word2vecPos unsupport size %d\n", skip); return 1; } } else if (nrows <= 1280) { switch(skip) { case 5 : __word2vecPos<5, CDIM, 40/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 3 : __word2vecPos<3, CDIM, 40/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; case 2 : __word2vecPos<2, CDIM, 40/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break; default : printf("word2vecPos unsupport size %d\n", skip); return 1; } } else { printf("word2vecPos too many rows %d\n", nrows); return 1; } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; } int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { int which = nwa*10000 + nwb; int nblocks = min(2048, 2 + (ncols - 1)); if (nrows <= 320) { const int bydim = 2; dim3 threads(32, bydim, 1); switch (which) { case 50001: __word2vecNeg<5,1,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50005: __word2vecNeg<5,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 100005: __word2vecNeg<10,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50010: __word2vecNeg<5,10,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; // case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break; default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1; } } else { const int bydim = 5; dim3 threads(32, bydim, 1); switch (which) { case 50001: __word2vecNeg<5,1,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50005: __word2vecNeg<5,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 100005: __word2vecNeg<10,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; case 50010: __word2vecNeg<5,10,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break; // case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break; default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1; } } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; } int word2vecNegFilt(int nrows, int ncols, int nwords, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) { int which = nwa*10000 + nwb; int nblocks = min(2048, 2 + (ncols - 1)); if (nrows <= 320) { const int bydim = 2; dim3 threads(32, bydim, 1); switch (which) { case 50001: __word2vecNegFilt<5,1,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; case 50005: __word2vecNegFilt<5,5,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; case 100005: __word2vecNegFilt<10,5,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; case 50010: __word2vecNegFilt<5,10,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; // case 150010: __word2vecNegFilt<15,10,15><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate); break; default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1; } } else { const int 
bydim = 5; dim3 threads(32, bydim, 1); switch (which) { case 50001: __word2vecNegFilt<5,1,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; case 50005: __word2vecNegFilt<5,5,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; case 100005: __word2vecNegFilt<10,5,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; case 50010: __word2vecNegFilt<5,10,bydim><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break; // case 150010: __word2vecNegFilt<15,10,15><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate); break; default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1; } } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; } int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) { dim3 threads(32, CDIM, 1); int nblocks = min(64, ncols); if (nrows <= 320) { switch(skip) { case 5 : __word2vecEvalPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 3 : __word2vecEvalPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 2 : __word2vecEvalPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1; } } else if (nrows <= 640) { switch(skip) { case 5 : __word2vecEvalPos<5, CDIM, 20/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 3 : __word2vecEvalPos<3, CDIM, 20/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 2 : __word2vecEvalPos<2, CDIM, 20/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1; } } else if (nrows <= 1280) { switch(skip) { case 5 : __word2vecEvalPos<5, CDIM, 40/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 3 : __word2vecEvalPos<3, CDIM, 40/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; case 2 : __word2vecEvalPos<2, CDIM, 40/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break; default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1; } } else { printf("word2vecEvalPos nrows too large %d\n", nrows); return 1; } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; } int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) { int which = nwa*10000 + nwb; int nblocks = min(2048, 2 + (ncols - 1)); if (nrows <= 320) { const int bydim = 2; dim3 threads(32, bydim, 1); switch (which) { case 50001: __word2vecEvalNeg<5,1,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 50005: __word2vecEvalNeg<5,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 100005: __word2vecEvalNeg<10,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 50010: __word2vecEvalNeg<5,10,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; // case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, Retval); break; default : printf("word2vecEvalNeg unsupport size combination %d %d\n", nwa, nwb); return 1; } } else { const int bydim = 5; dim3 threads(32, bydim, 1); switch (which) { case 50001: 
__word2vecEvalNeg<5,1,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 50005: __word2vecEvalNeg<5,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 100005: __word2vecEvalNeg<10,5,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; case 50010: __word2vecEvalNeg<5,10,bydim><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break; // case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, Retval); break; default : printf("word2vecEvalNeg unsupport size combination %d %d\n", nwa, nwb); return 1; } } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; } int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) { dim3 threads(32, BYDIMF, 1); int nblocks = min(4096, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001: __word2vecFwd<5,1,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break; case 50005: __word2vecFwd<5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break; case 100005: __word2vecFwd<10,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break; default : printf("word2vecFwd unsupport size combination %d %d\n", nwa, nwb); return 1; } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; } int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) { dim3 threads(32*BYDIMB, 1, 1); int nblocks = min(2048, 2 + (ncols - 1)); int which = nwa*10000 + nwb; switch (which) { case 50001: __word2vecBwd<5,1,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break; case 50005: __word2vecBwd<5,5,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break; case 100005: __word2vecBwd<10,5,10><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break; default : printf("word2vecBwd unsupport size combination %d %d\n", nwa, nwb); return 1; } cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; }
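/*
 * A minimal CPU sketch (hypothetical helper, not part of the kernels above) of the
 * per-pair quantity the word2vec kernels compute: the inner product of an A column
 * and a B column is pushed through a logistic function -- clamped to 1 when the
 * product exceeds 16, exactly as in the kernels -- and scaled into the SGD
 * coefficient lrate * (label - sigma). The positive kernels use label = 1 for
 * in-window pairs (giving lrate * (1 - sigma)); the negative kernels use label = 0
 * for sampled pairs (giving -lrate * sigma). Each column is then updated by
 * coefficient * (the other column), which the kernels do with atomicAdd.
 */
#include <math.h>

static inline float word2vec_pair_coeff(const float *a_col, const float *b_col,
                                        int nrows, float label, float lrate) {
  float v = 0.0f;
  for (int r = 0; r < nrows; ++r) {
    v += a_col[r] * b_col[r];                      // inner product of the two columns
  }
  float sigma;
  if (v > 16.0f) {
    sigma = 1.0f;                                  // same saturation as the kernels
  } else {
    float e = expf(v);
    sigma = e / (1.0f + e);                        // logistic function
  }
  return lrate * (label - sigma);                  // SGD coefficient for this pair
}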
#include <boost/array.hpp> #include <boost/random.hpp> #include "caffe/util/rng.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/pixel_feature_layer.hpp" #include "caffe/util/gpu_util.cuh" namespace caffe { template <typename Dtype> __global__ void PixelFeatureXYForwardGPU(const int nthreads, const int height, const int width, const Dtype pos_scale, const Dtype offset_h, const Dtype offset_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; int out_dim = 2; int top_offset_1 = ((n * out_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + 1) * spatial_dim + s); top_data[top_offset_1] = pos_scale * y + offset_h; top_data[top_offset_2] = pos_scale * x + offset_w; } } template <typename Dtype> __global__ void PixelFeatureXYZForwardGPU(const int nthreads, const Dtype* bottom_depth, const int height, const int width, const Dtype pos_scale, const Dtype offset_h, const Dtype offset_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; int out_dim = 3; int top_offset_1 = ((n * out_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + 1) * spatial_dim + s); int top_offset_3 = ((n * out_dim + 2) * spatial_dim + s); Dtype depth = bottom_depth[n*1*spatial_dim +s]; top_data[top_offset_1] = (pos_scale * y + offset_h -238.4439) * depth / 582.6910; top_data[top_offset_2] = (pos_scale * x + offset_w - 313.0448) * depth/ 582.6245; top_data[top_offset_3] = depth; } } template <typename Dtype> __global__ void PixelFeatureXYZXYForwardGPU(const int nthreads, const Dtype* bottom_depth, const int height, const int width, const Dtype pos_scale, const Dtype offset_h, const Dtype offset_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; int out_dim = 5; int top_offset_1 = ((n * out_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + 1) * spatial_dim + s); int top_offset_3 = ((n * out_dim + 2) * spatial_dim + s); int top_offset_4 = ((n * out_dim + 3) * spatial_dim + s); int top_offset_5 = ((n * out_dim + 4) * spatial_dim + s); Dtype depth = bottom_depth[n*1*spatial_dim +s]; top_data[top_offset_1] = 10 * (pos_scale * y + offset_h -238.4439) * depth / 582.6910; top_data[top_offset_2] = 10 * (pos_scale * x + offset_w - 313.0448) * depth/ 582.6245; top_data[top_offset_3] = 10 * depth; top_data[top_offset_4] = 0.0001 * (pos_scale * y + offset_h); top_data[top_offset_5] = 0.0001 * (pos_scale * x + offset_w); } } template <typename Dtype> __global__ void PixelFeatureXYRGBForwardGPU(const int nthreads, const Dtype* bottom_data, const int height, const int width, const int in_dim, const Dtype pos_scale, const Dtype color_scale, const Dtype offset_h, const Dtype offset_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; int out_dim = 2 + in_dim; int top_offset_1 = ((n * out_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + 1) * spatial_dim + s); top_data[top_offset_1] = pos_scale * y + offset_h; top_data[top_offset_2] = 
pos_scale * x + offset_w; for (unsigned int c = 0; c < in_dim; ++c) { int bottom_offset = ((n * in_dim + c) * spatial_dim + s); int top_offset = ((n * out_dim + c + 2) * spatial_dim + s); top_data[top_offset] = color_scale * bottom_data[bottom_offset]; } } } template <typename Dtype> __global__ void PixelFeatureRGBXYForwardGPU(const int nthreads, const Dtype* bottom_data, const int height, const int width, const int in_dim, const Dtype pos_scale, const Dtype color_scale, const Dtype offset_h, const Dtype offset_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; int out_dim = 2 + in_dim; for (unsigned int c = 0; c < in_dim; ++c) { int bottom_offset = ((n * in_dim + c) * spatial_dim + s); int top_offset = ((n * out_dim + c) * spatial_dim + s); top_data[top_offset] = color_scale * bottom_data[bottom_offset]; } int top_offset_1 = ((n * out_dim + in_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + in_dim + 1) * spatial_dim + s); top_data[top_offset_1] = pos_scale * y + offset_h; top_data[top_offset_2] = pos_scale * x + offset_w; } } template <typename Dtype> __global__ void PixelFeatureRGBForwardGPU(const int nthreads, const Dtype* bottom_data, const int height, const int width, const int in_dim, const Dtype color_scale, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; int out_dim = in_dim; for (unsigned int c = 0; c < in_dim; ++c) { int bottom_offset = ((n * in_dim + c) * spatial_dim + s); int top_offset = ((n * out_dim + c) * spatial_dim + s); top_data[top_offset] = color_scale * bottom_data[bottom_offset]; } } } template <typename Dtype> __global__ void PixelFeatureWARPPOSForwardGPU(const int nthreads, const int height, const int width, const Dtype pos_scale, const Dtype angle, const Dtype angle_sigma, const Dtype cosAngle, const Dtype sinAngle, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; const Dtype mid_y = height / 2; const Dtype mid_x = width / 2; const Dtype scaled_y = pos_scale * y; const Dtype scaled_x = pos_scale * x; int out_dim = 3; int top_offset_1 = ((n * out_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + 1) * spatial_dim + s); int top_offset_3 = ((n * out_dim + 2) * spatial_dim + s); top_data[top_offset_1] = sinAngle * (scaled_x - mid_x) + cosAngle * (scaled_y - mid_y) + mid_y; top_data[top_offset_2] = cosAngle * (scaled_x - mid_x) - sinAngle * (scaled_y - mid_y) + mid_x; top_data[top_offset_3] = angle / angle_sigma; } } template <typename Dtype> __global__ void PixelFeatureRANDROTATEForwardGPU(const int nthreads, const int height, const int width, const Dtype pos_scale, const Dtype cosAngle, const Dtype sinAngle, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int spatial_dim = height * width; const int n = index / spatial_dim; const int s = index % spatial_dim; const int y = s / width; const int x = s % width; const Dtype mid_y = height / 2; const Dtype mid_x = width / 2; const Dtype scaled_y = pos_scale * y; const Dtype scaled_x = pos_scale * x; int out_dim = 2; int top_offset_1 = ((n * out_dim) * spatial_dim + s); int top_offset_2 = ((n * out_dim + 1) * spatial_dim + s); top_data[top_offset_1] 
= sinAngle * (scaled_x - mid_x) + cosAngle * (scaled_y - mid_y) + mid_y; top_data[top_offset_2] = cosAngle * (scaled_x - mid_x) - sinAngle * (scaled_y - mid_y) + mid_x; } } /* Forward CPU function */ template <typename Dtype> void PixelFeatureLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const PixelFeatureParameter& parameter = this->layer_param_.pixel_feature_param(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_depth = NULL; switch (this->layer_param_.pixel_feature_param().type()) { case PixelFeatureParameter_Feature_POSITION: { if (!ran_once) { const Dtype scale = parameter.pos_scale(); const Dtype offset_h = parameter.offset_h(); const Dtype offset_w = parameter.offset_w(); const int nthreads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, height_, width_, scale, offset_h, offset_w, top_data); } break; } case PixelFeatureParameter_Feature_POSITION_AND_RGB: { const Dtype scale = parameter.pos_scale(); const Dtype color_scale = parameter.color_scale(); const Dtype offset_h = parameter.offset_h(); const Dtype offset_w = parameter.offset_w(); const int nthreads = num_ * height_ * width_; const int channels = bottom[0]->channels(); // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureXYRGBForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, height_, width_, channels, scale, color_scale, offset_h, offset_w, top_data); break; } case PixelFeatureParameter_Feature_RGB_AND_POSITION: { const Dtype scale = parameter.pos_scale(); const Dtype color_scale = parameter.color_scale(); const Dtype offset_h = parameter.offset_h(); const Dtype offset_w = parameter.offset_w(); const int nthreads = num_ * height_ * width_; const int channels = bottom[0]->channels(); // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureRGBXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, height_, width_, channels, scale, color_scale, offset_h, offset_w, top_data); break; } case PixelFeatureParameter_Feature_RGB: { const Dtype color_scale = parameter.color_scale(); const int nthreads = num_ * height_ * width_; const int channels = bottom[0]->channels(); // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureRGBForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, height_, width_, channels, color_scale, top_data); break; } case PixelFeatureParameter_Feature_RANDOM_POSITION: case PixelFeatureParameter_Feature_NUM_RANDOM_POSITION: { if (!ran_once) { const int input_height = bottom[0]->height(); const int input_width = bottom[0]->width(); const Dtype scale = parameter.pos_scale(); boost::uniform_real<Dtype> random_height(0, input_height); boost::variate_generator<caffe::rng_t*, boost::uniform_real<Dtype> > variate_height(caffe_rng(), random_height); boost::uniform_real<Dtype> random_width(0, input_width); boost::variate_generator<caffe::rng_t*, boost::uniform_real<Dtype> > variate_width(caffe_rng(), random_width); for (unsigned int n = 0; n < num_; ++n) { for (unsigned int y = 0; y < height_; ++y) { for (unsigned int x = 0; x < width_; ++x) { top_data[top[0]->offset(n, 0, y, x)] = scale * variate_height(); top_data[top[0]->offset(n, 1, y, x)] = scale * variate_width(); } } } } break; } case PixelFeatureParameter_Feature_WARPED_POSITION: { if (!ran_once) { 
const Dtype angle = -parameter.rotation_angle() / 180.0 * M_PI; const Dtype scale = parameter.pos_scale(); const Dtype angle_sigma = parameter.rotation_sigma(); const Dtype cosAngle = std::cos(angle); const Dtype sinAngle = std::sin(angle); const int nthreads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureWARPPOSForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, height_, width_, scale, angle, angle_sigma, cosAngle, sinAngle, top_data); } break; } case PixelFeatureParameter_Feature_RANDOM_ROTATE: { if (!ran_once) { boost::uniform_real<Dtype> random_angle(-10, 10); boost::variate_generator<caffe::rng_t*, boost::uniform_real<Dtype> > variate_angle(caffe_rng(), random_angle); const Dtype angle = variate_angle() / 180.0 * M_PI; const Dtype scale = 1; const Dtype cosAngle = std::cos(angle); const Dtype sinAngle = std::sin(angle); const int nthreads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureRANDROTATEForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, height_, width_, scale, cosAngle, sinAngle, top_data); } break; } case PixelFeatureParameter_Feature_POSITIONXYZ: { if (!ran_once) { const Dtype scale = parameter.pos_scale(); const Dtype offset_h = parameter.offset_h(); const Dtype offset_w = parameter.offset_w(); bottom_depth = bottom[1]->gpu_data(); const int nthreads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureXYZForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_depth, height_, width_, scale, offset_h, offset_w, top_data); } break; } case PixelFeatureParameter_Feature_POSITIONXYZXY: { if (!ran_once) { const Dtype scale = parameter.pos_scale(); const Dtype offset_h = parameter.offset_h(); const Dtype offset_w = parameter.offset_w(); bottom_depth = bottom[1]->gpu_data(); const int nthreads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) PixelFeatureXYZXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_depth, height_, width_, scale, offset_h, offset_w, top_data); } break; } default: LOG(FATAL) << "Undefined feature type of pixel feature layer"; } ran_once = true; } /* Backward GPU function */ template <typename Dtype> void PixelFeatureLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { } INSTANTIATE_LAYER_GPU_FUNCS(PixelFeatureLayer); } // namespace caffe
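/*
 * The XYZ feature kernels above back-project each pixel with a standard pinhole
 * model, (coordinate - principal point) * depth / focal length, using intrinsics
 * hardcoded in the kernels (principal point roughly (313.0448, 238.4439), focal
 * lengths roughly (582.6245, 582.6910)). The hypothetical host-side helper below is
 * not part of the layer; it only spells out the per-pixel mapping that
 * PixelFeatureXYZForwardGPU applies to its first three output channels.
 */
struct BackProjectedPoint { float X, Y, Z; };

static inline BackProjectedPoint back_project_pixel(float y, float x, float depth,
                                                    float pos_scale, float offset_h,
                                                    float offset_w) {
  const float cy = 238.4439f, fy = 582.6910f;      // vertical principal point / focal length
  const float cx = 313.0448f, fx = 582.6245f;      // horizontal principal point / focal length
  BackProjectedPoint p;
  p.X = (pos_scale * y + offset_h - cy) * depth / fy;   // first output channel
  p.Y = (pos_scale * x + offset_w - cx) * depth / fx;   // second output channel
  p.Z = depth;                                          // third output channel
  return p;
}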
\brief Unit tests for thread-level GEMM */ #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/default_thread_map_tensor_op.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/tensor_fill.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename TileIterator> __global__ void kernel_store_iterator( typename TileIterator::Params params, typename TileIterator::TensorRef ref, cutlass::MatrixCoord extent) { TileIterator iterator(params, ref.data(), extent, threadIdx.x, {0, 0}); typename TileIterator::Fragment fragment; CUTLASS_PRAGMA_NO_UNROLL for (int iter = 0; iter < TileIterator::ThreadMap::Count::kTile; ++iter) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < TileIterator::Fragment::kElements; ++i) { typename TileIterator::Element tidx(iter + 1); fragment[i] = tidx; } iterator.store(fragment); ++iterator; } } ///////////////////////////////////////////////////////////////////////////////////////////////// } } } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename Layout> static bool verify_footprint(cutlass::TensorView<T, Layout> view, cutlass::MatrixCoord extent) { for (int r = 0; r < view.extent().row(); ++r) { for (int c = 0; c < view.extent().column(); ++c) { cutlass::MatrixCoord coord{r, c}; bool within = coord < extent; if (within) { if (view.at(coord) == T(0)) { return false; } } else { if (view.at(coord) != T(0)) { return false; } } } } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, tensor_op_64x64x32_64x64x8) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 32; // // The following tests were used to develop the OutputTileOptimalThreadMap // metaprogram. The definitions in the disabled blocks of code in this and // the following tests are hand-written quantities. They are expected to // match what is defined in the ThreadMap. 
// #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 64>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 8, // row 1, // group 1, // cluster 1 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 1, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row 1, // group 1, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 8, // row 1, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{64, 64}; cutlass::MatrixCoord output_extent{62, 56}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("tensor_op_64x64x32_64x64x8.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, tensor_op_128x64x32_64x64x8) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 64; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 8, 2, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 128>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 8, // row 2, // group 1, // cluster 8 // tile >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row 2, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 64, // group 1, // cluster 1 // tile >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 8, // row 1, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using 
PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 64}; cutlass::MatrixCoord output_extent{125, 56}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("tensor_op_128x64x32_64x64x8.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, tensor_op_128x256x32_64x64x8) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<256, 8, 2, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 8, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<256, 128>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 256, // column 8, // row 2, // group 1, // cluster 8 // tile >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row 2, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 64, // group 1, // cluster 1 // tile >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 8, // row 1, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 256}; cutlass::MatrixCoord output_extent{123, 252}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("tensor_op_128x256x32_64x64x8.csv"); output << host_tensor.host_view(); } } 
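/////////////////////////////////////////////////////////////////////////////////////////////////

/*
 * Hypothetical debugging variant of verify_footprint (not part of the original tests).
 * It applies the same rule -- every element inside the output extent must have been
 * written (the kernel stores iter + 1, so a zero means a missed store) and every
 * element outside it must remain zero -- but reports the first offending coordinate
 * instead of silently returning false. It uses only the TensorView calls already used
 * above.
 */
#include <iostream>

template <typename T, typename Layout>
static bool verify_footprint_verbose(cutlass::TensorView<T, Layout> view,
                                     cutlass::MatrixCoord extent) {
  for (int r = 0; r < view.extent().row(); ++r) {
    for (int c = 0; c < view.extent().column(); ++c) {
      cutlass::MatrixCoord coord{r, c};
      bool within = coord < extent;
      bool ok = within ? (view.at(coord) != T(0)) : (view.at(coord) == T(0));
      if (!ok) {
        std::cout << "Footprint mismatch at (" << r << ", " << c << "): expected "
                  << (within ? "a nonzero store" : "an untouched zero") << std::endl;
        return false;
      }
    }
  }
  return true;
}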
///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, volta_tensor_op_64x64x32_64x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 32; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<64, 2, 4, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 8>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 2, // row 4, // group 1, // cluster 8 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 4, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 8, // group 1, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{64, 64}; cutlass::MatrixCoord output_extent{62, 56}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("volta_tensor_op_64x64x32_64x64x4.csv"); output << host_tensor.host_view(); } } /////////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, volta_tensor_op_64x128x32_32x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 128; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<128, 8>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 128, // column 2, // row 2, // group 2, // cluster 8 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 1, // group 2, // cluster 1 // iterations >; using Delta = 
cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 8, // group 32, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 4, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{64, 128}; cutlass::MatrixCoord output_extent{57, 124}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("volta_tensor_op_64x128x32_32x64x4.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, volta_tensor_op_128x256x32_64x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<256, 2, 4, 2, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<256, 16>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 256, // column 2, // row 4, // group 2, // cluster 8 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 2, // group 2, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 1, // row 16, // group 64, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 256}; cutlass::MatrixCoord output_extent{128, 256}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); 
ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed || true) { std::ofstream output("volta_tensor_op_128x256x32_64x64x4.csv"); output << host_tensor.host_view(); } } TEST(PredicatedTileIterator, volta_tensor_op_256x128x32_64x64x4) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 128 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 2, 4, 4, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{ 256, 128 }; cutlass::MatrixCoord output_extent{ 256, 128 }; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1, 1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator> <<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed || true) { std::ofstream output("volta_tensor_op_256x128x32_64x64x4.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, simt_32x64x8_32x64x1) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 32 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 32; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<64, 1, 4, 1, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<64, 4>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 64, // column 1, // row 4, // group 1, // cluster 1 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 2, // column 1, // row 4, // group 1, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 32, // column 1, // row 4, // group 16, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{32, 64}; cutlass::MatrixCoord output_extent{27, 63}; // // 
Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("simt_32x64x8_32x64x1.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(PredicatedTileIterator, simt_128x128x8_32x64x1) { using Layout = cutlass::layout::RowMajor; using Element = int; static int const kElementsPerAccess = 32 / cutlass::sizeof_bits<Element>::value; static int const kThreads = 256; #if 1 using ThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap < cutlass::epilogue::threadblock::OutputTileShape<128, 1, 4, 4, 1>, cutlass::epilogue::threadblock::OutputTileShape<1, 4, 2, 1, 8>, kThreads, kElementsPerAccess, cutlass::sizeof_bits<Element>::value >; #else using InternalThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< cutlass::layout::PitchLinearShape<128, 16>, kThreads, kElementsPerAccess >; using Shape = cutlass::epilogue::threadblock::OutputTileShape< 128, // column 1, // row 4, // group 4, // cluster 1 // iterations >; using Iterations = cutlass::epilogue::threadblock::OutputTileShape< 2, // column 1, // row 2, // group 4, // cluster 1 // iterations >; using Delta = cutlass::epilogue::threadblock::OutputTileShape< 32, // column 1, // row 8, // group 32, // cluster 1 // iterations >; using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 4, // row 2, // group 1, // cluster 8 // iterations >; using ThreadMap = cutlass::epilogue::threadblock::OutputTileThreadMap< InternalThreadMap, Shape, Iterations, Delta, Count >; #endif using PredicatedTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< ThreadMap, Element >; // // Initialize workspace // cutlass::MatrixCoord tensor_extent{128, 128}; cutlass::MatrixCoord output_extent{123, 121}; // // Configure parameters // cutlass::HostTensor<Element, Layout> host_tensor(tensor_extent); typename PredicatedTileIterator::Params iterator_params(host_tensor.layout()); host_tensor.sync_device(); // // Launch kernel // dim3 grid(1,1); dim3 block(kThreads, 1); test::epilogue::threadblock::kernel_store_iterator<PredicatedTileIterator><<< grid, block >>>( iterator_params, host_tensor.device_ref(), output_extent); cudaError_t result = cudaDeviceSynchronize(); ASSERT_EQ(result, cudaSuccess) << cudaGetErrorString(result); // // Verify results // host_tensor.sync_host(); bool passed = verify_footprint(host_tensor.host_view(), output_extent); EXPECT_TRUE(passed); if (!passed) { std::ofstream output("simt_128x128x8_32x64x1.csv"); output << host_tensor.host_view(); } } ///////////////////////////////////////////////////////////////////////////////////////////////////
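/*
 * The disabled #else branches above spell out hand-written Shape / Iterations / Delta /
 * Count quantities that OutputTileOptimalThreadMap is expected to reproduce. A possible
 * way to turn that expectation into a compile-time check is sketched below (hypothetical,
 * not part of the original tests). The expected values are copied from the hand-written
 * block of the 64x64 tensor-op test; the nested Count and Iterations types are the same
 * ones kernel_store_iterator and the iterators above already rely on.
 */
namespace {

using CheckedThreadMap = cutlass::epilogue::threadblock::OutputTileOptimalThreadMap<
    cutlass::epilogue::threadblock::OutputTileShape<64, 8, 1, 1, 1>,   // tile shape
    cutlass::epilogue::threadblock::OutputTileShape<1, 8, 1, 1, 8>,    // count shape
    32,                                                                // threads
    4,                                                                 // elements per access
    32>;                                                               // element size (bits)

static_assert(CheckedThreadMap::Count::kTile == 8,
              "Count::kTile should match the hand-written Count of the 64x64 test");
static_assert(CheckedThreadMap::Iterations::kColumn == 1 &&
              CheckedThreadMap::Iterations::kRow == 4,
              "Iterations should match the hand-written block of the 64x64 test");

} // namespace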
using pcl::gpu::people::trees::Node; using pcl::gpu::people::trees::Label; using pcl::gpu::people::trees::AttribLocation; using pcl::gpu::people::trees::Attrib; using pcl::gpu::people::trees::focal; using pcl::gpu::people::trees::NUM_LABELS; using namespace std; typedef unsigned int uint; #ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code #define __global__ #define __device__ #define __shared__ #define __forceinline__ #define __constant__ #endif namespace pcl { namespace device { texture<unsigned short, 2, cudaReadModeElementType> depthTex; texture<char4, 2, cudaReadModeElementType> multilabelTex; __constant__ int constFGThresh; template<bool testFG> __device__ __forceinline__ Label evaluateTree(int u, int v, float f, int treeHeight, int numNodes, const Node* nodes, const Label* leaves) { int depth = tex2D(depthTex, u, v); float scale = f / depth; // go down the tree int nid = 0; for(int nodeDepth = 0; nodeDepth < treeHeight; ++nodeDepth) { const Node node = nodes[nid]; const AttribLocation& loc = node.loc; int d1 = tex2D (depthTex, u + loc.du1 * scale, v + loc.dv1 * scale); int d2 = tex2D (depthTex, u + loc.du2 * scale, v + loc.dv2 * scale); if (testFG) { if( d1 - depth > constFGThresh ) d1 = numeric_limits<short>::max(); if( d2 - depth > constFGThresh ) d2 = numeric_limits<short>::max(); } int delta = d1-d2; bool test = delta > (int)node.thresh; if( test ) nid = nid*2+2; else nid = nid*2+1; } return leaves[nid-numNodes]; } /** \brief This is the CUDA kernel doing the actual RDF evaluation */ __global__ void KernelCUDA_runTree( const float f, const int treeHeight, const int numNodes, const Node* nodes, const Label* leaves, PtrStepSz<Label> labels) { int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u < labels.cols && v < labels.rows) labels.ptr(v)[u] = evaluateTree<false>(u, v, f, treeHeight, numNodes, nodes, leaves); } template<bool testFG> __global__ void KernelCUDA_MultiTreePass( const int treeId, const float f, const int treeHeight, const int numNodes, const Node* nodes, const Label* leaves, PtrStepSz<unsigned short> depth, PtrStepSz<char4> multiLabels) { int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if(u < multiLabels.cols && v < multiLabels.rows) { // This maps a char4 pointer on a char pointer char* pixel = (char*)&multiLabels.ptr(v)[u]; // This test assures that in next iterations the FGPreperation is taking into account see utils.cu if(depth.ptr(v)[u] == numeric_limits<unsigned short>::max()) pixel[treeId] = 29; // see label_common.h for Background label (=29) // TODO remove this hardcoded label with enum part_t label else pixel[treeId] = evaluateTree<testFG>(u, v, f, treeHeight, numNodes, nodes, leaves); } } /** \brief This function wraps the actual CUDA kernel doing the RDF evaluation */ void CUDA_runTree ( float focal, int treeHeight, int numNodes, const Node* nodes, const Label* leaves, const Depth& depth, Labels& labels ) { labels.create( depth.rows(), depth.cols() ); depthTex.addressMode[0] = cudaAddressModeClamp; TextureBinder binder(depth, depthTex); dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); KernelCUDA_runTree<<< grid, block >>>( focal, treeHeight, numNodes, nodes, leaves, labels); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaThreadSynchronize() ); } void CUDA_runMultiTreePass ( int FGThresh, int treeId, float focal, int treeHeight, int numNodes, const Node* nodes_device, const Label* 
leaves_device, const Depth& depth, MultiLabels& multilabel ) { //std::cout << "(I) : CUDA_runMultiTreePass() called" << std::endl; depthTex.addressMode[0] = cudaAddressModeClamp; TextureBinder binder(depth, depthTex); dim3 block(32, 8); dim3 grid( divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); if(FGThresh == std::numeric_limits<int>::max()) { KernelCUDA_MultiTreePass<false><<< grid, block >>>( treeId, focal, treeHeight, numNodes, nodes_device, leaves_device, depth, multilabel); } else { cudaSafeCall( cudaMemcpyToSymbol(constFGThresh, &FGThresh, sizeof(FGThresh)) ); KernelCUDA_MultiTreePass<true><<< grid, block >>>( treeId, focal, treeHeight, numNodes, nodes_device, leaves_device, depth, multilabel); } cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaThreadSynchronize() ); } /////////////////////////////////////////////////////////////////////////////////////// __device__ int findMaxId( int numBins, char* bins ) { // HACK .. not testing against numBins = 0 int maxId = 0; char maxVal = bins[0]; for(int i=1;i<numBins;++i) { char val = bins[i]; if( val > maxVal ) { maxId = i; maxVal = val; } } return maxId; } //this will find the max Index but return -1 if there is a tie __device__ int findMaxId_testTie(int numBins, char* bins) { int maxId = 0; int maxId_other = -1; char maxVal = bins[0]; for(int i=1;i<numBins;++i) { char val = bins[i]; if( val == maxVal ) { maxId_other = i; } if( val > maxVal ) { maxId = i; maxId_other = -1; maxVal = val; } } if( maxId_other != -1) return -1; else return maxId; } __global__ void KernelCUDA_MultiTreeMerge( const int numTrees, PtrStepSz<Label> labels ) { int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= labels.cols || v >= labels.rows) return; // reset the bins char bins[NUM_LABELS]; for(int li = 0; li < NUM_LABELS; ++li) bins[li] = 0; // find a consensus with the current trees { char4 pixlabels = tex2D(multilabelTex, u ,v); char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members for(int ti = 0; ti < numTrees; ++ti) bins[ bob[ti] ]++; } int res = findMaxId_testTie(NUM_LABELS, bins); // if this fails... find a consensus in a 1 neighbourhood if( res < 0 ) { int depth = tex2D(depthTex, u,v); for(int i = -1 ; i <= 1; ++i) { for(int j = -1; j <= 1; ++j) { int depth_neighbor = tex2D(depthTex,u+i,v+j); char4 labels_neighbor = tex2D(multilabelTex, u+i,v+j); char* bob = (char*)&labels_neighbor; //horrible but char4's have xyzw members //TODO: redo this part int weight = abs(depth-depth_neighbor) < 50 ? 
1:0; // 5cms for(int ti = 0; ti < numTrees; ++ti) bins[ bob[ti] ] += weight; } } res = findMaxId( NUM_LABELS, bins ); } labels.ptr(v)[u] = res; } /** \brief This merges the labels from all trees into a histogram of probabilities **/ __global__ void KernelCUDA_MultiTreeCreateProb (const int numTrees, PtrStepSz<prob_histogram> prob) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= prob.cols || v >= prob.rows ) return; char4 pixlabels = tex2D (multilabelTex, u ,v); char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members // Reset prob first, this should become NUM_LABELS for(int in = 0; in < NUM_LABELS; in++) { prob.ptr(v)[u].probs[in] = 0; } for(int ti = 0; ti < numTrees; ++ti) { // Each tree casts a vote to the probability // TODO: replace this with a histogram copy prob.ptr(v)[u].probs[bob[ti]] += 0.25; } } /** \brief This will merge the votes from the different trees into one final vote */ void CUDA_runMultiTreeMerge( int numTrees, const Depth& depth, const MultiLabels& multilabel, Labels& labels) { //std::cout << "(I) : CUDA_runMultiTreeMerge() called" << std::endl; labels.create(depth.rows(), depth.cols()); depthTex.addressMode[0] = cudaAddressModeClamp; TextureBinder binder(depth, depthTex); multilabelTex.addressMode[0] = cudaAddressModeClamp; TextureBinder mlabels_binder(multilabel, multilabelTex); dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); KernelCUDA_MultiTreeMerge<<< grid, block >>>( numTrees, labels ); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaThreadSynchronize() ); } /** \brief This will merge the votes from the different trees into one final vote, including probabilistic's */ void CUDA_runMultiTreeProb ( int numTrees, const Depth& depth, const MultiLabels& multilabel, Labels& labels, LabelProbability& probabilities) { std::cout << "(I) : CUDA_runMultiTreeProb() called" << std::endl; //labels.create(depth.rows(), depth.cols()); //depthTex.addressMode[0] = cudaAddressModeClamp; //TextureBinder binder(depth, depthTex); multilabelTex.addressMode[0] = cudaAddressModeClamp; TextureBinder mlabels_binder(multilabel, multilabelTex); dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); KernelCUDA_MultiTreeCreateProb<<< grid, block >>>( numTrees, probabilities); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaThreadSynchronize() ); } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// pcl::device::CUDATree::CUDATree (int treeHeight_arg, const vector<Node>& nodes, const vector<Label>& leaves) { treeHeight = treeHeight_arg; numNodes = (1 << treeHeight) - 1; assert (static_cast<int> (nodes.size ()) == numNodes ); assert (static_cast<int> (leaves.size ()) == (1 << treeHeight) ); nodes_device.upload(nodes); leaves_device.upload(leaves); } void pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap) { // TODO: is this assert needed if we only call process? //assert(!trees.empty()); // TODO is this iteration needed when we call multitreepass in the process step? 
/* if (trees.size() == 1) { const CUDATree& t = trees[0]; CUDA_runTree( focal, t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, lmap ); return; } */ process(dmap, lmap, std::numeric_limits<int>::max()); } void pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap, int FGThresh) { assert(!trees.empty()); unsigned int numTrees = static_cast<int> (trees.size ()); multilmap.create(dmap.rows(), dmap.cols()); // 1 - run the multi passes for( int ti = 0; ti < numTrees; ++ti ) { const CUDATree& t = trees[ti]; CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap ); } // 2 - run the merging assert( numTrees <= 4 ); device::CUDA_runMultiTreeMerge(numTrees, dmap, multilmap, lmap); } void pcl::device::MultiTreeLiveProc::processProb (const Depth& dmap, Labels& lmap, LabelProbability& prob, int FGThresh) { assert(!trees.empty()); unsigned int numTrees = static_cast<unsigned int> (trees.size ()); assert( numTrees <= 4 ); multilmap.create(dmap.rows(), dmap.cols()); // 1 - run the multi passes for( int ti = 0; ti < numTrees; ++ti ) { const CUDATree& t = trees[ti]; CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap ); } device::CUDA_runMultiTreeProb(numTrees, dmap, multilmap, lmap, prob); }
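// --- illustrative sketch (not part of the PCL file above) ---------------------
// A minimal host-side re-statement of the tie-aware argmax used by
// KernelCUDA_MultiTreeMerge: it returns the bin with the highest count, or -1
// when the maximum is shared by more than one bin, which is the condition that
// triggers the 3x3 neighbourhood fallback in the kernel. Function and variable
// names here are illustrative only.
#include <cstdio>

static int findMaxIdTestTie(int numBins, const char* bins)
{
    int maxId = 0;
    int otherId = -1;            // remembers another bin currently holding the max
    char maxVal = bins[0];
    for (int i = 1; i < numBins; ++i)
    {
        if (bins[i] == maxVal) otherId = i;                     // tie with current max
        if (bins[i] >  maxVal) { maxId = i; otherId = -1; maxVal = bins[i]; }
    }
    return (otherId != -1) ? -1 : maxId;
}

int main()
{
    const char unique[4] = {1, 3, 0, 2};   // single winner: bin 1
    const char tied[4]   = {2, 1, 2, 0};   // bins 0 and 2 tie
    std::printf("unique -> %d (expected 1)\n",  findMaxIdTestTie(4, unique));
    std::printf("tied   -> %d (expected -1)\n", findMaxIdTestTie(4, tied));
    return 0;
}
// -------------------------------------------------------------------------------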
#include "miner.h" extern "C" { #include "sph/sph_blake.h" } /* threads per block and nonces per thread */ #define TPB 768 #define NPT 192 #define maxResults 16 /* max count of found nonces in one call */ #define NBN 2 /* hash by cpu with blake 256 */ extern "C" void blake256_14roundHash(void *output, const void *input) { uchar hash[64]; sph_blake256_context ctx; sph_blake256_set_rounds(14); sph_blake256_init(&ctx); sph_blake256(&ctx, input, 80); sph_blake256_close(&ctx, hash); memcpy(output, hash, 32); } #include "cuda_helper.h" #ifdef __INTELLISENSE__ #define __byte_perm(x, y, b) x #endif __constant__ uint32_t _ALIGN(32) c_v[16]; __constant__ uint32_t _ALIGN(8) c_h[ 2]; __constant__ uint32_t c_m[ 3]; __constant__ uint32_t _ALIGN(32) c_x[90]; /* 8 adapters max */ static uint32_t *d_resNonce[MAX_GPUS]; static uint32_t *h_resNonce[MAX_GPUS]; #define GSn(a,b,c,d,x,y) { \ v[a]+= x + v[b]; \ v[d] = ROL16(v[d] ^ v[a]); \ v[c]+= v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 12); \ v[a]+= y + v[b]; \ v[d] = ROR8(v[d] ^ v[a]); \ v[c]+= v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 7); \ } #define GSn4(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2,a3,b3,c3,d3,x3,y3) { \ v[ a]+= x + v[ b]; v[a1]+= x1 + v[b1]; v[a2]+= x2 + v[b2]; v[a3]+= x3 + v[b3]; \ v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]); v[d3] = ROL16(v[d3] ^ v[a3]); \ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; v[c3]+= v[d3]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12); v[b3] = ROTR32(v[b3] ^ v[c3], 12); \ v[ a]+= y + v[ b]; v[a1]+= y1 + v[b1]; v[a2]+= y2 + v[b2]; v[a3]+= y3 + v[b3]; \ v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]); v[d3] = ROR8(v[d3] ^ v[a3]); \ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; v[c3]+= v[d3]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7); v[b3] = ROTR32(v[b3] ^ v[c3], 7); \ } #define GSn3(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2) { \ v[ a]+= x + v[ b]; v[a1]+= x1 + v[b1]; v[a2]+= x2 + v[b2];\ v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]);\ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2];\ v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12);\ v[ a]+= y + v[ b]; v[a1]+= y1 + v[b1]; v[a2]+= y2 + v[b2];\ v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]);\ v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2];\ v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7);\ } #define hostGS(a,b,c,d,x,y) { \ v[a] += (m[x] ^ z[y]) + v[b]; \ v[d] = ROTR32(v[d] ^ v[a], 16); \ v[c] += v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 12); \ v[a] += (m[y] ^ z[x]) + v[b]; \ v[d] = ROTR32(v[d] ^ v[a], 8); \ v[c] += v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 7); \ } __global__ __launch_bounds__(TPB,1) void blake256_14round_gpu_hash_16(const uint32_t threads,const uint32_t startNonce, uint32_t *resNonce){ uint64_t m3 = startNonce + blockDim.x * blockIdx.x + threadIdx.x; const uint32_t step = gridDim.x * blockDim.x; const uint64_t maxNonce = startNonce + threads; const uint32_t z[16] = { 0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89, 0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917 }; uint32_t v[16]; uint32_t m[16]; #pragma unroll for(int i=0;i<3;i++){ m[i] = c_m[i]; } m[13] = 1; m[15] = 640; const uint32_t m130 = 
z[12]^m[13]; const uint32_t m131 = m[13]^z[ 6]; const uint32_t m132 = z[15]^m[13]; const uint32_t m133 = z[ 3]^m[13]; const uint32_t m134 = z[ 4]^m[13]; const uint32_t m135 = z[14]^m[13]; const uint32_t m136 = m[13]^z[11]; const uint32_t m137 = m[13]^z[ 7]; const uint32_t m138 = m[13]^z[ 0]; volatile uint32_t m150 = z[14]^m[15]; volatile uint32_t m151 = z[ 9]^m[15]; volatile uint32_t m152 = m[15]^z[13]; volatile uint32_t m153 = m[15]^z[ 8]; const uint32_t m154 = z[10]^m[15]; const uint32_t m155 = z[ 1]^m[15]; const uint32_t m156 = m[15]^z[ 4]; const uint32_t m157 = z[ 6]^m[15]; const uint32_t m158 = m[15]^z[11]; const uint32_t h7 = c_h[ 0]; for( ; m3<maxNonce ; m3+=step){ m[ 3] = m3; #pragma unroll 16 for(int i=0;i<16;i++){ v[i] = c_v[i]; } uint32_t xors[16],i=0; //partial: 0{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } xors[ 5] = z[ 2]^m[ 3]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = z[15]; xors[12]=c_x[i++]; xors[13] = c_x[i++]; xors[14] = m130; xors[15] = m150; v[ 1]+= xors[ 5]; v[13] = ROR8(v[13] ^ v[1]); v[ 9]+= v[13]; v[ 5] = ROTR32(v[5] ^ v[9], 7); v[ 0]+= v[5]; v[15] = ROL16(v[15] ^ v[0]); v[10]+= v[15]; v[ 5] = ROTR32(v[5] ^ v[10], 12); v[ 0]+= xors[12] + v[5]; v[15] = ROR8(v[15] ^ v[0]); v[10]+= v[15]; v[ 5] = ROTR32(v[5] ^ v[10], 7); //i=3 GSn3(1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]); // 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } xors[ 0] = z[10]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m131; xors[ 8] = m[ 1]^z[12]; xors[ 9] = m[ 0]^z[ 2]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = m151; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 0]^m[ 2]; xors[14] = c_x[i++]; xors[15] = z[ 5]^m[ 3]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=12 // 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m152; xors[ 8] = c_x[i++]; xors[ 9] = m[ 3]^z[ 6]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = z[12]^m[ 0]; xors[ 6] = z[ 5]^m[ 2]; xors[ 7] = m132; xors[12] = z[10]; xors[13] = c_x[i++]; xors[14] = z[ 7]^m[ 1]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=21 // 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } xors[ 0] = c_x[i++]; xors[ 1] = m[ 3]^z[ 1]; xors[ 2] = m130; xors[ 3] = c_x[i++]; xors[ 8] = m[ 2]^z[ 6]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = m153; xors[ 4] = c_x[i++]; xors[ 5] = z[ 3]^m[ 1]; xors[ 6] = c_x[i++]; xors[ 7] = z[11]; xors[12] = c_x[i++]; xors[13] = c_x[i++]; xors[14] = z[ 4]^m[ 0]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=30 // 4{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = m[ 2]^z[ 4]; xors[ 3] = c_x[i++]; xors[ 8] = z[ 1]; 
xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = m[ 3]^z[13]; xors[ 4] = z[ 9]^m[ 0]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = m154; xors[12] = z[14]^m[ 1]; xors[13] = c_x[i++]; xors[14] = c_x[i++]; xors[15] = m133; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=39 // 5{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } xors[ 0] = m[ 2]^z[12]; xors[ 1] = c_x[i++]; xors[ 2] = m[ 0]^z[11]; xors[ 3] = c_x[i++]; xors[ 8] = c_x[i++]; xors[ 9] = c_x[i++]; xors[10] = m150; xors[11] = m[ 1]^z[ 9]; xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = z[ 8]^m[ 3]; xors[12] = m134; xors[13] = c_x[i++]; xors[14] = z[15]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=48 // 6{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } xors[ 0] = c_x[i++]; xors[ 1] = m[ 1]^z[15]; xors[ 2] = z[13]; xors[ 3] = c_x[i++]; xors[ 8] = m[ 0]^z[ 7]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = m155; xors[ 6] = m135; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 6]^m[ 3]; xors[14] = z[ 9]^m[ 2]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=57 // 7{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } xors[ 0] = m136; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m[ 3]^z[ 9]; xors[ 8] = c_x[i++]; xors[ 9] = m156; xors[10] = c_x[i++]; xors[11] = m[ 2]^z[10]; xors[ 4] = c_x[i++]; xors[ 5] = z[ 7]; xors[ 6] = z[12]^m[ 1]; xors[ 7] = c_x[i++]; xors[12] = z[ 5]^m[ 0]; xors[13] = c_x[i++]; xors[14] = c_x[i++]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=66 // 8{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } xors[ 0] = c_x[i++]; xors[ 1] = z[ 9]; xors[ 2] = c_x[i++]; xors[ 3] = m[ 0]^z[ 8]; xors[ 8] = c_x[i++]; xors[ 9] = m137; xors[10] = m[ 1]^z[ 4]; xors[11] = c_x[i++]; xors[ 4] = m157; xors[ 5] = c_x[i++]; xors[ 6] = z[11]^m[ 3]; xors[ 7] = c_x[i++]; xors[12] = z[12]^m[ 2]; xors[13] = c_x[i++]; xors[14] = c_x[i++]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=75 // 9{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m[ 1]^z[ 5]; xors[ 8] = m158; xors[ 9] = c_x[i++]; xors[10] = m[ 3]^z[12]; xors[11] = m138; xors[ 4] = z[10]^m[ 2]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 9]; xors[14] = c_x[i++]; xors[15] = z[13]^m[ 
0]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=85 // 0{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } xors[ 0] = m[ 0]^z[ 1]; xors[ 1] = m[ 2]^z[ 3]; xors[ 2] = c_x[i++]; xors[ 3] = c_x[i++]; xors[ 8] = c_x[i++]; xors[ 9] = c_x[ 0]; xors[10] = c_x[ 1]; xors[11] = z[15]; xors[ 4] = z[ 0]^m[ 1]; xors[ 5] = z[ 2]^m[ 3]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++]; xors[12] = c_x[ 2]; xors[13] = c_x[ 3]; xors[14] = m130; xors[15] = m150; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); //i=90 i=4; // 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } xors[ 0] = z[10]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m131; xors[ 8] = m[ 1]^z[12]; xors[ 9] = m[ 0]^z[ 2]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = m151; xors[ 7] = c_x[i++]; xors[12] = c_x[i++]; xors[13] = z[ 0]^m[ 2]; xors[14] = c_x[i++]; xors[15] = z[ 5]^m[ 3]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = m152; xors[ 8] = c_x[i++]; xors[ 9] = m[ 3]^z[ 6]; xors[10] = c_x[i++]; xors[11] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = z[12]^m[ 0]; xors[ 6] = z[ 5]^m[ 2]; xors[ 7] = m132; xors[12] = z[10]; xors[13] = c_x[i++]; xors[14] = z[ 7]^m[ 1]; xors[15] = c_x[i++]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } xors[ 0] = c_x[i++]; xors[ 1] = m[ 3]^z[ 1]; xors[ 2] = m130; xors[ 3] = c_x[i++]; xors[ 8] = m[ 2]^z[ 6]; i++; xors[10] = c_x[i++]; xors[ 4] = c_x[i++]; xors[ 5] = z[ 3]^m[ 1]; xors[ 6] = c_x[i++]; xors[ 7] = z[11]; xors[12] = c_x[i++]; xors[14] = z[ 4]^m[ 0]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); v[ 0]+= xors[ 8] + v[ 5]; v[ 2]+= xors[10] + v[ 7]; v[15] = ROL16(v[15] ^ v[ 0]); v[13] = ROL16(v[13] ^ v[ 2]); v[10]+= v[15]; v[ 8]+= v[13]; v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 0]+= xors[12] + v[ 5]; v[ 2]+= xors[14] + v[ 7]; v[15] = ROTR32(v[15] ^ v[ 0],1); v[13] = ROR8(v[13] ^ v[ 2]); v[ 8]+= v[13]; if(xor3x(v[ 7],h7,v[ 8])==v[15]){ uint32_t pos = atomicInc(&resNonce[0],0xffffffff)+1; if(pos<maxResults) resNonce[pos]=m[ 3]; return; } } } __host__ void blake256_14round_cpu_setBlock_16(const uint32_t *pend,const uint32_t *input) { const uint32_t z[16] = { 0x243F6A88UL, 0x85A308D3UL, 0x13198A2EUL, 0x03707344UL,0xA4093822UL, 0x299F31D0UL, 0x082EFA98UL, 0xEC4E6C89UL, 0x452821E6UL, 0x38D01377UL, 0xBE5466CFUL, 0x34E90C6CUL,0xC0AC29B7UL, 0xC97C50DDUL, 0x3F84D5B5UL, 0xB5470917UL }; sph_u32 _ALIGN(64) 
v[16]; sph_u32 _ALIGN(64) h[ 2]; sph_blake256_context ctx; sph_blake256_set_rounds(14); sph_blake256_init(&ctx); sph_blake256(&ctx, input, 64); v[ 0] = ctx.H[ 0]; v[ 1] = ctx.H[ 1]; v[ 2] = ctx.H[ 2]; v[ 3] = ctx.H[ 3]; v[ 4] = ctx.H[ 4]; v[ 5] = ctx.H[ 5]; v[ 6] = ctx.H[ 6]; v[ 7] = ctx.H[ 7]; v[ 8] = z[ 0]; v[ 9] = z[ 1]; v[10] = z[ 2]; v[11] = z[ 3]; v[12] = z[ 4] ^ 640; v[13] = z[ 5] ^ 640; v[14] = z[ 6]; v[15] = z[ 7]; const uint32_t m[16] = { pend[ 0], pend[ 1], pend[ 2], 0, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 640 }; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_m,m, 3*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); h[ 0] = v[ 7]; hostGS( 0, 4, 8,12, 0, 1); hostGS( 2, 6,10,14, 4, 5); hostGS( 3, 7,11,15, 6, 7); v[ 1]+= (m[ 2] ^ z[ 3]) + v[ 5]; v[13] = ROTR32(v[13] ^ v[ 1],16); v[ 9] += v[13]; v[ 5] = ROTR32(v[ 5] ^ v[ 9],12); v[ 1]+= v[ 5]; v[ 0]+= z[ 9]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_v, v,16*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); h[ 0] = SPH_ROTL32(h[ 0], 7); //align the rotation with v[7] v[15]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_h,h, 1*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); uint32_t x[90]; int i=0; x[i++] = m[10]^z[11]; x[i++] = m[12]^z[13]; x[i++] = m[ 9]^z[ 8]; x[i++] = z[10]^m[11]; x[i++] = m[ 4]^z[ 8]; x[i++] = m[ 9]^z[15]; x[i++] = m[11]^z[ 7]; x[i++] = m[ 5]^z[ 3]; x[i++] = z[14]^m[10]; x[i++] = z[ 4]^m[ 8]; x[i++] = z[13]^m[ 6]; x[i++] = z[ 1]^m[12]; x[i++] = z[11]^m[ 7]; x[i++] = m[11]^z[ 8]; x[i++] = m[12]^z[ 0]; x[i++] = m[ 5]^z[ 2]; x[i++] = m[10]^z[14]; x[i++] = m[ 7]^z[ 1]; x[i++] = m[ 9]^z[ 4]; x[i++] = z[11]^m[ 8]; x[i++] = z[ 3]^m[ 6]; x[i++] = z[ 9]^m[ 4]; x[i++] = m[ 7]^z[ 9]; x[i++] = m[11]^z[14]; x[i++] = m[ 5]^z[10]; x[i++] = m[ 4]^z[ 0]; x[i++] = z[ 7]^m[ 9]; x[i++] = z[13]^m[12]; x[i++] = z[ 2]^m[ 6]; x[i++] = z[ 5]^m[10]; x[i++] = z[15]^m[ 8]; x[i++] = m[ 9]^z[ 0]; x[i++] = m[ 5]^z[ 7]; x[i++] = m[10]^z[15]; x[i++] = m[11]^z[12]; x[i++] = m[ 6]^z[ 8]; x[i++] = z[ 5]^m[ 7]; x[i++] = z[ 2]^m[ 4]; x[i++] = z[11]^m[12]; x[i++] = z[ 6]^m[ 8]; x[i++] = m[ 6]^z[10]; x[i++] = m[ 8]^z[ 3]; x[i++] = m[ 4]^z[13]; x[i++] = m[ 7]^z[ 5]; x[i++] = z[ 2]^m[12]; x[i++] = z[ 6]^m[10]; x[i++] = z[ 0]^m[11]; x[i++] = z[ 7]^m[ 5]; x[i++] = z[ 1]^m[ 9]; x[i++] = m[12]^z[ 5]; x[i++] = m[ 4]^z[10]; x[i++] = m[ 6]^z[ 3]; x[i++] = m[ 9]^z[ 2]; x[i++] = m[ 8]^z[11]; x[i++] = z[12]^m[ 5]; x[i++] = z[ 4]^m[10]; x[i++] = z[ 0]^m[ 7]; x[i++] = z[ 8]^m[11]; x[i++] = m[ 7]^z[14]; x[i++] = m[12]^z[ 1]; x[i++] = m[ 5]^z[ 0]; x[i++] = m[ 8]^z[ 6]; x[i++] = z[13]^m[11]; x[i++] = z[ 3]^m[ 9]; x[i++] = z[15]^m[ 4]; x[i++] = z[ 8]^m[ 6]; x[i++] = z[ 2]^m[10]; x[i++] = m[ 6]^z[15]; x[i++] = m[11]^z[ 3]; x[i++] = m[12]^z[ 2]; x[i++] = m[10]^z[ 5]; x[i++] = z[14]^m[ 9]; x[i++] = z[ 0]^m[ 8]; x[i++] = z[13]^m[ 7]; x[i++] = z[ 1]^m[ 4]; x[i++] = z[10]^m[ 5]; x[i++] = m[10]^z[ 2]; x[i++] = m[ 8]^z[ 4]; x[i++] = m[ 7]^z[ 6]; x[i++] = m[ 9]^z[14]; x[i++] = z[ 8]^m[ 4]; x[i++] = z[ 7]^m[ 6]; x[i++] = z[ 1]^m[ 5]; x[i++] = z[15]^m[11]; x[i++] = z[ 3]^m[12]; x[i++] = m[ 4]^z[ 5]; x[i++] = m[ 6]^z[ 7]; x[i++] = m[ 8]^z[ 9]; x[i++] = z[ 4]^m[ 5]; x[i++] = z[ 6]^m[ 7]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_x, x, i*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); } /* ############################################################################################################################### */ static bool init[MAX_GPUS] = { 0 }; extern "C" int scanhash_blake256_14round(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done){ uint32_t *pdata = work->data; uint32_t *ptarget 
= work->target; const uint32_t first_nonce = pdata[19]; int dev_id = device_map[thr_id]; int intensity = (device_sm[dev_id] > 500) ? 31 : 30; uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity); if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB)); const dim3 block(TPB); int rc = 0; if (opt_benchmark) { ptarget[6] = swab32(0xff); } if (!init[thr_id]) { cudaSetDevice(dev_id); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage (linux) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); CUDA_LOG_ERROR(); } gpulog(LOG_INFO,thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], maxResults * sizeof(uint32_t))); h_resNonce[thr_id] = (uint32_t*) malloc(maxResults * sizeof(uint32_t)); if(h_resNonce[thr_id] == NULL){ gpulog(LOG_ERR,thr_id,"Host memory allocation failed"); exit(EXIT_FAILURE); } CUDA_LOG_ERROR(); init[thr_id] = true; } uint32_t _ALIGN(64) endiandata[20]; for (int k = 0; k < 19; k++) be32enc(&endiandata[k], pdata[k]); blake256_14round_cpu_setBlock_16(&pdata[16], endiandata); cudaMemset(d_resNonce[thr_id], 0x00, maxResults*sizeof(uint32_t)); do { // GPU HASH blake256_14round_gpu_hash_16<<<grid, block>>>(throughput, pdata[19], d_resNonce[thr_id]); cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost); if (h_resNonce[thr_id][0] != 0){ cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], maxResults*sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemset(d_resNonce[thr_id], 0x00, sizeof(uint32_t)); if(h_resNonce[thr_id][0]>(maxResults-1)){ gpulog(LOG_WARNING,dev_id,"Candidate flood: %u",h_resNonce[thr_id][0]); h_resNonce[thr_id][0]=maxResults-1; } uint32_t i; for(i=1;i<h_resNonce[thr_id][0]+1;i++){ uint32_t vhashcpu[8]; be32enc(&endiandata[19], h_resNonce[thr_id][i]); blake256_14roundHash(vhashcpu, endiandata); if (vhashcpu[ 6] <= ptarget[ 6] && fulltest(vhashcpu, ptarget)){ work_set_target_ratio(work, vhashcpu); *hashes_done = pdata[19] - first_nonce + throughput; pdata[19] = h_resNonce[thr_id][i]; rc = 1; //search for 2nd nonce for(uint32_t j=i+1;j<h_resNonce[thr_id][0]+1;j++){ be32enc(&endiandata[19], h_resNonce[thr_id][j]); blake256_14roundHash(vhashcpu, endiandata); if (vhashcpu[ 6] <= ptarget[ 6] && fulltest(vhashcpu, ptarget)) { pdata[21] = h_resNonce[thr_id][j]; // if(!opt_quiet) // gpulog(LOG_BLUE,dev_id,"Found 2nd nonce: %u/%08X - %u/%08X",i,pdata[19],j,pdata[21]); if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) { work_set_target_ratio(work, vhashcpu); xchg(pdata[21], pdata[19]); } rc = 2; break; } } return rc; } } } pdata[19] += throughput; } while (!work_restart[thr_id].restart && (uint64_t)max_nonce > (uint64_t)throughput + pdata[19]); *hashes_done = pdata[19] - first_nonce; return rc; } // cleanup extern "C" void free_blake256_14round(int thr_id) { if (!init[thr_id]) return; cudaDeviceSynchronize(); free(h_resNonce[thr_id]); cudaFree(d_resNonce[thr_id]); init[thr_id] = false; cudaDeviceSynchronize(); }
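// --- illustrative sketch (not part of the miner file above) -------------------
// Reference, unoptimized form of the BLAKE-256 quarter round that the GSn /
// GSn3 / GSn4 macros above unroll and interleave. ROL16 and ROR8 in the kernel
// are effectively 32-bit rotate-right by 16 and by 8 expressed via byte permutes,
// so this host version uses plain rotates. The data flow matches hostGS(); the
// demo inputs in main() are arbitrary and purely illustrative.
#include <cstdint>
#include <cstdio>

static inline uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32u - n)); }

// One G step on state words (a,b,c,d); mx_xor_zy and my_xor_zx are the message
// words already XOR-ed against the round constants, as in hostGS() above.
static void blakeG(uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d,
                   uint32_t mx_xor_zy, uint32_t my_xor_zx)
{
    a += mx_xor_zy + b;  d = rotr32(d ^ a, 16);
    c += d;              b = rotr32(b ^ c, 12);
    a += my_xor_zx + b;  d = rotr32(d ^ a, 8);
    c += d;              b = rotr32(b ^ c, 7);
}

int main()
{
    uint32_t a = 0x6A09E667u, b = 0xBB67AE85u, c = 0x3C6EF372u, d = 0xA54FF53Au;
    blakeG(a, b, c, d, 0x243F6A88u, 0x85A308D3u);   // arbitrary demo inputs
    std::printf("%08x %08x %08x %08x\n", a, b, c, d);
    return 0;
}
// -------------------------------------------------------------------------------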
//////////////////////////////////////////////////////////////////////////////// // Global types //////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <stdio.h> #include <cutil_inline.h> #include "realtype.h" #include "MonteCarlo_common.h" //////////////////////////////////////////////////////////////////////////////// // Helper reduction template // Please see the "reduction" CUDA SDK sample for more information //////////////////////////////////////////////////////////////////////////////// #include "MonteCarlo_reduction.cuh" //////////////////////////////////////////////////////////////////////////////// // Internal GPU-side data structures //////////////////////////////////////////////////////////////////////////////// #define MAX_OPTIONS 2048 //Preprocessed input option data typedef struct{ real S; real X; real MuByT; real VBySqrtT; } __TOptionData; static __device__ __constant__ __TOptionData d_OptionData[MAX_OPTIONS]; //GPU outputs before CPU postprocessing typedef struct{ real Expected; real Confidence; } __TOptionValue; static __device__ __TOptionValue d_CallValue[MAX_OPTIONS]; //////////////////////////////////////////////////////////////////////////////// // Overloaded shortcut payoff functions for different precision modes //////////////////////////////////////////////////////////////////////////////// #ifndef DOUBLE_PRECISION __device__ inline float endCallValue(float S, float X, float r, float MuByT, float VBySqrtT){ float callValue = S * __expf(MuByT + VBySqrtT * r) - X; return (callValue > 0) ? callValue : 0; } #else __device__ inline double endCallValue(double S, double X, double r, double MuByT, double VBySqrtT){ double callValue = S * exp(MuByT + VBySqrtT * r) - X; return (callValue > 0) ? callValue : 0; } #endif //////////////////////////////////////////////////////////////////////////////// // This kernel computes partial integrals over all paths using a multiple thread // blocks per option. It is used when a single thread block per option would not // be enough to keep the GPU busy. Execution of this kernel is followed by // MonteCarloReduce() to get the complete integral for each option. //////////////////////////////////////////////////////////////////////////////// #define THREAD_N 256 static __global__ void MonteCarloKernel( __TOptionValue *d_Buffer, float *d_Samples, int pathN ){ const int optionIndex = blockIdx.y; const real S = d_OptionData[optionIndex].S; const real X = d_OptionData[optionIndex].X; const real MuByT = d_OptionData[optionIndex].MuByT; const real VBySqrtT = d_OptionData[optionIndex].VBySqrtT; //One thread per partial integral const int iSum = blockIdx.x * blockDim.x + threadIdx.x; const int accumN = blockDim.x * gridDim.x; //Cycle through the entire samples array: //derive end stock price for each path //accumulate into intermediate global memory array __TOptionValue sumCall = {0, 0}; for(int i = iSum; i < pathN; i += accumN){ real r = d_Samples[i]; real callValue = endCallValue(S, X, r, MuByT, VBySqrtT); sumCall.Expected += callValue; sumCall.Confidence += callValue * callValue; } d_Buffer[optionIndex * accumN + iSum] = sumCall; } //////////////////////////////////////////////////////////////////////////////// // This kernel computes the integral over all paths using a single thread block // per option. It is fastest when the number of thread blocks times the work per // block is high enough to keep the GPU busy. 
When this is not the case, using // more blocks per option is faster, so we use MonteCarloKernel() plus // MonteCarloReduce() instead. //////////////////////////////////////////////////////////////////////////////// static __global__ void MonteCarloOneBlockPerOption( float *d_Samples, int pathN ){ const int SUM_N = THREAD_N; __shared__ real s_SumCall[SUM_N]; __shared__ real s_Sum2Call[SUM_N]; const int optionIndex = blockIdx.x; const real S = d_OptionData[optionIndex].S; const real X = d_OptionData[optionIndex].X; const real MuByT = d_OptionData[optionIndex].MuByT; const real VBySqrtT = d_OptionData[optionIndex].VBySqrtT; //Cycle through the entire samples array: //derive end stock price for each path //accumulate partial integrals into intermediate shared memory buffer for(int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x){ __TOptionValue sumCall = {0, 0}; for(int i = iSum; i < pathN; i += SUM_N){ real r = d_Samples[i]; real callValue = endCallValue(S, X, r, MuByT, VBySqrtT); sumCall.Expected += callValue; sumCall.Confidence += callValue * callValue; } s_SumCall[iSum] = sumCall.Expected; s_Sum2Call[iSum] = sumCall.Confidence; } //Reduce shared memory accumulators //and write final result to global memory sumReduce<real, SUM_N, THREAD_N>(s_SumCall, s_Sum2Call); if(threadIdx.x == 0){ __TOptionValue t = {s_SumCall[0], s_Sum2Call[0]}; d_CallValue[optionIndex] = t; } } //////////////////////////////////////////////////////////////////////////////// //Finalizing reduction for MonteCarloKernel1() //Final reduction for each per-option accumulator output //////////////////////////////////////////////////////////////////////////////// static __global__ void MonteCarloReduce( __TOptionValue *d_Buffer, int accumN ){ const int SUM_N = THREAD_N; __shared__ real s_SumCall[SUM_N]; __shared__ real s_Sum2Call[SUM_N]; __TOptionValue *d_SumBase = &d_Buffer[blockIdx.x * accumN]; //Reduce global memory accumulators array for current option //to a set fitting into shared memory for(int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x){ __TOptionValue sumCall = {0, 0}; for(int pos = iSum; pos < accumN; pos += SUM_N){ __TOptionValue t = d_SumBase[pos]; sumCall.Expected += t.Expected; sumCall.Confidence += t.Confidence; } s_SumCall[iSum] = sumCall.Expected; s_Sum2Call[iSum] = sumCall.Confidence; } //Reduce shared memory accumulators //and write final result to global memory sumReduce<real, SUM_N, THREAD_N>(s_SumCall, s_Sum2Call); if(threadIdx.x == 0){ __TOptionValue t = {s_SumCall[0], s_Sum2Call[0]}; d_CallValue[blockIdx.x] = t; } } //////////////////////////////////////////////////////////////////////////////// // Host-side interface to GPU Monte Carlo //////////////////////////////////////////////////////////////////////////////// //Allocate internal device memory static void initMonteCarloGPU(TOptionPlan *plan){ const int doMultiBlock = (plan->pathN / plan->optionCount) >= 8192; if(doMultiBlock){ const int blocksPerOption = (plan->optionCount < 16) ? 
64 : 16; const int accumN = THREAD_N * blocksPerOption; cutilSafeCall( cudaMalloc( (void **)&plan->d_Buffer, accumN * plan->optionCount * sizeof(__TOptionValue) ) ); } } //Deallocate internal device memory static void closeMonteCarloGPU(TOptionPlan *plan){ const int doMultiBlock = (plan->pathN / plan->optionCount) >= 8192; if(doMultiBlock) cutilSafeCall( cudaFree(plan->d_Buffer) ); } //Main computations static void MonteCarloGPU(TOptionPlan *plan){ __TOptionData h_OptionData[MAX_OPTIONS]; __TOptionValue h_CallValue[MAX_OPTIONS]; if(plan->optionCount <= 0 || plan->optionCount > MAX_OPTIONS){ printf("MonteCarloGPU(): bad option count.\n"); return; } for(int i = 0; i < plan->optionCount; i++){ const double T = plan->optionData[i].T; const double R = plan->optionData[i].R; const double V = plan->optionData[i].V; const double MuByT = (R - 0.5 * V * V) * T; const double VBySqrtT = V * sqrt(T); h_OptionData[i].S = (real)plan->optionData[i].S; h_OptionData[i].X = (real)plan->optionData[i].X; h_OptionData[i].MuByT = (real)MuByT; h_OptionData[i].VBySqrtT = (real)VBySqrtT; } cutilSafeCall( cudaMemcpyToSymbol( d_OptionData, h_OptionData, plan->optionCount * sizeof(__TOptionData) ) ); const int doMultiBlock = (plan->pathN / plan->optionCount) >= 8192; if(doMultiBlock){ const int blocksPerOption = (plan->optionCount < 16) ? 64 : 16; const int accumN = THREAD_N * blocksPerOption; const dim3 gridMain(blocksPerOption, plan->optionCount); MonteCarloKernel<<<gridMain, THREAD_N>>>( (__TOptionValue *)plan->d_Buffer, plan->d_Samples, plan->pathN ); cutilCheckMsg("MonteCarloKernel() execution failed\n"); MonteCarloReduce<<<plan->optionCount, THREAD_N>>>( (__TOptionValue *)plan->d_Buffer, accumN ); cutilCheckMsg("MonteCarloReduce() execution failed\n"); }else{ MonteCarloOneBlockPerOption<<<plan->optionCount, THREAD_N>>>( plan->d_Samples, plan->pathN ); cutilCheckMsg("MonteCarloOneBlockPerOption() execution failed\n"); } cutilSafeCall( cudaMemcpyFromSymbol( h_CallValue, d_CallValue, plan->optionCount * sizeof(__TOptionValue) ) ); for(int i = 0; i < plan->optionCount; i++){ const double RT = plan->optionData[i].R * plan->optionData[i].T; const double sum = h_CallValue[i].Expected; const double sum2 = h_CallValue[i].Confidence; const double pathN = plan->pathN; //Derive average from the total sum and discount by riskfree rate plan->callValue[i].Expected = (float)(exp(-RT) * sum / pathN); //Standart deviation double stdDev = sqrt((pathN * sum2 - sum * sum)/ (pathN * (pathN - 1))); //Confidence width; in 95% of all cases theoretical value lies within these borders plan->callValue[i].Confidence = (float)(exp(-RT) * 1.96 * stdDev / sqrt(pathN)); } } #endif
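// --- illustrative sketch (not part of the Monte Carlo file above) -------------
// Host-side sketch of the post-processing at the end of MonteCarloGPU(): given
// the per-option sum and sum-of-squares of payoff samples, recover the Monte
// Carlo estimate, the sample standard deviation and the 95% confidence
// half-width (1.96 * stdDev / sqrt(N)), all discounted by exp(-R*T). The toy
// payoff values below are made up for illustration only.
#include <cmath>
#include <cstdio>

int main()
{
    const double R = 0.05, T = 1.0;                 // risk-free rate, maturity
    const double payoffs[5] = {0.0, 2.5, 7.1, 0.0, 3.3};
    const double N = 5.0;

    double sum = 0.0, sum2 = 0.0;
    for (double p : payoffs) { sum += p; sum2 += p * p; }

    const double discount = std::exp(-R * T);
    const double expected = discount * sum / N;
    const double stdDev   = std::sqrt((N * sum2 - sum * sum) / (N * (N - 1.0)));
    const double conf95   = discount * 1.96 * stdDev / std::sqrt(N);

    std::printf("price ~ %.4f +/- %.4f (95%% confidence)\n", expected, conf95);
    return 0;
}
// -------------------------------------------------------------------------------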
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/color.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" namespace cv { namespace cuda { namespace device { template <typename T> struct Bayer2BGR; template <> struct Bayer2BGR<uchar> { uchar3 res0; uchar3 res1; uchar3 res2; uchar3 res3; __device__ void apply(const PtrStepSzb& src, int s_x, int s_y, bool blue_last, bool start_with_green) { uchar4 patch[3][3]; patch[0][1] = ((const uchar4*) src.ptr(s_y - 1))[s_x]; patch[0][0] = ((const uchar4*) src.ptr(s_y - 1))[::max(s_x - 1, 0)]; patch[0][2] = ((const uchar4*) src.ptr(s_y - 1))[::min(s_x + 1, ((src.cols + 3) >> 2) - 1)]; patch[1][1] = ((const uchar4*) src.ptr(s_y))[s_x]; patch[1][0] = ((const uchar4*) src.ptr(s_y))[::max(s_x - 1, 0)]; patch[1][2] = ((const uchar4*) src.ptr(s_y))[::min(s_x + 1, ((src.cols + 3) >> 2) - 1)]; patch[2][1] = ((const uchar4*) src.ptr(s_y + 1))[s_x]; patch[2][0] = ((const uchar4*) src.ptr(s_y + 1))[::max(s_x - 1, 0)]; patch[2][2] = ((const uchar4*) src.ptr(s_y + 1))[::min(s_x + 1, ((src.cols + 3) >> 2) - 1)]; if ((s_y & 1) ^ start_with_green) { const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1; const int t1 = (patch[1][0].w + patch[1][1].y + 1) >> 1; const int t2 = (patch[0][1].x + patch[0][1].z + patch[2][1].x + patch[2][1].z + 2) >> 2; const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][1].z + patch[2][1].y + 2) >> 2; const int t4 = (patch[0][1].z + patch[2][1].z + 1) >> 1; const int t5 = (patch[1][1].y + patch[1][1].w + 1) >> 1; const int t6 = (patch[0][1].z + patch[0][2].x + patch[2][1].z + patch[2][2].x + 2) >> 2; const int t7 = (patch[0][1].w + patch[1][1].z + patch[1][2].x + patch[2][1].w + 2) >> 2; if ((s_y & 1) ^ blue_last) { res0.x = t1; res0.y = patch[1][1].x; res0.z = t0; res1.x = patch[1][1].y; res1.y = t3; res1.z = t2; res2.x = t5; res2.y = patch[1][1].z; res2.z = t4; res3.x = patch[1][1].w; res3.y = t7; res3.z = t6; } else { res0.x = t0; res0.y = patch[1][1].x; res0.z = t1; res1.x = t2; res1.y = t3; res1.z = patch[1][1].y; res2.x = t4; res2.y = patch[1][1].z; res2.z = t5; res3.x = t6; res3.y = t7; res3.z = patch[1][1].w; } } else { const int t0 = (patch[0][0].w + patch[0][1].y + patch[2][0].w + patch[2][1].y + 2) >> 2; const int t1 = (patch[0][1].x + patch[1][0].w + patch[1][1].y + patch[2][1].x + 2) >> 2; const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1; const int t3 = (patch[1][1].x + patch[1][1].z + 1) >> 1; const int t4 = (patch[0][1].y + patch[0][1].w + patch[2][1].y + patch[2][1].w + 2) >> 2; const int t5 = (patch[0][1].z + patch[1][1].y + patch[1][1].w + patch[2][1].z + 2) >> 2; const int t6 = (patch[0][1].w + patch[2][1].w + 1) >> 1; const int t7 = (patch[1][1].z + patch[1][2].x + 1) >> 1; if ((s_y & 1) ^ blue_last) { res0.x = patch[1][1].x; res0.y = t1; res0.z = t0; res1.x = t3; res1.y = patch[1][1].y; res1.z = t2; res2.x = patch[1][1].z; res2.y = t5; res2.z = t4; res3.x = t7; res3.y = patch[1][1].w; res3.z = t6; } else { res0.x = t0; res0.y = t1; res0.z = patch[1][1].x; res1.x = t2; res1.y = patch[1][1].y; res1.z = t3; res2.x = t4; res2.y = t5; res2.z = patch[1][1].z; res3.x = t6; res3.y = patch[1][1].w; res3.z = t7; } } } }; template <typename D> __device__ __forceinline__ D toDst(const uchar3& pix); template <> __device__ __forceinline__ uchar toDst<uchar>(const uchar3& pix) { typename bgr_to_gray_traits<uchar>::functor_type f = bgr_to_gray_traits<uchar>::create_functor(); 
return f(pix); } template <> __device__ __forceinline__ uchar3 toDst<uchar3>(const uchar3& pix) { return pix; } template <> __device__ __forceinline__ uchar4 toDst<uchar4>(const uchar3& pix) { return make_uchar4(pix.x, pix.y, pix.z, 255); } template <typename D> __global__ void Bayer2BGR_8u(const PtrStepSzb src, PtrStep<D> dst, const bool blue_last, const bool start_with_green) { const int s_x = blockIdx.x * blockDim.x + threadIdx.x; int s_y = blockIdx.y * blockDim.y + threadIdx.y; if (s_y >= src.rows || (s_x << 2) >= src.cols) return; s_y = ::min(::max(s_y, 1), src.rows - 2); Bayer2BGR<uchar> bayer; bayer.apply(src, s_x, s_y, blue_last, start_with_green); const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 2; const int d_y = blockIdx.y * blockDim.y + threadIdx.y; dst(d_y, d_x) = toDst<D>(bayer.res0); if (d_x + 1 < src.cols) dst(d_y, d_x + 1) = toDst<D>(bayer.res1); if (d_x + 2 < src.cols) dst(d_y, d_x + 2) = toDst<D>(bayer.res2); if (d_x + 3 < src.cols) dst(d_y, d_x + 3) = toDst<D>(bayer.res3); } template <> struct Bayer2BGR<ushort> { ushort3 res0; ushort3 res1; __device__ void apply(const PtrStepSzb& src, int s_x, int s_y, bool blue_last, bool start_with_green) { ushort2 patch[3][3]; patch[0][1] = ((const ushort2*) src.ptr(s_y - 1))[s_x]; patch[0][0] = ((const ushort2*) src.ptr(s_y - 1))[::max(s_x - 1, 0)]; patch[0][2] = ((const ushort2*) src.ptr(s_y - 1))[::min(s_x + 1, ((src.cols + 1) >> 1) - 1)]; patch[1][1] = ((const ushort2*) src.ptr(s_y))[s_x]; patch[1][0] = ((const ushort2*) src.ptr(s_y))[::max(s_x - 1, 0)]; patch[1][2] = ((const ushort2*) src.ptr(s_y))[::min(s_x + 1, ((src.cols + 1) >> 1) - 1)]; patch[2][1] = ((const ushort2*) src.ptr(s_y + 1))[s_x]; patch[2][0] = ((const ushort2*) src.ptr(s_y + 1))[::max(s_x - 1, 0)]; patch[2][2] = ((const ushort2*) src.ptr(s_y + 1))[::min(s_x + 1, ((src.cols + 1) >> 1) - 1)]; if ((s_y & 1) ^ start_with_green) { const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1; const int t1 = (patch[1][0].y + patch[1][1].y + 1) >> 1; const int t2 = (patch[0][1].x + patch[0][2].x + patch[2][1].x + patch[2][2].x + 2) >> 2; const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][2].x + patch[2][1].y + 2) >> 2; if ((s_y & 1) ^ blue_last) { res0.x = t1; res0.y = patch[1][1].x; res0.z = t0; res1.x = patch[1][1].y; res1.y = t3; res1.z = t2; } else { res0.x = t0; res0.y = patch[1][1].x; res0.z = t1; res1.x = t2; res1.y = t3; res1.z = patch[1][1].y; } } else { const int t0 = (patch[0][0].y + patch[0][1].y + patch[2][0].y + patch[2][1].y + 2) >> 2; const int t1 = (patch[0][1].x + patch[1][0].y + patch[1][1].y + patch[2][1].x + 2) >> 2; const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1; const int t3 = (patch[1][1].x + patch[1][2].x + 1) >> 1; if ((s_y & 1) ^ blue_last) { res0.x = patch[1][1].x; res0.y = t1; res0.z = t0; res1.x = t3; res1.y = patch[1][1].y; res1.z = t2; } else { res0.x = t0; res0.y = t1; res0.z = patch[1][1].x; res1.x = t2; res1.y = patch[1][1].y; res1.z = t3; } } } }; template <typename D> __device__ __forceinline__ D toDst(const ushort3& pix); template <> __device__ __forceinline__ ushort toDst<ushort>(const ushort3& pix) { typename bgr_to_gray_traits<ushort>::functor_type f = bgr_to_gray_traits<ushort>::create_functor(); return f(pix); } template <> __device__ __forceinline__ ushort3 toDst<ushort3>(const ushort3& pix) { return pix; } template <> __device__ __forceinline__ ushort4 toDst<ushort4>(const ushort3& pix) { return make_ushort4(pix.x, pix.y, pix.z, numeric_limits<ushort>::max()); } template <typename D> __global__ void 
Bayer2BGR_16u(const PtrStepSzb src, PtrStep<D> dst, const bool blue_last, const bool start_with_green) { const int s_x = blockIdx.x * blockDim.x + threadIdx.x; int s_y = blockIdx.y * blockDim.y + threadIdx.y; if (s_y >= src.rows || (s_x << 1) >= src.cols) return; s_y = ::min(::max(s_y, 1), src.rows - 2); Bayer2BGR<ushort> bayer; bayer.apply(src, s_x, s_y, blue_last, start_with_green); const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int d_y = blockIdx.y * blockDim.y + threadIdx.y; dst(d_y, d_x) = toDst<D>(bayer.res0); if (d_x + 1 < src.cols) dst(d_y, d_x + 1) = toDst<D>(bayer.res1); } template <int cn> void Bayer2BGR_8u_gpu(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream) { typedef typename TypeVec<uchar, cn>::vec_type dst_t; const dim3 block(32, 8); const dim3 grid(divUp(src.cols, 4 * block.x), divUp(src.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_8u<dst_t>, cudaFuncCachePreferL1) ); Bayer2BGR_8u<dst_t><<<grid, block, 0, stream>>>(src, (PtrStepSz<dst_t>)dst, blue_last, start_with_green); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int cn> void Bayer2BGR_16u_gpu(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream) { typedef typename TypeVec<ushort, cn>::vec_type dst_t; const dim3 block(32, 8); const dim3 grid(divUp(src.cols, 2 * block.x), divUp(src.rows, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_16u<dst_t>, cudaFuncCachePreferL1) ); Bayer2BGR_16u<dst_t><<<grid, block, 0, stream>>>(src, (PtrStepSz<dst_t>)dst, blue_last, start_with_green); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void Bayer2BGR_8u_gpu<1>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream); template void Bayer2BGR_8u_gpu<3>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream); template void Bayer2BGR_8u_gpu<4>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream); template void Bayer2BGR_16u_gpu<1>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream); template void Bayer2BGR_16u_gpu<3>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream); template void Bayer2BGR_16u_gpu<4>(PtrStepSzb src, PtrStepSzb dst, bool blue_last, bool start_with_green, cudaStream_t stream); ////////////////////////////////////////////////////////////// // Bayer Demosaicing (Malvar, He, and Cutler) // // by Morgan McGuire, Williams College // http://graphics.cs.williams.edu/papers/BayerJGT09/#shaders // // ported to CUDA texture<uchar, cudaTextureType2D, cudaReadModeElementType> sourceTex(false, cudaFilterModePoint, cudaAddressModeClamp); template <typename DstType> __global__ void MHCdemosaic(PtrStepSz<DstType> dst, const int2 sourceOffset, const int2 firstRed) { const float kAx = -1.0f / 8.0f, kAy = -1.5f / 8.0f, kAz = 0.5f / 8.0f /*kAw = -1.0f / 8.0f*/; const float kBx = 2.0f / 8.0f, /*kBy = 0.0f / 8.0f,*/ /*kBz = 0.0f / 8.0f,*/ kBw = 4.0f / 8.0f ; const float kCx = 4.0f / 8.0f, kCy = 6.0f / 8.0f, kCz = 5.0f / 8.0f /*kCw = 5.0f / 8.0f*/; const float /*kDx = 0.0f / 8.0f,*/ kDy = 2.0f / 8.0f, kDz = -1.0f / 8.0f /*kDw = -1.0f / 8.0f*/; const float kEx = -1.0f / 8.0f, kEy = -1.5f / 8.0f, /*kEz = -1.0f / 8.0f,*/ kEw = 0.5f / 8.0f ; const float kFx = 2.0f / 8.0f, /*kFy = 0.0f / 8.0f,*/ kFz = 4.0f / 8.0f 
/*kFw = 0.0f / 8.0f*/; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dst.cols - 1 || y == 0 || y >= dst.rows - 1) return; int2 center; center.x = x + sourceOffset.x; center.y = y + sourceOffset.y; int4 xCoord; xCoord.x = center.x - 2; xCoord.y = center.x - 1; xCoord.z = center.x + 1; xCoord.w = center.x + 2; int4 yCoord; yCoord.x = center.y - 2; yCoord.y = center.y - 1; yCoord.z = center.y + 1; yCoord.w = center.y + 2; float C = tex2D(sourceTex, center.x, center.y); // ( 0, 0) float4 Dvec; Dvec.x = tex2D(sourceTex, xCoord.y, yCoord.y); // (-1,-1) Dvec.y = tex2D(sourceTex, xCoord.y, yCoord.z); // (-1, 1) Dvec.z = tex2D(sourceTex, xCoord.z, yCoord.y); // ( 1,-1) Dvec.w = tex2D(sourceTex, xCoord.z, yCoord.z); // ( 1, 1) float4 value; value.x = tex2D(sourceTex, center.x, yCoord.x); // ( 0,-2) A0 value.y = tex2D(sourceTex, center.x, yCoord.y); // ( 0,-1) B0 value.z = tex2D(sourceTex, xCoord.x, center.y); // (-2, 0) E0 value.w = tex2D(sourceTex, xCoord.y, center.y); // (-1, 0) F0 // (A0 + A1), (B0 + B1), (E0 + E1), (F0 + F1) value.x += tex2D(sourceTex, center.x, yCoord.w); // ( 0, 2) A1 value.y += tex2D(sourceTex, center.x, yCoord.z); // ( 0, 1) B1 value.z += tex2D(sourceTex, xCoord.w, center.y); // ( 2, 0) E1 value.w += tex2D(sourceTex, xCoord.z, center.y); // ( 1, 0) F1 float4 PATTERN; PATTERN.x = kCx * C; PATTERN.y = kCy * C; PATTERN.z = kCz * C; PATTERN.w = PATTERN.z; float D = Dvec.x + Dvec.y + Dvec.z + Dvec.w; // There are five filter patterns (identity, cross, checker, // theta, phi). Precompute the terms from all of them and then // use swizzles to assign to color channels. // // Channel Matches // x cross (e.g., EE G) // y checker (e.g., EE B) // z theta (e.g., EO R) // w phi (e.g., EO B) #define A value.x // A0 + A1 #define B value.y // B0 + B1 #define E value.z // E0 + E1 #define F value.w // F0 + F1 float3 temp; // PATTERN.yzw += (kD.yz * D).xyy; temp.x = kDy * D; temp.y = kDz * D; PATTERN.y += temp.x; PATTERN.z += temp.y; PATTERN.w += temp.y; // PATTERN += (kA.xyz * A).xyzx; temp.x = kAx * A; temp.y = kAy * A; temp.z = kAz * A; PATTERN.x += temp.x; PATTERN.y += temp.y; PATTERN.z += temp.z; PATTERN.w += temp.x; // PATTERN += (kE.xyw * E).xyxz; temp.x = kEx * E; temp.y = kEy * E; temp.z = kEw * E; PATTERN.x += temp.x; PATTERN.y += temp.y; PATTERN.z += temp.x; PATTERN.w += temp.z; // PATTERN.xw += kB.xw * B; PATTERN.x += kBx * B; PATTERN.w += kBw * B; // PATTERN.xz += kF.xz * F; PATTERN.x += kFx * F; PATTERN.z += kFz * F; // Determine which of four types of pixels we are on. int2 alternate; alternate.x = (x + firstRed.x) % 2; alternate.y = (y + firstRed.y) % 2; // in BGR sequence; uchar3 pixelColor = (alternate.y == 0) ? ((alternate.x == 0) ? make_uchar3(saturate_cast<uchar>(PATTERN.y), saturate_cast<uchar>(PATTERN.x), saturate_cast<uchar>(C)) : make_uchar3(saturate_cast<uchar>(PATTERN.w), saturate_cast<uchar>(C), saturate_cast<uchar>(PATTERN.z))) : ((alternate.x == 0) ? 
make_uchar3(saturate_cast<uchar>(PATTERN.z), saturate_cast<uchar>(C), saturate_cast<uchar>(PATTERN.w)) : make_uchar3(saturate_cast<uchar>(C), saturate_cast<uchar>(PATTERN.x), saturate_cast<uchar>(PATTERN.y))); dst(y, x) = toDst<DstType>(pixelColor); } template <int cn> void MHCdemosaic(PtrStepSzb src, int2 sourceOffset, PtrStepSzb dst, int2 firstRed, cudaStream_t stream) { typedef typename TypeVec<uchar, cn>::vec_type dst_t; const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); bindTexture(&sourceTex, src); MHCdemosaic<dst_t><<<grid, block, 0, stream>>>((PtrStepSz<dst_t>)dst, sourceOffset, firstRed); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void MHCdemosaic<1>(PtrStepSzb src, int2 sourceOffset, PtrStepSzb dst, int2 firstRed, cudaStream_t stream); template void MHCdemosaic<3>(PtrStepSzb src, int2 sourceOffset, PtrStepSzb dst, int2 firstRed, cudaStream_t stream); template void MHCdemosaic<4>(PtrStepSzb src, int2 sourceOffset, PtrStepSzb dst, int2 firstRed, cudaStream_t stream); }}} #endif /* CUDA_DISABLER */
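// --- illustrative sketch (not part of the OpenCV file above) ------------------
// Minimal sketch of the vectorised access pattern used by Bayer2BGR_8u above:
// each thread loads one uchar4 (four horizontally adjacent source pixels) and
// writes four output pixels, so the grid is sized with divUp(cols, 4*block.x).
// The per-pixel work here is a trivial grey-to-BGR copy, NOT the demosaic math;
// kernel and variable names are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

static int divUp(int a, int b) { return (a + b - 1) / b; }

__global__ void grayToBgrVec4(const uchar4* src, uchar3* dst, int cols, int rows)
{
    const int x4 = blockIdx.x * blockDim.x + threadIdx.x;   // index in uchar4 units
    const int y  = blockIdx.y * blockDim.y + threadIdx.y;
    if (y >= rows || x4 * 4 >= cols) return;

    const uchar4 p = src[y * ((cols + 3) / 4) + x4];         // one 32-bit load
    const unsigned char v[4] = { p.x, p.y, p.z, p.w };
    for (int k = 0; k < 4; ++k) {
        const int x = x4 * 4 + k;
        if (x < cols) dst[y * cols + x] = make_uchar3(v[k], v[k], v[k]);
    }
}

int main()
{
    const int cols = 10, rows = 2, cols4 = divUp(cols, 4);
    uchar4* d_src = nullptr;  uchar3* d_dst = nullptr;
    cudaMalloc(&d_src, rows * cols4 * sizeof(uchar4));
    cudaMalloc(&d_dst, rows * cols  * sizeof(uchar3));
    cudaMemset(d_src, 0x7F, rows * cols4 * sizeof(uchar4));

    const dim3 block(32, 8);
    const dim3 grid(divUp(cols, 4 * block.x), divUp(rows, block.y));
    grayToBgrVec4<<<grid, block>>>(d_src, d_dst, cols, rows);
    cudaDeviceSynchronize();
    std::printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_src);  cudaFree(d_dst);
    return 0;
}
// -------------------------------------------------------------------------------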
using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } int YoloLayerPlugin::initialize() { return 0; } void YoloLayerPlugin::terminate() { CHECK(cudaFree(mAnchors)); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. 
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() { } const char* YoloLayerPlugin::getPluginType() const { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const { return "1"; } void YoloLayerPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). __global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ 
float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(cudaMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { CalDetection_NewCoords<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { CalDetection<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int 
i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if (!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32); assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); } // namespace nvinfer1
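// ----------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the plugin above): how the raw
// output buffer produced by CalDetection()/CalDetection_NewCoords() could be
// filtered on the CPU. The Detection struct below is an assumption that only
// mirrors the fields the kernels write (bbox[4], det_confidence, class_id,
// class_confidence); the real Yolo::Detection definition lives elsewhere and
// may store class_id as an int. The threshold test is the host-side
// counterpart of the IGNORE_THRESH check commented out inside the kernels.
#include <cstring>
#include <vector>

namespace yolo_output_sketch {

struct Detection {           // assumed layout, all 32-bit fields
    float bbox[4];           // x, y (top-left), w, h, relative to input size
    float det_confidence;    // objectness score
    float class_id;          // arg-max class index (type is an assumption)
    float class_confidence;  // best class probability
};

// `output` holds one Detection per grid/anchor combination, already copied to
// the host; `count` = yolo_w * yolo_h * num_anchors * batch_size.
inline std::vector<Detection> KeepConfident(const float* output, int count,
                                            float conf_thresh) {
    constexpr int kStride = sizeof(Detection) / sizeof(float);
    std::vector<Detection> kept;
    for (int i = 0; i < count; ++i) {
        Detection det;
        std::memcpy(&det, output + i * kStride, sizeof(Detection));
        if (det.det_confidence * det.class_confidence >= conf_thresh)
            kept.push_back(det);
    }
    return kept;
}

}  // namespace yolo_output_sketch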
#include <cuml/matrix/kernelparams.h> #include <cuml/common/logger.hpp> #include <matrix/grammatrix.cuh> #include <matrix/kernelfactory.cuh> #include <raft/linalg/cublas_wrappers.h> #include <raft/linalg/gemv.h> #include <raft/linalg/unary_op.cuh> #include <raft/cudart_utils.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <iostream> #include <limits> #include <raft/cuda_utils.cuh> #include <string> #include <type_traits> #include <cuml/matrix/kernelparams.h> #include <raft/linalg/cublas_wrappers.h> #include <raft/linalg/gemv.h> #include <cuml/common/logger.hpp> #include <matrix/grammatrix.cuh> #include <matrix/kernelfactory.cuh> #include <raft/linalg/unary_op.cuh> #include "kernelcache.cuh" #include "smo_sets.cuh" #include "smoblocksolve.cuh" #include "workingset.cuh" #include "ws_util.cuh" #include "results.cuh" namespace ML { namespace SVM { /** * @brief Solve the quadratic optimization problem using two level decomposition * and Sequential Minimal Optimization (SMO). * * The general decomposition idea by Osuna is to choose q examples from all the * training examples, and solve the QP problem for this subset (discussed in * section 11.2 by Joachims [1]). SMO is the extreme case where we choose q=2. * * Here we follow [2] and [3] and use two level decomposition. First we set * q_1=1024, and solve the QP sub-problem for that (let's call it QP1). This is * the outer iteration, implemented in SmoSolver::Solve. * * To solve QP1, we use another decomposition, specifically the SMO (q_2 = 2), * which is implemented in SmoBlockSolve. * * References: * - [1] Joachims, T. Making large-scale support vector machine learning * practical. In B. Scholkopf, C. Burges, & A. Smola (Eds.), Advances in * kernel methods: Support vector machines. Cambridge, MA: MIT Press (1998) * - [2] J. Vanek et al. A GPU-Architecture Optimized Hierarchical Decomposition * Algorithm for Support VectorMachine Training, IEEE Transactions on * Parallel and Distributed Systems, vol 28, no 12, 3330, (2017) * - [3] Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs, Journal * of Machine Learning Research, 19, 1-5 (2018) */ template <typename math_t> class SmoSolver { public: SmoSolver(const raft::handle_t& handle, SvmParameter param, MLCommon::Matrix::GramMatrixBase<math_t>* kernel) : handle(handle), C(param.C), tol(param.tol), kernel(kernel), cache_size(param.cache_size), nochange_steps(param.nochange_steps), epsilon(param.epsilon), svmType(param.svmType), stream(handle.get_stream()), return_buff(2, stream), alpha(0, stream), C_vec(0, stream), delta_alpha(0, stream), f(0, stream), y_label(0, stream) { ML::Logger::get().setLevel(param.verbosity); } #define SMO_WS_SIZE 1024 /** * @brief Solve the quadratic optimization problem. * * The output arrays (dual_coefs, x_support, idx) will be allocated on the * device, they should be unallocated on entry. 
* * @param [in] x training vectors in column major format, size [n_rows x n_cols] * @param [in] n_rows number of rows (training vectors) * @param [in] n_cols number of columns (features) * @param [in] y labels (values +/-1), size [n_rows] * @param [in] sample_weight device array of sample weights (or nullptr if not * applicable) * @param [out] dual_coefs size [n_support] on exit * @param [out] n_support number of support vectors * @param [out] x_support support vectors in column major format, size [n_support, n_cols] * @param [out] idx the original training set indices of the support vectors, size [n_support] * @param [out] b scalar constant for the decision function * @param [in] max_outer_iter maximum number of outer iteration (default 100 * n_rows) * @param [in] max_inner_iter maximum number of inner iterations (default 10000) */ void Solve(math_t* x, int n_rows, int n_cols, math_t* y, const math_t* sample_weight, math_t** dual_coefs, int* n_support, math_t** x_support, int** idx, math_t* b, int max_outer_iter = -1, int max_inner_iter = 10000) { // Prepare data structures for SMO WorkingSet<math_t> ws(handle, stream, n_rows, SMO_WS_SIZE, svmType); n_ws = ws.GetSize(); Initialize(&y, sample_weight, n_rows, n_cols); KernelCache<math_t> cache(handle, x, n_rows, n_cols, n_ws, kernel, cache_size, svmType); // Init counters max_outer_iter = GetDefaultMaxIter(n_train, max_outer_iter); n_iter = 0; int n_inner_iter = 0; diff_prev = 0; n_small_diff = 0; n_increased_diff = 0; report_increased_diff = true; bool keep_going = true; while (n_iter < max_outer_iter && keep_going) { CUDA_CHECK(cudaMemsetAsync(delta_alpha.data(), 0, n_ws * sizeof(math_t), stream)); ws.Select(f.data(), alpha.data(), y, C_vec.data()); math_t* cacheTile = cache.GetTile(ws.GetIndices()); SmoBlockSolve<math_t, SMO_WS_SIZE><<<1, n_ws, 0, stream>>>(y, n_train, alpha.data(), n_ws, delta_alpha.data(), f.data(), cacheTile, cache.GetWsIndices(), C_vec.data(), tol, return_buff.data(), max_inner_iter, svmType, cache.GetColIdxMap()); CUDA_CHECK(cudaPeekAtLastError()); raft::update_host(host_return_buff, return_buff.data(), 2, stream); UpdateF(f.data(), n_rows, delta_alpha.data(), cache.GetUniqueSize(), cacheTile); CUDA_CHECK(cudaStreamSynchronize(stream)); math_t diff = host_return_buff[0]; keep_going = CheckStoppingCondition(diff); n_inner_iter += host_return_buff[1]; n_iter++; if (n_iter % 500 == 0) { CUML_LOG_DEBUG("SMO iteration %d, diff %lf", n_iter, (double)diff); } } CUML_LOG_DEBUG( "SMO solver finished after %d outer iterations, total inner" " iterations, and diff %lf", n_iter, n_inner_iter, diff_prev); Results<math_t> res(handle, x, y, n_rows, n_cols, C_vec.data(), svmType); res.Get(alpha.data(), f.data(), dual_coefs, n_support, idx, x_support, b); ReleaseBuffers(); } /** * @brief Update the f vector after a block solve step. * * \f[ f_i = f_i + \sum_{k\in WS} K_{i,k} * \Delta \alpha_k, \f] * where i = [0..n_train-1], WS is the set of workspace indices, * and \f$K_{i,k}\f$ is the kernel function evaluated for training vector x_i and workspace vector * x_k. 
* * @param f size [n_train] * @param n_rows * @param delta_alpha size [n_ws] * @param n_ws * @param cacheTile kernel function evaluated for the following set K[X,x_ws], * size [n_rows, n_ws] */ void UpdateF(math_t* f, int n_rows, const math_t* delta_alpha, int n_ws, const math_t* cacheTile) { // multipliers used in the equation : f = 1*cachtile * delta_alpha + 1*f math_t one = 1; CUBLAS_CHECK(raft::linalg::cublasgemv(handle.get_cublas_handle(), CUBLAS_OP_N, n_rows, n_ws, &one, cacheTile, n_rows, delta_alpha, 1, &one, f, 1, stream)); if (svmType == EPSILON_SVR) { // SVR has doubled the number of trainig vectors and we need to update // alpha for both batches individually CUBLAS_CHECK(raft::linalg::cublasgemv(handle.get_cublas_handle(), CUBLAS_OP_N, n_rows, n_ws, &one, cacheTile, n_rows, delta_alpha, 1, &one, f + n_rows, 1, stream)); } } /** @brief Initialize the problem to solve. * * Both SVC and SVR are solved as a classification problem. * The optimization target (W) does not appear directly in the SMO * formulation, only its derivative through f (optimality indicator vector): * \f[ f_i = y_i \frac{\partial W }{\partial \alpha_i}. \f] * * The f_i values are initialized here, and updated at every solver iteration * when alpha changes. The update step is the same for SVC and SVR, only the * init step differs. * * Additionally, we zero init the dual coefficients (alpha), and initialize * class labels for SVR. * * @param[inout] y on entry class labels or target values, * on exit device pointer to class labels * @param[in] sample_weight sample weights (can be nullptr, otherwise device * array of size [n_rows]) * @param[in] n_rows * @param[in] n_cols */ void Initialize(math_t** y, const math_t* sample_weight, int n_rows, int n_cols) { this->n_rows = n_rows; this->n_cols = n_cols; n_train = (svmType == EPSILON_SVR) ? n_rows * 2 : n_rows; ResizeBuffers(n_train, n_cols); // Zero init alpha CUDA_CHECK(cudaMemsetAsync(alpha.data(), 0, n_train * sizeof(math_t), stream)); InitPenalty(C_vec.data(), sample_weight, n_rows); // Init f (and also class labels for SVR) switch (svmType) { case C_SVC: SvcInit(*y); break; case EPSILON_SVR: SvrInit(*y, n_rows, y_label.data(), f.data()); // We return the pointer to the class labels (the target values are // not needed anymore, they are incorporated in f). 
*y = y_label.data(); break; default: THROW("SMO initialization not implemented SvmType=%d", svmType); } } void InitPenalty(math_t* C_vec, const math_t* sample_weight, int n_rows) { if (sample_weight == nullptr) { thrust::device_ptr<math_t> c_ptr(C_vec); thrust::fill(thrust::cuda::par.on(stream), c_ptr, c_ptr + n_train, C); } else { math_t C = this->C; raft::linalg::unaryOp( C_vec, sample_weight, n_rows, [C] __device__(math_t w) { return C * w; }, stream); if (n_train > n_rows) { // Set the same penalty parameter for the duplicate set of vectors raft::linalg::unaryOp( C_vec + n_rows, sample_weight, n_rows, [C] __device__(math_t w) { return C * w; }, stream); } } } /** @brief Initialize Support Vector Classification * * We would like to maximize the following quantity * \f[ W(\mathbf{\alpha}) = -\mathbf{\alpha}^T \mathbf{1} * + \frac{1}{2} \mathbf{\alpha}^T Q \mathbf{\alpha}, \f] * * We initialize f as: * \f[ f_i = y_i \frac{\partial W(\mathbf{\alpha})}{\partial \alpha_i} = * -y_i + y_j \alpha_j K(\mathbf{x}_i, \mathbf{x}_j) \f] * * @param [in] y device pointer of class labels size [n_rows] */ void SvcInit(const math_t* y) { raft::linalg::unaryOp( f.data(), y, n_rows, [] __device__(math_t y) { return -y; }, stream); } /** * @brief Initializes the solver for epsilon-SVR. * * For regression we are optimizing the following quantity * \f[ * W(\alpha^+, \alpha^-) = * \epsilon \sum_{i=1}^l (\alpha_i^+ + \alpha_i^-) * - \sum_{i=1}^l yc_i (\alpha_i^+ - \alpha_i^-) * + \frac{1}{2} \sum_{i,j=1}^l * (\alpha_i^+ - \alpha_i^-)(\alpha_j^+ - \alpha_j^-) K(\bm{x}_i, \bm{x}_j) * \f] * * Then \f$ f_i = y_i \frac{\partial W(\alpha}{\partial \alpha_i} \f$ * \f$ = yc_i*epsilon - yr_i \f$ * * Additionally we set class labels for the training vectors. * * References: * [1] B. Schölkopf et. al (1998): New support vector algorithms, * NeuroCOLT2 Technical Report Series, NC2-TR-1998-031, Section 6 * [2] A.J. Smola, B. Schölkopf (2004): A tutorial on support vector * regression, Statistics and Computing 14, 199–222 * [3] Orchel M. (2011) Support Vector Regression as a Classification Problem * with a Priori Knowledge in the Form of Detractors, * Man-Machine Interactions 2. Advances in Intelligent and Soft Computing, * vol 103 * * @param [in] yr device pointer with values for regression, size [n_rows] * @param [in] n_rows * @param [out] yc device pointer to classes associated to the dual * coefficients, size [n_rows*2] * @param [out] f device pointer f size [n_rows*2] */ void SvrInit(const math_t* yr, int n_rows, math_t* yc, math_t* f) { // Init class labels to [1, 1, 1, ..., -1, -1, -1, ...] 
thrust::device_ptr<math_t> yc_ptr(yc); thrust::constant_iterator<math_t> one(1); thrust::copy(thrust::cuda::par.on(stream), one, one + n_rows, yc_ptr); thrust::constant_iterator<math_t> minus_one(-1); thrust::copy(thrust::cuda::par.on(stream), minus_one, minus_one + n_rows, yc_ptr + n_rows); // f_i = epsilon - y_i, for i \in [0..n_rows-1] math_t epsilon = this->epsilon; raft::linalg::unaryOp( f, yr, n_rows, [epsilon] __device__(math_t y) { return epsilon - y; }, stream); // f_i = -epsilon - y_i, for i \in [n_rows..2*n_rows-1] raft::linalg::unaryOp( f + n_rows, yr, n_rows, [epsilon] __device__(math_t y) { return -epsilon - y; }, stream); } private: const raft::handle_t& handle; cudaStream_t stream; int n_rows = 0; //!< training data number of rows int n_cols = 0; //!< training data number of columns int n_ws = 0; //!< size of the working set int n_train = 0; //!< number of training vectors (including duplicates for SVR) // Buffers for the domain [n_train] rmm::device_uvector<math_t> alpha; //!< dual coordinates rmm::device_uvector<math_t> f; //!< optimality indicator vector rmm::device_uvector<math_t> y_label; //!< extra label for regression rmm::device_uvector<math_t> C_vec; //!< penalty parameter vector // Buffers for the working set [n_ws] //! change in alpha parameter during a blocksolve step rmm::device_uvector<math_t> delta_alpha; // Buffers to return some parameters from the kernel (iteration number, and // convergence information) rmm::device_uvector<math_t> return_buff; math_t host_return_buff[2]; math_t C; math_t tol; //!< tolerance for stopping condition math_t epsilon; //!< epsilon parameter for epsiolon-SVR MLCommon::Matrix::GramMatrixBase<math_t>* kernel; float cache_size; //!< size of kernel cache in MiB SvmType svmType; ///!< Type of the SVM problem to solve // Variables to track convergence of training math_t diff_prev; int n_small_diff; int nochange_steps; int n_increased_diff; int n_iter; bool report_increased_diff; bool CheckStoppingCondition(math_t diff) { if (diff > diff_prev * 1.5 && n_iter > 0) { // Ideally, diff should decrease monotonically. In practice we can have // small fluctuations (10% increase is not uncommon). Here we consider a // 50% increase in the diff value large enough to indicate a problem. // The 50% value is an educated guess that triggers the convergence debug // message for problematic use cases while avoids false alarms in many // other cases. n_increased_diff++; } if (report_increased_diff && n_iter > 100 && n_increased_diff > n_iter * 0.1) { CUML_LOG_DEBUG( "Solver is not converging monotonically. This might be caused by " "insufficient normalization of the feature columns. In that case " "MinMaxScaler((0,1)) could help. Alternatively, for nonlinear kernels, " "you can try to increase the gamma parameter. To limit execution time, " "you can also adjust the number of iterations using the max_iter " "parameter."); report_increased_diff = false; } bool keep_going = true; if (abs(diff - diff_prev) < 0.001 * tol) { n_small_diff++; } else { diff_prev = diff; n_small_diff = 0; } if (n_small_diff > nochange_steps) { CUML_LOG_ERROR( "SMO error: Stopping due to unchanged diff over %d" " consecutive steps", nochange_steps); keep_going = false; } if (diff < tol) keep_going = false; if (isnan(diff)) { std::string txt; if (std::is_same<float, math_t>::value) { txt += " This might be caused by floating point overflow. In such case using" " fp64 could help. 
Alternatively, try gamma='scale' kernel" " parameter."; } THROW("SMO error: NaN found during fitting.%s", txt.c_str()); } return keep_going; } /// Return the number of maximum iterations. int GetDefaultMaxIter(int n_train, int max_outer_iter) { if (max_outer_iter == -1) { max_outer_iter = n_train < std::numeric_limits<int>::max() / 100 ? n_train * 100 : std::numeric_limits<int>::max(); max_outer_iter = max(100000, max_outer_iter); } // else we have user defined iteration count which we do not change return max_outer_iter; } void ResizeBuffers(int n_train, int n_cols) { // This needs to know n_train, therefore it can be only called during solve alpha.resize(n_train, stream); C_vec.resize(n_train, stream); f.resize(n_train, stream); delta_alpha.resize(n_ws, stream); if (svmType == EPSILON_SVR) y_label.resize(n_train, stream); } void ReleaseBuffers() { alpha.release(); delta_alpha.release(); f.release(); y_label.release(); } }; }; // end namespace SVM }; // end namespace ML
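// ----------------------------------------------------------------------------
// Reference sketch (plain host C++, not part of the solver above) of the
// update that UpdateF() performs with a single cublasgemv call:
//     f_i <- f_i + sum_{k in WS} K(x_i, x_k) * delta_alpha_k .
// `cache_tile` is assumed to be column major with leading dimension n_rows,
// matching the CUBLAS_OP_N / lda = n_rows arguments used in the solver. The
// extra gemv issued for EPSILON_SVR simply repeats this update on the second
// half of f (offset by n_rows).
#include <vector>

template <typename math_t>
void update_f_reference(std::vector<math_t>& f,                  // size n_rows
                        const std::vector<math_t>& cache_tile,   // n_rows * n_ws
                        const std::vector<math_t>& delta_alpha,  // size n_ws
                        int n_rows, int n_ws)
{
  for (int k = 0; k < n_ws; ++k) {
    const math_t da = delta_alpha[k];
    if (da == math_t(0)) continue;  // unchanged alpha contributes nothing
    for (int i = 0; i < n_rows; ++i) {
      f[i] += cache_tile[k * n_rows + i] * da;  // column-major K[i, k]
    }
  }
}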
#include "cupoch/geometry/image.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/texture_phong_shader.h" #include "cupoch/visualization/utility/color_map.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { GLenum GetFormat(const geometry::Geometry &geometry) { auto it = gl_helper::texture_format_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.num_of_channels_); if (it == gl_helper::texture_format_map_.end()) { utility::LogWarning("Unknown texture format, abort!"); return false; } return it->second; } GLenum GetType(const geometry::Geometry &geometry) { auto it = gl_helper::texture_type_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.bytes_per_channel_); if (it == gl_helper::texture_type_map_.end()) { utility::LogWarning("Unknown texture type, abort!"); return false; } return it->second; } struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const Eigen::Vector3f *vertex_normals, const int *triangles, const Eigen::Vector3f *triangle_normals, const Eigen::Vector2f *triangle_uvs, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), triangle_uvs_(triangle_uvs), shade_option_(shade_option){}; const Eigen::Vector3f *vertices_; const Eigen::Vector3f *vertex_normals_; const int *triangles_; const Eigen::Vector3f *triangle_normals_; const Eigen::Vector2f *triangle_uvs_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator()(size_t k) const { int i = k / 3; int vi = triangles_[k]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]); } else { return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]); } } }; } // namespace bool TexturePhongShader::Compile() { if (CompileShaders(texture_phong_vertex_shader, NULL, texture_phong_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); vertex_uv_ = glGetAttribLocation(program_, "vertex_uv"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); light_position_world_ = glGetUniformLocation(program_, "light_position_world_4"); light_color_ = glGetUniformLocation(program_, "light_color_4"); light_diffuse_power_ = glGetUniformLocation(program_, "light_diffuse_power_4"); light_specular_power_ = glGetUniformLocation(program_, "light_specular_power_4"); light_specular_shininess_ = glGetUniformLocation(program_, "light_specular_shininess_4"); light_ambient_ = glGetUniformLocation(program_, "light_ambient"); diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture"); return true; } void TexturePhongShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. 
When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); const size_t num_texture_height = GetTextureHeight(geometry); const size_t num_texture_width = GetTextureWidth(geometry); glGenTextures(1, &diffuse_texture_buffer_); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glTexImage2D(GL_TEXTURE_2D, 0, format, num_texture_width, num_texture_height, 0, format, type, 0); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_uv_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &diffuse_texture_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); size_t texture_size = GetTextureSize(geometry); glBufferData(GL_PIXEL_UNPACK_BUFFER, texture_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[3], diffuse_texture_pixel_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector3f *raw_normals_ptr; Eigen::Vector2f *raw_uvs_ptr; uint8_t *raw_render_texture_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(4, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_render_texture_ptr, &n_bytes, cuda_graphics_resources_[3])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr); thrust::device_ptr<uint8_t> dev_texture_ptr = 
thrust::device_pointer_cast(raw_render_texture_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr, dev_texture_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(4); bound_ = true; return true; } bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetTextureHeight(geometry); const size_t num_data_width = GetTextureWidth(geometry); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glUniformMatrix4fv(light_position_world_, 1, GL_FALSE, light_position_world_data_.data()); glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data()); glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data()); glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data()); glUniform4fv(light_specular_shininess_, 1, light_specular_shininess_data_.data()); glUniform4fv(light_ambient_, 1, light_ambient_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, format, type, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(diffuse_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_uv_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); glDisableVertexAttribArray(vertex_uv_); return true; } void TexturePhongShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[2])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[3])); } glDeleteBuffers(1, &diffuse_texture_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); glDeleteBuffers(1, &vertex_uv_buffer_); glDeleteTextures(1, &diffuse_texture_buffer_); bound_ = false; } } void TexturePhongShader::SetLighting(const ViewControl &view, const RenderOption &option) { const auto &box = view.GetBoundingBox(); light_position_world_data_.setOnes(); light_color_data_.setOnes(); for (int i = 0; i < 4; i++) { light_position_world_data_.block<3, 1>(0, i) = box.GetCenter().cast<GLfloat>() + (float)box.GetMaxExtent() * ((float)option.light_position_relative_[i](0) * view.GetRight() + (float)option.light_position_relative_[i](1) * view.GetUp() + 
(float)option.light_position_relative_[i](2) * view.GetFront()); light_color_data_.block<3, 1>(0, i) = option.light_color_[i].cast<GLfloat>(); } if (option.light_on_) { light_diffuse_power_data_ = Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>(); light_specular_power_data_ = Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>(); light_specular_shininess_data_ = Eigen::Vector4f(option.light_specular_shininess_) .cast<GLfloat>(); light_ambient_data_.block<3, 1>(0, 0) = option.light_ambient_color_.cast<GLfloat>(); light_ambient_data_(3) = 1.0f; } else { light_diffuse_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_shininess_data_ = gl_helper::GLVector4f::Ones(); light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f); } } bool TexturePhongShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } SetLighting(view, option); return true; } bool TexturePhongShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals, thrust::device_ptr<Eigen::Vector2f> &uvs, thrust::device_ptr<uint8_t> &texture_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), thrust::raw_pointer_cast(mesh.triangle_uvs_.data()), option.mesh_shade_option_); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals, uvs), func); thrust::copy(mesh.texture_.data_.begin(), mesh.texture_.data_.end(), texture_image); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t TexturePhongShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } size_t TexturePhongShaderForTriangleMesh::GetTextureSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.data_.size(); } size_t 
TexturePhongShaderForTriangleMesh::GetTextureHeight( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.height_; } size_t TexturePhongShaderForTriangleMesh::GetTextureWidth( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.width_; }
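// ----------------------------------------------------------------------------
// Minimal sketch of the CUDA/OpenGL interop pattern used by BindGeometry()
// above, shown in isolation: register the GL buffer, map it, obtain a device
// pointer, write through thrust, then unmap. It assumes a current GL context
// and an existing vertex buffer object `vbo` holding `n` floats; error
// checking (cudaSafeCall in the code above) is omitted for brevity.
#include <cuda_gl_interop.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>

inline void FillGLBufferFromCuda(unsigned int vbo, size_t n) {
    cudaGraphicsResource_t resource = nullptr;
    // 1. Register the GL buffer once so CUDA can map it later.
    cudaGraphicsGLRegisterBuffer(&resource, vbo, cudaGraphicsMapFlagsNone);
    // 2. Map the resource; the returned pointer is valid only until unmap.
    cudaGraphicsMapResources(1, &resource);
    float* raw_ptr = nullptr;
    size_t n_bytes = 0;
    cudaGraphicsResourceGetMappedPointer((void**)&raw_ptr, &n_bytes, resource);
    // 3. Write to the buffer from the device (a kernel launch would also do).
    thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(raw_ptr);
    thrust::fill(dev_ptr, dev_ptr + n, 0.0f);
    // 4. Unmap before OpenGL touches the buffer; unregister when done for good.
    cudaGraphicsUnmapResources(1, &resource);
    cudaGraphicsUnregisterResource(resource);
}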
#include "cuda_pointer.h" //#include "cutil_inline.h" namespace regf4 { #include "dev_regf4.cu" #define NJBLOCK 32 #define NJBLOCK2 32 #define NJTHREAD 128 #define NPIPE 512 enum {NGB_PER_BLOCK = NGBlist::NGB_MAX}; /************************************************/ int ni_tot, ni_tot_round, ni_max; double h2max; double t_predictor, t_interaction; unsigned long long n_interaction; /************************************************/ std::vector<NGBlist > list; std::vector<Force > *force_result; std::vector<double> *gpot_result; float eps2; /***************** data vectors BEGIN *******************/ float dt_tick; // basic time unit unsigned int tsys; // system time in basic time units (32 bit mantissa) cuVector<dev_particle, false> ptcl_list; // particle list //cuVector<int, false> active_list; // list of active particles addr std::vector<int> active_list; cuVector<dev_predictor, false> predictor_list; // predictor list cuVector<float, false> gpot_list; // list of potential #if 0 std::vector<dev_particle> jp_ptcl_vector; std::vector<int > jp_addr_vector; #else std::map<int, dev_particle> jp_ptcl_map; #endif cuVector<dev_particle, false> jp_ptcl_list; // list of jp_particles cuVector<int , false> jp_addr_list; // list of jp_particles' addresses cuVector<unsigned int, false> ngb_list, ngb_reduced_list; cuVector<dev_force, false> force_list, force_reduced_list; cuVector<int2, false> ngb_offset_list; /***************** data vectors END *******************/ regf::regf(const int _ni_max, const double _h2max) {} regf::regf(const int _ni_max, const double _h2max, const double _dt_tick) { ni_max = ((_ni_max - 1)/(NJTHREAD*NJBLOCK) + 1)*NJBLOCK*NJTHREAD; ni_tot = _ni_max; ni_tot_round = ni_max; fprintf(stderr, " ni_tot= %d ni_tot_round= %d \n", ni_tot, ni_tot_round); h2max = _h2max; dt_tick = (float)_dt_tick; assert(dt_tick > 0.0); list.resize(ni_max); ///////////////// int ndevice; assert(cudaGetDeviceCount(&ndevice) == 0); fprintf(stderr, " regf::regf - %d CUDA devices found \n", ndevice); fprintf(stderr, " regf::regf - using last (%d) device \n", ndevice-1); assert(cudaSetDevice(ndevice-1) == cudaSuccess); ////////////// ptcl_list .allocate(ni_max); // active_list .allocate(ni_max); predictor_list.allocate(ni_max); gpot_list .allocate(ni_max); jp_addr_list .allocate(ni_max); jp_ptcl_list .allocate(ni_max); for (int i = 0; i < ni_max; i++) { ptcl_list[i].pos = dcuvec3(0.0f); ptcl_list[i].vel = fcuvec3(0.0f); ptcl_list[i].acc = fcuvec3(0.0f); ptcl_list[i].jrk = fcuvec3(0.0f); ptcl_list[i].mass = 0.0; ptcl_list[i].h2 = 0.0; ptcl_list[i].time = 0; ptcl_list[i].id = i; } ptcl_list.h2d(); ngb_list .allocate(NPIPE*NJBLOCK*NGB_PER_BLOCK); ngb_reduced_list.allocate(NPIPE*NGBlist::NGB_MAX); force_list .allocate(NPIPE*NJBLOCK2); force_reduced_list.allocate(NPIPE); ngb_offset_list.allocate(NPIPE*NJBLOCK); ////////////// t_predictor = t_interaction = 0.0; n_interaction = 0; ///////////////// CUDA_SAFE_CALL(cudaMemcpyToSymbol("regf4::DT_TICK", &dt_tick, sizeof(float))); } regf::~regf() { ptcl_list.cufree(); // active_list.cufree(); predictor_list.cufree(); gpot_list.cufree(); jp_addr_list.cufree(); jp_ptcl_list.cufree(); ngb_list.cufree(); ngb_reduced_list.cufree(); force_list.cufree(); force_reduced_list.cufree(); ngb_offset_list.cufree(); } int regf::resize(const int _ni) { assert(_ni <= ni_max); ni_tot = _ni; ni_tot_round = ((_ni - 1)/(NJTHREAD*NJBLOCK) + 1)*NJBLOCK*NJTHREAD; ptcl_list.d2h(); for (int i = ni_tot; i < ni_max; i++) { ptcl_list[i].pos = dcuvec3(0.0f); ptcl_list[i].vel = fcuvec3(0.0f); 
ptcl_list[i].acc = fcuvec3(0.0f); ptcl_list[i].jrk = fcuvec3(0.0f); ptcl_list[i].mass = 0.0; ptcl_list[i].h2 = 0.0; ptcl_list[i].time = 0; ptcl_list[i].id = -1; } ptcl_list.h2d(); return 0; } int regf::set_ti(const double ti) { tsys = (unsigned int)(ti/(double)dt_tick); return 0; } /********** jp-particles ********/ int regf::set_jp(const int iaddr, const Particle &pi) { assert(iaddr < ni_tot); #if 0 jp_ptcl_vector.push_back(pi); jp_addr_vector.push_back(iaddr); #else jp_ptcl_map[iaddr] = pi; #endif return 0; } int regf::set_jp(const std::vector<int> &ilist, const std::vector<Particle> &ptcl_list) { for (size_t i = 0; i < ilist.size(); i++) { assert(ilist[i] < ni_tot); #if 0 jp_ptcl_vector.push_back(ptcl_list[i]); jp_addr_vector.push_back(ilist[i]); #else jp_ptcl_map[ilist[i]] = ptcl_list[i]; #endif } return 0; } /********** NGB lists ********/ int regf::set_list(const int iaddr, const NGBlist &ngb) { assert(iaddr < ni_tot); list[iaddr] = ngb; return 0; } int regf::set_list(const std::vector<int> &ilist, const std::vector<NGBlist> &ngb_list) { for (size_t i = 0; i < ilist.size(); i++) { assert(ilist[i] < ni_tot); list[ilist[i]] = ngb_list[i]; } return 0; } int regf::get_list(const int iaddr, NGBlist &ngb) { assert(iaddr < ni_tot); ngb = list[iaddr]; return 0; } int regf::get_list(const std::vector<int>&ilist, std::vector<NGBlist> &ngb_list) { ngb_list.resize(ilist.size()); for (size_t i = 0; i < ilist.size(); i++) { assert(ilist[i] < ni_tot); ngb_list[i] = list[ilist[i]]; } return 0; } /************************************/ void copy_jp_to_device() { if (jp_ptcl_map.empty()) return; /********** copy new/updated jp-particles from the host to the device ****/ const int nj = jp_ptcl_map.size(); assert(nj <= ni_tot); std::vector<int> iaddr(nj); std::vector<dev_particle> iptcl(nj); int cnt = 0; for (std::map<int, dev_particle>::iterator it = jp_ptcl_map.begin(); it != jp_ptcl_map.end(); it++) { iaddr[cnt] = it->first; iptcl[cnt] = it->second; cnt++; } for (int j = 0; j < nj; j++) { iptcl[j].id = iaddr[j]; iptcl[j].iPad = j; } jp_ptcl_list.copy(&iptcl[0], nj); jp_ptcl_list.h2d(nj); jp_addr_list.copy(&iaddr[0], nj); jp_addr_list.h2d(nj); const int nthreads = 256; const int nblocks = (nj-1)/nthreads + 1; const dim3 block(nthreads, 1, 1); const dim3 grid (nblocks, 1, 1); dev_move_particles<<<grid, block>>>(nj, jp_addr_list, jp_ptcl_list, ptcl_list); jp_ptcl_map.clear(); } int regf::commit_changes() { copy_jp_to_device(); return 0; } /************************************/ int regf::force_first(const std::vector<int> &ilist, std::vector<Force> &force, const double eps2_in) { force_result = &force; eps2 = eps2_in; active_list = ilist; if (ilist.empty()) return 0; copy_jp_to_device(); /******** execute predictor kernel ********/ const int nthreads = 128; const int nblocks = (ni_tot_round - 1)/nthreads + 1; const dim3 block(nthreads, 1, 1); const dim3 grid (nblocks, 1, 1); dev_predict_ptcl<<<grid, block>>>(ni_tot_round, tsys, ptcl_list, predictor_list, gpot_list); return 0; } /************************************/ int regf::force_last() { const double t0 = get_wtime(); std::vector<Force> &force = *force_result; if (active_list.empty()) return 0; force.resize(active_list.size()); CUDA_SAFE_CALL(cudaMemcpyToSymbol("regf4::EPS2", &eps2, sizeof(float))); cuVector<int, false> dev_active_list; dev_active_list.allocate(active_list.size()); std::vector<int> active_idx(active_list.size()); for (size_t i = 0; i < active_list.size(); i++) active_idx[i] = i; int nev_tot = 0, nev_tot1 = 0; 
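// nev_tot counts GPU evaluations in full NPIPE-sized (padded) batches, while
// nev_tot1 counts the particles actually processed; both feed the optional
// GFLOP/s report at the end of this routine.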
while(!active_list.empty()) { std::vector<int > failed_idx; std::vector<float> new_h2_list; const int ni_active = active_list.size(); dev_active_list.init(&active_list[0], ni_active); dev_active_list.h2d(ni_active); active_list.clear(); for (int ix = 0; ix < ni_active; ix += NPIPE) { const int ni = std::min(NPIPE, ni_active - ix); nev_tot += NPIPE; nev_tot1 += ni; { const int nthreads = NJTHREAD; const int niblocks = (ni-1)/nthreads + 1; const int njblocks = NJBLOCK; const int nj_per_block = (ni_tot_round-1)/njblocks + 1; const dim3 block(nthreads, 1, 1); const dim3 grid (niblocks, njblocks, 1); #if 0 cudaThreadSetCacheConfig(cudaFuncCachePreferL1); #endif dev_regf<NJTHREAD, NJBLOCK, NJBLOCK2, NGB_PER_BLOCK><<<grid, block>>>( ni, nj_per_block, dev_active_list+ix, predictor_list, gpot_list, // dt_list force_list, ngb_list); #if 0 cudaThreadSynchronize(); cutilCheckMsg("dev_regf failed"); #endif } { const int nthreads = NJBLOCK2 > 64 ? NJBLOCK2 : 64; const dim3 block(nthreads, 1, 1); const dim3 grid (NPIPE, 1, 1); dev_reduce_regf<nthreads, NJBLOCK, NJBLOCK2><<<grid, block>>>( force_list, ngb_offset_list, force_reduced_list); force_reduced_list.d2h(ni); dev_reduce_ngb<nthreads, NJBLOCK, NGB_PER_BLOCK, NGBlist::NGB_MAX><<<grid, block>>>( ngb_offset_list, ngb_list, ngb_reduced_list); ngb_reduced_list.d2h(ni*NGBlist::NGB_MAX); } for (int ii = 0; ii < ni; ii++) { Force &fi = force[active_idx[ix+ii]]; fi.acc = dvec3(force_reduced_list[ii].accx.dbl, force_reduced_list[ii].accy.dbl, force_reduced_list[ii].accz.dbl); fi.jrk = dvec3(force_reduced_list[ii].jrk.x, force_reduced_list[ii].jrk.y, force_reduced_list[ii].jrk.z); fi.h2 = force_reduced_list[ii].h2; const int nj = force_reduced_list[ii].nngb; double fac = (std::pow(NGBMEAN*1.0/(nj+1), 2.0/3.0) + 1)*0.5; if (fac > 1.0) fac = std::min(fac, 1.25); else fac = std::max(fac, 1.0/1.25); fi.h2 *= fac; #if 0 fi.h2 = std::min(fi.h2, h2max); #endif const int addr = dev_active_list[ix+ii]; list[addr].clear(); if (nj >= NGBlist::NGB_MAX) // if (nj > NGBMAX || nj < NGBMIN) { fprintf(stderr, " ** WARNING ** new_ngbi >= NGBBUFF, addr= %d nj= %d\n", addr, nj); failed_idx .push_back(active_idx[ix+ii]); active_list.push_back(addr); new_h2_list.push_back(fi.h2); } else for (int jj = 0; jj < nj; jj++) list[addr].push_back(ngb_reduced_list[jj+ii*NGBlist::NGB_MAX]); } } if (!new_h2_list.empty()) { predictor_list.d2h(ni_tot); for (size_t i = 0; i < active_list.size(); i++) predictor_list[active_list[i]].h2 = new_h2_list[i]; predictor_list.h2d(ni_tot); new_h2_list.clear(); } active_idx = failed_idx; assert(active_idx.size() == active_list.size()); failed_idx.clear(); } #if 0 const double t1 = get_wtime(); const double dt = t1 -t0; fprintf(stderr, "done in %g sec %g/%g GFLOP/s [ %d %d/%d ]\n", dt, (double)ni_tot_round*(double)nev_tot1*60.0/1.0e9/dt, (double)ni_tot_round*(double)nev_tot*60.0/1.0e9/dt, ni_tot_round, nev_tot1, nev_tot); #endif return 0; } /************************************/ /**********************/ int regf::potential_first(std::vector<double> &gpot, const double eps2_pot_in) { gpot_result = &gpot; eps2 = eps2_pot_in; copy_jp_to_device(); return 0; } int regf::potential_last() { CUDA_SAFE_CALL(cudaMemcpyToSymbol("regf4::EPS2", &eps2, sizeof(float))); std::vector<double> &gpot = *gpot_result; const int nthreads = 256; const int nblocks = (ni_tot - 1)/nthreads + 1; const int nj = nblocks * nthreads; const dim3 block(nthreads, 1, 1); const dim3 grid (nblocks, 1, 1); dev_compute_potential<256><<<grid, block>>>(ni_tot, nj, ptcl_list, gpot_list); 
cudaThreadSynchronize(); cutilCheckMsg("dev_compute_potential failed"); gpot_list.d2h(ni_tot); gpot.resize(ni_tot); for (int i = 0; i < ni_tot; i++) gpot[i] = gpot_list[i]; return 0; } __host__ dev_particle::dev_particle(const regf4::Particle &p) { assert(sizeof(dev_particle) == 32*sizeof(int)); pos = dcuvec3(p.pos.x, p.pos.y, p.pos.z); vel = fcuvec3(p.vel.x, p.vel.y, p.vel.z); acc = fcuvec3(p.acc.x, p.acc.y, p.acc.z); jrk = fcuvec3(p.jrk.x, p.jrk.y, p.jrk.z); mass = p.mass; h2 = p.h2; time = (unsigned int)(p.time/(double)regf4::dt_tick); } };
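// ----------------------------------------------------------------------------
// Host-side sketch of the neighbour-sphere rescaling used in force_last()
// above: the squared search radius h2 is nudged so that the neighbour count
// drifts towards NGBMEAN, with the per-step correction clamped to roughly
// +/-25%. NGBMEAN itself is assumed to come from the surrounding headers; the
// value 32 in the example below is only a placeholder.
#include <algorithm>
#include <cmath>
#include <cstdio>

static double rescale_h2(double h2, int nj, double ngb_mean) {
    double fac = (std::pow(ngb_mean / (nj + 1), 2.0 / 3.0) + 1.0) * 0.5;
    if (fac > 1.0) fac = std::min(fac, 1.25);        // grow by at most 25%
    else           fac = std::max(fac, 1.0 / 1.25);  // shrink by at most 20%
    return h2 * fac;
}

// Example: with ngb_mean = 32, a particle that found only 7 neighbours grows
// its search sphere (factor clamped to 1.25), while one that found 200
// shrinks it (factor clamped to 0.8).
static void rescale_h2_example() {
    std::printf("nj=7  : h2 -> %g\n", rescale_h2(1.0, 7, 32.0));
    std::printf("nj=200: h2 -> %g\n", rescale_h2(1.0, 200, 32.0));
}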
using namespace cuHE; //#define checkEachRound const int circuitDepth = 25; int RC[768] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,1,0,0,1,1,0,0,0,1,1,0,0,1,1,0,0,0,1,0,1,0,0,0,1,0,1,1,1,0, 0,0,0,0,0,0,1,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0,1,1,0,1,0,0,0,1,0,0, 1,0,1,0,0,1,0,0,0,0,0,0,1,0,0,1,0,0,1,1,1,0,0,0,0,0,1,0,0,0,1,0, 0,0,1,0,1,0,0,1,1,0,0,1,1,1,1,1,0,0,1,1,0,0,0,1,1,1,0,1,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,1,0,1,1,1,0,1,1,1,1,1,0,1,0,1,0,0,1,1,0,0,0, 1,1,1,0,1,1,0,0,0,1,0,0,1,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,1,0,0,1, 0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,1,1,1,1,0,0,1,1,0, 0,0,1,1,1,0,0,0,1,1,0,1,0,0,0,0,0,0,0,1,0,0,1,1,0,1,1,1,0,1,1,1, 1,0,1,1,1,1,1,0,0,1,0,1,0,1,0,0,0,1,1,0,0,1,1,0,1,1,0,0,1,1,1,1, 0,0,1,1,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,0,1,1,0,0,0,1,1,0,1,1,0,0, 0,1,1,1,1,1,1,0,1,1,1,1,1,0,0,0,0,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0, 1,1,1,1,1,1,0,1,1,0,0,1,0,1,0,1,0,1,0,1,1,1,0,0,1,0,1,1,0,0,0,1, 1,0,0,0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,1,0,0,0,1, 1,1,1,1,0,0,0,1,1,0,1,0,1,1,0,0,0,1,0,0,0,0,1,1,1,0,1,0,1,0,1,0, 1,1,0,0,1,0,0,0,1,0,0,0,0,0,1,0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,1,1, 0,0,1,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,1,1,1,1,0,0,0,1,0,1,0,1,0,0, 0,1,1,0,0,1,0,0,1,0,1,0,0,1,0,1,0,0,0,1,0,0,0,1,1,0,0,1,0,1,0,1, 1,1,1,0,0,0,0,0,1,1,1,0,0,0,1,1,0,1,1,0,0,0,0,1,0,0,0,0,1,1,0,1, 1,1,0,1,0,0,1,1,1,0,1,1,0,1,0,1,1,0,1,0,0,0,1,1,1,0,0,1,1,0,0,1, 1,1,0,0,1,0,1,0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,1,1,1,0,0,1,1,0,0,1, 1,1,0,0,0,0,0,0,1,0,1,0,1,1,0,0,0,0,1,0,1,0,0,1,1,0,1,1,0,1,1,1, 1,1,0,0,1,0,0,1,0,1,1,1,1,1,0,0,0,1,0,1,0,0,0,0,1,1,0,1,1,1,0,1 }; Prince::Prince() { cudhs = NULL; level = 0; round = 0; } Prince::~Prince() { delete cudhs; } void Prince::heSetup() { multiGPUs(1); cudhs = new CuDHS(circuitDepth, 2, 16, 25, 25, 21845); } void Prince::setMessage(int *m) { for (int i=0; i<64; i++) { bits[i] = m[i]; cudhs->encrypt(bits[i], bits[i], 0); } } void Prince::setKeys(int *k0, int *k1) { for (int i=0; i<64; i++) { key0[i] = k0[i]; key1[i] = k1[i]; cudhs->encrypt(key0[i], key0[i], 0); cudhs->encrypt(key1[i], key1[i], 0); } } void Prince::run() { int A[64],B[64],C[64]; for (int i=0; i<64; i++) { A[i] = 0; B[i] = 1; C[i] = 0; } cout<<"---------- Precomputation ----------"<<endl; heSetup(); cout<<"---------- Set Message ----------"<<endl; setMessage(A); cout<<"---------- Set Keys ----------"<<endl; setKeys(B, C); cout<<"---------- PRINCE ENC ----------"<<endl; otimer ot; ot.start(); princeEncrypt(bits, key0, key1); ot.stop(); ot.show("Prince Encryption"); cout<<"---------- PRINCE DEC ----------"<<endl; ZZX res[64]; for (int i=0; i<64; i++) cudhs->decrypt(res[i], bits[i], circuitDepth-1); for (int i=0; i<64; i++) cout<< coeff(res[i], 0); cout<<endl; cout<<"1001111110110101000110010011010111111100001111011111010100100100"<<endl; } void Prince::check(int rd) { cout<<"Round: "<<rd<<endl; #ifdef checkEachRound for (int i=0; i<64; i++) { ZZX chk; cudhs->decrypt(chk, bits[i], level); cout<<coeff(chk, 0); } cout<<endl; switch(rd) { case(0): cout<<"0100010001000100010001000100010001000100010001000100010001000100"<<endl<<endl; break; case(1): cout<<"1100000111000101111011011001100010100001001010100010000110111011"<<endl<<endl; break; case(2): cout<<"0001010111110110111001101000001101110010101111110010111100010111"<<endl<<endl; break; case(3): cout<<"0000111110110100100011001100001110111010101010110110101101110000"<<endl<<endl; break; case(4): 
cout<<"0011100101111101011100000001110101111100101110010111101100111110"<<endl<<endl; break; case(5): cout<<"0110001011001101101111001000001100011000011100100010110011100011"<<endl<<endl; break; case(6): cout<<"1111000000000111010110001001011111100101001011001111001001101110"<<endl<<endl; break; case(7): cout<<"1110011011001010101100101000110011100000011101111010000011101110"<<endl<<endl; break; case(8): cout<<"1111010001000111111011111011110001100001100000001111011100100100"<<endl<<endl; break; case(9): cout<<"0010001000000000000010101101010110110101101010110110011110101111"<<endl<<endl; break; case(10): cout<<"0011101110000011000111101111001010110001111011110111111101111011"<<endl<<endl; break; case(11): cout<<"1010000011100110110011110111110111001010101111100101101000000111"<<endl<<endl; break; } #endif } void Prince::princeEncrypt(ZZX tar[64], ZZX k0[64], ZZX k1[64]) { round = 0; addRoundKey(tar, k0); addRoundKey(tar, k1); addRC(tar, round); cout<<"start sbox"<<endl; for (int i=0; i<5; i++) { round++; SBOX(tar); check(round-1); MixColumn(tar); addRC(tar, round); addRoundKey(tar, k1); } SBOX(tar); check(round); M_p(tar); INV_SBOX(tar); check(round+1); for (int i=0; i<5; i++) { round++; addRoundKey(tar, k1); addRC(tar, round); inv_MixColumn(tar); INV_SBOX(tar); check(round+1); } round++; addRC(tar, round); addRoundKey(tar, k1); KeyExpansion(k0); addRoundKey(tar, k0); for (int i=0; i<64; i++) cudhs->coeffReduce(tar[i], tar[i], circuitDepth-1); } void Prince::SBOX(ZZX in[64]) { for (int i=0; i<64; i++) cudhs->coeffReduce(in[i], in[i], level); #pragma omp parallel num_threads(numGPUs()) { #pragma omp for nowait for (int i=0; i<16; i++) _sbox(&in[4*i], omp_get_thread_num(), level); } #pragma omp barrier level += 2; }; void Prince::_sbox(ZZX in[4], int dev, int lvl) { CuCtxt a, b, c, d; CuCtxt ab, ac, ad, bc, bd, cd; CuCtxt abd, acd, bcd, abc; CuCtxt out[4]; /////////////////////////////////// a.setLevel(lvl, dev, in[0]); b.setLevel(lvl, dev, in[1]); c.setLevel(lvl, dev, in[2]); d.setLevel(lvl, dev, in[3]); a.x2n(); b.x2n(); c.x2n(); d.x2n(); // mul cAnd(ab, a, b); cAnd(ac, a, c); cAnd(ad, a, d); cAnd(bc, b, c); cAnd(bd, b, d); cAnd(cd, c, d); // relin ab.relin(); cd.relin(); // modswitch ab.modSwitch(); ac.modSwitch(); ad.modSwitch(); bc.modSwitch(); bd.modSwitch(); cd.modSwitch(); a.modSwitch(); b.modSwitch(); c.modSwitch(); d.modSwitch(); /////////////////////////////////// // level up // add // out[0] = a+c+ab+bc+1; cXor(out[0], a, c); cXor(out[0], out[0], ab); cXor(out[0], out[0], bc); cNot(out[0], out[0]); // out[1] = a+d+ac+ad+cd; cXor(out[1], a, d); cXor(out[1], out[1], ac); cXor(out[1], out[1], ad); cXor(out[1], out[1], cd); // out[2] = ac+bc+bd+1; cXor(out[2], ac, bc); cXor(out[2], out[2], bd); cNot(out[2], out[2]); // out[3] = a+b+ab+ad+bc+cd+1; cXor(out[3], a, b); cXor(out[3], out[3], ab); cXor(out[3], out[3], ad); cXor(out[3], out[3], bc); cXor(out[3], out[3], cd); cNot(out[3], out[3]); // mul a.x2n(); b.x2n(); c.x2n(); d.x2n(); ab.x2n(); cd.x2n(); cAnd(abd, ab, d); cAnd(acd, cd, a); cAnd(bcd, cd, b); cAnd(abc, ab, c); abd.x2c(); acd.x2c(); bcd.x2c(); abc.x2c(); // add // out[0] += abd+acd+bcd; cXor(out[0], out[0], abd); cXor(out[0], out[0], acd); cXor(out[0], out[0], bcd); // out[1] += abc+acd; cXor(out[1], out[1], abc); cXor(out[1], out[1], acd); // out[2] += abc+bcd; cXor(out[2], out[2], abc); cXor(out[2], out[2], bcd); // out[3] += bcd; cXor(out[3], out[3], bcd); for (int i=0; i<4; i++) { out[i].relin(); out[i].modSwitch(); } // delete a.~CuCtxt(); b.~CuCtxt(); c.~CuCtxt(); 
d.~CuCtxt(); ab.~CuCtxt(); ac.~CuCtxt(); ad.~CuCtxt(); bc.~CuCtxt(); bd.~CuCtxt(); cd.~CuCtxt(); abd.~CuCtxt(); acd.~CuCtxt(); bcd.~CuCtxt(); abc.~CuCtxt(); /////////////////////////////////// // level up // output for (int i=0; i<4; i++) { out[i].x2z(); in[i] = out[i].zRep(); out[i].~CuCtxt(); } return; } void Prince::INV_SBOX(ZZX in[64]) { for (int i=0; i<64; i++) cudhs->coeffReduce(in[i], in[i], level); #pragma omp parallel num_threads(numGPUs()) { #pragma omp for nowait for (int i=0; i<16; i++) _inv_sbox(&in[4*i], omp_get_thread_num(), level); } #pragma omp barrier level += 2; }; void Prince::_inv_sbox(ZZX in[4], int dev, int lvl) { CuCtxt a, b, c, d; CuCtxt ab, ac, ad, bc, bd, cd; CuCtxt abd, acd, bcd, abc; CuCtxt out[4]; /////////////////////////////////// // input a.setLevel(lvl, dev, in[0]); b.setLevel(lvl, dev, in[1]); c.setLevel(lvl, dev, in[2]); d.setLevel(lvl, dev, in[3]); a.x2n(); b.x2n(); c.x2n(); d.x2n(); // mul cAnd(ab, a, b); cAnd(ac, a, c); cAnd(ad, a, d); cAnd(bc, b, c); cAnd(bd, b, d); cAnd(cd, c, d); // relin ab.relin(); cd.relin(); // modswitch ab.modSwitch(); ac.modSwitch(); ad.modSwitch(); bc.modSwitch(); bd.modSwitch(); cd.modSwitch(); a.modSwitch(); b.modSwitch(); c.modSwitch(); d.modSwitch(); /////////////////////////////////// // level up // add // out[0] = c+d+ab+bc+bd+cd+1; cXor(out[0], c, d); cXor(out[0], out[0], ab); cXor(out[0], out[0], bc); cXor(out[0], out[0], bd); cXor(out[0], out[0], cd); cNot(out[0], out[0]); // out[1] = b+d+ac+bc+bd+cd; cXor(out[1], b, d); cXor(out[1], out[1], ac); cXor(out[1], out[1], bc); cXor(out[1], out[1], bd); cXor(out[1], out[1], cd); // out[2] = ab+ac+bc+bd+1; cXor(out[2], ab, ac); cXor(out[2], out[2], bc); cXor(out[2], out[2], bd); cNot(out[2], out[2]); // out[3] = a+ab+bc+cd+1; cXor(out[3], a, ab); cXor(out[3], out[3], bc); cXor(out[3], out[3], cd); cNot(out[3], out[3]); // mul a.x2n(); b.x2n(); c.x2n(); d.x2n(); ab.x2n(); cd.x2n(); cAnd(abd, ab, d); cAnd(acd, cd, a); cAnd(bcd, cd, b); cAnd(abc, ab, c); abd.x2c(); acd.x2c(); bcd.x2c(); abc.x2c(); // add // out[0] += abc+abd+bcd; cXor(out[0], out[0], abc); cXor(out[0], out[0], abd); cXor(out[0], out[0], bcd); // out[1] += acd+bcd; cXor(out[1], out[1], acd); cXor(out[1], out[1], bcd); // out[2] += bcd; cXor(out[2], out[2], bcd); // out[3] += abd+acd; cXor(out[3], out[3], abd); cXor(out[3], out[3], acd); for (int i=0; i<4; i++) { // relin out[i].relin(); // modswitch out[i].modSwitch(); } // delete a.~CuCtxt(); b.~CuCtxt(); c.~CuCtxt(); d.~CuCtxt(); ab.~CuCtxt(); ac.~CuCtxt(); ad.~CuCtxt(); bc.~CuCtxt(); bd.~CuCtxt(); cd.~CuCtxt(); abd.~CuCtxt(); acd.~CuCtxt(); bcd.~CuCtxt(); abc.~CuCtxt(); /////////////////////////////////// // level up // output for (int i=0; i<4; i++) { out[i].x2z(); in[i] = out[i].zRep(); out[i].~CuCtxt(); } }; void Prince::addRoundKey(ZZX in[64], ZZX key[64]) { for (int i=0; i<64; i++) in[i] = in[i] + key[i]; }; void Prince::addRC(ZZX in[64], int round) { for (int i=0;i<64;i++) in[i] = (in[i] + RC[(round*64)+i]); }; void Prince::MixColumn(ZZX in[64]) { M_p(in); ShiftRow(in); }; void Prince::M_p(ZZX in[64]) { ZZX in1[64]; in1[0] = (in[4]+in[8]+in[12]); in1[1] = (in[1]+in[9]+in[13]); in1[2] = (in[2]+in[6]+in[14]); in1[3] = (in[3]+in[7]+in[11]); in1[4] = (in[0]+in[4]+in[8]); in1[5] = (in[5]+in[9]+in[13]); in1[6] = (in[2]+in[10]+in[14]); in1[7] = (in[3]+in[7]+in[15]); in1[8] = (in[0]+in[4]+in[12]); in1[9] = (in[1]+in[5]+in[9]); in1[10] = (in[6]+in[10]+in[14]); in1[11] = (in[3]+in[11]+in[15]); in1[12] = (in[0]+in[8]+in[12]); in1[13] = (in[1]+in[5]+in[13]); 
in1[14] = (in[2]+in[6]+in[10]); in1[15] = (in[7]+in[11]+in[15]); in1[16] = (in[16]+in[20]+in[24]); in1[17] = (in[21]+in[25]+in[29]); in1[18] = (in[18]+in[26]+in[30]); in1[19] = (in[19]+in[23]+in[31]); in1[20] = (in[16]+in[20]+in[28]); in1[21] = (in[17]+in[21]+in[25]); in1[22] = (in[22]+in[26]+in[30]); in1[23] = (in[19]+in[27]+in[31]); in1[24] = (in[16]+in[24]+in[28]); in1[25] = (in[17]+in[21]+in[29]); in1[26] = (in[18]+in[22]+in[26]); in1[27] = (in[23]+in[27]+in[31]); in1[28] = (in[20]+in[24]+in[28]); in1[29] = (in[17]+in[25]+in[29]); in1[30] = (in[18]+in[22]+in[30]); in1[31] = (in[19]+in[23]+in[27]); in1[32] = (in[32]+in[36]+in[40]); in1[33] = (in[37]+in[41]+in[45]); in1[34] = (in[34]+in[42]+in[46]); in1[35] = (in[35]+in[39]+in[47]); in1[36] = (in[32]+in[36]+in[44]); in1[37] = (in[33]+in[37]+in[41]); in1[38] = (in[38]+in[42]+in[46]); in1[39] = (in[35]+in[43]+in[47]); in1[40] = (in[32]+in[40]+in[44]); in1[41] = (in[33]+in[37]+in[45]); in1[42] = (in[34]+in[38]+in[42]); in1[43] = (in[39]+in[43]+in[47]); in1[44] = (in[36]+in[40]+in[44]); in1[45] = (in[33]+in[41]+in[45]); in1[46] = (in[34]+in[38]+in[46]); in1[47] = (in[35]+in[39]+in[43]); in1[48] = (in[52]+in[56]+in[60]); in1[49] = (in[49]+in[57]+in[61]); in1[50] = (in[50]+in[54]+in[62]); in1[51] = (in[51]+in[55]+in[59]); in1[52] = (in[48]+in[52]+in[56]); in1[53] = (in[53]+in[57]+in[61]); in1[54] = (in[50]+in[58]+in[62]); in1[55] = (in[51]+in[55]+in[63]); in1[56] = (in[48]+in[52]+in[60]); in1[57] = (in[49]+in[53]+in[57]); in1[58] = (in[54]+in[58]+in[62]); in1[59] = (in[51]+in[59]+in[63]); in1[60] = (in[48]+in[56]+in[60]); in1[61] = (in[49]+in[53]+in[61]); in1[62] = (in[50]+in[54]+in[58]); in1[63] = (in[55]+in[59]+in[63]); for (int i=0; i<64; i++) in[i] = in1[i]; }; void Prince::ShiftRow(ZZX in[64]) { int i = 4; ZZX temp[16] = {in[i],in[i+1],in[i+2],in[i+3],in[i+16],in[i+17],in[i+18],in[i+19],in[i+32],in[i+33],in[i+34],in[i+35],in[i+48],in[i+49],in[i+50],in[i+51]}; in[i] = temp[4]; in[i+1] = temp[5]; in[i+2] = temp[6]; in[i+3] = temp[7]; in[i+16] = temp[8]; in[i+17] = temp[9]; in[i+18] = temp[10]; in[i+19] = temp[11]; in[i+32] = temp[12]; in[i+33] = temp[13]; in[i+34] = temp[14]; in[i+35] = temp[15]; in[i+48] = temp[0]; in[i+49] = temp[1]; in[i+50] = temp[2]; in[i+51] = temp[3]; i = 8; ZZX temp1[16] = {in[i],in[i+1],in[i+2],in[i+3],in[i+16],in[i+17],in[i+18],in[i+19],in[i+32],in[i+33],in[i+34],in[i+35],in[i+48],in[i+49],in[i+50],in[i+51]}; in[i] = temp1[8]; in[i+1] = temp1[9]; in[i+2] = temp1[10]; in[i+3] = temp1[11]; in[i+16] = temp1[12]; in[i+17] = temp1[13]; in[i+18] = temp1[14]; in[i+19] = temp1[15]; in[i+32] = temp1[0]; in[i+33] = temp1[1]; in[i+34] = temp1[2]; in[i+35] = temp1[3]; in[i+48] = temp1[4]; in[i+49] = temp1[5]; in[i+50] = temp1[6]; in[i+51] = temp1[7]; i = 12; ZZX temp2[16] = {in[i],in[i+1],in[i+2],in[i+3],in[i+16],in[i+17],in[i+18],in[i+19],in[i+32],in[i+33],in[i+34],in[i+35],in[i+48],in[i+49],in[i+50],in[i+51]}; in[i] = temp2[12]; in[i+1] = temp2[13]; in[i+2] = temp2[14]; in[i+3] = temp2[15]; in[i+16] = temp2[0]; in[i+17] = temp2[1]; in[i+18] = temp2[2]; in[i+19] = temp2[3]; in[i+32] = temp2[4]; in[i+33] = temp2[5]; in[i+34] = temp2[6]; in[i+35] = temp2[7]; in[i+48] = temp2[8]; in[i+49] = temp2[9]; in[i+50] = temp2[10]; in[i+51] = temp2[11]; }; void Prince::inv_MixColumn(ZZX in[64]) { inv_ShiftRow(in); M_p(in); }; void Prince::inv_ShiftRow(ZZX in[64]) { int i = 4; ZZX temp2[16] = {in[i],in[i+1],in[i+2],in[i+3],in[i+16],in[i+17],in[i+18],in[i+19],in[i+32],in[i+33],in[i+34],in[i+35],in[i+48],in[i+49],in[i+50],in[i+51]}; in[i] = 
temp2[12]; in[i+1] = temp2[13]; in[i+2] = temp2[14]; in[i+3] = temp2[15]; in[i+16] = temp2[0]; in[i+17] = temp2[1]; in[i+18] = temp2[2]; in[i+19] = temp2[3]; in[i+32] = temp2[4]; in[i+33] = temp2[5]; in[i+34] = temp2[6]; in[i+35] = temp2[7]; in[i+48] = temp2[8]; in[i+49] = temp2[9]; in[i+50] = temp2[10]; in[i+51] = temp2[11]; i = 8; ZZX temp1[16] = {in[i],in[i+1],in[i+2],in[i+3],in[i+16],in[i+17],in[i+18],in[i+19],in[i+32],in[i+33],in[i+34],in[i+35],in[i+48],in[i+49],in[i+50],in[i+51]}; in[i] = temp1[8]; in[i+1] = temp1[9]; in[i+2] = temp1[10]; in[i+3] = temp1[11]; in[i+16] = temp1[12]; in[i+17] = temp1[13]; in[i+18] = temp1[14]; in[i+19] = temp1[15]; in[i+32] = temp1[0]; in[i+33] = temp1[1]; in[i+34] = temp1[2]; in[i+35] = temp1[3]; in[i+48] = temp1[4]; in[i+49] = temp1[5]; in[i+50] = temp1[6]; in[i+51] = temp1[7]; i = 12; ZZX temp[16] = {in[i],in[i+1],in[i+2],in[i+3],in[i+16],in[i+17],in[i+18],in[i+19],in[i+32],in[i+33],in[i+34],in[i+35],in[i+48],in[i+49],in[i+50],in[i+51]}; in[i] = temp[4]; in[i+1] = temp[5]; in[i+2] = temp[6]; in[i+3] = temp[7]; in[i+16] = temp[8]; in[i+17] = temp[9]; in[i+18] = temp[10]; in[i+19] = temp[11]; in[i+32] = temp[12]; in[i+33] = temp[13]; in[i+34] = temp[14]; in[i+35] = temp[15]; in[i+48] = temp[0]; in[i+49] = temp[1]; in[i+50] = temp[2]; in[i+51] = temp[3]; }; void Prince::KeyExpansion(ZZX key[64]) { ZZX temp[64]; for (int i=0; i<64; i++) temp[i] = key[i]; key[0] = temp[63]; for (int i=0; i<63; i++) key[i+1] = temp[i]; key[63] = (key[63]+temp[0]); };
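/*
 * Plaintext reference for the degree-3 ANF evaluated homomorphically by Prince::_sbox
 * and Prince::_inv_sbox above. This host-side sketch (not part of the encrypted circuit)
 * re-evaluates the same XOR/AND expressions on clear bits and checks that the inverse
 * expressions undo the forward ones; the bit convention (in[0] = most significant bit of
 * the nibble) is inferred from the published PRINCE S-box table.
 */
#include <cassert>
#include <cstdio>

static int prince_sbox_anf(int x) {
  const int a = (x >> 3) & 1, b = (x >> 2) & 1, c = (x >> 1) & 1, d = x & 1;
  const int y0 = a ^ c ^ (a&b) ^ (b&c) ^ 1 ^ (a&b&d) ^ (a&c&d) ^ (b&c&d);
  const int y1 = a ^ d ^ (a&c) ^ (a&d) ^ (c&d) ^ (a&b&c) ^ (a&c&d);
  const int y2 = (a&c) ^ (b&c) ^ (b&d) ^ 1 ^ (a&b&c) ^ (b&c&d);
  const int y3 = a ^ b ^ (a&b) ^ (a&d) ^ (b&c) ^ (c&d) ^ 1 ^ (b&c&d);
  return (y0 << 3) | (y1 << 2) | (y2 << 1) | y3;
}

static int prince_inv_sbox_anf(int x) {
  const int a = (x >> 3) & 1, b = (x >> 2) & 1, c = (x >> 1) & 1, d = x & 1;
  const int z0 = c ^ d ^ (a&b) ^ (b&c) ^ (b&d) ^ (c&d) ^ 1 ^ (a&b&c) ^ (a&b&d) ^ (b&c&d);
  const int z1 = b ^ d ^ (a&c) ^ (b&c) ^ (b&d) ^ (c&d) ^ (a&c&d) ^ (b&c&d);
  const int z2 = (a&b) ^ (a&c) ^ (b&c) ^ (b&d) ^ 1 ^ (b&c&d);
  const int z3 = a ^ (a&b) ^ (b&c) ^ (c&d) ^ 1 ^ (a&b&d) ^ (a&c&d);
  return (z0 << 3) | (z1 << 2) | (z2 << 1) | z3;
}

int main() {
  const int prince_sbox[16] = {0xB,0xF,0x3,0x2,0xA,0xC,0x9,0x1,0x6,0x7,0x8,0x0,0xE,0x5,0xD,0x4};
  for (int x = 0; x < 16; x++) {
    assert(prince_sbox_anf(x) == prince_sbox[x]);           // matches the published table
    assert(prince_inv_sbox_anf(prince_sbox_anf(x)) == x);   // inverse really inverts
  }
  printf("PRINCE S-box ANF and its inverse are consistent\n");
  return 0;
}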
#include "MemoryManager.h" #include "cuda_runtime.h" using namespace std; MemSegment::MemSegment():_basePointer(NULL),_size(0),_type(UnDefined) {}; MemSegment::MemSegment(void * ptr, size_t size, MemoryType type):_basePointer((char*)ptr),_size(size),_type(type){}; MemSegment::MemSegment(const MemSegment& src):_basePointer(src._basePointer),_size(src._size),_type(src._type){}; MemSegment::MemSegment(const MemSegment& first, const MemSegment& last) { _basePointer = first._basePointer; _type = first._type; if(first._type != last._type){ cout << "CUDA Memory Manager Warning: Segment accumulation failed first and last segment do not match type" << endl; _size = first._size; } else if(first._basePointer > last._basePointer) { cout << "CUDA Memory Manager Warning: Segment accumulation failed first and last base pointers are not in order" << endl; _size = first._size; } else { //create segment from first_basepointer to the end of last. _size = (last._basePointer + last._size) - first._basePointer; } } size_t MemSegment::addSegment(const MemSegment& append) { if(_basePointer == NULL){ _type = append._type; _size = append._size; _basePointer = append._basePointer; return _size; } if(_type != append._type){ cout << "CUDA Memory Manager Warning: Appending Segment failed, segments do not match type" << endl; } else if(_basePointer > append._basePointer) { cout << "CUDA Memory Manager Warning: Appending Segment failed, first and last base pointers are not in order" << endl; } else if(_basePointer+_size != append._basePointer) { cout << "CUDA Memory Manager Warning: Appended Segment is not the imidiate next segment, segments inbetween are added automatically!" << endl; _size = (append._basePointer + append._size) - _basePointer; } else { //create segment from first_basepointer to the end of last. _size += append._size; } return _size; } size_t MemSegment::inflateSegmentBy(size_t size) { _size += size; return size; } size_t MemSegment::changeSegmentSizeTo(size_t size) { if(size > 0) { _size = size; } else { cout << "CUDA Memory Manager Warning: Segment rezise request to size <= 0, previouse size: " << _size << "!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } return _size; } void MemSegment::copyIn(const void * src, size_t size) { cudaMemcpy(_basePointer,src,checkSize(size),getCopyInKind()); CUDA_ERROR_CHECK(); } void MemSegment::copyOut(void * dst, size_t size ) { cudaMemcpy(dst,_basePointer,checkSize(size),getCopyOutKind()); CUDA_ERROR_CHECK(); } void MemSegment::copyInSubSet( size_t dstOffset, void * src,size_t size ) { cudaMemcpy(_basePointer + dstOffset, src, checkSize(dstOffset+size),getCopyInKind()); CUDA_ERROR_CHECK(); } void MemSegment::copyOutSubSet(void * dst, size_t srcOffset, size_t size ) { if(size == 0) size = _size - srcOffset; // if size == 0 copy from srcOffset to end of buffer //cout << "cudaMemcpy( " << dst <<", " <<(void*)_basePointer << " + " << srcOffset << ", " << "checkSize(" << srcOffset << " + " << size << " ),getCopyOutKind())" << endl; cudaMemcpy(dst,(void*)(_basePointer + srcOffset) ,checkSize(srcOffset+size),getCopyOutKind()); CUDA_ERROR_CHECK(); } void MemSegment::copyAsync (const MemSegment& src, cudaStream_t sid, size_t size) { if(sid == 0) cout << "CUDA Memory Manager Warning: intended async-copy is using stream 0 turning it into non-async copy!" 
<< endl; if(_type == HostPageLocked || _type == DeviceGlobal) { if(src._type == HostPageLocked || src._type == DeviceGlobal) { cudaMemcpyAsync(_basePointer, src._basePointer, checkSize(size), getCopyKind(src), sid); } else { cout << "CUDA Memory Manager Warning: intended async-copy is using non paged locked host memory turning it into a non-async copy!" << endl; cudaMemcpy(_basePointer, src._basePointer, checkSize(size), getCopyKind(src)); } } else { cout << "CUDA Memory Manager Warning: intended async-copy is using non paged locked host memory turning it into a non-async copy!" << endl; cudaMemcpy(_basePointer, src._basePointer, checkSize(size), getCopyKind(src)); } } //If no size, or size= 0 is provided src will be copied into local buffer. if a //size mismatch is encountered the smaller one of the two sizes will be used to initialize the //copy and a warning will be printed. void MemSegment::copy (const MemSegment& src, size_t size) { //src.checkSize(size); //should probably check if return value is larger this->getSize() if(size == 0){ size_t dstSize = this->getSize(); size_t srcSize = src.getSize(); size = min(dstSize,srcSize); if(dstSize != srcSize){ cout << "CUDA Memory Manager Warning: buffer size missmatch dst: "; printReadableSize(dstSize); cout << " != src: "; printReadableSize(srcSize); cout <<". Will use smaller buffer size of "; printReadableSize(size); cout << " to initiate copy." << endl; } } cudaMemcpy(_basePointer, src._basePointer, checkSize(size), getCopyKind(src)); //cout << "cudaMemcpy(" << (void*)_basePointer << ", " << (void*)src._basePointer << ", " << checkSize(size) << ", " << getCopyKind(src) << ")" << endl; CUDA_ERROR_CHECK(); } void MemSegment::copySubSet(const MemSegment& src, size_t srcOffset, size_t dstOffset, size_t size) { src.checkSize(srcOffset + size); //check if the requested sub segment actually is within src segment size_t copysize = (size == 0)? (src.getSize() - srcOffset):(size); // no size provide copy all of src from offset to end otherwise try to copy size this->checkSize(dstOffset + copysize); //check if the requested sub segment fits into this cudaMemcpy(_basePointer + dstOffset, src._basePointer + srcOffset, copysize, getCopyKind(src)); CUDA_ERROR_CHECK(); } //sets first size bytes in segment to value //size (optional): number of bytes to set. if not provided // all bytes in the segment will beset to value void MemSegment::memSet(int value, size_t size) { switch(_type){ case DeviceGlobal: cudaMemset((void*)_basePointer, value, checkSize(size)); break; case HostMem: case HostPageLocked: default: memset((void*)_basePointer, value, checkSize(size)); } } // //same as memSet but only works on device memory and performs asynchronous memset if stream is provided void MemSegment::memSetAsync(int value, cudaStream_t sid, size_t size) { switch(_type){ case DeviceGlobal: cudaMemsetAsync((void*)_basePointer, value, checkSize(size), sid); break; case HostMem: case HostPageLocked: default: cout << "CUDA Memory Manager Warning: Asyncronouse Host Side memset not available!" 
<< endl; memset((void*)_basePointer, value, checkSize(size)); } } void MemSegment::memSet(int value, size_t offset, size_t size) { char * tmpPtr = _basePointer + offset; checkSize(size+offset); switch(_type){ case DeviceGlobal: cudaMemset((void*)tmpPtr, value,size); break; case HostMem: case HostPageLocked: default: memset((void*)tmpPtr, value, size); } } // //same as memSet but only works on device memory and performs asynchronous memset if stream is provided void MemSegment::memSetAsync(int value, cudaStream_t sid, size_t offset, size_t size) { char * tmpPtr = _basePointer + offset; checkSize(size+offset); switch(_type){ case DeviceGlobal: cudaMemsetAsync((void*)tmpPtr, value, size, sid); break; case HostMem: case HostPageLocked: default: cout << "CUDA Memory Manager Warning: Asynchronous Host Side memset not available!" << endl; memset((void*)tmpPtr, value, size); } } MemSegment MemSegment::splitAt(size_t offset) { if(offset >= _size){ cout << "CUDA Memory Manager Warning: tried to split segment at offset(" <<offset<<") >= segment size("<< _size << ")!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } //cout << "CUDA Memory Manager: splitting buffer of size " << _size << " into two buffers of sizes " << offset << " and " << _size-offset << endl; MemSegment tmp(_basePointer+offset, _size-offset, _type); changeSegmentSizeTo(offset); return tmp; } size_t MemSegment::checkSize(size_t size) const { if(size <= 0) return _size; if(size <= _size) { //if(size < _size) cout << "CUDA Memory Manager Warning: copying smaller segment of size " << size <<" into large segment of size " << _size << "!" << endl; return size; } cout << "CUDA Memory Manager Warning: requested size (" << size <<") is larger than segment size (" << _size << ")!" 
<< endl; throw cudaAllocationError(cudaErrorInvalidValue, __FILE__, __LINE__); } cudaMemcpyKind MemSegment::getCopyKind(const MemSegment& src) { switch(_type){ case DeviceGlobal: switch(src._type){ case DeviceGlobal: return cudaMemcpyDeviceToDevice; case HostMem: case HostPageLocked: default: return cudaMemcpyHostToDevice; } //no break case HostMem: case HostPageLocked : default: switch(src._type){ case DeviceGlobal: return cudaMemcpyDeviceToHost; case HostMem: case HostPageLocked: default: return cudaMemcpyHostToHost; } } } cudaMemcpyKind MemSegment::getCopyInKind() { switch(_type){ case DeviceGlobal: return cudaMemcpyHostToDevice; case HostMem: case HostPageLocked : default: return cudaMemcpyHostToHost; } } cudaMemcpyKind MemSegment::getCopyOutKind() { switch(_type){ case DeviceGlobal: return cudaMemcpyDeviceToHost; case HostMem: case HostPageLocked : default: return cudaMemcpyHostToHost; } } void MemSegment::printSummary() const { cout << "MemSegment of type " << getType() << " of size: "<< getSize() <<" ( " << getSize()/(1024.0*1024.0) << " MB )" << endl; } void MemSegment::printReadableSize(size_t size) const { double printSize = (size == 0)?(_size):(size); const string SizeString[] = {"Bytes","KB","MB","GB","TB"}; int offset = 0; while(printSize > 999.0){ printSize /= 1024.0; offset++; } cout << std::setprecision(3) << printSize << " " << SizeString[offset]; } void MemSegment::DebugOutput() { cout << "CUDA Memory Manager Debug: MemSegment: " << getSize() <<", " << getVoidPtr() << endl; } void MemSegment::dumptoFile(ofstream& myFile) { myFile.write((const char*)&_size , sizeof(size_t)); myFile.write((const char*)&_type , sizeof(MemoryType)); char * tmpPtr = _basePointer; if(_type == DeviceGlobal){ tmpPtr = new char[_size]; this->copyOut(tmpPtr); } //copy into actual Memory segment myFile.write((const char*)tmpPtr , _size); //clean up if(_type == DeviceGlobal) delete tmpPtr; } size_t MemSegment::readSizeFromFile(ifstream& myFile){ size_t size; if(myFile.eof()){ cerr << "File Error: does not contain any more buffers (reached EOF)" << endl; exit (-1); } myFile.read((char*)&size,sizeof(size_t)); if(myFile.eof()){ cerr << "File Error: does not contain any more buffers (reached EOF)" << endl; exit (-1); } if(size <= 0){ cerr << "File Error: size field corrupted (<= 0)" << endl; exit (-1); } return size; } MemoryType MemSegment::readMemTypeFromFile(ifstream& myFile) { MemoryType type; myFile.read((char*)&type,sizeof(MemoryType)); return type; } void MemSegment::readBufferFromFile(ifstream& myFile, size_t readsize){ char * tmpPtr = _basePointer; if(_type == DeviceGlobal){ tmpPtr = new char[_size]; } if(readsize <= 0) readsize = _size; //read to temp pointer myFile.read((char*)tmpPtr, readsize); if (!myFile) { readsize = myFile.gcount(); cout << "error: only " << readsize << " bytes could be read"; } if(_type == DeviceGlobal){ //copy into actual Memory segment cout << "copy buffer of size " << readsize << " read from file to device " << endl; this->copyIn((void*)tmpPtr,readsize); delete tmpPtr; } } void MemSegment::readFromFile(ifstream& myFile) { size_t readsize; MemoryType readtype; readsize = readSizeFromFile(myFile); if(readsize > _size){ cerr << "File Error: buffer in file exceeds buffer size: " << _size << " < " << readsize << endl; exit (-1); } readtype = readMemTypeFromFile(myFile); if(readtype != _type) { cerr << "File Warning: buffer in file Memory type miss-match "<< _type << " != " << readtype << endl; } readBufferFromFile(myFile,readsize); } 
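/*
 * Minimal usage sketch for the MemSegment wrapper implemented above (assumes the matching
 * declarations in "MemoryManager.h" and a CUDA-capable device; error handling omitted).
 * copyIn/copyOut resolve the cudaMemcpyKind from the segment's MemoryType, and a
 * segment-to-segment copy() picks the kind from both endpoint types via getCopyKind().
 */
#include "MemoryManager.h"
#include <cuda_runtime.h>
#include <vector>

static void memsegment_roundtrip_sketch() {
  const size_t bytes = 1024 * sizeof(float);
  std::vector<float> host_in(1024, 1.0f), host_out(1024, 0.0f);

  void* dev_ptr = nullptr;
  cudaMalloc(&dev_ptr, bytes);

  MemSegment device_seg(dev_ptr, bytes, DeviceGlobal);   // device-side view of the raw allocation
  device_seg.copyIn(host_in.data(), bytes);              // HostToDevice via getCopyInKind()
  device_seg.copyOut(host_out.data(), bytes);            // DeviceToHost via getCopyOutKind()

  MemSegment host_seg(host_out.data(), bytes, HostMem);  // plain host-side view
  host_seg.copy(device_seg, 0);                          // size 0 -> copy the full segment

  cudaFree(dev_ptr);
}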
//////////////////////////////////////////////////////////////////////////////////////////// MemSegPair::MemSegPair():_Host(),_Device(){}; MemSegPair::MemSegPair(void* HostPtr, void* DevPtr, size_t size, MemoryType Htype, MemoryType Dtype ):_Host(HostPtr,size,Htype),_Device(DevPtr,size,Dtype){}; MemSegPair::MemSegPair(const MemSegPair& src):_Host(src._Host),_Device(src._Device){}; MemSegPair::MemSegPair(const MemSegment& host, const MemSegment& device):_Host(host),_Device(device) { getSize(); } MemSegPair::MemSegPair(const MemSegPair& first, const MemSegPair& last):_Host(first._Host,last._Host),_Device(first._Device,last._Device) { getSize(); } size_t MemSegPair::addSegment(const MemSegPair& append) { _Host.addSegment(append._Host); _Device.addSegment(append._Device); return getSize(); } size_t MemSegPair::inflateSegmentBy(size_t size) { _Host.inflateSegmentBy(size); _Device.inflateSegmentBy(size); return getSize(); } size_t MemSegPair::changeSegmentSizeTo(size_t size) { _Host.changeSegmentSizeTo(size); _Device.changeSegmentSizeTo(size); return getSize(); } size_t MemSegPair::getSize() { if(_Host.getSize() == _Device.getSize()) return _Host.getSize(); cout << "CUDA Memory Manager Error: Device-Host Segment size mismatch!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } void MemSegPair::copyToHostAsync (cudaStream_t sid, size_t size) { _Host.copyAsync(_Device, sid, size); } void MemSegPair::copyToHost ( size_t size) { _Host.copy(_Device, size); } void MemSegPair::copyToDeviceAsync (cudaStream_t sid, size_t size) { _Device.copyAsync(_Host,sid,size); } void MemSegPair::copyToDevice ( size_t size) { _Device.copy(_Host, size); } void MemSegPair::copyIn(const void * src, size_t size) { _Host.copyIn(src,size); } void MemSegPair::copyOut(void * dst, size_t size ) { _Host.copyOut(dst,size); } void MemSegPair::memSet(int value) { _Host.memSet(value); _Device.memSet(value); } MemSegPair MemSegPair::splitAt(size_t offset) { MemSegPair tmp(_Host.splitAt(offset),_Device.splitAt(offset)); //split at offset changeSegmentSizeTo(offset); //resize current return tmp; } void MemSegPair::DebugOutput() { cout << "CUDA Memory Manager Debug: MemSegPair Host:" << _Host.getSize() <<", " << _Host.getVoidPtr() << " Device:" << _Device.getSize() <<", " << _Device.getVoidPtr() << endl; } ///////////////////////////////////////////////// MemTypeTracker::~MemTypeTracker(){ reset(); } void MemTypeTracker::trackSegment(const MemSegment * segment) { if(segment){ set<const MemSegment*>::iterator it; it = mySegments.find(segment); if(it == mySegments.end()){ // cout << "Start tracking: "; //segment->printSummary(); mySegments.insert(segment); _currentAllocated += segment->getSize(); updateMax(); _tracking = true; }else{ if(segment->getSize() != (*it)->getSize()) releaseSegment(segment); trackSegment(segment); } } } void MemTypeTracker::releaseSegment(const MemSegment * segment) { if(segment){ set<const MemSegment*>::iterator it; it = mySegments.find(segment); if(it != mySegments.end()){ _currentAllocated -= segment->getSize(); mySegments.erase (it); } } } size_t MemTypeTracker::getCurrent() const { return _currentAllocated; } size_t MemTypeTracker::getMax() const { return _maxAllocated; } void MemTypeTracker::print() const { if(isTracking()){ cout << "Currently tracking " << mySegments.size() << endl; for (set<const MemSegment*>::iterator it=mySegments.begin(); it!=mySegments.end(); ++it){ (*it)->printSummary(); } cout << "Currently allocated Memory: " << _currentAllocated << " ( " << 
_currentAllocated/(1024.0*1024.0) << " MB ) "<< endl; cout << "Maximum allocated Memory: " << _maxAllocated << " ( " << _maxAllocated/(1024.0*1024.0) << " MB ) "<< endl; }else cout << "No segments tracked yet." << endl; } bool MemTypeTracker::isTracking() const { return _tracking; } void MemTypeTracker::reset() { _tracking = false; _maxAllocated = 0; _currentAllocated=0; mySegments.clear(); } ///////////// MemoryUsageTracker::~MemoryUsageTracker() { printMemUsage(); } MemTypeTracker * MemoryUsageTracker::trackSegment(const MemSegment * segment) { if(segment){ SegmentTracker[segment->getType()].trackSegment(segment); return & SegmentTracker[segment->getType()]; } return NULL; } void MemoryUsageTracker::releaseSegment(const MemSegment * segment) { if(segment){ SegmentTracker[segment->getType()].releaseSegment(segment); } } MemTypeTracker * MemoryUsageTracker::trackSegment(const MemSegment & segment) { return trackSegment(&segment); } void MemoryUsageTracker::releaseSegment(const MemSegment & segment) { releaseSegment(&segment); } void MemoryUsageTracker::printMemUsage(){ for (map<MemoryType,MemTypeTracker>::iterator it=SegmentTracker.begin(); it!=SegmentTracker.end(); ++it){ cout << "Tracking Info for Memory Segments of Type " << (*it).first << ":" << endl; (*it).second.print(); } } void MemoryUsageTracker::printMemUsage(MemoryType type) { if(SegmentTracker.find(type) != SegmentTracker.end()){ cout << "Tracking Info for Memory Segments of Type " << type << ":" << endl; SegmentTracker[type].print(); } } /////////////////////////////////////////////////////////////////////////////////////////// MemoryResource::MemoryResource() { _sizeBytes = 0; _type = UnDefined; _basePointer = NULL; _returnPointer = NULL; _align = 128; } MemoryResource::MemoryResource(size_t size, MemoryType type) { _sizeBytes = 0; _type = type; _basePointer = NULL; _returnPointer = NULL; _align = 128; if(size >0) reallocate(size, type); } MemoryResource::~MemoryResource() { destroy(); } void MemoryResource::setPreAllocSize(size_t sizeBytes) { if(_basePointer == NULL){ _sizeBytes = sizeBytes; }else{ cout << "CUDA MemoryManager: attempt to change pre-allocation setting for an already allocated buffer!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } } void MemoryResource::setPreAllocAlignment(size_t align) { if(_basePointer == NULL){ _align = align; }else{ cout << "CUDA MemoryManager: attempt to change pre-allocation setting for an already allocated buffer!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } }; void MemoryResource::setPreAllocMemType(MemoryType type){ _type= type; if(_basePointer == NULL){ _type= type; }else{ cout << "CUDA MemoryManager: attempt to change pre-allocation setting for an already allocated buffer!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } }; //padds current buffer size to alignment and adds chunk. returns old size size_t MemoryResource::addPreAllocChunk(size_t chunk){ if(_basePointer == NULL){ size_t current = _sizeBytes; _sizeBytes = getPaddedSize() + chunk; //padd original size and add new chunk return current; }else{ cout << "CUDA MemoryManager: attempt to change pre-allocation setting for an already allocated buffer!" 
<< endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } } size_t MemoryResource::addPreAllocSegment(MemSegment * mseg){ if(_basePointer == NULL){ size_t current = _sizeBytes; _sizeBytes = getPaddedSize() + mseg->getSize(); //padd original size and add new chunk return current; }else{ cout << "CUDA MemoryManager: attempt to change pre-allocation setting for an already allocated buffer!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } } void MemoryResource::allocate(MemoryType memType) { if(_sizeBytes == 0 || _type == UnDefined){ cout << "CUDA MemoryManager: attempt to allocate without providing type or size!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } if(_basePointer != NULL){ cout << "CUDA MemoryManager: attempt to allocate already allocated buffer!" << endl; throw cudaAllocationError(cudaErrorMemoryAllocation, __FILE__, __LINE__); } reallocate(_sizeBytes,_type); } void MemoryResource::reallocate(size_t size, MemoryType type) { //if already allocated and reallocation is needed cleanup first if(_sizeBytes > 0 || _basePointer != NULL) destroy(); #if DEBUG cout << "Allocating Buffer of type: " << type << " and size: " << size << endl; #endif _sizeBytes = size; _type = (type == UnDefined)?(_type):(type); // only change type if type != undefined [default] is provided try{ _basePointer = (char *) MemoryResource::allocatePtr(_sizeBytes,_type); } catch(cudaException &e) { e.Print(); _basePointer = NULL; _sizeBytes = 0; _basePointer = NULL; throw cudaAllocationError(e.getCudaError(), __FILE__, __LINE__); } _returnPointer = _basePointer; startNewSegGroup(); } //allocates memory according to needs of MemSegment, if no MemoryType is provided the src type is used void MemoryResource::reallocate(const MemSegment & src, MemoryType type) { if(type == UnDefined) type = src.getType(); reallocate(src.getSize(), type); } //static function void* MemoryResource::allocatePtr(size_t size, MemoryType type) { void * ptr = NULL; cudaError_t err = cudaSuccess; switch(type){ case HostPageLocked: cudaHostAlloc(&ptr, size, cudaHostAllocDefault); break; case DeviceGlobal: cudaMalloc(&ptr, size); break; case HostMem: ptr =(char*)malloc(size); break; case UnDefined: default: ptr = NULL; } err = cudaGetLastError(); if ( err != cudaSuccess || ptr == NULL){ throw cudaAllocationError(err, __FILE__, __LINE__); } return ptr; } //static function void MemoryResource::destroyPtr(void * ptr, MemoryType type) { if(ptr != NULL){ switch(type){ case HostMem: free(ptr); break; case HostPageLocked: cudaFreeHost(ptr); //CUDA_ERROR_CHECK(); break; case DeviceGlobal: cudaFree(ptr); //CUDA_ERROR_CHECK(); break; case UnDefined: default: break; } } } void MemoryResource::destroy() { MemoryResource::destroyPtr((void*)_basePointer, _type); _sizeBytes = 0; _basePointer = NULL; _returnPointer = NULL; } void * MemoryResource::getSegment(size_t size) { // if(_type == UnDefined || _basePointer == NULL) throw cudaNotEnoughMemForStream(__FILE__,__LINE__); //return NULL; size_t segSizePadded = getPaddedSize(size); #ifdef MM_DEBUG switch(_type){ case DeviceGlobal: cout << "CUDA: getSegment Device " << segSizePadded <<" used:" << memoryUsed() << " free:" << memoryAvailable() << endl; break; default: cout << "CUDA: getSegment Host " << segSizePadded <<" used:" << memoryUsed() << " free:" << memoryAvailable() << endl; } #endif if(isSet()) { if (_returnPointer + size <= _basePointer + _sizeBytes){ _CurrentSegment = MemSegment((void*)_returnPointer, 
size, _type); _CurrentSegGroup.inflateSegmentBy(segSizePadded); _returnPointer += segSizePadded; return _CurrentSegment.getVoidPtr(); } throw cudaNotEnoughMemForStream(__FILE__,__LINE__); }else{ //this is for an empty resource to determine the needed size _basePointer += segSizePadded; _CurrentSegment = MemSegment((void*)_returnPointer, size, _type); _CurrentSegGroup.inflateSegmentBy(segSizePadded); _returnPointer += segSizePadded; return _CurrentSegment.getVoidPtr(); } } MemSegment MemoryResource::getMemSegment(size_t size) { getSegment(size); return _CurrentSegment; } MemSegment MemoryResource::getCurrentSegGroup() { return _CurrentSegGroup; } void MemoryResource::releaseAll() { _returnPointer = _basePointer; _CurrentSegment = MemSegment(_returnPointer, 0, _type); startNewSegGroup(); } size_t MemoryResource::startNewSegGroup() { size_t ret = _CurrentSegGroup.getSize(); _CurrentSegGroup = MemSegment((void*)_returnPointer, 0, _type); return ret; } size_t MemoryResource::getCurrentSegGroupSize() { return _CurrentSegGroup.getSize(); } size_t MemoryResource::getSize() { return (isSet())?(_sizeBytes):(0); } size_t MemoryResource::getPaddedSize() { return getPaddedSize(_sizeBytes); } size_t MemoryResource::getPaddedSize(size_t size) { return ( (size + _align -1) / _align) * _align; } size_t MemoryResource::memoryAvailable() { return (isSet())?(_sizeBytes - memoryUsed()):(0); } size_t MemoryResource::memoryUsed() { return (isSet())?((size_t)_returnPointer - (size_t)_basePointer):(0); } size_t MemoryResource::checkAvailableDevMem() { size_t free_byte ; size_t total_byte ; cudaMemGetInfo( &free_byte, &total_byte ) ; free_byte += memoryAvailable(); return free_byte; } //check if memory resource contains enough memory and is able to reallocate bool MemoryResource::checkMemory(size_t size) { if(size > _sizeBytes){ if(_type == DeviceGlobal && false){ // DONT DO PRE CHECK ANYMORE SO ALLOCATION FAILS size_t free_byte = checkAvailableDevMem(); // free_byte -=50*1024*1024; //allow for 50 meg buffer that might not be available because of context if (free_byte < size){ cout << getLogHeader() << " **** NOT ENOUGH MEMORY ON DEVICE TO REALLOCATED: " << size << " bytes needed, " << free_byte << " bytes available, no reallocation performed, propagating up." << endl; throw cudaNotEnoughMemForStream(__FILE__,__LINE__); } } cout << getLogHeader() << " **** NOT ENOUGH MEMORY PREALLOCATED: " << _sizeBytes <<" bytes, try reallocating " << size << " bytes for current Job" << endl; reallocate(size); return false; } return true; } bool MemoryResource::isSet() const { if(_type == UnDefined) return false; if(_sizeBytes <= 0) return false; if(_basePointer == NULL) return false; return true; } string MemoryResource::getLogHeader() { ostringstream headerinfo; int devId; cudaGetDevice(&devId); headerinfo << "CUDA " << devId << ":"; return headerinfo.str(); }
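/*
 * Minimal usage sketch for MemoryResource (hypothetical sizes; assumes the declarations
 * from "MemoryManager.h" above). One device allocation is made up front and then carved
 * into aligned sub-segments with getMemSegment(); when the pool is exhausted, getSegment()
 * throws cudaNotEnoughMemForStream.
 */
#include "MemoryManager.h"

static void memory_resource_sketch() {
  MemoryResource pool(64u * 1024u * 1024u, DeviceGlobal); // single 64 MB device allocation

  MemSegment a = pool.getMemSegment(4u * 1024u * 1024u);  // carve 4 MB
  MemSegment b = pool.getMemSegment(1u * 1024u * 1024u);  // carve another 1 MB

  a.memSet(0, 0);                                         // size 0 -> clear the whole segment (cudaMemset)
  b.memSet(0xFF, 0);

  size_t used = pool.memoryUsed();                        // padded bytes handed out so far
  size_t left = pool.memoryAvailable();                   // remaining capacity of the pool
  (void)used; (void)left;

  pool.releaseAll();                                      // rewind the pool; outstanding segments become stale
}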
namespace surfelwarp { namespace device { //The kernel to build the candidate surfel and finite diff vertex __global__ void buildCandidateSurfelAndFiniteDiffVertexKernel( cudaTextureObject_t depth_vertex_confid_map, cudaTextureObject_t depth_normal_radius_map, cudaTextureObject_t color_time_map, mat34 camera2world, DeviceArrayView<ushort2> candidate_pixel, const float finitediff_step, //The output float4* finitediff_vertex, float4* surfel_vertex_confid, float4* surfel_normal_radius, float4* surfel_color_time ) { const auto idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < candidate_pixel.Size()) { const ushort2 pixel = candidate_pixel[idx]; const float4 depth_vertex_confid = tex2D<float4>(depth_vertex_confid_map, pixel.x, pixel.y); const float4 depth_normal_radius = tex2D<float4>(depth_normal_radius_map, pixel.x, pixel.y); const float4 color_time = tex2D<float4>(color_time_map, pixel.x, pixel.y); //Transform the vertex into the world frame const float3 vertex = camera2world.rot * depth_vertex_confid + camera2world.trans; const float3 normal = camera2world.rot * depth_normal_radius; //Write to finitediff vertex buffer const auto offset = idx * 4; finitediff_vertex[offset + 0] = make_float4(vertex.x, vertex.y, vertex.z, depth_vertex_confid.w); // Note that the last element is confidence finitediff_vertex[offset + 1] = make_float4(vertex.x + finitediff_step, vertex.y, vertex.z, 1.0f); finitediff_vertex[offset + 2] = make_float4(vertex.x, vertex.y + finitediff_step, vertex.z, 1.0f); finitediff_vertex[offset + 3] = make_float4(vertex.x, vertex.y, vertex.z + finitediff_step, 1.0f); //Write to surfel array surfel_vertex_confid[idx] = make_float4(vertex.x, vertex.y, vertex.z, depth_vertex_confid.w); surfel_normal_radius[idx] = make_float4(normal.x, normal.y, normal.z, depth_normal_radius.w); surfel_color_time[idx] = color_time; } } struct SurfelCandidateFilterDevice { //The node coordinate of the struct { const float4* live_node_coords; const float4* reference_node_coords; const DualQuaternion* node_se3; } warpfield_input; //The finite diff data input struct { DeviceArrayView<float4> vertex_finitediff_array; const ushort4* vertex_finitediff_knn; const float4* vertex_finitediff_knnweight; float finitediff_step; } vertex_input; //The output indicator mutable unsigned* candidate_validity_indicator; mutable ushort4* candidate_knn; mutable float4* candidate_knn_weight; __host__ __device__ __forceinline__ float min_distance2node_squared( const float4& vertex, const ushort4& knn ) const { //The first knn float4 node = warpfield_input.live_node_coords[knn.x]; float min_dist_square = squared_norm_xyz(node - vertex); //The second knn node = warpfield_input.live_node_coords[knn.y]; min_dist_square = min(min_dist_square, squared_norm_xyz(node - vertex)); //The third knn node = warpfield_input.live_node_coords[knn.z]; min_dist_square = min(min_dist_square, squared_norm_xyz(node - vertex)); //The forth knn node = warpfield_input.live_node_coords[knn.w]; min_dist_square = min(min_dist_square, squared_norm_xyz(node - vertex)); return min_dist_square; } __host__ __device__ __forceinline__ float average_distance2node_squared( const float4& vertex, const ushort4& knn ) const { //The first knn float4 node = warpfield_input.live_node_coords[knn.x]; float avg_dist_square = squared_norm_xyz(node - vertex); //The second knn node = warpfield_input.live_node_coords[knn.y]; avg_dist_square += squared_norm_xyz(node - vertex); //The third knn node = warpfield_input.live_node_coords[knn.z]; avg_dist_square += 
squared_norm_xyz(node - vertex); //The forth knn node = warpfield_input.live_node_coords[knn.w]; avg_dist_square += squared_norm_xyz(node - vertex); //Always count for four nodes return 0.25f * avg_dist_square; } __host__ __device__ __forceinline__ bool is_skinning_consistent( const ushort4& knn ) const { float live_pairwise_distance[6]; float canonical_pairwise_distance[6]; const unsigned short* knn_array = (const unsigned short*)&knn; int shift = 0; for (auto i = 0; i < 4; i++) { for (auto j = i + 1; j < 4; j++) { live_pairwise_distance[shift] = squared_norm_xyz(warpfield_input.live_node_coords[knn_array[i]] - warpfield_input.live_node_coords[knn_array[j]]); canonical_pairwise_distance[shift] = squared_norm_xyz(warpfield_input.reference_node_coords[knn_array[i]] - warpfield_input.reference_node_coords[knn_array[j]]); shift++; } } bool consistent_skinning = true; for (auto i = 0; i < 6; i++) { if (live_pairwise_distance[i] < 0.64f * canonical_pairwise_distance[i]) consistent_skinning = false; } return consistent_skinning; } __device__ __forceinline__ void processFiltering() const { const auto candidate_idx = threadIdx.x + blockIdx.x * blockDim.x; const auto offset = candidate_idx * 4; if(offset >= vertex_input.vertex_finitediff_array.Size()) return; //Load the vertex const float4 vertex = vertex_input.vertex_finitediff_array[offset]; const ushort4 vertex_knn = vertex_input.vertex_finitediff_knn[offset]; const float4 vertex_knnweight = vertex_input.vertex_finitediff_knnweight[offset]; //The written marker unsigned candidate_valid = 1; //Check distance if(min_distance2node_squared(vertex, vertex_knn) >= 4 * d_node_radius_square) candidate_valid = 0; //Check the consistent of skinning if(!is_skinning_consistent(vertex_knn)) candidate_valid = 0; //Check collision { //Load the data float4 finitediff_vertex[3], finitediff_weight[3]; ushort4 finitediff_knn[3]; for(auto i = 0; i < 3; i++) { finitediff_vertex[i] = vertex_input.vertex_finitediff_array[offset + 1 + i]; finitediff_knn[i] = vertex_input.vertex_finitediff_knn[offset + 1 + i]; finitediff_weight[i] = vertex_input.vertex_finitediff_knnweight[offset + 1 + i]; } //Check it const bool compression = is_compressive_mapped( vertex, vertex_knn, vertex_knnweight, finitediff_vertex, finitediff_knn, finitediff_weight, warpfield_input.node_se3, vertex_input.finitediff_step ); if(compression) candidate_valid = 0; } //Write to output candidate_validity_indicator[candidate_idx] = candidate_valid; candidate_knn[candidate_idx] = vertex_knn; candidate_knn_weight[candidate_idx] = vertex_knnweight; } }; __global__ void filterCandidateSurfelKernel( const SurfelCandidateFilterDevice filter ) { filter.processFiltering(); } } // device } // surfelwarp /* The method to build vertex. Using either indicator or pixels. 
The indicator will case sync */ void surfelwarp::AppendSurfelProcessor::BuildSurfelAndFiniteDiffVertex(cudaStream_t stream) { //The size of array contains the element itself const auto num_candidate = m_surfel_candidate_pixel.Size(); m_surfel_vertex_confid.ResizeArrayOrException(num_candidate); m_surfel_normal_radius.ResizeArrayOrException(num_candidate); m_surfel_color_time.ResizeArrayOrException(num_candidate); m_candidate_vertex_finite_diff.ResizeArrayOrException(num_candidate * kNumFiniteDiffVertex); //The appended surfel size is zero if(num_candidate == 0) { LOG(INFO) << "There is no appended surfel"; return; } //Invoke the kernel dim3 blk(64); dim3 grid(divUp(m_surfel_candidate_pixel.Size(), blk.x)); device::buildCandidateSurfelAndFiniteDiffVertexKernel<<<grid, blk, 0, stream>>>( m_observation.vertex_confid_map, m_observation.normal_radius_map, m_observation.color_time_map, m_camera2world, m_surfel_candidate_pixel, kFiniteDiffStep, //The output m_candidate_vertex_finite_diff.Ptr(), m_surfel_vertex_confid.Ptr(), m_surfel_normal_radius.Ptr(), m_surfel_color_time.Ptr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); //Download the data //std::vector<float4> h_candidate_finitediff_vec; //m_candidate_vertex_finite_diff.ArrayView().Download(h_candidate_finitediff_vec); //SURFELWARP_CHECK(h_candidate_finitediff_vec.size() == m_surfel_candidate_pixel.Size() * 4); #endif } void surfelwarp::AppendSurfelProcessor::SkinningFiniteDifferenceVertex(cudaStream_t stream) { //Resize the array m_candidate_vertex_finitediff_knn.ResizeArrayOrException(m_candidate_vertex_finite_diff.ArraySize()); m_candidate_vertex_finitediff_knnweight.ResizeArrayOrException(m_candidate_vertex_finite_diff.ArraySize()); //If there is not surfel candidate if(m_candidate_vertex_finitediff_knn.ArraySize() == 0) { return; } m_live_node_skinner->Skinning( m_candidate_vertex_finite_diff.ArrayView(), m_candidate_vertex_finitediff_knn.ArraySlice(), m_candidate_vertex_finitediff_knnweight.ArraySlice(), stream ); //Check the result of skinning: seems correct /*KNNSearch::CheckKNNSearch( m_warpfield_input.live_node_coords, m_candidate_vertex_finite_diff.ArrayView(), m_candidate_vertex_finitediff_knn.ArrayView() );*/ } void surfelwarp::AppendSurfelProcessor::FilterCandidateSurfels(cudaStream_t stream) { //Resize the indicator m_candidate_surfel_validity_indicator.ResizeArrayOrException(m_surfel_candidate_pixel.Size()); m_surfel_knn.ResizeArrayOrException(m_surfel_candidate_pixel.Size()); m_surfel_knn_weight.ResizeArrayOrException(m_surfel_candidate_pixel.Size()); //Check if the size is zero if(m_surfel_knn.ArraySize() == 0) return; //Construct the filter device::SurfelCandidateFilterDevice filter; filter.warpfield_input.live_node_coords = m_warpfield_input.live_node_coords.RawPtr(); filter.warpfield_input.reference_node_coords = m_warpfield_input.reference_node_coords.RawPtr(); filter.warpfield_input.node_se3 = m_warpfield_input.node_se3.RawPtr(); filter.vertex_input.vertex_finitediff_array = m_candidate_vertex_finite_diff.ArrayView(); filter.vertex_input.vertex_finitediff_knn = m_candidate_vertex_finitediff_knn.Ptr(); filter.vertex_input.vertex_finitediff_knnweight = m_candidate_vertex_finitediff_knnweight.Ptr(); filter.vertex_input.finitediff_step = kFiniteDiffStep; filter.candidate_validity_indicator = m_candidate_surfel_validity_indicator.Ptr(); filter.candidate_knn = m_surfel_knn.Ptr(); filter.candidate_knn_weight = 
m_surfel_knn_weight.Ptr(); //Seems now ready for device code dim3 blk(64); dim3 grid(divUp(m_surfel_candidate_pixel.Size(), blk.x)); device::filterCandidateSurfelKernel<<<grid, blk, 0, stream>>>(filter); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif //Do a prefix sum on the indicator m_candidate_surfel_validity_prefixsum.InclusiveSum(m_candidate_surfel_validity_indicator.ArrayView(), stream); //Debug /*KNNSearch::CheckKNNSearch( m_warpfield_input.live_node_coords, m_surfel_vertex_confid.ArrayView(), m_surfel_knn.ArrayView() );*/ //LOG(INFO) << "The number of valid appended surfels is " << numNonZeroElement(m_candidate_surfel_validity_indicator.ArrayView());
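/*
 * The launch pattern used by the kernels in this file, shown standalone: one thread per
 * element, 64 threads per block, divUp(n, block) blocks, and an explicit bounds check in
 * the kernel because the last block may be partially filled. Toy kernel and names below
 * are illustrative only, not part of surfelwarp.
 */
#include <cuda_runtime.h>

static __host__ __device__ unsigned div_up(unsigned a, unsigned b) { return (a + b - 1u) / b; }

__global__ void scaleKernel(const float* in, float* out, unsigned n, float s) {
  const unsigned idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < n)                          // guard against the rounded-up grid
    out[idx] = s * in[idx];
}

static void launch_scale(const float* d_in, float* d_out, unsigned n, float s, cudaStream_t stream) {
  if (n == 0) return;                   // mirror the early-out used for empty candidate arrays
  dim3 blk(64);
  dim3 grid(div_up(n, blk.x));          // 1-D grid sized to cover all n elements
  scaleKernel<<<grid, blk, 0, stream>>>(d_in, d_out, n, s);
}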
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <Eigen/Core> #include <Eigen/Dense> #include <sophus/se3.hpp> #include <vector> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) inline __device__ __host__ float lerp(float a, float b, float t) { return a + t*(b-a); } __device__ __host__ float3 operator+(const float3 &a, const float3 &b) { return make_float3(a.x+b.x, a.y+b.y, a.z+b.z); } __device__ __host__ float3 operator-(const float3 &a, const float3 &b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); } template <typename Dtype> inline __device__ __host__ const Dtype & getValue(const int3 & v, const int3 & dim, const Dtype* sdf_grids) { return sdf_grids[v.x * dim.y * dim.z + v.y * dim.z + v.z]; } template <typename Dtype> inline __device__ __host__ Dtype getValueInterpolated(const float3 & pGrid, const int3 & dim, const Dtype* sdf_grids) { const int x0 = (int)(pGrid.x - 0.5); const float fx = (pGrid.x - 0.5) - x0; const int y0 = (int)(pGrid.y - 0.5); const float fy = (pGrid.y - 0.5) - y0; const int z0 = (int)(pGrid.z - 0.5); const float fz = (pGrid.z - 0.5) - z0; const int x1 = x0 + 1; const int y1 = y0 + 1; const int z1 = z0 + 1; if ( !(x0 >= 0 && x1 < dim.x && y0 >= 0 && y1 < dim.y && z0 >=0 && z1 < dim.z) ) return 1.0; const float dx00 = lerp( getValue(make_int3(x0,y0,z0), dim, sdf_grids), getValue(make_int3(x1,y0,z0), dim, sdf_grids), fx); const float dx01 = lerp( getValue(make_int3(x0,y0,z1), dim, sdf_grids), getValue(make_int3(x1,y0,z1), dim, sdf_grids), fx); const float dx10 = lerp( getValue(make_int3(x0,y1,z0), dim, sdf_grids), getValue(make_int3(x1,y1,z0), dim, sdf_grids), fx); const float dx11 = lerp( getValue(make_int3(x0,y1,z1), dim, sdf_grids), getValue(make_int3(x1,y1,z1), dim, sdf_grids), fx); const float dxy0 = lerp( dx00, dx10, fy ); const float dxy1 = lerp( dx01, dx11, fy ); float dxyz = lerp( dxy0, dxy1, fz ); // penalize inside objects // if (dxyz < 0) // dxyz *= 10; return dxyz; } template <typename Dtype> inline __device__ __host__ float3 getGradientInterpolated(const float3 & pGrid, const int3 & dim, const Dtype* sdf_grids, float delta) { const float3 delta_x = make_float3(1,0,0); const float3 delta_y = make_float3(0,1,0); const float3 delta_z = make_float3(0,0,1); Dtype f_px = getValueInterpolated(pGrid + delta_x, dim, sdf_grids); Dtype f_py = getValueInterpolated(pGrid + delta_y, dim, sdf_grids); Dtype f_pz = getValueInterpolated(pGrid + delta_z, dim, sdf_grids); Dtype f_mx = getValueInterpolated(pGrid - delta_x, dim, sdf_grids); Dtype f_my = getValueInterpolated(pGrid - delta_y, dim, sdf_grids); Dtype f_mz = getValueInterpolated(pGrid - delta_z, dim, sdf_grids); float3 grad; grad.x = 0.5*(f_px - f_mx) / delta; grad.y = 0.5*(f_py - f_my) / delta; grad.z = 0.5*(f_pz - f_mz) / delta; return grad; } /*******************************************/ /* nthreads: num_points x num_objects */ /* pose_init: num_objects x 4 x 4 */ /* sdf_grid: num_objects x c x h x w */ /* sdf_limits: num_objects x 10 */ /* points: num_points x 3 */ /*******************************************/ template <typename Dtype> __global__ void SDFdistanceForward(const int nthreads, const Dtype* pose_init, const Dtype* sdf_grids, const Dtype* sdf_limits, const Dtype* points, const Dtype* epsilons, const Dtype* padding_scales, const Dtype* clearances, const Dtype* disables, const int num_points, const int num_objects, Dtype* potentials, Dtype* collides, Dtype* potential_grads) { typedef 
Sophus::SE3<Dtype> SE3; typedef Sophus::SO3<Dtype> SO3; typedef Eigen::Matrix<Dtype,3,1,Eigen::DontAlign> Vec3; typedef Eigen::Matrix<Dtype,3,3,Eigen::DontAlign> Mat3; // index is the index of point CUDA_1D_KERNEL_LOOP(index, nthreads) { // batch index int pindex = index / num_objects; int obj_index = index % num_objects; int start_index; if (disables[obj_index] > 0) continue; // convert initial pose Eigen::Matrix<Dtype,4,4> initialPose; start_index = 16 * obj_index; initialPose << pose_init[start_index + 0], pose_init[start_index + 1], pose_init[start_index + 2], pose_init[start_index + 3], pose_init[start_index + 4], pose_init[start_index + 5], pose_init[start_index + 6], pose_init[start_index + 7], pose_init[start_index + 8], pose_init[start_index + 9], pose_init[start_index + 10], pose_init[start_index + 11], pose_init[start_index + 12], pose_init[start_index + 13], pose_init[start_index + 14], pose_init[start_index + 15]; SE3 initialPoseMatrix = SE3(initialPose); Mat3 rotationMatrix = initialPoseMatrix.so3().matrix(); // convert point Vec3 point; point << points[3 * pindex], points[3 * pindex + 1], points[3 * pindex + 2]; // transform the point const Vec3 updatedPoint = initialPoseMatrix * point; // obtain sdf value start_index = 10 * obj_index; int d0 = int(sdf_limits[start_index + 6]); int d1 = int(sdf_limits[start_index + 7]); int d2 = int(sdf_limits[start_index + 8]); float px = (updatedPoint(0) - sdf_limits[start_index + 0]) / (sdf_limits[start_index + 3] - sdf_limits[start_index + 0]) * d0; float py = (updatedPoint(1) - sdf_limits[start_index + 1]) / (sdf_limits[start_index + 4] - sdf_limits[start_index + 1]) * d1; float pz = (updatedPoint(2) - sdf_limits[start_index + 2]) / (sdf_limits[start_index + 5] - sdf_limits[start_index + 2]) * d2; float delta = sdf_limits[start_index + 9]; float3 pGrid = make_float3(px, py, pz); int3 dim = make_int3(d0, d1, d2); Dtype value = getValueInterpolated(pGrid, dim, sdf_grids + obj_index * d0 * d1 * d2); // collision if (value < clearances[obj_index]) collides[index] = 1; // compute gradient float3 grad = getGradientInterpolated(pGrid, dim, sdf_grids + obj_index * d0 * d1 * d2, delta); Dtype epsilon = epsilons[obj_index]; Dtype padding_scale = padding_scales[obj_index]; Vec3 vgrad; if (value <= 0) { potentials[index] = -value + 0.5 * epsilon; vgrad(0) = -grad.x; vgrad(1) = -grad.y; vgrad(2) = -grad.z; } else if (value > 0 && value <= epsilon) { potentials[index] = 1 / (2 * epsilon) * (value - epsilon) * (value - epsilon) * padding_scale; vgrad(0) = 1 / epsilon * grad.x * (value - epsilon) * padding_scale; vgrad(1) = 1 / epsilon * grad.y * (value - epsilon) * padding_scale; vgrad(2) = 1 / epsilon * grad.z * (value - epsilon) * padding_scale; } else continue; // map to robot coordinate const Vec3 updatedGrad = rotationMatrix.transpose() * vgrad; potential_grads[3 * index + 0] = updatedGrad(0); potential_grads[3 * index + 1] = updatedGrad(1); potential_grads[3 * index + 2] = updatedGrad(2); } } /* diffs: num_points x num_objects x num_channels */ /* bottom_diff: num_points x num_channels */ template <typename Dtype> __global__ void sum_gradients(const int nthreads, const Dtype* diffs, const int num_objects, const int num_channels, Dtype* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int p = index / (num_objects * num_channels); int n = index % (num_objects * num_channels); int c = n % num_channels; atomicAdd(bottom_diff + p * num_channels + c, diffs[index]); } } /*******************************************/ /* pose_init: num_objects x 4 x 
4 */ /* sdf_grid: num_objects x c x h x w */ /* sdf_limits: num_objects x 9 */ /* points: num_points x 3 */ /*******************************************/ std::vector<at::Tensor> sdf_loss_cuda_forward( at::Tensor pose_init, at::Tensor sdf_grids, at::Tensor sdf_limits, at::Tensor points, at::Tensor epsilons, at::Tensor padding_scales, at::Tensor clearances, at::Tensor disables) { // run kernels cudaError_t err; const int kThreadsPerBlock = 512; const int num_channels = 3; int output_size; // sizes const int num_objects = pose_init.size(0); const int num_points = points.size(0); // outputs auto potentials = at::zeros({num_points, num_objects}, points.options()); auto collides = at::zeros({num_points, num_objects}, points.options()); auto potential_grads = at::zeros({num_points, num_objects, num_channels}, points.options()); auto top_potentials = at::zeros({num_points}, points.options()); auto top_collides = at::zeros({num_points}, points.options()); auto top_potential_grads = at::zeros({num_points, num_channels}, points.options()); // compute the potentials and gradients output_size = num_points * num_objects; SDFdistanceForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, pose_init.data<float>(), sdf_grids.data<float>(), sdf_limits.data<float>(), points.data<float>(), epsilons.data<float>(), padding_scales.data<float>(), clearances.data<float>(), disables.data<float>(), num_points, num_objects, potentials.data<float>(), collides.data<float>(), potential_grads.data<float>()); cudaDeviceSynchronize(); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // sum the potentials output_size = num_points * num_objects; sum_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, potentials.data<float>(), num_objects, 1, top_potentials.data<float>()); sum_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, collides.data<float>(), num_objects, 1, top_collides.data<float>()); output_size = num_points * num_objects * num_channels; sum_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, potential_grads.data<float>(), num_objects, num_channels, top_potential_grads.data<float>()); cudaDeviceSynchronize(); return {top_potentials, top_potential_grads, top_collides}; }
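/*
 * Host-side restatement of the piecewise potential computed in SDFdistanceForward above:
 * inside an object (sdf <= 0) the cost grows linearly, within the epsilon margin it falls
 * off quadratically to zero (scaled by padding_scale), and beyond the margin it is zero.
 * The kernel multiplies the derivative below by the interpolated SDF gradient and rotates
 * it back into the robot frame; this scalar sketch only shows the 1-D shape of the cost.
 */
struct SDFPotential { float value; float derivative; };    // derivative w.r.t. the SDF value

static SDFPotential sdf_potential(float sdf, float epsilon, float padding_scale) {
  SDFPotential r{0.0f, 0.0f};
  if (sdf <= 0.0f) {                                       // in collision
    r.value = -sdf + 0.5f * epsilon;
    r.derivative = -1.0f;
  } else if (sdf <= epsilon) {                             // inside the safety margin
    r.value = (1.0f / (2.0f * epsilon)) * (sdf - epsilon) * (sdf - epsilon) * padding_scale;
    r.derivative = (1.0f / epsilon) * (sdf - epsilon) * padding_scale;
  }                                                        // sdf > epsilon: zero cost, zero gradient
  return r;
}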
#include "common.cuh" #include <kat/on_device/collaboration/grid.cuh> #include <kat/on_device/collaboration/block.cuh> #include <kat/on_device/collaboration/warp.cuh> #include <kat/on_device/atomics.cuh> using std::size_t; #if __cplusplus < 201701L #include <experimental/optional> template <typename T> using optional = std::experimental::optional<T>; #else template <typename T> #include <optional> using optional = std::optional<T>; #endif template <typename T> const auto make_exact_comparison { optional<T>{} }; namespace klcg = kat::linear_grid::collaborative::grid; namespace klcb = kat::linear_grid::collaborative::block; // namespace kcg = kat::collaborative::grid; namespace kcb = kat::collaborative::block; namespace kcw = kat::collaborative::warp; namespace kernels { template <typename F, typename T, typename... Is> __global__ void execute_testcase( F testcase_device_function, size_t num_values_to_populate, T* __restrict__ values_to_populate, const Is* __restrict__ ... inputs ) { testcase_device_function(num_values_to_populate, values_to_populate, inputs...); } } // namespace kernels template <typename T> std::size_t set_width_for_up_to(T max) { // assert(std::is_integral<I>::value, "Only integer types supported for now"); std::stringstream ss; ss << std::dec << max; return ss.str().length(); } namespace detail { template <typename T> auto tolerance_gadget(std::true_type, T x, optional<T> tolerance) { auto eps = tolerance.value_or(0); return doctest::Approx(x).epsilon(eps); } template <typename T> T tolerance_gadget(std::false_type, T x, optional<T>) { return x; } } // namespace detail template <typename T> auto tolerance_gadget(T x, optional<T> tolerance) { constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value; return detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x, tolerance); } // TODO: Take iterator templates rather than pointers template <typename T, typename F, typename... Is> void check_results( size_t num_values_to_check, // perhaps add another parameter for specific individual-check details? const T* __restrict__ actual_values, F expected_value_retriever, optional<T> comparison_tolerance_fraction, const Is* __restrict__... inputs) { std::stringstream ss; auto index_width = set_width_for_up_to(num_values_to_check); // TODO: Consider using the maximum/minimum result values to set field widths. for(size_t i = 0; i < num_values_to_check; i++) { ss.str(""); ss << "Assertion " << std::setw(index_width) << (i+1) << " for testcase " << doctest::current_test_name() // << " :\n" << "(" << std::make_tuple(inputs[i]...) << ")" ; auto mismatch_message { ss.str() }; if (comparison_tolerance_fraction) { CHECK_MESSAGE(actual_values[i] == tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction), mismatch_message); } else { CHECK_MESSAGE(actual_values[i] == expected_value_retriever(i), mismatch_message); } } } template <typename T> struct tag {}; /** * @brief Executes a testcase intended to make certain checks using a GPU kernel * which produces the values to check for. * * @note The actual checks are eventually conducted on the host side, since doctest * code can't actually do anything useful on the GPU. So on the GPU side we "merely" * compute the values to check and let the test logic peform the actual comparison later * on. */ template <typename F, typename K, typename T, typename... Is, size_t... 
Indices> auto execute_testcase_on_gpu( tag<T>, std::index_sequence<Indices...>, K testcase_kernel, F testcase_device_function, cuda::launch_configuration_t launch_config, size_t num_values_to_populate, Is* __restrict__ ... inputs) { cuda::device_t device { cuda::device::current::get() }; auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) }; cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side auto host_side_results { std::vector<T>(num_values_to_populate) }; auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) { using input_type = std::remove_reference_t<decltype(*input)>; auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n); cuda::memory::copy(device_side_input.get(), input, num_values_to_populate * sizeof(input_type)); return std::move(device_side_input); }; auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... ); ignore(device_side_inputs); // for the case of no inputs cuda::launch( testcase_kernel, launch_config, testcase_device_function, num_values_to_populate, device_side_results.get(), std::get<Indices>(device_side_inputs).get()... ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate); return host_side_results; } template <typename F, typename ExpectedResultRetriever, typename T, typename... Is> void execute_non_uniform_testcase_on_gpu_and_check( F testcase_device_function, ExpectedResultRetriever expected_value_retriever, size_t num_values_to_populate, cuda::grid::dimensions_t grid_dimensions, cuda::grid::block_dimensions_t block_dimensions, optional<T> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; // TODO: Should we check that num_values_to_populate is equal to the number of grid threads? auto host_side_results = execute_testcase_on_gpu( tag<T>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcase<F, T, Is...>, testcase_device_function, launch_config, num_values_to_populate, inputs... ); check_results ( num_values_to_populate, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_value_retriever, comparison_tolerance_fraction, inputs...); } TEST_SUITE("grid-level") { // Note: Types for instantiation are chosen based on what's actually available in CUDA TEST_CASE("at_grid_stride") { using checked_value_type = uint32_t; // No inputs, nor concrete expected results. auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { auto f_inner = [&] (size_t pos) { results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); }; klcg::at_grid_stride(length, f_inner); }; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; auto expected_value_retriever = [total_num_threads] (size_t pos) { // Which thread processes position pos? 
return checked_value_type(pos % total_num_threads); }; auto num_values_to_populate = total_num_threads * 2 + kat::warp_size / 2 - 1; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("at_block_stride") { using checked_value_type = uint32_t; // The type for number of grids in a thread. Should we typedef that? cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; size_t length_to_cover = total_num_threads * 2 + kat::warp_size / 2 - 1; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to by some thread // // In this test case, there's a single common range which the whole grid covers // (as opposed to block-level or warp-level collaboration) auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { auto f_inner = [&] (size_t pos) { // printf("Thread %u in block %u got pos %u of %u\n", threadIdx.x, blockIdx.x, (unsigned) pos, (unsigned) length); results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); }; auto serialization_factor = length / kat::linear_grid::grid_info::grid::num_threads() + (length % kat::linear_grid::grid_info::grid::num_threads() != 0); klcg::at_block_stride(length, f_inner, serialization_factor); }; auto serialization_factor = div_rounding_up(length_to_cover, total_num_threads); auto elements_processed_per_block = serialization_factor * num_threads_per_block; // std::cout << "length_to_cover = " << length_to_cover << ", num_threads_per_block = " << num_threads_per_block << ", elements_per_block = " << serialization_factor << '\n'; auto expected_value_retriever = [=] (size_t pos) { // Which thread processes position pos? 
auto processing_block_index = pos / elements_processed_per_block; auto processing_thread_index = pos % num_threads_per_block; // which is the same as (pos % processing_block_index) % num_threads_per_block return checked_value_type(processing_block_index * num_threads_per_block + processing_thread_index); }; // for(int i = 0; i < length_to_cover; i++) { // if (i % 10 == 0) { std::cout << '\n' << std::setw(3) << i << ": "; } // std::cout << std::setw(3) << expected_value_retriever(i) << " "; // } // std::cout << "\n\n"; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, length_to_cover, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } struct attending_threads_info { struct { uint32_t grid_size_minus_first, last; // We use grid_size_minus_first rather than first, so that // zero-initialization would be semantically acceptable } extrema; uint32_t num; }; // Note: All of this gets zero-initialized std::ostream& operator<<(std::ostream& os, const attending_threads_info& ati) { return os << "{ {" << ati.extrema.grid_size_minus_first << ", " << ati.extrema.last << " }, " << ati.num << " }"; } bool operator==(const attending_threads_info& lhs, const attending_threads_info & rhs) { return lhs.extrema.grid_size_minus_first == rhs.extrema.grid_size_minus_first and lhs.extrema.last == rhs.extrema.last and lhs.num == rhs.num; } TEST_CASE("warp_per_input_element::at_grid_stride") { using checked_value_type = attending_threads_info; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 15 }; auto total_num_threads = num_grid_blocks * num_threads_per_block; auto length_to_cover = total_num_threads / 4 + 1; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to by some full warp auto num_values_to_populate = length_to_cover; auto testcase_device_function = [=] KAT_DEV ( size_t length_of_attending_threads_info, checked_value_type* attending_threads_info) { namespace gi = kat::linear_grid::grid_info; const auto my_index = gi::thread::id_in_grid(); auto grid_size_minus_my_index = gi::grid::num_threads() - my_index; auto f_inner = [&] (size_t pos) { // printf("Thead %d of block %d is handling pos %lu\n", threadIdx.x, blockIdx.x, pos); kat::atomic::increment(&attending_threads_info[pos].num); kat::atomic::max(&attending_threads_info[pos].extrema.grid_size_minus_first, grid_size_minus_my_index); kat::atomic::max(&attending_threads_info[pos].extrema.last, my_index); }; klcg::warp_per_input_element::at_grid_stride(length_to_cover, f_inner); }; auto expected_value_retriever = [=] (size_t pos) { // Which threads have handled position pos? auto total_num_warps = total_num_threads / kat::warp_size; auto modular_pos = pos % total_num_warps; uint32_t first_thread_to_handle_element = modular_pos * kat::warp_size; uint32_t grid_size_minus_first = total_num_threads - first_thread_to_handle_element; uint32_t last = (modular_pos+1) * kat::warp_size - 1; uint32_t num = kat::warp_size; return attending_threads_info { { grid_size_minus_first, last }, num }; }; execute_non_uniform_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } } // TEST_SUITE("grid-level")
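/*
 * Hedged illustration (not part of the test suite above): the expected_value_retriever
 * for at_grid_stride encodes the classic grid-stride ownership rule -- element pos is
 * handled by thread (pos % total_number_of_grid_threads). The sketch below reproduces
 * that rule with a plain CUDA kernel, without the cuda-kat wrappers, so the assertions
 * above can be read against something self-contained. Kernel and variable names here
 * are invented for illustration.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void record_owner_at_grid_stride(size_t length, unsigned int* owner)
{
    unsigned int tid       = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int grid_size = gridDim.x * blockDim.x;
    for (size_t pos = tid; pos < length; pos += grid_size)
        owner[pos] = tid;                      // which grid thread covered this element
}

int main()
{
    const unsigned int num_blocks = 2, threads_per_block = 64;
    const unsigned int total_threads = num_blocks * threads_per_block;
    const size_t length = 2 * total_threads + 31;   // deliberately not a multiple of the grid size

    unsigned int* owner = nullptr;
    cudaMallocManaged(&owner, length * sizeof(unsigned int));
    record_owner_at_grid_stride<<<num_blocks, threads_per_block>>>(length, owner);
    cudaDeviceSynchronize();

    bool ok = true;
    for (size_t pos = 0; pos < length; pos++)
        ok = ok && (owner[pos] == pos % total_threads);
    printf("%s\n", ok ? "grid-stride ownership matches pos %% total_threads" : "mismatch");
    cudaFree(owner);
    return ok ? 0 : 1;
}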
/** @addtogroup cudpp_app * @{ */ /** @name StringSort Functions * @{ */ #include "cuda_util.h" #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_stringsort.h" #include "kernel/stringsort_kernel.cuh" #include "limits.h" #define BLOCKSORT_SIZE 1024 #define DEPTH 8 void dotAdd(unsigned int* d_address, unsigned int* numSpaces, unsigned int* packedAddress, size_t numElements, size_t stringArrayLength) { int numThreads = 128; int numBlocks = (numElements+numThreads-1)/numThreads; dotAddInclusive<<<numBlocks, numThreads>>>(numSpaces, d_address, packedAddress, numElements, stringArrayLength); } void calculateAlignedOffsets(unsigned int* d_address, unsigned int* numSpaces, unsigned char* d_stringVals, unsigned char termC, size_t numElements, size_t stringArrayLength) { int numThreads = 128; int numBlocks = (numElements+numThreads-1)/numThreads; alignedOffsets<<<numBlocks, numThreads>>>(numSpaces, d_address, d_stringVals, termC, numElements, stringArrayLength); } void packStrings(unsigned int* packedStrings, unsigned char* d_stringVals, unsigned int* d_keys, unsigned int* packedAddress, unsigned int* address, size_t numElements, size_t stringArrayLength, unsigned char termC) { unsigned int numThreads = 128; unsigned int numBlocks = (numElements + numThreads - 1)/numThreads; //Each thread handles one string (irregular parrallelism) other option is to do per character (set of chars) //but that requires a binary search per character. Efficiency depends on the dataset alignString<<<numBlocks, numThreads>>>(packedStrings, d_stringVals, packedAddress, address, numElements, stringArrayLength, termC); createKeys<<<numBlocks, numThreads>>>(d_keys, packedStrings, packedAddress, numElements); } void unpackStrings(unsigned int* packedAddress, unsigned int* packedAddressRef, unsigned int* address, unsigned int* addressRef, size_t numElements) { unsigned int numThreads = 128; unsigned int numBlocks = (numElements + numThreads - 1)/numThreads; unpackAddresses<<<numBlocks, numThreads>>>(packedAddress, packedAddressRef, address, addressRef, numElements); } /** @brief Performs merge sor utilzing three stages. * (1) Blocksort, (2) simple merge and (3) multi merge on a * set of strings * * @param[in,out] pkeys Keys (first four characters of string) to be sorted. * @param[in,out] pvals Addresses of string locations for tie-breaks * @param[out] stringVals global string value array (four characters stuffed into a uint) * @param[in] numElements Number of elements in the sort. * @param[in] stringArrayLength The size of our string array in uints (4 chars per uint) * @param[in] plan Configuration information for mergesort. 
* @param[in] termC Termination character for our strings **/ void runStringSort(unsigned int *pkeys, unsigned int *pvals, unsigned int *stringVals, size_t numElements, size_t stringArrayLength, unsigned char termC, const CUDPPStringSortPlan *plan) { int numPartitions = (numElements+BLOCKSORT_SIZE-1)/BLOCKSORT_SIZE; int numBlocks = numPartitions/2; int partitionSize = BLOCKSORT_SIZE; unsigned int swapPoint = plan->m_swapPoint; unsigned int subPartitions = plan->m_subPartitions; int numThreads = 128; blockWiseStringSort<unsigned int, DEPTH> <<<numPartitions, BLOCKSORT_SIZE/DEPTH, 2*(BLOCKSORT_SIZE)*sizeof(unsigned int)>>> (pkeys, pvals, stringVals, BLOCKSORT_SIZE, numElements, stringArrayLength, termC); int mult = 1; int count = 0; //we run p stages of simpleMerge until numBlocks <= some Critical level while(numPartitions > swapPoint || (partitionSize*mult < 16384 && numPartitions > 1)/* && numPartitions > 1*/) { //printf("Running simple merge for %d partitions of size %d\n", numPartitions, partitionSize*mult); numBlocks = (numPartitions&0xFFFE); if(count%2 == 0) { simpleStringMerge<unsigned int, 2> <<<numBlocks, CTASIZE_simple, sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4)>>>(pkeys, plan->m_tempKeys, pvals, plan->m_tempAddress, stringVals, partitionSize*mult, numElements, count, stringArrayLength, termC); if(numPartitions%2 == 1) { int offset = (partitionSize*mult*(numPartitions-1)); int numElementsToCopy = numElements-offset; simpleCopy<unsigned int> <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(pkeys, pvals, plan->m_tempKeys, plan->m_tempAddress, offset, numElementsToCopy); } } else { simpleStringMerge<unsigned int, 2> <<<numBlocks, CTASIZE_simple, sizeof(unsigned int)*(2*INTERSECT_B_BLOCK_SIZE_simple+4)>>>(plan->m_tempKeys, pkeys, plan->m_tempAddress, pvals, stringVals, partitionSize*mult, numElements, count, stringArrayLength, termC); if(numPartitions%2 == 1) { int offset = (partitionSize*mult*(numPartitions-1)); int numElementsToCopy = numElements-offset; simpleCopy<unsigned int> <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(plan->m_tempKeys, plan->m_tempAddress, pkeys, pvals, offset, numElementsToCopy); } } mult*=2; count++; numPartitions = (numPartitions+1)/2; } //End of simpleMerge, now blocks cooperate to merge partitions while (numPartitions > 1) { numBlocks = (numPartitions&0xFFFE); int secondBlocks = ((numBlocks)*subPartitions+numThreads-1)/numThreads; if(count%2 == 1) { findMultiPartitions<unsigned int> <<<secondBlocks, numThreads>>>(plan->m_tempKeys, plan->m_tempAddress, stringVals, subPartitions, numBlocks, partitionSize*mult, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, numElements, stringArrayLength, termC); //int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements); stringMergeMulti<unsigned int, DEPTH_multi> <<<numBlocks*subPartitions, CTASIZE_multi, (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int)>>>(plan->m_tempKeys, pkeys, plan->m_tempAddress, pvals, stringVals, subPartitions, numBlocks, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, mult*partitionSize, count, numElements, stringArrayLength, termC); if(numPartitions%2 == 1) { int offset = (partitionSize*mult*(numPartitions-1)); int numElementsToCopy = numElements-offset; simpleCopy<unsigned int> <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(plan->m_tempKeys, plan->m_tempAddress, pkeys, pvals, offset, 
numElementsToCopy); } } else { findMultiPartitions<unsigned int> <<<secondBlocks, numThreads>>>(pkeys, pvals, stringVals, subPartitions, numBlocks, partitionSize*mult, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, numElements, stringArrayLength, termC); //int lastSubPart = getLastSubPart(numBlocks, subPartitions, partitionSize, mult, numElements); stringMergeMulti<unsigned int, DEPTH_multi> <<<numBlocks*subPartitions, CTASIZE_multi, (2*INTERSECT_B_BLOCK_SIZE_multi+4)*sizeof(unsigned int)>>>(pkeys, plan->m_tempKeys, pvals, plan->m_tempAddress, stringVals, subPartitions, numBlocks, plan->m_partitionStartA, plan->m_partitionSizeA, plan->m_partitionStartB, plan->m_partitionSizeB, mult*partitionSize, count, numElements, stringArrayLength, termC); if(numPartitions%2 == 1) { int offset = (partitionSize*mult*(numPartitions-1)); int numElementsToCopy = numElements-offset; simpleCopy<unsigned int> <<<(numElementsToCopy+numThreads-1)/numThreads, numThreads>>>(pkeys, pvals, plan->m_tempKeys, plan->m_tempAddress, offset, numElementsToCopy); } } count++; mult*=2; subPartitions*=2; numPartitions = (numPartitions+1)/2; } if(count%2==1) { CUDA_SAFE_CALL(cudaMemcpy(pkeys, plan->m_tempKeys, numElements*sizeof(unsigned int), cudaMemcpyDeviceToDevice)); CUDA_SAFE_CALL(cudaMemcpy(pvals, plan->m_tempAddress, numElements*sizeof(unsigned int), cudaMemcpyDeviceToDevice)); } } #ifdef __cplusplus extern "C" { #endif /** * @brief From the programmer-specified sort configuration, * creates internal memory for performing the sort. * * @param[in] plan Pointer to CUDPPStringSortPlan object **/ void allocStringSortStorage(CUDPPStringSortPlan *plan) { CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_keys, sizeof(unsigned int)*plan->m_numElements)); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_tempKeys, sizeof(unsigned int)*plan->m_numElements)); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_tempAddress, sizeof(unsigned int)*plan->m_numElements)); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_packedAddress, sizeof(unsigned int)*(plan->m_numElements+1))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_packedAddressRef, sizeof(unsigned int)*(plan->m_numElements))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_addressRef, sizeof(unsigned int)*(plan->m_numElements))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_spaceScan, sizeof(unsigned int)*(plan->m_numElements+1))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_numSpaces, sizeof(unsigned int)*(plan->m_numElements+1))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionSizeA, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionSizeB, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionStartA, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4))); CUDA_SAFE_CALL(cudaMalloc((void**)&plan->m_partitionStartB, sizeof(unsigned int)*(plan->m_swapPoint*plan->m_subPartitions*4))); } /** @brief Deallocates intermediate memory from allocStringSortStorage. 
* * * @param[in] plan Pointer to CUDPStringSortPlan object **/ void freeStringSortStorage(CUDPPStringSortPlan* plan) { cudaFree(plan->m_keys); cudaFree(plan->m_packedAddress); cudaFree(plan->m_packedAddressRef); cudaFree(plan->m_tempKeys); cudaFree(plan->m_tempAddress); cudaFree(plan->m_addressRef); cudaFree(plan->m_numSpaces); cudaFree(plan->m_spaceScan); cudaFree(plan->m_partitionSizeA); cudaFree(plan->m_partitionSizeB); cudaFree(plan->m_partitionStartA); cudaFree(plan->m_partitionStartB); } /** @brief Dispatch function to perform a sort on an array with * a specified configuration. * * This is the dispatch routine which calls stringSort...() with * appropriate template parameters and arguments as specified by * the plan. * @param[in,out] keys Keys (first four chars of string) to be sorted. * @param[in,out] values Address of string values in array of null terminated strings * @param[in] stringVals Global string array * @param[in] numElements Number of elements in the sort. * @param[in] stringArrayLength The size of our string array in uints (4 chars per uint) * @param[in] termC Termination character for our strings * @param[in] plan Configuration information for mergeSort. **/ void cudppStringSortDispatch(unsigned int *keys, unsigned int *values, unsigned int *stringVals, size_t numElements, size_t stringArrayLength, unsigned char termC, const CUDPPStringSortPlan *plan) { runStringSort(keys, values, stringVals, numElements, stringArrayLength, termC, plan); } #ifdef __cplusplus } #endif /** @} */ // end stringsort functions /** @} */ // end cudpp_app
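/*
 * Hedged sketch (not CUDPP code): the sort above keys each string by its first four
 * characters packed into one unsigned int ("4 chars per uint"), and breaks ties by
 * walking the full strings through the address array. The host-side helper below shows
 * one way such a key can be formed so that unsigned-integer comparison agrees with
 * lexicographic comparison on those four characters; the exact packing and padding used
 * by createKeys() may differ, and make_key/termC here are illustrative names only.
 */
#include <cstdint>
#include <cstdio>

static uint32_t make_key(const char* s, unsigned char termC)
{
    uint32_t key = 0;
    int i = 0;
    for (; i < 4; i++) {
        unsigned char c = (unsigned char)s[i];
        key = (key << 8) | c;                  // most significant byte first
        if (c == termC) { i++; break; }        // stop at the terminator
    }
    for (; i < 4; i++)
        key <<= 8;                             // pad short strings with zero bytes
    return key;
}

int main()
{
    printf("%08x\n", make_key("abcd", '\0'));  // 61626364
    printf("%08x\n", make_key("ab",   '\0'));  // 61620000  -- sorts before "abcd"
    printf("%08x\n", make_key("abce", '\0'));  // 61626365  -- sorts after  "abcd"
    return 0;
}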
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) /* ------------------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c) before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are run through mix(), or through mix() in reverse, there are at least 32 bits of the output that are sometimes the same for one pair and different for another pair. This was tested for: * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that satisfy this are 4 6 8 16 19 4 9 15 3 18 27 15 14 9 3 7 17 3 Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for "differ" defined as + with a one-bit base and a two-bit delta. I used http://burtleburtle.net/bob/hash/avalanche.html to choose the operations, constants, and arrangements of the variables. This does not achieve avalanche. There are input bits of (a,b,c) that fail to affect some output bits of (a,b,c), especially of a. The most thoroughly mixed value is c, but it doesn't really even achieve avalanche in c. This allows some parallelism. Read-after-writes are good at doubling the number of bits affected, so the goal of mixing pulls in the opposite direction as the goal of parallelism. I did what I could. Rotates seem to cost as much as shifts on every machine I could lay my hands on, and rotates are much kinder to the top and bottom bits, so I used rotates. ------------------------------------------------------------------------------- */ #define mix(a,b,c) \ { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } /* ------------------------------------------------------------------------------- final -- final mixing of 3 32-bit values (a,b,c) into c Pairs of (a,b,c) values differing in only a few bits will usually produce values of c that look totally different. This was tested for * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. 
These constants passed: 14 11 25 16 4 14 24 12 14 25 16 4 14 24 and these came close: 4 8 15 26 3 22 24 10 8 15 26 3 22 24 11 8 15 26 3 22 24 ------------------------------------------------------------------------------- */ #define final(a,b,c) \ { \ c ^= b; c -= rot(b,14); \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -= rot(c,4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ } __device__ __host__ unsigned int mixRemainder(unsigned int a, unsigned int b, unsigned int c, unsigned int k0, unsigned int k1, unsigned int k2, unsigned int length ) { switch(length) { case 12: c+=k2; b+=k1; a+=k0; break; case 11: c+=k2&0xffffff; b+=k1; a+=k0; break; case 10: c+=k2&0xffff; b+=k1; a+=k0; break; case 9 : c+=k2&0xff; b+=k1; a+=k0; break; case 8 : b+=k1; a+=k0; break; case 7 : b+=k1&0xffffff; a+=k0; break; case 6 : b+=k1&0xffff; a+=k0; break; case 5 : b+=k1&0xff; a+=k0; break; case 4 : a+=k0; break; case 3 : a+=k0&0xffffff; break; case 2 : a+=k0&0xffff; break; case 1 : a+=k0&0xff; break; case 0 : return c; /* zero length strings require no mixing */ } final(a,b,c); return c; } unsigned int hashlittle( const void *key, size_t length, unsigned int initval) { unsigned int a,b,c; /* internal state */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((unsigned int)length) + initval; const unsigned int *k = (const unsigned int *)key; /* read 32-bit chunks */ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]&0xffffff" actually reads beyond the end of the string, but * then masks off the part it's not allowed to read. Because the * string is aligned, the masked-off tail is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND will * still catch it and complain. The masking trick does make the hash * noticably faster for short strings (like English words). 
*/ switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff; a+=k[0]; break; case 6 : b+=k[1]&0xffff; a+=k[0]; break; case 5 : b+=k[1]&0xff; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff; break; case 2 : a+=k[0]&0xffff; break; case 1 : a+=k[0]&0xff; break; case 0 : return c; /* zero length strings require no mixing */ } final(a,b,c); return c; } __global__ void kernel ( const unsigned int *lengths, const unsigned int *initvals, const unsigned int *keys, unsigned int *out, const int N ) { int id = blockDim.x*blockIdx.x+threadIdx.x; if (id >= N) return; unsigned int length = lengths[id]; const unsigned int initval = initvals[id]; // a value of type "const unsigned int *" cannot be used to initialize an entity of type "unsigned int *" const unsigned int *k = keys+id*16; // each key has at most 15 words (60 bytes) /* Set up the internal state */ unsigned int a,b,c; unsigned int r0,r1,r2; a = b = c = 0xdeadbeef + length + initval; /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } r0 = k[0]; r1 = k[1]; r2 = k[2]; /*----------------------------- handle the last (probably partial) block */ /* * "k[2]&0xffffff" actually reads beyond the end of the string, but * then masks off the part it's not allowed to read. Because the * string is aligned, the masked-off tail is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND will * still catch it and complain. The masking trick does make the hash * noticably faster for short strings (like English words). 
*/ out[id] = mixRemainder(a, b, c, r0, r1, r2, length); } int main(int argc, char** argv) { // sample gold result const char* str = "Four score and seven years ago"; unsigned int c = hashlittle(str, 30, 1); printf("input string: %s hash is %.8x\n", str, c); /* cd628161 */ int block_size = atoi(argv[1]); // work group size unsigned long N = atol(argv[2]); // total number of strings unsigned int *keys = NULL; unsigned int *lens = NULL; unsigned int *initvals = NULL; unsigned int *out = NULL; // padded to 64 bytes (16 words) posix_memalign((void**)&keys, 1024, sizeof(unsigned int)*N*16); posix_memalign((void**)&lens, 1024, sizeof(unsigned int)*N); posix_memalign((void**)&initvals, 1024, sizeof(unsigned int)*N); posix_memalign((void**)&out, 1024, sizeof(unsigned int)*N); // the kernel supports up to 60 bytes srand(2); char src[64]; memcpy(src, str, 64); for (unsigned long i = 0; i < N; i++) { memcpy((unsigned char*)keys+i*16*sizeof(unsigned int), src, 64); lens[i] = rand()%61; initvals[i] = i%2; } unsigned int* d_keys; cudaMalloc((void**)&d_keys, sizeof(unsigned int)*N*16); cudaMemcpyAsync(d_keys, keys, sizeof(unsigned int)*N*16, cudaMemcpyHostToDevice, 0); unsigned int* d_lens; cudaMalloc((void**)&d_lens, sizeof(unsigned int)*N); cudaMemcpyAsync(d_lens, lens, sizeof(unsigned int)*N, cudaMemcpyHostToDevice, 0); unsigned int* d_initvals; cudaMalloc((void**)&d_initvals, sizeof(unsigned int)*N); cudaMemcpyAsync(d_initvals, initvals, sizeof(unsigned int)*N, cudaMemcpyHostToDevice, 0); unsigned int* d_out; cudaMalloc((void**)&d_out, sizeof(unsigned int)*N); dim3 grids ((N+block_size-1)/block_size); dim3 threads (block_size); for (int n = 0; n < 100; n++) { kernel<<<grids,threads>>>(d_lens, d_initvals, d_keys, d_out, N); } cudaMemcpy(out, d_out, sizeof(unsigned int)*N, cudaMemcpyDeviceToHost); cudaFree(d_keys); cudaFree(d_lens); cudaFree(d_initvals); cudaFree(d_out); printf("Verify the results computed on the device..\n"); bool error = false; for (unsigned long i = 0; i < N; i++) { c = hashlittle(&keys[i*16], lens[i], initvals[i]); if (out[i] != c) { printf("Error: at %lu gpu hash is %.8x cpu hash is %.8x\n", i, out[i], c); error = true; break; } } if (error) printf("FAILED\n"); else printf("PASS\n"); free(keys); free(lens); free(initvals); free(out); return 0; }
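/*
 * Hedged aside (not part of the benchmark above): the "k[2]&0xffffff" trick in the
 * final switch reads a whole 32-bit word past the logical end of the key and masks
 * off the excess bytes. On a little-endian machine this yields the same word as the
 * VALGRIND-friendly alternative of copying the tail into a zero-padded buffer, which
 * is why the masked read still produces the correct hash value. The tiny host check
 * below demonstrates that equivalence; buffer names are invented for illustration.
 */
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    const char tail[4] = { 'a', 'b', 'c', 'X' };   // 'X' lies beyond the logical length of 3
    uint32_t word;
    memcpy(&word, tail, 4);                        // the "read past the end" word
    uint32_t masked = word & 0xffffff;             // what "case 3" of the switch keeps

    const char padded_bytes[4] = { 'a', 'b', 'c', 0 };
    uint32_t padded;
    memcpy(&padded, padded_bytes, 4);              // the copy-into-zeroed-buffer alternative

    printf("masked = %08x, padded = %08x -> %s\n", masked, padded,
           masked == padded ? "equal (little-endian)" : "differ");
    return 0;
}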
* \file * cub::BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA threadblock. */ #pragma once #include "../../util_ptx.cuh" #include "../../util_arch.cuh" #include "../../block/block_raking_layout.cuh" #include "../../thread/thread_reduce.cuh" #include "../../thread/thread_scan.cuh" #include "../../warp/warp_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA threadblock. */ template < typename T, ///< Data type being scanned int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension bool MEMOIZE, ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockScanRaking { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /// Layout type for padded threadblock raking grid typedef BlockRakingLayout<T, BLOCK_THREADS, PTX_ARCH> BlockRakingLayout; /// Constants enum { /// Number of raking threads RAKING_THREADS = BlockRakingLayout::RAKING_THREADS, /// Number of raking elements per warp synchronous raking thread SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH, /// Cooperative work can be entirely warp synchronous WARP_SYNCHRONOUS = (BLOCK_THREADS == RAKING_THREADS), }; /// WarpScan utility type typedef WarpScan<T, RAKING_THREADS, PTX_ARCH> WarpScan; /// Shared memory storage layout type struct _TempStorage { typename WarpScan::TempStorage warp_scan; ///< Buffer for warp-synchronous scan typename BlockRakingLayout::TempStorage raking_grid; ///< Padded threadblock raking grid T block_aggregate; ///< Block aggregate }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- // Thread fields _TempStorage &temp_storage; int linear_tid; T cached_segment[SEGMENT_LENGTH]; //--------------------------------------------------------------------- // Utility methods //--------------------------------------------------------------------- /// Templated reduction template <int ITERATION, typename ScanOp> __device__ __forceinline__ T GuardedReduce( T* raking_ptr, ///< [in] Input array ScanOp scan_op, ///< [in] Binary reduction operator T raking_partial, ///< [in] Prefix to seed reduction with Int2Type<ITERATION> iteration) { if ((BlockRakingLayout::UNGUARDED) || (((linear_tid * SEGMENT_LENGTH) + ITERATION) < BLOCK_THREADS)) { T addend = raking_ptr[ITERATION]; raking_partial = scan_op(raking_partial, addend); } return GuardedReduce(raking_ptr, scan_op, raking_partial, Int2Type<ITERATION + 1>()); } /// Templated reduction (base case) template <typename ScanOp> __device__ __forceinline__ T GuardedReduce( T* raking_ptr, ///< [in] Input array ScanOp scan_op, ///< [in] Binary reduction operator T raking_partial, ///< [in] Prefix 
to seed reduction with Int2Type<SEGMENT_LENGTH> iteration) { return raking_partial; } /// Templated copy template <int ITERATION> __device__ __forceinline__ void CopySegment( T* out, ///< [out] Out array T* in, ///< [in] Input array Int2Type<ITERATION> iteration) { out[ITERATION] = in[ITERATION]; CopySegment(out, in, Int2Type<ITERATION + 1>()); } /// Templated copy (base case) __device__ __forceinline__ void CopySegment( T* out, ///< [out] Out array T* in, ///< [in] Input array Int2Type<SEGMENT_LENGTH> iteration) {} /// Performs upsweep raking reduction, returning the aggregate template <typename ScanOp> __device__ __forceinline__ T Upsweep( ScanOp scan_op) { T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); // Read data into registers CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); T raking_partial = cached_segment[0]; return GuardedReduce(cached_segment, scan_op, raking_partial, Int2Type<1>()); } /// Performs exclusive downsweep raking scan template <typename ScanOp> __device__ __forceinline__ void ExclusiveDownsweep( ScanOp scan_op, T raking_partial, bool apply_prefix = true) { T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); // Read data back into registers if (!MEMOIZE) { CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); } ThreadScanExclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); // Write data back to smem CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); } /// Performs inclusive downsweep raking scan template <typename ScanOp> __device__ __forceinline__ void InclusiveDownsweep( ScanOp scan_op, T raking_partial, bool apply_prefix = true) { T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); // Read data back into registers if (!MEMOIZE) { CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); } ThreadScanInclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); // Write data back to smem CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); } //--------------------------------------------------------------------- // Constructors //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ BlockScanRaking( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //--------------------------------------------------------------------- // Exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &identity, ///< [in] Identity value ScanOp scan_op) ///< [in] Binary scan operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, identity, scan_op); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Exclusive Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, identity, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial); } __syncthreads(); // Grab exclusive partial from shared memory output = *placement_ptr; } } /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &identity, ///< [in] Identity value ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, identity, scan_op, block_aggregate); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, identity, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial); // Broadcast aggregate to other threads if (linear_tid == RAKING_THREADS - 1) temp_storage.block_aggregate = inclusive_partial; } __syncthreads(); // Grab exclusive partial from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. 
template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) T identity, ///< [in] Identity value ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items (exclusive of the \p block_prefix_callback_op value) BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, identity, scan_op, block_aggregate); // Obtain warp-wide prefix in lane0, then broadcast to other lanes T prefix = block_prefix_callback_op(block_aggregate); prefix = WarpScan(temp_storage.warp_scan).Broadcast(prefix, 0); output = scan_op(prefix, output); if (linear_tid == 0) output = prefix; } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, identity, scan_op); // Broadcast aggregate to other lanes (through smem because we eventually want it in all threads) if (linear_tid == RAKING_THREADS - 1) ThreadStore<STORE_VOLATILE>(&temp_storage.block_aggregate, inclusive_partial); block_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage.block_aggregate); // Obtain block-wide prefix in lane0, then broadcast to other lanes T prefix = block_prefix_callback_op(block_aggregate); prefix = WarpScan(temp_storage.warp_scan).Broadcast(prefix, 0); // Update prefix with warpscan exclusive partial if (linear_tid > 0) prefix = scan_op(prefix, exclusive_partial); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, prefix); } __syncthreads(); // Grab exclusive partial from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } //--------------------------------------------------------------------- // Identity-less exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no identity value, the output computed for <em>thread</em><sub>0</sub> is undefined. 
template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, scan_op); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); } __syncthreads(); // Grab thread prefix from shared memory output = *placement_ptr; } } /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no identity value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, scan_op, block_aggregate); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial= Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); // Broadcast aggregate to all threads if (linear_tid == RAKING_THREADS - 1) temp_storage.block_aggregate = inclusive_partial; } __syncthreads(); // Grab thread prefix from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items (exclusive of the \p block_prefix_callback_op value) BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, scan_op, block_aggregate); // Obtain warp-wide prefix in lane0, then broadcast to other lanes T prefix = block_prefix_callback_op(block_aggregate); prefix = WarpScan(temp_storage.warp_scan).Broadcast(prefix, 0); output = scan_op(prefix, output); if (linear_tid == 0) output = prefix; } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); // Broadcast aggregate to other lanes (through smem because we eventually want it in all threads) if (linear_tid == RAKING_THREADS - 1) ThreadStore<STORE_VOLATILE>(&temp_storage.block_aggregate, inclusive_partial); block_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage.block_aggregate); // Obtain block-wide prefix in lane0, then broadcast to other lanes T prefix = block_prefix_callback_op(block_aggregate); prefix = WarpScan(temp_storage.warp_scan).Broadcast(prefix, 0); // Update prefix with warpscan exclusive partial if (linear_tid > 0) prefix = scan_op(prefix, exclusive_partial); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, prefix); } __syncthreads(); // Grab thread prefix from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } //--------------------------------------------------------------------- // Inclusive scans //--------------------------------------------------------------------- /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Exclusive Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); // Inclusive raking downsweep scan InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); } __syncthreads(); // Grab thread prefix from shared memory output = *placement_ptr; } } /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op, block_aggregate); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); // Inclusive raking downsweep scan InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); // Broadcast aggregate to all threads if (linear_tid == RAKING_THREADS - 1) temp_storage.block_aggregate = inclusive_partial; } __syncthreads(); // Grab thread prefix from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items (exclusive of the \p block_prefix_callback_op value) BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan T inclusive_partial; WarpScan(temp_storage.warp_scan).InclusiveScan(input, inclusive_partial, scan_op, block_aggregate); // Obtain warp-wide prefix in lane0, then broadcast to other lanes output = block_prefix_callback_op(block_aggregate); output = WarpScan(temp_storage.warp_scan).Broadcast(output, 0); // Update prefix with exclusive warpscan partial output = scan_op(output, inclusive_partial); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; __syncthreads(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); // Broadcast aggregate to other lanes (through smem because we eventually want it in all threads) if (linear_tid == RAKING_THREADS - 1) ThreadStore<STORE_VOLATILE>(&temp_storage.block_aggregate, inclusive_partial); block_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage.block_aggregate); // Obtain block-wide prefix in lane0, then broadcast to other lanes T prefix = block_prefix_callback_op(block_aggregate); prefix = WarpScan(temp_storage.warp_scan).Broadcast(prefix, 0); // Update prefix with warpscan exclusive partial if (linear_tid > 0) prefix = scan_op(prefix, exclusive_partial); // Inclusive raking downsweep scan InclusiveDownsweep(scan_op, prefix); } __syncthreads(); // Grab thread prefix from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
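/*
 * Hedged usage sketch: BlockScanRaking is an internal specialization; user code
 * normally reaches it through the public cub::BlockScan front-end, whose default
 * algorithm parameter selects the raking variant. The kernel below shows that
 * typical usage for a 128-thread block-wide exclusive prefix sum; kernel and buffer
 * names are invented, and which specialization is actually instantiated depends on
 * the CUB version and template parameters.
 */
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/block/block_scan.cuh>

__global__ void block_prefix_sum(const int* in, int* out)
{
    using BlockScan = cub::BlockScan<int, 128>;             // default algorithm: raking-based scan
    __shared__ typename BlockScan::TempStorage temp_storage;

    int thread_value = in[threadIdx.x];
    int exclusive_prefix;
    BlockScan(temp_storage).ExclusiveSum(thread_value, exclusive_prefix);

    out[threadIdx.x] = exclusive_prefix;
}

int main()
{
    const int n = 128;
    int h_in[n], h_out[n];
    for (int i = 0; i < n; i++) h_in[i] = 1;                // all-ones input

    int *d_in, *d_out;
    cudaMalloc(&d_in,  n * sizeof(int));
    cudaMalloc(&d_out, n * sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

    block_prefix_sum<<<1, n>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);

    printf("out[0]=%d out[%d]=%d (expected 0 and %d)\n", h_out[0], n - 1, h_out[n - 1], n - 1);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}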
#include <stdio.h> #include <stdlib.h> // CHECK: #include <hip/hip_runtime.h> #include <cuda_runtime.h> // CHECK: #include "hipsparse.h" #include "cusparse.h" // CHECK: if (y) hipFree(y); // CHECK: if (z) hipFree(z); // CHECK: if (xInd) hipFree(xInd); // CHECK: if (xVal) hipFree(xVal); // CHECK: if (csrRowPtr) hipFree(csrRowPtr); // CHECK: if (cooRowIndex) hipFree(cooRowIndex); // CHECK: if (cooColIndex) hipFree(cooColIndex); // CHECK: if (cooVal) hipFree(cooVal); // CHECK: if (descr) hipsparseDestroyMatDescr(descr); // CHECK: if (handle) hipsparseDestroy(handle); // CHECK: hipDeviceReset(); #define CLEANUP(s) \ do { \ printf ("%s\n", s); \ if (yHostPtr) free(yHostPtr); \ if (zHostPtr) free(zHostPtr); \ if (xIndHostPtr) free(xIndHostPtr); \ if (xValHostPtr) free(xValHostPtr); \ if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\ if (cooColIndexHostPtr) free(cooColIndexHostPtr);\ if (cooValHostPtr) free(cooValHostPtr); \ if (y) cudaFree(y); \ if (z) cudaFree(z); \ if (xInd) cudaFree(xInd); \ if (xVal) cudaFree(xVal); \ if (csrRowPtr) cudaFree(csrRowPtr); \ if (cooRowIndex) cudaFree(cooRowIndex); \ if (cooColIndex) cudaFree(cooColIndex); \ if (cooVal) cudaFree(cooVal); \ if (descr) cusparseDestroyMatDescr(descr);\ if (handle) cusparseDestroy(handle); \ cudaDeviceReset(); \ fflush (stdout); \ } while (0) int main(){ // CHECK: hipError_t cudaStat1,cudaStat2,cudaStat3,cudaStat4,cudaStat5,cudaStat6; cudaError_t cudaStat1,cudaStat2,cudaStat3,cudaStat4,cudaStat5,cudaStat6; // CHECK: hipsparseStatus_t status; cusparseStatus_t status; // CHECK: hipsparseHandle_t handle=0; cusparseHandle_t handle=0; // CHECK: hipsparseMatDescr_t descr=0; cusparseMatDescr_t descr=0; int * cooRowIndexHostPtr=0; int * cooColIndexHostPtr=0; double * cooValHostPtr=0; int * cooRowIndex=0; int * cooColIndex=0; double * cooVal=0; int * xIndHostPtr=0; double * xValHostPtr=0; double * yHostPtr=0; int * xInd=0; double * xVal=0; double * y=0; int * csrRowPtr=0; double * zHostPtr=0; double * z=0; int n, nnz, nnz_vector; double dzero =0.0; double dtwo =2.0; double dthree=3.0; double dfive =5.0; printf("testing example\n"); /* create the following sparse test matrix in COO format */ /* |1.0 2.0 3.0| | 4.0 | |5.0 6.0 7.0| | 8.0 9.0| */ n=4; nnz=9; cooRowIndexHostPtr = (int *) malloc(nnz*sizeof(cooRowIndexHostPtr[0])); cooColIndexHostPtr = (int *) malloc(nnz*sizeof(cooColIndexHostPtr[0])); cooValHostPtr = (double *)malloc(nnz*sizeof(cooValHostPtr[0])); if ((!cooRowIndexHostPtr) || (!cooColIndexHostPtr) || (!cooValHostPtr)){ CLEANUP("Host malloc failed (matrix)"); return 1; } cooRowIndexHostPtr[0]=0; cooColIndexHostPtr[0]=0; cooValHostPtr[0]=1.0; cooRowIndexHostPtr[1]=0; cooColIndexHostPtr[1]=2; cooValHostPtr[1]=2.0; cooRowIndexHostPtr[2]=0; cooColIndexHostPtr[2]=3; cooValHostPtr[2]=3.0; cooRowIndexHostPtr[3]=1; cooColIndexHostPtr[3]=1; cooValHostPtr[3]=4.0; cooRowIndexHostPtr[4]=2; cooColIndexHostPtr[4]=0; cooValHostPtr[4]=5.0; cooRowIndexHostPtr[5]=2; cooColIndexHostPtr[5]=2; cooValHostPtr[5]=6.0; cooRowIndexHostPtr[6]=2; cooColIndexHostPtr[6]=3; cooValHostPtr[6]=7.0; cooRowIndexHostPtr[7]=3; cooColIndexHostPtr[7]=1; cooValHostPtr[7]=8.0; cooRowIndexHostPtr[8]=3; cooColIndexHostPtr[8]=3; cooValHostPtr[8]=9.0; nnz_vector = 3; xIndHostPtr = (int *) malloc(nnz_vector*sizeof(xIndHostPtr[0])); xValHostPtr = (double *)malloc(nnz_vector*sizeof(xValHostPtr[0])); yHostPtr = (double *)malloc(2*n *sizeof(yHostPtr[0])); zHostPtr = (double *)malloc(2*(n+1) *sizeof(zHostPtr[0])); if((!xIndHostPtr) || (!xValHostPtr) || (!yHostPtr) || (!zHostPtr)) { 
CLEANUP("Host malloc failed (vectors)"); return 1; } yHostPtr[0] = 10.0; xIndHostPtr[0]=0; xValHostPtr[0]=100.0; yHostPtr[1] = 20.0; xIndHostPtr[1]=1; xValHostPtr[1]=200.0; yHostPtr[2] = 30.0; yHostPtr[3] = 40.0; xIndHostPtr[2]=3; xValHostPtr[2]=400.0; yHostPtr[4] = 50.0; yHostPtr[5] = 60.0; yHostPtr[6] = 70.0; yHostPtr[7] = 80.0; /* allocate GPU memory and copy the matrix and vectors into it */ // CHECK: cudaStat1 = hipMalloc((void**)&cooRowIndex,nnz*sizeof(cooRowIndex[0])); cudaStat1 = cudaMalloc((void**)&cooRowIndex,nnz*sizeof(cooRowIndex[0])); // CHECK: cudaStat2 = hipMalloc((void**)&cooColIndex,nnz*sizeof(cooColIndex[0])); cudaStat2 = cudaMalloc((void**)&cooColIndex,nnz*sizeof(cooColIndex[0])); // CHECK: cudaStat3 = hipMalloc((void**)&cooVal, nnz*sizeof(cooVal[0])); cudaStat3 = cudaMalloc((void**)&cooVal, nnz*sizeof(cooVal[0])); // CHECK: cudaStat4 = hipMalloc((void**)&y, 2*n*sizeof(y[0])); cudaStat4 = cudaMalloc((void**)&y, 2*n*sizeof(y[0])); // CHECK: cudaStat5 = hipMalloc((void**)&xInd,nnz_vector*sizeof(xInd[0])); cudaStat5 = cudaMalloc((void**)&xInd,nnz_vector*sizeof(xInd[0])); // CHECK: cudaStat6 = hipMalloc((void**)&xVal,nnz_vector*sizeof(xVal[0])); cudaStat6 = cudaMalloc((void**)&xVal,nnz_vector*sizeof(xVal[0])); // CHECK: if ((cudaStat1 != hipSuccess) || // CHECK: (cudaStat2 != hipSuccess) || // CHECK: (cudaStat3 != hipSuccess) || // CHECK: (cudaStat4 != hipSuccess) || // CHECK: (cudaStat5 != hipSuccess) || // CHECK: (cudaStat6 != hipSuccess)) { if ((cudaStat1 != cudaSuccess) || (cudaStat2 != cudaSuccess) || (cudaStat3 != cudaSuccess) || (cudaStat4 != cudaSuccess) || (cudaStat5 != cudaSuccess) || (cudaStat6 != cudaSuccess)) { CLEANUP("Device malloc failed"); return 1; } // CHECK: cudaStat1 = hipMemcpy(cooRowIndex, cooRowIndexHostPtr, // CHECK: hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(cooRowIndex, cooRowIndexHostPtr, (size_t)(nnz*sizeof(cooRowIndex[0])), cudaMemcpyHostToDevice); // CHECK: cudaStat2 = hipMemcpy(cooColIndex, cooColIndexHostPtr, // CHECK: hipMemcpyHostToDevice); cudaStat2 = cudaMemcpy(cooColIndex, cooColIndexHostPtr, (size_t)(nnz*sizeof(cooColIndex[0])), cudaMemcpyHostToDevice); // CHECK: cudaStat3 = hipMemcpy(cooVal, cooValHostPtr, // CHECK: hipMemcpyHostToDevice); cudaStat3 = cudaMemcpy(cooVal, cooValHostPtr, (size_t)(nnz*sizeof(cooVal[0])), cudaMemcpyHostToDevice); // CHECK: cudaStat4 = hipMemcpy(y, yHostPtr, // CHECK: hipMemcpyHostToDevice); cudaStat4 = cudaMemcpy(y, yHostPtr, (size_t)(2*n*sizeof(y[0])), cudaMemcpyHostToDevice); // CHECK: cudaStat5 = hipMemcpy(xInd, xIndHostPtr, // CHECK: hipMemcpyHostToDevice); cudaStat5 = cudaMemcpy(xInd, xIndHostPtr, (size_t)(nnz_vector*sizeof(xInd[0])), cudaMemcpyHostToDevice); // CHECK: cudaStat6 = hipMemcpy(xVal, xValHostPtr, // CHECK: hipMemcpyHostToDevice); cudaStat6 = cudaMemcpy(xVal, xValHostPtr, (size_t)(nnz_vector*sizeof(xVal[0])), cudaMemcpyHostToDevice); // CHECK: if ((cudaStat1 != hipSuccess) || // CHECK: (cudaStat2 != hipSuccess) || // CHECK: (cudaStat3 != hipSuccess) || // CHECK: (cudaStat4 != hipSuccess) || // CHECK: (cudaStat5 != hipSuccess) || // CHECK: (cudaStat6 != hipSuccess)) { if ((cudaStat1 != cudaSuccess) || (cudaStat2 != cudaSuccess) || (cudaStat3 != cudaSuccess) || (cudaStat4 != cudaSuccess) || (cudaStat5 != cudaSuccess) || (cudaStat6 != cudaSuccess)) { CLEANUP("Memcpy from Host to Device failed"); return 1; } /* initialize cusparse library */ // CHECK: status= hipsparseCreate(&handle); status= cusparseCreate(&handle); // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != 
CUSPARSE_STATUS_SUCCESS) { CLEANUP("CUSPARSE Library initialization failed"); return 1; } /* create and setup matrix descriptor */ // CHECK: status= hipsparseCreateMatDescr(&descr); status= cusparseCreateMatDescr(&descr); // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("Matrix descriptor initialization failed"); return 1; } // CHECK: hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); // CHECK: hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO); cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO); /* exercise conversion routines (convert matrix from COO 2 CSR format) */ // CHECK: cudaStat1 = hipMalloc((void**)&csrRowPtr,(n+1)*sizeof(csrRowPtr[0])); cudaStat1 = cudaMalloc((void**)&csrRowPtr,(n+1)*sizeof(csrRowPtr[0])); // CHECK: if (cudaStat1 != hipSuccess) { if (cudaStat1 != cudaSuccess) { CLEANUP("Device malloc failed (csrRowPtr)"); return 1; } status= cusparseXcoo2csr(handle,cooRowIndex,nnz,n, // CHECK: csrRowPtr,HIPSPARSE_INDEX_BASE_ZERO); csrRowPtr,CUSPARSE_INDEX_BASE_ZERO); // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("Conversion from COO to CSR format failed"); return 1; } //csrRowPtr = [0 3 4 7 9] // The following test only works for compute capability 1.3 and above // because it needs double precision. int devId; // CHECK: hipDeviceProp_t prop; cudaDeviceProp prop; // CHECK: hipError_t cudaStat; cudaError_t cudaStat; // CHECK: cudaStat = hipGetDevice(&devId); cudaStat = cudaGetDevice(&devId); // CHECK: if (hipSuccess != cudaStat){ if (cudaSuccess != cudaStat){ // CLEANUP("hipGetDevice failed"); CLEANUP("cudaGetDevice failed"); // printf("Error: cudaStat %d, %s\n", cudaStat, hipGetErrorString(cudaStat)); printf("Error: cudaStat %d, %s\n", cudaStat, cudaGetErrorString(cudaStat)); return 1; } // CHECK: cudaStat = hipGetDeviceProperties( &prop, devId); cudaStat = cudaGetDeviceProperties( &prop, devId); // CHECK: if (hipSuccess != cudaStat) { if (cudaSuccess != cudaStat) { // CHECK: CLEANUP("hipGetDeviceProperties failed"); CLEANUP("cudaGetDeviceProperties failed"); // CHECK: printf("Error: cudaStat %d, %s\n", cudaStat, hipGetErrorString(cudaStat)); printf("Error: cudaStat %d, %s\n", cudaStat, cudaGetErrorString(cudaStat)); return 1; } int cc = 100*prop.major + 10*prop.minor; if (cc < 130){ CLEANUP("waive the test because only sm13 and above are supported\n"); printf("the device has compute capability %d\n", cc); printf("example test WAIVED"); return 2; } /* exercise Level 1 routines (scatter vector elements) */ // CHECK: status= hipsparseDsctr(handle, nnz_vector, xVal, xInd, // CHECK: &y[n], HIPSPARSE_INDEX_BASE_ZERO); status= cusparseDsctr(handle, nnz_vector, xVal, xInd, &y[n], CUSPARSE_INDEX_BASE_ZERO); // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("Scatter from sparse to dense vector failed"); return 1; } //y = [10 20 30 40 | 100 200 70 400] /* exercise Level 2 routines (csrmv) */ // CHECK: status= hipsparseDcsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz, status= cusparseDcsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, n, n, nnz, &dtwo, descr, cooVal, csrRowPtr, cooColIndex, &y[0], &dthree, &y[n]); // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("Matrix-vector multiplication failed"); return 1; } //y = [10 20 30 40 | 680 760 1230 2240] // CHECK: hipMemcpy(yHostPtr, y, 
(size_t)(2*n*sizeof(y[0])), hipMemcpyDeviceToHost); cudaMemcpy(yHostPtr, y, (size_t)(2*n*sizeof(y[0])), cudaMemcpyDeviceToHost); /* exercise Level 3 routines (csrmm) */ // cudaStat1 = hipMalloc((void**)&z, 2*(n+1)*sizeof(z[0])); cudaStat1 = cudaMalloc((void**)&z, 2*(n+1)*sizeof(z[0])); // CHECK: if (cudaStat1 != hipSuccess) { if (cudaStat1 != cudaSuccess) { CLEANUP("Device malloc failed (z)"); return 1; } // CHECK: cudaStat1 = hipMemset((void *)z,0, 2*(n+1)*sizeof(z[0])); cudaStat1 = cudaMemset((void *)z,0, 2*(n+1)*sizeof(z[0])); // CHECK: if (cudaStat1 != hipSuccess) { if (cudaStat1 != cudaSuccess) { CLEANUP("Memset on Device failed"); return 1; } // CHECK: status= hipsparseDcsrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, 2, n, status= cusparseDcsrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, 2, n, nnz, &dfive, descr, cooVal, csrRowPtr, cooColIndex, y, n, &dzero, z, n+1); // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("Matrix-matrix multiplication failed"); return 1; } /* print final results (z) */ // CHECK: cudaStat1 = hipMemcpy(zHostPtr, z, // CHECK: hipMemcpyDeviceToHost); cudaStat1 = cudaMemcpy(zHostPtr, z, (size_t)(2*(n+1)*sizeof(z[0])), cudaMemcpyDeviceToHost); // CHECK: if (cudaStat1 != hipSuccess) { if (cudaStat1 != cudaSuccess) { CLEANUP("Memcpy from Device to Host failed"); return 1; } //z = [950 400 2550 2600 0 | 49300 15200 132300 131200 0] /* destroy matrix descriptor */ // status = hipsparseDestroyMatDescr(descr); status = cusparseDestroyMatDescr(descr); descr = 0; // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("Matrix descriptor destruction failed"); return 1; } /* destroy handle */ // CHECK: status = hipsparseDestroy(handle); status = cusparseDestroy(handle); handle = 0; // CHECK: if (status != HIPSPARSE_STATUS_SUCCESS) { if (status != CUSPARSE_STATUS_SUCCESS) { CLEANUP("CUSPARSE Library release of resources failed"); return 1; } /* check the results */ // Notice that CLEANUP() contains a call to cusparseDestroy(handle) if ((zHostPtr[0] != 950.0) || (zHostPtr[1] != 400.0) || (zHostPtr[2] != 2550.0) || (zHostPtr[3] != 2600.0) || (zHostPtr[4] != 0.0) || (zHostPtr[5] != 49300.0) || (zHostPtr[6] != 15200.0) || (zHostPtr[7] != 132300.0) || (zHostPtr[8] != 131200.0) || (zHostPtr[9] != 0.0) || (yHostPtr[0] != 10.0) || (yHostPtr[1] != 20.0) || (yHostPtr[2] != 30.0) || (yHostPtr[3] != 40.0) || (yHostPtr[4] != 680.0) || (yHostPtr[5] != 760.0) || (yHostPtr[6] != 1230.0) || (yHostPtr[7] != 2240.0)) { CLEANUP("example test FAILED"); return 1; } else { CLEANUP("example test PASSED"); return 0; } }
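// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the test above): the CHECK lines in this
// example appear to describe the hipSPARSE/HIP code that a hipify translation
// is expected to produce. The self-contained snippet below writes the library
// setup and COO->CSR conversion directly against those hipSPARSE names,
// assuming a ROCm toolchain where hipsparse.h provides them with
// cuSPARSE-compatible signatures. The helper name hip_setup_and_coo2csr and
// its parameters are hypothetical, not part of the original test.
#include <hip/hip_runtime.h>
#include "hipsparse.h"

static int hip_setup_and_coo2csr(int n, int nnz,
                                 const int* d_cooRowIndex, /* device pointer, length nnz */
                                 int* d_csrRowPtr)         /* device pointer, length n+1 */
{
    hipsparseHandle_t handle = 0;
    hipsparseMatDescr_t descr = 0;

    if (hipsparseCreate(&handle) != HIPSPARSE_STATUS_SUCCESS)
        return 1;
    if (hipsparseCreateMatDescr(&descr) != HIPSPARSE_STATUS_SUCCESS) {
        hipsparseDestroy(handle);
        return 1;
    }
    hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
    hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);

    // Same conversion the CUDA example exercises: COO row indices become a
    // CSR row-pointer array of length n+1 (e.g. [0 3 4 7 9] for the 4x4 matrix).
    hipsparseStatus_t status = hipsparseXcoo2csr(handle, d_cooRowIndex, nnz, n,
                                                 d_csrRowPtr, HIPSPARSE_INDEX_BASE_ZERO);

    hipsparseDestroyMatDescr(descr);
    hipsparseDestroy(handle);
    return (status == HIPSPARSE_STATUS_SUCCESS) ? 0 : 1;
}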
#include "SuperSmooth.h" #include <iostream> using namespace std; #include <stdio.h> // 宏:LOCALCLUSTER_DEF_BLOCK_X 和 LOCALCLUSTER_DEF_BLOCK_Y // 以及 LOCALCLUSTER_DEF_BLOCK_Z // 定义了第 1 个 kernel 默认的线程块的尺寸,本算法中采用了三维的线程块。 #define LOCALCLUSTER_DEF_BLOCK_X 32 #define LOCALCLUSTER_DEF_BLOCK_Y 2 #define LOCALCLUSTER_DEF_BLOCK_Z 4 // 宏:AVG_DEF_BLOCK_X 和 AVG_DEF_BLOCK_Y // 定义了第 2 个 kernel 默认的线程块的尺寸,本算法中采用了二维的线程块。 #define AVG_DEF_BLOCK_X 32 #define AVG_DEF_BLOCK_Y 8 // 宏:SYN_DEF_BLOCK_X 和 SYN_DEF_BLOCK_Y // 定义了第 3 个 kernel 默认的线程块的尺寸,本算法中采用了二维的线程块。 #define SYN_DEF_BLOCK_X 32 #define SYN_DEF_BLOCK_Y 8 // 宏:CNTTENPERCENT // 定义了邻域窗口 10% 的点的数目。 #define CNTTENPERCENT 121 // Device 数组:_argsLocalDev[4][4] // 因为不同线程处理的点的坐标是不一样的,为了最大化的并行,特意提取出来一些参数, // 有了这些参数可以增大并行化。 const int static __device__ _argsLocalDev[4][4] = { { 0, -1, 0, 1}, {-1, 0, 1, 0}, {-1, 1, 1, -1}, {-1, -1, 1, 1} }; // Kernel 函数:_localClusterKer(在点的 8 个方向上进行平滑操作) // 在每一个点的八个方向上遍历一定数量的点,利用这些点的累加值进行算术处理,得到 // 平滑后的图像值。 static __global__ void // Kernel 函数无返回值 _localClusterKer( ImageCuda inimg, // 输入图像。 ImageCuda outimg, // 输出图像。 int diffthred, // 当前像素点和遭遇的点的像素值差相关的阈值。 int diffcntthred, // 当前像素点的像素值与正遭遇点的像素值差大于等于 // diffthred 的连续点的个数超过 diffcntthred 时停 // 止该方向的遍历。 unsigned char flag[], // 标记数组,初始值都为 0 int ccntthred, // 它与当前像素点的像素值与正遭遇点的像素值差小于 // diffthred 的点的个数有关。 int searchscope // LocalCluster kernel中在每一个方向上搜索的最大 // 范围。按照河边老师的需求该值小于 16。 ); // Kernel 函数:_minmaxAvgKer(利用点的邻域进行平滑处理) // 对以当前计算点为中心的 window * window 范围内的点排序,计算前后各 10% 的点的 // 平均值,并以此计算当前点的最终像素值。 static __global__ void // Kernel 函数无返回值 _minmaxAvgKer( ImageCuda inimg, // 输入图像。 ImageCuda avgimg, // 输出图像。 Template atemplate // 模板,用来定位邻域点的坐标 ); // Kernel 函数:_synthImageKer(综合前两个核函数的结果,得到超平滑的结果) // 根据前两个和函数的计算结果,得到最终的结果图像。 static __global__ void // Kernel 函数无返回值 _synthImageKer( ImageCuda avgimg, // _minmaxAvgKer 得到的临时图像。 ImageCuda outimg, // 输出图像。 unsigned char flag[] // 通过第一个 kernel 得到的标记数组,每一个点都对 // 应一个标记值,0 或者 1。 ); // Device 函数:_sortDev(选择排序,升序) static __device__ int // 若正确执行该操作,则返回 NO_ERROR。 _sortDev( unsigned char pixelarr[], // 要排序的数组 int length // 数组的长度 ); // Device 函数:_sortDev(选择排序,升序) static __device__ int _sortDev( unsigned char pixelarr[], int length) { // 判断参数是否为空。 if (pixelarr == NULL || length <= 0) return INVALID_DATA; // 排序中用到的临时变量,记录数值值。 unsigned char temp = 0; // 临时变量,用来记录每一次循环找到的最小值的下标。 int index = 0; for (int i = 0; i < length; i++) { index = i; temp = pixelarr[i]; for (int j = i + 1; j < length; j++) { if (temp > pixelarr[j]) { index = j; temp = pixelarr[j]; } } if (index != i) { pixelarr[index] = pixelarr[i]; pixelarr[i] = temp; } } // 正确执行,返回 NO_ERROR。 return NO_ERROR; } // Kernel 函数:_localClusterKer(在点的 8 个方向上进行平滑操作) static __global__ void _localClusterKer( ImageCuda inimg, ImageCuda outimg, int diffthred, int diffcntthred, unsigned char flag[], int ccntthred, int searchscope) { // 计算当前线程在 Grid 中的位置。其中,c 对应点的横坐标,r 对应点的纵坐标。 // z 对应的是检索的方向。采用了一个线程处理四个点的策略。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; int z = threadIdx.z; // 申请共享内存。 extern __shared__ unsigned char pixeltmpShd[]; // 当前线程在线程块内部的索引 int idxblk = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // 当前线程需用到的共享内存,用来存放线程计算出来的该点可能像素值(可能不是 // 最终值,因为在这些值中最接近当前点的值才被选择为最终值),并将其赋初值 0。 // flagblkShd 用来为每一个线程标记,当 ccount > cCntThred 时将标记为 1。 unsigned char *curpixelShd, *flagblkShd; if (idxblk == 0) { curpixelShd = pixeltmpShd; flagblkShd = curpixelShd + LOCALCLUSTER_DEF_BLOCK_X * LOCALCLUSTER_DEF_BLOCK_Y * LOCALCLUSTER_DEF_BLOCK_Z; } 
flagblkShd[idxblk] = 0; // 块内同步。 __syncthreads(); // 判断点的坐标是否出界。 if (c < searchscope || c >= inimg.imgMeta.width - searchscope || r < searchscope || r >= inimg.imgMeta.height - searchscope) return; // 当前遭遇点的坐标 int curc = c, curr = r; // 当前计算的第一个点的索引,取出像素值。 int idx = r * inimg.pitchBytes + c; // 声明数组用来存放当前计算的 4 个点的像素值。 unsigned char pixel = inimg.imgMeta.imgData[idx]; // 分配共享内存,用来存放当前线程遭遇到的满足条件的点的像素值累加和。 int sumv = 0; // 当前遭遇点的像素值和索引,都是临时变量在下面的计算中会用到。 unsigned char curpixel = 0; int curidx = idx; // 临时变量。 int ccount = 0, dcount = 0; // 当前点和遭遇到的点的像素值差的绝对值。 unsigned char diff = 0 ; // 取出 _argsLocalDev 中的值到寄存器中,减少频繁访存带来的开销。 int offx = _argsLocalDev[z][0]; int offy = _argsLocalDev[z][1]; // 从(x,y)开始向上(左、左下、左上)方向扫描。 for (int i = 1; i <= searchscope; i++) { // 当前遭遇到的点的坐标。 curc = c + offx * i; curr = r + offy * i; // 判断点的坐 标是否出界。 if (curc < 0 || curc >= inimg.imgMeta.width || curr < 0 || curr >= inimg.imgMeta.height) break; // 当前遭遇到的点的索引和像素值。 curidx = curc + curr * inimg.pitchBytes; curpixel = inimg.imgMeta.imgData[curidx]; // 计算当前点和遭遇点的像素值差的绝对值。 diff = abs(pixel - curpixel); // 若 diff 小于了 diffthred 则做以下的处理。 // 若不满足条件则跳出循环。 if (diff < diffthred) { ccount++; sumv += curpixel; dcount = 0; } else { if (++dcount > diffcntthred) break; } } dcount = 0; offx = _argsLocalDev[z][2]; offy = _argsLocalDev[z][3]; // 从(x,y)开始向下(右、右上、右下)方向扫描。 for (int i = 1; i <= searchscope; i++) { curc = c + offx * i; curr = r + offy * i; // 判断点的坐 标是否出界。 if (curc < 0 || curc >= inimg.imgMeta.width || curr < 0 || curr >= inimg.imgMeta.height) break; // 当前遭遇到的点的索引和像素值。 curidx = curc + curr * inimg.pitchBytes; curpixel = inimg.imgMeta.imgData[curidx]; // 计算当前点和遭遇点的像素值差的绝对值。 diff = abs(pixel - curpixel); // 若 diff 小于了 diffthred 则做以下的处理。 // 若不满足条件则跳出循环。 if (diff < diffthred) { ccount++; sumv += curpixel; dcount = 0; } else { if (++dcount > diffcntthred) break; } } if (ccount > ccntthred) { flag[idx] = 1; flagblkShd[idxblk] = 1; // 计算当前点的可能值,并将其存放在共享内存中。 unsigned char tmppixel = 0; tmppixel = (unsigned char)(ccount == 0 ? (pixel + 1.0f) / 2.0f : (pixel + sumv / ccount + 1.0f) / 2.0f); curpixelShd[idxblk] = (tmppixel > 255 ? 255 : tmppixel); } // 块内同步 __syncthreads(); // 在每一个点对应的 4 个之中找到与当前点最接近的值作为该点的最终值。此处只需 // 用一个线程来实现。 if (z != 0 || flag[idx] != 1) return; int offinblk = blockDim.x * blockDim.y; // 局部变量,方便以下计算。 // 从共享内存中将每一个点对应的 4 个值取出来存放在寄存器中。 unsigned char temppixel[4] = { 0 }; for (int i = idxblk, j = 0; i < idxblk + 4 * offinblk; i += offinblk, j++) temppixel[j] = (flagblkShd[i] == 1 ? 
curpixelShd[i] : 0); // 找到与当前点最接近的像素值。 unsigned char ipixel = temppixel[0]; for (int i = 1; i < 4; i++) if (abs(pixel - ipixel) > abs(pixel - temppixel[i])) ipixel = temppixel[i]; // 当标记值为 0 时,则不赋值。(因此根据本算法要求,当标记值为 0,该点的 // 最终像素值由第二个核函数来计算得到) outimg.imgMeta.imgData[idx] = ipixel; } // Kernel 函数:_minmaxAvgKer(利用点的邻域进行平滑处理) static __global__ void _minmaxAvgKer( ImageCuda inimg, ImageCuda avgimg, Template atemplate) { // 计算当前线程在 Grid 中的位置。其中,c 对应点的横坐标,r 对应点的纵坐标。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 判断是否越界。 if (c < 0 || c >= inimg.imgMeta.width || r < 0 || r >= inimg.imgMeta.height) return; // 模板内坐标的个数。 int length = atemplate.count; // 分配一个数组用来存放邻域范围的点的像素值。 unsigned char pixelarr[CNTTENPERCENT]; // 当前像素点的和正遭遇到的点一维索引。 int idx = r * avgimg.pitchBytes + c; int index = 0; // 遭遇到的像素点的横纵坐标。 int curc = c, curr = r; // 当前遭遇点的像素值。 unsigned char curpixel = 0; // 临时变量,记录当前邻域内点的个数,pntcnt 与 length 不一样,length 表示的 // 是模板内点的数目。 int pntcnt = 0; for (int i = 0; i < length; i++) { curc = c + atemplate.tplData[2 * i]; curr = r + atemplate.tplData[2 * i + 1]; // 判断目前遭遇到的点是否越界。 if (curc < 0 || curc >= inimg.imgMeta.width || curr < 0 || curr >= inimg.imgMeta.height) continue; // 计算遭遇到的点的索引。 index = curr * inimg.pitchBytes + curc; curpixel = inimg.imgMeta.imgData[index]; pixelarr[pntcnt++] = curpixel; } // 排序。 int err = _sortDev(pixelarr, pntcnt); if (err != NO_ERROR) return; // 计算邻域内 10% 的点数目。 int cnt10percent = (int)(pntcnt * 0.1f + 0.5f); // 计算前后 10% 的平均值 float sumhigh = pixelarr[pntcnt - 1], sumlow = pixelarr[0]; for (int i = 1; i < cnt10percent; i++) { sumlow += pixelarr[i]; sumhigh += pixelarr[pntcnt - 1 - i]; } float high = (cnt10percent == 0 ? sumhigh : sumhigh / cnt10percent); float low = (cnt10percent == 0 ? sumlow : sumlow / cnt10percent); // 计算点的最终值。 float temp = (high + low) / 2.0f + 0.5f; avgimg.imgMeta.imgData[idx] = (temp > 255 ? 
255 : (unsigned char)temp); } // Kernel 函数:_synthImageKer(综合前两个核函数的结果,得到超平滑的结果) static __global__ void _synthImageKer( ImageCuda avgimg, ImageCuda outimg, unsigned char *flag) { // 计算当前线程在 Grid 中的位置。其中,c 对应点的横坐标,r 对应点的纵坐标。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 判断是否越界。 if (c < 0 || c >= avgimg.imgMeta.width || r < 0 || r >= avgimg.imgMeta.height) return; // 计算当前线程的一维索引,同时也对应图像中点的坐标。 int idx = r * avgimg.pitchBytes + c; // 更新结果图像中的像素值。 if (flag[idx] == 0) outimg.imgMeta.imgData[idx] = avgimg.imgMeta.imgData[idx]; } // 宏:FREE_SUPERSMOOTH(清理 Device 端或者 Host 端的内存) // 该宏用于清理在 SuperSmooth 过程中申请的设备端或者主机端内存空间。 #define FREE_SUPERSMOOTH do { \ if (avgimg != NULL) \ ImageBasicOp::deleteImage(avgimg); \ if (flagDev != NULL) \ cudaFree(flagDev); \ if (atemplate != NULL) \ TemplateFactory::putTemplate(atemplate); \ if (stream[0] != NULL) \ cudaStreamDestroy(stream[0]); \ if (stream[1] != NULL) \ cudaStreamDestroy(stream[1]); \ } while (0) // Host 成员方法:superSmooth(对输入图像进行超平滑操作) __host__ int SuperSmooth::superSmooth(Image *inimg, Image *outimg) { // 检查输入图像是否为 NULL if (inimg == NULL || inimg->imgData == NULL) return NULL_POINTER; // 输入图像的 ROI 区域尺寸 int imgroix = inimg->roiX2 - inimg->roiX1; int imgroiy = inimg->roiY2 - inimg->roiY1; int errcode; // 局部变量,接受自定义函数返回的错误值。 cudaError_t cudaerr; // 局部变量,接受 CUDA 端返回的错误值。 Image* avgimg = NULL; // 声明的临时图像。 unsigned char *flagDev = NULL; // 声明一个 Device 端的标记数组。 Template *atemplate = NULL; // 声明第二个 kernel 需要用到的模板。 // 创建两个流,一个用来执行第一个 kernel,另一个用来执行第二个 kernel。 cudaStream_t stream[2]; for (int i = 0; i < 2; i++) cudaStreamCreate(&stream[i]); // 将输入图像复制到 device errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将 outimg 复制到 device errcode = ImageBasicOp::copyToCurrentDevice(outimg); // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和 // 输入图像尺寸相同的图像 if (errcode != NO_ERROR) { errcode = ImageBasicOp::makeAtCurrentDevice( outimg, imgroix, imgroiy); // 如果创建图像也操作失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 声明一个临时 ImageCuda 对象,用来存放 _minmaxAvgKer 的结果。 ImageCuda avgimgCud; errcode = ImageBasicOp::newImage(&avgimg); if (errcode != NO_ERROR) { FREE_SUPERSMOOTH; return errcode; } // 在 Device 端创建该图像。 errcode = ImageBasicOp::makeAtCurrentDevice(avgimg, inimg->width, inimg->height); if (errcode != NO_ERROR) { FREE_SUPERSMOOTH; return errcode; } // 提取 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(avgimg, &avgimgCud); if (errcode != NO_ERROR) { FREE_SUPERSMOOTH; return errcode; } // 申请一个 GPU 端的标记数组,具有和图像一样的尺寸,初始值设置为 0。 cudaerr = cudaMalloc((void **)&flagDev, insubimgCud.pitchBytes * sizeof(unsigned char) * insubimgCud.imgMeta.height); if (cudaerr != cudaSuccess) { FREE_SUPERSMOOTH; return cudaerr; } // 在 Device 端为 flagDev 赋初值为 0。 cudaerr = cudaMemset(flagDev, 0, insubimgCud.pitchBytes * insubimgCud.imgMeta.height * sizeof(unsigned char)); if (cudaerr != cudaSuccess) { FREE_SUPERSMOOTH; return cudaerr; } // 为第一个 kernel 分配线程 dim3 blocksize1, gridsize1; blocksize1.x = LOCALCLUSTER_DEF_BLOCK_X; blocksize1.y = LOCALCLUSTER_DEF_BLOCK_Y; blocksize1.z = LOCALCLUSTER_DEF_BLOCK_Z; gridsize1.x = (insubimgCud.imgMeta.width + blocksize1.x - 1) / blocksize1.x; gridsize1.y = (insubimgCud.imgMeta.height + blocksize1.y - 1) / 
blocksize1.y; gridsize1.z = 1; // 计算第一个核函数的共享内存的大小。 int memsize = LOCALCLUSTER_DEF_BLOCK_X * LOCALCLUSTER_DEF_BLOCK_Y * LOCALCLUSTER_DEF_BLOCK_Z * 2 * sizeof(unsigned char); // 调用第一个核函数。 _localClusterKer<<<gridsize1, blocksize1, memsize, stream[0]>>>( insubimgCud, outsubimgCud, this->diffThred, this->diffCntThred, flagDev, this->cCntThred, this->searchScope); if (cudaGetLastError() != cudaSuccess) { FREE_SUPERSMOOTH; return CUDA_ERROR; } // 为第二个 kernel 分配线程 dim3 blocksize2, gridsize2; blocksize2.x = AVG_DEF_BLOCK_X; blocksize2.y = AVG_DEF_BLOCK_Y; gridsize2.x = (insubimgCud.imgMeta.width + blocksize2.x - 1) / blocksize2.x; gridsize2.y = (insubimgCud.imgMeta.height + blocksize2.y - 1) / blocksize2.y; // 创建第二个 kernel 用到的模板。 dim3 temsize(this->windowSize, this->windowSize, 1); errcode = TemplateFactory::getTemplate(&atemplate, TF_SHAPE_BOX, temsize, NULL); //for (int i = 0; i < atemplate->count;i++) //cout<<atemplate->tplData[2 * i] <<","<<atemplate->tplData[2 * i + 1]<<endl; // 将模板拷贝到 Device 内存中 errcode = TemplateBasicOp::copyToCurrentDevice(atemplate); if (errcode != NO_ERROR) { FREE_SUPERSMOOTH; return errcode; } // 调用第二个核函数。 _minmaxAvgKer<<<gridsize2, blocksize2, 0, stream[1]>>>( insubimgCud, avgimgCud, *atemplate); if (cudaGetLastError() != cudaSuccess) { FREE_SUPERSMOOTH; return CUDA_ERROR; } //数据同步 cudaThreadSynchronize(); // 销毁流。 for (int i = 0; i < 2; i++) cudaStreamDestroy(stream[i]); cudaDeviceSynchronize(); // 为第三个 kernel 分配线程 dim3 blocksize3, gridsize3; blocksize3.x = SYN_DEF_BLOCK_X; blocksize3.y = SYN_DEF_BLOCK_Y; gridsize3.x = (insubimgCud.imgMeta.width + blocksize3.x - 1) / blocksize3.x; gridsize3.y = (insubimgCud.imgMeta.height + blocksize3.y - 1) / blocksize3.y; // 调用第三个核函数。 _synthImageKer<<<gridsize3, blocksize3>>>(avgimgCud, outsubimgCud, flagDev); if (cudaGetLastError() != cudaSuccess) { FREE_SUPERSMOOTH; return CUDA_ERROR; } // 处理完毕,退出。 return NO_ERROR; }
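// ---------------------------------------------------------------------------
// Illustrative sketch (not part of SuperSmooth.cu): a host-side reference of
// the rule that _minmaxAvgKer applies per pixel, useful for checking the GPU
// result on small images. It is a sketch under the assumption of a plain
// row-major 8-bit image and a square box window (matching TF_SHAPE_BOX); the
// real kernel works on pitched device images and a Template of neighborhood
// offsets. The name minmaxAvgReference and its parameters are hypothetical.
#include <algorithm>
#include <vector>

static unsigned char minmaxAvgReference(const unsigned char* img, int width, int height,
                                        int x, int y, int window)
{
    std::vector<unsigned char> vals;
    int half = window / 2;
    for (int dy = -half; dy <= half; ++dy) {
        for (int dx = -half; dx <= half; ++dx) {
            int cx = x + dx, cy = y + dy;
            if (cx < 0 || cx >= width || cy < 0 || cy >= height)
                continue;                              // skip out-of-bounds neighbors
            vals.push_back(img[cy * width + cx]);
        }
    }
    if (vals.empty())
        return img[y * width + x];
    std::sort(vals.begin(), vals.end());               // the kernel uses selection sort (_sortDev)
    int cnt10 = static_cast<int>(vals.size() * 0.1f + 0.5f);   // roughly 10% of the window
    float sumLow = vals.front(), sumHigh = vals.back();
    for (int i = 1; i < cnt10; ++i) {
        sumLow  += vals[i];                            // lowest ~10% of the sorted window
        sumHigh += vals[vals.size() - 1 - i];          // highest ~10%
    }
    float low  = (cnt10 == 0) ? sumLow  : sumLow  / cnt10;
    float high = (cnt10 == 0) ? sumHigh : sumHigh / cnt10;
    float out  = (high + low) / 2.0f + 0.5f;           // midpoint of the two means, rounded
    return out > 255.0f ? 255 : static_cast<unsigned char>(out);
}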
* \brief * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data, * such as (compiled) kernel handles. * * \author Aleksei Iupinov <a.yupinov@gmail.com> * \ingroup module_ewald */ #include "gmxpre.h" #include "pme_gpu_program_impl.h" #include "pme_gpu_constants.h" #include "pme_gpu_internal.h" // for GridOrdering enum #include "pme_gpu_types_host.h" // PME interpolation order constexpr int c_pmeOrder = 4; // These hardcoded spread/gather parameters refer to not-implemented PME GPU 2D decomposition in X/Y constexpr bool c_wrapX = true; constexpr bool c_wrapY = true; constexpr int c_stateA = 0; constexpr int c_stateB = 1; //! PME CUDA kernels forward declarations. Kernels are documented in their respective files. template<int order, bool computeSplines, bool spreadCharges, bool wrapX, bool wrapY, int mode, bool writeGlobal, ThreadsPerAtom threadsPerAtom> __global__ void pme_spline_and_spread_kernel(PmeGpuCudaKernelParams kernelParams); // Add extern declarations to inform that there will be a definition // provided in another translation unit. // clang-format off extern template void __global__ pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template void __global__ pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template void __global__ pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template void __global__ pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void 
pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams); extern template __global__ void pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); template<GridOrdering gridOrdering, bool computeEnergyAndVirial, const int gridIndex> /* It is significantly slower to pass gridIndex as a kernel parameter */ __global__ void pme_solve_kernel(PmeGpuCudaKernelParams kernelParams); // Add extern declarations to inform that there will be a definition // provided in another translation unit. // clang-format off extern template __global__ void pme_solve_kernel<GridOrdering::XYZ, false, c_stateA>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::XYZ, true, c_stateA>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::YZX, false, c_stateA>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::YZX, true, c_stateA>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::XYZ, false, c_stateB>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::XYZ, true, c_stateB>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::YZX, false, c_stateB>(const PmeGpuCudaKernelParams); extern template __global__ void pme_solve_kernel<GridOrdering::YZX, true, c_stateB>(const PmeGpuCudaKernelParams); // clang-format on template<int order, bool wrapX, bool wrapY, int nGrids, bool readGlobal, ThreadsPerAtom threadsPerAtom> __global__ void pme_gather_kernel(PmeGpuCudaKernelParams kernelParams); // Add extern declarations to inform that there will be a definition // provided in another translation unit. 
// clang-format off extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order> (const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams); extern template __global__ void pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared> (const PmeGpuCudaKernelParams); // clang-format on PmeGpuProgramImpl::PmeGpuProgramImpl(const DeviceContext& deviceContext) : deviceContext_(deviceContext) { // kernel parameters warpSize_ = warp_size; spreadWorkGroupSize = c_spreadMaxThreadsPerBlock; solveMaxWorkGroupSize = c_solveMaxThreadsPerBlock; gatherWorkGroupSize = c_gatherMaxThreadsPerBlock; /* Not all combinations of the splineAndSpread, spline and Spread kernels are required * If only the spline (without the spread) then it does not make sense not to write the data to global memory * Similarly the spread kernel (without the spline) implies that we should read the spline data from global memory */ // clang-format off splineAndSpreadKernelSingle = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared>; splineAndSpreadKernelThPerAtom4Single = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order>; splineAndSpreadKernelWriteSplinesSingle = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>; splineAndSpreadKernelWriteSplinesThPerAtom4Single = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>; splineKernelSingle = pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>; splineKernelThPerAtom4Single = pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>; spreadKernelSingle = pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>; spreadKernelThPerAtom4Single = pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>; splineAndSpreadKernelDual = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared>; splineAndSpreadKernelThPerAtom4Dual = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order>; splineAndSpreadKernelWriteSplinesDual = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>; splineAndSpreadKernelWriteSplinesThPerAtom4Dual = pme_spline_and_spread_kernel<c_pmeOrder, true, true, c_wrapX, c_wrapY, 2, true, 
ThreadsPerAtom::Order>; splineKernelDual = pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>; splineKernelThPerAtom4Dual = pme_spline_and_spread_kernel<c_pmeOrder, true, false, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order>; spreadKernelDual = pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>; spreadKernelThPerAtom4Dual = pme_spline_and_spread_kernel<c_pmeOrder, false, true, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order>; gatherKernelSingle = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::OrderSquared>; gatherKernelThPerAtom4Single = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, false, ThreadsPerAtom::Order>; gatherKernelReadSplinesSingle = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::OrderSquared>; gatherKernelReadSplinesThPerAtom4Single = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 1, true, ThreadsPerAtom::Order>; gatherKernelDual = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::OrderSquared>; gatherKernelThPerAtom4Dual = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, false, ThreadsPerAtom::Order>; gatherKernelReadSplinesDual = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::OrderSquared>; gatherKernelReadSplinesThPerAtom4Dual = pme_gather_kernel<c_pmeOrder, c_wrapX, c_wrapY, 2, true, ThreadsPerAtom::Order>; solveXYZKernelA = pme_solve_kernel<GridOrdering::XYZ, false, c_stateA>; solveXYZEnergyKernelA = pme_solve_kernel<GridOrdering::XYZ, true, c_stateA>; solveYZXKernelA = pme_solve_kernel<GridOrdering::YZX, false, c_stateA>; solveYZXEnergyKernelA = pme_solve_kernel<GridOrdering::YZX, true, c_stateA>; solveXYZKernelB = pme_solve_kernel<GridOrdering::XYZ, false, c_stateB>; solveXYZEnergyKernelB = pme_solve_kernel<GridOrdering::XYZ, true, c_stateB>; solveYZXKernelB = pme_solve_kernel<GridOrdering::YZX, false, c_stateB>; solveYZXEnergyKernelB = pme_solve_kernel<GridOrdering::YZX, true, c_stateB>; // clang-format on } PmeGpuProgramImpl::~PmeGpuProgramImpl() = default;
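// ---------------------------------------------------------------------------
// Illustrative sketch (generic, not GROMACS code): the constructor above fills
// a table of kernel handles so that launch code can pick a compile-time
// template variant at run time. The self-contained toy below shows the same
// idea; ToyProgram, toy_kernel and main are hypothetical names, and the
// launch-through-a-pointer pattern assumes nvcc, which allows calling a
// __global__ function through a plain function pointer.
#include <cuda_runtime.h>
#include <cstdio>

template <bool writeGlobal>
__global__ void toy_kernel(float* out, const float* in, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    float v = in[i] * 2.0f;
    if (writeGlobal)            // compile-time branch; eliminated per instantiation
        out[i] = v;
}

// Minimal "program" object holding the selected kernel handles, analogous in
// spirit to members such as spreadKernelSingle above.
struct ToyProgram
{
    void (*kernelWrite)(float*, const float*, int)   = toy_kernel<true>;
    void (*kernelNoWrite)(float*, const float*, int) = toy_kernel<false>;
};

int main()
{
    const int n = 256;
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_in, 0, n * sizeof(float));

    ToyProgram prog;
    auto kernel = prog.kernelWrite;                   // run-time selection of a variant
    kernel<<<(n + 127) / 128, 128>>>(d_out, d_in, n);
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}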
namespace tensorflow { #define FINAL_MASK 0xffffffff template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } /* Calculate the sum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if(lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f); val = warpReduceSum<T>(val); return val; } template <typename T> __inline__ __device__ T warpReduceMax(T val) { for(int mask = 16; mask > 0; mask >>= 1) val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32)); return val; } /* Calculate the maximum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceMax(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; // in-warp idx int wid = threadIdx.x >> 5; // warp idx val = warpReduceMax(val); // get maxx in each warp if(lane == 0) // record in-warp maxx by warp Idx shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f; val = warpReduceMax(val); return val; } template<typename T> __global__ void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head) { int batch_id = blockIdx.x / (head_num * seq_len); int seq_id = blockIdx.x % seq_len; int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len; dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head + head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x]; } template <typename T> __global__ void softmax_kernel(T* qk_buf, const int* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scaler) { int batch_id = blockIdx.x / head_num; int qk_offset = blockIdx.x * seq_len * seq_len; int mask_offset = batch_id * seq_len * seq_len; __shared__ float s_sum, s_max; for(int i = 0; i < seq_len; ++i) { float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f; float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f; mask_val = (1.0f - mask_val) * -10000.0f; float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val): -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) s_max = max_val; __syncthreads(); qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f; float sum_val = blockReduceSum<float>(qk); if(threadIdx.x == 0) { s_sum = sum_val + 1e-30f; } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[threadIdx.x + qk_offset] = (T)(qk / s_sum); qk_offset += seq_len; mask_offset += seq_len; } } template <typename T> __global__ void softmax_kernel_v2(T* qk_buf, const int* attr_mask, const int batch_size, const int head_num, const int seq_len, const float scaler) { int batch_id = blockIdx.x / head_num / seq_len; int seq_id = blockIdx.x % seq_len; int qk_offset = blockIdx.x * seq_len; int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len; __shared__ float s_sum, s_max; float qk = threadIdx.x < seq_len ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f; float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f; mask_val = (1.0f - mask_val) * -10000.0f; float tmp = threadIdx.x < seq_len ? 
(float)(qk * (float)scaler + mask_val) : -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) s_max = max_val; __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp); if(threadIdx.x == 0) { s_sum = sum_val + 1e-30f; } __syncthreads(); if(threadIdx.x < seq_len) qk_buf[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum); } template<typename T> __global__ void add_QKV_bias(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf, T* k_buf, T* v_buf, const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block) { T* data_ptr; T* buf_ptr; const T* bias_ptr; int m = batch_size * seq_len; int n = head_num * size_per_head; int qkv_id = blockIdx.x * word_per_block / m; int row_offset = (blockIdx.x * word_per_block % m) * n; if(qkv_id == 0) { data_ptr = Q + row_offset; buf_ptr = q_buf; bias_ptr = bias_Q; } else if(qkv_id == 1) { data_ptr = K + row_offset; buf_ptr = k_buf; bias_ptr = bias_K; } else { data_ptr = V + row_offset; buf_ptr = v_buf; bias_ptr = bias_V; } #if 1 // add bias and transpose int batch_id = (blockIdx.x * word_per_block % m) / seq_len; int head_id = threadIdx.x / size_per_head; int id_in_head = threadIdx.x % size_per_head; int word_start_id = (blockIdx.x * word_per_block) % seq_len; T bias = __ldg(&bias_ptr[threadIdx.x]); #pragma unroll for(int i = word_start_id; i < word_start_id + word_per_block; ++i) { T tmp = data_ptr[threadIdx.x] + bias; int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head + i * size_per_head + id_in_head; buf_ptr[target_id] = tmp; data_ptr += n; } #else // error T bias = __ldg(&bias_ptr[threadIdx.x]); for(int i = 0; i < word_per_block; i++) { T tmp = data_ptr[threadIdx.x] + bias; int target_id = row_offset + i*n + threadIdx.x; buf_ptr[target_id] = tmp; data_ptr += n; } #endif } template<typename T> void MultiHeadAtentionLauncher(DType<T>* query, DType<T>* key, DType<T>* value, DType<T>* q_buf, DType<T>* k_buf, DType<T>* v_buf, DType<T>* qk_buf, DType<T>* transpose_dst, DType<T>* attr_out_buf, const DType<T> scaler, int batch_size, int from_seq_len, int head_num, int size_per_head, const DType<T>* bias_Q, const DType<T>* bias_K, const DType<T>* bias_V, const DType<int>* mask, cublasHandle_t cublas_handle, cudaStream_t stream) { int m = batch_size * from_seq_len; int k = head_num * size_per_head; const int word_per_block = 1; assert(k <= 1024); assert(m / word_per_block * 3 <= 65536); dim3 grid(m / word_per_block * 3); dim3 block(k); DELTA_SCOPE{ auto sum = CheckSum<GPUDevice, float>(query); printf("didi query ck sum: %lf \n", sum); sum = CheckSum<GPUDevice, float>(key); printf("didi key ck sum: %lf \n", sum); sum = CheckSum<GPUDevice, float>(value); printf("didi value ck sum: %lf \n", sum); }; add_QKV_bias<DType<T> ><<<grid, block, 0, stream>>>(query, bias_Q, key, bias_K, value, bias_V, q_buf, k_buf, v_buf, batch_size, from_seq_len, head_num, size_per_head, word_per_block); //cuda(PeekAtLastError()); //cuda(DeviceSynchronize()); DELTA_SCOPE{ auto sum = CheckSum<GPUDevice, float>(q_buf); printf("didi q_buf ck sum: %lf \n", sum); sum = CheckSum<GPUDevice, float>(k_buf); printf("didi k_buf ck sum: %lf \n", sum); sum = CheckSum<GPUDevice, float>(v_buf); printf("didi v_buf ck sum: %lf \n", sum); }; #if 1 T alpha = 1.0f, beta = 0.0f; typedef DeltaTraits<GPUDevice, T> traits; /// get _qk_buf[batch_size, head_num, from_seq_len, from_seq_len] #if 
(CUDART_VERSION >= 10000) cublas(GemmStridedBatchedEx(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, from_seq_len, from_seq_len, size_per_head, &alpha, k_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head, q_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head, &beta, qk_buf, traits::ComputeType, from_seq_len, from_seq_len * from_seq_len, batch_size * head_num, traits::ComputeType, *static_cast<delta::CublasGemmAlgo*>(delta::Config::Instance()["3"].get()))); #else cublas(SgemmStridedBatched(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, from_seq_len, from_seq_len, size_per_head, &alpha, k_buf, size_per_head, from_seq_len * size_per_head, q_buf, size_per_head, from_seq_len * size_per_head, &beta, qk_buf, from_seq_len, from_seq_len * from_seq_len, batch_size * head_num)); #endif if(from_seq_len <= 32) block.x = 32; else if(from_seq_len > 32 && from_seq_len <= 64) block.x = 64; else if(from_seq_len > 64 && from_seq_len <= 128) block.x = 128; else if(from_seq_len > 128 && from_seq_len <= 256) block.x = 256; else if(from_seq_len > 256 && from_seq_len <= 512) block.x = 512; else block.x = 1024; DELTA_SCOPE{ auto sum = CheckSum<GPUDevice, float>(qk_buf); printf("didi qk_buf before softmax ck sum: %lf\n", sum); }; if(batch_size * head_num <= 120) { grid.x = batch_size * head_num * from_seq_len; softmax_kernel_v2<DType<T> ><<<grid, block, 0, stream>>>(qk_buf, mask, batch_size, head_num, from_seq_len, scaler); } else { grid.x = batch_size * head_num; softmax_kernel<DType<T> ><<<grid, block, 0, stream>>>(qk_buf, mask, batch_size, head_num, from_seq_len, scaler); } DELTA_SCOPE { auto sum = CheckSum<GPUDevice, float>(qk_buf); printf("didi qk_buf after softmax ck sum: %lf\n", sum); }; #if (CUDART_VERSION >= 10000) /// cuda > 10.0 cublas(GemmStridedBatchedEx(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, size_per_head, from_seq_len, from_seq_len, &alpha, v_buf, traits::ComputeType, size_per_head, from_seq_len * size_per_head, qk_buf, traits::ComputeType, from_seq_len, from_seq_len * from_seq_len, &beta, transpose_dst, traits::ComputeType, size_per_head, from_seq_len * size_per_head, batch_size * head_num, traits::ComputeType, *static_cast<delta::CublasGemmAlgo*>(delta::Config::Instance()["4"].get()))); #else cublas(SgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, size_per_head, from_seq_len, from_seq_len, &alpha, v_buf, size_per_head, from_seq_len * size_per_head, qk_buf, from_seq_len, from_seq_len * from_seq_len, &beta, transpose_dst, size_per_head, from_seq_len * size_per_head, batch_size * head_num)); #endif DELTA_SCOPE{ auto sum = CheckSum<GPUDevice, float>(transpose_dst); printf("didi transpose_dst before softmax ck sum: %lf\n", sum); }; const int seq_per_block = 1; grid.x = batch_size * head_num * from_seq_len / seq_per_block; block.x = seq_per_block * size_per_head; transpose<DType<T> ><<<grid, block, 0, stream>>>(transpose_dst, attr_out_buf, batch_size, from_seq_len, head_num, size_per_head); DELTA_SCOPE{ auto sum = CheckSum<GPUDevice, float>(transpose_dst); printf("didi transpose_dst after softmax ck sum: %lf\n", sum); }; #endif } template void MultiHeadAtentionLauncher<float>(DType<float>* query, DType<float>* key, DType<float>* value, DType<float>* q_buf, DType<float>* k_buf, DType<float>* v_buf, DType<float>* qk_buf, DType<float>* transpose_dst, DType<float>* attr_out_buf, const DType<float> scaler, int batch_size, int from_seq_len, int head_num, int size_per_head, const DType<float>* bias_Q, const DType<float>* bias_K, const DType<float>* bias_V, const 
DType<int>* mask, cublasHandle_t cublas_handle, cudaStream_t stream); template<typename T> __global__ void transpose_head_and_seq_kernel(T* data, int sqlen, int n_head, int d_head, T* out) { extern __shared__ T smem[]; int seq_id = blockIdx.x / n_head; int head_id = blockIdx.x % n_head; smem[threadIdx.x] = data[blockIdx.x*d_head + threadIdx.x]; __syncthreads(); out[head_id * sqlen * d_head + seq_id * d_head + threadIdx.x] = smem[threadIdx.x]; } template<typename T> void transpose_head_and_seq(DType<T>* data, int sqlen, int n_head, int d_head, DType<T>* out, cudaStream_t stream) { dim3 grid(sqlen * n_head); dim3 block(d_head); transpose_head_and_seq_kernel<<<grid, block, d_head*sizeof(T), stream>>>(data, sqlen, n_head, d_head, out); } template void transpose_head_and_seq<float>(DType<float>* data, int sqlen, int n_head, int d_head, DType<float>* out, cudaStream_t stream); template<typename T> __global__ void transpose_head_num_and_seq_kernel(T* attn_vec, int sqlen, int bsz, int n_head, int d_head, T* attn_vec_out) { extern __shared__ T smem[]; int head_offset = blockIdx.x * d_head; int bsz_id = blockIdx.x / (n_head * sqlen); int head_id = blockIdx.x % (n_head * sqlen) / sqlen; int seq_id = blockIdx.x % sqlen; smem[threadIdx.x] = attn_vec[head_offset + threadIdx.x]; __syncthreads(); int head_len = n_head * d_head; int batch_len = head_len * bsz; attn_vec_out[seq_id * batch_len + bsz_id * head_len + head_id * d_head + threadIdx.x] = smem[threadIdx.x]; } template<typename T> void transpose_head_num_and_seq(DType<T>* attn_vec, int sqlen, int bsz, int n_head, int d_head, DType<T>* attn_vec_out, cudaStream_t stream) { dim3 grid(sqlen * bsz * n_head); dim3 block(d_head); transpose_head_num_and_seq_kernel<<<grid, block, d_head*sizeof(T), stream>>>(attn_vec, sqlen, bsz, n_head, d_head, attn_vec_out); } template void transpose_head_num_and_seq<float>(DType<float>* attn_vec, int sqlen, int bsz, int n_head, int d_head, DType<float>* attn_vec_out, cudaStream_t stream); template<typename T> __global__ void add_bias_and_split_kernel(T* w_heads, int mlen, int sqlen, int bsz, int n, int n_head, int d_head, const T* r_w_bias, const T* r_r_bias, T* rw_head_q, T* rr_head_q, T* w_head_k, T* w_head_v) { extern __shared__ T swap_space[]; int start_idx = 0; if(blockIdx.y == 0) { // split and add bias int block_id = blockIdx.x - mlen * bsz; if(block_id >= 0) { start_idx = blockIdx.x * n * 3; for(int i=threadIdx.x; i<n; i+=blockDim.x) { swap_space[i] = w_heads[start_idx + i]; } __syncthreads(); int seq_id = block_id / bsz; int bsz_id = block_id % bsz; for(int i = 0; i < n_head; i++) { for(int j=threadIdx.x; j<d_head; j+=blockDim.x) { T bias = __ldg(&r_w_bias[i*d_head + j]); // from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head] int out_id = bsz_id * n_head * sqlen * d_head + i * sqlen * d_head + seq_id * d_head + j; rw_head_q[out_id] = swap_space[i*d_head + j] + bias; bias = __ldg(&r_r_bias[i*d_head + j]); rr_head_q[out_id] = swap_space[i*d_head + j] + bias; } } } } else if(blockIdx.y == 1) { // only split start_idx = blockIdx.x * n * 3 + n; for(int i=threadIdx.x; i<n; i+=blockDim.x) { swap_space[i] = w_heads[start_idx + i]; } __syncthreads(); int seq_id = blockIdx.x / bsz; int bsz_id = blockIdx.x % bsz; for(int i = 0; i < n_head; i++) { for(int j=threadIdx.x; j < d_head; j+=blockDim.x) { // from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head] int out_id = bsz_id * n_head * (mlen + sqlen) * d_head + i * (mlen + sqlen) * d_head + seq_id * d_head + j; w_head_k[out_id] = swap_space[i*d_head + j];; 
} } } else { // only split start_idx = blockIdx.x * n * 3 + 2 * n; for(int i=threadIdx.x; i<n; i+=blockDim.x) { swap_space[i] = w_heads[start_idx + i]; } __syncthreads(); int seq_id = blockIdx.x / bsz; int bsz_id = blockIdx.x % bsz; for(int i = 0; i < n_head; i++) { for(int j=threadIdx.x; j < d_head; j+=blockDim.x) { // from [seq, bsz, n_head, d_head] to [bsz , n_head, seq, d_head] int out_id = bsz_id * n_head * (mlen + sqlen) * d_head + i * (mlen + sqlen) * d_head + seq_id * d_head + j; w_head_v[out_id] = swap_space[i*d_head + j];; } } } } template <typename T> void add_bias_and_split(DType<T>* w_heads, int mlen, int sqlen, int bsz, int n_head, int d_head, const DType<T>* r_w_bias, const DType<T>* r_r_bias, DType<T>* rw_head_q, DType<T>* rr_head_q, DType<T>* w_head_k, DType<T>* w_head_v, cudaStream_t stream) { int m = (mlen + sqlen) * bsz; int n = n_head * d_head; // x 3 assert(n <= 1024); assert(m * 3 <= 65536); dim3 grid(m, 3); dim3 block(128); add_bias_and_split_kernel<<<grid, block, n*sizeof(T), stream>>>(w_heads, mlen, sqlen, bsz, n, n_head, d_head, r_w_bias, r_r_bias, rw_head_q, rr_head_q, w_head_k, w_head_v); }; template void add_bias_and_split<float>(DType<float>* w_heads, int mlen, int sqlen, int bsz, int n_head, int d_head, const DType<float>* r_w_bias, const DType<float>* r_r_bias, DType<float>* rw_head_q, DType<float>* rr_head_q, DType<float>* w_head_k, DType<float>* w_head_v, cudaStream_t stream); #if 0 template<typename T> __global__ void attn_prob_softmax_kernel(T* ac, T* bd, const T* attn_mask, const int sqlen, const int klen, const float scaler) { int input_offset = blockIdx.x * klen; int mask_offset = blockIdx.x % sqlen * klen; __shared__ float s_sum; float ac_val = threadIdx.x < klen ? (float)ac[threadIdx.x + input_offset] : 0.0f; float bd_val = threadIdx.x < klen ? (float)bd[threadIdx.x + input_offset] : 0.0f; float mask_val = threadIdx.x < klen ? (float)attn_mask[threadIdx.x + mask_offset] : 0.0f; float tmp = threadIdx.x < klen ? (ac_val + bd_val) * scaler * (1 - mask_val) - 1e30f * mask_val : 1e-30f; tmp = threadIdx.x < klen ? __expf((float)(tmp)) : 0.0f; float sum_val = blockReduceSum<float>(tmp); if(threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if(threadIdx.x < klen) { ac[threadIdx.x + input_offset] = (T)(tmp / s_sum); } } #else template<typename T> __global__ void attn_prob_softmax_kernel(T* ac, T* bd, const T* attn_mask, const int sqlen, const int klen, const float scaler) { int input_offset = blockIdx.x * klen; int mask_offset = blockIdx.x % sqlen * klen; __shared__ float s_sum, s_max; float ac_val = threadIdx.x < klen ? (float)ac[threadIdx.x + input_offset] : 0.0f; float bd_val = threadIdx.x < klen ? (float)bd[threadIdx.x + input_offset] : 0.0f; float mask_val = threadIdx.x < klen ? (float)attn_mask[threadIdx.x + mask_offset] : 0.0f; float tmp = threadIdx.x < klen ? (ac_val + bd_val) * scaler * (1 - mask_val) - 1e20f * mask_val : -1e20f; float max_val = blockReduceMax<float>(tmp); if(threadIdx.x == 0) { s_max = max_val; } __syncthreads(); float qk_tmp = threadIdx.x < klen ? 
__expf((float)(tmp - s_max)) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp); if(threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if(threadIdx.x < klen) { ac[threadIdx.x + input_offset] = (T)(qk_tmp / s_sum); } } #endif template <typename T> void attn_prob_softmax(DType<T>* ac, DType<T>* bd, const DType<T>* attn_mask, int mlen, int sqlen, int bsz, int n_head, int d_head, cudaStream_t stream) { float scaler = 1 / sqrt(d_head); dim3 grid; grid.x = bsz * n_head * sqlen; dim3 block; int klen = (sqlen + mlen); if(klen <= 32) { block.x = 32; } else if(klen > 32 && klen <= 64) { block.x = 64; } else if(klen > 64 && klen <= 128) { block.x = 128; } else if(klen > 128 && klen <= 256) { block.x = 256; } else if(klen > 256 && klen <= 512) { block.x = 512; } else { block.x = 1024; } attn_prob_softmax_kernel<<<grid, block, 0, stream>>>(ac, bd, attn_mask, sqlen, klen, scaler); } template void attn_prob_softmax<float>(DType<float>* ac, DType<float>* bd, const DType<float>* attn_mask, int mlen, int sqlen, int bsz, int n_head, int d_head, cudaStream_t stream); } /* namespace tensorflow */
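// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of the ops above): the softmax
// kernels rely on the warp-shuffle reduction idiom of warpReduceSum /
// blockReduceSum. The snippet below repeats that idiom in a self-contained
// kernel that sums an array, so the two-stage pattern (intra-warp butterfly,
// then a first-warp pass over per-warp partials) can be run and checked in
// isolation. block_sum_demo and main are hypothetical names.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void block_sum_demo(const float* in, float* out, int n)
{
    __shared__ float warp_partials[32];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float val = (tid < n) ? in[tid] : 0.0f;

    // Stage 1: butterfly reduction inside each warp (same loop as warpReduceSum).
    for (int mask = 16; mask > 0; mask >>= 1)
        val += __shfl_xor_sync(0xffffffff, val, mask, 32);

    int lane = threadIdx.x & 0x1f;
    int warp = threadIdx.x >> 5;
    if (lane == 0)
        warp_partials[warp] = val;
    __syncthreads();

    // Stage 2: the first warp reduces the per-warp partial sums.
    val = (threadIdx.x < (blockDim.x >> 5)) ? warp_partials[lane] : 0.0f;
    if (warp == 0)
        for (int mask = 16; mask > 0; mask >>= 1)
            val += __shfl_xor_sync(0xffffffff, val, mask, 32);

    if (threadIdx.x == 0)
        atomicAdd(out, val);
}

int main()
{
    const int n = 4096;
    std::vector<float> h(n, 1.0f);
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(float));

    block_sum_demo<<<(n + 255) / 256, 256>>>(d_in, d_out, n);

    float result = 0.0f;
    cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.1f (expected %d)\n", result, n);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}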
#include "../fixed_point.hpp" #include "k_fixed_point.cuh" #include "kernel_utils.cuh" #include "surreal.cuh" #define WARPSIZE 32 #define PI 3.141592653589793115997963468544185161 #define TWO_OVER_SQRT_PI 1.128379167095512595889238330988549829708 // generate kv values from coordinates to be radix sorted void __global__ k_coords_to_kv( const int N, const double *coords, const double *box, const unsigned int *bin_to_idx, unsigned int *keys, unsigned int *vals) { const int atom_idx = blockIdx.x * blockDim.x + threadIdx.x; if (atom_idx >= N) { return; } // these coords have to be centered double bx = box[0 * 3 + 0]; double by = box[1 * 3 + 1]; double bz = box[2 * 3 + 2]; double binWidth = max(max(bx, by), bz) / 255.0; double x = coords[atom_idx * 3 + 0]; double y = coords[atom_idx * 3 + 1]; double z = coords[atom_idx * 3 + 2]; x -= bx * floor(x / bx); y -= by * floor(y / by); z -= bz * floor(z / bz); unsigned int bin_x = x / binWidth; unsigned int bin_y = y / binWidth; unsigned int bin_z = z / binWidth; keys[atom_idx] = bin_to_idx[bin_x * 256 * 256 + bin_y * 256 + bin_z]; // uncomment below if you want to preserve the atom ordering // keys[atom_idx] = atom_idx; vals[atom_idx] = atom_idx; } template <typename RealType> void __global__ k_check_rebuild_box(const int N, const double *new_box, const double *old_box, int *rebuild) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= 9) { return; } // (ytz): box vectors have exactly 9 components // we can probably derive a looser bound later on. if (old_box[idx] != new_box[idx]) { rebuild[0] = 1; } } template <typename RealType> void __global__ k_check_rebuild_coords_and_box( const int N, const double *__restrict__ new_coords, const double *__restrict__ old_coords, const double *__restrict__ new_box, const double *__restrict__ old_box, const double padding, int *rebuild) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < 9) { // (ytz): box vectors have exactly 9 components // we can probably derive a looser bound later on. 
if (old_box[idx] != new_box[idx]) { rebuild[0] = 1; } } if (idx >= N) { return; } RealType xi = old_coords[idx * 3 + 0]; RealType yi = old_coords[idx * 3 + 1]; RealType zi = old_coords[idx * 3 + 2]; RealType xj = new_coords[idx * 3 + 0]; RealType yj = new_coords[idx * 3 + 1]; RealType zj = new_coords[idx * 3 + 2]; RealType dx = xi - xj; RealType dy = yi - yj; RealType dz = zi - zj; RealType d2ij = dx * dx + dy * dy + dz * dz; if (d2ij > static_cast<RealType>(0.25) * padding * padding) { // (ytz): this is *safe* but technically is a race condition rebuild[0] = 1; } } template <typename RealType> void __global__ k_copy_nblist_coords_and_box( const int N, const int *__restrict__ rebuild, const double *__restrict__ new_coords, const double *__restrict__ new_box, double *__restrict__ nblist_coords, double *__restrict__ nblist_box) { if (rebuild[0] <= 0) { return; } const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) { return; } if (idx < 9) { nblist_box[idx] = new_box[idx]; } #pragma unroll 3 for (int i = 0; i < 3; i++) { nblist_coords[idx * 3 + i] = new_coords[idx * 3 + i]; } } template <typename RealType> void __global__ k_permute( const int N, const unsigned int *__restrict__ perm, const RealType *__restrict__ array, RealType *__restrict__ sorted_array) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.y; int stride_idx = blockIdx.y; if (idx >= N) { return; } sorted_array[idx * stride + stride_idx] = array[perm[idx] * stride + stride_idx]; } template <typename RealType> void __global__ k_permute_2x( const int N, const unsigned int *__restrict__ perm, const RealType *__restrict__ array_1, const RealType *__restrict__ array_2, RealType *__restrict__ sorted_array_1, RealType *__restrict__ sorted_array_2) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.y; int stride_idx = blockIdx.y; if (idx >= N) { return; } sorted_array_1[idx * stride + stride_idx] = array_1[perm[idx] * stride + stride_idx]; sorted_array_2[idx * stride + stride_idx] = array_2[perm[idx] * stride + stride_idx]; } template <typename RealType> void __global__ k_inv_permute_accum( const int N, const unsigned int *__restrict__ perm, const RealType *__restrict__ sorted_array, RealType *__restrict__ array) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.y; int stride_idx = blockIdx.y; if (idx >= N) { return; } array[perm[idx] * stride + stride_idx] += sorted_array[idx * stride + stride_idx]; } template <typename RealType> void __global__ k_inv_permute_assign( const int N, const unsigned int *__restrict__ perm, const RealType *__restrict__ sorted_array, RealType *__restrict__ array) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.y; int stride_idx = blockIdx.y; if (idx >= N) { return; } array[perm[idx] * stride + stride_idx] = sorted_array[idx * stride + stride_idx]; } template <typename RealType> void __global__ k_inv_permute_assign_2x( const int N, const unsigned int *__restrict__ perm, const RealType *__restrict__ sorted_array_1, const RealType *__restrict__ sorted_array_2, RealType *__restrict__ array_1, RealType *__restrict__ array_2) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.y; int stride_idx = blockIdx.y; if (idx >= N) { return; } array_1[perm[idx] * stride + stride_idx] = sorted_array_1[idx * stride + stride_idx]; array_2[perm[idx] * stride + stride_idx] = sorted_array_2[idx * stride + stride_idx]; } template <typename RealType> void __global__ k_add_ull_to_real(const int N, const unsigned 
long long *__restrict__ ull_array, RealType *__restrict__ real_array) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.y; int stride_idx = blockIdx.y; if (idx >= N) { return; } // handle charges, sigmas, epsilons with different exponents if (stride_idx == 0) { real_array[idx * stride + stride_idx] += FIXED_TO_FLOAT_DU_DP<RealType, FIXED_EXPONENT_DU_DCHARGE>(ull_array[idx * stride + stride_idx]); } else if (stride_idx == 1) { real_array[idx * stride + stride_idx] += FIXED_TO_FLOAT_DU_DP<RealType, FIXED_EXPONENT_DU_DSIG>(ull_array[idx * stride + stride_idx]); } else if (stride_idx == 2) { real_array[idx * stride + stride_idx] += FIXED_TO_FLOAT_DU_DP<RealType, FIXED_EXPONENT_DU_DEPS>(ull_array[idx * stride + stride_idx]); } } template <typename RealType> void __global__ k_reduce_buffer(int N, RealType *d_buffer, RealType *d_sum) { int idx = blockIdx.x * blockDim.x + threadIdx.x; RealType elem = idx < N ? d_buffer[idx] : 0; atomicAdd(d_sum, elem); }; template <typename RealType> void __global__ k_reduce_ull_buffer(int N, unsigned long long *d_buffer, RealType *d_sum) { int idx = blockIdx.x * blockDim.x + threadIdx.x; RealType elem = idx < N ? FIXED_TO_FLOAT<RealType>(d_buffer[idx]) : 0; atomicAdd(d_sum, elem); }; double __device__ __forceinline__ real_es_factor(double real_beta, double dij, double inv_d2ij, double &erfc_beta_dij) { double beta_dij = real_beta * dij; double exp_beta_dij_2 = exp(-beta_dij * beta_dij); erfc_beta_dij = erfc(beta_dij); return -inv_d2ij * (static_cast<double>(TWO_OVER_SQRT_PI) * beta_dij * exp_beta_dij_2 + erfc_beta_dij); } float __device__ __forceinline__ real_es_factor(float real_beta, float dij, float inv_d2ij, float &erfc_beta_dij) { float beta_dij = real_beta * dij; // max ulp error is: 2 + floor(abs(1.16 * x)) float exp_beta_dij_2 = __expf(-beta_dij * beta_dij); // 5th order gaussian polynomial approximation, we need the exp(-x^2) anyways for the chain rule // so we use last variant in https://en.wikipedia.org/wiki/Error_function#Approximation_with_elementary_functions float t = 1.0f / (1.0f + 0.3275911f * beta_dij); erfc_beta_dij = (0.254829592f + (-0.284496736f + (1.421413741f + (-1.453152027f + 1.061405429f * t) * t) * t) * t) * t * exp_beta_dij_2; return -inv_d2ij * (static_cast<float>(TWO_OVER_SQRT_PI) * beta_dij * exp_beta_dij_2 + erfc_beta_dij); } // These are two lines of code are to deal with the formation of a non-commutative fma. // For more information, see: https://github.com/proteneer/timemachine/issues/386 float __device__ __forceinline__ fix_nvidia_fmad(float a, float b, float c, float d) { return __fmul_rn(a, b) + __fmul_rn(c, d); } double __device__ __forceinline__ fix_nvidia_fmad(double a, double b, double c, double d) { return __dmul_rn(a, b) + __dmul_rn(c, d); } // void __global__ k_compute_w_coords( // const int N, // const double lambda, // const double cutoff, // const int * __restrict__ lambda_plane_idxs, // 0 or 1, shift // const int * __restrict__ lambda_offset_idxs, // double * __restrict__ coords_w, // double * __restrict__ dw_dl) { // int atom_i_idx = blockIdx.x*blockDim.x + threadIdx.x; // if(atom_i_idx >= N) { // return; // } // int lambda_offset_i = atom_i_idx < N ? lambda_offset_idxs[atom_i_idx] : 0; // int lambda_plane_i = atom_i_idx < N ? 
lambda_plane_idxs[atom_i_idx] : 0; // double coords_w_i = (lambda_plane_i + lambda_offset_i*lambda)*cutoff; // double dw_dl_i = lambda_offset_i*cutoff; // coords_w[atom_i_idx] = coords_w_i; // dw_dl[atom_i_idx] = dw_dl_i; // } // 0 or 1, how much we offset from the plane by ) // Compute the terms associated with electrostatics. // This is pulled out into a function to ensure that the same bit values // are computed to ensure that that the fixed point values are exactly the same regardless // of where the values are computed. template <typename RealType, bool COMPUTE_U> void __device__ __forceinline__ compute_electrostatics( const RealType charge_scale, const RealType qi, const RealType qj, const RealType d2ij, const RealType beta, RealType &dij, RealType &inv_dij, RealType &inv_d2ij, RealType &ebd, RealType &es_prefactor, RealType &u) { inv_dij = rsqrt(d2ij); dij = d2ij * inv_dij; inv_d2ij = inv_dij * inv_dij; RealType qij = qi * qj; es_prefactor = charge_scale * qij * inv_dij * real_es_factor(beta, dij, inv_d2ij, ebd); if (COMPUTE_U) { u = charge_scale * qij * inv_dij * ebd; } } // Handles the computation related to the LJ terms. // This is pulled out into a function to ensure that the same bit values // are computed to ensure that that the fixed point values are exactly the same regardless // of where the values are computed. template <typename RealType, bool COMPUTE_U> void __device__ __forceinline__ compute_lj( RealType lj_scale, RealType eps_i, RealType eps_j, RealType sig_i, RealType sig_j, RealType inv_dij, RealType inv_d2ij, RealType &u, RealType &delta_prefactor, RealType &sig_grad, RealType &eps_grad) { RealType eps_ij = eps_i * eps_j; RealType sig_ij = sig_i + sig_j; RealType sig_inv_dij = sig_ij * inv_dij; RealType sig2_inv_d2ij = sig_inv_dij * sig_inv_dij; RealType sig4_inv_d4ij = sig2_inv_d2ij * sig2_inv_d2ij; RealType sig6_inv_d6ij = sig4_inv_d4ij * sig2_inv_d2ij; RealType sig6_inv_d8ij = sig6_inv_d6ij * inv_d2ij; RealType sig5_inv_d6ij = sig_ij * sig4_inv_d4ij * inv_d2ij; RealType lj_prefactor = lj_scale * eps_ij * sig6_inv_d8ij * (sig6_inv_d6ij * 48 - 24); if (COMPUTE_U) { u += lj_scale * 4 * eps_ij * (sig6_inv_d6ij - 1) * sig6_inv_d6ij; } delta_prefactor -= lj_prefactor; sig_grad = lj_scale * 24 * eps_ij * sig5_inv_d6ij * (2 * sig6_inv_d6ij - 1); eps_grad = lj_scale * 4 * (sig6_inv_d6ij - 1) * sig6_inv_d6ij; } // ALCHEMICAL == false guarantees that the tile's atoms are such that // 1. src_param and dst_params are equal for every i in R and j in C // 2. 
w_i and w_j are identical for every (i,j) in (RxC) // DU_DL_DEPENDS_ON_DU_DP indicates whether or not to compute DU_DP when // COMPUTE_DU_DL is requested (needed for interpolated potentials) template < typename RealType, bool ALCHEMICAL, bool COMPUTE_U, bool COMPUTE_DU_DX, bool COMPUTE_DU_DL, bool COMPUTE_DU_DP> // void __device__ __forceinline__ v_nonbonded_unified( void __device__ v_nonbonded_unified( const int N, const double *__restrict__ coords, const double *__restrict__ params, // [N] const double *__restrict__ box, const double *__restrict__ dp_dl, const double *__restrict__ coords_w, // 4D coords const double *__restrict__ dw_dl, // 4D derivatives const double lambda, // const int * __restrict__ lambda_plane_idxs, // 0 or 1, shift // const int * __restrict__ lambda_offset_idxs, // 0 or 1, how much we offset from the plane by cutoff const double beta, const double cutoff, const int *__restrict__ ixn_tiles, const unsigned int *__restrict__ ixn_atoms, unsigned long long *__restrict__ du_dx, unsigned long long *__restrict__ du_dp, unsigned long long *__restrict__ du_dl_buffer, unsigned long long *__restrict__ u_buffer) { int tile_idx = blockIdx.x; RealType box_x = box[0 * 3 + 0]; RealType box_y = box[1 * 3 + 1]; RealType box_z = box[2 * 3 + 2]; RealType inv_box_x = 1 / box_x; RealType inv_box_y = 1 / box_y; RealType inv_box_z = 1 / box_z; int row_block_idx = ixn_tiles[tile_idx]; int atom_i_idx = row_block_idx * 32 + threadIdx.x; // int lambda_offset_i = atom_i_idx < N ? lambda_offset_idxs[atom_i_idx] : 0; // int lambda_plane_i = atom_i_idx < N ? lambda_plane_idxs[atom_i_idx] : 0; RealType ci_x = atom_i_idx < N ? coords[atom_i_idx * 3 + 0] : 0; RealType ci_y = atom_i_idx < N ? coords[atom_i_idx * 3 + 1] : 0; RealType ci_z = atom_i_idx < N ? coords[atom_i_idx * 3 + 2] : 0; RealType ci_w = atom_i_idx < N ? coords_w[atom_i_idx] : 0; RealType dq_dl_i = atom_i_idx < N ? dp_dl[atom_i_idx * 3 + 0] : 0; RealType dsig_dl_i = atom_i_idx < N ? dp_dl[atom_i_idx * 3 + 1] : 0; RealType deps_dl_i = atom_i_idx < N ? dp_dl[atom_i_idx * 3 + 2] : 0; RealType dw_dl_i = atom_i_idx < N ? dw_dl[atom_i_idx] : 0; unsigned long long gi_x = 0; unsigned long long gi_y = 0; unsigned long long gi_z = 0; unsigned long long du_dl = 0; int charge_param_idx_i = atom_i_idx * 3 + 0; int lj_param_idx_sig_i = atom_i_idx * 3 + 1; int lj_param_idx_eps_i = atom_i_idx * 3 + 2; RealType qi = atom_i_idx < N ? params[charge_param_idx_i] : 0; RealType sig_i = atom_i_idx < N ? params[lj_param_idx_sig_i] : 0; RealType eps_i = atom_i_idx < N ? params[lj_param_idx_eps_i] : 0; unsigned long long g_qi = 0; unsigned long long g_sigi = 0; unsigned long long g_epsi = 0; // i idx is contiguous but j is not, so we should swap them to avoid having to shuffle atom_j_idx int atom_j_idx = ixn_atoms[tile_idx * 32 + threadIdx.x]; // int lambda_offset_j = atom_j_idx < N ? lambda_offset_idxs[atom_j_idx] : 0; // int lambda_plane_j = atom_j_idx < N ? lambda_plane_idxs[atom_j_idx] : 0; RealType cj_x = atom_j_idx < N ? coords[atom_j_idx * 3 + 0] : 0; RealType cj_y = atom_j_idx < N ? coords[atom_j_idx * 3 + 1] : 0; RealType cj_z = atom_j_idx < N ? coords[atom_j_idx * 3 + 2] : 0; RealType cj_w = atom_j_idx < N ? coords_w[atom_j_idx] : 0; RealType dq_dl_j = atom_j_idx < N ? dp_dl[atom_j_idx * 3 + 0] : 0; RealType dsig_dl_j = atom_j_idx < N ? dp_dl[atom_j_idx * 3 + 1] : 0; RealType deps_dl_j = atom_j_idx < N ? dp_dl[atom_j_idx * 3 + 2] : 0; RealType dw_dl_j = atom_j_idx < N ? 
dw_dl[atom_j_idx] : 0; unsigned long long gj_x = 0; unsigned long long gj_y = 0; unsigned long long gj_z = 0; int charge_param_idx_j = atom_j_idx * 3 + 0; int lj_param_idx_sig_j = atom_j_idx * 3 + 1; int lj_param_idx_eps_j = atom_j_idx * 3 + 2; RealType qj = atom_j_idx < N ? params[charge_param_idx_j] : 0; RealType sig_j = atom_j_idx < N ? params[lj_param_idx_sig_j] : 0; RealType eps_j = atom_j_idx < N ? params[lj_param_idx_eps_j] : 0; unsigned long long g_qj = 0; unsigned long long g_sigj = 0; unsigned long long g_epsj = 0; RealType real_cutoff = static_cast<RealType>(cutoff); RealType cutoff_squared = real_cutoff * real_cutoff; unsigned long long energy = 0; RealType real_lambda = static_cast<RealType>(lambda); RealType real_beta = static_cast<RealType>(beta); const int srcLane = (threadIdx.x + 1) % WARPSIZE; // fixed // #pragma unroll for (int round = 0; round < 32; round++) { RealType delta_x = ci_x - cj_x; RealType delta_y = ci_y - cj_y; RealType delta_z = ci_z - cj_z; delta_x -= box_x * nearbyint(delta_x * inv_box_x); delta_y -= box_y * nearbyint(delta_y * inv_box_y); delta_z -= box_z * nearbyint(delta_z * inv_box_z); RealType d2ij = delta_x * delta_x + delta_y * delta_y + delta_z * delta_z; RealType delta_w; if (ALCHEMICAL) { // (ytz): we are guaranteed that delta_w is zero if ALCHEMICAL == false // delta_w = (lambda_plane_i - lambda_plane_j)*real_cutoff + (lambda_offset_i - lambda_offset_j)*real_lambda*real_cutoff; delta_w = ci_w - cj_w; d2ij += delta_w * delta_w; } // (ytz): note that d2ij must be *strictly* less than cutoff_squared. This is because we set the // non-interacting atoms to exactly real_cutoff*real_cutoff. This ensures that atoms who's 4th dimension // is set to cutoff are non-interacting. if (d2ij < cutoff_squared && atom_j_idx > atom_i_idx && atom_j_idx < N && atom_i_idx < N) { // electrostatics RealType u; RealType es_prefactor; RealType ebd; RealType dij; RealType inv_dij; RealType inv_d2ij; compute_electrostatics<RealType, COMPUTE_U>( 1.0, qi, qj, d2ij, beta, dij, inv_dij, inv_d2ij, ebd, es_prefactor, u); RealType delta_prefactor = es_prefactor; RealType real_du_dl = 0; // lennard jones force if (eps_i != 0 && eps_j != 0) { RealType sig_grad; RealType eps_grad; compute_lj<RealType, COMPUTE_U>( 1.0, eps_i, eps_j, sig_i, sig_j, inv_dij, inv_d2ij, u, delta_prefactor, sig_grad, eps_grad); // do chain rule inside loop if (COMPUTE_DU_DP) { g_sigi += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DSIG>(sig_grad); g_sigj += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DSIG>(sig_grad); g_epsi += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DEPS>(eps_grad * eps_j); g_epsj += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DEPS>(eps_grad * eps_i); } if (COMPUTE_DU_DL && ALCHEMICAL) { real_du_dl += sig_grad * (dsig_dl_i + dsig_dl_j); RealType term = eps_grad * fix_nvidia_fmad(eps_j, deps_dl_i, eps_i, deps_dl_j); real_du_dl += term; } } if (COMPUTE_DU_DX) { gi_x += FLOAT_TO_FIXED_NONBONDED(delta_prefactor * delta_x); gi_y += FLOAT_TO_FIXED_NONBONDED(delta_prefactor * delta_y); gi_z += FLOAT_TO_FIXED_NONBONDED(delta_prefactor * delta_z); gj_x += FLOAT_TO_FIXED_NONBONDED(-delta_prefactor * delta_x); gj_y += FLOAT_TO_FIXED_NONBONDED(-delta_prefactor * delta_y); gj_z += FLOAT_TO_FIXED_NONBONDED(-delta_prefactor * delta_z); } if (COMPUTE_DU_DP) { g_qi += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DCHARGE>(qj * inv_dij * ebd); g_qj += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DCHARGE>(qi * inv_dij * ebd); } if (COMPUTE_DU_DL && ALCHEMICAL) { // needed for 
cancellation of nans (if one term blows up) real_du_dl += delta_w * delta_prefactor * (dw_dl_i - dw_dl_j); real_du_dl += inv_dij * ebd * fix_nvidia_fmad(qj, dq_dl_i, qi, dq_dl_j); du_dl += FLOAT_TO_FIXED_NONBONDED(real_du_dl); } if (COMPUTE_U) { energy += FLOAT_TO_FIXED_NONBONDED(u); } } atom_j_idx = __shfl_sync(0xffffffff, atom_j_idx, srcLane); // we can pre-compute this probably qj = __shfl_sync(0xffffffff, qj, srcLane); eps_j = __shfl_sync(0xffffffff, eps_j, srcLane); sig_j = __shfl_sync(0xffffffff, sig_j, srcLane); cj_x = __shfl_sync(0xffffffff, cj_x, srcLane); cj_y = __shfl_sync(0xffffffff, cj_y, srcLane); cj_z = __shfl_sync(0xffffffff, cj_z, srcLane); if (ALCHEMICAL) { cj_w = __shfl_sync(0xffffffff, cj_w, srcLane); // this also can be optimized away dw_dl_j = __shfl_sync(0xffffffff, dw_dl_j, srcLane); } if (COMPUTE_DU_DX) { gj_x = __shfl_sync(0xffffffff, gj_x, srcLane); gj_y = __shfl_sync(0xffffffff, gj_y, srcLane); gj_z = __shfl_sync(0xffffffff, gj_z, srcLane); } if (COMPUTE_DU_DP) { g_qj = __shfl_sync(0xffffffff, g_qj, srcLane); g_sigj = __shfl_sync(0xffffffff, g_sigj, srcLane); g_epsj = __shfl_sync(0xffffffff, g_epsj, srcLane); } if (COMPUTE_DU_DL && ALCHEMICAL) { dsig_dl_j = __shfl_sync(0xffffffff, dsig_dl_j, srcLane); deps_dl_j = __shfl_sync(0xffffffff, deps_dl_j, srcLane); dq_dl_j = __shfl_sync(0xffffffff, dq_dl_j, srcLane); } } if (COMPUTE_DU_DX) { if (atom_i_idx < N) { atomicAdd(du_dx + atom_i_idx * 3 + 0, gi_x); atomicAdd(du_dx + atom_i_idx * 3 + 1, gi_y); atomicAdd(du_dx + atom_i_idx * 3 + 2, gi_z); } if (atom_j_idx < N) { atomicAdd(du_dx + atom_j_idx * 3 + 0, gj_x); atomicAdd(du_dx + atom_j_idx * 3 + 1, gj_y); atomicAdd(du_dx + atom_j_idx * 3 + 2, gj_z); } } if (COMPUTE_DU_DP) { if (atom_i_idx < N) { atomicAdd(du_dp + charge_param_idx_i, g_qi); atomicAdd(du_dp + lj_param_idx_sig_i, g_sigi); atomicAdd(du_dp + lj_param_idx_eps_i, g_epsi); } if (atom_j_idx < N) { atomicAdd(du_dp + charge_param_idx_j, g_qj); atomicAdd(du_dp + lj_param_idx_sig_j, g_sigj); atomicAdd(du_dp + lj_param_idx_eps_j, g_epsj); } } // these are buffered and then reduced to avoid massive conflicts if (COMPUTE_DU_DL && ALCHEMICAL) { if (atom_i_idx < N) { atomicAdd(du_dl_buffer + atom_i_idx, du_dl); } } if (COMPUTE_U) { if (atom_i_idx < N) { atomicAdd(u_buffer + atom_i_idx, energy); } } } template <typename RealType, bool COMPUTE_U, bool COMPUTE_DU_DX, bool COMPUTE_DU_DL, bool COMPUTE_DU_DP> void __global__ k_nonbonded_unified( const int N, const double *__restrict__ coords, const double *__restrict__ params, // [N] const double *__restrict__ box, const double *__restrict__ dp_dl, const double *__restrict__ coords_w, // 4D coords const double *__restrict__ dw_dl, // 4D derivatives const double lambda, const double beta, const double cutoff, const int *__restrict__ ixn_tiles, const unsigned int *__restrict__ ixn_atoms, unsigned long long *__restrict__ du_dx, unsigned long long *__restrict__ du_dp, unsigned long long *__restrict__ du_dl_buffer, unsigned long long *__restrict__ u_buffer) { int tile_idx = blockIdx.x; int row_block_idx = ixn_tiles[tile_idx]; int atom_i_idx = row_block_idx * 32 + threadIdx.x; RealType dq_dl_i = atom_i_idx < N ? dp_dl[atom_i_idx * 3 + 0] : 0; RealType dsig_dl_i = atom_i_idx < N ? dp_dl[atom_i_idx * 3 + 1] : 0; RealType deps_dl_i = atom_i_idx < N ? dp_dl[atom_i_idx * 3 + 2] : 0; RealType cw_i = atom_i_idx < N ? coords_w[atom_i_idx] : 0; int atom_j_idx = ixn_atoms[tile_idx * 32 + threadIdx.x]; RealType dq_dl_j = atom_j_idx < N ? 
dp_dl[atom_j_idx * 3 + 0] : 0; RealType dsig_dl_j = atom_j_idx < N ? dp_dl[atom_j_idx * 3 + 1] : 0; RealType deps_dl_j = atom_j_idx < N ? dp_dl[atom_j_idx * 3 + 2] : 0; RealType cw_j = atom_j_idx < N ? coords_w[atom_j_idx] : 0; int is_vanilla = (cw_i == 0 && dq_dl_i == 0 && dsig_dl_i == 0 && deps_dl_i == 0 && cw_j == 0 && dq_dl_j == 0 && dsig_dl_j == 0 && deps_dl_j == 0); bool tile_is_vanilla = __all_sync(0xffffffff, is_vanilla); if (tile_is_vanilla) { v_nonbonded_unified<RealType, 0, COMPUTE_U, COMPUTE_DU_DX, COMPUTE_DU_DL, COMPUTE_DU_DP>( N, coords, params, box, dp_dl, coords_w, dw_dl, lambda, beta, cutoff, ixn_tiles, ixn_atoms, du_dx, du_dp, du_dl_buffer, u_buffer); } else { v_nonbonded_unified<RealType, 1, COMPUTE_U, COMPUTE_DU_DX, COMPUTE_DU_DL, COMPUTE_DU_DP>( N, coords, params, box, dp_dl, coords_w, dw_dl, lambda, beta, cutoff, ixn_tiles, ixn_atoms, du_dx, du_dp, du_dl_buffer, u_buffer); }; } // tbd add restrict template <typename RealType> void __global__ k_nonbonded_exclusions( const int E, // number of exclusions const double *__restrict__ coords, const double *__restrict__ params, const double *__restrict__ box, const double *__restrict__ dp_dl, const double *__restrict__ coords_w, // 4D coords const double *__restrict__ dw_dl, // 4D derivatives const double lambda, const int *__restrict__ exclusion_idxs, // [E, 2] pair-list of atoms to be excluded const double *__restrict__ scales, // [E] const double beta, const double cutoff, unsigned long long *__restrict__ du_dx, unsigned long long *__restrict__ du_dp, unsigned long long *__restrict__ du_dl_buffer, unsigned long long *__restrict__ u_buffer) { // (ytz): oddly enough the order of atom_i and atom_j // seem to not matter. I think this is because distance calculations // are bitwise identical in both dij(i, j) and dij(j, i) . However we // do need the calculation done for exclusions to perfectly mirror // that of the nonbonded kernel itself. Remember that floating points // commute but are not associative. 
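// The terms below are accumulated with a negative sign: each exclusion subtracts the
// charge_scale / lj_scale weighted interaction of the pair (i, j), cancelling the
// contribution that the dense nonbonded kernel added for that pair. Because the
// distances and prefactors go through the same compute_electrostatics / compute_lj
// helpers, the cancellation is exact in fixed point when the scale factors are 1.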
const int e_idx = blockIdx.x * blockDim.x + threadIdx.x; if (e_idx >= E) { return; } int atom_i_idx = exclusion_idxs[e_idx * 2 + 0]; RealType ci_x = coords[atom_i_idx * 3 + 0]; RealType ci_y = coords[atom_i_idx * 3 + 1]; RealType ci_z = coords[atom_i_idx * 3 + 2]; RealType ci_w = coords_w[atom_i_idx]; RealType dq_dl_i = dp_dl[atom_i_idx * 3 + 0]; RealType dsig_dl_i = dp_dl[atom_i_idx * 3 + 1]; RealType deps_dl_i = dp_dl[atom_i_idx * 3 + 2]; RealType dw_dl_i = dw_dl[atom_i_idx]; unsigned long long gi_x = 0; unsigned long long gi_y = 0; unsigned long long gi_z = 0; int charge_param_idx_i = atom_i_idx * 3 + 0; int lj_param_idx_sig_i = atom_i_idx * 3 + 1; int lj_param_idx_eps_i = atom_i_idx * 3 + 2; RealType qi = params[charge_param_idx_i]; RealType sig_i = params[lj_param_idx_sig_i]; RealType eps_i = params[lj_param_idx_eps_i]; unsigned long long g_qi = 0; unsigned long long g_sigi = 0; unsigned long long g_epsi = 0; int atom_j_idx = exclusion_idxs[e_idx * 2 + 1]; RealType cj_x = coords[atom_j_idx * 3 + 0]; RealType cj_y = coords[atom_j_idx * 3 + 1]; RealType cj_z = coords[atom_j_idx * 3 + 2]; RealType cj_w = coords_w[atom_j_idx]; RealType dq_dl_j = dp_dl[atom_j_idx * 3 + 0]; RealType dsig_dl_j = dp_dl[atom_j_idx * 3 + 1]; RealType deps_dl_j = dp_dl[atom_j_idx * 3 + 2]; RealType dw_dl_j = dw_dl[atom_j_idx]; unsigned long long gj_x = 0; unsigned long long gj_y = 0; unsigned long long gj_z = 0; int charge_param_idx_j = atom_j_idx * 3 + 0; int lj_param_idx_sig_j = atom_j_idx * 3 + 1; int lj_param_idx_eps_j = atom_j_idx * 3 + 2; RealType qj = params[charge_param_idx_j]; RealType sig_j = params[lj_param_idx_sig_j]; RealType eps_j = params[lj_param_idx_eps_j]; unsigned long long g_qj = 0; unsigned long long g_sigj = 0; unsigned long long g_epsj = 0; RealType real_lambda = static_cast<RealType>(lambda); RealType real_beta = static_cast<RealType>(beta); RealType real_cutoff = static_cast<RealType>(cutoff); RealType cutoff_squared = real_cutoff * real_cutoff; RealType charge_scale = scales[e_idx * 2 + 0]; RealType lj_scale = scales[e_idx * 2 + 1]; RealType box_x = box[0 * 3 + 0]; RealType box_y = box[1 * 3 + 1]; RealType box_z = box[2 * 3 + 2]; RealType inv_box_x = 1 / box_x; RealType inv_box_y = 1 / box_y; RealType inv_box_z = 1 / box_z; RealType delta_x = ci_x - cj_x; RealType delta_y = ci_y - cj_y; RealType delta_z = ci_z - cj_z; delta_x -= box_x * nearbyint(delta_x * inv_box_x); delta_y -= box_y * nearbyint(delta_y * inv_box_y); delta_z -= box_z * nearbyint(delta_z * inv_box_z); RealType delta_w = ci_w - cj_w; RealType d2ij = delta_x * delta_x + delta_y * delta_y + delta_z * delta_z + delta_w * delta_w; unsigned long long energy = 0; int is_vanilla = (ci_w == 0 && dq_dl_i == 0 && dsig_dl_i == 0 && deps_dl_i == 0 && cj_w == 0 && dq_dl_j == 0 && dsig_dl_j == 0 && deps_dl_j == 0); // see note: this must be strictly less than if (d2ij < cutoff_squared) { RealType u; RealType ebd; RealType es_prefactor; RealType dij; RealType inv_dij; RealType inv_d2ij; compute_electrostatics<RealType, true>( charge_scale, qi, qj, d2ij, beta, dij, inv_dij, inv_d2ij, ebd, es_prefactor, u); RealType delta_prefactor = es_prefactor; RealType real_du_dl = 0; // lennard jones force if (eps_i != 0 && eps_j != 0) { RealType sig_grad; RealType eps_grad; compute_lj<RealType, true>( lj_scale, eps_i, eps_j, sig_i, sig_j, inv_dij, inv_d2ij, u, delta_prefactor, sig_grad, eps_grad); g_sigi += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DSIG>(-sig_grad); g_sigj += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DSIG>(-sig_grad); 
g_epsi += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DEPS>(-eps_grad * eps_j); g_epsj += FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DEPS>(-eps_grad * eps_i); real_du_dl -= sig_grad * (dsig_dl_i + dsig_dl_j); RealType term = eps_grad * fix_nvidia_fmad(eps_j, deps_dl_i, eps_i, deps_dl_j); real_du_dl -= term; } gi_x -= FLOAT_TO_FIXED_NONBONDED(delta_prefactor * delta_x); gi_y -= FLOAT_TO_FIXED_NONBONDED(delta_prefactor * delta_y); gi_z -= FLOAT_TO_FIXED_NONBONDED(delta_prefactor * delta_z); gj_x -= FLOAT_TO_FIXED_NONBONDED(-delta_prefactor * delta_x); gj_y -= FLOAT_TO_FIXED_NONBONDED(-delta_prefactor * delta_y); gj_z -= FLOAT_TO_FIXED_NONBONDED(-delta_prefactor * delta_z); // energy is size extensive so this may not be a good idea energy -= FLOAT_TO_FIXED_NONBONDED(u); g_qi -= FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DCHARGE>(charge_scale * qj * inv_dij * ebd); g_qj -= FLOAT_TO_FIXED_DU_DP<RealType, FIXED_EXPONENT_DU_DCHARGE>(charge_scale * qi * inv_dij * ebd); real_du_dl -= delta_w * delta_prefactor * (dw_dl_i - dw_dl_j); real_du_dl -= charge_scale * inv_dij * ebd * fix_nvidia_fmad(qj, dq_dl_i, qi, dq_dl_j); if (du_dx) { atomicAdd(du_dx + atom_i_idx * 3 + 0, gi_x); atomicAdd(du_dx + atom_i_idx * 3 + 1, gi_y); atomicAdd(du_dx + atom_i_idx * 3 + 2, gi_z); atomicAdd(du_dx + atom_j_idx * 3 + 0, gj_x); atomicAdd(du_dx + atom_j_idx * 3 + 1, gj_y); atomicAdd(du_dx + atom_j_idx * 3 + 2, gj_z); } if (du_dp) { atomicAdd(du_dp + charge_param_idx_i, g_qi); atomicAdd(du_dp + charge_param_idx_j, g_qj); atomicAdd(du_dp + lj_param_idx_sig_i, g_sigi); atomicAdd(du_dp + lj_param_idx_eps_i, g_epsi); atomicAdd(du_dp + lj_param_idx_sig_j, g_sigj); atomicAdd(du_dp + lj_param_idx_eps_j, g_epsj); } if (du_dl_buffer && !is_vanilla) { atomicAdd(du_dl_buffer + atom_i_idx, FLOAT_TO_FIXED_NONBONDED(real_du_dl)); } if (u_buffer) { atomicAdd(u_buffer + atom_i_idx, energy); } } }
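// Worked example: k_check_rebuild near the top of this file requests a neighbor-list
// rebuild when the box changes or when any atom has moved more than padding/2 from the
// position the list was built at (d2ij > 0.25 * padding * padding, i.e. (padding/2)^2).
// The factor of one half is the usual Verlet-skin argument: if the list is built with an
// enlarged cutoff of (cutoff + padding), two atoms can each drift up to padding/2 toward
// one another before a pair inside the true cutoff could be missed. A minimal CPU
// reference of the displacement check (the function name here is illustrative only):
bool cpu_needs_rebuild(const double *old_coords, const double *new_coords,
                       int N, double padding) {
    const double threshold_sq = 0.25 * padding * padding; // (padding / 2)^2
    for (int i = 0; i < N; i++) {
        double dx = old_coords[i * 3 + 0] - new_coords[i * 3 + 0];
        double dy = old_coords[i * 3 + 1] - new_coords[i * 3 + 1];
        double dz = old_coords[i * 3 + 2] - new_coords[i * 3 + 2];
        if (dx * dx + dy * dy + dz * dz > threshold_sq) {
            return true; // mirrors rebuild[0] = 1 in the kernel
        }
    }
    return false;
}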
* Simple caching allocator for device memory allocations. The allocator is * thread-safe and capable of managing device allocations on multiple GPUs. ******************************************************************************/ #pragma once #include <math.h> #include <set> #include <map> #include "../util/ns_umbrella.cuh" #include "../util/spinlock.cuh" #include "../util/error_utils.cuh" B40C_NS_PREFIX namespace b40c { namespace util { /** * Simple caching allocator for device memory allocations. The allocator is * thread-safe and is capable of managing cached device allocations on multiple GPUs. * * Allocations are rounded up to and categorized by bin size. Bin sizes progress * geometrically in accordance with the growth factor "bin_growth" provided during * construction. Unused device allocations within a larger bin cache are not * reused for allocation requests that categorize to smaller bin sizes. * * Allocation requests below (bin_growth ^ min_bin) are rounded up to * (bin_growth ^ min_bin). * * Allocations above (bin_growth ^ max_bin) are not rounded up to the nearest * bin and are simply freed when they are deallocated instead of being returned * to a bin-cache. * * If the total storage of cached allocations on a given GPU will exceed * (max_cached_bytes), allocations for that GPU are simply freed when they are * deallocated instead of being returned to their bin-cache. * * For example, the default-constructed CachedAllocator is configured with: * bin_growth = 8 * min_bin = 3 * max_bin = 7 * max_cached_bytes = (bin_growth ^ max_bin) * 3) - 1 = 6,291,455 bytes * * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB * and sets a maximum of 6,291,455 cached bytes per GPU * */ struct CachedAllocator { //--------------------------------------------------------------------- // Type definitions and constants //--------------------------------------------------------------------- typedef int GpuOrdinal; enum { INVALID_GPU_ORDINAL = -1, }; /** * Integer pow function for unsigned base and exponent */ static __forceinline__ unsigned int IntPow( unsigned int base, unsigned int exp) { unsigned int retval = 1; while (exp > 0) { if (exp & 1) { retval = retval * base; // multiply the result by the current base } base = base * base; // square the base exp = exp >> 1; // divide the exponent in half } return retval; } /** * Round up to the nearest power-of */ static __forceinline__ void NearestPowerOf( unsigned int &power, size_t &rounded_bytes, unsigned int base, size_t value) { power = 0; rounded_bytes = 1; while (rounded_bytes < value) { rounded_bytes *= base; power++; } } /** * Descriptor for device memory allocations */ struct BlockDescriptor { GpuOrdinal gpu; // GPU ordinal void* d_ptr; // Device pointer size_t bytes; // Size of allocation in bytes unsigned int bin; // Bin enumeration // Constructor BlockDescriptor(void *d_ptr, GpuOrdinal gpu) : d_ptr(d_ptr), bytes(0), bin(0), gpu(gpu) {} // Constructor BlockDescriptor(size_t bytes, unsigned int bin, GpuOrdinal gpu) : d_ptr(NULL), bytes(bytes), bin(bin), gpu(gpu) {} // Comparison functor for comparing device pointers static bool PtrCompare(const BlockDescriptor &a, const BlockDescriptor &b) { if (a.gpu < b.gpu) { return true; } else if (a.gpu > b.gpu) { return false; } else { return (a.d_ptr < b.d_ptr); } } // Comparison functor for comparing allocation sizes static bool SizeCompare(const BlockDescriptor &a, const BlockDescriptor &b) { if (a.gpu < b.gpu) { return true; } else if (a.gpu > b.gpu) { return false; } else { return 
(a.bytes < b.bytes); } } }; // BlockDescriptor comparator function interface typedef bool (*Compare)(const BlockDescriptor &, const BlockDescriptor &); // Set type for cached blocks (ordered by size) typedef std::set<BlockDescriptor, Compare> CachedBlocks; // Set type for live blocks (ordered by ptr) typedef std::set<BlockDescriptor, Compare> BusyBlocks; // Map type of gpu ordinals to the number of cached bytes cached by each GPU typedef std::map<GpuOrdinal, size_t> GpuCachedBytes; //--------------------------------------------------------------------- // Fields //--------------------------------------------------------------------- Spinlock spin_lock; // Spinlock for thread-safety CachedBlocks cached_blocks; // Set of cached device allocations available for reuse BusyBlocks live_blocks; // Set of live device allocations currently in use unsigned int bin_growth; // Geometric growth factor for bin-sizes unsigned int min_bin; // Minimum bin enumeration unsigned int max_bin; // Maximum bin enumeration size_t min_bin_bytes; // Minimum bin size size_t max_bin_bytes; // Maximum bin size size_t max_cached_bytes; // Maximum aggregate cached bytes per GPU GpuCachedBytes cached_bytes; // Map of GPU ordinal to aggregate cached bytes on that GPU //--------------------------------------------------------------------- // Methods //--------------------------------------------------------------------- /** * Constructor. */ CachedAllocator( unsigned int bin_growth, // Geometric growth factor for bin-sizes unsigned int min_bin, // Minimum bin unsigned int max_bin, // Maximum bin size_t max_cached_bytes) : // Maximum aggregate cached bytes per GPU spin_lock(0), cached_blocks(BlockDescriptor::SizeCompare), live_blocks(BlockDescriptor::PtrCompare), bin_growth(bin_growth), min_bin(min_bin), max_bin(max_bin), min_bin_bytes(IntPow(bin_growth, min_bin)), max_bin_bytes(IntPow(bin_growth, max_bin)), max_cached_bytes(max_cached_bytes) {} /** * Constructor. Configured with: * bin_growth = 8 * min_bin = 3 * max_bin = 7 * max_cached_bytes = (bin_growth ^ max_bin) * 3) - 1 = 6,291,455 bytes * * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB * and sets a maximum of 6,291,455 cached bytes per GPU */ CachedAllocator() : spin_lock(0), cached_blocks(BlockDescriptor::SizeCompare), live_blocks(BlockDescriptor::PtrCompare), bin_growth(8), min_bin(3), max_bin(7), min_bin_bytes(IntPow(bin_growth, min_bin)), max_bin_bytes(IntPow(bin_growth, max_bin)), max_cached_bytes((max_bin_bytes * 3) - 1) {} /** * Sets the limit on the number bytes this allocator is allowed to * cache per GPU. 
*/ void SetMaxCachedBytes(size_t max_cached_bytes) { // Lock Lock(&spin_lock); this->max_cached_bytes = max_cached_bytes; // Unlock Unlock(&spin_lock); } /** * Provides a suitable allocation of device memory for the given size * on the specified GPU */ cudaError_t Allocate(void** d_ptr, size_t bytes, GpuOrdinal gpu) { bool locked = false; GpuOrdinal entrypoint_gpu = INVALID_GPU_ORDINAL; cudaError_t error = cudaSuccess; // Round up to nearest bin size unsigned int bin; size_t bin_bytes; NearestPowerOf(bin, bin_bytes, bin_growth, bytes); if (bin < min_bin) { bin = min_bin; bin_bytes = min_bin_bytes; } // Check if bin is greater than our maximum bin if (bin > max_bin) { // Allocate the request exactly and give out-of-range bin bin = (unsigned int) -1; bin_bytes = bytes; } BlockDescriptor search_key(bin_bytes, bin, gpu); // Lock if (!locked) { Lock(&spin_lock); locked = true; } do { // Find a free block big enough within the same bin on the same GPU CachedBlocks::iterator block_itr = cached_blocks.lower_bound(search_key); if ((block_itr != cached_blocks.end()) && (block_itr->gpu == gpu) && (block_itr->bin == search_key.bin)) { // Reuse existing cache block. Insert into live blocks. search_key = *block_itr; live_blocks.insert(search_key); // Remove from free blocks cached_blocks.erase(block_itr); cached_bytes[gpu] -= search_key.bytes; } else { // Need to allocate a new cache block. Unlock. if (locked) { Unlock(&spin_lock); locked = false; } // Set to specified GPU error = cudaGetDevice(&entrypoint_gpu); if (util::B40CPerror(error, "cudaGetDevice failed ", __FILE__, __LINE__)) break; error = cudaSetDevice(gpu); if (util::B40CPerror(error, "cudaSetDevice failed ", __FILE__, __LINE__)) break; // Allocate error = cudaMalloc(&search_key.d_ptr, search_key.bytes); if (util::B40CPerror(error, "cudaMalloc failed ", __FILE__, __LINE__)) break; // Lock if (!locked) { Lock(&spin_lock); locked = true; } // Insert into live blocks live_blocks.insert(search_key); } } while(0); // Unlock if (locked) { Unlock(&spin_lock); locked = false; } // Attempt to revert back to previous GPU if necessary if (entrypoint_gpu != INVALID_GPU_ORDINAL) { error = cudaSetDevice(entrypoint_gpu); util::B40CPerror(error, "cudaSetDevice failed ", __FILE__, __LINE__); } // Copy device pointer to output parameter *d_ptr = search_key.d_ptr; return error; } /** * Provides a suitable allocation of device memory for the given size * on the current GPU */ cudaError_t Allocate(void** d_ptr, size_t bytes) { GpuOrdinal current_gpu; cudaError_t error = cudaGetDevice(&current_gpu); if (util::B40CPerror(error, "cudaGetDevice failed ", __FILE__, __LINE__)) return error; return Allocate(d_ptr, bytes, current_gpu); } /** * Returns a live allocation of GPU memory on the specified GPU to * the allocator */ cudaError_t Deallocate(void* d_ptr, GpuOrdinal gpu) { bool locked = false; GpuOrdinal entrypoint_gpu = INVALID_GPU_ORDINAL; cudaError_t error = cudaSuccess; BlockDescriptor search_key(d_ptr, gpu); // Lock if (!locked) { Lock(&spin_lock); locked = true; } do { // Find corresponding block descriptor BusyBlocks::iterator block_itr = live_blocks.find(search_key); if (block_itr == live_blocks.end()) { // Cannot find pointer error = util::B40CPerror(cudaErrorUnknown, "Deallocate failed ", __FILE__, __LINE__); break; } else { // Remove from live blocks search_key = *block_itr; live_blocks.erase(block_itr); // Check if we should keep the returned allocation if ((search_key.bin <= max_bin) && (cached_bytes[gpu] + search_key.bytes <= max_cached_bytes)) { // 
Insert returned allocation into free blocks cached_blocks.insert(search_key); cached_bytes[gpu] += search_key.bytes; } else { // Free the returned allocation. Unlock. if (locked) { Unlock(&spin_lock); locked = false; } // Set to specified GPU error = cudaGetDevice(&entrypoint_gpu); if (util::B40CPerror(error, "cudaGetDevice failed ", __FILE__, __LINE__)) break; error = cudaSetDevice(gpu); if (util::B40CPerror(error, "cudaSetDevice failed ", __FILE__, __LINE__)) break; // Free device memory error = cudaFree(d_ptr); if (util::B40CPerror(error, "cudaFree failed ", __FILE__, __LINE__)) break; } } } while (0); // Unlock if (locked) { Unlock(&spin_lock); locked = false; } // Attempt to revert back to entry-point GPU if necessary if (entrypoint_gpu != INVALID_GPU_ORDINAL) { error = cudaSetDevice(entrypoint_gpu); util::B40CPerror(error, "cudaSetDevice failed ", __FILE__, __LINE__); } return error; } /** * Returns a live allocation of device memory on the current GPU to the * allocator */ cudaError_t Deallocate(void* d_ptr) { GpuOrdinal current_gpu; cudaError_t error = cudaGetDevice(&current_gpu); if (util::B40CPerror(error, "cudaGetDevice failed ", __FILE__, __LINE__)) return error; return Deallocate(d_ptr, current_gpu); } /** * Frees all cached device allocations on all GPUs */ cudaError_t FreeAllCached() { cudaError_t error = cudaSuccess; bool locked = false; GpuOrdinal entrypoint_gpu = INVALID_GPU_ORDINAL; GpuOrdinal current_gpu = INVALID_GPU_ORDINAL; // Lock if (!locked) { Lock(&spin_lock); locked = true; } while (!cached_blocks.empty()) { // Get first block CachedBlocks::iterator begin = cached_blocks.begin(); // Get entry-point GPU ordinal if necessary if (entrypoint_gpu == INVALID_GPU_ORDINAL) { error = cudaGetDevice(&entrypoint_gpu); if (util::B40CPerror(error, "cudaGetDevice failed ", __FILE__, __LINE__)) break; } // Set current GPU ordinal if necessary if (begin->gpu != current_gpu) { error = cudaSetDevice(begin->gpu); if (util::B40CPerror(error, "cudaSetDevice failed ", __FILE__, __LINE__)) break; current_gpu = begin->gpu; } // Free device memory error = cudaFree(begin->d_ptr); if (util::B40CPerror(error, "cudaGetDevice failed ", __FILE__, __LINE__)) break; // Reduce balance and erase entry cached_bytes[current_gpu] -= begin->bytes; cached_blocks.erase(begin); } // Unlock if (locked) { Unlock(&spin_lock); locked = false; } // Attempt to revert back to entry-point GPU if necessary if (entrypoint_gpu != INVALID_GPU_ORDINAL) { error = cudaSetDevice(entrypoint_gpu); util::B40CPerror(error, "cudaSetDevice failed ", __FILE__, __LINE__); } return error; } }; } // namespace util } // namespace b40c B40C_NS_POSTFIX
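// Minimal usage sketch for the caching allocator above (hypothetical example code, not
// part of the library; it assumes this header is included, a CUDA device is available,
// and it elides error handling). Allocate() rounds the request up to a bin, Deallocate()
// parks the block in the bin cache instead of calling cudaFree, and a later Allocate()
// whose size maps to the same bin reuses the cached block.
void cached_allocator_demo() {
    b40c::util::CachedAllocator allocator;   // default bins: 512B, 4KB, 32KB, 256KB, 2MB

    void *d_scratch = NULL;
    allocator.Allocate(&d_scratch, 3000);    // rounded up to the 4KB bin (8^4 bytes)

    // ... launch kernels that use d_scratch ...

    allocator.Deallocate(d_scratch);         // returned to the 4KB bin cache, not freed

    void *d_again = NULL;
    allocator.Allocate(&d_again, 4000);      // maps to the same bin: reuses the cached block
    allocator.Deallocate(d_again);

    allocator.FreeAllCached();               // actually cudaFree()s everything still cached
}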
#include "fastertransformer/cuda/cub/cub.cuh" #include "fusion_gpt_op.h" #include "pd_traits.h" template <paddle::DataType D> std::vector<paddle::Tensor> gpt2_kernel( const paddle::Tensor& input, const paddle::Tensor& attn_mask, const paddle::Tensor& start_length, const paddle::Tensor& word_emb, const std::vector<paddle::Tensor>& self_ln_weight, const std::vector<paddle::Tensor>& self_ln_bias, const std::vector<paddle::Tensor>& self_q_weight, const std::vector<paddle::Tensor>& self_q_bias, const std::vector<paddle::Tensor>& self_k_weight, const std::vector<paddle::Tensor>& self_k_bias, const std::vector<paddle::Tensor>& self_v_weight, const std::vector<paddle::Tensor>& self_v_bias, const std::vector<paddle::Tensor>& self_out_weight, const std::vector<paddle::Tensor>& self_out_bias, const std::vector<paddle::Tensor>& ffn_ln_weight, const std::vector<paddle::Tensor>& ffn_ln_bias, const std::vector<paddle::Tensor>& ffn_inter_weight, const std::vector<paddle::Tensor>& ffn_inter_bias, const std::vector<paddle::Tensor>& ffn_out_weight, const std::vector<paddle::Tensor>& ffn_out_bias, const paddle::Tensor& decoder_ln_weight, const paddle::Tensor& decoder_ln_bias, const paddle::Tensor& positional_embedding_weight, const paddle::Tensor& emb_weight, paddle::Tensor& output_ids, const int& topk, const float& topp, const int& max_len, const int& n_head, const int& size_per_head, const int& num_layer, const int& bos_id, const int& eos_id, const float& temperature, cublasHandle_t cublas_handle_, cublasLtHandle_t cublaslt_handle_, cudaStream_t stream) { auto input_dims = input.shape(); int batch_size_ = input_dims[0]; int start_len = input_dims[1]; const int vocab_size = word_emb.shape()[0]; typedef PDTraits<D> traits_; typedef typename traits_::DataType DataType_; typedef typename traits_::data_t data_t_; DecodingInitParam<DataType_> decoding_params; decoding_params.cublas_handle = cublas_handle_; decoding_params.cublaslt_handle = cublaslt_handle_; decoding_params.output_ids = output_ids.mutable_data<int>(word_emb.place()); typedef DecoderTransformerTraits<traits_::OpType> DecodingTraits_; decoding_params.stream = stream; fastertransformer::Allocator<AllocatorType::PD> allocator_(stream); const int hidden_unit = size_per_head * n_head; TensorParallelParam tensor_parallel_param; LayerParallelParam layer_parallel_param; // TODO: multi-cards supports. // ncclComm_t tensor_para_nccl_comm, layer_para_nccl_comm; tensor_parallel_param.rank = 0; tensor_parallel_param.world_size = 1; // TODO: multi-cards supports. // tensor_parallel_param.nccl_comm = tensor_para_nccl_comm; tensor_parallel_param.local_head_num_ = n_head; tensor_parallel_param.local_hidden_units_ = hidden_unit; layer_parallel_param.rank = 0; layer_parallel_param.world_size = 1; // TODO: multi-cards supports. 
// layer_parallel_param.nccl_comm = layer_para_nccl_comm; layer_parallel_param.layers_per_group = num_layer; layer_parallel_param.local_batch_size = batch_size_; DecodingGpt<DecodingTraits_::OpType>* gpt_decoding; decoding_params.request_batch_size = batch_size_; decoding_params.max_input_len = start_len; decoding_params.request_input_len = start_len; decoding_params.request_output_len = max_len - start_len; decoding_params.d_start_ids = const_cast<int *>(input.data<int>()); decoding_params.d_attn_mask = reinterpret_cast<DataType_*>(const_cast<data_t_ *>(attn_mask.data<data_t_>())); decoding_params.d_start_lengths = start_length.data<int>(); gpt_decoding = new DecodingGpt<DecodingTraits_::OpType>(allocator_, batch_size_, max_len, n_head, size_per_head, vocab_size, num_layer, bos_id, eos_id, topk, topp, temperature, 1, /*tensor_para_size*/ 1, /*layer_para_size*/ true /*is_fuse_QKV*/); gpt_decoding->set_tensor_parallel_param(tensor_parallel_param); gpt_decoding->set_layer_parallel_param(layer_parallel_param); DecoderInitParam<DataType_>* params = new DecoderInitParam<DataType_>[num_layer]; for (int i = 0; i < num_layer; ++i) { if (layer_parallel_param.is_valid(i) == false) { continue; } params[i].stream = stream; params[i].cublas_handle = cublas_handle_; params[i].cublaslt_handle = cublaslt_handle_; params[i].request_batch_size = batch_size_; params[i].request_max_mem_seq_len = start_len; params[i].self_layernorm.gamma = reinterpret_cast<const DataType_*>(self_ln_weight[i].data<data_t_>()); params[i].self_layernorm.beta = reinterpret_cast<const DataType_*>(self_ln_bias[i].data<data_t_>()); params[i].self_attention.query_weight.kernel = reinterpret_cast<const DataType_*>(self_q_weight[i].data<data_t_>()); params[i].self_attention.query_weight.bias = reinterpret_cast<const DataType_*>(self_q_bias[i].data<data_t_>()); params[i].self_attention.key_weight.kernel = reinterpret_cast<const DataType_*>(self_k_weight[i].data<data_t_>()); params[i].self_attention.key_weight.bias = reinterpret_cast<const DataType_*>(self_k_bias[i].data<data_t_>()); params[i].self_attention.value_weight.kernel = reinterpret_cast<const DataType_*>(self_v_weight[i].data<data_t_>()); params[i].self_attention.value_weight.bias = reinterpret_cast<const DataType_*>(self_v_bias[i].data<data_t_>()); params[i].self_attention.attention_output_weight.kernel = reinterpret_cast<const DataType_*>(self_out_weight[i].data<data_t_>()); params[i].self_attention.attention_output_weight.bias = reinterpret_cast<const DataType_*>(self_out_bias[i].data<data_t_>()); params[i].ffn_layernorm.gamma = reinterpret_cast<const DataType_*>(ffn_ln_weight[i].data<data_t_>()); params[i].ffn_layernorm.beta = reinterpret_cast<const DataType_*>(ffn_ln_bias[i].data<data_t_>()); params[i].ffn.intermediate_weight.kernel = reinterpret_cast<const DataType_*>(ffn_inter_weight[i].data<data_t_>()); params[i].ffn.intermediate_weight.bias = reinterpret_cast<const DataType_*>(ffn_inter_bias[i].data<data_t_>()); params[i].ffn.output_weight.kernel = reinterpret_cast<const DataType_*>(ffn_out_weight[i].data<data_t_>()); params[i].ffn.output_weight.bias = reinterpret_cast<const DataType_*>(ffn_out_bias[i].data<data_t_>()); } decoding_params.layernorm.gamma = reinterpret_cast<const DataType_*>(decoder_ln_weight.data<data_t_>()); decoding_params.layernorm.beta = reinterpret_cast<const DataType_*>(decoder_ln_bias.data<data_t_>()); decoding_params.embedding_table = reinterpret_cast<const DataType_*>(word_emb.data<data_t_>()); decoding_params.embedding_kernel = 
reinterpret_cast<const DataType_*>(emb_weight.data<data_t_>()); decoding_params.position_encoding_table = reinterpret_cast<const DataType_*>( positional_embedding_weight.data<data_t_>()); gpt_decoding->forward_context(params, decoding_params); gpt_decoding->forward(params, decoding_params); delete gpt_decoding; delete[] params; return {output_ids}; } std::vector<paddle::Tensor> GPT2CUDAForward( const paddle::Tensor& input, const paddle::Tensor& attn_mask, const paddle::Tensor& start_length, const paddle::Tensor& word_embedding, const std::vector<paddle::Tensor>& self_ln_weight, const std::vector<paddle::Tensor>& self_ln_bias, const std::vector<paddle::Tensor>& self_q_weight, const std::vector<paddle::Tensor>& self_q_bias, const std::vector<paddle::Tensor>& self_k_weight, const std::vector<paddle::Tensor>& self_k_bias, const std::vector<paddle::Tensor>& self_v_weight, const std::vector<paddle::Tensor>& self_v_bias, const std::vector<paddle::Tensor>& self_out_weight, const std::vector<paddle::Tensor>& self_out_bias, const std::vector<paddle::Tensor>& ffn_ln_weight, const std::vector<paddle::Tensor>& ffn_ln_bias, const std::vector<paddle::Tensor>& ffn_inter_weight, const std::vector<paddle::Tensor>& ffn_inter_bias, const std::vector<paddle::Tensor>& ffn_out_weight, const std::vector<paddle::Tensor>& ffn_out_bias, const paddle::Tensor& decoder_ln_weight, const paddle::Tensor& decoder_ln_bias, const paddle::Tensor& positional_embedding_weight, const paddle::Tensor& emb_weight, paddle::Tensor& output_ids, const int& topk, const float& topp, const int& max_len, const int& n_head, const int& size_per_head, const int& num_layer, const int& bos_id, const int& eos_id, const float& temperature, const bool& use_fp16 = false) { auto stream = word_embedding.stream(); cublasHandle_t cublas_handle_; cublasCreate(&cublas_handle_); cublasLtHandle_t cublaslt_handle_; cublasLtCreate(&cublaslt_handle_); cublasSetStream(cublas_handle_, stream); std::vector<paddle::Tensor> ret; if (use_fp16) { ret = gpt2_kernel<paddle::DataType::FLOAT16>(input, attn_mask, start_length, word_embedding, self_ln_weight, self_ln_bias, self_q_weight, self_q_bias, self_k_weight, self_k_bias, self_v_weight, self_v_bias, self_out_weight, self_out_bias, ffn_ln_weight, ffn_ln_bias, ffn_inter_weight, ffn_inter_bias, ffn_out_weight, ffn_out_bias, decoder_ln_weight, decoder_ln_bias, positional_embedding_weight, emb_weight, output_ids, topk, topp, max_len, n_head, size_per_head, num_layer, bos_id, eos_id, temperature, cublas_handle_, cublaslt_handle_, stream); } else { ret = gpt2_kernel<paddle::DataType::FLOAT32>(input, attn_mask, start_length, word_embedding, self_ln_weight, self_ln_bias, self_q_weight, self_q_bias, self_k_weight, self_k_bias, self_v_weight, self_v_bias, self_out_weight, self_out_bias, ffn_ln_weight, ffn_ln_bias, ffn_inter_weight, ffn_inter_bias, ffn_out_weight, ffn_out_bias, decoder_ln_weight, decoder_ln_bias, positional_embedding_weight, emb_weight, output_ids, topk, topp, max_len, n_head, size_per_head, num_layer, bos_id, eos_id, temperature, cublas_handle_, cublaslt_handle_, stream); } cublasDestroy(cublas_handle_); cublasLtDestroy(cublaslt_handle_); return ret; }
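// The dispatch above relies on a traits class (PDTraits, from pd_traits.h) to turn the
// runtime use_fp16 flag into a compile-time CUDA element type. The snippet below is an
// illustrative, self-contained sketch of that pattern only; the names DType, DTypeTraits
// and run_typed are hypothetical and are not the actual definitions in pd_traits.h.
#include <cuda_fp16.h>

enum class DType { FP32, FP16 };

template <DType D> struct DTypeTraits;
template <> struct DTypeTraits<DType::FP32> { using DataType = float; };
template <> struct DTypeTraits<DType::FP16> { using DataType = half; };

template <DType D>
void run_typed(const void *raw, int n) {
    // Reinterpret the type-erased buffer with the compile-time element type and
    // launch kernels templated on DataType (launches omitted here).
    using T = typename DTypeTraits<D>::DataType;
    const T *ptr = reinterpret_cast<const T *>(raw);
    (void)ptr;
    (void)n;
}

// Runtime flag -> compile-time instantiation, mirroring the use_fp16 branch above.
void run(const void *raw, int n, bool use_fp16) {
    if (use_fp16) {
        run_typed<DType::FP16>(raw, n);
    } else {
        run_typed<DType::FP32>(raw, n);
    }
}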
extern "C" __global__ void bmm_tn( const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, int M, int N, int K ){ int tid = threadIdx.x; // thread idx int bid = blockIdx.z; // batch idx // Neighboring blocks are grouped into PN x PM block groups in order to increase // L1 cache hit rate // There are ceil(M/PM) x ceil(N/PN) block groups in total. // Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN int px = blockIdx.x % _PN_; int py = blockIdx.x / _PN_; int bDimX = (N + (128*_PN_) - 1) / (128*_PN_); int bDimY = (M + (128*_PM_) - 1) / (128*_PM_); int bIdxX = (blockIdx.y % bDimX) * _PN_ + px; int bIdxY = (blockIdx.y / bDimX) * _PM_ + py; int gStartx = bIdxX * 128; // starting index of block on N axis int gStarty = bIdxY * 128; // starting index of block on M axis if (gStartx > N || gStarty > M){ return; } // These are used to re-arrange threads into different shapes // for example: (256) -> (16, 16) -> (8, 32) -> (32, 8) int vx = tid % 16; int vy = tid / 16; int wx = tid % 32; // thread idx in warp int wy = tid / 32; // warp id int dx = tid % 8; int dy = tid / 8; __shared__ _VOLATILE_ float aSmem1[8][128+4]; __shared__ _VOLATILE_ float bSmem1[8][128+4]; __shared__ _VOLATILE_ float aSmem2[8][128+4]; __shared__ _VOLATILE_ float bSmem2[8][128+4]; float aBuffer1[4]; float bBuffer1[4]; float aBuffer2[4]; float bBuffer2[4]; float8 cCache[8]; init_cCache(cCache); // Load initial 16 x 128 tile of A and B to buffer1 and buffer2 load_ab_tn( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, 0, M, N, K ); // Number of main loop iterations is ceil(k/16) int nIt = (K + 16 - 1) / 16; #pragma unroll for (int itr=0; itr<nIt; itr++){ int gStartk = itr * 16; #pragma unroll buffer2smem_tn( aSmem1, aSmem2, bSmem1, bSmem2, aBuffer1, aBuffer2, bBuffer1, bBuffer2 ); if (likely(itr < nIt - 1)){ load_ab_tn( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, gStartk + 16, M, N, K ); } // synchroznie threads in order make sure tiles of A and B are fully // loaded to shared memory. __syncthreads(); // Each thread computes 8 x 8 matrix multiplication // Accumulate intermediate results in cCache // aSmem1, bSmem1, aSmem2, bSmem2 are consumed thread_matmul_v3(aSmem1, bSmem1, cCache, vx, vy); thread_matmul_v3(aSmem2, bSmem2, cCache, vx, vy); // synchronize threads to signal that shared memory is consumed. __syncthreads(); } // At the end of main loop, store cCache to C //write_c(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); write_c_v3(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); } extern "C" __global__ void bmm_nt( const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, int M, int N, int K ){ int tid = threadIdx.x; // thread idx int bid = blockIdx.z; // batch idx // Neighboring blocks are grouped into PN x PM block groups in order to increase // L1 cache hit rate // There are ceil(M/PM) x ceil(N/PN) block groups in total. 
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN int px = blockIdx.x % _PN_; int py = blockIdx.x / _PN_; int bDimX = (N + (128*_PN_) - 1) / (128*_PN_); int bDimY = (M + (128*_PM_) - 1) / (128*_PM_); int bIdxX = (blockIdx.y % bDimX) * _PN_ + px; int bIdxY = (blockIdx.y / bDimX) * _PM_ + py; int gStartx = bIdxX * 128; // starting index of block on N axis int gStarty = bIdxY * 128; // starting index of block on M axis if (gStartx > N || gStarty > M){ return; } // These are used to re-arrange threads into different shapes // for example: (256) -> (16, 16) -> (8, 32) -> (32, 8) int vx = tid % 16; int vy = tid / 16; int wx = tid % 32; // thread idx in warp int wy = tid / 32; // warp id int dx = tid % 8; int dy = tid / 8; __shared__ _VOLATILE_ float aSmem1[8][128+4]; __shared__ _VOLATILE_ float bSmem1[8][128+4]; __shared__ _VOLATILE_ float aSmem2[8][128+4]; __shared__ _VOLATILE_ float bSmem2[8][128+4]; float aBuffer1[4]; float bBuffer1[4]; float aBuffer2[4]; float bBuffer2[4]; float8 cCache[8]; init_cCache(cCache); // Load initial 16 x 128 tile of A and B to buffer1 and buffer2 load_ab_nt( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, 0, M, N, K ); // Number of main loop iterations is ceil(k/16) int nIt = (K + 16 - 1) / 16; #pragma unroll for (int itr=0; itr<nIt; itr++){ int gStartk = itr * 16; buffer2smem_nt( aSmem1, aSmem2, bSmem1, bSmem2, aBuffer1, aBuffer2, bBuffer1, bBuffer2 ); if (likely(itr < nIt - 1)){ load_ab_nt( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, gStartk + 16, M, N, K ); } // synchronize threads in order to make sure tiles of A and B are fully // loaded to shared memory. __syncthreads(); // Each thread computes 8 x 8 matrix multiplication // Accumulate intermediate results in cCache // aSmem1, bSmem1, aSmem2, bSmem2 are consumed thread_matmul_v3(aSmem1, bSmem1, cCache, vx, vy); thread_matmul_v3(aSmem2, bSmem2, cCache, vx, vy); // synchronize threads to signal that shared memory is consumed. __syncthreads(); } // At the end of main loop, store cCache to C //write_c(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); write_c_v3(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); } extern "C" __global__ void bmm_nn( const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, int M, int N, int K ){ int tid = threadIdx.x; // thread idx int bid = blockIdx.z; // batch idx // Neighboring blocks are grouped into PN x PM block groups in order to increase // L1 cache hit rate // There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN int px = blockIdx.x % _PN_; int py = blockIdx.x / _PN_; int bDimX = (N + (128*_PN_) - 1) / (128*_PN_); int bDimY = (M + (128*_PM_) - 1) / (128*_PM_); int bIdxX = (blockIdx.y % bDimX) * _PN_ + px; int bIdxY = (blockIdx.y / bDimX) * _PM_ + py; int gStartx = bIdxX * 128; // starting index of block on N axis int gStarty = bIdxY * 128; // starting index of block on M axis if (gStartx > N || gStarty > M){ return; } // These are used to re-arrange threads into different shapes // for example: (256) -> (16, 16) -> (8, 32) -> (32, 8) int vx = tid % 16; int vy = tid / 16; int wx = tid % 32; // thread idx in warp int wy = tid / 32; // warp id int dx = tid % 8; int dy = tid / 8; __shared__ _VOLATILE_ float aSmem1[8][128+4]; __shared__ _VOLATILE_ float bSmem1[8][128+4]; __shared__ _VOLATILE_ float aSmem2[8][128+4]; __shared__ _VOLATILE_ float bSmem2[8][128+4]; float aBuffer1[4]; float bBuffer1[4]; float aBuffer2[4]; float bBuffer2[4]; float8 cCache[8]; init_cCache(cCache); // Load initial 16 x 128 tile of A and B to buffer1 and buffer2 load_ab_nn( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, 0, M, N, K ); // Number of main loop iterations is ceil(k/16) int nIt = (K + 16 - 1) / 16; #pragma unroll for (int itr=0; itr<nIt; itr++){ int gStartk = itr * 16; #pragma unroll buffer2smem_nn( aSmem1, aSmem2, bSmem1, bSmem2, aBuffer1, aBuffer2, bBuffer1, bBuffer2 ); if (likely(itr < nIt - 1)){ load_ab_nn( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, gStartk + 16, M, N, K ); } // synchroznie threads in order make sure tiles of A and B are fully // loaded to shared memory. __syncthreads(); // Each thread computes 8 x 8 matrix multiplication // Accumulate intermediate results in cCache // aSmem1, bSmem1, aSmem2, bSmem2 are consumed thread_matmul_v3(aSmem1, bSmem1, cCache, vx, vy); thread_matmul_v3(aSmem2, bSmem2, cCache, vx, vy); // synchronize threads to signal that shared memory is consumed. __syncthreads(); } // At the end of main loop, store cCache to C //write_c(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); write_c_v3(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); } extern "C" __global__ void bmm_tt( const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C, int M, int N, int K ){ int tid = threadIdx.x; // thread idx int bid = blockIdx.z; // batch idx // Neighboring blocks are grouped into PN x PM block groups in order to increase // L1 cache hit rate // There are ceil(M/PM) x ceil(N/PN) block groups in total. 
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN int px = blockIdx.x % _PN_; int py = blockIdx.x / _PN_; int bDimX = (N + (128*_PN_) - 1) / (128*_PN_); int bDimY = (M + (128*_PM_) - 1) / (128*_PM_); int bIdxX = (blockIdx.y % bDimX) * _PN_ + px; int bIdxY = (blockIdx.y / bDimX) * _PM_ + py; int gStartx = bIdxX * 128; // starting index of block on N axis int gStarty = bIdxY * 128; // starting index of block on M axis if (gStartx > N || gStarty > M){ return; } // These are used to re-arrange threads into different shapes // for example: (256) -> (16, 16) -> (8, 32) -> (32, 8) int vx = tid % 16; int vy = tid / 16; int wx = tid % 32; // thread idx in warp int wy = tid / 32; // warp id int dx = tid % 8; int dy = tid / 8; __shared__ _VOLATILE_ float aSmem1[8][128+4]; __shared__ _VOLATILE_ float bSmem1[8][128+4]; __shared__ _VOLATILE_ float aSmem2[8][128+4]; __shared__ _VOLATILE_ float bSmem2[8][128+4]; float aBuffer1[4]; float bBuffer1[4]; float aBuffer2[4]; float bBuffer2[4]; float8 cCache[8]; init_cCache(cCache); // Load initial 16 x 128 tile of A and B to buffer1 and buffer2 load_ab_tt( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, 0, M, N, K ); // Number of main loop iterations is ceil(k/16) int nIt = (K + 16 - 1) / 16; #pragma unroll for (int itr=0; itr<nIt; itr++){ int gStartk = itr * 16; #pragma unroll buffer2smem_tt( aSmem1, aSmem2, bSmem1, bSmem2, aBuffer1, aBuffer2, bBuffer1, bBuffer2 ); if (likely(itr < nIt - 1)){ load_ab_tt( A, B, aBuffer1, aBuffer2, bBuffer1, bBuffer2, bid, gStartx, gStarty, gStartk + 16, M, N, K ); } // synchroznie threads in order make sure tiles of A and B are fully // loaded to shared memory. __syncthreads(); // Each thread computes 8 x 8 matrix multiplication // Accumulate intermediate results in cCache // aSmem1, bSmem1, aSmem2, bSmem2 are consumed thread_matmul_v3(aSmem1, bSmem1, cCache, vx, vy); thread_matmul_v3(aSmem2, bSmem2, cCache, vx, vy); // synchronize threads to signal that shared memory is consumed. __syncthreads(); } // At the end of main loop, store cCache to C //write_c(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); write_c_v3(cCache, C, gStartx, gStarty, vx, vy, bid, M, N); }
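// Host-side launch sketch (illustrative; not part of the original source). Each block
// has 256 threads and computes one 128 x 128 tile of C: blockIdx.x enumerates the
// position inside a _PN_ x _PM_ block group, blockIdx.y enumerates the block groups,
// and blockIdx.z is the batch index. This assumes the kernels are compiled in the same
// translation unit with _PN_ / _PM_ defined (e.g. -D_PN_=2 -D_PM_=2) together with the
// helper device functions (load_ab_*, buffer2smem_*, thread_matmul_v3, write_c_v3);
// projects that JIT-compile these kernels would build the same grid from the host side.
void launch_bmm_nn(const float *A, const float *B, float *C,
                   int batch, int M, int N, int K, cudaStream_t stream) {
    const int PN = _PN_;
    const int PM = _PM_;
    int group_dim_x = (N + 128 * PN - 1) / (128 * PN);  // block groups along N
    int group_dim_y = (M + 128 * PM - 1) / (128 * PM);  // block groups along M

    dim3 block(256);                       // 16 x 16 logical threads, 8 x 8 results each
    dim3 grid(PN * PM,                     // blocks inside one block group
              group_dim_x * group_dim_y,   // number of block groups
              batch);                      // one z-slice per batch element

    bmm_nn<<<grid, block, 0, stream>>>(A, B, C, M, N, K);
}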
//#define RND_MULTIPLIERS_FILE ("rnd_multipliers_32bit.txt") #ifndef RND_MULTIPLIERS_FILE #define RND_MULTIPLIERS_FILE ("rnd_multipliers_32bit.txt") #endif #include <pthread.h> #include <map> #include <cublas_v2.h> #include <cuda.h> #include <curand.h> #include <cutil_inline.h> #include <time.h> #include <curand_kernel.h> #include <Python.h> #include <cuda_ndarray.cuh> //Commented by Ian Goodfellow-- we don't actually need this dependency, it just increase theano compile times //#include <matrix.h> #include "nvmatrix_kernels.cuh" #include "nvmatrix_operators.cuh" #ifdef WARNINGS #define WARN(msg) printf("WARN: File %s, line %d: %s\n", __FILE__, __LINE__, msg); #else #define WARN(msg) ; #endif #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ exit(EXIT_FAILURE);}} while(0) #define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ exit(EXIT_FAILURE);}} while(0) #ifdef _WIN32 #ifdef _NVMATRIX_EXPORT #define DllExport __declspec( dllexport ) #else #define DllExport __declspec( dllimport ) #endif #else //else _WIN32 #define DllExport __attribute__((visibility ("default"))) #endif class DllExport NVMatrix { private: int _numCols, _numRows; int _numElements; int _stride; float* _devData; bool _isTrans; bool _ownsData; // static std::map<int,curandGenerator_t> rndGen; static std::map<int,curandState*> rndDevStates; static pthread_mutex_t *_rndMutex; static void checkCublasError(cublasStatus_t status, const char* msg) { if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, msg, NULL); exit(EXIT_FAILURE); } } char getTransChar() const { /* * not a typo! return opposite character because a * non-transposed krizhevsky matrix is in row-major order while a non-transposed * cublas matrix is in column-major order. */ return _isTrans ? 'n' : 't'; } cublasOperation_t getTransOp() const { /* * not a typo! return opposite character because a * non-transposed krizhevsky matrix is in row-major order while a non-transposed * cublas matrix is in column-major order. */ return _isTrans ? CUBLAS_OP_N : CUBLAS_OP_T; } void _init(int numRows, int numCols); void _init(int numRows, int numCols, int stride, bool isTrans); void _sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols); template<class Agg> float _totalAgg(Agg agg); template<class Agg, class BinaryOp> void _aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op); template<class Agg, class BinaryOp> NVMatrix& _aggregate(int axis, Agg agg, BinaryOp op); template <class Randomizer> void _unaryRandomize(NVMatrix& target, Randomizer rnd); template <class Randomizer> void _binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd); public: NVMatrix(); NVMatrix(bool isTrans); NVMatrix(int numRows, int numCols, bool isTrans=false); // Commented by IG. Depends on Matrix //NVMatrix(const Matrix& like, bool copy); NVMatrix(const NVMatrix& like, bool copy); //Constructor added by Ian Goodfellow. Make a view of a CudaNdarray. NVMatrix(const CudaNdarray * view, int numRows, int numCols, const char * msg); NVMatrix(const NVMatrix& like); // Commented by IG. 
Depends on Matrix // NVMatrix(const Matrix& like); NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans); ~NVMatrix(); // static void initRandom(unsigned long long seed); // static void initRandom(); static int getDeviceID(); static bool isRndInitialized(); static curandState* getCurandState(); static void destroyRandom(); static pthread_mutex_t* makeMutex(); /* * DO NOT DEREFERENCE IN HOST CODE! This is a device memory pointer. */ float* getCellPtr(int i, int j) const { if (_isTrans) { return &_devData[j * _numRows + i]; } return &_devData[i * _numCols + j]; } // Commented by IG. Depends on Matrix // bool isSameDims(const Matrix& m) const { // return m.getNumRows() == _numRows && m.getNumCols() == _numCols; //} bool isSameDims(const NVMatrix& m) const { return m.getNumRows() == _numRows && m.getNumCols() == _numCols; } int getNumRows() const { return _numRows; } int getNumCols() const { return _numCols; } int getStride() const { return _stride; } int getLeadingDim() const { return _isTrans ? _numRows : _numCols; } int getFollowingDim() const { return !_isTrans ? _numRows : _numCols; } /* * FALSE: Row-major order. * TRUE: Column-major order. */ bool isTrans() const { return _isTrans; } bool isView() const { return !_ownsData; } float* getDevData() const { return _devData; } unsigned int getNumElements() const { return _numElements; } /* * Only use if you know what you're doing! * Does not actually transpose matrix. */ void setTrans(bool trans) { if (trans != _isTrans) { assert(isContiguous()); _isTrans = trans; _stride = getLeadingDim(); } } /* * Only use if you know what you're doing! * This toggles whether this object will free its GPU memory when it's destroyed. */ void setView(bool isView) { _ownsData = !isView; } bool isContiguous() const { return _stride == getLeadingDim() || getFollowingDim() == 1; } void truncate() { resize(0,0); } // Commented by IG. 
Depends on Matrix // void copyFromHost(const Matrix& hostMatrix); // void copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix); // void copyToHost(Matrix& hostMatrix) const; // void copyToHost(Matrix& hostMatrix, bool resizeTarget) const; void copy(NVMatrix& dest) const; NVMatrix& copy() const; void addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB); void addProduct(const NVMatrix& a, const NVMatrix &b); void rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const; void rightMult(const NVMatrix &b, NVMatrix &target) const; void rightMult(const NVMatrix &b, float scaleAB); void randomizeUniform(); void addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target); void addGaussianNoise(float stdev, NVMatrix& target); void addGaussianNoise(NVMatrix& stdevs, bool var); void addGaussianNoise(NVMatrix& stdevs); void addGaussianNoise(float stdev); void addGaussianNoise(); void randomizeGaussian(); void randomizeGaussian(float stdev); void randomizeGaussian(float mean, float stdev); void randomizeGaussian(NVMatrix& stdevs); void randomizeGaussian(NVMatrix& stdevs, NVMatrix& target); void binarizeProbs(); void binarizeProbs(NVMatrix& target); void biggerThan(NVMatrix& m, NVMatrix& target); void biggerThan(NVMatrix& m); void biggerThanVector(NVMatrix& vec, NVMatrix& target); void biggerThanVector(NVMatrix& vec); void equals(NVMatrix& m, NVMatrix& target); void equals(NVMatrix& m); void _checkBounds(int startRow, int endRow, int startCol, int endCol) const; NVMatrix& slice(int startRow, int endRow, int startCol, int endCol) const; void slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const; NVMatrix& sliceRows(int startRow, int endRow) const; void sliceRows(int startRow, int endRow, NVMatrix& target) const; NVMatrix& sliceCols(int startCol, int endCol) const; void sliceCols(int startCol, int endCol, NVMatrix& target) const; template <class Op> void apply(Op op, NVMatrix& target) { if (!target.isSameDims(*this)) { target.resize(*this); } int height = target.getFollowingDim(), width = target.getLeadingDim(); dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); if (target.isTrans() == isTrans()) { kEltwiseUnaryOp<Op><<<blocks, threads>>>(_devData, target._devData, height, width, getStride(), target.getStride(), op); cutilCheckMsg("kEltwiseUnaryOp: Kernel execution failed"); } else { bool checkBounds = !(width % ELTWISE_THREADS_X == 0 && height % ELTWISE_THREADS_X == 0); if (checkBounds) { kEltwiseUnaryOpTrans<Op, true><<<blocks, threads>>>(_devData, target._devData, height, width, getStride(), target.getStride(), op); } else { kEltwiseUnaryOpTrans<Op, false><<<blocks, threads>>>(_devData, target._devData, height, width, getStride(), target.getStride(), op); } cutilCheckMsg("kEltwiseUnaryOpTrans: Kernel execution failed"); } } template <class Op> void apply(Op op) { apply(op, *this); } template <class Op> void applyBinary(Op op, NVMatrix& b) { applyBinary(op, b, *this); } template <class Op> void applyBinary(Op op, NVMatrix& b, NVMatrix& target) { assert(this->isSameDims(b)); if (!target.isSameDims(*this)) { target.resize(*this); } int height = target.getFollowingDim(), width = target.getLeadingDim(); dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); if 
(target.isTrans() == isTrans() && target.isTrans() == b.isTrans()) { kEltwiseBinaryOp<Op><<<blocks, threads>>>(_devData, b._devData, target._devData, height, width, getStride(), b.getStride(), target.getStride(), op); cutilCheckMsg("kEltwiseBinaryOp: Kernel execution failed"); } else { // both x here since y divides x bool checkBounds = !(width % ELTWISE_THREADS_X == 0 && height % ELTWISE_THREADS_X == 0); if (target.isTrans() == isTrans() && target.isTrans() != b.isTrans()) { if (checkBounds) { kEltwiseBinaryOpTrans<Op,true,false,false><<<blocks, threads>>>(_devData, b._devData, target._devData, height, width,getStride(), b.getStride(), target.getStride(), op); } else { kEltwiseBinaryOpTrans<Op,false,false,false><<<blocks, threads>>>(_devData, b._devData, target._devData, height, width,getStride(), b.getStride(), target.getStride(), op); } } else if (target.isTrans() != isTrans() && target.isTrans() != b.isTrans()) { if (checkBounds) { kEltwiseBinaryOpTrans<Op,true,true,false><<<blocks, threads>>>(_devData, b._devData, target._devData, height, width,getStride(), b.getStride(), target.getStride(), op); } else { kEltwiseBinaryOpTrans<Op,false,true,false><<<blocks, threads>>>(_devData, b._devData, target._devData, height, width,getStride(), b.getStride(), target.getStride(), op); } } else if (target.isTrans() != isTrans() && target.isTrans() == b.isTrans()) { if (checkBounds) { kEltwiseBinaryOpTrans<Op,true,false,true><<<blocks, threads>>>(b._devData, _devData, target._devData, height, width,b.getStride(), getStride(), target.getStride(), op); } else { kEltwiseBinaryOpTrans<Op,false,false,true><<<blocks, threads>>>(b._devData, _devData, target._devData, height, width, b.getStride(), getStride(), target.getStride(), op); } } cutilCheckMsg("kEltwiseBinaryOpTrans: Kernel execution failed"); } } template <class Op> void applyTernary(Op op, NVMatrix& b, NVMatrix& c, NVMatrix& target) { assert(this->isSameDims(b)); assert(this->isSameDims(c)); // For now ternary ops are only supported for matrices of same transposedness assert(isTrans() == b.isTrans()); assert(isTrans() == c.isTrans()); if (!target.isSameDims(*this) || target.isTrans() != isTrans()) { target.resize(*this); } int height = target.getFollowingDim(), width = target.getLeadingDim(); dim3 blocks(std::min(NUM_BLOCKS_MAX, DIVUP(width, ELTWISE_THREADS_X)), std::min(NUM_BLOCKS_MAX, DIVUP(height, ELTWISE_THREADS_Y))); dim3 threads(ELTWISE_THREADS_X, ELTWISE_THREADS_Y); kEltwiseTernaryOp<Op><<<blocks, threads>>>(_devData, b._devData, c._devData, target._devData, height, width, getStride(), b.getStride(), c.getStride(), target.getStride(), op); cutilCheckMsg("kEltwiseTernaryOp: Kernel execution failed"); } bool resize(int numRows, int numCols); bool resize(const NVMatrix &like); // Commented by IG. 
Depends on Matrix // bool resize(const Matrix &like); void reshape(int numRows, int numCols); NVMatrix& reshaped(int numRows, int numCols); void copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const; void add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target); void add(NVMatrix& b, float scaleB, NVMatrix& target); void add(NVMatrix& b, NVMatrix& target); void add(NVMatrix& b, float scaleB); void add(NVMatrix& b, float scaleA, float scaleB); void add(NVMatrix& b); void eltwiseMult(NVMatrix& b); void eltwiseMult(NVMatrix& b, NVMatrix& target); void eltwiseDivide(NVMatrix& b); void eltwiseDivide(NVMatrix& b, NVMatrix& target); void squaredDiff(NVMatrix& b); void squaredDiff(NVMatrix& b, NVMatrix& target); void subtract(NVMatrix& b, NVMatrix& target); void subtract(NVMatrix& b); void addVector(NVMatrix& vec, float scaleVec, NVMatrix& target); void addVector(NVMatrix& vec); void addVector(NVMatrix& vec, float scaleVec); void addVector(NVMatrix& vec, NVMatrix& target); void equalsVector(NVMatrix& vec, NVMatrix& target); void equalsVector(NVMatrix& vec); void eltwiseMultByVector(NVMatrix& vec, NVMatrix& target); void eltwiseMultByVector(NVMatrix& vec); void eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target); void eltwiseDivideByVector(NVMatrix& vec); void tile(int timesY, int timesX, NVMatrix& target); void sum(int axis, NVMatrix& target); void addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum); // Commented by IG NVMatrix& max(int axis); void max(int axis, NVMatrix& target); /* Commented by IG. Depends on _aggregate NVMatrix& sum(int axis); void min(int axis, NVMatrix& target); NVMatrix& min(int axis); */ // Commented by IG. Depends on sum // float mean(); // Commented by IG float max(); /* Commented by IG. Depends on _totalAgg float sum(); float min(); Depend on dotProduct: float norm2(); float norm(); */ void inRangeInc(float lower, float upper); void inRangeInc(float lower, float upper, NVMatrix& target); void inRangeExc(float lower, float upper); void inRangeExc(float lower, float upper, NVMatrix& target); void biggerThanScalar(float scalar); void biggerThanScalar(float scalar, NVMatrix& target); void smallerThanScalar(float scalar); void smallerThanScalar(float scalar, NVMatrix& target); void addScalar(float scaleThis, float scalar, NVMatrix& target); void addScalar(float scalar, NVMatrix& target); void addScalar(float scalar); void minWithScalar(float scalar, NVMatrix& target); void minWithScalar(float scalar); void maxWithScalar(float scalar, NVMatrix& target); void maxWithScalar(float scalar); void pow(float p, NVMatrix& target); void pow(float p); void scale(float _scale); void scale(float _scale, NVMatrix& target); // Commented by IG. Depends on sum // float dotProduct(NVMatrix& b); /* * Does SOFT transpose and returns result, leaving this matrix unchanged */ NVMatrix& getTranspose(); /* * Does HARD transpose and puts result in target */ void transpose(NVMatrix& target); /* * Does SOFT transpose */ void transpose(); bool transpose(bool trans); void flipTrans(NVMatrix& target); NVMatrix& flipTrans(); /* Commented out by Ian Goodfellow. These methods bring in more dependencies / increase the theano compile time, and we don't really need them. 
void print(int startRow, int rows, int startCol, int cols) const; void print(int rows, int cols) const; */ void printShape(const char* name) const; template <class Op> void applyBinaryV(Op op, NVMatrix& vec, NVMatrix& target) { assert(&target != &vec); // for now assert(vec.getNumRows() == 1 || vec.getNumCols() == 1); assert(vec.getNumRows() == _numRows || vec.getNumCols() == _numCols); assert(vec.isContiguous()); target.resize(*this); // target must be same orientation as me for now int width = getLeadingDim(); //_isTrans ? _numRows : _numCols; int height = getFollowingDim(); //_isTrans ? _numCols : _numRows; dim3 threads(ADD_VEC_THREADS_X, ADD_VEC_THREADS_Y); dim3 blocks(MIN(NUM_BLOCKS_MAX, DIVUP(width, ADD_VEC_THREADS_X)), MIN(NUM_BLOCKS_MAX, DIVUP(height, ADD_VEC_THREADS_Y))); if (vec.getNumRows() == _numRows && !isTrans() || vec.getNumCols() == _numCols && isTrans()) { kColVectorOp<Op><<<blocks,threads>>>(_devData, vec._devData, target._devData, width, height, getStride(), target.getStride(), op); } else { kRowVectorOp<Op><<<blocks,threads>>>(_devData, vec._devData, target._devData, width, height, getStride(), target.getStride(), op); } cutilCheckMsg("Kernel execution failed"); // cudaThreadSynchronize(); } /* Commented by Ian Goodfellow because it depends on _totalAgg template<class UnaryOperator> float argMax(UnaryOperator u) { return _totalAgg(NVMatrixAggs::ArgMax<UnaryOperator>(u)); } */ }; #endif /* NVMATRIX_H_ */
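// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the header above). apply()/applyBinary()
// always launch with a grid capped at NUM_BLOCKS_MAX per dimension and rely
// on the element-wise kernels walking the matrix in grid-sized strides,
// indexing through the row stride so padded views still work. A minimal,
// self-contained version of that pattern follows; the thread/block constants
// and the functor are placeholders, not the values from nvmatrix_kernels.cuh.
#include <algorithm>
#include <cuda_runtime.h>

#define SK_ELTWISE_THREADS_X 32
#define SK_ELTWISE_THREADS_Y 8
#define SK_NUM_BLOCKS_MAX 4096
#define SK_DIVUP(a, b) (((a) + (b) - 1) / (b))

template <class Op>
__global__ void skEltwiseUnaryOp(const float* src, float* dst, int height, int width,
                                 int srcStride, int dstStride, Op op) {
    // Grid-stride loops: a capped grid still covers a matrix of any size.
    for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += gridDim.y * blockDim.y) {
        for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += gridDim.x * blockDim.x) {
            dst[y * dstStride + x] = op(src[y * srcStride + x]);
        }
    }
}

struct SkSquare {  // example functor, analogous to the NVMatrixOps functors
    __device__ float operator()(float a) const { return a * a; }
};

inline void skApplySquare(const float* d_src, float* d_dst, int height, int width, int stride) {
    dim3 threads(SK_ELTWISE_THREADS_X, SK_ELTWISE_THREADS_Y);
    dim3 blocks(std::min(SK_NUM_BLOCKS_MAX, SK_DIVUP(width, SK_ELTWISE_THREADS_X)),
                std::min(SK_NUM_BLOCKS_MAX, SK_DIVUP(height, SK_ELTWISE_THREADS_Y)));
    skEltwiseUnaryOp<<<blocks, threads>>>(d_src, d_dst, height, width, stride, stride, SkSquare());
}
// ---------------------------------------------------------------------------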
* \file dnn/src/cuda/conv_bias/quint4x4x32_wmma/reduce_with_scale_data.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./reduce_with_scale_data.cuh" #include "./wmma_conv_integer_u4.cuh" #include "src/cuda/cub/util_ptx.cuh" using namespace megdnn; using namespace cuda; using namespace wmma_conv_integer_subbyte; namespace { template < typename ConvConfig, size_t thread_blk_x, size_t thread_blk_y, size_t pixels_per_thread_x, size_t pixels_per_thread_y> struct TileCounter { MEGDNN_STATIC_ASSERT( thread_blk_x % WARP_SIZE == 0, "thread block size in dim x not divided by warpSize"); static const size_t spatial_tile_x = thread_blk_x * pixels_per_thread_x; static const size_t spatial_tile_y = thread_blk_y * pixels_per_thread_y; static const size_t global_load_tile_x = (spatial_tile_x - 1) * ConvConfig::SW + ConvConfig::FW; static const size_t global_load_tile_y = (spatial_tile_y - 1) * ConvConfig::SH + ConvConfig::FH; static const size_t reg_cache_x = (global_load_tile_x + WARP_SIZE - 1) / WARP_SIZE; static const size_t warps_per_block = (thread_blk_x * thread_blk_y) / WARP_SIZE; static const size_t reg_cache_y = (global_load_tile_y + warps_per_block - 1) / warps_per_block; static const size_t smem_stride = global_load_tile_x + (global_load_tile_x % 2 == 0); }; template < typename ConvConfig_, size_t thread_blk_x, size_t thread_blk_y, size_t pixels_per_thread_x, size_t pixels_per_thread_y> __global__ void reduce_in_spatial_block_and_along_input_channel_with_scale_u4( int32_t* __restrict__ dst, const uint8_t* __restrict__ src, int IC, int IH, int IW, int OH, int OW, int PH, int PW, int32_t scale, int32_t zero) { typedef TileCounter< ConvConfig_, thread_blk_x, thread_blk_y, pixels_per_thread_x, pixels_per_thread_y> TileCounter_; const int bidx = blockIdx.x; const int bidy = blockIdx.y; const int bidz = blockIdx.z; const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int oh_start = bidy * TileCounter_::spatial_tile_y; const int ow_start = bidx * TileCounter_::spatial_tile_x; const int ih_base = oh_start * ConvConfig_::SH - PH; const int iw_base = ow_start * ConvConfig_::SW - PW; const uint8_t* __restrict__ sptr = src + bidz * IC * IH * IW / 2 + (ih_base * IW + iw_base) * 4; __shared__ uint8_t smem[TileCounter_::global_load_tile_y][TileCounter_::smem_stride * 4]; uint32_t reg_cache[TileCounter_::reg_cache_y][TileCounter_::reg_cache_x]; int32_t acc[pixels_per_thread_y][pixels_per_thread_x]; int32_t* __restrict__ dptr = dst + bidz * OH * OW + ow_start + oh_start * OW; const int tid = tidy * thread_blk_x + tidx; const int idx_in_warp = tid % WARP_SIZE; const int warp_id = tid / WARP_SIZE; #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { acc[i][j] = 0; } } #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int iw = idx_in_warp + j * WARP_SIZE; int ih = warp_id + i * TileCounter_::warps_per_block; if (ih_base + ih >= 0 && ih_base + ih < IH && iw_base + iw >= 0 && iw_base + iw < IW) { reg_cache[i][j] = *(const uint32_t*)(&sptr[(ih * IW + iw) * 4]); } else { reg_cache[i][j] = zero; } } } #pragma unroll for (int i = 0; i 
< TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int x = idx_in_warp + j * WARP_SIZE; int y = warp_id + i * TileCounter_::warps_per_block; if (y < TileCounter_::global_load_tile_y && x < TileCounter_::global_load_tile_x) { *(uint32_t*)(&smem[y][x * 4]) = reg_cache[i][j]; } } } __syncthreads(); const int ic_blks = (IC + 7) / 8; #pragma unroll for (int c = 0; c < ic_blks; ++c) { sptr += IH * IW * 4; if (c < ic_blks - 1) { #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int iw = idx_in_warp + j * WARP_SIZE; int ih = warp_id + i * TileCounter_::warps_per_block; if (ih_base + ih >= 0 && ih_base + ih < IH && iw_base + iw >= 0 && iw_base + iw < IW) { reg_cache[i][j] = *(const uint32_t*)(&sptr[(ih * IW + iw) * 4]); } else { reg_cache[i][j] = zero; } } } } #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { int x = (j * thread_blk_x + tidx) * ConvConfig_::SW; int y = (i * thread_blk_y + tidy) * ConvConfig_::SH; #pragma unroll for (int fh = 0; fh < ConvConfig_::FH; ++fh) { #pragma unroll for (int fw = 0; fw < ConvConfig_::FW; ++fw) { uint32_t sdata = *(uint32_t*)(&smem[y + fh][(x + fw) * 4]); #pragma unroll for (int r = 0; r < 8; r++) { uint8_t val = (sdata & 0xF); acc[i][j] += val; sdata >>= 4; } } } } } if (c < ic_blks - 1) { __syncthreads(); #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int x = idx_in_warp + j * WARP_SIZE; int y = warp_id + i * TileCounter_::warps_per_block; if (y < TileCounter_::global_load_tile_y && x < TileCounter_::global_load_tile_x) { *(uint32_t*)(&smem[y][x * 4]) = reg_cache[i][j]; } } } __syncthreads(); } } #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { int x = j * thread_blk_x + tidx; int y = i * thread_blk_y + tidy; if (oh_start + y < OH && ow_start + x < OW) { dptr[y * OW + x] = acc[i][j] * scale; } } } } template < typename ConvConfig, size_t thread_blk_x, size_t thread_blk_y, size_t pixels_per_thread_x, size_t pixels_per_thread_y> struct LargeChannelTileCounter { static const size_t spatial_tile_x = thread_blk_x * pixels_per_thread_x; static const size_t spatial_tile_y = pixels_per_thread_y; static const size_t global_load_tile_x = (spatial_tile_x - 1) * ConvConfig::SW + ConvConfig::FW; static const size_t global_load_tile_y = (spatial_tile_y - 1) * ConvConfig::SH + ConvConfig::FH; static const size_t reg_cache_x = (global_load_tile_x + WARP_SIZE - 1) / WARP_SIZE; static const size_t warps_per_block = (thread_blk_x * thread_blk_y) / WARP_SIZE; static const size_t reg_cache_y = (global_load_tile_y * thread_blk_y + warps_per_block - 1) / warps_per_block; static const size_t smem_stride = global_load_tile_x + (global_load_tile_x % 2 == 0); static const size_t reduce_dim_0 = thread_blk_y; static const size_t reduce_dim_1 = pixels_per_thread_y; static const size_t reduce_dim_2 = thread_blk_x * pixels_per_thread_x; }; template < typename ConvConfig_, size_t thread_blk_x, size_t thread_blk_y, size_t pixels_per_thread_x, size_t pixels_per_thread_y> __global__ void reduce_in_spatial_block_and_along_input_channel_with_scale_u4_large_channels( int32_t* __restrict__ dst, const uint8_t* __restrict__ src, int IC, int IH, int IW, int OH, int OW, int PH, int PW, int32_t scale, int32_t zero) { 
typedef LargeChannelTileCounter< ConvConfig_, thread_blk_x, thread_blk_y, pixels_per_thread_x, pixels_per_thread_y> TileCounter_; const int bidx = blockIdx.x; const int bidz = blockIdx.z; const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int blocks_per_row = (OW + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; const int bidw = bidx % blocks_per_row; const int bidh = bidx / blocks_per_row; const int oh_start = bidh * TileCounter_::spatial_tile_y; const int ow_start = bidw * TileCounter_::spatial_tile_x; const int ih_base = oh_start * ConvConfig_::SH - PH; const int iw_base = ow_start * ConvConfig_::SW - PW; const uint8_t* __restrict__ sptr = src + bidz * IC * IH * IW / 2 + (ih_base * IW + iw_base) * 4; __shared__ uint8_t smem[thread_blk_y][TileCounter_::global_load_tile_y] [TileCounter_::smem_stride * 4]; __shared__ int32_t s_reduce[TileCounter_::reduce_dim_0][TileCounter_::reduce_dim_1] [TileCounter_::reduce_dim_2 + 1]; uint32_t reg_cache[TileCounter_::reg_cache_y][TileCounter_::reg_cache_x]; int32_t acc[pixels_per_thread_y][pixels_per_thread_x]; int32_t* __restrict__ dptr = dst + bidz * OH * OW + ow_start + oh_start * OW; const int tid = tidy * thread_blk_x + tidx; const int idx_in_warp = tid % WARP_SIZE; const int warp_id = tid / WARP_SIZE; const int ic_blks = IC / 8; #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { acc[i][j] = 0; } } #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int iw = idx_in_warp + j * WARP_SIZE; int hc = warp_id + i * TileCounter_::warps_per_block; int ih = hc % TileCounter_::global_load_tile_y; int ic_blk = hc / TileCounter_::global_load_tile_y; if (ih_base + ih >= 0 && ih_base + ih < IH && iw_base + iw >= 0 && iw_base + iw < IW) { reg_cache[i][j] = 0; if (ic_blk < ic_blks) reg_cache[i][j] = *(const uint32_t*)(&sptr [(ic_blk * IH * IW + ih * IW + iw) * 4]); } else { reg_cache[i][j] = (ic_blk < ic_blks) ? zero : 0; } } } #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int x = idx_in_warp + j * WARP_SIZE; int hc = warp_id + i * TileCounter_::warps_per_block; int ih = hc % TileCounter_::global_load_tile_y; int ic_blk = hc / TileCounter_::global_load_tile_y; if (ic_blk < thread_blk_y && x < TileCounter_::global_load_tile_x) { *(uint32_t*)(&smem[ic_blk][ih][x * 4]) = reg_cache[i][j]; } } } __syncthreads(); int blks = (ic_blks + thread_blk_y - 1) / thread_blk_y; #pragma unroll for (int c = 0; c < blks; ++c) { sptr += IH * IW * thread_blk_y * 4; if (c < blks - 1) { #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int iw = idx_in_warp + j * WARP_SIZE; int hc = warp_id + i * TileCounter_::warps_per_block; int ih = hc % TileCounter_::global_load_tile_y; int ic_blk = hc / TileCounter_::global_load_tile_y; int g_ic_blk = ic_blk + c * thread_blk_y; if (ih_base + ih >= 0 && ih_base + ih < IH && iw_base + iw >= 0 && iw_base + iw < IW) { reg_cache[i][j] = 0; if (g_ic_blk < ic_blks) reg_cache[i][j] = *(const uint32_t*)(&sptr [(ic_blk * IH * IW + ih * IW + iw) * 4]); } else { reg_cache[i][j] = (g_ic_blk < ic_blks) ? 
zero : 0; } } } } #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { int x = (j * thread_blk_x + tidx) * ConvConfig_::SW; int y = i * ConvConfig_::SH; #pragma unroll for (int fh = 0; fh < ConvConfig_::FH; ++fh) { #pragma unroll for (int fw = 0; fw < ConvConfig_::FW; ++fw) { uint32_t sdata = *(uint32_t*)(&smem[tidy][y + fh][(x + fw) * 4]); #pragma unroll for (int r = 0; r < 8; r++) { uint8_t val = (sdata & 0xF); acc[i][j] += val; sdata >>= 4; } } } } } if (c < blks - 1) { __syncthreads(); #pragma unroll for (int i = 0; i < TileCounter_::reg_cache_y; ++i) { #pragma unroll for (int j = 0; j < TileCounter_::reg_cache_x; ++j) { int x = idx_in_warp + j * WARP_SIZE; int hc = warp_id + i * TileCounter_::warps_per_block; int ih = hc % TileCounter_::global_load_tile_y; int ic_blk = hc / TileCounter_::global_load_tile_y; if (ic_blk < thread_blk_y && x < TileCounter_::global_load_tile_x) { *(uint32_t*)(&smem[ic_blk][ih][x * 4]) = reg_cache[i][j]; } } } __syncthreads(); } } #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { s_reduce[tidy][i][tidx + j * thread_blk_x] = acc[i][j]; } } const int nr_ty_per_warp = WARP_SIZE / thread_blk_x; #pragma unroll for (int k = (thread_blk_y >> 1); k; k >>= 1) { if (k >= nr_ty_per_warp) { __syncthreads(); } else { cub::WARP_SYNC(0xffffffff); } if (tidy < k) { #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { s_reduce[tidy][i][tidx + j * thread_blk_x] += s_reduce[tidy + k][i][tidx + j * thread_blk_x]; } } } } if (tidy == 0) { #pragma unroll for (int i = 0; i < pixels_per_thread_y; ++i) { #pragma unroll for (int j = 0; j < pixels_per_thread_x; ++j) { int x = j * thread_blk_x + tidx; int y = i; if (oh_start + y < OH && ow_start + x < OW) { dptr[y * OW + x] = s_reduce[0][i][tidx + j * thread_blk_x] * scale; } } } } } } // namespace void megdnn::cuda::do_dispatch_reduce_with_scale_data_u4( int32_t* dst, const uint8_t* src, int batch_size, int ih, int iw, int oh, int ow, int ph, int pw, int fh, int fw, int sh, int sw, int ic, int32_t scale, uint8_t zp_data, cudaStream_t stream) { zp_data = (zp_data << 4) | zp_data; int32_t zero = (zp_data << 24) | (zp_data << 16) | (zp_data << 8) | zp_data; if (fh == 3 && fw == 3 && sh == 1 && sw == 1) { typedef ConvConfig<3, 3, 1, 1> ConvConfig_; if (ic <= 32 && iw >= 128) { constexpr size_t thread_blk_x_ = WARP_SIZE; constexpr size_t thread_blk_y_ = 2; constexpr size_t pixels_per_thread_x_ = 4; constexpr size_t pixels_per_thread_y_ = 2; typedef TileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row; gridDim.y = blocks_per_col; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } else { if (iw <= 32) { constexpr size_t thread_blk_x_ = WARP_SIZE / 2; constexpr size_t thread_blk_y_ = 8; constexpr size_t pixels_per_thread_x_ = 1; constexpr size_t 
pixels_per_thread_y_ = 4; typedef LargeChannelTileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row * blocks_per_col; gridDim.y = 1; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4_large_channels< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } else { constexpr size_t thread_blk_x_ = WARP_SIZE / 2; constexpr size_t thread_blk_y_ = 4; constexpr size_t pixels_per_thread_x_ = 4; constexpr size_t pixels_per_thread_y_ = 4; typedef LargeChannelTileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row * blocks_per_col; gridDim.y = 1; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4_large_channels< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } } } else if (fh == 5 && fw == 5 && sh == 1 && sw == 1) { typedef ConvConfig<5, 5, 1, 1> ConvConfig_; if (ic <= 32 && iw >= 128) { constexpr size_t thread_blk_x_ = WARP_SIZE; constexpr size_t thread_blk_y_ = 2; constexpr size_t pixels_per_thread_x_ = 4; constexpr size_t pixels_per_thread_y_ = 2; typedef TileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row; gridDim.y = blocks_per_col; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } else { if (iw <= 32) { constexpr size_t thread_blk_x_ = WARP_SIZE / 2; constexpr size_t thread_blk_y_ = 8; constexpr size_t pixels_per_thread_x_ = 1; constexpr size_t pixels_per_thread_y_ = 4; typedef LargeChannelTileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row * blocks_per_col; gridDim.y = 1; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4_large_channels< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, 
blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } else { constexpr size_t thread_blk_x_ = WARP_SIZE / 2; constexpr size_t thread_blk_y_ = 4; constexpr size_t pixels_per_thread_x_ = 4; constexpr size_t pixels_per_thread_y_ = 4; typedef LargeChannelTileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row * blocks_per_col; gridDim.y = 1; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4_large_channels< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } } } else if (fh == 7 && fw == 7 && sh == 1 && sw == 1) { typedef ConvConfig<7, 7, 1, 1> ConvConfig_; if (ic <= 32 && iw >= 128) { constexpr size_t thread_blk_x_ = WARP_SIZE; constexpr size_t thread_blk_y_ = 2; constexpr size_t pixels_per_thread_x_ = 4; constexpr size_t pixels_per_thread_y_ = 2; typedef TileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row; gridDim.y = blocks_per_col; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } else { constexpr size_t thread_blk_x_ = WARP_SIZE / 2; constexpr size_t thread_blk_y_ = 8; constexpr size_t pixels_per_thread_x_ = 1; constexpr size_t pixels_per_thread_y_ = 4; typedef LargeChannelTileCounter< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_> TileCounter_; dim3 gridDim; dim3 blockDim; int blocks_per_row = (ow + TileCounter_::spatial_tile_x - 1) / TileCounter_::spatial_tile_x; int blocks_per_col = (oh + TileCounter_::spatial_tile_y - 1) / TileCounter_::spatial_tile_y; blockDim.x = thread_blk_x_; blockDim.y = thread_blk_y_; gridDim.x = blocks_per_row * blocks_per_col; gridDim.y = 1; gridDim.z = batch_size; reduce_in_spatial_block_and_along_input_channel_with_scale_u4_large_channels< ConvConfig_, thread_blk_x_, thread_blk_y_, pixels_per_thread_x_, pixels_per_thread_y_><<<gridDim, blockDim, 0, stream>>>( dst, src, ic, ih, iw, oh, ow, ph, pw, scale, zero); } } after_kernel_launch(); } // vim: ft=cpp syntax=cuda.doxygen
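// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the MegEngine source above). Both kernels
// treat one uint32_t as eight packed u4 (4-bit unsigned) values and reduce
// them nibble by nibble, while do_dispatch_reduce_with_scale_data_u4()
// replicates the u4 zero point into every nibble of a 32-bit word so that
// padded pixels contribute the zero point to the sum. A host-side model of
// those two bit tricks (function names here are hypothetical):
#include <cstdint>
#include <cstdio>

// Sum the eight 4-bit lanes of a packed word, as the kernels do per filter tap.
static inline int32_t sk_sum_u4x8(uint32_t packed) {
    int32_t acc = 0;
    for (int r = 0; r < 8; ++r) {      // 8 nibbles per 32-bit word
        acc += static_cast<int32_t>(packed & 0xF);
        packed >>= 4;
    }
    return acc;
}

// Broadcast a u4 zero point into all eight nibbles, matching the dispatch code.
static inline uint32_t sk_replicate_zp_u4(uint8_t zp) {
    uint8_t b = static_cast<uint8_t>((zp << 4) | (zp & 0xF));   // two nibbles per byte
    return (uint32_t(b) << 24) | (uint32_t(b) << 16) | (uint32_t(b) << 8) | uint32_t(b);
}

inline void sk_selftest_u4_reduce() {
    uint32_t zero = sk_replicate_zp_u4(3);
    // A fully padded (out-of-bounds) load contributes 8 * zp to the accumulator:
    std::printf("sum over zero word = %d (expected %d)\n", (int)sk_sum_u4x8(zero), 8 * 3);
}
// ---------------------------------------------------------------------------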
#include "../grid_info.cuh" #include <kat/on_device/shuffle.cuh> #include <kat/on_device/builtins.cuh> #include <kat/on_device/atomics.cuh> #include <kat/on_device/non-builtins.cuh> #include <kat/on_device/ptx.cuh> #include <kat/on_device/math.cuh> #include <kat/on_device/common.cuh> #include <type_traits> ///@cond #include <kat/detail/execution_space_specifiers.hpp> ///@endcond namespace kat { // Lane-mask-related functions. // TODO: Refactor these out of here KAT_FD unsigned num_lanes_in(lane_mask_t mask) { // Note the type cast from signed to unsigned return builtins::population_count(mask); } /** * @brief Determines which lane is the first within a lane mask * (considered in LSB-to-MSB order) * * @tparam ReturnWarpSizeForEmptyMask when set to true, the * semantics of this function will be consistent for empty * (all-zero) lane masks, in that the "first lane" inside * the mask will be one past the last lane - like in * the @ref `last_lane_in()` function * * @return index of the first 1-bit in the warp-size-bit mask; * if no lanes have a corresponding 1-bit, -1 or 32 (warp_size) * is returned, depending on @tparam ReturnWarpSizeForEmptyMask */ template <bool ReturnWarpSizeForEmptyMask = true> KAT_FD int first_lane_in(lane_mask_t mask) { return non_builtins::count_trailing_zeros<lane_mask_t, ReturnWarpSizeForEmptyMask>(mask); } /** * @brief Determines which lane is the first within a lane mask * (considered in LSB-to-MSB order) * * @return index of the first 1-bit in the warp-size-bit mask; * if no lanes have a corresponding 1-bit, 32 is returned; */ KAT_FD int last_lane_in(lane_mask_t mask) { return builtins::count_leading_zeros(mask); } namespace collaborative { namespace warp { // If we want to refer to other primitives, we'll make those references explicit; // but we do want to be able to say `warp::id()` without prefixing that with anything. 
//namespace grid = grid_info::grid; //namespace block = grid_info::block; //namespace warp = grid_info::warp; //namespace thread = grid_info::thread; namespace lane = grid_info::lane; // lane conditions // ---------------------------- /** * Checks whether a condition holds for an entire warp of threads * * @param condition A boolean value (passed as an integer * since that's what nVIDIA GPUs actually check with the HW instruction * @return true if condition is non-zero for all threads */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD bool all_lanes_satisfy(int condition) #else KAT_FD bool all_lanes_satisfy(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) return builtins::warp::all_lanes_satisfy(condition); #else return builtins::warp::all_lanes_satisfy(condition, lane_mask); #endif } /** * Checks whether a condition holds for none of the threads in a warp * * @param condition A boolean value (passed as an integer * since that's what nVIDIA GPUs actually check with the HW instruction * @return true if condition is zero for all threads */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD bool no_lanes_satisfy(int condition) #else KAT_FD bool no_lanes_satisfy(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) return all_lanes_satisfy(not condition); #else return all_lanes_satisfy(not condition, lane_mask); #endif } /** * Checks whether a condition holds for an entire warp of threads * * @param condition A boolean value (passed as an integer * since that's what nVIDIA GPUs actually check with the HW instruction * @return true if condition is non-zero for all threads */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD bool all_lanes_agree_on(int condition) #else KAT_FD bool all_lanes_agree_on(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) auto ballot_results = builtins::warp::ballot(condition); #else auto ballot_results = builtins::warp::ballot(condition, lane_mask); #endif return ballot_results == 0 // none satisfy the condition or ~ballot_results == 0; // all satisfy the condition); } /** * Checks whether a condition holds for at least one of the threads * in a warp * * @param condition A boolean value (passed as an integer * since that's what nVIDIA GPUs actually check with the HW instruction * @return true if condition is non-zero for at least one thread */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD bool some_lanes_satisfy(int condition) #else KAT_FD bool some_lanes_satisfy(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) return !no_lanes_satisfy(condition); #else return !no_lanes_satisfy(condition, lane_mask); #endif } /** * Count the lanes in a warp for which some condition holds * * @param condition the condition value for each lane (true if non-zero) * @return the number of threads in the warp whose @p condition is true (non-zero) */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD native_word_t num_lanes_satisfying(int condition) #else KAT_FD native_word_t num_lanes_satisfying(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) return num_lanes_in(builtins::warp::ballot(condition)); #else return num_lanes_in(builtins::warp::ballot(condition, lane_mask)); #endif } /** * Count the lanes in a warp which have the same condition value as the calling lane * * @param condition the condition value for each lane (true if non-zero) * @return the number of threads in the warp whose @p condition is the same value * as the 
calling lane (including the calling lane itself) */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD native_word_t num_lanes_agreeing_on(int condition) #else KAT_FD native_word_t num_lanes_agreeing_on(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { auto satisfying = #if (__CUDACC_VER_MAJOR__ < 9) num_lanes_satisfying(condition); #else num_lanes_satisfying(condition, lane_mask); #endif return condition ? satisfying : warp_size - satisfying; } /** * Check whether a condition holds for most lanes in a warp * * @param condition A boolean value (passed as an integer * since that's what nVIDIA GPUs actually check with the HW instruction */ #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD bool majority_vote(int condition) #else KAT_FD bool majority_vote(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) return num_lanes_satisfying(condition) > (warp_size / 2); #else return num_lanes_satisfying(condition, lane_mask) > (num_lanes_in(lane_mask) / 2); #endif } // -------------------------------------------------- #if !defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700 /** * Compares values provided by each lane in a warp, checking for uniqueness * * @param value to check for matches; each lane brings its own * @param lane_mask the lanes participating in the uniqueness check (other * lanes' values are not considered); should be uniform among participating * lanes * @return true if the lane this value provided had no matches among the values * provided by other lanes */ #if (__CUDACC_VER_MAJOR__ < 9) template <typename T> KAT_FD bool in_unique_lane_with(T value) #else template <typename T> KAT_FD bool in_unique_lane_with(T value, lane_mask_t lane_mask = full_warp_mask) #endif { auto self_lane_mask = (1 << lane::id()); // Note we're _not_ using the PTX builtin for obtaining the self lane mask from a special // regiater - since that would probably be much slower. // Note: This assumes a lane's bit is always on in the result of get_matching_lanes(); // this must indeed be the case, because The PTX spec demands that the calling lane be // part of its own masked lanes; see: // // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-match-sync // #if (__CUDACC_VER_MAJOR__ < 9) return builtins::warp::get_matching_lanes(value) == self_lane_mask; #else return builtins::warp::get_matching_lanes(value, lane_mask) == self_lane_mask; #endif } #endif // !defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700 /* * * Notes: * * 1. This relies on the shuffle generics! Without them it'll fail for > 32-bit types. * 2.Let's consider replacing this function with some proxy to enable syntax such as * * warp_choose(my_var).from_lane(some_lane_id) * 3. The position parameter is a signed int because that's what the shuffle functions * take; although perhaps we should make it a native_word_t? 
*/ template <typename T> KAT_FD T get_from_lane(T value, int source_lane) { return shuffle_arbitrary(value, source_lane); } template <typename T> KAT_FD T get_from_first_lane(T value) { return get_from_lane(value, grid_info::warp::first_lane); } template <typename T> KAT_FD T get_from_last_lane(T value) { return get_from_lane(value, grid_info::warp::last_lane); } /** * Determines which is the first lane within a warp which satisfies some * boolean condition * * @param[in] condition an essentially-boolena value representing whether * or not the calling thread (lane) satisfies the condition; typed * as an int since that's what CUDA primitives take mostly. * * @return index of the first lane in the warp for which condition is non-zero. * If no lane has non-zero condition, then the result is either undefined, * or if @tparam DefinedResultOnNoSatisfaction was set - 32 (warp_size). */ template <bool DefinedResultOnNoSatisfaction = true> #if (__CUDACC_VER_MAJOR__ < 9) KAT_FD native_word_t first_lane_satisfying(int condition) #else KAT_FD native_word_t first_lane_satisfying(int condition, lane_mask_t lane_mask = full_warp_mask) #endif { #if (__CUDACC_VER_MAJOR__ < 9) auto ballot_results = builtins::warp::ballot(condition); #else auto ballot_results = builtins::warp::ballot(condition, lane_mask); #endif return first_lane_in<DefinedResultOnNoSatisfaction>(ballot_results); } KAT_FD lane_mask_t get_active_lanes() { return __activemask(); } KAT_FD unsigned num_active_lanes() { return num_lanes_in(get_active_lanes()); } namespace detail { // Note: Return value is unspecified for empty lane masks template <bool PreferFirstLane = true> KAT_FD unsigned select_leader_lane_among(unsigned lanes_mask) { return PreferFirstLane ? first_lane_in(lanes_mask) : last_lane_in(lanes_mask); } template <bool PreferFirstLane = true> KAT_FD bool am_leader_lane(unsigned active_lanes_mask) { // This code hints that leadership is automatic given who's active - and in fact, it is, // despite the name "select leader lane". TODO: Perhaps we should rename that function, then? return select_leader_lane_among<PreferFirstLane>(active_lanes_mask) == lane::id(); } KAT_FD unsigned lane_index_among(lane_mask_t mask) { return num_lanes_in(ptx::special_registers::lanemask_lt() & mask); // Note: If we know we're in a linear grid, it may be faster to use threadIdx.x which // is probably already known: // // return num_lanes_in(builtins::bit_field::extract_bits(mask, threadIdx.x % kat::warp_size)); } } // namespace detail /** * This is a mechanism for making exactly one lane act instead of the whole warp, * which supports the case of some threads being inactive (e.g. having exited). * * @tparam PreferFirstLane if true, the first lane will be the acting leader * whenever it is active - at the cost of making this functions slightly more * expensive (an extra subtraction instruction) * @return Iif PreferFirstLane is true, and the first lane is active, * then 0; the index of some active lane otherwise. */ template <bool PreferFirstLane = true> KAT_FD unsigned select_leader_lane() { return detail::select_leader_lane_among<PreferFirstLane>( get_active_lanes() ); } /** * This applies the leader lane selection mechanism to obtain a condition * for being the leader lane. 
* * @tparam PreferFirstLane if true, the first lane will be the acting leader * whenever it is active - at the cost of making this functions slightly more * expensive (an extra subtraction instruction) * @return 1 for exactly one of the active lanes in each warp, 0 for all others */ template <bool PreferFirstLane = true> KAT_FD bool am_leader_lane() { return detail::am_leader_lane<PreferFirstLane>( get_active_lanes() ); } template <typename Function> KAT_FD typename std::result_of<Function()>::type have_a_single_lane_compute( Function f, unsigned designated_computing_lane) { typename std::result_of<Function()>::type result; if (lane::id() == designated_computing_lane) { result = f(); } return get_from_lane(result, designated_computing_lane); } template <typename Function, bool PreferFirstLane = true> KAT_FD typename std::result_of<Function()>::type have_a_single_lane_compute(Function f) { unsigned computing_lane { select_leader_lane<PreferFirstLane>() }; return have_a_single_lane_compute(f, computing_lane); } template <typename Function> KAT_FD typename std::result_of<Function()>::type have_first_lane_compute(Function f) { return have_a_single_lane_compute<Function>(f, grid_info::warp::first_lane); } template <typename Function> KAT_FD typename std::result_of<Function()>::type have_last_lane_compute(Function f) { return have_a_single_lane_compute<Function>(f, grid_info::warp::last_lane); } KAT_FD unsigned index_among_active_lanes() { return detail::lane_index_among( get_active_lanes() ); } KAT_FD unsigned last_active_lane_index() { return detail::lane_index_among( get_active_lanes() ); } // TODO: Consider implementing have_first_active_lane_compute and // have_last_active_lane_compute /** * When every (active) lane in a warp needs to increment a counter, * use this function to avoid all of them having to execute atomics; * each active lane gets the "previous value" as though they had all * incremented in order. * * @note It's not clear to me how better this is from just using atomics * and being done with it * * @todo extend this to other atomic operations */ template <typename T> KAT_FD T active_lanes_atomically_increment(T* counter) { auto lanes_mask = get_active_lanes(); auto active_lane_count = num_lanes_in(lanes_mask); auto perform_all_increments = [counter, active_lane_count]() -> T { return atomic::add<T>(counter, active_lane_count); }; auto value_before_all_lane_increments = have_a_single_lane_compute(perform_all_increments, detail::select_leader_lane_among(lanes_mask)); // the return value simulates the case of every lane having done its // own atomic increment return value_before_all_lane_increments + detail::lane_index_among(lanes_mask); } template <typename Function, typename Size = unsigned> KAT_FD void at_warp_stride(Size length, Function f) { // If the length is known at compile-time, perhaps this loop can be unrolled #pragma unroll for(promoted_size_t<Size> pos = lane::id(); pos < length; pos += warp_size) { f(pos); } } } // namespace warp } // namespace collaborative namespace linear_grid { namespace collaborative { namespace warp { /** * A structure for warp-level search results. Semantically, * it should have been std::optional-like, but that might * incur too much overhead, so we're just encoding the * 'empty' indication using the result fields. 
*/ template <typename T> struct search_result_t { native_word_t lane_index; T value; KAT_FHD bool is_set() const { return lane_index < warp_size; } KAT_FHD void unset() { lane_index = warp_size; } KAT_FHD bool operator==(const search_result_t<T>& other) { return (lane_index == other.lane_index) and ( (not is_set() ) or (value == other.value) ); } }; /** * Have each lane search for its own value of interest within the * sorted sequence of single values provided by all the warp lanes. * * @note The amount of time this function takes is very much * data-dependent! * * @note this function assumes all warp lanes are active. * * @todo Does it matter if the _needles_, as opposed to the * _hay straws_, are sorted? I wonder. * * @todo consider specializing for non-full warps * * @todo Specialize for smaller and larger data types: For * larger ones, compare 4-byte parts of the datum separately * (assuming @tparam T is bitwise-comparable); for smaller * ones, consider having lanes collect multiple data and pass * it on to other lanes, which can then spare some shuffling. * * @param lane_needle the value the current lane wants to search * for * @param lane_hay_straw the warp_size hay "straws" passed by * the lanes make up the entire "haystack" we search in. They * _must_ be known to be in order, i < j => straw of lane i < * straw of lane j. They are accessed using intra-warp shuffling, * solely. * @return For lane i, the index of the first lane j with * straw-of-j > needle-of-i, along with straw-of-j; if there is no such * lane j, warp_size is returned as the lane index and an arbitrary * value is returned as the result. */ template <typename T, bool AssumeNeedlesAreSorted = false> KAT_FD search_result_t<T> multisearch(const T& lane_needle, const T& lane_hay_straw) { search_result_t<T> result; result.unset(); struct { unsigned lower, upper; // lower is inclusive, upper is exclusive } bounds; if (lane_needle <= lane_hay_straw) { bounds.lower = grid_info::warp::first_lane; bounds.upper = grid_info::lane::id(); } else { bounds.lower = grid_info::lane::id() + 1; bounds.upper = warp_size; } enum : unsigned { cutoff_to_linear_search = 6 }; // is 6 a good choice for a cutoff? should it depend on the microarch? while (bounds.upper - bounds.lower >= cutoff_to_linear_search) { unsigned mid = (bounds.lower + bounds.upper) / 2; auto mid_lane_hay_straw = shuffle_arbitrary(lane_needle, mid); if (lane_needle <= mid_lane_hay_straw) { bounds.lower = mid + 1; } else { bounds.upper = mid; } } for(unsigned lane = bounds.lower; lane < bounds.upper; lane++) { auto hay_straw = shuffle_arbitrary(lane_needle, lane); if (not result.is_set() and hay_straw > lane_needle) { result = { lane, hay_straw }; } // Note: We don't break from this loop even if we've found // our result - as we still need to participate in shuffles } return result; } template <typename Function, typename Size = unsigned> KAT_FD void at_warp_stride(Size length, Function f) { // If the length is known at compile-time, perhaps this loop can be unrolled #pragma unroll for(promoted_size_t<Size> pos = linear_grid::grid_info::lane::id(); pos < length; pos += warp_size) { f(pos); } } namespace detail { /** * Sometimes you have a run-time-determined range of indices * for which you need to compute some predicate, but you have * it constrained to be a nice round value, say a multiple of * warp_size * warp_size - or even a constant multiple, but by * means of, say, a configuration file. 
Now, the compiler will * not be able to figure out this is the case, and will compile * in the condition checks and the code for handling slack - * which you don't actually need. To prevent that and cater * to my OCD, you can use a template parameter to indicate how * nicely-behaving your input length is. */ enum predicate_computation_length_slack_t { has_no_slack, //!< Length known to be multiple of warp_size * warp_size may_have_full_warps_of_slack,//!< Length only known to be multiple of warp_size may_have_arbitrary_slack //!< Length may have any value, make no assumptions about it }; } // namespace detail /** * @brief Arrange the computation of a predicate by a warp so that * both reads and writes are coalesced and divergence is minimized * * This relies on the fact that there are as many threads in a warp as * there are bits in the native register size (a 4-byte unsigned int * has 32 bits, and warp size is 32). This means that the whole warp * computing the predicate once (i.e. for 32 elements), the results in * bits exactly suffice for a single write of a 4-byte value by a * single thread. Since we want coalesced writes, of 128 bytes at a * time, we need that many results for each of the lanes in a warp, * i.e. we need to have 32 times all-warp computations of the * predicate, with every consecutive 32 providing the write value * to one of the lanes. That's how this function arranges the * computation. * * @note The predicate is passed no input data other than the index * within the range 0..@p length - 1; if you need such input, * pass a closure (e.g. a lambda which captures the relevant data). * * @note the last bits of the last bit container - those which are * beyond @param length bits overall - are slack bits; it is assumed * we're allowed to write anything to them. * * @note There is no inter-warp collaboration here; the outputs * may be completely disjoint; and any warp's range's slack is * separate. See @ref kat::linear_grid::collabopration::grid::compute_predicate_at_warp_stride * for the case of a single joint range to cover. * * @param computed_predicate the result of the computation of the * predicate for each of the indices in the range * @param length The number of elements for which to compute the * predicate, which is also the number of bits (not the size in * bytes) of @p computed_predicate * @param predicate */ template < typename Predicate, typename Size = native_word_t, detail::predicate_computation_length_slack_t PossibilityOfSlack = detail::may_have_arbitrary_slack // set Index to something smaller than Size if you have a size that's // something like 1 << sizeof(uint32_t), and then you have to use uint64_t as Size > KAT_FD void compute_predicate_at_warp_stride( unsigned* computed_predicate, Predicate& predicate, Size length) { static_assert(warp_size == size_in_bits<native_word_t>(), "The assumption of having as many threads in a warp " "as there are bits in the native register size - " "doesn't hold; you can't use this function."); // The are three ways of proceeding with the computations, by decreasing preference: // // 1. Full-warp coalesced reads, full-warp coalesced writes (in units of warp_size^2) // 2. Full-warp coalesced reads, non-coalesced writes (in units of warp_size) // 3. 
Non-coalesced reads, non-coalesced writes (in units of 1) // // Using compile-time logic we'll try to completely avoid any consideration of // cases 2 and 3 when they're not absolutely necessary; otherwise, we'll do most of // the work in case 1 promoted_size_t<Size> full_warp_reads_output_length = length >> log_warp_size; auto full_warp_writes_output_length = (PossibilityOfSlack == detail::has_no_slack) ? full_warp_reads_output_length : round_down_to_full_warps(full_warp_reads_output_length); const auto lane_index = grid_info::lane::id(); promoted_size_t<Size> input_pos = lane_index; // This is the finger-licking-good part :-) promoted_size_t<Size> output_pos; // We'll need this after the loop as well. for (output_pos = lane_index; output_pos < full_warp_writes_output_length; output_pos += warp_size) { native_word_t warp_results_write_buffer; #pragma unroll for(native_word_t writing_lane = 0; writing_lane < warp_size; writing_lane++, input_pos += warp_size) { auto thread_result = predicate(input_pos); auto warp_results = builtins::warp::ballot(thread_result); if (lane_index == writing_lane) { warp_results_write_buffer = warp_results; } } computed_predicate[output_pos] = warp_results_write_buffer; } // ... and all the rest is ugly but necessary if (PossibilityOfSlack != detail::has_no_slack) { // In this case, the output length is not known to be a multiple // of 1024 = 32 * 32 = warp_size * warp_size // // Note that we're continuing to advance our input and output // position variables promoted_size_t<Size> full_warp_reads_output_slack_length = full_warp_reads_output_length - full_warp_writes_output_length; native_word_t warp_results_write_buffer; if (full_warp_reads_output_slack_length > 0) { for (native_word_t writing_lane = 0; writing_lane < full_warp_reads_output_slack_length; writing_lane++, input_pos += warp_size) { auto thread_result = predicate(input_pos); auto warp_results = builtins::warp::ballot(thread_result); if (lane_index == writing_lane) { warp_results_write_buffer = warp_results; } } } native_word_t num_writing_lanes = full_warp_reads_output_slack_length; if (PossibilityOfSlack == detail::may_have_arbitrary_slack) { native_word_t input_slack_length = length % warp_size; // let's hope this gets optimized... if (input_slack_length > 0) { auto thread_result = (input_pos < length) ? predicate(input_pos) : false; auto warp_results = builtins::warp::ballot(thread_result); if (lane_index == num_writing_lanes) { warp_results_write_buffer = warp_results; } num_writing_lanes++; } } // Note it could theoretically be the case that num_writing_lanes is 0 if (lane_index < num_writing_lanes) { computed_predicate[output_pos] = warp_results_write_buffer; } } } } // namespace warp } // namespace collaborative } // namespace linear_grid } // namespace kat #endif // CUDA_KAT_WARP_LEVEL_PRIMITIVES_CUH_
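// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the cuda-kat header above).
// active_lanes_atomically_increment() is the classic "warp-aggregated
// atomics" pattern: one leader lane issues a single atomicAdd on behalf of
// every currently-active lane, and each lane reconstructs the value it would
// have seen had it incremented on its own. The same idea written directly
// with CUDA intrinsics (CUDA 9+, 1D blocks assumed), independent of the
// kat:: wrappers; names below are hypothetical:
#include <cuda_runtime.h>

__device__ unsigned sk_warp_aggregated_inc(unsigned* counter) {
    unsigned mask   = __activemask();                        // lanes that reached this call
    unsigned lane   = threadIdx.x & 31u;                     // lane id within the warp
    unsigned leader = __ffs(mask) - 1u;                      // lowest active lane leads
    unsigned rank   = __popc(mask & ((1u << lane) - 1u));    // my index among active lanes
    unsigned base   = 0;
    if (lane == leader) {
        base = atomicAdd(counter, __popc(mask));             // one atomic for the whole warp
    }
    base = __shfl_sync(mask, base, leader);                  // broadcast the pre-increment value
    return base + rank;                                      // as if each lane had incremented
}

// Example use: stream compaction of the positive inputs.
// (*out_count must be zero-initialized by the caller.)
__global__ void sk_compact_positive(const float* in, float* out, unsigned* out_count, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] > 0.0f) {
        out[sk_warp_aggregated_inc(out_count)] = in[i];
    }
}
// ---------------------------------------------------------------------------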
#pragma once #include <math/vector.h> #include <math/matrix.h> #include "config.h" #include "fragment_data.cuh" #include "IntermediateGeometryStorage.cuh" #include "shaders/vertex_simple.cuh" #include "shaders/clipspace.cuh" #include "ptx_primitives.cuh" extern "C" { struct Viewport { float left; float top; float right; float bottom; }; __constant__ Viewport viewport; __constant__ float pixel_step[2]; __constant__ float* c_depthBuffer; __constant__ unsigned int c_bufferDims[2]; surface<void, cudaSurfaceType2D> color_buffer; __global__ void clearColorBuffer(uchar4 color, unsigned int buffer_width, unsigned int buffer_height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < buffer_width && y < buffer_height) { surf2Dwrite(color, color_buffer, 4 * x, y); } } } extern "C" { __constant__ float *c_positions, *c_normals, *c_texCoords; __constant__ unsigned int *c_indices, *c_patchData; } namespace FreePipe { template<class FragmentIn, int TInterpolators> struct Interpolators { static const int NumInterpolators = FragmentIn::Interpolators; math::float3 interpolators[NumInterpolators]; __device__ void setup(const FragmentIn* v0, const FragmentIn* v1, const FragmentIn* v2, const math::float3x3& M_inv) { #pragma unroll for (int i = 0; i < NumInterpolators; ++i) interpolators[i] = math::float3(v0->attributes[i], v1->attributes[i], v2->attributes[i]) * M_inv; } __device__ void interpolate(FragmentIn& frag, math::float3 p, float w) { #pragma unroll for (int i = 0; i < NumInterpolators; ++i) frag.attributes[i] = dot(interpolators[i], p) * w; } }; template<class FragmentIn> struct Interpolators<FragmentIn, 0> { __device__ void setup(const FragmentIn* v0, const FragmentIn* v1, const FragmentIn* v2, const math::float3x3& M_inv) { } __device__ void interpolate(FragmentIn& frag, math::float3 p, float w) { } }; template<class VertexShader> __device__ typename VertexShader::VertexOut runVertexShader(unsigned int vId) { math::float3 pos = math::float3(c_positions[vId * 3], c_positions[vId * 3 + 1], c_positions[vId * 3 + 2]); math::float3 normal = math::float3(c_normals[vId * 3], c_normals[vId * 3 + 1], c_normals[vId * 3 + 2]); math::float2 tex = math::float2(c_texCoords[vId * 2], c_texCoords[vId * 2 + 1]); typename VertexShader::VertexOut out = VertexShader::process(pos, normal, tex); return out; } template<> __device__ typename Shaders::ClipSpaceVertexShader::VertexOut runVertexShader<Shaders::ClipSpaceVertexShader>(unsigned int vId) { math::float4 pos = math::float4(c_positions[vId * 4], c_positions[vId * 4 + 1], c_positions[vId * 4 + 2], c_positions[vId * 4 + 3]); return Shaders::ClipSpaceVertexShader::process(pos); } template<class VertexShader, class FragmentShader> __device__ void process_fragments_h(float* depth_buffer, unsigned int buffer_width, unsigned int buffer_height, float pixel_step_x, float pixel_step_y, unsigned int triangle_id) { unsigned int id = triangle_id; // every thread works on one triangle for now //if (id < num_triangles) { typename VertexShader::VertexOut vert0 = runVertexShader<VertexShader>(c_indices[3 * id]); typename VertexShader::VertexOut vert1 = runVertexShader<VertexShader>(c_indices[3 * id + 1]); typename VertexShader::VertexOut vert2 = runVertexShader<VertexShader>(c_indices[3 * id + 2]); //unsigned int id0 = geometryOutStorage.indices[3 * id]; //unsigned int id1 = geometryOutStorage.indices[3 * id + 1]; //unsigned int id2 = geometryOutStorage.indices[3 * id + 2]; math::float4 v0 = vert0.pos; math::float4 v1 = vert1.pos; 
math::float4 v2 = vert2.pos; math::float3 p0 = math::float3(v0.x, v0.y, v0.w); math::float3 p1 = math::float3(v1.x, v1.y, v1.w); math::float3 p2 = math::float3(v2.x, v2.y, v2.w); math::float3x3 M = math::float3x3( v0.x, v1.x, v2.x, v0.y, v1.y, v2.y, v0.w, v1.w, v2.w ); math::float3x3 M_adj = adj(M); float det = dot(M_adj.row1(), M.column1()); if (det > -0.00001f) return; math::float3x3 M_inv = (1.0f / det) * M_adj; float l0 = 1.0f / v0.w; float l1 = 1.0f / v1.w; float l2 = 1.0f / v2.w; math::float3 u0 = M_inv.row1(); math::float3 u1 = M_inv.row2(); math::float3 u2 = M_inv.row3(); math::float3 uw = math::float3(1.0f, 1.0f, 1.0f) * M_inv; math::float3 uz = math::float3(v0.z, v1.z, v2.z) * M_inv; Interpolators<typename VertexShader::VertexOut, VertexShader::VertexOut::Interpolators> interpolator; interpolator.setup(&vert0, &vert1, &vert2, M_inv); float vp_scale_x = 0.5f * (viewport.right - viewport.left); float vp_scale_y = 0.5f * (viewport.bottom - viewport.top); float x0 = (v0.x * l0 + 1.0f) * vp_scale_x + viewport.left; float x1 = (v1.x * l1 + 1.0f) * vp_scale_x + viewport.left; float x2 = (v2.x * l2 + 1.0f) * vp_scale_x + viewport.left; float y0 = (v0.y * l0 + 1.0f) * vp_scale_y + viewport.top; float y1 = (v1.y * l1 + 1.0f) * vp_scale_y + viewport.top; float y2 = (v2.y * l2 + 1.0f) * vp_scale_y + viewport.top; float x_min = max(min(x0, min(x1, x2)), viewport.left); float y_min = max(min(y0, min(y1, y2)), viewport.top); float x_max = min(max(x0, max(x1, x2)), viewport.right); float y_max = min(max(y0, max(y1, y2)), viewport.bottom); int i_start = ceil(x_min - 0.50000f); int j_start = ceil(y_min - 0.50000f); int i_end = x_max + 0.50000f; int j_end = y_max + 0.50000f; //printf("%d %d %d %d\n", i_start, i_end, j_start, j_end); for (int j = j_start; j < j_end; ++j) { float y = -1.0f + (j + 0.5f) * pixel_step[1]; for (int i = i_start; i < i_end; ++i) { float x = -1.0f + (i + 0.5f) * pixel_step[0]; math::float3 p = math::float3(x, y, 1.0f); float f0 = dot(u0, p); float f1 = dot(u1, p); float f2 = dot(u2, p); if (f0 >= 0.0f && f1 >= 0.0f && f2 >= 0.0f) { float rcpw = dot(uw, p); float w = 1.0f / rcpw; float z = dot(uz, p); typename FragmentShader::FragementIn frag; frag.pos = math::float4(x, y, z, 1.0f); interpolator.interpolate(frag, p, w); FragementData data; data.depth = z; if (z >= -1.0f && z <= 1.0f) // clipping! 
{ if (FragmentShader::process(data, frag)) { //float c = 0.5f*(z + 1.0f); //float c = f0 * w; //float* pd = depth_buffer + j * buffer_width + i; //float current_z; //bool notdone = true; //while (notdone) //{ // current_z = atomicExch(pd, -1.0f); // if (current_z != -1.0f) // { // if (data.depth < current_z) // { // surf2Dwrite(make_uchar4(255 * data.color.x, 255 * data.color.y, 255 * data.color.z, 255 * data.color.w), color_buffer, 4 * i, j); // __threadfence(); // //atomicExch(pd, z); // *pd = data.depth; // } // else // //atomicExch(pd, current_z); // *pd = current_z; // notdone = false; // } //} float d = __float_as_int(data.depth); if (!DEPTH_TEST || d < atomicMin(reinterpret_cast<int*>(depth_buffer + j * buffer_width + i), d)) { surf2Dwrite(make_uchar4(255 * data.color.x, 255 * data.color.y, 255 * data.color.z, 255), color_buffer, 4 * i, j); } } } } } } } } //template<class VertexShader, class FragmentShader> //__device__ void process_fragments(IntermediateGeometryStorage& geometryOutStorage, float* depth_buffer, // unsigned int buffer_width, unsigned int buffer_height, // float xPixelStep, float yPixelStep, // unsigned int numTriangles, unsigned int triangleId) //{ // unsigned int id = triangleId; // // every thread works on one triangle for now // if (id < numTriangles) // { // unsigned int id0 = geometryOutStorage.indices[3 * id]; // unsigned int id1 = geometryOutStorage.indices[3 * id + 1]; // unsigned int id2 = geometryOutStorage.indices[3 * id + 2]; // math::float4 v0 = geometryOutStorage.accessProcessedVertices<typename VertexShader::VertexOut>()[id0].pos; // math::float4 v1 = geometryOutStorage.accessProcessedVertices<typename VertexShader::VertexOut>()[id1].pos; // math::float4 v2 = geometryOutStorage.accessProcessedVertices<typename VertexShader::VertexOut>()[id2].pos; // // devide by w // float w0_inv = 1.0f / v0.w; // v0.x *= w0_inv; v0.y *= w0_inv; v0.z *= w0_inv; // float w1_inv = 1.0f / v1.w; // v1.x *= w1_inv; v1.y *= w1_inv; v1.z *= w1_inv; // float w2_inv = 1.0f / v2.w; // v2.x *= w2_inv; v2.y *= w2_inv; v2.z *= w2_inv; // // compute edge equations // math::float2 n0(-(v1.y - v0.y), v1.x - v0.x); // math::float2 n1(-(v2.y - v1.y), v2.x - v1.x); // math::float2 n2(-(v0.y - v2.y), v0.x - v2.x); // float c0 = -dot(n0, math::float2(v0.x, v0.y)); // float c1 = -dot(n1, math::float2(v1.x, v1.y)); // float c2 = -dot(n2, math::float2(v2.x, v2.y)); // // compute bounding box limited to -1 +1 // float xmin = max(min(min(v0.x, v1.x), v2.x) - 0.00001f, -1.0f); // float xmax = min(max(max(v0.x, v1.x), v2.x) + 0.00001f, 1.0f); // float ymin = max(min(min(v0.y, v1.y), v2.y) - 0.00001f, -1.0f); // float ymax = min(max(max(v0.y, v1.y), v2.y) + 0.00001f, 1.0f); // // compute pixel positions // float xstart = (viewport.left + 0.5f*(xmin + 1.0f)*(viewport.right - viewport.left)); // float xend = (viewport.left + 0.5f*(xmax + 1.0f)*(viewport.right - viewport.left)); // float ystart = (viewport.top + 0.5f*(ymin + 1.0f)*(viewport.bottom - viewport.top)); // float yend = (viewport.top + 0.5f*(ymax + 1.0f)*(viewport.bottom - viewport.top)); // //float xstart = 0; // //float xend = buffer_width - 1; // //float ystart = 0; // //float yend = buffer_height - 1; // unsigned int pxstart = xstart + 0.5f; // unsigned int pystart = ystart + 0.5f; // unsigned int pxend = min(static_cast<unsigned int>(xend), c_bufferDims[0] - 1); // unsigned int pyend = min(static_cast<unsigned int>(yend), c_bufferDims[1] - 1); // xstart = ((pxstart + 0.5f) - viewport.left) * xPixelStep - 1.0f; // ystart = ((pystart + 
0.5f) - viewport.top) * yPixelStep - 1.0f; // // run through pixels inside bounding box // unsigned int py = pystart; // for (float y = ystart; py <= pyend; y += yPixelStep, ++py) // { // unsigned int px = pxstart; // for (float x = xstart; px <= pxend; x += xPixelStep, ++px) // { // math::float2 pixCoord(x, y); // if (dot(n0,pixCoord) + c0 > 0 && // dot(n1,pixCoord) + c1 > 0 && // dot(n2,pixCoord) + c2 > 0) // { // // TODO depth + interpolation // // run fragmenmt shader // FragementData data; // data.depth = 0; // // // TODO compute interpolated values // typename FragmentShader::FragementIn frag = geometryOutStorage.accessProcessedVertices<typename VertexShader::VertexOut>()[id0]; // if (FragmentShader::process(data, frag)) // { // //// TODO blending... // //if (px >= buffer_width || py >= buffer_height) // //printf("outch: %d/%d %d/%d\n", px, buffer_width, py, buffer_height); // //else // surf2Dwrite(make_uchar4(255 * data.color.x, 255 * data.color.y, 255 * data.color.z, 255 * data.color.w), color_buffer, 4 * px, py); // } // } // } // } // } //} } #endif //INCLUDED_FREEPIPE_FRAGMENT_PROCESSING
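/* A minimal, self-contained host-side sketch (not part of the original pipeline) illustrating the
 * homogeneous rasterization math used by process_fragments_h above: the 3x3 matrix M is built from
 * the clip-space (x, y, w) of the three vertices as columns, adj(M)/det gives M_inv, and the rows of
 * M_inv act as edge functions whose values, scaled by w = 1/dot(uw, p), are the perspective-correct
 * barycentric weights. The sign of det encodes winding (the kernel culls when det > -eps). All names
 * below (Vec3, dot3, adjugate, the example vertices) are hypothetical helpers for this illustration only.
 */
#include <cstdio>

struct Vec3 { float x, y, z; };

// dot product of two 3-vectors
static float dot3(const Vec3& a, const Vec3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

// adjugate (transposed cofactor matrix) of a 3x3 matrix stored row-major
static void adjugate(const float m[3][3], float a[3][3]) {
    a[0][0] =  m[1][1]*m[2][2] - m[1][2]*m[2][1];
    a[0][1] = -(m[0][1]*m[2][2] - m[0][2]*m[2][1]);
    a[0][2] =  m[0][1]*m[1][2] - m[0][2]*m[1][1];
    a[1][0] = -(m[1][0]*m[2][2] - m[1][2]*m[2][0]);
    a[1][1] =  m[0][0]*m[2][2] - m[0][2]*m[2][0];
    a[1][2] = -(m[0][0]*m[1][2] - m[0][2]*m[1][0]);
    a[2][0] =  m[1][0]*m[2][1] - m[1][1]*m[2][0];
    a[2][1] = -(m[0][0]*m[2][1] - m[0][1]*m[2][0]);
    a[2][2] =  m[0][0]*m[1][1] - m[0][1]*m[1][0];
}

int main() {
    // clip-space (x, y, w) of one triangle; the columns of M are the vertices
    Vec3 v0 = { -0.5f, -0.5f, 1.0f };
    Vec3 v1 = {  0.5f, -0.5f, 1.0f };
    Vec3 v2 = {  0.0f,  0.5f, 2.0f };   // further away -> larger w

    float M[3][3] = { { v0.x, v1.x, v2.x },
                      { v0.y, v1.y, v2.y },
                      { v0.z, v1.z, v2.z } };   // third row holds the clip w of each vertex

    float A[3][3];
    adjugate(M, A);
    // det(M) = first row of adj(M) dotted with first column of M
    float det = A[0][0]*M[0][0] + A[0][1]*M[1][0] + A[0][2]*M[2][0];

    // rows of M_inv = adj(M)/det are the three edge functions u0, u1, u2
    Vec3 u0 = { A[0][0]/det, A[0][1]/det, A[0][2]/det };
    Vec3 u1 = { A[1][0]/det, A[1][1]/det, A[1][2]/det };
    Vec3 u2 = { A[2][0]/det, A[2][1]/det, A[2][2]/det };
    // uw = (1,1,1) * M_inv, i.e. the column sums of M_inv; dot(uw, p) = 1/w at the pixel
    Vec3 uw = { u0.x + u1.x + u2.x, u0.y + u1.y + u2.y, u0.z + u1.z + u2.z };

    // sample point in NDC, p = (x, y, 1); the pixel is inside iff all edge functions are >= 0
    Vec3 p = { 0.0f, 0.0f, 1.0f };
    float f0 = dot3(u0, p), f1 = dot3(u1, p), f2 = dot3(u2, p);
    float w  = 1.0f / dot3(uw, p);   // perspective correction factor (clip w at the pixel)
    printf("f0=%f f1=%f f2=%f inside=%d\n", f0, f1, f2, (f0 >= 0.0f && f1 >= 0.0f && f2 >= 0.0f));
    printf("perspective-correct barycentrics: %f %f %f\n", f0 * w, f1 * w, f2 * w);  // sum to 1
    return 0;
}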
//#include <cutil.h> // cutil32.lib #include <cutil_math.h> // cutil32.lib #include <string.h> #include <assert.h> #include <windows.h> //#include <cuda_gl_interop.h> #include <stdio.h> #include <math.h> extern void app_printf ( char* format, ... ); extern void app_printEXIT ( char* format, ... ); extern char app_getch (); #include "fluid_system_host.cuh" #include "fluid_system_kern.cuh" FluidParams fcuda; // CPU Fluid params FluidParams* mcuda; // GPU Fluid params bufList fbuf; // GPU Particle buffers bool cudaCheck ( cudaError_t status, char* msg ) { if ( status != cudaSuccess ) { app_printf ( "CUDA ERROR: %s\n", cudaGetErrorString ( status ) ); app_getch (); MessageBox ( NULL, cudaGetErrorString ( status), msg, MB_OK ); return false; } else { //app_printf ( "%s. OK.\n", msg ); } return true; } void cudaExit () { int argc = 1; char* argv[] = {"fluids"}; cudaDeviceReset(); } // Initialize CUDA void cudaInit() { int argc = 1; char* argv[] = {"fluids"}; int count = 0; int i = 0; cudaError_t err = cudaGetDeviceCount(&count); if ( err==cudaErrorInsufficientDriver) { app_printEXIT( "CUDA driver not installed.\n"); } if ( err==cudaErrorNoDevice) { app_printEXIT ( "No CUDA device found.\n"); } if ( count == 0) { app_printEXIT ( "No CUDA device found.\n"); } for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) if(prop.major >= 1) break; } if(i == count) { app_printEXIT ( "No CUDA device found.\n"); } cudaSetDevice(i); app_printf( "CUDA initialized.\n"); cudaDeviceProp p; cudaGetDeviceProperties ( &p, 0); app_printf ( "-- CUDA --\n" ); app_printf ( "Name: %s\n", p.name ); app_printf ( "Revision: %d.%d\n", p.major, p.minor ); app_printf ( "Global Mem: %d\n", p.totalGlobalMem ); app_printf ( "Shared/Blk: %d\n", p.sharedMemPerBlock ); app_printf ( "Regs/Blk: %d\n", p.regsPerBlock ); app_printf ( "Warp Size: %d\n", p.warpSize ); app_printf ( "Mem Pitch: %d\n", p.memPitch ); app_printf ( "Thrds/Blk: %d\n", p.maxThreadsPerBlock ); app_printf ( "Const Mem: %d\n", p.totalConstMem ); app_printf ( "Clock Rate: %d\n", p.clockRate ); fbuf.mgridactive = 0x0; // Allocate the sim parameters cudaCheck ( cudaMalloc ( (void**) &mcuda, sizeof(FluidParams) ), "Malloc FluidParams mcuda" ); // Allocate particle buffers cudaCheck ( cudaMalloc ( (void**) &fbuf.mpos, sizeof(float)*3 ), "Malloc mpos" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mvel, sizeof(float)*3), "Malloc mvel" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mveleval, sizeof(float)*3), "Malloc mveleval" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mforce, sizeof(float)*3), "Malloc mforce" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mpress, sizeof(float) ), "Malloc mpress" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mdensity, sizeof(float) ), "Malloc mdensity" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgcell, sizeof(uint)), "Malloc mgcell" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgndx, sizeof(uint)), "Malloc mgndx" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mclr, sizeof(uint)), "Malloc mclr" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.msortbuf, sizeof(uint) ), "Malloc msortbu" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgrid, 1 ), "Malloc mgrid" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridcnt, 1 ), "Malloc mgridcnt" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridoff, 1 ), "Malloc mgridoff" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridactive, 1 ), "Malloc mgridactive"); //cudaCheck ( cudaMalloc ( (void**) &fbuf.mcluster, sizeof(uint) ) ); preallocBlockSumsInt ( 1 ); }; // Compute number of blocks to 
create int iDivUp (int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads) { numThreads = min( maxThreads, numPnts ); numBlocks = iDivUp ( numPnts, numThreads ); } void FluidClearCUDA () { cudaCheck ( cudaFree ( fbuf.mpos ), "Free mpos" ); cudaCheck ( cudaFree ( fbuf.mvel ), "Free mvel" ); cudaCheck ( cudaFree ( fbuf.mveleval ), "Free mveleval" ); cudaCheck ( cudaFree ( fbuf.mforce ), "Free mforce" ); cudaCheck ( cudaFree ( fbuf.mpress ), "Free mpress"); cudaCheck ( cudaFree ( fbuf.mdensity ), "Free mdensity" ); cudaCheck ( cudaFree ( fbuf.mgcell ), "Free mgcell" ); cudaCheck ( cudaFree ( fbuf.mgndx ), "Free mgndx" ); cudaCheck ( cudaFree ( fbuf.mclr ), "Free mclr" ); //cudaCheck ( cudaFree ( fbuf.mcluster ) ); cudaCheck ( cudaFree ( fbuf.msortbuf ), "Free msortbuf" ); cudaCheck ( cudaFree ( fbuf.mgrid ), "Free mgrid" ); cudaCheck ( cudaFree ( fbuf.mgridcnt ), "Free mgridcnt" ); cudaCheck ( cudaFree ( fbuf.mgridoff ), "Free mgridoff" ); cudaCheck ( cudaFree ( fbuf.mgridactive ), "Free mgridactive" ); } void FluidSetupCUDA ( int num, int gsrch, int3 res, float3 size, float3 delta, float3 gmin, float3 gmax, int total, int chk ) { fcuda.pnum = num; fcuda.gridRes = res; fcuda.gridSize = size; fcuda.gridDelta = delta; fcuda.gridMin = gmin; fcuda.gridMax = gmax; fcuda.gridTotal = total; fcuda.gridSrch = gsrch; fcuda.gridAdjCnt = gsrch*gsrch*gsrch; fcuda.gridScanMax = res; fcuda.gridScanMax -= make_int3( fcuda.gridSrch, fcuda.gridSrch, fcuda.gridSrch ); fcuda.chk = chk; // Build Adjacency Lookup int cell = 0; for (int y=0; y < gsrch; y++ ) for (int z=0; z < gsrch; z++ ) for (int x=0; x < gsrch; x++ ) fcuda.gridAdj [ cell++] = ( y * fcuda.gridRes.z+ z )*fcuda.gridRes.x + x ; app_printf ( "CUDA Adjacency Table\n"); for (int n=0; n < fcuda.gridAdjCnt; n++ ) { app_printf ( " ADJ: %d, %d\n", n, fcuda.gridAdj[n] ); } // Compute number of blocks and threads int threadsPerBlock = 192; computeNumBlocks ( fcuda.pnum, threadsPerBlock, fcuda.numBlocks, fcuda.numThreads); // particles computeNumBlocks ( fcuda.gridTotal, threadsPerBlock, fcuda.gridBlocks, fcuda.gridThreads); // grid cell // Allocate particle buffers fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads); app_printf ( "CUDA Allocate: \n" ); app_printf ( " Pnts: %d, t:%dx%d=%d, Size:%d\n", fcuda.pnum, fcuda.numBlocks, fcuda.numThreads, fcuda.numBlocks*fcuda.numThreads, fcuda.szPnts); app_printf ( " Grid: %d, t:%dx%d=%d, bufGrid:%d, Res: %dx%dx%d\n", fcuda.gridTotal, fcuda.gridBlocks, fcuda.gridThreads, fcuda.gridBlocks*fcuda.gridThreads, fcuda.szGrid, (int) fcuda.gridRes.x, (int) fcuda.gridRes.y, (int) fcuda.gridRes.z ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mpos, fcuda.szPnts*sizeof(float)*3 ), "Malloc mpos" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mvel, fcuda.szPnts*sizeof(float)*3 ), "Malloc mvel" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mveleval, fcuda.szPnts*sizeof(float)*3 ), "Malloc mveleval" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mforce, fcuda.szPnts*sizeof(float)*3 ), "Malloc mforce" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mpress, fcuda.szPnts*sizeof(float) ), "Malloc mpress" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mdensity, fcuda.szPnts*sizeof(float) ), "Malloc mdensity" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgcell, fcuda.szPnts*sizeof(uint) ), "Malloc mgcell" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgndx, fcuda.szPnts*sizeof(uint)), "Malloc mgndx" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mclr, fcuda.szPnts*sizeof(uint) ), 
"Malloc mclr" ); //cudaCheck ( cudaMalloc ( (void**) &fbuf.mcluster, fcuda.szPnts*sizeof(uint) ) ); int temp_size = 4*(sizeof(float)*3) + 2*sizeof(float) + 2*sizeof(int) + sizeof(uint); cudaCheck ( cudaMalloc ( (void**) &fbuf.msortbuf, fcuda.szPnts*temp_size ), "Malloc msortbuf" ); // Allocate grid fcuda.szGrid = (fcuda.gridBlocks * fcuda.gridThreads); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgrid, fcuda.szPnts*sizeof(int) ), "Malloc mgrid" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridcnt, fcuda.szGrid*sizeof(int) ), "Malloc mgridcnt" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridoff, fcuda.szGrid*sizeof(int) ), "Malloc mgridoff" ); cudaCheck ( cudaMalloc ( (void**) &fbuf.mgridactive, fcuda.szGrid*sizeof(int) ), "Malloc mgridactive" ); // Transfer sim params to device updateSimParams ( &fcuda ); cudaThreadSynchronize (); // Prefix Sum - Preallocate Block sums for Sorting deallocBlockSumsInt (); preallocBlockSumsInt ( fcuda.gridTotal ); } void FluidParamCUDA ( float ss, float sr, float pr, float mass, float rest, float3 bmin, float3 bmax, float estiff, float istiff, float visc, float damp, float fmin, float fmax, float ffreq, float gslope, float gx, float gy, float gz, float al, float vl ) { fcuda.psimscale = ss; fcuda.psmoothradius = sr; fcuda.pradius = pr; fcuda.r2 = sr * sr; fcuda.pmass = mass; fcuda.prest_dens = rest; fcuda.pboundmin = bmin; fcuda.pboundmax = bmax; fcuda.pextstiff = estiff; fcuda.pintstiff = istiff; fcuda.pvisc = visc; fcuda.pdamp = damp; fcuda.pforce_min = fmin; fcuda.pforce_max = fmax; fcuda.pforce_freq = ffreq; fcuda.pground_slope = gslope; fcuda.pgravity = make_float3( gx, gy, gz ); fcuda.AL = al; fcuda.AL2 = al * al; fcuda.VL = vl; fcuda.VL2 = vl * vl; //app_printf ( "Bound Min: %f %f %f\n", bmin.x, bmin.y, bmin.z ); //app_printf ( "Bound Max: %f %f %f\n", bmax.x, bmax.y, bmax.z ); fcuda.pdist = pow ( fcuda.pmass / fcuda.prest_dens, 1/3.0f ); fcuda.poly6kern = 315.0f / (64.0f * 3.141592 * pow( sr, 9.0f) ); fcuda.spikykern = -45.0f / (3.141592 * pow( sr, 6.0f) ); fcuda.lapkern = 45.0f / (3.141592 * pow( sr, 6.0f) ); fcuda.d2 = fcuda.psimscale * fcuda.psimscale; fcuda.rd2 = fcuda.r2 / fcuda.d2; fcuda.vterm = fcuda.lapkern * fcuda.pvisc; // Transfer sim params to device updateSimParams ( &fcuda ); cudaThreadSynchronize (); } void CopyToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr ) { // Send particle buffers int numPoints = fcuda.pnum; cudaCheck( cudaMemcpy ( fbuf.mpos, pos, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mpos ToDev" ); cudaCheck( cudaMemcpy ( fbuf.mvel, vel, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mvel ToDev" ); cudaCheck( cudaMemcpy ( fbuf.mveleval, veleval, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mveleval ToDev" ); cudaCheck( cudaMemcpy ( fbuf.mforce, force, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ), "Memcpy mforce ToDev" ); cudaCheck( cudaMemcpy ( fbuf.mpress, pressure, numPoints*sizeof(float), cudaMemcpyHostToDevice ), "Memcpy mpress ToDev" ); cudaCheck( cudaMemcpy ( fbuf.mdensity, density, numPoints*sizeof(float), cudaMemcpyHostToDevice ), "Memcpy mdensity ToDev" ); cudaCheck( cudaMemcpy ( fbuf.mclr, clr, numPoints*sizeof(uint), cudaMemcpyHostToDevice ), "Memcpy mclr ToDev" ); cudaThreadSynchronize (); } void CopyFromCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr ) { // Return particle buffers int numPoints 
= fcuda.pnum; if ( pos != 0x0 ) cudaCheck( cudaMemcpy ( pos, fbuf.mpos, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ), "Memcpy mpos FromDev" ); if ( clr != 0x0 ) cudaCheck( cudaMemcpy ( clr, fbuf.mclr, numPoints*sizeof(uint), cudaMemcpyDeviceToHost ), "Memcpy mclr FromDev" ); /*cudaCheck( cudaMemcpy ( vel, fbuf.mvel, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) ); cudaCheck( cudaMemcpy ( veleval, fbuf.mveleval, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) ); cudaCheck( cudaMemcpy ( force, fbuf.mforce, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) ); cudaCheck( cudaMemcpy ( pressure, fbuf.mpress, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) ); cudaCheck( cudaMemcpy ( density, fbuf.mdensity, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );*/ cudaThreadSynchronize (); } void InsertParticlesCUDA ( uint* gcell, uint* ccell, int* gcnt ) { cudaMemset ( fbuf.mgridcnt, 0, fcuda.gridTotal * sizeof(int)); insertParticles<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum ); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf ( stderr, "CUDA ERROR: InsertParticlesCUDA: %s\n", cudaGetErrorString(error) ); } cudaThreadSynchronize (); // Transfer data back if requested (for validation) if (gcell != 0x0) { cudaCheck( cudaMemcpy ( gcell, fbuf.mgcell, fcuda.pnum*sizeof(uint), cudaMemcpyDeviceToHost ), "Memcpy mgcell FromDev"); cudaCheck( cudaMemcpy ( gcnt, fbuf.mgridcnt, fcuda.gridTotal*sizeof(int), cudaMemcpyDeviceToHost ), "Memcpy mgridcnt FromDev" ); //cudaCheck( cudaMemcpy ( ccell, fbuf.mcluster, fcuda.pnum*sizeof(uint), cudaMemcpyDeviceToHost ) ); } } void PrefixSumCellsCUDA ( int* goff ) { // Prefix Sum - determine grid offsets prescanArrayRecursiveInt ( fbuf.mgridoff, fbuf.mgridcnt, fcuda.gridTotal, 0); cudaThreadSynchronize (); // Transfer data back if requested if ( goff != 0x0 ) { cudaCheck( cudaMemcpy ( goff, fbuf.mgridoff, fcuda.gridTotal * sizeof(int), cudaMemcpyDeviceToHost ), "Memcpy mgoff FromDev" ); } } void CountingSortIndexCUDA ( uint* ggrid ) { // Counting Sort - pass one, determine grid counts cudaMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) ); countingSortIndex <<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum ); cudaThreadSynchronize (); // Transfer data back if requested if ( ggrid != 0x0 ) { cudaCheck( cudaMemcpy ( ggrid, fbuf.mgrid, fcuda.pnum * sizeof(uint), cudaMemcpyDeviceToHost ), "Memcpy mgrid FromDev" ); } } void CountingSortFullCUDA ( uint* ggrid ) { // Transfer particle data to temp buffers int n = fcuda.pnum; cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_POS, fbuf.mpos, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mpos DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_VEL, fbuf.mvel, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mvel DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_VELEVAL, fbuf.mveleval, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mveleval DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_FORCE, fbuf.mforce, n*sizeof(float)*3, cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mforce DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_PRESS, fbuf.mpress, n*sizeof(float), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mpress DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_DENS, fbuf.mdensity, n*sizeof(float), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mdens DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_GCELL, fbuf.mgcell, n*sizeof(uint), 
cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mgcell DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_GNDX, fbuf.mgndx, n*sizeof(uint), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mgndx DevToDev" ); cudaCheck ( cudaMemcpy ( fbuf.msortbuf + n*BUF_CLR, fbuf.mclr, n*sizeof(uint), cudaMemcpyDeviceToDevice ), "Memcpy msortbuf->mclr DevToDev" ); // Counting Sort - pass one, determine grid counts cudaMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) ); countingSortFull <<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum ); cudaThreadSynchronize (); } void ComputePressureCUDA () { computePressure<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum ); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf ( stderr, "CUDA ERROR: ComputePressureCUDA: %s\n", cudaGetErrorString(error) ); } cudaThreadSynchronize (); } void ComputeQueryCUDA () { computeQuery<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum ); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf ( stderr, "CUDA ERROR: ComputePressureCUDA: %s\n", cudaGetErrorString(error) ); } cudaThreadSynchronize (); } void CountActiveCUDA () { int threads = 1; int blocks = 1; assert ( fbuf.mgridactive != 0x0 ); /*#ifdef CUDA_42 cudaMemcpyToSymbol ( "gridActive", &fcuda.gridActive, sizeof(int) ); #else cudaMemcpyToSymbol ( gridActive, &fcuda.gridActive, sizeof(int) ); #endif */ countActiveCells<<< blocks, threads >>> ( fbuf, fcuda.gridTotal ); cudaThreadSynchronize (); cudaMemcpyFromSymbol ( &fcuda.gridActive, "gridActive", sizeof(int) ); app_printf ( "Active cells: %d\n", fcuda.gridActive ); } void ComputePressureGroupCUDA () { if ( fcuda.gridActive > 0 ) { int threads = 128; // should be based on maximum occupancy uint3 blocks; blocks.x = 4096; blocks.y = (fcuda.gridActive / 4096 )+1; blocks.z = 1; computePressureGroup<<< blocks, threads >>> ( fbuf, fcuda.pnum ); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf ( stderr, "CUDA ERROR: ComputePressureGroupCUDA: %s\n", cudaGetErrorString(error) ); } cudaThreadSynchronize (); } } void ComputeForceCUDA () { computeForce<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum ); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf ( stderr, "CUDA ERROR: ComputeForceCUDA: %s\n", cudaGetErrorString(error) ); } cudaThreadSynchronize (); } void AdvanceCUDA ( float tm, float dt, float ss ) { advanceParticles<<< fcuda.numBlocks, fcuda.numThreads>>> ( tm, dt, ss, fbuf, fcuda.pnum ); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { fprintf ( stderr, "CUDA ERROR: AdvanceCUDA: %s\n", cudaGetErrorString(error) ); } cudaThreadSynchronize (); } /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. 
* */ // includes, kernels #include <assert.h> inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; } inline int floorPow2(int n) { #ifdef WIN32 return 1 << (int)logb((float)n); #else int exp; frexp((float)n, &exp); return 1 << (exp - 1); #endif } #define BLOCK_SIZE 256 float** g_scanBlockSums = 0; int** g_scanBlockSumsInt = 0; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; void preallocBlockSums(unsigned int maxNumElements) { assert(g_numEltsAllocated == 0); // shouldn't be called g_numEltsAllocated = maxNumElements; unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numElts = maxNumElements; int level = 0; do { unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize))); if (numBlocks > 1) level++; numElts = numBlocks; } while (numElts > 1); g_scanBlockSums = (float**) malloc(level * sizeof(float*)); g_numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize))); if (numBlocks > 1) cudaCheck ( cudaMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(float)), "Malloc prescanBlockSums g_scanBlockSums"); numElts = numBlocks; } while (numElts > 1); } void preallocBlockSumsInt (unsigned int maxNumElements) { assert(g_numEltsAllocated == 0); // shouldn't be called g_numEltsAllocated = maxNumElements; unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numElts = maxNumElements; int level = 0; do { unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize))); if (numBlocks > 1) level++; numElts = numBlocks; } while (numElts > 1); g_scanBlockSumsInt = (int**) malloc(level * sizeof(int*)); g_numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize))); if (numBlocks > 1) cudaCheck ( cudaMalloc((void**) &g_scanBlockSumsInt[level++], numBlocks * sizeof(int)), "Malloc prescanBlockSumsInt g_scanBlockSumsInt"); numElts = numBlocks; } while (numElts > 1); } void deallocBlockSums() { if ( g_scanBlockSums != 0x0 ) { for (unsigned int i = 0; i < g_numLevelsAllocated; i++) cudaCheck ( cudaFree(g_scanBlockSums[i]), "Malloc deallocBlockSums g_scanBlockSums"); free( (void**)g_scanBlockSums ); } g_scanBlockSums = 0; g_numEltsAllocated = 0; g_numLevelsAllocated = 0; } void deallocBlockSumsInt() { if ( g_scanBlockSums != 0x0 ) { for (unsigned int i = 0; i < g_numLevelsAllocated; i++) cudaCheck ( cudaFree(g_scanBlockSumsInt[i]), "Malloc deallocBlockSumsInt g_scanBlockSumsInt"); free( (void**)g_scanBlockSumsInt ); } g_scanBlockSumsInt = 0; g_numEltsAllocated = 0; g_numLevelsAllocated = 0; } void prescanArrayRecursive (float *outArray, const float *inArray, int numElements, int level) { unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize))); unsigned int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. 
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2); unsigned int np2LastBlock = 0; unsigned int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts unsigned int extraSpace = numEltsPerBlock / NUM_BANKS; unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace); #ifdef DEBUG if (numBlocks > 1) assert(g_numEltsAllocated >= numElements); #endif // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // execute the scan if (numBlocks > 1) { prescan<true, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0); if (np2LastBlock) { prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be added to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive (g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); uniformAdd<<< grid, threads >>> (outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { uniformAdd<<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { prescan<false, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numThreads * 2, 0, 0); } else { prescan<false, true><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numElements, 0, 0); } } void prescanArrayRecursiveInt (int *outArray, const int *inArray, int numElements, int level) { unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize))); unsigned int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. 
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2); unsigned int np2LastBlock = 0; unsigned int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(float) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts unsigned int extraSpace = numEltsPerBlock / NUM_BANKS; unsigned int sharedMemSize = sizeof(float) * (numEltsPerBlock + extraSpace); #ifdef DEBUG if (numBlocks > 1) assert(g_numEltsAllocated >= numElements); #endif // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // execute the scan if (numBlocks > 1) { prescanInt <true, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, g_scanBlockSumsInt[level], numThreads * 2, 0, 0); if (np2LastBlock) { prescanInt <true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be added to each block to // get the final results. // recursive (CPU) call prescanArrayRecursiveInt (g_scanBlockSumsInt[level], g_scanBlockSumsInt[level], numBlocks, level+1); uniformAddInt <<< grid, threads >>> (outArray, g_scanBlockSumsInt[level], numElements - numEltsLastBlock, 0, 0); if (np2LastBlock) { uniformAddInt <<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSumsInt[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); } } else if (isPowerOfTwo(numElements)) { prescanInt <false, false><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numThreads * 2, 0, 0); } else { prescanInt <false, true><<< grid, threads, sharedMemSize >>> (outArray, inArray, 0, numElements, 0, 0); } } void prescanArray ( float *d_odata, float *d_idata, int num ) { // preform prefix sum preallocBlockSums( num ); prescanArrayRecursive ( d_odata, d_idata, num, 0); deallocBlockSums(); } void prescanArrayInt ( int *d_odata, int *d_idata, int num ) { // preform prefix sum preallocBlockSumsInt ( num ); prescanArrayRecursiveInt ( d_odata, d_idata, num, 0); deallocBlockSumsInt (); } char* d_idata = NULL; char* d_odata = NULL; void prefixSum ( int num ) { prescanArray ( (float*) d_odata, (float*) d_idata, num ); } void prefixSumInt ( int num ) { prescanArrayInt ( (int*) d_odata, (int*) d_idata, num ); } void prefixSumToGPU ( char* inArray, int num, int siz ) { cudaCheck ( cudaMalloc( (void**) &d_idata, num*siz ), "Malloc prefixumSimToGPU idata"); cudaCheck ( cudaMalloc( (void**) &d_odata, num*siz ), "Malloc prefixumSimToGPU odata" ); cudaCheck ( cudaMemcpy( d_idata, inArray, num*siz, cudaMemcpyHostToDevice), "Memcpy inArray->idata" ); } void prefixSumFromGPU ( char* outArray, int num, int siz ) { cudaCheck ( cudaMemcpy( outArray, d_odata, num*siz, cudaMemcpyDeviceToHost), "Memcpy odata->outArray" ); cudaCheck ( cudaFree( d_idata ), "Free idata" ); cudaCheck ( cudaFree( d_odata ), "Free odata" ); d_idata = NULL; d_odata = NULL; }
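/* A minimal host-side sketch (not part of the original code) showing what the
 * prescanArrayRecursiveInt / PrefixSumCellsCUDA stage computes and how a counting sort typically
 * consumes it: an exclusive prefix sum over per-cell particle counts (mgridcnt) yields per-cell
 * write offsets (mgridoff), and a particle's sorted slot is then offset of its cell plus its index
 * within that cell (mgndx). The names cpu_exclusive_scan, counts and offsets are hypothetical
 * helpers introduced only for this illustration; they are not part of the fluid system API.
 */
#include <cstdio>
#include <vector>

// reference exclusive scan: offsets[i] = counts[0] + ... + counts[i-1]
static std::vector<int> cpu_exclusive_scan(const std::vector<int>& counts) {
    std::vector<int> offsets(counts.size(), 0);
    int running = 0;
    for (size_t i = 0; i < counts.size(); ++i) {
        offsets[i] = running;
        running += counts[i];
    }
    return offsets;
}

int main() {
    // per-cell particle counts, as a grid-insertion pass would produce them
    std::vector<int> counts = { 3, 0, 2, 5, 1 };
    std::vector<int> offsets = cpu_exclusive_scan(counts);   // 0, 3, 3, 5, 10

    for (size_t c = 0; c < counts.size(); ++c)
        printf("cell %zu: count=%d offset=%d\n", c, counts[c], offsets[c]);

    // a particle that landed in cell 3 as the second entry of that cell (index within cell == 1)
    int cell = 3, index_in_cell = 1;
    printf("sorted slot = %d\n", offsets[cell] + index_in_cell);   // 5 + 1 = 6
    return 0;
}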
__global__ void cuda_calc_curve_values( REAL const * parameters, int const n_fits, int const n_points, int const n_parameters, int const * finished, REAL * values, REAL * derivatives, int const n_fits_per_block, int const n_blocks_per_fit, ModelID const model_id, int const chunk_index, char * user_info, std::size_t const user_info_size) { int const fit_in_block = threadIdx.x / n_points; int const fit_index = blockIdx.x * n_fits_per_block / n_blocks_per_fit + fit_in_block; int const fit_piece = blockIdx.x % n_blocks_per_fit; int const point_index = threadIdx.x - fit_in_block * n_points + fit_piece * blockDim.x; int const first_point = fit_index * n_points; REAL * current_values = values + first_point; REAL * current_derivatives = derivatives + first_point * n_parameters; REAL const * current_parameters = parameters + fit_index * n_parameters; if (finished[fit_index]) return; if (point_index >= n_points) return; calculate_model(model_id, current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); } /* Description of the sum_up_floats function * ========================================== * * This function sums up a vector of REAL values and stores the result at the * first place of the vector. * * Parameters: * * shared_array: An input vector of REAL values. The vector must be stored * on the shared memory of the GPU. The size of this vector must be a * power of two. Use zero padding to extend it to the next highest * power of 2 greater than the number of elements. * * size: The number of elements in the input vector considering zero padding. * * Calling the sum_up_floats function * ================================== * * This __device__ function can be only called from a __global__ function or * an other __device__ function. When calling the function, the blocks and threads * of the __global__ function must be set up correctly, as shown in the following * example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = size * vectors_per_block; * blocks.x = n_vectors / vectors_per_block; * * global_function<<< blocks, threads >>>(parameter1, ...); * */ __device__ void sum_up_floats(volatile REAL* shared_array, int const size) { int const fit_in_block = threadIdx.x / size; int const point_index = threadIdx.x - (fit_in_block*size); int current_n_points = size >> 1; __syncthreads(); while (current_n_points) { if (point_index < current_n_points) { shared_array[point_index] += shared_array[point_index + current_n_points]; } current_n_points >>= 1; __syncthreads(); } } /* Description of the cuda_sum_chi_square_subtotals function * ========================================================== * * This function sums up chi_square subtotals in place. * * Parameters: * * chi_squares: A vector of chi-square values for multiple fits. * in: subtotals * out: totals * * n_blocks_per_fit: The number of blocks used to calculate one fit. It is * equivalent to the number of subtotals per fit. * * n_fits: The number of fits. * * finished: An input vector which allows the calculation to be skipped * for single fits. * * Calling the cuda_sum_chi_square_subtotals function * ================================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_sum_chi_square_subtotals<<< blocks, threads >>>( * chi_squares, * n_blocks_per_fit, * n_fits, * finished); * */ __global__ void cuda_sum_chi_square_subtotals( REAL * chi_squares, REAL const * subtotals, int const n_blocks_per_fit, int const n_fits, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_fits || finished[index]) return; REAL * chi_square = chi_squares + index; REAL const * subtotal = subtotals + index; double sum = 0.0; for (int i = 0; i < n_blocks_per_fit; i++) sum += subtotal[i * n_fits]; chi_square[0] = sum; } /* Description of the cuda_check_fit_improvement function * ======================================================= * * This function checks after each calculation of chi-square values whether the * currently calculated chi-square values are lower than chi-square values calculated * in the previous iteration and sets the iteration_failed flags. * * Parameters: * * iteration_failed: An output vector of flags which indicate whether the fitting * process improved the fit in the last iteration. If yes it is set * to 0 otherwise to 1. * * chi_squares: An input vector of chi-square values for multiple fits. * * prev_chi_squares: An input vector of chi-square values for multiple fits calculated * in the previous iteration. * * n_fits: The number of fits. * * finished: An input vector which allows the calculation to be skipped * for single fits. * * Calling the cuda_check_fit_improvement function * =============================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_check_fit_improvement <<< blocks, threads >>>( * iteration_failed, * chi_squares, * prev_chi_squares, * n_fits, * finished); * */ __global__ void cuda_check_fit_improvement( int * iteration_failed, REAL const * chi_squares, REAL const * prev_chi_squares, int const n_fits, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_fits || finished[index]) return; bool const prev_chi_squares_initialized = prev_chi_squares[index] != 0.; // chi_squares[index] can be NaN which compares to false with any other number bool const chi_square_decreased = (chi_squares[index] < prev_chi_squares[index]); if (prev_chi_squares_initialized && !chi_square_decreased) { iteration_failed[index] = 1; } else { iteration_failed[index] = 0; } } /* Description of the cuda_calculate_chi_squares function * ======================================================== * * This function calls one of the estimator funktions depending on the input * parameter estimator_id. The estimator function calculates the chi-square values. * The calcluation is performed for multiple fits in parallel. * * Parameters: * * chi_squares: An output vector of concatenated chi-square values. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. In this function * it is only used for MLE. It is set to 3 if a fitting curve value is * negative. This vector includes the states for multiple fits. 
* * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_fits: The number of fits. * * estimator_id: The estimator ID. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calculate_chi_squares function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * int const shared_size = sizeof(REAL) * threads.x; * * cuda_calculate_chi_squares<<< blocks, threads, shared_size >>>( * chi_squares, * states, * data, * values, * weights, * n_points, * n_fits, * estimator_id, * finished, * n_fits_per_block, * user_info, * user_info_size); * */ __global__ void cuda_calculate_chi_squares( REAL * chi_squares, int * states, REAL const * data, REAL const * values, REAL const * weights, int const n_points, int const n_fits, int const estimator_id, int const * finished, int const n_fits_per_block, char * user_info, std::size_t const user_info_size) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const fit_piece = blockIdx.x / n_fits; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits; int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size; int const first_point = fit_index * n_points; if (finished[fit_index]) { return; } REAL const * current_data = &data[first_point]; REAL const * current_weight = weights ? &weights[first_point] : NULL; REAL const * current_value = &values[first_point]; int * current_state = &states[fit_index]; extern __shared__ REAL extern_array[]; volatile REAL * shared_chi_square = extern_array + (fit_in_block - fit_piece) * shared_size; if (point_index >= n_points) { shared_chi_square[point_index] = 0.; } if (point_index < n_points) { calculate_chi_square( estimator_id, shared_chi_square, point_index, current_data, current_value, current_weight, current_state, user_info, user_info_size); } shared_chi_square += fit_piece * shared_size; sum_up_floats(shared_chi_square, shared_size); chi_squares[fit_index + fit_piece * n_fits] = shared_chi_square[0]; } /* Description of the cuda_sum_gradient_subtotals function * ======================================================== * * This function sums up the chi-square gradient subtotals in place. * * Parameters: * * gradients: A vector of gradient values for multiple fits. * in: subtotals * out: totals * * n_blocks_per_fit: The number of blocks used to calculate one fit * * n_fits: The number of fits. * * n_parameters_to_fit: The number of model parameters, that are not held fixed. * * skip: An input vector which allows the calculation to be skipped for single fits. * * finished: An input vector which allows the calculation to be skipped for single * fits. 
* * Calling the cuda_sum_gradient_subtotals function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_sum_gradient_subtotals<<< blocks,threads >>>( * gradients, * n_blocks_per_fit, * n_fits, * n_parameters_to_fit, * skip, * finished); * */ __global__ void cuda_sum_gradient_subtotals( REAL * gradients, REAL const * subtotals, int const n_blocks_per_fit, int const n_fits, int const n_parameters, int const * skip, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; int const fit_index = index / n_parameters; if (fit_index >= n_fits || finished[fit_index] || skip[fit_index]) return; REAL * gradient = gradients + index; REAL const * subtotal = subtotals + index; double sum = 0.0; for (int i = 0; i < n_blocks_per_fit; i++) sum += subtotal[i * n_fits * n_parameters]; gradient[0] = sum; } /* Description of the cuda_calculate_gradients function * ===================================================== * * This function calls one of the gradient functions depending on the input * parameter estimator_id. The gradient function calculates the gradient values * of the chi-square function calling a __device__ function. The calcluation is * performed for multiple fits in parallel. * * Parameters: * * gradients: An output vector of concatenated sets of gradient vector values. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * derivatives: An input vector of concatenated sets of model function partial * derivatives. * * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_fits: The number of fits. * * n_parameters: The number of fitting curve parameters. * * n_parameters_to_fit: The number of fitting curve parameters, that are not held * fixed. * * parameters_to_fit_indices: An input vector of indices of fitting curve parameters, * that are not held fixed. * * estimator_id: The estimator ID. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * skip: An input vector which allows the calculation to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * user_info: An input vector containing user information. * * user_info_size: The number of elements in user_info. * * Calling the cuda_calculate_gradients function * ============================================= * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * int const shared_size = sizeof(REAL) * threads.x; * * cuda_calculate_gradients<<< blocks, threads, shared_size >>>( * gradients, * data, * values, * derivatives, * weight, * n_points, * n_fits, * n_parameters, * n_parameters_to_fit, * parameters_to_fit_indices, * estimator_id, * finished, * skip, * n_fits_per_block, * user_info, * user_info_size); * */ __global__ void cuda_calculate_gradients( REAL * gradients, REAL const * data, REAL const * values, REAL const * derivatives, REAL const * weights, int const n_points, int const n_fits, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const estimator_id, int const * finished, int const * skip, int const n_fits_per_block, char * user_info, std::size_t const user_info_size) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const fit_piece = blockIdx.x / n_fits; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits; int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size; int const first_point = fit_index * n_points; if (finished[fit_index] || skip[fit_index]) { return; } REAL const * current_data = &data[first_point]; REAL const * current_weight = weights ? &weights[first_point] : NULL; REAL const * current_derivative = &derivatives[first_point * n_parameters]; REAL const * current_value = &values[first_point]; extern __shared__ REAL extern_array[]; volatile REAL * shared_gradient = extern_array + (fit_in_block - fit_piece) * shared_size; if (point_index >= n_points) { shared_gradient[point_index] = 0.; } for (int parameter_index = 0; parameter_index < n_parameters_to_fit; parameter_index++) { if (point_index < n_points) { int const derivative_index = parameters_to_fit_indices[parameter_index] * n_points + point_index; calculate_gradient( estimator_id, shared_gradient, point_index, derivative_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } sum_up_floats(shared_gradient + fit_piece * shared_size, shared_size); gradients[(fit_index * n_parameters_to_fit + parameter_index) + fit_piece * n_fits * n_parameters_to_fit] = shared_gradient[fit_piece * shared_size]; } } /* Description of the cuda_calculate_hessians function * ==================================================== * * This function calls one of the hessian function depending on the input * parameter estimator_id. The hessian funcion calculates the hessian matrix * values of the chi-square function calling a __device__ functions. The * calcluation is performed for multiple fits in parallel. * * Parameters: * * hessians: An output vector of concatenated sets of hessian matrix values. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * derivatives: An input vector of concatenated sets of model function partial * derivatives. * * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_fits: The number of fits. * * n_points: The number of data points per fit. * * n_parameters: The number of fitting curve parameters. * * n_parameters_to_fit: The number of fitting curve parameters, that are not held * fixed. 
* * parameters_to_fit_indices: An input vector of indices of fitting curve parameters, * that are not held fixed. * * estimator_id: The estimator ID. * * skip: An input vector which allows the calculation to be skipped for single fits. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calculate_hessians function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int n_unique_values = n_parameters_to_fit * (n_parameters_to_fit + 1) / 2; * * threads.x * = min(n_unique_values * n_fits_per_block, max_threads_per_block); * * blocks.y * = threads.x / max_threads_per_block * + int((threads.x % max_threads_per_block) > 0); * * blocks.x * = n_fits / n_fits_per_block * + int((n_fits % n_fits_per_block) > 0); * * cuda_calculate_hessians<<< blocks, threads >>>( * hessians, * data, * values, * derivatives, * weight, * n_fits, * n_points, * n_parameters, * n_parameters_to_fit, * parameters_to_fit_indices, * estimator_id, * skip, * finished, * user_info, * user_info_size); * */ __global__ void cuda_calculate_hessians( REAL * hessians, REAL const * data, REAL const * values, REAL const * derivatives, REAL const * weights, int const n_fits, int const n_points, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const estimator_id, int const * skip, int const * finished, char * user_info, std::size_t const user_info_size) { int const n_unique_values = n_parameters_to_fit * (n_parameters_to_fit + 1) / 2; int const n_fits_per_block = blockDim.x * gridDim.y / n_unique_values; int const fit_in_block = (gridDim.y == 1) ? (blockIdx.y * blockDim.x + threadIdx.x) / n_unique_values : 0; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; if (fit_index >= n_fits || finished[fit_index] || skip[fit_index]) { return; } int const first_point = fit_index * n_points; int const parameter_index = (blockIdx.y * blockDim.x + threadIdx.x) - fit_in_block * n_unique_values; if (parameter_index >= n_unique_values) { return; } int const parameter_index_i = n_parameters_to_fit - 1. - std::floor( .5*( std::sqrt( - 8. * (parameter_index - n_parameters_to_fit) + 4. * n_parameters_to_fit * (n_parameters_to_fit - 1.) - 7. ) - 1. ) ); int const parameter_index_j = parameter_index + parameter_index_i - parameter_index_i*(n_parameters_to_fit - (parameter_index_i - 1) / 2.); REAL * current_hessian = &hessians[fit_index * n_parameters_to_fit * n_parameters_to_fit]; REAL const * current_data = &data[first_point]; REAL const * current_weight = weights ? 
&weights[first_point] : NULL; REAL const * current_derivative = &derivatives[first_point*n_parameters]; REAL const * current_value = &values[first_point]; int const hessian_index_ij = parameter_index_i * n_parameters_to_fit + parameter_index_j; int const hessian_index_ji = parameter_index_j * n_parameters_to_fit + parameter_index_i; int const derivative_index_i = parameters_to_fit_indices[parameter_index_i] * n_points; int const derivative_index_j = parameters_to_fit_indices[parameter_index_j] * n_points; double sum = 0.0; for (int point_index = 0; point_index < n_points; point_index++) { calculate_hessian( estimator_id, &sum, point_index, derivative_index_i + point_index, derivative_index_j + point_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } current_hessian[hessian_index_ij] = sum; current_hessian[hessian_index_ji] = sum; } /* Description of the cuda_modify_step_widths function * ==================================================== * * This function midifies the diagonal elements of the hessian matrices by multiplying * them by the factor (1+ lambda). This operation controls the step widths of the * iteration. If the last iteration failed, befor modifying the hessian, the diagonal * elements of the hessian are calculated back to represent unmodified values. * * hessians: An input and output vector of hessian matrices, which are modified by * the lambda values. * * lambdas: An input vector of values for modifying the hessians. * * n_parameters: The number of fitting curve parameters. * * iteration_failed: An input vector which indicates whether the previous iteration * failed. * * finished: An input vector which allows the calculation to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * Calling the cuda_modify_step_widths function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters_to_fit * n_fits_per_block; * blocks.x = n_fits / n_fits_per_block; * * cuda_modify_step_width<<< blocks, threads >>>( * hessians, * lambdas, * n_parameters, * iteration_failed, * finished, * n_fits_per_block); * */ __global__ void cuda_modify_step_widths( REAL * hessians, REAL const * lambdas, REAL * scaling_vectors, unsigned int const n_parameters, int const * iteration_failed, int const * finished, int const n_fits_per_block) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const parameter_index = threadIdx.x - fit_in_block * shared_size; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; if (finished[fit_index]) { return; } REAL * hessian = &hessians[fit_index * n_parameters * n_parameters]; REAL * scaling_vector = &scaling_vectors[fit_index * n_parameters]; REAL const & lambda = lambdas[fit_index]; int const diagonal_index = parameter_index * n_parameters + parameter_index; if (iteration_failed[fit_index]) { hessian[diagonal_index] -= scaling_vector[parameter_index] * lambda / 10.; } // adaptive scaling scaling_vector[parameter_index] = max(scaling_vector[parameter_index], hessian[diagonal_index]); // continuous scaling //scaling_vector[parameter_index] = hessian[diagonal_index]; // initial scaling //if (scaling_vector[parameter_index] == 0.) 
// scaling_vector[parameter_index] = hessian[diagonal_index]; hessian[diagonal_index] += scaling_vector[parameter_index] * lambda; } __device__ void project_parameter_to_box(REAL & parameter, REAL const lower_bound, REAL const upper_bound, int const constraint_type) { switch (constraint_type) { case ConstraintType::LOWER: parameter = max(parameter, lower_bound); break; case ConstraintType::UPPER: parameter = min(parameter, upper_bound); break; case ConstraintType::LOWER_UPPER: parameter = max(parameter, lower_bound); parameter = min(parameter, upper_bound); break; default: break; } } __global__ void cuda_project_parameters_to_box( REAL * parameters, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, REAL const * constraints, int const * constraint_types, int const * finished, int const n_fits_per_block) { int const fit_in_block = threadIdx.x / n_parameters_to_fit; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; int const parameter_index = threadIdx.x - fit_in_block * n_parameters_to_fit; if (finished[fit_index]) { return; } REAL & parameter = parameters[fit_index * n_parameters + parameters_to_fit_indices[parameter_index]]; REAL const & lower_bound = constraints[parameters_to_fit_indices[parameter_index] * 2 + LOWER_BOUND]; REAL const & upper_bound = constraints[parameters_to_fit_indices[parameter_index] * 2 + UPPER_BOUND]; int const & constraint_type = constraint_types[parameters_to_fit_indices[parameter_index]]; project_parameter_to_box(parameter, lower_bound, upper_bound, constraint_type); } /* Description of the cuda_update_parameters function * =================================================== * * This function stores the fitting curve parameter values in prev_parameters and * updates them after each iteration. * * Parameters: * * parameters: An input and output vector of concatenated sets of model * parameters. * * prev_parameters: An input and output vector of concatenated sets of model * parameters calculated by the previous iteration. * * deltas: An input vector of concatenated delta values, which are added to the * model parameters. * * n_parameters_to_fit: The number of fitted curve parameters. * * parameters_to_fit_indices: The indices of fitted curve parameters. * * finished: An input vector which allows the parameter update to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each threadblock. * * Calling the cuda_update_parameters function * =========================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters * n_fits_per_block; * blocks.x = n_fits / n_fits_per_block; * * cuda_update_parameters<<< blocks, threads >>>( * parameters, * prev_parameters, * deltas, * n_parameters_to_fit, * parameters_to_fit_indices, * finished, * n_fits_per_block); * */ __global__ void cuda_update_parameters( REAL * parameters, REAL * prev_parameters, REAL const * deltas, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const * finished, int const n_fits_per_block) { int const n_parameters = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / n_parameters; int const parameter_index = threadIdx.x - fit_in_block * n_parameters; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; REAL * current_parameters = &parameters[fit_index * n_parameters]; REAL * current_prev_parameters = &prev_parameters[fit_index * n_parameters]; current_prev_parameters[parameter_index] = current_parameters[parameter_index]; if (finished[fit_index]) { return; } if (parameter_index >= n_parameters_to_fit) { return; } REAL const * current_deltas = &deltas[fit_index * n_parameters_to_fit]; current_parameters[parameters_to_fit_indices[parameter_index]] += current_deltas[parameter_index]; } /* Description of the cuda_update_state_after_solving function * =========================================================== * * This function interprets the singular flag vector of the equation system * solving function according to this LM implementation. * * Parameters: * * n_fits: The number of fits. * * solution_info: An input vector used to report whether a fit is singular. * * finished: An input vector which allows the calculation to be skipped for * single fits. * * gpufit_states: An output vector of values which indicate whether the fitting * process was carried out correctly or which problem occurred. * If a hessian matrix of a fit is singular, it is set to 2. * * Calling the cuda_update_state_after_solving function * ==================================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_update_state_after_solving<<< blocks, threads >>>( * n_fits, * solution_info, * finished, * gpufit_states); * */ __global__ void cuda_update_state_after_solving( int const n_fits, int const * cublas_info, int const * finished, int * states) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) return; if (finished[fit_index]) return; if (cublas_info[fit_index] != 0) states[fit_index] = SINGULAR_HESSIAN; } /* Description of the cuda_check_for_convergence function * ======================================================= * * This function checks after each iteration whether the fits have converged. * It also checks whether the set maximum number of iterations is reached. * * Parameters: * * finished: An input and output vector which allows the calculation to be skipped * for single fits. * * tolerance: The tolerance value for the convergence check, set by the user. * * states: An output vector of values which indicate whether the fitting process * was carried out correctly or which problem occurred. If the maximum * number of iterations is reached without converging, it is set to 1.
If * the fit converged, it keeps its initial value of 0. * * chi_squares: An input vector of chi-square values for multiple fits. Used for the * convergence check. * * prev_chi_squares: An input vector of chi-square values for multiple fits calculated * in the previous iteration. Used for the convergence check. * * iteration: The value of the current iteration. It is compared to the value * of the maximum number of iterations set by the user. * * max_n_iterations: The maximum number of iterations set by the user. * * n_fits: The number of fits. * * Calling the cuda_check_for_convergence function * =============================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_check_for_convergence<<< blocks, threads >>>( * finished, * tolerance, * states, * chi_squares, * prev_chi_squares, * iteration, * max_n_iterations, * n_fits); * */ __global__ void cuda_check_for_convergence( int * finished, REAL const tolerance, int * states, REAL const * chi_squares, REAL const * prev_chi_squares, int const iteration, int const max_n_iterations, int const n_fits) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (finished[fit_index]) { return; } int const fit_found = abs(chi_squares[fit_index] - prev_chi_squares[fit_index]) < tolerance * max(1., chi_squares[fit_index]); int const max_n_iterations_reached = iteration == max_n_iterations - 1; if (fit_found) { finished[fit_index] = 1; } else if (max_n_iterations_reached) { states[fit_index] = MAX_ITERATION; } } /* Description of the cuda_evaluate_iteration function * ==================================================== * * This function evaluates the current iteration. * - It marks a fit as finished if a problem occurred. * - It saves the needed number of iterations if a fit finished. * - It checks whether all fits have finished. * * Parameters: * * all_finished: An output flag that indicates whether all fits have finished. * * n_iterations: An output vector of needed iterations for each fit. * * finished: An input and output vector which allows the evaluation to be skipped * for single fits. * * iteration: The value of the current iteration. * * states: An input vector of values which indicate whether the fitting process * was carried out correctly or which problem occurred. * * n_fits: The number of fits. * * Calling the cuda_evaluate_iteration function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code.
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_evaluate_iteration<<< blocks, threads >>>( * all_finished, * n_iterations, * finished, * iteration, * states, * n_fits); * */ __global__ void cuda_evaluate_iteration( int * all_finished, int * n_iterations, int * finished, int const iteration, int const * states, int const n_fits) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (states[fit_index] != CONVERGED) { finished[fit_index] = 1; } if (finished[fit_index] && n_iterations[fit_index] == 0) { n_iterations[fit_index] = iteration + 1; } if (!finished[fit_index]) { *all_finished = 0; } } /* Description of the cuda_prepare_next_iteration function * ======================================================== * * This function prepares the next iteration. It either updates previous * chi-square values or sets currently calculated chi-square values and * parameters to values calculated by the previous iteration. This function also * updates lambda values. * * Parameters: * * lambdas: An output vector of values which control the step width by modifying * the diagonal elements of the hessian matrices. * * chi_squares: An input and output vector of chi-square values for multiple fits. * * prev_chi_squares: An input and output vector of chi-square values for multiple * fits calculated in the previous iteration. * * parameters: An output vector of concatenated sets of model parameters. * * prev_parameters: An input vector of concatenated sets of model parameters * calculated in the previous iteration. * * n_fits: The number of fits. * * n_parameters: The number of fitting curve parameters. * * Calling the cuda_prepare_next_iteration function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(REAL(n_fits) / REAL(threads.x))); * * cuda_prepare_next_iteration<<< blocks, threads >>>( * lambdas, * chi_squares, * prev_chi_squares, * parameters, * prev_parameters, * n_fits, * n_parameters); * */ __global__ void cuda_prepare_next_iteration( REAL * lambdas, REAL * chi_squares, REAL * prev_chi_squares, REAL * parameters, REAL const * prev_parameters, int const n_fits, int const n_parameters) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (chi_squares[fit_index] < prev_chi_squares[fit_index]) { lambdas[fit_index] *= 0.1f; prev_chi_squares[fit_index] = chi_squares[fit_index]; } else { lambdas[fit_index] *= 10.; chi_squares[fit_index] = prev_chi_squares[fit_index]; for (int iparameter = 0; iparameter < n_parameters; iparameter++) { parameters[fit_index * n_parameters + iparameter] = prev_parameters[fit_index * n_parameters + iparameter]; } } }
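/* Example usage sketch (not part of the original kernels above)
 * =============================================================
 *
 * A minimal host-side sketch of how the per-iteration kernels could be chained
 * with the launch configuration given in the descriptions above. The buffer
 * names (d_finished, d_states, ...) are hypothetical placeholders for device
 * allocations made elsewhere; CUDA error checking is omitted for brevity.
 */
void run_convergence_step(
    int * d_finished,
    int * d_states,
    int * d_n_iterations,
    int * d_all_finished,
    REAL const * d_chi_squares,
    REAL const * d_prev_chi_squares,
    REAL const tolerance,
    int const iteration,
    int const max_n_iterations,
    int const n_fits)
{
    dim3 threads(1, 1, 1);
    dim3 blocks(1, 1, 1);

    threads.x = n_fits < 256 ? n_fits : 256;
    blocks.x = (n_fits + threads.x - 1) / threads.x;

    // mark converged fits and flag fits that hit the iteration limit
    cuda_check_for_convergence<<< blocks, threads >>>(
        d_finished, tolerance, d_states,
        d_chi_squares, d_prev_chi_squares,
        iteration, max_n_iterations, n_fits);

    // assume all fits are finished; any fit that is still running clears the flag
    int const one = 1;
    cudaMemcpy(d_all_finished, &one, sizeof(int), cudaMemcpyHostToDevice);

    cuda_evaluate_iteration<<< blocks, threads >>>(
        d_all_finished, d_n_iterations, d_finished,
        iteration, d_states, n_fits);

    // the caller would read back *d_all_finished and stop iterating once it is non-zero
}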
#include <stdio.h> #include <pthread.h> #include <cuda.h> #include <cutil.h> #include <multithreading.h> __global__ void eval_multi_UBspline_3d_cuda_c (const float *coefs, float *abc, float *vals, int ix, int iy, int iz, int xs, int ys, int zs, int N) { int block = blockIdx.x; int thr = threadIdx.x; int offset = block*BLOCK_SIZE+thr; __shared__ float abcs[64]; abcs[thr] = abc[thr]; __syncthreads(); float val= 0.0; //int index=0; for (int i=0; i<4; i++) for (int j=0; j<4; j++) { for (int k=0; k<4; k++) { float *base_addr = coefs + (ix+i)*xs + (iy+j)*ys + (iz+k)*zs; //val += abc[(16*i+4*j+k)*BLOCK_SIZE + thr] * base_addr[offset]; val += abcs[16*i+4*j+k] * base_addr[offset]; //index++; } } vals[offset] = val; } __constant__ float A[16], dA[16], d2A[16]; __global__ static void eval_multi_multi_UBspline_3d_cuda_c (float *pos, float3 drInv, const float *coefs_real,const float *coefs_imag, float *vals_real, float *vals_imag, int3 strides) { int block = blockIdx.x; int thr = threadIdx.x; int ir = blockIdx.y; int offset = block*BLOCK_SIZE+thr; __shared__ float abc[64]; __shared__ float pos_s[BLOCK_SIZE]; int ir1 = (ir >> 4)*64; int ir2 = (ir & 15)*4; pos_s[thr] = pos[ir1+thr]; __syncthreads(); float3 r; r.x = pos_s[ir2+0]; r.y = pos_s[ir2+1]; r.z = pos_s[ir2+2]; int3 index; float3 t; float s, sf; float4 tp[3]; s = r.x * drInv.x; sf = floor(s); index.x = (int)sf; t.x = s - sf; s = r.y * drInv.y; sf = floor(s); index.y = (int)sf; t.y = s - sf; s = r.z * drInv.z; sf = floor(s); index.z = (int)sf; t.z = s - sf; tp[0] = make_float4(1.0, t.x, t.x*t.x, t.x*t.x*t.x); tp[1] = make_float4(1.0, t.y, t.y*t.y, t.y*t.y*t.y); tp[2] = make_float4(1.0, t.z, t.z*t.z, t.z*t.z*t.z); __shared__ float a[4], b[4], c[4]; if (thr == 0) { a[0] = A[ 0]*tp[0].x + A[ 1]*tp[0].y + A[ 2]*tp[0].z + A[ 3]*tp[0].w; a[1] = A[ 4]*tp[0].x + A[ 5]*tp[0].y + A[ 6]*tp[0].z + A[ 7]*tp[0].w; a[2] = A[ 8]*tp[0].x + A[ 9]*tp[0].y + A[10]*tp[0].z + A[11]*tp[0].w; a[3] = A[12]*tp[0].x + A[13]*tp[0].y + A[14]*tp[0].z + A[15]*tp[0].w; b[0] = A[ 0]*tp[1].x + A[ 1]*tp[1].y + A[ 2]*tp[1].z + A[ 3]*tp[1].w; b[1] = A[ 4]*tp[1].x + A[ 5]*tp[1].y + A[ 6]*tp[1].z + A[ 7]*tp[1].w; b[2] = A[ 8]*tp[1].x + A[ 9]*tp[1].y + A[10]*tp[1].z + A[11]*tp[1].w; b[3] = A[12]*tp[1].x + A[13]*tp[1].y + A[14]*tp[1].z + A[15]*tp[1].w; c[0] = A[ 0]*tp[2].x + A[ 1]*tp[2].y + A[ 2]*tp[2].z + A[ 3]*tp[2].w; c[1] = A[ 4]*tp[2].x + A[ 5]*tp[2].y + A[ 6]*tp[2].z + A[ 7]*tp[2].w; c[2] = A[ 8]*tp[2].x + A[ 9]*tp[2].y + A[10]*tp[2].z + A[11]*tp[2].w; c[3] = A[12]*tp[2].x + A[13]*tp[2].y + A[14]*tp[2].z + A[15]*tp[2].w; } int i = (thr>>4)&3; int j = (thr>>2)&3; int k = (thr & 3); abc[thr] = a[i]*b[j]*c[k]; __syncthreads(); float val_real = 0.0; float val_imag = 0.0; //int index=0; val_real = val_imag = 0.0; // int di = strides.x - 4*strides.y; // int dj = strides.y - 4*strides.z; for (int i=0; i<4; i++) { for (int j=0; j<4; j++) { float *base_real = coefs_real + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z; float *base_imag = coefs_imag + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z; for (int k=0; k<4; k++) { // float *base_real = coefs_real + (index.x+i)*strides.x + (index.y+j)*strides.y + (index.z+k)*strides.z; // float *base_imag = coefs_imag + (index.x+i)*strides.x + (index.y+j)*strides.y + (index.z+k)*strides.z; val_real += abc[16*i+4*j+k] * base_real[offset+k*strides.z]; val_imag += abc[16*i+4*j+k] * base_imag[offset+k*strides.z]; // base_real += strides.z; // base_imag += strides.z; } // base_real += dj; // base_imag += dj; } // 
base_real += di; // base_imag += di; } vals_real[offset+ir*128] = val_real; vals_imag[offset+ir*128] = val_imag; //vals_real[ir][offset] = val_real; // vals_imag[ir][offset] = val_imag; } // __global__ void // eval_multi_UBspline_3d_cuda_c2 (float3 r, // float *coefs, float *vals, // int xs, int ys, int zs, int N) // { // int block = blockIdx.x; // int thr = threadIdx.x; // __shared__ float abcs[64]; // abcs[thr] = abc[thr]; // float dxInv = 0.0625f; // float v, dv; // v = floor(dxInv*r.x); // dv = dxInv*r.x - v; // int ix = (int) v; // v = floor(dxInv*r.x); // dv = dxInv*r.x - v; // int iy = (int) v; // v = floor(dxInv*r.y); // dv = dxInv*r.y - v; // int iz = (int) v; // int offset = block*BLOCK_SIZE+thr; // __shared__ float abcs[64]; // abcs[thr] = abc[thr]; // float val= 0.0; // //int index=0; // val = 0.0; // for (int i=0; i<4; i++) // for (int j=0; j<4; j++) // for (int k=0; k<4; k++) { // float *base_addr = coefs + (ix+i)*xs + (iy+j)*ys + (iz+k)*zs; // //val += abc[(16*i+4*j+k)*BLOCK_SIZE + thr] * base_addr[offset]; // val += abcs[16*i+4*j+k] * base_addr[offset]; // //index++; // } // vals[offset] = val; // } void test_cuda() { float *coefs , *abc , *abc2, *vals; float *coefs_d, *abc_d, *vals_d; int xs, ys, zs, N; int Nx, Ny, Nz; N = 4096; Nx = Ny = Nz = 16; xs = Nx*Ny*Nz; ys = Ny*Nz; zs = Nz; int size = Nx*Ny*Nz*N*sizeof(float); posix_memalign((void**)&coefs, 16, size); cudaMalloc((void**)&coefs_d, size); for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) for (int iz=0; iz<Nz; iz++) for (int n=0; n<N; n++) coefs[ix*xs + iy*ys + iz*zs + n] = drand48(); cudaMemcpy(coefs_d, coefs, size, cudaMemcpyHostToDevice); posix_memalign ((void**)&abc, 16, 64*sizeof(float)); posix_memalign ((void**)&abc2, 16, 64*BLOCK_SIZE*sizeof(float)); cudaMalloc((void**)&abc_d, 64*BLOCK_SIZE*sizeof(float)); for (int i=0; i<64; i++) { abc[i] = drand48(); for (int j=0; j<BLOCK_SIZE; j++) abc2[i*BLOCK_SIZE+j] = abc[i]; } // cudaMemcpy(abc_d, abc2, 64*BLOCK_SIZE*sizeof(float), // cudaMemcpyHostToDevice); cudaMemcpy(abc_d, abc, 64*sizeof(float), cudaMemcpyHostToDevice); posix_memalign((void**)&vals, 16, N*sizeof(float)); cudaMalloc((void**)&vals_d, N*sizeof(float)); dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(N/BLOCK_SIZE); int ix=1; int iy=2; int iz=3; clock_t start, end; start = clock(); for (int i=0; i<100000; i++) { eval_multi_UBspline_3d_cuda_c<<<dimGrid,dimBlock>>> (coefs_d, abc_d, vals_d, ix, iy, iz, xs, ys, zs, N); } end = clock(); double time = (double)(end-start)/(double)(CLOCKS_PER_SEC*100000*N); fprintf (stderr, "Evals per second = %1.8e\n", 1.0/time); cudaMemcpy (vals, vals_d, N*sizeof(float), cudaMemcpyDeviceToHost); float vals2[N]; for (int n=0; n<N; n++) { vals2[n] = 0.0; int index=0; for(int i=0; i<4; i++) for (int j=0; j<4; j++) for (int k=0; k<4; k++) { vals2[n] += abc[index] * coefs[(ix+i)*xs+(iy+j)*ys+(iz+k)*zs+n]; index++; } } for (int i=0; i<N/256; i++) fprintf (stderr, "%1.9f %1.9f\n", vals[i], vals2[i]); cudaFree(abc_d); cudaFree(coefs_d); cudaFree(vals_d); } static void * test_multi_cuda(void *thread) { // CUcontext ctx; // CUdevice dev; // cuDeviceGet (&dev, (int)(size_t)thread); // cuCtxCreate(&ctx, CU_CTX_SCHED_YIELD, dev); // int deviceCount; // cudaGetDeviceCount(&deviceCount); CUDA_SAFE_CALL(cudaSetDevice((int)(size_t)thread)); fprintf (stderr, "In thread %p\n", thread); int numWalkers = 2000; float *coefs , __device__ *vals_real[numWalkers], __device__ *vals_imag[numWalkers]; float *coefs_real_d, *coefs_imag_d, __device__ *vals_real_d[numWalkers], __device__ 
*vals_imag_d[numWalkers]; float *r_d, *r_h; int xs, ys, zs, N; int Nx, Ny, Nz; N = 128; Nx = Ny = Nz = 64; xs = Ny*Nz*N; ys = Nz*N; zs = N; float3 drInv; drInv.x = 1.0/float(Nx); drInv.y = 1.0/float(Ny); drInv.z = 1.0/float(Nz); // Setup Bspline coefficients int size = Nx*Ny*Nz*N*sizeof(float); CUT_SAFE_MALLOC(posix_memalign((void**)&coefs, 16, size)); for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) for (int iz=0; iz<Nz; iz++) for (int n=0; n<N; n++) coefs[ix*xs + iy*ys + iz*zs + n] = drand48(); fprintf (stderr, "Filled in coefs.\n"); // Setup values //posix_memalign((void**)&vals, 16, N*sizeof(float)); // cudaMemcpy(r_d, r, numWalkers*sizeof(float3), cudaMemcpyHostToDevice); fprintf (stderr, "size = %d\n", size); // Setup CUDA coefficients fprintf (stderr, "Before first CUDA mallocs.\n"); CUDA_SAFE_CALL(cudaMalloc((void**)&coefs_real_d, size)); CUDA_SAFE_CALL(cudaMalloc((void**)&coefs_imag_d, size)); fprintf (stderr, "Before Memcpy.\n"); CUDA_SAFE_CALL(cudaMemcpy(coefs_real_d, coefs, size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(coefs_imag_d, coefs, size, cudaMemcpyHostToDevice)); fprintf (stderr, "After Memcpy.\n"); // Setup device value storage int numVals = 2*N*numWalkers; float *valBlock_d, *valBlock_h; CUDA_SAFE_CALL(cudaMalloc((void**)&(valBlock_d), numVals*sizeof(float))); CUDA_SAFE_CALL(cudaMallocHost((void**)&(valBlock_h), numVals*sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**)&(vals_real_d), numWalkers*sizeof(float*))); CUDA_SAFE_CALL(cudaMalloc((void**)&(vals_imag_d), numWalkers*sizeof(float*))); fprintf (stderr, "valBlock_d = %p\n", valBlock_d); for (int i=0; i<numWalkers; i++) { vals_real[i] = valBlock_d + 2*i*N; vals_imag[i] = valBlock_d + (2*i+1)*N; } CUDA_SAFE_CALL(cudaMemcpy(vals_real_d, vals_real, numWalkers*sizeof(float*), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(vals_imag_d, vals_imag, numWalkers*sizeof(float*), cudaMemcpyHostToDevice)); fprintf (stderr, "Finished cuda allocations.\n"); // Setup walker positions CUDA_SAFE_CALL(cudaMalloc((void**)&(r_d), 4*numWalkers*sizeof(float))); CUDA_SAFE_CALL(cudaMallocHost((void**)&(r_h), 4*numWalkers*sizeof(float))); for (int ir=0; ir<numWalkers; ir++) { r_h[4*ir+0] = 0.75*drand48(); r_h[4*ir+1] = 0.75*drand48(); r_h[4*ir+2] = 0.75*drand48(); } int3 strides; strides.x = xs; strides.y = ys; strides.z = zs; dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(N/BLOCK_SIZE,numWalkers); clock_t start, end; start = clock(); for (int i=0; i<10000; i++) { if ((i%1000) == 0) fprintf (stderr, "i = %d\n", i); CUDA_SAFE_CALL(cudaMemcpy(r_d, r_h, 4*numWalkers*sizeof(float), cudaMemcpyHostToDevice)); // eval_multi_multi_UBspline_3d_cuda_c<<<dimGrid,dimBlock>>> // (r_d, drInv, coefs_real_d, coefs_imag_d, // vals_real_d, vals_imag_d, strides); eval_multi_multi_UBspline_3d_cuda_c<<<dimGrid,dimBlock>>> (r_d, drInv, coefs_real_d, coefs_imag_d, valBlock_d, valBlock_d+numVals/2, strides); //cudaMemcpy(valBlock_h, valBlock_d, numVals*sizeof(float), cudaMemcpyDeviceToHost); } end = clock(); double time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers); fprintf (stderr, "Evals per second = %1.8e\n", 1.0/time); cudaFree (valBlock_d); cudaFree (vals_real_d); cudaFree (vals_imag_d); cudaFree (coefs_real_d); cudaFree (coefs_imag_d); cudaFree (r_d); return NULL; // cudaMemcpy (vals, vals_d, N*sizeof(float), cudaMemcpyDeviceToHost); // float vals2[N]; // for (int n=0; n<N; n++) { // vals2[n] = 0.0; // int index=0; // for(int i=0; i<4; i++) // for (int j=0; j<4; j++) // for (int k=0; k<4; k++) { // 
vals2[n] += abc[index] * coefs[(ix+i)*xs+(iy+j)*ys+(iz+k)*zs+n]; // index++; // } // } // for (int i=0; i<N/256; i++) // fprintf (stderr, "%1.9f %1.9f\n", vals[i], vals2[i]); // cudaFree(abc_d); // cudaFree(coefs_d); // cudaFree(vals_d); } main() { int deviceCount; cudaGetDeviceCount(&deviceCount); fprintf (stderr, "Detected %d CUDA devices.\n", deviceCount); // test_cuda(); for (int device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); fprintf (stderr, "Device %d:\n", device); fprintf (stderr, " Global memory: %10d\n", deviceProp.totalGlobalMem); fprintf (stderr, " MultiProcessors: %10d\n", deviceProp.multiProcessorCount); fprintf (stderr, " Registers: %10d\n", deviceProp.regsPerBlock); fprintf (stderr, " Constant memory: %10d\n", deviceProp.totalConstMem); } // pthread_t threads[deviceCount]; // for (int device = 0; device < deviceCount; device++) // pthread_create (&(threads[device]), NULL, test_multi_cuda, (void*)device); // cutStartThread((CUT_THREADROUTINE)test_multi_cuda,(void*)device); test_multi_cuda((void*)0); // pthread_exit(NULL); //test_multi_cuda(); }
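// Example reference (not part of the original test code above)
// ------------------------------------------------------------
// A host-side reference for the 64 tricubic interpolation weights
// abc[16*i+4*j+k] = a[i]*b[j]*c[k] that eval_multi_multi_UBspline_3d_cuda_c
// builds in shared memory. 'A_host' is a hypothetical host mirror of the
// __constant__ matrix A (row-major 4x4); it would have to hold the same
// coefficients that are copied to the device elsewhere. Useful for checking
// the kernel's weights for a given fractional coordinate (tx, ty, tz).
void reference_abc_weights (const float A_host[16],
                            float tx, float ty, float tz,
                            float abc_out[64])
{
  // tp[d] = (1, t, t^2, t^3) for each dimension, as in the kernel
  float tp[3][4] = { {1.0f, tx, tx*tx, tx*tx*tx},
                     {1.0f, ty, ty*ty, ty*ty*ty},
                     {1.0f, tz, tz*tz, tz*tz*tz} };
  float a[4], b[4], c[4];
  for (int row=0; row<4; row++) {
    a[row] = A_host[4*row+0]*tp[0][0] + A_host[4*row+1]*tp[0][1]
           + A_host[4*row+2]*tp[0][2] + A_host[4*row+3]*tp[0][3];
    b[row] = A_host[4*row+0]*tp[1][0] + A_host[4*row+1]*tp[1][1]
           + A_host[4*row+2]*tp[1][2] + A_host[4*row+3]*tp[1][3];
    c[row] = A_host[4*row+0]*tp[2][0] + A_host[4*row+1]*tp[2][1]
           + A_host[4*row+2]*tp[2][2] + A_host[4*row+3]*tp[2][3];
  }
  // same flattening as the kernel: thread thr maps to
  // i = (thr>>4)&3, j = (thr>>2)&3, k = thr&3, so abc[thr] = a[i]*b[j]*c[k]
  for (int i=0; i<4; i++)
    for (int j=0; j<4; j++)
      for (int k=0; k<4; k++)
        abc_out[16*i + 4*j + k] = a[i]*b[j]*c[k];
}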
namespace cgbn { template<class env> __device__ __forceinline__ void core_t<env>::sqrt_resolve_rem(uint32_t rem[LIMBS], const uint32_t s[LIMBS], const uint32_t top, const uint32_t r[LIMBS], const uint32_t shift) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t mask, phi[LIMBS], plo[LIMBS], t, s0[LIMBS]; // remainder computation: r'=(2*s*(s mod b)+r)/b^2 // where s=square root, r=remainder, b=2^shift. mask=(1<<(shift & 0x1F))-1; #pragma unroll for(int32_t index=0;index<LIMBS;index++) if(index*32+32<=shift) s0[index]=__shfl_sync(sync, s[index], 0, TPI); else if(index*32>shift) s0[index]=0; else s0[index]=__shfl_sync(sync, s[index], 0, TPI) & mask; mpadd<LIMBS>(s0, s0, s0); mpmul<LIMBS>(plo, phi, s, s0); chain_t<> chain; #pragma unroll for(int32_t index=0;index<LIMBS;index++) plo[index]=chain.add(plo[index], r[index]); t=(group_thread==TPI-1) ? top : 0; phi[0]=chain.add(phi[0], t); #pragma unroll for(int32_t index=1;index<LIMBS;index++) phi[index]=chain.add(phi[index], 0); #pragma unroll for(int32_t index=0;index<LIMBS;index++) phi[index]=__shfl_sync(sync, phi[index], threadIdx.x-1, TPI); t=0; if(group_thread!=0) { t=mpadd<LIMBS>(plo, plo, phi); mpzero<LIMBS>(phi); } t=fast_propagate_add(t, plo); if(group_thread==0) mpadd32<LIMBS>(phi, phi, t); bitwise_mask_select(plo, plo, phi, shift*2); rotate_right(rem, plo, shift*2); } template<class env> __device__ __forceinline__ void core_t<env>::sqrt(uint32_t s[LIMBS], const uint32_t x[LIMBS], const uint32_t numthreads) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t dlo[DLIMBS], dhi[DLIMBS], rem[DLIMBS], dtemp[DLIMBS], approx[DLIMBS], t, c; uint32_t remainder[LIMBS], q[LIMBS], plo[LIMBS], phi[LIMBS]; int32_t top; dlimbs_scatter(dlo, x, TPI-2); dlimbs_scatter(dhi, x, TPI-1); top=dlimbs_sqrt_rem_wide(dtemp, rem, dlo, dhi); dlimbs_approximate(approx, dtemp); // set up remainder #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_up_sync(sync, x[index], 1, TPI); remainder[index]=(group_thread==0) ? 0 : t; } dlimbs_gather(remainder, rem, TPI-1); // initialize s to be 2 * divisor, silent 1 at top of s t=__shfl_up_sync(sync, dtemp[DLIMBS-1], 1, TPI); t=(group_thread==0) ? 0 : t; mpleft<DLIMBS>(dtemp, dtemp, 1, t); mpzero<LIMBS>(s); dlimbs_gather(s, dtemp, TPI-1); // silent 1 at top of s #pragma nounroll for(int32_t index=TPI-2;index>=(int32_t)(TPI-numthreads);index--) { dlimbs_scatter(dtemp, remainder, TPI-1); dlimbs_sqrt_estimate(dtemp, top, dtemp, approx); dlimbs_all_gather(q, dtemp); if(group_thread==index) mpset<LIMBS>(s, q); // compute low/high mpmul<LIMBS>(plo, phi, s, q); // double q in s c=0; if(group_thread==index) c=mpadd<LIMBS>(s, s, q); c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; c=mpsub<LIMBS>(remainder, remainder, phi); top=__shfl_sync(sync, remainder[0], TPI-1, TPI) - q[0]; // we subtract q[0] because of the silent 1 in s #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) // shuffle remainder up by 1 remainder[limb]=__shfl_up_sync(sync, remainder[limb], 1, TPI); c=__shfl_up_sync(sync, c, 1, TPI); // shuffle carry up by 1 c=(group_thread==0) ? 
0 : c; c=c+mpsub<LIMBS>(remainder, remainder, plo); top=top+resolve_sub(c, remainder); while(top<0) { c=0; if(group_thread==index) { // decrement s by 2, if we borrow, need to resolve in next thread c=mpsub32<LIMBS>(s, s, 2); } c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; add_cc(group_thread==index, 0xFFFFFFFF); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) remainder[limb]=addc_cc(remainder[limb], s[limb]); c=addc(0, 0); top=top+1+fast_propagate_add(c, remainder); } } t=__shfl_down_sync(sync, s[0], 1, TPI); t=(group_thread==TPI-1) ? 1 : t; mpright<LIMBS>(s, s, 1, t); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) s[limb]=__shfl_sync(sync, s[limb], threadIdx.x-numthreads, TPI); } template<class env> __device__ __forceinline__ void core_t<env>::sqrt_rem(uint32_t s[LIMBS], uint32_t r[LIMBS], const uint32_t x[LIMBS], const uint32_t numthreads) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t dlo[DLIMBS], dhi[DLIMBS], rem[DLIMBS], dtemp[DLIMBS], approx[DLIMBS], t, c; uint32_t remainder[LIMBS], q[LIMBS], plo[LIMBS], phi[LIMBS]; int32_t top; dlimbs_scatter(dlo, x, TPI-2); dlimbs_scatter(dhi, x, TPI-1); top=dlimbs_sqrt_rem_wide(dtemp, rem, dlo, dhi); dlimbs_approximate(approx, dtemp); // set up remainder #pragma unroll for(int32_t index=0;index<LIMBS;index++) { t=__shfl_up_sync(sync, x[index], 1, TPI); remainder[index]=(group_thread==0) ? 0 : t; } dlimbs_gather(remainder, rem, TPI-1); // initialize s to be 2 * divisor, silent 1 at top of s t=__shfl_up_sync(sync, dtemp[DLIMBS-1], 1, TPI); t=(group_thread==0) ? 0 : t; mpleft<DLIMBS>(dtemp, dtemp, 1, t); mpzero<LIMBS>(s); dlimbs_gather(s, dtemp, TPI-1); // silent 1 at top of s #pragma nounroll for(int32_t index=TPI-2;index>=(int32_t)(TPI-numthreads);index--) { dlimbs_scatter(dtemp, remainder, TPI-1); dlimbs_sqrt_estimate(dtemp, top, dtemp, approx); dlimbs_all_gather(q, dtemp); if(group_thread==index) mpset<LIMBS>(s, q); // compute low/high mpmul<LIMBS>(plo, phi, s, q); // double q in s c=0; if(group_thread==index) c=mpadd<LIMBS>(s, s, q); c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; c=mpsub<LIMBS>(remainder, remainder, phi); top=__shfl_sync(sync, remainder[0], TPI-1, TPI) - q[0]; // we subtract q[0] because of the silent 1 in s #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) // shuffle remainder up by 1 remainder[limb]=__shfl_up_sync(sync, remainder[limb], 1, TPI); c=__shfl_up_sync(sync, c, 1, TPI); // shuffle carry up by 1 c=(group_thread==0) ? 0 : c; c=c+mpsub<LIMBS>(remainder, remainder, plo); top=top+resolve_sub(c, remainder); while(top<0) { c=0; if(group_thread==index) { // decrement s by 2, if we borrow, need to resolve in next thread c=mpsub32<LIMBS>(s, s, 2); } c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; add_cc(group_thread==index, 0xFFFFFFFF); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) remainder[limb]=addc_cc(remainder[limb], s[limb]); c=addc(0, 0); top=top+1+fast_propagate_add(c, remainder); } } t=__shfl_down_sync(sync, s[0], 1, TPI); t=(group_thread==TPI-1) ? 
1 : t; mpright<LIMBS>(s, s, 1, t); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) s[limb]=__shfl_sync(sync, s[limb], threadIdx.x-numthreads, TPI); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) r[limb]=__shfl_sync(sync, remainder[limb], threadIdx.x-numthreads, TPI); if(group_thread>=numthreads) mpzero<LIMBS>(r); if(group_thread==numthreads) r[0]=top; } template<class env> __device__ __forceinline__ void core_t<env>::sqrt_wide(uint32_t s[LIMBS], const uint32_t lo[LIMBS], const uint32_t hi[LIMBS], const uint32_t numthreads) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t dlo[DLIMBS], dhi[DLIMBS], rem[DLIMBS], dtemp[DLIMBS], approx[DLIMBS], t, c; uint32_t remainder[LIMBS], q[LIMBS], plo[LIMBS], phi[LIMBS]; int32_t top; dlimbs_scatter(dlo, hi, TPI-2); dlimbs_scatter(dhi, hi, TPI-1); top=dlimbs_sqrt_rem_wide(dtemp, rem, dlo, dhi); dlimbs_approximate(approx, dtemp); // set up remainder #pragma unroll for(int32_t index=0;index<LIMBS;index++) { remainder[index]=(group_thread==TPI-1) ? lo[index] : hi[index]; remainder[index]=__shfl_sync(sync, remainder[index], threadIdx.x-1, TPI); } dlimbs_gather(remainder, rem, TPI-1); // initialize s to be 2 * divisor, silent 1 at top of s t=__shfl_up_sync(sync, dtemp[DLIMBS-1], 1, TPI); t=(group_thread==0) ? 0 : t; mpleft<DLIMBS>(dtemp, dtemp, 1, t); mpzero<LIMBS>(s); dlimbs_gather(s, dtemp, TPI-1); // silent 1 at top of s #pragma nounroll for(int32_t index=TPI-2;index>=(int32_t)(TPI-numthreads);index--) { dlimbs_scatter(dtemp, remainder, TPI-1); dlimbs_sqrt_estimate(dtemp, top, dtemp, approx); dlimbs_all_gather(q, dtemp); if(group_thread==index) mpset<LIMBS>(s, q); // compute low/high mpmul<LIMBS>(plo, phi, s, q); // double q in s c=0; if(group_thread==index) c=mpadd<LIMBS>(s, s, q); c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; c=mpsub<LIMBS>(remainder, remainder, phi); top=__shfl_sync(sync, remainder[0], TPI-1, TPI) - q[0]; // we subtract q[0] because of the silent 1 in s #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) { // shuffle remainder up by 1 t=__shfl_sync(sync, lo[limb], index, TPI); remainder[limb]=(group_thread==TPI-1) ? t : remainder[limb]; remainder[limb]=__shfl_sync(sync, remainder[limb], threadIdx.x-1, TPI); } c=__shfl_up_sync(sync, c, 1, TPI); // shuffle carry up by 1 c=(group_thread==0) ? 0 : c; c=c+mpsub<LIMBS>(remainder, remainder, plo); top=top+resolve_sub(c, remainder); while(top<0) { c=0; if(group_thread==index) { // decrement s by 2, if we borrow, need to resolve in next thread c=mpsub32<LIMBS>(s, s, 2); } c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; add_cc(group_thread==index, 0xFFFFFFFF); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) remainder[limb]=addc_cc(remainder[limb], s[limb]); c=addc(0, 0); top=top+1+fast_propagate_add(c, remainder); } } t=__shfl_down_sync(sync, s[0], 1, TPI); t=(group_thread==TPI-1) ? 
1 : t; mpright<LIMBS>(s, s, 1, t); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) s[limb]=__shfl_sync(sync, s[limb], threadIdx.x-numthreads, TPI); } template<class env> __device__ __forceinline__ uint32_t core_t<env>::sqrt_rem_wide(uint32_t s[LIMBS], uint32_t r[LIMBS], const uint32_t lo[LIMBS], const uint32_t hi[LIMBS], const uint32_t numthreads) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t dlo[DLIMBS], dhi[DLIMBS], rem[DLIMBS], dtemp[DLIMBS], approx[DLIMBS], t, c; uint32_t remainder[LIMBS], q[LIMBS], plo[LIMBS], phi[LIMBS]; int32_t top; dlimbs_scatter(dlo, hi, TPI-2); dlimbs_scatter(dhi, hi, TPI-1); top=dlimbs_sqrt_rem_wide(dtemp, rem, dlo, dhi); dlimbs_approximate(approx, dtemp); // set up remainder #pragma unroll for(int32_t index=0;index<LIMBS;index++) { remainder[index]=(group_thread==TPI-1) ? lo[index] : hi[index]; remainder[index]=__shfl_sync(sync, remainder[index], threadIdx.x-1, TPI); } dlimbs_gather(remainder, rem, TPI-1); // initialize s to be 2 * divisor, silent 1 at top of s t=__shfl_up_sync(sync, dtemp[DLIMBS-1], 1, TPI); t=(group_thread==0) ? 0 : t; mpleft<DLIMBS>(dtemp, dtemp, 1, t); mpzero<LIMBS>(s); dlimbs_gather(s, dtemp, TPI-1); // silent 1 at top of s #pragma nounroll for(int32_t index=TPI-2;index>=(int32_t)(TPI-numthreads);index--) { dlimbs_scatter(dtemp, remainder, TPI-1); dlimbs_sqrt_estimate(dtemp, top, dtemp, approx); dlimbs_all_gather(q, dtemp); if(group_thread==index) mpset<LIMBS>(s, q); // compute low/high mpmul<LIMBS>(plo, phi, s, q); // double q in s c=0; if(group_thread==index) c=mpadd<LIMBS>(s, s, q); c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; c=mpsub<LIMBS>(remainder, remainder, phi); top=__shfl_sync(sync, remainder[0], TPI-1, TPI) - q[0]; // we subtract q[0] because of the silent 1 in s #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) { // shuffle remainder up by 1 t=__shfl_sync(sync, lo[limb], index, TPI); remainder[limb]=(group_thread==TPI-1) ? t : remainder[limb]; remainder[limb]=__shfl_sync(sync, remainder[limb], threadIdx.x-1, TPI); } c=__shfl_up_sync(sync, c, 1, TPI); // shuffle carry up by 1 c=(group_thread==0) ? 0 : c; c=c+mpsub<LIMBS>(remainder, remainder, plo); top=top+resolve_sub(c, remainder); while(top<0) { c=0; if(group_thread==index) { // decrement s by 2, if we borrow, need to resolve in next thread c=mpsub32<LIMBS>(s, s, 2); } c=__shfl_sync(sync, c, threadIdx.x-1, TPI); s[0]=s[0]+c; add_cc(group_thread==index, 0xFFFFFFFF); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) remainder[limb]=addc_cc(remainder[limb], s[limb]); c=addc(0, 0); top=top+1+fast_propagate_add(c, remainder); } } t=__shfl_down_sync(sync, s[0], 1, TPI); t=(group_thread==TPI-1) ? 1 : t; mpright<LIMBS>(s, s, 1, t); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) s[limb]=__shfl_sync(sync, s[limb], threadIdx.x-numthreads, TPI); #pragma unroll for(int32_t limb=0;limb<LIMBS;limb++) r[limb]=__shfl_sync(sync, remainder[limb], threadIdx.x-numthreads, TPI); if(group_thread>=numthreads) mpzero<LIMBS>(r); if(group_thread==numthreads) r[0]=top; return (numthreads==TPI) ? top : 0; } } /* namespace cgbn */
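// Illustrative sketch (not part of CGBN)
// --------------------------------------
// A scalar, bit-at-a-time integer square root with remainder. It follows the
// same "estimate a piece of the root, subtract q*(2*s + q), append the digit"
// bookkeeping that core_t<env>::sqrt_rem above performs on multi-limb values
// distributed across TPI threads, just in its simplest single-word form
// (here no downward correction loop is needed because each estimated bit is exact).
inline unsigned long long scalar_sqrt_rem(unsigned long long x, unsigned long long &rem) {
  unsigned long long s=0, bit=1ull<<62;   // highest power of four that fits in 64 bits
  while(bit>x) bit>>=2;
  rem=x;
  while(bit!=0) {
    if(rem>=s+bit) {                      // next root bit is 1
      rem-=s+bit;                         // subtract q*(2*s+q) with q==bit
      s=(s>>1)+bit;
    }
    else
      s>>=1;                              // next root bit is 0
    bit>>=2;
  }
  return s;                               // s*s + rem == x, with 0 <= rem <= 2*s
}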
#define RAD 1 // radius of the stencil; helps to deal with "boundary conditions" at (thread) block's ends __constant__ float dev_Deltat[1]; __constant__ float dev_heat_params[2]; int blocksNeeded( int N_i, int M_i) { return (N_i+M_i-1)/M_i; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n);} __device__ int idxClip( int idx, int idxMax) { return idx > (idxMax - 1) ? (idxMax - 1): (idx < 0 ? 0 : idx); } __device__ int flatten(int col, int row, int z, int width, int height, int depth) { return idxClip(col, width) + idxClip(row,height)*width + idxClip(z,depth)*width*height; } __global__ void resetKernel(float *d_temp, BC bc) { const int k_x = blockIdx.x*blockDim.x + threadIdx.x; const int k_y = blockIdx.y*blockDim.y + threadIdx.y; const int k_z = blockIdx.z*blockDim.z + threadIdx.z; if ((k_x >= dev_Ld[0]) || (k_y >= dev_Ld[1]) || (k_z >= dev_Ld[2])) return; d_temp[k_z*dev_Ld[0]*dev_Ld[1] + k_y*dev_Ld[0] + k_x] = bc.t_a; } __global__ void tempKernel(float *d_temp, BC bc) { constexpr int NUS = 1; constexpr int radius = NUS; extern __shared__ float s_in[]; // global indices const int k_x = threadIdx.x + blockDim.x * blockIdx.x; const int k_y = threadIdx.y + blockDim.y * blockIdx.y; const int k_z = threadIdx.z + blockDim.z * blockIdx.z; if ((k_x >= dev_Ld[0] ) || (k_y >= dev_Ld[1] ) || (k_z >= dev_Ld[2])) return; const int k = flatten(k_x, k_y, k_z, dev_Ld[0], dev_Ld[1],dev_Ld[2]); // local width and height const int3 S = { static_cast<int>(blockDim.x + 2 * radius), static_cast<int>(blockDim.y + 2 * radius), static_cast<int>(blockDim.z + 2 * radius) }; // local indices const int s_x = threadIdx.x + radius; const int s_y = threadIdx.y + radius; const int s_z = threadIdx.z + radius; const int s_k = flatten(s_x, s_y, s_z, S.x, S.y, S.z); // assign default color values for d_out (black) // Load regular cells s_in[s_k] = d_temp[k]; // Load halo cells if (threadIdx.x < radius ) { s_in[flatten(s_x - radius, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x - radius, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x + blockDim.x, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x + blockDim.x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.y < radius) { s_in[flatten(s_x, s_y - radius, s_z,S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y - radius, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y + blockDim.y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y + blockDim.y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.z < radius) { s_in[flatten(s_x, s_y, s_z - radius, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y, k_z - radius, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y, s_z + blockDim.z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y , k_z + blockDim.z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } // Calculate squared distance from pipe center float dSq = ((k_x - bc.x)*(k_x - bc.x) + (k_y - bc.y)*(k_y - bc.y) + (k_z - dev_Ld[2]/2)*(k_z - dev_Ld[2]/2) // this can be changed manually, to place the "pipe" source in the "middle" of the z-axis ); // If inside pipe, set temp to t_s and return if (dSq < bc.rad*bc.rad) { d_temp[k] = bc.t_s; return; } /* // If outside plate, set temp to t_a and return if ((k_x == 0 ) || (k_x == dev_Ld[0] - 1) || (k_y == 0 ) || (k_x + k_y < bc.chamfer) || (k_x - k_y > dev_Ld[0] - bc.chamfer)) { d_temp[k] = bc.t_a; return; }*/ // boundary conditions, BC, for "sides" if ((k_y == 0) || (k_y == dev_Ld[1]-1) || (k_z == 0) || (k_z == dev_Ld[2]-1) ) { d_temp[k] = bc.t_a; return; } /* // If point is below ground, set temp to t_g and 
return if (k_y == dev_Ld[1] - 1) { d_temp[k] = bc.t_g; return; } */ // If point is in front of inlet, set temp to t_g and return if (k_x == 0) { d_temp[k] = bc.t_g; return; } __syncthreads(); // For all the remaining points, find temperature. float3 stencil[NUS][2] ; const float centerval { s_in[ s_k] }; for (int nu = 0; nu < NUS; ++nu) { stencil[nu][0].x = s_in[flatten(s_x-(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][1].x = s_in[flatten(s_x+(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][0].y = s_in[flatten(s_x,s_y-(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][1].y = s_in[flatten(s_x,s_y+(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][0].z = s_in[flatten(s_x,s_y,s_z-(nu+1),S.x,S.y,S.z)] ; stencil[nu][1].z = s_in[flatten(s_x,s_y,s_z+(nu+1),S.x,S.y,S.z)] ; } float tempval { dev_lap1( centerval, stencil ) }; __syncthreads(); d_temp[k] += dev_Deltat[0]*(dev_heat_params[0]/dev_heat_params[1])*tempval; } __global__ void tempKernel2(float *d_temp, BC bc) { constexpr int NUS = 2; constexpr int radius = NUS; extern __shared__ float s_in[]; // global indices const int k_x = threadIdx.x + blockDim.x * blockIdx.x; const int k_y = threadIdx.y + blockDim.y * blockIdx.y; const int k_z = threadIdx.z + blockDim.z * blockIdx.z; if ((k_x >= dev_Ld[0] ) || (k_y >= dev_Ld[1] ) || (k_z >= dev_Ld[2])) return; const int k = flatten(k_x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2]); // local width and height const int3 S { static_cast<int>(blockDim.x + 2 * radius) , static_cast<int>(blockDim.y + 2 * radius), static_cast<int>(blockDim.z + 2 * radius) } ; // local indices const int s_x = threadIdx.x + radius; const int s_y = threadIdx.y + radius; const int s_z = threadIdx.z + radius; const int s_k = flatten(s_x, s_y, s_z, S.x, S.y, S.z); // assign default color values for d_out (black) // Load regular cells s_in[s_k] = d_temp[k]; // Load halo cells if (threadIdx.x < radius ) { s_in[flatten(s_x - radius, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x - radius, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x + blockDim.x, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x + blockDim.x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.y < radius) { s_in[flatten(s_x, s_y - radius, s_z,S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y - radius, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y + blockDim.y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y + blockDim.y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.z < radius) { s_in[flatten(s_x, s_y, s_z - radius, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y, k_z - radius, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y, s_z + blockDim.z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y , k_z + blockDim.z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } // Calculate squared distance from pipe center float dSq = ((k_x - bc.x)*(k_x - bc.x) + (k_y - bc.y)*(k_y - bc.y) + (k_z - dev_Ld[2]/2)*(k_z - dev_Ld[2]/2) // this can be changed manually, to place the "pipe" source in the "middle" of the z-axis ); // If inside pipe, set temp to t_s and return if (dSq < bc.rad*bc.rad) { d_temp[k] = bc.t_s; return; } // boundary conditions, BC, for "sides" if ((k_y == 0) || (k_y == dev_Ld[1]-1) || (k_z == 0) || (k_z == dev_Ld[2]-1) ) { d_temp[k] = bc.t_a; return; } // If point is in front of inlet, set temp to t_g and return if (k_x == 0) { d_temp[k] = bc.t_g; return; } __syncthreads(); // For all the remaining points, find temperature. 
float3 stencil[NUS][2] ; const float centerval { s_in[s_k ] }; for (int nu = 0; nu < NUS; ++nu) { stencil[nu][0].x = s_in[flatten(s_x-(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][1].x = s_in[flatten(s_x+(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][0].y = s_in[flatten(s_x,s_y-(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][1].y = s_in[flatten(s_x,s_y+(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][0].z = s_in[flatten(s_x,s_y,s_z-(nu+1),S.x,S.y,S.z)] ; stencil[nu][1].z = s_in[flatten(s_x,s_y,s_z+(nu+1),S.x,S.y,S.z)] ; } float tempval { dev_lap2( centerval, stencil ) }; __syncthreads(); d_temp[k] += dev_Deltat[0]*(dev_heat_params[0]/dev_heat_params[1])*tempval; } __global__ void tempKernel3(float *d_temp, BC bc) { constexpr int NUS = 3; constexpr int radius = NUS; extern __shared__ float s_in[]; // global indices const int k_x = threadIdx.x + blockDim.x * blockIdx.x; const int k_y = threadIdx.y + blockDim.y * blockIdx.y; const int k_z = threadIdx.z + blockDim.z * blockIdx.z; if ((k_x >= dev_Ld[0] ) || (k_y >= dev_Ld[1] ) || (k_z >= dev_Ld[2])) return; const int k = flatten(k_x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2]); // local width and height const int3 S { static_cast<int>(blockDim.x + 2 * radius) , static_cast<int>(blockDim.y + 2 * radius), static_cast<int>(blockDim.z + 2 * radius) } ; // local indices const int s_x = threadIdx.x + radius; const int s_y = threadIdx.y + radius; const int s_z = threadIdx.z + radius; const int s_k = flatten(s_x, s_y, s_z, S.x, S.y, S.z); // assign default color values for d_out (black) // Load regular cells s_in[s_k] = d_temp[k]; // Load halo cells if (threadIdx.x < radius ) { s_in[flatten(s_x - radius, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x - radius, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x + blockDim.x, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x + blockDim.x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.y < radius) { s_in[flatten(s_x, s_y - radius, s_z,S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y - radius, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y + blockDim.y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y + blockDim.y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.z < radius) { s_in[flatten(s_x, s_y, s_z - radius, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y, k_z - radius, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y, s_z + blockDim.z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y , k_z + blockDim.z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } // Calculate squared distance from pipe center float dSq = ((k_x - bc.x)*(k_x - bc.x) + (k_y - bc.y)*(k_y - bc.y) + (k_z - dev_Ld[2]/2)*(k_z - dev_Ld[2]/2) // this can be changed manually, to place the "pipe" source in the "middle" of the z-axis ); // If inside pipe, set temp to t_s and return if (dSq < bc.rad*bc.rad) { d_temp[k] = bc.t_s; return; } // boundary conditions, BC, for "sides" if ((k_y == 0) || (k_y == dev_Ld[1]-1) || (k_z == 0) || (k_z == dev_Ld[2]-1) ) { d_temp[k] = bc.t_a; return; } // If point is in front of inlet, set temp to t_g and return if (k_x == 0) { d_temp[k] = bc.t_g; return; } __syncthreads(); // For all the remaining points, find temperature. 
float3 stencil[NUS][2] ; const float centerval { s_in[s_k ] }; for (int nu = 0; nu < NUS; ++nu) { stencil[nu][0].x = s_in[flatten(s_x-(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][1].x = s_in[flatten(s_x+(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][0].y = s_in[flatten(s_x,s_y-(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][1].y = s_in[flatten(s_x,s_y+(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][0].z = s_in[flatten(s_x,s_y,s_z-(nu+1),S.x,S.y,S.z)] ; stencil[nu][1].z = s_in[flatten(s_x,s_y,s_z+(nu+1),S.x,S.y,S.z)] ; } float tempval { dev_lap3( centerval, stencil ) }; __syncthreads(); d_temp[k] += dev_Deltat[0]*(dev_heat_params[0]/dev_heat_params[1])*tempval; } __global__ void tempKernel4(float *d_temp, BC bc) { constexpr int NUS = 4; constexpr int radius = NUS; extern __shared__ float s_in[]; // global indices const int k_x = threadIdx.x + blockDim.x * blockIdx.x; const int k_y = threadIdx.y + blockDim.y * blockIdx.y; const int k_z = threadIdx.z + blockDim.z * blockIdx.z; if ((k_x >= dev_Ld[0] ) || (k_y >= dev_Ld[1] ) || (k_z >= dev_Ld[2])) return; const int k = flatten(k_x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2]); // local width and height const int3 S { static_cast<int>(blockDim.x + 2 * radius) , static_cast<int>(blockDim.y + 2 * radius) , static_cast<int>(blockDim.z + 2 * radius) } ; // local indices const int s_x = threadIdx.x + radius; const int s_y = threadIdx.y + radius; const int s_z = threadIdx.z + radius; const int s_k = flatten(s_x, s_y, s_z, S.x, S.y, S.z); // assign default color values for d_out (black) // Load regular cells s_in[s_k] = d_temp[k]; // Load halo cells if (threadIdx.x < radius ) { s_in[flatten(s_x - radius, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x - radius, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x + blockDim.x, s_y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x + blockDim.x, k_y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.y < radius) { s_in[flatten(s_x, s_y - radius, s_z,S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y - radius, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y + blockDim.y, s_z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y + blockDim.y, k_z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } if (threadIdx.z < radius) { s_in[flatten(s_x, s_y, s_z - radius, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y, k_z - radius, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; s_in[flatten(s_x, s_y, s_z + blockDim.z, S.x, S.y, S.z)] = d_temp[flatten(k_x, k_y , k_z + blockDim.z, dev_Ld[0], dev_Ld[1], dev_Ld[2])]; } // Calculate squared distance from pipe center float dSq = ((k_x - bc.x)*(k_x - bc.x) + (k_y - bc.y)*(k_y - bc.y) + (k_z - dev_Ld[2]/2)*(k_z - dev_Ld[2]/2) // this can be changed manually, to place the "pipe" source in the "middle" of the z-axis ); // If inside pipe, set temp to t_s and return if (dSq < bc.rad*bc.rad) { d_temp[k] = bc.t_s; return; } // boundary conditions, BC, for "sides" if ((k_y == 0) || (k_y == dev_Ld[1]-1) || (k_z == 0) || (k_z == dev_Ld[2]-1) ) { d_temp[k] = bc.t_a; return; } // If point is in front of inlet, set temp to t_g and return if (k_x == 0) { d_temp[k] = bc.t_g; return; } __syncthreads(); // For all the remaining points, find temperature. 
float3 stencil[NUS][2] ; const float centerval { s_in[s_k ] }; for (int nu = 0; nu < NUS; ++nu) { stencil[nu][0].x = s_in[flatten(s_x-(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][1].x = s_in[flatten(s_x+(nu+1),s_y,s_z,S.x,S.y,S.z)] ; stencil[nu][0].y = s_in[flatten(s_x,s_y-(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][1].y = s_in[flatten(s_x,s_y+(nu+1),s_z,S.x,S.y,S.z)] ; stencil[nu][0].z = s_in[flatten(s_x,s_y,s_z-(nu+1),S.x,S.y,S.z)] ; stencil[nu][1].z = s_in[flatten(s_x,s_y,s_z+(nu+1),S.x,S.y,S.z)] ; } float tempval { dev_lap4( centerval, stencil ) }; __syncthreads(); d_temp[k] += dev_Deltat[0]*(dev_heat_params[0]/dev_heat_params[1])*tempval; } __global__ void float_to_char( uchar4* dev_out, const float* outSrc) { const int k_x = threadIdx.x + blockDim.x * blockIdx.x; const int k_y = threadIdx.y + blockDim.y * blockIdx.y; // choose at which z coordinate to make the slice in x-y plane const int zcoordslice = dev_Ld[2]/2*1; const int k = k_x + k_y * blockDim.x*gridDim.x ; const int fulloffset = k + zcoordslice*blockDim.x*gridDim.x*blockDim.y*gridDim.y; dev_out[k].x = 0; dev_out[k].z = 0; dev_out[k].y = 0; dev_out[k].w = 255; const unsigned char intensity = clip((int) outSrc[fulloffset] ) ; dev_out[k].x = intensity ; // higher temp -> more red dev_out[k].z = 255 - intensity ; // lower temp -> more blue } void kernelLauncher(uchar4 *d_out, float *d_temp, dim3 Ld, BC bc, dim3 M_in) { const dim3 gridSize(blocksNeeded(Ld.x, M_in.x), blocksNeeded(Ld.y, M_in.y), blocksNeeded(Ld.z,M_in.z)); const size_t smSz = (M_in.x + 2 * RAD)*(M_in.y + 2 * RAD)*(M_in.z + 2 * RAD)*sizeof(float); tempKernel<<<gridSize, M_in, smSz>>>(d_temp, bc); const dim3 out_gridSize( gridSize.x, gridSize.y ); const dim3 out_M( M_in.x, M_in.y ); float_to_char<<<out_gridSize,out_M>>>(d_out, d_temp) ; } void kernelLauncher2(uchar4 *d_out, float *d_temp, dim3 Ld, BC bc, dim3 M_in) { constexpr int radius { 2 }; const dim3 gridSize(blocksNeeded(Ld.x, M_in.x), blocksNeeded(Ld.y, M_in.y), blocksNeeded(Ld.z,M_in.z)); const size_t smSz = (M_in.x + 2 * radius)*(M_in.y + 2 * radius)*(M_in.z + 2*radius)*sizeof(float); tempKernel2<<<gridSize, M_in, smSz>>>(d_temp, bc); const dim3 out_gridSize( gridSize.x, gridSize.y ); const dim3 out_M( M_in.x, M_in.y ); float_to_char<<<out_gridSize,out_M>>>(d_out, d_temp) ; } void kernelLauncher3(uchar4 *d_out, float *d_temp, dim3 Ld, BC bc, dim3 M_in) { constexpr int radius { 3 }; const dim3 gridSize(blocksNeeded(Ld.x, M_in.x), blocksNeeded(Ld.y, M_in.y), blocksNeeded(Ld.z, M_in.z)); const size_t smSz = (M_in.x + 2 * radius)*(M_in.y + 2 * radius)*(M_in.z + 2*radius)*sizeof(float); tempKernel3<<<gridSize, M_in, smSz>>>(d_temp, bc); const dim3 out_gridSize( gridSize.x, gridSize.y ); const dim3 out_M( M_in.x, M_in.y ); float_to_char<<<out_gridSize,out_M>>>(d_out, d_temp) ; } void kernelLauncher4(uchar4 *d_out, float *d_temp, dim3 Ld, BC bc, dim3 M_in) { constexpr int radius { 4 }; const dim3 gridSize(blocksNeeded(Ld.x, M_in.x), blocksNeeded(Ld.y, M_in.y) , blocksNeeded(Ld.z, M_in.z)); const size_t smSz = (M_in.x + 2 * radius)*(M_in.y + 2 * radius)*(M_in.z + 2*radius)*sizeof(float); tempKernel4<<<gridSize, M_in, smSz>>>(d_temp, bc); const dim3 out_gridSize( gridSize.x, gridSize.y ); const dim3 out_M( M_in.x, M_in.y ); float_to_char<<<out_gridSize,out_M>>>(d_out, d_temp) ; } void resetTemperature(float *d_temp, dim3 Ld, BC bc, dim3 M_in) { const dim3 gridSize( blocksNeeded(Ld.x, M_in.x), blocksNeeded( Ld.y, M_in.y), blocksNeeded(Ld.z, M_in.z)); resetKernel<<<gridSize, M_in>>>(d_temp,bc); }
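// Example setup sketch (not part of the original launchers above)
// ---------------------------------------------------------------
// A host-side sketch of how the __constant__ symbols declared at the top of
// this file could be filled before calling resetTemperature()/kernelLauncher().
// The host names Deltat, kappa and c_rho are hypothetical; reading
// dev_heat_params[0]/dev_heat_params[1] as a diffusivity-like ratio is an
// assumption based on how the kernels use it, and dev_Ld is assumed to be
// initialized elsewhere in the same way.
void setHeatConstants(const float Deltat, const float kappa, const float c_rho)
{
	// time step used in the explicit update d_temp[k] += dev_Deltat[0] * ...
	cudaMemcpyToSymbol(dev_Deltat, &Deltat, sizeof(float));

	// the kernels only use the ratio dev_heat_params[0] / dev_heat_params[1]
	const float heat_params[2] { kappa, c_rho };
	cudaMemcpyToSymbol(dev_heat_params, heat_params, 2 * sizeof(float));
}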
#include "object.h" #include <cmath> #include "sim.h" #ifdef GRAPHICS #define GLM_FORCE_PURE #endif namespace titan { #ifdef GRAPHICS const Vec RED(1.0, 0.2, 0.2); const Vec GREEN(0.2, 1.0, 0.2); const Vec BLUE(0.2, 0.2, 1.0); const Vec PURPLE(0.5, 0.2, 0.5); #endif __device__ const double NORMAL = 20000; // normal force coefficient for contact constaints #ifdef CONSTRAINTS void Container::addConstraint(CONSTRAINT_TYPE type, const Vec & v, double d) { for (Mass * m : masses) { m -> addConstraint(type, v, d); } } void Container::clearConstraints() { for (Mass * m : masses) { m -> clearConstraints(); } } #endif CUDA_CALLABLE_MEMBER CudaBall::CudaBall(const Vec & center, double radius) { _center = center; _radius = radius; } CUDA_CALLABLE_MEMBER CudaBall::CudaBall(const Ball & b) { _center = b._center; _radius = b._radius; } CUDA_CALLABLE_MEMBER void CudaBall::applyForce(CUDA_MASS * m) { double dist = (m -> pos - _center).norm(); m -> force += (dist <= _radius) ? NORMAL * (m -> pos - _center) / dist : Vec(0, 0, 0); } CUDA_CALLABLE_MEMBER CudaContactPlane::CudaContactPlane(const Vec & normal, double offset) { _normal = normal / normal.norm(); _offset = offset; _FRICTION_S = 0.0; _FRICTION_K = 0.0; } CudaContactPlane::CudaContactPlane(const ContactPlane & p) { _normal = p._normal; _offset = p._offset; _FRICTION_S = p._FRICTION_S; _FRICTION_K = p._FRICTION_K; } CUDA_CALLABLE_MEMBER void CudaContactPlane::applyForce(CUDA_MASS * m) { // m -> force += (disp < 0) ? - disp * NORMAL * _normal : 0 * _normal; // TODO fix this for the host double disp = dot(m -> pos, _normal) - _offset; // displacement into the plane Vec f_normal = dot(m -> force, _normal) * _normal; // normal force if (disp < 0 && (_FRICTION_S > 0 || _FRICTION_K > 0)) { // if inside the plane Vec v_perp = m -> vel - dot(m -> vel, _normal) * _normal; // perpendicular velocity double v_norm = v_perp.norm(); if (v_norm > 1e-16) { // kinetic friction domain double friction_mag = _FRICTION_K * f_normal.norm(); m->force -= v_perp * friction_mag / v_norm; } else { // static friction Vec f_perp = m -> force - f_normal; // perpendicular force if (_FRICTION_S * f_normal.norm() > f_perp.norm()) { m -> force -= f_perp; } // else { // kinetic domain again // double friction_mag = _FRICTION_K * f_normal.norm(); // m->force -= v_perp * friction_mag / v_norm; // } } } // now apply the offset force to push the object out of the plane. // if (disp < 0) { // m -> pos[2] = 0; // m -> vel = m -> vel - 2 * dot(m -> vel, _normal) * _normal; // m -> force -= f_normal; // } Vec contact = (disp < 0) ? 
- disp * NORMAL * _normal : 0 * _normal; // displacement force m -> force += contact; } CUDA_CALLABLE_MEMBER CudaConstraintPlane::CudaConstraintPlane(const Vec & normal, double friction) { assert(normal.norm() != 0.0); _normal = normal / normal.norm(); _friction = friction; } CUDA_CALLABLE_MEMBER void CudaConstraintPlane::applyForce(CUDA_MASS * m) { double normal_force = dot(m -> force, _normal); m -> force += - _normal * normal_force; // constraint force double v_norm = m -> vel.norm(); if (v_norm >= 1e-16) { m -> vel += - _normal * dot(m -> vel, _normal); // constraint velocity m -> force += - _friction * normal_force * m -> vel / v_norm; // apply friction force } } CUDA_CALLABLE_MEMBER CudaDirection::CudaDirection(const Vec & tangent, double friction) { assert(tangent.norm() != 0.0); _tangent = tangent / tangent.norm(); _friction = friction; } CUDA_CALLABLE_MEMBER void CudaDirection::applyForce(CUDA_MASS * m) { Vec normal_force = m -> force - dot(m -> force, _tangent) * _tangent; m -> force += - normal_force; if (m -> vel.norm() >= 1e-16) { m -> vel = _tangent * dot(m -> vel, _tangent); m -> force += - normal_force.norm() * _friction * _tangent; } } void Container::setMassValues(double m) { // set masses for all Mass objects for (Mass * mass : masses) { mass -> m += m; } } void Container::setSpringConstants(double k) { for (Spring * spring : springs) { spring -> _k = k; } } void Container::setRestLengths(double len) { // set masses for all Mass objects for (Spring * spring : springs) { spring -> _rest = len; } } void Container::add(Mass * m) { masses.push_back(m); } void Container::add(Spring * s) { springs.push_back(s); } void Container::add(Container * c) { for (Mass * m : c -> masses) { masses.push_back(m); } for (Spring * s : c -> springs) { springs.push_back(s); } } Cube::Cube(const Vec & center, double side_length) { _center = center; _side_length = side_length; for (int i = 0; i < 8; i++) { masses.push_back(new Mass(side_length * (Vec(i & 1, (i >> 1) & 1, (i >> 2) & 1) - Vec(0.5, 0.5, 0.5)) + center)); } for (int i = 0; i < 8; i++) { // add the appropriate springs for (int j = i + 1; j < 8; j++) { springs.push_back(new Spring(masses[i], masses[j])); } } for (Spring * s : springs) { s -> setRestLength((s -> _right -> pos - s -> _left -> pos).norm()); } } void Container::translate(const Vec & displ) { for (Mass * m : masses) { m -> pos += displ; } } void Container::rotate(const Vec & axis, double angle) { Vec com(0, 0, 0); double total_mass = 0; for (Mass * m : masses) { com += m -> m * m -> pos; total_mass += m -> m; } com = com / total_mass; // center of mass as centroid Vec temp_axis = axis / axis.norm(); for (Mass * m : masses) { Vec temp = m -> pos - com; // subtract off center of mass Vec y = temp - dot(temp, temp_axis) * temp_axis; // project onto the given axis and find offset (y coordinate) if (y.norm() < 0.0001) { // if on the axis, don't do anything continue; } Vec planar(-sin(angle) * y.norm(), cos(angle) * y.norm(), 0); // coordinate in xy space Vec spatial = planar[0] * cross(temp_axis, y / y.norm()) + y / y.norm() * planar[1] + dot(temp, temp_axis) * temp_axis + com; // return to 3D space, then to COM space, then to absolute space m -> pos = spatial; // update position } } Lattice::Lattice(const Vec & center, const Vec & dims, int nx, int ny, int nz) { _center = center; _dims = dims; this -> nx = nx; this -> ny = ny; this -> nz = nz; for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { for (int k = 0; k < nz; k++) { masses.push_back(new Mass(Vec((nx > 1) ? 
(double) i / (nx - 1.0) - 0.5 : 0, (ny > 1) ? j / (ny - 1.0) - 0.5 : 0, (nz > 1) ? k / (nz - 1.0) - 0.5 : 0) * dims + center)); } } } for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { for (int k = 0; k < nz; k++) { for (int l = 0; l < ((i != nx - 1) ? 2 : 1); l++) { for (int m = 0; m < ((j != ny - 1) ? 2 : 1); m++) { for (int n = 0; n < ((k != nz - 1) ? 2 : 1); n++) { if (l != 0 || m != 0 || n != 0) { springs.push_back(new Spring(masses[k + j * nz + i * ny * nz], masses[(k + n) + (j + m) * nz + (i + l) * ny * nz])); } } } } if (k != nz - 1) { if (j != ny - 1) { springs.push_back(new Spring(masses[(k + 1) + j * nz + i * ny * nz], // get the full triangle masses[k + (j + 1) * nz + i * ny * nz])); } if (i != nx - 1) { springs.push_back(new Spring(masses[(k + 1) + j * nz + i * ny * nz], masses[k + j * nz + (i + 1) * ny * nz])); } if (j != ny - 1 && i != nx - 1) { springs.push_back(new Spring(masses[(k + 1) + j * nz + i * ny * nz], masses[k + (j + 1) * nz + (i + 1) * ny * nz])); springs.push_back(new Spring(masses[(k + 1) + j * nz + (i + 1) * ny * nz], masses[k + (j + 1) * nz + i * ny * nz])); springs.push_back(new Spring(masses[(k + 1) + (j + 1) * nz + i * ny * nz], masses[k + j * nz + (i + 1) * ny * nz])); } } if (j != ny - 1 && i != nx - 1) { springs.push_back(new Spring(masses[k + (j + 1) * nz + i * ny * nz], masses[k + j * nz + (i + 1) * ny * nz])); } } } } for (Spring * s : springs) { s -> setRestLength((s -> _right -> pos - s -> _left -> pos).norm()); } } #ifdef CONSTRAINTS Beam::Beam(const Vec & center, const Vec & dims, int nx, int ny, int nz) { _center = center; _dims = dims; this -> nx = nx; this -> ny = ny; this -> nz = nz; for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { for (int k = 0; k < nz; k++) { masses.push_back(new Mass(Vec((nx > 1) ? (double) i / (nx - 1.0) - 0.5 : 0, (ny > 1) ? j / (ny - 1.0) - 0.5 : 0, (nz > 1) ? k / (nz - 1.0) - 0.5 : 0) * dims + center)); if (i == 0) { masses[masses.size() - 1] -> constraints.fixed = true; } } } } for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { for (int k = 0; k < nz; k++) { for (int l = 0; l < ((i != nx - 1) ? 2 : 1); l++) { for (int m = 0; m < ((j != ny - 1) ? 2 : 1); m++) { for (int n = 0; n < ((k != nz - 1) ? 
2 : 1); n++) { if (l != 0 || m != 0 || n != 0) { springs.push_back(new Spring(masses[k + j * nz + i * ny * nz], masses[(k + n) + (j + m) * nz + (i + l) * ny * nz])); } } } } if (k != nz - 1) { if (j != ny - 1) { springs.push_back(new Spring(masses[(k + 1) + j * nz + i * ny * nz], // get the full triangle masses[k + (j + 1) * nz + i * ny * nz])); } if (i != nx - 1) { springs.push_back(new Spring(masses[(k + 1) + j * nz + i * ny * nz], masses[k + j * nz + (i + 1) * ny * nz])); } if (j != ny - 1 && i != nx - 1) { springs.push_back(new Spring(masses[(k + 1) + j * nz + i * ny * nz], masses[k + (j + 1) * nz + (i + 1) * ny * nz])); springs.push_back(new Spring(masses[(k + 1) + j * nz + (i + 1) * ny * nz], masses[k + (j + 1) * nz + i * ny * nz])); springs.push_back(new Spring(masses[(k + 1) + (j + 1) * nz + i * ny * nz], masses[k + j * nz + (i + 1) * ny * nz])); } } if (j != ny - 1 && i != nx - 1) { springs.push_back(new Spring(masses[k + (j + 1) * nz + i * ny * nz], masses[k + j * nz + (i + 1) * ny * nz])); } } } } for (Spring * s : springs) { s -> setRestLength((s -> _right -> pos - s -> _left -> pos).norm()); } } #endif // Robot::Robot(const Vec & center, const cppn& encoding, double side_length, double omega, double k_soft, double k_stiff){ // _center = center; // _side_length = side_length; // _omega = omega; // _k_soft = k_soft; // _k_stiff = k_stiff; // _encoding = encoding; // int RobotDim = encoding.size(); // number of cubes per side // Vec dims(side_length,side_length,side_length); // // keep trace of number of cubes that each mass is connected to // std::vector<std::vector<std::vector<int>>> mass_conn(RobotDim+1, std::vector<std::vector<int>>(RobotDim+1,std::vector<int>(RobotDim+1,0))); // std::vector<std::vector<std::vector<Mass *>>> _masses(RobotDim+1, std::vector<std::vector<Mass *>>(RobotDim+1,std::vector<Mass *>(RobotDim+1,nullptr))); // // store number of cubes that should be connected to each mass // for (int i = 0; i < RobotDim+1; i++) { // for (int j = 0; j < RobotDim+1; j++) { // for (int k = 0; k < RobotDim+1; k++) { // // if index mode RobotDim+1 is 0, then it is on the edge // int i_edge = (i % (RobotDim)) ? 0:1; // int j_edge = (j % (RobotDim)) ? 0:1; // int k_edge = (k % (RobotDim)) ? 
0:1; // if (i_edge + j_edge + k_edge ==0){ // mass_conn[i][j][k] = 8; //corner // }else if (i_edge+j_edge+k_edge ==3){ // mass_conn[i][j][k] = 1; //corner // }else if (i_edge+j_edge+k_edge ==2){ // mass_conn[i][j][k] = 2; //edge // }else{ // mass_conn[i][j][k] = 4; //surface // } // } // } // } // // Remove appropriate masses // for (int i = 0; i < RobotDim; i++) { // for (int j = 0; j < RobotDim; j++) { // for (int k = 0; k < RobotDim; k++) { // int exist = encoding[i][j][k][0]; // if (!exist){ // // subtract connectedness of each mass for the cube // mass_conn[i][j][k] -= 1; // mass_conn[i][j][k+1] -= 1; // mass_conn[i][j+1][k] -= 1; // mass_conn[i][j+1][k+1] -= 1; // mass_conn[i+1][j][k] -= 1; // mass_conn[i+1][j][k+1] -= 1; // mass_conn[i+1][j+1][k] -= 1; // mass_conn[i+1][j+1][k+1] -= 1; // } // } // } // } // // create masses // for (int i = 0; i < RobotDim+1; i++) { // for (int j = 0; j < RobotDim+1; j++) { // for (int k = 0; k < RobotDim + 1; k++) { // if (mass_conn[i][j][k] > 0){ // Mass * m; // if (RobotDim == 1) { // m = new Mass(Vec(i-0.5, j-0.5, k-0.5) * dims + _center); // } else { // m = new Mass(Vec(i / (RobotDim - 1.0) - 0.5, // j / (RobotDim - 1.0) - 0.5, // k / (RobotDim - 1.0) - 0.5) * dims + _center); // } // #ifdef GRAPHICS // m -> color = Vec(0,0,0); // #endif // masses.push_back(m); // _masses[i][j][k] = m; // } // } // } // } // // create springs // for (int i = 0; i < RobotDim; i++) { // for (int j = 0; j < RobotDim; j++) { // for (int k = 0; k < RobotDim; k++) { // int exist = encoding[i][j][k][0]; // if (exist) { // int type = encoding[i][j][k][1]; // for(int l=0; l<8; l++) { // int l_x = (l<4)? 0:1; // int l_y = (l<2)? 0:(l<4)?1:(l<6)?0:1; // int l_z = (l%2)? 1:0; // for (int m=l+1; m<8; m++) { // int r_x = (m<4)? 0:1; // int r_y = (m<2)? 0:(m<4)?1:(m<6)?0:1; // int r_z = (m%2)? 
1:0; // Spring * spr = new Spring(_masses[i+l_x][j+l_y][k+l_z], // _masses[i+r_x][j+r_y][k+r_z]); // spr -> _type = type; // spr -> _omega = omega; // if (type==0) { // green, contract then expand // spr -> _k = k_soft; // #ifdef GRAPHICS // _masses[i+l_x][j+l_y][k+l_z]->color += GREEN/16; // _masses[i+r_x][j+r_y][k+r_z]->color += GREEN/16; // #endif // } else if (type==1) { // red, expand then contract // spr -> _k = k_soft; // #ifdef GRAPHICS // _masses[i+l_x][j+l_y][k+l_z]->color += RED/16; // _masses[i+r_x][j+r_y][k+r_z]->color += RED/16; // #endif // } else if (type==2) { // passive soft // spr -> _k = k_soft; // #ifdef GRAPHICS // _masses[i+l_x][j+l_y][k+l_z]->color += BLUE/16; // _masses[i+r_x][j+r_y][k+r_z]->color += BLUE/16; // #endif // } else { // passive stiff // spr -> _k = k_stiff; // #ifdef GRAPHICS // _masses[i+l_x][j+l_y][k+l_z]->color += PURPLE/16; // _masses[i+r_x][j+r_y][k+r_z]->color += PURPLE/16; // #endif // } // springs.push_back(spr); // } // } // } // } // } // } // for (Spring * s : springs) { // s -> setRestLength((s -> _right -> pos - s -> _left -> pos).norm()); // } // } #ifdef CONSTRAINTS void Container::fix() { for (Mass * mass : masses) { mass -> constraints.fixed = true; } } LOCAL_CONSTRAINTS::LOCAL_CONSTRAINTS() { // constraint_plane = thrust::device_vector<CudaConstraintPlane>(1); // contact_plane = thrust::device_vector<CudaContactPlane>(1); // ball = thrust::device_vector<CudaBall>(1); // direction = thrust::device_vector<CudaDirection>(1); // contact_plane_ptr = thrust::raw_pointer_cast(contact_plane.data()); // TODO make sure this is safe // constraint_plane_ptr = thrust::raw_pointer_cast(constraint_plane.data()); // ball_ptr = thrust::raw_pointer_cast(ball.data()); // direction_ptr = thrust::raw_pointer_cast(direction.data()); num_contact_planes = 0; num_constraint_planes = 0; num_balls = 0; num_directions = 0; drag_coefficient = 0; fixed = false; } CUDA_LOCAL_CONSTRAINTS::CUDA_LOCAL_CONSTRAINTS(LOCAL_CONSTRAINTS & c) { contact_plane = c.contact_plane_ptr; constraint_plane = c.constraint_plane_ptr; ball = c.ball_ptr; direction = c.direction_ptr; num_contact_planes = c.num_contact_planes; num_constraint_planes = c.num_constraint_planes; num_balls = c.num_balls; num_directions = c.num_directions; fixed = c.fixed; drag_coefficient = c.drag_coefficient; } #endif #ifdef GRAPHICS void Ball::normalize(GLfloat * v) { GLfloat norm = sqrt(pow(v[0], 2) + pow(v[1], 2) + pow(v[2],2)) / _radius; for (int i = 0; i < 3; i++) { v[i] /= norm; } } void Ball::writeTriangle(GLfloat * arr, GLfloat *v1, GLfloat *v2, GLfloat *v3) { for (int j = 0; j < 3; j++) { arr[j] = v1[j] + _center[j]; } arr += 3; for (int j = 0; j < 3; j++) { arr[j] = v2[j] + _center[j]; } arr += 3; for (int j = 0; j < 3; j++) { arr[j] = v3[j] + _center[j]; } } void Ball::subdivide(GLfloat * arr, GLfloat *v1, GLfloat *v2, GLfloat *v3, int depth) { GLfloat v12[3], v23[3], v31[3]; if (depth == 0) { writeTriangle(arr, v1, v2, v3); return; } for (int i = 0; i < 3; i++) { v12[i] = v1[i]+v2[i]; v23[i] = v2[i]+v3[i]; v31[i] = v3[i]+v1[i]; } normalize(v12); normalize(v23); normalize(v31); subdivide(arr, v1, v12, v31, depth - 1); arr += 3 * 3 * (int) pow(4, depth - 1); subdivide(arr, v2, v23, v12, depth - 1); arr += 3 * 3 * (int) pow(4, depth - 1); subdivide(arr, v3, v31, v23, depth - 1); arr += 3 * 3 * (int) pow(4, depth - 1); subdivide(arr, v12, v23, v31, depth - 1); } void Ball::generateBuffers() { glm::vec3 color = {0.22f, 0.71f, 0.0f}; GLfloat * vertex_data = new GLfloat[20 * 3 * 3 * (int) pow(4, depth)]; 
// times 4 for subdivision GLfloat X = (GLfloat) _radius * .525731112119133606; GLfloat Z = (GLfloat) _radius * .850650808352039932; static GLfloat vdata[12][3] = { {-X, 0.0, Z}, {X, 0.0, Z}, {-X, 0.0, -Z}, {X, 0.0, -Z}, {0.0, Z, X}, {0.0, Z, -X}, {0.0, -Z, X}, {0.0, -Z, -X}, {Z, X, 0.0}, {-Z, X, 0.0}, {Z, -X, 0.0}, {-Z, -X, 0.0} }; static GLuint tindices[20][3] = { {0,4,1}, {0,9,4}, {9,5,4}, {4,5,8}, {4,8,1}, {8,10,1}, {8,3,10}, {5,3,8}, {5,2,3}, {2,7,3}, {7,10,3}, {7,6,10}, {7,11,6}, {11,0,6}, {0,1,6}, {6,1,10}, {9,0,11}, {9,11,2}, {9,2,5}, {7,2,11} }; for (int i = 0; i < 20; i++) { subdivide(&vertex_data[3 * 3 * (int) pow(4, depth) * i], vdata[tindices[i][0]], vdata[tindices[i][1]], vdata[tindices[i][2]], depth); } glGenBuffers(1, &vertices); // create buffer for these vertices glBindBuffer(GL_ARRAY_BUFFER, vertices); glBufferData(GL_ARRAY_BUFFER, 20 * 3 * 3 * (int) pow(4, depth) * sizeof(GLfloat), vertex_data, GL_STATIC_DRAW); GLfloat * color_data = new GLfloat[20 * 3 * 3 * (int) pow(4, depth)]; // TODO constant length array for (int i = 0; i < 20 * 3 * (int) pow(4, depth); i++) { color_data[3*i] = color[0]; color_data[3*i + 1] = color[1]; color_data[3*i + 2] = color[2]; } glGenBuffers(1, &colors); glBindBuffer(GL_ARRAY_BUFFER, colors); glBufferData(GL_ARRAY_BUFFER, 20 * 3 * 3 * (int) pow(4, depth) * sizeof(GLfloat), color_data, GL_STATIC_DRAW); delete [] color_data; delete [] vertex_data; _initialized = true; } void Ball::draw() { glEnableVertexAttribArray(0); glBindBuffer(GL_ARRAY_BUFFER, vertices); glVertexAttribPointer( 0, // attribute. No particular reason for 0, but must match the layout in the shader. 3, // size GL_FLOAT, // type GL_FALSE, // normalized? 0, // stride (void*)0 // array buffer offset ); glEnableVertexAttribArray(1); glBindBuffer(GL_ARRAY_BUFFER, colors); glVertexAttribPointer( 1, // attribute. No particular reason for 1, but must match the layout in the shader. 3, // size GL_FLOAT, // type GL_FALSE, // normalized? 0, // stride (void*)0 // array buffer offset ); // Draw the triangle ! glDrawArrays(GL_TRIANGLES, 0, 20 * 3 * (int) pow(4, depth)); // 12*3 indices starting at 0 -> 12 triangles glDisableVertexAttribArray(1); glDisableVertexAttribArray(0); } #endif #ifdef GRAPHICS void ContactPlane::generateBuffers() { glm::vec3 color = {0.22f, 0.71f, 0.0f}; Vec temp = (dot(_normal, Vec(0, 1, 0)) < 0.8) ? 
Vec(0, 1, 0) : Vec(1, 0, 0); Vec v1 = cross(_normal, temp); // two unit vectors along plane v1 = v1 / v1.norm(); Vec v2 = cross(_normal, v1); v2 = v2 / v2.norm(); const static GLfloat vertex_buffer_platform[118] = { -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1 }; GLfloat vertex_data[108]; for (int i = 0; i < 36; i++) { Vec temp = Vec(vertex_buffer_platform[3 * i], vertex_buffer_platform[3 * i + 1], vertex_buffer_platform[3 * i + 2]); Vec vertex = 10 * dot(v1, temp) * v1 + 10 * dot(v2, temp) * v2 + _normal * (_offset + dot(_normal, temp) - 1.0); vertex_data[3 * i] = vertex[0]; vertex_data[3 * i + 1] = vertex[1]; vertex_data[3 * i + 2] = vertex[2]; } glGenBuffers(1, &vertices); // create buffer for these vertices glBindBuffer(GL_ARRAY_BUFFER, vertices); glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_data), vertex_data, GL_STATIC_DRAW); GLfloat g_color_buffer_data[108]; for (int i = 0; i < 36; i++) { g_color_buffer_data[3 * i] = color[0]; g_color_buffer_data[3 * i + 1] = color[1]; g_color_buffer_data[3 * i + 2] = color[2]; } glGenBuffers(1, &colors); glBindBuffer(GL_ARRAY_BUFFER, colors); glBufferData(GL_ARRAY_BUFFER, sizeof(g_color_buffer_data), g_color_buffer_data, GL_STATIC_DRAW); _initialized = true; } void ContactPlane::draw() { // 1st attribute buffer : vertices glEnableVertexAttribArray(0); glBindBuffer(GL_ARRAY_BUFFER, vertices); glVertexAttribPointer( 0, // attribute. No particular reason for 0, but must match the layout in the shader. 3, // size GL_FLOAT, // type GL_FALSE, // normalized? 0, // stride (void*)0 // array buffer offset ); glEnableVertexAttribArray(1); glBindBuffer(GL_ARRAY_BUFFER, colors); glVertexAttribPointer( 1, // attribute. No particular reason for 1, but must match the layout in the shader. 3, // size GL_FLOAT, // type GL_FALSE, // normalized? 0, // stride (void*)0 // array buffer offset ); // Draw the triangle ! glDrawArrays(GL_TRIANGLES, 0, 12*3); // 12*3 indices starting at 0 -> 12 triangles glDisableVertexAttribArray(1); glDisableVertexAttribArray(0); } #endif } // namespace titan
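// ---------------------------------------------------------------------------
// Hedged sketch (not part of the Titan sources above): a minimal host-side
// illustration of the penalty-plus-friction contact model implemented in
// CudaContactPlane::applyForce. Vec3, MassState and applyPlaneContact are
// hypothetical stand-ins for Titan's Vec and CUDA_MASS types; only the
// branching (penetration penalty with coefficient NORMAL, kinetic vs. static
// friction) mirrors the kernel above.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };
static Vec3 operator+(Vec3 a, Vec3 b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
static Vec3 operator-(Vec3 a, Vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3 operator*(double s, Vec3 a) { return {s * a.x, s * a.y, s * a.z}; }
static double dot3(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static double norm3(Vec3 a) { return std::sqrt(dot3(a, a)); }

struct MassState { Vec3 pos, vel, force; };

// Plane with unit normal n and offset; points with dot(p, n) < offset are inside.
void applyPlaneContact(MassState &m, Vec3 n, double offset,
                       double mu_s, double mu_k, double normal_k = 20000.0) {
    double disp = dot3(m.pos, n) - offset;          // signed distance to plane
    Vec3 f_normal = dot3(m.force, n) * n;           // force component along n
    if (disp < 0 && (mu_s > 0 || mu_k > 0)) {       // mass has penetrated the plane
        Vec3 v_perp = m.vel - dot3(m.vel, n) * n;   // tangential velocity
        double v_norm = norm3(v_perp);
        if (v_norm > 1e-16) {                       // sliding: kinetic friction
            m.force = m.force - (mu_k * norm3(f_normal) / v_norm) * v_perp;
        } else {                                    // sticking: static friction
            Vec3 f_perp = m.force - f_normal;       // tangential force
            if (mu_s * norm3(f_normal) > norm3(f_perp)) {
                m.force = m.force - f_perp;         // cancel it entirely
            }
        }
    }
    if (disp < 0) {                                 // push the mass back out
        m.force = m.force + (-disp * normal_k) * n;
    }
}

int main() {
    // A mass slightly below the z = 0 plane, sliding in +x under gravity.
    MassState m{{0.0, 0.0, -0.01}, {0.5, 0.0, 0.0}, {0.0, 0.0, -9.81}};
    applyPlaneContact(m, {0.0, 0.0, 1.0}, 0.0, /*mu_s=*/0.8, /*mu_k=*/0.5);
    std::printf("force after contact: (%f, %f, %f)\n", m.force.x, m.force.y, m.force.z);
    return 0;
}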
#pragma once #include "cuda/Complex.cuh" #include <cmath> namespace facebook { namespace cuda { namespace fbfft { namespace { #define PI 0x1.921FB6p+1f /* Computed in Sollya as round(cos(k * pi / 32), single, RN)) */ #define FBFFT32_COSF_0 0x1.p0 #define FBFFT32_COSF_1 0xf.ec46dp-4 #define FBFFT32_COSF_2 0xf.b14bep-4 #define FBFFT32_COSF_3 0xf.4fa0bp-4 #define FBFFT32_COSF_4 0xe.c835ep-4 #define FBFFT32_COSF_5 0xe.1c598p-4 #define FBFFT32_COSF_6 0xd.4db31p-4 #define FBFFT32_COSF_7 0xc.5e403p-4 #define FBFFT32_COSF_8 0xb.504f3p-4 #define FBFFT32_COSF_9 0xa.26799p-4 #define FBFFT32_COSF_A 0x8.e39dap-4 #define FBFFT32_COSF_B 0x7.8ad75p-4 #define FBFFT32_COSF_C 0x6.1f78a8p-4 #define FBFFT32_COSF_D 0x4.a50188p-4 #define FBFFT32_COSF_E 0x3.1f1708p-4 #define FBFFT32_COSF_F 0x1.917a6cp-4 #define FBFFT32_COSF_G 0 #define FBFFT32_COSF_H -0x1.917a6cp-4 #define FBFFT32_COSF_I -0x3.1f1708p-4 #define FBFFT32_COSF_J -0x4.a50188p-4 #define FBFFT32_COSF_K -0x6.1f78a8p-4 #define FBFFT32_COSF_L -0x7.8ad75p-4 #define FBFFT32_COSF_M -0x8.e39dap-4 #define FBFFT32_COSF_N -0xa.26799p-4 #define FBFFT32_COSF_O -0xb.504f3p-4 #define FBFFT32_COSF_P -0xc.5e403p-4 #define FBFFT32_COSF_Q -0xd.4db31p-4 #define FBFFT32_COSF_R -0xe.1c598p-4 #define FBFFT32_COSF_S -0xe.c835ep-4 #define FBFFT32_COSF_T -0xf.4fa0bp-4 #define FBFFT32_COSF_U -0xf.b14bep-4 #define FBFFT32_COSF_V -0xf.ec46dp-4 #define FBFFT32_SINF_0 0.0f #define FBFFT32_SINF_1 FBFFT32_COSF_F #define FBFFT32_SINF_2 FBFFT32_COSF_E #define FBFFT32_SINF_3 FBFFT32_COSF_D #define FBFFT32_SINF_4 FBFFT32_COSF_C #define FBFFT32_SINF_5 FBFFT32_COSF_B #define FBFFT32_SINF_6 FBFFT32_COSF_A #define FBFFT32_SINF_7 FBFFT32_COSF_9 #define FBFFT32_SINF_8 FBFFT32_COSF_8 #define FBFFT32_SINF_9 FBFFT32_COSF_7 #define FBFFT32_SINF_A FBFFT32_COSF_6 #define FBFFT32_SINF_B FBFFT32_COSF_5 #define FBFFT32_SINF_C FBFFT32_COSF_4 #define FBFFT32_SINF_D FBFFT32_COSF_3 #define FBFFT32_SINF_E FBFFT32_COSF_2 #define FBFFT32_SINF_F FBFFT32_COSF_1 #define FBFFT32_SINF_G 1.0f #define FBFFT32_SINF_H FBFFT32_COSF_1 #define FBFFT32_SINF_I FBFFT32_COSF_2 #define FBFFT32_SINF_J FBFFT32_COSF_3 #define FBFFT32_SINF_K FBFFT32_COSF_4 #define FBFFT32_SINF_L FBFFT32_COSF_5 #define FBFFT32_SINF_M FBFFT32_COSF_6 #define FBFFT32_SINF_N FBFFT32_COSF_7 #define FBFFT32_SINF_O FBFFT32_COSF_8 #define FBFFT32_SINF_P FBFFT32_COSF_9 #define FBFFT32_SINF_Q FBFFT32_COSF_A #define FBFFT32_SINF_R FBFFT32_COSF_B #define FBFFT32_SINF_S FBFFT32_COSF_C #define FBFFT32_SINF_T FBFFT32_COSF_D #define FBFFT32_SINF_U FBFFT32_COSF_E #define FBFFT32_SINF_V FBFFT32_COSF_F template <int FFTSize> __device__ __host__ __forceinline__ constexpr float cos(int index); template <> __device__ __host__ __forceinline__ constexpr float cos<32>(int index) { return (index == 0) ? FBFFT32_COSF_0 : ((index == 1) ? FBFFT32_COSF_1 : ((index == 2) ? FBFFT32_COSF_2: ((index == 3) ? FBFFT32_COSF_3: ((index == 4) ? FBFFT32_COSF_4: ((index == 5) ? FBFFT32_COSF_5: ((index == 6) ? FBFFT32_COSF_6: ((index == 7) ? FBFFT32_COSF_7: ((index == 8) ? FBFFT32_COSF_8: ((index == 9) ? FBFFT32_COSF_9: ((index == 10) ? FBFFT32_COSF_A: ((index == 11) ? FBFFT32_COSF_B: ((index == 12) ? FBFFT32_COSF_C: ((index == 13) ? FBFFT32_COSF_D: ((index == 14) ? FBFFT32_COSF_E: ((index == 15) ? FBFFT32_COSF_F: ((index == 16) ? FBFFT32_COSF_G: ((index == 17) ? FBFFT32_COSF_H: ((index == 18) ? FBFFT32_COSF_I: ((index == 19) ? FBFFT32_COSF_J: ((index == 20) ? FBFFT32_COSF_K: ((index == 21) ? FBFFT32_COSF_L: ((index == 22) ? FBFFT32_COSF_M: ((index == 23) ? FBFFT32_COSF_N: ((index == 24) ? 
FBFFT32_COSF_O: ((index == 25) ? FBFFT32_COSF_P: ((index == 26) ? FBFFT32_COSF_Q: ((index == 27) ? FBFFT32_COSF_R: ((index == 28) ? FBFFT32_COSF_S: ((index == 29) ? FBFFT32_COSF_T: ((index == 30) ? FBFFT32_COSF_U: ((index == 31) ? FBFFT32_COSF_V: NAN))))))))))))))))))))))))))))))); } template <> __device__ __host__ __forceinline__ constexpr float cos<16>(int index) { return cos<32>(2 * index); } template <> __device__ __host__ __forceinline__ constexpr float cos<8>(int index) { return cos<16>(2 * index); } template <> __device__ __host__ __forceinline__ constexpr float cos<4>(int index) { return cos<8>(2 * index); } template <> __device__ __host__ __forceinline__ constexpr float cos<2>(int index) { return cos<4>(2 * index); } template <int FFTSize> __device__ __host__ __forceinline__ constexpr float sin(int index); template <> __device__ __host__ __forceinline__ constexpr float sin<32>(int index) { return (index == 0) ? FBFFT32_SINF_0: ((index == 1) ? FBFFT32_SINF_1: ((index == 2) ? FBFFT32_SINF_2: ((index == 3) ? FBFFT32_SINF_3: ((index == 4) ? FBFFT32_SINF_4: ((index == 5) ? FBFFT32_SINF_5: ((index == 6) ? FBFFT32_SINF_6: ((index == 7) ? FBFFT32_SINF_7: ((index == 8) ? FBFFT32_SINF_8: ((index == 9) ? FBFFT32_SINF_9: ((index == 10) ? FBFFT32_SINF_A: ((index == 11) ? FBFFT32_SINF_B: ((index == 12) ? FBFFT32_SINF_C: ((index == 13) ? FBFFT32_SINF_D: ((index == 14) ? FBFFT32_SINF_E: ((index == 15) ? FBFFT32_SINF_F: ((index == 16) ? FBFFT32_SINF_G: ((index == 17) ? FBFFT32_SINF_H: ((index == 18) ? FBFFT32_SINF_I: ((index == 19) ? FBFFT32_SINF_J: ((index == 20) ? FBFFT32_SINF_K: ((index == 21) ? FBFFT32_SINF_L: ((index == 22) ? FBFFT32_SINF_M: ((index == 23) ? FBFFT32_SINF_N: ((index == 24) ? FBFFT32_SINF_O: ((index == 25) ? FBFFT32_SINF_P: ((index == 26) ? FBFFT32_SINF_Q: ((index == 27) ? FBFFT32_SINF_R: ((index == 28) ? FBFFT32_SINF_S: ((index == 29) ? FBFFT32_SINF_T: ((index == 30) ? FBFFT32_SINF_U: ((index == 31) ? 
FBFFT32_SINF_V: NAN))))))))))))))))))))))))))))))); } template <> __device__ __host__ __forceinline__ constexpr float sin<16>(int index) { return sin<32>(2 * index); } template <> __device__ __host__ __forceinline__ constexpr float sin<8>(int index) { return sin<16>(2 * index); } template <> __device__ __host__ __forceinline__ constexpr float sin<4>(int index) { return sin<8>(2 * index); } template <> __device__ __host__ __forceinline__ constexpr float sin<2>(int index) { return sin<4>(2 * index); } #define FBFFT32_CEXPF_0 Complex(FBFFT32_COSF_0, FBFFT32_SINF_0) #define FBFFT32_CEXPF_1 Complex(FBFFT32_COSF_1, FBFFT32_SINF_1) #define FBFFT32_CEXPF_2 Complex(FBFFT32_COSF_2, FBFFT32_SINF_2) #define FBFFT32_CEXPF_3 Complex(FBFFT32_COSF_3, FBFFT32_SINF_3) #define FBFFT32_CEXPF_4 Complex(FBFFT32_COSF_4, FBFFT32_SINF_4) #define FBFFT32_CEXPF_5 Complex(FBFFT32_COSF_5, FBFFT32_SINF_5) #define FBFFT32_CEXPF_6 Complex(FBFFT32_COSF_6, FBFFT32_SINF_6) #define FBFFT32_CEXPF_7 Complex(FBFFT32_COSF_7, FBFFT32_SINF_7) #define FBFFT32_CEXPF_8 Complex(FBFFT32_COSF_8, FBFFT32_SINF_8) #define FBFFT32_CEXPF_9 Complex(FBFFT32_COSF_9, FBFFT32_SINF_9) #define FBFFT32_CEXPF_A Complex(FBFFT32_COSF_A, FBFFT32_SINF_A) #define FBFFT32_CEXPF_B Complex(FBFFT32_COSF_B, FBFFT32_SINF_B) #define FBFFT32_CEXPF_C Complex(FBFFT32_COSF_C, FBFFT32_SINF_C) #define FBFFT32_CEXPF_D Complex(FBFFT32_COSF_D, FBFFT32_SINF_D) #define FBFFT32_CEXPF_E Complex(FBFFT32_COSF_E, FBFFT32_SINF_E) #define FBFFT32_CEXPF_F Complex(FBFFT32_COSF_F, FBFFT32_SINF_F) #define FBFFT32_CEXPF_G Complex(FBFFT32_COSF_G, FBFFT32_SINF_G) #define FBFFT32_CEXPF_H Complex(FBFFT32_COSF_H, FBFFT32_SINF_H) #define FBFFT32_CEXPF_I Complex(FBFFT32_COSF_I, FBFFT32_SINF_I) #define FBFFT32_CEXPF_J Complex(FBFFT32_COSF_J, FBFFT32_SINF_J) #define FBFFT32_CEXPF_K Complex(FBFFT32_COSF_K, FBFFT32_SINF_K) #define FBFFT32_CEXPF_L Complex(FBFFT32_COSF_L, FBFFT32_SINF_L) #define FBFFT32_CEXPF_M Complex(FBFFT32_COSF_M, FBFFT32_SINF_M) #define FBFFT32_CEXPF_N Complex(FBFFT32_COSF_N, FBFFT32_SINF_N) #define FBFFT32_CEXPF_O Complex(FBFFT32_COSF_O, FBFFT32_SINF_O) #define FBFFT32_CEXPF_P Complex(FBFFT32_COSF_P, FBFFT32_SINF_P) #define FBFFT32_CEXPF_Q Complex(FBFFT32_COSF_Q, FBFFT32_SINF_Q) #define FBFFT32_CEXPF_R Complex(FBFFT32_COSF_R, FBFFT32_SINF_R) #define FBFFT32_CEXPF_S Complex(FBFFT32_COSF_S, FBFFT32_SINF_S) #define FBFFT32_CEXPF_T Complex(FBFFT32_COSF_T, FBFFT32_SINF_T) #define FBFFT32_CEXPF_U Complex(FBFFT32_COSF_U, FBFFT32_SINF_U) #define FBFFT32_CEXPF_V Complex(FBFFT32_COSF_V, FBFFT32_SINF_V) template <int FFTSize> __device__ __host__ __forceinline__ Complex cexp(int index); template <> __device__ __host__ __forceinline__ Complex cexp<256>(int index) { return Complex(); } template <> __device__ __host__ __forceinline__ Complex cexp<128>(int index) { return Complex(); } template <> __device__ __host__ __forceinline__ Complex cexp<64>(int index) { return Complex(); } template <> __device__ __host__ __forceinline__ Complex cexp<32>(int index) { return (index == 0) ? FBFFT32_CEXPF_0: ((index == 1) ? FBFFT32_CEXPF_1: ((index == 2) ? FBFFT32_CEXPF_2: ((index == 3) ? FBFFT32_CEXPF_3: ((index == 4) ? FBFFT32_CEXPF_4: ((index == 5) ? FBFFT32_CEXPF_5: ((index == 6) ? FBFFT32_CEXPF_6: ((index == 7) ? FBFFT32_CEXPF_7: ((index == 8) ? FBFFT32_CEXPF_8: ((index == 9) ? FBFFT32_CEXPF_9: ((index == 10) ? FBFFT32_CEXPF_A: ((index == 11) ? FBFFT32_CEXPF_B: ((index == 12) ? FBFFT32_CEXPF_C: ((index == 13) ? FBFFT32_CEXPF_D: ((index == 14) ? FBFFT32_CEXPF_E: ((index == 15) ? 
FBFFT32_CEXPF_F: ((index == 16) ? FBFFT32_CEXPF_G: ((index == 17) ? FBFFT32_CEXPF_H: ((index == 18) ? FBFFT32_CEXPF_I: ((index == 19) ? FBFFT32_CEXPF_J: ((index == 20) ? FBFFT32_CEXPF_K: ((index == 21) ? FBFFT32_CEXPF_L: ((index == 22) ? FBFFT32_CEXPF_M: ((index == 23) ? FBFFT32_CEXPF_N: ((index == 24) ? FBFFT32_CEXPF_O: ((index == 25) ? FBFFT32_CEXPF_P: ((index == 26) ? FBFFT32_CEXPF_Q: ((index == 27) ? FBFFT32_CEXPF_R: ((index == 28) ? FBFFT32_CEXPF_S: ((index == 29) ? FBFFT32_CEXPF_T: ((index == 30) ? FBFFT32_CEXPF_U: ((index == 31) ? FBFFT32_CEXPF_V: NAN))))))))))))))))))))))))))))))); } template <> __device__ __host__ __forceinline__ Complex cexp<16>(int index) { return cexp<32>(2 * index); } template <> __device__ __host__ __forceinline__ Complex cexp<8>(int index) { return cexp<16>(2 * index); } template <> __device__ __host__ __forceinline__ Complex cexp<4>(int index) { return cexp<8>(2 * index); } template <> __device__ __host__ __forceinline__ Complex cexp<2>(int index) { return cexp<4>(2 * index); } constexpr int kNumTwiddles = 128; // __device__, __shared__ and __constant__ variables cannot be defined as // external using the extern keyword. The only exception is for dynamically // allocated __shared__ variables as described in Section B.2.3. // // Putting everything in an anon namespace, each .cu scope has its own // array in global memory, and its own single time initialization. // // This is minor overhead. // float __device__ twiddleFactors[kNumTwiddles * 2]; /* Computed in Sollya as round(cos(k * pi / 128), single, RN)) */ const float twiddleFactorsHost[kNumTwiddles * 2] = { 0x1.p0 , 0.0, 0xf.fec43p-4 , 0x6.48558p-8, 0xf.fb10fp-4 , 0xc.8fb3p-8, 0xf.f4e6dp-4 , 0x1.2d520ap-4, 0xf.ec46dp-4 , 0x1.917a6cp-4, 0xf.e1324p-4 , 0x1.f564e6p-4, 0xf.d3aacp-4 , 0x2.59020cp-4, 0xf.c3b28p-4 , 0x2.bc4288p-4, 0xf.b14bep-4 , 0x3.1f1708p-4, 0xf.9c79dp-4 , 0x3.81704cp-4, 0xf.853f8p-4 , 0x3.e33f3p-4, 0xf.6ba07p-4 , 0x4.447498p-4, 0xf.4fa0bp-4 , 0x4.a50188p-4, 0xf.31447p-4 , 0x5.04d728p-4, 0xf.10908p-4 , 0x5.63e6ap-4, 0xe.ed89ep-4 , 0x5.c2215p-4, 0xe.c835ep-4 , 0x6.1f78a8p-4, 0xe.a09a7p-4 , 0x6.7bde5p-4, 0xe.76bd8p-4 , 0x6.d744p-4, 0xe.4aa59p-4 , 0x7.319ba8p-4, 0xe.1c598p-4 , 0x7.8ad75p-4, 0xd.ebe05p-4 , 0x7.e2e938p-4, 0xd.b941ap-4 , 0x8.39c3dp-4, 0xd.84853p-4 , 0x8.8f59bp-4, 0xd.4db31p-4 , 0x8.e39dap-4, 0xd.14d3dp-4 , 0x9.3682ap-4, 0xc.d9f02p-4 , 0x9.87fcp-4, 0xc.9d112p-4 , 0x9.d7fd1p-4, 0xc.5e403p-4 , 0xa.26799p-4, 0xc.1d87p-4 , 0xa.73656p-4, 0xb.daef9p-4 , 0xa.beb4ap-4, 0xb.96842p-4 , 0xb.085bbp-4, 0xb.504f3p-4 , 0xb.504f3p-4, 0xb.085bbp-4 , 0xb.96842p-4, 0xa.beb4ap-4 , 0xb.daef9p-4, 0xa.73656p-4 , 0xc.1d87p-4, 0xa.26799p-4 , 0xc.5e403p-4, 0x9.d7fd1p-4 , 0xc.9d112p-4, 0x9.87fcp-4 , 0xc.d9f02p-4, 0x9.3682ap-4 , 0xd.14d3dp-4, 0x8.e39dap-4 , 0xd.4db31p-4, 0x8.8f59bp-4 , 0xd.84853p-4, 0x8.39c3dp-4 , 0xd.b941ap-4, 0x7.e2e938p-4 , 0xd.ebe05p-4, 0x7.8ad75p-4 , 0xe.1c598p-4, 0x7.319ba8p-4 , 0xe.4aa59p-4, 0x6.d744p-4 , 0xe.76bd8p-4, 0x6.7bde5p-4 , 0xe.a09a7p-4, 0x6.1f78a8p-4 , 0xe.c835ep-4, 0x5.c2215p-4 , 0xe.ed89ep-4, 0x5.63e6ap-4 , 0xf.10908p-4, 0x5.04d728p-4 , 0xf.31447p-4, 0x4.a50188p-4 , 0xf.4fa0bp-4, 0x4.447498p-4 , 0xf.6ba07p-4, 0x3.e33f3p-4 , 0xf.853f8p-4, 0x3.81704cp-4 , 0xf.9c79dp-4, 0x3.1f1708p-4 , 0xf.b14bep-4, 0x2.bc4288p-4 , 0xf.c3b28p-4, 0x2.59020cp-4 , 0xf.d3aacp-4, 0x1.f564e6p-4 , 0xf.e1324p-4, 0x1.917a6cp-4 , 0xf.ec46dp-4, 0x1.2d520ap-4 , 0xf.f4e6dp-4, 0xc.8fb3p-8 , 0xf.fb10fp-4, 0x6.48558p-8 , 0xf.fec43p-4, 0.0 , 0x1.p0, -0x6.48558p-8 , 0xf.fec43p-4, -0xc.8fb3p-8 , 0xf.fb10fp-4, 
-0x1.2d520ap-4 , 0xf.f4e6dp-4, -0x1.917a6cp-4 , 0xf.ec46dp-4, -0x1.f564e6p-4 , 0xf.e1324p-4, -0x2.59020cp-4 , 0xf.d3aacp-4, -0x2.bc4288p-4 , 0xf.c3b28p-4, -0x3.1f1708p-4 , 0xf.b14bep-4, -0x3.81704cp-4 , 0xf.9c79dp-4, -0x3.e33f3p-4 , 0xf.853f8p-4, -0x4.447498p-4 , 0xf.6ba07p-4, -0x4.a50188p-4 , 0xf.4fa0bp-4, -0x5.04d728p-4 , 0xf.31447p-4, -0x5.63e6ap-4 , 0xf.10908p-4, -0x5.c2215p-4 , 0xe.ed89ep-4, -0x6.1f78a8p-4 , 0xe.c835ep-4, -0x6.7bde5p-4 , 0xe.a09a7p-4, -0x6.d744p-4 , 0xe.76bd8p-4, -0x7.319ba8p-4 , 0xe.4aa59p-4, -0x7.8ad75p-4 , 0xe.1c598p-4, -0x7.e2e938p-4 , 0xd.ebe05p-4, -0x8.39c3dp-4 , 0xd.b941ap-4, -0x8.8f59bp-4 , 0xd.84853p-4, -0x8.e39dap-4 , 0xd.4db31p-4, -0x9.3682ap-4 , 0xd.14d3dp-4, -0x9.87fcp-4 , 0xc.d9f02p-4, -0x9.d7fd1p-4 , 0xc.9d112p-4, -0xa.26799p-4 , 0xc.5e403p-4, -0xa.73656p-4 , 0xc.1d87p-4, -0xa.beb4ap-4 , 0xb.daef9p-4, -0xb.085bbp-4 , 0xb.96842p-4, -0xb.504f3p-4 , 0xb.504f3p-4, -0xb.96842p-4 , 0xb.085bbp-4, -0xb.daef9p-4 , 0xa.beb4ap-4, -0xc.1d87p-4 , 0xa.73656p-4, -0xc.5e403p-4 , 0xa.26799p-4, -0xc.9d112p-4 , 0x9.d7fd1p-4, -0xc.d9f02p-4 , 0x9.87fcp-4, -0xd.14d3dp-4 , 0x9.3682ap-4, -0xd.4db31p-4 , 0x8.e39dap-4, -0xd.84853p-4 , 0x8.8f59bp-4, -0xd.b941ap-4 , 0x8.39c3dp-4, -0xd.ebe05p-4 , 0x7.e2e938p-4, -0xe.1c598p-4 , 0x7.8ad75p-4, -0xe.4aa59p-4 , 0x7.319ba8p-4, -0xe.76bd8p-4 , 0x6.d744p-4, -0xe.a09a7p-4 , 0x6.7bde5p-4, -0xe.c835ep-4 , 0x6.1f78a8p-4, -0xe.ed89ep-4 , 0x5.c2215p-4, -0xf.10908p-4 , 0x5.63e6ap-4, -0xf.31447p-4 , 0x5.04d728p-4, -0xf.4fa0bp-4 , 0x4.a50188p-4, -0xf.6ba07p-4 , 0x4.447498p-4, -0xf.853f8p-4 , 0x3.e33f3p-4, -0xf.9c79dp-4 , 0x3.81704cp-4, -0xf.b14bep-4 , 0x3.1f1708p-4, -0xf.c3b28p-4 , 0x2.bc4288p-4, -0xf.d3aacp-4 , 0x2.59020cp-4, -0xf.e1324p-4 , 0x1.f564e6p-4, -0xf.ec46dp-4 , 0x1.917a6cp-4, -0xf.f4e6dp-4 , 0x1.2d520ap-4, -0xf.fb10fp-4 , 0xc.8fb3p-8, -0xf.fec43p-4 , 0x6.48558p-8 }; void initTwiddles() { static bool firstTime = true; if (firstTime) { firstTime = false; cudaMemcpyToSymbol(twiddleFactors, twiddleFactorsHost, kNumTwiddles * sizeof(facebook::cuda::Complex)); } } } // anon namespace }}} // namespace
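// ---------------------------------------------------------------------------
// Hedged sketch (not part of the header above): a host-only sanity check that
// the first few FBFFT32_COSF_* constants really are cos(k*pi/32) rounded to
// single precision, as the Sollya comment claims, and that the delegation
// cos<N>(k) = cos<2N>(2k) therefore yields cos(k*pi/N) for the smaller FFT
// sizes. Requires C++17 for the hexadecimal floating-point literals copied
// verbatim from the header; everything else here is illustrative.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

// First nine table entries, copied from the FBFFT32_COSF_0..FBFFT32_COSF_8 macros.
static const float kCos32Table[] = {
    0x1.p0f,       0xf.ec46dp-4f, 0xf.b14bep-4f, 0xf.4fa0bp-4f,
    0xe.c835ep-4f, 0xe.1c598p-4f, 0xd.4db31p-4f, 0xc.5e403p-4f,
    0xb.504f3p-4f,
};

int main() {
    const float kPi = 3.14159265358979f;
    for (int k = 0; k < 9; ++k) {
        float reference = std::cos(k * kPi / 32.0f);       // what the comment claims
        std::printf("k=%d  table=%.8f  cosf(k*pi/32)=%.8f  diff=%g\n",
                    k, kCos32Table[k], reference, kCos32Table[k] - reference);
    }
    // cos<16>(2) delegates to cos<32>(4), i.e. the same table entry at index 2k,
    // so cos(2k*pi/32) == cos(k*pi/16) and smaller sizes reuse the size-32 table.
    std::printf("cos<16>(2) via table: %.8f  expected cos(pi/8): %.8f\n",
                kCos32Table[4], std::cos(kPi / 8.0f));
    return 0;
}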
#include "separableconvflow_cuda_kernel.cuh" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #define min(a,b) ((a<b)?(a):(b)) #define max(a,b) ((a>b)?(a):(b)) #define DEBUG (0) #ifndef BLOCKDIMX #define BLOCKDIMX (32) #endif #ifndef BLOCKDIMY #define BLOCKDIMY (16) #endif using at::Half; //forward path of our layer template <typename scalar_t> __global__ void SeparableConvFlowLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3, scalar_t* flow_output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w - filter_size + 1; const bool withinYbounds = h_i < h - filter_size + 1; const int batch_i = blockIdx.z; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float flow_y = 0.0f; float sum_weights = 0.0f; for ( int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ]; flow_y += (float)(intFilterY) * temp2 ; sum_weights += temp2; } //sum_weights = fabs(sum_weights); flow_y = flow_y / sum_weights - ((float)(filter_size)-1.0)/2.0; flow_output[batch_i * flow_output_b_stride + 1 * flow_output_c_stride+ h_i* flow_output_h_stride + w_i] = fabs(sum_weights) > 0.0f ? flow_y : -2000; float flow_x = 0.0f; float sum_weights_x = 0.0f; for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ]; flow_x += (float)(intFilterX) * temp3; sum_weights_x += temp3; } //sum_weights_x = fabs(sum_weights_x); flow_x = flow_x / sum_weights_x - ((float)(filter_size)-1.0)/2.0; // what if the sum_weight is less than zeros. flow_output[batch_i * flow_output_b_stride + 0 * flow_output_c_stride + h_i* flow_output_h_stride + w_i] = fabs(sum_weights_x) >0.0f ? 
flow_x : -2000; } return ; } template <typename scalar_t> __global__ void SeparableConvFlowLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3, const scalar_t* __restrict__ gradflow_output, scalar_t* gradinput1, scalar_t* gradinput2, scalar_t* gradinput3 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w - filter_size + 1; const bool withinYbounds = h_i < h - filter_size + 1; const int batch_i = blockIdx.z; if(withinXbounds && withinYbounds){ float flow_y = 0.0f; float sum_weights = 0.0f; for ( int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ]; flow_y += (float)(intFilterY) * temp2 ; sum_weights += temp2; } //flow_y = flow_y / sum_weights - ((float)(filter_size)-1.0)/2.0; //flow_output_data[batch_i * flow_output_b_stride + 1 * flow_output_c_stride+ h_i* flow_output_h_stride + w_i] = // sum_weights >0.0f ? flow_y : -2000; //float sign = sum_weights >0.0f ? 1.0f : -1.0f; //sum_weights = fabs(sum_weights); if(fabs(sum_weights) >0.0f ){ float gradflow_y = gradflow_output[batch_i * flow_output_b_stride + 1* flow_output_c_stride + h_i * flow_output_h_stride + w_i ] ; float offset = flow_y / ( sum_weights * sum_weights); for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ] = gradflow_y * ((float)(intFilterY) / sum_weights - offset); } } float flow_x = 0.0f; float sum_weights_x = 0.0f; for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ]; flow_x += (float)(intFilterX) * temp3; sum_weights_x += temp3; } //flow_x = flow_x / sum_weights_x - ((float)(filter_size)-1.0)/2.0; //flow_output_data[batch_i * flow_output_b_stride + 0 * flow_output_c_stride + h_i* flow_output_h_stride + w_i] = // sum_weights_x >0 ? flow_x : -2000; //float sign_x = sum_weights_x >0.0f ? 
1.0f : -1.0f; //sum_weights_x = fabs(sum_weights_x); if(fabs(sum_weights_x) > 0.0f ){ float gradflow_x = gradflow_output[batch_i * flow_output_b_stride + 0 * flow_output_c_stride + h_i * flow_output_h_stride + w_i]; float offset = flow_x / (sum_weights_x * sum_weights_x); for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { gradinput3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] += gradflow_x * ((float)(intFilterX) /sum_weights_x - offset); } } } return ; } int SeparableConvFlowLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch,const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, at::Tensor& input1, at::Tensor& input2, at::Tensor& input3, at::Tensor& flow_output ) { int error = 1 ; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] { SeparableConvFlowLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //output_b_stride,output_c_stride,output_h_stride,output_w_stride, flow_output_b_stride,flow_output_c_stride,flow_output_h_stride,flow_output_w_stride, input1.data<scalar_t>(),input2.data<scalar_t>(),input3.data<scalar_t>(), flow_output.data<scalar_t>() ); })); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in SeparableConvFlowLayer_gpu_forward_kernel: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int SeparableConvFlowLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, at::Tensor& input1, at::Tensor& input2, at::Tensor& input3, at::Tensor& gradflow_output, at::Tensor& gradinput1, at::Tensor& gradinput2, at::Tensor& gradinput3 ) { int error = 1 ; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] { SeparableConvFlowLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //output_b_stride,output_c_stride,output_h_stride,output_w_stride, flow_output_b_stride,flow_output_c_stride,flow_output_h_stride,flow_output_w_stride, input1.data<scalar_t>(), input2.data<scalar_t>(), input3.data<scalar_t>(), gradflow_output.data<scalar_t>(), gradinput1.data<scalar_t>(), gradinput2.data<scalar_t>(), gradinput3.data<scalar_t>() ); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; }
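// ---------------------------------------------------------------------------
// Hedged sketch (not part of the extension source above): a CPU reference of
// the per-pixel reduction done by SeparableConvFlowLayer_gpu_forward_kernelfunc.
// For one pixel, the flow component is the weight-averaged filter-tap index,
// re-centred by (filter_size - 1) / 2 so a symmetric kernel maps to zero flow;
// when the weights sum to (numerically) zero the kernel writes the sentinel
// -2000. Function and variable names here are illustrative only.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>
#include <vector>

// weights: the filter_size vertical (or horizontal) weights for one pixel.
float flowFromSeparableWeights(const std::vector<float> &weights) {
    float weighted_index_sum = 0.0f;
    float weight_sum = 0.0f;
    for (std::size_t k = 0; k < weights.size(); ++k) {
        weighted_index_sum += static_cast<float>(k) * weights[k];
        weight_sum += weights[k];
    }
    if (!(std::fabs(weight_sum) > 0.0f)) {
        return -2000.0f;                       // sentinel used for degenerate weights
    }
    // Expected tap index minus the kernel centre (filter_size - 1) / 2.
    return weighted_index_sum / weight_sum
           - (static_cast<float>(weights.size()) - 1.0f) / 2.0f;
}

int main() {
    // Weights biased one tap above the centre of a size-5 filter => flow ~ +0.6.
    std::vector<float> w = {0.05f, 0.10f, 0.20f, 0.50f, 0.15f};
    std::printf("flow = %f\n", flowFromSeparableWeights(w));
    return 0;
}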
#include <cuda.h> #include <cuda_runtime.h> #define L {{L}} #define N {{N}} #define D {{D}} #if D > N #error "D must be less or equal N" #endif #define min_macros(a,b) ((a) < (b) ? (a) : (b)) #define ASSET_DEBUG {{ASSET_DEBUG}} #define ULL unsigned long long /** * The maximum number of threads per block. * This number must be in range [1, 1024]. * The effective number of threads will be set dynamically * at runtime to match the tile (width L) of a block. */ #define N_THREADS {{N_THREADS}} /** * To reduce branch divergence in 'next_sequence_sorted' function * within a warp (threads in a warp take different branches), * each thread runs CWR_LOOPS of 'combinations_with_replacement'. */ #define CWR_LOOPS {{CWR_LOOPS}} #define L_BLOCK_SUPREMUM min_macros(N_THREADS, L) typedef {{precision}} asset_float; __constant__ asset_float log_factorial[N + 1]; __constant__ asset_float logK; __constant__ ULL ITERATIONS_TODO; __constant__ ULL L_BLOCK; __constant__ ULL L_NUM_BLOCKS; __constant__ ULL iteration_table[D][N]; /* Maps the iteration ID to the entries of a sequence_sorted array */ /** * Compute capabilities lower than 6.0 don't have hardware support for * double-precision atomicAdd. This software implementation is taken from * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html */ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600 __device__ double atomicAdd(double* address, double val) { ULL* address_as_ull = (ULL*)address; ULL old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /** * Builds the next sequence_sorted, given the absolute iteration ID. * The time complexity is O(N+D), not O(N*D). * * @param sequence_sorted the output sequence_sorted array of size D * @param iteration the global iteration ID */ __device__ void next_sequence_sorted(int *sequence_sorted, ULL iteration) { int row, element = N - 1; for (row = D - 1; row >= 0; row--) { while (element > row && iteration < iteration_table[row][element]) { element--; } iteration -= iteration_table[row][element]; sequence_sorted[D - 1 - row] = element + 1; } } /** * Set 'sequence_sorted' to the next valid sequence of indices in-place. */ __device__ void combinations_with_replacement(int *sequence_sorted) { int increment_id = D - 1; while (increment_id > 0 && sequence_sorted[increment_id - 1] == sequence_sorted[increment_id]) { sequence_sorted[increment_id] = D - increment_id; increment_id--; } sequence_sorted[increment_id]++; } /** * CUDA kernel that computes P_total - the joint survival probabilities matrix. * * @param P_out P_total output array of size L * @param log_du_device input log_du flattened matrix of size L*(D+1) */ __global__ void jsf_uniform_orderstat_3d_kernel(asset_float *P_out, const float *log_du_device) { unsigned int i; ULL row; // the row shift of log_du and P_total in the number of elements, between 0 and L const ULL l_shift = (blockIdx.x % L_NUM_BLOCKS) * L_BLOCK; // account for the last block width that can be less than L_BLOCK const ULL block_width = (L - l_shift < L_BLOCK) ? 
(L - l_shift) : L_BLOCK; extern __shared__ float shared_mem[]; asset_float *P_total = (asset_float*) shared_mem; // L_BLOCK floats float *log_du = (float*)&P_total[L_BLOCK]; // L_BLOCK * (D + 1) floats for (row = threadIdx.x; row < block_width; row += blockDim.x) { P_total[row] = 0; for (i = 0; i <= D; i++) { log_du[row * (D + 1) + i] = log_du_device[(row + l_shift) * (D + 1) + i]; } } __syncthreads(); int di[D + 1]; int sequence_sorted[D]; asset_float P_thread[L_BLOCK_SUPREMUM]; for (row = 0; row < block_width; row++) { P_thread[row] = 0; } const ULL burnout = (blockIdx.x / L_NUM_BLOCKS) * blockDim.x * CWR_LOOPS + threadIdx.x * CWR_LOOPS; const ULL stride = (gridDim.x / L_NUM_BLOCKS) * blockDim.x * CWR_LOOPS; ULL iteration, cwr_loop; for (iteration = burnout; iteration < ITERATIONS_TODO; iteration += stride) { next_sequence_sorted(sequence_sorted, iteration); for (cwr_loop = 0; (cwr_loop < CWR_LOOPS) && (sequence_sorted[0] != N + 1); cwr_loop++) { int prev = N; for (i = 0; i < D; i++) { di[i] = prev - sequence_sorted[i]; prev = sequence_sorted[i]; } di[D] = sequence_sorted[D - 1]; asset_float sum_log_di_factorial = 0.f; for (i = 0; i <= D; i++) { sum_log_di_factorial += log_factorial[di[i]]; } asset_float colsum; const asset_float colsum_base = logK - sum_log_di_factorial; const float *log_du_row = log_du; for (row = 0; row < block_width; row++) { colsum = colsum_base; for (i = 0; i <= D; i++) { if (di[i] != 0) { colsum += di[i] * log_du_row[i]; } } P_thread[row] += exp(colsum); log_du_row += D + 1; } combinations_with_replacement(sequence_sorted); } } for (row = threadIdx.x; row < block_width + threadIdx.x; row++) { // Reduce atomicAdd conflicts by adding threadIdx.x to each row atomicAdd(P_total + row % block_width, P_thread[row % block_width]); } __syncthreads(); for (row = threadIdx.x; row < block_width; row += blockDim.x) { atomicAdd(P_out + row + l_shift, P_total[row]); } } /** * Creates a flattened matrix (D-1)*N that will be used * to map the iteration ID to a sequence_sorted array. 
*/ ULL create_iteration_table() { ULL *m = (ULL*) calloc(D * N, sizeof(ULL)); unsigned int row, col; for (col = 0; col < N; col++) { m[col] = col; } for (row = 1; row < D; row++) { ULL sum = 0; for (col = row + 1; col < N; col++) { sum += m[(row - 1) * N + col]; m[row * N + col] = sum; } } ULL it_todo = 1; double it_todo_double = 1.0; for (row = 0; row < D; row++) { it_todo += m[row * N + N-1]; it_todo_double += m[row * N + N-1]; } // check for the integer overflow; // values greater than ULONG_MAX are not supported by CUDA assert(it_todo_double <= ULONG_MAX); gpuErrchk( cudaMemcpyToSymbol(iteration_table, m, sizeof(ULL) * D * N) ); gpuErrchk( cudaMemcpyToSymbol((const void*) &ITERATIONS_TODO, (const void*) &it_todo, sizeof(ULL)) ); free(m); return it_todo; } // For debugging purposes only void print_constants() { int i, col; printf(">>> iteration_table\n"); ULL iteration_table_host[D * N]; cudaMemcpyFromSymbol(iteration_table_host, iteration_table, sizeof(ULL) * D * N); int row; for (row = 0; row < D; row++) { for (col = 0; col < N; col++) { printf("%10llu ", iteration_table_host[row * N + col]); } printf("\n"); } printf("\n"); ULL it_todo_host; cudaMemcpyFromSymbol((void*)&it_todo_host, (const void*)&ITERATIONS_TODO, sizeof(ULL)); printf(">>> ITERATIONS_TODO = %llu\n", it_todo_host); ULL l_block; cudaMemcpyFromSymbol((void*)&l_block, (const void*)&L_BLOCK, sizeof(ULL)); printf(">>> L_BLOCK = %llu\n", l_block); ULL l_num_blocks; cudaMemcpyFromSymbol((void*)&l_num_blocks, (const void*)&L_NUM_BLOCKS, sizeof(ULL)); printf(">>> L_NUM_BLOCKS = %llu\n", l_num_blocks); asset_float logK_host; cudaMemcpyFromSymbol((void*)&logK_host, (const void*)&logK, sizeof(asset_float)); printf(">>> logK = %f\n\n", logK_host); asset_float log_factorial_host[N + 1]; cudaMemcpyFromSymbol(log_factorial_host, log_factorial, sizeof(asset_float) * (N+1)); printf(">>> log_factorial\n"); for (i = 0; i <= N; i++) { printf("%f ", log_factorial_host[i]); } printf("\n\n"); } /** * ASSET jsf_uniform_orderstat_3d host function to calculate P_total. * The result of a calculation is saved in P_total_host array. * * @param P_total_host a pointer to P_total array to be calculated * @param log_du_host input flattened L*(D+1) matrix of log_du values */ void jsf_uniform_orderstat_3d(asset_float *P_total_host, FILE *log_du_file) { float *log_du_device; gpuErrchk( cudaMalloc((void**)&log_du_device, sizeof(float) * L * (D + 1)) ); float *log_du_host; #if L * (D + 1) < 100000000LLU // For arrays of size <100 Mb, allocate host memory for log_du log_du_host = (float*) malloc(sizeof(float) * L * (D + 1)); fread(log_du_host, sizeof(float), L * (D + 1), log_du_file); gpuErrchk( cudaMemcpyAsync(log_du_device, log_du_host, sizeof(float) * L * (D + 1), cudaMemcpyHostToDevice) ); #else // Use P_total buffer to read log_du and copy batches to a GPU card log_du_host = (float*) P_total_host; ULL col; for (col = 0; col <= D; col++) { fread(log_du_host, sizeof(float), L, log_du_file); // Wait till the copy finishes before filling the buffer with a next chunk. gpuErrchk( cudaMemcpy(log_du_device + col * L, log_du_host, sizeof(float) * L, cudaMemcpyHostToDevice) ); } #endif fclose(log_du_file); asset_float *P_total_device; // Initialize P_total_device with zeros. // Note that values other than 0x00 or 0xFF (NaN) won't work // with cudaMemset when the data type is float or double. 
gpuErrchk( cudaMalloc((void**)&P_total_device, sizeof(asset_float) * L) ); gpuErrchk( cudaMemsetAsync(P_total_device, 0, sizeof(asset_float) * L) ); ULL it_todo = create_iteration_table(); asset_float logK_host = 0.f; asset_float log_factorial_host[N + 1] = {0.f}; int i; for (i = 1; i <= N; i++) { logK_host += log((asset_float) i); log_factorial_host[i] = logK_host; } gpuErrchk( cudaMemcpyToSymbol((const void*) &logK, (const void*) &logK_host, sizeof(asset_float)) ); gpuErrchk( cudaMemcpyToSymbol(log_factorial, log_factorial_host, sizeof(asset_float) * (N + 1)) ); cudaDeviceProp device_prop; gpuErrchk( cudaGetDeviceProperties(&device_prop, 0) ); const ULL max_l_block = device_prop.sharedMemPerBlock / (sizeof(asset_float) * (D + 2)); /** * It's not necessary to match N_THREADS with the final L_BLOCK. Alternatively, * the desired L_BLOCK can be another parameter specified by the user. But * the optimal L_BLOCK on average matches N_THREADS, therefore, to avoid * the user thinking too much, we take care of the headache by setting * L_BLOCK = N_THREADS. */ unsigned int n_threads = (unsigned int) min_macros(N_THREADS, min_macros(max_l_block, device_prop.maxThreadsPerBlock)); if (n_threads > device_prop.warpSize) { // It's more efficient to make the number of threads // a multiple of the warp size (32). n_threads -= n_threads % device_prop.warpSize; } const ULL l_block = min_macros(n_threads, L); gpuErrchk( cudaMemcpyToSymbol((const void*) &L_BLOCK, (const void*) &l_block, sizeof(ULL)) ); const ULL l_num_blocks = (ULL) ceil(L * 1.f / l_block); gpuErrchk( cudaMemcpyToSymbol((const void*) &L_NUM_BLOCKS, (const void*) &l_num_blocks, sizeof(ULL)) ); ULL grid_size = (ULL) ceil(it_todo * 1.f / (n_threads * CWR_LOOPS)); grid_size = min_macros(grid_size, device_prop.maxGridSize[0]); if (grid_size > l_num_blocks) { // make grid_size divisible by l_num_blocks grid_size -= grid_size % l_num_blocks; } else { // grid_size must be at least l_num_blocks grid_size = l_num_blocks; } printf(">>> it_todo=%llu, grid_size=%llu, L_BLOCK=%llu, N_THREADS=%u\n\n", it_todo, grid_size, l_block, n_threads); // Wait for asynchronous memory copies to finish. gpuErrchk( cudaDeviceSynchronize() ); if (log_du_host != (float*) P_total_host) { // the memory has been allocated free(log_du_host); } #if ASSET_DEBUG print_constants(); #endif // Executing the kernel const ULL shared_mem_used = sizeof(asset_float) * l_block + sizeof(float) * l_block * (D + 1); jsf_uniform_orderstat_3d_kernel<<<grid_size, n_threads, shared_mem_used>>>(P_total_device, log_du_device); // Check for invalid launch argument. gpuErrchk( cudaPeekAtLastError() ); // Transfer data back to host memory. // If the exit code is non-zero, the kernel failed to complete the task. cudaError_t cuda_completed_status = cudaMemcpy(P_total_host, P_total_device, sizeof(asset_float) * L, cudaMemcpyDeviceToHost); cudaFree(P_total_device); cudaFree(log_du_device); gpuErrchk( cuda_completed_status ); } int main(int argc, char* argv[]) { // compile command: nvcc -o asset.o asset.cu // (run after you fill the template keys L, N, D, etc.) 
if (argc != 3) { fprintf(stderr, "Usage: ./asset.o /path/to/log_du.dat /path/to/P_total_output.dat\n"); return 1; } char *log_du_path = argv[1]; char *P_total_path = argv[2]; FILE *log_du_file = fopen(log_du_path, "rb"); if (log_du_file == NULL) { fprintf(stderr, "File '%s' not found\n", log_du_path); return 1; } asset_float *P_total = (asset_float*) malloc(sizeof(asset_float) * L); jsf_uniform_orderstat_3d(P_total, log_du_file); FILE *P_total_file = fopen(P_total_path, "wb"); if (P_total_file == NULL) { free(P_total); fprintf(stderr, "Could not open '%s' for writing.\n", P_total_path); return 1; } fwrite(P_total, sizeof(asset_float), L, P_total_file); fclose(P_total_file); free(P_total); return 0; }
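// ---------------------------------------------------------------------------
// Hedged sketch (separate from the ASSET kernel above): the inner loop of
// jsf_uniform_orderstat_3d_kernel accumulates terms of the form
//   exp( log(N!) - sum_i log(d_i!) + sum_i d_i * log(du_i) ),
// i.e. a multinomial coefficient times a product of probabilities, evaluated
// in log-space so the factorials never overflow. This standalone host function
// reproduces that single-term computation; names are illustrative, and
// lgamma stands in for the kernel's precomputed log_factorial table.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>
#include <vector>

// log(n!) via lgamma; the device code instead reads a constant-memory table.
static double logFactorial(int n) { return std::lgamma(static_cast<double>(n) + 1.0); }

// counts d_i must sum to N; log_du holds log(du_i) for each of the D+1 bins.
double logSpaceMultinomialTerm(const std::vector<int> &counts,
                               const std::vector<double> &log_du) {
    int n_total = 0;
    double log_term = 0.0;
    for (std::size_t i = 0; i < counts.size(); ++i) {
        n_total += counts[i];
        log_term -= logFactorial(counts[i]);       // - sum_i log(d_i!)
        if (counts[i] != 0) {                      // the kernel also skips d_i == 0
            log_term += counts[i] * log_du[i];     // + d_i * log(du_i)
        }
    }
    log_term += logFactorial(n_total);             // + log(N!)
    return log_term;                               // caller applies exp()
}

int main() {
    // Three bins with counts 2,1,1 and probabilities 0.5, 0.3, 0.2:
    // term = 4!/(2!1!1!) * 0.5^2 * 0.3 * 0.2 = 12 * 0.015 = 0.18.
    std::vector<int> d = {2, 1, 1};
    std::vector<double> log_du = {std::log(0.5), std::log(0.3), std::log(0.2)};
    std::printf("term = %f\n", std::exp(logSpaceMultinomialTerm(d, log_du)));
    return 0;
}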
#ifdef _WIN32 #pragma warning(disable : 4244) #endif #include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h" #include <cub/device/device_radix_sort.cuh> #include <cub/device/device_reduce.cuh> #include <cub/device/device_run_length_encode.cuh> #include <cub/device/device_scan.cuh> #include <cub/iterator/counting_input_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/accumulation_type.h" #include "core/providers/cuda/shared_inc/cuda_call.h" namespace onnxruntime { namespace cuda { namespace gather_grad_internal { // Note: // For these implementations, first we generate sorted lists of dX and dY // indices, ordered by dX indices. Then, we can consider segments of the sorted // lists. // // Each continuous run of indices with the same dX value in dX_indices_sorted // forms a segment. // // For example, given: // dX_indices_sorted = [1, 1, 2, 2, 2, 3] // dY_indices_sorted = [1, 4, 0, 3, 5, 2] // The segments will be: '--' '-----' ' // // The segments can be processed in parallel, or further divided into partial // segments for increased parallelism. // unit for handling indexing and counting of segments or partial segments using SegmentIndex_t = GatheredIndexIndex_t; constexpr GatheredIndexIndex_t kMaxPartialSegmentSize = 10; template <typename TInputIterator, typename TOutputIterator> __global__ void CopyKernel(TOutputIterator dst, TInputIterator src, int64_t length) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, length); dst[id] = src[id]; } // get sorted dX and dY indices, ordered by dX indices template <typename TIndex> void GetSortedIndices( cudaStream_t stream, const CudaScratchBufferAllocator& allocator, const TIndex* dX_indices, GatheredIndexIndex_t num_gathered_indices, IAllocatorUniquePtr<TIndex>& dX_indices_sorted_out, IAllocatorUniquePtr<TIndex>& dY_indices_sorted_out) { auto dY_indices = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); CopyKernel<<<CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock), GridDim::maxThreadsPerBlock, 0, stream>>>( dY_indices.get(), cub::CountingInputIterator<TIndex>{0}, num_gathered_indices); auto dX_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); auto dY_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_size_bytes, dX_indices, dX_indices_sorted.get(), dY_indices.get(), dY_indices_sorted.get(), num_gathered_indices, 0, sizeof(TIndex)*8, stream)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs( temp_storage.get(), temp_storage_size_bytes, dX_indices, dX_indices_sorted.get(), dY_indices.get(), dY_indices_sorted.get(), num_gathered_indices, 0, sizeof(TIndex)*8, stream)); dX_indices_sorted_out = std::move(dX_indices_sorted); dY_indices_sorted_out = std::move(dY_indices_sorted); } template <typename T> IAllocatorUniquePtr<T> GetOffsetsFromCounts( cudaStream_t stream, const CudaScratchBufferAllocator& allocator, const T* counts, int32_t num_counts) { auto offsets = allocator.GetScratchBuffer<T>(num_counts); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceScan::ExclusiveSum( nullptr, temp_storage_size_bytes, counts, offsets.get(), num_counts, stream)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceScan::ExclusiveSum( 
temp_storage.get(), temp_storage_size_bytes, counts, offsets.get(), num_counts, stream)); return offsets; } // adapted from here: // https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/Embedding.cu#L121 template <typename T, typename TIndex, int NumElementsPerThread> __global__ void DirectSumKernel( const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches) { GatheredIndexIndex_t idx = blockIdx.x * 4 + threadIdx.y; if (idx < num_gathered_indices && (idx == 0 || dX_indices_sorted[idx] != dX_indices_sorted[idx - 1])) { do { // All index values are expected to be within bounds [-s, s-1] along axis of size s. auto target_row = dX_indices_sorted[idx]; if (target_row < 0) target_row += gather_dimension_size; for (int64_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) { const auto gathered_element_idx_start = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread; const auto dX_row_offset = (batch_idx * gather_dimension_size + target_row) * num_gathered_per_index; const auto dY_row_offset = (batch_idx * num_gathered_indices + dY_indices_sorted[idx]) * num_gathered_per_index; AccumulationType_t<T> dY_value[NumElementsPerThread]; AccumulationType_t<T> dX_value[NumElementsPerThread]; #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE; if (gathered_element_idx < num_gathered_per_index) { dY_value[ii] = static_cast<AccumulationType_t<T>>(dY_data[dY_row_offset + gathered_element_idx]); dX_value[ii] = static_cast<AccumulationType_t<T>>(dX_data[dX_row_offset + gathered_element_idx]); } } #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { dX_value[ii] += dY_value[ii]; } #pragma unroll for (int ii = 0; ii < NumElementsPerThread; ii++) { const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE; if (gathered_element_idx < num_gathered_per_index) { dX_data[dX_row_offset + gathered_element_idx] = static_cast<T>(dX_value[ii]); } } } idx++; } while (idx < num_gathered_indices && dX_indices_sorted[idx] == dX_indices_sorted[idx - 1]); } } // directly sum gathered dY values into the corresponding dX value template <typename T, typename TIndex> void DirectSumImpl( cudaStream_t stream, const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches) { dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, GridDim::maxElementsPerThread * GPU_WARP_SIZE)); DirectSumKernel<T, TIndex, GridDim::maxElementsPerThread><<<grid, block, 0, stream>>>( dX_indices_sorted, dY_indices_sorted, dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } // partial sums implementation adapted from here: // https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/EmbeddingBackwardKernel.cu __global__ void ComputePerSegmentPartialSegmentCountsKernel( SegmentIndex_t* ret, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_of_segments, GatheredIndexIndex_t num_gathered_indices) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; if (id < num_of_segments) { const 
auto idx_start = segment_offsets[id]; const auto idx_end = (id == num_of_segments - 1) ? num_gathered_indices : segment_offsets[id + 1]; const auto size = idx_end - idx_start; ret[id] = CeilDiv(size, kMaxPartialSegmentSize); } } __global__ void ComputePartialSegmentOffsetsKernel( GatheredIndexIndex_t* ret, const SegmentIndex_t* partials_per_segment, const SegmentIndex_t* partials_per_segment_offset, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_of_segments) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; if (id < num_of_segments) { auto idx = partials_per_segment_offset[id]; const auto num_partials = partials_per_segment[id]; const auto segment_offset = segment_offsets[id]; for (SegmentIndex_t i = 0; i < num_partials; ++i) { ret[idx++] = segment_offset + i * kMaxPartialSegmentSize; } } } template <typename T, typename TIndex> __global__ void ComputePartialSegmentSumsKernel( const TIndex* dY_indices_sorted, const T* dY_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, const GatheredIndexIndex_t* partial_segment_offsets, SegmentIndex_t num_partial_segments, AccumulationType_t<T>* partial_segment_sums, const int64_t num_gathered_per_index_warp_size_multiple) { const auto id = blockIdx.x * blockDim.x + threadIdx.x; const auto partial_segment_id = id / num_gathered_per_index_warp_size_multiple; const auto gathered_element_id = id % num_gathered_per_index_warp_size_multiple; const auto batch_id = blockIdx.y; if (gathered_element_id >= num_gathered_per_index) { return; } if (partial_segment_id >= num_partial_segments) { return; } const auto idx_begin = partial_segment_offsets[partial_segment_id]; const auto idx_end = (partial_segment_id == num_partial_segments - 1) ? num_gathered_indices : partial_segment_offsets[partial_segment_id + 1]; AccumulationType_t<T> partial_segment_sum = 0; for (auto idx = idx_begin; idx < idx_end; ++idx) { const auto target_row = dY_indices_sorted[idx]; partial_segment_sum += static_cast<AccumulationType_t<T>>( dY_data[batch_id * num_gathered_indices * num_gathered_per_index + target_row * num_gathered_per_index + gathered_element_id]); } partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index + partial_segment_id * num_gathered_per_index + gathered_element_id] = partial_segment_sum; } template <typename T, typename TIndex> __global__ void ComputeSegmentSumsAndScatterKernel( const TIndex* dX_indices_sorted, T* dX_data, int64_t num_gathered_per_index, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_segments, const AccumulationType_t<T>* partial_segment_sums, const SegmentIndex_t* per_segment_partial_segment_offsets, SegmentIndex_t num_partial_segments, const int64_t num_gathered_per_index_warp_size_multiple, const int64_t gather_dimension_size) { const auto gid = blockIdx.x * blockDim.x + threadIdx.x; const auto segment_id = gid / num_gathered_per_index_warp_size_multiple; const auto gathered_element_id = gid % num_gathered_per_index_warp_size_multiple; const auto batch_id = blockIdx.y; if (gathered_element_id >= num_gathered_per_index) { return; } if (segment_id >= num_segments) { return; } const auto idx_begin = per_segment_partial_segment_offsets[segment_id]; const auto idx_end = (segment_id == num_segments - 1) ? 
num_partial_segments : per_segment_partial_segment_offsets[segment_id + 1]; AccumulationType_t<T> segment_sum = 0; for (auto idx = idx_begin; idx < idx_end; ++idx) { segment_sum += partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index + idx * num_gathered_per_index + gathered_element_id]; } auto target_row = dX_indices_sorted[segment_offsets[segment_id]]; // All index values are expected to be within bounds [-s, s-1] along axis of size s. if (target_row < 0) target_row += gather_dimension_size; dX_data[batch_id * gather_dimension_size * num_gathered_per_index + target_row * num_gathered_per_index + gathered_element_id] = segment_sum; } // get partial sums of gathered dY values first, then sum the partial sums into // the corresponding dX value template <typename T, typename TIndex> void PartialSumsImpl( cudaStream_t stream, const CudaScratchBufferAllocator& allocator, const TIndex* dX_indices_sorted, const TIndex* dY_indices_sorted, const T* dY_data, T* dX_data, GatheredIndexIndex_t num_gathered_indices, int64_t num_gathered_per_index, int64_t gather_dimension_size, int64_t num_batches, const GatheredIndexIndex_t* segment_offsets, SegmentIndex_t num_segments) { // each segment is split into partial segments of at most // kMaxPartialSegmentSize index pairs. // compute the number of partial segments per segment auto per_segment_partial_segment_counts = allocator.GetScratchBuffer<SegmentIndex_t>(num_segments); { const auto blocks_per_grid = CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock); ComputePerSegmentPartialSegmentCountsKernel<<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0, stream>>>( per_segment_partial_segment_counts.get(), segment_offsets, num_segments, num_gathered_indices); } // compute partial segment offsets per segment auto per_segment_partial_segment_offsets = GetOffsetsFromCounts( stream, allocator, per_segment_partial_segment_counts.get(), num_segments); SegmentIndex_t host_num_partial_segments = 0; { SegmentIndex_t last_segment_partial_segment_offset = 0, last_segment_partial_segment_count = 0; // CPU/GPU sync! CUDA_CALL_THROW(cudaMemcpyAsync( &last_segment_partial_segment_offset, &per_segment_partial_segment_offsets.get()[num_segments - 1], sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost, stream)); // CPU/GPU sync! 
CUDA_CALL_THROW(cudaMemcpyAsync( &last_segment_partial_segment_count, &per_segment_partial_segment_counts.get()[num_segments - 1], sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost, stream)); CUDA_CALL_THROW(cudaStreamSynchronize(stream)); host_num_partial_segments = last_segment_partial_segment_offset + last_segment_partial_segment_count; } // compute index offsets per partial segment auto partial_segment_offsets = allocator.GetScratchBuffer<GatheredIndexIndex_t>(host_num_partial_segments); { const auto blocks_per_grid = CeilDiv(num_segments, GridDim::maxThreadsPerBlock); ComputePartialSegmentOffsetsKernel<<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0, stream>>>( partial_segment_offsets.get(), per_segment_partial_segment_counts.get(), per_segment_partial_segment_offsets.get(), segment_offsets, num_segments); } { const auto num_gathered_per_index_warp_size_multiple = CeilDiv(num_gathered_per_index, GPU_WARP_SIZE) * GPU_WARP_SIZE; const auto threads_per_block = std::min<int64_t>(num_gathered_per_index_warp_size_multiple, GridDim::maxThreadsPerBlock); // compute partial segment sums auto partial_segment_sums = allocator.GetScratchBuffer<AccumulationType_t<T>>( num_batches * host_num_partial_segments * num_gathered_per_index); { const dim3 blocks_per_grid( CeilDiv(host_num_partial_segments * num_gathered_per_index_warp_size_multiple, threads_per_block), num_batches); ComputePartialSegmentSumsKernel<<<blocks_per_grid, threads_per_block, 0, stream>>>( dY_indices_sorted, dY_data, num_gathered_indices, num_gathered_per_index, partial_segment_offsets.get(), host_num_partial_segments, partial_segment_sums.get(), num_gathered_per_index_warp_size_multiple); } // compute segment sums from partial segment sums { const dim3 blocks_per_grid( CeilDiv(num_segments * num_gathered_per_index_warp_size_multiple, threads_per_block), num_batches); ComputeSegmentSumsAndScatterKernel<<<blocks_per_grid, threads_per_block, 0, stream>>>( dX_indices_sorted, dX_data, num_gathered_per_index, segment_offsets, num_segments, partial_segment_sums.get(), per_segment_partial_segment_offsets.get(), host_num_partial_segments, num_gathered_per_index_warp_size_multiple, gather_dimension_size); } } } template <typename T, typename TIndex> void Impl( cudaStream_t stream, const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted; GetSortedIndices( stream, allocator, dX_indices, num_gathered_indices, dX_indices_sorted, dY_indices_sorted); // get number of segments and segment counts SegmentIndex_t host_num_segments = 0; auto segment_counts = allocator.GetScratchBuffer<GatheredIndexIndex_t>(num_gathered_indices); { auto num_segments = allocator.GetScratchBuffer<SegmentIndex_t>(1); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_size_bytes, dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(), num_segments.get(), num_gathered_indices, stream)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceRunLengthEncode::Encode( temp_storage.get(), temp_storage_size_bytes, dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(), num_segments.get(), num_gathered_indices, stream)); // CPU/GPU sync! 
CUDA_CALL_THROW(cudaMemcpyAsync( &host_num_segments, num_segments.get(), sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost, stream)); CUDA_CALL_THROW(cudaStreamSynchronize(stream)); } // get largest segment size and use that to select implementation GatheredIndexIndex_t host_max_segment_count = 0; { auto max_segment_count = allocator.GetScratchBuffer<GatheredIndexIndex_t>(1); size_t temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceReduce::Max( nullptr, temp_storage_size_bytes, segment_counts.get(), max_segment_count.get(), host_num_segments, stream)); auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceReduce::Max( temp_storage.get(), temp_storage_size_bytes, segment_counts.get(), max_segment_count.get(), host_num_segments, stream)); // CPU/GPU sync! CUDA_CALL_THROW(cudaMemcpyAsync( &host_max_segment_count, max_segment_count.get(), sizeof(GatheredIndexIndex_t), cudaMemcpyDeviceToHost, stream)); CUDA_CALL_THROW(cudaStreamSynchronize(stream)); } constexpr GatheredIndexIndex_t kMaxSegmentSizeThreshold = 32; if (host_max_segment_count <= kMaxSegmentSizeThreshold) { DirectSumImpl( stream, dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } else { auto segment_offsets = GetOffsetsFromCounts( stream, allocator, segment_counts.get(), host_num_segments); segment_counts.reset(); PartialSumsImpl( stream, allocator, dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches, segment_offsets.get(), host_num_segments); } } // this is a backup implementation that doesn't incur GPU/CPU syncs, but // doesn't perform well if there are many duplicate values in dX_indices template <typename T, typename TIndex> void Impl_Simplified( cudaStream_t stream, const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted; GetSortedIndices( stream, allocator, dX_indices, num_gathered_indices, dX_indices_sorted, dY_indices_sorted); dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, GridDim::maxElementsPerThread * GPU_WARP_SIZE)); DirectSumKernel<T, TIndex, GridDim::maxElementsPerThread><<<grid, block, 0, stream>>>( dX_indices_sorted.get(), dY_indices_sorted.get(), dY_data, dX_data, num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches); } } // namespace gather_grad_internal template <typename T, typename TIndex> void GatherGradImpl( cudaStream_t stream, const CudaScratchBufferAllocator& allocator, const T* dY_data, const TIndex* dX_indices, const GatheredIndexIndex_t num_gathered_indices, const int64_t gather_dimension_size, const int64_t num_gathered_per_index, const int64_t num_batches, T* dX_data) { gather_grad_internal::Impl( stream, allocator, dY_data, dX_indices, num_gathered_indices, gather_dimension_size, num_gathered_per_index, num_batches, dX_data); } #define SPECIALIZED(T, TIndex) \ template void GatherGradImpl<T, TIndex>( \ cudaStream_t stream, \ const CudaScratchBufferAllocator& allocator, \ const T* dY_data, \ const TIndex* dX_indices, \ const GatheredIndexIndex_t num_gathered_indices, \ const int64_t gather_dimension_size, \ 
const int64_t num_gathered_per_index, \ const int64_t num_batches, \ T* dX_data); #define SPECIALIZED_WITH_IDX(T) \ SPECIALIZED(T, int32_t) \ SPECIALIZED(T, int64_t) SPECIALIZED_WITH_IDX(float) SPECIALIZED_WITH_IDX(half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_WITH_IDX(nv_bfloat16) #endif #undef SPECIALIZED_WITH_IDX #undef SPECIALIZED } // namespace cuda } // namespace onnxruntime
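// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): the segmented direct-sum /
// partial-sums machinery exists to avoid atomic contention when dX_indices
// contains many repeated values. The underlying operation is simply
// dX[dX_indices[i]] += dY[i] for every gathered row. A naive single-batch
// kernel could express that as below; the kernel name and the atomicAdd
// strategy are assumptions for illustration only, not the ONNX Runtime path.
// Launch with one thread per (gathered row, element) pair.
template <typename TIndex>
__global__ void NaiveGatherGradSketch(
    const float* dY_data, const TIndex* dX_indices, float* dX_data,
    int64_t num_gathered_indices, int64_t num_gathered_per_index,
    int64_t gather_dimension_size) {
  const int64_t i =
      static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i >= num_gathered_indices * num_gathered_per_index) return;
  const int64_t gathered_idx = i / num_gathered_per_index;
  const int64_t element_idx = i % num_gathered_per_index;
  int64_t row = dX_indices[gathered_idx];
  if (row < 0) row += gather_dimension_size;  // indices may be negative
  // Duplicate indices from different threads hit the same dX row, hence the
  // atomic add; the segmented kernels above avoid exactly this contention.
  atomicAdd(&dX_data[row * num_gathered_per_index + element_idx],
            dY_data[gathered_idx * num_gathered_per_index + element_idx]);
}
// ---------------------------------------------------------------------------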
the_stack
template <unsigned int block_size> __global__ void FarthestPointSamplingKernel( // clang-format off const at::PackedTensorAccessor64<float, 3, at::RestrictPtrTraits> points, const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> lengths, const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> K, at::PackedTensorAccessor64<int64_t, 2, at::RestrictPtrTraits> idxs, at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> min_point_dist, const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits> start_idxs // clang-format on ) { // Get constants const int64_t N = points.size(0); const int64_t P = points.size(1); const int64_t D = points.size(2); // Create single shared memory buffer which is split and cast to different // types: dists/dists_idx are used to save the maximum distances seen by the // points processed by any one thread and the associated point indices. // These values only need to be accessed by other threads in this block which // are processing the same batch and not by other blocks. extern __shared__ char shared_buf[]; float* dists = (float*)shared_buf; // block_size floats int64_t* dists_idx = (int64_t*)&dists[block_size]; // block_size int64_t // Get batch index and thread index const int64_t batch_idx = blockIdx.x; const size_t tid = threadIdx.x; // If K is greater than the number of points in the pointcloud // we only need to iterate until the smaller value is reached. const int64_t k_n = min(K[batch_idx], lengths[batch_idx]); // Write the first selected point to global memory in the first thread int64_t selected = start_idxs[batch_idx]; if (tid == 0) idxs[batch_idx][0] = selected; // Iterate to find k_n sampled points for (int64_t k = 1; k < k_n; ++k) { // Keep track of the maximum of the minimum distance to previously selected // points seen by this thread int64_t max_dist_idx = 0; float max_dist = -1.0; // Iterate through all the points in this pointcloud. For already selected // points, the minimum distance to the set of previously selected points // will be 0.0 so they won't be selected again. for (int64_t p = tid; p < lengths[batch_idx]; p += block_size) { // Calculate the distance to the last selected point float dist2 = 0.0; for (int64_t d = 0; d < D; ++d) { float diff = points[batch_idx][selected][d] - points[batch_idx][p][d]; dist2 += (diff * diff); } // If the distance of point p to the last selected point is // less than the previous minimum distance of p to the set of selected // points, then updated the corresponding value in min_point_dist // so it always contains the min distance. const float p_min_dist = min(dist2, min_point_dist[batch_idx][p]); min_point_dist[batch_idx][p] = p_min_dist; // Update the max distance and point idx for this thread. max_dist_idx = (p_min_dist > max_dist) ? p : max_dist_idx; max_dist = (p_min_dist > max_dist) ? p_min_dist : max_dist; } // After going through all points for this thread, save the max // point and idx seen by this thread. Each thread sees P/block_size points. dists[tid] = max_dist; dists_idx[tid] = max_dist_idx; // Sync to ensure all threads in the block have updated their max point. __syncthreads(); // Parallelized block reduction to find the max point seen by // all the threads in this block for iteration k. // Each block represents one batch element so we can use a divide/conquer // approach to find the max, syncing all threads after each step. 
for (int s = block_size / 2; s > 0; s >>= 1) { if (tid < s) { // Compare the best point seen by two threads and update the shared // memory at the location of the first thread index with the max out // of the two threads. if (dists[tid] < dists[tid + s]) { dists[tid] = dists[tid + s]; dists_idx[tid] = dists_idx[tid + s]; } } __syncthreads(); } // TODO(nikhilar): As reduction proceeds, the number of “active” threads // decreases. When tid < 32, there should only be one warp left which could // be unrolled. // The overall max after reducing will be saved // at the location of tid = 0. selected = dists_idx[0]; if (tid == 0) { // Write the farthest point for iteration k to global memory idxs[batch_idx][k] = selected; } } } at::Tensor FarthestPointSamplingCuda( const at::Tensor& points, // (N, P, 3) const at::Tensor& lengths, // (N,) const at::Tensor& K, // (N,) const at::Tensor& start_idxs) { // Check inputs are on the same device at::TensorArg p_t{points, "points", 1}, lengths_t{lengths, "lengths", 2}, k_t{K, "K", 3}, start_idxs_t{start_idxs, "start_idxs", 4}; at::CheckedFrom c = "FarthestPointSamplingCuda"; at::checkAllSameGPU(c, {p_t, lengths_t, k_t, start_idxs_t}); at::checkAllSameType(c, {lengths_t, k_t, start_idxs_t}); // Set the device for the kernel launch based on the device of points at::cuda::CUDAGuard device_guard(points.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK( points.size(0) == lengths.size(0), "Point and lengths must have the same batch dimension"); TORCH_CHECK( points.size(0) == K.size(0), "Points and K must have the same batch dimension"); const int64_t N = points.size(0); const int64_t P = points.size(1); const int64_t max_K = at::max(K).item<int64_t>(); // Initialize the output tensor with the sampled indices auto idxs = at::full({N, max_K}, -1, lengths.options()); auto min_point_dist = at::full({N, P}, 1e10, points.options()); if (N == 0 || P == 0) { AT_CUDA_CHECK(cudaGetLastError()); return idxs; } // Set the number of blocks to the batch size so that the // block reduction step can be done for each pointcloud // to find the max distance point in the pointcloud at each iteration. const size_t blocks = N; // Set the threads to the nearest power of 2 of the number of // points in the pointcloud (up to the max threads in a block). // This will ensure each thread processes the minimum necessary number of // points (P/threads). const int points_pow_2 = std::log(static_cast<double>(P)) / std::log(2.0); const size_t threads = max(min(1 << points_pow_2, MAX_THREADS_PER_BLOCK), 1); // Create the accessors auto points_a = points.packed_accessor64<float, 3, at::RestrictPtrTraits>(); auto lengths_a = lengths.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(); auto K_a = K.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(); auto idxs_a = idxs.packed_accessor64<int64_t, 2, at::RestrictPtrTraits>(); auto start_idxs_a = start_idxs.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(); auto min_point_dist_a = min_point_dist.packed_accessor64<float, 2, at::RestrictPtrTraits>(); // Initialize the shared memory which will be used to store the // distance/index of the best point seen by each thread. size_t shared_mem = threads * sizeof(float) + threads * sizeof(int64_t); // TODO: using shared memory for min_point_dist gives an ~2x speed up // compared to using a global (N, P) shaped tensor, however for // larger pointclouds this may exceed the shared memory limit per block. 
// If a speed up is required for smaller pointclouds, then the storage // could be switched to shared memory if the required total shared memory is // within the memory limit per block. // Support a case for all powers of 2 up to MAX_THREADS_PER_BLOCK possible per // block. switch (threads) { case 1024: FarthestPointSamplingKernel<1024> <<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 512: FarthestPointSamplingKernel<512><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 256: FarthestPointSamplingKernel<256><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 128: FarthestPointSamplingKernel<128><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 64: FarthestPointSamplingKernel<64><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 32: FarthestPointSamplingKernel<32><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 16: FarthestPointSamplingKernel<16><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 8: FarthestPointSamplingKernel<8><<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 4: FarthestPointSamplingKernel<4><<<threads, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 2: FarthestPointSamplingKernel<2><<<threads, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; case 1: FarthestPointSamplingKernel<1><<<threads, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); break; default: FarthestPointSamplingKernel<1024> <<<blocks, threads, shared_mem, stream>>>( points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a); } AT_CUDA_CHECK(cudaGetLastError()); return idxs; }
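// ---------------------------------------------------------------------------
// Illustrative usage sketch (an assumption, not part of the file above): the
// entry point expects points of shape (N, P, D) in float32 and lengths, K and
// start_idxs of shape (N,) in int64, all on the same CUDA device. The sizes
// and values below are made up purely to show the calling convention.
static at::Tensor FarthestPointSamplingExample() {
  const int64_t N = 2, P = 1024, D = 3;
  auto opts_f = at::device(at::kCUDA).dtype(at::kFloat);
  auto opts_i = at::device(at::kCUDA).dtype(at::kLong);
  auto points = at::rand({N, P, D}, opts_f);  // random point clouds
  auto lengths = at::full({N}, P, opts_i);    // every point is valid
  auto K = at::full({N}, 64, opts_i);         // sample 64 points per cloud
  auto start_idxs = at::zeros({N}, opts_i);   // start sampling from point 0
  // Returns an (N, max(K)) int64 tensor of sampled indices, padded with -1
  // for clouds whose K (or length) is smaller than max(K).
  return FarthestPointSamplingCuda(points, lengths, K, start_idxs);
}
// ---------------------------------------------------------------------------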
the_stack
#define BLOCK_DIM_X 1024 #define BLOCK_DIM_Y 1 #define MPI_CALL(call) \ { \ int mpi_status = call; \ if (0 != mpi_status) { \ char mpi_error_string[MPI_MAX_ERROR_STRING]; \ int mpi_error_string_length = 0; \ MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \ if (NULL != mpi_error_string) \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %s " \ "(%d).\n", \ #call, __LINE__, __FILE__, mpi_error_string, mpi_status); \ else \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %d.\n", \ #call, __LINE__, __FILE__, mpi_status); \ } \ } #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } // convert NVSHMEM_SYMMETRIC_SIZE string to long long unsigned int long long unsigned int parse_nvshmem_symmetric_size(char *value) { long long unsigned int units, size; assert(value != NULL); if (strchr(value, 'G') != NULL) { units=1e9; } else if (strchr(value, 'M') != NULL) { units=1e6; } else if (strchr(value, 'K') != NULL) { units=1e3; } else { units=1; } assert(atof(value) >= 0); size = (long long unsigned int) atof(value) * units; return size; } constexpr float tol = 1.0e-8; const float PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(float* __restrict__ const a_new, float* __restrict__ const a, const float pi, const int offset, const int nx, const int my_ny, int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const float y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[(iy + 1) * nx + 0] = y0; a[(iy + 1) * nx + (nx - 1)] = y0; a_new[(iy + 1) * nx + 0] = y0; a_new[(iy + 1) * nx + (nx - 1)] = y0; } } __global__ void jacobi_kernel(float* __restrict__ const a_new, const float* __restrict__ const a, float* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, const int top_pe, const int top_iy, const int bottom_pe, const int bottom_iy) { int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; __shared__ float block_l2_sum[BLOCK_DIM_X*BLOCK_DIM_Y]; unsigned thread_index = threadIdx.y*BLOCK_DIM_X + threadIdx.x; if (iy < iy_end && ix < (nx - 1)) { // Update grid point const float new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; float residue = new_val - a[iy * nx + ix]; // Set block-level L2 norm value for this grid point block_l2_sum[thread_index] = residue * residue; } else { block_l2_sum[thread_index] = 0; } /* starting (x, y) coordinate of the block */ int block_iy = iy - threadIdx.y; /* Alternatively, block_iy = blockIdx.y * blockDim.y + iy_start */ int block_ix = ix - threadIdx.x; /* Alternatively, block_ix = blockIdx.x * blockDim.x + 1 */ /* Communicate the boundaries */ if ((block_iy <= iy_start) && (iy_start < block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + top_iy * nx + block_ix, a_new + iy_start * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), top_pe); } if ((block_iy < iy_end) && (iy_end <= block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + bottom_iy * nx + block_ix, a_new + (iy_end - 1) * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), bottom_pe); } // Reduce L2 norm for the block in parallel for (unsigned stride = 1; 
stride < BLOCK_DIM_X*BLOCK_DIM_Y; stride *= 2) { __syncthreads(); if ((thread_index) % (2*stride) == 0) { block_l2_sum[thread_index] += block_l2_sum[thread_index + stride]; } } // Atomically update global L2 norm with block-reduced L2 norm if (thread_index == 0) { atomicAdd(l2_norm, block_l2_sum[0]); } } double single_gpu(const int nx, const int ny, const int iter_max, float* const a_ref_h, const bool print, int mype); int get_argval(char** begin, char** end, const std::string& arg, const int default_val) { int argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { cudaEvent_t copy_done; float* d; float* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval(argv, argv + argc, "-niter", 1000); const int nx = get_argval(argv, argv + argc, "-nx", 16384); const int ny = get_argval(argv, argv + argc, "-ny", 16384); float* a_new; float* a_ref_h; float* a_h; double runtime_serial = 0.0; float l2_norms[2]; int rank = 0, size = 1; MPI_CALL(MPI_Init(&argc, &argv)); MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size)); int num_devices; CUDA_RT_CALL(cudaGetDeviceCount(&num_devices)); int local_rank = -1, local_size = 1; { MPI_Comm local_comm; MPI_Info info; MPI_CALL(MPI_Info_create(&info)); MPI_CALL( MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, info, &local_comm)); MPI_CALL(MPI_Comm_rank(local_comm, &local_rank)); MPI_CALL(MPI_Comm_size(local_comm, &local_size)); if (num_devices < local_size) { fprintf(stderr, "ERROR: Number of devices is less numer of PEs \ on the node!\n"); MPI_CALL(MPI_Comm_free(&local_comm)); MPI_CALL(MPI_Info_free(&info)); MPI_CALL(MPI_Finalize()); return -1; } MPI_CALL(MPI_Comm_free(&local_comm)); MPI_CALL(MPI_Info_free(&info)); } CUDA_RT_CALL(cudaSetDevice(local_rank)); CUDA_RT_CALL(cudaFree(0)); MPI_Comm mpi_comm; nvshmemx_init_attr_t attr; mpi_comm = MPI_COMM_WORLD; attr.mpi_comm = &mpi_comm; // Set symmetric heap size for nvshmem based on problem size // Its default value in nvshmem is 1 GB which is not sufficient // for large mesh sizes long long unsigned int mesh_size_per_rank = nx * (((ny - 2) + size - 1) / size + 2); long long unsigned int required_symmetric_heap_size = 2 * mesh_size_per_rank * sizeof(float) * 1.1; // Factor 2 is because 2 arrays are allocated - a and a_new // 1.1 factor is just for alignment or other usage char * value = getenv("NVSHMEM_SYMMETRIC_SIZE"); if (value) { /* env variable is set */ long long unsigned int size_env = parse_nvshmem_symmetric_size(value); if (size_env < required_symmetric_heap_size) { fprintf(stderr, "ERROR: Minimum NVSHMEM_SYMMETRIC_SIZE = %lluB, Current NVSHMEM_SYMMETRIC_SIZE = %s\n", required_symmetric_heap_size, value); MPI_CALL(MPI_Finalize()); return -1; } } else { char symmetric_heap_size_str[100]; sprintf(symmetric_heap_size_str, "%llu", required_symmetric_heap_size); if (!rank) printf("Setting environment variable NVSHMEM_SYMMETRIC_SIZE = %llu\n", required_symmetric_heap_size); setenv("NVSHMEM_SYMMETRIC_SIZE", symmetric_heap_size_str, 1); } nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); int npes = nvshmem_n_pes(); int mype = nvshmem_my_pe(); nvshmem_barrier_all(); bool result_correct = true; float* a; cudaStream_t compute_stream; cudaStream_t 
reset_l2_norm_stream; cudaEvent_t compute_done[2]; cudaEvent_t reset_l2_norm_done[2]; l2_norm_buf l2_norm_bufs[2]; CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(float))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, (0 == mype), mype); nvshmem_barrier_all(); // ny - 2 rows are distributed amongst `size` ranks in such a way // that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows. // This optimizes load balancing when (ny - 2) % size != 0 int chunk_size; int chunk_size_low = (ny - 2) / npes; int chunk_size_high = chunk_size_low + 1; // To calculate the number of ranks that need to compute an extra row, // the following formula is derived from this equation: // num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = ny - 2 int num_ranks_low = npes * chunk_size_low + npes - (ny - 2); // Number of ranks with chunk_size = chunk_size_low if (mype < num_ranks_low) chunk_size = chunk_size_low; else chunk_size = chunk_size_high; a = (float*)nvshmem_malloc( nx * (chunk_size_high + 2) * sizeof(float)); // Using chunk_size_high so that it is same across all PEs a_new = (float*)nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(float)); cudaMemset(a, 0, nx * (chunk_size + 2) * sizeof(float)); cudaMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(float)); // Calculate local domain boundaries int iy_start_global; // My start index in the global array if (mype < num_ranks_low) { iy_start_global = mype * chunk_size_low + 1; } else { iy_start_global = num_ranks_low * chunk_size_low + (mype - num_ranks_low) * chunk_size_high + 1; } int iy_end_global = iy_start_global + chunk_size - 1; // My last index in the global array // do not process boundaries iy_end_global = std::min(iy_end_global, ny - 4); int iy_start = 1; int iy_end = (iy_end_global - iy_start_global + 1) + iy_start; // calculate boundary indices for top and bottom boundaries int top_pe = mype > 0 ? mype - 1 : (npes - 1); int bottom_pe = (mype + 1) % npes; int iy_end_top = (top_pe < num_ranks_low) ? 
chunk_size_low + 1 : chunk_size_high + 1; int iy_start_bottom = 0; // Set diriclet boundary conditions on left and right boundary initialize_boundaries<<<(ny / npes) / 128 + 1, 128>>>(a, a_new, PI, iy_start_global - 1, nx, chunk_size, ny - 2); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreateWithFlags(&compute_stream, cudaStreamNonBlocking)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventCreate(&compute_done[0])); CUDA_RT_CALL(cudaEventCreate(&compute_done[1])); CUDA_RT_CALL(cudaEventCreate(&reset_l2_norm_done[0])); CUDA_RT_CALL(cudaEventCreate(&reset_l2_norm_done[1])); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaEventCreate(&l2_norm_bufs[i].copy_done)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(float))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(float))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(float))); *(l2_norm_bufs[i].h) = 1.0; } nvshmemx_barrier_all_on_stream(compute_stream); MPI_CALL(MPI_Allreduce(l2_norm_bufs[0].h, &l2_norms[0], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); MPI_CALL(MPI_Allreduce(l2_norm_bufs[1].h, &l2_norms[1], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); CUDA_RT_CALL(cudaDeviceSynchronize()); if (!mype) { printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, nx, ny); } dim3 dim_grid((nx + BLOCK_DIM_X-1) / BLOCK_DIM_X, (chunk_size + BLOCK_DIM_Y-1) / BLOCK_DIM_Y, 1); dim3 dim_block(BLOCK_DIM_X, BLOCK_DIM_Y, 1); int iter = 0; if (!mype) { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } nvshmem_barrier_all(); double start = MPI_Wtime(); bool l2_norm_greater_than_tol = true; nvtxRangePush("Jacobi Solve Multi-GPU"); while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); jacobi_kernel<<<dim_grid, dim_block, 0, compute_stream>>>( a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, top_pe, iy_end_top, bottom_pe, iy_start_bottom); /* Instead of using nvshmemx_barrier_all_on_stream, we are using a custom implementation of barrier that just synchronizes with the neighbor PEs that is the PEs with whom a PE communicates. This will perform faster than a global barrier that would do redundant synchronization for this application. 
*/ nvshmemx_barrier_all_on_stream(compute_stream); // perform L2 norm calculation // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(float), cudaMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the data for // calculation CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); MPI_CALL(MPI_Allreduce(l2_norm_bufs[prev].h, &l2_norms[prev], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); iter++; if ((iter % 100) == 0) { if (!mype) printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(float), cudaMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); std::swap(a_new, a); } CUDA_RT_CALL(cudaDeviceSynchronize()); nvshmem_barrier_all(); double stop = MPI_Wtime(); nvtxRangePop(); nvshmem_barrier_all(); CUDA_RT_CALL(cudaMemcpy(a_h + iy_start_global * nx, a + nx, std::min(ny - 2 - iy_start_global, chunk_size) * nx * sizeof(float), cudaMemcpyDeviceToHost)); result_correct = true; for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR on rank %d: a[%d * %d + %d] = %f does not match %f " "(reference)\n", rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } int global_result_correct = 1; MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD)); result_correct = global_result_correct; if (!mype && result_correct) { printf("Num GPUs: %d.\n", npes); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, npes, (stop - start), runtime_serial / (stop - start), runtime_serial / (npes * (stop - start)) * 100); } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } nvshmem_free(a); nvshmem_free(a_new); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(cudaEventDestroy(compute_done[1])); CUDA_RT_CALL(cudaEventDestroy(compute_done[0])); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFreeHost(a_h)); CUDA_RT_CALL(cudaFreeHost(a_ref_h)); nvshmem_finalize(); MPI_CALL(MPI_Finalize()); return (result_correct == 1) ? 
0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, float* const a_ref_h, const bool print, int mype) { float* a; float* a_new; float* l2_norm_d; float* l2_norm_h; int iy_start = 1; int iy_end = ny - 3; CUDA_RT_CALL(cudaMalloc((void**)&a, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMalloc((void**)&a_new, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(float))); CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(float))); // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny - 2, ny - 2); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaMalloc(&l2_norm_d, sizeof(float))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_h, sizeof(float))); CUDA_RT_CALL(cudaDeviceSynchronize()); if (print) printf("Single GPU jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, nx, ny); dim3 dim_block(BLOCK_DIM_X, BLOCK_DIM_Y, 1); dim3 dim_grid((nx + BLOCK_DIM_X-1) / BLOCK_DIM_X, ((ny - 2) + BLOCK_DIM_Y-1) / BLOCK_DIM_Y, 1); int iter = 0; float l2_norm = 1.0; CUDA_RT_CALL(cudaDeviceSynchronize()); double start = MPI_Wtime(); nvtxRangePush("Jacobi Solve Single GPU"); while (l2_norm > tol && iter < iter_max) { CUDA_RT_CALL(cudaMemsetAsync(l2_norm_d, 0, sizeof(float))); jacobi_kernel<<<dim_grid, dim_block>>>(a_new, a, l2_norm_d, iy_start, iy_end, nx, mype, iy_end + 1, mype, (iy_start - 1)); iter++; if (print && ((iter % 100) == 0)) { CUDA_RT_CALL(cudaMemcpy(l2_norm_h, l2_norm_d, sizeof(float), cudaMemcpyDeviceToHost)); l2_norm = *l2_norm_h; l2_norm = std::sqrt(l2_norm); if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); } std::swap(a_new, a); } CUDA_RT_CALL(cudaDeviceSynchronize()); nvtxRangePop(); double stop = MPI_Wtime(); CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(float), cudaMemcpyDeviceToHost)); CUDA_RT_CALL(cudaFreeHost(l2_norm_h)); CUDA_RT_CALL(cudaFree(l2_norm_d)); CUDA_RT_CALL(cudaFree(a_new)); CUDA_RT_CALL(cudaFree(a)); return (stop - start); }
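// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above, assumes <cassert> and
// <cstdio>): the multi-PE setup splits the ny - 2 interior rows so that each
// PE gets either chunk_size_low or chunk_size_low + 1 rows, with num_ranks_low
// derived from
//   num_ranks_low * low + (npes - num_ranks_low) * (low + 1) = ny - 2.
// A small host-only check of that arithmetic (function name is made up):
static void check_row_decomposition(int ny, int npes) {
  const int rows = ny - 2;  // interior rows to distribute
  const int chunk_size_low = rows / npes;
  const int chunk_size_high = chunk_size_low + 1;
  const int num_ranks_low = npes * chunk_size_low + npes - rows;
  int total = 0;
  for (int pe = 0; pe < npes; ++pe)
    total += (pe < num_ranks_low) ? chunk_size_low : chunk_size_high;
  assert(total == rows);  // every interior row is assigned exactly once
  printf("ny=%d npes=%d: %d PEs take %d rows, %d PEs take %d rows\n", ny, npes,
         num_ranks_low, chunk_size_low, npes - num_ranks_low, chunk_size_high);
}
// For example, check_row_decomposition(16384, 8) reports 2 PEs with 2047 rows
// and 6 PEs with 2048 rows (2 * 2047 + 6 * 2048 = 16382 = ny - 2).
// ---------------------------------------------------------------------------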
the_stack
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ cublasStatus_t stat = (f); \ if (stat != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3, int outputStride0, int outputStride1, int outputStride2, int outputStride3, const int dimSize, const int concatDim, int linearIndex) { int offset = 0; int curDimSize = 3 == concatDim ? dimSize : outputSize3; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStride3; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 2 == concatDim ? dimSize : outputSize2; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride2; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 1 == concatDim ? dimSize : outputSize1; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride1; offset += curDimOffset; linearIndex = nextDimIndex; return offset + linearIndex * outputStride0; // for (int i = 3; i >= 1; i--) { // int curDimSize = i == concatDim ? 
dimSize : outputSize[i]; // int nextDimIndex = linearIndex / curDimSize; // int curDimIndex = linearIndex - curDimSize * nextDimIndex; // int curDimOffset = curDimIndex * outputStride[i]; // offset += curDimOffset; // linearIndex = nextDimIndex; // } // return offset + linearIndex * outputStride[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; tid += stride; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; tid += stride; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; 
// Zero out elements whose index in the last dimension is >= mask[index0]
// (a per-row length mask over a 4-D tensor described by xstrides).
__global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < scalarCount; tid += stride) {
    int linearIndex = tid;
    int xindex0 = linearIndex / xstrides0;
    linearIndex = linearIndex - xstrides0 * xindex0;
    int xindex1 = linearIndex / xstrides1;
    linearIndex = linearIndex - xstrides1 * xindex1;
    int xindex2 = linearIndex / xstrides2;
    int xindex3 = linearIndex - xstrides2 * xindex2;
    if (xindex3 >= mask[xindex0]) in[tid] = 0;
  }
}

// Broadcast multiply: in2 is cycled over with period in2ScalarCount.
__global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < in1ScalarCount; tid += stride) {
    out[tid] = in1[tid] * in2[tid % in2ScalarCount];
  }
}

__global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < in1ScalarCount; tid += stride) {
    int index = tid % in2ScalarCount;
    in1_d[tid] += out[tid] * in2_x[index];
    in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced!
  }
}

// From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh
// Result of div/mod operation stored together.
template <typename Value>
struct DivMod {
  Value div, mod;
  __host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { }
};

// Base case: we only have an implementation for uint32_t for now. For
// everything else, we use plain division.
template <typename Value>
struct IntDivider {
  IntDivider() { }  // Dummy constructor for arrays.
  IntDivider(Value d) : divisor(d) { }

  __host__ __device__ inline Value div(Value n) const { return n / divisor; }
  __host__ __device__ inline Value mod(Value n) const { return n % divisor; }
  __host__ __device__ inline DivMod<Value> divmod(Value n) const {
    return DivMod<Value>(n / divisor, n % divisor);
  }

  Value divisor;
};

// Implement fast integer division.
template <>
struct IntDivider<unsigned int> {
  static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");

  IntDivider() { }  // Dummy constructor for arrays.

  IntDivider(unsigned int d) : divisor(d) {
    assert(divisor >= 1 && divisor <= INT32_MAX);

    // TODO: gcc/clang has __builtin_clz() but it's not portable.
    for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;

    uint64_t one = 1;
    uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
    m1 = magic;
    assert(m1 > 0 && m1 == magic);  // m1 must fit in 32 bits.
  }

  __host__ __device__ inline unsigned int div(unsigned int n) const {
#ifdef __CUDA_ARCH__
    // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and 'm1'.
    unsigned int t = __umulhi(n, m1);
    return (t + n) >> shift;
#else
    // Using uint64_t so that the addition does not overflow.
    uint64_t t = ((uint64_t) n * m1) >> 32;
    return (t + n) >> shift;
#endif
  }

  __host__ __device__ inline unsigned int mod(unsigned int n) const {
    return n - div(n) * divisor;
  }

  __host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const {
    unsigned int q = div(n);
    return DivMod<unsigned int>(q, n - q * divisor);
  }

  unsigned int divisor;  // d above.
  unsigned int m1;       // Magic number: m' above.
  unsigned int shift;    // Shift amount.
};
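// --- Illustrative sketch (added for clarity, not part of the original generated
// --- code): a host-side sanity check that the magic-number division above agrees
// --- with plain '/' and '%'. It exercises the portable #else branch of div();
// --- the helper name and the test values are assumptions for the example.
static void checkIntDivider() {
  const unsigned int divisors[] = {1u, 3u, 7u, 128u, 1000u};
  const unsigned int values[]   = {0u, 1u, 2u, 1000u, 123456789u};
  for (unsigned int d : divisors) {
    IntDivider<unsigned int> divider(d);
    for (unsigned int n : values) {
      DivMod<unsigned int> dm = divider.divmod(n);
      assert(dm.div == n / d);  // quotient matches ordinary division
      assert(dm.mod == n % d);  // remainder matches ordinary modulo
    }
  }
}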
// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh
/// OffsetCalculator calculates the offset in bytes of a linear index for NARGS
/// operands that share the same shape, but may have different strides.
template <int NARGS> struct OffsetCalculator {
  static constexpr int MAX_DIMS = 25;

  // The offset for each argument (in bytes). Wrapper around fixed-size array.
  struct offsets_t {
    __host__ __device__ uint32_t& operator[](int idx) { return values[idx]; }
    uint32_t values[NARGS];
  };

  // OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) {
  OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) {
    for (int i = 0; i < MAX_DIMS; ++i) {
      if (i < dims) {
        sizes_[i] = IntDivider<uint32_t>(sizes[i]);
      } else {
        sizes_[i] = IntDivider<uint32_t>(1);
      }
      for (int arg = 0; arg < NARGS; arg++) {
        strides_[i][arg] = i < dims ? strides[arg][i] : 0;
      }
    }
  }

  __host__ __device__ offsets_t get(uint32_t linear_idx) const {
    offsets_t offsets;
#pragma unroll
    for (int arg = 0; arg < NARGS; arg++) {
      offsets[arg] = 0;
    }
#pragma unroll
    for (int dim = 0; dim < MAX_DIMS; ++dim) {
      if (dim == dims) {
        break;
      }
      auto divmod = sizes_[dim].divmod(linear_idx);
      linear_idx = divmod.div;
#pragma unroll
      for (int arg = 0; arg < NARGS; arg++) {
        offsets[arg] += divmod.mod * strides_[dim][arg];
      }
    }
    return offsets;
  }

  void print() {
    for (auto i = 1; i < 128; i++) {
      auto offsets = get(i);
      printf("offsets[%d]: ", i);
      for (auto arg = 0; arg < NARGS; arg++) {
        printf("%d ", offsets[arg]);
      }
      printf("\n");
    }
  }

  int dims;
  IntDivider<uint32_t> sizes_[MAX_DIMS];
  uint32_t strides_[MAX_DIMS][NARGS];
};

// From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh
template<int nt, int vt, typename func_t>
__launch_bounds__(nt, 4)
__global__ void elementwise_kernel(int N, func_t f) {
  int tid = threadIdx.x;
  int nv = nt * vt;
  int idx = nv * blockIdx.x + tid;
#pragma unroll
  for (int i = 0; i < vt; i++) {
    if (idx < N) {
      f(idx);
      idx += nt;
    }
  }
}

template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
  if (N == 0) {
    return;
  }
  dim3 block(nt);
  dim3 grid((N + block.x * vt - 1) / (block.x * vt));
  elementwise_kernel<nt, vt, func_t><<<grid, block, 0>>>(N, f);
}

// Strided unary/binary element-wise launchers: offsets[0] indexes the result,
// the remaining offsets index the inputs.
template<typename func_t>
void gpu_unary_kernel(float *res, float *x,
                      int32_t resRank, const int32_t resScalarCount,
                      const int32_t* resShape, const int32_t* const* strides,
                      const func_t& f) {
  OffsetCalculator<2> calc(resRank, resShape, strides);
  launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
    auto offsets = calc.get(idx);
    float* out = &res[offsets[0]];
    float* in = &x[offsets[1]];
    *out = f(*in);
  });
}

template<typename func_t>
void gpu_binary_kernel(float *res, float *x, float *y,
                       int32_t resRank, const int32_t resScalarCount,
                       const int32_t* resShape, const int32_t* const* strides,
                       const func_t& f) {
  OffsetCalculator<3> calc(resRank, resShape, strides);
  launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) {
    auto offsets = calc.get(idx);
    float* out = &res[offsets[0]];
    float* in1 = &x[offsets[1]];
    float* in2 = &y[offsets[2]];
    *out = f(*in1, *in2);
  });
}

#define CUDNN_CALL(f) { \
  cudnnStatus_t stat = (f); \
  if (stat != CUDNN_STATUS_SUCCESS) { \
    fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \
            stat, __FILE__, __LINE__); \
    exit(stat); \
  } \
}

void Snippet(char *);

std::random_device rd{};
std::mt19937 gen{rd()};
std::normal_distribution<> d{0, 0.01};

int main(int argc, char *argv[]) {
  if (argc != 2) {
    printf("usage: query <filename>\n");
    return 0;
  }
  Snippet(argv[1]);
  return 0;
}

/*****************************************
Emitting C Generated Code
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include
<string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. cublasHandle_t cublasHandle; CUBLAS_CALL(cublasCreate(&cublasHandle)); CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); // Tensor 'toGPU' invocation. float* x276 = (float*)myGpuMalloc(262144 * sizeof(float)); int32_t x5 = open("/home/fei/bitbucket/Lantern/src/out/PLDI19evaluation/resnet50/resnet50.onnx.bin",0); int64_t x6 = fsize(x5); float* x7 = (float*)mmap(0, x6, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x5, 0); float* x8 = x7+5205440; CUDA_CALL(cudaMemcpy(x276, x8, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x279 = (float*)myGpuMalloc(256 * sizeof(float)); float* x9 = x7+148672; CUDA_CALL(cudaMemcpy(x279, x9, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x282 = (float*)myGpuMalloc(128 * sizeof(float)); float* x10 = x7+816064; CUDA_CALL(cudaMemcpy(x282, x10, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x285 = (float*)myGpuMalloc(128 * sizeof(float)); float* x11 = x7+950080; CUDA_CALL(cudaMemcpy(x285, x11, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x288 = (float*)myGpuMalloc(64 * sizeof(float)); float* x12 = x7+94784; CUDA_CALL(cudaMemcpy(x288, x12, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x291 = (float*)myGpuMalloc(32768 * sizeof(float)); float* x13 = x7+220608; CUDA_CALL(cudaMemcpy(x291, x13, 32768 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x294 = (float*)myGpuMalloc(512 * sizeof(float)); float* x14 = x7+22495680; CUDA_CALL(cudaMemcpy(x294, x14, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x297 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x15 = x7+2964928; CUDA_CALL(cudaMemcpy(x297, x15, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x300 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x16 = x7+4348352; CUDA_CALL(cudaMemcpy(x300, x16, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x303 = (float*)myGpuMalloc(512 * sizeof(float)); float* x17 = x7+20133312; CUDA_CALL(cudaMemcpy(x303, x17, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x306 = (float*)myGpuMalloc(256 * sizeof(float)); float* x18 = x7+2169536; CUDA_CALL(cudaMemcpy(x306, x18, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x309 = (float*)myGpuMalloc(128 * sizeof(float)); float* x19 = x7+668224; CUDA_CALL(cudaMemcpy(x309, x19, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x312 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x20 = x7+2432448; CUDA_CALL(cudaMemcpy(x312, x20, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x315 = (float*)myGpuMalloc(512 * sizeof(float)); float* x21 = x7+1446336; CUDA_CALL(cudaMemcpy(x315, x21, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x318 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x22 = x7+4081088; CUDA_CALL(cudaMemcpy(x318, x22, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x321 = (float*)myGpuMalloc(256 * sizeof(float)); float* x23 = x7+1578688; CUDA_CALL(cudaMemcpy(x321, x23, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x324 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x24 = x7+6325696; CUDA_CALL(cudaMemcpy(x324, x24, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x327 = (float*)myGpuMalloc(512 * sizeof(float)); float* x25 = x7+602048; CUDA_CALL(cudaMemcpy(x327, x25, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x330 = (float*)myGpuMalloc(64 * sizeof(float)); float* x26 = x7+165888; CUDA_CALL(cudaMemcpy(x330, x26, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x333 = (float*)myGpuMalloc(512 * sizeof(float)); float* x27 = x7+1164736; CUDA_CALL(cudaMemcpy(x333, x27, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x336 = (float*)myGpuMalloc(64 * sizeof(float)); float* x28 = x7+6080; CUDA_CALL(cudaMemcpy(x336, x28, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x339 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x29 = x7+253888; CUDA_CALL(cudaMemcpy(x339, x29, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x342 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x30 = x7+20135360; CUDA_CALL(cudaMemcpy(x342, x30, 2359296 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x345 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x31 = x7+2960832; CUDA_CALL(cudaMemcpy(x345, x31, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x348 = (float*)myGpuMalloc(256 * sizeof(float)); float* x32 = x7+3227072; CUDA_CALL(cudaMemcpy(x348, x32, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x351 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x33 = x7+3228096; CUDA_CALL(cudaMemcpy(x351, x33, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x354 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x34 = x7+43456; CUDA_CALL(cudaMemcpy(x354, x34, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x357 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x35 = x7+22496704; CUDA_CALL(cudaMemcpy(x357, x35, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x360 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x36 = x7+9092544; CUDA_CALL(cudaMemcpy(x360, x36, 2359296 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x363 = (float*)myGpuMalloc(128 * sizeof(float)); float* x37 = x7+816320; CUDA_CALL(cudaMemcpy(x363, x37, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x366 = (float*)myGpuMalloc(256 * sizeof(float)); float* x38 = x7+60608; CUDA_CALL(cudaMemcpy(x366, x38, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x369 = (float*)myGpuMalloc(256 * sizeof(float)); float* x39 = x7+219584; CUDA_CALL(cudaMemcpy(x369, x39, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x372 = (float*)myGpuMalloc(128 * sizeof(float)); float* x40 = x7+1379392; CUDA_CALL(cudaMemcpy(x372, x40, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x375 = (float*)myGpuMalloc(128 * sizeof(float)); float* x41 = x7+1231296; CUDA_CALL(cudaMemcpy(x375, x41, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x378 = (float*)myGpuMalloc(64 * sizeof(float)); float* x42 = x7+1856; CUDA_CALL(cudaMemcpy(x378, x42, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x381 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x43 = x7+1098176; CUDA_CALL(cudaMemcpy(x381, x43, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x384 = (float*)myGpuMalloc(512 * sizeof(float)); float* x44 = x7+601536; CUDA_CALL(cudaMemcpy(x384, x44, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x387 = (float*)myGpuMalloc(128 * sizeof(float)); float* x45 = x7+401728; CUDA_CALL(cudaMemcpy(x387, x45, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x390 = (float*)myGpuMalloc(64 * sizeof(float)); float* x46 = x7+131904; CUDA_CALL(cudaMemcpy(x390, x46, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x393 = (float*)myGpuMalloc(128 * sizeof(float)); float* x47 = x7+949696; CUDA_CALL(cudaMemcpy(x393, x47, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x396 = (float*)myGpuMalloc(512 * sizeof(float)); float* x48 = x7+15664576; CUDA_CALL(cudaMemcpy(x396, x48, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x399 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x49 = x7+18027968; CUDA_CALL(cudaMemcpy(x399, x49, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x402 = (float*)myGpuMalloc(10 * sizeof(float)); float* x50 = x7+23573952; CUDA_CALL(cudaMemcpy(x402, x50, 10 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x405 = (float*)myGpuMalloc(64 * sizeof(float)); float* x51 = x7+43264; CUDA_CALL(cudaMemcpy(x405, x51, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x408 = (float*)myGpuMalloc(512 * sizeof(float)); float* x52 = x7+11453376; CUDA_CALL(cudaMemcpy(x408, x52, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x411 = (float*)myGpuMalloc(64 * sizeof(float)); float* x53 = x7+6272; CUDA_CALL(cudaMemcpy(x411, x53, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x414 = (float*)myGpuMalloc(512 * sizeof(float)); float* x54 = x7+882112; CUDA_CALL(cudaMemcpy(x414, x54, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x417 = (float*)myGpuMalloc(64 * sizeof(float)); float* x55 = x7+6144; CUDA_CALL(cudaMemcpy(x417, x55, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x420 = (float*)myGpuMalloc(512 * sizeof(float)); float* x56 = x7+1445824; CUDA_CALL(cudaMemcpy(x420, x56, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x423 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x57 = x7+1379776; CUDA_CALL(cudaMemcpy(x423, x57, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x426 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x58 = x7+3818944; CUDA_CALL(cudaMemcpy(x426, x58, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x429 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x59 = x7+5202368; CUDA_CALL(cudaMemcpy(x429, x59, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x432 = (float*)myGpuMalloc(256 * sizeof(float)); float* x60 = x7+148416; CUDA_CALL(cudaMemcpy(x432, x60, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x435 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x61 = x7+7441856; CUDA_CALL(cudaMemcpy(x435, x61, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x438 = (float*)myGpuMalloc(64 * sizeof(float)); float* x62 = x7+94720; CUDA_CALL(cudaMemcpy(x438, x62, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x441 = (float*)myGpuMalloc(128 * sizeof(float)); float* x63 = x7+1097792; CUDA_CALL(cudaMemcpy(x441, x63, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x444 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x64 = x7+12504512; CUDA_CALL(cudaMemcpy(x444, x64, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x447 = (float*)myGpuMalloc(256 * sizeof(float)); float* x65 = x7+4938944; CUDA_CALL(cudaMemcpy(x447, x65, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x450 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x66 = x7+14611904; CUDA_CALL(cudaMemcpy(x450, x66, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x453 = (float*)myGpuMalloc(512 * sizeof(float)); float* x67 = x7+15666112; CUDA_CALL(cudaMemcpy(x453, x67, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x456 = (float*)myGpuMalloc(512 * sizeof(float)); float* x68 = x7+18026432; CUDA_CALL(cudaMemcpy(x456, x68, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x459 = (float*)myGpuMalloc(512 * sizeof(float)); float* x69 = x7+9091520; CUDA_CALL(cudaMemcpy(x459, x69, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x462 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x70 = x7+19080640; CUDA_CALL(cudaMemcpy(x462, x70, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x465 = (float*)myGpuMalloc(256 * sizeof(float)); float* x71 = x7+6588608; CUDA_CALL(cudaMemcpy(x465, x71, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x468 = (float*)myGpuMalloc(256 * sizeof(float)); float* x72 = x7+8299456; CUDA_CALL(cudaMemcpy(x468, x72, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x471 = (float*)myGpuMalloc(256 * sizeof(float)); float* x73 = x7+60352; CUDA_CALL(cudaMemcpy(x471, x73, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x474 = (float*)myGpuMalloc(64 * sizeof(float)); float* x74 = x7+202944; CUDA_CALL(cudaMemcpy(x474, x74, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x477 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x75 = x7+166080; CUDA_CALL(cudaMemcpy(x477, x75, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x480 = (float*)myGpuMalloc(256 * sizeof(float)); float* x76 = x7+6058432; CUDA_CALL(cudaMemcpy(x480, x76, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x483 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x77 = x7+2436544; CUDA_CALL(cudaMemcpy(x483, x77, 524288 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x486 = (float*)myGpuMalloc(256 * sizeof(float)); float* x78 = x7+77248; CUDA_CALL(cudaMemcpy(x486, x78, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x489 = (float*)myGpuMalloc(256 * sizeof(float)); float* x79 = x7+6587840; CUDA_CALL(cudaMemcpy(x489, x79, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x492 = (float*)myGpuMalloc(512 * sizeof(float)); float* x80 = x7+20133824; CUDA_CALL(cudaMemcpy(x492, x80, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x495 = (float*)myGpuMalloc(128 * sizeof(float)); float* x81 = x7+1379264; CUDA_CALL(cudaMemcpy(x495, x81, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x498 = (float*)myGpuMalloc(256 * sizeof(float)); float* x82 = x7+7708608; CUDA_CALL(cudaMemcpy(x498, x82, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x501 = (float*)myGpuMalloc(64 * sizeof(float)); float* x83 = x7+165824; CUDA_CALL(cudaMemcpy(x501, x83, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x504 = (float*)myGpuMalloc(512 * sizeof(float)); float* x84 = x7+1164224; CUDA_CALL(cudaMemcpy(x504, x84, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x507 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x85 = x7+94912; CUDA_CALL(cudaMemcpy(x507, x85, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x510 = (float*)myGpuMalloc(128 * sizeof(float)); float* x86 = x7+253376; CUDA_CALL(cudaMemcpy(x510, x86, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x513 = (float*)myGpuMalloc(256 * sizeof(float)); float* x87 = x7+7708096; CUDA_CALL(cudaMemcpy(x513, x87, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x516 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x88 = x7+2962880; CUDA_CALL(cudaMemcpy(x516, x88, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x519 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x89 = x7+203200; CUDA_CALL(cudaMemcpy(x519, x89, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x522 = (float*)myGpuMalloc(512 * sizeof(float)); float* x90 = x7+883648; CUDA_CALL(cudaMemcpy(x522, x90, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x525 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x91 = x7+6059456; CUDA_CALL(cudaMemcpy(x525, x91, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x528 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x92 = x7+6336; CUDA_CALL(cudaMemcpy(x528, x92, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x531 = (float*)myGpuMalloc(256 * sizeof(float)); float* x93 = x7+148928; CUDA_CALL(cudaMemcpy(x531, x93, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x534 = (float*)myGpuMalloc(256 * sizeof(float)); float* x94 = x7+5467584; CUDA_CALL(cudaMemcpy(x534, x94, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x537 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x95 = x7+8563136; CUDA_CALL(cudaMemcpy(x537, x95, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x540 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x96 = x7+19076544; CUDA_CALL(cudaMemcpy(x540, x96, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x543 = (float*)myGpuMalloc(128 * sizeof(float)); float* x97 = x7+816192; CUDA_CALL(cudaMemcpy(x543, x97, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x546 = (float*)myGpuMalloc(256 * sizeof(float)); float* x98 = x7+3818176; CUDA_CALL(cudaMemcpy(x546, x98, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x549 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x99 = x7+8299968; CUDA_CALL(cudaMemcpy(x549, x99, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x552 = (float*)myGpuMalloc(256 * sizeof(float)); float* x100 = x7+5468352; CUDA_CALL(cudaMemcpy(x552, x100, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x555 = (float*)myGpuMalloc(256 * sizeof(float)); float* x101 = x7+2170048; CUDA_CALL(cudaMemcpy(x555, x101, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x558 = (float*)myGpuMalloc(128 * sizeof(float)); float* x102 = x7+668352; CUDA_CALL(cudaMemcpy(x558, x102, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x561 = (float*)myGpuMalloc(512 * sizeof(float)); float* x103 = x7+468928; CUDA_CALL(cudaMemcpy(x561, x103, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x564 = (float*)myGpuMalloc(64 * sizeof(float)); float* x104 = x7+94848; CUDA_CALL(cudaMemcpy(x564, x104, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x567 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x105 = x7+23545280; CUDA_CALL(cudaMemcpy(x567, x105, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x570 = (float*)myGpuMalloc(256 * sizeof(float)); float* x106 = x7+7179456; CUDA_CALL(cudaMemcpy(x570, x106, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x573 = (float*)myGpuMalloc(64 * sizeof(float)); float* x107 = x7+43328; CUDA_CALL(cudaMemcpy(x573, x107, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x576 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x108 = x7+401856; CUDA_CALL(cudaMemcpy(x576, x108, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x579 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x109 = x7+14609856; CUDA_CALL(cudaMemcpy(x579, x109, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x582 = (float*)myGpuMalloc(256 * sizeof(float)); float* x110 = x7+2169280; CUDA_CALL(cudaMemcpy(x582, x110, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x585 = (float*)myGpuMalloc(256 * sizeof(float)); float* x111 = x7+7178944; CUDA_CALL(cudaMemcpy(x585, x111, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x588 = (float*)myGpuMalloc(64 * sizeof(float)); float* x112 = x7+1920; CUDA_CALL(cudaMemcpy(x588, x112, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x591 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x113 = x7+816576; CUDA_CALL(cudaMemcpy(x591, x113, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x594 = (float*)myGpuMalloc(128 * sizeof(float)); float* x114 = x7+949952; CUDA_CALL(cudaMemcpy(x594, x114, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x597 = (float*)myGpuMalloc(512 * sizeof(float)); float* x115 = x7+11452864; CUDA_CALL(cudaMemcpy(x597, x115, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x600 = (float*)myGpuMalloc(64 * sizeof(float)); float* x116 = x7+6208; CUDA_CALL(cudaMemcpy(x600, x116, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x603 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x117 = x7+12506560; CUDA_CALL(cudaMemcpy(x603, x117, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x606 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x118 = x7+4939200; CUDA_CALL(cudaMemcpy(x606, x118, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x609 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x119 = x7+2433472; CUDA_CALL(cudaMemcpy(x609, x119, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x612 = (float*)myGpuMalloc(64 * sizeof(float)); float* x120 = x7+203136; CUDA_CALL(cudaMemcpy(x612, x120, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x615 = (float*)myGpuMalloc(512 * sizeof(float)); float* x121 = x7+601024; CUDA_CALL(cudaMemcpy(x615, x121, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x618 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x122 = x7+7442880; CUDA_CALL(cudaMemcpy(x618, x122, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x621 = (float*)myGpuMalloc(512 * sizeof(float)); float* x123 = x7+9092032; CUDA_CALL(cudaMemcpy(x621, x123, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x624 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x124 = x7+8564160; CUDA_CALL(cudaMemcpy(x624, x124, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x627 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x125 = x7+23551424; CUDA_CALL(cudaMemcpy(x627, x125, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x630 = (float*)myGpuMalloc(256 * sizeof(float)); float* x126 = x7+4938688; CUDA_CALL(cudaMemcpy(x630, x126, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x633 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x127 = x7+14613952; CUDA_CALL(cudaMemcpy(x633, x127, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x636 = (float*)myGpuMalloc(256 * sizeof(float)); float* x128 = x7+60096; CUDA_CALL(cudaMemcpy(x636, x128, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x639 = (float*)myGpuMalloc(128 * sizeof(float)); float* x129 = x7+1097664; CUDA_CALL(cudaMemcpy(x639, x129, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x642 = (float*)myGpuMalloc(128 * sizeof(float)); float* x130 = x7+401600; CUDA_CALL(cudaMemcpy(x642, x130, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x645 = (float*)myGpuMalloc(256 * sizeof(float)); float* x131 = x7+4347328; CUDA_CALL(cudaMemcpy(x645, x131, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x648 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x132 = x7+132032; CUDA_CALL(cudaMemcpy(x648, x132, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x651 = (float*)myGpuMalloc(256 * sizeof(float)); float* x133 = x7+1578944; CUDA_CALL(cudaMemcpy(x651, x133, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x654 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x134 = x7+1165760; CUDA_CALL(cudaMemcpy(x654, x134, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x657 = (float*)myGpuMalloc(256 * sizeof(float)); float* x135 = x7+220352; CUDA_CALL(cudaMemcpy(x657, x135, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x660 = (float*)myGpuMalloc(128 * sizeof(float)); float* x136 = x7+253760; CUDA_CALL(cudaMemcpy(x660, x136, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x663 = (float*)myGpuMalloc(64 * sizeof(float)); float* x137 = x7+203008; CUDA_CALL(cudaMemcpy(x663, x137, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x666 = (float*)myGpuMalloc(256 * sizeof(float)); float* x138 = x7+6058688; CUDA_CALL(cudaMemcpy(x666, x138, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x669 = (float*)myGpuMalloc(512 * sizeof(float)); float* x139 = x7+15665088; CUDA_CALL(cudaMemcpy(x669, x139, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x672 = (float*)myGpuMalloc(512 * sizeof(float)); float* x140 = x7+18026944; CUDA_CALL(cudaMemcpy(x672, x140, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x675 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x141 = x7+8566208; CUDA_CALL(cudaMemcpy(x675, x141, 524288 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x678 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x142 = x7+5203392; CUDA_CALL(cudaMemcpy(x678, x142, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x681 = (float*)myGpuMalloc(256 * sizeof(float)); float* x143 = x7+8298944; CUDA_CALL(cudaMemcpy(x681, x143, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x684 = (float*)myGpuMalloc(64 * sizeof(float)); float* x144 = x7+94656; CUDA_CALL(cudaMemcpy(x684, x144, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x687 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x145 = x7+4084160; CUDA_CALL(cudaMemcpy(x687, x145, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x690 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x146 = x7+19078592; CUDA_CALL(cudaMemcpy(x690, x146, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x693 = (float*)myGpuMalloc(512 * sizeof(float)); float* x147 = x7+467392; CUDA_CALL(cudaMemcpy(x693, x147, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x696 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x148 = x7+6322624; CUDA_CALL(cudaMemcpy(x696, x148, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x699 = (float*)myGpuMalloc(512 * sizeof(float)); float* x149 = x7+883136; CUDA_CALL(cudaMemcpy(x699, x149, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x702 = (float*)myGpuMalloc(128 * sizeof(float)); float* x150 = x7+1379648; CUDA_CALL(cudaMemcpy(x702, x150, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x705 = (float*)myGpuMalloc(512 * sizeof(float)); float* x151 = x7+468416; CUDA_CALL(cudaMemcpy(x705, x151, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x708 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x152 = x7+149440; CUDA_CALL(cudaMemcpy(x708, x152, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x711 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x153 = x7+7445952; CUDA_CALL(cudaMemcpy(x711, x153, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x714 = (float*)myGpuMalloc(1728 * sizeof(float)); float* x154 = x7+0; CUDA_CALL(cudaMemcpy(x714, x154, 1728 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x717 = (float*)myGpuMalloc(64 * sizeof(float)); float* x155 = x7+131840; CUDA_CALL(cudaMemcpy(x717, x155, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x720 = (float*)myGpuMalloc(512 * sizeof(float)); float* x156 = x7+15665600; CUDA_CALL(cudaMemcpy(x720, x156, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x723 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x157 = x7+15666624; CUDA_CALL(cudaMemcpy(x723, x157, 2359296 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x726 = (float*)myGpuMalloc(512 * sizeof(float)); float* x158 = x7+1445312; CUDA_CALL(cudaMemcpy(x726, x158, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x729 = (float*)myGpuMalloc(256 * sizeof(float)); float* x159 = x7+3227840; CUDA_CALL(cudaMemcpy(x729, x159, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x732 = (float*)myGpuMalloc(64 * sizeof(float)); float* x160 = x7+43392; CUDA_CALL(cudaMemcpy(x732, x160, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x735 = (float*)myGpuMalloc(512 * sizeof(float)); float* x161 = x7+11452352; CUDA_CALL(cudaMemcpy(x735, x161, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x738 = (float*)myGpuMalloc(512 * sizeof(float)); float* x162 = x7+18025920; CUDA_CALL(cudaMemcpy(x738, x162, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x741 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x163 = x7+6324672; CUDA_CALL(cudaMemcpy(x741, x163, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x744 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x164 = x7+60864; CUDA_CALL(cudaMemcpy(x744, x164, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x747 = (float*)myGpuMalloc(256 * sizeof(float)); float* x165 = x7+5468096; CUDA_CALL(cudaMemcpy(x747, x165, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x750 = (float*)myGpuMalloc(64 * sizeof(float)); float* x166 = x7+43200; CUDA_CALL(cudaMemcpy(x750, x166, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x753 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x167 = x7+1231808; CUDA_CALL(cudaMemcpy(x753, x167, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x756 = (float*)myGpuMalloc(256 * sizeof(float)); float* x168 = x7+149184; CUDA_CALL(cudaMemcpy(x756, x168, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x759 = (float*)myGpuMalloc(512 * sizeof(float)); float* x169 = x7+1163712; CUDA_CALL(cudaMemcpy(x759, x169, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x762 = (float*)myGpuMalloc(256 * sizeof(float)); float* x170 = x7+7178688; CUDA_CALL(cudaMemcpy(x762, x170, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x765 = (float*)myGpuMalloc(512 * sizeof(float)); float* x171 = x7+22495168; CUDA_CALL(cudaMemcpy(x765, x171, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x768 = (float*)myGpuMalloc(128 * sizeof(float)); float* x172 = x7+949824; CUDA_CALL(cudaMemcpy(x768, x172, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x771 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x173 = x7+78272; CUDA_CALL(cudaMemcpy(x771, x173, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x774 = (float*)myGpuMalloc(128 * sizeof(float)); float* x174 = x7+253504; CUDA_CALL(cudaMemcpy(x774, x174, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x777 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x175 = x7+14607808; CUDA_CALL(cudaMemcpy(x777, x175, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x780 = (float*)myGpuMalloc(256 * sizeof(float)); float* x176 = x7+4348096; CUDA_CALL(cudaMemcpy(x780, x176, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x783 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x177 = x7+1579456; CUDA_CALL(cudaMemcpy(x783, x177, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x786 = (float*)myGpuMalloc(256 * sizeof(float)); float* x178 = x7+7708864; CUDA_CALL(cudaMemcpy(x786, x178, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x789 = (float*)myGpuMalloc(128 * sizeof(float)); float* x179 = x7+668480; CUDA_CALL(cudaMemcpy(x789, x179, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x792 = (float*)myGpuMalloc(256 * sizeof(float)); float* x180 = x7+4347840; CUDA_CALL(cudaMemcpy(x792, x180, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x795 = (float*)myGpuMalloc(64 * sizeof(float)); float* x181 = x7+203072; CUDA_CALL(cudaMemcpy(x795, x181, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x798 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x182 = x7+1447360; CUDA_CALL(cudaMemcpy(x798, x182, 131072 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x801 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x183 = x7+23547328; CUDA_CALL(cudaMemcpy(x801, x183, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x804 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x184 = x7+4083136; CUDA_CALL(cudaMemcpy(x804, x184, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x807 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x185 = x7+8565184; CUDA_CALL(cudaMemcpy(x807, x185, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x810 = (float*)myGpuMalloc(256 * sizeof(float)); float* x186 = x7+220096; CUDA_CALL(cudaMemcpy(x810, x186, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x813 = (float*)myGpuMalloc(256 * sizeof(float)); float* x187 = x7+6588096; CUDA_CALL(cudaMemcpy(x813, x187, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x816 = (float*)myGpuMalloc(256 * sizeof(float)); float* x188 = x7+6058944; CUDA_CALL(cudaMemcpy(x816, x188, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x819 = (float*)myGpuMalloc(64 * sizeof(float)); float* x189 = x7+166016; CUDA_CALL(cudaMemcpy(x819, x189, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x822 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x190 = x7+5204416; CUDA_CALL(cudaMemcpy(x822, x190, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x825 = (float*)myGpuMalloc(256 * sizeof(float)); float* x191 = x7+8299200; CUDA_CALL(cudaMemcpy(x825, x191, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x828 = (float*)myGpuMalloc(128 * sizeof(float)); float* x192 = x7+401472; CUDA_CALL(cudaMemcpy(x828, x192, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x831 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x193 = x7+950208; CUDA_CALL(cudaMemcpy(x831, x193, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x834 = (float*)myGpuMalloc(256 * sizeof(float)); float* x194 = x7+4938432; CUDA_CALL(cudaMemcpy(x834, x194, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x837 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x195 = x7+12508608; CUDA_CALL(cudaMemcpy(x837, x195, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x840 = (float*)myGpuMalloc(512 * sizeof(float)); float* x196 = x7+22494656; CUDA_CALL(cudaMemcpy(x840, x196, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x843 = (float*)myGpuMalloc(512 * sizeof(float)); float* x197 = x7+18027456; CUDA_CALL(cudaMemcpy(x843, x197, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x846 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x198 = x7+884160; CUDA_CALL(cudaMemcpy(x846, x198, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x849 = (float*)myGpuMalloc(256 * sizeof(float)); float* x199 = x7+4347584; CUDA_CALL(cudaMemcpy(x849, x199, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x852 = (float*)myGpuMalloc(256 * sizeof(float)); float* x200 = x7+1579200; CUDA_CALL(cudaMemcpy(x852, x200, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x855 = (float*)myGpuMalloc(256 * sizeof(float)); float* x201 = x7+59840; CUDA_CALL(cudaMemcpy(x855, x201, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x858 = (float*)myGpuMalloc(256 * sizeof(float)); float* x202 = x7+3818432; CUDA_CALL(cudaMemcpy(x858, x202, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x861 = (float*)myGpuMalloc(512 * sizeof(float)); float* x203 = x7+9090496; CUDA_CALL(cudaMemcpy(x861, x203, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x864 = (float*)myGpuMalloc(512 * sizeof(float)); float* x204 = x7+22496192; CUDA_CALL(cudaMemcpy(x864, x204, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x867 = (float*)myGpuMalloc(256 * sizeof(float)); float* x205 = x7+77504; CUDA_CALL(cudaMemcpy(x867, x205, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x870 = (float*)myGpuMalloc(128 * sizeof(float)); float* x206 = x7+253632; CUDA_CALL(cudaMemcpy(x870, x206, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x873 = (float*)myGpuMalloc(512 * sizeof(float)); float* x207 = x7+11451840; CUDA_CALL(cudaMemcpy(x873, x207, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x876 = (float*)myGpuMalloc(64 * sizeof(float)); float* x208 = x7+1728; CUDA_CALL(cudaMemcpy(x876, x208, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x879 = (float*)myGpuMalloc(512 * sizeof(float)); float* x209 = x7+600512; CUDA_CALL(cudaMemcpy(x879, x209, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x882 = (float*)myGpuMalloc(64 * sizeof(float)); float* x210 = x7+131776; CUDA_CALL(cudaMemcpy(x882, x210, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x885 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x211 = x7+7443904; CUDA_CALL(cudaMemcpy(x885, x211, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x888 = (float*)myGpuMalloc(512 * sizeof(float)); float* x212 = x7+467904; CUDA_CALL(cudaMemcpy(x888, x212, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x891 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x213 = x7+2963904; CUDA_CALL(cudaMemcpy(x891, x213, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x894 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x214 = x7+11453888; CUDA_CALL(cudaMemcpy(x894, x214, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x897 = (float*)myGpuMalloc(512 * sizeof(float)); float* x215 = x7+20134336; CUDA_CALL(cudaMemcpy(x897, x215, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x900 = (float*)myGpuMalloc(2097152 * sizeof(float)); float* x216 = x7+12510656; CUDA_CALL(cudaMemcpy(x900, x216, 2097152 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x903 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x217 = x7+14616000; CUDA_CALL(cudaMemcpy(x903, x217, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x906 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x218 = x7+2434496; CUDA_CALL(cudaMemcpy(x906, x218, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x909 = (float*)myGpuMalloc(128 * sizeof(float)); float* x219 = x7+1097920; CUDA_CALL(cudaMemcpy(x909, x219, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x912 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x220 = x7+4085184; CUDA_CALL(cudaMemcpy(x912, x220, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x915 = (float*)myGpuMalloc(256 * sizeof(float)); float* x221 = x7+3227328; CUDA_CALL(cudaMemcpy(x915, x221, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x918 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x222 = x7+2961856; CUDA_CALL(cudaMemcpy(x918, x222, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x921 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x223 = x7+7179712; CUDA_CALL(cudaMemcpy(x921, x223, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x924 = (float*)myGpuMalloc(128 * sizeof(float)); float* x224 = x7+668096; CUDA_CALL(cudaMemcpy(x924, x224, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x927 = (float*)myGpuMalloc(512 * sizeof(float)); float* x225 = x7+1165248; CUDA_CALL(cudaMemcpy(x927, x225, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x930 = (float*)myGpuMalloc(512 * sizeof(float)); float* x226 = x7+9091008; CUDA_CALL(cudaMemcpy(x930, x226, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x933 = (float*)myGpuMalloc(128 * sizeof(float)); float* x227 = x7+816448; CUDA_CALL(cudaMemcpy(x933, x227, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x936 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x228 = x7+7709120; CUDA_CALL(cudaMemcpy(x936, x228, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x939 = (float*)myGpuMalloc(20480 * sizeof(float)); float* x229 = x7+23553472; CUDA_CALL(cudaMemcpy(x939, x229, 20480 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x942 = (float*)myGpuMalloc(256 * sizeof(float)); float* x230 = x7+4938176; CUDA_CALL(cudaMemcpy(x942, x230, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x945 = (float*)myGpuMalloc(256 * sizeof(float)); float* x231 = x7+2169792; CUDA_CALL(cudaMemcpy(x945, x231, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x948 = (float*)myGpuMalloc(256 * sizeof(float)); float* x232 = x7+6059200; CUDA_CALL(cudaMemcpy(x948, x232, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x951 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x233 = x7+6323648; CUDA_CALL(cudaMemcpy(x951, x233, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x954 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x234 = x7+4082112; CUDA_CALL(cudaMemcpy(x954, x234, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x957 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x235 = x7+1984; CUDA_CALL(cudaMemcpy(x957, x235, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x960 = (float*)myGpuMalloc(512 * sizeof(float)); float* x236 = x7+1446848; CUDA_CALL(cudaMemcpy(x960, x236, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x963 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x237 = x7+668608; CUDA_CALL(cudaMemcpy(x963, x237, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x966 = (float*)myGpuMalloc(128 * sizeof(float)); float* x238 = x7+1231552; CUDA_CALL(cudaMemcpy(x966, x238, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x969 = (float*)myGpuMalloc(256 * sizeof(float)); float* x239 = x7+3818688; CUDA_CALL(cudaMemcpy(x969, x239, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x972 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x240 = x7+6321600; CUDA_CALL(cudaMemcpy(x972, x240, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x975 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x241 = x7+12502464; CUDA_CALL(cudaMemcpy(x975, x241, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x978 = (float*)myGpuMalloc(256 * sizeof(float)); float* x242 = x7+8299712; CUDA_CALL(cudaMemcpy(x978, x242, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x981 = (float*)myGpuMalloc(256 * sizeof(float)); float* x243 = x7+5467840; CUDA_CALL(cudaMemcpy(x981, x243, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x984 = (float*)myGpuMalloc(128 * sizeof(float)); float* x244 = x7+1231424; CUDA_CALL(cudaMemcpy(x984, x244, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x987 = (float*)myGpuMalloc(256 * sizeof(float)); float* x245 = x7+78016; CUDA_CALL(cudaMemcpy(x987, x245, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x990 = (float*)myGpuMalloc(64 * sizeof(float)); float* x246 = x7+131968; CUDA_CALL(cudaMemcpy(x990, x246, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x993 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x247 = x7+19082688; CUDA_CALL(cudaMemcpy(x993, x247, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x996 = (float*)myGpuMalloc(512 * sizeof(float)); float* x248 = x7+882624; CUDA_CALL(cudaMemcpy(x996, x248, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x999 = (float*)myGpuMalloc(256 * sizeof(float)); float* x249 = x7+219840; CUDA_CALL(cudaMemcpy(x999, x249, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1002 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x250 = x7+8562112; CUDA_CALL(cudaMemcpy(x1002, x250, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1005 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x251 = x7+5468608; CUDA_CALL(cudaMemcpy(x1005, x251, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1008 = (float*)myGpuMalloc(256 * sizeof(float)); float* x252 = x7+7179200; CUDA_CALL(cudaMemcpy(x1008, x252, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1011 = (float*)myGpuMalloc(64 * sizeof(float)); float* x253 = x7+1792; CUDA_CALL(cudaMemcpy(x1011, x253, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1014 = (float*)myGpuMalloc(128 * sizeof(float)); float* x254 = x7+401344; CUDA_CALL(cudaMemcpy(x1014, x254, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1017 = (float*)myGpuMalloc(256 * sizeof(float)); float* x255 = x7+7708352; CUDA_CALL(cudaMemcpy(x1017, x255, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1020 = (float*)myGpuMalloc(256 * sizeof(float)); float* x256 = x7+6588352; CUDA_CALL(cudaMemcpy(x1020, x256, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x1023 = (float*)myGpuMalloc(512 * sizeof(float)); float* x257 = x7+20134848; CUDA_CALL(cudaMemcpy(x1023, x257, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1026 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x258 = x7+602560; CUDA_CALL(cudaMemcpy(x1026, x258, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1029 = (float*)myGpuMalloc(64 * sizeof(float)); float* x259 = x7+165952; CUDA_CALL(cudaMemcpy(x1029, x259, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1032 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x260 = x7+469440; CUDA_CALL(cudaMemcpy(x1032, x260, 131072 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1035 = (float*)myGpuMalloc(256 * sizeof(float)); float* x261 = x7+3227584; CUDA_CALL(cudaMemcpy(x1035, x261, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1038 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x262 = x7+23549376; CUDA_CALL(cudaMemcpy(x1038, x262, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1041 = (float*)myGpuMalloc(128 * sizeof(float)); float* x263 = x7+1231680; CUDA_CALL(cudaMemcpy(x1041, x263, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1044 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x264 = x7+6588864; CUDA_CALL(cudaMemcpy(x1044, x264, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1047 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x265 = x7+5201344; CUDA_CALL(cudaMemcpy(x1047, x265, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1050 = (float*)myGpuMalloc(256 * sizeof(float)); float* x266 = x7+77760; CUDA_CALL(cudaMemcpy(x1050, x266, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1053 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x267 = x7+19084736; CUDA_CALL(cudaMemcpy(x1053, x267, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1056 = (float*)myGpuMalloc(128 * sizeof(float)); float* x268 = x7+1098048; CUDA_CALL(cudaMemcpy(x1056, x268, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1059 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x269 = x7+2435520; CUDA_CALL(cudaMemcpy(x1059, x269, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1062 = (float*)myGpuMalloc(128 * sizeof(float)); float* x270 = x7+1379520; CUDA_CALL(cudaMemcpy(x1062, x270, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1065 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x271 = x7+2170304; CUDA_CALL(cudaMemcpy(x1065, x271, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1068 = (float*)myGpuMalloc(256 * sizeof(float)); float* x272 = x7+1578432; CUDA_CALL(cudaMemcpy(x1068, x272, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1071 = (float*)myGpuMalloc(256 * sizeof(float)); float* x273 = x7+3817920; CUDA_CALL(cudaMemcpy(x1071, x273, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x1074 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x274 = x7+7444928; CUDA_CALL(cudaMemcpy(x1074, x274, 1024 * sizeof(float), cudaMemcpyHostToDevice)); int32_t x1076 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0); int64_t x1077 = fsize(x1076); int64_t x1079 = x1077 / 3073LL; int32_t x1080 = (int32_t)x1079; int32_t x1081 = x1080 * 3072; float* x1082 = (float*)myMalloc(x1081 * sizeof(float));; int* x1083 = (int32_t*)myMalloc(x1080 * sizeof(int32_t));; char* x1078 = (char*)mmap(0, x1077, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x1076, 0); for(int x1085=0; x1085 < x1080; x1085++) { int32_t x1086 = x1085 * 3073; char x1087 = x1078[x1086]; int32_t x1088 = (int32_t)(unsigned char)x1087; x1083[x1085] = x1088; int32_t x1094 = x1086 + 1; int32_t x1092 = x1085 * 3072; for(int x1091=0; x1091 < 3072; x1091++) { int32_t x1095 = x1094 + x1091; char x1096 = x1078[x1095]; int32_t x1093 = x1092 + x1091; float x1097 = (float)(unsigned char)x1096; float x1098 = x1097 / 255.0f; x1082[x1093] = x1098; } } int32_t x1104 = x1080 / 64; int32_t x1138 = 31 / 1; int32_t x1139 = x1138 + 1; int32_t x1143 = 4096 * x1139; int32_t x1144 = x1143 * x1139; int32_t x1140 = x1139 * x1139; int32_t x1141 = 64 * x1140; int32_t x1142 = 64 * x1141; int32_t x1167 = x1139 - 2; int32_t x1168 = x1167 / 2; int32_t x1169 = x1168 + 1; int32_t x1173 = 4096 * x1169; int32_t x1174 = x1173 * x1169; bool x1177 = x1169 >= 1; bool x1178; if (x1177) { x1178 = x1177; } else { x1178 = false; } int32_t x1183 = x1168 / 1; int32_t x1184 = x1183 + 1; int32_t x1188 = 4096 * x1184; int32_t x1189 = x1188 * x1184; int32_t x1185 = x1184 * x1184; int32_t x1186 = 64 * x1185; int32_t x1187 = 64 * x1186; int32_t x1208 = x1184 + 2; bool x1209 = x1208 >= 3; bool x1210; if (x1209) { x1210 = x1209; } else { x1210 = false; } int32_t x1215 = x1208 - 3; int32_t x1216 = x1215 / 1; int32_t x1217 = x1216 + 1; int32_t x1221 = 4096 * x1217; int32_t x1222 = x1221 * x1217; int32_t x1218 = x1217 * x1217; int32_t x1219 = 64 * x1218; int32_t x1220 = 64 * x1219; bool x1241 = x1217 >= 1; bool x1242; if (x1241) { x1242 = x1241; } else { x1242 = false; } int32_t x1247 = x1216 / 1; int32_t x1248 = x1247 + 1; int32_t x1252 = 16384 * x1248; int32_t x1253 = x1252 * x1248; int32_t x1249 = x1248 * x1248; int32_t x1250 = 256 * x1249; int32_t x1251 = 64 * x1250; int32_t x1271 = 16384 * x1184; int32_t x1272 = x1271 * x1184; int32_t x1269 = 256 * x1185; int32_t x1270 = 64 * x1269; bool x1285 = x1184 == 1; bool x1286 = x1184 == x1248; bool x1287 = x1285 || x1286; bool x1288; if (x1287) { x1288 = x1287; } else { x1288 = false; } bool x1304 = x1248 >= 1; bool x1305; if (x1304) { x1305 = x1304; } else { x1305 = false; } int32_t x1310 = x1247 / 1; int32_t x1311 = x1310 + 1; int32_t x1315 = 4096 * x1311; int32_t x1316 = x1315 * x1311; int32_t x1312 = x1311 * x1311; int32_t x1313 = 64 * x1312; int32_t x1314 = 64 * x1313; int32_t x1335 = x1311 + 2; bool x1336 = x1335 >= 3; bool x1337; if (x1336) { x1337 = x1336; } else { x1337 = false; } int32_t x1342 = x1335 - 3; int32_t x1343 = x1342 / 1; int32_t x1344 = x1343 + 1; int32_t x1348 = 4096 * x1344; int32_t x1349 = x1348 * x1344; int32_t x1345 = x1344 * x1344; int32_t x1346 = 64 * x1345; int32_t x1347 = 64 * x1346; bool x1368 = x1344 >= 1; bool x1369; if (x1368) { x1369 = x1368; } else { x1369 = false; } int32_t x1374 = x1343 / 1; int32_t x1375 = x1374 + 1; int32_t x1379 = 16384 * x1375; int32_t x1380 = x1379 * x1375; int32_t x1376 = x1375 * x1375; int32_t x1377 = 256 * x1376; int32_t x1378 = 64 * x1377; bool 
x1393 = x1248 == 1; bool x1394 = x1248 == x1375; bool x1395 = x1393 || x1394; bool x1396; if (x1395) { x1396 = x1395; } else { x1396 = false; } bool x1412 = x1375 >= 1; bool x1413; if (x1412) { x1413 = x1412; } else { x1413 = false; } int32_t x1418 = x1374 / 1; int32_t x1419 = x1418 + 1; int32_t x1423 = 4096 * x1419; int32_t x1424 = x1423 * x1419; int32_t x1420 = x1419 * x1419; int32_t x1421 = 64 * x1420; int32_t x1422 = 64 * x1421; int32_t x1443 = x1419 + 2; bool x1444 = x1443 >= 3; bool x1445; if (x1444) { x1445 = x1444; } else { x1445 = false; } int32_t x1450 = x1443 - 3; int32_t x1451 = x1450 / 1; int32_t x1452 = x1451 + 1; int32_t x1456 = 4096 * x1452; int32_t x1457 = x1456 * x1452; int32_t x1453 = x1452 * x1452; int32_t x1454 = 64 * x1453; int32_t x1455 = 64 * x1454; bool x1476 = x1452 >= 1; bool x1477; if (x1476) { x1477 = x1476; } else { x1477 = false; } int32_t x1482 = x1451 / 1; int32_t x1483 = x1482 + 1; int32_t x1487 = 16384 * x1483; int32_t x1488 = x1487 * x1483; int32_t x1484 = x1483 * x1483; int32_t x1485 = 256 * x1484; int32_t x1486 = 64 * x1485; bool x1501 = x1375 == 1; bool x1502 = x1375 == x1483; bool x1503 = x1501 || x1502; bool x1504; if (x1503) { x1504 = x1503; } else { x1504 = false; } bool x1520 = x1483 >= 1; bool x1521; if (x1520) { x1521 = x1520; } else { x1521 = false; } int32_t x1526 = x1482 / 1; int32_t x1527 = x1526 + 1; int32_t x1531 = 8192 * x1527; int32_t x1532 = x1531 * x1527; int32_t x1528 = x1527 * x1527; int32_t x1529 = 128 * x1528; int32_t x1530 = 64 * x1529; int32_t x1551 = x1527 + 2; bool x1552 = x1551 >= 3; bool x1553; if (x1552) { x1553 = x1552; } else { x1553 = false; } int32_t x1558 = x1551 - 3; int32_t x1559 = x1558 / 2; int32_t x1560 = x1559 + 1; int32_t x1564 = 8192 * x1560; int32_t x1565 = x1564 * x1560; int32_t x1561 = x1560 * x1560; int32_t x1562 = 128 * x1561; int32_t x1563 = 64 * x1562; bool x1584 = x1560 >= 1; bool x1585; if (x1584) { x1585 = x1584; } else { x1585 = false; } int32_t x1590 = x1559 / 1; int32_t x1591 = x1590 + 1; int32_t x1595 = 32768 * x1591; int32_t x1596 = x1595 * x1591; int32_t x1592 = x1591 * x1591; int32_t x1593 = 512 * x1592; int32_t x1594 = 64 * x1593; int32_t x1612 = x1482 / 2; int32_t x1613 = x1612 + 1; int32_t x1617 = 32768 * x1613; int32_t x1618 = x1617 * x1613; int32_t x1614 = x1613 * x1613; int32_t x1615 = 512 * x1614; int32_t x1616 = 64 * x1615; bool x1631 = x1613 == 1; bool x1632 = x1613 == x1591; bool x1633 = x1631 || x1632; bool x1634; if (x1633) { x1634 = x1633; } else { x1634 = false; } bool x1650 = x1591 >= 1; bool x1651; if (x1650) { x1651 = x1650; } else { x1651 = false; } int32_t x1656 = x1590 / 1; int32_t x1657 = x1656 + 1; int32_t x1661 = 8192 * x1657; int32_t x1662 = x1661 * x1657; int32_t x1658 = x1657 * x1657; int32_t x1659 = 128 * x1658; int32_t x1660 = 64 * x1659; int32_t x1681 = x1657 + 2; bool x1682 = x1681 >= 3; bool x1683; if (x1682) { x1683 = x1682; } else { x1683 = false; } int32_t x1688 = x1681 - 3; int32_t x1689 = x1688 / 1; int32_t x1690 = x1689 + 1; int32_t x1694 = 8192 * x1690; int32_t x1695 = x1694 * x1690; int32_t x1691 = x1690 * x1690; int32_t x1692 = 128 * x1691; int32_t x1693 = 64 * x1692; bool x1714 = x1690 >= 1; bool x1715; if (x1714) { x1715 = x1714; } else { x1715 = false; } int32_t x1720 = x1689 / 1; int32_t x1721 = x1720 + 1; int32_t x1725 = 32768 * x1721; int32_t x1726 = x1725 * x1721; int32_t x1722 = x1721 * x1721; int32_t x1723 = 512 * x1722; int32_t x1724 = 64 * x1723; bool x1739 = x1591 == 1; bool x1740 = x1591 == x1721; bool x1741 = x1739 || x1740; bool x1742; if 
(x1741) { x1742 = x1741; } else { x1742 = false; } bool x1758 = x1721 >= 1; bool x1759; if (x1758) { x1759 = x1758; } else { x1759 = false; } int32_t x1764 = x1720 / 1; int32_t x1765 = x1764 + 1; int32_t x1769 = 8192 * x1765; int32_t x1770 = x1769 * x1765; int32_t x1766 = x1765 * x1765; int32_t x1767 = 128 * x1766; int32_t x1768 = 64 * x1767; int32_t x1789 = x1765 + 2; bool x1790 = x1789 >= 3; bool x1791; if (x1790) { x1791 = x1790; } else { x1791 = false; } int32_t x1796 = x1789 - 3; int32_t x1797 = x1796 / 1; int32_t x1798 = x1797 + 1; int32_t x1802 = 8192 * x1798; int32_t x1803 = x1802 * x1798; int32_t x1799 = x1798 * x1798; int32_t x1800 = 128 * x1799; int32_t x1801 = 64 * x1800; bool x1822 = x1798 >= 1; bool x1823; if (x1822) { x1823 = x1822; } else { x1823 = false; } int32_t x1828 = x1797 / 1; int32_t x1829 = x1828 + 1; int32_t x1833 = 32768 * x1829; int32_t x1834 = x1833 * x1829; int32_t x1830 = x1829 * x1829; int32_t x1831 = 512 * x1830; int32_t x1832 = 64 * x1831; bool x1847 = x1721 == 1; bool x1848 = x1721 == x1829; bool x1849 = x1847 || x1848; bool x1850; if (x1849) { x1850 = x1849; } else { x1850 = false; } bool x1866 = x1829 >= 1; bool x1867; if (x1866) { x1867 = x1866; } else { x1867 = false; } int32_t x1872 = x1828 / 1; int32_t x1873 = x1872 + 1; int32_t x1877 = 8192 * x1873; int32_t x1878 = x1877 * x1873; int32_t x1874 = x1873 * x1873; int32_t x1875 = 128 * x1874; int32_t x1876 = 64 * x1875; int32_t x1897 = x1873 + 2; bool x1898 = x1897 >= 3; bool x1899; if (x1898) { x1899 = x1898; } else { x1899 = false; } int32_t x1904 = x1897 - 3; int32_t x1905 = x1904 / 1; int32_t x1906 = x1905 + 1; int32_t x1910 = 8192 * x1906; int32_t x1911 = x1910 * x1906; int32_t x1907 = x1906 * x1906; int32_t x1908 = 128 * x1907; int32_t x1909 = 64 * x1908; bool x1930 = x1906 >= 1; bool x1931; if (x1930) { x1931 = x1930; } else { x1931 = false; } int32_t x1936 = x1905 / 1; int32_t x1937 = x1936 + 1; int32_t x1941 = 32768 * x1937; int32_t x1942 = x1941 * x1937; int32_t x1938 = x1937 * x1937; int32_t x1939 = 512 * x1938; int32_t x1940 = 64 * x1939; bool x1955 = x1829 == 1; bool x1956 = x1829 == x1937; bool x1957 = x1955 || x1956; bool x1958; if (x1957) { x1958 = x1957; } else { x1958 = false; } bool x1974 = x1937 >= 1; bool x1975; if (x1974) { x1975 = x1974; } else { x1975 = false; } int32_t x1980 = x1936 / 1; int32_t x1981 = x1980 + 1; int32_t x1985 = 16384 * x1981; int32_t x1986 = x1985 * x1981; int32_t x1982 = x1981 * x1981; int32_t x1983 = 256 * x1982; int32_t x1984 = 64 * x1983; int32_t x2005 = x1981 + 2; bool x2006 = x2005 >= 3; bool x2007; if (x2006) { x2007 = x2006; } else { x2007 = false; } int32_t x2012 = x2005 - 3; int32_t x2013 = x2012 / 2; int32_t x2014 = x2013 + 1; int32_t x2018 = 16384 * x2014; int32_t x2019 = x2018 * x2014; int32_t x2015 = x2014 * x2014; int32_t x2016 = 256 * x2015; int32_t x2017 = 64 * x2016; bool x2038 = x2014 >= 1; bool x2039; if (x2038) { x2039 = x2038; } else { x2039 = false; } int32_t x2044 = x2013 / 1; int32_t x2045 = x2044 + 1; int32_t x2049 = 65536 * x2045; int32_t x2050 = x2049 * x2045; int32_t x2046 = x2045 * x2045; int32_t x2047 = 1024 * x2046; int32_t x2048 = 64 * x2047; int32_t x2066 = x1936 / 2; int32_t x2067 = x2066 + 1; int32_t x2071 = 65536 * x2067; int32_t x2072 = x2071 * x2067; int32_t x2068 = x2067 * x2067; int32_t x2069 = 1024 * x2068; int32_t x2070 = 64 * x2069; bool x2085 = x2067 == 1; bool x2086 = x2067 == x2045; bool x2087 = x2085 || x2086; bool x2088; if (x2087) { x2088 = x2087; } else { x2088 = false; } bool x2104 = x2045 >= 1; bool x2105; 
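// The straight-line integer arithmetic running through this stretch (x1138 .. x3005) precomputes
// every feature-map size and buffer element count used by the forward pass below, following the
// usual convolution/pooling size rule:
//   out = (in + 2*pad - kernel) / stride + 1
// Worked examples from the values above: the 3x3 / pad-1 / stride-1 stem convolution on a 32x32
// input gives x1138 = (32 + 2 - 3)/1 = 31 and x1139 = 32; the 2x2 / stride-2 max pool then gives
// x1168 = (32 - 2)/2 = 15 and x1169 = 16. Buffer sizes such as x1144 = 4096 * 32 * 32 are simply
// batch(64) * channels * H * W.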
if (x2104) { x2105 = x2104; } else { x2105 = false; } int32_t x2110 = x2044 / 1; int32_t x2111 = x2110 + 1; int32_t x2115 = 16384 * x2111; int32_t x2116 = x2115 * x2111; int32_t x2112 = x2111 * x2111; int32_t x2113 = 256 * x2112; int32_t x2114 = 64 * x2113; int32_t x2135 = x2111 + 2; bool x2136 = x2135 >= 3; bool x2137; if (x2136) { x2137 = x2136; } else { x2137 = false; } int32_t x2142 = x2135 - 3; int32_t x2143 = x2142 / 1; int32_t x2144 = x2143 + 1; int32_t x2148 = 16384 * x2144; int32_t x2149 = x2148 * x2144; int32_t x2145 = x2144 * x2144; int32_t x2146 = 256 * x2145; int32_t x2147 = 64 * x2146; bool x2168 = x2144 >= 1; bool x2169; if (x2168) { x2169 = x2168; } else { x2169 = false; } int32_t x2174 = x2143 / 1; int32_t x2175 = x2174 + 1; int32_t x2179 = 65536 * x2175; int32_t x2180 = x2179 * x2175; int32_t x2176 = x2175 * x2175; int32_t x2177 = 1024 * x2176; int32_t x2178 = 64 * x2177; bool x2193 = x2045 == 1; bool x2194 = x2045 == x2175; bool x2195 = x2193 || x2194; bool x2196; if (x2195) { x2196 = x2195; } else { x2196 = false; } bool x2212 = x2175 >= 1; bool x2213; if (x2212) { x2213 = x2212; } else { x2213 = false; } int32_t x2218 = x2174 / 1; int32_t x2219 = x2218 + 1; int32_t x2223 = 16384 * x2219; int32_t x2224 = x2223 * x2219; int32_t x2220 = x2219 * x2219; int32_t x2221 = 256 * x2220; int32_t x2222 = 64 * x2221; int32_t x2243 = x2219 + 2; bool x2244 = x2243 >= 3; bool x2245; if (x2244) { x2245 = x2244; } else { x2245 = false; } int32_t x2250 = x2243 - 3; int32_t x2251 = x2250 / 1; int32_t x2252 = x2251 + 1; int32_t x2256 = 16384 * x2252; int32_t x2257 = x2256 * x2252; int32_t x2253 = x2252 * x2252; int32_t x2254 = 256 * x2253; int32_t x2255 = 64 * x2254; bool x2276 = x2252 >= 1; bool x2277; if (x2276) { x2277 = x2276; } else { x2277 = false; } int32_t x2282 = x2251 / 1; int32_t x2283 = x2282 + 1; int32_t x2287 = 65536 * x2283; int32_t x2288 = x2287 * x2283; int32_t x2284 = x2283 * x2283; int32_t x2285 = 1024 * x2284; int32_t x2286 = 64 * x2285; bool x2301 = x2175 == 1; bool x2302 = x2175 == x2283; bool x2303 = x2301 || x2302; bool x2304; if (x2303) { x2304 = x2303; } else { x2304 = false; } bool x2320 = x2283 >= 1; bool x2321; if (x2320) { x2321 = x2320; } else { x2321 = false; } int32_t x2326 = x2282 / 1; int32_t x2327 = x2326 + 1; int32_t x2331 = 16384 * x2327; int32_t x2332 = x2331 * x2327; int32_t x2328 = x2327 * x2327; int32_t x2329 = 256 * x2328; int32_t x2330 = 64 * x2329; int32_t x2351 = x2327 + 2; bool x2352 = x2351 >= 3; bool x2353; if (x2352) { x2353 = x2352; } else { x2353 = false; } int32_t x2358 = x2351 - 3; int32_t x2359 = x2358 / 1; int32_t x2360 = x2359 + 1; int32_t x2364 = 16384 * x2360; int32_t x2365 = x2364 * x2360; int32_t x2361 = x2360 * x2360; int32_t x2362 = 256 * x2361; int32_t x2363 = 64 * x2362; bool x2384 = x2360 >= 1; bool x2385; if (x2384) { x2385 = x2384; } else { x2385 = false; } int32_t x2390 = x2359 / 1; int32_t x2391 = x2390 + 1; int32_t x2395 = 65536 * x2391; int32_t x2396 = x2395 * x2391; int32_t x2392 = x2391 * x2391; int32_t x2393 = 1024 * x2392; int32_t x2394 = 64 * x2393; bool x2409 = x2283 == 1; bool x2410 = x2283 == x2391; bool x2411 = x2409 || x2410; bool x2412; if (x2411) { x2412 = x2411; } else { x2412 = false; } bool x2428 = x2391 >= 1; bool x2429; if (x2428) { x2429 = x2428; } else { x2429 = false; } int32_t x2434 = x2390 / 1; int32_t x2435 = x2434 + 1; int32_t x2439 = 16384 * x2435; int32_t x2440 = x2439 * x2435; int32_t x2436 = x2435 * x2435; int32_t x2437 = 256 * x2436; int32_t x2438 = 64 * x2437; int32_t x2459 = x2435 + 2; 
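// The boolean flags interleaved with the size arithmetic (e.g. x2085 = (x2067 == 1),
// x2086 = (x2067 == x2045)) record whether the shortcut tensor of each residual block is
// broadcast-compatible with the block output, while the ">= 1" / ">= 3" flags are minimum-size
// checks. Both kinds only feed the assert(...) guards in the loop body below, so a shape mismatch
// fails fast with the "bias shape should be equal to res or be 1" message instead of running a
// malformed cuDNN call.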
bool x2460 = x2459 >= 3; bool x2461; if (x2460) { x2461 = x2460; } else { x2461 = false; } int32_t x2466 = x2459 - 3; int32_t x2467 = x2466 / 1; int32_t x2468 = x2467 + 1; int32_t x2472 = 16384 * x2468; int32_t x2473 = x2472 * x2468; int32_t x2469 = x2468 * x2468; int32_t x2470 = 256 * x2469; int32_t x2471 = 64 * x2470; bool x2492 = x2468 >= 1; bool x2493; if (x2492) { x2493 = x2492; } else { x2493 = false; } int32_t x2498 = x2467 / 1; int32_t x2499 = x2498 + 1; int32_t x2503 = 65536 * x2499; int32_t x2504 = x2503 * x2499; int32_t x2500 = x2499 * x2499; int32_t x2501 = 1024 * x2500; int32_t x2502 = 64 * x2501; bool x2517 = x2391 == 1; bool x2518 = x2391 == x2499; bool x2519 = x2517 || x2518; bool x2520; if (x2519) { x2520 = x2519; } else { x2520 = false; } bool x2536 = x2499 >= 1; bool x2537; if (x2536) { x2537 = x2536; } else { x2537 = false; } int32_t x2542 = x2498 / 1; int32_t x2543 = x2542 + 1; int32_t x2547 = 16384 * x2543; int32_t x2548 = x2547 * x2543; int32_t x2544 = x2543 * x2543; int32_t x2545 = 256 * x2544; int32_t x2546 = 64 * x2545; int32_t x2567 = x2543 + 2; bool x2568 = x2567 >= 3; bool x2569; if (x2568) { x2569 = x2568; } else { x2569 = false; } int32_t x2574 = x2567 - 3; int32_t x2575 = x2574 / 1; int32_t x2576 = x2575 + 1; int32_t x2580 = 16384 * x2576; int32_t x2581 = x2580 * x2576; int32_t x2577 = x2576 * x2576; int32_t x2578 = 256 * x2577; int32_t x2579 = 64 * x2578; bool x2600 = x2576 >= 1; bool x2601; if (x2600) { x2601 = x2600; } else { x2601 = false; } int32_t x2606 = x2575 / 1; int32_t x2607 = x2606 + 1; int32_t x2611 = 65536 * x2607; int32_t x2612 = x2611 * x2607; int32_t x2608 = x2607 * x2607; int32_t x2609 = 1024 * x2608; int32_t x2610 = 64 * x2609; bool x2625 = x2499 == 1; bool x2626 = x2499 == x2607; bool x2627 = x2625 || x2626; bool x2628; if (x2627) { x2628 = x2627; } else { x2628 = false; } bool x2644 = x2607 >= 1; bool x2645; if (x2644) { x2645 = x2644; } else { x2645 = false; } int32_t x2650 = x2606 / 1; int32_t x2651 = x2650 + 1; int32_t x2655 = 32768 * x2651; int32_t x2656 = x2655 * x2651; int32_t x2652 = x2651 * x2651; int32_t x2653 = 512 * x2652; int32_t x2654 = 64 * x2653; int32_t x2675 = x2651 + 2; bool x2676 = x2675 >= 3; bool x2677; if (x2676) { x2677 = x2676; } else { x2677 = false; } int32_t x2682 = x2675 - 3; int32_t x2683 = x2682 / 2; int32_t x2684 = x2683 + 1; int32_t x2688 = 32768 * x2684; int32_t x2689 = x2688 * x2684; int32_t x2685 = x2684 * x2684; int32_t x2686 = 512 * x2685; int32_t x2687 = 64 * x2686; bool x2708 = x2684 >= 1; bool x2709; if (x2708) { x2709 = x2708; } else { x2709 = false; } int32_t x2714 = x2683 / 1; int32_t x2715 = x2714 + 1; int32_t x2719 = 131072 * x2715; int32_t x2720 = x2719 * x2715; int32_t x2716 = x2715 * x2715; int32_t x2717 = 2048 * x2716; int32_t x2718 = 64 * x2717; int32_t x2736 = x2606 / 2; int32_t x2737 = x2736 + 1; int32_t x2741 = 131072 * x2737; int32_t x2742 = x2741 * x2737; int32_t x2738 = x2737 * x2737; int32_t x2739 = 2048 * x2738; int32_t x2740 = 64 * x2739; bool x2755 = x2737 == 1; bool x2756 = x2737 == x2715; bool x2757 = x2755 || x2756; bool x2758; if (x2757) { x2758 = x2757; } else { x2758 = false; } bool x2774 = x2715 >= 1; bool x2775; if (x2774) { x2775 = x2774; } else { x2775 = false; } int32_t x2780 = x2714 / 1; int32_t x2781 = x2780 + 1; int32_t x2785 = 32768 * x2781; int32_t x2786 = x2785 * x2781; int32_t x2782 = x2781 * x2781; int32_t x2783 = 512 * x2782; int32_t x2784 = 64 * x2783; int32_t x2805 = x2781 + 2; bool x2806 = x2805 >= 3; bool x2807; if (x2806) { x2807 = x2806; } else { x2807 
= false; } int32_t x2812 = x2805 - 3; int32_t x2813 = x2812 / 1; int32_t x2814 = x2813 + 1; int32_t x2818 = 32768 * x2814; int32_t x2819 = x2818 * x2814; int32_t x2815 = x2814 * x2814; int32_t x2816 = 512 * x2815; int32_t x2817 = 64 * x2816; bool x2838 = x2814 >= 1; bool x2839; if (x2838) { x2839 = x2838; } else { x2839 = false; } int32_t x2844 = x2813 / 1; int32_t x2845 = x2844 + 1; int32_t x2849 = 131072 * x2845; int32_t x2850 = x2849 * x2845; int32_t x2846 = x2845 * x2845; int32_t x2847 = 2048 * x2846; int32_t x2848 = 64 * x2847; bool x2863 = x2715 == 1; bool x2864 = x2715 == x2845; bool x2865 = x2863 || x2864; bool x2866; if (x2865) { x2866 = x2865; } else { x2866 = false; } bool x2882 = x2845 >= 1; bool x2883; if (x2882) { x2883 = x2882; } else { x2883 = false; } int32_t x2888 = x2844 / 1; int32_t x2889 = x2888 + 1; int32_t x2893 = 32768 * x2889; int32_t x2894 = x2893 * x2889; int32_t x2890 = x2889 * x2889; int32_t x2891 = 512 * x2890; int32_t x2892 = 64 * x2891; int32_t x2913 = x2889 + 2; bool x2914 = x2913 >= 3; bool x2915; if (x2914) { x2915 = x2914; } else { x2915 = false; } int32_t x2920 = x2913 - 3; int32_t x2921 = x2920 / 1; int32_t x2922 = x2921 + 1; int32_t x2926 = 32768 * x2922; int32_t x2927 = x2926 * x2922; int32_t x2923 = x2922 * x2922; int32_t x2924 = 512 * x2923; int32_t x2925 = 64 * x2924; bool x2946 = x2922 >= 1; bool x2947; if (x2946) { x2947 = x2946; } else { x2947 = false; } int32_t x2952 = x2921 / 1; int32_t x2953 = x2952 + 1; int32_t x2957 = 131072 * x2953; int32_t x2958 = x2957 * x2953; int32_t x2954 = x2953 * x2953; int32_t x2955 = 2048 * x2954; int32_t x2956 = 64 * x2955; bool x2971 = x2845 == 1; bool x2972 = x2845 == x2953; bool x2973 = x2971 || x2972; bool x2974; if (x2973) { x2974 = x2973; } else { x2974 = false; } bool x2990 = x2953 >= 2; bool x2991; if (x2990) { x2991 = x2990; } else { x2991 = false; } int32_t x3000 = x2953 - 2; int32_t x3001 = x3000 / 1; int32_t x3002 = x3001 + 1; int32_t x3006 = 131072 * x3002; int32_t x3007 = x3006 * x3002; int32_t x3003 = x3002 * x3002; int32_t x3004 = 2048 * x3003; int32_t x3005 = 64 * x3004; for(int x1106=0; x1106 < x1104; x1106++) { int32_t x1107 = x1106 * 64; int32_t x1108 = x1107 * 3072; float* x1109 = x1082+x1108; int* x1110 = x1083+x1107; printf("input (size Const(64) x Const(3) x Const(32) x Const(32))\n"); float x1112 = 0.0f; for(int x1114=0; x1114 < 196608; x1114++) { float x1115 = x1112; float x1117 = x1109[x1114]; float x1116 = fabs(x1115); float x1118 = fabs(x1117); bool x1119 = x1116 > x1118; float x1122; if (x1119) { x1122 = x1115; } else { float x1120 = x1109[x1114]; x1122 = x1120; } x1112 = x1122; } float x1126 = x1112; printf("Max Abs: %.5f || ",x1126); for(int x1129=0; x1129 < 10; x1129++) { float x1130 = x1109[x1129]; printf("%.5f ",x1130); } printf("\n"); // Tensor 'toGPU' invocation. 
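// Start of the per-batch loop (x1106 over x1104 mini-batches of 64 images). For each batch the
// host code prints the max-abs value and the first ten inputs as a debug trace, copies the
// 64*3*32*32 = 196608 input floats to the GPU, and then runs the forward pass: the remainder of
// the loop body is a straight-line ResNet-style network built from repeated
// convolution -> batch-norm (inference) -> ReLU blocks with residual additions, all expressed as
// cuDNN calls against the parameter tensors staged above.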
float* x1136 = (float*)myGpuMalloc(196608 * sizeof(float)); CUDA_CALL(cudaMemcpy(x1136, x1109, 196608 * sizeof(float), cudaMemcpyHostToDevice)); float* x1145 = (float*)myGpuMalloc(x1144 * sizeof(float)); float* x1146 = (float*)myMalloc(1 * sizeof(float));; x1146[0] = 0.0f; float* x1148 = (float*)myMalloc(1 * sizeof(float));; x1148[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 3, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1139, x1139)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1148, in_desc, x1136, filt_desc, x714, conv_desc, algo, ws_data, ws_size, x1146, out_desc, x1145)); }; float* x1151 = (float*)myGpuMalloc(x1142 * sizeof(float)); float* x1152 = (float*)myMalloc(1 * sizeof(float));; x1152[0] = 0.0f; float* x1154 = (float*)myMalloc(1 * sizeof(float));; x1154[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1139, x1139)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1139, x1139)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1154, x1154, in_desc, x1145, out_desc, x1151, sbmv_desc, x876, x1011, x378, x588, 1.0E-5)); }; float* x1157 = (float*)myMalloc(1 * sizeof(float));; x1157[0] = 0.0f; float* x1159 = (float*)myMalloc(1 * sizeof(float));; x1159[0] = 1.0f; float* x1161 = (float*)myGpuMalloc(x1142 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1139, x1139)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1159, x_desc, x1151, x1157, x_desc, x1161)); }; float* x1163 = (float*)myMalloc(1 * sizeof(float));; x1163[0] = 0.0f; float* 
x1165 = (float*)myMalloc(1 * sizeof(float));; x1165[0] = 1.0f; float* x1175 = (float*)myGpuMalloc(x1174 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1139, x1139) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1169, x1169)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x1165, in_desc, x1161, x1163, out_desc, x1175)); }; if (x1178) { } else { assert(false && "ERROR not specified"); } float* x1190 = (float*)myGpuMalloc(x1189 * sizeof(float)); float* x1191 = (float*)myMalloc(1 * sizeof(float));; x1191[0] = 0.0f; float* x1193 = (float*)myMalloc(1 * sizeof(float));; x1193[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1169, x1169)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1184, x1184)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
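// Every convolution in this loop body follows the same five-step cuDNN recipe: describe the
// input/filter/output tensors, describe the convolution (pad, stride, cross-correlation, FP32,
// Tensor-Op math allowed), ask cuDNN for a forward algorithm, query and allocate its workspace,
// then launch cudnnConvolutionForward with alpha = 1 and beta = 0 into a freshly allocated output.
// Portability note: cudnnGetConvolutionForwardAlgorithm with CUDNN_CONVOLUTION_FWD_PREFER_FASTEST
// is a cuDNN 7-era API that was removed in cuDNN 8; building this file against newer cuDNN would
// mean switching to cudnnGetConvolutionForwardAlgorithm_v7 or cudnnFindConvolutionForwardAlgorithm.
// The workspace pointer comes from myGpuMalloc and is never freed per call, presumably relying on
// an arena-style allocator behind myGpuMalloc.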
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1193, in_desc, x1175, filt_desc, x957, conv_desc, algo, ws_data, ws_size, x1191, out_desc, x1190)); }; float* x1196 = (float*)myGpuMalloc(x1187 * sizeof(float)); float* x1197 = (float*)myMalloc(1 * sizeof(float));; x1197[0] = 0.0f; float* x1199 = (float*)myMalloc(1 * sizeof(float));; x1199[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1184, x1184)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1184, x1184)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1199, x1199, in_desc, x1190, out_desc, x1196, sbmv_desc, x336, x417, x600, x411, 1.0E-5)); }; float* x1202 = (float*)myMalloc(1 * sizeof(float));; x1202[0] = 0.0f; float* x1204 = (float*)myMalloc(1 * sizeof(float));; x1204[0] = 1.0f; float* x1206 = (float*)myGpuMalloc(x1187 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1184, x1184)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1204, x_desc, x1196, x1202, x_desc, x1206)); }; if (x1210) { } else { assert(false && "ERROR not specified"); } float* x1223 = (float*)myGpuMalloc(x1222 * sizeof(float)); float* x1224 = (float*)myMalloc(1 * sizeof(float));; x1224[0] = 0.0f; float* x1226 = (float*)myMalloc(1 * sizeof(float));; x1226[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1184, x1184)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1217, x1217)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1226, in_desc, x1206, filt_desc, x528, conv_desc, algo, ws_data, ws_size, x1224, out_desc, x1223)); }; float* x1229 = (float*)myGpuMalloc(x1220 * sizeof(float)); float* x1230 = (float*)myMalloc(1 * sizeof(float));; x1230[0] = 0.0f; float* x1232 = (float*)myMalloc(1 * sizeof(float));; x1232[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1217, x1217)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1217, x1217)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1232, x1232, in_desc, x1223, out_desc, x1229, sbmv_desc, x750, x405, x573, x732, 1.0E-5)); }; float* x1235 = (float*)myMalloc(1 * sizeof(float));; x1235[0] = 0.0f; float* x1237 = (float*)myMalloc(1 * sizeof(float));; x1237[0] = 1.0f; float* x1239 = (float*)myGpuMalloc(x1220 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1217, x1217)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1237, x_desc, x1229, x1235, x_desc, x1239)); }; if (x1242) { } else { assert(false && "ERROR not specified"); } float* x1254 = (float*)myGpuMalloc(x1253 * sizeof(float)); float* x1255 = (float*)myMalloc(1 * sizeof(float));; x1255[0] = 0.0f; float* x1257 = (float*)myMalloc(1 * sizeof(float));; x1257[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1217, x1217)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1257, in_desc, x1239, filt_desc, x354, conv_desc, algo, ws_data, ws_size, x1255, out_desc, x1254)); }; float* x1260 = (float*)myGpuMalloc(x1251 * sizeof(float)); float* x1261 = (float*)myMalloc(1 * sizeof(float));; x1261[0] = 0.0f; float* x1263 = (float*)myMalloc(1 * sizeof(float));; x1263[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1263, x1263, in_desc, x1254, out_desc, x1260, sbmv_desc, x855, x636, x471, x366, 1.0E-5)); }; if (x1178) { } else { assert(false && "ERROR not specified"); } float* x1273 = (float*)myGpuMalloc(x1272 * sizeof(float)); float* x1274 = (float*)myMalloc(1 * sizeof(float));; x1274[0] = 0.0f; float* x1276 = (float*)myMalloc(1 * sizeof(float));; x1276[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1169, x1169)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1184, x1184)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1276, in_desc, x1175, filt_desc, x744, conv_desc, algo, ws_data, ws_size, x1274, out_desc, x1273)); }; float* x1279 = (float*)myGpuMalloc(x1270 * sizeof(float)); float* x1280 = (float*)myMalloc(1 * sizeof(float));; x1280[0] = 0.0f; float* x1282 = (float*)myMalloc(1 * sizeof(float));; x1282[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1184, x1184)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1184, x1184)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1282, x1282, in_desc, x1273, out_desc, x1279, sbmv_desc, x486, x867, x1050, x987, 1.0E-5)); }; if (x1288) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1184) x Sym(1184), res: x Const(64) x Const(256) x Sym(1248) x Sym(1248)"); } float* x1293 = (float*)myMalloc(1 * sizeof(float));; x1293[0] = 1.0f; float* x1295 = (float*)myMalloc(1 * sizeof(float));; x1295[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1184, x1184)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1293, bias_desc, x1279, x1295, out_desc, x1260)); }; float* x1298 = (float*)myMalloc(1 * sizeof(float));; x1298[0] = 0.0f; float* x1300 = (float*)myMalloc(1 * sizeof(float));; x1300[0] = 1.0f; float* x1302 = (float*)myGpuMalloc(x1251 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1300, x_desc, x1260, x1298, x_desc, x1302)); }; if (x1305) { } else { assert(false && "ERROR not specified"); } float* x1317 = (float*)myGpuMalloc(x1316 * sizeof(float)); float* x1318 = (float*)myMalloc(1 * sizeof(float));; x1318[0] = 0.0f; float* x1320 = (float*)myMalloc(1 * sizeof(float));; x1320[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1311, x1311)); 
cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1320, in_desc, x1302, filt_desc, x771, conv_desc, algo, ws_data, ws_size, x1318, out_desc, x1317)); }; float* x1323 = (float*)myGpuMalloc(x1314 * sizeof(float)); float* x1324 = (float*)myMalloc(1 * sizeof(float));; x1324[0] = 0.0f; float* x1326 = (float*)myMalloc(1 * sizeof(float));; x1326[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1311, x1311)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1311, x1311)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1326, x1326, in_desc, x1317, out_desc, x1323, sbmv_desc, x684, x438, x288, x564, 1.0E-5)); }; float* x1329 = (float*)myMalloc(1 * sizeof(float));; x1329[0] = 0.0f; float* x1331 = (float*)myMalloc(1 * sizeof(float));; x1331[0] = 1.0f; float* x1333 = (float*)myGpuMalloc(x1314 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1311, x1311)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1331, x_desc, x1323, x1329, x_desc, x1333)); }; if (x1337) { } else { assert(false && "ERROR not specified"); } float* x1350 = (float*)myGpuMalloc(x1349 * sizeof(float)); float* x1351 = (float*)myMalloc(1 * sizeof(float));; x1351[0] = 0.0f; float* x1353 = (float*)myMalloc(1 * sizeof(float));; x1353[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1311, x1311)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1344, x1344)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 
1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1353, in_desc, x1333, filt_desc, x507, conv_desc, algo, ws_data, ws_size, x1351, out_desc, x1350)); }; float* x1356 = (float*)myGpuMalloc(x1347 * sizeof(float)); float* x1357 = (float*)myMalloc(1 * sizeof(float));; x1357[0] = 0.0f; float* x1359 = (float*)myMalloc(1 * sizeof(float));; x1359[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1344, x1344)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1344, x1344)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1359, x1359, in_desc, x1350, out_desc, x1356, sbmv_desc, x882, x717, x390, x990, 1.0E-5)); }; float* x1362 = (float*)myMalloc(1 * sizeof(float));; x1362[0] = 0.0f; float* x1364 = (float*)myMalloc(1 * sizeof(float));; x1364[0] = 1.0f; float* x1366 = (float*)myGpuMalloc(x1347 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1344, x1344)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1364, x_desc, x1356, x1362, x_desc, x1366)); }; if (x1369) { } else { assert(false && "ERROR not specified"); } float* x1381 = (float*)myGpuMalloc(x1380 * sizeof(float)); float* x1382 = (float*)myMalloc(1 * sizeof(float));; x1382[0] = 0.0f; float* x1384 = (float*)myMalloc(1 * sizeof(float));; x1384[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1344, x1344)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
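// The remaining bottleneck blocks of this stage repeat the same descriptor / algorithm / workspace
// boilerplate verbatim; only the filter tensors and the shortcut source change. From the second
// block onward the shortcut is the identity: cudnnAddTensor adds the block's own input (x1302,
// then x1410) back onto the main path, with no projection convolution.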
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1384, in_desc, x1366, filt_desc, x648, conv_desc, algo, ws_data, ws_size, x1382, out_desc, x1381)); }; float* x1387 = (float*)myGpuMalloc(x1378 * sizeof(float)); float* x1388 = (float*)myMalloc(1 * sizeof(float));; x1388[0] = 0.0f; float* x1390 = (float*)myMalloc(1 * sizeof(float));; x1390[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1390, x1390, in_desc, x1381, out_desc, x1387, sbmv_desc, x432, x279, x531, x756, 1.0E-5)); }; if (x1396) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1248) x Sym(1248), res: x Const(64) x Const(256) x Sym(1375) x Sym(1375)"); } float* x1401 = (float*)myMalloc(1 * sizeof(float));; x1401[0] = 1.0f; float* x1403 = (float*)myMalloc(1 * sizeof(float));; x1403[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1248, x1248)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1401, bias_desc, x1302, x1403, out_desc, x1387)); }; float* x1406 = (float*)myMalloc(1 * sizeof(float));; x1406[0] = 0.0f; float* x1408 = (float*)myMalloc(1 * sizeof(float));; x1408[0] = 1.0f; float* x1410 = (float*)myGpuMalloc(x1378 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1408, x_desc, x1387, x1406, x_desc, x1410)); }; if (x1413) { } else { assert(false && "ERROR not specified"); } float* x1425 = (float*)myGpuMalloc(x1424 * sizeof(float)); float* x1426 = (float*)myMalloc(1 * sizeof(float));; x1426[0] = 0.0f; float* x1428 = (float*)myMalloc(1 * sizeof(float));; x1428[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); 
cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1419, x1419)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1428, in_desc, x1410, filt_desc, x708, conv_desc, algo, ws_data, ws_size, x1426, out_desc, x1425)); }; float* x1431 = (float*)myGpuMalloc(x1422 * sizeof(float)); float* x1432 = (float*)myMalloc(1 * sizeof(float));; x1432[0] = 0.0f; float* x1434 = (float*)myMalloc(1 * sizeof(float));; x1434[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1419, x1419)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1419, x1419)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1434, x1434, in_desc, x1425, out_desc, x1431, sbmv_desc, x501, x330, x1029, x819, 1.0E-5)); }; float* x1437 = (float*)myMalloc(1 * sizeof(float));; x1437[0] = 0.0f; float* x1439 = (float*)myMalloc(1 * sizeof(float));; x1439[0] = 1.0f; float* x1441 = (float*)myGpuMalloc(x1422 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1419, x1419)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1439, x_desc, x1431, x1437, x_desc, x1441)); }; if (x1445) { } else { assert(false && "ERROR not specified"); } float* x1458 = (float*)myGpuMalloc(x1457 * sizeof(float)); float* x1459 = (float*)myMalloc(1 * sizeof(float));; x1459[0] = 0.0f; float* x1461 = (float*)myMalloc(1 * sizeof(float));; x1461[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1419, x1419)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1452, x1452)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1461, in_desc, x1441, filt_desc, x477, conv_desc, algo, ws_data, ws_size, x1459, out_desc, x1458)); }; float* x1464 = (float*)myGpuMalloc(x1455 * sizeof(float)); float* x1465 = (float*)myMalloc(1 * sizeof(float));; x1465[0] = 0.0f; float* x1467 = (float*)myMalloc(1 * sizeof(float));; x1467[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1452, x1452)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1452, x1452)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1467, x1467, in_desc, x1458, out_desc, x1464, sbmv_desc, x474, x663, x795, x612, 1.0E-5)); }; float* x1470 = (float*)myMalloc(1 * sizeof(float));; x1470[0] = 0.0f; float* x1472 = (float*)myMalloc(1 * sizeof(float));; x1472[0] = 1.0f; float* x1474 = (float*)myGpuMalloc(x1455 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1452, x1452)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1472, x_desc, x1464, x1470, x_desc, x1474)); }; if (x1477) { } else { assert(false && "ERROR not specified"); } float* x1489 = (float*)myGpuMalloc(x1488 * sizeof(float)); float* x1490 = (float*)myMalloc(1 * sizeof(float));; x1490[0] = 0.0f; float* x1492 = (float*)myMalloc(1 * sizeof(float));; x1492[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1452, x1452)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); 
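// Resource-lifetime note: each { ... } block above creates tensor/filter/convolution/activation
// descriptors and never calls the matching cudnnDestroy*Descriptor, and the per-convolution
// workspace is never released either. For this one-shot generated benchmark that only costs
// memory; reusing the pattern in long-running code would leak descriptors on every layer of every
// batch.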
CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1492, in_desc, x1474, filt_desc, x519, conv_desc, algo, ws_data, ws_size, x1490, out_desc, x1489)); }; float* x1495 = (float*)myGpuMalloc(x1486 * sizeof(float)); float* x1496 = (float*)myMalloc(1 * sizeof(float));; x1496[0] = 0.0f; float* x1498 = (float*)myMalloc(1 * sizeof(float));; x1498[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1498, x1498, in_desc, x1489, out_desc, x1495, sbmv_desc, x369, x999, x810, x657, 1.0E-5)); }; if (x1504) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1375) x Sym(1375), res: x Const(64) x Const(256) x Sym(1483) x Sym(1483)"); } float* x1509 = (float*)myMalloc(1 * sizeof(float));; x1509[0] = 1.0f; float* x1511 = (float*)myMalloc(1 * sizeof(float));; x1511[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1375, x1375)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1509, bias_desc, x1410, x1511, out_desc, x1495)); }; float* x1514 = (float*)myMalloc(1 * sizeof(float));; x1514[0] = 0.0f; float* x1516 = (float*)myMalloc(1 * sizeof(float));; x1516[0] = 1.0f; float* x1518 = (float*)myGpuMalloc(x1486 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1516, x_desc, x1495, x1514, x_desc, x1518)); }; if (x1521) { } else { assert(false && "ERROR not 
specified"); } float* x1533 = (float*)myGpuMalloc(x1532 * sizeof(float)); float* x1534 = (float*)myMalloc(1 * sizeof(float));; x1534[0] = 0.0f; float* x1536 = (float*)myMalloc(1 * sizeof(float));; x1536[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1527, x1527)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1536, in_desc, x1518, filt_desc, x291, conv_desc, algo, ws_data, ws_size, x1534, out_desc, x1533)); }; float* x1539 = (float*)myGpuMalloc(x1530 * sizeof(float)); float* x1540 = (float*)myMalloc(1 * sizeof(float));; x1540[0] = 0.0f; float* x1542 = (float*)myMalloc(1 * sizeof(float));; x1542[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1527, x1527)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1527, x1527)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1542, x1542, in_desc, x1533, out_desc, x1539, sbmv_desc, x510, x774, x870, x660, 1.0E-5)); }; float* x1545 = (float*)myMalloc(1 * sizeof(float));; x1545[0] = 0.0f; float* x1547 = (float*)myMalloc(1 * sizeof(float));; x1547[0] = 1.0f; float* x1549 = (float*)myGpuMalloc(x1530 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1527, x1527)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1547, x_desc, x1539, x1545, x_desc, x1549)); }; if (x1553) { } else { assert(false && "ERROR not specified"); } float* x1566 = (float*)myGpuMalloc(x1565 * sizeof(float)); float* x1567 = (float*)myMalloc(1 * sizeof(float));; x1567[0] = 0.0f; 
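// A minimal sketch (not part of the generated program) of how the convolution
// sequence this file repeats for every layer -- create descriptors, pick an
// algorithm, size and allocate a workspace, call cudnnConvolutionForward -- could
// be factored into one helper. The name, the NCHW/float-only signature, and the
// alpha = 1 / beta = 0 choice are illustrative assumptions; the helper is defined
// here only for reference and is never called.
auto sketch_conv2d_forward = [&](const float* in, int n, int c, int h, int w,
                                 const float* filt, int k, int kh, int kw,
                                 int pad, int stride,
                                 float* out, int oh, int ow) {
  cudnnTensorDescriptor_t in_desc, out_desc;
  cudnnFilterDescriptor_t filt_desc;
  cudnnConvolutionDescriptor_t conv_desc;
  CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(
      in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w));
  CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc));
  CUDNN_CALL(cudnnSetFilter4dDescriptor(
      filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, k, c, kh, kw));
  CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
  CUDNN_CALL(cudnnSetConvolution2dDescriptor(
      conv_desc, pad, pad, stride, stride, 1, 1,
      CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(
      out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, k, oh, ow));
  // Algorithm selection and workspace sizing, exactly as in the generated blocks.
  cudnnConvolutionFwdAlgo_t algo;
  CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
      cudnnHandle, in_desc, filt_desc, conv_desc, out_desc,
      CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
  size_t ws_size;
  CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
      cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
  void* ws_data = myGpuMalloc(ws_size);
  float one = 1.0f, zero = 0.0f;
  CUDNN_CALL(cudnnConvolutionForward(
      cudnnHandle, &one, in_desc, in, filt_desc, filt, conv_desc, algo,
      ws_data, ws_size, &zero, out_desc, out));
};
(void)sketch_conv2d_forward; // defined for reference only; silences unused warnings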
float* x1569 = (float*)myMalloc(1 * sizeof(float));; x1569[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1527, x1527)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1560, x1560)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1569, in_desc, x1549, filt_desc, x339, conv_desc, algo, ws_data, ws_size, x1567, out_desc, x1566)); }; float* x1572 = (float*)myGpuMalloc(x1563 * sizeof(float)); float* x1573 = (float*)myMalloc(1 * sizeof(float));; x1573[0] = 0.0f; float* x1575 = (float*)myMalloc(1 * sizeof(float));; x1575[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1560, x1560)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1560, x1560)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1575, x1575, in_desc, x1566, out_desc, x1572, sbmv_desc, x1014, x828, x642, x387, 1.0E-5)); }; float* x1578 = (float*)myMalloc(1 * sizeof(float));; x1578[0] = 0.0f; float* x1580 = (float*)myMalloc(1 * sizeof(float));; x1580[0] = 1.0f; float* x1582 = (float*)myGpuMalloc(x1563 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1560, x1560)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1580, x_desc, x1572, x1578, x_desc, x1582)); }; if (x1585) { } else { assert(false && "ERROR not specified"); } float* x1597 = (float*)myGpuMalloc(x1596 * sizeof(float)); float* x1598 = (float*)myMalloc(1 * sizeof(float));; x1598[0] = 0.0f; float* x1600 = (float*)myMalloc(1 * sizeof(float));; x1600[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; 
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1560, x1560)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1600, in_desc, x1582, filt_desc, x576, conv_desc, algo, ws_data, ws_size, x1598, out_desc, x1597)); }; float* x1603 = (float*)myGpuMalloc(x1594 * sizeof(float)); float* x1604 = (float*)myMalloc(1 * sizeof(float));; x1604[0] = 0.0f; float* x1606 = (float*)myMalloc(1 * sizeof(float));; x1606[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1606, x1606, in_desc, x1597, out_desc, x1603, sbmv_desc, x693, x888, x705, x561, 1.0E-5)); }; if (x1521) { } else { assert(false && "ERROR not specified"); } float* x1619 = (float*)myGpuMalloc(x1618 * sizeof(float)); float* x1620 = (float*)myMalloc(1 * sizeof(float));; x1620[0] = 0.0f; float* x1622 = (float*)myMalloc(1 * sizeof(float));; x1622[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1483, x1483)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1613, x1613)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
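// Projection shortcut for this stride-2 block: a second convolution over the same
// block input x1518 (1x1 filter, 256 -> 512 channels, stride 2) so that the skip
// path matches the main path's shape before the residual add below.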
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1622, in_desc, x1518, filt_desc, x1032, conv_desc, algo, ws_data, ws_size, x1620, out_desc, x1619)); }; float* x1625 = (float*)myGpuMalloc(x1616 * sizeof(float)); float* x1626 = (float*)myMalloc(1 * sizeof(float));; x1626[0] = 0.0f; float* x1628 = (float*)myMalloc(1 * sizeof(float));; x1628[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1613, x1613)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1613, x1613)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1628, x1628, in_desc, x1619, out_desc, x1625, sbmv_desc, x879, x615, x384, x327, 1.0E-5)); }; if (x1634) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1613) x Sym(1613), res: x Const(64) x Const(512) x Sym(1591) x Sym(1591)"); } float* x1639 = (float*)myMalloc(1 * sizeof(float));; x1639[0] = 1.0f; float* x1641 = (float*)myMalloc(1 * sizeof(float));; x1641[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1613, x1613)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1639, bias_desc, x1625, x1641, out_desc, x1603)); }; float* x1644 = (float*)myMalloc(1 * sizeof(float));; x1644[0] = 0.0f; float* x1646 = (float*)myMalloc(1 * sizeof(float));; x1646[0] = 1.0f; float* x1648 = (float*)myGpuMalloc(x1594 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1646, x_desc, x1603, x1644, x_desc, x1648)); }; if (x1651) { } else { assert(false && "ERROR not specified"); } float* x1663 = (float*)myGpuMalloc(x1662 * sizeof(float)); float* x1664 = (float*)myMalloc(1 * sizeof(float));; x1664[0] = 0.0f; float* x1666 = (float*)myMalloc(1 * sizeof(float));; x1666[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); 
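// Second bottleneck of the 512-channel stage: the 1x1 reduce (512 -> 128) reads
// the previous block's ReLU output x1648; its filter and output descriptors are
// set up next.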
cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1657, x1657)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1666, in_desc, x1648, filt_desc, x1026, conv_desc, algo, ws_data, ws_size, x1664, out_desc, x1663)); }; float* x1669 = (float*)myGpuMalloc(x1660 * sizeof(float)); float* x1670 = (float*)myMalloc(1 * sizeof(float));; x1670[0] = 0.0f; float* x1672 = (float*)myMalloc(1 * sizeof(float));; x1672[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1657, x1657)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1657, x1657)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1672, x1672, in_desc, x1663, out_desc, x1669, sbmv_desc, x924, x309, x558, x789, 1.0E-5)); }; float* x1675 = (float*)myMalloc(1 * sizeof(float));; x1675[0] = 0.0f; float* x1677 = (float*)myMalloc(1 * sizeof(float));; x1677[0] = 1.0f; float* x1679 = (float*)myGpuMalloc(x1660 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1657, x1657)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1677, x_desc, x1669, x1675, x_desc, x1679)); }; if (x1683) { } else { assert(false && "ERROR not specified"); } float* x1696 = (float*)myGpuMalloc(x1695 * sizeof(float)); float* x1697 = (float*)myMalloc(1 * sizeof(float));; x1697[0] = 0.0f; float* x1699 = (float*)myMalloc(1 * sizeof(float));; x1699[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1657, x1657)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1690, x1690)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1699, in_desc, x1679, filt_desc, x963, conv_desc, algo, ws_data, ws_size, x1697, out_desc, x1696)); }; float* x1702 = (float*)myGpuMalloc(x1693 * sizeof(float)); float* x1703 = (float*)myMalloc(1 * sizeof(float));; x1703[0] = 0.0f; float* x1705 = (float*)myMalloc(1 * sizeof(float));; x1705[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1690, x1690)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1690, x1690)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1705, x1705, in_desc, x1696, out_desc, x1702, sbmv_desc, x282, x543, x363, x933, 1.0E-5)); }; float* x1708 = (float*)myMalloc(1 * sizeof(float));; x1708[0] = 0.0f; float* x1710 = (float*)myMalloc(1 * sizeof(float));; x1710[0] = 1.0f; float* x1712 = (float*)myGpuMalloc(x1693 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1690, x1690)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1710, x_desc, x1702, x1708, x_desc, x1712)); }; if (x1715) { } else { assert(false && "ERROR not specified"); } float* x1727 = (float*)myGpuMalloc(x1726 * sizeof(float)); float* x1728 = (float*)myMalloc(1 * sizeof(float));; x1728[0] = 0.0f; float* x1730 = (float*)myMalloc(1 * sizeof(float));; x1730[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1690, x1690)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); 
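// 1x1 expansion back to 512 channels. The cudnnBatchNormalizationForwardInference
// call that follows uses the 1 x 512 x 1 x 1 sbmv_desc for all four per-channel
// parameters (scale, bias, running mean, running variance) with epsilon 1e-5, and
// the residual x1648 is then added in with cudnnAddTensor before the final ReLU.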
CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1730, in_desc, x1712, filt_desc, x591, conv_desc, algo, ws_data, ws_size, x1728, out_desc, x1727)); }; float* x1733 = (float*)myGpuMalloc(x1724 * sizeof(float)); float* x1734 = (float*)myMalloc(1 * sizeof(float));; x1734[0] = 0.0f; float* x1736 = (float*)myMalloc(1 * sizeof(float));; x1736[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1736, x1736, in_desc, x1727, out_desc, x1733, sbmv_desc, x414, x996, x699, x522, 1.0E-5)); }; if (x1742) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1591) x Sym(1591), res: x Const(64) x Const(512) x Sym(1721) x Sym(1721)"); } float* x1747 = (float*)myMalloc(1 * sizeof(float));; x1747[0] = 1.0f; float* x1749 = (float*)myMalloc(1 * sizeof(float));; x1749[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1591, x1591)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1747, bias_desc, x1648, x1749, out_desc, x1733)); }; float* x1752 = (float*)myMalloc(1 * sizeof(float));; x1752[0] = 0.0f; float* x1754 = (float*)myMalloc(1 * sizeof(float));; x1754[0] = 1.0f; float* x1756 = (float*)myGpuMalloc(x1724 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1754, x_desc, x1733, x1752, x_desc, x1756)); }; if (x1759) { } else { assert(false && "ERROR not 
specified"); } float* x1771 = (float*)myGpuMalloc(x1770 * sizeof(float)); float* x1772 = (float*)myMalloc(1 * sizeof(float));; x1772[0] = 0.0f; float* x1774 = (float*)myMalloc(1 * sizeof(float));; x1774[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1765, x1765)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1774, in_desc, x1756, filt_desc, x846, conv_desc, algo, ws_data, ws_size, x1772, out_desc, x1771)); }; float* x1777 = (float*)myGpuMalloc(x1768 * sizeof(float)); float* x1778 = (float*)myMalloc(1 * sizeof(float));; x1778[0] = 0.0f; float* x1780 = (float*)myMalloc(1 * sizeof(float));; x1780[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1765, x1765)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1765, x1765)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1780, x1780, in_desc, x1771, out_desc, x1777, sbmv_desc, x393, x768, x594, x285, 1.0E-5)); }; float* x1783 = (float*)myMalloc(1 * sizeof(float));; x1783[0] = 0.0f; float* x1785 = (float*)myMalloc(1 * sizeof(float));; x1785[0] = 1.0f; float* x1787 = (float*)myGpuMalloc(x1768 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1765, x1765)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1785, x_desc, x1777, x1783, x_desc, x1787)); }; if (x1791) { } else { assert(false && "ERROR not specified"); } float* x1804 = (float*)myGpuMalloc(x1803 * sizeof(float)); float* x1805 = (float*)myMalloc(1 * sizeof(float));; x1805[0] = 0.0f; 
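// Observation rather than a fix: the descriptors and workspace buffers created in
// these blocks are never destroyed or freed here; presumably myGpuMalloc/myMalloc
// are arena-style allocators owned by the generated program. Hand-written cuDNN
// code would normally end each block with the matching cleanup, e.g.:
//   CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc));
//   CUDNN_CALL(cudnnDestroyFilterDescriptor(filt_desc));
//   CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc));
//   CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc));
//   cudaFree(ws_data);   // only if the workspace came from cudaMalloc directly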
float* x1807 = (float*)myMalloc(1 * sizeof(float));; x1807[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1765, x1765)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1798, x1798)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1807, in_desc, x1787, filt_desc, x831, conv_desc, algo, ws_data, ws_size, x1805, out_desc, x1804)); }; float* x1810 = (float*)myGpuMalloc(x1801 * sizeof(float)); float* x1811 = (float*)myMalloc(1 * sizeof(float));; x1811[0] = 0.0f; float* x1813 = (float*)myMalloc(1 * sizeof(float));; x1813[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1798, x1798)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1798, x1798)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1813, x1813, in_desc, x1804, out_desc, x1810, sbmv_desc, x639, x441, x909, x1056, 1.0E-5)); }; float* x1816 = (float*)myMalloc(1 * sizeof(float));; x1816[0] = 0.0f; float* x1818 = (float*)myMalloc(1 * sizeof(float));; x1818[0] = 1.0f; float* x1820 = (float*)myGpuMalloc(x1801 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1798, x1798)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1818, x_desc, x1810, x1816, x_desc, x1820)); }; if (x1823) { } else { assert(false && "ERROR not specified"); } float* x1835 = (float*)myGpuMalloc(x1834 * sizeof(float)); float* x1836 = (float*)myMalloc(1 * sizeof(float));; x1836[0] = 0.0f; float* x1838 = (float*)myMalloc(1 * sizeof(float));; x1838[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; 
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1798, x1798)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1838, in_desc, x1820, filt_desc, x381, conv_desc, algo, ws_data, ws_size, x1836, out_desc, x1835)); }; float* x1841 = (float*)myGpuMalloc(x1832 * sizeof(float)); float* x1842 = (float*)myMalloc(1 * sizeof(float));; x1842[0] = 0.0f; float* x1844 = (float*)myMalloc(1 * sizeof(float));; x1844[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1844, x1844, in_desc, x1835, out_desc, x1841, sbmv_desc, x759, x504, x333, x927, 1.0E-5)); }; if (x1850) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1721) x Sym(1721), res: x Const(64) x Const(512) x Sym(1829) x Sym(1829)"); } float* x1855 = (float*)myMalloc(1 * sizeof(float));; x1855[0] = 1.0f; float* x1857 = (float*)myMalloc(1 * sizeof(float));; x1857[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1721, x1721)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1855, bias_desc, x1756, x1857, out_desc, x1841)); }; float* x1860 = (float*)myMalloc(1 * sizeof(float));; x1860[0] = 0.0f; float* x1862 = (float*)myMalloc(1 * sizeof(float));; x1862[0] = 1.0f; float* x1864 = (float*)myGpuMalloc(x1832 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, 
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1862, x_desc, x1841, x1860, x_desc, x1864)); }; if (x1867) { } else { assert(false && "ERROR not specified"); } float* x1879 = (float*)myGpuMalloc(x1878 * sizeof(float)); float* x1880 = (float*)myMalloc(1 * sizeof(float));; x1880[0] = 0.0f; float* x1882 = (float*)myMalloc(1 * sizeof(float));; x1882[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1873, x1873)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
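// The output sizes used above (e.g. the symbolic x1873) were precomputed by the
// code generator. As a sketch of the equivalent runtime check -- not something the
// generated program does -- cuDNN can derive the forward output shape from the
// descriptors already in scope at this point:
int sketch_n, sketch_c, sketch_h, sketch_w;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
    conv_desc, in_desc, filt_desc, &sketch_n, &sketch_c, &sketch_h, &sketch_w));
// For this 1x1, stride-1 reduce convolution the result should match the out_desc
// set above: (64, 128, x1873, x1873).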
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1882, in_desc, x1864, filt_desc, x654, conv_desc, algo, ws_data, ws_size, x1880, out_desc, x1879)); }; float* x1885 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1886 = (float*)myMalloc(1 * sizeof(float));; x1886[0] = 0.0f; float* x1888 = (float*)myMalloc(1 * sizeof(float));; x1888[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1873, x1873)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1873, x1873)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1888, x1888, in_desc, x1879, out_desc, x1885, sbmv_desc, x375, x984, x966, x1041, 1.0E-5)); }; float* x1891 = (float*)myMalloc(1 * sizeof(float));; x1891[0] = 0.0f; float* x1893 = (float*)myMalloc(1 * sizeof(float));; x1893[0] = 1.0f; float* x1895 = (float*)myGpuMalloc(x1876 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1873, x1873)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1893, x_desc, x1885, x1891, x_desc, x1895)); }; if (x1899) { } else { assert(false && "ERROR not specified"); } float* x1912 = (float*)myGpuMalloc(x1911 * sizeof(float)); float* x1913 = (float*)myMalloc(1 * sizeof(float));; x1913[0] = 0.0f; float* x1915 = (float*)myMalloc(1 * sizeof(float));; x1915[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1873, x1873)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1906, x1906)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
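// 3x3 convolution of the same bottleneck (128 -> 128 channels); with pad 1 and
// stride 1 the spatial size is preserved, so x1906 equals x1873 here.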
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1915, in_desc, x1895, filt_desc, x753, conv_desc, algo, ws_data, ws_size, x1913, out_desc, x1912)); }; float* x1918 = (float*)myGpuMalloc(x1909 * sizeof(float)); float* x1919 = (float*)myMalloc(1 * sizeof(float));; x1919[0] = 0.0f; float* x1921 = (float*)myMalloc(1 * sizeof(float));; x1921[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1906, x1906)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1906, x1906)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1921, x1921, in_desc, x1912, out_desc, x1918, sbmv_desc, x495, x372, x1062, x702, 1.0E-5)); }; float* x1924 = (float*)myMalloc(1 * sizeof(float));; x1924[0] = 0.0f; float* x1926 = (float*)myMalloc(1 * sizeof(float));; x1926[0] = 1.0f; float* x1928 = (float*)myGpuMalloc(x1909 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1906, x1906)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1926, x_desc, x1918, x1924, x_desc, x1928)); }; if (x1931) { } else { assert(false && "ERROR not specified"); } float* x1943 = (float*)myGpuMalloc(x1942 * sizeof(float)); float* x1944 = (float*)myMalloc(1 * sizeof(float));; x1944[0] = 0.0f; float* x1946 = (float*)myMalloc(1 * sizeof(float));; x1946[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1906, x1906)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
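// 1x1 expansion (128 -> 512 channels) closing this bottleneck; after batch norm,
// the block input x1864 is added back in with cudnnAddTensor and the sum goes
// through ReLU.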
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1946, in_desc, x1928, filt_desc, x423, conv_desc, algo, ws_data, ws_size, x1944, out_desc, x1943)); }; float* x1949 = (float*)myGpuMalloc(x1940 * sizeof(float)); float* x1950 = (float*)myMalloc(1 * sizeof(float));; x1950[0] = 0.0f; float* x1952 = (float*)myMalloc(1 * sizeof(float));; x1952[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1952, x1952, in_desc, x1943, out_desc, x1949, sbmv_desc, x726, x420, x315, x960, 1.0E-5)); }; if (x1958) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1829) x Sym(1829), res: x Const(64) x Const(512) x Sym(1937) x Sym(1937)"); } float* x1963 = (float*)myMalloc(1 * sizeof(float));; x1963[0] = 1.0f; float* x1965 = (float*)myMalloc(1 * sizeof(float));; x1965[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1829, x1829)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1963, bias_desc, x1864, x1965, out_desc, x1949)); }; float* x1968 = (float*)myMalloc(1 * sizeof(float));; x1968[0] = 0.0f; float* x1970 = (float*)myMalloc(1 * sizeof(float));; x1970[0] = 1.0f; float* x1972 = (float*)myGpuMalloc(x1940 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1970, x_desc, x1949, x1968, x_desc, x1972)); }; if (x1975) { } else { assert(false && "ERROR not specified"); } float* x1987 = (float*)myGpuMalloc(x1986 * sizeof(float)); float* x1988 = (float*)myMalloc(1 * sizeof(float));; x1988[0] = 0.0f; float* x1990 = (float*)myMalloc(1 * sizeof(float));; x1990[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1981, x1981)); 
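// First block of the next stage: 1x1 reduce from 512 to 256 channels on x1972.
// The 3x3 convolution that follows uses stride 2, and a stride-2 1x1 projection
// shortcut (512 -> 1024 channels) further below brings the skip path to the same
// shape before the residual add.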
cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1990, in_desc, x1972, filt_desc, x798, conv_desc, algo, ws_data, ws_size, x1988, out_desc, x1987)); }; float* x1993 = (float*)myGpuMalloc(x1984 * sizeof(float)); float* x1994 = (float*)myMalloc(1 * sizeof(float));; x1994[0] = 0.0f; float* x1996 = (float*)myMalloc(1 * sizeof(float));; x1996[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1981, x1981)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1981, x1981)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1996, x1996, in_desc, x1987, out_desc, x1993, sbmv_desc, x1068, x321, x651, x852, 1.0E-5)); }; float* x1999 = (float*)myMalloc(1 * sizeof(float));; x1999[0] = 0.0f; float* x2001 = (float*)myMalloc(1 * sizeof(float));; x2001[0] = 1.0f; float* x2003 = (float*)myGpuMalloc(x1984 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1981, x1981)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2001, x_desc, x1993, x1999, x_desc, x2003)); }; if (x2007) { } else { assert(false && "ERROR not specified"); } float* x2020 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2021 = (float*)myMalloc(1 * sizeof(float));; x2021[0] = 0.0f; float* x2023 = (float*)myMalloc(1 * sizeof(float));; x2023[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1981, x1981)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( 
conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2023, in_desc, x2003, filt_desc, x783, conv_desc, algo, ws_data, ws_size, x2021, out_desc, x2020)); }; float* x2026 = (float*)myGpuMalloc(x2017 * sizeof(float)); float* x2027 = (float*)myMalloc(1 * sizeof(float));; x2027[0] = 0.0f; float* x2029 = (float*)myMalloc(1 * sizeof(float));; x2029[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2014, x2014)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2014, x2014)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2029, x2029, in_desc, x2020, out_desc, x2026, sbmv_desc, x582, x306, x945, x555, 1.0E-5)); }; float* x2032 = (float*)myMalloc(1 * sizeof(float));; x2032[0] = 0.0f; float* x2034 = (float*)myMalloc(1 * sizeof(float));; x2034[0] = 1.0f; float* x2036 = (float*)myGpuMalloc(x2017 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2014, x2014)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2034, x_desc, x2026, x2032, x_desc, x2036)); }; if (x2039) { } else { assert(false && "ERROR not specified"); } float* x2051 = (float*)myGpuMalloc(x2050 * sizeof(float)); float* x2052 = (float*)myMalloc(1 * sizeof(float));; x2052[0] = 0.0f; float* x2054 = (float*)myMalloc(1 * sizeof(float));; x2054[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2014, x2014)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, 
CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2054, in_desc, x2036, filt_desc, x1065, conv_desc, algo, ws_data, ws_size, x2052, out_desc, x2051)); }; float* x2057 = (float*)myGpuMalloc(x2048 * sizeof(float)); float* x2058 = (float*)myMalloc(1 * sizeof(float));; x2058[0] = 0.0f; float* x2060 = (float*)myMalloc(1 * sizeof(float));; x2060[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2060, x2060, in_desc, x2051, out_desc, x2057, sbmv_desc, x312, x609, x906, x1059, 1.0E-5)); }; if (x1975) { } else { assert(false && "ERROR not specified"); } float* x2073 = (float*)myGpuMalloc(x2072 * sizeof(float)); float* x2074 = (float*)myMalloc(1 * sizeof(float));; x2074[0] = 0.0f; float* x2076 = (float*)myMalloc(1 * sizeof(float));; x2076[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1937, x1937)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2067, x2067)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2076, in_desc, x1972, filt_desc, x483, conv_desc, algo, ws_data, ws_size, x2074, out_desc, x2073)); }; float* x2079 = (float*)myGpuMalloc(x2070 * sizeof(float)); float* x2080 = (float*)myMalloc(1 * sizeof(float));; x2080[0] = 0.0f; float* x2082 = (float*)myMalloc(1 * sizeof(float));; x2082[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2067, x2067)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2067, x2067)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2082, x2082, in_desc, x2073, out_desc, x2079, sbmv_desc, x345, x918, x516, x891, 1.0E-5)); }; if (x2088) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2067) x Sym(2067), res: x Const(64) x Const(1024) x Sym(2045) x Sym(2045)"); } float* x2093 = (float*)myMalloc(1 * sizeof(float));; x2093[0] = 1.0f; float* x2095 = (float*)myMalloc(1 * sizeof(float));; x2095[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2067, x2067)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2093, bias_desc, x2079, x2095, out_desc, x2057)); }; float* x2098 = (float*)myMalloc(1 * sizeof(float));; x2098[0] = 0.0f; float* x2100 = (float*)myMalloc(1 * sizeof(float));; x2100[0] = 1.0f; float* x2102 = (float*)myGpuMalloc(x2048 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2100, x_desc, x2057, x2098, x_desc, x2102)); }; if (x2105) { } else { assert(false && "ERROR not specified"); } float* x2117 = (float*)myGpuMalloc(x2116 * sizeof(float)); float* x2118 = (float*)myMalloc(1 * sizeof(float));; x2118[0] = 0.0f; float* x2120 = (float*)myMalloc(1 * sizeof(float));; x2120[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2111, x2111)); 
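// Convolution descriptor for the 1x1 reduction convolution that opens the next residual (bottleneck-style) unit:
// 1024 -> 256 channels, no padding, stride 1 (input x2102, filter x297).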
cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2120, in_desc, x2102, filt_desc, x297, conv_desc, algo, ws_data, ws_size, x2118, out_desc, x2117)); }; float* x2123 = (float*)myGpuMalloc(x2114 * sizeof(float)); float* x2124 = (float*)myMalloc(1 * sizeof(float));; x2124[0] = 0.0f; float* x2126 = (float*)myMalloc(1 * sizeof(float));; x2126[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2111, x2111)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2111, x2111)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2126, x2126, in_desc, x2117, out_desc, x2123, sbmv_desc, x348, x915, x1035, x729, 1.0E-5)); }; float* x2129 = (float*)myMalloc(1 * sizeof(float));; x2129[0] = 0.0f; float* x2131 = (float*)myMalloc(1 * sizeof(float));; x2131[0] = 1.0f; float* x2133 = (float*)myGpuMalloc(x2114 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2111, x2111)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2131, x_desc, x2123, x2129, x_desc, x2133)); }; if (x2137) { } else { assert(false && "ERROR not specified"); } float* x2150 = (float*)myGpuMalloc(x2149 * sizeof(float)); float* x2151 = (float*)myMalloc(1 * sizeof(float));; x2151[0] = 0.0f; float* x2153 = (float*)myMalloc(1 * sizeof(float));; x2153[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2111, x2111)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2144, x2144)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( 
conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2153, in_desc, x2133, filt_desc, x351, conv_desc, algo, ws_data, ws_size, x2151, out_desc, x2150)); }; float* x2156 = (float*)myGpuMalloc(x2147 * sizeof(float)); float* x2157 = (float*)myMalloc(1 * sizeof(float));; x2157[0] = 0.0f; float* x2159 = (float*)myMalloc(1 * sizeof(float));; x2159[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2144, x2144)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2144, x2144)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2159, x2159, in_desc, x2150, out_desc, x2156, sbmv_desc, x1071, x546, x858, x969, 1.0E-5)); }; float* x2162 = (float*)myMalloc(1 * sizeof(float));; x2162[0] = 0.0f; float* x2164 = (float*)myMalloc(1 * sizeof(float));; x2164[0] = 1.0f; float* x2166 = (float*)myGpuMalloc(x2147 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2144, x2144)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2164, x_desc, x2156, x2162, x_desc, x2166)); }; if (x2169) { } else { assert(false && "ERROR not specified"); } float* x2181 = (float*)myGpuMalloc(x2180 * sizeof(float)); float* x2182 = (float*)myMalloc(1 * sizeof(float));; x2182[0] = 0.0f; float* x2184 = (float*)myMalloc(1 * sizeof(float));; x2184[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2144, x2144)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, 
CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2184, in_desc, x2166, filt_desc, x426, conv_desc, algo, ws_data, ws_size, x2182, out_desc, x2181)); }; float* x2187 = (float*)myGpuMalloc(x2178 * sizeof(float)); float* x2188 = (float*)myMalloc(1 * sizeof(float));; x2188[0] = 0.0f; float* x2190 = (float*)myMalloc(1 * sizeof(float));; x2190[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2190, x2190, in_desc, x2181, out_desc, x2187, sbmv_desc, x318, x954, x804, x687, 1.0E-5)); }; if (x2196) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2045) x Sym(2045), res: x Const(64) x Const(1024) x Sym(2175) x Sym(2175)"); } float* x2201 = (float*)myMalloc(1 * sizeof(float));; x2201[0] = 1.0f; float* x2203 = (float*)myMalloc(1 * sizeof(float));; x2203[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2045, x2045)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2201, bias_desc, x2102, x2203, out_desc, x2187)); }; float* x2206 = (float*)myMalloc(1 * sizeof(float));; x2206[0] = 0.0f; float* x2208 = (float*)myMalloc(1 * sizeof(float));; x2208[0] = 1.0f; float* x2210 = (float*)myGpuMalloc(x2178 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2208, x_desc, x2187, x2206, x_desc, x2210)); }; if (x2213) { } else { assert(false && "ERROR not specified"); } float* x2225 = (float*)myGpuMalloc(x2224 * sizeof(float)); float* x2226 = (float*)myMalloc(1 * sizeof(float));; x2226[0] = 0.0f; float* x2228 = (float*)myMalloc(1 * sizeof(float));; x2228[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2219, x2219)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2228, in_desc, x2210, filt_desc, x912, conv_desc, algo, ws_data, ws_size, x2226, out_desc, x2225)); }; float* x2231 = (float*)myGpuMalloc(x2222 * sizeof(float)); float* x2232 = (float*)myMalloc(1 * sizeof(float));; x2232[0] = 0.0f; float* x2234 = (float*)myMalloc(1 * sizeof(float));; x2234[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2219, x2219)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2219, x2219)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2234, x2234, in_desc, x2225, out_desc, x2231, sbmv_desc, x645, x849, x792, x780, 1.0E-5)); }; float* x2237 = (float*)myMalloc(1 * sizeof(float));; x2237[0] = 0.0f; float* x2239 = (float*)myMalloc(1 * sizeof(float));; x2239[0] = 1.0f; float* x2241 = (float*)myGpuMalloc(x2222 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2219, x2219)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2239, x_desc, x2231, x2237, x_desc, x2241)); }; if (x2245) { } else { assert(false && "ERROR not specified"); } float* x2258 = (float*)myGpuMalloc(x2257 * sizeof(float)); float* x2259 = (float*)myMalloc(1 * sizeof(float));; x2259[0] = 0.0f; float* x2261 = (float*)myMalloc(1 * sizeof(float));; x2261[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2219, x2219)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
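// Filter for the 3x3 convolution in the middle of this unit: 256 -> 256 channels, pad 1, stride 1 (input x2241, filter x300).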
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2252, x2252)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2261, in_desc, x2241, filt_desc, x300, conv_desc, algo, ws_data, ws_size, x2259, out_desc, x2258)); }; float* x2264 = (float*)myGpuMalloc(x2255 * sizeof(float)); float* x2265 = (float*)myMalloc(1 * sizeof(float));; x2265[0] = 0.0f; float* x2267 = (float*)myMalloc(1 * sizeof(float));; x2267[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2252, x2252)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2252, x2252)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2267, x2267, in_desc, x2258, out_desc, x2264, sbmv_desc, x942, x834, x630, x447, 1.0E-5)); }; float* x2270 = (float*)myMalloc(1 * sizeof(float));; x2270[0] = 0.0f; float* x2272 = (float*)myMalloc(1 * sizeof(float));; x2272[0] = 1.0f; float* x2274 = (float*)myGpuMalloc(x2255 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2252, x2252)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2272, x_desc, x2264, x2270, x_desc, x2274)); }; if (x2277) { } else { assert(false && "ERROR not specified"); } float* x2289 = (float*)myGpuMalloc(x2288 * sizeof(float)); float* x2290 = (float*)myMalloc(1 * sizeof(float));; x2290[0] = 0.0f; float* x2292 = (float*)myMalloc(1 * sizeof(float));; x2292[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2252, x2252)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; 
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2292, in_desc, x2274, filt_desc, x606, conv_desc, algo, ws_data, ws_size, x2290, out_desc, x2289)); }; float* x2295 = (float*)myGpuMalloc(x2286 * sizeof(float)); float* x2296 = (float*)myMalloc(1 * sizeof(float));; x2296[0] = 0.0f; float* x2298 = (float*)myMalloc(1 * sizeof(float));; x2298[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2298, x2298, in_desc, x2289, out_desc, x2295, sbmv_desc, x1047, x429, x678, x822, 1.0E-5)); }; if (x2304) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2175) x Sym(2175), res: x Const(64) x Const(1024) x Sym(2283) x Sym(2283)"); } float* x2309 = (float*)myMalloc(1 * sizeof(float));; x2309[0] = 1.0f; float* x2311 = (float*)myMalloc(1 * sizeof(float));; x2311[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2175, x2175)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2309, bias_desc, x2210, x2311, out_desc, x2295)); }; float* x2314 = (float*)myMalloc(1 * sizeof(float));; x2314[0] = 0.0f; float* x2316 = (float*)myMalloc(1 * sizeof(float));; x2316[0] = 1.0f; float* x2318 = (float*)myGpuMalloc(x2286 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2316, x_desc, x2295, x2314, x_desc, 
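// ReLU on the unit output x2295 (the residual x2210 was just added via cudnnAddTensor), producing x2318.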
x2318)); }; if (x2321) { } else { assert(false && "ERROR not specified"); } float* x2333 = (float*)myGpuMalloc(x2332 * sizeof(float)); float* x2334 = (float*)myMalloc(1 * sizeof(float));; x2334[0] = 0.0f; float* x2336 = (float*)myMalloc(1 * sizeof(float));; x2336[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2327, x2327)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2336, in_desc, x2318, filt_desc, x276, conv_desc, algo, ws_data, ws_size, x2334, out_desc, x2333)); }; float* x2339 = (float*)myGpuMalloc(x2330 * sizeof(float)); float* x2340 = (float*)myMalloc(1 * sizeof(float));; x2340[0] = 0.0f; float* x2342 = (float*)myMalloc(1 * sizeof(float));; x2342[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2327, x2327)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2327, x2327)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2342, x2342, in_desc, x2333, out_desc, x2339, sbmv_desc, x534, x981, x747, x552, 1.0E-5)); }; float* x2345 = (float*)myMalloc(1 * sizeof(float));; x2345[0] = 0.0f; float* x2347 = (float*)myMalloc(1 * sizeof(float));; x2347[0] = 1.0f; float* x2349 = (float*)myGpuMalloc(x2330 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2327, x2327)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2347, x_desc, x2339, x2345, x_desc, x2349)); }; if (x2353) { } else { assert(false && "ERROR not specified"); } float* x2366 = (float*)myGpuMalloc(x2365 * sizeof(float)); float* 
x2367 = (float*)myMalloc(1 * sizeof(float));; x2367[0] = 0.0f; float* x2369 = (float*)myMalloc(1 * sizeof(float));; x2369[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2327, x2327)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2360, x2360)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2369, in_desc, x2349, filt_desc, x1005, conv_desc, algo, ws_data, ws_size, x2367, out_desc, x2366)); }; float* x2372 = (float*)myGpuMalloc(x2363 * sizeof(float)); float* x2373 = (float*)myMalloc(1 * sizeof(float));; x2373[0] = 0.0f; float* x2375 = (float*)myMalloc(1 * sizeof(float));; x2375[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2360, x2360)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2360, x2360)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2375, x2375, in_desc, x2366, out_desc, x2372, sbmv_desc, x480, x666, x816, x948, 1.0E-5)); }; float* x2378 = (float*)myMalloc(1 * sizeof(float));; x2378[0] = 0.0f; float* x2380 = (float*)myMalloc(1 * sizeof(float));; x2380[0] = 1.0f; float* x2382 = (float*)myGpuMalloc(x2363 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2360, x2360)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2380, x_desc, x2372, x2378, x_desc, x2382)); }; if (x2385) { } else { assert(false && "ERROR not specified"); } float* x2397 = (float*)myGpuMalloc(x2396 * sizeof(float)); float* x2398 = (float*)myMalloc(1 * sizeof(float));; x2398[0] = 0.0f; float* x2400 = (float*)myMalloc(1 * sizeof(float));; x2400[0] = 1.0f; { 
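// 1x1 expansion convolution of this unit: 256 -> 1024 channels, no padding, stride 1 (input x2382, filter x525, output x2397).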
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2360, x2360)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2400, in_desc, x2382, filt_desc, x525, conv_desc, algo, ws_data, ws_size, x2398, out_desc, x2397)); }; float* x2403 = (float*)myGpuMalloc(x2394 * sizeof(float)); float* x2404 = (float*)myMalloc(1 * sizeof(float));; x2404[0] = 0.0f; float* x2406 = (float*)myMalloc(1 * sizeof(float));; x2406[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2406, x2406, in_desc, x2397, out_desc, x2403, sbmv_desc, x972, x696, x951, x741, 1.0E-5)); }; if (x2412) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2283) x Sym(2283), res: x Const(64) x Const(1024) x Sym(2391) x Sym(2391)"); } float* x2417 = (float*)myMalloc(1 * sizeof(float));; x2417[0] = 1.0f; float* x2419 = (float*)myMalloc(1 * sizeof(float));; x2419[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2283, x2283)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2417, bias_desc, x2318, x2419, out_desc, x2403)); }; float* x2422 = (float*)myMalloc(1 * sizeof(float));; x2422[0] = 0.0f; float* x2424 = (float*)myMalloc(1 * sizeof(float));; x2424[0] = 1.0f; float* x2426 = (float*)myGpuMalloc(x2394 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); 
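// ReLU on the summed unit output x2403 (residual x2318 already added), writing the result to x2426.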
CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2424, x_desc, x2403, x2422, x_desc, x2426)); }; if (x2429) { } else { assert(false && "ERROR not specified"); } float* x2441 = (float*)myGpuMalloc(x2440 * sizeof(float)); float* x2442 = (float*)myMalloc(1 * sizeof(float));; x2442[0] = 0.0f; float* x2444 = (float*)myMalloc(1 * sizeof(float));; x2444[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2435, x2435)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2444, in_desc, x2426, filt_desc, x324, conv_desc, algo, ws_data, ws_size, x2442, out_desc, x2441)); }; float* x2447 = (float*)myGpuMalloc(x2438 * sizeof(float)); float* x2448 = (float*)myMalloc(1 * sizeof(float));; x2448[0] = 0.0f; float* x2450 = (float*)myMalloc(1 * sizeof(float));; x2450[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2435, x2435)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2435, x2435)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2450, x2450, in_desc, x2441, out_desc, x2447, sbmv_desc, x489, x813, x1020, x465, 1.0E-5)); }; float* x2453 = (float*)myMalloc(1 * sizeof(float));; x2453[0] = 0.0f; float* x2455 = (float*)myMalloc(1 * sizeof(float));; x2455[0] = 1.0f; float* x2457 = (float*)myGpuMalloc(x2438 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2435, x2435)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2455, x_desc, x2447, x2453, x_desc, x2457)); }; if (x2461) { } else { assert(false && "ERROR not specified"); } float* x2474 = (float*)myGpuMalloc(x2473 * sizeof(float)); float* x2475 = (float*)myMalloc(1 * sizeof(float));; x2475[0] = 0.0f; float* x2477 = (float*)myMalloc(1 * sizeof(float));; x2477[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2435, x2435)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2468, x2468)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2477, in_desc, x2457, filt_desc, x1044, conv_desc, algo, ws_data, ws_size, x2475, out_desc, x2474)); }; float* x2480 = (float*)myGpuMalloc(x2471 * sizeof(float)); float* x2481 = (float*)myMalloc(1 * sizeof(float));; x2481[0] = 0.0f; float* x2483 = (float*)myMalloc(1 * sizeof(float));; x2483[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2468, x2468)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2468, x2468)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2483, x2483, in_desc, x2474, out_desc, x2480, sbmv_desc, x762, x585, x1008, x570, 1.0E-5)); }; float* x2486 = (float*)myMalloc(1 * sizeof(float));; x2486[0] = 0.0f; float* x2488 = (float*)myMalloc(1 * sizeof(float));; x2488[0] = 1.0f; float* x2490 = (float*)myGpuMalloc(x2471 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2468, x2468)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2488, x_desc, x2480, x2486, x_desc, x2490)); }; if (x2493) { } else { assert(false && "ERROR not specified"); } float* x2505 = (float*)myGpuMalloc(x2504 * sizeof(float)); float* x2506 = (float*)myMalloc(1 * sizeof(float));; x2506[0] = 0.0f; float* x2508 = (float*)myMalloc(1 * sizeof(float));; x2508[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2468, x2468)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2508, in_desc, x2490, filt_desc, x921, conv_desc, algo, ws_data, ws_size, x2506, out_desc, x2505)); }; float* x2511 = (float*)myGpuMalloc(x2502 * sizeof(float)); float* x2512 = (float*)myMalloc(1 * sizeof(float));; x2512[0] = 0.0f; float* x2514 = (float*)myMalloc(1 * sizeof(float));; x2514[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2514, x2514, in_desc, x2505, out_desc, x2511, sbmv_desc, x435, x618, x885, x1074, 1.0E-5)); }; if (x2520) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2391) x Sym(2391), res: x Const(64) x Const(1024) x Sym(2499) x Sym(2499)"); } float* x2525 = (float*)myMalloc(1 * sizeof(float));; x2525[0] = 1.0f; float* x2527 = (float*)myMalloc(1 * sizeof(float));; x2527[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2391, x2391)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2525, bias_desc, x2426, x2527, out_desc, x2511)); }; float* x2530 = (float*)myMalloc(1 * sizeof(float));; x2530[0] = 0.0f; float* x2532 = (float*)myMalloc(1 * sizeof(float));; x2532[0] = 1.0f; float* x2534 = (float*)myGpuMalloc(x2502 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2532, x_desc, x2511, x2530, x_desc, x2534)); }; if (x2537) { } else { assert(false && "ERROR not specified"); } float* x2549 = (float*)myGpuMalloc(x2548 * sizeof(float)); float* x2550 = (float*)myMalloc(1 * sizeof(float));; x2550[0] = 0.0f; float* x2552 = (float*)myMalloc(1 * sizeof(float));; x2552[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2543, x2543)); 
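// Convolution descriptor for the 1x1 reduction convolution that opens the next unit:
// 1024 -> 256 channels, no padding, stride 1 (input x2534, filter x711).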
cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2552, in_desc, x2534, filt_desc, x711, conv_desc, algo, ws_data, ws_size, x2550, out_desc, x2549)); }; float* x2555 = (float*)myGpuMalloc(x2546 * sizeof(float)); float* x2556 = (float*)myMalloc(1 * sizeof(float));; x2556[0] = 0.0f; float* x2558 = (float*)myMalloc(1 * sizeof(float));; x2558[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2543, x2543)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2543, x2543)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2558, x2558, in_desc, x2549, out_desc, x2555, sbmv_desc, x513, x1017, x498, x786, 1.0E-5)); }; float* x2561 = (float*)myMalloc(1 * sizeof(float));; x2561[0] = 0.0f; float* x2563 = (float*)myMalloc(1 * sizeof(float));; x2563[0] = 1.0f; float* x2565 = (float*)myGpuMalloc(x2546 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2543, x2543)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2563, x_desc, x2555, x2561, x_desc, x2565)); }; if (x2569) { } else { assert(false && "ERROR not specified"); } float* x2582 = (float*)myGpuMalloc(x2581 * sizeof(float)); float* x2583 = (float*)myMalloc(1 * sizeof(float));; x2583[0] = 0.0f; float* x2585 = (float*)myMalloc(1 * sizeof(float));; x2585[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2543, x2543)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2576, x2576)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( 
conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2585, in_desc, x2565, filt_desc, x936, conv_desc, algo, ws_data, ws_size, x2583, out_desc, x2582)); }; float* x2588 = (float*)myGpuMalloc(x2579 * sizeof(float)); float* x2589 = (float*)myMalloc(1 * sizeof(float));; x2589[0] = 0.0f; float* x2591 = (float*)myMalloc(1 * sizeof(float));; x2591[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2576, x2576)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2576, x2576)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2591, x2591, in_desc, x2582, out_desc, x2588, sbmv_desc, x681, x825, x468, x978, 1.0E-5)); }; float* x2594 = (float*)myMalloc(1 * sizeof(float));; x2594[0] = 0.0f; float* x2596 = (float*)myMalloc(1 * sizeof(float));; x2596[0] = 1.0f; float* x2598 = (float*)myGpuMalloc(x2579 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2576, x2576)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2596, x_desc, x2588, x2594, x_desc, x2598)); }; if (x2601) { } else { assert(false && "ERROR not specified"); } float* x2613 = (float*)myGpuMalloc(x2612 * sizeof(float)); float* x2614 = (float*)myMalloc(1 * sizeof(float));; x2614[0] = 0.0f; float* x2616 = (float*)myMalloc(1 * sizeof(float));; x2616[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2576, x2576)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, 
CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2616, in_desc, x2598, filt_desc, x549, conv_desc, algo, ws_data, ws_size, x2614, out_desc, x2613)); }; float* x2619 = (float*)myGpuMalloc(x2610 * sizeof(float)); float* x2620 = (float*)myMalloc(1 * sizeof(float));; x2620[0] = 0.0f; float* x2622 = (float*)myMalloc(1 * sizeof(float));; x2622[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2622, x2622, in_desc, x2613, out_desc, x2619, sbmv_desc, x1002, x537, x624, x807, 1.0E-5)); }; if (x2628) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2499) x Sym(2499), res: x Const(64) x Const(1024) x Sym(2607) x Sym(2607)"); } float* x2633 = (float*)myMalloc(1 * sizeof(float));; x2633[0] = 1.0f; float* x2635 = (float*)myMalloc(1 * sizeof(float));; x2635[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2499, x2499)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2633, bias_desc, x2534, x2635, out_desc, x2619)); }; float* x2638 = (float*)myMalloc(1 * sizeof(float));; x2638[0] = 0.0f; float* x2640 = (float*)myMalloc(1 * sizeof(float));; x2640[0] = 1.0f; float* x2642 = (float*)myGpuMalloc(x2610 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2640, x_desc, x2619, x2638, x_desc, x2642)); }; if (x2645) { } else { assert(false && "ERROR not specified"); } float* x2657 = (float*)myGpuMalloc(x2656 * sizeof(float)); float* x2658 = (float*)myMalloc(1 * sizeof(float));; x2658[0] = 0.0f; float* x2660 = (float*)myMalloc(1 * sizeof(float));; x2660[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2651, x2651)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2660, in_desc, x2642, filt_desc, x675, conv_desc, algo, ws_data, ws_size, x2658, out_desc, x2657)); }; float* x2663 = (float*)myGpuMalloc(x2654 * sizeof(float)); float* x2664 = (float*)myMalloc(1 * sizeof(float));; x2664[0] = 0.0f; float* x2666 = (float*)myMalloc(1 * sizeof(float));; x2666[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2651, x2651)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2651, x2651)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2666, x2666, in_desc, x2657, out_desc, x2663, sbmv_desc, x861, x930, x459, x621, 1.0E-5)); }; float* x2669 = (float*)myMalloc(1 * sizeof(float));; x2669[0] = 0.0f; float* x2671 = (float*)myMalloc(1 * sizeof(float));; x2671[0] = 1.0f; float* x2673 = (float*)myGpuMalloc(x2654 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2651, x2651)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2671, x_desc, x2663, x2669, x_desc, x2673)); }; if (x2677) { } else { assert(false && "ERROR not specified"); } float* x2690 = (float*)myGpuMalloc(x2689 * sizeof(float)); float* x2691 = (float*)myMalloc(1 * sizeof(float));; x2691[0] = 0.0f; float* x2693 = (float*)myMalloc(1 * sizeof(float));; x2693[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2651, x2651)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2684, x2684)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2693, in_desc, x2673, filt_desc, x360, conv_desc, algo, ws_data, ws_size, x2691, out_desc, x2690)); }; float* x2696 = (float*)myGpuMalloc(x2687 * sizeof(float)); float* x2697 = (float*)myMalloc(1 * sizeof(float));; x2697[0] = 0.0f; float* x2699 = (float*)myMalloc(1 * sizeof(float));; x2699[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2684, x2684)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2684, x2684)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2699, x2699, in_desc, x2690, out_desc, x2696, sbmv_desc, x873, x735, x597, x408, 1.0E-5)); }; float* x2702 = (float*)myMalloc(1 * sizeof(float));; x2702[0] = 0.0f; float* x2704 = (float*)myMalloc(1 * sizeof(float));; x2704[0] = 1.0f; float* x2706 = (float*)myGpuMalloc(x2687 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2684, x2684)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2704, x_desc, x2696, x2702, x_desc, x2706)); }; if (x2709) { } else { assert(false && "ERROR not specified"); } float* x2721 = (float*)myGpuMalloc(x2720 * sizeof(float)); float* x2722 = (float*)myMalloc(1 * sizeof(float));; x2722[0] = 0.0f; float* x2724 = (float*)myMalloc(1 * sizeof(float));; x2724[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2684, x2684)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; 
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2724, in_desc, x2706, filt_desc, x894, conv_desc, algo, ws_data, ws_size, x2722, out_desc, x2721)); }; float* x2727 = (float*)myGpuMalloc(x2718 * sizeof(float)); float* x2728 = (float*)myMalloc(1 * sizeof(float));; x2728[0] = 0.0f; float* x2730 = (float*)myMalloc(1 * sizeof(float));; x2730[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2730, x2730, in_desc, x2721, out_desc, x2727, sbmv_desc, x975, x444, x603, x837, 1.0E-5)); }; if (x2645) { } else { assert(false && "ERROR not specified"); } float* x2743 = (float*)myGpuMalloc(x2742 * sizeof(float)); float* x2744 = (float*)myMalloc(1 * sizeof(float));; x2744[0] = 0.0f; float* x2746 = (float*)myMalloc(1 * sizeof(float));; x2746[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2607, x2607)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2737, x2737)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2746, in_desc, x2642, filt_desc, x900, conv_desc, algo, ws_data, ws_size, x2744, out_desc, x2743)); }; float* x2749 = (float*)myGpuMalloc(x2740 * sizeof(float)); float* x2750 = (float*)myMalloc(1 * sizeof(float));; x2750[0] = 0.0f; float* x2752 = (float*)myMalloc(1 * sizeof(float));; x2752[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2737, x2737)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2737, x2737)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2752, x2752, in_desc, x2743, out_desc, x2749, sbmv_desc, x777, x579, x450, x633, 1.0E-5)); }; if (x2758) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(2737) x Sym(2737), res: x Const(64) x Const(2048) x Sym(2715) x Sym(2715)"); } float* x2763 = (float*)myMalloc(1 * sizeof(float));; x2763[0] = 1.0f; float* x2765 = (float*)myMalloc(1 * sizeof(float));; x2765[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2737, x2737)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2763, bias_desc, x2749, x2765, out_desc, x2727)); }; float* x2768 = (float*)myMalloc(1 * sizeof(float));; x2768[0] = 0.0f; float* x2770 = (float*)myMalloc(1 * sizeof(float));; x2770[0] = 1.0f; float* x2772 = (float*)myGpuMalloc(x2718 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2770, x_desc, x2727, x2768, x_desc, x2772)); }; if (x2775) { } else { assert(false && "ERROR not specified"); } float* x2787 = (float*)myGpuMalloc(x2786 * sizeof(float)); float* x2788 = (float*)myMalloc(1 * sizeof(float));; x2788[0] = 0.0f; float* x2790 = (float*)myMalloc(1 * sizeof(float));; x2790[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2781, x2781)); 
cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2790, in_desc, x2772, filt_desc, x903, conv_desc, algo, ws_data, ws_size, x2788, out_desc, x2787)); }; float* x2793 = (float*)myGpuMalloc(x2784 * sizeof(float)); float* x2794 = (float*)myMalloc(1 * sizeof(float));; x2794[0] = 0.0f; float* x2796 = (float*)myMalloc(1 * sizeof(float));; x2796[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2781, x2781)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2781, x2781)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2796, x2796, in_desc, x2787, out_desc, x2793, sbmv_desc, x396, x669, x720, x453, 1.0E-5)); }; float* x2799 = (float*)myMalloc(1 * sizeof(float));; x2799[0] = 0.0f; float* x2801 = (float*)myMalloc(1 * sizeof(float));; x2801[0] = 1.0f; float* x2803 = (float*)myGpuMalloc(x2784 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2781, x2781)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2801, x_desc, x2793, x2799, x_desc, x2803)); }; if (x2807) { } else { assert(false && "ERROR not specified"); } float* x2820 = (float*)myGpuMalloc(x2819 * sizeof(float)); float* x2821 = (float*)myMalloc(1 * sizeof(float));; x2821[0] = 0.0f; float* x2823 = (float*)myMalloc(1 * sizeof(float));; x2823[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2781, x2781)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2814, x2814)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( 
conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2823, in_desc, x2803, filt_desc, x723, conv_desc, algo, ws_data, ws_size, x2821, out_desc, x2820)); }; float* x2826 = (float*)myGpuMalloc(x2817 * sizeof(float)); float* x2827 = (float*)myMalloc(1 * sizeof(float));; x2827[0] = 0.0f; float* x2829 = (float*)myMalloc(1 * sizeof(float));; x2829[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2814, x2814)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2814, x2814)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2829, x2829, in_desc, x2820, out_desc, x2826, sbmv_desc, x738, x456, x672, x843, 1.0E-5)); }; float* x2832 = (float*)myMalloc(1 * sizeof(float));; x2832[0] = 0.0f; float* x2834 = (float*)myMalloc(1 * sizeof(float));; x2834[0] = 1.0f; float* x2836 = (float*)myGpuMalloc(x2817 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2814, x2814)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2834, x_desc, x2826, x2832, x_desc, x2836)); }; if (x2839) { } else { assert(false && "ERROR not specified"); } float* x2851 = (float*)myGpuMalloc(x2850 * sizeof(float)); float* x2852 = (float*)myMalloc(1 * sizeof(float));; x2852[0] = 0.0f; float* x2854 = (float*)myMalloc(1 * sizeof(float));; x2854[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2814, x2814)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, 
CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2854, in_desc, x2836, filt_desc, x399, conv_desc, algo, ws_data, ws_size, x2852, out_desc, x2851)); }; float* x2857 = (float*)myGpuMalloc(x2848 * sizeof(float)); float* x2858 = (float*)myMalloc(1 * sizeof(float));; x2858[0] = 0.0f; float* x2860 = (float*)myMalloc(1 * sizeof(float));; x2860[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2860, x2860, in_desc, x2851, out_desc, x2857, sbmv_desc, x540, x690, x462, x993, 1.0E-5)); }; if (x2866) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(2715) x Sym(2715), res: x Const(64) x Const(2048) x Sym(2845) x Sym(2845)"); } float* x2871 = (float*)myMalloc(1 * sizeof(float));; x2871[0] = 1.0f; float* x2873 = (float*)myMalloc(1 * sizeof(float));; x2873[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2715, x2715)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2871, bias_desc, x2772, x2873, out_desc, x2857)); }; float* x2876 = (float*)myMalloc(1 * sizeof(float));; x2876[0] = 0.0f; float* x2878 = (float*)myMalloc(1 * sizeof(float));; x2878[0] = 1.0f; float* x2880 = (float*)myGpuMalloc(x2848 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2878, x_desc, x2857, x2876, x_desc, x2880)); }; if (x2883) { } else { assert(false && "ERROR not specified"); } float* x2895 = (float*)myGpuMalloc(x2894 * sizeof(float)); float* x2896 = (float*)myMalloc(1 * sizeof(float));; x2896[0] = 0.0f; float* x2898 = (float*)myMalloc(1 * sizeof(float));; x2898[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2889, x2889)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2898, in_desc, x2880, filt_desc, x1053, conv_desc, algo, ws_data, ws_size, x2896, out_desc, x2895)); }; float* x2901 = (float*)myGpuMalloc(x2892 * sizeof(float)); float* x2902 = (float*)myMalloc(1 * sizeof(float));; x2902[0] = 0.0f; float* x2904 = (float*)myMalloc(1 * sizeof(float));; x2904[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2889, x2889)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2889, x2889)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2904, x2904, in_desc, x2895, out_desc, x2901, sbmv_desc, x303, x492, x897, x1023, 1.0E-5)); }; float* x2907 = (float*)myMalloc(1 * sizeof(float));; x2907[0] = 0.0f; float* x2909 = (float*)myMalloc(1 * sizeof(float));; x2909[0] = 1.0f; float* x2911 = (float*)myGpuMalloc(x2892 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2889, x2889)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2909, x_desc, x2901, x2907, x_desc, x2911)); }; if (x2915) { } else { assert(false && "ERROR not specified"); } float* x2928 = (float*)myGpuMalloc(x2927 * sizeof(float)); float* x2929 = (float*)myMalloc(1 * sizeof(float));; x2929[0] = 0.0f; float* x2931 = (float*)myMalloc(1 * sizeof(float));; x2931[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2889, x2889)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2922, x2922)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2931, in_desc, x2911, filt_desc, x342, conv_desc, algo, ws_data, ws_size, x2929, out_desc, x2928)); }; float* x2934 = (float*)myGpuMalloc(x2925 * sizeof(float)); float* x2935 = (float*)myMalloc(1 * sizeof(float));; x2935[0] = 0.0f; float* x2937 = (float*)myMalloc(1 * sizeof(float));; x2937[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2922, x2922)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2922, x2922)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2937, x2937, in_desc, x2928, out_desc, x2934, sbmv_desc, x840, x765, x294, x864, 1.0E-5)); }; float* x2940 = (float*)myMalloc(1 * sizeof(float));; x2940[0] = 0.0f; float* x2942 = (float*)myMalloc(1 * sizeof(float));; x2942[0] = 1.0f; float* x2944 = (float*)myGpuMalloc(x2925 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2922, x2922)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2942, x_desc, x2934, x2940, x_desc, x2944)); }; if (x2947) { } else { assert(false && "ERROR not specified"); } float* x2959 = (float*)myGpuMalloc(x2958 * sizeof(float)); float* x2960 = (float*)myMalloc(1 * sizeof(float));; x2960[0] = 0.0f; float* x2962 = (float*)myMalloc(1 * sizeof(float));; x2962[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2922, x2922)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; 
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2953, x2953)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2962, in_desc, x2944, filt_desc, x357, conv_desc, algo, ws_data, ws_size, x2960, out_desc, x2959)); }; float* x2965 = (float*)myGpuMalloc(x2956 * sizeof(float)); float* x2966 = (float*)myMalloc(1 * sizeof(float));; x2966[0] = 0.0f; float* x2968 = (float*)myMalloc(1 * sizeof(float));; x2968[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2953, x2953)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2953, x2953)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardInference( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2968, x2968, in_desc, x2959, out_desc, x2965, sbmv_desc, x567, x801, x1038, x627, 1.0E-5)); }; if (x2974) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(2845) x Sym(2845), res: x Const(64) x Const(2048) x Sym(2953) x Sym(2953)"); } float* x2979 = (float*)myMalloc(1 * sizeof(float));; x2979[0] = 1.0f; float* x2981 = (float*)myMalloc(1 * sizeof(float));; x2981[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2845, x2845)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2953, x2953)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2979, bias_desc, x2880, x2981, out_desc, x2965)); }; float* x2984 = (float*)myMalloc(1 * sizeof(float));; x2984[0] = 0.0f; float* x2986 = (float*)myMalloc(1 * sizeof(float));; x2986[0] = 1.0f; float* x2988 = (float*)myGpuMalloc(x2956 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2953, x2953)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2986, x_desc, x2965, x2984, x_desc, 
x2988)); }; if (x2991) { } else { assert(false && "Image too small for averagePool_batch: x Const(64) x Const(2048) x Sym(2953) x Sym(2953)|(2,2)"); } float* x2996 = (float*)myMalloc(1 * sizeof(float));; x2996[0] = 0.0f; float* x2998 = (float*)myMalloc(1 * sizeof(float));; x2998[0] = 1.0f; float* x3008 = (float*)myGpuMalloc(x3007 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x2953, x2953) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3002, x3002)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 1, 1 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x2998, in_desc, x2988, x2996, out_desc, x3008)); }; int32_t x3010 = 0; int32_t x3011 = 1; x3011 *= 64; x3010 += 1; int32_t x3014 = x3010; bool x3015 = x3014 >= 2; if (x3015) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3021 = x3014 == 0; if (x3021) { int32_t x3022 = x3011; bool x3023 = x3022 == x3005; if (x3023) { } else { assert(false && "must same size!!"); } } else { } int32_t x3030 = x3011; // gemm: List(Const(64), Sym(3031)), Vector(Const(10), Const(2048)) float* x3034 = (float*)myGpuMalloc(640 * sizeof(float)); float* x3035 = (float*)myMalloc(1 * sizeof(float));; x3035[0] = 0.0f; float* x3037 = (float*)myMalloc(1 * sizeof(float));; x3037[0] = 1.0f; CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, 10,64,2048,x3037,x939,2048,x3008,2048,x3035,x3034,10)); float* x3040 = (float*)myMalloc(1 * sizeof(float));; x3040[0] = 1.0f; float* x3042 = (float*)myMalloc(1 * sizeof(float));; x3042[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3040, bias_desc, x402, x3042, out_desc, x3034)); }; // Tensor 'toCPU' invocation. float* x3046 = (float*)myMalloc(640 * sizeof(float));; CUDA_CALL(cudaMemcpy(x3046, x3034, 640 * sizeof(float), cudaMemcpyDeviceToHost)); printf("output (size Const(64) x Const(10))\n"); float x3049 = 0.0f; for(int x3051=0; x3051 < 640; x3051++) { float x3052 = x3049; float x3054 = x3046[x3051]; float x3053 = fabs(x3052); float x3055 = fabs(x3054); bool x3056 = x3053 > x3055; float x3059; if (x3056) { x3059 = x3052; } else { float x3057 = x3046[x3051]; x3059 = x3057; } x3049 = x3059; } float x3063 = x3049; printf("Max Abs: %.5f || ",x3063); for(int x3065=0; x3065 < 10; x3065++) { float x3066 = x3046[x3065]; printf("%.5f ",x3066); } printf("\n"); assert(false && "stop"); } // Backend cleanup. CUBLAS_CALL(cublasDestroy(cublasHandle)); CUDA_CALL(cudaFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
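/* The generated code above re-creates descriptors, re-queries the forward algorithm and
 * re-allocates a workspace for every convolution. As a readability aid only, the sketch
 * below factors that repeated convolution step into one helper. It is NOT part of the
 * generated output: it assumes the same cudnnHandle, CUDNN_CALL and myGpuMalloc helpers
 * used by the generated code, and it omits descriptor caching and error handling. */
static void conv2d_forward_sketch(cudnnHandle_t handle,
                                  cudnnTensorDescriptor_t in_desc, const float* x,
                                  cudnnFilterDescriptor_t filt_desc, const float* w,
                                  cudnnTensorDescriptor_t out_desc, float* y,
                                  int pad, int stride) {
  const float one = 1.0f, zero = 0.0f;
  cudnnConvolutionDescriptor_t conv_desc;
  CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
  CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1,
                                             CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
  // Pick the fastest forward algorithm and size its workspace, as the generated code does.
  cudnnConvolutionFwdAlgo_t algo;
  CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
      handle, in_desc, filt_desc, conv_desc, out_desc,
      CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
  size_t ws_size = 0;
  CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
      handle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
  void* ws_data = myGpuMalloc(ws_size);
  // y = conv(x, w); alpha/beta are host pointers, matching cuDNN's default pointer mode.
  CUDNN_CALL(cudnnConvolutionForward(handle, &one, in_desc, x, filt_desc, w, conv_desc,
                                     algo, ws_data, ws_size, &zero, out_desc, y));
  CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc));
}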
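/*------------------------ Bucketed QSGD quantization (PyTorch CUDA extension) ------------------------*/
// The code below implements bucketed QSGD-style gradient quantization: each bucket of
// `bucket_size` floats is scaled by its L2 norm and stochastically rounded to one of
// `level` integer levels stored in an int8, with the code -128 reserved for Inf/NaN inputs.
//
// Worked example of the rounding rule in the ternarize kernels (illustrative numbers, not
// from a real run): with level = 127, bucket norm = 2.0 and src[i] = 0.5,
// level_float = 127 / 2.0 * 0.5 = 31.75, so dst[i] becomes 32 with probability 0.75 and
// 31 with probability 0.25; the expected dequantized value 31.75 * 2.0 / 127 = 0.5 equals
// src[i], i.e. the stochastic rounding is unbiased.
//
// The excerpt starts at `using namespace std;` without an include list; the headers below
// are what the code appears to need and are added here as an assumption.
#include <torch/extension.h>   // torch::Tensor, TensorOptions, pybind11 glue
#include <cuda_runtime.h>      // cudaMemcpy, cudaMemset, cudaStream_t
#include <curand_kernel.h>     // curandState, curand_init, curand_uniform
#include <algorithm>           // std::min
#include <cmath>               // ceil, sqrt
#include <ctime>               // time(NULL), used to seed the quantizer
#include <vector>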
using namespace std; #define EIGEN_USE_GPU #define maxThreadsPerBlock 1024 __global__ void _qsgdreduceSumV2(float *g_odata, float *g_idata, unsigned int n) { extern __shared__ float sdata[]; // set thread ID unsigned int tid = threadIdx.x; unsigned int gridSize = blockDim.x * gridDim.x; unsigned int i = blockIdx.x * blockDim.x + tid; unsigned int blockSize = blockDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += g_idata[i];// + g_idata[i + blockDim.x]; i += gridSize; } __syncthreads(); // in-place reduction and complete unroll if (blockSize >= 1024) { if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); } if (blockSize >= 512) { if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); } if (blockSize >= 256) { if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); } if (blockSize >= 128) { if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); } // unrolling warp if (tid < 32) { volatile float *vsmem = sdata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; } } __global__ void _qsgdreduceClipThresholdV2(float *g_odata, float *g_idata, unsigned int n) { extern __shared__ float sdata[]; // set thread ID unsigned int tid = threadIdx.x; unsigned int gridSize = blockDim.x * gridDim.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int blockSize = blockDim.x; sdata[tid] = 0; while (i < n) { if (isfinite(g_idata[i])) { sdata[tid] += g_idata[i] * g_idata[i];// + g_idata[i + blockDim.x] * g_idata[i + blockDim.x]; } i += gridSize; } __syncthreads(); // in-place reduction and complete unroll if (blockSize >= 1024) { if (tid < 512) sdata[tid] += sdata[tid + 512]; __syncthreads(); } if (blockSize >= 512) { if (tid < 256) sdata[tid] += sdata[tid + 256]; __syncthreads(); } if (blockSize >= 256) { if (tid < 128) sdata[tid] += sdata[tid + 128]; __syncthreads(); } if (blockSize >= 128) { if (tid < 64) sdata[tid] += sdata[tid + 64]; __syncthreads(); } // unrolling warp if (tid < 32) { volatile float *vsmem = sdata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; } } __global__ void _qsgdreduceAbsMaxV2(float *g_odata, float *g_idata, unsigned int n) { extern __shared__ float sdata[]; // set thread ID unsigned int tid = threadIdx.x; unsigned int gridSize = blockDim.x * gridDim.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int blockSize = blockDim.x; sdata[tid] = 0; while (i < n) { if (isfinite(g_idata[i]) && isfinite(sdata[tid])) sdata[tid] = fmaxf(sdata[tid], fabsf(g_idata[i])); //fmaxf(fabsf(g_idata[i]), fabsf(g_idata[i + blockDim.x]))); else sdata[tid] = nanf("123"); i += gridSize; } __syncthreads(); // in-place reduction and complete unroll if (blockSize >= 1024) { if (tid < 512) { if (isfinite(sdata[tid]) && isfinite(sdata[tid + 512])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 512]); else sdata[tid] = nanf("123"); } __syncthreads(); } if (blockSize >= 512) { if (tid < 256) { if (isfinite(sdata[tid]) && isfinite(sdata[tid + 256])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 256]); else sdata[tid] = nanf("123"); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if 
(isfinite(sdata[tid]) && isfinite(sdata[tid + 128])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 128]); else sdata[tid] = nanf("123"); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if (isfinite(sdata[tid]) && isfinite(sdata[tid + 64])) sdata[tid] = fmaxf(sdata[tid], sdata[tid + 64]); else sdata[tid] = nanf("123"); } __syncthreads(); } // unrolling warp if (tid < 32) { volatile float *vsmem = sdata; if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 32])) vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 32]); else vsmem[tid] = nanf("123"); if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 16])) vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 16]); else vsmem[tid] = nanf("123"); if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 8])) vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 8]); else vsmem[tid] = nanf("123"); if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 4])) vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 4]); else vsmem[tid] = nanf("123"); if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 2])) vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 2]); else vsmem[tid] = nanf("123"); if (isfinite(vsmem[tid]) && isfinite(vsmem[tid + 1])) vsmem[tid] = fmaxf(vsmem[tid], vsmem[tid + 1]); else vsmem[tid] = nanf("123"); } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; } } __global__ void _qsgdcomputeSqrt(float *scaler) { *scaler = sqrt(*scaler); //printf("l2 norm result: %f\n", *scaler); //__syncthreads(); } __global__ void _qsgdinitCURand(unsigned int len, unsigned int seed, curandState* states) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; /* we have to initialize the state */ if (index < len) curand_init(seed + index, /* the seed can be the same for each core, here we pass the time in from the CPU */ 0, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[index]); } __global__ void _qsgdcompensateMemory(float *dst, const float *src, const float *local_mem, int len) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int stride = gridDim.x * blockDim.x; for (int i = index; i < len; i += stride){ if (isfinite(src[i])) { //dst[i] = src[i]; // + local_mem[i]; //remove memory compensation for comparison purposes. dst[i] = src[i] + local_mem[i]; } else { dst[i] = nanf("123"); } //printf("CompensateMemory result: idx=%d, src=%f, mem=%f, dst=%f\n", i, src[i], local_mem[i], dst[i]); //__syncthreads(); } } __global__ void _qsgdTernarizeValue(int8_t *dst, const float *src, float *scaler, float *local_mem, const int len, int level, curandState* states) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int stride = gridDim.x * blockDim.x; curandState local_state = states[index]; float norm_scaler = *scaler; // The input tensor here has been clipped. 
// Hence we have the ternarize formula: dst[i] = new_level[i] * sign(src[i]) for (int i = index; i < len; i += stride) { if (isfinite(norm_scaler) && isfinite(src[i])) { float rand_sample = curand_uniform(&local_state); float level_float = (float)level / norm_scaler * fabsf(src[i]); int8_t previous_level = floor(level_float); if (rand_sample < level_float - previous_level) { dst[i] = previous_level + 1; // 1 is required by qsgd } else { dst[i] = previous_level; } if (src[i] < 0){ dst[i] = -dst[i]; } // update local memory local_mem[i] = src[i] - norm_scaler / (float)level * (float)dst[i]; // remove vanilla local memory update for comparison purposes. } else { // encode value to the minimum for Inf or NaN dst[i] = -128; } //printf("compressed result: idx=%d, scaler=%f, src=%f, dst=%d, update_mem=%f\n", i, *scaler, src[i], dst[i], local_mem[i]); //__syncthreads(); } } // For qsgd allreduce // __global__ void _qsgdDeternarizeValue(int len, float *dst, int8_t *src, float *scaler, int level) // { // int index = blockIdx.x * blockDim.x + threadIdx.x; // int stride = blockDim.x * gridDim.x; // float norm_scaler = *scaler; // for (int i = index; i < len; i += stride) // { // dst[i] = norm_scaler / (float)level * (float)src[i]; // } // } // For qsgd allgather __global__ void _qsgdDeternarizeAndAdd(int len, float *dst, int8_t *src, float *scaler, int level) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; float norm_scaler = *scaler; for (int i = index; i < len; i += stride) { if (src[i] == -128) { dst[i] = nanf("123"); } else { dst[i] += norm_scaler / (float)level * (float)src[i]; } //printf("decompressed result: idx=%d, scaler=%f, src=%d, dst=%f\n", i, *scaler, src[i], dst[i]); //__syncthreads(); } } __global__ void _bucket_l2norm(const int len, double *dst, float *src, const int bucket_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; const int loop_times = len / bucket_size; const int remain_nums = len % bucket_size; for (int i = index; i < loop_times; i += stride) { #pragma unroll for (int j = 0; j < bucket_size; j ++){ if (isfinite(src[bucket_size*i+j])) { dst[i] += (double)(src[bucket_size*i+j]) * (double)(src[bucket_size*i+j]); } } dst[i] = sqrt(dst[i]); } if (remain_nums && index == loop_times){ #pragma unroll for (int i = 0; i < remain_nums; i++){ if (isfinite(src[bucket_size*loop_times+i])) { dst[loop_times] += (double)(src[bucket_size*loop_times+i]) * (double)(src[bucket_size*loop_times+i]); } } dst[loop_times] = sqrt(dst[loop_times]); } } __global__ void _bucket_qsgdTernarizeValue(int8_t *dst, const float *src, double *scaler, const int len, int level, const int bucket_size, unsigned int seed) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; unsigned int stride = gridDim.x * blockDim.x; // curandState local_state = states[index]; curandState local_state; // The input tensor here has been clipped. 
// Hence we have the ternarize formula: dst[i] = new_level[i] * sign(src[i]) for (int i = index; i < len; i += stride) { float norm_scaler = (float)(scaler[i/bucket_size]); curand_init(seed + index, 0, 0, &local_state); if (isfinite(norm_scaler) && isfinite(src[i])) { float rand_sample = curand_uniform(&local_state); float level_float = (float)level / norm_scaler * fabsf(src[i]); int8_t previous_level = floor(level_float); if (rand_sample < level_float - previous_level) { dst[i] = previous_level + 1; // 1 is required by qsgd } else { dst[i] = previous_level; } if (src[i] < 0){ dst[i] = -dst[i]; } // update local memory //local_mem[i] = src[i] - norm_scaler / (float)level * (float)dst[i]; // remove vanilla local memory update for comparison purposes. } else { // encode value to the minimum for Inf or NaN dst[i] = -128; } //printf("compressed result: idx=%d, scaler=%f, src=%f, dst=%d, update_mem=%f\n", i, *scaler, src[i], dst[i], local_mem[i]); //__syncthreads(); } } // For qsgd allgather __global__ void _bucket_qsgdDeternarizeAndAdd(int len, float *dst, int8_t *src, double *scaler, int level, const int bucket_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < len; i += stride) { float norm_scaler = (float)(scaler[i/bucket_size]); if (src[i] == -128) { dst[i] = nanf("123"); } else { dst[i] = norm_scaler / (float)level * (float)src[i]; //atomicAdd(dst+i, norm_scaler / (float)level * (float)src[i]); } //printf("decompressed result: idx=%d, scaler=%f, src=%d, dst=%f\n", i, *scaler, src[i], dst[i]); //__syncthreads(); } } /*----------------------------------- Reduce Wrapper --------------------------------------------*/ void qsgdGPUReduce(int len, float *d_out, float *d_intermediate_res, float *result, int whichKernel, cudaStream_t stream) { // d_intermediate_res holds the input // setting up blocks int numBlocks = (int) ceil(1.0 * len / maxThreadsPerBlock); //(len / maxThreadsPerBlock) + 1; int prevNumBlocks = len; // recursively reduce to get the result while (numBlocks > maxThreadsPerBlock) { // clear d_out cudaMemset(d_out, 0, numBlocks * sizeof(float)); switch (whichKernel) { // reduce sum case 0: _qsgdreduceSumV2<<<numBlocks, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, len); break; // reduce absmax case 1: _qsgdreduceAbsMaxV2<<<numBlocks, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, len); break; // reduce clip threshold case 2: _qsgdreduceClipThresholdV2<<<numBlocks, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, len); // we don't need to square the intermediate results. whichKernel = 0; break; default: break; } // by now, d_out holds the intermediate result, copy it to intermedaite_res for the next run cudaMemcpy(d_intermediate_res, d_out, numBlocks * sizeof(float), cudaMemcpyDeviceToDevice); // compute reduced problem size prevNumBlocks = numBlocks; len = numBlocks; numBlocks = (int) ceil(1.0 * numBlocks / maxThreadsPerBlock); //numBlocks / maxThreadsPerBlock + 1; } // use one block to compute the rest. 
// clear d_out cudaMemset(d_out, 0, prevNumBlocks* sizeof(float)); switch (whichKernel) { // reduce sum case 0: _qsgdreduceSumV2<<<1, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, prevNumBlocks); break; // reduce absmax case 1: _qsgdreduceAbsMaxV2<<<1, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, prevNumBlocks); break; // reduce clip threshold case 2: _qsgdreduceClipThresholdV2<<<1, maxThreadsPerBlock, maxThreadsPerBlock * sizeof(float)>>>(d_out, d_intermediate_res, prevNumBlocks); break; default: break; } // as we just use one block, just move the first element of d_out to result cudaMemcpy(result, d_out, sizeof(float), cudaMemcpyDeviceToDevice); } /*----------------------------------- Kernel Launch Wrappers ------------------------------------*/ void GPUReduceL2Norm(float *array, int len, double *l2norm_scaler, const int bucket_size) { int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock); _bucket_l2norm<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(len, l2norm_scaler, array, bucket_size); } // void qsgdGPUInit_curand(int n, unsigned int seed, curandState* cuda_states) // { // int blocksPerGrid = (int) ceil(1.0 * n / maxThreadsPerBlock); // _qsgdinitCURand<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(n, seed, cuda_states); // } // void qsgdGPUCompensateMemory(float *dst, const float *src, const float* local_mem, int len) // { // int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock); // _qsgdcompensateMemory<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst, src, local_mem, len); // } void GPUTernarizeMultiLevelValue(int8_t *dst, const float *src, double *scaler, int len, int level, const int bucket_size) { int blocksPerGrid = (int) ceil(1.0 * std::min(len, 1024 * 1024 * 25) / maxThreadsPerBlock); unsigned int seed = time(NULL); _bucket_qsgdTernarizeValue<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst, src, scaler, len, level, bucket_size, seed); } void GPUDeternarizeMultiLevelValue(int len, float *dst, int8_t *src, double *scaler, int level, const int bucket_size) { int blocksPerGrid = (int) ceil(1.0 * len / maxThreadsPerBlock); _bucket_qsgdDeternarizeAndAdd<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(len, dst, src, scaler, level, bucket_size); } std::vector<torch::Tensor> qsgd_compress_cuda(torch::Tensor input, int level, int bucket_size) { int element_nums = input.numel(); int num_buckets = ceil((float)element_nums / bucket_size); auto d_l2norm_scaler = torch::zeros(num_buckets, torch::TensorOptions().dtype(torch::kFloat64).device(input.device())); auto buffer_data = torch::empty(element_nums, torch::TensorOptions().dtype(torch::kInt8).device(input.device())); // curandState* cuda_states; // cuda_states = (curandState*)torch::empty(element_nums, torch::TensorOptions().dtype(torch::kInt).device(input.device())).data_ptr(); // qsgdGPUInit_curand(element_nums, time(NULL), cuda_states); GPUReduceL2Norm((float*)input.data_ptr(), element_nums, (double*)d_l2norm_scaler.data_ptr(), bucket_size); GPUTernarizeMultiLevelValue((int8_t*)buffer_data.data_ptr(), (float*)input.data_ptr(), (double*)d_l2norm_scaler.data_ptr(), element_nums, level, bucket_size); return {buffer_data, d_l2norm_scaler}; } torch::Tensor qsgd_decompress_cuda(torch::Tensor input, torch::Tensor d_l2norm_scaler, int level, int bucket_size) { int element_nums = input.numel(); int num_buckets = ceil((float)element_nums / bucket_size); auto buffer_data = torch::empty(element_nums, torch::TensorOptions().dtype(torch::kFloat32).device(input.device())); 
GPUDeternarizeMultiLevelValue(element_nums, (float*)buffer_data.data_ptr(), (int8_t*)input.data_ptr(), (double*)d_l2norm_scaler.data_ptr(), level, bucket_size); return buffer_data; }
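// ---------------------------------------------------------------------------
// Usage sketch (not part of the original source). A minimal compress /
// decompress round trip through the two entry points above, assuming they are
// called from the same translation unit (or exported through the usual
// torch-extension bindings). The tensor size, `level` and `bucket_size`
// values are illustrative assumptions, not values taken from this code.
// ---------------------------------------------------------------------------
#include <torch/torch.h>  // redundant if already included above

void qsgd_round_trip_example() {
    const int level = 8;          // assumed number of quantization levels
    const int bucket_size = 512;  // assumed elements per L2-norm bucket
    torch::Tensor grad = torch::randn(
        {1 << 20},
        torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA));

    // Compress: returns {int8 quantized values, per-bucket float64 L2 norms}.
    auto packed = qsgd_compress_cuda(grad, level, bucket_size);

    // Decompress: reconstructs a float32 tensor with the original element count.
    torch::Tensor restored =
        qsgd_decompress_cuda(packed[0], packed[1], level, bucket_size);
    (void)restored;
}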
#ifndef HELPER_MATH_H #define HELPER_MATH_H #include <cuda.h> typedef unsigned int uint; typedef unsigned short ushort; typedef unsigned char uchar; typedef uchar3 bool3; #ifndef __CUDACC__ #include <math.h> inline __host__ __device__ int imax3 (int a, int b, int c) { return (a>b) ? ((a>c) ? a : c) : ((b>c) ? b : c); } inline __host__ __device__ int imin3 (int a, int b, int c) { return (a<b) ? ((a<c) ? a : c) : ((b<c) ? b : c); } //////////////////////////////////////////////////////////////////////////////// // host implementations of CUDA functions //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float fminf(float a, float b) { return a < b ? a : b; } inline __host__ __device__ float fmaxf(float a, float b) { return a > b ? a : b; } inline __host__ __device__ int max(int a, int b) { return a > b ? a : b; } inline __host__ __device__ int min(int a, int b) { return a < b ? a : b; } inline __host__ __device__ float rsqrtf(float x) { return 1.0f / sqrtf(x); } #endif //////////////////////////////////////////////////////////////////////////////// // constructors //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 make_float2(float s) { return make_float2(s, s); } inline __host__ __device__ float2 make_float2(float3 a) { return make_float2(a.x, a.y); } inline __host__ __device__ float2 make_float2(int2 a) { return make_float2(float(a.x), float(a.y)); } inline __host__ __device__ float2 make_float2(uint2 a) { return make_float2(float(a.x), float(a.y)); } inline __host__ __device__ int2 make_int2(int s) { return make_int2(s, s); } inline __host__ __device__ int2 make_int2(int3 a) { return make_int2(a.x, a.y); } inline __host__ __device__ int2 make_int2(uint2 a) { return make_int2(int(a.x), int(a.y)); } inline __host__ __device__ int2 make_int2(float2 a) { return make_int2(int(a.x), int(a.y)); } inline __host__ __device__ uint2 make_uint2(uint s) { return make_uint2(s, s); } inline __host__ __device__ uint2 make_uint2(uint3 a) { return make_uint2(a.x, a.y); } inline __host__ __device__ uint2 make_uint2(int2 a) { return make_uint2(uint(a.x), uint(a.y)); } inline __host__ __device__ float3 make_float3(float s) { return make_float3(s, s, s); } inline __host__ __device__ float3 make_float3(bool3 s) { return make_float3(s.x, s.y, s.z); } inline __host__ __device__ float3 make_float3(float2 a) { return make_float3(a.x, a.y, 0.0f); } inline __host__ __device__ float3 make_float3(float2 a, float s) { return make_float3(a.x, a.y, s); } inline __host__ __device__ float3 make_float3(float4 a) { return make_float3(a.x, a.y, a.z); } inline __host__ __device__ float3 make_float3(int3 a) { return make_float3(float(a.x), float(a.y), float(a.z)); } inline __host__ __device__ float3 make_float3(uint3 a) { return make_float3(float(a.x), float(a.y), float(a.z)); } inline __host__ __device__ int3 make_int3(int s) { return make_int3(s, s, s); } inline __host__ __device__ int3 make_int3(bool3 s) { return make_int3(s.x, s.y, s.z); } inline __host__ __device__ int3 make_int3(int2 a) { return make_int3(a.x, a.y, 0); } inline __host__ __device__ int3 make_int3(int2 a, int s) { return make_int3(a.x, a.y, s); } inline __host__ __device__ int3 make_int3( int3 a ) { return make_int3(int(a.x), int(a.y), int(a.z)); } inline __host__ __device__ int3 make_int3( uint3 a ) { return make_int3(int(a.x), int(a.y), int(a.z)); } inline __host__ __device__ int3 make_int3(float3 a) { return make_int3(int(a.x), int(a.y), 
int(a.z)); } inline __host__ __device__ uint3 make_uint3(uint s) { return make_uint3(s, s, s); } inline __host__ __device__ uint3 make_uint3(uint2 a) { return make_uint3(a.x, a.y, 0); } inline __host__ __device__ uint3 make_uint3(uint2 a, uint s) { return make_uint3(a.x, a.y, s); } inline __host__ __device__ uint3 make_uint3(uint4 a) { return make_uint3(a.x, a.y, a.z); } inline __host__ __device__ uint3 make_uint3(int3 a) { return make_uint3(uint(a.x), uint(a.y), uint(a.z)); } inline __host__ __device__ uint3 make_uint3(float3 a) { return make_uint3(uint(a.x), uint(a.y), uint(a.z)); } inline __host__ __device__ float4 make_float4(float s) { return make_float4(s, s, s, s); } inline __host__ __device__ float4 make_float4(float3 a) { return make_float4(a.x, a.y, a.z, 0.0f); } inline __host__ __device__ float4 make_float4(float3 a, float w) { return make_float4(a.x, a.y, a.z, w); } inline __host__ __device__ float4 make_float4(int4 a) { return make_float4(float(a.x), float(a.y), float(a.z), float(a.w)); } inline __host__ __device__ float4 make_float4(uint4 a) { return make_float4(float(a.x), float(a.y), float(a.z), float(a.w)); } inline __host__ __device__ float4 make_float4(uchar4 a) { return make_float4(float(a.x)/255.0f, float(a.y)/255.0f, float(a.z)/255.0f, float(a.w)/255.0f); } inline __host__ __device__ float3 make_float3(uchar4 a) { return make_float3(float(a.x)/255.0f, float(a.y)/255.0f, float(a.z)/255.0f); } inline __host__ __device__ int4 make_int4(int s) { return make_int4(s, s, s, s); } inline __host__ __device__ int4 make_int4(int3 a) { return make_int4(a.x, a.y, a.z, 0); } inline __host__ __device__ int4 make_int4(int3 a, int w) { return make_int4(a.x, a.y, a.z, w); } inline __host__ __device__ int4 make_int4(uint4 a) { return make_int4(int(a.x), int(a.y), int(a.z), int(a.w)); } inline __host__ __device__ int4 make_int4(float4 a) { return make_int4(int(a.x), int(a.y), int(a.z), int(a.w)); } inline __host__ __device__ int3 make_int3(uchar4 a) { return make_int3(int(a.x), int(a.y), int(a.z)); } inline __host__ __device__ uint4 make_uint4(uint s) { return make_uint4(s, s, s, s); } inline __host__ __device__ uint4 make_uint4(uint3 a) { return make_uint4(a.x, a.y, a.z, 0); } inline __host__ __device__ uint4 make_uint4(uint3 a, uint w) { return make_uint4(a.x, a.y, a.z, w); } inline __host__ __device__ uint4 make_uint4(int4 a) { return make_uint4(uint(a.x), uint(a.y), uint(a.z), uint(a.w)); } //////////////////////////////////////////////////////////////////////////////// // negate //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 operator-(float2 &a) { return make_float2(-a.x, -a.y); } inline __host__ __device__ int2 operator-(int2 &a) { return make_int2(-a.x, -a.y); } inline __host__ __device__ float3 operator-(float3 &a) { return make_float3(-a.x, -a.y, -a.z); } inline __host__ __device__ int3 operator-(int3 &a) { return make_int3(-a.x, -a.y, -a.z); } inline __host__ __device__ float4 operator-(float4 &a) { return make_float4(-a.x, -a.y, -a.z, -a.w); } inline __host__ __device__ int4 operator-(int4 &a) { return make_int4(-a.x, -a.y, -a.z, -a.w); } //////////////////////////////////////////////////////////////////////////////// // addition //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 operator+(float2 a, float2 b) { return make_float2(a.x + b.x, a.y + b.y); } inline __host__ __device__ void operator+=(float2 &a, float2 b) { a.x += b.x; a.y += b.y; } 
inline __host__ __device__ float2 operator+(float2 a, float b) { return make_float2(a.x + b, a.y + b); } inline __host__ __device__ float2 operator+(float b, float2 a) { return make_float2(a.x + b, a.y + b); } inline __host__ __device__ void operator+=(float2 &a, float b) { a.x += b; a.y += b; } inline __host__ __device__ int2 operator+(int2 a, int2 b) { return make_int2(a.x + b.x, a.y + b.y); } inline __host__ __device__ void operator+=(int2 &a, int2 b) { a.x += b.x; a.y += b.y; } inline __host__ __device__ int2 operator+(int2 a, int b) { return make_int2(a.x + b, a.y + b); } inline __host__ __device__ int2 operator+(int b, int2 a) { return make_int2(a.x + b, a.y + b); } inline __host__ __device__ void operator+=(int2 &a, int b) { a.x += b; a.y += b; } inline __host__ __device__ uint2 operator+(uint2 a, uint2 b) { return make_uint2(a.x + b.x, a.y + b.y); } inline __host__ __device__ void operator+=(uint2 &a, uint2 b) { a.x += b.x; a.y += b.y; } inline __host__ __device__ uint2 operator+(uint2 a, uint b) { return make_uint2(a.x + b, a.y + b); } inline __host__ __device__ uint2 operator+(uint b, uint2 a) { return make_uint2(a.x + b, a.y + b); } inline __host__ __device__ void operator+=(uint2 &a, uint b) { a.x += b; a.y += b; } inline __host__ __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __host__ __device__ void operator+=(float3 &a, float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; } inline __host__ __device__ float3 operator+(float3 a, float b) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __host__ __device__ void operator+=(float3 &a, float b) { a.x += b; a.y += b; a.z += b; } inline __host__ __device__ int3 operator+(int3 a, int3 b) { return make_int3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __host__ __device__ float3 operator+(int3 &a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __host__ __device__ void operator+=(int3 &a, int3 b) { a.x += b.x; a.y += b.y; a.z += b.z; } inline __host__ __device__ int3 operator+(int3 a, int b) { return make_int3(a.x + b, a.y + b, a.z + b); } inline __host__ __device__ void operator+=(int3 &a, int b) { a.x += b; a.y += b; a.z += b; } inline __host__ __device__ uint3 operator+(uint3 a, uint3 b) { return make_uint3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __host__ __device__ void operator+=(uint3 &a, uint3 b) { a.x += b.x; a.y += b.y; a.z += b.z; } inline __host__ __device__ uint3 operator+(uint3 a, uint b) { return make_uint3(a.x + b, a.y + b, a.z + b); } inline __host__ __device__ void operator+=(uint3 &a, uint b) { a.x += b; a.y += b; a.z += b; } inline __host__ __device__ int3 operator+(int b, int3 a) { return make_int3(a.x + b, a.y + b, a.z + b); } inline __host__ __device__ uint3 operator+(uint b, uint3 a) { return make_uint3(a.x + b, a.y + b, a.z + b); } inline __host__ __device__ float3 operator+(float b, float3 a) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __host__ __device__ float4 operator+(float4 a, float4 b) { return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } inline __host__ __device__ void operator+=(float4 &a, float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } inline __host__ __device__ float4 operator+(float4 a, float b) { return make_float4(a.x + b, a.y + b, a.z + b, a.w + b); } inline __host__ __device__ float4 operator+(float b, float4 a) { return make_float4(a.x + b, a.y + b, a.z + b, a.w + b); } inline __host__ __device__ void operator+=(float4 &a, float b) { a.x += b; a.y += b; a.z += 
b; a.w += b; } inline __host__ __device__ int4 operator+(int4 a, int4 b) { return make_int4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } inline __host__ __device__ void operator+=(int4 &a, int4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } inline __host__ __device__ int4 operator+(int4 a, int b) { return make_int4(a.x + b, a.y + b, a.z + b, a.w + b); } inline __host__ __device__ int4 operator+(int b, int4 a) { return make_int4(a.x + b, a.y + b, a.z + b, a.w + b); } inline __host__ __device__ void operator+=(int4 &a, int b) { a.x += b; a.y += b; a.z += b; a.w += b; } inline __host__ __device__ uint4 operator+(uint4 a, uint4 b) { return make_uint4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } inline __host__ __device__ void operator+=(uint4 &a, uint4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } inline __host__ __device__ uint4 operator+(uint4 a, uint b) { return make_uint4(a.x + b, a.y + b, a.z + b, a.w + b); } inline __host__ __device__ uint4 operator+(uint b, uint4 a) { return make_uint4(a.x + b, a.y + b, a.z + b, a.w + b); } inline __host__ __device__ void operator+=(uint4 &a, uint b) { a.x += b; a.y += b; a.z += b; a.w += b; } //////////////////////////////////////////////////////////////////////////////// // subtract //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 operator-(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); } inline __host__ __device__ void operator-=(float2 &a, float2 b) { a.x -= b.x; a.y -= b.y; } inline __host__ __device__ float2 operator-(float2 a, float b) { return make_float2(a.x - b, a.y - b); } inline __host__ __device__ float2 operator-(float b, float2 a) { return make_float2(b - a.x, b - a.y); } inline __host__ __device__ void operator-=(float2 &a, float b) { a.x -= b; a.y -= b; } inline __host__ __device__ int2 operator-(int2 a, int2 b) { return make_int2(a.x - b.x, a.y - b.y); } inline __host__ __device__ void operator-=(int2 &a, int2 b) { a.x -= b.x; a.y -= b.y; } inline __host__ __device__ int2 operator-(int2 a, int b) { return make_int2(a.x - b, a.y - b); } inline __host__ __device__ int2 operator-(int b, int2 a) { return make_int2(b - a.x, b - a.y); } inline __host__ __device__ void operator-=(int2 &a, int b) { a.x -= b; a.y -= b; } inline __host__ __device__ uint2 operator-(uint2 a, uint2 b) { return make_uint2(a.x - b.x, a.y - b.y); } inline __host__ __device__ void operator-=(uint2 &a, uint2 b) { a.x -= b.x; a.y -= b.y; } inline __host__ __device__ uint2 operator-(uint2 a, uint b) { return make_uint2(a.x - b, a.y - b); } inline __host__ __device__ uint2 operator-(uint b, uint2 a) { return make_uint2(b - a.x, b - a.y); } inline __host__ __device__ void operator-=(uint2 &a, uint b) { a.x -= b; a.y -= b; } inline __host__ __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __host__ __device__ void operator-=(float3 &a, float3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } inline __host__ __device__ float3 operator-(float3 a, float b) { return make_float3(a.x - b, a.y - b, a.z - b); } inline __host__ __device__ float3 operator-(float b, float3 a) { return make_float3(b - a.x, b - a.y, b - a.z); } inline __host__ __device__ void operator-=(float3 &a, float b) { a.x -= b; a.y -= b; a.z -= b; } inline __host__ __device__ int3 operator-(int3 a, int3 b) { return make_int3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __host__ __device__ float3 operator-(int3 &a, float3 b) { return make_float3(a.x - b.x, a.y 
- b.y, a.z - b.z); } inline __host__ __device__ void operator-=(int3 &a, int3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } inline __host__ __device__ void operator-=(int3 &a, float3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } inline __host__ __device__ int3 operator-(int3 a, int b) { return make_int3(a.x - b, a.y - b, a.z - b); } inline __host__ __device__ int3 operator-(int b, int3 a) { return make_int3(b - a.x, b - a.y, b - a.z); } inline __host__ __device__ void operator-=(int3 &a, int b) { a.x -= b; a.y -= b; a.z -= b; } inline __host__ __device__ uint3 operator-(uint3 a, uint3 b) { return make_uint3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __host__ __device__ void operator-=(uint3 &a, uint3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } inline __host__ __device__ uint3 operator-(uint3 a, uint b) { return make_uint3(a.x - b, a.y - b, a.z - b); } inline __host__ __device__ uint3 operator-(uint b, uint3 a) { return make_uint3(b - a.x, b - a.y, b - a.z); } inline __host__ __device__ void operator-=(uint3 &a, uint b) { a.x -= b; a.y -= b; a.z -= b; } inline __host__ __device__ float4 operator-(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } inline __host__ __device__ void operator-=(float4 &a, float4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; } inline __host__ __device__ float4 operator-(float4 a, float b) { return make_float4(a.x - b, a.y - b, a.z - b, a.w - b); } inline __host__ __device__ void operator-=(float4 &a, float b) { a.x -= b; a.y -= b; a.z -= b; a.w -= b; } inline __host__ __device__ int4 operator-(int4 a, int4 b) { return make_int4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } inline __host__ __device__ void operator-=(int4 &a, int4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; } inline __host__ __device__ int4 operator-(int4 a, int b) { return make_int4(a.x - b, a.y - b, a.z - b, a.w - b); } inline __host__ __device__ int4 operator-(int b, int4 a) { return make_int4(b - a.x, b - a.y, b - a.z, b - a.w); } inline __host__ __device__ void operator-=(int4 &a, int b) { a.x -= b; a.y -= b; a.z -= b; a.w -= b; } inline __host__ __device__ uint4 operator-(uint4 a, uint4 b) { return make_uint4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } inline __host__ __device__ void operator-=(uint4 &a, uint4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; } inline __host__ __device__ uint4 operator-(uint4 a, uint b) { return make_uint4(a.x - b, a.y - b, a.z - b, a.w - b); } inline __host__ __device__ uint4 operator-(uint b, uint4 a) { return make_uint4(b - a.x, b - a.y, b - a.z, b - a.w); } inline __host__ __device__ void operator-=(uint4 &a, uint b) { a.x -= b; a.y -= b; a.z -= b; a.w -= b; } //////////////////////////////////////////////////////////////////////////////// // multiply //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 operator*(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); } inline __host__ __device__ void operator*=(float2 &a, float2 b) { a.x *= b.x; a.y *= b.y; } inline __host__ __device__ float2 operator*(float2 a, float b) { return make_float2(a.x * b, a.y * b); } inline __host__ __device__ float2 operator*(float b, float2 a) { return make_float2(b * a.x, b * a.y); } inline __host__ __device__ void operator*=(float2 &a, float b) { a.x *= b; a.y *= b; } inline __host__ __device__ int2 operator*(int2 a, int2 b) { return make_int2(a.x * b.x, a.y * b.y); } inline __host__ __device__ void operator*=(int2 &a, int2 b) { a.x *= b.x; a.y *= b.y; 
} inline __host__ __device__ int2 operator*(int2 a, int b) { return make_int2(a.x * b, a.y * b); } inline __host__ __device__ int2 operator*(int b, int2 a) { return make_int2(b * a.x, b * a.y); } inline __host__ __device__ void operator*=(int2 &a, int b) { a.x *= b; a.y *= b; } inline __host__ __device__ uint2 operator*(uint2 a, uint2 b) { return make_uint2(a.x * b.x, a.y * b.y); } inline __host__ __device__ void operator*=(uint2 &a, uint2 b) { a.x *= b.x; a.y *= b.y; } inline __host__ __device__ uint2 operator*(uint2 a, uint b) { return make_uint2(a.x * b, a.y * b); } inline __host__ __device__ uint2 operator*(uint b, uint2 a) { return make_uint2(b * a.x, b * a.y); } inline __host__ __device__ void operator*=(uint2 &a, uint b) { a.x *= b; a.y *= b; } inline __host__ __device__ float3 operator*(float3 a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } inline __host__ __device__ float3 operator*(float3 a, int3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } inline __host__ __device__ void operator*=(float3 &a, float3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; } inline __host__ __device__ float3 operator*(float3 a, float b) { return make_float3(a.x * b, a.y * b, a.z * b); } inline __host__ __device__ float3 operator*(float b, float3 a) { return make_float3(b * a.x, b * a.y, b * a.z); } inline __host__ __device__ void operator*=(float3 &a, float b) { a.x *= b; a.y *= b; a.z *= b; } inline __host__ __device__ int3 operator*(int3 a, int3 b) { return make_int3(a.x * b.x, a.y * b.y, a.z * b.z); } inline __host__ __device__ float3 operator*(int3 &a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } inline __host__ __device__ void operator*=(int3 &a, int3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; } inline __host__ __device__ int3 operator*(int3 a, int b) { return make_int3(a.x * b, a.y * b, a.z * b); } inline __host__ __device__ int3 operator*(int b, int3 a) { return make_int3(b * a.x, b * a.y, b * a.z); } inline __host__ __device__ void operator*=(int3 &a, int b) { a.x *= b; a.y *= b; a.z *= b; } inline __host__ __device__ uint3 operator*(uint3 a, uint3 b) { return make_uint3(a.x * b.x, a.y * b.y, a.z * b.z); } inline __host__ __device__ void operator*=(uint3 &a, uint3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; } inline __host__ __device__ uint3 operator*(uint3 a, uint b) { return make_uint3(a.x * b, a.y * b, a.z * b); } inline __host__ __device__ uint3 operator*(uint b, uint3 a) { return make_uint3(b * a.x, b * a.y, b * a.z); } inline __host__ __device__ void operator*=(uint3 &a, uint b) { a.x *= b; a.y *= b; a.z *= b; } inline __host__ __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } inline __host__ __device__ void operator*=(float4 &a, float4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; } inline __host__ __device__ float4 operator*(float4 a, float b) { return make_float4(a.x * b, a.y * b, a.z * b, a.w * b); } inline __host__ __device__ float4 operator*(float b, float4 a) { return make_float4(b * a.x, b * a.y, b * a.z, b * a.w); } inline __host__ __device__ void operator*=(float4 &a, float b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; } inline __host__ __device__ int4 operator*(int4 a, int4 b) { return make_int4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } inline __host__ __device__ void operator*=(int4 &a, int4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; } inline __host__ __device__ int4 operator*(int4 a, int b) { return make_int4(a.x * b, a.y * b, a.z * b, a.w * b); } 
inline __host__ __device__ int4 operator*(int b, int4 a) { return make_int4(b * a.x, b * a.y, b * a.z, b * a.w); } inline __host__ __device__ void operator*=(int4 &a, int b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; } inline __host__ __device__ uint4 operator*(uint4 a, uint4 b) { return make_uint4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } inline __host__ __device__ void operator*=(uint4 &a, uint4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; } inline __host__ __device__ uint4 operator*(uint4 a, uint b) { return make_uint4(a.x * b, a.y * b, a.z * b, a.w * b); } inline __host__ __device__ uint4 operator*(uint b, uint4 a) { return make_uint4(b * a.x, b * a.y, b * a.z, b * a.w); } inline __host__ __device__ void operator*=(uint4 &a, uint b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; } //////////////////////////////////////////////////////////////////////////////// // divide //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ uint3 operator/ (uint3 a, int b) { return make_uint3(a.x / b, a.y / b, a.z / b); } inline __host__ __device__ uint3 operator% (uint3 a, int b) { return make_uint3(a.x % b, a.y % b, a.z % b); } inline __host__ __device__ float2 operator/(float2 a, float2 b) { return make_float2(a.x / b.x, a.y / b.y); } inline __host__ __device__ void operator/=(float2 &a, float2 b) { a.x /= b.x; a.y /= b.y; } inline __host__ __device__ float2 operator/(float2 a, float b) { return make_float2(a.x / b, a.y / b); } inline __host__ __device__ void operator/=(float2 &a, float b) { a.x /= b; a.y /= b; } inline __host__ __device__ float2 operator/(float b, float2 a) { return make_float2(b / a.x, b / a.y); } inline __host__ __device__ float3 operator/(float3 a, float3 b) { return make_float3(a.x / b.x, a.y / b.y, a.z / b.z); } inline __host__ __device__ void operator/=(float3 &a, float3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; } inline __host__ __device__ float3 operator/(float3 a, float b) { return make_float3(a.x / b, a.y / b, a.z / b); } inline __host__ __device__ void operator/=(float3 &a, float b) { a.x /= b; a.y /= b; a.z /= b; } inline __host__ __device__ float3 operator/(float b, float3 a) { return make_float3(b / a.x, b / a.y, b / a.z); } inline __host__ __device__ float4 operator/(float4 a, float4 b) { return make_float4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } inline __host__ __device__ void operator/=(float4 &a, float4 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; a.w /= b.w; } inline __host__ __device__ float4 operator/(float4 a, float b) { return make_float4(a.x / b, a.y / b, a.z / b, a.w / b); } inline __host__ __device__ void operator/=(float4 &a, float b) { a.x /= b; a.y /= b; a.z /= b; a.w /= b; } inline __host__ __device__ float4 operator/(float b, float4 a) { return make_float4(b / a.x, b / a.y, b / a.z, b / a.w); } //////////////////////////////////////////////////////////////////////////////// // min //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 fminf(float2 a, float2 b) { return make_float2(fminf(a.x,b.x), fminf(a.y,b.y)); } inline __host__ __device__ float3 fminf(float3 a, float3 b) { return make_float3(fminf(a.x,b.x), fminf(a.y,b.y), fminf(a.z,b.z)); } inline __host__ __device__ float4 fminf(float4 a, float4 b) { return make_float4(fminf(a.x,b.x), fminf(a.y,b.y), fminf(a.z,b.z), fminf(a.w,b.w)); } inline __host__ __device__ int2 min(int2 a, int2 b) { return make_int2(min(a.x,b.x), min(a.y,b.y)); } inline __host__ __device__ int3 min(int3 
a, int3 b) { return make_int3(min(a.x,b.x), min(a.y,b.y), min(a.z,b.z)); } inline __host__ __device__ int4 min(int4 a, int4 b) { return make_int4(min(a.x,b.x), min(a.y,b.y), min(a.z,b.z), min(a.w,b.w)); } inline __host__ __device__ uint2 min(uint2 a, uint2 b) { return make_uint2(min(a.x,b.x), min(a.y,b.y)); } inline __host__ __device__ uint3 min(uint3 a, uint3 b) { return make_uint3(min(a.x,b.x), min(a.y,b.y), min(a.z,b.z)); } inline __host__ __device__ uint4 min(uint4 a, uint4 b) { return make_uint4(min(a.x,b.x), min(a.y,b.y), min(a.z,b.z), min(a.w,b.w)); } //////////////////////////////////////////////////////////////////////////////// // max //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 fmaxf(float2 a, float2 b) { return make_float2(fmaxf(a.x,b.x), fmaxf(a.y,b.y)); } inline __host__ __device__ float3 fmaxf(float3 a, float3 b) { return make_float3(fmaxf(a.x,b.x), fmaxf(a.y,b.y), fmaxf(a.z,b.z)); } inline __host__ __device__ float4 fmaxf(float4 a, float4 b) { return make_float4(fmaxf(a.x,b.x), fmaxf(a.y,b.y), fmaxf(a.z,b.z), fmaxf(a.w,b.w)); } inline __host__ __device__ int2 max(int2 a, int2 b) { return make_int2(max(a.x,b.x), max(a.y,b.y)); } inline __host__ __device__ int3 max(int3 a, int3 b) { return make_int3(max(a.x,b.x), max(a.y,b.y), max(a.z,b.z)); } inline __host__ __device__ int4 max(int4 a, int4 b) { return make_int4(max(a.x,b.x), max(a.y,b.y), max(a.z,b.z), max(a.w,b.w)); } inline __host__ __device__ uint2 max(uint2 a, uint2 b) { return make_uint2(max(a.x,b.x), max(a.y,b.y)); } inline __host__ __device__ uint3 max(uint3 a, uint3 b) { return make_uint3(max(a.x,b.x), max(a.y,b.y), max(a.z,b.z)); } inline __host__ __device__ uint4 max(uint4 a, uint4 b) { return make_uint4(max(a.x,b.x), max(a.y,b.y), max(a.z,b.z), max(a.w,b.w)); } //////////////////////////////////////////////////////////////////////////////// // lerp // - linear interpolation between a and b, based on value t in [0, 1] range //////////////////////////////////////////////////////////////////////////////// inline __device__ __host__ float lerp(float a, float b, float t) { return a + t*(b-a); } inline __device__ __host__ float2 lerp(float2 a, float2 b, float t) { return a + t*(b-a); } inline __device__ __host__ float3 lerp(float3 a, float3 b, float t) { return a + t*(b-a); } inline __device__ __host__ float4 lerp(float4 a, float4 b, float t) { return a + t*(b-a); } //////////////////////////////////////////////////////////////////////////////// // clamp // - clamp the value v to be in the range [a, b] //////////////////////////////////////////////////////////////////////////////// inline __device__ __host__ float clamp(float f, float a, float b) { return fmaxf(a, fminf(f, b)); } inline __device__ __host__ int clamp(int f, int a, int b) { return max(a, min(f, b)); } inline __device__ __host__ uint clamp(uint f, uint a, uint b) { return max(a, min(f, b)); } inline __device__ __host__ float2 clamp(float2 v, float a, float b) { return make_float2(clamp(v.x, a, b), clamp(v.y, a, b)); } inline __device__ __host__ float2 clamp(float2 v, float2 a, float2 b) { return make_float2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } inline __device__ __host__ float3 clamp(float3 v, float a, float b) { return make_float3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } inline __device__ __host__ float3 clamp(float3 v, float3 a, float3 b) { return make_float3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } inline __device__ __host__ float4 
clamp(float4 v, float a, float b) { return make_float4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } inline __device__ __host__ float4 clamp(float4 v, float4 a, float4 b) { return make_float4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } inline __device__ __host__ int2 clamp(int2 v, int a, int b) { return make_int2(clamp(v.x, a, b), clamp(v.y, a, b)); } inline __device__ __host__ int2 clamp(int2 v, int2 a, int2 b) { return make_int2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } inline __device__ __host__ int3 clamp(int3 v, int a, int b) { return make_int3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } inline __device__ __host__ int3 clamp(int3 v, int3 a, int3 b) { return make_int3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } inline __device__ __host__ int4 clamp(int4 v, int a, int b) { return make_int4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } inline __device__ __host__ int4 clamp(int4 v, int4 a, int4 b) { return make_int4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } inline __device__ __host__ uint2 clamp(uint2 v, uint a, uint b) { return make_uint2(clamp(v.x, a, b), clamp(v.y, a, b)); } inline __device__ __host__ uint2 clamp(uint2 v, uint2 a, uint2 b) { return make_uint2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } inline __device__ __host__ uint3 clamp(uint3 v, uint a, uint b) { return make_uint3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b)); } inline __device__ __host__ uint3 clamp(uint3 v, uint3 a, uint3 b) { return make_uint3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z)); } inline __device__ __host__ uint4 clamp(uint4 v, uint a, uint b) { return make_uint4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b), clamp(v.w, a, b)); } inline __device__ __host__ uint4 clamp(uint4 v, uint4 a, uint4 b) { return make_uint4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y), clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w)); } //////////////////////////////////////////////////////////////////////////////// // dot product //////////////////////////////////////////////////////////////////////////////// __forceinline__ __host__ __device__ float dot(float2 a, float2 b) { return a.x * b.x + a.y * b.y; } __forceinline__ __host__ __device__ float dot(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __forceinline__ __host__ __device__ float dot(float4 a, float4 b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } __forceinline__ __host__ __device__ int dot(int2 a, int2 b) { return a.x * b.x + a.y * b.y; } __forceinline__ __host__ __device__ int dot(int3 a, int3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __forceinline__ __host__ __device__ int dot(int4 a, int4 b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } __forceinline__ __host__ __device__ uint dot(uint2 a, uint2 b) { return a.x * b.x + a.y * b.y; } __forceinline__ __host__ __device__ uint dot(uint3 a, uint3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __forceinline__ __host__ __device__ uint dot(uint4 a, uint4 b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; } //////////////////////////////////////////////////////////////////////////////// // length //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float length(float2 v) { return sqrtf(dot(v, v)); } inline __host__ __device__ float length(float3 v) { return sqrtf(dot(v, v)); } inline 
__host__ __device__ float length(float4 v) { return sqrtf(dot(v, v)); } //////////////////////////////////////////////////////////////////////////////// // normalize //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 normalize(float2 v) { float invLen = rsqrtf(dot(v, v)); return v * invLen; } inline __host__ __device__ float3 normalize(float3 v) { float invLen = rsqrtf(dot(v, v)); return v * invLen; } inline __host__ __device__ float4 normalize(float4 v) { float invLen = rsqrtf(dot(v, v)); return v * invLen; } //////////////////////////////////////////////////////////////////////////////// // floor //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 floorf(float2 v) { return make_float2(floorf(v.x), floorf(v.y)); } inline __host__ __device__ float3 floorf(float3 v) { return make_float3(floorf(v.x), floorf(v.y), floorf(v.z)); } inline __host__ __device__ float4 floorf(float4 v) { return make_float4(floorf(v.x), floorf(v.y), floorf(v.z), floorf(v.w)); } //////////////////////////////////////////////////////////////////////////////// // frac - returns the fractional portion of a scalar or each vector component //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float fracf(float v) { return v - floorf(v); } inline __host__ __device__ float2 fracf(float2 v) { return make_float2(fracf(v.x), fracf(v.y)); } inline __host__ __device__ float3 fracf(float3 v) { return make_float3(fracf(v.x), fracf(v.y), fracf(v.z)); } inline __host__ __device__ float4 fracf(float4 v) { return make_float4(fracf(v.x), fracf(v.y), fracf(v.z), fracf(v.w)); } //////////////////////////////////////////////////////////////////////////////// // fmod //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 fmodf(float2 a, float2 b) { return make_float2(fmodf(a.x, b.x), fmodf(a.y, b.y)); } inline __host__ __device__ float3 fmodf(float3 a, float3 b) { return make_float3(fmodf(a.x, b.x), fmodf(a.y, b.y), fmodf(a.z, b.z)); } inline __host__ __device__ float4 fmodf(float4 a, float4 b) { return make_float4(fmodf(a.x, b.x), fmodf(a.y, b.y), fmodf(a.z, b.z), fmodf(a.w, b.w)); } //////////////////////////////////////////////////////////////////////////////// // absolute value //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float2 fabs(float2 v) { return make_float2(fabsf(v.x), fabsf(v.y)); } inline __host__ __device__ float3 fabs(float3 v) { return make_float3(fabsf(v.x), fabsf(v.y), fabsf(v.z)); } inline __host__ __device__ float4 fabs(float4 v) { return make_float4(fabsf(v.x), fabsf(v.y), fabsf(v.z), fabsf(v.w)); } inline __host__ __device__ int2 abs(int2 v) { return make_int2(abs(v.x), abs(v.y)); } inline __host__ __device__ int3 abs(int3 v) { return make_int3(abs(v.x), abs(v.y), abs(v.z)); } inline __host__ __device__ int4 abs(int4 v) { return make_int4(abs(v.x), abs(v.y), abs(v.z), abs(v.w)); } //////////////////////////////////////////////////////////////////////////// // 4x4 matrix operations //////////////////////////////////////////////////////////////////////////// // Gets the result of transforming a point (vec.x, vec.y, vec.z, 1)^T by the // 4x4 column-major matrix mtx. 
// In other words, computes // (p.x) (mtx[ 0] mtx[ 4] mtx[ 8]) (vec.x) (mtx[12]) // (p.y) = (mtx[ 1] mtx[ 5] mtx[ 9]) (vec.y) + (mtx[13]) // (p.z) (mtx[ 2] mtx[ 6] mtx[10]) (vec.z) (mtx[14]) inline __host__ __device__ float3 mmult(float* mtx, float3 vec) { float3 p; p.x = vec.x * mtx[0] + vec.y * mtx[4] + vec.z * mtx[8] + mtx[12]; p.y = vec.x * mtx[1] + vec.y * mtx[5] + vec.z * mtx[9] + mtx[13]; p.z = vec.x * mtx[2] + vec.y * mtx[6] + vec.z * mtx[10] + mtx[14]; return p; } //////////////////////////////////////////////////////////////////////////////// // reflect // - returns reflection of incident ray I around surface normal N // - N should be normalized, reflected vector's length is equal to length of I //////////////////////////////////////////////////////////////////////////////// inline __host__ __device__ float3 reflect(float3 i, float3 n) { return i - 2.0f * n * dot(n,i); } //////////////////////////////////////////////////////////////////////////////// // cross product //////////////////////////////////////////////////////////////////////////////// __forceinline__ __host__ __device__ float3 cross(float3 a, float3 b) { return make_float3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x); } //////////////////////////////////////////////////////////////////////////////// // smoothstep // - returns 0 if x < a // - returns 1 if x > b // - otherwise returns smooth interpolation between 0 and 1 based on x //////////////////////////////////////////////////////////////////////////////// inline __device__ __host__ float smoothstep(float a, float b, float x) { float y = clamp((x - a) / (b - a), 0.0f, 1.0f); return (y*y*(3.0f - (2.0f*y))); } inline __device__ __host__ float2 smoothstep(float2 a, float2 b, float2 x) { float2 y = clamp((x - a) / (b - a), 0.0f, 1.0f); return (y*y*(make_float2(3.0f) - (make_float2(2.0f)*y))); } inline __device__ __host__ float3 smoothstep(float3 a, float3 b, float3 x) { float3 y = clamp((x - a) / (b - a), 0.0f, 1.0f); return (y*y*(make_float3(3.0f) - (make_float3(2.0f)*y))); } inline __device__ __host__ float4 smoothstep(float4 a, float4 b, float4 x) { float4 y = clamp((x - a) / (b - a), 0.0f, 1.0f); return (y*y*(make_float4(3.0f) - (make_float4(2.0f)*y))); } inline __host__ __device__ float3 fabs3 ( float3 a ) { return make_float3 ( fabsf(a.x), fabsf(a.y), fabsf(a.z) ); } inline __host__ __device__ float3 floor3 ( float3 a ) { return make_float3 ( floorf(a.x), floorf(a.y), floorf(a.z) ); } inline __host__ __device__ int3 iabs3 ( int3 a ) { return make_int3 ( abs(a.x), abs(a.y), abs(a.z) ); } inline __host__ __device__ int3 isign3 ( float3 a ) { return make_int3 ( (a.x > 0) ? 1 : -1, (a.y > 0) ? 1 : -1, (a.z > 0) ? 
1 : -1 ); //return make_int3 ( copysignf(1,a.x), copysignf(1,a.y), copysignf(1,a.z) ); } inline __host__ __device__ float3 fyzx ( float3 a ) { return make_float3 ( a.y, a.z, a.x ); } inline __host__ __device__ float3 fzxy ( float3 a ) { return make_float3 ( a.z, a.x, a.y ); } inline __host__ __device__ float3 fxyz ( float4 a ) { return make_float3 ( a.x, a.y, a.z ); } inline __host__ __device__ bool3 make_bool3 ( uchar a, uchar b, uchar c ) { return make_uchar3( a, b, c ); } inline __host__ __device__ bool3 lessThan3 ( float3 a, float3 b ) { return make_bool3 ( uchar(a.x < b.x), uchar(a.y < b.y), uchar(a.z < b.z) ); } inline __host__ __device__ bool3 lessThanEqual3 ( float3 a, float3 b ) { return make_bool3 ( uchar(a.x <= b.x), uchar(a.y <= b.y), uchar(a.z <= b.z) ); } // Matrix operations inline __host__ __device__ float3 mul4x ( float3 a, float* mtx ) { return make_float3 ( a.x*mtx[0] + a.y*mtx[4] + a.z*mtx[8] + mtx[12], a.x*mtx[1] + a.y*mtx[5] + a.z*mtx[9] + mtx[13], a.x*mtx[2] + a.y*mtx[6] + a.z*mtx[10] + mtx[14] ); } // Random numbers // Generate random unsigned int in [0, 2^24) static __host__ __device__ __inline__ unsigned int lcg(unsigned int &prev) { const unsigned int LCG_A = 1664525u; const unsigned int LCG_C = 1013904223u; prev = (LCG_A * prev + LCG_C); return prev & 0x00FFFFFF; } static __host__ __device__ __inline__ unsigned int lcg2(unsigned int &prev) { prev = (prev*8121 + 28411) % 134456; return prev; } // Generate random float in [0, 1) static __host__ __device__ __inline__ float rnd(unsigned int &prev) { return ((float) lcg(prev) / (float) 0x01000000); } static __host__ __device__ float3 lerp3 ( float3 a, float3 b, float t ) { return make_float3 ( a.x+t*(b.x-a.x), a.y+t*(b.y-a.y), a.z+t*(b.z-a.z) ); } static __host__ __device__ float3 lerp3 ( float4 a, float4 b, float t ) { return make_float3 ( a.x+t*(b.x-a.x), a.y+t*(b.y-a.y), a.z+t*(b.z-a.z) ); } static __host__ __device__ float4 lerp4 ( float4 a, float4 b, float t ) { return make_float4 ( a.x+t*(b.x-a.x), a.y+t*(b.y-a.y), a.z+t*(b.z-a.z), a.w+t*(b.w-a.w) ); } #endif
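// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original header). Shows how the
// float3 helpers above compose: normalize a view direction, reflect it about a
// surface normal, and blend the result with lerp/smoothstep. The function name
// and constants are assumptions; the sketch sits outside the original include
// guard, so it carries its own.
// ---------------------------------------------------------------------------
#ifndef HELPER_MATH_USAGE_SKETCH
#define HELPER_MATH_USAGE_SKETCH
inline __host__ __device__ float3 shade_example(float3 normal, float3 view)
{
    float3 n    = normalize(normal);
    float3 refl = reflect(normalize(view), n);  // reflected ray, unit length
    // Alignment of the reflection with "up", clamped to [0,1].
    float  up   = clamp(dot(refl, make_float3(0.0f, 1.0f, 0.0f)), 0.0f, 1.0f);
    // Blend between a dim base value and the reflected direction, smoothing
    // the blend factor with smoothstep.
    return lerp(make_float3(0.1f), refl, smoothstep(0.0f, 1.0f, up));
}
#endif // HELPER_MATH_USAGE_SKETCH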
#if defined(USE_ROCM) #include <cfloat> #endif using caffe2::utils::RotatedBox; namespace caffe2 { namespace { __global__ void GeneratePreNMSUprightBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const float4* d_anchors, const int H, const int W, const int A, const float feat_stride, const float min_size, const float* d_img_info_vec, const int num_images, const float bbox_xform_clip, const bool legacy_plus_one, float4* d_out_boxes, const int prenms_nboxes, // leading dimension of out_boxes float* d_inout_scores, char* d_boxes_keep_flags) { const int K = H * W; const int KA = K * A; CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) { // box_conv_index : # of the same box, but indexed in // the scores from the conv layer, of shape (A,H,W) // the num_images dimension was already removed // box_conv_index = a*K + h*W + w const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox]; // We want to decompose box_conv_index in (a,h,w) // such as box_conv_index = a*K + h*W + w // (avoiding modulos in the process) int remaining = box_conv_index; const int dA = K; // stride of A const int a = remaining / dA; remaining -= a * dA; const int dH = W; // stride of H const int h = remaining / dH; remaining -= h * dH; const int w = remaining; // dW = 1 // Loading the anchor a // float4 is a struct with float x,y,z,w const float4 anchor = d_anchors[a]; // x1,y1,x2,y2 :coordinates of anchor a, shifted for position (h,w) const float shift_w = feat_stride * w; float x1 = shift_w + anchor.x; float x2 = shift_w + anchor.z; const float shift_h = feat_stride * h; float y1 = shift_h + anchor.y; float y2 = shift_h + anchor.w; // TODO use fast math when possible // Deltas for that box // Deltas of shape (num_images,4*A,K) // We're going to compute 4 scattered reads // better than the alternative, ie transposing the complete deltas // array first int deltas_idx = image_index * (KA * 4) + a * 4 * K + h * W + w; const float dx = d_bbox_deltas[deltas_idx]; // Stride of K between each dimension deltas_idx += K; const float dy = d_bbox_deltas[deltas_idx]; deltas_idx += K; float dw = d_bbox_deltas[deltas_idx]; deltas_idx += K; float dh = d_bbox_deltas[deltas_idx]; // Upper bound on dw,dh dw = fmin(dw, bbox_xform_clip); dh = fmin(dh, bbox_xform_clip); // Applying the deltas float width = x2 - x1 + float(int(legacy_plus_one)); const float ctr_x = x1 + 0.5f * width; const float pred_ctr_x = ctr_x + width * dx; // TODO fuse madd const float pred_w = width * expf(dw); x1 = pred_ctr_x - 0.5f * pred_w; x2 = pred_ctr_x + 0.5f * pred_w - float(int(legacy_plus_one)); float height = y2 - y1 + float(int(legacy_plus_one)); const float ctr_y = y1 + 0.5f * height; const float pred_ctr_y = ctr_y + height * dy; const float pred_h = height * expf(dh); y1 = pred_ctr_y - 0.5f * pred_h; y2 = pred_ctr_y + 0.5f * pred_h - float(int(legacy_plus_one)); // Clipping box to image const float img_height = d_img_info_vec[3 * image_index + 0]; const float img_width = d_img_info_vec[3 * image_index + 1]; const float min_size_scaled = min_size * d_img_info_vec[3 * image_index + 2]; x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f); y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f); x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f); y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f); // Filter boxes // Removing boxes with one dim < min_size // (center of box is in image, because of previous step) width 
= x2 - x1 + float(int(legacy_plus_one)); // may have changed height = y2 - y1 + float(int(legacy_plus_one)); bool keep_box = fmin(width, height) >= min_size_scaled; // We are not deleting the box right now even if !keep_box // we want to keep the relative order of the elements stable // we'll do it in such a way later // d_boxes_keep_flags size: (num_images,prenms_nboxes) // d_out_boxes size: (num_images,prenms_nboxes) const int out_index = image_index * prenms_nboxes + ibox; d_boxes_keep_flags[out_index] = keep_box; d_out_boxes[out_index] = {x1, y1, x2, y2}; // d_inout_scores size: (num_images,KA) if (!keep_box) d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS } } __global__ void GeneratePreNMSRotatedBoxesKernel( const int* d_sorted_scores_keys, const int nboxes_to_generate, const float* d_bbox_deltas, const RotatedBox* d_anchors, const int H, const int W, const int A, const float feat_stride, const float min_size, const float* d_img_info_vec, const int num_images, const float bbox_xform_clip, const bool legacy_plus_one, const bool angle_bound_on, const int angle_bound_lo, const int angle_bound_hi, const bool clip_angle_thresh, RotatedBox* d_out_boxes, const int prenms_nboxes, // leading dimension of out_boxes float* d_inout_scores, char* d_boxes_keep_flags) { constexpr float PI = 3.14159265358979323846; const int K = H * W; const int KA = K * A; CUDA_2D_KERNEL_LOOP(ibox, nboxes_to_generate, image_index, num_images) { // box_conv_index : # of the same box, but indexed in // the scores from the conv layer, of shape (A,H,W) // the num_images dimension was already removed // box_conv_index = a*K + h*W + w const int box_conv_index = d_sorted_scores_keys[image_index * KA + ibox]; // We want to decompose box_conv_index in (a,h,w) // such as box_conv_index = a*K + h*W + w // (avoiding modulos in the process) int remaining = box_conv_index; const int dA = K; // stride of A const int a = remaining / dA; remaining -= a * dA; const int dH = W; // stride of H const int h = remaining / dH; remaining -= h * dH; const int w = remaining; // dW = 1 // Loading the anchor a and applying shifts. // RotatedBox in [ctr_x, ctr_y, w, h, angle] format. // Zero shift for width, height and angle. RotatedBox box = d_anchors[a]; box.x_ctr += feat_stride * w; // x_ctr shifted for w box.y_ctr += feat_stride * h; // y_ctr shifted for h // TODO use fast math when possible // Deltas for that box // Deltas of shape (num_images,5*A,K) // We're going to compute 5 scattered reads // better than the alternative, ie transposing the complete deltas // array first int deltas_idx = image_index * (KA * 5) + a * 5 * K + h * W + w; // Stride of K between each dimension RotatedBox delta; delta.x_ctr = d_bbox_deltas[deltas_idx + K * 0]; delta.y_ctr = d_bbox_deltas[deltas_idx + K * 1]; delta.w = d_bbox_deltas[deltas_idx + K * 2]; delta.h = d_bbox_deltas[deltas_idx + K * 3]; delta.a = d_bbox_deltas[deltas_idx + K * 4]; // Upper bound on dw,dh delta.w = fmin(delta.w, bbox_xform_clip); delta.h = fmin(delta.h, bbox_xform_clip); // Convert back to degrees delta.a *= 180.f / PI; // Applying the deltas box.x_ctr += delta.x_ctr * box.w; box.y_ctr += delta.y_ctr * box.h; box.w *= expf(delta.w); box.h *= expf(delta.h); box.a += delta.a; if (angle_bound_on) { // Normalize angle to be within [angle_bound_lo, angle_bound_hi]. // Deltas are guaranteed to be <= period / 2 while computing training // targets by bbox_transform_inv. 
const float period = angle_bound_hi - angle_bound_lo; // CAFFE_ENFORCE(period > 0 && period % 180 == 0); if (box.a < angle_bound_lo) { box.a += period; } else if (box.a > angle_bound_hi) { box.a -= period; } } // Clipping box to image. // Only clip boxes that are almost upright (with a tolerance of // clip_angle_thresh) for backward compatibility with horizontal boxes. const float img_height = d_img_info_vec[3 * image_index + 0]; const float img_width = d_img_info_vec[3 * image_index + 1]; const float min_size_scaled = min_size * d_img_info_vec[3 * image_index + 2]; if (fabs(box.a) <= clip_angle_thresh) { // Convert from [x_ctr, y_ctr, w, h] to [x1, y1, x2, y2] float x1 = box.x_ctr - (box.w - float(int(legacy_plus_one))) / 2.f; float y1 = box.y_ctr - (box.h - float(int(legacy_plus_one))) / 2.f; float x2 = x1 + box.w - float(int(legacy_plus_one)); float y2 = y1 + box.h - float(int(legacy_plus_one)); // Clip x1 = fmax(fmin(x1, img_width - float(int(legacy_plus_one))), 0.0f); y1 = fmax(fmin(y1, img_height - float(int(legacy_plus_one))), 0.0f); x2 = fmax(fmin(x2, img_width - float(int(legacy_plus_one))), 0.0f); y2 = fmax(fmin(y2, img_height - float(int(legacy_plus_one))), 0.0f); // Convert back to [x_ctr, y_ctr, w, h] box.x_ctr = (x1 + x2) / 2.f; box.y_ctr = (y1 + y2) / 2.f; box.w = x2 - x1 + float(int(legacy_plus_one)); box.h = y2 - y1 + float(int(legacy_plus_one)); } // Filter boxes. // Removing boxes with one dim < min_size or center outside the image. bool keep_box = (fmin(box.w, box.h) >= min_size_scaled) && (box.x_ctr < img_width) && (box.y_ctr < img_height); // We are not deleting the box right now even if !keep_box // we want to keep the relative order of the elements stable // we'll do it in such a way later // d_boxes_keep_flags size: (num_images,prenms_nboxes) // d_out_boxes size: (num_images,prenms_nboxes) const int out_index = image_index * prenms_nboxes + ibox; d_boxes_keep_flags[out_index] = keep_box; d_out_boxes[out_index] = box; // d_inout_scores size: (num_images,KA) if (!keep_box) { d_inout_scores[image_index * KA + ibox] = FLT_MIN; // for NMS } } } __global__ void WriteUprightBoxesOutput( const float4* d_image_boxes, const float* d_image_scores, const int* d_image_boxes_keep_list, const int nboxes, const int image_index, float* d_image_out_rois, float* d_image_out_rois_probs) { CUDA_1D_KERNEL_LOOP(i, nboxes) { const int ibox = d_image_boxes_keep_list[i]; const float4 box = d_image_boxes[ibox]; const float score = d_image_scores[ibox]; // Scattered memory accesses // postnms_nboxes is small anyway d_image_out_rois_probs[i] = score; const int base_idx = 5 * i; d_image_out_rois[base_idx + 0] = image_index; d_image_out_rois[base_idx + 1] = box.x; d_image_out_rois[base_idx + 2] = box.y; d_image_out_rois[base_idx + 3] = box.z; d_image_out_rois[base_idx + 4] = box.w; } } __global__ void WriteRotatedBoxesOutput( const RotatedBox* d_image_boxes, const float* d_image_scores, const int* d_image_boxes_keep_list, const int nboxes, const int image_index, float* d_image_out_rois, float* d_image_out_rois_probs) { CUDA_1D_KERNEL_LOOP(i, nboxes) { const int ibox = d_image_boxes_keep_list[i]; const RotatedBox box = d_image_boxes[ibox]; const float score = d_image_scores[ibox]; // Scattered memory accesses // postnms_nboxes is small anyway d_image_out_rois_probs[i] = score; const int base_idx = 6 * i; d_image_out_rois[base_idx + 0] = image_index; d_image_out_rois[base_idx + 1] = box.x_ctr; d_image_out_rois[base_idx + 2] = box.y_ctr; d_image_out_rois[base_idx + 3] = box.w; 
d_image_out_rois[base_idx + 4] = box.h; d_image_out_rois[base_idx + 5] = box.a; } } __global__ void InitializeDataKernel( const int num_images, const int KA, int* d_image_offsets, int* d_boxes_keys_iota) { CUDA_2D_KERNEL_LOOP(box_idx, KA, img_idx, num_images) { d_boxes_keys_iota[img_idx * KA + box_idx] = box_idx; // One 1D line sets the 1D data if (box_idx == 0) { d_image_offsets[img_idx] = KA * img_idx; // One thread sets the last+1 offset if (img_idx == 0) d_image_offsets[num_images] = KA * num_images; } } } } // namespace template <> bool GenerateProposalsOp<CUDAContext>::RunOnDevice() { const auto& scores = Input(0); const auto& bbox_deltas = Input(1); const auto& im_info_tensor = Input(2); const auto& anchors = Input(3); auto* out_rois = Output(0); auto* out_rois_probs = Output(1); CAFFE_ENFORCE_EQ(scores.ndim(), 4, scores.ndim()); CAFFE_ENFORCE(scores.template IsType<float>(), scores.meta().name()); const auto num_images = scores.dim(0); const auto A = scores.dim(1); const auto H = scores.dim(2); const auto W = scores.dim(3); const auto box_dim = anchors.dim(1); CAFFE_ENFORCE(box_dim == 4 || box_dim == 5); const int K = H * W; const int conv_layer_nboxes = K * A; // Getting data members ready // We'll sort the scores // we want to remember their original indexes, // ie their indexes in the tensor of shape (num_images,A,K) // from the conv layer // each row of d_conv_layer_indexes is at first initialized to 1..A*K dev_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes); int* d_conv_layer_indexes = dev_conv_layer_indexes_.template mutable_data<int>(); // d_image_offset[i] = i*K*A for i from 1 to num_images+1 // Used by the segmented sort to only sort scores within one image dev_image_offset_.Resize(num_images + 1); int* d_image_offset = dev_image_offset_.template mutable_data<int>(); // The following calls to CUB primitives do nothing // (because the first arg is nullptr) // except setting cub_*_temp_storage_bytes size_t cub_sort_temp_storage_bytes = 0; float* flt_ptr = nullptr; int* int_ptr = nullptr; cub::DeviceSegmentedRadixSort::SortPairsDescending( nullptr, cub_sort_temp_storage_bytes, flt_ptr, flt_ptr, int_ptr, int_ptr, num_images * conv_layer_nboxes, num_images, int_ptr, int_ptr, 0, 8 * sizeof(float), // sort all bits context_.cuda_stream()); // Allocate temporary storage for CUB dev_cub_sort_buffer_.Resize(cub_sort_temp_storage_bytes); void* d_cub_sort_temp_storage = dev_cub_sort_buffer_.template mutable_data<char>(); size_t cub_select_temp_storage_bytes = 0; char* char_ptr = nullptr; cub::DeviceSelect::Flagged( nullptr, cub_select_temp_storage_bytes, flt_ptr, char_ptr, flt_ptr, int_ptr, K * A, context_.cuda_stream()); // Allocate temporary storage for CUB dev_cub_select_buffer_.Resize(cub_select_temp_storage_bytes); void* d_cub_select_temp_storage = dev_cub_select_buffer_.template mutable_data<char>(); // Initialize : // - each row of dev_conv_layer_indexes to 1..K*A // - each d_nboxes to 0 // - d_image_offset[i] = K*A*i for i 1..num_images+1 // 2D grid InitializeDataKernel<<< (CAFFE_GET_BLOCKS(A * K), num_images), CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1 0, context_.cuda_stream()>>>( num_images, conv_layer_nboxes, d_image_offset, d_conv_layer_indexes); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Sorting input scores dev_sorted_conv_layer_indexes_.Resize(num_images, conv_layer_nboxes); dev_sorted_scores_.Resize(num_images, conv_layer_nboxes); const float* d_in_scores = scores.data<float>(); int* d_sorted_conv_layer_indexes = dev_sorted_conv_layer_indexes_.template 
mutable_data<int>(); float* d_sorted_scores = dev_sorted_scores_.template mutable_data<float>(); ; cub::DeviceSegmentedRadixSort::SortPairsDescending( d_cub_sort_temp_storage, cub_sort_temp_storage_bytes, d_in_scores, d_sorted_scores, d_conv_layer_indexes, d_sorted_conv_layer_indexes, num_images * conv_layer_nboxes, num_images, d_image_offset, d_image_offset + 1, 0, 8 * sizeof(float), // sort all bits context_.cuda_stream()); // Keeping only the topN pre_nms const int nboxes_to_generate = std::min(conv_layer_nboxes, rpn_pre_nms_topN_); // Generating the boxes associated to the topN pre_nms scores dev_boxes_.Resize(num_images, box_dim * nboxes_to_generate); dev_boxes_keep_flags_.Resize(num_images, nboxes_to_generate); const float* d_bbox_deltas = bbox_deltas.data<float>(); const float* d_anchors = anchors.data<float>(); const float* d_im_info_vec = im_info_tensor.data<float>(); float* d_boxes = dev_boxes_.template mutable_data<float>(); ; char* d_boxes_keep_flags = dev_boxes_keep_flags_.template mutable_data<char>(); if (box_dim == 4) { GeneratePreNMSUprightBoxesKernel<<< (CAFFE_GET_BLOCKS(nboxes_to_generate), num_images), CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1 0, context_.cuda_stream()>>>( d_sorted_conv_layer_indexes, nboxes_to_generate, d_bbox_deltas, reinterpret_cast<const float4*>(d_anchors), H, W, A, feat_stride_, rpn_min_size_, d_im_info_vec, num_images, utils::BBOX_XFORM_CLIP_DEFAULT, legacy_plus_one_, reinterpret_cast<float4*>(d_boxes), nboxes_to_generate, d_sorted_scores, d_boxes_keep_flags); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { GeneratePreNMSRotatedBoxesKernel<<< (CAFFE_GET_BLOCKS(nboxes_to_generate), num_images), CAFFE_CUDA_NUM_THREADS, // blockDim.y == 1 0, context_.cuda_stream()>>>( d_sorted_conv_layer_indexes, nboxes_to_generate, d_bbox_deltas, reinterpret_cast<const RotatedBox*>(d_anchors), H, W, A, feat_stride_, rpn_min_size_, d_im_info_vec, num_images, utils::BBOX_XFORM_CLIP_DEFAULT, legacy_plus_one_, angle_bound_on_, angle_bound_lo_, angle_bound_hi_, clip_angle_thresh_, reinterpret_cast<RotatedBox*>(d_boxes), nboxes_to_generate, d_sorted_scores, d_boxes_keep_flags); C10_CUDA_KERNEL_LAUNCH_CHECK(); } const int nboxes_generated = nboxes_to_generate; dev_image_prenms_boxes_.Resize(box_dim * nboxes_generated); float* d_image_prenms_boxes = dev_image_prenms_boxes_.template mutable_data<float>(); dev_image_prenms_scores_.Resize(nboxes_generated); float* d_image_prenms_scores = dev_image_prenms_scores_.template mutable_data<float>(); dev_image_boxes_keep_list_.Resize(nboxes_generated); int* d_image_boxes_keep_list = dev_image_boxes_keep_list_.template mutable_data<int>(); const int roi_cols = box_dim + 1; const int max_postnms_nboxes = std::min(nboxes_generated, rpn_post_nms_topN_); dev_postnms_rois_.Resize(roi_cols * num_images * max_postnms_nboxes); dev_postnms_rois_probs_.Resize(num_images * max_postnms_nboxes); float* d_postnms_rois = dev_postnms_rois_.template mutable_data<float>(); float* d_postnms_rois_probs = dev_postnms_rois_probs_.template mutable_data<float>(); dev_prenms_nboxes_.Resize(num_images); host_prenms_nboxes_.Resize(num_images); int* d_prenms_nboxes = dev_prenms_nboxes_.template mutable_data<int>(); int* h_prenms_nboxes = host_prenms_nboxes_.template mutable_data<int>(); int nrois_in_output = 0; for (int image_index = 0; image_index < num_images; ++image_index) { // Sub matrices for current image const float* d_image_boxes = &d_boxes[image_index * nboxes_generated * box_dim]; const float* d_image_sorted_scores = &d_sorted_scores[image_index * K * A]; 
char* d_image_boxes_keep_flags = &d_boxes_keep_flags[image_index * nboxes_generated]; float* d_image_postnms_rois = &d_postnms_rois[roi_cols * nrois_in_output]; float* d_image_postnms_rois_probs = &d_postnms_rois_probs[nrois_in_output]; // Moving valid boxes (ie the ones with d_boxes_keep_flags[ibox] == true) // to the output tensors if (box_dim == 4) { cub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, reinterpret_cast<const float4*>(d_image_boxes), d_image_boxes_keep_flags, reinterpret_cast<float4*>(d_image_prenms_boxes), d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); } else { cub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, reinterpret_cast<const RotatedBox*>(d_image_boxes), d_image_boxes_keep_flags, reinterpret_cast<RotatedBox*>(d_image_prenms_boxes), d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); } cub::DeviceSelect::Flagged( d_cub_select_temp_storage, cub_select_temp_storage_bytes, d_image_sorted_scores, d_image_boxes_keep_flags, d_image_prenms_scores, d_prenms_nboxes, nboxes_generated, context_.cuda_stream()); host_prenms_nboxes_.CopyFrom(dev_prenms_nboxes_); // We know prenms_boxes <= topN_prenms, because nboxes_generated <= // topN_prenms. Calling NMS on the generated boxes const int prenms_nboxes = *h_prenms_nboxes; int nkeep; utils::nms_gpu( d_image_prenms_boxes, prenms_nboxes, rpn_nms_thresh_, legacy_plus_one_, d_image_boxes_keep_list, &nkeep, dev_nms_mask_, host_nms_mask_, &context_, box_dim); // All operations done after previous sort were keeping the relative order // of the elements the elements are still sorted keep topN <=> truncate the // array const int postnms_nboxes = std::min(nkeep, rpn_post_nms_topN_); // Moving the out boxes to the output tensors, // adding the image_index dimension on the fly if (box_dim == 4) { WriteUprightBoxesOutput<<< CAFFE_GET_BLOCKS(postnms_nboxes), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( reinterpret_cast<const float4*>(d_image_prenms_boxes), d_image_prenms_scores, d_image_boxes_keep_list, postnms_nboxes, image_index, d_image_postnms_rois, d_image_postnms_rois_probs); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { WriteRotatedBoxesOutput<<< CAFFE_GET_BLOCKS(postnms_nboxes), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( reinterpret_cast<const RotatedBox*>(d_image_prenms_boxes), d_image_prenms_scores, d_image_boxes_keep_list, postnms_nboxes, image_index, d_image_postnms_rois, d_image_postnms_rois_probs); C10_CUDA_KERNEL_LAUNCH_CHECK(); } nrois_in_output += postnms_nboxes; } // Using a buffer because we cannot call ShrinkTo out_rois->Resize(nrois_in_output, roi_cols); out_rois_probs->Resize(nrois_in_output); float* d_out_rois = out_rois->template mutable_data<float>(); float* d_out_rois_probs = out_rois_probs->template mutable_data<float>(); CUDA_CHECK(cudaMemcpyAsync( d_out_rois, d_postnms_rois, nrois_in_output * roi_cols * sizeof(float), cudaMemcpyDeviceToDevice, context_.cuda_stream())); CUDA_CHECK(cudaMemcpyAsync( d_out_rois_probs, d_postnms_rois_probs, nrois_in_output * sizeof(float), cudaMemcpyDeviceToDevice, context_.cuda_stream())); return true; } REGISTER_CUDA_OPERATOR(GenerateProposals, GenerateProposalsOp<CUDAContext>); } // namespace caffe2 C10_EXPORT_CAFFE2_OP_TO_C10_CUDA( GenerateProposals, caffe2::GenerateProposalsOp<caffe2::CUDAContext>);
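// Hedged sketch (not part of the operator above): the cub::DeviceSelect::Flagged calls
// in this op rely on a temp-storage buffer whose size was queried in an earlier pass.
// CUB's usual two-phase pattern is shown below on standalone buffers; every name here
// (d_in, d_flags, d_out, d_num_selected, ...) is illustrative, not taken from Caffe2.
#include <cub/cub.cuh>
#include <cuda_runtime.h>

void flagged_compaction_sketch(const float* d_in, const char* d_flags,
                               float* d_out, int* d_num_selected,
                               int num_items, cudaStream_t stream) {
  // Phase 1: a null temp-storage pointer makes CUB only report the bytes it needs.
  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                             d_in, d_flags, d_out, d_num_selected,
                             num_items, stream);
  // Phase 2: allocate the reported amount and run the actual stream compaction.
  cudaMalloc(&d_temp_storage, temp_storage_bytes);
  cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                             d_in, d_flags, d_out, d_num_selected,
                             num_items, stream);
  cudaFree(d_temp_storage);
}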
the_stack
namespace kfusion { namespace device { texture<ushort, 2> dprev_tex; texture<Normal, 2> nprev_tex; texture<Point, 2> vprev_tex; struct ComputeIcpHelper::Policy { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y, B = 6, COLS = 6, ROWS = 6, DIAG = 6, UPPER_DIAG_MAT = (COLS * ROWS - DIAG) / 2 + DIAG, TOTAL = UPPER_DIAG_MAT + B, FINAL_REDUCE_CTA_SIZE = 256, FINAL_REDUCE_STRIDE = FINAL_REDUCE_CTA_SIZE }; }; __kf_device__ float2 ComputeIcpHelper::proj(const float3& p) const { float2 coo; coo.x = __fmaf_rn(f.x, __fdividef(p.x, p.z), c.x); coo.y = __fmaf_rn(f.y, __fdividef(p.y, p.z), c.y); return coo; } __kf_device__ float3 ComputeIcpHelper::reproj(float u, float v, float z) const { float x = z * (u - c.x) * finv.x; float y = z * (v - c.y) * finv.y; return make_float3(x, y, z); } #if defined USE_DEPTH __kf_device__ int ComputeIcpHelper::find_coresp(int x, int y, float3& nd, float3& d, float3& s) const { int src_z = dcurr(y, x); if (src_z == 0) return 40; s = aff * reproj(x, y, src_z * 0.001f); float2 coo = proj(s); if (s.z <= 0 || coo.x < 0 || coo.y < 0 || coo.x >= cols || coo.y >= rows) return 80; int dst_z = tex2D(dprev_tex, coo.x, coo.y); if (dst_z == 0) return 120; d = reproj(coo.x, coo.y, dst_z * 0.001f); float dist2 = norm_sqr(s - d); if (dist2 > dist2_thres) return 160; float3 ns = aff.R * tr(ncurr(y, x)); nd = tr(tex2D(nprev_tex, coo.x, coo.y)); float cosine = fabs(dot(ns, nd)); if (cosine < min_cosine) return 200; return 0; } #else __kf_device__ int ComputeIcpHelper::find_coresp(int x, int y, float3& nd, float3& d, float3& s) const { s = tr(vcurr(y, x)); if (isnan(s.x)) return 40; s = aff * s; float2 coo = proj(s); if (s.z <= 0 || coo.x < 0 || coo.y < 0 || coo.x >= cols || coo.y >= rows) return 80; d = tr(tex2D(vprev_tex, coo.x, coo.y)); if (isnan(d.x)) return 120; float dist2 = norm_sqr(s - d); if (dist2 > dist2_thres) return 160; float3 ns = aff.R * tr(ncurr(y, x)); nd = tr(tex2D(nprev_tex, coo.x, coo.y)); float cosine = fabs(dot(ns, nd)); if (cosine < min_cosine) return 200; return 0; } #endif __kf_device__ void ComputeIcpHelper::partial_reduce(const float row[7], PtrStep<float>& partial_buf) const { volatile __shared__ float smem[Policy::CTA_SIZE]; int tid = Block::flattenedThreadId(); float *pos = partial_buf.data + blockIdx.x + gridDim.x * blockIdx.y; size_t step = partial_buf.step / sizeof(float); #define STOR \ if (tid == 0) \ { \ *pos = smem[0]; \ pos += step; \ } __syncthreads(); smem[tid] = row[0] * row[0]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[0] * row[1]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[0] * row[2]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[0] * row[3]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[0] * row[4]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[0] * row[5]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[0] * row[6]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR //////////////////////////////// __syncthreads(); smem[tid] = row[1] * row[1]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[1] * row[2]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[1] * 
row[3]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[1] * row[4]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[1] * row[5]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[1] * row[6]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR //////////////////////////////// __syncthreads(); smem[tid] = row[2] * row[2]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[2] * row[3]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[2] * row[4]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[2] * row[5]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[2] * row[6]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR //////////////////////////////// __syncthreads(); smem[tid] = row[3] * row[3]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[3] * row[4]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[3] * row[5]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[3] * row[6]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR /////////////////////////////////////////////////// __syncthreads(); smem[tid] = row[4] * row[4]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[4] * row[5]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[4] * row[6]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR /////////////////////////////////////////////////// __syncthreads(); smem[tid] = row[5] * row[5]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR __syncthreads(); smem[tid] = row[5] * row[6]; __syncthreads(); Block::reduce<Policy::CTA_SIZE>(smem, plus()); STOR } __global__ void icp_helper_kernel(const ComputeIcpHelper helper, PtrStep<float> partial_buf) { int x = threadIdx.x + blockIdx.x * ComputeIcpHelper::Policy::CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * ComputeIcpHelper::Policy::CTA_SIZE_Y; float3 n, d, s; int filtered = (x < helper.cols && y < helper.rows) ? 
helper.find_coresp(x, y, n, d, s) : 1; //if (x < helper.cols && y < helper.rows) mask(y, x) = filtered; float row[7]; if (!filtered) { *(float3*) &row[0] = cross(s, n); *(float3*) &row[3] = n; row[6] = dot(n, d - s); } else row[0] = row[1] = row[2] = row[3] = row[4] = row[5] = row[6] = 0.f; helper.partial_reduce(row, partial_buf); } __global__ void icp_final_reduce_kernel(const PtrStep<float> partial_buf, const int length, float* final_buf) { const float *beg = partial_buf.ptr(blockIdx.x); const float *end = beg + length; int tid = threadIdx.x; float sum = 0.f; for (const float *t = beg + tid; t < end; t += ComputeIcpHelper::Policy::FINAL_REDUCE_STRIDE) sum += *t; __shared__ float smem[ComputeIcpHelper::Policy::FINAL_REDUCE_CTA_SIZE]; smem[tid] = sum; __syncthreads(); Block::reduce<ComputeIcpHelper::Policy::FINAL_REDUCE_CTA_SIZE>(smem, plus()); if (tid == 0) final_buf[blockIdx.x] = smem[0]; } } } void kfusion::device::ComputeIcpHelper::operator()(const Depth& dprev, const Normals& nprev, DeviceArray2D<float>& buffer, float* data, cudaStream_t s) { dprev_tex.filterMode = cudaFilterModePoint; nprev_tex.filterMode = cudaFilterModePoint; TextureBinder dprev_binder(dprev, dprev_tex); TextureBinder nprev_binder(nprev, nprev_tex); dim3 block(Policy::CTA_SIZE_X, Policy::CTA_SIZE_Y); dim3 grid(divUp((int) cols, block.x), divUp((int) rows, block.y)); int partials_count = (int) (grid.x * grid.y); allocate_buffer(buffer, partials_count); icp_helper_kernel<<<grid, block, 0, s>>>(*this, buffer); cudaSafeCall(cudaGetLastError()); int b = Policy::FINAL_REDUCE_CTA_SIZE; int g = Policy::TOTAL; icp_final_reduce_kernel<<<g, b, 0, s>>>(buffer, partials_count, buffer.ptr(Policy::TOTAL)); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaMemcpyAsync(data, buffer.ptr(Policy::TOTAL), Policy::TOTAL * sizeof(float), cudaMemcpyDeviceToHost, s)); cudaSafeCall(cudaGetLastError()); } void kfusion::device::ComputeIcpHelper::operator()(const Points& vprev, const Normals& nprev, DeviceArray2D<float>& buffer, float* data, cudaStream_t s) { dprev_tex.filterMode = cudaFilterModePoint; nprev_tex.filterMode = cudaFilterModePoint; TextureBinder vprev_binder(vprev, vprev_tex); TextureBinder nprev_binder(nprev, nprev_tex); dim3 block(Policy::CTA_SIZE_X, Policy::CTA_SIZE_Y); dim3 grid(divUp((int) cols, block.x), divUp((int) rows, block.y)); int partials_count = (int) (grid.x * grid.y); allocate_buffer(buffer, partials_count); icp_helper_kernel<<<grid, block, 0, s>>>(*this, buffer); cudaSafeCall(cudaGetLastError()); int b = Policy::FINAL_REDUCE_CTA_SIZE; int g = Policy::TOTAL; icp_final_reduce_kernel<<<g, b, 0, s>>>(buffer, partials_count, buffer.ptr(Policy::TOTAL)); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaMemcpyAsync(data, buffer.ptr(Policy::TOTAL), Policy::TOTAL * sizeof(float), cudaMemcpyDeviceToHost, s)); cudaSafeCall(cudaGetLastError()); } void kfusion::device::ComputeIcpHelper::allocate_buffer(DeviceArray2D<float>& buffer, int partials_count) { if (partials_count < 0) { const int input_cols = 640; const int input_rows = 480; int gx = divUp(input_cols, Policy::CTA_SIZE_X); int gy = divUp(input_rows, Policy::CTA_SIZE_Y); partials_count = gx * gy; } int min_rows = Policy::TOTAL + 1; int min_cols = max(partials_count, Policy::TOTAL); if (buffer.rows() < min_rows || buffer.cols() < min_cols) buffer.create(min_rows, min_cols); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// ComputeIcpHelper::PageLockHelper 
kfusion::device::ComputeIcpHelper::PageLockHelper::PageLockHelper() : data(0)
{
    cudaSafeCall(cudaMallocHost((void**)&data, Policy::TOTAL * sizeof(float)));
}

kfusion::device::ComputeIcpHelper::PageLockHelper::~PageLockHelper()
{
    cudaSafeCall(cudaFreeHost(data));
    data = 0;
}
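// Hedged sketch (not part of kfusion itself): the final-reduce stage above leaves
// Policy::TOTAL == 27 sums in `data`, emitted in the same order as the products in
// partial_reduce: row[i]*row[j] for i = 0..5, j = i..6. One plausible host-side
// unpacking into the 6x6 normal-equation matrix A and right-hand side b is shown
// below; the function name and the double-precision accumulators are assumptions.
inline void unpack_icp_system_sketch(const float* data, double A[6][6], double b[6])
{
    int shift = 0;
    for (int i = 0; i < 6; ++i) {
        for (int j = i; j < 7; ++j) {
            double value = data[shift++];
            if (j == 6)
                b[i] = value;              // column 6 carries the J^T * r terms
            else
                A[i][j] = A[j][i] = value; // mirror the upper triangle into a full matrix
        }
    }
}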
the_stack
#include "utils.cuh" #include <limits> #include <raft/device_atomics.cuh> namespace raft { namespace mst { namespace detail { template <typename vertex_t, typename edge_t, typename alteration_t> __global__ void kernel_min_edge_per_vertex(const edge_t* offsets, const vertex_t* indices, const alteration_t* weights, const vertex_t* color, const vertex_t* color_index, edge_t* new_mst_edge, const bool* mst_edge, alteration_t* min_edge_color, const vertex_t v) { edge_t tid = threadIdx.x + blockIdx.x * blockDim.x; unsigned warp_id = tid / 32; unsigned lane_id = tid % 32; __shared__ edge_t min_edge_index[32]; __shared__ alteration_t min_edge_weight[32]; __shared__ vertex_t min_color[32]; min_edge_index[lane_id] = std::numeric_limits<edge_t>::max(); min_edge_weight[lane_id] = std::numeric_limits<alteration_t>::max(); min_color[lane_id] = std::numeric_limits<vertex_t>::max(); __syncthreads(); vertex_t self_color_idx = color_index[warp_id]; vertex_t self_color = color[self_color_idx]; // find the minimum edge associated per row // each thread in warp holds the minimum edge for // only the edges that thread scanned if (warp_id < v) { // one row is associated with one warp edge_t row_start = offsets[warp_id]; edge_t row_end = offsets[warp_id + 1]; // assuming one warp per row // find min for each thread in warp for (edge_t e = row_start + lane_id; e < row_end; e += 32) { alteration_t curr_edge_weight = weights[e]; vertex_t successor_color_idx = color_index[indices[e]]; vertex_t successor_color = color[successor_color_idx]; if (!mst_edge[e] && self_color != successor_color) { if (curr_edge_weight < min_edge_weight[lane_id]) { min_color[lane_id] = successor_color; min_edge_weight[lane_id] = curr_edge_weight; min_edge_index[lane_id] = e; } } } } __syncthreads(); // reduce across threads in warp // each thread in warp holds min edge scanned by itself // reduce across all those warps for (int offset = 16; offset > 0; offset >>= 1) { if (lane_id < offset) { if (min_edge_weight[lane_id] > min_edge_weight[lane_id + offset]) { min_color[lane_id] = min_color[lane_id + offset]; min_edge_weight[lane_id] = min_edge_weight[lane_id + offset]; min_edge_index[lane_id] = min_edge_index[lane_id + offset]; } } __syncthreads(); } // min edge may now be found in first thread if (lane_id == 0) { if (min_edge_weight[0] != std::numeric_limits<alteration_t>::max()) { new_mst_edge[warp_id] = min_edge_index[0]; // atomically set min edge per color // takes care of super vertex case atomicMin(&min_edge_color[self_color], min_edge_weight[0]); } } } template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t> __global__ void min_edge_per_supervertex(const vertex_t* color, const vertex_t* color_index, edge_t* new_mst_edge, bool* mst_edge, const vertex_t* indices, const weight_t* weights, const alteration_t* altered_weights, vertex_t* temp_src, vertex_t* temp_dst, weight_t* temp_weights, const alteration_t* min_edge_color, const vertex_t v, bool symmetrize_output) { auto tid = get_1D_idx<vertex_t>(); if (tid < v) { vertex_t vertex_color_idx = color_index[tid]; vertex_t vertex_color = color[vertex_color_idx]; edge_t edge_idx = new_mst_edge[tid]; // check if valid outgoing edge was found // find minimum edge is same as minimum edge of whole supervertex // if yes, that is part of mst if (edge_idx != std::numeric_limits<edge_t>::max()) { alteration_t vertex_weight = altered_weights[edge_idx]; bool add_edge = false; if (min_edge_color[vertex_color] == vertex_weight) { add_edge = true; auto dst = 
indices[edge_idx]; if (!symmetrize_output) { auto dst_edge_idx = new_mst_edge[dst]; auto dst_color = color[color_index[dst]]; // vertices added each other // only if destination has found an edge // the edge points back to source // the edge is minimum edge found for dst color if (dst_edge_idx != std::numeric_limits<edge_t>::max() && indices[dst_edge_idx] == tid && min_edge_color[dst_color] == altered_weights[dst_edge_idx]) { if (vertex_color > dst_color) { add_edge = false; } } } if (add_edge) { temp_src[tid] = tid; temp_dst[tid] = dst; temp_weights[tid] = weights[edge_idx]; mst_edge[edge_idx] = true; } } if (!add_edge) { new_mst_edge[tid] = std::numeric_limits<edge_t>::max(); } } } } template <typename vertex_t, typename edge_t, typename weight_t> __global__ void add_reverse_edge(const edge_t* new_mst_edge, const vertex_t* indices, const weight_t* weights, vertex_t* temp_src, vertex_t* temp_dst, weight_t* temp_weights, const vertex_t v, bool symmetrize_output) { auto tid = get_1D_idx<vertex_t>(); if (tid < v) { bool reverse_needed = false; edge_t edge_idx = new_mst_edge[tid]; if (edge_idx != std::numeric_limits<edge_t>::max()) { vertex_t neighbor_vertex = indices[edge_idx]; edge_t neighbor_edge_idx = new_mst_edge[neighbor_vertex]; // if neighbor picked no vertex then reverse edge is // definitely needed if (neighbor_edge_idx == std::numeric_limits<edge_t>::max()) { reverse_needed = true; } else { // check what vertex the neighbor vertex picked if (symmetrize_output) { vertex_t neighbor_vertex_neighbor = indices[neighbor_edge_idx]; // if vertices did not pick each other // add a reverse edge if (tid != neighbor_vertex_neighbor) { reverse_needed = true; } } } // if reverse was needed, add the edge if (reverse_needed) { // it is assumed the each vertex only picks one valid min edge // per cycle // hence, we store at index tid + v for the reverse edge scenario temp_src[tid + v] = neighbor_vertex; temp_dst[tid + v] = tid; temp_weights[tid + v] = weights[edge_idx]; } } } } // executes for newly added mst edges and updates the colors of both vertices to the lower color template <typename vertex_t, typename edge_t> __global__ void min_pair_colors(const vertex_t v, const vertex_t* indices, const edge_t* new_mst_edge, const vertex_t* color, const vertex_t* color_index, vertex_t* next_color) { auto i = get_1D_idx<vertex_t>(); if (i < v) { edge_t edge_idx = new_mst_edge[i]; if (edge_idx != std::numeric_limits<edge_t>::max()) { vertex_t neighbor_vertex = indices[edge_idx]; // vertex_t self_color = color[i]; vertex_t self_color_idx = color_index[i]; vertex_t self_color = color[self_color_idx]; vertex_t neighbor_color_idx = color_index[neighbor_vertex]; vertex_t neighbor_super_color = color[neighbor_color_idx]; // update my own color as source of edge // update neighbour color index directly // this will ensure v1 updates supervertex color // while v2 will update the color of its supervertex // thus, allowing the colors to progress towards 0 atomicMin(&next_color[self_color_idx], neighbor_super_color); atomicMin(&next_color[neighbor_color_idx], self_color); } } } // for each vertex, update color if it was changed in min_pair_colors kernel template <typename vertex_t> __global__ void update_colors(const vertex_t v, vertex_t* color, const vertex_t* color_index, const vertex_t* next_color, bool* done) { auto i = get_1D_idx<vertex_t>(); if (i < v) { vertex_t self_color = color[i]; vertex_t self_color_idx = color_index[i]; vertex_t new_color = next_color[self_color_idx]; // update self color to new smaller 
color if (self_color > new_color) { color[i] = new_color; *done = false; } } } // point vertices to their final color index template <typename vertex_t> __global__ void final_color_indices(const vertex_t v, const vertex_t* color, vertex_t* color_index) { auto i = get_1D_idx<vertex_t>(); if (i < v) { vertex_t self_color_idx = color_index[i]; vertex_t self_color = color[self_color_idx]; // if self color is not equal to self color index, // it means self is not supervertex // in which case, iterate until we can find // parent supervertex while (self_color_idx != self_color) { self_color_idx = color_index[self_color]; self_color = color[self_color_idx]; } // point to new supervertex color_index[i] = self_color_idx; } } // Alterate the weights, make all undirected edge weight unique while keeping Wuv == Wvu // Consider using curand device API instead of precomputed random_values array template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t> __global__ void alteration_kernel(const vertex_t v, const edge_t e, const edge_t* offsets, const vertex_t* indices, const weight_t* weights, alteration_t max, alteration_t* random_values, alteration_t* altered_weights) { auto row = get_1D_idx<vertex_t>(); if (row < v) { auto row_begin = offsets[row]; auto row_end = offsets[row + 1]; for (auto i = row_begin; i < row_end; i++) { auto column = indices[i]; altered_weights[i] = weights[i] + max * (random_values[row] + random_values[column]); } } } template <typename vertex_t, typename edge_t> __global__ void kernel_count_new_mst_edges(const vertex_t* mst_src, edge_t* mst_edge_count, const vertex_t v) { auto tid = get_1D_idx<vertex_t>(); // count number of new mst edges added bool predicate = tid < v && (mst_src[tid] != std::numeric_limits<vertex_t>::max()); vertex_t block_count = __syncthreads_count(predicate); if (threadIdx.x == 0 && block_count > 0) { atomicAdd(mst_edge_count, block_count); } } } // namespace detail } // namespace mst } // namespace raft
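// Hedged sketch (not part of the RAFT kernels above): because each row in
// kernel_min_edge_per_vertex is owned by exactly one warp, the shared-memory tree
// reduction could also be written with warp shuffles. This standalone device helper
// reduces a (weight, edge-index) pair to lane 0; the names and the float/int types
// are illustrative assumptions, not the library's API. All 32 lanes of the warp
// must call it with the full mask.
__device__ inline void warp_argmin_sketch(float& weight, int& edge_index)
{
  const unsigned full_mask = 0xffffffffu;
  for (int offset = 16; offset > 0; offset >>= 1) {
    float other_w = __shfl_down_sync(full_mask, weight, offset);
    int   other_e = __shfl_down_sync(full_mask, edge_index, offset);
    if (other_w < weight) {  // keep the smaller weight and the edge that produced it
      weight = other_w;
      edge_index = other_e;
    }
  }
  // After the loop, lane 0 holds the minimum weight and its edge index.
}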
the_stack
#pragma once #include <gunrock/util/sort_device.cuh> #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/app/gtf/gtf_problem.cuh> #include <gunrock/oprtr/oprtr.cuh> #define debug_aml(a...) #include <gunrock/app/mf/mf_enactor.cuh> #include <gunrock/app/gtf/gtf_test.cuh> //#define debug_aml(a...) \ {printf("%s:%d ", __FILE__, __LINE__); printf(a); printf("\n");} namespace gunrock { namespace app { namespace gtf { /** * @brief Speciflying parameters for gtf Enactor * @param parameters The util::Parameter<...> structure holding all parameter * info * \return cudaError_t error message(s), if any */ cudaError_t UseParameters_enactor(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(app::UseParameters_enactor(parameters)); return retval; } /** * @brief defination of gtf iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct GTFIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::Problem ProblemT; typedef typename ProblemT::GraphT GraphT; typedef typename GraphT::CsrT CsrT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop; GTFIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of gtf, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { auto enactor = this->enactor; auto gpu_num = this->gpu_num; auto num_gpus = enactor->num_gpus; auto gpu_offset = num_gpus * gpu_num; auto &data_slice = enactor->problem->data_slices[gpu_num][0]; // MF specific auto &mf_data_slice = enactor->problem->mf_problem.data_slices[gpu_num][0]; auto &mf_problem = enactor->problem->mf_problem; auto &mf_enactor = enactor->mf_enactor; auto &mf_flow = mf_data_slice.flow; auto mf_target = util::DEVICE; auto &h_reverse = data_slice.reverse; auto &enactor_slice = enactor->enactor_slices[gpu_offset + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &graph = data_slice.sub_graph[0]; auto &frontier = enactor_slice.frontier; auto &oprtr_parameters = enactor_slice.oprtr_parameters; auto &retval = enactor_stats.retval; auto &iteration = enactor_stats.iteration; //!!! allowed? auto num_nodes = graph.nodes; // n + 2 = V auto num_org_nodes = num_nodes - 2; // n auto num_edges = graph.edges; // m + n*4 auto offset = num_edges - (num_org_nodes)*2; //!!! 
auto source = data_slice.source; auto sink = data_slice.sink; auto &next_communities = data_slice.next_communities; auto &curr_communities = data_slice.curr_communities; auto &community_sizes = data_slice.community_sizes; auto &community_weights = data_slice.community_weights; auto &community_active = data_slice.community_active; auto &community_accus = data_slice.community_accus; auto &vertex_active = data_slice.vertex_active; auto &vertex_reachabilities = data_slice.vertex_reachabilities; auto &edge_residuals = data_slice.edge_residuals; auto &edge_flows = data_slice.edge_flows; auto &active = data_slice.active; auto &num_comms = data_slice.num_comms; auto &previous_num_comms = data_slice.previous_num_comms; auto &num_updated_vertices = data_slice.num_updated_vertices; auto &Y = data_slice.Y; util::CpuTimer cpu_timer; cpu_timer.Start(); /* printf("iteration %d \n", iteration); GUARD_CU(edge_residuals.ForAll( [mf_flow, graph, source] __host__ __device__ (ValueT *edge_residuals, const SizeT &e){ if(e < 10) printf("GPU: e_idx %d, e_val %f\n", e, graph.edge_values[e]); //edge_residuals[e] = graph.edge_values[e]; // just for debugging purposes #!!! }, graph.edges, util::DEVICE, oprtr_parameters.stream)); */ cpu_timer.Start(); GUARD_CU(graph.edge_values.Move(util::DEVICE, util::HOST, graph.edges, 0, oprtr_parameters.stream)); GUARD_CU(cudaDeviceSynchronize()); cpu_timer.Stop(); // printf("move: %f \n", cpu_timer.ElapsedMillis()); mf_problem.parameters.Set("source", source); mf_problem.parameters.Set("sink", sink); cpu_timer.Start(); GUARD_CU(mf_problem.Reset(graph, h_reverse + 0, mf_target)); GUARD_CU(cudaDeviceSynchronize()); cpu_timer.Stop(); // printf("problem reset: %f \n", cpu_timer.ElapsedMillis()); cpu_timer.Start(); GUARD_CU(mf_enactor.Reset(source, mf_target)); GUARD_CU(cudaDeviceSynchronize()); cpu_timer.Stop(); // printf("enact reset: %f \n", cpu_timer.ElapsedMillis()); cpu_timer.Start(); GUARD_CU(mf_enactor.Enact()); GUARD_CU(cudaDeviceSynchronize()); cpu_timer.Stop(); // printf("mf: %f \n", cpu_timer.ElapsedMillis()); cpu_timer.Start(); // min cut GUARD_CU(edge_residuals.ForAll( [mf_flow, graph, source] __host__ __device__(ValueT * edge_residuals, const SizeT &e) { // if(e == 0) printf("in residual assignment beginning of gtf\n"); edge_residuals[e] = graph.edge_values[e] - mf_flow[e]; mf_flow[e] = 0.; // if(e < 10)printf("GPU: er_idx %d, e_res %f \n", e, // edge_residuals[e]); }, graph.edges, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(vertex_reachabilities.ForAll( [] __host__ __device__(bool *vertex_reachabilities, const SizeT &v) { vertex_reachabilities[v] = false; // if(v == 0) printf("in reach\n"); }, graph.nodes, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(vertex_reachabilities.ForAll( [edge_residuals, graph, community_sizes, source] __host__ __device__( bool *vertex_reachabilities, const SizeT &idx) { VertexT head = 0; VertexT tail = 0; VertexT *queue = community_sizes + 0; queue[head] = source; while (tail <= head) { VertexT v = queue[tail]; auto e_start = graph.GetNeighborListOffset(v); auto num_neighbors = graph.GetNeighborListLength(v); auto e_end = e_start + num_neighbors; for (auto e = e_start; e < e_end; e++) { VertexT u = graph.GetEdgeDest(e); if (vertex_reachabilities[u] == false && abs(edge_residuals[e]) > 1e-6) { head++; queue[head] = u; vertex_reachabilities[u] = true; } } tail++; } // if(idx == 0) printf("in min-cut\n"); }, 1, util::DEVICE, oprtr_parameters.stream)); ////////////////////////////////////////////////// 
GUARD_CU(community_weights.ForAll( [vertex_active, // vertex specific vertex_reachabilities, next_communities, // community specific curr_communities, community_active, community_sizes, community_accus, edge_residuals, // intermediate output // others num_comms, num_edges, num_org_nodes, graph, active] __host__ __device__(ValueT * community_weights, const VertexT &idx) { { auto &edge_capacities = graph.edge_values; unsigned int comm; for (comm = 0; comm < num_comms[0]; comm++) { community_weights[comm] = 0; community_sizes[comm] = 0; next_communities[comm] = 0; } auto pervious_num_comms = num_comms[0]; for (VertexT v = 0; v < num_org_nodes; v++) { if (!vertex_active[v]) continue; if (vertex_reachabilities[v] == 1) { // reachable by source comm = next_communities[curr_communities[v]]; if (comm == 0) { // not assigned yet comm = num_comms[0]; next_communities[curr_communities[v]] = num_comms[0]; community_active[comm] = true; num_comms[0]++; community_weights[comm] = 0; community_sizes[comm] = 0; next_communities[comm] = 0; community_accus[comm] = community_accus[curr_communities[v]]; } curr_communities[v] = comm; community_weights[comm] += edge_residuals[num_edges - num_org_nodes * 2 + v]; community_sizes[comm]++; // printf("++ %d %f %f\n", comm, community_weights[comm], // community_accus[comm]); } else { // otherwise comm = curr_communities[v]; SizeT e_start = graph.GetNeighborListOffset(v); SizeT num_neighbors = graph.GetNeighborListLength(v); community_weights[comm] -= edge_residuals[e_start + num_neighbors - 1]; community_sizes[comm]++; auto e_end = e_start + num_neighbors - 2; for (auto e = e_start; e < e_end; e++) { VertexT u = graph.GetEdgeDest(e); if (vertex_reachabilities[u] == 1) { edge_residuals[e] = 0; } } // printf("-- %d %f %f\n", comm, community_weights[comm], // community_accus[comm]); } } // end of for v // printf("%d %f %f\n", comm, community_weights[comm], // community_accus[comm]); for (comm = 0; comm < pervious_num_comms; comm++) { if (community_active[comm]) { if (next_communities[comm] == 0) { community_weights[comm] = 0; community_active[comm] = false; } else if (community_sizes[comm] == 0) { community_active[comm] = false; community_active[next_communities[comm]] = false; community_weights[next_communities[comm]] = 0; } else { // printf("values: comm: %d, sizes: %d, weights: %f, accus: // %f.\n", // comm, community_sizes[comm], community_weights[comm], // community_accus[comm]); community_weights[comm] /= community_sizes[comm]; community_accus[comm] += community_weights[comm]; } } else { community_weights[comm] = 0; } } for (; comm < num_comms[0]; comm++) { community_weights[comm] /= community_sizes[comm]; community_accus[comm] += community_weights[comm]; // printf("comm %d, accus %f, sizes %d \n", // comm, community_accus [comm], community_sizes [comm]); // printf("values: comm: %d, sizes: %d, weights: %f, accus: // %f.\n", // comm, community_sizes[comm], community_weights[comm], // community_accus[comm]); } active[0] = false; for (VertexT v = 0; v < num_org_nodes; v++) { if (!vertex_active[v]) continue; auto comm = curr_communities[v]; if (!community_active[comm] || abs(community_weights[comm]) <= 1e-6) { if (vertex_reachabilities[v] == 1) edge_residuals[num_edges - num_org_nodes * 2 + v] = 0; if (vertex_reachabilities[v] != 1) { SizeT e = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; edge_residuals[e] = 0; } vertex_active[v] = false; community_active[comm] = false; } else { active[0] = true; SizeT e_from_src = num_edges - num_org_nodes * 2 
+ v; SizeT e_to_dest = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; if (vertex_reachabilities[v] == 1) { edge_residuals[e_from_src] -= community_weights[comm]; if (edge_residuals[e_from_src] < 0) { double temp = -1 * edge_residuals[e_from_src]; edge_residuals[e_from_src] = edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = temp; } } else { edge_residuals[e_to_dest] += community_weights[comm]; if (edge_residuals[e_to_dest] < 0) { double temp = -1 * edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = edge_residuals[e_from_src]; edge_residuals[e_from_src] = temp; } } } } // end of for v // for (SizeT e = 0; e < graph.edges; e ++){ // edge_capacities[e] = edge_residuals[e]; // printf("CPU: eidx %d, edge_v %f \n", e, edge_capacities[e]); //} } }, 1, util::DEVICE, oprtr_parameters.stream)); /* //////////////////////////////// GUARD_CU(community_weights.ForAll( [community_sizes, next_communities, num_comms, vertex_reachabilities, community_accus] __host__ __device__ (ValueT *community_weight, const SizeT &pos){ if(pos < num_comms[0]){ community_weight [pos] = 0; community_sizes [pos] = 0; next_communities [pos] = 0; //printf("vertext value %f \n", community_accus[0]); } //printf("%d, ", vertex_reachabilities[pos]); }, num_nodes, util::DEVICE, oprtr_parameters.stream)); //printf("core runs permantly1 \n"); GUARD_CU(previous_num_comms.ForAll( [num_comms] __host__ __device__ (VertexT *previous_num_comm, const SizeT &pos){ previous_num_comm[pos] = num_comms[pos]; }, 1, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(community_weights.ForAll( [vertex_active, // vertex specific vertex_reachabilities, next_communities, //community specific curr_communities, community_active, community_sizes, community_accus, edge_residuals, // intermediate output // others num_comms, num_edges, num_org_nodes, graph] __host__ __device__ (ValueT *community_weights, const VertexT &idx){ { VertexT comm; //if(idx == 0) printf("in 1st for loop begin\n"); for (VertexT v = 0; v < num_org_nodes; v++) { if (!vertex_active[v]) continue; if (vertex_reachabilities[v]) { // reachable by source comm = next_communities[curr_communities[v]]; if (comm == 0) { // not assigned yet comm = num_comms[0]; next_communities[curr_communities[v]] = num_comms[0]; community_active [comm] = true; num_comms[0] ++; community_weights[comm] = 0; community_sizes [comm] = 0; next_communities [comm] = 0; community_accus [comm] = community_accus[curr_communities[v]]; } curr_communities[v] = comm; community_weights[comm] += edge_residuals[num_edges - num_org_nodes * 2 + v]; community_sizes [comm] ++; //printf("++ %d %f %f\n", comm, community_weights[comm], community_accus[comm]); } else { // otherwise comm = curr_communities[v]; SizeT e_start = graph.GetNeighborListOffset(v); SizeT num_neighbors = graph.GetNeighborListLength(v); community_weights[comm] -= edge_residuals[e_start + num_neighbors - 1]; community_sizes [comm] ++; auto e_end = e_start + num_neighbors - 2; for (auto e = e_start; e < e_end; e++) { VertexT u = graph.GetEdgeDest(e); if (vertex_reachabilities[u] == 1) { edge_residuals[e] = 0; } } //printf("-- %d %f %f\n", comm, community_weights[comm], community_accus[comm]); } } //if(idx == 0) printf("in 1st for loop end\n"); } }, 1, util::DEVICE, oprtr_parameters.stream)); //loop only once GUARD_CU(community_weights.ForAll( [next_communities, //community specific curr_communities, community_active, community_sizes, community_accus, // others previous_num_comms] __host__ __device__ (ValueT *community_weights, unsigned 
int &idx){ { for (auto comm = 0; comm < previous_num_comms[0]; comm ++) { if (community_active[comm]) { if (next_communities[comm] == 0) { community_weights[comm] = 0; community_active [comm] = false; } else if (community_sizes[comm] == 0) { community_active [comm] = false; community_active [next_communities[comm]] = false; community_weights[next_communities[comm]] = 0; } else { //printf("values: comm: %d, sizes: %d, weights: %f, accus: %f.\n", comm, community_sizes[comm], community_weights[comm], community_accus[comm]); community_weights[comm] /= community_sizes [comm]; community_accus [comm] += community_weights[comm]; } } else { community_weights[comm] = 0; } } } //if(comm == 0) printf("in 2st for loop end\n"); }, 1, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(community_weights.ForAll( [next_communities, //community specific community_sizes, community_accus, // others previous_num_comms, num_comms, active] //!!! __host__ __device__ (ValueT *community_weights, unsigned int &comm){ { if(comm < num_comms[0] && comm >= previous_num_comms[0]){ community_weights[comm] /= community_sizes [comm]; community_accus [comm] += community_weights[comm]; printf("comm %d, accus %f, sizes %d \n", comm, community_accus [comm], community_sizes [comm]); } active[0] = 0; } }, num_org_nodes, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(community_weights.ForAll( [vertex_active, // vertex specific vertex_reachabilities, next_communities, //community specific curr_communities, community_active, community_sizes, community_accus, edge_residuals, // intermediate output // others num_comms, num_edges, num_org_nodes, graph, active] __host__ __device__ (ValueT *community_weights, const VertexT &idx){ { for (VertexT v = 0; v < num_org_nodes; v++) { if (!vertex_active[v]) continue; auto comm = curr_communities[v]; if (!community_active[comm] || abs(community_weights[comm]) < 1e-6) { if (vertex_reachabilities[v] == 1) edge_residuals[num_edges - num_org_nodes * 2 + v] = 0; if (vertex_reachabilities[v] != 1) { SizeT e = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; edge_residuals[e] = 0; } vertex_active[v] = false; community_active[comm] = false; } else { active[0] = 1; SizeT e_from_src = num_edges - num_org_nodes * 2 + v; SizeT e_to_dest = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; if (vertex_reachabilities[v] == 1) { edge_residuals[e_from_src] -= community_weights[comm]; if (edge_residuals[e_from_src] < 0) { double temp = -1 * edge_residuals[e_from_src]; edge_residuals[e_from_src] = edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = temp; } } else { edge_residuals[e_to_dest] += community_weights[comm]; if (edge_residuals[e_to_dest] < 0) { double temp = -1 * edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = edge_residuals[e_from_src]; edge_residuals[e_from_src] = temp; } } } } // end of for v */ // below is parallel /* if (!vertex_active[v]) return; auto comm = curr_communities[v]; if (!community_active[comm] || abs(community_weights[comm]) < 1e-6) { if (vertex_reachabilities[v] == 1) edge_residuals[num_edges - num_org_nodes * 2 + v] = 0; if (vertex_reachabilities[v] != 1) { SizeT e = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; edge_residuals[e] = 0; } vertex_active[v] = false; community_active[comm] = false; } else { active[0] = 1; SizeT e_from_src = num_edges - num_org_nodes * 2 + v; SizeT e_to_dest = graph.GetNeighborListOffset(v) + graph.GetNeighborListLength(v) - 1; if (vertex_reachabilities[v] == 1) { 
edge_residuals[e_from_src] -= community_weights[comm]; if (edge_residuals[e_from_src] < 0) { auto temp = -1 * edge_residuals[e_from_src]; edge_residuals[e_from_src] = edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = temp; } } else { edge_residuals[e_to_dest] += community_weights[comm]; if (edge_residuals[e_to_dest] < 0) { auto temp = -1 * edge_residuals[e_to_dest]; edge_residuals[e_to_dest] = edge_residuals[e_from_src]; edge_residuals[e_from_src] = temp; } } } //if(v == 0) printf("in 3st for loop end\n"); */ //!!!} //!!!}, 1, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(edge_residuals.ForAll( [graph, iteration, active] __host__ __device__(ValueT * edge_residuals, SizeT & e) { { if (false) { // if(iteration == 0){ active[0] = 0; edge_residuals[e] = graph.edge_values[e]; // just for debugging purposes #!!! } else { graph.edge_values[e] = edge_residuals[e]; } } }, graph.edges, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(community_accus.ForAll( [active, community_accus, curr_communities] __host__ __device__( ValueT * community_accus, SizeT & v) { { if (active[0] == 0) { ValueT tmp = max(community_accus[v] - 3., 0.0); community_accus[v] = tmp + min(community_accus[v] + 3., 0.0); // printf("%d %f \n", v, community_accus[curr_communities[v]]); } } }, num_org_nodes, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(community_accus.ForAll( [active, community_accus, curr_communities, Y] __host__ __device__( ValueT * community_accus, SizeT & v) { { if (active[0] == 0) { Y[v] = community_accus[curr_communities[v]]; } // if(v == 0) printf("in last for loop end\n"); } }, num_org_nodes, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(cudaDeviceSynchronize()); cpu_timer.Stop(); // printf("gtf: %f \n", cpu_timer.ElapsedMillis()); GUARD_CU2(cudaStreamSynchronize(oprtr_parameters.stream), "cudaStreamSynchronize failed"); // printf("new updated vertices %d\n", frontier.queue_length); cpu_timer.Start(); frontier.queue_reset = true; oprtr_parameters.filter_mode = "BY_PASS"; GUARD_CU(oprtr::Filter<oprtr::OprtrType_V2V>( graph.csr(), frontier.V_Q(), frontier.Next_V_Q(), oprtr_parameters, [active] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { return active[0] > 0; })); cpu_timer.Stop(); frontier.queue_index++; // Get back the resulted frontier length GUARD_CU(frontier.work_progress.GetQueueLength( frontier.queue_index, frontier.queue_length, false, oprtr_parameters.stream, true)); GUARD_CU2(cudaStreamSynchronize(oprtr_parameters.stream), "cudaStreamSynchronize failed"); // printf("new updated vertices %d (version after filter)\n", \ frontier.queue_length);\ fflush(stdout); data_slice.num_updated_vertices = frontier.queue_length; return retval; } /* cudaError_t Compute_OutputLength(int peer_) { // No need to load balance or get output size return cudaSuccess; }*/ /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each transmition item, typed ValueT * @param[in] received_length The number of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { auto &enactor = this->enactor; auto &problem = enactor->problem; auto 
gpu_num = this->gpu_num; auto gpu_offset = gpu_num * enactor->num_gpus; auto &data_slice = problem->data_slices[gpu_num][0]; auto &enactor_slice = enactor->enactor_slices[gpu_offset + peer_]; auto iteration = enactor_slice.enactor_stats.iteration; debug_aml("ExpandIncomming do nothing"); /* for key " + std::to_string(key) + " and for in_pos " + std::to_string(in_pos) + " and for vertex ass ins " + std::to_string(vertex_associate_ins[in_pos]) + " and for value ass ins " + std::to_string(value__associate_ins[in_pos]));*/ auto expand_op = [] __host__ __device__( VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { // TODO: fill in the lambda to combine received and local data, e.g.: // ValueT in_val = value__associate_ins[in_pos]; // ValueT old_val = atomicMin(distances + key, in_val); // if (old_val <= in_val) // return false; return true; }; debug_aml("expand incoming\n"); cudaError_t retval = BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, peer_, expand_op); return retval; } bool Stop_Condition(int gpu_num = 0) { auto enactor = this->enactor; int num_gpus = enactor->num_gpus; auto &enactor_slice = enactor->enactor_slices[0]; auto iteration = enactor_slice.enactor_stats.iteration; auto &retval = enactor_slice.enactor_stats.retval; if (retval != cudaSuccess) { printf("(CUDA error %d @ GPU %d: %s\n", retval, 0 % num_gpus, cudaGetErrorString(retval)); fflush(stdout); return true; } auto &data_slice = enactor->problem->data_slices[gpu_num][0]; if (data_slice.num_updated_vertices == 0) return true; return false; } }; // end of gtfIteration /** * @brief gtf enactor class. * @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase<typename _Problem::GraphT, typename _Problem::VertexT, typename _Problem::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: typedef _Problem Problem; typedef typename Problem::VertexT VertexT; typedef typename Problem::ValueT ValueT; typedef typename Problem::SizeT SizeT; typedef typename Problem::GraphT GraphT; typedef typename Problem::MfProblemT MfProblemT; typedef EnactorBase<GraphT, VertexT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef GTFIterationLoop<EnactorT> IterationT; Problem *problem; IterationT *iterations; // typedef mf::Problem<GraphT> MfProblemT; typedef mf::Enactor<MfProblemT> MfEnactorT; MfEnactorT mf_enactor; /** * @brief gtfEnactor constructor */ Enactor() : BaseEnactor("gtf"), mf_enactor(), problem(NULL) { // TODO: change according to algorithmic needs this->max_num_vertex_associates = 0; this->max_num_value__associates = 1; } /** * @brief MFEnactor destructor */ virtual ~Enactor() { // Release(); } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * \addtogroup PublicInterface * @{ */ /** * @brief Initialize the problem. 
* @param[in] problem The problem object. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; // Lazy initialization GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false)); GUARD_CU(mf_enactor.Init(problem.mf_problem, target)); auto num_gpus = this->num_gpus; for (int gpu = 0; gpu < num_gpus; ++gpu) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); auto gpu_offset = gpu * num_gpus; auto &enactor_slice = this->enactor_slices[gpu_offset + 0]; auto &graph = problem.sub_graphs[gpu]; auto nodes = graph.nodes; auto edges = graph.edges; GUARD_CU( enactor_slice.frontier.Allocate(nodes, edges, this->queue_factors)); } iterations = new IterationT[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } /** * @brief one run of gtf, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { debug_aml("Run enact"); gunrock::app::Iteration_Loop<0, // NUM_VERTEX_ASSOCIATES 1, // NUM_VALUE__ASSOCIATES IterationT>( thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Reset enactor * @param[in] src Source node to start primitive. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset(const VertexT &src, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; debug_aml("Enactor Reset, src %d", src); typedef typename EnactorT::Problem::GraphT::GpT GpT; auto num_gpus = this->num_gpus; GUARD_CU(BaseEnactor::Reset(target)); // Initialize frontiers according to the algorithm gtf for (int gpu = 0; gpu < num_gpus; gpu++) { auto gpu_offset = gpu * num_gpus; if (num_gpus == 1 || (gpu == this->problem->org_graph->GpT::partition_table[src])) { this->thread_slices[gpu].init_size = 1; for (int peer_ = 0; peer_ < num_gpus; ++peer_) { auto &frontier = this->enactor_slices[gpu_offset + peer_].frontier; frontier.queue_length = (peer_ == 0) ? 1 : 0; if (peer_ == 0) { GUARD_CU(frontier.V_Q()->ForEach( [src] __host__ __device__(VertexT & v) { v = src; }, 1, target, 0)); } } } else { this->thread_slices[gpu].init_size = 0; for (int peer_ = 0; peer_ < num_gpus; peer_++) { auto &frontier = this->enactor_slices[gpu_offset + peer_].frontier; frontier.queue_length = 0; } } } GUARD_CU(BaseEnactor::Sync()); debug_aml("Enactor Reset end"); return retval; } /** * @brief Enacts a gtf computing on the specified graph. * @param[in] src Source node to start primitive. * \return cudaError_t error message(s), if any */ cudaError_t Enact() { cudaError_t retval = cudaSuccess; debug_aml("enact"); printf("enact calling successfully!!!!!!!!!!!\n"); GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU gtf Done.", this->flag & Debug); return retval; } /** @} */ }; } // namespace gtf } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
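// Hedged sketch (appended here only as an illustration, not part of Gunrock): the
// enactor defined above is typically driven by a host-side Init / Reset / Enact
// sequence mirroring the methods in this file. Only those three enactor calls are
// taken from the class above; the problem construction and Problem::Init signature
// are assumptions and the helper name is hypothetical.
template <typename ProblemT>
cudaError_t run_gtf_sketch(gunrock::util::Parameters &parameters,
                           typename ProblemT::GraphT &graph,
                           typename ProblemT::VertexT src) {
  cudaError_t retval = cudaSuccess;
  ProblemT problem(parameters);                              // assumed Problem constructor
  gunrock::app::gtf::Enactor<ProblemT> enactor;              // class defined above
  GUARD_CU(problem.Init(graph, gunrock::util::DEVICE));      // assumed Problem::Init signature
  GUARD_CU(enactor.Init(problem, gunrock::util::DEVICE));    // Init() defined above
  GUARD_CU(enactor.Reset(src, gunrock::util::DEVICE));       // Reset() defined above
  GUARD_CU(enactor.Enact());                                 // Enact() defined above
  return retval;
}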
the_stack
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h> #include <thrust/system/detail/generic/reduce_by_key.h> #include <thrust/remove.h> #include <thrust/iterator/transform_iterator.h> #include <error.h> #include <cutil.h> #include <types.h> #include <cusp/detail/format_utils.h> namespace amgx { namespace aggregation { typedef thrust::tuple<int, int> tuple_t; // -------------------- // Kernels // -------------------- // Kernel to store aggregate I of each fine point index i template <typename IndexType> __global__ void iToIKernel(const IndexType *row_offsets, const IndexType *aggregates, IndexType *I, const int num_rows) { for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x) { int agg = aggregates[tid]; for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++) { I[j] = agg; } } } // Kernel to store aggregate J of each fine point index j template <typename IndexType> __global__ void jToJKernel(const IndexType *column_indices, const IndexType *aggregates, IndexType *J, const int num_entries) { for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_entries; tid += gridDim.x * blockDim.x) { int j = column_indices[tid]; J[tid] = aggregates[j]; } } // Constructor template<class T_Config> ThrustCoarseAGeneratorBase<T_Config>::ThrustCoarseAGeneratorBase() { } //----------------------------------------------------- // Method to compute the Galerkin product: A_c=R*A*P //----------------------------------------------------- // Method to compute A on DEVICE using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void ThrustCoarseAGenerator<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1(const Matrix_d &A, Matrix_d &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates) { if (A.hasProps(DIAG)) { FatalError("ThrustCoarseAGenerator: unsupported diagonal", AMGX_ERR_NOT_IMPLEMENTED); } cudaCheckError(); IVector I(A.get_num_nz(), -1); IVector J(A.get_num_nz(), -1); VVector V(A.get_num_nz(), -1); const int block_size_I = 128; const int block_size_J = 256; const int num_blocks_I = min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_rows() - 1) / block_size_I + 1) ); const int num_blocks_J = min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_nz() - 1) / block_size_J + 1) ); const IndexType *row_offsets_ptr = A.row_offsets.raw(); const IndexType *column_indices_ptr = A.col_indices.raw(); const IndexType *aggregates_ptr = aggregates.raw(); IndexType *I_ptr = I.raw(); IndexType *J_ptr = J.raw(); // Kernel to fill array I with aggregates number for fine points i iToIKernel <<< num_blocks_I, block_size_I>>>(row_offsets_ptr, aggregates_ptr, I_ptr, (int)A.get_num_rows()); cudaCheckError(); // Kernel to fill array J with aggregates number for fine points j jToJKernel <<< num_blocks_J, block_size_J>>>(column_indices_ptr, aggregates_ptr, J_ptr, (int)A.get_num_nz()); cudaCheckError(); // Copy A.values to V array thrust::copy(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), V.begin()); cudaCheckError(); // Sort (I,J,V) by rows and columns (I,J) cusp::detail::sort_by_row_and_column(I, J, V); cudaCheckError(); // compute unique number of nonzeros in the output IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())), thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1, 
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1, IndexType(0), thrust::plus<IndexType>(), thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1; cudaCheckError(); // allocate space for coarse matrix Ac Ac.addProps(CSR); if (A.hasProps(DIAG)) { Ac.addProps(DIAG); } if (A.is_matrix_singleGPU()) { Ac.resize(num_aggregates, num_aggregates, NNZ, 1); } else { Ac.resize_spare(num_aggregates, num_aggregates, NNZ, A.get_block_dimy(), A.get_block_dimx(), 1.0); if (A.hasProps(DIAG)) { Ac.computeDiagonal(); } } // Reduce by key to fill in Ac.column_indices and Ac.values IVector new_row_indices(NNZ, 0); thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())), thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), V.begin(), thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), Ac.col_indices.begin())), Ac.values.begin(), thrust::equal_to< thrust::tuple<IndexType, IndexType> >(), thrust::plus<ValueType>()); cudaCheckError(); // Convert array new_row_indices to offsets cusp::detail::indices_to_offsets(new_row_indices, Ac.row_offsets); cudaCheckError(); I.clear(); I.shrink_to_fit(); J.clear(); J.shrink_to_fit(); V.clear(); V.shrink_to_fit(); } // Method to compute A on HOST using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void ThrustCoarseAGenerator<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1(const Matrix_h &A, Matrix_h &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates) { if (A.hasProps(DIAG)) { FatalError("ThrustCoarseAGenerator: unsupported diagonal", AMGX_ERR_NOT_IMPLEMENTED); } IVector I(A.get_num_nz(), -1); IVector J(A.get_num_nz(), -1); VVector V(A.get_num_nz(), -1); const IndexType *row_offsets_ptr = A.row_offsets.raw(); const IndexType *column_indices_ptr = A.col_indices.raw(); const IndexType *aggregates_ptr = aggregates.raw(); IndexType *I_ptr = I.raw(); IndexType *J_ptr = J.raw(); // Kernel to fill array I with aggregates number for fine points i for ( int tid = 0; tid < (int)A.get_num_rows(); tid++ ) { int agg = aggregates_ptr[tid]; for (int j = row_offsets_ptr[tid]; j < row_offsets_ptr[tid + 1]; j++) { I_ptr[j] = agg; } } // Kernel to fill array J with aggregates number for fine points j for ( int tid = 0; tid < (int)A.get_num_nz(); tid++ ) { int j = column_indices_ptr[tid]; J_ptr[tid] = aggregates_ptr[j]; } // Copy A.values to V array thrust::copy(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), V.begin()); cudaCheckError(); // Sort (I,J,V) by rows and columns (I,J) cusp::detail::sort_by_row_and_column(I, J, V); cudaCheckError(); // compute unique number of nonzeros in the output IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())), thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1, thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1, IndexType(0), thrust::plus<IndexType>(), thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1; cudaCheckError(); // allocate space for coarse matrix Ac Ac.addProps(CSR); if (A.hasProps(DIAG)) { Ac.addProps(DIAG); } if (A.is_matrix_singleGPU()) { Ac.resize(num_aggregates, num_aggregates, NNZ, 1); } else { Ac.resize_spare(num_aggregates, num_aggregates, NNZ, A.get_block_dimy(), A.get_block_dimx(), 1.0); if (A.hasProps(DIAG)) { Ac.computeDiagonal(); } } // Reduce by key 
to fill in Ac.column_indices and Ac.values typename Matrix_h::IVector new_row_indices(NNZ, 0); thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())), thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), V.begin(), thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), Ac.col_indices.begin())), Ac.values.begin(), thrust::equal_to< thrust::tuple<IndexType, IndexType> >(), thrust::plus<ValueType>()); cudaCheckError(); // Convert array new_row_indices to offsets cusp::detail::indices_to_offsets(new_row_indices, Ac.row_offsets); cudaCheckError(); I.clear(); I.shrink_to_fit(); J.clear(); J.shrink_to_fit(); V.clear(); V.shrink_to_fit(); } // ------------------------------------------------ template <class T_Config> void ThrustCoarseAGeneratorBase<T_Config>::computeAOperator(const Matrix<T_Config> &A, Matrix<T_Config> &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates) { Ac.set_initialized(0); if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1) { computeAOperator_1x1( A, Ac, aggregates, R_row_offsets, R_column_indices, num_aggregates ); } else { FatalError("Unsupported block size for ThrustCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if (Ac.is_matrix_singleGPU()) { Ac.computeDiagonal(); } Ac.set_initialized(1); } // --------------------------- // Explict instantiations // --------------------------- #define AMGX_CASE_LINE(CASE) template class ThrustCoarseAGeneratorBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class ThrustCoarseAGenerator<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } }
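// Hedged sketch (not part of AMGX): a minimal, standalone illustration of the
// reduce_by_key step used above. Duplicate (I, J) coordinate pairs of an
// already-sorted COO list are collapsed into one entry whose value is the sum,
// which is how the Galerkin product accumulates contributions landing on the same
// coarse-matrix entry. All container names are local to this sketch.
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <thrust/functional.h>

void reduce_coo_duplicates_sketch()
{
    // Sorted COO triplets with a duplicate coordinate at (0, 1).
    int   hI[] = {0, 0, 0, 1};
    int   hJ[] = {0, 1, 1, 1};
    float hV[] = {1.f, 2.f, 3.f, 4.f};
    thrust::device_vector<int>   I(hI, hI + 4), J(hJ, hJ + 4);
    thrust::device_vector<float> V(hV, hV + 4);
    thrust::device_vector<int>   outI(4), outJ(4);
    thrust::device_vector<float> outV(4);

    auto ends = thrust::reduce_by_key(
        thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(I.end(),   J.end())),
        V.begin(),
        thrust::make_zip_iterator(thrust::make_tuple(outI.begin(), outJ.begin())),
        outV.begin(),
        thrust::equal_to< thrust::tuple<int, int> >(),
        thrust::plus<float>());

    // Result: keys (0,0), (0,1), (1,1) with values 1, 5, 4;
    // ends.second - outV.begin() == 3 unique entries.
    (void)ends;
}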
#define CMAX(x,y,z) (imax3(x,y,z)-imin3(x,y,z)) #define RGBA2INT(r,g,b,a) ( (uint((a)*255.0f)<<24) | (uint((b)*255.0f)<<16) | (uint((g)*255.0f)<<8) | uint((r)*255.0f) ) #define CLR2INT(c) ( (uint((c.w)*255.0f)<<24) | (uint((c.z)*255.0f)<<16) | (uint((c.y)*255.0f)<<8) | uint((c.x)*255.0f) ) #define INT2CLR(c) ( make_float4( float(c & 0xFF)/255.0f, float((c>>8) & 0xFF)/255.0f, float((c>>16) & 0xFF)/255.0f, float((c>>24) & 0xFF)/255.0f )) #define T_UCHAR 0 // channel types #define T_FLOAT 3 #define T_INT 6 extern "C" __global__ void gvdbUpdateApronFacesF ( VDBInfo* gvdb, uchar chan, int brickcnt, int brickres, int brickwid, int* nbrtable ) { // Compute brick & atlas vox int brk = blockIdx.x; if (brk > brickcnt) return; int side = threadIdx.x; uint2 suv = make_uint2(blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); if (suv.x >= brickwid || suv.y >= brickwid || side >= 3 ) return; int3 vox, vinc; switch (side) { case 0: vox = make_int3(0, suv.x, suv.y); vinc = make_int3(1, 0, 0); break; case 1: vox = make_int3(suv.x, 0, suv.y); vinc = make_int3(0, 1, 0); break; case 2: vox = make_int3(suv.x, suv.y, 0); vinc = make_int3(0, 0, 1); break; } int3 vnbr = vox + vinc*(brickwid - 1); // neighbor offset // Get current brick VDBNode* node = getNode(gvdb, 0, brk); if (node == 0x0) return; vox += make_int3(node->mValue); // self atlas start // Get neigboring brick int nbr = nbrtable[brk * 6 + side]; if (nbr == ID_UNDEFL) return; node = getNode(gvdb, 0, nbr); vnbr += make_int3(node->mValue); // neighbor atlas start // Update self and neighbor (gvdb->volOut[chan] is the surface object for the atlas) float v1 = surf3Dread<float>(gvdb->volOut[chan], vox.x * sizeof(float), vox.y, vox.z); // get self voxel float v2 = surf3Dread<float>(gvdb->volOut[chan], vnbr.x * sizeof(float), vnbr.y, vnbr.z); // get neighbor voxel surf3Dwrite(v1, gvdb->volOut[chan], (vnbr.x + vinc.x) * sizeof(float), (vnbr.y + vinc.y), (vnbr.z + vinc.z) ); // neighbor apron surf3Dwrite(v2, gvdb->volOut[chan], (vox.x - vinc.x) * sizeof(float), (vox.y - vinc.y), (vox.z - vinc.z)); // self apron } // Function template for updating the apron without UpdateApronFaces' neighbor // table. Each of the voxels of the apron computes its world-space // (= index-space) position and looks up what its value should be, sampling // from a neighboring brick, or using the boundary value if no brick contains // the voxel. // // This should be called using blocks with an x dimension of 6, arranged in a // grid along the x axis. Each yz plane of the block will fill in the voxels // for a different face. template<class T> __device__ void UpdateApron(VDBInfo* gvdb, const uchar channel, const int brickCount, const int paddedBrickRes, const T boundaryValue, int ApronSize) { // The brick this block processes. const int brick = blockIdx.x; if (brick > brickCount) return; // Determine which voxel of the apron to compute and write. 
uint3 brickVoxel; // In the local coordinate space of the brick { const int side = threadIdx.x; // Side of the brick, from 0 to 5 uint2 suv = make_uint2(blockIdx.y*blockDim.y + threadIdx.y, blockIdx.z*blockDim.z + threadIdx.z); // Position on the side of the brick if (suv.x >= paddedBrickRes || suv.y >= paddedBrickRes || side >= 6) return; switch (side) { case 0: brickVoxel = make_uint3(0, suv.x, suv.y); break; case 1: brickVoxel = make_uint3(suv.x, 0, suv.y); break; case 2: brickVoxel = make_uint3(suv.x, suv.y, 0); break; case 3: brickVoxel = make_uint3(paddedBrickRes - 1, suv.x, suv.y); break; case 4: brickVoxel = make_uint3(suv.x, paddedBrickRes - 1, suv.y); break; case 5: brickVoxel = make_uint3(suv.x, suv.y, paddedBrickRes - 1); break; } } // Compute the position of the voxel in the atlas uint3 atlasVoxel; // In the coordinate space of the entire atlas { VDBNode* node = getNode(gvdb, 0, brick); if (node == 0x0) return; // This brick ID didn't correspond to a known brick, which is invalid // (The (1,1,1) here accounts for the 1 unit of apron padding) atlasVoxel = brickVoxel + make_uint3(node->mValue) - make_uint3(ApronSize, ApronSize, ApronSize); } // Get the value of the voxel by converting to index-space and then // sampling the value at that index T value; { float3 worldPos; if (!getAtlasToWorld(gvdb, atlasVoxel, worldPos)) return; float3 offs, vmin; uint64 nodeID; VDBNode* node = getNodeAtPoint(gvdb, worldPos, &offs, &vmin, &nodeID); if (node == 0x0) { // Out of range, use the boundary value value = boundaryValue; } else { offs += (worldPos - vmin); // Get the atlas position value = surf3Dread<T>(gvdb->volOut[channel], uint(offs.x) * sizeof(T), uint(offs.y), uint(offs.z)); } } // Write to the apron voxel surf3Dwrite(value, gvdb->volOut[channel], atlasVoxel.x * sizeof(T), atlasVoxel.y, atlasVoxel.z); } extern "C" __global__ void gvdbUpdateApronF (VDBInfo* gvdb, uchar chan, int brickcnt, int brickres, int brickwid, float boundval, int mApron) { UpdateApron<float>(gvdb, chan, brickcnt, brickres, boundval, mApron); } extern "C" __global__ void gvdbUpdateApronF4 ( VDBInfo* gvdb, uchar chan, int brickcnt, int brickres, int brickwid, float boundval, int mApron) { UpdateApron<float4>(gvdb, chan, brickcnt, brickres, make_float4(boundval, boundval, boundval, boundval), mApron); } extern "C" __global__ void gvdbUpdateApronC ( VDBInfo* gvdb, uchar chan, int brickcnt, int brickres, int brickwid, float boundval, int mApron) { UpdateApron<uchar>(gvdb, chan, brickcnt, brickres, boundval, mApron); } extern "C" __global__ void gvdbUpdateApronC4 ( VDBInfo* gvdb, uchar chan, int brickcnt, int brickres, int brickwid, float boundval, int mApron) { UpdateApron<uchar4>(gvdb, chan, brickcnt, brickres, make_uchar4(boundval, boundval, boundval, boundval), mApron); } // Loads the shared memory for this CUDA block, given the index of this thread in the // shared memory, and its location in the atlas. This should be called for values of // ndx in the range [1,8]^3; this function will make additional loads as needed to // fill in the voxels adjacent along an axis to [1,8]^3. // This assumes that the function is being called with a block size of (8, 8, 8). template<class T> __device__ void LoadSharedMemory(VDBInfo* gvdb, uchar channel, T sharedVoxels[10][10][10], uint3 ndx, uint3 voxI) { // Copy the atlas voxel at voxI into sharedVoxels[ndx]. sharedVoxels[ndx.x][ndx.y][ndx.z] = surf3Dread<T>(gvdb->volOut[channel], voxI.x * sizeof(T), voxI.y, voxI.z); // Load voxels adjacent to [1,9]^3. 
if (ndx.x == 1) { sharedVoxels[0][ndx.y][ndx.z] = surf3Dread<T>(gvdb->volOut[channel], (voxI.x - 1) * sizeof(T), voxI.y, voxI.z); } else if (ndx.x == 8) { sharedVoxels[9][ndx.y][ndx.z] = surf3Dread<T>(gvdb->volOut[channel], (voxI.x + 1) * sizeof(T), voxI.y, voxI.z); } if (ndx.y == 1) { sharedVoxels[ndx.x][0][ndx.z] = surf3Dread<T>(gvdb->volOut[channel], voxI.x * sizeof(T), voxI.y - 1, voxI.z); } else if (ndx.y == 8) { sharedVoxels[ndx.x][9][ndx.z] = surf3Dread<T>(gvdb->volOut[channel], voxI.x * sizeof(T), voxI.y + 1, voxI.z); } if (ndx.z == 1) { sharedVoxels[ndx.x][ndx.y][0] = surf3Dread<T>(gvdb->volOut[channel], voxI.x * sizeof(T), voxI.y, voxI.z - 1); } else if (ndx.z == 8) { sharedVoxels[ndx.x][ndx.y][9] = surf3Dread<T>(gvdb->volOut[channel], voxI.x * sizeof(T), voxI.y, voxI.z + 1); } // Make sure all loads from the block have completed. __syncthreads(); } // Suppose you're running a kernel with a block size of (8, 8, 8). This function // will take each thread's indices and return: // localIdx: threadIdx + gvdb->atlas_apron // atlasIdx: The corresponding voxel in the atlas, skipping over aprons. // For instance, for the brick starting at (0,0,0), atlasIdx will be equal to // localIdx. But this will not be the case for other bricks. __device__ void GetVoxelIndicesPacked(VDBInfo* gvdb, uint3& localIdx, uint3& atlasIdx) { const uint3 atlasApron = make_uint3(gvdb->atlas_apron); localIdx = threadIdx + atlasApron; // What atlasIdx would be if atlas_apron were 0 const uint3 packedVox = blockIdx * blockDim + threadIdx; // Find the 3D index of the brick atlasIdx corresponds to const int brickResNoApron = gvdb->brick_res - 2 * gvdb->atlas_apron; const uint3 brick = make_uint3( packedVox.x / brickResNoApron, packedVox.y / brickResNoApron, packedVox.z / brickResNoApron); // Convert to a position in the full atlas atlasIdx = packedVox + brick * atlasApron * 2 + atlasApron; } // A helper macro that sets up local and atlas coordinates, checks to see if // they're inside the atlas bounds, and returns if not. Skips over atlas // boundaries, which means that it can use a smaller computation grid than // the older GVDB_VOX. Assumes a block size of (8,8,8). #define GVDB_VOXPACKED \ uint3 localIdx, atlasIdx; \ GetVoxelIndicesPacked(gvdb, localIdx, atlasIdx); \ if (atlasIdx.x >= atlasRes.x || atlasIdx.y >= atlasRes.y || atlasIdx.z >= atlasRes.z) return; // A helper macro that sets up unpacked local and atlas coordinates, checks to // see if they're inside the atlas bounds, and returns if not. Does not skip // over atlas boundaries, so this covers the entire atlas. This used to be GVDB_VOX. 
#define GVDB_VOXUNPACKED \ uint3 localIdx = threadIdx + make_uint3(1, 1, 1); \ uint3 atlasIdx = blockIdx * blockDim + localIdx; \ if (atlasIdx.x >= atlasRes.x || atlasIdx.y >= atlasRes.y || atlasIdx.z >= atlasRes.z) return; extern "C" __global__ void gvdbOpGrow ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { __shared__ float sharedVoxels[10][10][10]; GVDB_VOXPACKED LoadSharedMemory<float>(gvdb, channel, sharedVoxels, localIdx, atlasIdx); /*float nl; float3 n; n.x = 0.5 * (svox[ndx.x-1][ndx.y][ndx.z] - svox[ndx.x+1][ndx.y][ndx.z]); n.y = 0.5 * (svox[ndx.x][ndx.y-1][ndx.z] - svox[ndx.x][ndx.y+1][ndx.z]); n.z = 0.5 * (svox[ndx.x][ndx.y][ndx.z-1] - svox[ndx.x][ndx.y][ndx.z+1]); nl = sqrt(n.x*n.x+n.y*n.y+n.z*n.z); float v = svox[ndx.x][ndx.y][ndx.z]; if ( nl > 0.2 ) v += amt;*/ float v = sharedVoxels[localIdx.x][localIdx.y][localIdx.z]; if ( v != 0.0) v += p1 * 10.0; //*0.1; if ( v < 0.01) v = 0.0; surf3Dwrite(v, gvdb->volOut[channel], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); } extern "C" __global__ void gvdbOpCut ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { __shared__ float sharedVoxels[10][10][10]; GVDB_VOXPACKED LoadSharedMemory<float>(gvdb, channel, sharedVoxels, localIdx, atlasIdx); // Determine block and index position float3 wpos; if (!getAtlasToWorld(gvdb, atlasIdx, wpos)) return; float v = sharedVoxels[localIdx.x][localIdx.y][localIdx.z]; if (wpos.x < 50 && v > 0 && v < 2) { v = 0.02; surf3Dwrite(v, gvdb->volOut[channel], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); } } extern "C" __global__ void gvdbReduction( VDBInfo* gvdb, int3 res, uchar chan, int3 packres, float* outbuf) { // voxels - not including apron uint3 packvox = blockIdx * make_uint3(blockDim.x, blockDim.y, 1) + threadIdx; int bres = (gvdb->brick_res - gvdb->atlas_apron * 2); // brick res minus apron uint3 brick = packvox / bres; uint3 vox = (brick*gvdb->brick_res) + (packvox % bres) + make_uint3(1, 1, 1); if (vox.x >= res.x || vox.y >= res.y) return; // integrate along z-axis float sum = 0.0; float v; for (int z = 0; z < packres.z; z++) { vox.z = (int(z / bres)*gvdb->brick_res) + (z % bres) + 1; v = surf3Dread<float>(gvdb->volOut[chan], vox.x * sizeof(float), vox.y, vox.z); sum += v; // (v > gvdb->thresh.x) ? 
1.0 : 0.0; } outbuf[packvox.y * packres.x + packvox.x] = sum; } extern "C" __global__ void gvdbResample ( VDBInfo* gvdb, int3 atlasRes, uchar chan, int3 srcres, float* src, float* xform, float3 inr, float3 outr ) { GVDB_VOXUNPACKED float3 wpos; if (!getAtlasToWorld(gvdb, atlasIdx, wpos)) return; wpos -= make_float3(.5, .5, .5); // transform to src index int3 ndx; ndx.x = (int) (wpos.x * xform[0] + wpos.y * xform[4] + wpos.z * xform[8] + xform[12]); ndx.y = (int) (wpos.x * xform[1] + wpos.y * xform[5] + wpos.z * xform[9] + xform[13]); ndx.z = (int) (wpos.x * xform[2] + wpos.y * xform[6] + wpos.z * xform[10] + xform[14]); // skip if outside src if ( ndx.x < 0 || ndx.y < 0 || ndx.z < 0 || ndx.x >= srcres.x || ndx.y >= srcres.y || ndx.z >= srcres.z ) return; // get value float v = src[ (ndx.z*srcres.y + ndx.y)*srcres.x + ndx.x ]; v = outr.x + (v-inr.x)*(outr.y-outr.x)/(inr.y-inr.x); // remap value surf3Dwrite(v, gvdb->volOut[chan], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); } extern "C" __global__ void gvdbDownsample(int3 srcres, float* src, int3 destres, float3 destmax, float* dest, float* xform, float3 inr, float3 outr) { uint3 vox = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if (vox.x >= destres.x || vox.y >= destres.y || vox.z >= destres.z) return; float3 dmin, dmax; dmin = make_float3(vox.x, vox.y, vox.z) * destmax / make_float3(destres.x + 1, destres.y + 1, destres.z + 1); dmax = make_float3(vox.x + 1, vox.y + 1, vox.z + 1) * destmax / make_float3(destres.x + 1, destres.y + 1, destres.z + 1) - make_float3(1, 1, 1); // transform to src index int3 smin, smax; smin.x = (int)(dmin.x * xform[0] + dmin.y * xform[4] + dmin.z * xform[8] + xform[12]); smin.y = (int)(dmin.x * xform[1] + dmin.y * xform[5] + dmin.z * xform[9] + xform[13]); smin.z = (int)(dmin.x * xform[2] + dmin.y * xform[6] + dmin.z * xform[10] + xform[14]); smax.x = (int)(dmax.x * xform[0] + dmax.y * xform[4] + dmax.z * xform[8] + xform[12]); smax.y = (int)(dmax.x * xform[1] + dmax.y * xform[5] + dmax.z * xform[9] + xform[13]); smax.z = (int)(dmax.x * xform[2] + dmax.y * xform[6] + dmax.z * xform[10] + xform[14]); smin.x = (smin.x < 0) ? 0 : ((smin.x > srcres.x - 1) ? srcres.x - 1 : smin.x); smin.y = (smin.y < 0) ? 0 : ((smin.y > srcres.y - 1) ? srcres.y - 1 : smin.y); smin.z = (smin.z < 0) ? 0 : ((smin.z > srcres.z - 1) ? srcres.z - 1 : smin.z); smax.x = (smax.x < smin.x) ? smin.x : ((smax.x > srcres.x - 1) ? srcres.x - 1 : smax.x); smax.y = (smax.y < smin.y) ? smin.y : ((smax.y > srcres.y - 1) ? srcres.y - 1 : smax.y); smax.z = (smax.z < smin.z) ? smin.z : ((smax.z > srcres.z - 1) ? 
srcres.z - 1 : smax.z); // downsample float v = 0; for (int z = smin.z; z <= smax.z; z++) for (int y = smin.y; y <= smax.y; y++) for (int x = smin.x; x <= smax.x; x++) { v += outr.x + (src[ (z*srcres.y + y)*srcres.x + x ] - inr.x)*(outr.y - outr.x) / (inr.y - inr.x); } v /= (smax.x - smin.x + 1)*(smax.y - smin.y + 1)*(smax.z - smin.z + 1); // output value dest[(vox.z*destres.y + vox.y)*destres.x + vox.x] = v; } extern "C" __global__ void gvdbOpFillF ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { GVDB_VOXUNPACKED if ( p3 < 0 ) { //float v = vox.y; // + (vox.z*30 + vox.x)/900.0; float v = sinf(atlasIdx.x * 12 / (3.141592 * 30.0)); v += sinf(atlasIdx.y * 12 / (3.141592 * 30.0)); v += sinf(atlasIdx.z * 12 / (3.141592 * 30.0)); surf3Dwrite(v, gvdb->volOut[channel], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); } else { surf3Dwrite(p1, gvdb->volOut[channel], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); } } extern "C" __global__ void gvdbOpFillC4 ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { GVDB_VOXUNPACKED surf3Dwrite ( CLR2INT(make_float4(p1,p2,p3,1.0)), gvdb->volOut[channel], atlasIdx.x*sizeof(uchar4), atlasIdx.y, atlasIdx.z ); } extern "C" __global__ void gvdbOpFillC ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { GVDB_VOXUNPACKED const uchar c = static_cast<uchar>(p1); surf3Dwrite(c, gvdb->volOut[channel], atlasIdx.x * sizeof(uchar), atlasIdx.y, atlasIdx.z); } extern "C" __global__ void gvdbOpSmooth ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { __shared__ float sharedVoxels[10][10][10]; GVDB_VOXPACKED LoadSharedMemory<float>(gvdb, channel, sharedVoxels, localIdx, atlasIdx); //-- smooth float v = p1 * sharedVoxels[localIdx.x][localIdx.y][localIdx.z]; v += sharedVoxels[localIdx.x - 1][localIdx.y][localIdx.z]; v += sharedVoxels[localIdx.x + 1][localIdx.y][localIdx.z]; v += sharedVoxels[localIdx.x][localIdx.y - 1][localIdx.z]; v += sharedVoxels[localIdx.x][localIdx.y + 1][localIdx.z]; v += sharedVoxels[localIdx.x][localIdx.y][localIdx.z - 1]; v += sharedVoxels[localIdx.x][localIdx.y][localIdx.z + 1]; v = v / (p1 + 6.0) + p2; surf3Dwrite(v, gvdb->volOut[channel], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); } extern "C" __global__ void gvdbOpClrExpand ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { __shared__ uchar4 sharedVoxels[10][10][10]; GVDB_VOXPACKED LoadSharedMemory<uchar4>(gvdb, channel, sharedVoxels, localIdx, atlasIdx); int3 c, cs; int cp; c = make_int3(sharedVoxels[localIdx.x][localIdx.y][localIdx.z]); cs = c * p1; c = make_int3(sharedVoxels[localIdx.x - 1][localIdx.y][localIdx.z]); cs += c * p2; c = make_int3(sharedVoxels[localIdx.x + 1][localIdx.y][localIdx.z]); cs += c * p2; c = make_int3(sharedVoxels[localIdx.x][localIdx.y - 1][localIdx.z]); cs += c * p2; c = make_int3(sharedVoxels[localIdx.x][localIdx.y + 1][localIdx.z]); cs += c * p2; c = make_int3(sharedVoxels[localIdx.x][localIdx.y][localIdx.z - 1]); cs += c * p2; c = make_int3(sharedVoxels[localIdx.x][localIdx.y][localIdx.z + 1]); cs += c * p2; cp = max(cs.x, max(cs.y,cs.z) ); cs = (cp > 255) ? 
make_int3(cs.x*255/cp, cs.y*255/cp, cs.z*255/cp) : cs; surf3Dwrite ( make_uchar4(cs.x, cs.y, cs.z, 1), gvdb->volOut[channel], atlasIdx.x*sizeof(uchar4), atlasIdx.y, atlasIdx.z ); } extern "C" __global__ void gvdbOpExpandC ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { __shared__ uchar sharedVoxels[10][10][10]; GVDB_VOXPACKED LoadSharedMemory<uchar>(gvdb, channel, sharedVoxels, localIdx, atlasIdx); uchar c = 0; c = (sharedVoxels[localIdx.x - 1][localIdx.y][localIdx.z] == (uchar)p1) ? 1 : c; c = (sharedVoxels[localIdx.x + 1][localIdx.y][localIdx.z] == (uchar)p1) ? 1 : c; c = (sharedVoxels[localIdx.x][localIdx.y - 1][localIdx.z] == (uchar)p1) ? 1 : c; c = (sharedVoxels[localIdx.x][localIdx.y + 1][localIdx.z] == (uchar)p1) ? 1 : c; c = (sharedVoxels[localIdx.x][localIdx.y][localIdx.z - 1] == (uchar)p1) ? 1 : c; c = (sharedVoxels[localIdx.x][localIdx.y][localIdx.z + 1] == (uchar)p1) ? 1 : c; uchar v = sharedVoxels[localIdx.x][localIdx.y][localIdx.z]; if ( v == 0 && c == 1 ) { c = static_cast<uchar>(p2); surf3Dwrite(c, gvdb->volOut[channel], atlasIdx.x * sizeof(uchar), atlasIdx.y, atlasIdx.z); } } extern "C" __global__ void gvdbOpNoise ( VDBInfo* gvdb, int3 atlasRes, uchar channel, float p1, float p2, float p3 ) { __shared__ float sharedVoxels[10][10][10]; GVDB_VOXPACKED LoadSharedMemory<float>(gvdb, channel, sharedVoxels, localIdx, atlasIdx); //-- noise float v = sharedVoxels[localIdx.x][localIdx.y][localIdx.z]; if (v > 0.01) v += random(make_float3(atlasIdx.x, atlasIdx.y, atlasIdx.z)) * p1; surf3Dwrite(v, gvdb->volOut[channel], atlasIdx.x * sizeof(float), atlasIdx.y, atlasIdx.z); }
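// ---------------------------------------------------------------------------
// Illustrative aside (independent of the GVDB API): a minimal standalone
// kernel using the same 8x8x8-block-plus-one-voxel-halo shared-memory pattern
// as LoadSharedMemory/gvdbOpSmooth above, but on a plain linear 3D buffer
// instead of an atlas surface. For simplicity it assumes res.x-2, res.y-2 and
// res.z-2 are exact multiples of 8, so every thread maps to an interior voxel
// and no bounds checks are needed. All names here are hypothetical.
// ---------------------------------------------------------------------------
static __device__ __forceinline__ size_t idx3d(int x, int y, int z, int3 res)
{
    return (size_t(z) * res.y + y) * res.x + x;
}

__global__ void smooth6_demo(const float* in, float* out, int3 res)
{
    __shared__ float tile[10][10][10];

    // Shift by +1 so slots 0 and 9 of the tile hold the one-voxel halo.
    const uint3 local = make_uint3(threadIdx.x + 1, threadIdx.y + 1, threadIdx.z + 1);
    const int vx = blockIdx.x * blockDim.x + local.x;
    const int vy = blockIdx.y * blockDim.y + local.y;
    const int vz = blockIdx.z * blockDim.z + local.z;

    // Center load plus halo loads along each axis, mirroring LoadSharedMemory.
    tile[local.x][local.y][local.z] = in[idx3d(vx, vy, vz, res)];
    if (threadIdx.x == 0)              tile[0][local.y][local.z] = in[idx3d(vx - 1, vy, vz, res)];
    if (threadIdx.x == blockDim.x - 1) tile[9][local.y][local.z] = in[idx3d(vx + 1, vy, vz, res)];
    if (threadIdx.y == 0)              tile[local.x][0][local.z] = in[idx3d(vx, vy - 1, vz, res)];
    if (threadIdx.y == blockDim.y - 1) tile[local.x][9][local.z] = in[idx3d(vx, vy + 1, vz, res)];
    if (threadIdx.z == 0)              tile[local.x][local.y][0] = in[idx3d(vx, vy, vz - 1, res)];
    if (threadIdx.z == blockDim.z - 1) tile[local.x][local.y][9] = in[idx3d(vx, vy, vz + 1, res)];
    __syncthreads();

    // Six-neighbor average, i.e. gvdbOpSmooth with p1 = 0 and p2 = 0.
    const float v = tile[local.x - 1][local.y][local.z] + tile[local.x + 1][local.y][local.z]
                  + tile[local.x][local.y - 1][local.z] + tile[local.x][local.y + 1][local.z]
                  + tile[local.x][local.y][local.z - 1] + tile[local.x][local.y][local.z + 1];
    out[idx3d(vx, vy, vz, res)] = v / 6.0f;
}
// Possible launch: dim3 block(8, 8, 8), grid((res.x - 2) / 8, (res.y - 2) / 8, (res.z - 2) / 8);
//                  smooth6_demo<<<grid, block>>>(d_in, d_out, res);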
Compile: nvcc -arch=sm_52 -O3 -lcublas -lcurand -o LSTM LSTM.cu To enable/disable different performance options add the flat -DPERFOPTSx Where x is a bitmask defining the options used (see below). Run: ./LSTM or ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> Example (run on an NVIDIA M40): > ./LSTM Running with default settings seqLength 100, numLayers 4, hiddenSize 512, miniBatch 64 i checksum (example 0) 5.113463E+04 h checksum (example 0) 2.048000E+03 c checksum (example 0) 2.058137E+05 i checksum 3.272639E+06 c checksum 1.317278E+07 h checksum 1.310720E+05 Runtime 27.807743ms */ #include <stdio.h> #include <cublas_v2.h> #include <curand.h> // Performance is not significantly different, but false saves memory. // False does not work with unfused pointwise ops. #define TRAINING (false) #ifndef PERFOPTS #define PERFOPTS (31) #endif #define GROUP_GEMM ((PERFOPTS & 1)) #define USE_STREAMS ((PERFOPTS & 2)) #define FUSE_PW ((PERFOPTS & 4)) #define PRE_TRANSPOSE ((PERFOPTS & 8)) #define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1)) // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } // Device functions __forceinline__ __device__ float sigmoidf(float in) { return 1.f / (1.f + expf(-in)); } // Pointwise functions __global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] += bias[i % nBias]; } __global__ void pw_vecAdd(float *y, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a[i] + b[i]; } __global__ void pw_vecMul(float *y, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a[i] * b[i]; } __global__ void pw_tanh(float *y, float *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = tanh(a[i]); } __global__ void pw_sigmoid(float *y, float *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = sigmoidf(a[i]); } // Unfused LSTM (calling many pointwise kernels). 
int LSTM_elementwise_unfused( int hiddenSize, int miniBatch, float * __restrict__ tmp_h, float * __restrict__ tmp_i, float * __restrict__ bias, float * __restrict__ linearGates, float * __restrict__ h_data, float * __restrict__ i_data, float * __restrict__ c_in, float * __restrict__ c_out, bool training, cudaStream_t stream) { dim3 blockDim; dim3 gridDim; int numElements = hiddenSize * miniBatch; blockDim.x = 128; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; for (int i = 0; i < 4; i++) { if (tmp_h != NULL) { pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, tmp_i + i * numElements, tmp_h + i * numElements, numElements); cudaErrCheck(cudaGetLastError()); } pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize); cudaErrCheck(cudaGetLastError()); pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, bias + (i + 4) * hiddenSize, numElements, hiddenSize); cudaErrCheck(cudaGetLastError()); if (training) { printf("LSTM_elementWise_unfused does not support training\n"); return 1; } } pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (tmp_i + 0 * numElements, tmp_i + 0 * numElements, numElements); cudaErrCheck(cudaGetLastError()); pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (tmp_i + 1 * numElements, tmp_i + 1 * numElements, numElements); cudaErrCheck(cudaGetLastError()); pw_tanh <<< gridDim, blockDim, 0, stream >>> (tmp_i + 2 * numElements, tmp_i + 2 * numElements, numElements); cudaErrCheck(cudaGetLastError()); pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (tmp_i + 3 * numElements, tmp_i + 3 * numElements, numElements); cudaErrCheck(cudaGetLastError()); float *in_gate = tmp_i + 0 * numElements; float *forget_gate = tmp_i + 1 * numElements; float *in_gate2 = tmp_i + 2 * numElements; float *out_gate = tmp_i + 3 * numElements; if (c_in == NULL) { pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements); cudaErrCheck(cudaGetLastError()); } else { pw_vecMul <<< gridDim, blockDim, 0, stream >>> (forget_gate, forget_gate, c_in, numElements); cudaErrCheck(cudaGetLastError()); pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements); cudaErrCheck(cudaGetLastError()); pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, forget_gate, numElements); cudaErrCheck(cudaGetLastError()); } if (c_out != NULL) { cudaErrCheck(cudaMemcpyAsync(c_out, in_gate, numElements * sizeof(float), cudaMemcpyDeviceToDevice, stream)); } pw_tanh <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, numElements); cudaErrCheck(cudaGetLastError()); pw_vecMul <<< gridDim, blockDim, 0, stream >>> (h_data, out_gate, in_gate, numElements); cudaErrCheck(cudaGetLastError()); pw_vecMul <<< gridDim, blockDim, 0, stream >>> (i_data, out_gate, in_gate, numElements); cudaErrCheck(cudaGetLastError()); return 0; } // Fused forward kernel __global__ void elementWise_fp(int hiddenSize, int miniBatch, float *tmp_h, float *tmp_i, float *bias, float *linearGates, float *h_out, float *i_out, float *c_in, float *c_out, bool training) { int index = blockIdx.x * blockDim.x + threadIdx.x; int numElements = miniBatch * hiddenSize; if (index >= numElements) return; int batch = index / hiddenSize; int gateIndex = (index % hiddenSize) + 4 * batch * hiddenSize; float g[4]; for (int i = 0; i < 4; i++) { g[i] = tmp_i[i * hiddenSize + gateIndex] + tmp_h[i * hiddenSize + gateIndex]; g[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + 
index % hiddenSize]; if (training) linearGates[gateIndex + i * hiddenSize] = g[i]; } float in_gate = sigmoidf(g[0]); float forget_gate = sigmoidf(g[1]); float in_gate2 = tanhf(g[2]); float out_gate = sigmoidf(g[3]); float val = (forget_gate * c_in[index]) + (in_gate * in_gate2); c_out[index] = val; val = out_gate * tanhf(val); h_out[index] = val; i_out[index] = val; } float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, bool checkF) { float *h_data; float *i_data; float *c_data; float *T; float *T_f; float *bias; float *tmp_h; float *tmp_i; float *linearGates; cudaStream_t *stream_i; cudaStream_t *stream_h; cudaEvent_t **events_i; cudaEvent_t **events_h; // Need a cuBLAS handle. cublasHandle_t handle; cublasErrCheck(cublasCreate(&handle)); // Allocate streams/events stream_i = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t)); stream_h = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t)); // If we don't want to use streams we can launch everything in to the NULL stream for (int i = 0; i < numLayers; i++) { if (USE_STREAMS) { cudaErrCheck(cudaStreamCreate(&stream_i[i])); // Priority is empirical. cudaErrCheck(cudaStreamCreateWithPriority(&stream_h[i], 0, -1)); } else { stream_i[i] = NULL; stream_h[i] = NULL; } } events_i = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*)); events_h = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*)); for (int i = 0; i < numLayers; i++) { events_i[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t)); events_h[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t)); } // Input/output data int numElements = hiddenSize * miniBatch; cudaErrCheck(cudaMalloc((void**)&h_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&i_data, (seqLength) * (numLayers + 1) * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&T, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&T_f, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&bias, numLayers * hiddenSize * 8 * sizeof(float))); // Workspace cudaErrCheck(cudaMalloc((void**)&tmp_h, 4 * numLayers * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&tmp_i, 4 * seqLength * numElements * sizeof(float))); // Activations if (TRAINING) { cudaErrCheck(cudaMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float))); } // Initialise with random values. 
curandGenerator_t rng; curandErrCheck(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT)); curandErrCheck(curandSetPseudoRandomGeneratorSeed(rng, 1337ull)); curandErrCheck(curandGenerateUniform(rng, h_data, (seqLength + 1) * (numLayers) * numElements)); curandErrCheck(curandGenerateUniform(rng, c_data, (seqLength + 1) * (numLayers) * numElements)); curandErrCheck(curandGenerateUniform(rng, i_data, (seqLength) * (numLayers + 1) * numElements)); curandErrCheck(curandGenerateUniform(rng, T, numLayers * hiddenSize * hiddenSize * 8)); curandErrCheck(curandGenerateUniform(rng, bias, numLayers * hiddenSize * 8)); curandErrCheck(curandDestroyGenerator(rng)); // Make sure everything is done before we start the timers cudaErrCheck(cudaDeviceSynchronize()); // Timing starts here float elapsedTime; cudaEvent_t start, stop; cudaErrCheck(cudaEventCreate(&start)); cudaErrCheck(cudaEventCreate(&stop)); cudaErrCheck(cudaEventRecord(start)); float alpha = 1.f; float beta = 0.f; const cublasOperation_t transa = (PRE_TRANSPOSE && (seqLength > 1)) ? CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t transb = CUBLAS_OP_N; // Optimization 4 if (transa == CUBLAS_OP_N) { for (int layer = 0; layer < numLayers; layer++) { float *T_i_in = T + layer * hiddenSize * hiddenSize * 8; float *T_i_out = T_f + layer * hiddenSize * hiddenSize * 8; float *T_h_in = T + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4; float *T_h_out = T_f + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4; cublasErrCheck(cublasSetStream(handle, stream_i[layer])); cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_i_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_i_out, 4 * hiddenSize)); cublasErrCheck(cublasSetStream(handle, stream_h[layer])); cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_h_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_h_out, 4 * hiddenSize)); } } else { T_f = T; } if (transb != CUBLAS_OP_N) { printf("Only transb == CUBLAS_OP_N supported\n"); return -1; } int lStart = 0; int lEnd = 0; int rStart = 0; int rEnd = 0; int recurBatchSize = RECUR_BATCH_SIZE; while (true) { // Many layer "scheduling". if (lEnd == 0) { lStart = 0; lEnd = 1; rStart = 0; } else { // Move "up" and "left" lStart++; lEnd++; rStart -= recurBatchSize; // Over the top or off the left, reset to layer 0 if (lEnd > numLayers || rStart < 0) { rStart += (lStart + 1) * recurBatchSize; lStart = 0; lEnd = 1; } // Off the right, step up while (rStart >= seqLength && lEnd <= numLayers) { lStart++; lEnd++; rStart -= recurBatchSize; } // Over the top or off the left, done! if (lEnd > numLayers || rStart < 0) { break; } } rEnd = rStart + recurBatchSize; if (rEnd > seqLength) rEnd = seqLength; for (int layer = lStart; layer < lEnd; layer++) { cublasErrCheck(cublasSetStream(handle, stream_i[layer])); for (int i = rStart; i < rEnd; i++) { if (layer > 0) { cudaErrCheck(cudaStreamWaitEvent(stream_i[layer], events_h[layer - 1][i], 0)); cudaErrCheck(cudaEventDestroy(events_h[layer - 1][i])); } } // Optimization 1 if (GROUP_GEMM) { cublasErrCheck(cublasSgemm(handle, transa, transb, 4 * hiddenSize, miniBatch * (rEnd - rStart), hiddenSize, &alpha, &T_f[layer * 8 * hiddenSize * hiddenSize], transa == CUBLAS_OP_N ? 
4 * hiddenSize : hiddenSize, i_data + rStart * numElements + layer * seqLength * numElements, hiddenSize, &beta, tmp_i + 4 * rStart * numElements, 4 * hiddenSize)); } else { for (int igemm =0; igemm < 4; igemm++) { cublasErrCheck(cublasSgemm(handle, transa, transb, hiddenSize, miniBatch * (rEnd - rStart), hiddenSize, &alpha, &T_f[layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], transa == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, i_data + rStart * numElements + layer * seqLength * numElements, hiddenSize, &beta, tmp_i + 4 * rStart * numElements + igemm * hiddenSize, 4 * hiddenSize)); } } for (int i = rStart; i < rEnd; i++) { cudaErrCheck(cudaEventCreate(&events_i[layer][i], cudaEventDisableTiming)); cudaErrCheck(cudaEventRecord(events_i[layer][i], stream_i[layer])); } for (int i = rStart; i < rEnd; i++) { cublasErrCheck(cublasSetStream(handle, stream_h[layer])); // Optimization 1 if (GROUP_GEMM) { cublasErrCheck(cublasSgemm(handle, transa, transb, 4 * hiddenSize, miniBatch, hiddenSize, &alpha, &T_f[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize], transa == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, h_data + i * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &beta, tmp_h + 4 * layer * numElements, 4 * hiddenSize)); } else { for (int igemm =0; igemm < 4; igemm++) { cublasErrCheck(cublasSgemm(handle, transa, transb, hiddenSize, miniBatch, hiddenSize, &alpha, &T_f[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], transa == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, h_data + i * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &beta, tmp_h + 4 * layer * numElements + igemm * hiddenSize, 4 * hiddenSize)); } } cudaErrCheck(cudaStreamWaitEvent(stream_h[layer], events_i[layer][i], 0)); cudaErrCheck(cudaEventDestroy(events_i[layer][i])); // Optimization 3 if (FUSE_PW) { dim3 blockDim; dim3 gridDim; blockDim.x = 256; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; elementWise_fp <<< gridDim, blockDim , 0, stream_h[layer] >>> (hiddenSize, miniBatch, tmp_h + 4 * layer * numElements, tmp_i + 4 * i * numElements, bias + 8 * layer * hiddenSize, TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL, h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, i_data + i * numElements + (layer + 1) * seqLength * numElements, c_data + i * numElements + layer * (seqLength + 1) * numElements, c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, TRAINING); cudaErrCheck(cudaGetLastError()); } else { LSTM_elementwise_unfused(hiddenSize, miniBatch, tmp_h + 4 * layer * numElements, tmp_i + 4 * i * numElements, bias + 8 * layer * hiddenSize, TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL, h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, i_data + i * numElements + (layer + 1) * seqLength * numElements, c_data + i * numElements + layer * (seqLength + 1) * numElements, c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, TRAINING, stream_h[layer]); } if (layer != numLayers - 1) { cudaErrCheck(cudaEventCreate(&events_h[layer][i], cudaEventDisableTiming)); cudaErrCheck(cudaEventRecord(events_h[layer][i], stream_h[layer])); } } } } cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&elapsedTime, start, stop)); cudaErrCheck(cudaDeviceSynchronize()); // We're done. 
Print some checksums if (checkF) { float* testOutputi; float* testOutputh; float* testOutputc; int numElements = hiddenSize * miniBatch; testOutputi = (float*)malloc(numElements * seqLength * sizeof(float)); testOutputh = (float*)malloc(numElements * numLayers * sizeof(float)); testOutputc = (float*)malloc(numElements * numLayers * sizeof(float)); cudaErrCheck(cudaMemcpy(testOutputi, i_data + numLayers * seqLength * numElements, seqLength * numElements * sizeof(float), cudaMemcpyDeviceToHost)); for (int layer = 0; layer < numLayers; layer++) { cudaErrCheck(cudaMemcpy(testOutputh + layer * numElements, h_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), cudaMemcpyDeviceToHost)); cudaErrCheck(cudaMemcpy(testOutputc + layer * numElements, c_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), cudaMemcpyDeviceToHost)); } double checksumi = 0.; double checksumh = 0.; double checksumc = 0.; for (int m = 0; m < miniBatch; m++) { for (int j = 0; j < seqLength; j++) { for (int i = 0; i < hiddenSize; i++) { checksumi += testOutputi[j * numElements + m * hiddenSize + i]; if (hiddenSize <= 8) printf("i: (%d,%d): %E\n", j, i, testOutputi[j * numElements + m * hiddenSize + i]); } } for (int j = 0; j < numLayers; j++) { for (int i = 0; i < hiddenSize; i++) { checksumh += testOutputh[j * numElements + m * hiddenSize + i]; checksumc += testOutputc[j * numElements + m * hiddenSize + i]; } } if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi); if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh); if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc); } printf("i checksum %E ", checksumi); printf("c checksum %E ", checksumc); printf("h checksum %E\n", checksumh); free(testOutputi); free(testOutputc); free(testOutputh); } cudaErrCheck(cudaDeviceSynchronize()); cudaErrCheck(cudaFree(h_data)); cudaErrCheck(cudaFree(i_data)); cudaErrCheck(cudaFree(c_data)); if (T != T_f) cudaErrCheck(cudaFree(T)); cudaErrCheck(cudaFree(T_f)); cudaErrCheck(cudaFree(bias)); cudaErrCheck(cudaFree(tmp_h)); cudaErrCheck(cudaFree(tmp_i)); if (TRAINING) cudaErrCheck(cudaFree(linearGates)); for (int i = 0; i < numLayers; i++) { if (stream_i[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_i[i])); if (stream_h[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_h[i])); } free(stream_i); free(stream_h); for (int i = 0; i < numLayers; i++) { free(events_i[i]); free(events_h[i]); } free(events_i); free(events_h); return elapsedTime; } int main(int argc, char* argv[]) { int seqLength; int numLayers; int hiddenSize; int miniBatch; if (argc == 5) { seqLength = atoi(argv[1]); numLayers = atoi(argv[2]); hiddenSize = atoi(argv[3]); miniBatch = atoi(argv[4]); } else if (argc == 1) { printf("Running with default settings\n"); seqLength = 100; numLayers = 4; hiddenSize = 512; miniBatch = 64; } else { printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>\n"); return 1; } printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch); int numRuns = 1; float totalTime = 0.f; for (int run = 0; run < numRuns; run++) { totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, true); } printf("Runtime %fms\n", totalTime / numRuns); return time < 0; }
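// ---------------------------------------------------------------------------
// Illustrative aside (not part of the benchmark above): a plain CPU reference
// for the per-element gate math that elementWise_fp evaluates on the GPU,
// once the four gate pre-activations g[0..3] (input, forget, candidate,
// output) have been formed from the two GEMM outputs plus the two bias terms.
// The function name is hypothetical.
// ---------------------------------------------------------------------------
#include <cmath>

static void lstm_cell_reference(const float g[4], float c_in,
                                float* c_out, float* h_out)
{
    const float in_gate     = 1.f / (1.f + std::exp(-g[0]));
    const float forget_gate = 1.f / (1.f + std::exp(-g[1]));
    const float candidate   = std::tanh(g[2]);
    const float out_gate    = 1.f / (1.f + std::exp(-g[3]));

    // Same update as elementWise_fp: c' = f*c + i*g~, h = o*tanh(c').
    *c_out = forget_gate * c_in + in_gate * candidate;
    *h_out = out_gate * std::tanh(*c_out);
}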
#include <iostream>
#include <math.h>
#include "LocalHistogramEqualization.h"

using namespace std;

// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8

// Number of gray levels in the image.
#define HISTO_NUM 256

// Kernel: _hisEqualHistoKer (computes the histograms of all windows in parallel)
// After the image has been split into windows according to the division count,
// computes a histogram for every window.
static __global__ void _hisEqualHistoKer(
        ImageCuda inimg,      // input image
        int *his,             // histogram of each window
        int ww,               // window width
        int wh,               // window height
        int blockperwinw,     // number of thread blocks per window horizontally
        int blockperwinh      // number of thread blocks per window vertically
);

// Kernel: _hisEqualKer (computes the normalized histograms in parallel)
// Once the histogram of every window has been computed, normalizes each
// histogram.
static __global__ void _hisEqualKer(
        int *his,             // histogram of each window
        float *norhis,        // normalized histogram of each window
        int *max,             // maximum gray value of each window
        int *min,             // minimum gray value of each window
        int total             // total number of pixels per window
);

// Kernel: _hisEqualLastKer (performs the histogram equalization)
// Uses the normalized histograms and the original image to compute
// cumucounter and cumugray.
static __global__ void _hisEqualLastKer(
        ImageCuda inimg,      // input image
        float *norhis,        // normalized histogram of each window
        int *max,             // maximum gray value of each window
        int *min,             // minimum gray value of each window
        int ww,               // window width
        int wh,               // window height
        int blockperwinw,     // number of thread blocks per window horizontally
        int blockperwinh,     // number of thread blocks per window vertically
        int *cumucounter,     // window overlap count
        int *cumugray         // equalization result
);

// Kernel: _hisEqualSecKer (reassembles the windows into the output image)
// After every window has been equalized, this kernel combines the per-window
// results into the output image.
static __global__ void _hisEqualSecKer(
        ImageCuda inimg,      // input image
        ImageCuda outimg,     // output image
        int *cumucounter,     // window overlap count
        int *cumugray,        // equalization result
        unsigned char t0,     // external parameter
        float c1,             // external parameter
        float c2,             // external parameter
        float weight          // external parameter
);

// Kernel: _hisEqualHistoKer (computes the histograms of all windows in parallel)
static __global__ void _hisEqualHistoKer(ImageCuda inimg, int *his, int ww, int wh,
                                         int blockperwinw, int blockperwinh)
{
    // Shared memory with one slot per gray level (256); the index is the gray
    // value and each slot counts the pixels with that value.
    __shared__ unsigned int temp[HISTO_NUM];

    // Horizontal and vertical window indices of the current thread block, and
    // the flattened window index.
    __shared__ int winnumx, winnumy, winidx;

    // dstc and dstr are the x and y components of the thread coordinate
    // (c stands for column, r for row).
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = blockIdx.y * blockDim.y + threadIdx.y;

    // Position of this thread inside its block.
    int inindex = threadIdx.y * blockDim.x + threadIdx.x;

    // The first thread of each block computes the window indices of the block.
    if (inindex == 0) {
        // Horizontal and vertical window indices of this block.
        winnumx = blockIdx.x / blockperwinw;
        winnumy = blockIdx.y / blockperwinh;
        // Flattened window index.
        winidx = (gridDim.x / blockperwinw) * winnumy + winnumx;
    }

    // Threads whose in-block index is below 256 (the number of gray levels)
    // initialize the shared histogram to 0.
    if (inindex < HISTO_NUM)
        temp[inindex] = 0;

    // Block-level synchronization: beyond this point every element of the
    // shared array is guaranteed to be 0.
    __syncthreads();

    // Coordinates relative to the window.
    int inidxc = dstc - winnumx * blockDim.x * blockperwinw;
    int inidxr = dstr - winnumy * blockDim.y * blockperwinh;

    // Coordinate of this thread in the image data.
    int inidx = (winnumx * ww / 2 + inidxc) +
                inimg.pitchBytes * (winnumy * wh / 2 + inidxr);

    // Pixel value at the input coordinate.
    int curgray;

    // If the pixel is inside the window, count it.
    if (inidxc < ww && inidxr < wh) {
        curgray = inimg.imgMeta.imgData[inidx];
        // Use an atomic increment so that concurrent updates from multiple
        // threads do not race.
        // Increment the counter for this gray value.
        atomicAdd(&temp[curgray], 1);
    }

    // Block-level synchronization: all pixels have been counted at this point.
    __syncthreads();

    // The first 256 threads of each block write the shared-memory results to
    // the output array; each window owns one segment of his.
    if (inindex < HISTO_NUM && temp[inindex] != 0)
        atomicAdd(&his[inindex + winidx * HISTO_NUM], temp[inindex]);
}

// Kernel: _hisEqualKer (computes the normalized histograms in parallel)
static __global__ void _hisEqualKer(int *his, float *norhis, int *max, int *min,
                                    int total)
{
    // Index of the current thread block, i.e. the window index.
    int winindx = blockIdx.y * gridDim.x + blockIdx.x;

    // Index of the current thread inside its block.
    int inindex = threadIdx.y * blockDim.x + threadIdx.x;

    // Start offset of the histogram handled by this block.
    int inxstart = winindx * HISTO_NUM;

    // Probability of each gray value.
    // Start offset plus in-block index gives the histogram array index.
    norhis[inindex + inxstart] = 1.0 * his[inindex + inxstart] / total;

    // Synchronize so that all probabilities have been computed.
    __syncthreads();

    // The first thread of the block computes the cumulative normalized
    // histogram.
    if (inindex == 0) {
        // Each entry depends on the previous one, so this runs serially.
        // Start offset plus i gives the histogram array index.
        for (int i = 1; i < HISTO_NUM; i++) {
            norhis[i + inxstart] += norhis[i - 1 + inxstart];
        }
    }

    // The second thread of the block finds the maximum and minimum gray
    // values (max, min). A parallel search over such a short, ordered array is
    // not worthwhile, so each histogram is scanned serially.
    // Start offset plus i gives the histogram array index.
    if (inindex == 1) {
        // The first non-zero position is the minimum gray value.
        for (int i = 0; i < HISTO_NUM; i++) {
            if (his[i + inxstart] != 0) {
                min[winindx] = i;
                break;
            }
        }
        // The last non-zero position is the maximum gray value.
        for (int i = HISTO_NUM - 1; i >= 0; i--) {
            if (his[i + inxstart] != 0) {
                max[winindx] = i;
                break;
            }
        }
    }
}

// Kernel: _hisEqualLastKer (performs the histogram equalization)
static __global__ void _hisEqualLastKer(ImageCuda inimg, float *norhis, int *max,
                                        int *min, int ww, int wh,
                                        int blockperwinw, int blockperwinh,
                                        int *cumucounter, int *cumugray)
{
    // Horizontal and vertical window indices of the current thread block, and
    // the flattened window index.
    int winnumx, winnumy, winidx;

    // Compute the window indices of this block.
    winnumx = blockIdx.x / blockperwinw;
    winnumy = blockIdx.y / blockperwinh;
    // Flattened window index.
    winidx = (gridDim.x / blockperwinw) * winnumy + winnumx;

    // dstc and dstr are the x and y components of the thread coordinate.
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = blockIdx.y * blockDim.y + threadIdx.y;

    // Coordinates relative to the window.
    int inidxc = dstc - winnumx * blockDim.x * blockperwinw;
    int inidxr = dstr - winnumy * blockDim.y * blockperwinh;

    // If the pixel is inside the window, process it.
    if (inidxc < ww && inidxr < wh) {
        // Coordinate of this thread in the image data.
        int inidx = (winnumx * ww / 2 + inidxc) +
                    inimg.pitchBytes * (winnumy * wh / 2 + inidxr);

        // Index into the image-sized arrays (cumugray and cumucounter).
        int inidx2 = (winnumx * ww / 2 + inidxc) +
                     inimg.imgMeta.width * (winnumy * wh / 2 + inidxr);

        // Start offset of this thread's histogram: window index times 256.
        int inxstart = winidx * HISTO_NUM;

        // Difference between the maximum and minimum gray values.
        int sub = max[winidx] - min[winidx];

        // Accumulate the equalization result into cumugray and increment
        // cumucounter; atomics avoid races caused by overlapping windows.
        atomicAdd(&cumugray[inidx2],
                  norhis[inimg.imgMeta.imgData[inidx] + inxstart] * sub + min[winidx]);
        atomicAdd(&cumucounter[inidx2], 1);
    }
}

// Kernel: _hisEqualSecKer (reassembles the per-window results into the output image)
static __global__ void _hisEqualSecKer(ImageCuda inimg, ImageCuda outimg,
                                       int *cumucounter, int *cumugray,
                                       unsigned char t0, float c1, float c2,
                                       float weight)
{
    // dstc and dstr are the x and y components of the pixel handled by this
    // thread (c stands for column, r for row).
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = blockIdx.y * blockDim.y + threadIdx.y;

    // If the pixel is out of bounds, do nothing; this saves compute and
    // prevents out-of-bounds accesses from crashing the kernel.
    if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height)
        return;

    // Position of the current pixel in the image.
    int curpos = dstr * inimg.pitchBytes + dstc;

    // Index into the overlap-count and gray arrays.
    int curpos2 = dstr * inimg.imgMeta.width + dstc;

    // Value of the current pixel.
    unsigned char g = inimg.imgMeta.imgData[curpos];

    // Transform the original pixel value.
    float gray;
    if (g <= t0) {
        gray = 0.0f;
    } else if (g >= 250) {
        gray = 255.0f;
    } else {
        gray = c1 * (logf(g) - c2);
    }

    // Combine the original image and the equalization result into the output.
    outimg.imgMeta.imgData[curpos] =
        (unsigned char)((cumugray[curpos2] / cumucounter[curpos2] - gray) * weight + gray);
}

// Member method: localHistEqual (image histogram equalization)
__host__ int LocalHistEqual::localHistEqual(Image *inimg, Image *outimg)
{
    // Local variables: error codes.
    int errcode;
    cudaError_t cudaerrcode;

    // Check that neither the input nor the output image is null.
    if (inimg == NULL || outimg == NULL)
        return NULL_POINTER;

    // Copy the input image to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR) {
        return errcode;
    }

    // Copy the output image to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        return errcode;
    }

    // Extract the input sub-image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR) {
        return errcode;
    }

    // Extract the output sub-image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR) {
        return errcode;
    }

    // Compute the window width and height.
    int ww = inimg->width / divNum;
    int wh = inimg->height / divNum;

    // Total number of pixels in the image.
    int totalnum = inimg->width * inimg->height;

    // Allocate the global device memory needed by the kernels.
    // The equalization results and the overlap counts are allocated in one
    // block and addressed through offsets.
    int *devdata;
    cudaerrcode = cudaMalloc((void **)&devdata, sizeof (int) * totalnum * 2);
    // On failure, release the memory.
    if (cudaerrcode != cudaSuccess) {
        cudaFree(devdata);
        return CUDA_ERROR;
    }
    int *devcumucounter = devdata;
    int *devcumugray = &devdata[totalnum];

    // Initialize to 0.
    cudaerrcode = cudaMemset(devdata, 0, sizeof (int) * totalnum * 2);
    if (cudaerrcode != cudaSuccess) {
        cudaFree(devdata);
        return CUDA_ERROR;
    }

    // Total number of windows in the image.
    int winnum = (divNum * 2 - 1) * (divNum * 2 - 1);

    // The per-window histograms and the max/min arrays are allocated in one
    // block and addressed through offsets.
    int *his, *max, *min;
    cudaerrcode = cudaMalloc((void **)&his,
                             sizeof (int) * winnum * (HISTO_NUM + 2));
    // On failure, release the memory.
    if (cudaerrcode != cudaSuccess) {
        cudaFree(devdata);
        cudaFree(his);
        return CUDA_ERROR;
    }
    max = &his[winnum * HISTO_NUM];
    min = &max[winnum];

    // Initialize to 0.
    cudaerrcode = cudaMemset(his, 0, sizeof (int) * winnum * (HISTO_NUM + 2));
    if (cudaerrcode != cudaSuccess) {
        cudaFree(devdata);
        cudaFree(his);
        return CUDA_ERROR;
    }

    // Normalized histograms.
    float *norhis;
    cudaerrcode = cudaMalloc((void **)&norhis,
                             sizeof(float) * winnum * HISTO_NUM);
    // On failure, release the memory.
    if (cudaerrcode != cudaSuccess) {
        cudaFree(devdata);
        cudaFree(his);
        cudaFree(norhis);
        return CUDA_ERROR;
    }

    // Initialize to 0.
    cudaerrcode = cudaMemset(norhis, 0.0f, sizeof (float) * winnum * HISTO_NUM);
    // On failure, release the memory.
    if (cudaerrcode != cudaSuccess) {
        cudaFree(devdata);
        cudaFree(his);
        cudaFree(norhis);
        return CUDA_ERROR;
    }

    // Compute the parameters c1 and c2 from the external parameter t0.
    float c1 = 255 / log(1.0f * 250 / (t0 + 1));
    float c2 = log((t0 + 1) * 1.0f);

    // Compute the block size and block count for the equalization kernels.
    dim3 gridsize, blocksize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;

    // Number of thread blocks per window horizontally,
    int blockperwinw = (ww + blocksize.x - 1) / blocksize.x,
    // and per window vertically.
        blockperwinh = (wh + blocksize.y - 1) / blocksize.y;

    // Overall grid size.
    gridsize.x = blockperwinw * (divNum * 2 - 1);
    gridsize.y = blockperwinh * (divNum * 2 - 1);

    // Compute the histograms.
    _hisEqualHistoKer<<<gridsize, blocksize>>>(insubimgCud, his, ww, wh,
                                               blockperwinw, blockperwinh);
    if (cudaGetLastError() != cudaSuccess) {
        // Kernel launch failed.
        cudaFree(his);
        cudaFree(norhis);
        cudaFree(devdata);
        return CUDA_ERROR;
    }

    // Total number of pixels per window.
    int total = ww * wh;

    // One thread block per window.
    dim3 gridsize2;
    gridsize2.x = divNum * 2 - 1;
    gridsize2.y = divNum * 2 - 1;

    // Compute the normalized histograms and the min/max gray values.
    _hisEqualKer<<<gridsize2, blocksize>>>(his, norhis, max, min, total);
    if (cudaGetLastError() != cudaSuccess) {
        // Kernel launch failed.
        cudaFree(his);
        cudaFree(norhis);
        cudaFree(devdata);
        return CUDA_ERROR;
    }

    // Compute the final per-window equalization results; gridsize is the same
    // as for the histogram kernel. The window size and the per-window block
    // counts have already been computed, so passing them as arguments saves
    // the kernel a little work.
    _hisEqualLastKer<<<gridsize, blocksize>>>(insubimgCud, norhis, max, min,
                                              ww, wh, blockperwinw,
                                              blockperwinh, devcumucounter,
                                              devcumugray);
    if (cudaGetLastError() != cudaSuccess) {
        // Kernel launch failed.
        cudaFree(his);
        cudaFree(norhis);
        cudaFree(devdata);
        return CUDA_ERROR;
    }

    // Compute the block count for the reassembly kernel.
    dim3 gridsize3;
    gridsize3.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize3.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;

    // Launch the kernel that reassembles the windows into the output image.
    _hisEqualSecKer<<<gridsize3, blocksize>>>(insubimgCud, outsubimgCud,
                                              devcumucounter, devcumugray,
                                              t0, c1, c2, weight);
    if (cudaGetLastError() != cudaSuccess) {
        // Kernel launch failed.
        cudaFree(his);
        cudaFree(norhis);
        cudaFree(devdata);
        return CUDA_ERROR;
    }

    // Release the allocated memory to avoid leaks.
    cudaFree(devdata);
    cudaFree(his);
    cudaFree(norhis);
    return NO_ERROR;
}
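// ---------------------------------------------------------------------------
// Illustrative aside (independent of the class above): a minimal standalone
// kernel using the same two-level histogram pattern as _hisEqualHistoKer --
// accumulate into a 256-bin shared-memory histogram with atomicAdd, then merge
// each block's partial histogram into global memory -- but for a flat 1D
// grayscale buffer instead of per-window sub-images. Assumes blockDim.x >= 256
// and a zero-initialized hist array; all names here are hypothetical.
// ---------------------------------------------------------------------------
__global__ void histogram256_demo(const unsigned char* data, int n,
                                  unsigned int* hist)
{
    __shared__ unsigned int localHist[256];

    // One thread per bin clears the shared histogram.
    if (threadIdx.x < 256)
        localHist[threadIdx.x] = 0;
    __syncthreads();

    // Grid-stride loop over the input; shared-memory atomics keep most of the
    // contention off the global histogram.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        atomicAdd(&localHist[data[i]], 1u);
    __syncthreads();

    // Merge this block's partial histogram into the global one.
    if (threadIdx.x < 256 && localHist[threadIdx.x] != 0)
        atomicAdd(&hist[threadIdx.x], localHist[threadIdx.x]);
}
// Possible launch: histogram256_demo<<<64, 256>>>(d_image, width * height, d_hist);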
#include <gflags/gflags.h> #include <glog/logging.h> #include <cuda_profiler_api.h> #include "cuNVSM/model.h" #include "cuNVSM/hdf5.h" #include "cuNVSM/gradient_check.h" DEFINE_uint64(num_epochs, 100000, "Number of training iterations."); DEFINE_uint64(document_cutoff, 0, "Number of documents per epoch (default: all)."); DEFINE_string(document_list, "", "Path to document list (default: all)."); DEFINE_string(term_blacklist, "", "Path to term blacklist (default: none)."); DEFINE_uint64(word_repr_size, 4, "Dimensionality of word representations."); DEFINE_uint64(entity_repr_size, 4, "Dimensionality of entity representations."); DEFINE_uint64(batch_size, 1024, "Size of training batches."); DEFINE_uint64(window_size, 8, "Size of training word windows."); DEFINE_uint64(num_random_entities, 1, "Number of random negative examples sampled for each positive example."); DEFINE_uint64(seed, 0, "Pseudo-random number generator seed."); DEFINE_double(regularization_lambda, 0.01, "Regularization lambda."); DEFINE_double(learning_rate, 0.0, "Learning rate."); DEFINE_string(update_method, "", "Update method (sgd, adagrad, sparse_adam, dense_adam or full_adam)."); DEFINE_string(weighting, "auto", "Instance weighting strategy (auto, uniform or inv_doc_frequency)."); DEFINE_string(feature_weighting, "uniform", "Feature weighting strategy (uniform or self_information)."); DEFINE_bool(bias_negative_samples, false, "Introduces a bias towards negative samples. " "This is considered a bug in the CIKM model."); DEFINE_string(nonlinearity, "", "Nonlinearity (tanh or hard_tanh)."); DEFINE_bool(l2_phrase_normalization, false, "Enables l2 normalization of phrase representations."); DEFINE_bool(l2_entity_normalization, false, "Enables l2 normalization of entity representations."); DEFINE_bool(batch_normalization, false, "Enables batch normalization."); DEFINE_uint64(max_vocabulary_size, 60000, "Maximum vocabulary size."); DEFINE_uint64(min_document_frequency, 2, "Minimum document frequency of term in order to be retained by vocabulary filtering."); DEFINE_double(max_document_frequency, 0.5, "Maximum document frequency of term in order to be retained by vocabulary filtering. " "If smaller than 1.0, then max_document_frequency is interpreted as relative to the index size; " "otherwise, it is considered an absolute threshold."); DEFINE_bool(include_oov, false, "Whether to include a special-purpose OoV token for term positions with a filtered dictionary term."); DEFINE_bool(compute_initial_cost, false, "Compute the cost before any learning is performed."); DEFINE_bool(check_gradients, false, "Enable gradient checking. " "CAUTION: this will lead to insanely slow learning."); DEFINE_bool(no_shuffle, false, "Do not shuffle the training set."); DEFINE_bool(dump_initial_model, false, "Dump the model after random initialization, but before training."); DEFINE_int64(dump_every, 0, "Number of batches that should be processed " "before the model is dumped during a single epoch. 
" "The model is always dumped at the end of every epoch."); DEFINE_double(entity_similarity_weight, 0.0, "Mixture weight of the entity-entity objective."); DEFINE_double(term_similarity_weight, 0.0, "Mixture weight of the term-term objective."); DEFINE_string(output, "", "Path to output model."); template <typename BatchT> class BatchHandler { public: static BatchT* create(const lse::TrainConfig& train_config) { return new BatchT(train_config); } static size_t num_instances(const BatchT& batch) { return batch.num_instances(); } static void clear(BatchT& batch) { return batch.clear(); } }; template <typename FirstT, typename SecondT> class BatchHandler<std::tuple<FirstT, SecondT>> { public: static std::tuple<FirstT, SecondT>* create(const lse::TrainConfig& train_config) { return new std::tuple<FirstT, SecondT>( FirstT(train_config), SecondT(train_config)); } static size_t num_instances(const std::tuple<FirstT, SecondT>& batch) { return min(std::get<0>(batch).num_instances(), std::get<1>(batch).num_instances()); } static void clear(std::tuple<FirstT, SecondT>& batch) { std::get<0>(batch).clear(); std::get<1>(batch).clear(); } }; void exception_handler() { void *trace_elems[20]; int trace_elem_count(backtrace(trace_elems, 20)); char **stack_syms(backtrace_symbols(trace_elems, trace_elem_count)); for (int i = 0; i < trace_elem_count; ++i) { LOG(ERROR) << stack_syms[i]; } free(stack_syms); try { throw; } catch (const thrust::system::system_error& e) { LOG(ERROR) << "Thurst system_error: " << e.what(); } catch (const std::exception& e) { LOG(ERROR) << "Exception: " << typeid(e).name() << ": " << e.what(); } catch (...) { LOG(ERROR) << "Unknown exception"; } exit(1); } const std::map<std::string, TextEntity::WeightingStrategy> WEIGHTING_STRATEGIES { {"auto", TextEntity::AUTOMATIC_WEIGHTING}, {"uniform", TextEntity::UNIFORM}, {"inv_doc_frequency", TextEntity::INV_DOC_FREQUENCY}, }; const std::map<std::string, TextEntity::TermWeightingStrategy> FEATURE_WEIGHTING_STRATEGIES { {"uniform", TextEntity::UNIFORM_TERM_WEIGHTING}, {"self_information", TextEntity::SELF_INFORMATION_TERM_WEIGHTING}, }; template <typename T> T* read_strings(const std::string& path) { std::ifstream file(path); T* const strings = new T; std::string str; while (std::getline(file, str)) { if (!str.empty()) { insert_or_die(str, strings); } } return strings; } std::vector<std::string>* construct_document_list() { if (!FLAGS_document_list.empty()) { LOG(INFO) << "Reading document list from " << FLAGS_document_list << "."; return read_strings<std::vector<std::string>>(FLAGS_document_list); } else { return nullptr; } } TextEntity::IndriSource::TermBlacklist* construct_term_blacklist() { if (!FLAGS_term_blacklist.empty()) { LOG(INFO) << "Reading term blacklist from " << FLAGS_term_blacklist << "."; return read_strings<TextEntity::IndriSource::TermBlacklist>(FLAGS_term_blacklist); } else { return nullptr; } } TextEntity::IndriSource* construct_indri_source( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng) { std::unique_ptr<std::vector<std::string>> document_list( construct_document_list()); std::unique_ptr<TextEntity::IndriSource::TermBlacklist> term_blacklist( construct_term_blacklist()); return new TextEntity::IndriSource( data_config.repository_path(), train_config.window_size(), rng, data_config.max_vocabulary_size(), data_config.min_document_frequency(), data_config.max_document_frequency(), FLAGS_document_cutoff, data_config.include_oov(), false, /* include_digits */ document_list.get(), 
term_blacklist.get(), !train_config.no_shuffle(), /* shuffle */ TextEntity::AUTOMATIC_SAMPLING, /* sampling_method */ WEIGHTING_STRATEGIES.at(FLAGS_weighting), /* weighting_strategy */ FEATURE_WEIGHTING_STRATEGIES.at(FLAGS_feature_weighting) /* feature_weighting_strategy */); } template <typename BatchT> DataSource<BatchT>* wrap_source_async(const lse::TrainConfig& train_config, DataSource<BatchT>* const source) { return new AsyncSource<BatchT>( 10, /* num_concurrent_batches */ train_config.batch_size(), train_config.window_size(), source); } template <typename ObjectiveT> DataSource<typename ObjectiveT::BatchType>* construct_data_source( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng, const IdentifiersMapT* const identifiers_map = nullptr); template <> DataSource<typename TextEntity::Objective::BatchType>* construct_data_source<TextEntity::Objective>( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng, const IdentifiersMapT* const identifiers_map) { CHECK(!data_config.repository_path().empty()); return wrap_source_async(train_config, construct_indri_source(data_config, train_config, rng)); } template <> DataSource<typename EntityEntity::Objective::BatchType>* construct_data_source<EntityEntity::Objective>( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng, const IdentifiersMapT* const identifiers_map) { CHECK_NE(identifiers_map, (IdentifiersMapT*) nullptr); DataSource<typename EntityEntity::Objective::BatchType>* entity_entity_source; entity_entity_source = new EntityEntity::DataSource( data_config.similarity_path(), *identifiers_map, rng); return new RepeatingSource<EntityEntity::Batch>( -1, /* num_repeats */ entity_entity_source); } template <> DataSource<typename TermTerm::Objective::BatchType>* construct_data_source<TermTerm::Objective>( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng, const IdentifiersMapT* const identifiers_map) { CHECK_NE(identifiers_map, (IdentifiersMapT*) nullptr); DataSource<typename TermTerm::Objective::BatchType>* term_term_source = new TermTerm::DataSource( data_config.similarity_path(), *identifiers_map, rng); return new RepeatingSource<TermTerm::Batch>( -1, /* num_repeats */ term_term_source); } template <> DataSource<typename TextEntityEntityEntity::Objective::BatchType>* construct_data_source<TextEntityEntityEntity::Objective>( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng, const IdentifiersMapT* const identifiers_map_unused) { CHECK(!data_config.repository_path().empty()); CHECK(!data_config.similarity_path().empty()); auto indri_source = construct_indri_source(data_config, train_config, rng); std::unique_ptr<IdentifiersMapT> identifiers_map( indri_source->build_document_identifiers_map()); DataSource<TextEntity::Batch>* text_entity_source = wrap_source_async(train_config, indri_source); DataSource<RepresentationSimilarity::Batch>* entity_entity_source = construct_data_source<EntityEntity::Objective>( data_config, train_config, rng, identifiers_map.get()); return new MultiSource<TextEntity::Batch, EntityEntity::Batch>( std::make_tuple(text_entity_source, entity_entity_source)); } template <> DataSource<typename TextEntityTermTerm::Objective::BatchType>* construct_data_source<TextEntityTermTerm::Objective>( const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* const rng, const IdentifiersMapT* const identifiers_map_unused) { 
CHECK(!data_config.repository_path().empty()); CHECK(!data_config.similarity_path().empty()); auto indri_source = construct_indri_source(data_config, train_config, rng); std::unique_ptr<std::map<std::string, WordIdxType>> identifiers_map( indri_source->build_term_identifiers_map()); DataSource<TextEntity::Batch>* text_entity_source = wrap_source_async(train_config, indri_source); DataSource<TermTerm::Batch>* term_term_source = construct_data_source<TermTerm::Objective>( data_config, train_config, rng, identifiers_map.get()); return new MultiSource<TextEntity::Batch, TermTerm::Batch>( std::make_tuple(text_entity_source, term_term_source)); } template <typename ObjectiveT> class DumpModelFn { public: explicit DumpModelFn(const size_t epoch, const Model<ObjectiveT>* const model) : epoch_(epoch), model_(model) { CHECK_GE(epoch_, 0); } void operator()(const std::string& identifier) const { if (!FLAGS_output.empty()) { std::stringstream ss; ss << FLAGS_output << "_" << epoch_; if (!identifier.empty()) { ss << "_" << identifier; } ss << ".hdf5"; const std::string filename = ss.str(); write_to_hdf5(*model_, filename); LOG(INFO) << "Saved model to " << filename << "."; } } private: const size_t epoch_; const Model<ObjectiveT>* const model_; }; template <typename ObjectiveT> std::pair<size_t, DefaultModel::FloatT> iterate_data(const lse::TrainConfig& train_config, const bool backpropagate, Model<ObjectiveT>* const model, DataSource<typename ObjectiveT::BatchType>* const data_source, typename ObjectiveT::BatchType* const batch, RNG* const rng, const DumpModelFn<ObjectiveT>* const dump_model_fn = nullptr) { size_t epoch_num_batches = 0; DefaultModel::FloatT agg_cost = 0.0; const std::chrono::time_point<std::chrono::steady_clock> iteration_start = std::chrono::steady_clock::now(); while (data_source->has_next()) { const std::chrono::time_point<std::chrono::steady_clock> batch_start = std::chrono::steady_clock::now(); BatchHandler<typename ObjectiveT::BatchType>::clear(*batch); nvtxRangePush("Batch"); nvtxRangePush("FetchData"); data_source->next(batch); nvtxRangePop(); const int64 max_threads_per_block = Runtime<FLOATING_POINT_TYPE>::getInstance()->props().maxThreadsPerBlock; if (BatchHandler<typename ObjectiveT::BatchType>::num_instances(*batch) % max_threads_per_block != 0) { LOG(ERROR) << "Skipping Batch #" << epoch_num_batches << " as it is not a multiple of " << max_threads_per_block << " " << "(" << BatchHandler<typename ObjectiveT::BatchType>::num_instances(*batch) << " instances)."; } else { // Save RNG state at beginning of epoch. 
std::stringstream rng_state; rng_state << *rng; nvtxRangePush("ComputeCost"); std::unique_ptr<typename ObjectiveT::ForwardResultType> result( model->compute_cost(*batch, rng)); nvtxRangePop(); nvtxRangePush("ComputeGradients"); std::unique_ptr<typename ObjectiveT::GradientsType> gradients( model->compute_gradients(*result)); nvtxRangePop(); if (FLAGS_check_gradients) { CHECK(GradientCheckFn<Model<ObjectiveT>>()( model, *batch, *result, *gradients, 1e-4 /* epsilon */, 1e-1 /* relative_error_threshold */, rng_state, rng)) << "Gradient check failed."; } if (backpropagate) { nvtxRangePush("UpdateParameters"); model->update(*gradients, train_config.learning_rate(), result->scaled_regularization_lambda()); nvtxRangePop(); } std::chrono::duration<float64> epoch_diff = std::chrono::steady_clock::now() - iteration_start; const float64 epoch_duration_until_now = epoch_diff.count(); std::chrono::duration<float64> batch_diff = std::chrono::steady_clock::now() - batch_start; const float64 batch_duration_until_now = batch_diff.count(); const float64 progress = data_source->progress(); const float64 seconds_remaining = ( (1.0 - progress) * (epoch_duration_until_now / progress)); agg_cost += result->get_cost(); VLOG(1) << "Batch #" << epoch_num_batches << " (" << std::setprecision(8) << progress * 100.0 << "%; " << seconds_to_humanreadable_time(seconds_remaining) << " remaining" << "): " << "cost=" << result->get_cost() << ", " << "duration=" << batch_duration_until_now; } if (dump_model_fn != nullptr && FLAGS_dump_every > 0 && epoch_num_batches > 0 && epoch_num_batches % FLAGS_dump_every == 0) { (*dump_model_fn)(std::to_string(epoch_num_batches)); } ++ epoch_num_batches; nvtxRangePop(); } CHECK_GT(epoch_num_batches, 0) << "No batches to train during epoch"; return std::make_pair(epoch_num_batches, agg_cost); } template <typename T> T ParseProto(const std::string& msg) { T proto; CHECK(google::protobuf::TextFormat::ParseFromString(msg, &proto)); return proto; } const std::map<std::string, UpdateMethodConf> UPDATE_METHODS { {"sgd", ParseProto<UpdateMethodConf>("type: SGD")}, {"adagrad", ParseProto<UpdateMethodConf>("type: ADAGRAD")}, {"sparse_adam", ParseProto<UpdateMethodConf>("type: ADAM adam_conf: < mode: SPARSE >")}, {"dense_adam", ParseProto<UpdateMethodConf>("type: ADAM adam_conf: < mode: DENSE_UPDATE >")}, {"full_adam", ParseProto<UpdateMethodConf>("type: ADAM adam_conf: < mode: DENSE_UPDATE_DENSE_VARIANCE >")}, }; const std::map<std::string, lse::ModelDesc::TransformDesc::Nonlinearity> NONLINEARITIES { {"tanh", lse::ModelDesc::TransformDesc::TANH}, {"hard_tanh", lse::ModelDesc::TransformDesc::HARD_TANH}, }; template <typename ObjectiveT> void train(const lse::ModelDesc& model_desc, const lse::DataConfig& data_config, const lse::TrainConfig& train_config, RNG* rng) { std::unique_ptr<DataSource<typename ObjectiveT::BatchType>> data_source( construct_data_source<ObjectiveT>( data_config, train_config, rng)); // Extract meta data through a generic interface. 
lse::Metadata meta; data_source->extract_metadata(&meta); const size_t vocabulary_size = meta.term_size(); const size_t corpus_size = meta.object_size(); CHECK_GT(vocabulary_size, 0); CHECK_GT(corpus_size, 0); LOG(INFO) << "Training statistics: " << "vocabulary size=" << vocabulary_size << ", " << "corpus size=" << corpus_size; Model<ObjectiveT> model(vocabulary_size, corpus_size, model_desc, train_config); model.initialize(rng); cudaDeviceSynchronize(); LOG(INFO) << "Initialized cuNVSM with " << model.num_parameters() << " parameters " << "for training on " << vocabulary_size << " words and " << corpus_size << " objects."; if (!FLAGS_output.empty()) { std::stringstream ss; ss << FLAGS_output << "_meta"; const std::string filename = ss.str(); std::ofstream meta_file; meta_file.open(filename); meta.SerializeToOstream(&meta_file); } std::unique_ptr<typename ObjectiveT::BatchType> batch( BatchHandler<typename ObjectiveT::BatchType>::create(train_config)); std::vector<DefaultModel::FloatT> epoch_costs; if (FLAGS_compute_initial_cost) { size_t epoch_num_batches; DefaultModel::FloatT agg_cost; std::tie(epoch_num_batches, agg_cost) = iterate_data<ObjectiveT>( train_config, false, /* backpropagate */ &model, data_source.get(), batch.get(), rng); data_source->reset(); const DefaultModel::FloatT initial_cost = agg_cost / epoch_num_batches; epoch_costs.push_back(initial_cost); LOG(INFO) << "Epoch #0 (initial): cost=" << epoch_costs; } if (FLAGS_dump_initial_model) { DumpModelFn<ObjectiveT> dump_model_fn(0, &model); // Dump model. dump_model_fn("" /* empty identifier */); } const std::chrono::time_point<std::chrono::steady_clock> start = std::chrono::steady_clock::now(); size_t num_batches = 0; for (size_t epoch = 1; epoch <= train_config.num_epochs(); ++epoch) { const std::chrono::time_point<std::chrono::steady_clock> epoch_start = std::chrono::steady_clock::now(); DumpModelFn<ObjectiveT> dump_model_fn(epoch, &model); nvtxRangePush("Epoch"); size_t epoch_num_batches; DefaultModel::FloatT agg_cost; std::tie(epoch_num_batches, agg_cost) = iterate_data<ObjectiveT>( train_config, true, /* backpropagate */ &model, data_source.get(), batch.get(), rng, &dump_model_fn); num_batches += epoch_num_batches; const std::chrono::duration<float64> epoch_duration = std::chrono::steady_clock::now() - epoch_start; const std::chrono::duration<float64> total_duration = std::chrono::steady_clock::now() - start; const float64 batches_per_second = num_batches / total_duration.count(); const DefaultModel::FloatT epoch_cost = agg_cost / epoch_num_batches; epoch_costs.push_back(epoch_cost); LOG(INFO) << "Epoch #" << epoch << ": " << "duration=" << seconds_to_humanreadable_time(epoch_duration.count()) << " " << "(" << batches_per_second << " batches/second) " << "cost=" << epoch_costs; // Dump model. dump_model_fn("" /* empty identifier */); data_source->reset(); nvtxRangePop(); } } int main(int argc, char* argv[]) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); std::set_terminate(exception_handler); CHECK_GE(argc, 2) << "Usage: " << argv[0] << " [OPTIONS] <path to Indri index>"; CHECK(contains_key(UPDATE_METHODS, FLAGS_update_method)) << "Please specify a valid --update_method."; CHECK(contains_key(WEIGHTING_STRATEGIES, FLAGS_weighting)) << "Please specify a valid --weighting."; // Model. 
lse::ModelDesc model_desc; model_desc.set_word_repr_size(FLAGS_word_repr_size); model_desc.set_entity_repr_size(FLAGS_entity_repr_size); model_desc.mutable_transform_desc()->set_batch_normalization(FLAGS_batch_normalization); model_desc.mutable_transform_desc()->set_nonlinearity(NONLINEARITIES.at(FLAGS_nonlinearity)); model_desc.set_clip_sigmoid(true); model_desc.set_bias_negative_samples(FLAGS_bias_negative_samples); model_desc.set_l2_normalize_phrase_reprs(FLAGS_l2_phrase_normalization); model_desc.set_l2_normalize_entity_reprs(FLAGS_l2_entity_normalization); // Data. lse::DataConfig data_config; const std::string repository_path = argv[1]; data_config.set_repository_path(repository_path); if (argc >= 3) { const std::string similarity_path = argv[2]; data_config.set_similarity_path(similarity_path); } data_config.set_max_vocabulary_size(FLAGS_max_vocabulary_size); data_config.set_min_document_frequency(FLAGS_min_document_frequency); uint64 max_document_frequency = 0; if (FLAGS_max_document_frequency <= 1.0) { max_document_frequency = static_cast<uint64>( ceil(indri::GetDocumentCount(repository_path) * FLAGS_max_document_frequency)); LOG(INFO) << "Setting max_document_frequency to " << max_document_frequency << "."; } else { max_document_frequency = static_cast<uint64>(FLAGS_max_document_frequency); } data_config.set_max_document_frequency(max_document_frequency); data_config.set_include_oov(FLAGS_include_oov); CHECK_GT(data_config.max_vocabulary_size(), 0); CHECK_GE(data_config.max_document_frequency(), 0); // Training. lse::TrainConfig train_config; train_config.set_num_epochs(FLAGS_num_epochs); train_config.set_batch_size(FLAGS_batch_size); train_config.set_window_size(FLAGS_window_size); train_config.set_num_random_entities(FLAGS_num_random_entities); train_config.set_regularization_lambda(FLAGS_regularization_lambda); train_config.set_learning_rate(FLAGS_learning_rate); train_config.mutable_update_method()->CopyFrom(UPDATE_METHODS.at(FLAGS_update_method)); train_config.set_no_shuffle(FLAGS_no_shuffle); CHECK_GE(FLAGS_entity_similarity_weight, 0.0); CHECK_LE(FLAGS_entity_similarity_weight, 1.0); CHECK_GE(FLAGS_term_similarity_weight, 0.0); CHECK_LE(FLAGS_term_similarity_weight, 1.0); train_config.set_text_entity_weight(1.0 - FLAGS_entity_similarity_weight - FLAGS_term_similarity_weight); train_config.set_entity_entity_weight(FLAGS_entity_similarity_weight); train_config.set_term_term_weight(FLAGS_term_similarity_weight); CHECK_GT(FLAGS_seed, 0) << "Please specify a --seed value."; if (train_config.learning_rate() == 0.0) { switch (train_config.update_method().type()) { default: case SGD: case ADAGRAD: train_config.set_learning_rate(0.01); break; case ADAM: train_config.set_learning_rate(0.001); break; } } LOG(INFO) << "Model descriptor: " << model_desc; LOG(INFO) << "Data configuration: " << data_config; LOG(INFO) << "Training configuration: " << train_config; LOG(INFO) << SHOW_DEFINE(FLOATING_POINT_TYPE); RNG rng; rng.seed(FLAGS_seed); if (train_config.entity_entity_weight() != 0.0) { CHECK(!data_config.similarity_path().empty()); CHECK_EQ(train_config.term_term_weight(), 0.0); train<TextEntityEntityEntity::Objective>( model_desc, data_config, train_config, &rng); } else if (train_config.term_term_weight() != 0.0) { CHECK(!data_config.similarity_path().empty()); CHECK_EQ(train_config.entity_entity_weight(), 0.0); train<TextEntityTermTerm::Objective>( model_desc, data_config, train_config, &rng); } else { train<TextEntity::Objective>( model_desc, data_config, train_config, &rng); } // 
Synchronize GPU.
  cudaDeviceSynchronize();

  // Notify profiler that we are done.
  cudaProfilerStop();

  DLOG(INFO) << "Finished.";

  return 0;
}
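// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the trainer above): the BatchHandler template
// earlier in this file exposes a uniform num_instances()/clear() interface over
// either a single batch or a std::tuple of two batches. The minimal standalone
// program below reproduces that specialization pattern with a hypothetical
// ToyBatch type so the dispatch can be compiled and inspected in isolation;
// every name here is a stand-in, not a cuNVSM type.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <tuple>

struct ToyBatch {
  std::size_t n;
  std::size_t num_instances() const { return n; }
  void clear() { n = 0; }
};

template <typename BatchT>
struct Handler {
  static std::size_t num_instances(const BatchT& b) { return b.num_instances(); }
  static void clear(BatchT& b) { b.clear(); }
};

// Specialization for composite batches: report the smaller instance count and
// clear both members, mirroring the tuple specialization in the file above.
template <typename FirstT, typename SecondT>
struct Handler<std::tuple<FirstT, SecondT>> {
  static std::size_t num_instances(const std::tuple<FirstT, SecondT>& b) {
    return std::min(Handler<FirstT>::num_instances(std::get<0>(b)),
                    Handler<SecondT>::num_instances(std::get<1>(b)));
  }
  static void clear(std::tuple<FirstT, SecondT>& b) {
    Handler<FirstT>::clear(std::get<0>(b));
    Handler<SecondT>::clear(std::get<1>(b));
  }
};

int main() {
  std::tuple<ToyBatch, ToyBatch> multi{ToyBatch{128}, ToyBatch{64}};
  std::printf("instances = %zu\n",
              Handler<std::tuple<ToyBatch, ToyBatch>>::num_instances(multi));  // prints 64
  Handler<std::tuple<ToyBatch, ToyBatch>>::clear(multi);
  return 0;
}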
#include "lbann/operators/math/unary.hpp" #include "lbann/base.hpp" #include "lbann/utils/gpu/helpers.hpp" #include "common.cuh" namespace lbann { namespace { // ========================================================= // Operator objects for entry-wise unary layers // ========================================================= // Note: Unary operator corresponds to forward prop step // (\f$ y = f(x) \f$) and binary operator corresponds to // back prop step // (\f$ \frac{dL}{dx} = \frac{dL}{dy} f'(x) \f$). /** Logical not operator. */ template <typename DataT> struct LogicalNotOpImpl { inline __device__ DataT operator()(DataT const& x) const { auto const& b = x != DataT(0.0) && !gpu_lib::isnan(x); return !b ? DataT(1.0) : DataT(0.0); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(0.0); } }; /** Negative operator. */ template <typename DataT> struct NegativeOpImpl { inline __device__ DataT operator()(DataT const& x) const { return -x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy; } }; /** Sign operator. */ template <typename DataT> struct SignOpImpl { inline __device__ DataT operator()(DataT const& x) const { DataT const zero = 0.; DataT const one = 1.; if (x > zero) { return one; } else if (x < zero) { return -one; } else { return zero; } } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(0.0); } }; /** Round operator. */ template <typename DataT> struct RoundOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::round(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(0.0); } }; /** Ceiling operator. */ template <typename DataT> struct CeilOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::ceil(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(0.0); } }; /** Floor operator. */ template <typename DataT> struct FloorOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::floor(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(0.0); } }; /** Reciprocal operator. */ template <typename DataT> struct ReciprocalOpImpl { inline __device__ DataT operator()(DataT const& x) const { return DataT(1.) / x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { if (dy == DataT(0.0)) { return DataT(0.0); } else { return - dy / (x*x); } } }; /** Square operator. */ template <typename DataT> struct SquareOpImpl { inline __device__ DataT operator()(DataT const& x) const { return x*x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(2.) * x * dy; } }; /** Square root operator. */ template <typename DataT> struct SqrtOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sqrt(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(2.) * gpu_lib::sqrt(x)); } }; /** Reciprocal square root operator. */ template <typename DataT> struct RsqrtOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::rsqrt(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& s = gpu_lib::sqrt(x); return - dy / (DataT(2.) * x * s); } }; /** Safe reciprocal operator. * If a standard reciprocal produces an infinity or NaN, zero is * output instead. 
*/ template <typename DataT> struct SafeReciprocalOpImpl { inline __device__ DataT operator()(DataT const& x) const { auto const& y = DataT(1.) / x; if (gpu_lib::isfinite(y)) { return y; } else { return DataT(0.0); } } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& y = DataT(1.) / x; if (gpu_lib::isfinite(y)) { return - dy * y*y; } else { return DataT(0.0); } } }; /** Exponential operator. */ template <typename DataT> struct ExpOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::exp(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::exp(x); } }; /** Exponential minus one operator. */ template <typename DataT> struct Expm1OpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::expm1(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::exp(x); } }; /** Natural logarithm operator. */ template <typename DataT> struct LogOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::log(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / x; } }; /** Natural logarithm one plus operator. */ template <typename DataT> struct Log1pOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::log1p(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (x + DataT(1.0)); } }; /** Cosine operator. */ template <typename DataT> struct CosOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::cos(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy * gpu_lib::sin(x); } }; /** Sine operator. */ template <typename DataT> struct SinOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sin(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::cos(x); } }; /** Tangent operator. */ template <typename DataT> struct TanOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::tan(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& c = gpu_lib::cos(x); return dy / (c*c); } }; /** Arccosine operator. */ template <typename DataT> struct AcosOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::acos(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy / gpu_lib::sqrt(DataT(1.0) - x*x); } }; /** Arcsine operator. */ template <typename DataT> struct AsinOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::asin(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / gpu_lib::sqrt(DataT(1.0) - x*x); } }; /** Arctangent operator. */ template <typename DataT> struct AtanOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::atan(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(1.0) + x*x); } }; /** Hyperbolic cosine operator. */ template <typename DataT> struct CoshOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::cosh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::sinh(x); } }; /** Hyperbolic sine operator. 
*/ template <typename DataT> struct SinhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sinh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::cosh(x); } }; /** Hyperbolic tangent operator. */ template <typename DataT> struct TanhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::tanh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& c = gpu_lib::cosh(x); return dy / (c*c); } }; /** Hyperbolic arccosine operator. */ template <typename DataT> struct AcoshOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::acosh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy / (gpu_lib::sqrt(x - DataT(1.0)) * gpu_lib::sqrt(x + DataT(1.0))); } }; /** Hyperbolic arcsine operator. */ template <typename DataT> struct AsinhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::asinh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / gpu_lib::sqrt(DataT(1.0) + x*x); } }; /** Hyperbolic arctangent operator. */ template <typename DataT> struct AtanhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::atanh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(1.0) - x*x); } }; /** Error function operator. */ template <typename DataT> struct ErfOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::erf(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { DataT const two_rsqrt_pi(1.12837916709551257389); return dy * two_rsqrt_pi * gpu_lib::exp(-x*x); } }; /** Inverse error function operator. 
*/ template <typename DataT> struct ErfInvOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::erfinv(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { DataT const half_sqrt_pi(0.88622692545275801364); auto const& y = gpu_lib::erfinv(x); return dy * half_sqrt_pi * gpu_lib::exp(y*y); } }; } // namespace // Template instantiation #define DEFINE_COMPUTE_OPS(OP_NAME) \ template <typename DataT, El::Device Device> \ void OP_NAME##Operator<DataT, Device>::fp_compute_local( \ std::vector<ConstLocalInputTensorType> inputs, \ std::vector<LocalOutputTensorType> outputs) const \ { \ LBANN_ASSERT_DEBUG(inputs.size() == 1); \ LBANN_ASSERT_DEBUG(outputs.size() == 1); \ auto const& input = inputs.front().data(); \ auto& output = outputs.front().data(); \ El::EntrywiseMap(input, \ output, \ OP_NAME##OpImpl<DataT>{}); \ } \ template <typename DataT, El::Device Device> \ void OP_NAME##Operator<DataT, Device>::bp_compute_local( \ std::vector<ConstLocalInputTensorType> inputs, \ std::vector<ConstLocalOutputTensorType> grads_wrt_outputs, \ std::vector<LocalInputTensorType> grads_wrt_inputs) const \ { \ LBANN_ASSERT_DEBUG(inputs.size() == 1); \ LBANN_ASSERT_DEBUG(grads_wrt_outputs.size() == 1); \ LBANN_ASSERT_DEBUG(grads_wrt_inputs.size() == 1); \ auto const& input = inputs.front().data(); \ auto const& grad_wrt_output = grads_wrt_outputs.front().data(); \ auto& grad_wrt_input = grads_wrt_inputs.front().data(); \ internal::EntrywiseZipInto(input, \ grad_wrt_output, \ grad_wrt_input, \ OP_NAME##OpImpl<DataT>{}); \ } DEFINE_COMPUTE_OPS(Acos) DEFINE_COMPUTE_OPS(Acosh) DEFINE_COMPUTE_OPS(Asin) DEFINE_COMPUTE_OPS(Asinh) DEFINE_COMPUTE_OPS(Atan) DEFINE_COMPUTE_OPS(Atanh) DEFINE_COMPUTE_OPS(Ceil) DEFINE_COMPUTE_OPS(Cos) DEFINE_COMPUTE_OPS(Cosh) DEFINE_COMPUTE_OPS(Erf) DEFINE_COMPUTE_OPS(ErfInv) DEFINE_COMPUTE_OPS(Exp) DEFINE_COMPUTE_OPS(Expm1) DEFINE_COMPUTE_OPS(Floor) DEFINE_COMPUTE_OPS(Log) DEFINE_COMPUTE_OPS(Log1p) DEFINE_COMPUTE_OPS(LogicalNot) DEFINE_COMPUTE_OPS(Negative) DEFINE_COMPUTE_OPS(Reciprocal) DEFINE_COMPUTE_OPS(Round) DEFINE_COMPUTE_OPS(Rsqrt) DEFINE_COMPUTE_OPS(SafeReciprocal) DEFINE_COMPUTE_OPS(Sign) DEFINE_COMPUTE_OPS(Sin) DEFINE_COMPUTE_OPS(Sinh) DEFINE_COMPUTE_OPS(Sqrt) DEFINE_COMPUTE_OPS(Square) DEFINE_COMPUTE_OPS(Tan) DEFINE_COMPUTE_OPS(Tanh) #define PROTO(T) \ template class AcosOperator<T, El::Device::GPU>; \ template class AcoshOperator<T, El::Device::GPU>; \ template class AsinOperator<T, El::Device::GPU>; \ template class AsinhOperator<T, El::Device::GPU>; \ template class AtanOperator<T, El::Device::GPU>; \ template class AtanhOperator<T, El::Device::GPU>; \ template class CeilOperator<T, El::Device::GPU>; \ template class CosOperator<T, El::Device::GPU>; \ template class CoshOperator<T, El::Device::GPU>; \ template class ErfInvOperator<T, El::Device::GPU>; \ template class ErfOperator<T, El::Device::GPU>; \ template class ExpOperator<T, El::Device::GPU>; \ template class Expm1Operator<T, El::Device::GPU>; \ template class FloorOperator<T, El::Device::GPU>; \ template class Log1pOperator<T, El::Device::GPU>; \ template class LogOperator<T, El::Device::GPU>; \ template class LogicalNotOperator<T, El::Device::GPU>; \ template class NegativeOperator<T, El::Device::GPU>; \ template class ReciprocalOperator<T, El::Device::GPU>; \ template class RoundOperator<T, El::Device::GPU>; \ template class RsqrtOperator<T, El::Device::GPU>; \ template class SafeReciprocalOperator<T, El::Device::GPU>; \ template class SignOperator<T, 
El::Device::GPU>;                                    \
  template class SinOperator<T, El::Device::GPU>;    \
  template class SinhOperator<T, El::Device::GPU>;   \
  template class SqrtOperator<T, El::Device::GPU>;   \
  template class SquareOperator<T, El::Device::GPU>; \
  template class TanOperator<T, El::Device::GPU>;    \
  template class TanhOperator<T, El::Device::GPU>

#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"

} // namespace lbann
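// ---------------------------------------------------------------------------
// Illustrative sketch (independent of LBANN/Hydrogen): each operator struct in
// the file above supplies f(x) for forward prop and dy * f'(x) for backprop,
// and DEFINE_COMPUTE_OPS maps the functor entrywise over local tensors. The
// self-contained CUDA program below shows the same functor convention with a
// plain kernel instead of El::EntrywiseMap; the kernel and variable names are
// made up for the example.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

template <typename DataT>
struct SquareOp {
  __device__ DataT operator()(DataT x) const { return x * x; }                       // y = x^2
  __device__ DataT operator()(DataT x, DataT dy) const { return DataT(2) * x * dy; } // dL/dx
};

// Forward: y[i] = op(x[i])
template <typename DataT, typename Op>
__global__ void entrywise_fp(const DataT* x, DataT* y, int n, Op op) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = op(x[i]);
}

// Backward: dx[i] = op(x[i], dy[i])
template <typename DataT, typename Op>
__global__ void entrywise_bp(const DataT* x, const DataT* dy, DataT* dx, int n, Op op) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) dx[i] = op(x[i], dy[i]);
}

int main() {
  const int n = 4;
  const float hx[n] = {1.f, 2.f, 3.f, 4.f};
  const float hdy[n] = {1.f, 1.f, 1.f, 1.f};
  float *x, *y, *dy, *dx;
  cudaMalloc(&x, n * sizeof(float));
  cudaMalloc(&y, n * sizeof(float));
  cudaMalloc(&dy, n * sizeof(float));
  cudaMalloc(&dx, n * sizeof(float));
  cudaMemcpy(x, hx, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dy, hdy, n * sizeof(float), cudaMemcpyHostToDevice);
  entrywise_fp<<<1, 32>>>(x, y, n, SquareOp<float>{});
  entrywise_bp<<<1, 32>>>(x, dy, dx, n, SquareOp<float>{});
  float hy[n], hdx[n];
  cudaMemcpy(hy, y, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(hdx, dx, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i)
    std::printf("x=%g  y=%g  dL/dx=%g\n", hx[i], hy[i], hdx[i]);
  cudaFree(x); cudaFree(y); cudaFree(dy); cudaFree(dx);
  return 0;
}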
namespace arboretum { namespace core { using thrust::device_vector; using thrust::host_vector; template <typename SUM_T, typename NODE_VALUE_T> __global__ void gain_kernel( const SUM_T *const __restrict__ left_sum, const NODE_VALUE_T *const __restrict__ segments_fvalues, const unsigned span, const SUM_T *const __restrict__ parent_sum_iter, const unsigned int *const __restrict__ parent_count_iter, const size_t n, const GainFunctionParameters parameters, my_atomics *res) { for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { if (i == 0) continue; const unsigned int fvalue = segments_fvalues[i]; const unsigned int fvalue_prev = segments_fvalues[i - 1]; if (fvalue != fvalue_prev) { // TODO: Binary search? unsigned segment = 0; while (i >= parent_count_iter[segment + 1]) { segment++; } if (i == parent_count_iter[segment + 1]) continue; const SUM_T left_sum_offset = parent_sum_iter[segment]; const SUM_T left_sum_value = left_sum[i] - left_sum_offset; const size_t left_count_offset = parent_count_iter[segment]; const size_t left_count_value = i - left_count_offset; const SUM_T total_sum = parent_sum_iter[segment + 1] - left_sum_offset; const size_t total_count = parent_count_iter[segment + 1] - left_count_offset; const float gain = gain_func(left_sum_value, total_sum, left_count_value, total_count, parameters); if (gain > 0.0) { updateAtomicMax(&(res[segment].ulong), gain, i); } } } } template <typename NODE_T, typename SUM_T, typename BIN_T> __global__ void filter_apply_candidates( my_atomics *gain_feature, SUM_T *sum, unsigned *split, unsigned *count, unsigned *node_size_prefix_sum_next, SUM_T *node_sum_prefix_sum_next, const my_atomics *candidates, const SUM_T *split_sum, const BIN_T *fvalue, const BIN_T *fvalue_sorted, NODE_T *row2Node, const unsigned *node_size_prefix_sum, const SUM_T *node_sum_prefix_sum, const int feature, const unsigned level, const unsigned n) { // TODO: get rid of dynamic parallelism for (unsigned i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { const unsigned node_start = node_size_prefix_sum[i]; const unsigned node_end = node_size_prefix_sum[i + 1]; const unsigned node_size = node_end - node_start; const float gain_ = candidates[i].floats[0]; const unsigned idx = candidates[i].ints[1]; const SUM_T node_start_sum = node_sum_prefix_sum[i]; const SUM_T node_end_sum = node_sum_prefix_sum[i + 1]; if (node_size > 0) { my_atomics current_gain_feature = gain_feature[i]; if (current_gain_feature.Gain() < gain_ || (current_gain_feature.Gain() == gain_ && feature < current_gain_feature.Feature())) { const SUM_T split_sum_value = split_sum[idx]; my_atomics val; val.floats[0] = gain_; val.ints[1] = feature; gain_feature[i] = val; sum[i] = split_sum_value - node_start_sum; count[i] = idx - node_start; BIN_T threshold = fvalue_sorted[idx]; split[i] = threshold; unsigned block_size = MAX_THREADS > node_size ? 
node_size : MAX_THREADS; unsigned grid_size = unsigned((node_size + block_size - 1) / block_size); cudaStream_t s; DEVICE_OK(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking)); apply_split<NODE_T><<<grid_size, block_size, 0, s>>>( row2Node + node_start, fvalue + node_start, threshold, level, node_size); DEVICE_OK(cudaDeviceSynchronize()); DEVICE_OK(cudaStreamDestroy(s)); node_size_prefix_sum_next[2 * i + 1] = idx; node_size_prefix_sum_next[2 * i + 2] = node_end; node_sum_prefix_sum_next[2 * i + 1] = split_sum_value; node_sum_prefix_sum_next[2 * i + 2] = node_end_sum; } else if (current_gain_feature.Gain() == 0 && current_gain_feature.Feature() == -1) { sum[i] = node_end_sum - node_start_sum; split[i] = (unsigned)-1; count[i] = node_size; node_size_prefix_sum_next[2 * i + 1] = node_size_prefix_sum_next[2 * i + 2] = node_end; node_sum_prefix_sum_next[2 * i + 1] = node_sum_prefix_sum_next[2 * i + 2] = node_end_sum; } } else { node_size_prefix_sum_next[2 * i + 1] = node_size_prefix_sum_next[2 * i + 2] = node_end; node_sum_prefix_sum_next[2 * i + 1] = node_sum_prefix_sum_next[2 * i + 2] = node_end_sum; } } } template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T> ContinuousTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::ContinuousTreeGrower( const size_t size, const unsigned depth, const unsigned hist_size, const BestSplit<SUM_T> *best, Histogram<SUM_T> *features_histogram, const InternalConfiguration *config) : BaseGrower<NODE_T, BIN_T, GRAD_T, SUM_T>(size, depth, best, features_histogram, config) { node_fvalue.resize(size); node_fvalue_sorted.resize(size); sum.resize(size); run_lenght.resize(1); size_t temp_storage_bytes = 0; OK(cub::DeviceSegmentedRadixSort::SortPairs( NULL, temp_storage_bytes, (BIN_T *)nullptr, (BIN_T *)nullptr, (GRAD_T *)nullptr, (GRAD_T *)nullptr, size, 1 << this->depth, (unsigned *)nullptr, (unsigned *)nullptr, 0, 1)); this->temp_bytes_allocated = std::max(this->temp_bytes_allocated, temp_storage_bytes); temp_storage_bytes = 0; SUM_T initial_value; init(initial_value); cub::Sum sum_op; OK(cub::DeviceScan::ExclusiveScan(NULL, temp_storage_bytes, (GRAD_T *)nullptr, (SUM_T *)nullptr, sum_op, initial_value, size)); this->temp_bytes_allocated = std::max(this->temp_bytes_allocated, temp_storage_bytes); temp_storage_bytes = 0; OK(cub::DeviceReduce::ReduceByKey( NULL, temp_storage_bytes, (BIN_T *)nullptr, (BIN_T *)nullptr, (GRAD_T *)nullptr, (SUM_T *)nullptr, thrust::raw_pointer_cast(run_lenght.data()), sum_op, size)); this->temp_bytes_allocated = std::max(this->temp_bytes_allocated, temp_storage_bytes); temp_storage_bytes = 0; OK(cub::DeviceRunLengthEncode::Encode( NULL, temp_storage_bytes, (BIN_T *)nullptr, (BIN_T *)nullptr, (BIN_T *)nullptr, thrust::raw_pointer_cast(run_lenght.data()), size)); this->temp_bytes_allocated = std::max(this->temp_bytes_allocated, temp_storage_bytes); OK(cudaMalloc(&this->temp_bytes, this->temp_bytes_allocated)); } template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T> inline void ContinuousTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::ApplySplit( NODE_T *row2Node, const unsigned level, const BIN_T threshold, size_t from, size_t to) { int gridSize; int blockSize; compute1DInvokeConfig(to - from, &gridSize, &blockSize, apply_split<NODE_T, BIN_T>); apply_split<NODE_T, BIN_T><<<gridSize, blockSize, 0, this->stream>>>( row2Node + from, thrust::raw_pointer_cast(node_fvalue.data()) + from, threshold, level, to - from); } template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T> void 
ContinuousTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::ProcessDenseFeature( const device_vector<unsigned> &partitioning_index, const device_vector<NODE_T> &row2Node, const device_vector<GRAD_T> &grad_d, device_vector<BIN_T> &fvalue_d, BIN_T *fvalue_h, const device_vector<SUM_T> &parent_node_sum, const device_vector<unsigned int> &parent_node_count, const unsigned char fvalue_size, const unsigned level, const unsigned depth, const GainFunctionParameters gain_param, const bool partition_only, const int fid) { const unsigned lenght = 1 << level; OK(cudaMemsetAsync(thrust::raw_pointer_cast(this->result_d.data()), 0, lenght * sizeof(my_atomics), this->stream)); BIN_T *fvalue_tmp = NULL; if (!fvalue_d.empty()) { fvalue_tmp = thrust::raw_pointer_cast(fvalue_d.data()); } else { OK(cudaMemcpyAsync(thrust::raw_pointer_cast(this->fvalue.data()), fvalue_h, this->size * sizeof(BIN_T), cudaMemcpyHostToDevice, this->stream)); fvalue_tmp = thrust::raw_pointer_cast(this->fvalue.data()); } if (level != 0) { const unsigned lenght = 1 << (level - 1); int gridSize = 0; int blockSize = 0; compute1DInvokeConfig(lenght, &gridSize, &blockSize, partition<NODE_T, BIN_T>, 0, 1); partition<NODE_T, BIN_T, 2><<<gridSize, blockSize, 0, this->stream>>>( thrust::raw_pointer_cast(node_fvalue.data()), thrust::raw_pointer_cast(row2Node.data()), fvalue_tmp, thrust::raw_pointer_cast(parent_node_count.data()), depth - level - 1, this->temp_bytes_allocated, this->temp_bytes, this->size, lenght); OK(cudaEventRecord(this->event, this->stream)); OK(cudaStreamWaitEvent(this->copy_d2h_stream, this->event, 0)); OK(cudaMemcpyAsync(fvalue_h, thrust::raw_pointer_cast(node_fvalue.data()), this->size * sizeof(BIN_T), cudaMemcpyDeviceToHost, this->copy_d2h_stream)); if (!fvalue_d.empty()) { OK(cudaMemcpyAsync(thrust::raw_pointer_cast(fvalue_d.data()), thrust::raw_pointer_cast(node_fvalue.data()), this->size * sizeof(BIN_T), cudaMemcpyDeviceToDevice, this->copy_d2h_stream)); } this->d_fvalue_partitioned = (BIN_T *)thrust::raw_pointer_cast(node_fvalue.data()); } else { this->d_fvalue_partitioned = fvalue_tmp; } if (partition_only) return; // FIXME: fvalue_size + 1 or just fvalue_size? 
OK(cub::DeviceSegmentedRadixSort::SortPairs( this->temp_bytes, this->temp_bytes_allocated, this->d_fvalue_partitioned, thrust::raw_pointer_cast(node_fvalue_sorted.data()), thrust::raw_pointer_cast(grad_d.data()), thrust::raw_pointer_cast(this->grad_sorted.data()), this->size, 1 << level, thrust::raw_pointer_cast(parent_node_count.data()), thrust::raw_pointer_cast(parent_node_count.data()) + 1, 0, fvalue_size + 1, this->stream)); SUM_T initial_value; init(initial_value); cub::Sum sum_op; OK(cub::DeviceScan::ExclusiveScan( this->temp_bytes, this->temp_bytes_allocated, thrust::raw_pointer_cast(this->grad_sorted.data()), thrust::raw_pointer_cast(sum.data()), sum_op, initial_value, this->size, this->stream)); gain_kernel<<<this->gridSizeGain, this->blockSizeGain, 0, this->stream>>>( thrust::raw_pointer_cast(sum.data()), thrust::raw_pointer_cast(node_fvalue_sorted.data()), lenght, thrust::raw_pointer_cast(parent_node_sum.data()), thrust::raw_pointer_cast(parent_node_count.data()), this->size, gain_param, thrust::raw_pointer_cast(this->result_d.data())); } template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T> inline void ContinuousTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::FindBest( BestSplit<SUM_T> &best, device_vector<NODE_T> &row2Node, const device_vector<SUM_T> &parent_node_sum, const device_vector<unsigned int> &parent_node_count, const unsigned fid, const unsigned level, const unsigned depth, const unsigned size) { int gridSize = 0; int blockSize = 0; compute1DInvokeConfig(size, &gridSize, &blockSize, filter_apply_candidates<NODE_T, SUM_T, BIN_T>); filter_apply_candidates<NODE_T, SUM_T, BIN_T> <<<gridSize, blockSize, 0, this->stream>>>( thrust::raw_pointer_cast(best.gain_feature.data()), thrust::raw_pointer_cast(best.sum.data()), thrust::raw_pointer_cast(best.split_value.data()), thrust::raw_pointer_cast(best.count.data()), thrust::raw_pointer_cast(best.parent_node_count_next.data()), thrust::raw_pointer_cast(best.parent_node_sum_next.data()), thrust::raw_pointer_cast(this->result_d.data()), thrust::raw_pointer_cast(this->sum.data()), this->d_fvalue_partitioned, (BIN_T *)thrust::raw_pointer_cast(this->node_fvalue_sorted.data()), thrust::raw_pointer_cast(row2Node.data()), thrust::raw_pointer_cast(parent_node_count.data()), thrust::raw_pointer_cast(parent_node_sum.data()), fid, depth - level - 2, size); } // clang-format off /*[[[cog import cog for t in [('float', 'float'), ('float', 'double'), ('float2', 'float2'), ('float2', 'mydouble2')]: for bin_type in ['unsigned int', 'unsigned short', 'unsigned char']: cog.outl("template class ContinuousTreeGrower<{0}, unsigned int, {1}, {2}>;".format( bin_type, t[0], t[1])) cog.outl("template class ContinuousTreeGrower<{0}, unsigned short, {1}, {2}>;".format( bin_type, t[0], t[1])) ]]]*/ template class ContinuousTreeGrower<unsigned int, unsigned int, float, float>; template class ContinuousTreeGrower<unsigned int, unsigned short, float, float>; template class ContinuousTreeGrower<unsigned short, unsigned int, float, float>; template class ContinuousTreeGrower<unsigned short, unsigned short, float, float>; template class ContinuousTreeGrower<unsigned char, unsigned int, float, float>; template class ContinuousTreeGrower<unsigned char, unsigned short, float, float>; template class ContinuousTreeGrower<unsigned int, unsigned int, float, double>; template class ContinuousTreeGrower<unsigned int, unsigned short, float, double>; template class ContinuousTreeGrower<unsigned short, unsigned int, float, double>; template class 
ContinuousTreeGrower<unsigned short, unsigned short, float, double>;
template class ContinuousTreeGrower<unsigned char, unsigned int, float, double>;
template class ContinuousTreeGrower<unsigned char, unsigned short, float, double>;
template class ContinuousTreeGrower<unsigned int, unsigned int, float2, float2>;
template class ContinuousTreeGrower<unsigned int, unsigned short, float2, float2>;
template class ContinuousTreeGrower<unsigned short, unsigned int, float2, float2>;
template class ContinuousTreeGrower<unsigned short, unsigned short, float2, float2>;
template class ContinuousTreeGrower<unsigned char, unsigned int, float2, float2>;
template class ContinuousTreeGrower<unsigned char, unsigned short, float2, float2>;
template class ContinuousTreeGrower<unsigned int, unsigned int, float2, mydouble2>;
template class ContinuousTreeGrower<unsigned int, unsigned short, float2, mydouble2>;
template class ContinuousTreeGrower<unsigned short, unsigned int, float2, mydouble2>;
template class ContinuousTreeGrower<unsigned short, unsigned short, float2, mydouble2>;
template class ContinuousTreeGrower<unsigned char, unsigned int, float2, mydouble2>;
template class ContinuousTreeGrower<unsigned char, unsigned short, float2, mydouble2>;
//[[[end]]] (checksum: f58c2c982b43db032408d6f7dd00111e)
// clang-format on

} // namespace core
} // namespace arboretum
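// ---------------------------------------------------------------------------
// Illustrative sketch (not part of arboretum): the grower's constructor above
// sizes one shared workspace by calling each CUB primitive with a null
// temp-storage pointer, and the gain path then turns sorted per-row gradients
// into cumulative left-child sums with an exclusive scan. The standalone
// program below shows that two-phase CUB pattern for a plain ExclusiveSum;
// buffer names are illustrative only.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cub/cub.cuh>

int main() {
  const int n = 8;
  const float h_grad[n] = {1, 2, 3, 4, 5, 6, 7, 8};
  float *d_grad, *d_prefix;
  cudaMalloc(&d_grad, n * sizeof(float));
  cudaMalloc(&d_prefix, n * sizeof(float));
  cudaMemcpy(d_grad, h_grad, n * sizeof(float), cudaMemcpyHostToDevice);

  // Phase 1: query the required workspace size (no work happens while the
  // temp-storage pointer is null), then allocate that workspace once.
  void* d_temp = nullptr;
  size_t temp_bytes = 0;
  cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_grad, d_prefix, n);
  cudaMalloc(&d_temp, temp_bytes);

  // Phase 2: run the scan; d_prefix[i] = sum of d_grad[0 .. i-1].
  cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_grad, d_prefix, n);

  float h_prefix[n];
  cudaMemcpy(h_prefix, d_prefix, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i)
    std::printf("prefix[%d] = %g\n", i, h_prefix[i]);

  cudaFree(d_temp);
  cudaFree(d_grad);
  cudaFree(d_prefix);
  return 0;
}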
#ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #include <cusp/csr_matrix.h> #include <cusp/io/matrix_market.h> #ifdef _WIN32 #pragma warning (pop) #endif #include <types.h> #include <iomanip> #include <map> #include <basic_types.h> #include <matrix.h> #include <amgx_timer.h> #include "amgx_types/util.h" #include "amgx_types/io.h" using namespace std; namespace amgx { template<class T_Config> MatrixIO<T_Config>::readerMap &MatrixIO<T_Config>::getReaderMap() { static readerMap readers_map; return readers_map; } template<class T_Config> void MatrixIO<T_Config>::registerReader(string key, readerFunc func) { readerMap &readers_map = getReaderMap(); typename readerMap::const_iterator iter = readers_map.find(key); if (iter != readers_map.end()) { std::string err = "Reader '" + key + "' is already registered"; FatalError(err, AMGX_ERR_CORE); } readers_map[key] = func; } template<class T_Config> void MatrixIO<T_Config>::unregisterReaders() { readerMap &readers_map = getReaderMap(); readers_map.clear(); } template<class T_Config> MatrixIO<T_Config>::writerMap &MatrixIO<T_Config>::getWriterMap() { static writerMap writer_map; return writer_map; } template<class T_Config> void MatrixIO<T_Config>::registerWriter(string key, writerFunc func) { writerMap &writer_map = getWriterMap(); typename writerMap::const_iterator iter = writer_map.find(key); if (iter != writer_map.end()) { std::string err = "Reader '" + key + "' is already registered"; FatalError(err, AMGX_ERR_CORE); } writer_map[key] = func; } template<class T_Config> void MatrixIO<T_Config>::unregisterWriters() { writerMap &writer_map = getWriterMap(); writer_map.clear(); } template<class T_Config> bool MatrixIO<T_Config>::writeSystemMatrixMarket(const char *fname, const Matrix<T_Config> *pA, const VVector *pb, const VVector *px) { typedef typename T_Config::MatPrec ValueTypeA; typedef typename T_Config::VecPrec ValueTypeB; if (!fname) { FatalError( "Bad filename", AMGX_ERR_BAD_PARAMETERS); } if (!pA) { FatalError( "MatrixMarket should contain matrix", AMGX_ERR_BAD_PARAMETERS); } std::ofstream fout; std::string err = "Writing system to file " + string(fname) + "\n"; amgx_output(err.c_str(), err.length()); fout.open(fname); if (!fout) { FatalError( "Cannot open file for writing!", AMGX_ERR_BAD_PARAMETERS); } const Matrix<T_Config> &A = *pA; bool is_mtx = true; bool is_rhs = pb != NULL && pb->size() > 0; bool is_soln = px != NULL && px->size() > 0; fout << "%%MatrixMarket"; if (is_mtx) { fout << " matrix coordinate "; if (types::util<typename Matrix<T_Config>::value_type>::is_real) { fout << "real "; } else { fout << "complex "; } fout << "general"; } else { if (types::util<typename Matrix<T_Config>::value_type>::is_real) { fout << "real "; } else { fout << "complex "; } } fout << std::endl; fout << "%%NVAMG " << A.get_block_dimx() << " " << A.get_block_dimy() << " "; if (A.hasProps(DIAG) && is_mtx) { fout << "diagonal "; } if (is_mtx) { fout << "sorted "; } if (is_rhs) { fout << "rhs "; } if (is_soln) { fout << "solution"; } fout << std::endl; fout << A.get_num_rows()*A.get_block_dimx() << " " << A.get_num_cols()*A.get_block_dimy() << " " << A.get_num_nz()*A.get_block_size() << std::endl; // rules are simple: If there is csr property - write csr and coo (if exists). Else write coo. 
fout << std::setprecision(std::numeric_limits<ValueTypeA>::digits10 + 1) << std::scientific; if (is_mtx) { if (A.hasProps(COO)) { for (int i = 0; i < A.get_num_nz(); i++) { for (int kx = 0; kx < A.get_block_dimx(); kx++) for (int ky = 0; ky < A.get_block_dimy(); ky++) { fout << A.row_indices[i]*A.get_block_dimx() + kx + 1 << " " << A.col_indices[i]*A.get_block_dimy() + ky + 1 << " " << A.values[i * A.get_block_size() + kx * A.get_block_dimy() + ky] << std::endl; } } } else if (A.hasProps(CSR)) { for (int i = 0; i < A.get_num_rows(); i++) { for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { int c = A.col_indices[j]; // typename Matrix::value_type v=A.values[j]; for (int kx = 0; kx < A.get_block_dimx(); kx++) for (int ky = 0; ky < A.get_block_dimy(); ky++) { fout << i *A.get_block_dimx() + kx + 1 << " " << c *A.get_block_dimy() + ky + 1 << " " << A.values[j * A.get_block_size() + kx * A.get_block_dimy() + ky] << std::endl; } } } } if (A.hasProps(DIAG)) { for (int i = 0; i < A.get_num_rows(); i++) { for (int k = 0; k < A.get_block_size(); k++) { fout << A.values[A.diag[i]*A.get_block_size() + k] << " "; } fout << std::endl; } } } // End of writing matrix fout << std::setprecision(std::numeric_limits<ValueTypeB>::digits10 + 1) << std::scientific; //write rhs if (is_rhs) { const VVector &b = *pb; fout << b.size() << std::endl; for (int i = 0; i < b.size(); i++) { fout << b[i] << std::endl; } } // write initial guess if we have it if (is_soln) { const VVector &x = *px; fout << x.size() << std::endl; for (int i = 0; i < x.size(); i++) { fout << x[i] << std::endl; } } fout.close(); err = "Done writing system to file!\n"; amgx_output(err.c_str(), err.length()); return true; } template<class T_Config> bool MatrixIO<T_Config>::writeSystemBinary(const char *fname, const Matrix<T_Config> *pA, const VVector *pb, const VVector *px) { typedef typename T_Config::template setMemSpace<AMGX_host>::Type TConfig_h; typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_value_type_h; typedef Vector<ivec_value_type_h> IVector_h; typedef Vector<TConfig_h> VVector_h; typedef typename Matrix<TConfig_h>::MVector MVector_h; typedef typename Matrix<T_Config>::index_type IndexType; typedef typename Matrix<T_Config>::value_type ValueTypeA; typedef typename Vector<T_Config>::value_type ValueTypeB; // change back to matrix type later typedef typename types::util<ValueTypeA>::uptype UpValueType; if (!fname) { FatalError( "Bad filename", AMGX_ERR_BAD_PARAMETERS); } if (!pA) { FatalError( "MatrixMarket should contain matrix", AMGX_ERR_BAD_PARAMETERS); } FILE *fout; const char header [] = "%%NVAMGBinary\n"; std::string err = "Writing system to file " + string(fname) + "\n"; amgx_output(err.c_str(), err.length()); fout = fopen(fname, "wb"); if (!fout) { FatalError( "Cannot open output file!11", AMGX_ERR_BAD_PARAMETERS); } bool is_mtx = true; bool is_rhs = pb != NULL && pb->size() > 0; bool is_soln = px != NULL && px->size() > 0; const Matrix<T_Config> &A = *pA; uint32_t matrix_format = 42; if (A.hasProps(CSR)) { matrix_format = 0; } else if (A.hasProps(COO)) { matrix_format = 1; } else { FatalError("Unsupported matrix format", AMGX_ERR_BAD_PARAMETERS); } if (types::util<ValueTypeA>::is_complex) { matrix_format += COMPLEX; } const int system_header_size = 9; uint32_t system_flags [] = { (uint32_t)(is_mtx), (uint32_t)(is_rhs), (uint32_t)(is_soln), matrix_format, (uint32_t)(A.hasProps(DIAG)), (uint32_t)(A.get_block_dimx()), (uint32_t)(A.get_block_dimy()), (uint32_t)(A.get_num_rows()), 
(uint32_t)(A.get_num_nz()) }; fwrite(header, sizeof(char), strlen(header), fout); fwrite(system_flags, sizeof(uint32_t), system_header_size, fout); std::vector< UpValueType > tempv(A.values.size(), types::util< UpValueType >::get_zero()); if (is_mtx) { if (A.hasProps(CSR)) { IVector_h t_int = A.row_offsets; fwrite(t_int.raw(), sizeof(int), A.get_num_rows() + 1, fout); //assuming int as an index t_int = A.col_indices; fwrite(t_int.raw(), sizeof(int), A.get_num_nz(), fout); //assuming int as an index for (int k = 0; k < A.values.size(); k++) { types::util<ValueTypeA>::to_uptype(A.values[k], tempv[k]); } fwrite(&tempv[0], sizeof(UpValueType), A.get_block_dimx() * A.get_block_dimy() * (A.get_num_nz() + (A.hasProps(DIAG) ? A.get_num_rows() : 0) ), fout); // including diag in the end if exists. } else { FatalError("Unsupported matrix format for now", AMGX_ERR_IO); } } // End of writing matrix VVector_h tvec; //write rhs if (is_rhs) { if (pb->size() != A.get_num_rows()*A.get_block_dimy()) { FatalError("rhs vector and matrix dimension does not match", AMGX_ERR_BAD_PARAMETERS); } tempv.resize(A.get_num_rows()*A.get_block_dimy()); for (int k = 0; k < pb->size(); k++) { types::util<ValueTypeB>::to_uptype((*pb)[k], tempv[k]); } fwrite(&tempv[0], sizeof(UpValueType), pb->size(), fout); } // write initial guess if we have it if (is_soln) { if (px->size() != A.get_num_rows()*A.get_block_dimx()) { FatalError("solution vector and matrix dimension does not match", AMGX_ERR_BAD_PARAMETERS); } tempv.resize(A.get_num_rows()*A.get_block_dimy()); for (int k = 0; k < px->size(); k++) { types::util<ValueTypeB>::to_uptype((*px)[k], tempv[k]); } fwrite(&tempv[0], sizeof(UpValueType), px->size(), fout); } fclose(fout); err = "Done writing system to file!\n"; amgx_output(err.c_str(), err.length()); return true; } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::readSystem(const char *fname , Matrix<T_Config> &A , VVector &b , VVector &x , const AMG_Config &cfg , unsigned int props , const IVector_h &rank_rows // row indices for given rank ) { AMGX_CPU_PROFILER( "MatrixIO::read_sytem " ); try { readerMap &readers_map = getReaderMap(); //open file std::string err; if (io_config::hasProps(io_config::SIZE, props)) { err = "Reading matrix dimensions in file: " + string(fname) + "\n"; } else if (io_config::hasProps(io_config::PRINT, props)) { err = "Reading matrix in file: " + string(fname) + "\n"; } amgx_output(err.c_str(), err.length()); std::ifstream fin(fname); if (!fin) { err = "Error opening file '" + string(fname) + "'\n"; FatalError(err.c_str(), AMGX_ERR_IO); } // Extract the file format from the file std::string fformat; fin >> fformat; if (fformat.substr(0, 2) != "%%") { err = "Invalid header line in file " + string(fname) + " First line should begin with: %%MatrixFormat\n"; FatalError(err.c_str(), AMGX_ERR_IO); } else { fformat = fformat.substr(2, fformat.size()); } typename readerMap::const_iterator iter = readers_map.find(fformat); if (iter == readers_map.end()) { err = "Could not find a reader for matrix of type '" + fformat + "'\n"; FatalError(err.c_str(), AMGX_ERR_IO); } //call reader A.set_initialized(0); (iter->second)(fin , fname , A , b , x , cfg , props , rank_rows ); A.computeDiagonal(); A.set_initialized(1); fin.close(); } catch (amgx_exception e) { std::string err = "Error while reading matrix: "; amgx_output(err.c_str(), err.length()); amgx_output(e.what(), strlen(e.what())); return AMGX_ERR_IO; } return AMGX_OK; } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::writeSystem (const 
char *filename, const Matrix<T_Config> *A, const VVector *b, const VVector *x) { std::string format; try { AMG_Config *cfg = NULL; if (A) { cfg = A->getResources()->getResourcesConfig(); } if (b) { cfg = b->getResources()->getResourcesConfig(); } if (!cfg) { FatalError("Couldn't get resources from matrix or vector", AMGX_ERR_BAD_PARAMETERS); } format = cfg->AMG_Config::getParameter<std::string>("matrix_writer", "default"); } catch (amgx_exception e) { std::string err = "Error while writing matrix: "; amgx_output(err.c_str(), err.length()); amgx_output(e.what(), strlen(e.what())); return AMGX_ERR_IO; } // call to actual writeMatrixWithFormat: return writeSystemWithFormat (filename, format.c_str(), A, b, x); } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::writeSystemWithFormat (const char *filename, const char *format, const Matrix<T_Config> *A, const VVector *b, const VVector *x) { AMGX_CPU_PROFILER( "MatrixIO::sytem " ); try { writerMap &writers_map = getWriterMap(); typename writerMap::const_iterator iter = writers_map.find(format); if (iter == writers_map.end()) { std::string err; err = "Could not find a writer: '" + std::string(format) + "'\n"; FatalError(err.c_str(), AMGX_ERR_IO); } if ( !(iter->second)( filename, A, b, x ) ) { return AMGX_ERR_IO; } } catch (amgx_exception e) { std::string err = "Error while writing matrix: "; amgx_output(err.c_str(), err.length()); amgx_output(e.what(), strlen(e.what())); return AMGX_ERR_IO; } return AMGX_OK; } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::readSystem(const char *fname , Matrix<T_Config> &A , const AMG_Config &cfg , unsigned int props , const IVector_h &rank_rows // row indices for given rank ) { VVector b = VVector(0); VVector x = VVector(0); return readSystem(fname, A, b, x, cfg, props, rank_rows); } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::readSystem(const char *fname , Matrix<T_Config> &A , VVector &b , const AMG_Config &cfg , unsigned int props , const IVector_h &rank_rows // row indices for given rank ) { VVector v = VVector(0); if (io_config::hasProps(io_config::RHS, props)) { return readSystem(fname, A, b, v, cfg, props, rank_rows); } else { return readSystem(fname, A, v, b, cfg, props, rank_rows); } } template<class T_Config> string MatrixIO<T_Config>::readSystemFormat(const char *fname) { readerMap &readers_map = getReaderMap(); //open file std::string out = "Reading matrix format in file: " + string(fname) + "\n"; amgx_output(out.c_str(), out.length()); std::ifstream fin(fname); if (!fin) { out = "Error opening file: " + string(fname) + "\n"; FatalError(out.c_str(), AMGX_ERR_IO); } // Extract the file format from the file std::string fformat; fin >> fformat; if (fformat.substr(0, 2) != "%%") { out = "Invalid header line in file " + std::string( fname ) + " First line should begin with: %%MatrixFormat\n"; FatalError(out.c_str(), AMGX_ERR_IO); } else { fformat = fformat.substr(2, fformat.size()); } return fformat; } /*template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::readGeometry( AuxData* obj, const char* fname) { std::string err; err = "Reading matrix in file: " + string(fname) + "\n"; amgx_output(err.c_str(), err.length()); std::ifstream fin(fname); if(!fin) { err = "Error opening file '" + string(fname) + "'\n"; FatalError(err.c_str(), AMGX_ERR_IO); } int n,dimension; fin >> n >> dimension; MVector_h hgeo_x; MVector_h hgeo_y; MVector* geo_x = new MVector; MVector* geo_y = new MVector; hgeo_x.resize(n); hgeo_y.resize(n); if (dimension == 3) { MVector_h hgeo_z; MVector* geo_z = new 
MVector; hgeo_z.resize(n); for(int i = 0;i < n;i ++) fin >> hgeo_x[i] >> hgeo_y[i] >> hgeo_z[i]; *geo_z = hgeo_z; obj->setParameterPtr< MVector > ("geo.z", geo_z); } else if (dimension == 2) { for(int i = 0;i < n;i ++) fin >> hgeo_x[i] >> hgeo_y[i]; } obj->setParameter<int>("dim", dimension); obj->setParameter<int>("geo_size",(int)(hgeo_x.size())); *geo_x = hgeo_x; *geo_y = hgeo_y; obj->setParameterPtr< MVector > ("geo.x", geo_x); obj->setParameterPtr< MVector > ("geo.y", geo_y); return AMGX_OK; } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::readColoring( AuxData* obj, const char* fname) { std::string err; err = "Reading matrix in file: " + string(fname) + "\n"; amgx_output(err.c_str(), err.length()); std::ifstream fin(fname); if(!fin) { err = "Error opening file '" + string(fname) + "'\n"; FatalError(err.c_str(), AMGX_ERR_IO); } int num_rows, num_colors; fin >> num_rows >> num_colors; typedef TemplateConfig<AMGX_host, T_Config::vecPrec, T_Config::matPrec, T_Config::indPrec> TConfig_h; typedef typename Matrix<TConfig_h>::IVector IVector_h; IVector_h* row_coloring = new IVector_h; row_coloring->resize(num_rows); for(int i = 0;i < num_rows;i ++) fin >> (*row_coloring)[i]; obj->setParameter<int>("coloring_size", num_rows); obj->setParameter<int>("colors_num", num_colors); obj->setParameterPtr< IVector_h > ("coloring", row_coloring); return AMGX_OK; } template<class T_Config> AMGX_ERROR MatrixIO<T_Config>::readGeometry( AuxData* obj, int n,int dimension ) { typedef typename Matrix<T_Config>::MVector VVector; MVector_h geo_x; MVector_h geo_y; geo_x.resize(n); geo_y.resize(n); int num_one_dim; if (dimension == 3) { MVector_h geo_z; geo_z.resize(n); num_one_dim = (int) cbrt((double)n); for (int i = 0;i < num_one_dim;i++) for (int j = 0;j < num_one_dim;j++) for (int k = 0;k < num_one_dim;k++) { geo_x[i + j*num_one_dim + k*num_one_dim*num_one_dim] = 1.0*i/(num_one_dim-1); geo_y[i + j*num_one_dim + k*num_one_dim*num_one_dim] = 1.0*j/(num_one_dim-1); geo_z[i + j*num_one_dim + k*num_one_dim*num_one_dim] = 1.0*k/(num_one_dim-1); } VVector *dgeo_z = new VVector; *dgeo_z = geo_z; obj->setParameterPtr< VVector > ("geo.z", dgeo_z); } else if (dimension == 2) { num_one_dim = (int) sqrt((double)n); for (int i = 0;i < num_one_dim;i++) for (int j = 0;j < num_one_dim;j++) { geo_x[i + j*num_one_dim] = 1.0*i/(num_one_dim-1); geo_y[i + j*num_one_dim] = 1.0*j/(num_one_dim-1); //(*geo_z)[i + j*num_one_dim] = 0; } } VVector *dgeo_y = new VVector; VVector *dgeo_x = new VVector; *dgeo_y = geo_y; *dgeo_x = geo_x; obj->setParameter<int>("dim", dimension); obj->setParameter<int>("geo_size",(int)(n)); obj->setParameterPtr< VVector > ("geo.x", dgeo_x); obj->setParameterPtr< VVector > ("geo.y", dgeo_y); return AMGX_OK; }*/ /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MatrixIO<TemplateMode<CASE>::Type >; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE //AMGX_FORCOMPLEX_BUILDS_DEVICE(AMGX_CASE_LINE) // template class MatrixIO<Matrix_d>; // template class MatrixIO<Matrix_h>; } // end namespace amgx
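// ---------------------------------------------------------------------------
// Illustrative sketch (no AMGX dependency): writeSystemMatrixMarket above walks
// the CSR arrays and emits 1-based "row col value" triplets after the
// MatrixMarket banner and size line. The short program below performs the same
// conversion for a scalar (1x1 block) real matrix; the matrix data and output
// file name are made up for the example.
// ---------------------------------------------------------------------------
#include <fstream>
#include <vector>

int main() {
  // 3x3 CSR matrix  [[4,1,0],
  //                  [1,4,1],
  //                  [0,1,4]]
  const std::vector<int> row_offsets = {0, 2, 5, 7};
  const std::vector<int> col_indices = {0, 1, 0, 1, 2, 1, 2};
  const std::vector<double> values = {4, 1, 1, 4, 1, 1, 4};
  const int num_rows = 3, num_cols = 3, nnz = 7;

  std::ofstream fout("example.mtx");
  // Banner and size line, then one 1-based coordinate entry per nonzero.
  fout << "%%MatrixMarket matrix coordinate real general\n";
  fout << num_rows << " " << num_cols << " " << nnz << "\n";
  for (int i = 0; i < num_rows; ++i)
    for (int j = row_offsets[i]; j < row_offsets[i + 1]; ++j)
      fout << i + 1 << " " << col_indices[j] + 1 << " " << values[j] << "\n";
  return 0;
}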
#include "miner.h" extern "C" { #include "sph/sph_blake.h" } #include "cuda_helper.h" #ifdef __INTELLISENSE__ #define __byte_perm(x, y, b) x #endif /* threads per block and nonces per thread */ #define TPB 768 #define NPT 384 #define maxResults 8 __constant__ uint32_t _ALIGN(16) c_data[20]; /* 8 adapters max */ static uint32_t *d_resNonce[MAX_GPUS]; static uint32_t *h_resNonce[MAX_GPUS]; /* hash by cpu with blake 256 */ extern "C" void blake256_8roundHash(void *output, const void *input){ uchar hash[64]; sph_blake256_context ctx; sph_blake256_set_rounds(8); sph_blake256_init(&ctx); sph_blake256(&ctx, input, 80); sph_blake256_close(&ctx, hash); memcpy(output, hash, 32); } #define GSn4(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2,a3,b3,c3,d3,x3,y3) { \ v[ a] = v[ a] + v[ b] + x; v[a1] = v[a1] + v[b1] + x1; v[a2] = v[a2] + v[b2] + x2; v[a3] = v[a3] + v[b3] + x3;\ v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]); v[d3] = ROL16(v[d3] ^ v[a3]); \ v[ c] = v[ c] + v[ d]; v[c1] = v[c1] + v[d1]; v[c2] = v[c2] + v[d2]; v[c3] = v[c3] + v[d3]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12); v[b3] = ROTR32(v[b3] ^ v[c3], 12); \ v[ a] = v[ a] + v[ b] + y; v[a1] = v[a1] + v[b1] + y1; v[a2] = v[a2] + v[b2] + y2; v[a3] = v[a3] + v[b3] + y3; \ v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]); v[d3] = ROR8(v[d3] ^ v[a3]); \ v[ c] = v[ c] + v[ d]; v[c1] = v[c1] + v[d1]; v[c2] = v[c2] + v[d2]; v[c3] = v[c3] + v[d3]; \ v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7); v[b3] = ROTR32(v[b3] ^ v[c3], 7); \ } #define GS(a,b,c,d,x,y) { \ v[a] += (m[x] ^ z[y]) + v[b]; \ v[d] = ROL16(v[d] ^ v[a]); \ v[c] += v[d]; \ v[b] = ROTR32(v[b] ^ v[c],12); \ v[a] += (m[y] ^ z[x]) + v[b]; \ v[d] = ROR8(v[d] ^ v[a]); \ v[c] += v[d]; \ v[b] = ROTR32(v[b] ^ v[c], 7); \ } __global__ __launch_bounds__(TPB) void blake256_8round_gpu_hash(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce){ uint64_t m3 = startNonce + blockDim.x * blockIdx.x + threadIdx.x; const uint64_t step = gridDim.x * blockDim.x; const uint64_t maxNonce = startNonce + threads; uint32_t v[16]; uint32_t z[16] = { 0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89, 0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C, 0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917 }; uint32_t m[16] = { c_data[16], c_data[17], c_data[18], 0, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 640 }; uint32_t h7 = c_data[19]; uint32_t xors[16]; for(; m3<maxNonce;m3+=step){ m[3] = _LODWORD(m3); #pragma unroll 16 for(int i=0;i<16;i++){ v[i] = c_data[i]; } v[ 1] = v[ 1] + (m[3] ^ z[2]); v[13] = ROR8(v[13] ^ v[ 1]); v[ 9] = v[ 9] + v[13]; v[ 5] = ROTR32(v[ 5] ^ v[ 9], 7); v[ 0] = v[ 0] + v[ 5]; v[15] = ROL16(v[15] ^ v[ 0]); v[10] = v[10] + v[15]; v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 0] = v[ 0] + v[ 5] + z[ 8]; v[15] = ROR8(v[15] ^ v[ 0]); v[10] = v[10] + v[15]; v[ 5] = ROTR32(v[ 5] ^ v[10], 7); v[ 1] = v[ 1] + v[ 6] + z[11]; v[12] = ROL16(v[12] ^ v[ 1]); v[13] = ROL16(v[13] ^ v[ 2]); v[11] = v[11] + v[12]; v[ 8] = v[ 8] + v[13]; v[ 9] = v[ 9] + v[14]; v[ 6] = ROTR32(v[ 6] ^ v[11], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 12); v[ 1] = v[ 1] + v[ 6] + z[10]; v[ 2] = v[ 2] + v[ 7] + (m[13] ^ z[12]);v[ 3] = v[ 3] + v[ 4] + (m[15] ^ z[14]); v[12] = ROR8(v[12] ^ v[ 1]); v[13] = ROR8(v[13] ^ v[ 2]); v[14] = ROR8(v[14] ^ v[ 3]); v[11] = 
v[11] + v[12]; v[ 8] = v[ 8] + v[13]; v[ 9] = v[ 9] + v[14]; v[ 6] = ROTR32(v[ 6] ^ v[11], 7); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 7); // 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } xors[ 0] = z[10]; xors[ 1] = m[ 4] ^ z[ 8]; xors[ 2] = z[15]; xors[ 3] = m[13] ^ z[ 6]; xors[ 4] = z[14]; xors[ 5] = z[ 4]; xors[ 6] = z[ 9] ^ m[15]; xors[ 7] = z[13]; xors[ 8] = m[ 1] ^ z[12]; xors[ 9] = m[ 0] ^ z[ 2]; xors[10] = z[ 7]; xors[11] = z[ 3]; xors[12] = z[ 1]; xors[13] = z[ 0] ^ m[ 2]; xors[14] = z[11]; xors[15] = z[ 5] ^ m[ 3]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } xors[ 0] = z[ 8]; xors[ 1] = z[ 0]; xors[ 2] = z[ 2]; xors[ 3] = m[15] ^ z[13]; xors[ 4] = z[11]; xors[ 5] = z[12] ^ m[ 0]; xors[ 6] = z[ 5] ^ m[ 2]; xors[ 7] = z[15] ^ m[13]; xors[ 8] = z[14]; xors[ 9] = m[ 3] ^ z[ 6]; xors[10] = z[ 1]; xors[11] = z[ 4]; xors[12] = z[10]; xors[13] = z[ 3]; xors[14] = z[ 7] ^ m[ 1]; xors[15] = z[ 9] ^ m[ 4]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } xors[ 0] = z[ 9]; xors[ 1] = m[ 3] ^ z[ 1]; xors[ 2] = m[13] ^ z[12]; xors[ 3] = z[14]; xors[ 4] = z[ 7]; xors[ 5] = z[ 3] ^ m[ 1]; xors[ 6] = z[13]; xors[ 7] = z[11]; xors[ 8] = m[ 2] ^ z[ 6]; xors[ 9] = z[10]; xors[10] = m[ 4] ^ z[ 0]; xors[11] = m[15] ^ z[ 8]; xors[12] = z[ 2]; xors[13] = z[ 5]; xors[14] = z[ 4] ^ m[ 0]; xors[15] = z[15]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 4{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } xors[ 0] = z[ 0]; xors[ 1] = z[ 7]; xors[ 2] = m[ 2] ^ z[ 4]; xors[ 3] = z[15]; xors[ 4] = z[ 9] ^ m[ 0]; xors[ 5] = z[ 5]; xors[ 6] = z[ 2] ^ m[ 4]; xors[ 7] = z[10] ^ m[15]; xors[ 8] = z[ 1]; xors[ 9] = z[12]; xors[10] = z[ 8]; xors[11] = m[ 3] ^ z[13]; xors[12] = z[14] ^ m[ 1]; xors[13] = z[11]; xors[14] = z[ 6]; xors[15] = z[ 3] ^ m[13]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 5{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } xors[ 0] = m[ 2] ^ z[12]; xors[ 1] = z[10]; xors[ 2] = m[ 0] ^ z[11]; xors[ 3] = z[ 3]; xors[ 4] = z[ 2]; xors[ 5] = z[ 6]; xors[ 6] = z[ 0]; xors[ 7] = z[ 8] ^ m[ 3]; xors[ 8] = m[ 4] ^ z[13]; xors[ 9] = z[ 5]; xors[10] = m[15] ^ z[14]; xors[11] = m[ 1] ^ z[ 9]; xors[12] = z[ 4] ^ m[13]; xors[13] = z[ 7]; xors[14] = z[15]; xors[15] = z[ 1]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 6{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } xors[ 
0] = z[ 5]; xors[ 1] = m[ 1] ^ z[15]; xors[ 2] = z[13]; xors[ 3] = m[ 4] ^ z[10]; xors[ 4] = z[12]; xors[ 5] = z[ 1] ^ m[15]; xors[ 6] = z[14] ^ m[13]; xors[ 7] = z[ 4]; xors[ 8] = m[ 0] ^ z[ 7]; xors[ 9] = z[ 3]; xors[10] = z[ 2]; xors[11] = z[11]; xors[12] = z[ 0]; xors[13] = z[ 6] ^ m[ 3]; xors[14] = z[ 9] ^ m[ 2]; xors[15] = z[ 8]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]); // 7{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } xors[ 0] = m[13] ^ z[11]; xors[ 1] = z[14]; xors[ 2] = z[ 1]; xors[ 3] = m[ 3] ^ z[ 9]; xors[ 4] = z[13]; xors[ 5] = z[ 7]; xors[ 6] = z[12] ^ m[ 1]; xors[ 7] = z[ 3]; xors[ 8] = z[ 0]; xors[10] = z[ 6]; xors[12] = z[ 5] ^ m[ 0]; xors[14] = z[ 8]; GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]); v[ 0] = v[ 0] + v[ 5] + xors[ 8]; v[ 2] = v[ 2] + v[ 7] + xors[10]; v[15] = ROL16(v[15] ^ v[ 0]); v[13] = ROL16(v[13] ^ v[ 2]); v[10] = v[10] + v[15]; v[ 8] = v[ 8] + v[13]; v[ 5] = ROTR32(v[ 5] ^ v[10], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 0] = v[ 0] + v[ 5] + xors[12]; v[ 2] = v[ 2] + v[ 7] + xors[14]; v[13] = ROR8(v[13] ^ v[ 2]); v[15] = ROTR32(v[15] ^ v[ 0],1); v[ 8] += v[13]; // only compute h7 if(xor3x(v[ 7],h7,v[ 8])==v[15]){ uint32_t pos = atomicInc(&resNonce[0],0xffffffff)+1; if(pos<maxResults) resNonce[pos]=m[ 3]; return; } } } __host__ void blake256_8round_cpu_setBlock_16(const int thr_id,const uint32_t* endiandata, uint32_t *penddata){ const uint32_t _ALIGN(64) z[16] = { 0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89, 0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C, 0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917 }; uint32_t _ALIGN(64) h[22]; sph_blake256_context ctx; sph_blake256_set_rounds(8); sph_blake256_init(&ctx); sph_blake256(&ctx, endiandata, 64); h[ 0] = ctx.H[0]; h[ 1] = ctx.H[1]; h[ 2] = ctx.H[2]; h[21] = ctx.H[3]; h[ 4] = ctx.H[4]; h[20] = ctx.H[5]; h[19] = ctx.H[6]; h[16] = ctx.H[7]; uint32_t tmp = h[20]; h[20] = h[19]; h[19] = h[16]; h[16] = penddata[ 0]; h[17] = penddata[ 1]; h[18] = penddata[ 2]; h[12] = z[ 4] ^ 640; h[ 8] = z[ 0]; h[ 0] += (h[16] ^ z[ 1]) + h[ 4]; h[12] = SPH_ROTR32(h[12] ^ h[0],16); h[ 8] += h[12]; h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8], 12); h[ 0] += (h[17] ^ z[ 0]) + h[ 4]; h[12] = SPH_ROTR32(h[12] ^ h[0],8); h[ 8] += h[12]; h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8], 7); h[1] += (h[18] ^ z[ 3]) + tmp; h[13] = SPH_ROTR32(z[ 5] ^ 640 ^ h[1],16); h[ 5] = ROTR32(tmp ^ (z[ 1] + h[13]), 12); h[ 1] += h[ 5]; h[ 2] += (0x80000000UL ^ z[ 5]) + h[20]; h[14] = SPH_ROTR32(z[ 6] ^ h[2], 16); h[ 6] = z[ 2] + h[14]; h[ 6] = SPH_ROTR32(h[20] ^ h[ 6], 12); h[21] += z[ 7] + h[19]; h[ 0] += z[ 9]; h[ 2] += z[ 4] + h[ 6]; h[ 9] = z[ 1] + h[13]; h[10] = z[ 2] + h[14]; h[14] = SPH_ROTR32(h[14] ^ h[2],8); //0x0321 h[10]+=h[14]; h[ 6] = SPH_ROTR32(h[ 6] ^ h[10],7); h[15] = SPH_ROTR32(z[ 7] ^ h[21],16); h[11] = z[ 3] + h[15]; h[ 7] = SPH_ROTR32(h[19] ^ h[11], 12); h[ 3] = h[21] + h[ 7] + z[ 6]; h[15] = SPH_ROTR32(h[15] ^ h[ 3],8); h[11]+= h[15]; h[ 7] = ROTR32(h[ 7] ^ h[11],7); h[ 2]+= z[13]; h[ 3]+= z[15]; h[ 2]+= h[ 7]; h[ 3]+= h[ 4]; h[14] = SPH_ROTR32(h[14] ^ h[ 3], 16); h[19] = SPH_ROTL32(h[19],7); //align the rotation with v[7] v[15]; cudaMemcpyToSymbol(c_data, h, 20*sizeof(uint32_t), 0, 
cudaMemcpyHostToDevice); } static bool init[MAX_GPUS] = { 0 }; extern "C" int scanhash_blake256_8round(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done){ uint32_t *pdata = work->data; uint32_t *ptarget = work->target; const uint32_t first_nonce = pdata[19]; int dev_id = device_map[thr_id]; int intensity = 31; uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity); if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); if (!init[thr_id]) { cudaSetDevice(dev_id); if (opt_cudaschedule == -1 && gpu_threads == 1) { cudaDeviceReset(); // reduce cpu usage (linux) cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); CUDA_LOG_ERROR(); } gpulog(LOG_INFO,thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput); CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], maxResults * sizeof(uint32_t))); h_resNonce[thr_id] = (uint32_t*) malloc(maxResults * sizeof(uint32_t)); if(h_resNonce[thr_id] == NULL){ gpulog(LOG_ERR,thr_id,"Host memory allocation failed"); exit(EXIT_FAILURE); } CUDA_LOG_ERROR(); init[thr_id] = true; } uint32_t _ALIGN(64) endiandata[20]; for (int k=0; k < 19; k++) be32enc(&endiandata[k], pdata[k]); blake256_8round_cpu_setBlock_16(thr_id,endiandata,&pdata[16]); const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB)); const dim3 block(TPB); int rc = 0; cudaMemset(d_resNonce[thr_id], 0x00, maxResults*sizeof(uint32_t)); do { blake256_8round_gpu_hash<<<grid,block>>>(throughput, pdata[19], d_resNonce[thr_id]); cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost); if (h_resNonce[thr_id][0] != 0){ cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], maxResults*sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaMemset(d_resNonce[thr_id], 0x00, sizeof(uint32_t)); if(h_resNonce[thr_id][0]>(maxResults-1)){ gpulog(LOG_WARNING,dev_id,"Candidate flood: %u",h_resNonce[thr_id][0]); h_resNonce[thr_id][0]=maxResults-1; } uint32_t i; for(i=1;i<h_resNonce[thr_id][0]+1;i++){ uint32_t vhashcpu[8]; be32enc(&endiandata[19], h_resNonce[thr_id][i]); blake256_8roundHash(vhashcpu, endiandata); if (vhashcpu[ 6] <= ptarget[ 6] && fulltest(vhashcpu, ptarget)){ work_set_target_ratio(work, vhashcpu); *hashes_done = pdata[19] - first_nonce + throughput; pdata[19] = h_resNonce[thr_id][i]; rc =1; //search for 2nd nonce for(uint32_t j=i+1;j<h_resNonce[thr_id][0]+1;j++){ be32enc(&endiandata[19], h_resNonce[thr_id][j]); blake256_8roundHash(vhashcpu, endiandata); if(vhashcpu[ 6]<=ptarget[6] && fulltest(vhashcpu, ptarget)){ pdata[21] = h_resNonce[thr_id][j]; // if(!opt_quiet) // gpulog(LOG_BLUE,dev_id,"Found 2nd nonce: %u/%08X - %u/%08X",i,pdata[19],j,pdata[21]); if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) { work_set_target_ratio(work, vhashcpu); xchg(pdata[21], pdata[19]); } rc=2; break; } } return rc; } } } pdata[19] += throughput; } while (!work_restart[thr_id].restart && ((uint64_t)max_nonce > ((uint64_t)(pdata[19]) + (uint64_t)throughput))); *hashes_done = pdata[19] - first_nonce; return rc; } // cleanup extern "C" void free_blake256_8round(int thr_id) { if (!init[thr_id]) return; cudaDeviceSynchronize(); free(h_resNonce[thr_id]); cudaFree(d_resNonce[thr_id]); init[thr_id] = false; cudaDeviceSynchronize(); }
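// ---------------------------------------------------------------------------
// Reference for the GS()/GSn4() macros above: a minimal host-side sketch of
// the BLAKE-256 quarter-round, shown only to make the macro data flow easier
// to follow. `rotr32` and `blake256_g_ref` are illustrative names introduced
// here, not part of the miner; ROTR32/ROL16/ROR8 in the kernel are all 32-bit
// rotations (a 16-bit rotation is the same in either direction on 32-bit
// words), so they reduce to right-rotations by 12/7, 16 and 8.
// ---------------------------------------------------------------------------
#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned r) {
  return (x >> r) | (x << (32u - r));
}

// One quarter-round on state words v[a], v[b], v[c], v[d], mixing message
// words mx, my against the sigma-permuted constants zx, zy (the z[] table).
static inline void blake256_g_ref(uint32_t v[16], int a, int b, int c, int d,
                                  uint32_t mx, uint32_t my,
                                  uint32_t zx, uint32_t zy) {
  v[a] += (mx ^ zy) + v[b];  v[d] = rotr32(v[d] ^ v[a], 16);
  v[c] += v[d];              v[b] = rotr32(v[b] ^ v[c], 12);
  v[a] += (my ^ zx) + v[b];  v[d] = rotr32(v[d] ^ v[a], 8);
  v[c] += v[d];              v[b] = rotr32(v[b] ^ v[c], 7);
}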
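// ---------------------------------------------------------------------------
// Reference semantics for the butterfly kernels below (a minimal host-side
// sketch, not the optimized CUDA path): each "untied" butterfly step applies,
// at every position i, the 2x2 mix
//     y[i] = t0[i] * x[i] + t1[i] * x[i ^ stride],   stride = 1 << log_stride,
// where t0/t1 play the role of twiddle_a[s][step][0][i] / twiddle_a[s][step][1][i]
// in the 4-D twiddle layout used by b_untied_forward. `butterfly_forward_ref`
// and its flat twiddle layout are illustrative assumptions, not part of the
// extension's API.
// ---------------------------------------------------------------------------
#include <vector>

static void butterfly_forward_ref(const std::vector<float> &twiddle, // [log_n][2][n], flattened
                                  std::vector<float> &x,             // [n], overwritten with the result
                                  int log_n, bool increasing_stride) {
  const int n = 1 << log_n;
  std::vector<float> y(n);
  for (int step = 0; step < log_n; step++) {
    const int log_stride = increasing_stride ? step : log_n - 1 - step;
    const int stride = 1 << log_stride;
    const float *t0 = &twiddle[(2 * step + 0) * n];
    const float *t1 = &twiddle[(2 * step + 1) * n];
    for (int i = 0; i < n; i++) {
      // Each position mixes with its partner at distance `stride`.
      y[i] = t0[i] * x[i] + t1[i] * x[i ^ stride];
    }
    x = y;
  }
}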
// For compatibility with Pytorch 1.1 #ifndef TORCH_CHECK #define TORCH_CHECK AT_CHECK #endif #define BFLY_BENCHMARK false #define BFLY_MAX5_BENCHMARK false // Only support float (not double) for now to speed up compilation time #undef AT_DISPATCH_FLOATING_TYPES #define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \ [&] { \ const auto& the_type = TYPE; \ /* don't use TYPE again in case it is an expensive or side-effect op */ \ at::ScalarType _st = ::detail::scalar_type(the_type); \ switch (_st) { \ AT_PRIVATE_CASE_TYPE(at::ScalarType::Float, float, __VA_ARGS__) \ default: \ AT_ERROR(#NAME, " not implemented for '", toString(_st), "'"); \ } \ }() #define thc_cos std::cos #define thc_sin std::sin #define FULL_MASK 0xffffffff #define MIN_MACRO(x, y) (((x) <= (y)) ? (x) : (y)) #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) static constexpr int SMEM_PER_MP = 96 * (1 << 10); #else static constexpr int SMEM_PER_MP = 64 * (1 << 10); #endif static constexpr int WARP_SIZE = 32; // static constexpr int SMEM_PER_MP = 64 * (1 << 10); static constexpr int MAX_SMEM_PER_BLOCK = 48 * (1 << 10); static constexpr int MAX_BLOCK_SIZE = 1024; // static constexpr int WORK_PER_THREAD = 16; // static constexpr int ELEMENTARY_SIZE = MAX_BLOCK_SIZE / 2; // static constexpr int MAX_N_FACTORS = 10; static constexpr int MAX5_FORWARD_BLOCK_SIZE = 512; static constexpr int MAX5_BACKWARD_BLOCK_SIZE = 256; static constexpr int ITEMS_PER_THREAD_FORWARD[14] = {4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 13, 10, 4, 4}; static constexpr int ITEMS_PER_THREAD_BACKWARD[14] = {16, 16, 16, 16, 16, 16, 16, 16, 16, 8, 8, 8, 8, 8}; static constexpr int ITEMS_PER_THREAD_FORWARD_MAX5[9] = {1, 2, 2, 4, 4, 4, 2, 1, 1}; static constexpr int ITEMS_PER_THREAD_BACKWARD_MAX5[8] = {8, 8, 8, 8, 8, 6, 8, 3}; static constexpr int MIN_BLOCKS_PER_MP_FORWARD[14] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1}; static constexpr int MIN_BLOCKS_PER_MP_BACKWARD[14] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) static constexpr int ITEMS_PER_THREAD_ORTHO_FORWARD[14] = {4, 4, 4, 4, 4, 4, 4, 8, 16, 16, 16, 10, 4, 4}; static constexpr int ITEMS_PER_THREAD_ORTHO_BACKWARD[14] = {16, 16, 16, 16, 16, 16, 16, 16, 8, 16, 8, 8, 8, 8}; static constexpr int MIN_BLOCKS_PER_MP_ORTHO_FORWARD[14] = {1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1}; static constexpr int MIN_BLOCKS_PER_MP_ORTHO_BACKWARD[14] = {1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1}; #else static constexpr int ITEMS_PER_THREAD_ORTHO_FORWARD[14] = {4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 10, 4, 4}; static constexpr int ITEMS_PER_THREAD_ORTHO_BACKWARD[14] = {16, 16, 16, 16, 16, 16, 16, 16, 8, 8, 8, 8, 8, 8}; static constexpr int MIN_BLOCKS_PER_MP_ORTHO_FORWARD[14] = {1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1}; static constexpr int MIN_BLOCKS_PER_MP_ORTHO_BACKWARD[14] = {1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1}; #endif template <typename T, size_t N> using CudaAcsr = at::PackedTensorAccessor32<T, N, at::RestrictPtrTraits>; constexpr __host__ __device__ int min_const(int x, int y) { return x <= y ? x : y; } constexpr __host__ __device__ int min_const(int x, int y, int z) { return min_const(min_const(x, y), z); } constexpr __host__ __device__ int max_const(int x, int y) { return x >= y ? 
x : y; } constexpr __host__ __device__ int div_up_const(int a, int b) { return (a + b - 1) / b; } // __host__ __device__ static inline int64_t div_up(int64_t a, int64_t b) { // return (a + b - 1) / b; // } __host__ __device__ static inline int div_up(int a, int b) { return (a + b - 1) / b; } // Delete the bit at position @position in the binary representation of x __host__ __device__ static inline int delete_bit(int x, unsigned char position) { int mask = (1 << position) - 1; return ((x >> 1) & ~mask) | (x & mask); // return ((x >> (position + 1)) << position) + (x & ((1 << position) - 1)); } template <typename scalar_t> static __device__ __forceinline__ void atomicAdd(thrust::complex<scalar_t> *address, thrust::complex<scalar_t> val) { atomicAdd((scalar_t *)address, val.real()); atomicAdd((scalar_t *)address + 1, val.imag()); } template <typename scalar_t> static __device__ __forceinline__ thrust::complex<scalar_t> __shfl_down_sync(unsigned int mask, thrust::complex<scalar_t> value, unsigned int delta, int width = warpSize) { return thrust::complex<scalar_t>(__shfl_down_sync(mask, value.real(), delta, width), __shfl_down_sync(mask, value.imag(), delta, width)); } template<typename scalar_t> struct InputReader { const CudaAcsr<scalar_t, 3> input_a; const int batch_size; InputReader(const at::Tensor input): input_a(input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>()), batch_size(input.size(0)) {} template<int items_per_thread, int mult_per_warp=1> __device__ __forceinline__ void load(scalar_t input_val[mult_per_warp][items_per_thread], int idx) { #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int i = mult * warpSize + idx; const int s = blockIdx.z; #pragma unroll for (int item = 0; item < items_per_thread; item++){ const int b = blockIdx.x * items_per_thread + item; input_val[mult][item] = b < batch_size ? input_a[b][s][i] : 0; } } } template<int items_per_thread, int mult_per_warp=1> __device__ __forceinline__ void load_max5(scalar_t input_val[mult_per_warp][items_per_thread], int batch_idx_start, int input_idx_start, int input_idx_stride) { const int s = blockIdx.z; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int i = mult * input_idx_stride + input_idx_start; #pragma unroll for (int item = 0; item < items_per_thread; item++){ input_val[mult][item] = batch_idx_start + item < batch_size ? 
input_a[batch_idx_start + item][s][i] : 0; } } } }; template<typename scalar_t> struct OutputWriter { CudaAcsr<scalar_t, 3> output_a; const int batch_size; OutputWriter(at::Tensor output): output_a(output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>()), batch_size(output.size(0)) {} template<int items_per_thread, int mult_per_warp=1> __device__ __forceinline__ void save(scalar_t output_val[mult_per_warp][items_per_thread], int idx) { #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int i = mult * warpSize + idx; const int s = blockIdx.z; #pragma unroll for (int item = 0; item < items_per_thread; item++){ const int b = blockIdx.x * items_per_thread + item; if (b < batch_size) { output_a[b][s][i] = output_val[mult][item]; } } } } template<int items_per_thread, int mult_per_warp=1> __device__ __forceinline__ void save_max5(scalar_t output_val[mult_per_warp][items_per_thread], int batch_idx_start, int input_idx_start, int input_idx_stride) { const int s = blockIdx.z; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int i = mult * input_idx_stride + input_idx_start; #pragma unroll for (int item = 0; (item < items_per_thread) && (batch_idx_start + item < batch_size); item++){ output_a[batch_idx_start + item][s][i] = output_val[mult][item]; } } } }; template<typename scalar_t> struct IntermediateStorage { CudaAcsr<scalar_t, 4> storage_a; IntermediateStorage(const at::Tensor storage): storage_a(storage.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>()) {} template<int items_per_thread, int mult_per_warp=1> __device__ __forceinline__ void save(scalar_t output_val[mult_per_warp][items_per_thread], int idx, int step) { #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int i = mult * warpSize + idx; const int s = blockIdx.z; #pragma unroll for (int item = 0; item < items_per_thread; item++){ const int b = blockIdx.x * items_per_thread + item; storage_a[step][b][s][i] = output_val[mult][item]; } } } template<int items_per_thread, int mult_per_warp=1> __device__ __forceinline__ void load(scalar_t input_val[mult_per_warp][items_per_thread], int idx, int step) { #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int i = mult * warpSize + idx; const int s = blockIdx.z; #pragma unroll for (int item = 0; item < items_per_thread; item++){ const int b = blockIdx.x * items_per_thread + item; input_val[mult][item] = storage_a[step][b][s][i]; } } } }; template <int items_per_thread, int mult_per_warp=1, int smem_per_thread=items_per_thread, typename scalar_t> __device__ __forceinline__ void block_exchange(scalar_t *temp_storage, scalar_t values[mult_per_warp][items_per_thread], int thread_idx_1, int thread_idx_2, int nthreads) { constexpr int nsteps = div_up_const(items_per_thread, smem_per_thread); // TODO: combine mult_per_warp and iterms_per_thread, i.e. 2D -> 1D, to reduce number of syncthreads. 
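// block_exchange permutes each thread's register array through shared memory:
// values are written to temp_storage at offsets derived from thread_idx_1,
// then (after a __syncthreads()) read back at offsets derived from
// thread_idx_2, re-assigning data to threads between the two indexings used
// by the callers. The work is split into nsteps chunks of at most
// smem_per_thread items so the buffer stays within the per-block shared
// memory budget; the leading __syncthreads() on later chunks guards reuse of
// temp_storage.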
#pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { #pragma unroll for (int i = 0; i < nsteps; i++) { if ((i > 0) || (mult > 0)) { __syncthreads(); } #pragma unroll for (int item = 0; (item < smem_per_thread) && (i * smem_per_thread + item < items_per_thread); item++) { temp_storage[thread_idx_1 + item * nthreads] = values[mult][i * smem_per_thread + item]; } __syncthreads(); #pragma unroll for (int item = 0; (item < smem_per_thread) && (i * smem_per_thread + item < items_per_thread); item++) { values[mult][i * smem_per_thread + item] = temp_storage[thread_idx_2 + item * nthreads]; } } } } template <int nsteps, bool increasing_stride, int items_per_thread, int mult_per_warp=1, typename scalar_t> __device__ __forceinline__ void b_untied_forward(const CudaAcsr<scalar_t, 4> twiddle_a, scalar_t input_val[mult_per_warp][items_per_thread], int twiddle_idx_start, int input_idx) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well #pragma unroll // TODO: for loop over mult first instead of step first, // will have to split into 2 parts: intra-thread and intra-warp. for (int step = 0; step < nsteps; step++) { int log_stride = increasing_stride ? step : nsteps - 1 - step; int twiddle_idx = twiddle_idx_start + step; if (log_stride < 5) { int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { // TODO: make num thread per warp an input argument const scalar_t twiddle_val[2] = {twiddle_a[s][twiddle_idx][0][mult * warpSize + input_idx], twiddle_a[s][twiddle_idx][1][mult * warpSize + input_idx]}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t input_val_other = __shfl_xor_sync(FULL_MASK, input_val[mult][item], lane_mask); input_val[mult][item] = twiddle_val[0] * input_val[mult][item] + twiddle_val[1] * input_val_other; } } } else { int mult_stride = 1 << (log_stride - 5); #pragma unroll for (int m = 0; m < mult_per_warp / 2; m++) { int low_order_bits = m & (mult_stride - 1); // int low_order_bits = m % mult_stride; int mult = 2 * (m - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][mult * warpSize + input_idx], twiddle_a[s][twiddle_idx][1][mult * warpSize + input_idx]}, {twiddle_a[s][twiddle_idx][0][(mult + mult_stride) * warpSize + input_idx], twiddle_a[s][twiddle_idx][1][(mult + mult_stride) * warpSize + input_idx]}}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t inputs[2] = {input_val[mult][item], input_val[mult + mult_stride][item]}; input_val[mult][item] = twiddle_val[0][0] * inputs[0] + twiddle_val[0][1] * inputs[1]; // The order of twiddle[1] is swapped by design input_val[mult + mult_stride][item] = twiddle_val[1][1] * inputs[0] + twiddle_val[1][0] * inputs[1]; } } } } } template <int log_n, bool increasing_stride, int items_per_thread=ITEMS_PER_THREAD_FORWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_FORWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> // C10_LAUNCH_BOUNDS_2 supposedly takes min(1 << log_n, 1024) // https://github.com/pytorch/pytorch/blob/v1.1.0/c10/macros/Macros.h // However, it doesn't seem to work correctly so I have to take min explicitly. 
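// Kernel outline: each thread keeps items_per_thread batch elements for
// mult_per_warp input positions in registers. For log_n <= 5 all steps are
// done with warp shuffles; otherwise one group of steps is applied in the
// initial thread-to-data mapping (input_idx_1), and block_exchange moves the
// data between that mapping and a second one (input_idx_2) so every step can
// be applied within a warp's registers, with the result exchanged back so
// output_writer.save always writes in the input_idx_1 layout.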
C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_multiply_untied_forward_fast_cuda_kernel(const CudaAcsr<scalar_t, 4> twiddle_a, InputReader<scalar_t> input_reader, OutputWriter<scalar_t> output_writer, int batch_size) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { b_untied_forward<min_const(log_n, 5), increasing_stride, items_per_thread> (twiddle_a, input_val, 0, input_idx_1); } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); if (increasing_stride) { b_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp>(twiddle_a, input_val, 0, input_idx_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_untied_forward<nsteps_2, true, items_per_thread, mult_per_warp>(twiddle_a, input_val, nsteps_1, input_idx_2); // Don't need __syncthreads() before block_exchange because threads are writing to the same indices. 
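// The exchange below is the inverse of the earlier one (thread_idx_2 back to
// threadIdx.x), so the subsequent save sees the input_idx_1 layout.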
block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); } else { block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp>(twiddle_a, input_val, 0, input_idx_2); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); b_untied_forward<nsteps_1, false, items_per_thread, mult_per_warp>(twiddle_a, input_val, nsteps_2, input_idx_1); } } output_writer.save<items_per_thread, mult_per_warp>(input_val, input_idx_1); } void butterfly_multiply_untied_forward_fast_cuda(const at::Tensor &twiddle, const at::Tensor &input, at::Tensor &output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); OutputWriter<scalar_t> output_writer(output); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_FORWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ increasing_stride ? butterfly_multiply_untied_forward_fast_cuda_kernel<log_n_val, true> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, batch_size) \ : butterfly_multiply_untied_forward_fast_cuda_kernel<log_n_val, false> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, batch_size); break; MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_fast_cuda failed with error code ", cudaGetLastError()); } template <int nsteps, bool increasing_stride, int items_per_thread, int mult_per_warp=1, typename scalar_t> __device__ __forceinline__ void b_untied_forward_shared_twiddle(const scalar_t s_twiddle[nsteps][2][1 << nsteps], scalar_t input_val[mult_per_warp][items_per_thread], const int t_idx) { #pragma unroll for (int step = 0; step < nsteps; step++) { int log_stride = increasing_stride ? 
step : nsteps - 1 - step; if (log_stride < 5) { int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { const scalar_t twiddle_val[2] = {s_twiddle[step][0][mult * warpSize + t_idx], s_twiddle[step][1][mult * warpSize + t_idx]}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t input_val_other = __shfl_xor_sync(FULL_MASK, input_val[mult][item], lane_mask); input_val[mult][item] = twiddle_val[0] * input_val[mult][item] + twiddle_val[1] * input_val_other; } } } else { int mult_stride = 1 << (log_stride - 5); #pragma unroll for (int m = 0; m < mult_per_warp / 2; m++) { int low_order_bits = m & (mult_stride - 1); // int low_order_bits = m % mult_stride; int mult = 2 * (m - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{s_twiddle[step][0][mult * warpSize + t_idx], s_twiddle[step][1][mult * warpSize + t_idx]}, {s_twiddle[step][0][(mult + mult_stride) * warpSize + t_idx], s_twiddle[step][1][(mult + mult_stride) * warpSize + t_idx]}}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t inputs[2] = {input_val[mult][item], input_val[mult + mult_stride][item]}; input_val[mult][item] = twiddle_val[0][0] * inputs[0] + twiddle_val[0][1] * inputs[1]; // The order of twiddle[1] is swapped by design input_val[mult + mult_stride][item] = twiddle_val[1][1] * inputs[0] + twiddle_val[1][0] * inputs[1]; } } } } } template <int nsteps, bool increasing_stride, int items_per_thread=ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_FORWARD[nsteps - 1], typename scalar_t> C10_LAUNCH_BOUNDS_2(MAX5_FORWARD_BLOCK_SIZE, min_blocks_per_mp) __global__ void butterfly_multiply_untied_forward_max5_fast_cuda_kernel(const CudaAcsr<scalar_t, 5> twiddle_a, InputReader<scalar_t> input_reader, OutputWriter<scalar_t> output_writer, int log_n, int twiddle_idx_start, int input_idx_start_bit) { constexpr int span = 1 << nsteps; constexpr int mult_per_warp = span > WARP_SIZE ? span / WARP_SIZE : 1; __shared__ scalar_t s_twiddle[nsteps][2][span]; scalar_t input_val[mult_per_warp][items_per_thread]; const int t_idx = threadIdx.x; const int batch_idx = (threadIdx.y + (blockIdx.x >> (log_n - nsteps)) * blockDim.y) * items_per_thread; const int remaining_input_idx = blockIdx.x & ((1 << (log_n - nsteps)) - 1); const int low_bits = remaining_input_idx & ((1 << input_idx_start_bit) - 1); const int high_bits = (remaining_input_idx >> input_idx_start_bit) << (input_idx_start_bit + nsteps); // All threads with the same t_idx should have the same input_idx const int input_idx = high_bits | (t_idx << input_idx_start_bit) | low_bits; const int input_idx_stride = (1 << input_idx_start_bit) * warpSize; const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well for (int t = threadIdx.x + threadIdx.y * blockDim.x; t < nsteps * (span / 2); t += blockDim.x * blockDim.y) { const int step = t / (span / 2); const int twiddle_idx = twiddle_idx_start + step; const int s_twiddle_stride = 1 << (increasing_stride ? 
step : nsteps - 1 - step); const int remainder = t % (span / 2); const int low_order_bits = remainder & (s_twiddle_stride - 1); const int s_idx = 2 * (remainder - low_order_bits) + low_order_bits; const int idx = (high_bits >> 1) | (remainder << input_idx_start_bit) | low_bits; s_twiddle[step][0][s_idx] = twiddle_a[s][twiddle_idx][idx][0][0]; s_twiddle[step][1][s_idx] = twiddle_a[s][twiddle_idx][idx][0][1]; s_twiddle[step][1][s_idx + s_twiddle_stride] = twiddle_a[s][twiddle_idx][idx][1][0]; s_twiddle[step][0][s_idx + s_twiddle_stride] = twiddle_a[s][twiddle_idx][idx][1][1]; } input_reader.load_max5<items_per_thread, mult_per_warp>(input_val, batch_idx, input_idx, input_idx_stride); __syncthreads(); b_untied_forward_shared_twiddle<nsteps, increasing_stride, items_per_thread, mult_per_warp>(s_twiddle, input_val, t_idx); output_writer.save_max5<items_per_thread, mult_per_warp>(input_val, batch_idx, input_idx, input_idx_stride); } std::vector<int> butterfly_max5_plan(const int log_n, const int nblocks, const int max_nsteps, const bool increasing_stride) { const int niters = div_up(log_n, max_nsteps); const int niters_total = niters * (nblocks == 0 ? 1 : 2 * nblocks); const int nsteps = div_up(log_n, niters); const int nsteps_remainder = log_n - (niters - 1) * nsteps; std::vector<int> bit_milestones; bit_milestones.reserve(niters_total + 1); auto push_strides = [log_n, nsteps, nsteps_remainder](std::vector<int>& milestones, bool increasing) { if (increasing) { for (int i = nsteps_remainder; i <= log_n; i += nsteps) { milestones.push_back(i); } } else { for (int i = log_n - nsteps; i > 0; i -= nsteps) { milestones.push_back(i); } milestones.push_back(0); } }; bit_milestones.push_back(increasing_stride ? 0 : log_n); if (nblocks == 0) { // bit_milestones has niters + 1 elements the form [0, nsteps_remainder, nsteps_remainder + nsteps, ..., log_n] // if increasing stride. Otherwise it's the reverse. push_strides(bit_milestones, increasing_stride); } else { if (increasing_stride) { for (int block = 0; block < nblocks; block++) { push_strides(bit_milestones, true); push_strides(bit_milestones, false); } } else { for (int block = 0; block < nblocks; block++) { push_strides(bit_milestones, false); push_strides(bit_milestones, true); } } } return bit_milestones; } void butterfly_multiply_untied_forward_max5_fast_cuda(const at::Tensor &twiddle, const at::Tensor &input, at::Tensor &output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle.size(1) / (2 * log_n); auto stream = at::cuda::getCurrentCUDAStream(); const std::vector<int> bit_milestones = butterfly_max5_plan(log_n, nblocks, 9, increasing_stride); const int niters = bit_milestones.size() - 1; AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_max5_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); int twiddle_idx_start = 0; for (int iter = 0; iter < niters; iter++) { const InputReader<scalar_t> input_reader(iter == 0 ? input : output); OutputWriter<scalar_t> output_writer(output); const bool increasing_stride_this_iter = bit_milestones[iter] <= bit_milestones[iter + 1]; const int start_bit = increasing_stride_this_iter ? 
bit_milestones[iter] : bit_milestones[iter + 1]; const int nsteps = abs(bit_milestones[iter + 1] - bit_milestones[iter]); const int span = 1 << nsteps; const int n_div_span = 1 << (log_n - nsteps); // = n / span const int block_x = min(span, WARP_SIZE); const int max_block_y = MAX5_FORWARD_BLOCK_SIZE / block_x; dim3 block(block_x, min(max_block_y, div_up(batch_size, ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1]))); // grid.x must be at least n / span dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1] * block.y) * n_div_span, 1, nstack); switch (nsteps) { #define CASE_NSTEPS(nsteps_val) case nsteps_val: \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<nsteps_val, true> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<nsteps_val, false> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); break; MAP(CASE_NSTEPS, 1, 2, 3, 4, 5, 6, 7, 8, 9) } twiddle_idx_start += nsteps; } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_NSTEPS TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_max5_fast_cuda failed with error code ", cudaGetLastError()); } template <int nsteps, bool increasing_stride, int items_per_thread, int mult_per_warp=1, int reg_storage_per_thread=items_per_thread, typename scalar_t, typename accscalar_t=at::acc_type<scalar_t, true>> __device__ __forceinline__ void b_untied_forward_backward(const CudaAcsr<scalar_t, 4> twiddle_a, CudaAcsr<scalar_t, 4> d_twiddle_a, scalar_t input_val[mult_per_warp][items_per_thread], scalar_t grad_val[mult_per_warp][items_per_thread], int twiddle_idx_start, int input_idx) { constexpr int nslices = div_up_const(items_per_thread, reg_storage_per_thread); static_assert(nslices == 1, "nslices not 1"); const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well scalar_t twiddle_val[nsteps][mult_per_warp][2]; accscalar_t d_twiddle_val[nsteps][mult_per_warp][2] = {0}; scalar_t input_val_storage[nsteps][mult_per_warp][reg_storage_per_thread]; // Strange bug: if I use the for loop with #pragma unroll (even though nslices=1) // the result is wrong for n = 4096, batch_size >= 8, increasing_stride=False, // items_per_thread=8 (not 1, 2, or 4). // For now I'm disabling slicing (i.e. reg_storage_per_thread=items_per_thread always). // #pragma unroll // for (int slice = 0; slice < nslices; slice++) { { constexpr int slice = 0; assert(slice == 0); #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { #pragma unroll for (int item = 0; (item < reg_storage_per_thread) && (slice * reg_storage_per_thread + item < items_per_thread); item++) { input_val_storage[0][mult][item] = input_val[mult][slice * reg_storage_per_thread + item]; } } #pragma unroll for (int step = 0; step < nsteps; step++) { int log_stride = increasing_stride ? 
step : nsteps - 1 - step; int lane_mask = 1 << log_stride; int twiddle_idx = step + twiddle_idx_start; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { if (slice == 0) { twiddle_val[step][mult][0] = twiddle_a[s][twiddle_idx][0][mult * warpSize + input_idx]; twiddle_val[step][mult][1] = twiddle_a[s][twiddle_idx][1][mult * warpSize + input_idx]; } if (step < nsteps - 1) { // Don't need input for the last step #pragma unroll for (int item = 0; (item < reg_storage_per_thread) && (slice * reg_storage_per_thread + item < items_per_thread); item++) { scalar_t input_val_other = log_stride < 5 ? __shfl_xor_sync(FULL_MASK, input_val_storage[step][mult][item], lane_mask) : input_val_storage[step][mult ^ (1 << (log_stride - 5))][item]; input_val_storage[step + 1][mult][item] = twiddle_val[step][mult][0] * input_val_storage[step][mult][item] + twiddle_val[step][mult][1] * input_val_other; } } } } #pragma unroll for (int step = nsteps - 1; step >= 0; step--) { int log_stride = increasing_stride ? step : nsteps - 1 - step; int twiddle_idx = step + twiddle_idx_start; int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { #pragma unroll for (int item = 0; (item < reg_storage_per_thread) && (slice * reg_storage_per_thread + item < items_per_thread); item++) { int item_offset = slice * reg_storage_per_thread + item; d_twiddle_val[step][mult][0] += grad_val[mult][item_offset] * input_val_storage[step][mult][item]; scalar_t input_val_other = log_stride < 5 ? __shfl_xor_sync(FULL_MASK, input_val_storage[step][mult][item], lane_mask) : input_val_storage[step][mult ^ (1 << (log_stride - 5))][item]; d_twiddle_val[step][mult][1] += grad_val[mult][item_offset] * input_val_other; if (log_stride < 5) { grad_val[mult][item_offset] = twiddle_val[step][mult][0] * grad_val[mult][item_offset] + __shfl_xor_sync(FULL_MASK, twiddle_val[step][mult][1] * grad_val[mult][item_offset], lane_mask); } } if (slice == nslices - 1) { atomicAdd(&d_twiddle_a[s][twiddle_idx][0][mult * warpSize + input_idx], d_twiddle_val[step][mult][0]); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][mult * warpSize + input_idx], d_twiddle_val[step][mult][1]); } } if (log_stride >= 5) { int mult_stride = 1 << (log_stride - 5); #pragma unroll for (int m = 0; m < mult_per_warp / 2; m++) { int low_order_bits = m & (mult_stride - 1); // int low_order_bits = m % mult_stride; int mult = 2 * (m - low_order_bits) + low_order_bits; #pragma unroll for (int item = 0; (item < reg_storage_per_thread) && (slice * reg_storage_per_thread + item < items_per_thread); item++) { int item_offset = slice * reg_storage_per_thread + item; scalar_t grads[2] = {grad_val[mult][item_offset], grad_val[mult + mult_stride][item_offset]}; // The order of twiddle[1] is swapped by design grad_val[mult][item_offset] = twiddle_val[step][mult][0] * grads[0] + twiddle_val[step][mult + mult_stride][1] * grads[1]; grad_val[mult + mult_stride][item_offset] = twiddle_val[step][mult][1] * grads[0] + twiddle_val[step][mult + mult_stride][0] * grads[1]; } } } } } } template <int log_n, bool increasing_stride, int items_per_thread=ITEMS_PER_THREAD_BACKWARD[log_n - 1], int max_reg_storage_per_thread=items_per_thread, int min_blocks_per_mp=MIN_BLOCKS_PER_MP_BACKWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> // C10_LAUNCH_BOUNDS_2 supposedly takes min(1 << log_n, 1024) // https://github.com/pytorch/pytorch/blob/v1.1.0/c10/macros/Macros.h // However, it doesn't seem to work correctly so I have to take min explicitly. 
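// Fused forward+backward kernel: b_untied_forward_backward recomputes the
// forward activations for its group of steps in registers, then walks those
// steps in reverse, accumulating twiddle gradients into d_twiddle_a with
// atomicAdd and propagating the input gradient through the transposed 2x2
// mixes. For log_n > 5 the same block_exchange scheme as in the forward
// kernel moves activations and gradients between the two thread mappings.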
C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_multiply_untied_forward_backward_fast_cuda_kernel(const CudaAcsr<scalar_t, 4> twiddle_a, InputReader<scalar_t> input_reader, InputReader<scalar_t> grad_reader, CudaAcsr<scalar_t, 4> d_twiddle_a, OutputWriter<scalar_t> d_input_writer, int batch_size) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int reg_storage_per_thread = min_const(max_reg_storage_per_thread, items_per_thread); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; scalar_t grad_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); b_untied_forward_backward<min_const(log_n, 5), increasing_stride, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, 0, input_idx_1); } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); if (increasing_stride) { b_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp>(twiddle_a, input_val, 0, input_idx_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_2); b_untied_forward_backward<nsteps_2, true, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, nsteps_1, input_idx_2); // Don't need __syncthreads() before block_exchange because threads are writing to the same indices. 
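// The gradient is exchanged back to the input_idx_1 layout, and the
// activations for the first group of steps are recomputed by reloading the
// input from global memory rather than being kept in shared memory.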
block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); b_untied_forward_backward<nsteps_1, true, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, 0, input_idx_1); } else { block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp>(twiddle_a, input_val, 0, input_idx_2); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); b_untied_forward_backward<nsteps_1, false, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, nsteps_2, input_idx_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, threadIdx.x, thread_idx_2, nthreads); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_2); b_untied_forward_backward<nsteps_2, false, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, 0, input_idx_2); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); } } d_input_writer.save<items_per_thread, mult_per_warp>(grad_val, input_idx_1); } void butterfly_multiply_untied_forward_backward_fast_cuda(const at::Tensor &twiddle, const at::Tensor &input, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_backward_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); const InputReader<scalar_t> grad_reader(grad); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_BACKWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ increasing_stride ? 
butterfly_multiply_untied_forward_backward_fast_cuda_kernel<log_n_val, true> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, batch_size) \ : butterfly_multiply_untied_forward_backward_fast_cuda_kernel<log_n_val, false> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, batch_size); break; MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_backward_fast_cuda failed with error code ", cudaGetLastError()); } template <int nsteps, bool increasing_stride, int items_per_thread, int mult_per_warp=1, typename scalar_t, typename accscalar_t=at::acc_type<scalar_t, true>> __device__ __forceinline__ void b_untied_forward_backward_shared_twiddle(const scalar_t s_twiddle[nsteps][2][1 << nsteps], accscalar_t s_d_twiddle[nsteps][2][1 << nsteps], scalar_t input_val[nsteps][mult_per_warp][items_per_thread], scalar_t grad_val[mult_per_warp][items_per_thread], int t_idx) { // Forward pass #pragma unroll for (int step = 0; step < nsteps - 1; step++) { // Don't need input for the last step int log_stride = increasing_stride ? step : nsteps - 1 - step; int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { const scalar_t twiddle_val[2] = {s_twiddle[step][0][mult * warpSize + t_idx], s_twiddle[step][1][mult * warpSize + t_idx]}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t input_val_other = log_stride < 5 ? __shfl_xor_sync(FULL_MASK, input_val[step][mult][item], lane_mask) : input_val[step][mult ^ (1 << (log_stride - 5))][item]; input_val[step + 1][mult][item] = twiddle_val[0] * input_val[step][mult][item] + twiddle_val[1] * input_val_other; } } } // Backward pass #pragma unroll for (int step = nsteps - 1; step >= 0; step--) { int log_stride = increasing_stride ? step : nsteps - 1 - step; int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { const scalar_t twiddle_val[2] = {s_twiddle[step][0][mult * warpSize + t_idx], s_twiddle[step][1][mult * warpSize + t_idx]}; accscalar_t d_twiddle_val[2] = {0}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { d_twiddle_val[0] += grad_val[mult][item] * input_val[step][mult][item]; scalar_t input_val_other = log_stride < 5 ? 
__shfl_xor_sync(FULL_MASK, input_val[step][mult][item], lane_mask) : input_val[step][mult ^ (1 << (log_stride - 5))][item]; d_twiddle_val[1] += grad_val[mult][item] * input_val_other; if (log_stride < 5) { grad_val[mult][item] = twiddle_val[0] * grad_val[mult][item] + __shfl_xor_sync(FULL_MASK, twiddle_val[1] * grad_val[mult][item], lane_mask); } } atomicAdd(&s_d_twiddle[step][0][mult * warpSize + t_idx], d_twiddle_val[0]); atomicAdd(&s_d_twiddle[step][1][mult * warpSize + t_idx], d_twiddle_val[1]); } if (log_stride >= 5) { int mult_stride = 1 << (log_stride - 5); #pragma unroll for (int m = 0; m < mult_per_warp / 2; m++) { int low_order_bits = m & (mult_stride - 1); // int low_order_bits = m % mult_stride; int mult = 2 * (m - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{s_twiddle[step][0][mult * warpSize + t_idx], s_twiddle[step][1][mult * warpSize + t_idx]}, {s_twiddle[step][0][(mult + mult_stride) * warpSize + t_idx], s_twiddle[step][1][(mult + mult_stride) * warpSize + t_idx]}}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t grads[2] = {grad_val[mult][item], grad_val[mult + mult_stride][item]}; // The order of twiddle[1] is swapped by design grad_val[mult][item] = twiddle_val[0][0] * grads[0] + twiddle_val[1][1] * grads[1]; grad_val[mult + mult_stride][item] = twiddle_val[0][1] * grads[0] + twiddle_val[1][0] * grads[1]; } } } } } template <int nsteps, bool increasing_stride, int items_per_thread=ITEMS_PER_THREAD_BACKWARD_MAX5[nsteps - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_BACKWARD[nsteps - 1], typename scalar_t, typename accscalar_t=at::acc_type<scalar_t, true>> C10_LAUNCH_BOUNDS_2(MAX5_BACKWARD_BLOCK_SIZE, min_blocks_per_mp) __global__ void butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel(const CudaAcsr<scalar_t, 5> twiddle_a, InputReader<scalar_t> input_reader, InputReader<scalar_t> grad_reader, CudaAcsr<scalar_t, 5> d_twiddle_a, OutputWriter<scalar_t> d_input_writer, int log_n, int twiddle_idx_start, int input_idx_start_bit) { constexpr int span = 1 << nsteps; constexpr int mult_per_warp = span > WARP_SIZE ? span / WARP_SIZE : 1; __shared__ scalar_t s_twiddle[nsteps][2][span]; __shared__ accscalar_t s_d_twiddle[nsteps][2][span]; scalar_t input_val[nsteps][mult_per_warp][items_per_thread]; scalar_t grad_val[mult_per_warp][items_per_thread]; const int t_idx = threadIdx.x; const int batch_idx = (threadIdx.y + (blockIdx.x >> (log_n - nsteps)) * blockDim.y) * items_per_thread; const int remaining_input_idx = blockIdx.x & ((1 << (log_n - nsteps)) - 1); const int low_bits = remaining_input_idx & ((1 << input_idx_start_bit) - 1); const int high_bits = (remaining_input_idx >> input_idx_start_bit) << (input_idx_start_bit + nsteps); // All threads with the same t_idx should have the same input_idx const int input_idx = high_bits | (t_idx << input_idx_start_bit) | low_bits; const int input_idx_stride = (1 << input_idx_start_bit) * warpSize; const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well for (int t = threadIdx.x + threadIdx.y * blockDim.x; t < nsteps * (span / 2); t += blockDim.x * blockDim.y) { const int step = t / (span / 2); const int twiddle_idx = twiddle_idx_start + step; const int s_twiddle_stride = 1 << (increasing_stride ? 
step : nsteps - 1 - step); const int remainder = t % (span / 2); const int low_order_bits = remainder & (s_twiddle_stride - 1); const int s_idx = 2 * (remainder - low_order_bits) + low_order_bits; const int idx = (high_bits >> 1) | (remainder << input_idx_start_bit) | low_bits; s_twiddle[step][0][s_idx] = twiddle_a[s][twiddle_idx][idx][0][0]; s_twiddle[step][1][s_idx] = twiddle_a[s][twiddle_idx][idx][0][1]; s_twiddle[step][1][s_idx + s_twiddle_stride] = twiddle_a[s][twiddle_idx][idx][1][0]; s_twiddle[step][0][s_idx + s_twiddle_stride] = twiddle_a[s][twiddle_idx][idx][1][1]; s_d_twiddle[step][0][s_idx] = 0; s_d_twiddle[step][1][s_idx] = 0; s_d_twiddle[step][1][s_idx + s_twiddle_stride] = 0; s_d_twiddle[step][0][s_idx + s_twiddle_stride] = 0; } input_reader.load_max5<items_per_thread, mult_per_warp>(input_val[0], batch_idx, input_idx, input_idx_stride); __syncthreads(); grad_reader.load_max5<items_per_thread, mult_per_warp>(grad_val, batch_idx, input_idx, input_idx_stride); b_untied_forward_backward_shared_twiddle<nsteps, increasing_stride, items_per_thread, mult_per_warp> (s_twiddle, s_d_twiddle, input_val, grad_val, t_idx); d_input_writer.save_max5<items_per_thread, mult_per_warp>(grad_val, batch_idx, input_idx, input_idx_stride); __syncthreads(); for (int t = threadIdx.x + threadIdx.y * blockDim.x; t < nsteps * (span / 2); t += blockDim.x * blockDim.y) { const int step = t / (span / 2); const int twiddle_idx = twiddle_idx_start + step; const int s_twiddle_stride = 1 << (increasing_stride ? step : nsteps - 1 - step); const int remainder = t % (span / 2); const int low_order_bits = remainder & (s_twiddle_stride - 1); const int s_idx = 2 * (remainder - low_order_bits) + low_order_bits; const int idx = (high_bits >> 1) | (remainder << input_idx_start_bit) | low_bits; atomicAdd(&d_twiddle_a[s][twiddle_idx][idx][0][0], s_d_twiddle[step][0][s_idx]); atomicAdd(&d_twiddle_a[s][twiddle_idx][idx][0][1], s_d_twiddle[step][1][s_idx]); atomicAdd(&d_twiddle_a[s][twiddle_idx][idx][1][0], s_d_twiddle[step][1][s_idx + s_twiddle_stride]); atomicAdd(&d_twiddle_a[s][twiddle_idx][idx][1][1], s_d_twiddle[step][0][s_idx + s_twiddle_stride]); } } void butterfly_multiply_untied_forward_backward_max5_fast_cuda(const at::Tensor &twiddle, const at::Tensor &input, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle.size(1) / (2 * log_n); auto stream = at::cuda::getCurrentCUDAStream(); const std::vector<int> bit_milestones = butterfly_max5_plan(log_n, nblocks, 8, increasing_stride); const int niters = bit_milestones.size() - 1; auto intermediate_storage = at::empty({niters - 1, batch_size, nstack, n}, at::dtype(input.dtype()).device(input.device())); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_max5_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); // Forward pass int twiddle_idx_start = 0; for (int iter = 0; iter < niters - 1; iter++) { const InputReader<scalar_t> input_reader(iter == 0 ? input : intermediate_storage[iter - 1]); OutputWriter<scalar_t> output_writer(intermediate_storage[iter]); const bool increasing_stride_this_iter = bit_milestones[iter] <= bit_milestones[iter + 1]; const int start_bit = increasing_stride_this_iter ? 
bit_milestones[iter] : bit_milestones[iter + 1]; const int nsteps = abs(bit_milestones[iter + 1] - bit_milestones[iter]); const int span = 1 << nsteps; const int n_div_span = 1 << (log_n - nsteps); // = n / span const int block_x = min(span, WARP_SIZE); const int max_block_y = MAX5_FORWARD_BLOCK_SIZE / block_x; dim3 block(block_x, min(max_block_y, div_up(batch_size, ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1]))); // grid.x must be at least n / span dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1] * block.y) * n_div_span, 1, nstack); switch (nsteps) { #define CASE_NSTEPS_FORWARD(nsteps_val) case nsteps_val: \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<nsteps_val, true> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<nsteps_val, false> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); break; MAP(CASE_NSTEPS_FORWARD, 1, 2, 3, 4, 5, 6, 7, 8) } twiddle_idx_start += nsteps; } // Backward pass twiddle_idx_start = log_n * (nblocks == 0 ? 1 : 2 * nblocks); for (int iter = niters - 1; iter >= 0; iter--) { const InputReader<scalar_t> input_reader(iter == 0 ? input : intermediate_storage[iter - 1]); const InputReader<scalar_t> grad_reader(iter == niters - 1 ? grad : d_input); OutputWriter<scalar_t> d_input_writer(d_input); const bool increasing_stride_this_iter = bit_milestones[iter] <= bit_milestones[iter + 1]; const int start_bit = increasing_stride_this_iter ? bit_milestones[iter] : bit_milestones[iter + 1]; const int nsteps = abs(bit_milestones[iter + 1] - bit_milestones[iter]); twiddle_idx_start -= nsteps; const int span = 1 << nsteps; const int n_div_span = 1 << (log_n - nsteps); // = n / span const int block_x = min(span, WARP_SIZE); const int max_block_y = MAX5_BACKWARD_BLOCK_SIZE / block_x; dim3 block(block_x, min(max_block_y, div_up(batch_size, ITEMS_PER_THREAD_BACKWARD_MAX5[nsteps - 1]))); // grid.x must be at least n / span dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_BACKWARD_MAX5[nsteps - 1] * block.y) * n_div_span, 1, nstack); switch (nsteps) { #define CASE_NSTEPS_BACKWARD(nsteps_val) case nsteps_val: \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<nsteps_val, true> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<nsteps_val, false> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); break; MAP(CASE_NSTEPS_BACKWARD, 1, 2, 3, 4, 5, 6, 7, 8) } } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_NSTEPS_FORWARD #undef CASE_NSTEPS_BACKWARD TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_backward_max5_fast_cuda failed with error code ", cudaGetLastError()); } template <int log_n, int items_per_thread=ITEMS_PER_THREAD_FORWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_FORWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> // C10_LAUNCH_BOUNDS_2 supposedly takes min(1 << log_n, 1024) // https://github.com/pytorch/pytorch/blob/v1.1.0/c10/macros/Macros.h // However, it doesn't seem to work correctly so I have to take min explicitly. 
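// "bbs" variant: the kernel fuses nblocks pairs of butterfly factors, each
// pair being a decreasing-stride pass (twiddle offset block * 2 * log_n)
// followed by an increasing-stride pass (offset (block * 2 + 1) * log_n).
// Data stays in registers across the whole product; block_exchange is used
// only where a pass crosses between the input_idx_1 and input_idx_2 mappings.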
C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_bbs_multiply_untied_forward_fast_cuda_kernel(const CudaAcsr<scalar_t, 4> twiddle_a, InputReader<scalar_t> input_reader, OutputWriter<scalar_t> output_writer, int batch_size, int nblocks) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { for (int block = 0; block < nblocks; block++) { b_untied_forward<min_const(log_n, 5), false, items_per_thread> (twiddle_a, input_val, block * 2 * log_n, input_idx_1); b_untied_forward<min_const(log_n, 5), true, items_per_thread> (twiddle_a, input_val, (block * 2 + 1) * log_n, input_idx_1); } } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; constexpr int nsteps_1 = log_n <= 10 ? 5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); for (int block = 0; block < nblocks; block++) { b_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_a, input_val, block * 2 * log_n, input_idx_2); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); b_untied_forward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_a, input_val, block * 2 * log_n + nsteps_2, input_idx_1); b_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_a, input_val, (block * 2 + 1) * log_n, input_idx_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_untied_forward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_a, input_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2); } block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); } output_writer.save<items_per_thread, mult_per_warp>(input_val, input_idx_1); } void butterfly_bbs_multiply_untied_forward_fast_cuda(const at::Tensor &twiddle, const at::Tensor &input, at::Tensor &output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_bbs_multiply_untied_forward_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const InputReader<scalar_t> 
input_reader(input); OutputWriter<scalar_t> output_writer(output); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_FORWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ butterfly_bbs_multiply_untied_forward_fast_cuda_kernel<log_n_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, batch_size, nblocks); break; MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_bbs_multiply_untied_forward_fast_cuda failed with error code ", cudaGetLastError()); } template <int log_n, int items_per_thread=ITEMS_PER_THREAD_BACKWARD[log_n - 1], int max_reg_storage_per_thread=items_per_thread, int min_blocks_per_mp=MIN_BLOCKS_PER_MP_BACKWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_bbs_multiply_untied_forward_backward_fast_cuda_kernel(const CudaAcsr<scalar_t, 4> twiddle_a, InputReader<scalar_t> input_reader, InputReader<scalar_t> grad_reader, IntermediateStorage<scalar_t> inter_storage, CudaAcsr<scalar_t, 4> d_twiddle_a, OutputWriter<scalar_t> d_input_writer, int batch_size, int nblocks) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int reg_storage_per_thread = min_const(max_reg_storage_per_thread, items_per_thread); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; scalar_t grad_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { for (int block = 0; block < nblocks; block++) { b_untied_forward<min_const(log_n, 5), false, items_per_thread> (twiddle_a, input_val, block * 2 * log_n, input_idx_1); if (block < nblocks - 1) { inter_storage.save<items_per_thread>(input_val, input_idx_1, block * 2); b_untied_forward<min_const(log_n, 5), true, items_per_thread> (twiddle_a, input_val, (block * 2 + 1) * log_n, input_idx_1); inter_storage.save<items_per_thread>(input_val, input_idx_1, block * 2 + 1); } } grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); for (int block = nblocks - 1; block >= 0; block--) { if (block < nblocks - 1) { inter_storage.load<items_per_thread>(input_val, input_idx_1, block * 2); } b_untied_forward_backward<min_const(log_n, 5), true, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, (block * 2 + 1) * log_n, input_idx_1); block == 0 ? 
input_reader.load<items_per_thread>(input_val, input_idx_1) : inter_storage.load<items_per_thread>(input_val, input_idx_1, (block - 1) * 2 + 1); b_untied_forward_backward<min_const(log_n, 5), false, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, block * 2 * log_n, input_idx_1); } } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); for (int block = 0; block < nblocks; block++) { b_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_a, input_val, block * 2 * log_n, input_idx_2); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); inter_storage.save<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4); b_untied_forward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_a, input_val, block * 2 * log_n + nsteps_2, input_idx_1); inter_storage.save<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4 + 1); b_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_a, input_val, (block * 2 + 1) * log_n, input_idx_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); if (block < nblocks - 1) { // We can store using input_idx_1 instead of input_idx_2 since we'll load using input_idx_1 as well inter_storage.save<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4 + 2); b_untied_forward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_a, input_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2); inter_storage.save<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4 + 3); } } grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_2); for (int block = nblocks - 1; block >= 0; block--) { if (block < nblocks - 1) { inter_storage.load<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4 + 2); } b_untied_forward_backward<nsteps_2, true, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); inter_storage.load<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4 + 1); b_untied_forward_backward<nsteps_1, true, items_per_thread, mult_per_warp, reg_storage_per_thread> (twiddle_a, d_twiddle_a, input_val, grad_val, (block * 2 + 1) * log_n, input_idx_1); inter_storage.load<items_per_thread, mult_per_warp>(input_val, input_idx_1, block * 4); b_untied_forward_backward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_a, d_twiddle_a, input_val, grad_val, block * 2 * log_n + nsteps_2, input_idx_1); block_exchange<items_per_thread, mult_per_warp, 
smem_per_thread>(temp_storage, grad_val, threadIdx.x, thread_idx_2, nthreads); block == 0 ? input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_2) : inter_storage.load<items_per_thread, mult_per_warp>(input_val, input_idx_1, (block - 1) * 4 + 3); b_untied_forward_backward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_a, d_twiddle_a, input_val, grad_val, block * 2 * log_n, input_idx_2); } block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); } d_input_writer.save<items_per_thread, mult_per_warp>(grad_val, input_idx_1); } void butterfly_bbs_multiply_untied_forward_backward_fast_cuda(const at::Tensor &twiddle, const at::Tensor &input, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_input) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle.size(1) / (2 * log_n); auto intermediate_storage = at::empty({log_n <= 5 ? (nblocks - 1) * 2 : (nblocks - 1) * 4 + 2, batch_size, nstack, n}, at::dtype(input.dtype()).device(input.device())); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_bbs_multiply_untied_forward_backward_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); const InputReader<scalar_t> grad_reader(grad); IntermediateStorage<scalar_t> inter_storage(intermediate_storage); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_BACKWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ butterfly_bbs_multiply_untied_forward_backward_fast_cuda_kernel<log_n_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_input_writer, batch_size, nblocks); break; MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_bbs_multiply_untied_forward_backward_fast_cuda failed with error code ", cudaGetLastError()); } template <int nsteps, bool increasing_stride, int items_per_thread, int mult_per_warp=1, typename scalar_t> __device__ __forceinline__ void b_ortho_untied_forward(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, scalar_t input_val[mult_per_warp][items_per_thread], int twiddle_idx_start, int input_idx, int log_input_stride_start) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well #pragma unroll // TODO: for loop over mult first instead of step first, // will have to split into 2 parts: intra-thread and intra-warp. for (int step = 0; step < nsteps; step++) { int log_stride = increasing_stride ? 
step : nsteps - 1 - step; int log_input_stride = log_input_stride_start + log_stride; int twiddle_idx = twiddle_idx_start + step; if (log_stride < 5) { int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { // TODO: make num thread per warp an input argument int idx = mult * warpSize + input_idx; int low_order_bits = idx & ((1 << log_input_stride) - 1); // int low_order_bits = idx % (1 << log_input_stride); // Bit manipulation to delete the bit at log_input_stride int index_access = ((idx >> (log_input_stride + 1)) << log_input_stride) + low_order_bits; bool odd = (idx >> log_input_stride) & 1U; scalar_t twiddle_val_mine = !odd ? twiddle_cos_a[s][twiddle_idx][index_access] : twiddle_sin_a[s][twiddle_idx][index_access]; scalar_t twiddle_val_other = __shfl_xor_sync(FULL_MASK, twiddle_val_mine, lane_mask); const scalar_t twiddle_val[2] = {!odd ? twiddle_val_mine : twiddle_val_other, !odd ? -twiddle_val_other : twiddle_val_mine}; // if (not odd) { // twiddle_val[0] = -twiddle_cos_a[s][twiddle_idx][index_access]; // twiddle_val[1] = -twiddle_sin_a[s][twiddle_idx][index_access]; // } // scalar_t twiddle_val_exch[2] = {__shfl_xor_sync(FULL_MASK, twiddle_val[0], lane_mask), // __shfl_xor_sync(FULL_MASK, twiddle_val[1], lane_mask)}; // if (odd) { // twiddle_val[0] = twiddle_val_exch[0]; // twiddle_val[1] = -twiddle_val_exch[1]; // } #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t input_val_other = __shfl_xor_sync(FULL_MASK, input_val[mult][item], lane_mask); input_val[mult][item] = twiddle_val[0] * input_val[mult][item] + twiddle_val[1] * input_val_other; } } } else { int mult_stride = 1 << (log_stride - 5); #pragma unroll for (int m = 0; m < mult_per_warp / 2; m++) { int low_order_bits = m & (mult_stride - 1); // int low_order_bits = m % mult_stride; int mult = 2 * (m - low_order_bits) + low_order_bits; int idx = mult * warpSize + input_idx; low_order_bits = idx & ((1 << log_input_stride) - 1); // int low_order_bits = idx % (1 << log_input_stride); // int index_access = ((idx & ~(1U << log_input_stride)) - low_order_bits) / 2 + low_order_bits; int index_access = ((idx >> (log_input_stride + 1)) << log_input_stride) + low_order_bits; const scalar_t twiddle_val[2] = {twiddle_cos_a[s][twiddle_idx][index_access], twiddle_sin_a[s][twiddle_idx][index_access]}; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t inputs[2] = {input_val[mult][item], input_val[mult + mult_stride][item]}; input_val[mult][item] = twiddle_val[0] * inputs[0] - twiddle_val[1] * inputs[1]; input_val[mult + mult_stride][item] = twiddle_val[1] * inputs[0] + twiddle_val[0] * inputs[1]; } } } } } template <int log_n, bool increasing_stride, int items_per_thread=ITEMS_PER_THREAD_ORTHO_FORWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_ORTHO_FORWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> // C10_LAUNCH_BOUNDS_2 supposedly takes min(1 << log_n, 1024) // https://github.com/pytorch/pytorch/blob/v1.1.0/c10/macros/Macros.h // However, it doesn't seem to work correctly so I have to take min explicitly. 
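// Note on the indexing above: at each step, b_ortho_untied_forward rotates every pair of elements
// whose indices differ only in the bit at log_input_stride, using a (cos, sin) twiddle shared by
// both elements of the pair. The shared index is obtained by deleting that bit:
//   index_access = ((idx >> (log_input_stride + 1)) << log_input_stride) + low_order_bits
// e.g. for log_input_stride = 2 and idx = 0b10110 (22): low_order_bits = 0b10, (22 >> 3) << 2 = 0b1000,
// so index_access = 0b1010 (10), i.e. idx with bit 2 removed. For log_stride < 5 the two elements of
// a pair sit in different lanes of the same warp and are combined with __shfl_xor_sync; for
// log_stride >= 5 both sit in this thread's registers (mult and mult + mult_stride) and are rotated directly.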
C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_ortho_multiply_untied_forward_fast_cuda_kernel(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, InputReader<scalar_t> input_reader, OutputWriter<scalar_t> output_writer, int batch_size) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { b_ortho_untied_forward<min_const(log_n, 5), increasing_stride, items_per_thread> (twiddle_cos_a, twiddle_sin_a, input_val, 0, input_idx_1, 0); } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); if (increasing_stride) { b_ortho_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, 0, input_idx_1, 0); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_forward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, nsteps_1, input_idx_2, nsteps_1); // Don't need __syncthreads() before block_exchange because threads are writing to the same indices. 
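// Exchange back so the results are laid out according to input_idx_1 again, which is the layout
// that output_writer.save below expects.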
block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); } else { block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, 0, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); b_ortho_untied_forward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, nsteps_2, input_idx_1, 0); } } output_writer.save<items_per_thread, mult_per_warp>(input_val, input_idx_1); } void butterfly_ortho_multiply_untied_forward_fast_cuda(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &input, at::Tensor &output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_untied_forward_fast_cuda", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); OutputWriter<scalar_t> output_writer(output); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_ORTHO_FORWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ increasing_stride ? butterfly_ortho_multiply_untied_forward_fast_cuda_kernel<log_n_val, true> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, input_reader, output_writer, batch_size) \ : butterfly_ortho_multiply_untied_forward_fast_cuda_kernel<log_n_val, false> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, input_reader, output_writer, batch_size); break; MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_ortho_multiply_untied_forward_fast_cuda failed with error code ", cudaGetLastError()); } template <int nsteps, bool increasing_stride, int items_per_thread, int mult_per_warp=1, typename scalar_t, typename accscalar_t=at::acc_type<scalar_t, true>> __device__ __forceinline__ void b_ortho_untied_backward(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, CudaAcsr<scalar_t, 3> d_twiddle_a, scalar_t output_val[mult_per_warp][items_per_thread], scalar_t grad_val[mult_per_warp][items_per_thread], int twiddle_idx_start, int input_idx, int log_input_stride_start) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well #pragma unroll for (int step = nsteps - 1; step >= 0; step--) { int log_stride = increasing_stride ? 
step : nsteps - 1 - step; int log_input_stride = log_input_stride_start + log_stride; int twiddle_idx = step + twiddle_idx_start; if (log_stride < 5) { int lane_mask = 1 << log_stride; #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { int idx = mult * warpSize + input_idx; int low_order_bits = idx & ((1 << log_input_stride) - 1); // int low_order_bits = idx % (1 << log_input_stride); // Bit manipulation to delete the bit at log_input_stride int index_access = ((idx >> (log_input_stride + 1)) << log_input_stride) + low_order_bits; bool odd = (idx >> log_input_stride) & 1U; scalar_t twiddle_val_mine = !odd ? twiddle_cos_a[s][twiddle_idx][index_access] : twiddle_sin_a[s][twiddle_idx][index_access]; scalar_t twiddle_val_other = __shfl_xor_sync(FULL_MASK, twiddle_val_mine, lane_mask); const scalar_t twiddle_val[2] = {!odd ? twiddle_val_mine : twiddle_val_other, !odd ? twiddle_val_other : -twiddle_val_mine}; accscalar_t d_twiddle_val = 0; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t output_val_other = __shfl_xor_sync(FULL_MASK, output_val[mult][item], lane_mask); output_val[mult][item] = twiddle_val[0] * output_val[mult][item] + twiddle_val[1] * output_val_other; scalar_t grad_val_other = __shfl_xor_sync(FULL_MASK, grad_val[mult][item], lane_mask); output_val_other = __shfl_xor_sync(FULL_MASK, output_val[mult][item], lane_mask); if (!odd) { d_twiddle_val += (grad_val[mult][item] * output_val[mult][item] + grad_val_other * output_val_other) * (-twiddle_val[1]) + (-grad_val[mult][item] * output_val_other + grad_val_other * output_val[mult][item]) * twiddle_val[0]; } grad_val[mult][item] = twiddle_val[0] * grad_val[mult][item] + twiddle_val[1] * grad_val_other; } if (!odd) { atomicAdd(&d_twiddle_a[s][twiddle_idx][index_access], d_twiddle_val); } } } else { int mult_stride = 1 << (log_stride - 5); #pragma unroll for (int m = 0; m < mult_per_warp / 2; m++) { int low_order_bits = m & (mult_stride - 1); // int low_order_bits = m % mult_stride; int mult = 2 * (m - low_order_bits) + low_order_bits; int idx = mult * warpSize + input_idx; low_order_bits = idx & ((1 << log_input_stride) - 1); // int low_order_bits = idx % (1 << log_input_stride); int index_access = ((idx >> (log_input_stride + 1)) << log_input_stride) + low_order_bits; const scalar_t twiddle_val[2] = {twiddle_cos_a[s][twiddle_idx][index_access], twiddle_sin_a[s][twiddle_idx][index_access]}; accscalar_t d_twiddle_val = 0; #pragma unroll for (int item = 0; item < items_per_thread; item++) { scalar_t outputs[2] = {output_val[mult][item], output_val[mult + mult_stride][item]}; output_val[mult][item] = twiddle_val[0] * outputs[0] + twiddle_val[1] * outputs[1]; output_val[mult + mult_stride][item] = -twiddle_val[1] * outputs[0] + twiddle_val[0] * outputs[1]; scalar_t grads[2] = {grad_val[mult][item], grad_val[mult + mult_stride][item]}; d_twiddle_val += (grads[0] * output_val[mult][item] + grads[1] * output_val[mult + mult_stride][item]) * (-twiddle_val[1]) + (-grads[0] * output_val[mult + mult_stride][item] + grads[1] * output_val[mult][item]) * twiddle_val[0]; grad_val[mult][item] = twiddle_val[0] * grads[0] + twiddle_val[1] * grads[1]; grad_val[mult + mult_stride][item] = -twiddle_val[1] * grads[0] + twiddle_val[0] * grads[1]; } atomicAdd(&d_twiddle_a[s][twiddle_idx][index_access], d_twiddle_val); } } } } template <int log_n, bool increasing_stride, int items_per_thread=ITEMS_PER_THREAD_ORTHO_BACKWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_ORTHO_BACKWARD[log_n - 1], int 
max_smem_per_thread=items_per_thread, typename scalar_t> // C10_LAUNCH_BOUNDS_2 supposedly takes min(1 << log_n, 1024) // https://github.com/pytorch/pytorch/blob/v1.1.0/c10/macros/Macros.h // However, it doesn't seem to work correctly so I have to take min explicitly. C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_ortho_multiply_untied_backward_fast_cuda_kernel(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, InputReader<scalar_t> output_reader, InputReader<scalar_t> grad_reader, CudaAcsr<scalar_t, 3> d_twiddle_a, OutputWriter<scalar_t> d_input_writer, int batch_size) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t output_val[mult_per_warp][items_per_thread]; scalar_t grad_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); output_reader.load<items_per_thread, mult_per_warp>(output_val, input_idx_1); if (log_n <= 5) { grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); b_ortho_untied_backward<min_const(log_n, 5), increasing_stride, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, 0, input_idx_1, 0); } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 
5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); if (increasing_stride) { block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, output_val, threadIdx.x, thread_idx_2, nthreads); grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_2); b_ortho_untied_backward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, nsteps_1, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); __syncthreads(); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, output_val, thread_idx_2, threadIdx.x, nthreads); b_ortho_untied_backward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, 0, input_idx_1, 0); } else { grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); b_ortho_untied_backward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, nsteps_2, input_idx_1, 0); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, threadIdx.x, thread_idx_2, nthreads); __syncthreads(); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, output_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_backward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, 0, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); } } d_input_writer.save<items_per_thread, mult_per_warp>(grad_val, input_idx_1); } void butterfly_ortho_multiply_untied_backward_fast_cuda(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &output, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_untied_backward_fast_cuda", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> output_reader(output); const InputReader<scalar_t> grad_reader(grad); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_ORTHO_BACKWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ increasing_stride ? 
butterfly_ortho_multiply_untied_backward_fast_cuda_kernel<log_n_val, true> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, output_reader, grad_reader, d_twiddle_a, d_input_writer, batch_size) \ : butterfly_ortho_multiply_untied_backward_fast_cuda_kernel<log_n_val, false> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, output_reader, grad_reader, d_twiddle_a, d_input_writer, batch_size); break; // MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_ortho_multiply_untied_backward_fast_cuda failed with error code ", cudaGetLastError()); } template <int items_per_thread, int mult_per_warp=1, typename scalar_t> __device__ __forceinline__ void diag_forward(const CudaAcsr<scalar_t, 3> diagonal_a, scalar_t input_val[mult_per_warp][items_per_thread], int diagonal_idx, int input_idx) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { const scalar_t diag_val = diagonal_a[s][diagonal_idx][mult * warpSize + input_idx]; #pragma unroll for (int item = 0; item < items_per_thread; item++) { input_val[mult][item] *= diag_val; } } } template <int log_n, int items_per_thread=ITEMS_PER_THREAD_ORTHO_FORWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_ORTHO_FORWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> // C10_LAUNCH_BOUNDS_2 supposedly takes min(1 << log_n, 1024) // https://github.com/pytorch/pytorch/blob/v1.1.0/c10/macros/Macros.h // However, it doesn't seem to work correctly so I have to take min explicitly. C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_odo_multiply_untied_forward_fast_cuda_kernel(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, const CudaAcsr<scalar_t, 3> diagonal_a, InputReader<scalar_t> input_reader, OutputWriter<scalar_t> output_writer, int batch_size, int nblocks) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { for (int block = 0; block < nblocks; block++) { b_ortho_untied_forward<min_const(log_n, 5), false, items_per_thread> (twiddle_cos_a, twiddle_sin_a, input_val, block * 2 * log_n, input_idx_1, 0); diag_forward<items_per_thread>(diagonal_a, input_val, block, input_idx_1); b_ortho_untied_forward<min_const(log_n, 5), true, items_per_thread> (twiddle_cos_a, twiddle_sin_a, input_val, (block * 2 + 1) * log_n, input_idx_1, 0); } } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; constexpr int nsteps_1 = log_n <= 10 ? 
5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); for (int block = 0; block < nblocks; block++) { b_ortho_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, block * 2 * log_n, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); b_ortho_untied_forward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, block * 2 * log_n + nsteps_2, input_idx_1, 0); diag_forward<items_per_thread, mult_per_warp>(diagonal_a, input_val, block, input_idx_1); b_ortho_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, (block * 2 + 1) * log_n, input_idx_1, 0); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_forward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2, nsteps_1); } block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); } output_writer.save<items_per_thread, mult_per_warp>(input_val, input_idx_1); } void butterfly_odo_multiply_untied_forward_fast_cuda(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &diagonal, const at::Tensor &input, at::Tensor &output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_odo_multiply_untied_forward_fast_cuda", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto diagonal_a = diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); OutputWriter<scalar_t> output_writer(output); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_ORTHO_FORWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<log_n_val> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); break; MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_odo_multiply_untied_forward_fast_cuda failed with error code ", cudaGetLastError()); } template <int items_per_thread, int mult_per_warp=1, typename scalar_t, typename accscalar_t=at::acc_type<scalar_t, true>> 
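// diag_backward receives the post-diagonal activations in output_val: it divides by diag_val to
// recover the pre-diagonal values (this assumes the diagonal entries are nonzero), accumulates
// d_diag_val = sum over items of (pre-diagonal value) * grad, rescales grad by diag_val to
// continue the backward pass, and atomically adds the per-element result into d_diagonal_a.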
__device__ __forceinline__ void diag_backward(const CudaAcsr<scalar_t, 3> diagonal_a, CudaAcsr<scalar_t, 3> d_diagonal_a, scalar_t output_val[mult_per_warp][items_per_thread], scalar_t grad_val[mult_per_warp][items_per_thread], int diagonal_idx, int input_idx) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { const scalar_t diag_val = diagonal_a[s][diagonal_idx][mult * warpSize + input_idx]; accscalar_t d_diag_val = 0; #pragma unroll for (int item = 0; item < items_per_thread; item++) { output_val[mult][item] /= diag_val; d_diag_val += output_val[mult][item] * grad_val[mult][item]; grad_val[mult][item] *= diag_val; } atomicAdd(&d_diagonal_a[s][diagonal_idx][mult * warpSize + input_idx], d_diag_val); } } template <int log_n, int items_per_thread=ITEMS_PER_THREAD_ORTHO_BACKWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_ORTHO_BACKWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_odo_multiply_untied_backward_fast_cuda_kernel(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, const CudaAcsr<scalar_t, 3> diagonal_a, InputReader<scalar_t> output_reader, InputReader<scalar_t> grad_reader, CudaAcsr<scalar_t, 3> d_twiddle_a, CudaAcsr<scalar_t, 3> d_diagonal_a, OutputWriter<scalar_t> d_input_writer, int batch_size, int nblocks) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t output_val[mult_per_warp][items_per_thread]; scalar_t grad_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); output_reader.load<items_per_thread, mult_per_warp>(output_val, input_idx_1); if (log_n <= 5) { grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); for (int block = nblocks - 1; block >= 0; block--) { b_ortho_untied_backward<min_const(log_n, 5), true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, (block * 2 + 1) * log_n, input_idx_1, 0); diag_backward<items_per_thread>(diagonal_a, d_diagonal_a, output_val, grad_val, block, input_idx_1); b_ortho_untied_backward<min_const(log_n, 5), false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, block * 2 * log_n, input_idx_1, 0); } } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 
5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, output_val, threadIdx.x, thread_idx_2, nthreads); grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_2); for (int block = nblocks - 1; block >= 0; block--) { b_ortho_untied_backward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); __syncthreads(); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, output_val, thread_idx_2, threadIdx.x, nthreads); b_ortho_untied_backward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, (block * 2 + 1) * log_n, input_idx_1, 0); diag_backward<items_per_thread, mult_per_warp>(diagonal_a, d_diagonal_a, output_val, grad_val, block, input_idx_1); b_ortho_untied_backward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, block * 2 * log_n + nsteps_2, input_idx_1, 0); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, threadIdx.x, thread_idx_2, nthreads); __syncthreads(); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, output_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_backward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, output_val, grad_val, block * 2 * log_n, input_idx_2, nsteps_1); } block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); } d_input_writer.save<items_per_thread, mult_per_warp>(grad_val, input_idx_1); } void butterfly_odo_multiply_untied_backward_fast_cuda(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &diagonal, const at::Tensor &output, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_diagonal, at::Tensor& d_input) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_odo_multiply_untied_backward_fast_cuda", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto diagonal_a = diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> output_reader(output); const InputReader<scalar_t> grad_reader(grad); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_diagonal_a = d_diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, 
ITEMS_PER_THREAD_ORTHO_BACKWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<log_n_val> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); break; // MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_odo_multiply_untied_backward_fast_cuda failed with error code ", cudaGetLastError()); } template <int items_per_thread, int mult_per_warp=1, typename scalar_t, typename accscalar_t=at::acc_type<scalar_t, true>> __device__ __forceinline__ void diag_backward_with_input(const CudaAcsr<scalar_t, 3> diagonal_a, CudaAcsr<scalar_t, 3> d_diagonal_a, scalar_t input_val[mult_per_warp][items_per_thread], scalar_t grad_val[mult_per_warp][items_per_thread], int diagonal_idx, int input_idx) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well #pragma unroll for (int mult = 0; mult < mult_per_warp; mult++) { const scalar_t diag_val = diagonal_a[s][diagonal_idx][mult * warpSize + input_idx]; accscalar_t d_diag_val = 0; #pragma unroll for (int item = 0; item < items_per_thread; item++) { d_diag_val += input_val[mult][item] * grad_val[mult][item]; grad_val[mult][item] *= diag_val; } atomicAdd(&d_diagonal_a[s][diagonal_idx][mult * warpSize + input_idx], d_diag_val); } } template <int log_n, int items_per_thread=ITEMS_PER_THREAD_ORTHO_BACKWARD[log_n - 1], int min_blocks_per_mp=MIN_BLOCKS_PER_MP_ORTHO_BACKWARD[log_n - 1], int max_smem_per_thread=items_per_thread, typename scalar_t> C10_LAUNCH_BOUNDS_2(MIN_MACRO(1 << log_n, MAX_BLOCK_SIZE), min_blocks_per_mp) __global__ void butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel(const CudaAcsr<scalar_t, 3> twiddle_cos_a, const CudaAcsr<scalar_t, 3> twiddle_sin_a, const CudaAcsr<scalar_t, 3> diagonal_a, InputReader<scalar_t> input_reader, InputReader<scalar_t> grad_reader, IntermediateStorage<scalar_t> inter_storage, CudaAcsr<scalar_t, 3> d_twiddle_a, CudaAcsr<scalar_t, 3> d_diagonal_a, OutputWriter<scalar_t> d_input_writer, int batch_size, int nblocks) { constexpr int n = 1 << log_n; constexpr int nthreads = min_const(n, MAX_BLOCK_SIZE); constexpr int smem_limit = min_const(SMEM_PER_MP / min_blocks_per_mp, MAX_SMEM_PER_BLOCK); constexpr int smem_per_thread = min_const(max_smem_per_thread, items_per_thread, smem_limit / (nthreads * sizeof(scalar_t))); constexpr int mult_per_warp = n / nthreads; scalar_t input_val[mult_per_warp][items_per_thread]; scalar_t grad_val[mult_per_warp][items_per_thread]; // const int input_idx_1 = (threadIdx.x % warpSize) + mult_per_warp * warpSize * (threadIdx.x / warpSize); const int input_idx_1 = (threadIdx.x & ((1 << 5) - 1)) + mult_per_warp * warpSize * (threadIdx.x >> 5); input_reader.load<items_per_thread, mult_per_warp>(input_val, input_idx_1); if (log_n <= 5) { for (int block = 0; block < nblocks; block++) { b_ortho_untied_forward<min_const(log_n, 5), false, items_per_thread> (twiddle_cos_a, twiddle_sin_a, input_val, block * 2 * log_n, input_idx_1, 0); inter_storage.save<items_per_thread>(input_val, input_idx_1, block); diag_forward<items_per_thread>(diagonal_a, input_val, block, input_idx_1); 
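// The pre-diagonal activations for this block were just saved to inter_storage (at index block);
// the backward sweep below reloads them so diag_backward_with_input can accumulate d_diagonal
// from the stored values instead of reconstructing them by dividing by the diagonal.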
b_ortho_untied_forward<min_const(log_n, 5), true, items_per_thread> (twiddle_cos_a, twiddle_sin_a, input_val, (block * 2 + 1) * log_n, input_idx_1, 0); } grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_1); for (int block = nblocks - 1; block >= 0; block--) { b_ortho_untied_backward<min_const(log_n, 5), true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, input_val, grad_val, (block * 2 + 1) * log_n, input_idx_1, 0); inter_storage.load<items_per_thread>(input_val, input_idx_1, block); diag_backward_with_input<items_per_thread>(diagonal_a, d_diagonal_a, input_val, grad_val, block, input_idx_1); b_ortho_untied_backward<min_const(log_n, 5), false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, input_val, grad_val, block * 2 * log_n, input_idx_1, 0); } } else { __shared__ scalar_t temp_storage[nthreads * smem_per_thread]; // constexpr int nsteps_1 = div_up_const(log_n, 2); constexpr int nsteps_1 = log_n <= 10 ? 5 : log_n - 5; constexpr int nsteps_2 = max_const(log_n - nsteps_1, 1); // Take max to avoid compiler's warning constexpr int log_nwarps = min_const(max_const(log_n - 5, 1), 5); // Take max to avoid compiler's warning const int input_idx_2 = ((threadIdx.x & ((1 << log_nwarps) - 1)) << nsteps_1) + (threadIdx.x >> log_nwarps); const int thread_idx_2 = (threadIdx.x & ((1 << log_nwarps) - 1)) * warpSize + (threadIdx.x >> log_nwarps); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); for (int block = 0; block < nblocks; block++) { b_ortho_untied_forward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, block * 2 * log_n, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); b_ortho_untied_forward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, block * 2 * log_n + nsteps_2, input_idx_1, 0); inter_storage.save<items_per_thread, mult_per_warp>(input_val, input_idx_1, block); diag_forward<items_per_thread, mult_per_warp>(diagonal_a, input_val, block, input_idx_1); b_ortho_untied_forward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, (block * 2 + 1) * log_n, input_idx_1, 0); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_forward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, input_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2, nsteps_1); } grad_reader.load<items_per_thread, mult_per_warp>(grad_val, input_idx_2); for (int block = nblocks - 1; block >= 0; block--) { b_ortho_untied_backward<nsteps_2, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, input_val, grad_val, (block * 2 + 1) * log_n + nsteps_1, input_idx_2, nsteps_1); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); __syncthreads(); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, thread_idx_2, threadIdx.x, nthreads); b_ortho_untied_backward<nsteps_1, true, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, input_val, grad_val, (block * 2 + 1) * log_n, input_idx_1, 0); inter_storage.load<items_per_thread, mult_per_warp>(input_val, input_idx_1, block); 
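// input_val now holds the pre-diagonal activations checkpointed for this block during the forward
// sweep, so the diagonal gradient can be accumulated from them directly.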
diag_backward_with_input<items_per_thread, mult_per_warp>(diagonal_a, d_diagonal_a, input_val, grad_val, block, input_idx_1); b_ortho_untied_backward<nsteps_1, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, input_val, grad_val, block * 2 * log_n + nsteps_2, input_idx_1, 0); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, threadIdx.x, thread_idx_2, nthreads); __syncthreads(); block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, input_val, threadIdx.x, thread_idx_2, nthreads); b_ortho_untied_backward<nsteps_2, false, items_per_thread, mult_per_warp> (twiddle_cos_a, twiddle_sin_a, d_twiddle_a, input_val, grad_val, block * 2 * log_n, input_idx_2, nsteps_1); } block_exchange<items_per_thread, mult_per_warp, smem_per_thread>(temp_storage, grad_val, thread_idx_2, threadIdx.x, nthreads); } d_input_writer.save<items_per_thread, mult_per_warp>(grad_val, input_idx_1); } void butterfly_odo_multiply_untied_forward_backward_fast_cuda(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &diagonal, const at::Tensor &input, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_diagonal, at::Tensor& d_input) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle_cos.size(1) / (2 * log_n); auto intermediate_storage = at::empty({nblocks, batch_size, nstack, n}, at::dtype(input.dtype()).device(input.device())); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_odo_multiply_untied_forward_backward_fast_cuda", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto diagonal_a = diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); const InputReader<scalar_t> grad_reader(grad); IntermediateStorage<scalar_t> inter_storage(intermediate_storage); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_diagonal_a = d_diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_ORTHO_BACKWARD[log_n - 1]), 1, nstack); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { #define CASE_LOG_N(log_n_val) case log_n_val: \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<log_n_val> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); break; // MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) MAP(CASE_LOG_N, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_LOG_N TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_odo_multiply_untied_forward_backward_fast_cuda failed with error code ", cudaGetLastError()); } #if BFLY_BENCHMARK void butterfly_odo_multiply_untied_forward_fast_cuda_benchmark(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &diagonal, const at::Tensor &input, at::Tensor &output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n 
= int(log2((double) n)); const int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_odo_multiply_untied_forward_fast_cuda_benchmark", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto diagonal_a = diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); OutputWriter<scalar_t> output_writer(output); dim3 block(min(n, MAX_BLOCK_SIZE)); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { case 9: #define CASE_IPT_9(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<9, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<9, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); \ } while (0); // MAP(CASE_IPT_9, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 16); // MAP(CASE_IPT_9, 1, 2, 4, 6, 8, 12, 16); MAP(CASE_IPT_9, 6, 8, 12, 16); break; case 10: #define CASE_IPT_10(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<10, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<10, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); \ } while (0); // MAP(CASE_IPT_10, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 16); // MAP(CASE_IPT_10, 1, 2, 4, 6, 8, 12, 16); MAP(CASE_IPT_10, 6, 8, 12, 16); break; case 11: #define CASE_IPT_11(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<11, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_forward_fast_cuda_kernel<11, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, output_writer, batch_size, nblocks); \ } while (0); // MAP(CASE_IPT_11, 1, 2, 4, 6, 8, 12, 16); MAP(CASE_IPT_11, 6, 8, 12, 16); break; } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_IPT_9 #undef CASE_IPT_10 #undef CASE_IPT_11 TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_odo_multiply_untied_forward_fast_cuda_benchmark failed with error code ", cudaGetLastError()); } void butterfly_odo_multiply_untied_backward_fast_cuda_benchmark(const at::Tensor &twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &diagonal, const at::Tensor &output, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_diagonal, at::Tensor& d_input) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), 
"butterfly_odo_multiply_untied_backward_fast_cuda_benchmark", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto diagonal_a = diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> output_reader(output); const InputReader<scalar_t> grad_reader(grad); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_diagonal_a = d_diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { case 9: #define CASE_IPT_9(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<9, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<9, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<9, items_per_thread_val, 3> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<9, items_per_thread_val, 4> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ } while(0); // MAP(CASE_IPT_9, 1, 2, 4, 6, 8, 12, 16, 24); break; case 10: #define CASE_IPT_10(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<10, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<10, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<10, items_per_thread_val, 3> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_backward_fast_cuda_kernel<10, items_per_thread_val, 4> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, output_reader, grad_reader, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ } while(0); // MAP(CASE_IPT_10, 1, 2, 4, 6, 8, 12, 16, 24); break; } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_IPT_9 #undef CASE_IPT_10 TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_odo_multiply_untied_backward_fast_cuda_benchmark failed with error code ", cudaGetLastError()); } void butterfly_odo_multiply_untied_forward_backward_fast_cuda_benchmark(const at::Tensor 
&twiddle_cos, const at::Tensor &twiddle_sin, const at::Tensor &diagonal, const at::Tensor &input, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_diagonal, at::Tensor& d_input) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle_cos.size(1) / (2 * log_n); auto intermediate_storage = at::empty({nblocks, batch_size, nstack, n}, at::dtype(input.dtype()).device(input.device())); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_odo_multiply_untied_forward_backward_fast_cuda_benchmark", [&] { const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto diagonal_a = diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const InputReader<scalar_t> input_reader(input); const InputReader<scalar_t> grad_reader(grad); IntermediateStorage<scalar_t> inter_storage(intermediate_storage); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_diagonal_a = d_diagonal.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); OutputWriter<scalar_t> d_input_writer(d_input); dim3 block(min(n, MAX_BLOCK_SIZE)); auto stream = at::cuda::getCurrentCUDAStream(); switch (log_n) { case 9: #define CASE_IPT_9(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<9, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<9, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ } while(0); // MAP(CASE_IPT_9, 1, 2, 4, 6, 8, 12, 16, 24); MAP(CASE_IPT_9, 6, 8, 12, 16); break; case 10: #define CASE_IPT_10(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<10, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<10, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ } while(0); // MAP(CASE_IPT_10, 1, 2, 4, 6, 8, 12, 16, 24); MAP(CASE_IPT_9, 6, 8, 12, 16); break; case 11: #define CASE_IPT_11(items_per_thread_val) do { \ dim3 grid(div_up(batch_size, items_per_thread_val), 1, nstack); \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<11, items_per_thread_val, 1> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ butterfly_odo_multiply_untied_forward_backward_fast_cuda_kernel<11, items_per_thread_val, 2> \ <<<grid, block, 0, stream>>>(twiddle_cos_a, twiddle_sin_a, diagonal_a, input_reader, grad_reader, inter_storage, 
d_twiddle_a, d_diagonal_a, d_input_writer, batch_size, nblocks); \ } while(0); // MAP(CASE_IPT_11, 1, 2, 4, 6, 8, 12, 16, 24); MAP(CASE_IPT_9, 6, 8, 12, 16); break; } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_IPT_9 #undef CASE_IPT_10 #undef CASE_IPT_11 TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_odo_multiply_untied_forward_backward_fast_cuda_benchmark failed with error code ", cudaGetLastError()); } #endif // BFLY_BENCHMARK #if BFLY_MAX5_BENCHMARK void butterfly_multiply_untied_forward_max5_fast_cuda_benchmark(const at::Tensor &twiddle, const at::Tensor &input, at::Tensor &output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle.size(1) / (2 * log_n); auto stream = at::cuda::getCurrentCUDAStream(); const std::vector<int> bit_milestones = butterfly_max5_plan(log_n, nblocks, 8, increasing_stride); const int niters = bit_milestones.size() - 1; AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_max5_fast_cuda_benchmark", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); int twiddle_idx_start = 0; for (int iter = 0; iter < niters; iter++) { const InputReader<scalar_t> input_reader(iter == 0 ? input : output); OutputWriter<scalar_t> output_writer(output); const bool increasing_stride_this_iter = bit_milestones[iter] <= bit_milestones[iter + 1]; const int start_bit = increasing_stride_this_iter ? bit_milestones[iter] : bit_milestones[iter + 1]; const int nsteps = abs(bit_milestones[iter + 1] - bit_milestones[iter]); const int span = 1 << nsteps; const int n_div_span = 1 << (log_n - nsteps); // = n / span const int block_x = min(span, WARP_SIZE); const int max_block_y = MAX5_FORWARD_BLOCK_SIZE / block_x; switch (nsteps) { case 1: #define CASE_IPT_1(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<1, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<1, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_1, 1, 2, 4, 6, 8, 12, 16) break; case 2: #define CASE_IPT_2(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? 
butterfly_multiply_untied_forward_max5_fast_cuda_kernel<2, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<2, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_2, 1, 2, 4, 6, 8, 12, 16) break; case 3: #define CASE_IPT_3(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<3, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<3, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_3, 1, 2, 4, 6, 8, 12, 16) break; case 4: #define CASE_IPT_4(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<4, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<4, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_4, 1, 2, 4, 6, 8, 12, 16) break; case 5: #define CASE_IPT_5(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<5, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<5, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_5, 1, 2, 4, 6, 8, 12, 16) break; case 6: #define CASE_IPT_6(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? 
butterfly_multiply_untied_forward_max5_fast_cuda_kernel<6, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<6, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_6, 1, 2, 4, 6, 8, 12, 16) break; case 7: #define CASE_IPT_7(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<7, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<7, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_7, 1, 2, 4, 6, 8, 12, 16) break; case 8: #define CASE_IPT_8(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<8, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<8, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_8, 1, 2, 4, 6, 8, 12, 16) break; } twiddle_idx_start += nsteps; } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_IPT_1 #undef CASE_IPT_2 #undef CASE_IPT_3 #undef CASE_IPT_4 #undef CASE_IPT_5 #undef CASE_IPT_6 #undef CASE_IPT_7 #undef CASE_IPT_8 TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_max5_fast_cuda_benchmark failed with error code ", cudaGetLastError()); } void butterfly_multiply_untied_forward_backward_max5_fast_cuda_benchmark(const at::Tensor &twiddle, const at::Tensor &input, const at::Tensor &grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); const int nblocks = twiddle.size(1) / (2 * log_n); auto stream = at::cuda::getCurrentCUDAStream(); const std::vector<int> bit_milestones = butterfly_max5_plan(log_n, nblocks, 8, increasing_stride); const int niters = bit_milestones.size() - 1; auto intermediate_storage = at::empty({niters - 1, batch_size, nstack, n}, at::dtype(input.dtype()).device(input.device())); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_max5_fast_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); // Forward pass int twiddle_idx_start = 0; for (int iter = 0; iter < niters - 1; iter++) { const InputReader<scalar_t> input_reader(iter == 0 ? 
input : intermediate_storage[iter - 1]); OutputWriter<scalar_t> output_writer(intermediate_storage[iter]); const bool increasing_stride_this_iter = bit_milestones[iter] <= bit_milestones[iter + 1]; const int start_bit = increasing_stride_this_iter ? bit_milestones[iter] : bit_milestones[iter + 1]; const int nsteps = abs(bit_milestones[iter + 1] - bit_milestones[iter]); const int span = 1 << nsteps; const int n_div_span = 1 << (log_n - nsteps); // = n / span const int block_x = min(span, WARP_SIZE); const int max_block_y = MAX5_FORWARD_BLOCK_SIZE / block_x; dim3 block(block_x, min(max_block_y, div_up(batch_size, ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1]))); // grid.x must be at least n / span dim3 grid(div_up(batch_size, ITEMS_PER_THREAD_FORWARD_MAX5[nsteps - 1] * block.y) * n_div_span, 1, nstack); switch (nsteps) { #define CASE_NSTEPS_FORWARD(nsteps_val) case nsteps_val: \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_max5_fast_cuda_kernel<nsteps_val, true> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_max5_fast_cuda_kernel<nsteps_val, false> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, output_writer, log_n, twiddle_idx_start, start_bit); break; MAP(CASE_NSTEPS_FORWARD, 1, 2, 3, 4, 5, 6, 7) } twiddle_idx_start += nsteps; } // Backward pass twiddle_idx_start = log_n * (nblocks == 0 ? 1 : 2 * nblocks); for (int iter = niters - 1; iter >= 0; iter--) { const InputReader<scalar_t> input_reader(iter == 0 ? input : intermediate_storage[iter - 1]); const InputReader<scalar_t> grad_reader(iter == niters - 1 ? grad : d_input); OutputWriter<scalar_t> d_input_writer(d_input); const bool increasing_stride_this_iter = bit_milestones[iter] <= bit_milestones[iter + 1]; const int start_bit = increasing_stride_this_iter ? bit_milestones[iter] : bit_milestones[iter + 1]; const int nsteps = abs(bit_milestones[iter + 1] - bit_milestones[iter]); twiddle_idx_start -= nsteps; const int span = 1 << nsteps; const int n_div_span = 1 << (log_n - nsteps); // = n / span const int block_x = min(span, WARP_SIZE); const int max_block_y = MAX5_BACKWARD_BLOCK_SIZE / block_x; switch (nsteps) { case 1: #define CASE_IPT_1(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<1, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<1, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_1, 1, 2, 4, 6, 8, 12, 16) break; case 2: #define CASE_IPT_2(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? 
butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<2, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<2, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_2, 1, 2, 4, 6, 8, 12, 16) break; case 3: #define CASE_IPT_3(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<3, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<3, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_3, 1, 2, 4, 6, 8, 12, 16) break; case 4: #define CASE_IPT_4(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<4, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<4, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_4, 1, 2, 4, 6, 8, 12, 16) break; case 5: #define CASE_IPT_5(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<5, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<5, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_5, 1, 2, 4, 6, 8, 12, 16) break; case 6: #define CASE_IPT_6(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? 
butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<6, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<6, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_6, 1, 2, 4, 6, 8, 12, 16) break; case 7: #define CASE_IPT_7(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<7, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<7, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_7, 1, 2, 4, 6, 8, 12, 16) break; case 8: #define CASE_IPT_8(items_per_thread_val) do { \ dim3 block(block_x, min(max_block_y, div_up(batch_size, items_per_thread_val))); \ dim3 grid(div_up(batch_size, items_per_thread_val * block.y) * n_div_span, 1, nstack); \ increasing_stride_this_iter ? butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<8, true, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit) \ : butterfly_multiply_untied_forward_backward_max5_fast_cuda_kernel<8, false, items_per_thread_val> \ <<<grid, block, 0, stream>>>(twiddle_a, input_reader, grad_reader, d_twiddle_a, d_input_writer, \ log_n, twiddle_idx_start, start_bit); \ } while(0); MAP(CASE_IPT_8, 1, 2, 3, 4, 6, 8) break; } } }); // Have to keep this #undef outside the AT_DISPATCH_FLOATING_TYPES macro for it to work #undef CASE_NSTEPS_FORWARD #undef CASE_IPT_1 #undef CASE_IPT_2 #undef CASE_IPT_3 #undef CASE_IPT_4 #undef CASE_IPT_5 #undef CASE_IPT_6 #undef CASE_IPT_7 #undef CASE_IPT_8 TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_backward_max5_fast_cuda_benchmark failed with error code ", cudaGetLastError()); } #endif // BFLY_MAX5_BENCHMARK
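// ---------------------------------------------------------------------------
// Illustration (not part of the library above): every benchmark launcher above
// maps a runtime tuning value (items_per_thread, nsteps, increasing_stride)
// onto a compile-time template parameter by enumerating the supported values
// with a switch plus the MAP()/CASE_* macros. The snippet below shows the same
// idiom without macros, using a hypothetical `demo_copy_kernel`; it is a
// minimal sketch of the dispatch pattern, not of the butterfly kernels.
namespace dispatch_sketch {

template <int ItemsPerThread>
__global__ void demo_copy_kernel(const float* __restrict__ in,
                                 float* __restrict__ out,
                                 int batch_size) {
  // Each thread handles ItemsPerThread consecutive batch elements.
  const int base = (blockIdx.x * blockDim.x + threadIdx.x) * ItemsPerThread;
  #pragma unroll
  for (int i = 0; i < ItemsPerThread; ++i) {
    const int idx = base + i;
    if (idx < batch_size) out[idx] = in[idx];
  }
}

inline void launch_demo_copy(const float* in, float* out, int batch_size,
                             int items_per_thread, cudaStream_t stream) {
  const int threads = 256;
  auto grid_size = [&](int ipt) {
    return (batch_size + ipt * threads - 1) / (ipt * threads);
  };
  // Runtime -> compile-time dispatch, the role played by the CASE_IPT_* macros.
  switch (items_per_thread) {
    case 1:  demo_copy_kernel<1> <<<grid_size(1),  threads, 0, stream>>>(in, out, batch_size); break;
    case 2:  demo_copy_kernel<2> <<<grid_size(2),  threads, 0, stream>>>(in, out, batch_size); break;
    case 4:  demo_copy_kernel<4> <<<grid_size(4),  threads, 0, stream>>>(in, out, batch_size); break;
    case 8:  demo_copy_kernel<8> <<<grid_size(8),  threads, 0, stream>>>(in, out, batch_size); break;
    default: demo_copy_kernel<16><<<grid_size(16), threads, 0, stream>>>(in, out, batch_size); break;
  }
}

}  // namespace dispatch_sketch
// ---------------------------------------------------------------------------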
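// Illustration (host-only, not part of the code above): the max5 launchers
// read consecutive entries of `bit_milestones` to decide, for each iteration,
// whether the stride is increasing, which bit the butterfly starts at, and how
// many steps (and hence what span) a single kernel launch covers. The helper
// below reproduces only that bookkeeping for inspection; butterfly_max5_plan()
// itself is not reimplemented, and the milestone values in the usage comment
// are hypothetical.
#include <cstdio>
#include <cstdlib>
#include <vector>

static void print_max5_plan(const std::vector<int>& bit_milestones, int log_n) {
  const int niters = static_cast<int>(bit_milestones.size()) - 1;
  for (int iter = 0; iter < niters; ++iter) {
    const bool increasing = bit_milestones[iter] <= bit_milestones[iter + 1];
    const int start_bit   = increasing ? bit_milestones[iter] : bit_milestones[iter + 1];
    const int nsteps      = std::abs(bit_milestones[iter + 1] - bit_milestones[iter]);
    const int span        = 1 << nsteps;             // stride range covered by one launch
    const int n_div_span  = 1 << (log_n - nsteps);   // number of independent spans (= n / span)
    std::printf("iter %d: %s stride, start_bit=%d, nsteps=%d, span=%d, n/span=%d\n",
                iter, increasing ? "increasing" : "decreasing",
                start_bit, nsteps, span, n_div_span);
  }
}

// Example (hypothetical milestone values for log_n = 10):
//   print_max5_plan({0, 5, 10}, 10);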
namespace dart { inline __host__ __device__ unsigned char clamp(int c) { return min(max(0,c),255); } // h: 0-360 // s: 0 - 1 // v: 0 - 1 inline __host__ __device__ uchar3 hsv2rgb(float h, float s, float v) { float c = v*s; float hPrime = h/60.0f; float x = c*(1 - fabs(fmodf(hPrime,2) - 1)); float m = v-c; int hPrimeInt = hPrime; switch (hPrimeInt) { case 0: return make_uchar3(255*(c+m),255*(x+m),255*(m)); case 1: return make_uchar3(255*(x+m),255*(c+m),255*(m)); case 2: return make_uchar3(255*(m),255*(c+m),255*(x+m)); case 3: return make_uchar3(255*(m),255*(x+m),255*(c+m)); case 4: return make_uchar3(255*(x+m),255*(m),255*(c+m)); case 5: return make_uchar3(255*(c+m),255*(m),255*(x+m)); } return make_uchar3(0,0,0); } // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- template <ColorRamp R> __global__ void gpu_visualizeModelSdfPlane(uchar3 * img, const int width, const int height, const float2 origin, const float2 size, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float minVal, const float maxVal) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } uchar3 & imgVal = img[x + y*width]; float4 pc = make_float4(origin.x + x/(float)width*size.x, origin.y + y/(float)height*size.y, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); minSdfVal = min(minSdfVal,sdfVal); } if (minSdfVal > 1e19) { imgVal = make_uchar3(0,0,0); } else { // printf("%f\n",minSdfVal); float normVal = (minSdfVal-minVal)/(maxVal - minVal); switch (R) { case ColorRampGrayscale: imgVal = make_uchar3(clamp(255*normVal),clamp(255*normVal),clamp(255*normVal)); break; case ColorRampHeatMap: // if (normVal < 0.25) { imgVal = make_uchar3(0,clamp(255*(normVal/0.25)),255); } // else if (normVal < 0.5) { imgVal = make_uchar3(0,255,clamp(255*((0.5-normVal)/0.25))); } // else if (normVal < 0.75) { imgVal = make_uchar3(clamp(255*((normVal - 0.5)/0.25)),255,0); } // else { imgVal = make_uchar3(255,clamp(255*(1.0-normVal)/0.25),0); } imgVal = hsv2rgb(240*max(0.0f,min(1.0f-normVal,1.0f)),0.5,0.8); // imgVal.x = clamp(imgVal.x); // imgVal.y = clamp(imgVal.y); // imgVal.z = clamp(imgVal.z); break; case ColorRampRedGreen: // if (normVal < 0.5) { // imgVal = make_uchar3(clamp(200*(1-2*normVal)), // clamp(30*(1-2*normVal)), // clamp(30*(1-2*normVal))); // } // else { // imgVal = make_uchar3(clamp(30*(2*normVal - 1)), // clamp(200*(2*normVal - 1)), // clamp(30*(2*normVal - 1))); // } // if (normVal < 0.2) { // imgVal = make_uchar3(255, //0,0); // clamp(255*normVal/0.2), // clamp(255*normVal/0.2)); // } // else { // imgVal = make_uchar3(clamp(255*(1.0-normVal)/0.8), // 255, // clamp(255*(1.0-normVal/0.8))); // } if (normVal < 0.2) { imgVal = make_uchar3(clamp(255*(0.2-normVal)/0.2),0,0); } else { imgVal = make_uchar3(0,clamp(255*(normVal-0.2)/0.8),0); } break; } } } template <ColorRamp R> __global__ void gpu_visualizeModelSdfPlaneProjective(uchar3 * img, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float focalLength, const float minVal, const float maxVal) 
{ const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } uchar3 & imgVal = img[x + y*width]; float4 pc = make_float4((x-width/2)*planeDepth/focalLength, (y-height/2)*planeDepth/focalLength, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); minSdfVal = min(minSdfVal,sdfVal); } if (minSdfVal == 1e20) { imgVal = make_uchar3(0,0,0); } else { float normVal = (minSdfVal-minVal)/(maxVal - minVal); switch (R) { case ColorRampGrayscale: imgVal = make_uchar3(clamp(255*normVal),clamp(255*normVal),clamp(255*normVal)); break; case ColorRampHeatMap: if (normVal < 0.25) { imgVal = make_uchar3(0,clamp(255*(normVal/0.25)),255); } else if (normVal < 0.5) { imgVal = make_uchar3(0,255,clamp(255*((0.5-normVal)/0.25))); } else if (normVal < 0.75) { imgVal = make_uchar3(clamp(255*((normVal - 0.5)/0.25)),255,0); } else { imgVal = make_uchar3(255,clamp(255*(1.0-normVal)/0.25),0); } break; case ColorRampRedGreen: // if (normVal < 0.5) { // imgVal = make_uchar3(clamp(200*(1-2*normVal)), // clamp(30*(1-2*normVal)), // clamp(30*(1-2*normVal))); // } // else { // imgVal = make_uchar3(clamp(30*(2*normVal - 1)), // clamp(200*(2*normVal - 1)), // clamp(30*(2*normVal - 1))); // } if (normVal < 0.2) { imgVal = make_uchar3(255, 0,0); //clamp(255*normVal/0.2), //clamp(255*normVal/0.2)); } else { imgVal = make_uchar3(0,255,0);//make_uchar3(clamp(255*(1.0-normVal)/0.8), // 255, // clamp(255*(1.0-normVal/0.8)); } break; } } } template <bool firstModel> __global__ void gpu_getMultiModelSdfSlice(float * sdfVals, const int width, const int height, const float2 origin, const float2 size, const SE3 T_mp, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 ptp = make_float4(origin.x + (x+0.5)/(float)width*size.x, origin.y + (y+0.5)/(float)height*size.y, 0,1.0f); float4 ptm = T_mp * ptp; float minSdfVal = 1e20; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*ptm; const Grid3D<float> & sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf)*sdf.resolution; minSdfVal = min(minSdfVal,sdfVal); } if (firstModel || minSdfVal < sdfVals[x + y*width]) { sdfVals[x + y*width] = minSdfVal; } } __global__ void gpu_getModelSdfSlice(float * sdfVals, const int width, const int height, const float2 origin, const float2 size, const SE3 T_mp, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 ptp = make_float4(origin.x + (x+0.5)/(float)width*size.x, origin.y + (y+0.5)/(float)height*size.y, 0,1.0f); float4 ptm = T_mp * ptp; float minSdfVal = 1e20; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*ptm; const Grid3D<float> & sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if 
(!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf)*sdf.resolution; minSdfVal = min(minSdfVal,sdfVal); } sdfVals[x + y*width] = minSdfVal; } __global__ void gpu_getSdfSlice(float * sdfVals, const int width, const int height, const float2 origin, const float2 size, const SE3 T_sp, const Grid3D<float> * sdf) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 ptp = make_float4(origin.x + (x+0.5f)/(float)width*size.x, origin.y + (y+0.5f)/(float)height*size.y, 0,1.0f); float4 pts = T_sp * ptp; float3 pSdf = sdf->getGridCoords(make_float3(pts.x,pts.y,pts.z)); if (!sdf->isInBoundsInterp(pSdf)) { return; } const float sdfVal = sdf->getValueInterpolated(pSdf)*sdf->resolution; sdfVals[x + y*width] = sdfVal; } __global__ void gpu_getObservationSdfPlane(float * sdfVals, const int width, const int height, const Grid3D<float> * sdf, const float planeDepth) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float3 pSdf = make_float3((x/(float)(width-1))*sdf->dim.x, (y/(float)(height-1))*sdf->dim.y, planeDepth*sdf->dim.z); if (sdf->isInBoundsInterp(pSdf)) { sdfVals[x + y*width] = sdf->getValueInterpolated(pSdf); } else { sdfVals[x + y*width] = NAN; } } __global__ void gpu_getObservationSdfPlaneProjective(float * sdfVals, const int width, const int height, const Grid3D<float> * sdf, const float planeDepth, const float focalLength) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float3 pc = make_float3((x-width/2)*planeDepth/focalLength, (y-height/2)*planeDepth/focalLength, planeDepth); float3 pSdf = sdf->getGridCoords(pc); if (sdf->isInBoundsInterp(pSdf)) { sdfVals[x + y*width] = sdf->getValueInterpolated(pSdf); } else { sdfVals[x + y*width] = NAN; } } __global__ void gpu_getModelSdfPlaneProjective(float * sdf, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float focalLength) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 pc = make_float4((x-width/2)*planeDepth/focalLength, (y-height/2)*planeDepth/focalLength, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); minSdfVal = min(minSdfVal,sdfVal); } sdf[x + y*width] = minSdfVal; } __global__ void gpu_getModelSdfGradientPlaneProjective(float3 * grad, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float focalLength) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } float4 pc = make_float4((x-width/2)*planeDepth/focalLength, (y-height/2)*planeDepth/focalLength, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; int minSdf = -1; for (int s=0; s<nSdfs; ++s) { const int f = 
sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsGradientInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); if (sdfVal < minSdfVal) { minSdfVal = sdfVal; minSdf = s; } } if (minSdf != -1) { const int minF = sdfFrames[minSdf]; const float4 pMinF = T_fms[minF]*pm; const float3 pSdf = sdfs[minSdf].getGridCoords(make_float3(pMinF)); grad[x + y*width] = sdfs[minSdf].getGradientInterpolated(pSdf); } } __global__ void gpu_visualizeDataAssociationPlane(uchar3 * img, const int width, const int height, const float2 origin, const float2 size, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const uchar3 * sdfColors, const float planeDepth) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } uchar3 & imgVal = img[x + y*width]; float4 pc = make_float4(origin.x + x/(float)width*size.x, origin.y + y/(float)height*size.y, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; int minS = -1; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); if (sdfVal < minSdfVal) { minSdfVal = sdfVal; minS = s; } } if (minS == -1) { imgVal = make_uchar3(0,0,0); } else { imgVal = sdfColors[minS]; } } __global__ void gpu_visualizeDataAssociationPlaneProjective(uchar3 * img, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const uchar3 * sdfColors, const float planeDepth, const float focalLength) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } uchar3 &imgVal = img[x + y*width]; float4 pc = make_float4((x-width/2)*planeDepth/focalLength, (y-height/2)*planeDepth/focalLength, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; int minS = -1; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if (!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); if (sdfVal < minSdfVal) { minSdfVal = sdfVal; minS = s; } } if (minS == -1) { imgVal = make_uchar3(0,0,0); } else { imgVal = sdfColors[minS]; } } __global__ void gpu_visualizeDataAssociationPlaneProjective(uchar4 * img, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const uchar3 * sdfColors, const float planeDepth, const float focalLength) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } uchar4 & imgVal = img[x + y*width]; float4 pc = make_float4((x-width/2)*planeDepth/focalLength, (y-height/2)*planeDepth/focalLength, planeDepth,1.0f); float4 pm = T_mc * pc; float minSdfVal = 1e20; int minS = -1; for (int s=0; s<nSdfs; ++s) { const int f = sdfFrames[s]; const float4 pf = T_fms[f]*pm; const Grid3D<float> &sdf = sdfs[s]; float3 pSdf = sdf.getGridCoords(make_float3(pf.x,pf.y,pf.z)); if 
(!sdf.isInBoundsInterp(pSdf)) { continue; } const float sdfVal = sdf.getValueInterpolated(pSdf); if (sdfVal < minSdfVal) { minSdfVal = sdfVal; minS = s; } } if (minS == -1) { imgVal = make_uchar4(0,0,0,0); } else { imgVal = make_uchar4(sdfColors[minS].x,sdfColors[minS].y,sdfColors[minS].z,255); } } // -=-=-=-=-=-=-=-=-=- host interface functions -=-=-=-=-=-=-=-=-=- void visualizeModelSdfPlane(uchar3 * img, const int width, const int height, const float2 origin, const float2 size, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float minVal, const float maxVal, const ColorRamp ramp) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); switch(ramp) { case ColorRampGrayscale: gpu_visualizeModelSdfPlane<ColorRampGrayscale><<<grid,block>>>(img,width,height,origin,size,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,minVal,maxVal); break; case ColorRampHeatMap: gpu_visualizeModelSdfPlane<ColorRampHeatMap><<<grid,block>>>(img,width,height,origin,size,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,minVal,maxVal); break; case ColorRampRedGreen: gpu_visualizeModelSdfPlane<ColorRampRedGreen><<<grid,block>>>(img,width,height,origin,size,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,minVal,maxVal); } } void visualizeModelSdfPlaneProjective(uchar3 * img, const int width, const int height, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float focalLength, const float minVal, const float maxVal, const ColorRamp ramp) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); switch(ramp) { case ColorRampGrayscale: gpu_visualizeModelSdfPlaneProjective<ColorRampGrayscale><<<grid,block>>>(img,width,height,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,focalLength,minVal,maxVal); break; case ColorRampHeatMap: gpu_visualizeModelSdfPlaneProjective<ColorRampHeatMap><<<grid,block>>>(img,width,height,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,focalLength,minVal,maxVal); break; case ColorRampRedGreen: gpu_visualizeModelSdfPlaneProjective<ColorRampRedGreen><<<grid,block>>>(img,width,height,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,focalLength,minVal,maxVal); } } void getMultiModelSdfSlice(float * sdfSlice, const int width, const int height, const float2 origin, const float2 size, const std::vector<SE3> & T_pm, const std::vector<MirroredModel*> & models) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getMultiModelSdfSlice<true><<<grid,block>>>(sdfSlice,width,height,origin,size,T_pm[0], models[0]->getDeviceTransformsModelToFrame(), models[0]->getDeviceSdfFrames(), models[0]->getDeviceSdfs(), models[0]->getNumSdfs()); for (int m=1; m<models.size(); ++m) { gpu_getMultiModelSdfSlice<false><<<grid,block>>>(sdfSlice,width,height,origin,size,T_pm[m], models[m]->getDeviceTransformsModelToFrame(), models[m]->getDeviceSdfFrames(), models[m]->getDeviceSdfs(), models[m]->getNumSdfs()); } } void getModelSdfSlice(float * sdfSlice, const int width, const int height, const float2 origin, const float2 size, const SE3 & T_pm, const MirroredModel & model) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getModelSdfSlice<<<grid,block>>>(sdfSlice,width,height,origin,size,T_pm, model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs()); } void 
getSdfSlice(float * sdfSlice, const int width, const int height, const float2 origin, const float2 size, const SE3 & T_sp, const Grid3D<float> * deviceSdf) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getSdfSlice<<<grid,block>>>(sdfSlice,width,height,origin,size,T_sp,deviceSdf); } void getModelSdfPlaneProjective(float * sdf, const int width, const int height, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float focalLength) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getModelSdfPlaneProjective<<<grid,block>>>(sdf,width,height,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,focalLength); } void getModelSdfGradientPlaneProjective(float3 * grad, const int width, const int height, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float planeDepth, const float focalLength) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getModelSdfGradientPlaneProjective<<<grid,block>>>(grad,width,height,T_mc,T_fms,sdfFrames,sdfs,nSdfs,planeDepth,focalLength); } void getObservationSdfPlane(float * sdfVals, const int width, const int height, const Grid3D<float> * sdf, const float planeDepth) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getObservationSdfPlane<<<grid,block>>>(sdfVals, width, height, sdf, planeDepth); } void getObservationSdfPlaneProjective(float * sdfVals, const int width, const int height, const Grid3D<float> * sdf, const float planeDepth, const float focalLength) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_getObservationSdfPlaneProjective<<<grid,block>>>(sdfVals, width, height, sdf, planeDepth, focalLength); } void visualizeDataAssociationPlane(uchar3 * img, const int width, const int height, const float2 origin, const float2 size, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const uchar3 * sdfColors, const float planeDepth) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_visualizeDataAssociationPlane<<<grid,block>>>(img,width,height, origin,size, T_mc,T_fms,sdfFrames, sdfs,nSdfs,sdfColors, planeDepth); } void visualizeDataAssociationPlaneProjective(uchar3 * img, const int width, const int height, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const uchar3 * sdfColors, const float planeDepth, const float focalLength) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_visualizeDataAssociationPlaneProjective<<<grid,block>>>(img,width,height, T_mc,T_fms,sdfFrames, sdfs,nSdfs,sdfColors, planeDepth,focalLength); } void visualizeDataAssociationPlaneProjective(uchar4 * img, const int width, const int height, const SE3 & T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const uchar3 * sdfColors, const float planeDepth, const float focalLength) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_visualizeDataAssociationPlaneProjective<<<grid,block>>>(img,width,height, T_mc,T_fms,sdfFrames, sdfs,nSdfs,sdfColors, planeDepth,focalLength); } }
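// Illustration (host-side only): a CPU mirror of the dart::hsv2rgb() device
// helper defined above, convenient for spot-checking the color ramps without a
// GPU. The RGB8 struct and hsv2rgb_host() are additions for this example and
// are not used by any of the kernels or wrappers above.
#include <cmath>

struct RGB8 { unsigned char r, g, b; };

static RGB8 hsv2rgb_host(float h, float s, float v) {
  // h in [0, 360), s and v in [0, 1], matching the device version.
  const float c = v * s;
  const float hPrime = h / 60.0f;
  const float x = c * (1.0f - std::fabs(std::fmod(hPrime, 2.0f) - 1.0f));
  const float m = v - c;
  float r = 0.f, g = 0.f, b = 0.f;
  switch (static_cast<int>(hPrime)) {
    case 0: r = c; g = x; break;
    case 1: r = x; g = c; break;
    case 2: g = c; b = x; break;
    case 3: g = x; b = c; break;
    case 4: r = x; b = c; break;
    case 5: r = c; b = x; break;
    default: return {0, 0, 0};   // out-of-range hue, as in the device version
  }
  return { static_cast<unsigned char>(255 * (r + m)),
           static_cast<unsigned char>(255 * (g + m)),
           static_cast<unsigned char>(255 * (b + m)) };
}

// Example: the heat-map ramp above maps normVal in [0,1] to a hue sweeping
// from blue (240) down to red (0):
//   RGB8 color = hsv2rgb_host(240.0f * (1.0f - normVal), 0.5f, 0.8f);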
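// Illustration: the dart host wrappers above all share the same 2-D launch
// recipe: a 16x8 thread block, a grid sized by ceiling division of the image
// dimensions, and an early-out guard inside the kernel for the partial blocks
// at the right/bottom border. Below is a minimal standalone version of that
// recipe, using a hypothetical fill kernel that is not part of the dart API.
__global__ void fill_image_example(uchar3* img, const int width, const int height,
                                   const uchar3 value) {
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height) { return; }   // guard for partial border blocks
  img[x + y * width] = value;
}

static void fillImageExample(uchar3* img, const int width, const int height,
                             const uchar3 value) {
  dim3 block(16, 8, 1);
  dim3 grid((width + block.x - 1) / block.x,    // same as ceil(width / 16.0f)
            (height + block.y - 1) / block.y);  // same as ceil(height / 8.0f)
  fill_image_example<<<grid, block>>>(img, width, height, value);
}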
* \file * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include <limits> #include "dispatch/dispatch_reduce.cuh" #include "dispatch/dispatch_reduce_by_key.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. ![](reduce_logo.png) * \ingroup SingleModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>) * uses a binary combining operator to compute a single aggregate from a sequence of input elements. * * \par Usage Considerations * \cdp_class{DeviceReduce} * * \par Performance * \linear_performance{reduction, reduce-by-key, and run-length encode} * * \par * The following chart illustrates DeviceReduce::Sum * performance across different CUDA architectures for \p int32 keys. * * \image html reduce_int32.png * * \par * The following chart illustrates DeviceReduce::ReduceByKey (summation) * performance across different CUDA architectures for \p fp32 * values. Segments are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. * * \image html reduce_by_key_fp32_len_500.png * * \par * \plots_below * */ struct DeviceReduce { /** * \brief Computes a device-wide reduction using the specified binary \p reduction_op functor and initial value \p init. * * \par * - Does not support binary reduction operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates a user-defined min-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * __device__ __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * CustomMin min_op; * int init; // e.g., INT_MAX * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op, init); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduction * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op, init); * * // d_out <-- [0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam ReductionOpT <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam T <b>[inferred]</b> Data element type that is convertible to the \p value type of \p InputIteratorT */ template < typename InputIteratorT, typename OutputIteratorT, typename ReductionOpT, typename T> CUB_RUNTIME_FUNCTION static cudaError_t Reduce( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) ReductionOpT reduction_op, ///< [in] Binary reduction functor T init, ///< [in] Initial value of the reduction cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, init, stream, debug_synchronous); } /** * \brief Computes a device-wide sum using the addition (\p +) operator. * * \par * - Uses \p 0 as the initial value of the reduction. * - Does not support \p + operators that are non-commutative.. * - \devicestorage * * \par Performance * The following charts illustrate saturated sum-reduction performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. * * \image html reduce_int32.png * \image html reduce_int64.png * * \par Snippet * The code snippet below illustrates the sum-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sum-reduction * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // d_out <-- [38] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Sum( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef typename std::iterator_traits<InputIteratorT>::value_type T; // Data element type return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Sum>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Sum(), T(), // zero-initialize stream, debug_synchronous); } /** * \brief Computes a device-wide minimum using the less-than ('<') operator. * * \par * - Uses <tt>std::numeric_limits<T>::max()</tt> as the initial value of the reduction. * - Does not support \p < operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the min-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run min-reduction * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // d_out <-- [0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Min( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef typename std::iterator_traits<InputIteratorT>::value_type T; // Data element type return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Min>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Min(), Traits<T>::Max(), // replace with std::numeric_limits<T>::max() when C++11 support is more prevalent stream, debug_synchronous); } /** * \brief Finds the first device-wide minimum using the less-than ('<') operator, also returning the index of that item. * * \par * - The output value type of \p d_out is cub::KeyValuePair <tt><int, T></tt> (assuming the value type of \p d_in is \p T) * - The minimum is written to <tt>d_out.value</tt> and its offset in the input array is written to <tt>d_out.key</tt>. * - The <tt>{1, std::numeric_limits<T>::max()}</tt> tuple is produced for zero-length inputs * - Does not support \p < operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the argmin-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * KeyValuePair<int, int> *d_out; // e.g., [{-,-}] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmin-reduction * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_items); * * // d_out <-- [{5, 0}] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>cub::KeyValuePair<int, T></tt>) \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ArgMin( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef typename std::iterator_traits<InputIteratorT>::value_type T; // Data element type typedef ArgIndexInputIterator<InputIteratorT, int> ArgIndexInputIteratorT; // Wrapped input iterator type ArgIndexInputIteratorT d_argmin_in(d_in); KeyValuePair<OffsetT, T> init = {1, Traits<T>::Max()}; // replace with std::numeric_limits<T>::max() when C++11 support is more prevalent return DispatchReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetT, cub::ArgMin>::Dispatch( d_temp_storage, temp_storage_bytes, d_argmin_in, d_out, num_items, cub::ArgMin(), init, stream, debug_synchronous); } /** * \brief Computes a device-wide maximum using the greater-than ('>') operator. * * \par * - Uses <tt>std::numeric_limits<T>::lowest()</tt> as the initial value of the reduction. * - Does not support \p > operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the max-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_max, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run max-reduction * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_max, num_items); * * // d_out <-- [9] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Max( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. 
Also causes launch configurations to be printed to the console. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef typename std::iterator_traits<InputIteratorT>::value_type T; // Data element type return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Max>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Max(), Traits<T>::Lowest(), // replace with std::numeric_limits<T>::lowest() when C++11 support is more prevalent stream, debug_synchronous); } /** * \brief Finds the first device-wide maximum using the greater-than ('>') operator, also returning the index of that item * * \par * - The output value type of \p d_out is cub::KeyValuePair <tt><int, T></tt> (assuming the value type of \p d_in is \p T) * - The maximum is written to <tt>d_out.value</tt> and its offset in the input array is written to <tt>d_out.key</tt>. * - The <tt>{1, std::numeric_limits<T>::lowest()}</tt> tuple is produced for zero-length inputs * - Does not support \p > operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the argmax-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * KeyValuePair<int, int> *d_out; // e.g., [{-,-}] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_argmax, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmax-reduction * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_argmax, num_items); * * // d_out <-- [{6, 9}] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>cub::KeyValuePair<int, T></tt>) \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ArgMax( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ typedef int OffsetT; // Signed integer type for global offsets typedef typename std::iterator_traits<InputIteratorT>::value_type T; // Data element type typedef ArgIndexInputIterator<InputIteratorT, int> ArgIndexInputIteratorT; // Wrapped input iterator ArgIndexInputIteratorT d_argmax_in(d_in); KeyValuePair<OffsetT, T> init = {1, Traits<T>::Lowest()}; // replace with std::numeric_limits<T>::lowest() when C++11 support is more prevalent return DispatchReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetT, cub::ArgMax>::Dispatch( d_temp_storage, temp_storage_bytes, d_argmax_in, d_out, num_items, cub::ArgMax(), init, stream, debug_synchronous); } /** * \brief Reduces segments of values, where segments are demarcated by corresponding runs of identical keys. * * \par * This operation computes segmented reductions within \p d_values_in using * the specified binary \p reduction_op functor. The segments are identified by * "runs" of corresponding keys in \p d_keys_in, where runs are maximal ranges of * consecutive, identical keys. For the <em>i</em><sup>th</sup> run encountered, * the first key of the run and the corresponding value aggregate of that run are * written to <tt>d_unique_out[<em>i</em>]</tt> and <tt>d_aggregates_out[<em>i</em>]</tt>, * respectively. The total number of runs encountered is written to \p d_num_runs_out. * * \par * - The <tt>==</tt> equality operator is used to determine whether keys are equivalent * - \devicestorage * * \par Performance * The following chart illustrates reduction-by-key (sum) performance across * different CUDA architectures for \p fp32 and \p fp64 values, respectively. Segments * are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. * * \image html reduce_by_key_fp32_len_500.png * \image html reduce_by_key_fp64_len_500.png * * \par * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: * * \image html reduce_by_key_fp32_len_5.png * \image html reduce_by_key_fp64_len_5.png * * \par Snippet * The code snippet below illustrates the segmented reduction of \p int values grouped * by runs of associated \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * CUB_RUNTIME_FUNCTION __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_keys_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_values_in; // e.g., [0, 7, 1, 6, 2, 5, 3, 4] * int *d_unique_out; // e.g., [-, -, -, -, -, -, -, -] * int *d_aggregates_out; // e.g., [-, -, -, -, -, -, -, -] * int *d_num_runs_out; // e.g., [-] * CustomMin reduction_op; * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduce-by-key * cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); * * // d_unique_out <-- [0, 2, 9, 5, 8] * // d_aggregates_out <-- [0, 1, 6, 2, 4] * // d_num_runs_out <-- [5] * * \endcode * * \tparam KeysInputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input keys \iterator * \tparam UniqueOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing unique output keys \iterator * \tparam ValuesInputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input values \iterator * \tparam AggregatesOutputIterator <b>[inferred]</b> Random-access output iterator type for writing output value aggregates \iterator * \tparam NumRunsOutputIteratorT <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator * \tparam ReductionOpT <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename KeysInputIteratorT, typename UniqueOutputIteratorT, typename ValuesInputIteratorT, typename AggregatesOutputIteratorT, typename NumRunsOutputIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t ReduceByKey( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in, ///< [in] Pointer to the input sequence of keys UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) ValuesInputIteratorT d_values_in, ///< [in] Pointer to the input sequence of corresponding values AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run) NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out) ReductionOpT reduction_op, ///< [in] Binary reduction functor int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ typedef int OffsetT; // Signed integer type for global offsets typedef NullType* FlagIterator; // FlagT iterator type (not used) typedef NullType SelectOp; // Selection op (not used) typedef Equality EqualityOp; // Default == operator return DispatchReduceByKey<KeysInputIteratorT, UniqueOutputIteratorT, ValuesInputIteratorT, AggregatesOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, ReductionOpT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, EqualityOp(), reduction_op, num_items, stream, debug_synchronous); } }; /** * \example example_device_reduce.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
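/*
 * A minimal, self-contained host driver (illustrative sketch only, not part of
 * the CUB header above) that fleshes out the ReduceByKey snippet with the
 * allocation, copies, result readback, and cleanup the snippet elides. The
 * example data matches the snippet; error checking is omitted for brevity.
 */
#include <cub/cub.cuh>
#include <cstdio>

struct CustomMin
{
    template <typename T>
    __host__ __device__ __forceinline__
    T operator()(const T &a, const T &b) const { return (b < a) ? b : a; }
};

int main()
{
    const int num_items = 8;
    int h_keys[num_items]   = {0, 2, 2, 9, 5, 5, 5, 8};
    int h_values[num_items] = {0, 7, 1, 6, 2, 5, 3, 4};

    int *d_keys_in, *d_values_in, *d_unique_out, *d_aggregates_out, *d_num_runs_out;
    cudaMalloc(&d_keys_in,        num_items * sizeof(int));
    cudaMalloc(&d_values_in,      num_items * sizeof(int));
    cudaMalloc(&d_unique_out,     num_items * sizeof(int));
    cudaMalloc(&d_aggregates_out, num_items * sizeof(int));
    cudaMalloc(&d_num_runs_out,   sizeof(int));
    cudaMemcpy(d_keys_in,   h_keys,   num_items * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_values_in, h_values, num_items * sizeof(int), cudaMemcpyHostToDevice);

    CustomMin reduction_op;

    // First call with d_temp_storage == NULL only queries temp_storage_bytes.
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;
    cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes,
                                   d_keys_in, d_unique_out,
                                   d_values_in, d_aggregates_out,
                                   d_num_runs_out, reduction_op, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Second call performs the reduce-by-key.
    cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes,
                                   d_keys_in, d_unique_out,
                                   d_values_in, d_aggregates_out,
                                   d_num_runs_out, reduction_op, num_items);

    int h_num_runs = 0;
    cudaMemcpy(&h_num_runs, d_num_runs_out, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("num runs: %d\n", h_num_runs);   // expected: 5

    cudaFree(d_temp_storage);
    cudaFree(d_keys_in);     cudaFree(d_values_in);
    cudaFree(d_unique_out);  cudaFree(d_aggregates_out);  cudaFree(d_num_runs_out);
    return 0;
}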
#include <cmath> #include <cfloat> #include <cstdio> #include <cstdlib> #include <cassert> #include <iostream> #include <algorithm> #include <complex> #include "config.h" #include "symbol.h" #include "dvector.h" #include "util.h" #include "recfilter.h" namespace rod { #if CUDA_SM >= 20 # define W1 8 # define NB1 6 # define W23 8 # define NB23 6 # define W45 7 # define NB45 5 # define W6 8 # define NB6 7 #else # define W1 8 # define NB1 4 # define W23 8 # define NB23 0 # define W45 8 # define NB45 0 # define W6 8 # define NB6 4 #endif /*! @mainpage recursive-filtering \section notes Notes Naming conventions are: c_ constant; t_ texture; g_ global memory; s_ shared memory; d_ device pointer; a_ cuda-array; p_ template parameter; f_ surface. */ #if !CONST_DATA_DEFINED #define CONST_DATA_DEFINED 1 template <int R> struct const_data { int width, height, rowstride, adj_width, adj_height, m_size, // number of column-blocks, n_size, // number of row-blocks, last_m, last_n, border; float inv_width, inv_height; Vector<float,R+1> weights; Matrix<float,R,R> AbF_T, AbR_T, HARB_AFP_T, AbF, AbR, HARB_AFP; Matrix<float,R,WS> ARE_T, HARB_AFB, TAFB, ARB_AFP_T; }; template <int R> struct get_cdata; #endif __constant__ const_data<ORDER> PP_CAT(cdata_,ORDER); #define cdata PP_CAT(cdata_,ORDER) template <> struct get_cdata<ORDER> { static const_data<ORDER> &get() { return cdata; } }; #ifndef TEXTURE_DEFINED #define TEXTURE_DEFINED 1 texture<float, cudaTextureType2D, cudaReadModeElementType> t_in; #endif //=== IMPLEMENTATION ========================================================== #ifndef AUX_FUNCS_DEFINED #define AUX_FUNCS_DEFINED 1 template <int W, int U, int V> __device__ void read_block(Matrix<float,U,V> &block, int m, int n, float inv_width, float inv_height) { int tx = threadIdx.x, ty = threadIdx.y; float tu = ((m-cdata.border)*WS+tx+.5f)*inv_width, tv = ((n-cdata.border)*WS+ty+.5f)*inv_height; float (*bdata)[V] = (float (*)[V]) &block[ty][tx] #if CUDA_SM >= 20 ,(*bdata2)[V] = (float (*)[V])&block[ty+WS][tx] #endif ; #pragma unroll for(int i=0; i<WS-(WS%W); i+=W) { **bdata = tex2D(t_in, tu, tv); bdata += W; #if CUDA_SM >= 20 **bdata2 = tex2D(t_in, tu+WS*inv_width, tv); bdata2 += W; #endif tv += W*inv_height; } if(ty < WS%W) { **bdata = tex2D(t_in, tu, tv); #if CUDA_SM >= 20 **bdata2 = tex2D(t_in, tu+WS*inv_width, tv); #endif } } template <int W, int U, int V> __device__ void write_block(float *out, const Matrix<float,U,V> &block, int width, int height, int rowstride, int m, int n, int last_m, int last_n) { int tx = threadIdx.x, ty = threadIdx.y; // current block intersects transp_out's area? 
// if(m >= cdata.border && m <= last_m && n >= cdata.border && n <= last_n) { int y = (n-cdata.border)*WS, x = (m-cdata.border)*WS+tx; out += y*rowstride + x; // if(y < height) { int maxy = min(height, y+WS); for(int i=0; y<maxy; ++y, ++i, out += width) *out = block[ty*WS+i][tx]; } } } template <class T, int R> __device__ Vector<T,R> mad(Matrix<T,R,WS> &r, const Vector<T,R> &a, const Matrix<T,R,R> &b) { #pragma unroll for(int j=0; j<R; ++j) { T acc = *r[j]; #pragma unroll for(int i=0; i<R; ++i) acc += a[i]*b[i][j]; *r[j] = acc; } return r.col(0); } template <class T, int R> __device__ Vector<T,R> mad(Matrix<T,R,WS> &r, const Matrix<T,R,R> &a, const Vector<T,R> &b) { #pragma unroll for(int i=0; i<R; ++i) { T acc = *r[i]; #pragma unroll for(int j=0; j<R; ++j) acc += a[i][j]*b[j]; *r[i] = acc; } return r.col(0); } template <class T, int R> __device__ void mad(Matrix<T,R,WS> &r, const Matrix<T,R,WS> &a, const Matrix<T,R,R> &b) { #pragma unroll for(int j=0; j<R; ++j) { T acc = *r[j]; #pragma unroll for(int i=0; i<R; ++i) acc += *a[i]*b[i][j]; *r[j] = acc; } } template <class T, int R> __device__ void mad(Matrix<T,R,WS> &r, const Matrix<T,R,R> &a, const Matrix<T,R,WS> &b) { #pragma unroll for(int i=0; i<R; ++i) { T acc = *r[i]; #pragma unroll for(int j=0; j<R; ++j) acc += a[i][j]* *b[j]; *r[i] = acc; } } template <class T, int R> __device__ void mad(Matrix<T,R,WS> &r, const Matrix<T,R,WS> &a, const Matrix<T,R,WS> &b, const Matrix<T,R,WS> &c, volatile T (*block_RD)[WS/2+WS+1]) { int tx = threadIdx.x, ty = threadIdx.y; Matrix<T,R,R> rint; for(int i=0; i<R; ++i) { for(int j=0; j<R; ++j) { block_RD[ty][tx] = a[i][tx] * *b[j]; block_RD[ty][tx] += block_RD[ty][tx-1]; block_RD[ty][tx] += block_RD[ty][tx-2]; block_RD[ty][tx] += block_RD[ty][tx-4]; block_RD[ty][tx] += block_RD[ty][tx-8]; block_RD[ty][tx] += block_RD[ty][tx-16]; rint[i][j] = block_RD[ty][WS-1]; } } mad(r, rint, (const Matrix<T,R,WS> &)c[0][tx]); } #endif /** * @brief Algorithm 5 stage 1 * * This function computes the algorithm stage 5.1 following: * * In parallel for all $m$ and $n$, compute and store each * $P_{m,n}(\bar{Y})$, $E_{m,n}(\hat{Z})$, $P^\T_{m,n}(\check{U})$, * and $E^\T_{m,n}(\tilde{V})$. * * @param[in] g_in Input image * @param[out] g_transp_ybar All P_{m,n}(\bar{Y}) * @param[out] g_transp_zhat All $E_{m,n}(\hat{Z})$ * @param[out] g_ucheck All $P^\T_{m,n}(\check{U})$ * @param[out] g_vtilde All $E^\T_{m,n}(\tilde{V})$ */ __global__ #if NB1 __launch_bounds__(WS*W1, NB1) #endif void collect_carries(Matrix<float,ORDER,WS> *g_pybar, Matrix<float,ORDER,WS> *g_ezhat, Matrix<float,ORDER,WS> *g_ptucheck, Matrix<float,ORDER,WS> *g_etvtilde) { int tx = threadIdx.x, ty = threadIdx.y, #if CUDA_SM >= 20 m = blockIdx.x*2, #else m = blockIdx.x, #endif n = blockIdx.y; // each cuda block will work on two horizontally adjacent WSxWS input data // blocks, so allocate enough shared memory for these. 
#if CUDA_SM >= 20 __shared__ Matrix<float,WS*2,WS+1> block; #else __shared__ Matrix<float,WS,WS+1> block; #endif // load data into shared memory read_block<W1>(block, m, n, cdata.inv_width, cdata.inv_height); #if CUDA_SM >= 20 m += ty; if(m >= cdata.m_size) return; #endif __syncthreads(); #if CUDA_SM >= 20 if(ty < 2) #else if(ty == 0) #endif { Matrix<float,ORDER,WS> &pybar = (Matrix<float,ORDER,WS>&)g_pybar[n*cdata.m_size+m][0][tx], &ezhat = (Matrix<float,ORDER,WS>&)g_ezhat[n*cdata.m_size+m][0][tx], &ptucheck = (Matrix<float,ORDER,WS>&)g_ptucheck[n*cdata.m_size+m][0][tx], &etvtilde = (Matrix<float,ORDER,WS>&)g_etvtilde[n*cdata.m_size+m][0][tx]; const float B0_1 = cdata.weights[0], B0_2 = B0_1*B0_1, B0_3 = B0_2*B0_1, B0_4 = B0_2*B0_2; { float *bdata = block[tx+ty*WS]; // calculate pybar, scan left -> right { Vector<float,ORDER> p = zeros<float,ORDER>(); p[ORDER-1] = *bdata++; #pragma unroll for(int j=1; j<WS; ++j, ++bdata) { #if CUDA_SM >= 20 || ORDER>1 *bdata = fwd(p, *bdata, cdata.weights); #else *bdata = p[0] = rec_op(*bdata, p[0]*cdata.weights[1]); #endif } if(m < cdata.m_size-1) pybar.set_col(0, p*B0_1); } { --bdata; Vector<float,ORDER> e = zeros<float,ORDER>(); e[0] = *bdata--; #pragma unroll for(int j=WS-2; j>=0; --j, --bdata) { #if CUDA_SM >= 20 || ORDER>1 *bdata = rev(*bdata, e, cdata.weights); #else *bdata = e[0] = rec_op(*bdata, e[0]*cdata.weights[1]); #endif } if(m > 0) ezhat.set_col(0, e*B0_2); } } { float (*bdata)[WS+1] = (float (*)[WS+1]) &block[ty*WS][tx]; { Vector<float,ORDER> p = zeros<float,ORDER>(); p[ORDER-1] = **bdata++; #pragma unroll for(int i=1; i<WS; ++i, ++bdata) { #if CUDA_SM >= 20 || ORDER>1 **bdata = fwd(p, **bdata, cdata.weights); #else **bdata = p[0] = rec_op(**bdata, p[0]*cdata.weights[1]); #endif } if(n < cdata.n_size-1) ptucheck.set_col(0, p*B0_3); } if(n > 0) { --bdata; Vector<float,ORDER> e = zeros<float,ORDER>(); e[0] = **bdata--; #pragma unroll for(int i=WS-2; i>=0; --i, --bdata) { #if CUDA_SM >= 20 || ORDER>1 rev(**bdata, e, cdata.weights); #else e[0] = rec_op(**bdata, e[0]*cdata.weights[1]); #endif } etvtilde.set_col(0, e*B0_4); } } } } /** * @brief Algorithm 4 stage 2 and 3 (fusioned) * * This function computes the algorithm stages 5.2 and 5.3 following: * * In parallel for all $n$, sequentially for each $m$, compute and * store the $P_{m,n}(Y)$ according to (37) and using the previously * computed $P_{m-1,n}(\bar{Y})$. * * with simple kernel fusioned (going thorough global memory): * * In parallel for all $n$, sequentially for each $m$, compute and * store $E_{m,n}(Z)$ according to (45) using the previously computed * $P_{m-1,n}(Y)$ and $E_{m+1,n}(\hat{Z})$. 
* * @param[in,out] g_transp_ybar All $P_{m,n}(\bar{Y})$ * @param[in,out] g_transp_zhat All $E_{m,n}(\hat{Z})$ */ __global__ #if NB23 __launch_bounds__(WS*W23, NB23) #endif void adjust_carries(Matrix<float,ORDER,WS> *g_pybar, Matrix<float,ORDER,WS> *g_ezhat, int m_size, int n_size) { int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y; __shared__ Matrix<float,ORDER,WS> block[W23]; Matrix<float,ORDER,WS> &bdata = (Matrix<float,ORDER,WS> &)block[ty][0][tx]; // P(ybar) -> P(y) processing -------------------------------------- Matrix<float,ORDER,WS> *pybar = (Matrix<float,ORDER,WS> *)&g_pybar[n*m_size+ty][0][tx]; Vector<float,ORDER> py = zeros<float,ORDER>(); // P(Y) int m = 0; if(blockDim.y == W23) { int mmax = m_size-(m_size%W23)-1; for(; m<mmax; m+=W23) { // read P(Y) bdata.set_col(0, pybar->col(0)); __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dm=0; dm<W23; ++dm, ++bdata) py = mad(bdata[0], py, cdata.AbF_T); } __syncthreads(); pybar->set_col(0,bdata.col(0)); pybar += W23; } } // remaining column-blocks if(m < m_size-1) { if(m+ty < m_size-1) bdata.set_col(0, pybar->col(0)); int remaining = m_size-1 - m; __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dm=0; dm<remaining; ++dm, ++bdata) py = mad(bdata[0], py, cdata.AbF_T); } __syncthreads(); if(m+ty < m_size-1) pybar->set_col(0,bdata.col(0)); } // E(zhat) -> E(z) processing -------------------------------------- m = m_size-1; Matrix<float,ORDER,WS> *pm1y = (Matrix<float,ORDER,WS> *)&g_pybar[n*m_size+m-ty-1][0][tx], *ezhat = (Matrix<float,ORDER,WS> *)&g_ezhat[n*m_size+m-ty][0][tx]; // all pybars must be updated! __syncthreads(); Vector<float,ORDER> ez = zeros<float,ORDER>(); m = m_size-1; if(blockDim.y == W23) { int mmin = m_size%W23; for(; m>=mmin; m-=W23) { if(m > 0) { bdata.set_col(0, ezhat->col(0)); if(m-ty > 0) mad(bdata, *pm1y, cdata.HARB_AFP_T); __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dm=0; dm<W23; ++dm, ++bdata) ez = mad(bdata[0], ez, cdata.AbR_T); } __syncthreads(); ezhat->set_col(0,bdata.col(0)); } ezhat -= W23; pm1y -= W23; } } // remaining column-blocks if(m > 0) { int remaining = m+1; if(m-ty > 0) { bdata.set_col(0, ezhat->col(0)); mad(bdata, *pm1y, cdata.HARB_AFP_T); } __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dm=1; dm<remaining; ++dm, ++bdata) ez = mad(bdata[0], ez, cdata.AbR_T); } __syncthreads(); if(m-ty > 0) ezhat->set_col(0,bdata.col(0)); } } /** * @brief Algorithm 5 stage 4 and 5 (fusioned) * * This function computes the algorithm stages 5.2 and 5.3 following: * * In parallel for all $n$, sequentially for each $m$, compute and * store the $P_{m,n}(Y)$ according to (37) and using the previously * computed $P_{m-1,n}(\bar{Y})$. * * with simple kernel fusioned (going thorough global memory): * * In parallel for all $n$, sequentially for each $m$, compute and * store $E_{m,n}(Z)$ according to (45) using the previously computed * $P_{m-1,n}(Y)$ and $E_{m+1,n}(\hat{Z})$. 
* * @param[in,out] g_transp_ybar All $P_{m,n}(\bar{Y})$ * @param[in,out] g_transp_zhat All $E_{m,n}(\hat{Z})$ */ __global__ #if NB45 __launch_bounds__(WS*W45, NB45) #endif void adjust_carries(Matrix<float,ORDER,WS> *g_ptucheck, Matrix<float,ORDER,WS> *g_etvtilde, Matrix<float,ORDER,WS> *g_py, Matrix<float,ORDER,WS> *g_ez, int m_size, int n_size) { int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x; __shared__ Matrix<float,ORDER,WS> block[W45]; volatile __shared__ float block_RD_raw[W45][WS/2+WS+1]; volatile float (*block_RD)[WS/2+WS+1] = (float (*)[WS/2+WS+1]) &block_RD_raw[0][WS/2]; if(ty < W45) block_RD_raw[ty][tx] = 0; Matrix<float,ORDER,WS> &bdata = (Matrix<float,ORDER,WS> &)block[ty][0][tx]; // Pt(ucheck) -> Pt(u) processing -------------------------------------- Matrix<float,ORDER,WS> *ptucheck = (Matrix<float,ORDER,WS> *)&g_ptucheck[ty*cdata.m_size+m][0][tx], *pm1y = (Matrix<float,ORDER,WS> *)&g_py[ty*cdata.m_size+m-1][0][tx], *em1z = (Matrix<float,ORDER,WS> *)&g_ez[ty*cdata.m_size+m+1][0][tx]; Vector<float,ORDER> ptu = zeros<float,ORDER>(); // Pt(U) int n = 0; if(blockDim.y == W45) { int nmax = n_size-(n_size%W45)-1; for(; n<nmax; n+=W45) { // read Pt(U) bdata.set_col(0, ptucheck->col(0)); if(m > 0) mad(bdata, cdata.TAFB, *pm1y, cdata.ARB_AFP_T, block_RD); if(m < cdata.m_size-1) mad(bdata, cdata.TAFB, *em1z, cdata.ARE_T, block_RD); __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dn=0; dn<W45; ++dn, ++bdata) ptu = mad(*bdata, cdata.AbF, ptu); } __syncthreads(); ptucheck->set_col(0,bdata.col(0)); ptucheck += W45*cdata.m_size; pm1y += W45*cdata.m_size; em1z += W45*cdata.m_size; } } // remaining column-blocks if(n < cdata.n_size-1) { if(n+ty < cdata.n_size-1) { bdata.set_col(0, ptucheck->col(0)); if(m < cdata.m_size-1) mad(bdata, cdata.TAFB, *em1z, cdata.ARE_T, block_RD); if(m > 0) mad(bdata, cdata.TAFB, *pm1y, cdata.ARB_AFP_T, block_RD); } int remaining = n_size-1 - n; __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dn=0; dn<remaining; ++dn, ++bdata) ptu = mad(bdata[0], cdata.AbF, ptu); } __syncthreads(); if(n+ty < n_size-1) ptucheck->set_col(0,bdata.col(0)); } // E(zhat) -> E(z) processing -------------------------------------- n = n_size-1; Matrix<float,ORDER,WS> *etvtilde = (Matrix<float,ORDER,WS> *)&g_etvtilde[(n-ty)*cdata.m_size+m][0][tx], *ptn1u = (Matrix<float,ORDER,WS> *)&g_ptucheck[(n-ty-1)*cdata.m_size+m][0][tx]; pm1y = (Matrix<float,ORDER,WS> *)&g_py[(n-ty)*cdata.m_size+m-1][0][tx]; em1z = (Matrix<float,ORDER,WS> *)&g_ez[(n-ty)*cdata.m_size+m+1][0][tx]; // all pybars must be updated! 
__syncthreads(); Vector<float,ORDER> etv = zeros<float,ORDER>(); if(blockDim.y == W45) { int nmin = n_size%W45; for(; n>=nmin; n-=W45) { if(n > 0) { bdata.set_col(0, etvtilde->col(0)); if(m > 0) mad(bdata, cdata.HARB_AFB, *pm1y, cdata.ARB_AFP_T, block_RD); if(m < cdata.m_size-1) mad(bdata, cdata.HARB_AFB, *em1z, cdata.ARE_T, block_RD); if(n-ty > 0) mad(bdata, *ptn1u, cdata.HARB_AFP_T); __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dn=0; dn<W45; ++dn, ++bdata) etv = mad(bdata[0], cdata.AbR, etv); } __syncthreads(); etvtilde->set_col(0,bdata.col(0)); } etvtilde -= W45*cdata.m_size; pm1y -= W45*cdata.m_size; em1z -= W45*cdata.m_size; ptn1u -= W45*cdata.m_size; } } // remaining column-blocks if(n > 0) { int remaining = n+1; if(n-ty > 0) { bdata.set_col(0, etvtilde->col(0)); if(m > 0) mad(bdata, cdata.HARB_AFB, *pm1y, cdata.ARB_AFP_T, block_RD); if(m < cdata.m_size-1) mad(bdata, cdata.HARB_AFB, *em1z, cdata.ARE_T, block_RD); mad(bdata, *ptn1u, cdata.HARB_AFP_T); } __syncthreads(); if(ty == 0) { Matrix<float,ORDER,WS> *bdata = (Matrix<float,ORDER,WS> *)&block[0][0][tx]; #pragma unroll for(int dn=1; dn<remaining; ++dn, ++bdata) etv = mad(bdata[0], cdata.AbR, etv); } __syncthreads(); if(n-ty > 0) etvtilde->set_col(0,bdata.col(0)); } } __global__ #if NB6 __launch_bounds__(WS*W6, NB6) #endif void write_result(float *g_out, const Matrix<float,ORDER,WS> *g_py, const Matrix<float,ORDER,WS> *g_ez, const Matrix<float,ORDER,WS> *g_ptu, const Matrix<float,ORDER,WS> *g_etv) { int tx = threadIdx.x, ty = threadIdx.y, #if CUDA_SM >= 20 m = blockIdx.x*2, #else m = blockIdx.x, #endif n = blockIdx.y; // each cuda block will work on two horizontally adjacent WSxWS input data // blocks, so allocate enough shared memory for these. #if CUDA_SM >= 20 __shared__ Matrix<float,WS*2,WS+1> block; #else __shared__ Matrix<float,WS,WS+1> block; #endif // load data into shared memory read_block<W6>(block, m, n, cdata.inv_width, cdata.inv_height); #if CUDA_SM >= 20 m += ty; if(m >= cdata.m_size) return; #endif __syncthreads(); #if CUDA_SM >= 20 if(ty < 2) #else if(ty == 0) #endif { Matrix<float,ORDER,WS> &py = (Matrix<float,ORDER,WS>&) g_py[n*cdata.m_size+m-1][0][tx], &ez = (Matrix<float,ORDER,WS>&) g_ez[n*cdata.m_size+m+1][0][tx], &ptu = (Matrix<float,ORDER,WS>&) g_ptu[(n-1)*cdata.m_size+m][0][tx], &etv = (Matrix<float,ORDER,WS>&) g_etv[(n+1)*cdata.m_size+m][0][tx]; const float B0_2 = cdata.weights[0]*cdata.weights[0]; { float *bdata = block[tx+ty*WS]; // calculate pybar, scan left -> right Vector<float,ORDER> p = m==0 ? zeros<float,ORDER>() : py.col(0) / cdata.weights[0]; #pragma unroll for(int j=0; j<WS; ++j, ++bdata) *bdata = fwd(p, *bdata, cdata.weights); --bdata; Vector<float,ORDER> e = m==cdata.m_size-1 ? zeros<float,ORDER>() : ez.col(0); #pragma unroll for(int j=WS-1; j>=0; --j, --bdata) *bdata = rev(*bdata*B0_2, e, cdata.weights); } { float (*bdata)[WS+1] = (float (*)[WS+1]) &block[ty*WS][tx]; Vector<float,ORDER> p = n==0 ? zeros<float,ORDER>() : ptu.col(0) / cdata.weights[0]; #pragma unroll for(int i=0; i<WS; ++i, ++bdata) **bdata = fwd(p, **bdata, cdata.weights); --bdata; Vector<float,ORDER> e = n==cdata.n_size-1 ? zeros<float,ORDER>() : etv.col(0); // for some reason it's faster when this is here then inside the // next if block; int x = (m-cdata.border)*WS+tx; int y = (n-cdata.border+1)*WS-1; // current block intersects transp_out's area? 
if(m >= cdata.border && m <= cdata.last_m && n >= cdata.border && n <= cdata.last_n) { // image's end is in the middle of the block and we're outside // the image width? if(y >= cdata.height) { // process data until we get into the image int i; #pragma unroll for(i=y; i>=cdata.height; --i, --bdata) rev(**bdata*B0_2, e, cdata.weights); // bdata -= y-cdata.height+1; // now we're inside the image, we must write to transp_out float *out = g_out + (cdata.height-1)*cdata.rowstride + x; int nmin = y-(WS-1); #pragma unroll for(;i>=nmin; --i, --bdata, out -= cdata.rowstride) { rev(**bdata*B0_2, e, cdata.weights); if(x < cdata.width) *out = e[0]; } } else { float *out = g_out + y*cdata.rowstride + x; #pragma unroll for(int i=WS-1; i>=0; --i, --bdata, out -= cdata.rowstride) { rev(**bdata*B0_2, e, cdata.weights); if(x < cdata.width) *out = e[0]; } } } } } } #undef cdata } // namespace rod
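/*
 * A small CPU reference for the per-row computation that the block-parallel
 * kernels above decompose into intra-block scans plus inter-block carry
 * adjustment. Illustrative sketch only: it assumes a first-order recursive
 * filter of the form y[n] = b0*x[n] - a1*y[n-1] (causal) followed by
 * z[n] = b0*y[n] - a1*z[n+1] (anticausal); the exact fwd/rev/rec_op weight
 * and sign conventions are defined in recfilter.h and may differ.
 */
#include <vector>
#include <cstddef>

static void recursive_filter_1d(std::vector<float> &x, float b0, float a1)
{
    // causal (left -> right) pass; p plays the role of the carry P_{m-1,n}(Y)
    float p = 0.0f;
    for (std::size_t n = 0; n < x.size(); ++n)
        x[n] = p = b0 * x[n] - a1 * p;

    // anticausal (right -> left) pass; e plays the role of the carry E_{m+1,n}(Z)
    float e = 0.0f;
    for (std::size_t n = x.size(); n-- > 0; )
        x[n] = e = b0 * x[n] - a1 * e;
}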
#include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/logical.h> #include <thrust/sequence.h> #include <thrust/transform.h> namespace cudf { namespace lists { namespace detail { namespace { /** * @brief Concatenate lists within the same row into one list, ignoring any null list during * concatenation. */ std::unique_ptr<column> concatenate_lists_ignore_null(column_view const& input, bool build_null_mask, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const num_rows = input.size(); static_assert(std::is_same_v<offset_type, int32_t> && std::is_same_v<size_type, int32_t>); auto out_offsets = make_numeric_column( data_type{type_id::INT32}, num_rows + 1, mask_state::UNALLOCATED, stream, mr); auto const d_out_offsets = out_offsets->mutable_view().template begin<offset_type>(); auto const d_row_offsets = lists_column_view(input).offsets_begin(); auto const d_list_offsets = lists_column_view(lists_column_view(input).child()).offsets_begin(); // Concatenating the lists at the same row by converting the entry offsets from the child column // into row offsets of the root column. Those entry offsets are subtracted by the first entry // offset to output zero-based offsets. auto const iter = thrust::make_counting_iterator<size_type>(0); thrust::transform(rmm::exec_policy(stream), iter, iter + num_rows + 1, d_out_offsets, [d_row_offsets, d_list_offsets] __device__(auto const idx) { auto const start_offset = d_list_offsets[d_row_offsets[0]]; return d_list_offsets[d_row_offsets[idx]] - start_offset; }); // The child column of the output lists column is just copied from the input column. auto out_entries = std::make_unique<column>( lists_column_view(lists_column_view(input).get_sliced_child(stream)).get_sliced_child(stream)); auto [null_mask, null_count] = [&] { if (!build_null_mask) return std::make_pair(cudf::detail::copy_bitmask(input, stream, mr), input.null_count()); // The output row will be null only if all lists on the input row are null. auto const lists_dv_ptr = column_device_view::create(lists_column_view(input).child(), stream); return cudf::detail::valid_if( iter, iter + num_rows, [d_row_offsets, lists_dv = *lists_dv_ptr, iter] __device__(auto const idx) { return thrust::any_of( thrust::seq, iter + d_row_offsets[idx], iter + d_row_offsets[idx + 1], [&] __device__(auto const list_idx) { return lists_dv.is_valid(list_idx); }); }, stream, mr); }(); return make_lists_column(num_rows, std::move(out_offsets), std::move(out_entries), null_count, null_count > 0 ? std::move(null_mask) : rmm::device_buffer{}, stream, mr); } /** * @brief Generate list offsets and list validities for the output lists column. * * This function is called only when (has_null_list == true and null_policy == NULLIFY_OUTPUT_ROW). 
*/ std::pair<std::unique_ptr<column>, rmm::device_uvector<int8_t>> generate_list_offsets_and_validities(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const num_rows = input.size(); static_assert(std::is_same_v<offset_type, int32_t> && std::is_same_v<size_type, int32_t>); auto out_offsets = make_numeric_column( data_type{type_id::INT32}, num_rows + 1, mask_state::UNALLOCATED, stream, mr); auto const lists_of_lists_dv_ptr = column_device_view::create(input, stream); auto const lists_dv_ptr = column_device_view::create(lists_column_view(input).child(), stream); auto const d_out_offsets = out_offsets->mutable_view().template begin<offset_type>(); auto const d_row_offsets = lists_column_view(input).offsets_begin(); auto const d_list_offsets = lists_column_view(lists_column_view(input).child()).offsets_begin(); // The array of int8_t stores validities for the output list elements. auto validities = rmm::device_uvector<int8_t>(num_rows, stream); // Compute output list sizes and validities. auto const iter = thrust::make_counting_iterator<size_type>(0); thrust::transform( rmm::exec_policy(stream), iter, iter + num_rows, d_out_offsets, [lists_of_lists_dv = *lists_of_lists_dv_ptr, lists_dv = *lists_dv_ptr, d_row_offsets, d_list_offsets, d_validities = validities.begin(), iter] __device__(auto const idx) { if (d_row_offsets[idx] == d_row_offsets[idx + 1]) { // This is a null/empty row. d_validities[idx] = static_cast<int8_t>(lists_of_lists_dv.is_valid(idx)); return size_type{0}; } // The output row will not be null only if all lists on the input row are not null. auto const is_valid = thrust::all_of(thrust::seq, iter + d_row_offsets[idx], iter + d_row_offsets[idx + 1], [&] __device__(auto const list_idx) { return lists_dv.is_valid(list_idx); }); d_validities[idx] = static_cast<int8_t>(is_valid); if (!is_valid) { return size_type{0}; } // Compute size of the output list as sum of sizes of all lists in the current input row. return d_list_offsets[d_row_offsets[idx + 1]] - d_list_offsets[d_row_offsets[idx]]; }); // Compute offsets from sizes. thrust::exclusive_scan( rmm::exec_policy(stream), d_out_offsets, d_out_offsets + num_rows + 1, d_out_offsets); return {std::move(out_offsets), std::move(validities)}; } /** * @brief Gather entries from the input lists column, ignoring rows that have null list elements. * * This function is called only when (has_null_list == true and null_policy == NULLIFY_OUTPUT_ROW). */ std::unique_ptr<column> gather_list_entries(column_view const& input, column_view const& output_list_offsets, size_type num_rows, size_type num_output_entries, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const child_col = lists_column_view(input).child(); auto const entry_col = lists_column_view(child_col).child(); auto const d_row_offsets = lists_column_view(input).offsets_begin(); auto const d_list_offsets = lists_column_view(child_col).offsets_begin(); auto gather_map = rmm::device_uvector<size_type>(num_output_entries, stream); // Fill the gather map with indices of the lists from the child column of the input column. thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), num_rows, [d_row_offsets, d_list_offsets, d_indices = gather_map.begin(), d_out_list_offsets = output_list_offsets.template begin<offset_type>()] __device__(size_type const idx) { // The output row has been identified as a null/empty list during list size computation. 
if (d_out_list_offsets[idx + 1] == d_out_list_offsets[idx]) { return; } // The indices of the list elements on the row `idx` of the input column. thrust::sequence(thrust::seq, d_indices + d_out_list_offsets[idx], d_indices + d_out_list_offsets[idx + 1], d_list_offsets[d_row_offsets[idx]]); }); auto result = cudf::detail::gather(table_view{{entry_col}}, gather_map, out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED, stream, mr); return std::move(result->release()[0]); } std::unique_ptr<column> concatenate_lists_nullifying_rows(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // Generate offsets and validities of the output lists column. auto [list_offsets, list_validities] = generate_list_offsets_and_validities(input, stream, mr); auto const offsets_view = list_offsets->view(); auto const num_rows = input.size(); auto const num_output_entries = cudf::detail::get_value<size_type>(offsets_view, num_rows, stream); auto list_entries = gather_list_entries(input, offsets_view, num_rows, num_output_entries, stream, mr); auto [null_mask, null_count] = cudf::detail::valid_if( list_validities.begin(), list_validities.end(), thrust::identity<int8_t>{}, stream, mr); return make_lists_column(num_rows, std::move(list_offsets), std::move(list_entries), null_count, null_count ? std::move(null_mask) : rmm::device_buffer{}, stream, mr); } } // namespace /** * @copydoc cudf::lists::concatenate_list_elements * * @param stream CUDA stream used for device memory operations and kernel launches. */ std::unique_ptr<column> concatenate_list_elements(column_view const& input, concatenate_null_policy null_policy, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto type = input.type(); // Column that is lists of lists. CUDF_EXPECTS(type.id() == type_id::LIST, "Input column must be a lists column."); auto col = lists_column_view(input).child(); // Rows, which are lists. type = col.type(); CUDF_EXPECTS(type.id() == type_id::LIST, "Rows of the input column must be lists."); col = lists_column_view(col).child(); // The last level entries what we need to check. type = col.type(); CUDF_EXPECTS(type.id() == type_id::LIST || !cudf::is_nested(type), "Entry of the input lists column must be of list or non-nested types."); if (input.size() == 0) { return cudf::empty_like(input); } bool has_null_list = lists_column_view(input).child().has_nulls(); return (null_policy == concatenate_null_policy::IGNORE || !has_null_list) ? concatenate_lists_ignore_null(input, has_null_list, stream, mr) : concatenate_lists_nullifying_rows(input, stream, mr); } } // namespace detail /** * @copydoc cudf::lists::concatenate_list_elements */ std::unique_ptr<column> concatenate_list_elements(column_view const& input, concatenate_null_policy null_policy, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::concatenate_list_elements(input, null_policy, rmm::cuda_stream_default, mr); } } // namespace lists } // namespace cudf
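/*
 * An illustrative host-side walk-through (plain C++ with assumed example data,
 * mirroring the device-side thrust::transform in concatenate_lists_ignore_null
 * above) of the offsets conversion: the row offsets index into the child lists
 * column, whose own offsets index the flat entries; composing the two and
 * subtracting the first entry offset yields zero-based offsets for the
 * concatenated output lists.
 */
#include <cstdio>
#include <cstddef>
#include <vector>

int main()
{
    // input: [ [[1,2],[3]], [[4,5,6]], [[7],[8,9]] ]
    std::vector<int> row_offsets  = {0, 2, 3, 5};        // lists-of-lists level
    std::vector<int> list_offsets = {0, 2, 3, 6, 7, 9};  // child lists level

    std::vector<int> out_offsets(row_offsets.size());
    int const start = list_offsets[row_offsets[0]];
    for (std::size_t idx = 0; idx < row_offsets.size(); ++idx)
        out_offsets[idx] = list_offsets[row_offsets[idx]] - start;

    for (int o : out_offsets) std::printf("%d ", o);     // prints: 0 3 6 9
    std::printf("\n");
    return 0;
}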
using namespace std; typedef ML::CUDA::Test_Buckets_Binsym::Float Float; typedef ML::CUDA::Test_Buckets_Binsym::TwoBuckets TwoBuckets; typedef ML::shift_t shift_t; /** Execution kernel Parameters: - example_data: structure (packed into memory: 4-12 bytes per entry, size is number of feature occurrences) uint16_t bucket uint16_t label uint32_t example num (if not exactly one per example) float divisor (if not exactly one per label) - buckets: structure, in shared memory, size is number of buckets - double true_corr: total count for true/correct bucket - double true_incorr: total count for true/incorrect bucket - Will eventually have more to minimise amount of contention on the shared buckets - Splitting up: - 512 threads per block - 64 entries per thread - 32,768 split points accumulated per block - Algorithm - Read and extract bucket, label, example, divisor (4 at once, for 16-48 bytes to be read) - Read 4 weights (16 bytes) - Accumulate in shared memory - Continue until block is finished - To void variability - Weights are accumulated in 64 bit integers - Use two floats (one to hold the first 23 bits of mantissa, the other for the other 23 bits) - To avoid using double precision - */ // Texture. Please note that this is global, so we can't be working on more // than one thing at once. Test only. texture<float, 1, cudaReadModeElementType> weights_tex; texture<float, 1, cudaReadModeElementType> ex_weights_tex; texture<float4, 1, cudaReadModeElementType> weights_tex4; texture<float4, 1, cudaReadModeElementType> ex_weights_tex4; __global__ void stumpBinsymKernel(const uint16_t * buckets, const uint32_t * examples, // or 0 if example num == i const int32_t * labels, const float * divisors, // or 0 if always equal to 1 uint32_t size, const float * weights, const float * ex_weights, TwoBuckets * buckets_global, TwoBuckets * w_label_global_, int num_buckets, int bucket_expansion, int num_todo, bool use_texture) { // access thread id const unsigned tid = threadIdx.x; // access number of threads in this block const unsigned num_threads = blockDim.x; // Access where the block starts const unsigned block_num = blockIdx.x; unsigned offset = (block_num * (num_threads * num_todo)); // shared memory. Our buckets are accumulated in this. extern __shared__ TwoBuckets shared_data[]; TwoBuckets * buckets_shared = shared_data + 1; TwoBuckets * w_label_shared = shared_data; TwoBuckets * w_label_global = w_label_global_; bucket_expansion = 1; int buckets_allocated = num_buckets * bucket_expansion; int expansion_offset = tid % bucket_expansion; // Initialization of shared (across threads) for (unsigned i = tid; i < buckets_allocated; i += num_threads) buckets_shared[i][0] = buckets_shared[i][1] = 0.0f; if (tid == 0) w_label_shared[0][0] = w_label_shared[0][1] = 0.0f; // Wait for the initialization to be finished __syncthreads(); unsigned start_at = offset + tid; Float w_label_true(0.0f), w_label_false(0.0f); // Optimize for where examples == 0, which means we access everything // with unit stride if (examples == 0) { // We modify so that 4 values are done by tid 0, then 4 by tid 1, // and so on so that out memory accesses really read 4 values. 
start_at = offset + (tid * 4); const float4 uniform_divisors = make_float4(1.0f, 1.0f, 1.0f, 1.0f); for (unsigned i = 0; i < num_todo; i += 4) { int example = start_at + i * num_threads; if (example >= size) break; float4 weight, ex_weight; if (use_texture) { ex_weight.x = tex1Dfetch(ex_weights_tex, example); ex_weight.y = tex1Dfetch(ex_weights_tex, example + 1); ex_weight.z = tex1Dfetch(ex_weights_tex, example + 2); ex_weight.w = tex1Dfetch(ex_weights_tex, example + 3); //ex_weight = tex1Dfetch(ex_weights_tex4, example); weight.x = tex1Dfetch(weights_tex, example); weight.y = tex1Dfetch(weights_tex, example + 1); weight.z = tex1Dfetch(weights_tex, example + 2); weight.w = tex1Dfetch(weights_tex, example + 3); //weight = tex1Dfetch(weights_tex4, example); } else if (use_texture && false /* doesn't work; most get zero */) { ex_weight = tex1Dfetch(ex_weights_tex4, example); weight = tex1Dfetch(weights_tex4, example); } else { ex_weight = *(const float4 *)(&ex_weights[example]); weight = *(const float4 *)(&weights[example]); } weight.x *= ex_weight.x; weight.y *= ex_weight.y; weight.z *= ex_weight.z; weight.w *= ex_weight.w; const int4 label = *(const int4 *)(&labels[example]); const float4 divisor = (divisors == 0 ? uniform_divisors : *(const float4 *)(divisors + example)); const short4 real_bucket = *(const short4 *)(buckets + example); float to_add; int bucket; // First update (x) to_add = weight.x * divisor.x; bucket = real_bucket.x * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label.x], to_add); if (label.x) w_label_true += to_add; else w_label_false += to_add; if (example + 1 == size) break; // Second update (y) to_add = weight.y * divisor.y; bucket = real_bucket.y * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label.y], to_add); if (label.y) w_label_true += to_add; else w_label_false += to_add; if (example + 2 == size) break; // Third update (z) to_add = weight.z * divisor.z; bucket = real_bucket.z * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label.z], to_add); if (label.z) w_label_true += to_add; else w_label_false += to_add; if (example + 3 == size) break; // Fourth update (w) to_add = weight.w * divisor.w; bucket = real_bucket.w * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label.w], to_add); if (label.w) w_label_true += to_add; else w_label_false += to_add; } } else { for (unsigned i = 0; i < num_todo; ++i) { int index = start_at + i * num_threads; if (index >= size) break; int example = (examples == 0 ? index : examples[index]); float weight; if (use_texture) { weight = tex1Dfetch(ex_weights_tex, example); if (weight == 0.0) continue; weight *= tex1Dfetch(weights_tex, example); } else { weight = ex_weights[example]; if (weight == 0.0) continue; weight *= weights[example]; } if (weight == 0.0) continue; const int label = labels[index]; const float divisor = (divisors == 0 ? 1.0f : divisors[index]); const int real_bucket = buckets[index]; const int bucket = real_bucket * bucket_expansion + expansion_offset; const float to_add = weight * divisor; const float to_add_true = (label ? to_add : 0.0f); const float to_add_false = (label ? 
0.0f : to_add); atomic_add_shared(buckets_shared[bucket][label], to_add); w_label_true += to_add_true; w_label_false += to_add_false; } } /* Accumulate the total removed field */ atomic_add_shared(w_label_shared[0][0], w_label_false); atomic_add_shared(w_label_shared[0][1], w_label_true); /* Wait until all shared is done */ __syncthreads(); /* Update the global results using atomic additions (one thread only) */ for (unsigned i = tid; i < buckets_allocated; i += num_threads) { int real_bucket = i / bucket_expansion; atomic_add(buckets_global[real_bucket][0], buckets_shared[i][0]); atomic_add(buckets_global[real_bucket][1], buckets_shared[i][1]); } if (tid == 0) { atomic_add(w_label_global[0][0], w_label_shared[0][0]); atomic_add(w_label_global[0][1], w_label_shared[0][1]); } } __global__ void stumpBinsymKernelPacked(const uint64_t * index_data, shift_t bucket_bits, shift_t example_bits, shift_t label_bits, shift_t divisor_bits, uint32_t size, const float * weights, const float * ex_weights, TwoBuckets * buckets_global, TwoBuckets * w_label_global_, int num_buckets, int bucket_expansion, int num_todo, bool use_texture) { // access thread id const unsigned tid = threadIdx.x; // access number of threads in this block const unsigned num_threads = blockDim.x; // Access where the block starts const unsigned block_num = blockIdx.x; unsigned offset = (block_num * (num_threads * num_todo)); // shared memory. Our buckets are accumulated in this. extern __shared__ TwoBuckets shared_data[]; TwoBuckets * buckets_shared = shared_data + 1; TwoBuckets * w_label_shared = shared_data; TwoBuckets * w_label_global = w_label_global_; bucket_expansion = 1; int buckets_allocated = num_buckets * bucket_expansion; int expansion_offset = tid % bucket_expansion; // Initialization of shared (across threads) for (unsigned i = tid; i < buckets_allocated; i += num_threads) buckets_shared[i][0] = buckets_shared[i][1] = 0.0f; if (tid == 0) w_label_shared[0][0] = w_label_shared[0][1] = 0.0f; // Wait for the initialization to be finished __syncthreads(); // Get our index bit buffer int total_bits = bucket_bits + example_bits + label_bits + divisor_bits; typedef ML::Buffered_Mem_Buffer<uint64_t> Mem_Buffer; typedef ML::Bit_Buffer<uint64_t, Mem_Buffer> Buffer; ML::Bit_Extractor<uint64_t, Buffer> index(index_data); unsigned start_at = offset + tid; Float w_label_true(0.0f), w_label_false(0.0f); // Optimize for where examples == 0, which means we access everything // with unit stride if (example_bits == 0) { // We modify so that 4 values are done by tid 0, then 4 by tid 1, // and so on so that our memory accesses really read 4 values. 
start_at = offset + (tid * 4); index.advance(start_at * total_bits); for (unsigned i = 0; i < num_todo; i += 4, index.advance((4 * num_threads - 4) * total_bits)) { int example = start_at + i * num_threads; if (example >= size) break; float4 weight, ex_weight; if (use_texture) { ex_weight.x = tex1Dfetch(ex_weights_tex, example); ex_weight.y = tex1Dfetch(ex_weights_tex, example + 1); ex_weight.z = tex1Dfetch(ex_weights_tex, example + 2); ex_weight.w = tex1Dfetch(ex_weights_tex, example + 3); //ex_weight = tex1Dfetch(ex_weights_tex4, example); weight.x = tex1Dfetch(weights_tex, example); weight.y = tex1Dfetch(weights_tex, example + 1); weight.z = tex1Dfetch(weights_tex, example + 2); weight.w = tex1Dfetch(weights_tex, example + 3); //weight = tex1Dfetch(weights_tex4, example); } else if (use_texture && false /* doesn't work; most get zero */) { ex_weight = tex1Dfetch(ex_weights_tex4, example); weight = tex1Dfetch(weights_tex4, example); } else { ex_weight = *(const float4 *)(&ex_weights[example]); weight = *(const float4 *)(&weights[example]); } weight.x *= ex_weight.x; weight.y *= ex_weight.y; weight.z *= ex_weight.z; weight.w *= ex_weight.w; unsigned label; float divisor; unsigned real_bucket; float to_add; int bucket; real_bucket = index.extract<unsigned>(bucket_bits); label = index.extract<unsigned>(label_bits); if (divisor_bits == 0) divisor = 1.0; else divisor = index.extract<float>(divisor_bits); #if 0 // First update (x) index.extract(real_bucket, bucket_bits, label, label_bits, divisor, divisor_bits); #endif to_add = weight.x * divisor; bucket = real_bucket * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label], to_add); if (label) w_label_true += to_add; else w_label_false += to_add; if (example + 1 == size) break; real_bucket = index.extract<unsigned>(bucket_bits); label = index.extract<unsigned>(label_bits); if (divisor_bits == 0) divisor = 1.0; else divisor = index.extract<float>(divisor_bits); #if 0 // Second update (y) index.extract(real_bucket, bucket_bits, label, label_bits, divisor, divisor_bits); if (divisor_bits == 0) divisor = 1.0; #endif to_add = weight.y * divisor; bucket = real_bucket * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label], to_add); if (label) w_label_true += to_add; else w_label_false += to_add; if (example + 2 == size) break; // Third update (z) real_bucket = index.extract<unsigned>(bucket_bits); label = index.extract<unsigned>(label_bits); if (divisor_bits == 0) divisor = 1.0; else divisor = index.extract<float>(divisor_bits); #if 0 index.extract(real_bucket, bucket_bits, label, label_bits, divisor, divisor_bits); if (divisor_bits == 0) divisor = 1.0; #endif to_add = weight.z * divisor; bucket = real_bucket * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label], to_add); if (label) w_label_true += to_add; else w_label_false += to_add; if (example + 3 == size) break; // Fourth update (w) real_bucket = index.extract<unsigned>(bucket_bits); label = index.extract<unsigned>(label_bits); if (divisor_bits == 0) divisor = 1.0; else divisor = index.extract<float>(divisor_bits); #if 0 index.extract(real_bucket, bucket_bits, label, label_bits, divisor, divisor_bits); if (divisor_bits == 0) divisor = 1.0; #endif to_add = weight.w * divisor; bucket = real_bucket * bucket_expansion + expansion_offset; atomic_add_shared(buckets_shared[bucket][label], to_add); if (label) w_label_true += to_add; else w_label_false += to_add; } } else { index.advance(start_at * 
total_bits); for (unsigned n = 0; n < num_todo; ++n, index.advance((num_threads - 1) * total_bits)) { int i = start_at + n * num_threads; if (i >= size) break; int real_bucket; int example; int label; float divisor; real_bucket = index.extract<unsigned>(bucket_bits); if (example_bits == 0) example = i; else example = index.extract<float>(example_bits); label = index.extract<unsigned>(label_bits); if (divisor_bits == 0) divisor = 1.0; else divisor = index.extract<float>(divisor_bits); #if 0 index.extract(real_bucket, bucket_bits, example, example_bits, label, label_bits, divisor, divisor_bits); #endif if (example_bits == 0) example = i; if (divisor_bits == 0) divisor = 1.0; float weight; if (use_texture) { weight = tex1Dfetch(ex_weights_tex, example); if (weight == 0.0) continue; weight *= tex1Dfetch(weights_tex, example); } else { weight = ex_weights[example]; if (weight == 0.0) continue; weight *= weights[example]; } if (weight == 0.0) continue; const int bucket = real_bucket * bucket_expansion + expansion_offset; const float to_add = weight * divisor; const float to_add_true = (label ? to_add : 0.0f); const float to_add_false = (label ? 0.0f : to_add); atomic_add_shared(buckets_shared[bucket][label], to_add); w_label_true += to_add_true; w_label_false += to_add_false; } } /* Accumulate the total removed field */ atomic_add_shared(w_label_shared[0][0], w_label_false); atomic_add_shared(w_label_shared[0][1], w_label_true); /* Wait until all shared is done */ __syncthreads(); /* Update the global results using atomic additions (one thread only) */ for (unsigned i = tid; i < buckets_allocated; i += num_threads) { int real_bucket = i / bucket_expansion; atomic_add(buckets_global[real_bucket][0], buckets_shared[i][0]); atomic_add(buckets_global[real_bucket][1], buckets_shared[i][1]); } if (tid == 0) { atomic_add(w_label_global[0][0], w_label_shared[0][0]); atomic_add(w_label_global[0][1], w_label_shared[0][1]); } } namespace ML { namespace CUDA { /*****************************************************************************/ /* TEST_BUCKETS_BINSYM */ /*****************************************************************************/ void executeHost(TwoBuckets * accum, TwoBuckets & w_label, const float * weights, const float * ex_weights, const uint16_t * buckets, const uint32_t * examples, const int32_t * labels, const float * divisors, size_t size); void executeHostCompressed(TwoBuckets * accum, TwoBuckets & w_label, const float * weights, const float * ex_weights, const uint64_t * data, int bucket_bits, int example_bits, int label_bits, int divisor_bits, size_t size); struct Test_Buckets_Binsym::Context { const Plan * plan; TwoBuckets * accum; TwoBuckets * w_label; DeviceData<TwoBuckets> d_accum; DeviceData<TwoBuckets> d_w_label; bool on_device; void synchronize() { if (on_device) { cudaError_t err = cudaThreadSynchronize(); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); d_accum.sync(accum); d_w_label.sync(w_label); } #if 1 cerr << "final results: " << endl; for (unsigned i = 0; i < 2 /*num_buckets*/; ++i) cerr << "bucket " << i << ": 0: " << accum[i][0] << " 1: " << accum[i][1] << endl; #endif cerr << "w_label: 0: " << w_label[0][0] << " 1: " << w_label[0][1] << endl; } }; struct Test_Buckets_Binsym::Plan { Plan(const uint16_t * buckets, const uint32_t * examples, // or 0 if example num == i const int32_t * labels, const float * divisors, uint32_t size, const float * weights, const float * ex_weights, int num_buckets, bool on_device, bool compressed) : buckets(buckets), 
examples(examples), labels(labels), divisors(divisors), size(size), weights(weights), ex_weights(ex_weights), num_buckets(num_buckets), on_device(on_device), compressed(compressed) { if (!buckets) throw Exception("no buckets"); if (compressed) compressed_index.init(buckets, examples, labels, divisors, size); if (!on_device) return; // nothing to set up if running on host // How many concurrent threads are launched at the same time to work // together? If this number is too high, then we will have contention // on the shared memory (the probability that we update multiple buckets // at once increases). If the number is too low, then we won't be able // to launch many concurrent blocks. threads = dim3( 128, 1, 1); // How many does each thread block do? num_todo = 32; // How many of these thread blocks? grid = dim3( rudiv(size, threads.x * num_todo)); cerr << "num_todo = " << num_todo << endl; cerr << "grid: x = " << grid.x << endl; // If there aren't enough buckets, then create some more and merge // them together at the end. // This helps to avoid bank conflicts. buckets_to_allocate = num_buckets; bucket_expansion = 1; if (num_buckets < 8) { bucket_expansion = (16 / num_buckets) + 1; buckets_to_allocate = num_buckets * bucket_expansion; } cerr << "num_buckets = " << num_buckets << " bucket_expansion = " << bucket_expansion << " buckets_to_allocate = " << buckets_to_allocate << endl; // How much shared memory? // // We need: // - sizeof(TwoBuckets) for each bucket; // - sizeof(TwoBuckets) for the total // // Note that this effectively limits us to 1023 buckets, as we only // have 16kb of shared memory available, and this will severely limit // parallelism. shared_mem_size = sizeof(TwoBuckets) * (buckets_to_allocate + 1); cerr << "shared_mem_size = " << shared_mem_size << endl; if (compressed) { d_compressed_index.init(compressed_index.data.get(), compressed_index.num_words); } else { d_buckets.init(buckets, size); d_examples.init(examples, size); d_labels.init(labels, size); d_divisors.init(divisors, size); } d_weights.init(weights, size); d_ex_weights.init(ex_weights, size); // set texture parameters cudaError_t err; err = cudaBindTexture(0, weights_tex, d_weights /*, d_weights.num_bytes()*/); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); err = cudaBindTexture(0, ex_weights_tex, d_ex_weights /* , d_ex_weights.num_bytes()*/); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); err = cudaBindTexture(0, weights_tex4, d_weights /*, d_weights.num_bytes()*/); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); err = cudaBindTexture(0, ex_weights_tex4, d_ex_weights /*, d_ex_weights.num_bytes()*/); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); use_texture = true; } const uint16_t * buckets; const uint32_t * examples; const int32_t * labels; const float * divisors; uint32_t size; const float * weights; const float * ex_weights; int num_buckets; bool on_device; bool compressed; dim3 threads; int num_todo; dim3 grid; int buckets_to_allocate; int bucket_expansion; int shared_mem_size; DeviceData<uint16_t> d_buckets; DeviceData<uint32_t> d_examples; DeviceData<int32_t> d_labels; DeviceData<float> d_divisors; DeviceData<float> d_weights; DeviceData<float> d_ex_weights; Bit_Compressed_Index compressed_index; DeviceData<uint64_t> d_compressed_index; bool use_texture; boost::shared_ptr<Context> executeHost(TwoBuckets * accum, TwoBuckets & w_label) const { boost::shared_ptr<Context> result(new Context()); //result->plan = this; // Get the 
data structures result->on_device = false; result->accum = accum; result->w_label = &w_label; if (compressed) { CUDA::executeHostCompressed(accum, w_label, weights, ex_weights, compressed_index.data.get(), compressed_index.bucket_bits, compressed_index.example_bits, compressed_index.label_bits, compressed_index.divisor_bits, size); } else { CUDA::executeHost(accum, w_label, weights, ex_weights, buckets, examples, labels, divisors, size); } return result; } boost::shared_ptr<Context> executeDevice(TwoBuckets * accum, TwoBuckets & w_label) const { boost::shared_ptr<Context> result(new Context()); //result->plan = this; // Get the data structures result->d_accum.init(accum, num_buckets); result->d_w_label.init(&w_label, 1); result->on_device = true; result->accum = accum; result->w_label = &w_label; // execute the kernel if (compressed) stumpBinsymKernelPacked<<< grid, threads, shared_mem_size >>> ( d_compressed_index, compressed_index.bucket_bits, compressed_index.example_bits, compressed_index.label_bits, compressed_index.divisor_bits, size, d_weights, d_ex_weights, result->d_accum, result->d_w_label, num_buckets, bucket_expansion, num_todo, use_texture); else stumpBinsymKernel<<< grid, threads, shared_mem_size >>> ( d_buckets, d_examples, d_labels, d_divisors, size, d_weights, d_ex_weights, result->d_accum, result->d_w_label, num_buckets, bucket_expansion, num_todo, use_texture); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) throw Exception(cudaGetErrorString(err)); return result; } boost::shared_ptr<Context> execute(TwoBuckets * accum, TwoBuckets & w_label) const { // Clear result to start with for (unsigned i = 0; i < num_buckets; ++i) accum[i][0] = accum[i][1] = 0.0; w_label[0] = w_label[1] = 0.0; if (on_device) return executeDevice(accum, w_label); else return executeHost(accum, w_label); } }; boost::shared_ptr<Test_Buckets_Binsym::Plan> Test_Buckets_Binsym:: plan(const uint16_t * buckets, const uint32_t * examples, // or 0 if example num == i const int32_t * labels, const float * divisors, uint32_t size, const float * weights, const float * ex_weights, int num_buckets, bool on_device, bool compressed) const { return boost::shared_ptr<Test_Buckets_Binsym::Plan> (new Plan(buckets, examples, labels, divisors, size, weights, ex_weights, num_buckets, on_device, compressed)); } boost::shared_ptr<Test_Buckets_Binsym::Context> Test_Buckets_Binsym:: execute(const Plan & plan, TwoBuckets * accum, TwoBuckets & w_label) const { return plan.execute(accum, w_label); } void Test_Buckets_Binsym:: synchronize(Context & context) const { context.synchronize(); } } // namespace CUDA } // namespace ML
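/*****************************************************************************/
/* Note on the shared-memory bucket expansion used above                      */
/*****************************************************************************/
/* The bucket_expansion logic keeps several interleaved copies of each        */
/* (small) bucket in shared memory so that concurrent atomic updates are      */
/* less likely to collide on the same location, and the copies are folded     */
/* back together when buckets_shared is flushed into buckets_global           */
/* (the "real_bucket = i / bucket_expansion" loop). Below is a minimal,       */
/* self-contained sketch of that pattern only; histogramExpandedKernel,       */
/* NUM_BUCKETS and NUM_COPIES are illustrative names, not part of the code    */
/* above.                                                                     */

#include <cuda_runtime.h>

constexpr int NUM_BUCKETS = 4;   // a small bucket count, the case the expansion targets
constexpr int NUM_COPIES  = 5;   // plays the role of bucket_expansion

__global__ void histogramExpandedKernel(const float * weights,
                                        const int * buckets,
                                        int n,
                                        float * global_hist /* NUM_BUCKETS entries */)
{
    __shared__ float shared_hist[NUM_BUCKETS * NUM_COPIES];

    // Zero the expanded shared histogram
    for (int i = threadIdx.x; i < NUM_BUCKETS * NUM_COPIES; i += blockDim.x)
        shared_hist[i] = 0.0f;
    __syncthreads();

    // Each thread updates "its" copy, chosen from its thread id, so that
    // neighbouring threads usually hit different shared-memory locations
    // (the same role expansion_offset plays in the kernel above)
    int copy = threadIdx.x % NUM_COPIES;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        atomicAdd(&shared_hist[buckets[i] * NUM_COPIES + copy], weights[i]);
    __syncthreads();

    // Fold the copies together and push one atomic per bucket to global memory,
    // mirroring the final buckets_shared -> buckets_global loop above
    for (int b = threadIdx.x; b < NUM_BUCKETS; b += blockDim.x) {
        float sum = 0.0f;
        for (int c = 0; c < NUM_COPIES; ++c)
            sum += shared_hist[b * NUM_COPIES + c];
        atomicAdd(&global_hist[b], sum);
    }
}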
#include "Device/Util/SafeCudaAPI.cuh" //__cudaErrorHandler #include <cassert> //assert #if defined(NEVER_DEFINED) #include "SafeFunctions_.cuh" #endif ///@cond #define cuMemcpyDevToDev(...) \ xlib::detail::cuMemcpyDevToDevAux(__FILE__, __LINE__,__func__, __VA_ARGS__)\ #define cuMemcpyToDevice(...) \ xlib::detail::cuMemcpyToDeviceAux(__FILE__, __LINE__,__func__, __VA_ARGS__)\ #define cuMemcpyToHost(...) \ xlib::detail::cuMemcpyToHostAux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ //------------------------------------------------------------------------------ #define cuMemcpyToSymbol(...) \ xlib::detail::cuMemcpyToSymbolAux(__FILE__, __LINE__,__func__, __VA_ARGS__)\ #define cuMemcpyFromSymbol(...) \ xlib::detail::cuMemcpyFromSymbolAux(__FILE__, __LINE__,__func__, \ __VA_ARGS__) \ //------------------------------------------------------------------------------ #define cuMemset0x00(...) \ xlib::detail::cuMemset0x00Aux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ #define cuMemset0xFF(...) \ xlib::detail::cuMemset0xFFAux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ #define cuMemset(...) \ xlib::detail::cuMemsetAux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ //------------------------------------------------------------------------------ #define cuMemset2D0x00(...) \ xlib::detail::cuMemset2D0x00Aux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ #define cuMemset2D0xFF(...) \ xlib::detail::cuMemset2D0xFFAux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ #define cuMemset2D(...) \ xlib::detail::cuMemset2DAux(__FILE__, __LINE__, __func__, __VA_ARGS__) \ //------------------------------------------------------------------------------ #define cuMemcpy2DToDevice(...) \ xlib::detail::cuMemcpy2DToDeviceAux(__FILE__, __LINE__, __func__, \ __VA_ARGS__) \ #define cuMemcpy2DToHost(...) \ xlib::detail::cuMemcpy2DToHostAux(__FILE__, __LINE__, __func__, \ __VA_ARGS__) \ #define cuMemcpy2DDevToDev(...) \ xlib::detail::cuMemcpy2DDevToDevAux(__FILE__, __LINE__, __func__, \ __VA_ARGS__) \ //============================================================================== //============================================================================== namespace xlib { namespace detail { //////////////// // cuMemset // //////////////// template<typename T> void cuMemsetGenericAux(const char* file, int line, const char* func_name, T* ptr, size_t num_items, unsigned char mask) noexcept { assert(num_items > 0 && ptr != nullptr); char api_name[] = "cudaMemset(0x__)"; char value1 = static_cast<char>(mask / (0xF)); char value2 = static_cast<char>(mask % (0xF)); api_name[13] = (value1 <= '9') ? '0' + value1 : 'A' + value1 - 10; api_name[14] = (value2 <= '9') ? 
'0' + value2 : 'A' + value2 - 10; cudaErrorHandler(cudaMemset(ptr, mask, num_items * sizeof(T)), api_name, file, line, func_name); } template<typename T> void cuMemset0x00Aux(const char* file, int line, const char* func_name, T* ptr, size_t num_items = 1) noexcept { cuMemsetGenericAux(file, line, func_name, ptr, num_items, 0x00); } template<typename T> void cuMemset0x00Aux(const char* file, int line, const char* func_name, T& symbol) noexcept { T* symbol_address; SAFE_CALL( cudaGetSymbolAddress(symbol_address, symbol) ) cuMemsetGenericAux(file, line, func_name, symbol_address, 1, 0x00); } template<typename T, int SIZE> void cuMemset0x00Aux(const char* file, int line, const char* func_name, T (&symbol)[SIZE]) noexcept { T* symbol_address; SAFE_CALL( cudaGetSymbolAddress(symbol_address, symbol) ) cuMemsetGenericAux(file, line, func_name, symbol_address, SIZE, 0x00); } template<typename T> void cuMemset0xFFAux(const char* file, int line, const char* func_name, T* ptr, size_t num_items = 1) noexcept { cuMemsetGenericAux(file, line, func_name, ptr, num_items, 0xFF); } template<typename T> void cuMemset0xFFAux(const char* file, int line, const char* func_name, T& symbol) noexcept { T* symbol_address; SAFE_CALL( cudaGetSymbolAddress(symbol_address, symbol) ) cuMemsetGenericAux(file, line, func_name, symbol_address, 1, 0xFF); } template<typename T, int SIZE> void cuMemset0xFFAux(const char* file, int line, const char* func_name, T (&symbol)[SIZE]) noexcept { T* symbol_address; SAFE_CALL( cudaGetSymbolAddress(symbol_address, symbol) ) cuMemsetGenericAux(file, line, func_name, symbol_address, SIZE, 0xFF); } template<typename T> void cuMemsetAux(const char* file, int line, const char* func_name, T* ptr, size_t num_items, unsigned char mask) noexcept { cuMemsetGenericAux(file, line, func_name, ptr, num_items, mask); } template<typename T> void cuMemsetAux(const char* file, int line, const char* func_name, T& symbol, unsigned char mask) noexcept { T* symbol_address; SAFE_CALL( cudaGetSymbolAddress(symbol_address, symbol) ) cuMemsetGenericAux(file, line, func_name, symbol_address, 1, mask); } template<typename T, int SIZE> void cuMemsetAux(const char* file, int line, const char* func_name, T (&symbol)[SIZE], unsigned char mask) noexcept { T* symbol_address; SAFE_CALL( cudaGetSymbolAddress(symbol_address, symbol) ) cuMemsetGenericAux(file, line, func_name, symbol_address, SIZE, mask); } //============================================================================== ////////////////// // cuMemset2D // ////////////////// template<typename T> void cuMemset2DGenericAux(const char* file, int line, const char* func_name, T* ptr, size_t rows, size_t cols, size_t pitch, unsigned char mask) noexcept { assert(ptr != nullptr && rows > 0 && cols > 0 && pitch >= cols); char api_name[] = "cudaMemset2D(0x__)"; char value1 = static_cast<char>(mask / (0xF)); char value2 = static_cast<char>(mask % (0xF)); api_name[13] = (value1 <= '9') ? '0' + value1 : 'A' + value1 - 10; api_name[14] = (value2 <= '9') ? '0' + value2 : 'A' + value2 - 10; cudaErrorHandler(cudaMemset2D(ptr, pitch * sizeof(T), mask, cols * sizeof(T), rows), api_name, file, line, func_name); } template<typename T> void cuMemset2D0x00Aux(const char* file, int line, const char* func_name, T* ptr, size_t rows, size_t cols, size_t pitch = 0) noexcept { pitch = (pitch == 0) ? 
cols : pitch; cuMemset2DGenericAux(file, line, func_name, ptr, rows, cols, pitch, 0x00); } template<typename T> void cuMemset2D0xFFAux(const char* file, int line, const char* func_name, T* ptr, size_t rows, size_t cols, size_t pitch = 0) noexcept { pitch = (pitch == 0) ? cols : pitch; cuMemset2DGenericAux(file, line, func_name, ptr, rows, cols, pitch, 0xFF); } template<typename T> void cuMemset2DAux(const char* file, int line, const char* func_name, T* ptr, size_t rows, size_t cols, unsigned char mask) noexcept { cuMemset2DGenericAux(file, line, func_name, ptr, rows, cols, cols, mask); } template<typename T> void cuMemset2DAux(const char* file, int line, const char* func_name, T* ptr, size_t rows, size_t cols, size_t pitch, unsigned char mask) noexcept { cuMemset2DGenericAux(file, line, func_name, ptr, rows, cols, pitch, mask); } //============================================================================== //////////////// // cuMemcpy // //////////////// template<typename T> void cuMemcpyGenericAux(const char* file, int line, const char* func_name, const T* input, size_t num_items, T* output, cudaMemcpyKind cuda_memcpy_kind) noexcept { assert(input != nullptr && output != nullptr); if (num_items == 0) return; const char* api_name[] = { "", "cudaMemcpy(ToDevice)", "cudaMemcpy(ToHost)", "cudaMemcpy(DeviceToDevice)", "" }; const auto& selected = api_name[static_cast<int>(cuda_memcpy_kind)]; cudaErrorHandler(cudaMemcpy(output, input, num_items * sizeof(T), cuda_memcpy_kind), selected, file, line, func_name); } //------------------------------------------------------------------------------ template<typename T> void cuMemcpyToDeviceAux(const char* file, int line, const char* func_name, const T* input, size_t num_items, T* output) noexcept { cuMemcpyGenericAux(file, line, func_name, input, num_items, output, cudaMemcpyHostToDevice); } template<typename T> void cuMemcpyToDeviceAux(const char* file, int line, const char* func_name, const T& input, T* output) noexcept { cuMemcpyGenericAux(file, line, func_name, &input, 1, output, cudaMemcpyHostToDevice); } //Fixed Array to Pointer template<typename T, int SIZE> void cuMemcpyToDeviceAux(const char* file, int line, const char* func_name, const T (&input)[SIZE], T* output) noexcept { cuMemcpyGenericAux(file, line, func_name, &input, SIZE, output, cudaMemcpyHostToDevice); } //------------------------------------------------------------------------------ template<typename T> void cuMemcpyToHostAux(const char* file, int line, const char* func_name, const T* input, size_t num_items, T* output) noexcept { cuMemcpyGenericAux(file, line, func_name, input, num_items, output, cudaMemcpyDeviceToHost); } template<typename T> void cuMemcpyToHostAux(const char* file, int line, const char* func_name, const T* input, T& output) noexcept { cuMemcpyGenericAux(file, line, func_name, input, 1, &output, cudaMemcpyDeviceToHost); } //------------------------------------------------------------------------------ template<typename T> void cuMemcpyDevToDevAux(const char* file, int line, const char* func_name, const T* input, size_t num_items, T* output) noexcept { cuMemcpyGenericAux(file, line, func_name, input, num_items, output, cudaMemcpyDeviceToDevice); } //============================================================================== ////////////////// // cuMemcpy2D // ////////////////// template<typename T> void cuMemcpy2DGeneric(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output, size_t dst_pitch, 
cudaMemcpyKind cuda_memcpy_kind) noexcept { assert(input != nullptr && output != nullptr && rows > 0 && cols > 0 && src_pitch >= cols && dst_pitch >= cols); const char* api_name[] = { "", "cuda2DMemcpy(ToDevice)", "cuda2DMemcpy(ToHost)", "cuda2DMemcpy(DeviceToDevice)", "" }; const auto& selected = api_name[static_cast<int>(cuda_memcpy_kind)]; cudaErrorHandler(cudaMemcpy2D(output, dst_pitch * sizeof(T), input, src_pitch * sizeof(T), cols * sizeof(T), rows, cuda_memcpy_kind), selected, file, line, func_name); } //------------------------------------------------------------------------------ template<typename T> void cuMemcpy2DToDeviceAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output, size_t dst_pitch) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, src_pitch, output, dst_pitch, cudaMemcpyHostToDevice); } template<typename T> void cuMemcpy2DToDeviceAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, T* output, size_t dst_pitch) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, cols, output, dst_pitch, cudaMemcpyHostToDevice); } template<typename T> void cuMemcpy2DToDeviceAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, src_pitch, output, cols, cudaMemcpyHostToDevice); } template<typename T> void cuMemcpy2DToDeviceAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, T* output) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, cols, output, cols, cudaMemcpyHostToDevice); } //------------------------------------------------------------------------------ template<typename T> void cuMemcpy2DToHostAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output, size_t dst_pitch) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, src_pitch, output, dst_pitch, cudaMemcpyDeviceToHost); } template<typename T> void cuMemcpy2DToHostAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, T* output, size_t dst_pitch) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, cols, output, dst_pitch, cudaMemcpyDeviceToHost); } template<typename T> void cuMemcpy2DToHostAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, src_pitch, output, cols, cudaMemcpyDeviceToHost); } template<typename T> void cuMemcpy2DToHostAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, T* output) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, cols, output, cols, cudaMemcpyDeviceToHost); } //------------------------------------------------------------------------------ template<typename T> void cuMemcpy2DDevToDevAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output, size_t dst_pitch) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, src_pitch, output, dst_pitch, cudaMemcpyDeviceToDevice); } template<typename T> void cuMemcpy2DDevToDevAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, 
T* output, size_t dst_pitch) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, cols, output, dst_pitch, cudaMemcpyDeviceToDevice); } template<typename T> void cuMemcpy2DDevToDevAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, size_t src_pitch, T* output) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, src_pitch, output, cols, cudaMemcpyDeviceToDevice); } template<typename T> void cuMemcpy2DDevToDevAux(const char* file, int line, const char* func_name, const T* input, size_t rows, size_t cols, T* output) noexcept { cuMemcpy2DGeneric(file, line, func_name, input, rows, cols, cols, output, cols, cudaMemcpyDeviceToDevice); } //============================================================================== //////////////////////// // cuMemcpyToSymbol // //////////////////////// //Reference To Reference template<typename T> void cuMemcpyToSymbolAux(const char* file, int line, const char* func_name, const T& input, T& symbol) noexcept { cudaErrorHandler(cudaMemcpyToSymbol(symbol, &input, sizeof(T)), "cudaMemcpyToSymbol", file, line, func_name); } template<typename T, int SIZE> void cuMemcpyToSymbolAux(const char* file, int line, const char* func_name, const T& input, T (&symbol)[SIZE]) noexcept { cudaErrorHandler(cudaMemcpyToSymbol(symbol, &input, sizeof(T)), "cudaMemcpyToSymbol", file, line, func_name); } //Pointer To Fixed Array template<typename T, int SIZE> void cuMemcpyToSymbolAux(const char* file, int line, const char* func_name, const T* input, size_t num_items, T (&symbol)[SIZE], size_t item_offset = 0) noexcept { assert(num_items + item_offset <= SIZE && input != nullptr); cudaErrorHandler(cudaMemcpyToSymbol(symbol, input, num_items * sizeof(T), item_offset * sizeof(T)), "cudaMemcpyToSymbol", file, line, func_name); } //============================================================================== //////////////////////// // cuMemcpyFromSymbol // //////////////////////// //Reference To Reference template<typename T> void cuMemcpyFromSymbolAux(const char* file, int line, const char* func_name, const T& symbol, T& output) noexcept { cudaErrorHandler(cudaMemcpyFromSymbol(&output, symbol, sizeof(T)), "cudaMemcpyFromSymbol", file, line, func_name); } template<typename T, int SIZE1, int SIZE2> void cuMemcpyFromSymbolAux(const char* file, int line, const char* func_name, const T (&symbol)[SIZE1], T (&output)[SIZE2]) noexcept { assert(SIZE1 < SIZE2); cudaErrorHandler(cudaMemcpyFromSymbol(&output, symbol, SIZE1 * sizeof(T)), "cudaMemcpyFromSymbol", file, line, func_name); } template<typename T, int SIZE1> void cuMemcpyFromSymbolAux(const char* file, int line, const char* func_name, const T (&symbol)[SIZE1], T* output) noexcept { assert(output != nullptr); cudaErrorHandler(cudaMemcpyFromSymbol(output, symbol, SIZE1 * sizeof(T)), "cudaMemcpyFromSymbol", file, line, func_name); } ///@endcond } // namespace detail } // namespace xlib
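//==============================================================================
// Usage sketch (illustrative only): the macros above forward __FILE__,
// __LINE__ and __func__ to the *Aux helpers, which report a failing CUDA call
// with the caller's location through cudaErrorHandler (SafeCudaAPI.cuh).
// The buffer names below (h_data, d_data, h_back) are made up for this
// example; SAFE_CALL is assumed to come from the included SafeCudaAPI.cuh,
// as it is used in the helpers above.
//==============================================================================
#include <vector>

inline void xlib_wrappers_usage_example() {
    const size_t n = 1024;
    std::vector<int> h_data(n, 42), h_back(n, 0);

    int* d_data = nullptr;
    SAFE_CALL( cudaMalloc(&d_data, n * sizeof(int)) )

    cuMemset0x00(d_data, n);                     // -> cuMemset0x00Aux(__FILE__, __LINE__, __func__, d_data, n)
    cuMemcpyToDevice(h_data.data(), n, d_data);  // host -> device, error-checked with caller location
    cuMemcpyToHost(d_data, n, h_back.data());    // device -> host, error-checked with caller location

    SAFE_CALL( cudaFree(d_data) )
}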
// update: updated to use long for some integers associated with file size to support large images. // Cunren Liang, 26-MAR-2018 #include <cuda_runtime.h> #include <math.h> #include <stdio.h> #include <sys/time.h> #define THRD_PER_BLOCK 96 // Number of threads per block (should always %32==0) // --------------- STRUCTS ------------------ struct stateVector { double t; double px; double py; double pz; double vx; double vy; double vz; }; struct Orbit { int nVec; struct stateVector *svs; }; struct OutputImgArrs { double *lat; double *lon; double *z; //double *zsch; double *losang; double *incang; }; struct InputImgArrs { double *rho; double *dopline; float *DEM; }; struct Ellipsoid { double a; double e2; }; struct Peg { double lat; double lon; double hdg; }; struct PegTrans { double mat[3][3]; double ov[3]; double radcur; }; // Constant memory is ideal for const input values __constant__ double d_inpts_dbl[14]; __constant__ int d_inpts_int[7]; // --------------- GPU HELPER FUNCTIONS ---------------- __device__ int interpolateOrbit(struct Orbit *orb, double t, double *xyz, double *vel) { //, int method) { double h[4], hdot[4], f0[4], f1[4], g0[4], g1[4]; double sum = 0.0; int v0 = -1; if ((t < orb->svs[0].t) || (t > orb->svs[orb->nVec-1].t)) return 1; for (int i=0; i<orb->nVec; i++) { if ((orb->svs[i].t >= t) && (v0 == -1)) { v0 = min(max((i-2),0),(orb->nVec-4)); } } f1[0] = t - orb->svs[v0].t; f1[1] = t - orb->svs[v0+1].t; f1[2] = t - orb->svs[v0+2].t; f1[3] = t - orb->svs[v0+3].t; sum = (1.0 / (orb->svs[v0].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+3].t)); f0[0] = 1.0 - (2.0 * (t - orb->svs[v0].t) * sum); sum = (1.0 / (orb->svs[v0+1].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+3].t)); f0[1] = 1.0 - (2.0 * (t - orb->svs[v0+1].t) * sum); sum = (1.0 / (orb->svs[v0+2].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+3].t)); f0[2] = 1.0 - (2.0 * (t - orb->svs[v0+2].t) * sum); sum = (1.0 / (orb->svs[v0+3].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+2].t)); f0[3] = 1.0 - (2.0 * (t - orb->svs[v0+3].t) * sum); h[0] = ((t - orb->svs[v0+1].t) / (orb->svs[v0].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0].t - orb->svs[v0+3].t)); h[1] = ((t - orb->svs[v0].t) / (orb->svs[v0+1].t - orb->svs[v0].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+1].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+1].t - orb->svs[v0+3].t)); h[2] = ((t - orb->svs[v0].t) / (orb->svs[v0+2].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+2].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+2].t - orb->svs[v0+3].t)); h[3] = ((t - orb->svs[v0].t) / (orb->svs[v0+3].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+3].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+3].t - orb->svs[v0+2].t)); sum = ((t - orb->svs[v0+2].t) / (orb->svs[v0].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0].t - orb->svs[v0+3].t)) * (1.0 / (orb->svs[v0].t - orb->svs[v0+1].t)); sum += ((t - orb->svs[v0+1].t) / (orb->svs[v0].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0].t - orb->svs[v0+3].t)) * (1.0 / (orb->svs[v0].t - orb->svs[v0+2].t)); sum += ((t - orb->svs[v0+1].t) / (orb->svs[v0].t - orb->svs[v0+1].t)) * ((t - 
orb->svs[v0+2].t) / (orb->svs[v0].t - orb->svs[v0+2].t)) * (1.0 / (orb->svs[v0].t - orb->svs[v0+3].t)); hdot[0] = sum; sum = ((t - orb->svs[v0+2].t) / (orb->svs[v0+1].t - orb->svs[v0+2].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+1].t - orb->svs[v0+3].t)) * (1.0 / (orb->svs[v0+1].t - orb->svs[v0].t)); sum += ((t - orb->svs[v0].t) / (orb->svs[v0+1].t - orb->svs[v0].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+1].t - orb->svs[v0+3].t)) * (1.0 / (orb->svs[v0+1].t - orb->svs[v0+2].t)); sum += ((t - orb->svs[v0].t) / (orb->svs[v0+1].t - orb->svs[v0].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+1].t - orb->svs[v0+2].t)) * (1.0 / (orb->svs[v0+1].t - orb->svs[v0+3].t)); hdot[1] = sum; sum = ((t - orb->svs[v0+1].t) / (orb->svs[v0+2].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+2].t - orb->svs[v0+3].t)) * (1.0 / (orb->svs[v0+2].t - orb->svs[v0].t)); sum += ((t - orb->svs[v0].t) / (orb->svs[v0+2].t - orb->svs[v0].t)) * ((t - orb->svs[v0+3].t) / (orb->svs[v0+2].t - orb->svs[v0+3].t)) * (1.0 / (orb->svs[v0+2].t - orb->svs[v0+1].t)); sum += ((t - orb->svs[v0].t) / (orb->svs[v0+2].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+2].t - orb->svs[v0+1].t)) * (1.0 / (orb->svs[v0+2].t - orb->svs[v0+3].t)); hdot[2] = sum; sum = ((t - orb->svs[v0+1].t) / (orb->svs[v0+3].t - orb->svs[v0+1].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+3].t - orb->svs[v0+2].t)) * (1.0 / (orb->svs[v0+3].t - orb->svs[v0].t)); sum += ((t - orb->svs[v0].t) / (orb->svs[v0+3].t - orb->svs[v0].t)) * ((t - orb->svs[v0+2].t) / (orb->svs[v0+3].t - orb->svs[v0+2].t)) * (1.0 / (orb->svs[v0+3].t - orb->svs[v0+1].t)); sum += ((t - orb->svs[v0].t) / (orb->svs[v0+3].t - orb->svs[v0].t)) * ((t - orb->svs[v0+1].t) / (orb->svs[v0+3].t - orb->svs[v0+1].t)) * (1.0 / (orb->svs[v0+3].t - orb->svs[v0+2].t)); hdot[3] = sum; g1[0] = h[0] + (2.0 * (t - orb->svs[v0].t) * hdot[0]); g1[1] = h[1] + (2.0 * (t - orb->svs[v0+1].t) * hdot[1]); g1[2] = h[2] + (2.0 * (t - orb->svs[v0+2].t) * hdot[2]); g1[3] = h[3] + (2.0 * (t - orb->svs[v0+3].t) * hdot[3]); sum = (1.0 / (orb->svs[v0].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0].t - orb->svs[v0+3].t)); g0[0] = 2.0 * ((f0[0] * hdot[0]) - (h[0] * sum)); sum = (1.0 / (orb->svs[v0+1].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+2].t)) + (1.0 / (orb->svs[v0+1].t - orb->svs[v0+3].t)); g0[1] = 2.0 * ((f0[1] * hdot[1]) - (h[1] * sum)); sum = (1.0 / (orb->svs[v0+2].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+2].t - orb->svs[v0+3].t)); g0[2] = 2.0 * ((f0[2] * hdot[2]) - (h[2] * sum)); sum = (1.0 / (orb->svs[v0+3].t - orb->svs[v0].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+1].t)) + (1.0 / (orb->svs[v0+3].t - orb->svs[v0+2].t)); g0[3] = 2.0 * ((f0[3] * hdot[3]) - (h[3] * sum)); xyz[0] = (((orb->svs[v0].px * f0[0]) + (orb->svs[v0].vx * f1[0])) * h[0] * h[0]) + (((orb->svs[v0+1].px * f0[1]) + (orb->svs[v0+1].vx * f1[1])) * h[1] * h[1]) + (((orb->svs[v0+2].px * f0[2]) + (orb->svs[v0+2].vx * f1[2])) * h[2] * h[2]) + (((orb->svs[v0+3].px * f0[3]) + (orb->svs[v0+3].vx * f1[3])) * h[3] * h[3]); xyz[1] = (((orb->svs[v0].py * f0[0]) + (orb->svs[v0].vy * f1[0])) * h[0] * h[0]) + (((orb->svs[v0+1].py * f0[1]) + (orb->svs[v0+1].vy * f1[1])) * h[1] * h[1]) + (((orb->svs[v0+2].py * f0[2]) + (orb->svs[v0+2].vy * f1[2])) * h[2] * h[2]) + (((orb->svs[v0+3].py * f0[3]) + (orb->svs[v0+3].vy * f1[3])) * h[3] * h[3]); xyz[2] = (((orb->svs[v0].pz * f0[0]) + (orb->svs[v0].vz * f1[0])) * h[0] * h[0]) + 
(((orb->svs[v0+1].pz * f0[1]) + (orb->svs[v0+1].vz * f1[1])) * h[1] * h[1]) + (((orb->svs[v0+2].pz * f0[2]) + (orb->svs[v0+2].vz * f1[2])) * h[2] * h[2]) + (((orb->svs[v0+3].pz * f0[3]) + (orb->svs[v0+3].vz * f1[3])) * h[3] * h[3]); vel[0] = (((orb->svs[v0].px * g0[0]) + (orb->svs[v0].vx * g1[0])) * h[0]) + (((orb->svs[v0+1].px * g0[1]) + (orb->svs[v0+1].vx * g1[1])) * h[1]) + (((orb->svs[v0+2].px * g0[2]) + (orb->svs[v0+2].vx * g1[2])) * h[2]) + (((orb->svs[v0+3].px * g0[3]) + (orb->svs[v0+3].vx * g1[3])) * h[3]); vel[1] = (((orb->svs[v0].py * g0[0]) + (orb->svs[v0].vy * g1[0])) * h[0]) + (((orb->svs[v0+1].py * g0[1]) + (orb->svs[v0+1].vy * g1[1])) * h[1]) + (((orb->svs[v0+2].py * g0[2]) + (orb->svs[v0+2].vy * g1[2])) * h[2]) + (((orb->svs[v0+3].py * g0[3]) + (orb->svs[v0+3].vy * g1[3])) * h[3]); vel[2] = (((orb->svs[v0].pz * g0[0]) + (orb->svs[v0].vz * g1[0])) * h[0]) + (((orb->svs[v0+1].pz * g0[1]) + (orb->svs[v0+1].vz * g1[1])) * h[1]) + (((orb->svs[v0+2].pz * g0[2]) + (orb->svs[v0+2].vz * g1[2])) * h[2]) + (((orb->svs[v0+3].pz * g0[3]) + (orb->svs[v0+3].vz * g1[3])) * h[3]); return 0; } __device__ void initSpline(double *A, double *R, double *Q) { Q[0] = 0.0; R[0] = 0.0; Q[1] = -0.5 / ((Q[0] / 2.) + 2.); R[1] = ((3. * (A[2] - (2. * A[1]) + A[0])) - (R[0] / 2.)) / ((Q[0] / 2.) + 2.); Q[2] = -0.5 / ((Q[1] / 2.) + 2.); R[2] = ((3. * (A[3] - (2. * A[2]) + A[1])) - (R[1] / 2.)) / ((Q[1] / 2.) + 2.); Q[3] = -0.5 / ((Q[2] / 2.) + 2.); R[3] = ((3. * (A[4] - (2. * A[3]) + A[2])) - (R[2] / 2.)) / ((Q[2] / 2.) + 2.); Q[4] = -0.5 / ((Q[3] / 2.) + 2.); R[4] = ((3. * (A[5] - (2. * A[4]) + A[3])) - (R[3] / 2.)) / ((Q[3] / 2.) + 2.); R[5] = 0.0; R[4] = (Q[4] * R[5]) + R[4]; R[3] = (Q[3] * R[4]) + R[3]; R[2] = (Q[2] * R[3]) + R[2]; R[1] = (Q[1] * R[2]) + R[1]; } // Note we're actually passing in the "length" variable, but width makes more sense in the algorithm __device__ void spline(int indi, int j0, int width, double *A, float *DEM) { int indj; indj = min((j0+1),width); A[0] = DEM[((indi-1)*width)+(indj-1)]; indj = min((j0+2),width); A[1] = DEM[((indi-1)*width)+(indj-1)]; indj = min((j0+3),width); A[2] = DEM[((indi-1)*width)+(indj-1)]; indj = min((j0+4),width); A[3] = DEM[((indi-1)*width)+(indj-1)]; indj = min((j0+5),width); A[4] = DEM[((indi-1)*width)+(indj-1)]; indj = min((j0+6),width); A[5] = DEM[((indi-1)*width)+(indj-1)]; } __device__ double interpolateDEM(float *DEM, double lon, double lat, int width, int length) { bool out_of_bounds = ((int(lat) < 3) || (int(lat) >= (length-2)) || (int(lon) < 3) || (int(lon) >= (width-2))); if (out_of_bounds) return -500.0; double A[6], R[6], Q[6], HC[6]; double t0, t1; int indi, i0, j0; i0 = int(lon) - 2; j0 = int(lat) - 2; indi = min((i0+1), width); // bound by out_of_bounds, so this isn't a concern spline(indi, j0, length, A, DEM); initSpline(A,R,Q); t0 = A[2] - A[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lat - j0 - 2.) * ((R[1] / 2.) + ((lat - j0 - 2.) * ((R[2] - R[1]) / 6.))); HC[0] = A[1] + ((lat - j0 - 2.) * (t0 + t1)); indi = min((i0+2), width); spline(indi, j0, length, A, DEM); initSpline(A,R,Q); t0 = A[2] - A[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lat - j0 - 2.) * ((R[1] / 2.) + ((lat - j0 - 2.) * ((R[2] - R[1]) / 6.))); HC[1] = A[1] + ((lat - j0 - 2.) * (t0 + t1)); indi = min((i0+3), width); spline(indi, j0, length, A, DEM); initSpline(A,R,Q); t0 = A[2] - A[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lat - j0 - 2.) * ((R[1] / 2.) + ((lat - j0 - 2.) * ((R[2] - R[1]) / 6.))); HC[2] = A[1] + ((lat - j0 - 2.) 
* (t0 + t1)); indi = min((i0+4), width); spline(indi, j0, length, A, DEM); initSpline(A,R,Q); t0 = A[2] - A[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lat - j0 - 2.) * ((R[1] / 2.) + ((lat - j0 - 2.) * ((R[2] - R[1]) / 6.))); HC[3] = A[1] + ((lat - j0 - 2.) * (t0 + t1)); indi = min((i0+5), width); spline(indi, j0, length, A, DEM); initSpline(A,R,Q); t0 = A[2] - A[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lat - j0 - 2.) * ((R[1] / 2.) + ((lat - j0 - 2.) * ((R[2] - R[1]) / 6.))); HC[4] = A[1] + ((lat - j0 - 2.) * (t0 + t1)); indi = min((i0+6), width); spline(indi, j0, length, A, DEM); initSpline(A,R,Q); t0 = A[2] - A[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lat - j0 - 2.) * ((R[1] / 2.) + ((lat - j0 - 2.) * ((R[2] - R[1]) / 6.))); HC[5] = A[1] + ((lat - j0 - 2.) * (t0 + t1)); initSpline(HC,R,Q); t0 = HC[2] - HC[1] - (R[1] / 3.) - (R[2] / 6.); t1 = (lon - i0 - 2.) * ((R[1] / 2.) + ((lon - i0 - 2.) * ((R[2] - R[1]) / 6.))); return HC[1] + ((lon - i0 - 2.) * (t0 + t1)); } __device__ void unitvec(double *v, double *vhat) { double mag = norm(3,v); vhat[0] = v[0] / mag; vhat[1] = v[1] / mag; vhat[2] = v[2] / mag; } __device__ void cross(double *u, double *v, double *w) { w[0] = (u[1] * v[2]) - (u[2] * v[1]); w[1] = (u[2] * v[0]) - (u[0] * v[2]); w[2] = (u[0] * v[1]) - (u[1] * v[0]); } __device__ double dot(double *u, double *v) { return ((u[0]*v[0]) + (u[1]*v[1]) + (u[2]*v[2])); } __device__ void xyz2llh(double *xyz, double *llh, struct Ellipsoid *elp) { double d,k,p,q,r,rv,s,t,u,w; p = (pow(xyz[0],2) + pow(xyz[1],2)) / pow(elp->a,2); q = ((1.0 - elp->e2) * pow(xyz[2],2)) / pow(elp->a,2); r = (p + q - pow(elp->e2,2)) / 6.0; s = (pow(elp->e2,2) * p * q) / (4.0 * pow(r,3)); t = cbrt(1.0 + s + sqrt(s * (2.0 + s))); //t = pow((1.0 + s + sqrt(s * (2.0 + s))),(1./3.)); u = r * (1.0 + t + (1.0 / t)); rv = sqrt(pow(u,2) + (pow(elp->e2,2) * q)); w = (elp->e2 * (u + rv - q)) / (2.0 * rv); k = sqrt(u + rv + pow(w,2)) - w; d = (k * sqrt(pow(xyz[0],2) + pow(xyz[1],2))) / (k + elp->e2); llh[0] = atan2(xyz[2],d); llh[1] = atan2(xyz[1],xyz[0]); llh[2] = ((k + elp->e2 - 1.0) * sqrt(pow(d,2) + pow(xyz[2],2))) / k; } __device__ void llh2xyz(double *xyz, double *llh, struct Ellipsoid *elp) { double re; re = elp->a / sqrt(1.0 - (elp->e2 * pow(sin(llh[0]),2))); xyz[0] = (re + llh[2]) * cos(llh[0]) * cos(llh[1]); xyz[1] = (re + llh[2]) * cos(llh[0]) * sin(llh[1]); xyz[2] = ((re * (1.0 - elp->e2)) + llh[2]) * sin(llh[0]); } __device__ void tcnbasis(double *pos, double *vel, double *t, double *c, double *n, struct Ellipsoid *elp) { double llh[3], temp[3]; xyz2llh(pos,llh,elp); n[0] = -cos(llh[0]) * cos(llh[1]); n[1] = -cos(llh[0]) * sin(llh[1]); n[2] = -sin(llh[0]); cross(n,vel,temp); unitvec(temp,c); cross(c,n,temp); unitvec(temp,t); } __device__ void radar2xyz(struct Peg *peg, struct Ellipsoid *elp, struct PegTrans *ptm) { double llh[3], temp[3]; double re, rn; ptm->mat[0][0] = cos(peg->lat) * cos(peg->lon); ptm->mat[0][1] = (-sin(peg->hdg) * sin(peg->lon)) - (sin(peg->lat) * cos(peg->lon) * cos(peg->hdg)); ptm->mat[0][2] = (sin(peg->lon) * cos(peg->hdg)) - (sin(peg->lat) * cos(peg->lon) * sin(peg->hdg)); ptm->mat[1][0] = cos(peg->lat) * sin(peg->lon); ptm->mat[1][1] = (cos(peg->lon) * sin(peg->hdg)) - (sin(peg->lat) * sin(peg->lon) * cos(peg->hdg)); ptm->mat[1][2] = (-cos(peg->lon) * cos(peg->hdg)) - (sin(peg->lat) * sin(peg->lon) * sin(peg->hdg)); ptm->mat[2][0] = sin(peg->lat); ptm->mat[2][1] = cos(peg->lat) * cos(peg->hdg); ptm->mat[2][2] = cos(peg->lat) * sin(peg->hdg); re = elp->a / sqrt(1.0 - (elp->e2 * 
pow(sin(peg->lat),2))); rn = (elp->a * (1.0 - elp->e2)) / pow((1.0 - (elp->e2 * pow(sin(peg->lat),2))),1.5); ptm->radcur = (re * rn) / ((re * pow(cos(peg->hdg),2)) + (rn * pow(sin(peg->hdg),2))); llh[0] = peg->lat; llh[1] = peg->lon; llh[2] = 0.0; llh2xyz(temp,llh,elp); ptm->ov[0] = temp[0] - (ptm->radcur * cos(peg->lat) * cos(peg->lon)); ptm->ov[1] = temp[1] - (ptm->radcur * cos(peg->lat) * sin(peg->lon)); ptm->ov[2] = temp[2] - (ptm->radcur * sin(peg->lat)); } __device__ void xyz2sch(double *schv, double *xyzv, struct PegTrans *ptm, struct Ellipsoid *elp) { double schvt[3], llh[3]; double tempa, tempe2; schvt[0] = xyzv[0] - ptm->ov[0]; schvt[1] = xyzv[1] - ptm->ov[1]; schvt[2] = xyzv[2] - ptm->ov[2]; schv[0] = (ptm->mat[0][0] * schvt[0]) + (ptm->mat[1][0] * schvt[1]) + (ptm->mat[2][0] * schvt[2]); // Switched from using ptm->matinv schv[1] = (ptm->mat[0][1] * schvt[0]) + (ptm->mat[1][1] * schvt[1]) + (ptm->mat[2][1] * schvt[2]); schv[2] = (ptm->mat[0][2] * schvt[0]) + (ptm->mat[1][2] * schvt[1]) + (ptm->mat[2][2] * schvt[2]); tempa = elp->a; tempe2 = elp->e2; elp->a = ptm->radcur; elp->e2 = 0.; xyz2llh(schv,llh,elp); elp->a = tempa; elp->e2 = tempe2; schv[0] = ptm->radcur * llh[1]; schv[1] = ptm->radcur * llh[0]; schv[2] = llh[2]; } // --------------- CUDA FUNCTIONS ------------------ __global__ void runTopo(struct Orbit orbit, struct OutputImgArrs outImgArrs, struct InputImgArrs inImgArrs, long NPIXELS, long OFFSET) { long pixel = (blockDim.x * blockIdx.x) + threadIdx.x; if (pixel < NPIXELS) { // Make sure we're not operating on a non-existent pixel double enumat[3][3]; double xyzsat[3], velsat[3], llhsat[3], vhat[3], that[3], chat[3], nhat[3]; double llh[3], llh_prev[3], xyz[3], xyz_prev[3], sch[3], enu[3], delta[3]; double line, tline, vmag, height, dopfact, costheta, sintheta, alpha, beta; double demlat, demlon, cosalpha, aa, bb, enunorm; int iter; // Because the arrays get read from AND written to, use thread-specific vars until final assignment double thrd_z, thrd_zsch, thrd_lat, thrd_lon, thrd_distance, thrd_losang0, thrd_losang1; double thrd_incang0, thrd_incang1; int thrd_converge; struct Ellipsoid elp; struct Peg peg; struct PegTrans ptm; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * double t0 = inpts_dbl[0]; * double prf = inpts_dbl[1]; */ elp.a = d_inpts_dbl[2]; elp.e2 = d_inpts_dbl[3]; peg.lat = d_inpts_dbl[4]; peg.lon = d_inpts_dbl[5]; peg.hdg = d_inpts_dbl[6]; /* * double ufirstlat = inpts_dbl[7]; * double ufirstlon = inpts_dbl[8]; * double deltalat = inpts_dbl[9]; * double deltalon = inpts_dbl[10]; * double wvl = inpts_dbl[11]; * double ilrl = inpts_dbl[12]; * double thresh = inpts_dbl[13]; * * int NazLooks = inpts_int[0]; * int width = inpts_int[1]; * int udemlength = inpts_int[2]; * int udemwidth = inpts_int[3]; * int numiter = inpts_int[4]; * int extraiter = inpts_int[5]; * int length = inpts_int[6]; NOT USED IN THIS KERNEL * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ line = (pixel + OFFSET) / d_inpts_int[1]; tline = d_inpts_dbl[0] + (d_inpts_int[0] * (line / d_inpts_dbl[1])); if (interpolateOrbit(&orbit,tline,xyzsat,velsat) != 0) { printf("Error getting state vector for bounds computation\n"); //exit(1); } unitvec(velsat,vhat); vmag = norm(3,velsat); xyz2llh(xyzsat,llhsat,&elp); height = llhsat[2]; tcnbasis(xyzsat,velsat,that,chat,nhat,&elp); peg.lat = llhsat[0]; peg.lon = llhsat[1]; radar2xyz(&peg,&elp,&ptm); thrd_converge = 0; thrd_z = 0.0; thrd_zsch = 0.0; thrd_lat = d_inpts_dbl[7] + (0.5 * d_inpts_dbl[9] * d_inpts_int[2]); thrd_lon = 
d_inpts_dbl[8] + (0.5 * d_inpts_dbl[10] * d_inpts_int[3]); dopfact = (0.5 * d_inpts_dbl[11] * (inImgArrs.dopline[pixel] / vmag)) * inImgArrs.rho[pixel]; // START THE ITERATIONS for (iter=0; iter<=(d_inpts_int[4]+d_inpts_int[5]); iter++) { if (thrd_converge == 0) { // Designing this way helps prevent thread divergence as much as possible llh_prev[0] = thrd_lat / (180. / M_PI); llh_prev[1] = thrd_lon / (180. / M_PI); llh_prev[2] = thrd_z; costheta = 0.5 * (((height + ptm.radcur) / inImgArrs.rho[pixel]) + (inImgArrs.rho[pixel] / (height + ptm.radcur)) - (((ptm.radcur + thrd_zsch) / (height + ptm.radcur)) * ((ptm.radcur + thrd_zsch) / inImgArrs.rho[pixel]))); sintheta = sqrt(1.0 - pow(costheta,2)); alpha = (dopfact - (costheta * inImgArrs.rho[pixel] * dot(nhat,vhat))) / dot(vhat,that); beta = -d_inpts_dbl[12] * sqrt((pow(inImgArrs.rho[pixel],2) * pow(sintheta,2)) - pow(alpha,2)); delta[0] = (costheta * inImgArrs.rho[pixel] * nhat[0]) + (alpha * that[0]) + (beta * chat[0]); delta[1] = (costheta * inImgArrs.rho[pixel] * nhat[1]) + (alpha * that[1]) + (beta * chat[1]); delta[2] = (costheta * inImgArrs.rho[pixel] * nhat[2]) + (alpha * that[2]) + (beta * chat[2]); xyz[0] = xyzsat[0] + delta[0]; xyz[1] = xyzsat[1] + delta[1]; xyz[2] = xyzsat[2] + delta[2]; xyz2llh(xyz,llh,&elp); thrd_lat = llh[0] * (180. / M_PI); thrd_lon = llh[1] * (180. / M_PI); demlat = ((thrd_lat - d_inpts_dbl[7]) / d_inpts_dbl[9]) + 1; demlat = fmax(demlat,1.); demlat = fmin(demlat,(d_inpts_int[2]-1.)); demlon = ((thrd_lon - d_inpts_dbl[8]) / d_inpts_dbl[10]) + 1; demlon = fmax(demlon,1.); demlon = fmin(demlon,(d_inpts_int[3]-1.)); thrd_z = interpolateDEM(inImgArrs.DEM,demlon,demlat,d_inpts_int[3],d_inpts_int[2]); thrd_z = fmax(thrd_z,-500.); llh[0] = thrd_lat / (180. / M_PI); llh[1] = thrd_lon / (180. / M_PI); llh[2] = thrd_z; llh2xyz(xyz,llh,&elp); xyz2sch(sch,xyz,&ptm,&elp); thrd_zsch = sch[2]; thrd_distance = sqrt(pow((xyz[0]-xyzsat[0]),2) + pow((xyz[1]-xyzsat[1]),2) + pow((xyz[2]-xyzsat[2]),2)) - inImgArrs.rho[pixel]; thrd_converge = (fabs(thrd_distance) <= d_inpts_dbl[13]); if ((thrd_converge == 0) && (iter > d_inpts_int[4])) { // Yay avoiding thread divergence! llh2xyz(xyz_prev,llh_prev,&elp); xyz[0] = 0.5 * (xyz_prev[0] + xyz[0]); xyz[1] = 0.5 * (xyz_prev[1] + xyz[1]); xyz[2] = 0.5 * (xyz_prev[2] + xyz[2]); xyz2llh(xyz,llh,&elp); thrd_lat = llh[0] * (180. / M_PI); thrd_lon = llh[1] * (180. / M_PI); thrd_z = llh[2]; xyz2sch(sch,xyz,&ptm,&elp); thrd_zsch = sch[2]; thrd_distance = sqrt(pow((xyz[0]-xyzsat[0]),2) + pow((xyz[1]-xyzsat[1]),2) + pow((xyz[2]-xyzsat[2]),2)) - inImgArrs.rho[pixel]; } } } // Final computation costheta = 0.5 * (((height + ptm.radcur) / inImgArrs.rho[pixel]) + (inImgArrs.rho[pixel] / (height + ptm.radcur)) - (((ptm.radcur + thrd_zsch) / (height + ptm.radcur)) * ((ptm.radcur + thrd_zsch) / inImgArrs.rho[pixel]))); sintheta = sqrt(1.0 - pow(costheta,2)); alpha = (dopfact - (costheta * inImgArrs.rho[pixel] * dot(nhat,vhat))) / dot(vhat,that); beta = -d_inpts_dbl[12] * sqrt((pow(inImgArrs.rho[pixel],2) * pow(sintheta,2)) - pow(alpha,2)); delta[0] = (costheta * inImgArrs.rho[pixel] * nhat[0]) + (alpha * that[0]) + (beta * chat[0]); delta[1] = (costheta * inImgArrs.rho[pixel] * nhat[1]) + (alpha * that[1]) + (beta * chat[1]); delta[2] = (costheta * inImgArrs.rho[pixel] * nhat[2]) + (alpha * that[2]) + (beta * chat[2]); xyz[0] = xyzsat[0] + delta[0]; xyz[1] = xyzsat[1] + delta[1]; xyz[2] = xyzsat[2] + delta[2]; xyz2llh(xyz,llh,&elp); thrd_lat = llh[0] * (180. / M_PI); thrd_lon = llh[1] * (180. 
/ M_PI); thrd_z = llh[2]; thrd_distance = sqrt(pow((xyz[0]-xyzsat[0]),2) + pow((xyz[1]-xyzsat[1]),2) + pow((xyz[2]-xyzsat[2]),2)) - inImgArrs.rho[pixel]; // Expanded from Linalg::enubasis/Linalg::tranmat enumat[0][0] = -sin(llh[1]); enumat[1][0] = -sin(llh[0]) * cos(llh[1]); enumat[2][0] = cos(llh[0]) * cos(llh[1]); enumat[0][1] = cos(llh[1]); enumat[1][1] = -sin(llh[0]) * sin(llh[1]); enumat[2][1] = cos(llh[0]) * sin(llh[1]); enumat[0][2] = 0.0; enumat[1][2] = cos(llh[0]); enumat[2][2] = sin(llh[0]); // Expanded from Linalg::matvec enu[0] = (enumat[0][0] * delta[0]) + (enumat[0][1] * delta[1]) + (enumat[0][2] * delta[2]); enu[1] = (enumat[1][0] * delta[0]) + (enumat[1][1] * delta[1]) + (enumat[1][2] * delta[2]); enu[2] = (enumat[2][0] * delta[0]) + (enumat[2][1] * delta[1]) + (enumat[2][2] * delta[2]); cosalpha = fabs(enu[2]) / norm(3,enu); thrd_losang0 = acos(cosalpha) * (180. / M_PI); thrd_losang1 = (atan2(-enu[1],-enu[0]) - (0.5*M_PI)) * (180. / M_PI); thrd_incang0 = acos(costheta) * (180. / M_PI); thrd_zsch = inImgArrs.rho[pixel] * sintheta; demlat = ((thrd_lat - d_inpts_dbl[7]) / d_inpts_dbl[9]) + 1; demlat = fmax(demlat,2.); demlat = fmin(demlat,(d_inpts_int[2]-1.)); demlon = ((thrd_lon - d_inpts_dbl[8]) / d_inpts_dbl[10]) + 1; demlon = fmax(demlon,2.); demlon = fmin(demlon,(d_inpts_int[3]-1.)); aa = interpolateDEM(inImgArrs.DEM,(demlon-1.),demlat,d_inpts_int[3],d_inpts_int[2]); bb = interpolateDEM(inImgArrs.DEM,(demlon+1.),demlat,d_inpts_int[3],d_inpts_int[2]); alpha = ((bb - aa) * (180. / M_PI)) / (2.0 * (elp.a / sqrt(1.0 - (elp.e2 * pow(sin(thrd_lat / (180. / M_PI)),2)))) * d_inpts_dbl[10]); aa = interpolateDEM(inImgArrs.DEM,demlon,(demlat-1.),d_inpts_int[3],d_inpts_int[2]); bb = interpolateDEM(inImgArrs.DEM,demlon,(demlat+1.),d_inpts_int[3],d_inpts_int[2]); beta = ((bb - aa) * (180. / M_PI)) / (2.0 * ((elp.a * (1.0 - elp.e2)) / pow((1.0 - (elp.e2 * pow(sin(thrd_lat / (180. / M_PI)),2))),1.5)) * d_inpts_dbl[9]); enunorm = norm(3,enu); enu[0] = enu[0] / enunorm; enu[1] = enu[1] / enunorm; enu[2] = enu[2] / enunorm; costheta = ((enu[0] * alpha) + (enu[1] * beta) - enu[2]) / sqrt(1.0 + pow(alpha,2) + pow(beta,2)); thrd_incang1 = acos(costheta) * (180. 
/ M_PI); // Leave out masking stuff for now (though it's doable) // Finally write to reference arrays outImgArrs.lat[pixel] = thrd_lat; outImgArrs.lon[pixel] = thrd_lon; outImgArrs.z[pixel] = thrd_z; //outImgArrs.zsch[pixel] = thrd_zsch; outImgArrs.losang[2*pixel] = thrd_losang0; outImgArrs.losang[(2*pixel)+1] = thrd_losang1; outImgArrs.incang[2*pixel] = thrd_incang0; outImgArrs.incang[(2*pixel)+1] = thrd_incang1; } } // --------------- CPU HELPER FUNCTIONS ----------------- double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void setOrbit(struct Orbit *orb) { orb->svs = (struct stateVector *)malloc(orb->nVec * sizeof(struct stateVector)); } void freeOrbit(struct Orbit *orb) { free(orb->svs); } size_t getDeviceMem() { size_t freeByte, totalByte; cudaMemGetInfo(&freeByte, &totalByte); totalByte = (totalByte / 1e9) * 1e9; // Round down to nearest GB return totalByte; } // --------------- C FUNCTIONS ---------------- void runGPUTopo(long nBlock, long numPix, double *h_inpts_dbl, int *h_inpts_int, float *h_DEM, double *h_rho, double *h_dopline, int h_orbNvec, double *h_orbSvs, double **accArr) { //double *h_lat, *h_lon, *h_z, *h_incang, *h_losang; // , *h_zsch; double iStartCpy, iStartRun, iEndRun, iEndCpy; int i; struct stateVector *d_svs; double *d_rho, *d_dopline, *d_lat, *d_lon, *d_z, *d_incang, *d_losang; // , *d_zsch; float *d_DEM; struct InputImgArrs inImgArrs; struct OutputImgArrs outImgArrs; struct Orbit orbit; cudaSetDevice(0); printf(" Allocating host and general GPU memory...\n"); size_t nb_pixels = numPix * sizeof(double); // size of rho/dopline/lat/lon/z/zsch/incang/losang size_t nb_DEM = h_inpts_int[3] * h_inpts_int[2] * sizeof(float); // size of DEM /* h_lat = (double *)malloc(nb_pixels); h_lon = (double *)malloc(nb_pixels); h_z = (double *)malloc(nb_pixels); //h_zsch = (double *)malloc(nb_pixels); h_incang = (double *)malloc(2 * nb_pixels); h_losang = (double *)malloc(2 * nb_pixels); */ orbit.nVec = h_orbNvec; setOrbit(&orbit); for (i=0; i<h_orbNvec; i++) { orbit.svs[i].t = h_orbSvs[7*i]; orbit.svs[i].px = h_orbSvs[(7*i)+1]; orbit.svs[i].py = h_orbSvs[(7*i)+2]; orbit.svs[i].pz = h_orbSvs[(7*i)+3]; orbit.svs[i].vx = h_orbSvs[(7*i)+4]; orbit.svs[i].vy = h_orbSvs[(7*i)+5]; orbit.svs[i].vz = h_orbSvs[(7*i)+6]; } cudaMalloc((void**)&d_svs, (orbit.nVec*sizeof(struct stateVector))); cudaMalloc((double**)&d_rho, nb_pixels); cudaMalloc((double**)&d_dopline, nb_pixels); cudaMalloc((float**)&d_DEM, nb_DEM); printf(" Copying general memory to GPU...\n"); iStartCpy = cpuSecond(); cudaMemcpy(d_svs, orbit.svs, (orbit.nVec*sizeof(struct stateVector)), cudaMemcpyHostToDevice); cudaMemcpy(d_rho, h_rho, nb_pixels, cudaMemcpyHostToDevice); cudaMemcpy(d_dopline, h_dopline, nb_pixels, cudaMemcpyHostToDevice); cudaMemcpy(d_DEM, h_DEM, nb_DEM, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_inpts_dbl, h_inpts_dbl, (14*sizeof(double))); cudaMemcpyToSymbol(d_inpts_int, h_inpts_int, (7*sizeof(int))); freeOrbit(&orbit); orbit.svs = d_svs; inImgArrs.DEM = d_DEM; inImgArrs.rho = d_rho; inImgArrs.dopline = d_dopline; printf(" Allocating block memory (%d pixels per image)...\n", numPix); cudaMalloc((double**)&d_lat, nb_pixels); cudaMalloc((double**)&d_lon, nb_pixels); cudaMalloc((double**)&d_z, nb_pixels); //cudaMalloc((double**)&d_zsch, nb_pixels); cudaMalloc((double**)&d_incang, (2*nb_pixels)); cudaMalloc((double**)&d_losang, (2*nb_pixels)); outImgArrs.lat = d_lat; outImgArrs.lon = d_lon; outImgArrs.z = d_z; outImgArrs.incang = d_incang; 
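// The remaining device output buffer (losang) is attached just below; the kernel
// writes directly into these device arrays, and the results are copied back into
// accArr once the launch has completed.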
outImgArrs.losang = d_losang; //outImgArrs.zsch = d_zsch; dim3 block(THRD_PER_BLOCK); dim3 grid((numPix + (THRD_PER_BLOCK - 1)) / THRD_PER_BLOCK); // == ceil(numPix / THRD_PER_BLOCK), preserves warp sizing if ((grid.x * THRD_PER_BLOCK) > numPix) printf(" (NOTE: There will be %d 'empty' threads per image block).\n", ((grid.x*THRD_PER_BLOCK)-numPix)); if (nBlock > -1) printf(" Starting GPU Topo for block %d...\n", nBlock); else printf(" Starting GPU Topo for remaining lines...\n"); iStartRun = cpuSecond(); if (nBlock > -1) runTopo <<<grid, block>>>(orbit, outImgArrs, inImgArrs, numPix, (nBlock*numPix)); else { long offset = abs(nBlock); runTopo <<<grid, block>>>(orbit, outImgArrs, inImgArrs, numPix, offset); } cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); // Double-duty of also waiting for the Topo algorithm to finish if (errSync != cudaSuccess) { printf(" Sync kernel error: %s\n", cudaGetErrorString(errSync)); } if (errAsync != cudaSuccess) { printf(" Async kernel error: %s\n", cudaGetErrorString(errAsync)); } iEndRun = cpuSecond(); if (nBlock > -1) printf(" GPU finished block %d in %f s.\n", nBlock, (iEndRun-iStartRun)); else printf(" GPU finished remaining lines in %f s.\n", (iEndRun-iStartRun)); printf(" Copying memory back to host...\n"); cudaMemcpy(accArr[0], outImgArrs.lat, nb_pixels, cudaMemcpyDeviceToHost); // Copy memory from device to host with offset cudaMemcpy(accArr[1], outImgArrs.lon, nb_pixels, cudaMemcpyDeviceToHost); cudaMemcpy(accArr[2], outImgArrs.z, nb_pixels, cudaMemcpyDeviceToHost); //cudaMemcpy(h_zsch, outImgArrs.zsch, nb_pixels, cudaMemcpyDeviceToHost); cudaMemcpy(accArr[3], outImgArrs.incang, (2*nb_pixels), cudaMemcpyDeviceToHost); cudaMemcpy(accArr[4], outImgArrs.losang, (2*nb_pixels), cudaMemcpyDeviceToHost); iEndCpy = cpuSecond(); if (nBlock > -1) printf(" GPU finished block %d (with memory copies) in %f s.\n", nBlock, (iEndCpy-iStartCpy)); else printf(" GPU finished remaining lines (with memory copies) in %f s.\n", (iEndCpy-iStartCpy)); printf(" Cleaning device memory and returning to main Topo function...\n"); cudaFree(d_svs); cudaFree(d_rho); cudaFree(d_dopline); cudaFree(d_lat); cudaFree(d_lon); cudaFree(d_z); //cudaFree(d_zsch); cudaFree(d_incang); cudaFree(d_losang); cudaFree(d_DEM); cudaDeviceReset(); /* accArr[0] = h_lat; accArr[1] = h_lon; accArr[2] = h_z; accArr[3] = h_incang; accArr[4] = h_losang; */ //accArr[5] = h_zsch; // Won't be used until we add the masking stuff }
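// --------------- CONSTANT-MEMORY PARAMETER PATTERN (SKETCH) ----------------
// runTopo receives all of its scalar inputs through the __constant__ arrays
// d_inpts_dbl / d_inpts_int, uploaded once with cudaMemcpyToSymbol in
// runGPUTopo, so every thread reads them through the constant cache instead of
// taking them as kernel arguments. A reduced, self-contained sketch of that
// pattern follows; kParams, scaleKernel and uploadParamsAndRun are illustrative
// names and are not part of the code above.
#include <cuda_runtime.h>
#include <stdio.h>

__constant__ double kParams[2];   // e.g. { scale, offset } -- example values only

__global__ void scaleKernel(const double *in, double *out, long n) {
    long i = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (i < n)
        out[i] = (kParams[0] * in[i]) + kParams[1];   // served from the constant cache
}

void uploadParamsAndRun(const double *d_in, double *d_out, long n) {
    const double h_params[2] = {2.0, 1.0};
    cudaMemcpyToSymbol(kParams, h_params, 2 * sizeof(double));   // same call runGPUTopo uses

    dim3 block(THRD_PER_BLOCK);                                  // 96, as defined above
    dim3 grid((n + (THRD_PER_BLOCK - 1)) / THRD_PER_BLOCK);      // == ceil(n / block.x)
    scaleKernel <<<grid, block>>>(d_in, d_out, n);
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) printf("kernel error: %s\n", cudaGetErrorString(err));
}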
#define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) const int block_num = 512; #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) const int threadsPerBlock = sizeof(unsigned long long) * 8; __global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){ const int BlockSize=2048; const int paddingLevel=5; __shared__ float buffer4[BlockSize*4]; __shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ float runningsum=0,runningsum2=0; for (int j=0;j<n;j+=BlockSize*4){ int n24_i=min(n-j,BlockSize*4); int n24=(n24_i+3)&~3; int n2=n24>>2; for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){ if (k+3<n24_i){ float v1=inp[i*n+j+k]; float v2=inp[i*n+j+k+1]; v2+=v1; float v3=inp[i*n+j+k+2]; float v4=inp[i*n+j+k+3]; v4+=v3; v3+=v2; v4+=v2; buffer4[k]=v1; buffer4[k+1]=v2; buffer4[k+2]=v3; buffer4[k+3]=v4; buffer[(k>>2)+(k>>(2+paddingLevel))]=v4; }else{ float v=0; for (int k2=k;k2<n24_i;k2++){ v+=inp[i*n+j+k2]; buffer4[k2]=v; } for (int k2=n24_i;k2<n24;k2++){ buffer4[k2]=v; } buffer[(k>>2)+(k>>(2+paddingLevel))]=v; } } int u=0; for (;(2<<u)<=n2;u++){ __syncthreads(); for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){ int i1=(((k<<1)+2)<<u)-1; int i2=(((k<<1)+1)<<u)-1; i1+=i1>>paddingLevel; i2+=i2>>paddingLevel; buffer[i1]+=buffer[i2]; } } u--; for (;u>=0;u--){ __syncthreads(); for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){ int i1=(((k<<1)+3)<<u)-1; int i2=(((k<<1)+2)<<u)-1; i1+=i1>>paddingLevel; i2+=i2>>paddingLevel; buffer[i1]+=buffer[i2]; } } __syncthreads(); for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){ if (k!=0){ int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel); buffer4[k]+=buffer[k2]; buffer4[k+1]+=buffer[k2]; buffer4[k+2]+=buffer[k2]; buffer4[k+3]+=buffer[k2]; } } __syncthreads(); for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){ out[i*n+j+k]=buffer4[k]+runningsum; } float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2; float r2=runningsum+t; runningsum2=t-(r2-runningsum); runningsum=r2; __syncthreads(); } } } __global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){ int base=1; while (base<n) base<<=1; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){ float q=query[i*m+j]*dataset[i*n+n-1]; int r=n-1; for (int k=base;k>=1;k>>=1) if (r>=k && dataset[i*n+r-k]>=q) r-=k; result[i*m+j]=r; } } } template <unsigned int BlockSize> __global__ void farthestpointsamplingKernel(int b,int n,int c,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ if (m<=0) return; // const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int old=0; if (threadIdx.x==0) idxs[i*m+0]=old; //initialize temp for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float d = 0; float p1, p2; for (int l=0;l<c;l++){ p1 = dataset[i*n*c+old*c+l]; p2 = dataset[i*n*c+k*c+l]; d += (p2-p1) * (p2-p1); } float 
d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } template <unsigned int BlockSize> __global__ void farthestpointsamplingwithdistKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ if (m<=0) return; // const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int old=0; if (threadIdx.x==0) idxs[i*m+0]=old; //initialize temp for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float d = 0; d = dataset[i * n * n + old * n + k]; float d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } template <unsigned int BlockSize> __global__ void farthestpointsamplingwithpreidxKernel(int b,int n,int c,int m,int m1,const float * __restrict__ dataset,const int * __restrict__ preidx,float * __restrict__ temp,int * __restrict__ idxs){ // b: batch_size, n: ndataset, c: channel_num, m: points_num after fps, m1: preidx number // dataset: [b, n, c] preidx: [b, m1], temp: [b, n], idxs: [b, m] if (m<=0) return; // const int BlockSize=512; __shared__ float dists[BlockSize]; __shared__ int dists_i[BlockSize]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x;j<n;j+=blockDim.x){ temp[blockIdx.x*n+j]=1e38; } int pre_idx; for (int j=threadIdx.x;j<n;j+=blockDim.x){ // update temp metrics float pre_best = 1e38; float pre_p1, pre_p2; for (int k=0; k<m1; k++){ pre_idx = preidx[i * m1 + k]; float pre_d = 0; for (int l=0; l < c; l++){ pre_p1 = dataset[i * n * c + pre_idx * c + l]; pre_p2 = dataset[i * n * c + j * c + l]; pre_d += (pre_p2 - pre_p1) * (pre_p2 - pre_p1); } pre_best = min(pre_best, pre_d); } temp[blockIdx.x*n+j] = pre_best; } // then find current smallest distance as current old __syncthreads(); int old=0; float pre_best = -1; for (int j=0; j<n; j++){ if (pre_best < temp[blockIdx.x*n+j]){ pre_best = temp[blockIdx.x*n+j]; old = j; } } if (threadIdx.x==0) idxs[i*m+0]=old; //initialize temp __syncthreads(); for (int j=1;j<m;j++){ int besti=0; float best=-1; for (int k=threadIdx.x;k<n;k+=blockDim.x){ float td=temp[blockIdx.x*n+k]; float d = 0; float p1, p2; for (int l=0;l<c;l++){ p1 = dataset[i*n*c+old*c+l]; p2 = dataset[i*n*c+k*c+l]; d += (p2-p1) * (p2-p1); } float d2=min(d,td); if (d2!=td) temp[blockIdx.x*n+k]=d2; if (d2>best){ best=d2; besti=k; } } dists[threadIdx.x]=best; dists_i[threadIdx.x]=besti; for (int u=0;(1<<u)<blockDim.x;u++){ __syncthreads(); if (threadIdx.x<(blockDim.x>>(u+1))){ int i1=(threadIdx.x*2)<<u; int i2=(threadIdx.x*2+1)<<u; if (dists[i1]<dists[i2]){ dists[i1]=dists[i2]; dists_i[i1]=dists_i[i2]; } } } __syncthreads(); 
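// After the pairwise max-reduction above, dists_i[0] holds the index of the
// point farthest from the already-selected samples, so every thread can read
// it as the next centroid.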
old=dists_i[0]; if (threadIdx.x==0) idxs[i*m+j]=old; } } } // inp: [b, n, c] idx: [b, m] // out: [b, m, c] __global__ void gatherpointKernel(int b,int n,int m,int c,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){ int loop_time = b * m * c; CUDA_1D_KERNEL_LOOP(index, loop_time){ int cur_batch_size = index / (m * c); int cur_point_idx = index / c; int cur_channel = index % c; int a=idx[cur_point_idx]; int current_idx = cur_batch_size * (n * c) + a * c + cur_channel; out[index] = inp[current_idx]; } } // out_g: [b, m, c] idx: [b, m] // inp_g: [b, n, c] __global__ void scatteraddpointKernel(int b,int n,int m,int c,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){ int loop_time = b * m * c; CUDA_1D_KERNEL_LOOP(index, loop_time){ int cur_batch_size = index / (m * c); int cur_point_idx = index / c; int cur_channel = index % c; int a = idx[cur_point_idx]; int current_idx = cur_batch_size * n * c + a * c + cur_channel; atomicAdd(&inp_g[current_idx],out_g[index]); } } // inp: [b, n, c] mask: [b, n] // out: [b, proposal_num, c] __global__ void GatherByMaskKernel(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){ for (int cur_batch=blockIdx.x; cur_batch<b; cur_batch+=gridDim.x){ const float *cur_inp = inp + cur_batch * n * c; const float *cur_mask = mask + cur_batch * n; float* cur_out = out + cur_batch * proposal_num * c; int proposal_cnt = 0; int loop_time, tmp_channel_idx; for (int cur_pts=0; cur_pts<n; cur_pts++){ if(int(cur_mask[cur_pts]) == 0) continue; if(proposal_cnt == proposal_num) break; // a valid proposal if (proposal_cnt == 0){ loop_time = proposal_num * c; for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){ tmp_channel_idx = i % c; cur_out[i] = cur_inp[cur_pts * c + tmp_channel_idx]; } __syncthreads(); } else { loop_time = c; for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){ cur_out[proposal_cnt * c + i] = cur_inp[cur_pts * c + i]; } __syncthreads(); } proposal_cnt += 1; } } } void cumsumLauncher(int b,int n,const float * inp,float * out){ cumsumKernel<<<32,512>>>(b,n,inp,out); } //require b*n working space void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){ cumsumKernel<<<32,512>>>(b,n,inp_p,temp); binarysearchKernel<<<dim3(32,8,1),512>>>(b,n,m,temp,inp_r,out); } //require 32*n working space void farthestpointsamplingLauncher(int b,int n,int c,int m,const float * inp,float * temp,int * out){ farthestpointsamplingKernel<1024><<<b,1024>>>(b,n,c,m,inp,temp,out); } //require 32*n working space void farthestpointsamplingwithdistLauncher(int b,int n,int m,const float * inp,float * temp,int * out){ farthestpointsamplingwithdistKernel<1024><<<b,1024>>>(b,n,m,inp,temp,out); } //require 32*n working space void farthestpointsamplingwithpreidxLauncher(int b,int n,int c,int m,int m1,const float * inp, const int* preidx,float * temp,int * out){ farthestpointsamplingwithpreidxKernel<1024><<<b,1024>>>(b,n,c,m,m1,inp,preidx,temp,out); } void gatherpointLauncher(int b,int n,int m,int c,const float * inp,const int * idx,float * out){ gatherpointKernel<<<block_num,threadsPerBlock>>>(b,n,m,c,inp,idx,out); //int thread_num = 512 / b; // gatherpointKernel<<<dim3(256,8,1),512>>>(b,n,m,inp,idx,out); } void scatteraddpointLauncher(int b,int n,int m,int c,const float * out_g,const int * idx,float * inp_g){ scatteraddpointKernel<<<block_num,threadsPerBlock>>>(b,n,m,c,out_g,idx,inp_g); } void GatherByMaskLauncher(int b,int n,int c,int 
proposal_num,const float *inp,const float *mask,float *out){ GatherByMaskKernel<<<block_num,threadsPerBlock>>>(b,n,c,proposal_num,inp,mask,out); }
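// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original source): one plausible way a
// host caller might drive farthestpointsamplingLauncher followed by
// gatherpointLauncher. The example_fps_gather wrapper and its buffer names are
// hypothetical; error handling is reduced to a single check for brevity, and
// the temp buffer is sized b*n floats to match how the kernel indexes
// temp[blockIdx.x*n + k] when launched with b blocks.
static void example_fps_gather(int b, int n, int c, int m,
                               const float *d_points,  // device ptr, [b, n, c]
                               float *d_sampled) {     // device ptr, [b, m, c]
  float *d_temp = nullptr;   // per-batch running min-distance buffer, [b, n]
  int   *d_idx  = nullptr;   // sampled point indices, [b, m]
  cudaMalloc(&d_temp, sizeof(float) * b * n);
  cudaMalloc(&d_idx,  sizeof(int) * b * m);
  // Pick m farthest points per batch, then gather their features.
  farthestpointsamplingLauncher(b, n, c, m, d_points, d_temp, d_idx);
  gatherpointLauncher(b, n, m, c, d_points, d_idx, d_sampled);
  if (cudaDeviceSynchronize() != cudaSuccess) {
    // handle launch/runtime failure here
  }
  cudaFree(d_temp);
  cudaFree(d_idx);
}
// ---------------------------------------------------------------------------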
#define BIN_SIZE 32 using namespace std; #define CHECK(res) if(res!=cudaSuccess){exit(-1);} #define BLOCKNUM 1024 #define THREADNUM 64 __global__ void _k_copy_padding_data_blob_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad * 2; int output_length = output_dim * output_dim * channel; int in_start, row, col; int data_row, data_col; int indata_length = input_dim * input_dim * channel; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row >= pad && row < output_dim - pad) { if (col >= pad && col < output_dim - pad) { in_start = ((row - pad) * input_dim + (col - pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * indata_length + in_start]; } else data_output[j] = 0.0; } else data_output[j] = 0.0; } } extern "C" void copy_padding_data_blob_gpu(float_t *&data, int num, int input_dim, int channel, int pad, float_t *&out_data) { _k_copy_padding_data_blob_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_append_padding_data_blob_gpu(float_t **data_input, float_t **data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad; int output_length = output_dim * output_dim * channel; int out_start, in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row < output_dim - pad) { if (col < output_dim - pad) { in_start = ((row) * input_dim + col) * channel + data_col % channel; data_output[data_row][out_start] = data_input[data_row][in_start]; } else data_output[data_row][out_start] = 0.0; } else data_output[data_row][out_start] = 0.0; } } extern "C" void append_padding_data_blob_gpu(float_t **&data, int num, int input_dim, int channel, int pad, float_t **&out_data) { _k_append_padding_data_blob_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_copy_unpadding_data_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int length = input_dim * input_dim * channel; int output_dim = input_dim + 2 * pad; int indata_length = output_dim * output_dim * channel; int in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; data_col = j % length; row = data_col / (input_dim * channel); //col = (data_col % (input_dim * channel)) / channel; col = (data_col / channel) % input_dim; data_output[j] = 0.0; in_start = ((row + pad) * output_dim + (col + pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * indata_length + in_start]; } } extern "C" void copy_unpadding_data_gpu(float_t *&data, int num, int input_dim, int channel, int pad, float_t 
*&out_data) { _k_copy_unpadding_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_append_unpadding_data_gpu(float_t **data_input, float_t **data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_length = input_dim * input_dim * channel; int output_dim = input_dim + pad; int out_start, in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col; row = data_col / (input_dim * channel); //col =(data_col % (input_dim * channel)) / channel; col = (data_col / channel) % input_dim; in_start = ((row) * output_dim + (col)) * channel + data_col % channel; data_output[data_row][out_start] = data_input[data_row][in_start]; } } extern "C" void append_unpadding_data_gpu(float_t **&data, int num, int input_dim, int channel, int pad, float_t **&out_data) { _k_append_unpadding_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_copy_padding_data_sign_gpu(unsigned int *data_input, unsigned int *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad * 2; int output_length = output_dim * output_dim * channel; int input_length = input_dim * input_dim * channel; int in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row >= pad && row < output_dim - pad) { if (col >= pad && col < output_dim - pad) { in_start = ((row - pad) * input_dim + (col - pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * input_length + in_start]; } else data_output[j] = 0; } else data_output[j] = 0; } } extern "C" void copy_padding_data_sign_gpu(unsigned int *&data, int num, int input_dim, int channel, int pad, unsigned int *&out_data) { _k_copy_padding_data_sign_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_img2col_gpu(float_t *data_input, float_t *data_output, int num, int block_size, int output_length, int channel, int input_dim, int output_dim, int stride, int kernel_size) { int tid = threadIdx.x; int bid = blockIdx.x; int border = input_dim - output_dim; int out_start, in_start, in; int data_row, data_col; int k_row, k_col, c; int indata_length = input_dim * input_dim * channel; int outdata_length = output_length * block_size; for (int j = bid; j < num * output_length; j += BLOCKNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col * (block_size); in_start = (data_col + (data_col / output_dim) * border) * channel; for (int i = tid; i < block_size; i += THREADNUM) { k_row = (i % (kernel_size * kernel_size)) / kernel_size; k_col = i % kernel_size; c = i / (kernel_size * kernel_size); in = in_start + (k_row * input_dim + k_col) * channel + c; data_output[data_row * outdata_length + out_start + i] = data_input[data_row * indata_length + in]; } __syncthreads(); // for (int c = 0; c < channel; c++) { // for (int ki = 0; ki < 
kernel_size; ki++) { // for (int kj = 0; kj < kernel_size; kj++) { // in = in_start + (ki * input_dim + kj) * channel + c; // out = out_start + c * block_size + ki * kernel_size + kj; // data_output[data_row * outdata_length + out] = // data_input[data_row * indata_length + in]; // } // } // } } } extern "C" void img2col_gpu(float_t *&data, int num, int channel, int input_dim, int kernel_size, int stride, int output_dim, float_t *&pad_input) { int block_size = kernel_size * kernel_size * channel; int output_length = output_dim * output_dim; _k_img2col_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, pad_input, num, block_size, output_length, channel, input_dim, output_dim, stride, kernel_size); cudaThreadSynchronize(); } __global__ void _k_col2img_gpu(float_t *data, int num, int channel, int input_dim, int output_dim, int stride, int kernel_size, int length, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; int data_row, data_col; int k_index, outset_index; int block_size = kernel_size * kernel_size * channel; int indata_length = output_dim * output_dim * block_size; int c; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; out_data[i] = 0.0; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } //stride for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; k_index = ((startset_i - outset_i * stride) * kernel_size + (startset_j - outset_j * stride)) + c * kernel_size * kernel_size; outset_index = (outset_i * output_dim + outset_j) * block_size; out_data[i] += data[data_row * indata_length + outset_index + k_index]; } } } extern "C" void col2img_gpu(float_t *&data, int num, int channel, int input_dim, int kernel_size, int stride, int output_dim, float_t *&pad_input) { int length = input_dim * input_dim * channel; _k_col2img_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, channel, input_dim, output_dim, stride, kernel_size, length, pad_input); cudaThreadSynchronize(); } __global__ void _k_img2bitcol_gpu(unsigned int *data_input, unsigned int *data_output, int num, int block_size, int output_length, int channel, int input_dim, int output_dim, int stride, int length, int kernel_size) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int border = input_dim - output_dim; int sp[BIN_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int R[BIN_SIZE] = { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, 2147483648 }; int end_flag = 
kernel_size * kernel_size * channel - 1; int count = 0, index, out_start, in_start, in; int data_row, data_col; unsigned int data = 0; int outdata_length = output_length * block_size; int indata_length = input_dim * input_dim * channel; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col * block_size; in_start = (data_col + (data_col / output_dim) * border) * channel; count = 0; for (int c = 0; c < channel; c++) { for (int ki = 0; ki < kernel_size; ki++) { for (int kj = 0; kj < kernel_size; kj++) { in = in_start + (ki * input_dim + kj) * channel + c; index = count % BIN_SIZE; sp[index] = data_input[data_row * indata_length + in]; if (index == BIN_SIZE - 1 || count == end_flag) { for (int i = 0; i < BIN_SIZE; i++) { data += R[i] * sp[i]; } data_output[data_row * outdata_length + out_start] = data; data = 0; out_start += 1; for (int m = 0; m < BIN_SIZE; m++) sp[m] = 0; } count++; } } } } } extern "C" void img2bitcol_gpu(unsigned int *&bin_data, int num, int channel, int input_dim, int kernel_size, int stride, int pad, int output_dim, unsigned int *&pad_input) { clock_t start = clock(); int length; if (channel * kernel_size * kernel_size % BIN_SIZE == 0) length = (channel * kernel_size * kernel_size / BIN_SIZE) * output_dim * output_dim; else length = (channel * kernel_size * kernel_size / BIN_SIZE + 1) * output_dim * output_dim; int block_size = length / (output_dim * output_dim); int output_length = output_dim * output_dim; int input_dim_ = input_dim + 2 * pad; _k_img2bitcol_gpu<<<BLOCKNUM, THREADNUM, 0>>>(bin_data, pad_input, num, block_size, output_length, channel, input_dim_, output_dim, stride,length, kernel_size); cudaThreadSynchronize(); } __global__ void _k_copy_data_gpu(float_t **data_input, float_t **data_output, int num, int length, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start, in_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; in_start = j % length; if (add) { data_output[data_row][out_start] += data_input[data_row][in_start]; } else { data_output[data_row][out_start] = data_input[data_row][in_start]; } } } extern "C" void copy_data_gpu(float_t **&data, float_t **&out_data, int num, int length, int add) { _k_copy_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, length, add); cudaThreadSynchronize(); } __global__ void _k_copy_data_bin_gpu(unsigned int **data_input, unsigned int **data_output, int num, int length, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start, in_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; in_start = j % length; if (add) { data_output[data_row][out_start] += data_input[data_row][in_start]; } else { data_output[data_row][out_start] = data_input[data_row][in_start]; } } } extern "C" void copy_data_bin_gpu(unsigned int **&data, unsigned int **&out_data, int num, int length, int add) { _k_copy_data_bin_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, length, add); cudaThreadSynchronize(); } __global__ void _k_copy2dest_gpu(float_t **data_input, float_t **index_data, float_t **data_output, int num, int input_dim, int output_dim, int channel, int kernel_size, int stride, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * 
THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; //the index for the data in kernel int offset_i, offset_j; int c; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; offset_i = startset_i - outset_i * stride; offset_j = startset_j - outset_j * stride; if (index_data[data_row][(outset_i * output_dim + outset_j) * channel + c] == (float_t) (offset_i * kernel_size + offset_j)) { data_output[data_row][data_col] += data_input[data_row][(outset_i * output_dim + outset_j) * channel + c]; } } } } extern "C" void copy2dest_gpu(float_t **&data, float_t **&index_data, int num, int output_dim, int input_dim, int channel, int kernel_size, int stride, float_t **&out_data) { int length = input_dim * input_dim * channel; _k_copy2dest_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, index_data, out_data, num, input_dim, output_dim, channel, kernel_size, stride, length); cudaThreadSynchronize(); } __global__ void _k_copy2mean_gpu(float_t **data_input, float_t **data_output, int num, int channel, int input_dim, int output_dim, int stride, int kernel_size, int pad, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; int pw, ph; int c; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } //stride for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; pw = kernel_size; ph = kernel_size; if (outset_i == output_dim - 1) ph = kernel_size - pad; if (outset_j == output_dim - 1) pw = kernel_size - pad; data_output[data_row][data_col] += (data_input[data_row][(outset_i * output_dim + outset_j) * channel + c] / (float_t) (ph * pw)); } } } extern "C" void copy2mean_gpu(float_t **&data, int num, int 
output_dim, int input_dim, int channel, int kernel_size, int stride, int pad, float_t **&out_data) { int length = input_dim * input_dim * channel; _k_copy2mean_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, channel, input_dim, output_dim, stride, kernel_size, pad, length); cudaThreadSynchronize(); } __global__ void _k_reset_data_gpu(float_t *data_input, int num, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_input[j] = 0; } } extern "C" void reset_data_gpu(float_t *&data, int num, int length) { _k_reset_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length); cudaThreadSynchronize(); } __global__ void _k_reset_bin_data_gpu(unsigned int *data_input, int num, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_input[j] = 0; } } extern "C" void reset_bin_data_gpu(unsigned int *&data, int num, int length) { _k_reset_bin_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length); cudaThreadSynchronize(); } __global__ void _k_set_data_gpu(float_t **data_input, int num, int length, float_t value) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; data_input[data_row][out_start] = value; } } extern "C" void set_data_gpu(float_t **&data, int num, int length, float_t value) { _k_set_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length, value); cudaThreadSynchronize(); }
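// ---------------------------------------------------------------------------
// Hedged usage sketch (not from the original source): one plausible call
// sequence that zero-pads a batch of feature maps and then lowers them to
// column form for a GEMM-based convolution. example_pad_and_im2col is a
// hypothetical wrapper; all device buffers are assumed to be pre-allocated by
// the caller with the sizes noted in the comments. The index math in
// _k_img2col_gpu does not use stride, so this sketch assumes stride == 1.
static void example_pad_and_im2col(float_t *&d_in,      // [num, dim, dim, ch]
                                   float_t *&d_padded,  // [num, pdim, pdim, ch]
                                   float_t *&d_cols,    // [num, odim*odim, k*k*ch]
                                   int num, int channel, int input_dim,
                                   int kernel_size, int pad) {
  const int stride = 1;                                  // see note above
  const int padded_dim = input_dim + 2 * pad;
  const int output_dim = (padded_dim - kernel_size) / stride + 1;
  // Zero-pad each map, then unfold kernel_size x kernel_size patches.
  copy_padding_data_blob_gpu(d_in, num, input_dim, channel, pad, d_padded);
  img2col_gpu(d_padded, num, channel, padded_dim, kernel_size, stride,
              output_dim, d_cols);
}
// ---------------------------------------------------------------------------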
#include "common/omptarget.h" #include "common/target_atomic.h" #include "target_impl.h" EXTERN void __kmpc_nvptx_end_reduce(int32_t global_tid) {} EXTERN void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {} EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) { return __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, val, delta, size); } EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) { uint32_t lo, hi; __kmpc_impl_unpack(val, lo, hi); hi = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, hi, delta, size); lo = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, lo, delta, size); return __kmpc_impl_pack(lo, hi); } INLINE static void gpu_regular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) { shflFct(reduce_data, /*LaneId - not used= */ 0, /*Offset = */ mask, /*AlgoVersion=*/0); } } INLINE static void gpu_irregular_warp_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct, uint32_t size, uint32_t tid) { uint32_t curr_size; uint32_t mask; curr_size = size; mask = curr_size / 2; while (mask > 0) { shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1); curr_size = (curr_size + 1) / 2; mask = curr_size / 2; } } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 INLINE static uint32_t gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) { uint32_t size, remote_id, physical_lane_id; physical_lane_id = GetThreadIdInBlock() % WARPSIZE; __kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt(); __kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask(); uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2; __kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt(); do { Liveness = __kmpc_impl_activemask(); remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt); size = __kmpc_impl_popc(Liveness); logical_lane_id /= 2; shflFct(reduce_data, /*LaneId =*/logical_lane_id, /*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2); } while (logical_lane_id % 2 == 0 && size > 1); return (logical_lane_id == 0); } #endif INLINE static int32_t nvptx_parallel_reduce_nowait( int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, bool isSPMDExecutionMode, bool isRuntimeUninitialized) { uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode); uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode); if (NumThreads == 1) return 1; /* * This reduce function handles reduction within a team. It handles * parallel regions in both L1 and L2 parallelism levels. It also * supports Generic, SPMD, and NoOMP modes. * * 1. Reduce within a warp. * 2. Warp master copies value to warp 0 via shared memory. * 3. Warp 0 reduces to a single value. * 4. The reduced value is available in the thread that returns 1. */ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE; uint32_t WarpId = BlockThreadId / WARPSIZE; // Volta execution model: // For the Generic execution mode a parallel region either has 1 thread and // beyond that, always a multiple of 32. For the SPMD execution mode we may // have any number of threads. if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1)) gpu_regular_warp_reduce(reduce_data, shflFct); else if (NumThreads > 1) // Only SPMD execution mode comes thru this case. 
gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/NumThreads % WARPSIZE, /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); } return BlockThreadId == 0; #else __kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask(); if (Liveness == __kmpc_impl_all_lanes) // Full warp gpu_regular_warp_reduce(reduce_data, shflFct); else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes gpu_irregular_warp_reduce(reduce_data, shflFct, /*LaneCount=*/__kmpc_impl_popc(Liveness), /*LaneId=*/GetThreadIdInBlock() % WARPSIZE); else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2 // parallel region may enter here; return // early. return gpu_irregular_simd_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. // // Only L1 parallel region can enter this if condition. if (NumThreads > WARPSIZE) { uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = BlockThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, BlockThreadId); return BlockThreadId == 0; } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) { return BlockThreadId == 0; } // Get the OMP thread Id. This is different from BlockThreadId in the case of // an L2 parallel region. return global_tid == 0; #endif // __CUDA_ARCH__ >= 700 } EXTERN int32_t __kmpc_nvptx_parallel_reduce_nowait_v2( kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) { return nvptx_parallel_reduce_nowait( global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct, checkSPMDMode(loc), checkRuntimeUninitialized(loc)); } INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) { return checkGenericMode(loc) || IsTeamMaster(ThreadId); } INLINE static uint32_t roundToWarpsize(uint32_t s) { if (s < WARPSIZE) return 1; return (s & ~(unsigned)(WARPSIZE - 1)); } DEVICE static volatile uint32_t IterCnt = 0; DEVICE static volatile uint32_t Cnt = 0; EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2( kmp_Ident *loc, int32_t global_tid, void *global_buffer, int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct, kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct, kmp_ListGlobalFctPtr glredFct) { // Terminate all threads in non-SPMD mode except for the master thread. if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID()) return 0; uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc)); // In non-generic mode all workers participate in the teams reduction. // In generic mode only the team master participates in the teams // reduction because the workers are waiting for parallel work. uint32_t NumThreads = checkSPMDMode(loc) ? 
GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true) : /*Master thread only*/ 1; uint32_t TeamId = GetBlockIdInKernel(); uint32_t NumTeams = GetNumberOfBlocksInKernel(); static SHARED unsigned Bound; static SHARED unsigned ChunkTeamCount; // Block progress for teams greater than the current upper // limit. We always only allow a number of teams less or equal // to the number of slots in the buffer. bool IsMaster = isMaster(loc, ThreadId); while (IsMaster) { // Atomic read Bound = __kmpc_atomic_add((uint32_t *)&IterCnt, 0u); if (TeamId < Bound + num_of_records) break; } if (IsMaster) { int ModBockId = TeamId % num_of_records; if (TeamId < num_of_records) lgcpyFct(global_buffer, ModBockId, reduce_data); else lgredFct(global_buffer, ModBockId, reduce_data); __kmpc_impl_threadfence_system(); // Increment team counter. // This counter is incremented by all teams in the current // BUFFER_SIZE chunk. ChunkTeamCount = __kmpc_atomic_inc((uint32_t *)&Cnt, num_of_records - 1u); } // Synchronize if (checkSPMDMode(loc)) __kmpc_barrier(loc, global_tid); // reduce_data is global or shared so before being reduced within the // warp we need to bring it in local memory: // local_reduce_data = reduce_data[i] // // Example for 3 reduction variables a, b, c (of potentially different // types): // // buffer layout (struct of arrays): // a, a, ..., a, b, b, ... b, c, c, ... c // |__________| // num_of_records // // local_data_reduce layout (struct): // a, b, c // // Each thread will have a local struct containing the values to be // reduced: // 1. do reduction within each warp. // 2. do reduction across warps. // 3. write the final result to the main reduction variable // by returning 1 in the thread holding the reduction result. // Check if this is the very last team. unsigned NumRecs = __kmpc_impl_min(NumTeams, uint32_t(num_of_records)); if (ChunkTeamCount == NumTeams - Bound - 1) { // // Last team processing. // if (ThreadId >= NumRecs) return 0; NumThreads = roundToWarpsize(__kmpc_impl_min(NumThreads, NumRecs)); if (ThreadId >= NumThreads) return 0; // Load from buffer and reduce. glcpyFct(global_buffer, ThreadId, reduce_data); for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads) glredFct(global_buffer, i, reduce_data); // Reduce across warps to the warp master. if (NumThreads > 1) { gpu_regular_warp_reduce(reduce_data, shflFct); // When we have more than [warpsize] number of threads // a block reduction is performed here. uint32_t ActiveThreads = __kmpc_impl_min(NumRecs, NumThreads); if (ActiveThreads > WARPSIZE) { uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE; // Gather all the reduced values from each warp // to the first warp. cpyFct(reduce_data, WarpsNeeded); uint32_t WarpId = ThreadId / WARPSIZE; if (WarpId == 0) gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId); } } if (IsMaster) { Cnt = 0; IterCnt = 0; return 1; } return 0; } if (IsMaster && ChunkTeamCount == num_of_records - 1) { // Allow SIZE number of teams to proceed writing their // intermediate results to the global buffer. __kmpc_atomic_add((uint32_t *)&IterCnt, uint32_t(num_of_records)); } return 0; }
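// ---------------------------------------------------------------------------
// Hedged illustration (not part of the libomptarget runtime): the butterfly
// pattern that gpu_regular_warp_reduce drives through the shflFct callback,
// written out for a single int so the data flow is visible. warp_sum_example
// is a hypothetical free-standing kernel, launched as <<<1, 32>>> on a 32-int
// input, and it uses the raw CUDA intrinsic __shfl_down_sync rather than the
// runtime's __kmpc_impl_* wrappers.
static __global__ void warp_sum_example(const int *in, int *out) {
  int val = in[threadIdx.x];
  // Halve the reduction span each step; after log2(32) steps lane 0 holds the
  // warp-wide sum.
  for (int offset = 32 / 2; offset > 0; offset /= 2)
    val += __shfl_down_sync(0xffffffffu, val, offset);
  if (threadIdx.x == 0)
    *out = val;  // lane 0 of the single warp writes the reduced value
}
// ---------------------------------------------------------------------------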
#include "BoundingRect.h" #include <iostream> #include <fstream> #include <cmath> #include <algorithm> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_SHARED_LENGTH // 定义了核函数中共享内存的长度 #define DEF_SHARED_LENGTH(sharedarray) (DEF_BLOCK_X * DEF_BLOCK_Y * \ sizeof (sharedarray)) // 宏:BR_LARGE_ENOUGH // 定义了计算共享内存的函数中循环的上界。 #define BR_LARGE_ENOUGH ((1 << 30) - 1) // 宏:FINDPIXEL_PACK_LEVEL // 定义了 FINDPIXEL 核函数中一个线程中计算的像素点个数,若该值为 5,则在一个线 // 程中计算 2 ^ 5 = 32 个像素点。根据实验结果,5 是最好的选择。 #define FINDPIXEL_PACK_LEVEL 5 // 宏:FINDPIXEL_PACK_NUM // 定义了计算每个线程中计算的次数。 #define FINDPIXEL_PACK_NUM (1 << FINDPIXEL_PACK_LEVEL) // 宏:FINDPIXEL_PACK_MASK // 定义了计算线程时向上取整的MASK。 #define FINDPIXEL_PACK_MASK (FINDPIXEL_PACK_NUM - 1) // 列出了计算范围,如果超出范围,返回错误码。 #if (FINDPIXEL_PACK_LEVEL < 1 || FINDPIXEL_PACK_LEVEL > 5) # error Unsupport FINDPIXEL_PACK_LEVEL Value!!! #endif // 宏:COMPUTECOV_PACK_LEVEL // 定义了 COMPUTECOV 核函数中一个线程中计算的像素点个数,若该值为 5,则在一个线 // 程中计算 2 ^ 5 = 32个像素点。根据实验结果,5 是最好的选择。 #define COMPUTECOV_PACK_LEVEL 5 // 宏:COMPUTECOV_PACK_NUM // 定义了计算每个线程中计算的次数。 #define COMPUTECOV_PACK_NUM (1 << COMPUTECOV_PACK_LEVEL) // 宏:COMPUTECOV_PACK_MASK // 定义了计算线程时向上取整的MASK #define COMPUTECOV_PACK_MASK (COMPUTECOV_PACK_NUM - 1) // 列出了计算范围,如果超出范围,返回错误码。 #if (COMPUTECOV_PACK_LEVEL < 1 || COMPUTECOV_PACK_LEVEL > 5) # error Unsupport COMPUTECOV_PACK_LEVEL Value!!! #endif // 宏:EXTREAMPOINT_PACK_LEVEL // 定义了 EXTREAMPOINT 核函数中一个线程中计算的像素点个数,若该值为 5,则在一个 // 线程中计算 2 ^ 5 = 32个像素点。根据实验结果,5 是最好的选择。 #define EXTREAMPOINT_PACK_LEVEL 5 // 宏:EXTREAMPOINT_PACK_NUM // 定义了计算每个线程中计算的次数。 #define EXTREAMPOINT_PACK_NUM (1 << EXTREAMPOINT_PACK_LEVEL) // 宏:EXTREAMPOINT_PACK_MASK // 定义了计算线程时向上取整的MASK #define EXTREAMPOINT_PACK_MASK (EXTREAMPOINT_PACK_NUM - 1) // 列出了计算范围,如果超出范围,返回错误码。 #if (EXTREAMPOINT_PACK_LEVEL < 1 || EXTREAMPOINT_PACK_LEVEL > 5) # error Unsupport EXTREAMPOINT_PACK_LEVEL Value!!! 
#endif // 结构体:ObjPixelPosSumInfoInner(符合条件的对象的像素点信息) // 该结构体定义了图像中符合条件的对象的像素点信息,其中包含了像素点数量,x 坐标 // 总和, y 坐标总和。该结构的使用可以减少数据的申请和释放。 typedef struct ObjPixelPosSumInfoInner_st { unsigned long long int pixelCount; // 符合条件的像素点数量 unsigned long long int posSumX; // 符合条件的像素点的 x 坐标总和 unsigned long long int posSumY; // 符合条件的像素点的 y 坐标总和 } ObjPixelPosSumInfoInner; // 结构体:CovarianceMatrix(协方差矩阵) // 该结构体定义了 2 维的协方差矩阵的数据结构。协方差矩阵中第二个和第三个元素相 // 等,所以忽略第三个元素计算。 typedef struct CovarianceMatrix_st{ float a11; // 协方差矩阵给的第一个元素 Covariance11 = // E{[X-E(X)][X-E(X)]}。 float a12; // 协方差矩阵给的第二个,第三个元素 Covariance1 = // E{[X-E(X)][Y-E(Y)]}。 //float a21; // 协方差矩阵给的第三个元素,等于上值,忽略 Covariance21 = // E{[Y-E(Y)][X-E(X)]}。 float a22; // 协方差矩阵给的第四个元素 Covariance12 = // E{[Y-E(Y)][Y-E(Y)]}。 } CovarianceMatrix; // 结构体:Coordinate(点的坐标) // 该结构体定义了点的坐标。坐标的数据类型为 float。 typedef struct Coordinate_st { float x; // x 坐标。 float y; // y 坐标。 } Coordinate; // 结构体:CoordinateInt(点的坐标) // 该结构体定义了点的坐标。坐标的数据类型为 int。 typedef struct CoordinateInt_st { int x; // x 坐标。 int y; // y 坐标。 } CoordinateInt; // Kernel 函数: _objCalcPixelInfoKer(计算符合条件的对象的像素信息) // 计算符合条件的对象的像素点的信息,包括像素点个数,横纵坐标总和。 static __global__ void // Kernel 函数无返回值 _objCalcPixelInfoKer( ImageCuda inimg, // 输入图像 unsigned char value, // 对象的像素值 int blksize, // 块大小,等于 blocksize.x * // blocksize.y * blocksize.z。 int blksize2p, // 优化的块大小,方便规约方法。 ObjPixelPosSumInfoInner *suminfo // 对象的像素信息。 ); // Host 函数:_objCalcPixelInfo(计算符合条件的对象的像素信息) // 计算符合条件的对象的像素点的信息,包括像素点个数,横纵坐标总和。该函数在 // Host 端由 CPU 串行实现。 static __host__ void // 该函数无返回值 _objCalcPixelInfo( ImageCuda *insubimg, // 输入图像 unsigned char value, // 对象的像素值 ObjPixelPosSumInfoInner *suminfo // 返回的对象像素信息。 ); // Kernel 函数: _objCalcCovMatrixKer(计算符合条件的对象的协方差矩阵) // 根据符合条件的像素点的信息和中心值,计算对象的协方差矩阵。 static __global__ void // Kernel 函数无返回值 _objCalcCovMatrixKer( ImageCuda inimg, // 输入图像 Coordinate *expcenter, // 像素坐标的期望 unsigned char value, // 对象的像素值 int blksize, // 块大小,等于 blocksize.x * // blocksize.y * blocksize.z。 int blksize2p, // 优化的块大小,方便规约方法。 CovarianceMatrix *covmatrix // 协方差矩阵 ); // Kernel 函数: _brCalcExtreamPointKer(计算对象包围矩形的边界点) // 根据对象的旋转信息和中心点,通过逐次比较,找出对象的包围矩形的边界点。 static __global__ void // Kernel 函数无返回值 _brCalcExtreamPointKer( ImageCuda inimg, // 输入图像 unsigned char value, // 对象的像素值 int blksize, // 块大小,等于 blocksize.x * // blocksize.y * blocksize.z。 int blksize2p, // 优化的块大小,方便规约方法。 CoordinateInt *expcenter, // 像素坐标的期望, 类型为 int RotationInfo *rtinfo, // 旋转矩阵信息 BoundBoxInt *boundbox // 包围矩形信息 ); // 函数:_calcBoundingRectParam(计算包围矩形的参数) // 计算BoundingRect使用到的参数,计算结果用于随后的成员方法,这样做的目的是简 // 化代码,维护方便。 static __host__ int // 返回值:函数是否正确执行,若函数正确执 // 行,返回 NO_ERROR。 _calcBoundingRectParam( Image *inimg, // 输入图像 unsigned char value, // 对象的像素值 RotationInfo *rotateinfo, // 旋转信息 BoundBoxInt *boundboxint // 包围矩形的四个点 ); // Host 函数: brCelling2PInner(计算适合规约方法的共享内存长度) // 这个函数的目的是通过迭代的方法找出不小于 n 的最大的 2^n。 // 结果是 n2p,用来作为规约方法的共享内存的长度。 static __host__ int // 返回值:函数是否正确执行,若函数正确执行, // 返回NO_ERROR。 brCelling2PInner( int n, // 块的大小。 int *n2p // 计算的适合规约方法的共享内存长度。 ); // Kernel 函数: _objCalcExpectedCenterKer(计算符合条件的对象的中心点坐标) static __global__ void // Kernel 函数无返回值。 _objCalcExpectedCenterKer( ObjPixelPosSumInfoInner *pixelsuminfo, // 对象的像素信息。 Coordinate *expcenter // 对象的中心点坐标。 ); // Kernel 函数:_brCalcParamforExtreamPointKer //(为核函数 _brCalcExtreamPointKer 计算参数) static __global__ void // Kernel 函数无返回值 _brCalcParamforExtreamPointKer( CovarianceMatrix *covmatrix, // 协方差矩阵 RotationInfo *rtinfo, // 旋转信息 BoundBoxInt *bboxint, // 包围矩形 Coordinate *expcenter, // 中心点坐标 Coordinate *rtexpcenter, // 
旋转后的中心点坐标(float 类型) CoordinateInt *expcenterint // 旋转后中心点坐标(int 类型) ); // Host 函数: brCelling2PInner(计算适合规约方法的共享内存长度) // 这个函数的目的是通过迭代的方法找出不小于 n 的最大的 2^n。 // 结果是 n2p,用来作为规约方法的共享内存的长度。 __host__ int brCelling2PInner(int n, int *n2p) { // 局部变量 i。 int i; // 检查输出指针是否为 NULL。 if (n2p == NULL) return NULL_POINTER; // 计算找出不小于 n 的最大的 2^n。 for (i = 1; i < BR_LARGE_ENOUGH; i <<= 1) { // 如果找到了,就返回正确。 if (i >= n) { *n2p = i; return NO_ERROR; } } // 如果找不到,就返回错误。 return UNKNOW_ERROR; } // Kernel 函数: _objCalcPixelInfoKer(计算符合条件的对象的像素信息) static __global__ void _objCalcPixelInfoKer( ImageCuda inimg, unsigned char value, int blksize, int blksize2p, ObjPixelPosSumInfoInner *suminfo) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理多个输出像素,这多个像素位于统一列的相邻多行 // 上,因此,对于 r 需要进行乘以 FINDPIXEL_PACK_LEVEL 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) << FINDPIXEL_PACK_LEVEL; // 本地变量。inidx 为块内索引。 int inidx = threadIdx.y * blockDim.x + threadIdx.x; int inidx2; int currdsize; ObjPixelPosSumInfoInner blksuminfo_temp; // 声明共享内存。 extern __shared__ ObjPixelPosSumInfoInner blksuminfo[]; // 初始化。 blksuminfo_temp.pixelCount = 0UL; blksuminfo_temp.posSumX = 0UL; blksuminfo_temp.posSumY = 0UL; // 找到图像中符合条件的像素点,计算像素点的数量和坐标总和。 do { // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inindex = r * inimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inindex]; // 如果当前的像素值为 value,那么像素点的计数加 1,x 和 y 的坐标总和累 // 加。由于 c 和 r 只是 ROI 图像中的坐标索引,所以需加上 ROI 的边界值。 if (intemp == value) { blksuminfo_temp.pixelCount++; blksuminfo_temp.posSumX += c; blksuminfo_temp.posSumY += r; } // 宏:FINDPIXEL_KERNEL_MAIN_PHASE // 该宏定义了一个线程中进行的计算,计算下一个像素点和对应的操作。 // 使用宏定义简化代码 #define FINDPIXEL_KERNEL_MAIN_PHASE \ if (++r >= inimg.imgMeta.height) \ break; \ inindex += inimg.pitchBytes; \ intemp = inimg.imgMeta.imgData[inindex]; \ if (intemp == value) { \ blksuminfo_temp.pixelCount++; \ blksuminfo_temp.posSumX += c; \ blksuminfo_temp.posSumY += r; \ } #define FINDPIXEL_KERNEL_MAIN_PHASEx2 \ FINDPIXEL_KERNEL_MAIN_PHASE \ FINDPIXEL_KERNEL_MAIN_PHASE #define FINDPIXEL_KERNEL_MAIN_PHASEx4 \ FINDPIXEL_KERNEL_MAIN_PHASEx2 \ FINDPIXEL_KERNEL_MAIN_PHASEx2 #define FINDPIXEL_KERNEL_MAIN_PHASEx8 \ FINDPIXEL_KERNEL_MAIN_PHASEx4 \ FINDPIXEL_KERNEL_MAIN_PHASEx4 #define FINDPIXEL_KERNEL_MAIN_PHASEx16 \ FINDPIXEL_KERNEL_MAIN_PHASEx8 \ FINDPIXEL_KERNEL_MAIN_PHASEx8 // 对于线程中的最后一个像素处理操作。 FINDPIXEL_KERNEL_MAIN_PHASE // 根据不同的 FINDPIXEL_PACK_LEVEL 定义,进行不同的线程操作 #if (FINDPIXEL_PACK_LEVEL >= 2) FINDPIXEL_KERNEL_MAIN_PHASEx2 # if (FINDPIXEL_PACK_LEVEL >= 3) FINDPIXEL_KERNEL_MAIN_PHASEx4 # if (FINDPIXEL_PACK_LEVEL >= 4) FINDPIXEL_KERNEL_MAIN_PHASEx8 # if (FINDPIXEL_PACK_LEVEL >= 5) FINDPIXEL_KERNEL_MAIN_PHASEx16 # endif # endif # endif #endif #undef FINDPIXEL_KERNEL_MAIN_PHASEx16 #undef FINDPIXEL_KERNEL_MAIN_PHASEx8 #undef FINDPIXEL_KERNEL_MAIN_PHASEx4 #undef FINDPIXEL_KERNEL_MAIN_PHASEx2 #undef FINDPIXEL_KERNEL_MAIN_PHASE } while (0); // 将线程中计算得到的临时变量赋给共享内存。 blksuminfo[inidx].pixelCount = blksuminfo_temp.pixelCount; blksuminfo[inidx].posSumX = blksuminfo_temp.posSumX; blksuminfo[inidx].posSumY = blksuminfo_temp.posSumY; __syncthreads(); // 对于 blksize2p 长度的值进行折半累加。 currdsize = (blksize2p >> 1); inidx2 = inidx + currdsize; if (inidx2 < blksize) { blksuminfo[inidx].pixelCount += blksuminfo[inidx2].pixelCount ; blksuminfo[inidx].posSumX += 
blksuminfo[inidx2].posSumX; blksuminfo[inidx].posSumY += blksuminfo[inidx2].posSumY; } __syncthreads(); // 使用规约的方法,累加像素信息值到共享内存的开头。 for (currdsize >>= 1; currdsize > 0; currdsize >>= 1) { if (inidx < currdsize) { inidx2 = inidx + currdsize; blksuminfo[inidx].pixelCount += blksuminfo[inidx2].pixelCount; blksuminfo[inidx].posSumX += blksuminfo[inidx2].posSumX; blksuminfo[inidx].posSumY += blksuminfo[inidx2].posSumY; } __syncthreads(); } // 把共享内存的像素信息值累加到总和上。每个线程块第一个线程会进行这个操作。 if (inidx == 0 && blksuminfo[0].pixelCount != 0) { atomicAdd(&(suminfo->pixelCount), blksuminfo[0].pixelCount); atomicAdd(&(suminfo->posSumX), blksuminfo[0].posSumX); atomicAdd(&(suminfo->posSumY), blksuminfo[0].posSumY); } } // Host 函数:_objCalcPixelInfo(计算符合条件的对象的像素信息) static __host__ void _objCalcPixelInfo( ImageCuda *insubimg, unsigned char value, ObjPixelPosSumInfoInner *suminfo) { // 检查输入指针的合法性。 if (insubimg == NULL || suminfo == NULL) return /*NULL_POINTER*/; // 初始化返回值,为下面的累加做准备。 suminfo->pixelCount = 0UL; suminfo->posSumX = 0UL; suminfo->posSumY = 0UL; // 迭代图像内所有的像素点,判断每个像素点的像素值,如果像素值满足要求,则累 // 加相应的计数信息。 for (int r = 0; r < insubimg->imgMeta.height; r++) { int inidx = r * insubimg->pitchBytes; for (int c = 0; c < insubimg->imgMeta.width; c++) { unsigned char inpixel = insubimg->imgMeta.imgData[inidx]; if (inpixel == value) { suminfo->pixelCount += 1; suminfo->posSumX += c; suminfo->posSumY += r; } inidx++; } } //return NO_ERROR; } // Kernel 函数: _objCalcExpectedCenterKer(计算符合条件的对象的中心点坐标) static __global__ void _objCalcExpectedCenterKer( ObjPixelPosSumInfoInner *pixelsuminfo, Coordinate *expcenter) { // 利用符合条件的像素点的坐标总和与坐标个数,计算对象的中心点坐标。 expcenter->x = (float)pixelsuminfo->posSumX / (float)pixelsuminfo->pixelCount; expcenter->y = (float)pixelsuminfo->posSumY / (float)pixelsuminfo->pixelCount; } // Host 函数: _objCalcExpectedCenter(计算符合条件的对象的中心点坐标) static __host__ void _objCalcExpectedCenter( ObjPixelPosSumInfoInner *pixelsuminfo, Coordinate *expcenter) { // 利用符合条件的像素点的坐标总和与坐标个数,计算对象的中心点坐标。 expcenter->x = (float)pixelsuminfo->posSumX / (float)pixelsuminfo->pixelCount; expcenter->y = (float)pixelsuminfo->posSumY / (float)pixelsuminfo->pixelCount; } // Kernel 函数: _objCalcCovMatrixKer(计算符合条件的对象的协方差矩阵) static __global__ void _objCalcCovMatrixKer( ImageCuda inimg, Coordinate *expcenter, unsigned char value, int blksize, int blksize2p, CovarianceMatrix *covmatrix) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理多个输出像素,这多个像素位于统一列的相邻多行 // 上,因此,对于 r 需要进行乘以 COMPUTECOV_PACK_LEVEL 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) << COMPUTECOV_PACK_LEVEL; // 局部变量 int inidx = threadIdx.y * blockDim.x + threadIdx.x; int inidx2, currdsize; float dx, dy, dxx, dxy, dyy; CovarianceMatrix cov_temp; // 声明共享内存 extern __shared__ CovarianceMatrix shdcov[]; // 临时变量初始化 cov_temp.a11 = 0.0f; cov_temp.a12 = 0.0f; cov_temp.a22 = 0.0f; // 找到图像中符合条件的像素点,计算对象的协方差矩阵。 do { // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inindex = r * inimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inindex]; // 计算坐标值减去中心点坐标值。 dx = c - expcenter->x; dy = r - expcenter->y; // 计算协方差矩阵的各个值. 
dxx = dx * dx; dxy = dx * dy; dyy = dy * dy; // 如果当前点的像素值符合要求,那么累加到协方差元素中。 if (intemp == value) { cov_temp.a11 += dxx; cov_temp.a12 += dxy; cov_temp.a22 += dyy; } // 宏:COMPUTECOV_KERNEL_MAIN_PHASE // 该宏定义了一个线程中进行的计算,计算下一个像素点和对应的操作。 // 使用宏定义简化代码 #define COMPUTECOV_KERNEL_MAIN_PHASE \ if (++r >= inimg.imgMeta.height) \ break; \ dxy += dx; \ dyy += dy + dy + 1.0f; \ dy += 1.0f; \ inindex += inimg.pitchBytes; \ intemp = inimg.imgMeta.imgData[inindex]; \ if (intemp == value) { \ cov_temp.a11 += dxx; \ cov_temp.a12 += dxy; \ cov_temp.a22 += dyy; \ } #define COMPUTECOV_KERNEL_MAIN_PHASEx2 \ COMPUTECOV_KERNEL_MAIN_PHASE \ COMPUTECOV_KERNEL_MAIN_PHASE #define COMPUTECOV_KERNEL_MAIN_PHASEx4 \ COMPUTECOV_KERNEL_MAIN_PHASEx2 \ COMPUTECOV_KERNEL_MAIN_PHASEx2 #define COMPUTECOV_KERNEL_MAIN_PHASEx8 \ COMPUTECOV_KERNEL_MAIN_PHASEx4 \ COMPUTECOV_KERNEL_MAIN_PHASEx4 #define COMPUTECOV_KERNEL_MAIN_PHASEx16 \ COMPUTECOV_KERNEL_MAIN_PHASEx8 \ COMPUTECOV_KERNEL_MAIN_PHASEx8 // 对于线程中的最后一个像素处理操作。 COMPUTECOV_KERNEL_MAIN_PHASE // 根据不同的 COMPUTECOV_PACK_LEVEL 定义,进行不同的线程操作 #if (COMPUTECOV_PACK_LEVEL >= 2) COMPUTECOV_KERNEL_MAIN_PHASEx2 # if (COMPUTECOV_PACK_LEVEL >= 3) COMPUTECOV_KERNEL_MAIN_PHASEx4 # if (COMPUTECOV_PACK_LEVEL >= 4) COMPUTECOV_KERNEL_MAIN_PHASEx8 # if (COMPUTECOV_PACK_LEVEL >= 5) COMPUTECOV_KERNEL_MAIN_PHASEx16 # endif # endif # endif #endif #undef COMPUTECOV_KERNEL_MAIN_PHASEx16 #undef COMPUTECOV_KERNEL_MAIN_PHASEx8 #undef COMPUTECOV_KERNEL_MAIN_PHASEx4 #undef COMPUTECOV_KERNEL_MAIN_PHASEx2 #undef COMPUTECOV_KERNEL_MAIN_PHASE } while (0); // 累加线程计算得到的临时变量到共享内存内。 shdcov[inidx].a11 = cov_temp.a11; shdcov[inidx].a12 = cov_temp.a12; shdcov[inidx].a22 = cov_temp.a22; __syncthreads(); // 对于 blksize2p 长度的值进行折半累加 currdsize = (blksize2p >> 1); inidx2 = inidx + currdsize; if (inidx2 < blksize) { shdcov[inidx].a11 += shdcov[inidx2].a11; shdcov[inidx].a12 += shdcov[inidx2].a12; shdcov[inidx].a22 += shdcov[inidx2].a22; } __syncthreads(); // 使用规约的方法,累加像素信息值到共享内存的开头。 for (currdsize >>= 1; currdsize > 0; currdsize >>= 1) { if (inidx < currdsize) { inidx2 = inidx + currdsize; shdcov[inidx].a11 += shdcov[inidx2].a11; shdcov[inidx].a12 += shdcov[inidx2].a12; shdcov[inidx].a22 += shdcov[inidx2].a22; } __syncthreads(); } // 把共享内存的像素信息值累加到总和上。每个线程块的第一个线程会进行这个操 // 作。 if (inidx == 0) { atomicAdd(&(covmatrix->a11), shdcov[0].a11); atomicAdd(&(covmatrix->a12), shdcov[0].a12); atomicAdd(&(covmatrix->a22), shdcov[0].a22); } } // Host 函数: _objCalcCovMatrix(计算符合条件的对象的协方差矩阵) static __host__ void _objCalcCovMatrix( ImageCuda *insubimg, Coordinate *expcenter, unsigned char value, CovarianceMatrix *covmatrix) { // 检查输入指针的合法性。 if (insubimg == NULL || expcenter == NULL || covmatrix == NULL) return /*NULL_POINTER*/; // 初始化返回值,为下面的累加做准备。 covmatrix->a11 = 0.0f; covmatrix->a12 = 0.0f; covmatrix->a22 = 0.0f; // 迭代图像内所有的像素点,判断每个像素点的像素值,如果像素值满足要求,则累 // 加相应的计数信息。 for (int r = 0; r < insubimg->imgMeta.height; r++) { int inidx = r * insubimg->pitchBytes; // 计算坐标值减去中心点坐标值。 float dx = 0.0f - expcenter->x; float dy = r - expcenter->y; // 计算协方差矩阵的各个值. 
float dxx = dx * dx; float dxy = dx * dy; float dyy = dy * dy; for (int c = 0; c < insubimg->imgMeta.width; c++) { unsigned char inpixel = insubimg->imgMeta.imgData[inidx]; // 如果当前坐标满足要求,则进行偏移量的累加。 if (inpixel == value) { covmatrix->a11 += dxx; covmatrix->a12 += dxy; covmatrix->a22 += dyy; } // 利用两个点之间坐标相关性减少一部分计算。 inidx++; dxx += 2 * dx + 1.0f; dxy += dy; dx += 1.0f; } } //return NO_ERROR; } // 函数:_brCalcParamforExtreamPointIn(为核函数 _brCalcExtreamPointKer // 计算参数) static __host__ __device__ void _brCalcParamforExtreamPointIn( CovarianceMatrix *covmatrix, RotationInfo *rtinfo, Coordinate *expcenter, Coordinate *rtexpcenter) { // 局部变量。 float apd, amd, bmc, det; float eigen, solx, soly, soldt; // 为计算矩阵的特征值做准备计算。 apd = covmatrix->a11 + covmatrix->a22; amd = covmatrix->a11 - covmatrix->a22; bmc = covmatrix->a12 * covmatrix->a12; // 计算矩阵的特征值。 det = sqrt(4.0f * bmc + amd * amd); eigen = (apd + det) / 2.0f; // 计算旋转角度通过 asin()。 // 求解方程式 (covmatrix - eigen * E) * sol = 0 solx = covmatrix->a12 + covmatrix->a22 - eigen; soly = eigen - covmatrix->a11 - covmatrix->a12; soldt = sqrt(solx * solx + soly * soly); // 如果解的的 x 在第二或者第三象限,转化坐标到第四或者第一象限。 if (solx < 0) { solx = -solx; soly = -soly; } // 计算旋转角度信息。 rtinfo->sin = soly / soldt; rtinfo->cos = solx / soldt; rtinfo->radian = asin(rtinfo->sin); // 当旋转角度为负时,进行调整操作。 if (rtinfo->radian < 0) { (rtinfo)->radian = -(rtinfo)->radian; (rtinfo)->sin = -(rtinfo)->sin; (rtinfo)->cos = (rtinfo)->cos; } // 根据旋转信息,计算中心点 expcenter 旋转后的坐标 rtexpcenter。 rtexpcenter->x = expcenter->x * rtinfo->cos - expcenter->y * rtinfo->sin; rtexpcenter->y = expcenter->x * rtinfo->sin + expcenter->y * rtinfo->cos; } // Kernel 函数:_brCalcParamforExtreamPointKer(为核函数 _brCalcExtreamPointKer // 计算参数) static __global__ void _brCalcParamforExtreamPointKer( CovarianceMatrix *covmatrix, RotationInfo *rtinfo, BoundBoxInt *bboxint, Coordinate *expcenter, Coordinate *rtexpcenter, CoordinateInt *expcenterint) { _brCalcParamforExtreamPointIn(covmatrix, rtinfo, expcenter, rtexpcenter); // 初始化包围矩形的信息,边界的四个值全部用中心点的值来初始化。 // 初始化中心点坐标。 expcenterint->x = (int)rtexpcenter->x; expcenterint->y = (int)rtexpcenter->y; // 初始化包围矩形的边界。 bboxint->bottom = expcenterint->y; bboxint->top = expcenterint->y; bboxint->left = expcenterint->x; bboxint->right = expcenterint->x; } // Host 函数:_brCalcParamforExtreamPoint(计算旋转角度与初始化) static __host__ void _brCalcParamforExtreamPoint( CovarianceMatrix *covmatrix, RotationInfo *rtinfo, BoundBox *bbox, Coordinate *expcenter, Coordinate *rtexpcenter) { _brCalcParamforExtreamPointIn(covmatrix, rtinfo, expcenter, rtexpcenter); // 初始化包围矩形的边界。 bbox->bottom = rtexpcenter->y; bbox->top = rtexpcenter->y; bbox->left = rtexpcenter->x; bbox->right = rtexpcenter->x; } // Kernel 函数: _brCalcExtreamPointKer(计算对象包围盒的边界点) static __global__ void _brCalcExtreamPointKer( ImageCuda inimg, unsigned char value, int blksize, int blksize2p, CoordinateInt *expcenter, RotationInfo *rtinfo, BoundBoxInt *boundbox) { // 局部变量 int ptsor[2]; float pt[2]; int x, y, x_, y_; // 块内索引。 int inidx = threadIdx.y * blockDim.x + threadIdx.x; int inidx2, currdsize; BoundBoxInt bbox; // 声明共享内存。 extern __shared__ int shdbbox[]; int *shdbboxLeft = shdbbox; int *shdbboxRight = shdbboxLeft + blksize; int *shdbboxTop = shdbboxRight + blksize; int *shdbboxBottom = shdbboxTop + blksize; // 计算线程对应的输出点的位置,其中 ptsor[0] 和 ptsor[1] 分别表示线程处理的 // 像素点的坐标的 x 和 y 分量(其中,ptsor[1] 表示 column;ptsor[1] 表示 // row)。由于我们采用了并行度缩减的策略,令一个线程处理多个输出像素,这多 // 个像素位于统一列的相邻多行上,因此,对于 r 需要进行乘以 // EXTREAMPOINT_PACK_LEVEL 计算。 ptsor[0] = blockIdx.x * blockDim.x + 
threadIdx.x; ptsor[1] = (blockIdx.y * blockDim.y + threadIdx.y) << EXTREAMPOINT_PACK_LEVEL; // 初始化包围矩形 bbox.left = expcenter->x; bbox.right = expcenter->x; bbox.top = expcenter->y; bbox.bottom = expcenter->y; // 找到图像中符合条件的像素点,计算对象的包围矩形的边界。 do { // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if (ptsor[0] >= inimg.imgMeta.width || ptsor[1] >= inimg.imgMeta.height) break; // 计算第一个输入坐标点对应的图像数据数组下标。 int inindex = ptsor[1] * inimg.pitchBytes + ptsor[0]; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inindex]; // 如果像素值符合要求,计算旋转以后的点,比较找出边界值。 if (intemp == value) { RECT_ROTATE_POINT(ptsor, pt, *rtinfo); x = (int)(pt[0]); x_ = x + 1; y = (int)(pt[1]); y_ = y + 1; // 比较当前像素坐标和初始值。 bbox.left = min(bbox.left, x); bbox.right = max(bbox.right, x_); bbox.bottom = min(bbox.bottom, y); bbox.top = max(bbox.top, y_); } // 宏:EXTREAMPOINT_KERNEL_MAIN_PHASE // 该宏定义了一个线程中进行的计算,计算下一个像素点和对应的操作。 // 使用宏定义简化代码 #define EXTREAMPOINT_KERNEL_MAIN_PHASE \ if (++ptsor[1] >= inimg.imgMeta.height) \ break; \ inindex = ptsor[1] * inimg.pitchBytes + ptsor[0]; \ intemp = inimg.imgMeta.imgData[inindex]; \ if (intemp == value) { \ RECT_ROTATE_POINT(ptsor, pt, *rtinfo); \ x = (int)(pt[0]); \ x_ = x + 1; \ y = (int)(pt[1]); \ y_ = y + 1; \ bbox.left = min(bbox.left, x); \ bbox.right = max(bbox.right, x_); \ bbox.bottom = min(bbox.bottom, y); \ bbox.top = max(bbox.top, y_); \ } #define EXTREAMPOINT_KERNEL_MAIN_PHASEx2 \ EXTREAMPOINT_KERNEL_MAIN_PHASE \ EXTREAMPOINT_KERNEL_MAIN_PHASE #define EXTREAMPOINT_KERNEL_MAIN_PHASEx4 \ EXTREAMPOINT_KERNEL_MAIN_PHASEx2 \ EXTREAMPOINT_KERNEL_MAIN_PHASEx2 #define EXTREAMPOINT_KERNEL_MAIN_PHASEx8 \ EXTREAMPOINT_KERNEL_MAIN_PHASEx4 \ EXTREAMPOINT_KERNEL_MAIN_PHASEx4 #define EXTREAMPOINT_KERNEL_MAIN_PHASEx16 \ EXTREAMPOINT_KERNEL_MAIN_PHASEx8 \ EXTREAMPOINT_KERNEL_MAIN_PHASEx8 // 对于线程中的最后一个像素处理操作。 EXTREAMPOINT_KERNEL_MAIN_PHASE // 根据不同的 EXTREAMPOINT_PACK_LEVEL 定义,进行不同的线程操作 #if (EXTREAMPOINT_PACK_LEVEL >= 2) EXTREAMPOINT_KERNEL_MAIN_PHASEx2 # if (EXTREAMPOINT_PACK_LEVEL >= 3) EXTREAMPOINT_KERNEL_MAIN_PHASEx4 # if (EXTREAMPOINT_PACK_LEVEL >= 4) EXTREAMPOINT_KERNEL_MAIN_PHASEx8 # if (EXTREAMPOINT_PACK_LEVEL >= 5) EXTREAMPOINT_KERNEL_MAIN_PHASEx16 # endif # endif # endif #endif #undef EXTREAMPOINT_KERNEL_MAIN_PHASEx16 #undef EXTREAMPOINT_KERNEL_MAIN_PHASEx8 #undef EXTREAMPOINT_KERNEL_MAIN_PHASEx4 #undef EXTREAMPOINT_KERNEL_MAIN_PHASEx2 #undef EXTREAMPOINT_KERNEL_MAIN_PHASE } while (0); // 比较结果存入共享内存中。 shdbboxLeft[inidx] = bbox.left; shdbboxRight[inidx] = bbox.right + 1; shdbboxBottom[inidx] = bbox.bottom; shdbboxTop[inidx] = bbox.top; __syncthreads(); // 对于 blksize2p 长度的值进行折半比较包围矩形的边界值。 currdsize = (blksize2p >> 1); inidx2 = inidx + currdsize; if (inidx2 < blksize) { atomicMin(&(shdbboxLeft[inidx]), shdbboxLeft[inidx2]); atomicMax(&(shdbboxRight[inidx]), shdbboxRight[inidx2]); atomicMin(&(shdbboxBottom[inidx]), shdbboxBottom[inidx2]); atomicMax(&(shdbboxTop[inidx]), shdbboxTop[inidx2]); } __syncthreads(); // 使用规约的方法,把比较的结果保存到共享内存的开头。 for (currdsize >>= 1; currdsize > 0; currdsize >>= 1) { if (inidx < currdsize) { inidx2 = inidx + currdsize; atomicMin(&(shdbboxLeft[inidx]), shdbboxLeft[inidx2]); atomicMax(&(shdbboxRight[inidx]), shdbboxRight[inidx2]); atomicMin(&(shdbboxBottom[inidx]), shdbboxBottom[inidx2]); atomicMax(&(shdbboxTop[inidx]), shdbboxTop[inidx2]); } __syncthreads(); } // 比较共享内存里的边界值和初始值,更新边界值。每个线程块的第一个线程会进行 // 这个操作。 if (inidx == 0) { if (shdbboxLeft[0] != expcenter->x) atomicMin(&(boundbox->left), shdbboxLeft[0]); if (shdbboxRight[0] != expcenter->x) 
        atomicMax(&(boundbox->right), shdbboxRight[0]);
        if (shdbboxBottom[0] != expcenter->y)
            atomicMin(&(boundbox->bottom), shdbboxBottom[0]);
        if (shdbboxTop[0] != expcenter->y)
            atomicMax(&(boundbox->top), shdbboxTop[0]);
    }
}

// Host function: _brCalcExtreamPoint (compute the extreme points of the
// object's bounding box)
static __host__ void _brCalcExtreamPoint(
        ImageCuda *insubimg, unsigned char value, Coordinate *expcenter,
        RotationInfo *rtinfo, BoundBox *boundbox)
{
    // Check the validity of the input pointers.
    if (insubimg == NULL || expcenter == NULL || rtinfo == NULL ||
        boundbox == NULL)
        return /*NULL_POINTER*/;

    // Iterate over every pixel of the image and check its value; whenever a
    // pixel matches the given value, fold its rotated coordinates into the
    // bounding-box information.
    int ptsor[2];
    float pt[2];
    for (ptsor[1] = 0; ptsor[1] < insubimg->imgMeta.height; ptsor[1]++) {
        int inidx = ptsor[1] * insubimg->pitchBytes;
        for (ptsor[0] = 0; ptsor[0] < insubimg->imgMeta.width; ptsor[0]++) {
            unsigned char inpixel = insubimg->imgMeta.imgData[inidx];
            if (inpixel == value) {
                RECT_ROTATE_POINT(ptsor, pt, *rtinfo);
                boundbox->left = min(boundbox->left, pt[0]);
                boundbox->right = max(boundbox->right, pt[0]);
                boundbox->bottom = min(boundbox->bottom, pt[1]);
                boundbox->top = max(boundbox->top, pt[1]);
            }
            inidx++;
        }
    }
    //return NO_ERROR;
}

// Function: _calcBoundingRectParam (compute the parameters of the bounding
// rectangle)
static __host__ int _calcBoundingRectParam(Image *inimg, unsigned char value,
                                           RotationInfo *rotateinfo,
                                           BoundBoxInt *boundboxint)
{
    // Check whether the input image is NULL; if so, return an error directly.
    if (inimg == NULL)
        return NULL_POINTER;

    // This block of code performs the image preprocessing, which mainly
    // prepares memory space on the Device for the input image so that it can
    // hold the data.
    int errcode;  // Local variable: error code.

    // Copy the input image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Local variables.
    CoordinateInt *rtexpcenterint_dev;
    Coordinate *expcenter_dev, *rtexpcenter_dev;
    CovarianceMatrix *covmatrix_dev;
    RotationInfo *rtinfo_dev, *rtinfon_dev;
    BoundBoxInt *bdboxint_dev;
    ObjPixelPosSumInfoInner *pixelsuminfo_dev;
    float *temp_dev;

    // Allocate memory on the device, then hand it out to the variables below.
    cudaError_t cuerrcode;
    cuerrcode = cudaMalloc((void **)&temp_dev,
                           sizeof (ObjPixelPosSumInfoInner) +
                           2 * sizeof (Coordinate) +
                           2 * sizeof (RotationInfo) +
                           sizeof (CovarianceMatrix) +
                           sizeof (CoordinateInt) + sizeof (BoundBoxInt));
    if (cuerrcode != cudaSuccess)
        return cuerrcode;

    // Assign the allocated memory to the variables.
    pixelsuminfo_dev = (ObjPixelPosSumInfoInner*)(temp_dev);
    expcenter_dev = (Coordinate*)(pixelsuminfo_dev + 1);
    rtexpcenter_dev = expcenter_dev + 1;
    rtinfo_dev = (RotationInfo*)(rtexpcenter_dev + 1);
    rtinfon_dev = rtinfo_dev + 1;
    covmatrix_dev = (CovarianceMatrix*)(rtinfon_dev + 1);
    rtexpcenterint_dev = (CoordinateInt*)(covmatrix_dev + 1);
    bdboxint_dev = (BoundBoxInt*)(rtexpcenterint_dev + 1);

    // Initialize the array that stores the pixel information.
    cuerrcode = cudaMemset(pixelsuminfo_dev, 0,
                           sizeof (ObjPixelPosSumInfoInner));
    if (cuerrcode != cudaSuccess)
        return cuerrcode;

    // Initialize the array that stores the covariance matrix to all zeros.
    cuerrcode = cudaMemset(covmatrix_dev, 0, sizeof (CovarianceMatrix));
    if (cuerrcode != cudaSuccess)
        return cuerrcode;

    // Compute the block size and grid size for launching _objCalcPixelInfoKer.
    dim3 blocksize, gridsize;
    int height = (insubimgCud.imgMeta.height + FINDPIXEL_PACK_MASK) /
                 FINDPIXEL_PACK_NUM;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (height + blocksize.y - 1) / blocksize.y;

    // Compute the shared-memory length blksize2p for _objCalcPixelInfoKer.
    int blkthdcnt, blksize2p;
    blkthdcnt = blocksize.x * blocksize.y * blocksize.z;
    brCelling2PInner(blkthdcnt, &blksize2p);

    // Compute the pixel information of the object.
    _objCalcPixelInfoKer<<<gridsize, blocksize,
                           blkthdcnt * sizeof (ObjPixelPosSumInfoInner)>>>(
            insubimgCud, value, blkthdcnt, blksize2p, pixelsuminfo_dev);
    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Compute the expected center point.
    _objCalcExpectedCenterKer<<<1, 1>>>(pixelsuminfo_dev, expcenter_dev);
    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Compute the block size and grid size for launching _objCalcCovMatrixKer.
    height = (insubimgCud.imgMeta.height + COMPUTECOV_PACK_MASK) /
             COMPUTECOV_PACK_NUM;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (height + blocksize.y - 1) / blocksize.y;

    // Compute the shared-memory length blksize2p for _objCalcCovMatrixKer.
    blkthdcnt = blocksize.x * blocksize.y * blocksize.z;
    brCelling2PInner(blkthdcnt, &blksize2p);

    // Compute the covariance matrix.
    _objCalcCovMatrixKer<<<gridsize, blocksize,
                           blkthdcnt * sizeof (CovarianceMatrix)>>>(
            insubimgCud, expcenter_dev, value, blkthdcnt, blksize2p,
            covmatrix_dev);
    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Compute the parameters needed by _brCalcExtreamPointKer.
    _brCalcParamforExtreamPointKer<<<1, 1>>>(covmatrix_dev, rtinfo_dev,
                                             bdboxint_dev, expcenter_dev,
                                             rtexpcenter_dev,
                                             rtexpcenterint_dev);
    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Compute the block size and grid size for launching
    // _brCalcExtreamPointKer.
    height = (insubimgCud.imgMeta.height + EXTREAMPOINT_PACK_MASK) /
             EXTREAMPOINT_PACK_NUM;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (height + blocksize.y - 1) / blocksize.y;

    // Compute the shared-memory length blksize2p for _brCalcExtreamPointKer.
    blkthdcnt = blocksize.x * blocksize.y * blocksize.z;
    brCelling2PInner(blkthdcnt, &blksize2p);

    // Compute the extreme points of the bounding rectangle.
    _brCalcExtreamPointKer<<<gridsize, blocksize,
                             blkthdcnt * sizeof (BoundBoxInt)>>>(
            insubimgCud, value, blkthdcnt, blksize2p, rtexpcenterint_dev,
            rtinfo_dev, bdboxint_dev);
    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Copy the results from the device back to host memory.
    cuerrcode = cudaMemcpy(boundboxint, bdboxint_dev, sizeof (BoundBoxInt),
                           cudaMemcpyDeviceToHost);
    if (cuerrcode != cudaSuccess)
        return cuerrcode;
    cuerrcode = cudaMemcpy(rotateinfo, rtinfo_dev, sizeof (RotationInfo),
                           cudaMemcpyDeviceToHost);
    if (cuerrcode != cudaSuccess)
        return cuerrcode;

    cudaFree(temp_dev);
    return NO_ERROR;
}

// Function: _calcBoundingRectParamHost (compute the parameters of the
// bounding rectangle on the host)
static __host__ int _calcBoundingRectParamHost(
        Image *inimg, unsigned char value, RotationInfo *rotateinfo,
        BoundBox *boundbox)
{
    // Check whether the input pointers are NULL; if so, return an error
    // directly.
    if (inimg == NULL || rotateinfo == NULL || boundbox == NULL)
        return NULL_POINTER;

    // This block of code performs the image preprocessing, i.e. it makes sure
    // the input image data is available where the computation runs.
    int errcode;  // Local variable: error code.

    // Copy the input image to host memory.
    errcode = ImageBasicOp::copyToHost(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Local variables.
    Coordinate expcenter, rtexpcenter;
    CovarianceMatrix covmatrix;
    ObjPixelPosSumInfoInner pixelsuminfo;

    // Compute the pixel information of the object.
    _objCalcPixelInfo(&insubimgCud, value, &pixelsuminfo);

    // Compute the expected center point.
    _objCalcExpectedCenter(&pixelsuminfo, &expcenter);

    // Compute the covariance matrix.
    _objCalcCovMatrix(&insubimgCud, &expcenter, value, &covmatrix);

    // Compute the parameters needed by _brCalcExtreamPoint.
    _brCalcParamforExtreamPoint(&covmatrix, rotateinfo, boundbox, &expcenter,
                                &rtexpcenter);

    // Compute the extreme points of the bounding rectangle.
    _brCalcExtreamPoint(&insubimgCud, value, &rtexpcenter, rotateinfo,
                        boundbox);

    return NO_ERROR;
}

// Host member method: boundingRect (find the bounding rectangle of the object
// with the given pixel value)
__host__ int BoundingRect::boundingRect(Image *inimg, Quadrangle *outrect)
{
    // Check whether the input image and the output rectangle are NULL; if so,
    // return an error directly.
    if (inimg == NULL || outrect == NULL)
        return NULL_POINTER;

    int errcode;  // Local variable: error code.

    // Local variables.
    RotationInfo rotateinfo;
    BoundBoxInt bdboxint;

    // Call the function _calcBoundingRectParam.
    errcode = _calcBoundingRectParam(inimg, value, &rotateinfo, &bdboxint);
    if (errcode != NO_ERROR)
        return errcode;

    // Compute the angle of the bounding rectangle.
    outrect->angle = RECT_RAD_TO_DEG(rotateinfo.radian);

    // Compute the corner points of the axis-aligned bounding rectangle.
    Quadrangle temprect;
    temprect.points[0][0] = bdboxint.left;
    temprect.points[0][1] = bdboxint.top;
    temprect.points[1][0] = bdboxint.right;
    temprect.points[1][1] = bdboxint.top;
    temprect.points[2][0] = bdboxint.right;
    temprect.points[2][1] = bdboxint.bottom;
    temprect.points[3][0] = bdboxint.left;
    temprect.points[3][1] = bdboxint.bottom;

    // Rotate the corner points back; these are the corner points of the
    // result.
    rotateinfo.sin = -rotateinfo.sin;
    RECT_ROTATE_POINT(temprect.points[0], outrect->points[0], rotateinfo);
    RECT_ROTATE_POINT(temprect.points[1], outrect->points[1], rotateinfo);
    RECT_ROTATE_POINT(temprect.points[2], outrect->points[2], rotateinfo);
    RECT_ROTATE_POINT(temprect.points[3], outrect->points[3], rotateinfo);

    return NO_ERROR;
}

// Host member method: boundingRectHost (find the bounding rectangle of the
// object with the given pixel value)
__host__ int BoundingRect::boundingRectHost(Image *inimg, Quadrangle *outrect)
{
    // Check whether the input image and the output rectangle are NULL; if so,
    // return an error directly.
    if (inimg == NULL || outrect == NULL)
        return NULL_POINTER;

    int errcode;  // Local variable: error code.

    // Local variables.
    RotationInfo rotateinfo;
    BoundBox bdbox;

    // Call the function _calcBoundingRectParamHost.
    errcode = _calcBoundingRectParamHost(inimg, value, &rotateinfo, &bdbox);
    if (errcode != NO_ERROR)
        return errcode;

    // Compute the angle of the bounding rectangle.
    outrect->angle = RECT_RAD_TO_DEG(rotateinfo.radian);

    // Compute the corner points of the axis-aligned bounding rectangle.
    Quadrangle temprect;
    temprect.points[0][0] = bdbox.left;
    temprect.points[0][1] = bdbox.top;
    temprect.points[1][0] = bdbox.right;
    temprect.points[1][1] = bdbox.top;
    temprect.points[2][0] = bdbox.right;
    temprect.points[2][1] = bdbox.bottom;
    temprect.points[3][0] = bdbox.left;
    temprect.points[3][1] = bdbox.bottom;

    // Rotate the corner points back; these are the corner points of the
    // result.
    rotateinfo.sin = -rotateinfo.sin;
    RECT_ROTATE_POINT(temprect.points[0], outrect->points[0], rotateinfo);
    RECT_ROTATE_POINT(temprect.points[1], outrect->points[1], rotateinfo);
    RECT_ROTATE_POINT(temprect.points[2], outrect->points[2], rotateinfo);
    RECT_ROTATE_POINT(temprect.points[3], outrect->points[3], rotateinfo);

    return NO_ERROR;
}

// Host member method: boundingRect (find the bounding rectangle of the object
// with the given pixel value)
__host__ int BoundingRect::boundingRect(Image *inimg, DirectedRect *outrect)
{
    // Check whether the input image and the output rectangle are NULL; if so,
    // return an error directly.
    if (inimg == NULL || outrect == NULL)
        return NULL_POINTER;

    int errcode;  // Local variable: error code.

    // Local variables.
    RotationInfo rotateinfo;
    BoundBoxInt bdboxint;
    float boxcenter[2];

    // Call the function _calcBoundingRectParam.
    errcode = _calcBoundingRectParam(inimg, value, &rotateinfo, &bdboxint);
    if (errcode != NO_ERROR)
        return errcode;

    // Compute the rotation angle.
    outrect->angle = RECT_RAD_TO_DEG(rotateinfo.radian);

    // Compute the center coordinates.
    boxcenter[0] = (bdboxint.left + bdboxint.right) / 2.0f;
    boxcenter[1] = (bdboxint.top + bdboxint.bottom) / 2.0f;
    RECT_ROTATE_POINT(boxcenter, outrect->centerPoint, rotateinfo);

    // Compute the side lengths of the rectangle.
    outrect->length1 = bdboxint.right - bdboxint.left;
    outrect->length2 = bdboxint.top - bdboxint.bottom;

    // Choose the longer side as the rectangle's length.
    if (outrect->length1 < outrect->length2) {
        int length_temp;
        length_temp = outrect->length1;
        outrect->length1 = outrect->length2;
        outrect->length2 = length_temp;
    } else {
        // Handle the case where the rotation angle is negative.
        if (outrect->angle < 0.0f)
            outrect->angle += 90.0f;
        else
            outrect->angle -= 90.0f;
    }

    return NO_ERROR;
}

// Host member method: boundingRectHost (find the bounding rectangle of the
// object with the given pixel value)
__host__ int BoundingRect::boundingRectHost(Image *inimg, DirectedRect *outrect)
{
    // Check whether the input image and the output rectangle are NULL; if so,
    // return an error directly.
    if (inimg == NULL || outrect == NULL)
        return NULL_POINTER;

    int errcode;  // Local variable: error code.

    // Local variables.
    RotationInfo rotateinfo;
    BoundBox bdbox;
    float boxcenter[2];

    // Call the function _calcBoundingRectParamHost.
    errcode = _calcBoundingRectParamHost(inimg, value, &rotateinfo, &bdbox);
    if (errcode != NO_ERROR)
        return errcode;

    // Compute the rotation angle.
    outrect->angle = RECT_RAD_TO_DEG(rotateinfo.radian);

    // Compute the center coordinates.
    boxcenter[0] = (bdbox.left + bdbox.right) / 2.0f;
    boxcenter[1] = (bdbox.top + bdbox.bottom) / 2.0f;
    RECT_ROTATE_POINT(boxcenter, outrect->centerPoint, rotateinfo);

    // Compute the side lengths of the rectangle.
    outrect->length1 = bdbox.right - bdbox.left;
    outrect->length2 = bdbox.top - bdbox.bottom;

    // Choose the longer side as the rectangle's length.
    if (outrect->length1 < outrect->length2) {
        int length_temp;
        length_temp = outrect->length1;
        outrect->length1 = outrect->length2;
        outrect->length2 = length_temp;
    } else {
        // Handle the case where the rotation angle is negative.
        if (outrect->angle < 0.0f)
            outrect->angle += 90.0f;
        else
            outrect->angle -= 90.0f;
    }

    return NO_ERROR;
}
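// Illustrative sketch, not part of the original BoundingRect source: a
// minimal host-side restatement of the 2-D rotation that RECT_ROTATE_POINT is
// assumed to perform in the methods above. RotationInfoSketch only mirrors
// the cos/sin/radian fields that the code above reads; the real RotationInfo
// layout and the macro's exact sign convention may differ.
struct RotationInfoSketch {
    float cos, sin, radian;
};

static inline void rotatePointSketch(const float in[2], float out[2],
                                     const RotationInfoSketch &r)
{
    // Standard 2-D rotation by the angle encoded in (cos, sin). Negating
    // r.sin, as BoundingRect::boundingRect() does before mapping the corner
    // points back, inverts the rotation.
    out[0] = in[0] * r.cos - in[1] * r.sin;
    out[1] = in[0] * r.sin + in[1] * r.cos;
}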
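// Illustrative sketch, not part of the original file: brCelling2PInner() is
// called in _calcBoundingRectParam() above to derive blksize2p from the block
// thread count. Judging only from its name and usage it rounds its input up
// to the next power of two (convenient for shared-memory tree reductions); an
// assumed stand-alone equivalent could look like this.
static inline void ceilingToPowerOfTwoSketch(int n, int *out)
{
    int p = 1;
    while (p < n)
        p <<= 1;   // double until p >= n
    *out = p;
}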
using namespace facebook::cuda; namespace facebook { namespace deeplearning { namespace torch { namespace detail { template <int BatchSize, typename T> __launch_bounds__(256, 6) __global__ void accGradWeight(DeviceTensor<T, 4> input, DeviceTensor<float, 4> gradOutput, DeviceTensor<T, 6> gradWeight, float scale, int dH, int dW) { int outputRow = blockIdx.z; int outputCol = blockIdx.y * blockDim.y + threadIdx.y; int outputPlane = threadIdx.x / input.getSize(kPlaneDim); int inputRow = outputRow * dH; int inputCol = outputCol * dW; int inputPlane = threadIdx.x % input.getSize(kPlaneDim); int smemSize[3] = {blockDim.y, BatchSize, gradOutput.getSize(kPlaneDim)}; extern __shared__ float pShared[]; DeviceTensor<float, 3> gradOutputSMEM(pShared + 4, smemSize); if (outputCol < gradOutput.getSize(kWidthDim)) { { int outputPlaneT = threadIdx.x % gradOutput.getSize(kPlaneDim); int inputPlaneT = threadIdx.x / gradOutput.getSize(kPlaneDim); for (int image = inputPlaneT; image < BatchSize; image += input.getSize(kPlaneDim)) { gradOutputSMEM[threadIdx.y][image][outputPlaneT] = gradOutput[image] [outputRow][outputCol][outputPlaneT]; } } __syncthreads(); for (int kernelRow = 0; kernelRow < gradWeight.getSize(kKernelHeightDim); ++kernelRow) { for (int kernelCol = 0; kernelCol < gradWeight.getSize(kKernelWidthDim); ++kernelCol) { T sum; zero(sum); for (int image = 0; image < BatchSize; ++image) { float gradOut = gradOutputSMEM[threadIdx.y][image][outputPlane]; T in = input[image][inputRow + kernelRow][inputCol + kernelCol] [inputPlane].ldg(); sum += gradOut * in; } T gw = gradWeight[outputRow][outputCol][kernelRow] [kernelCol][outputPlane][inputPlane]; gw += scale * sum; gradWeight[outputRow][outputCol][kernelRow][kernelCol] [outputPlane][inputPlane] = gw; } } } } template <int BatchSize, typename T> __global__ void accGradWeight(DeviceTensor<T, 4> input, DeviceTensor<float, 4> gradOutput, DeviceTensor<T, 6> gradWeight, float scale, int dH, int dW, int inputPlaneThreads) { int outputRow = blockIdx.z; int outputCol = blockIdx.y * blockDim.y + threadIdx.y; int outputPlane = threadIdx.x / inputPlaneThreads; int inputRow = outputRow * dH; int inputCol = outputCol * dW; int inputThread = threadIdx.x % inputPlaneThreads; if (outputCol < gradOutput.getSize(kWidthDim)) { for (int kernelRow = 0; kernelRow < gradWeight.getSize(kKernelHeightDim); ++kernelRow) { for (int kernelCol = 0; kernelCol < gradWeight.getSize(kKernelWidthDim); ++kernelCol) { for (int inputPlane = inputThread; inputPlane < input.getSize(kPlaneDim); inputPlane += inputPlaneThreads) { T sum; zero(sum); for (int image = 0; image < BatchSize; ++image) { float gradOut = gradOutput[image][outputRow][outputCol] [outputPlane].ldg(); T in = input[image][inputRow + kernelRow] [inputCol + kernelCol][inputPlane].ldg(); sum += gradOut * in; } T gw = gradWeight[outputRow][outputCol][kernelRow] [kernelCol][outputPlane][inputPlane]; gw += scale * sum; gradWeight[outputRow][outputCol][kernelRow][kernelCol] [outputPlane][inputPlane] = gw; } } } } } // Dispatch based on input- and output-planes being powers of two // in which case an optimized version of the kernel can be used. 
// template <int BatchSize, typename T> void dispatchAccGradWeightsPlanePOT(cudaStream_t stream, DeviceTensor<T, 4> input, DeviceTensor<float, 4> gradOutput, DeviceTensor<T, 6> gradWeight, float scale, int dH, int dW) { const int kBlockSize = 256; // threads int inputPlaneThreads = kBlockSize / gradOutput.getSize(kPlaneDim); if (inputPlaneThreads < input.getSize(kPlaneDim)) { // inputPlaneThreads must be a power of two and not greater 32. inputPlaneThreads = std::min(32, inputPlaneThreads); inputPlaneThreads = greatestPowerOfTwoLessEq(inputPlaneThreads); dim3 block(gradOutput.getSize(kPlaneDim) * inputPlaneThreads); dim3 grid(1, gradOutput.getSize(kWidthDim), gradOutput.getSize(kHeightDim)); accGradWeight<BatchSize, T><<<grid, block, 0, stream>>>( input, gradOutput, gradWeight, scale, dH, dW, inputPlaneThreads); } else { int totalPlanes = gradOutput.getSize(kPlaneDim) * input.getSize(kPlaneDim); dim3 block(totalPlanes, kBlockSize / totalPlanes); dim3 grid(1, cuda::ceil(gradOutput.getSize(kWidthDim), static_cast<int>(block.y)), gradOutput.getSize(kHeightDim)); const int smem = (block.y * BatchSize * gradOutput.getSize(kPlaneDim) + 4) * sizeof(float); accGradWeight<BatchSize, T><<<grid, block, smem, stream>>>( input, gradOutput, gradWeight, scale, dH, dW); } } // Dispatch accGradWeight implementations depending on the possible degree // of in-thread-parallelism. template <int BatchSize> void dispatchAccGradWeightsITP(cudaStream_t stream, DeviceTensor<float, 4> input, DeviceTensor<float, 4> gradOutput, DeviceTensor<float, 6> gradWeight, float scale, int dH, int dW) { // determine if float4 based data IO is possible if (input.getSize(kPlaneDim) % 4 == 0 && isAligned(input.data(), sizeof(float4)) && kFloat4Optimization) { // create float4 based input tensor DeviceTensor<float4, 4> input4 = convertImageBatch<float4>(input); // creat float4 based weight tensor DeviceTensor<float4, 6> gradWeight4 = convertWeight<float4>(gradWeight); dispatchAccGradWeightsPlanePOT<BatchSize, float4>( stream, input4, gradOutput, gradWeight4, scale, dH, dW); } else if (input.getSize(kPlaneDim) % 2 == 0 && isAligned(input.data(), sizeof(float2)) && kFloat2Optimization) { // create float2 based input tensor DeviceTensor<float2, 4> input2 = convertImageBatch<float2>(input); // creat float2 based weight tensor DeviceTensor<float2, 6> gradWeight2 = convertWeight<float2>(gradWeight); dispatchAccGradWeightsPlanePOT<BatchSize, float2>( stream, input2, gradOutput, gradWeight2, scale, dH, dW); } else { dispatchAccGradWeightsPlanePOT<BatchSize, float>( stream, input, gradOutput, gradWeight, scale, dH, dW); } } // ----------------------------------------------------------------------------- // Bias // template <int BatchSize> __global__ void accGradBiasBatch(DeviceTensor<float, 4> gradOutput, DeviceTensor<float, 3> gradBias, float scale) { int outputCol = blockIdx.y * blockDim.y + threadIdx.y; int outputRow = blockIdx.z; int outputPlane = threadIdx.x; float sum = 0.0f; // guard against horizontal tiling overhang if (outputCol < gradOutput.getSize(kWidthDim)) { for (int image = 0; image < BatchSize; ++image) { sum += gradOutput[image][outputRow][outputCol][outputPlane]; } gradBias[outputRow][outputCol][outputPlane] += sum * scale; } } // This dispatcher method determines if an intra-warp reduction is // possible to reduce same-warp atomicAdd() usage (i.e. collisions). 
// template <int BatchSize> void dispatchAccGradBiases(cudaStream_t stream, DeviceTensor<float, 4> input, DeviceTensor<float, 4> gradOutput, DeviceTensor<float, 3> gradBias, float scale, int dH, int dW) { const int kBlockSize = 256; // threads int totalPlanes = gradOutput.getSize(kPlaneDim) * input.getSize(kPlaneDim); assert(gradOutput.getSize(kPlaneDim) <= 256); // assign subsequent threads to the output planes // if output planes is much smaller than kBlockSize tile horizontally dim3 block(gradOutput.getSize(kPlaneDim), kBlockSize / gradOutput.getSize(kPlaneDim)); // tile blocks over the complete output tensor width and hight dim3 grid(1, grid.y = cuda::ceil(gradOutput.getSize(kWidthDim), static_cast<int>(block.y)), gradOutput.getSize(kHeightDim)); accGradBiasBatch<BatchSize><<<grid, block, 0, stream>>>( gradOutput, gradBias, scale); } // Macro used in dispatcher function below. #define ACC_GRAD_PARAMETERS_CASE(B) case B: \ dispatchAccGradWeightsITP<B>(stream, input, gradOutput, \ gradWeight, scale, dH, dW); \ dispatchAccGradBiases<B>(stream, input, gradOutput, \ gradBias, scale, dH, dW); \ break // Dispatcher function that binds the batchSize, which must be a power-of-two // (POT) to a function template with the batch size baked in. void accGradParametersBatchPOT(cudaStream_t stream, DeviceTensor<float, 4> input, DeviceTensor<float, 4> gradOutput, DeviceTensor<float, 6> gradWeight, DeviceTensor<float, 3> gradBias, float scale, int batchSize, int dH, int dW) { switch (batchSize) { ACC_GRAD_PARAMETERS_CASE(128); ACC_GRAD_PARAMETERS_CASE(64); ACC_GRAD_PARAMETERS_CASE(32); ACC_GRAD_PARAMETERS_CASE(16); ACC_GRAD_PARAMETERS_CASE(8); ACC_GRAD_PARAMETERS_CASE(4); ACC_GRAD_PARAMETERS_CASE(2); ACC_GRAD_PARAMETERS_CASE(1); default: assert(false); // input validation, for debugging only } } // based on perf benchmarking for K40 a batchsize of 32 reaches maximum // kernel efficiency, which starts to drop again above this threshold (due to // excessive register pressure/local memory spilling). // const int kAccGradParametersMaxBatchSize = 32; // Breaks the problem up into batches that are powers of two. 
void locallyConnectedAccGradParameters(cudaStream_t stream, const float* input, const float* gradOutput, float* gradWeight, float* gradBias, float scale, LocallyConnectedParam& params) { long batchIdx = 0; int weightSize[6] = {params.outputHeight, params.outputWidth, params.kernelHeight, params.kernelWidth, params.outputPlanes, params.inputPlanes}; DeviceTensor<float, 6> cudaGradWeight(gradWeight, weightSize); int biasSize[3] = {params.outputHeight, params.outputWidth, params.outputPlanes}; DeviceTensor<float, 3> cudaGradBias(gradBias, biasSize); int batchSize = kAccGradParametersMaxBatchSize; int inputSize[4] = {batchSize, params.inputHeight, params.inputWidth, params.inputPlanes}; int outputSize[4] = {batchSize, params.outputHeight, params.outputWidth, params.outputPlanes}; // break problem down along batch dimesion into power-of-two sized batches while (batchSize > 0) { while (batchIdx < (params.batchSize / batchSize) * batchSize) { DeviceTensor<float, 4> cudaInput(const_cast<float*>(input), inputSize); DeviceTensor<float, 4> cudaGradOutput(const_cast<float*>(gradOutput), outputSize); accGradParametersBatchPOT(stream, cudaInput, cudaGradOutput, cudaGradWeight, cudaGradBias, scale, batchSize, params.dH, params.dW); batchIdx += batchSize; input += cudaInput.numElements(); gradOutput += cudaGradOutput.numElements(); } batchSize /= 2; inputSize[0] = batchSize; outputSize[0] = batchSize; } } } // detail namespace }}} // namespaces
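// Illustrative note, not part of the original file: the two nested while
// loops in locallyConnectedAccGradParameters() above walk the batch in
// power-of-two chunks starting from kAccGradParametersMaxBatchSize (32).
// For a hypothetical params.batchSize of 50 the processed chunk sizes would
// be 32, then 16, then 2 (32 + 16 + 2 = 50), so every call to
// accGradParametersBatchPOT() sees a power-of-two batch size that its switch
// statement can dispatch on.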
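// Illustrative sketch, not part of the original file:
// greatestPowerOfTwoLessEq() is called in dispatchAccGradWeightsPlanePOT()
// above but defined elsewhere in the library. Based on its name and its use
// there (clamping inputPlaneThreads to a power of two), an assumed equivalent
// for n >= 1 might be:
static inline int greatestPowerOfTwoLessEqSketch(int n)
{
    int p = 1;
    while (p * 2 <= n)
        p *= 2;
    return p;
}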
#include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void WeightAvePoolForward(const int nthreads, const Dtype* const bottom_data, const Dtype* const bottom_feature, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int feature_length, Dtype* const top_data, Dtype* top_sum) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); //const int pool_size = (hend - hstart) * (wend - wstart); hstart = 
max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; Dtype similarity = 0.0; int n_index = 0; Dtype sum_value = 0.0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { // calculate simlarity similarity = 0 ; for(int l = 0 ; l < feature_length; ++ l) { n_index = n * feature_length * height * width + l * height * width; similarity += fabs(bottom_feature[n_index + ph * width + pw] - bottom_feature[n_index + h * height + w]); } aveval += bottom_slice[h * width + w] * expf(-1.0 * similarity); sum_value = sum_value + expf(-1.0*similarity); } } top_data[index] = aveval / sum_value; top_sum[n* height * width + ph * width + pw] = sum_value; } } template <typename Dtype> __global__ void MaskAvePoolForward(const int nthreads, const Dtype* const bottom_data, const Dtype* const bottom_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, Dtype* const top_sum) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); //const int pool_size = (hend - hstart) * (wend - wstart); Dtype cache_value = 0 ; hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; int w_index = index % width; int h_index = (index / width) % height; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const Dtype* const mask_slice = bottom_mask + n * height * width * kernel_h * kernel_w + h_index * width + w_index; int center_h = (kernel_h - 1) / 2; int center_w = (kernel_w - 1) / 2; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int channel_relative = (h- h_index + center_h) * kernel_w + (w - w_index + center_w); Dtype tmp = mask_slice[ channel_relative * (height * width)]; //Dtype tmp = 0.0; aveval += bottom_slice[h * width + w] * tmp; cache_value += tmp; } } //top_data[index] = aveval / cache_value; //if(cache_value < 1e-3) cache_value += 1e-3; top_data[index] = aveval / cache_value; //top_sum[n * height * width + index % (height * width)] = cache_value; top_sum[n * height * width + index % (height * width)] = cache_value; } } template <typename Dtype> __global__ void KNNPoolForward(const int nthreads, const Dtype* const bottom_data, const Dtype* const bottom_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int neighbour_num, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; Dtype cache_value = 0 ; Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const Dtype* const mask_slice = bottom_mask + n * height * width * 
neighbour_num; for (int k = 0; k < neighbour_num; ++k) { int tmp = static_cast<int>(mask_slice[k * height * width + ph * width + pw]); aveval += bottom_slice[tmp]; cache_value += 1; } top_data[index] = aveval / cache_value; } } template <typename Dtype> __global__ void KNNsimForward(const int nthreads, const Dtype* const bottom_mask, const Dtype* const bottom_feature, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int neighbour_num, const int feature_num, const int weight_type, Dtype* const top_bottom_sim) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % neighbour_num; const int n = index / pooled_width / pooled_height / neighbour_num; //Dtype cache_value = 0 ; Dtype aveval = 0; const Dtype* const feature_slice1 = bottom_feature + (n * feature_num) * height * width + ph * width + pw; const Dtype * const feature_slice2 = bottom_feature + n * feature_num * height * width; const Dtype* const mask_slice = bottom_mask + n * height * width * neighbour_num; for (int k = 0; k < feature_num; ++k) { int tmp = static_cast<int>(mask_slice[c * height * width + ph * width + pw]); Dtype difference = fabsf(feature_slice1[k * height * width] - feature_slice2[ k * height * width + tmp]); if(!weight_type) aveval += difference ; else aveval += difference * difference; //cache_value += 1; } top_bottom_sim[index] = max(exp(-1.0 * aveval),1e-5); } } template <typename Dtype> __global__ void KNNsimPoolForward(const int nthreads, const Dtype* const bottom_data, const Dtype* const bottom_mask, const Dtype* const top_bottom_sim,const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int neighbour_num, Dtype* const top_data, Dtype* const top_sum) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; Dtype cache_value = 0 ; Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const Dtype* const mask_slice = bottom_mask + n * height * width * neighbour_num; const Dtype* const top_bottom_sim_slice = top_bottom_sim + n * height * width * neighbour_num; for (int k = 0; k < neighbour_num; ++k) { int tmp = static_cast<int>(mask_slice[k * height * width + ph * width + pw]); if(tmp == (ph * width + pw)) continue; Dtype sim_value = top_bottom_sim_slice[k* height * width + ph * width + pw]; aveval += bottom_slice[tmp] * sim_value; cache_value += sim_value; } top_data[index] = aveval / cache_value; top_sum[n * height * width + ph * width + pw] = cache_value; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; 
const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); int neighbour_num = 0; // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; const Dtype *bottom_mask = NULL; Dtype* top_sum = NULL; const Dtype* bottom_feature = NULL; Dtype* top_bottom_sim_data = NULL; int weight_type = this->layer_param_.pooling_param().weight_type(); // const Dtype* bottom_sim_data = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // LOG(INFO) << "pooling layer forward"; // LOG(INFO) << bottom[0]->num() << ' '<< bottom[0]->channels() << bottom[0]->height() << bottom[0]->width(); // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; case PoolingParameter_PoolMethod_MASKAVE: bottom_mask = bottom[1]->gpu_data(); top_sum = top_sum_.mutable_gpu_data(); caffe_gpu_set(top_sum_.count(), Dtype(0.0), top_sum_.mutable_gpu_data()); MaskAvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_mask, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, top_sum); break; case PoolingParameter_PoolMethod_KNNPOOL: bottom_mask = bottom[1]->gpu_data(); neighbour_num = bottom[1]->channels(); KNNPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_mask, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, neighbour_num, top_data); //Forward_cpu(bottom, top); break; case PoolingParameter_PoolMethod_KNNWEIGHTEDPOOL: bottom_mask = bottom[1]->gpu_data(); bottom_feature = bottom[2]->gpu_data(); top_bottom_sim_data = top_bottom_sim_.mutable_gpu_data(); neighbour_num = bottom[1]->channels(); KNNsimForward<Dtype><<<CAFFE_GET_BLOCKS(bottom[1]->count()), CAFFE_CUDA_NUM_THREADS>>>( bottom[1]->count(), bottom_mask, bottom_feature, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, neighbour_num, bottom[2]->channels(), weight_type, top_bottom_sim_data); //bottom_sim_data = top_bottom_sim_.cpu_data(); // for(int k = 0; k < top_bottom_sim_.count(); ++k) 
LOG(INFO) << bottom_sim_data[k]; KNNsimPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_mask, top_bottom_sim_.gpu_data(), bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, neighbour_num, top_data, top_sum_.mutable_gpu_data()); break; case PoolingParameter_PoolMethod_WEIGHTEDAVE: //bottom_mask = bottom[1]->gpu_data(); top_sum = top_sum_.mutable_gpu_data(); bottom_feature = bottom[1]->gpu_data(); caffe_gpu_set(top_sum_.count(), Dtype(0.0), top_sum_.mutable_gpu_data()); WeightAvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_feature, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom[1]->channels(), top_data, top_sum); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void WeightAvePoolBackward(const int nthreads, const Dtype* const bottom_feature, const Dtype* const top_sum, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int feature_length, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype similarity = 0.0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); // int pool_size = (hend - hstart) * (wend - wstart); //calculate similarity similarity = 0.0; for (int l = 0; l < feature_length; ++l) { int n_index = n * feature_length * height * width + l * height * width; similarity += fabs(bottom_feature[n_index + ((index/width)%height) * width + index % width] - bottom_feature[n_index + ph * pooled_width + pw]); } gradient += top_diff_slice[ph * pooled_width + pw] / top_sum[n * height * width + ph * width + pw]; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void MaskAvePoolBackward(const int nthreads, const Dtype* const bottom_data, const Dtype* const bottom_mask, const Dtype* const top_sum, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; //Dtype gradient_mask = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_sum_slice = top_sum + n * pooled_height * pooled_width; const Dtype* const mask_slice = bottom_mask + n * height * width * kernel_h * kernel_w; // Dtype * const bottom_mask_diff_slice = bottom_mask_diff + n * height * width * kernel_h * kernel_w; int center_w = (kernel_w - 1)/ 2; int center_h = (kernel_h - 1)/ 2; int w_bottom = index % width; int h_bottom = (index / width) % height; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int channel_relative = (ph - h_bottom + center_h) * kernel_w + (pw - w_bottom + center_w); gradient += top_diff_slice[ph * pooled_width + pw] * mask_slice[index % (height * width) + channel_relative * (height * width)] / top_sum_slice[ph * pooled_width + pw]; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void MaskAveMaskBackward(const int nthreads, const Dtype* const bottom_data, const Dtype* const bottom_mask,const Dtype* const top_sum, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff_mask) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % pooled_width; const int h = (index / pooled_width) % pooled_height; const int c = (index / (pooled_width * pooled_height)) % (kernel_h * kernel_w); const int n = index / pooled_width / pooled_height / (kernel_h * kernel_w); Dtype gradient = 0; //Dtype gradient_mask = 0; const Dtype* const top_diff_slice = top_diff + n * channels * pooled_height * pooled_width; //const Dtype* const top_sum_slice = top_sum + n * pooled_height * pooled_width; const Dtype* const bottom_data_slice = bottom_data + n * channels * height * width; const Dtype* const top_sum_slice = top_sum + n* pooled_height * pooled_width; int center_w = (kernel_w - 1)/ 2; int center_h = (kernel_h - 1)/ 2; int relative_w = c % kernel_w - center_w; int relative_h = (c / kernel_w) % kernel_h - center_h; int bottom_w = w + relative_w; int bottom_h = h + relative_h; if(bottom_w < 0 || bottom_h < 0 || bottom_w >= width || bottom_h >= height) { bottom_diff_mask[index] = Dtype(0.0); return; } for(int ch = 0; ch < channels; ++ch) { gradient += top_diff_slice[ch * pooled_height * pooled_width + h * pooled_width + w] * bottom_data_slice[ch * height * width + bottom_h * width + bottom_w]/ top_sum_slice[h * pooled_width + w] ; } bottom_diff_mask[index] = gradient; } } template <typename Dtype> __global__ void KNNPoolBackward(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_to_bottom_corr,const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int neighbour_num, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; Dtype gradient = 0; //Dtype aval = 0; int index_w = 0; int index_h = 0; int index_0 = 0; //Dtype gradient_mask = 0; const Dtype* const 
top_diff_slice = top_diff + (n * channels + c) * height * width; const Dtype* const top_to_bottom_slice = top_to_bottom_corr + n * height * width * 3 * neighbour_num; for (int k = 0; k < 3 * neighbour_num; ++k) { if(top_to_bottom_slice[k * height * width + h * width + w] < 0) break; index_0 = static_cast<int>(top_to_bottom_slice[k * height * width + h * width + w]); index_w = index_0 % width; index_h = (index_0 / width)% height; gradient += (top_diff_slice[index_h * width + index_w]/static_cast<Dtype>(neighbour_num)); //aval = aval + 1; } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void KNNsimPoolBackward(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_to_bottom_corr,const Dtype* const top_diff, const Dtype* const bottom_top_sim, const Dtype* const top_sum, const int num, const int channels, const int height, const int width, const int neighbour_num, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; Dtype gradient = 0; //Dtype aval = 0; int index_w = 0; int index_h = 0; int index_0 = 0; //Dtype gradient_mask = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * height * width; const Dtype* const top_to_bottom_slice = top_to_bottom_corr + n * height * width * 3 * neighbour_num + h * width + w; const Dtype* const bottom_top_sim_slice = bottom_top_sim + n * 3 * neighbour_num * height * width + h * width + w; const Dtype* const top_sum_slice = top_sum + n * height * width; for (int k = 0; k < 3 * neighbour_num; ++k) { if(top_to_bottom_slice[k * height * width] < 0) break; index_0 = static_cast<int>(top_to_bottom_slice[k * height * width]); index_w = index_0 % width; index_h = (index_0 / width)% height; if(index_0 == (h * width + w)) continue; gradient += top_diff_slice[index_h * width + index_w] * bottom_top_sim_slice[k * height * width] / top_sum_slice[index_h * width + index_w]; //aval = aval + 1; } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); int neighbour_num = 0; // Blob<Dtype> top_to_bottom; int index = 0; int index_h = 0; int index_w = 0; Dtype top_index = 0; // Blob<Dtype> top_to_bottom_count; Dtype* top_to_bottom_count_data = NULL; int next_position = 0; // We'll output the mask to top[1] if it's of size >1. Dtype* top_to_bottom_data = NULL; Dtype* bottom_top_sim_data = NULL; const Dtype *top_bottom_sim_data = NULL; const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; const Dtype *bottom_data = bottom[0]->gpu_data(); const Dtype *bottom_feature = NULL; //const Dtype *bottom_mask = bottom[1]->gpu_data(); const Dtype *bottom_mask = NULL; //Dtype *bottom_mask_diff = NULL; const Dtype* top_sum = top_sum_.gpu_data(); // int mask_count = 0; caffe_gpu_set(count, Dtype(0.0), bottom_diff); switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; case PoolingParameter_PoolMethod_MASKAVE: // mask_count = bottom[1]->num() * bottom[1]->channels() * bottom[1]->height() * bottom[1]->width(); caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom[0]->mutable_gpu_diff()); caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom[1]->mutable_gpu_diff()); bottom_mask = bottom[1]->gpu_data(); //bottom_mask_diff = bottom[1]->mutable_gpu_diff(); // LOG(INFO) << "backward data average"; // NOLINT_NEXT_LINE(whitespace/operators) // LOG(INFO) << "backward mask average backward"; MaskAvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_mask, top_sum, top_diff, top[0]->num(), channels_, height_, width_, 
pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); //LOG(INFO) << "backward mask average"; // NOLINT_NEXT_LINE(whitespace/operators) /*MaskAveMaskBackward<Dtype><<<CAFFE_GET_BLOCKS(mask_count), CAFFE_CUDA_NUM_THREADS>>>( mask_count, bottom_data, bottom_mask, top_sum, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_mask_diff); caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom[1]->mutable_gpu_diff());*/ // LOG(INFO) << "backward mask average end"; break; case PoolingParameter_PoolMethod_KNNPOOL: //Backward_cpu(top, propagate_down,bottom); neighbour_num = bottom[1]->channels(); caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom[0]->mutable_gpu_diff()); //top_to_bottom.Reshape(num_, 3 * neighbour_num, height_, width_); caffe_gpu_set(top_to_bottom_.count(), Dtype(-1.0), top_to_bottom_.mutable_gpu_data()); bottom_mask = bottom[1]->cpu_data(); top_to_bottom_data = top_to_bottom_.mutable_cpu_data(); //top_to_bottom_count.Reshape(num_, 1, height_, width_); caffe_gpu_set(top_to_bottom_count_.count(), Dtype(0.0), top_to_bottom_count_.mutable_gpu_data()); top_to_bottom_count_data = top_to_bottom_count_.mutable_cpu_data(); //LOG(INFO) << top_to_bottom.num()<< top_to_bottom.channels()<<top_to_bottom.height()<<top_to_bottom.width(); for(int h = 0; h< height_; ++h) { for(int w = 0; w < width_; ++w) { top_index = static_cast<Dtype>(h * width_ + w); for (int n = 0; n < num_; ++n) { for(int k = 0; k < neighbour_num; ++k) { //LOG(INFO) << k; //LOG(INFO) << neighbour_num; index = static_cast<int>(bottom_mask[((n * neighbour_num + k) * height_ + h) * width_ + w ]); // LOG(INFO)<< index; index_w = index % width_; // LOG(INFO)<< index_w; index_h = (index/width_)%height_; //LOG(INFO)<< index_h; next_position = static_cast<int>(top_to_bottom_count_data[n * height_ * width_ + index_h * width_ + index_w]); //LOG(INFO) << next_position; if(next_position < 3 * neighbour_num) { // LOG(INFO) << next_position; top_to_bottom_data[((n * 3 * neighbour_num + next_position) * height_ + index_h) * width_ + index_w] = top_index; // LOG(INFO) << "top_to_bottom_data"; top_to_bottom_count_data[n * height_ * width_ + index_h * width_ + index_w] += 1.0; // LOG(INFO) << "top_to_bottom_count"; } else { LOG(INFO) << next_position; LOG(INFO) << "number exceed 2 * num_neighbourhood"; } } } } } KNNPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_to_bottom_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, neighbour_num , bottom_diff); break; case PoolingParameter_PoolMethod_KNNWEIGHTEDPOOL: //Backward_cpu(top, propagate_down,bottom); neighbour_num = bottom[1]->channels(); caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom[0]->mutable_gpu_diff()); //top_to_bottom.Reshape(num_, 3 * neighbour_num, height_, width_); caffe_gpu_set(top_to_bottom_.count(), Dtype(-1.0), top_to_bottom_.mutable_gpu_data()); bottom_mask = bottom[1]->cpu_data(); top_to_bottom_data = top_to_bottom_.mutable_cpu_data(); //top_to_bottom_count.Reshape(num_, 1, height_, width_); caffe_gpu_set(top_to_bottom_count_.count(), Dtype(0.0), top_to_bottom_count_.mutable_gpu_data()); top_to_bottom_count_data = top_to_bottom_count_.mutable_cpu_data(); caffe_gpu_set(bottom_top_sim_.count(), Dtype(0.0), bottom_top_sim_.mutable_gpu_data()); bottom_top_sim_data = bottom_top_sim_.mutable_cpu_data(); top_bottom_sim_data = top_bottom_sim_.cpu_data(); //LOG(INFO) << 
top_to_bottom.num()<< top_to_bottom.channels()<<top_to_bottom.height()<<top_to_bottom.width(); for(int h = 0; h< height_; ++h) { for(int w = 0; w < width_; ++w) { top_index = static_cast<Dtype>(h * width_ + w); for (int n = 0; n < num_; ++n) { for(int k = 0; k < neighbour_num; ++k) { index = static_cast<int>(bottom_mask[((n * neighbour_num + k) * height_ + h) * width_ + w ]); index_w = index % width_; index_h = (index/width_)%height_; next_position = static_cast<int>(top_to_bottom_count_data[n * height_ * width_ + index_h * width_ + index_w]); if(next_position < 3 * neighbour_num) { top_to_bottom_data[((n * 3 * neighbour_num + next_position) * height_ + index_h) * width_ + index_w] = top_index; bottom_top_sim_data[((n * 3 * neighbour_num + next_position) * height_ + index_h) * width_ + index_w] = top_bottom_sim_data[n * neighbour_num * height_ * width_ + k * height_ * width_ + h * width_ + w]; top_to_bottom_count_data[n * height_ * width_ + index_h * width_ + index_w] += 1.0; } else { LOG(INFO) << next_position; LOG(INFO) << "number exceed 2 * num_neighbourhood"; } } } } } KNNsimPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_to_bottom_.gpu_data(), top_diff, bottom_top_sim_.gpu_data(),top_sum_.gpu_data(), top[0]->num(), channels_, height_, width_, neighbour_num , bottom_diff); break; case PoolingParameter_PoolMethod_WEIGHTEDAVE: caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom[0]->mutable_gpu_diff()); caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom[1]->mutable_gpu_diff()); bottom_feature = bottom[1]->gpu_data(); WeightAvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_feature, top_sum, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom[1]->channels(), bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
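// Illustrative note, not part of the original file: every kernel above walks
// the flattened (n, c, ph, pw) index space through CUDA_KERNEL_LOOP, which in
// Caffe is a grid-stride loop. It is restated here under a different name so
// it does not clash with the real macro in the Caffe headers.
#define CUDA_KERNEL_LOOP_SKETCH(i, n)                                      \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);           \
         i += blockDim.x * gridDim.x)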
#include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "cuda_util.h" #include <iostream> #include "mat.h" __global__ void gpu_concat_forward_dim1(void* inputs, const ncnn::CudaMatInfo* input_info, int* input_offset, const int input_size, unsigned char* output, const ncnn::CudaMatInfo output_info) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column >= output_info.w) { return; } //find input for this output int input_matrix_index = 0; int i=0; while (input_offset[i] <= column && i < input_size) { input_matrix_index = i; ++i; } const unsigned char* current_input = reinterpret_cast<const unsigned char*>(*(reinterpret_cast<const long long *> (static_cast<const unsigned char*>(inputs)+input_matrix_index*sizeof(const char *)))); const ncnn::CudaMatInfo& current_input_info = input_info[input_matrix_index]; const int input_index = (column - input_offset[input_matrix_index]) * input_info[input_matrix_index].elemsize; const int output_index = column * output_info.elemsize; memcpy((void*)(output + output_index), (void*)(current_input + input_index), current_input_info.elemsize); } __global__ void gpu_concat_forward_dim3_height(void* inputs, const ncnn::CudaMatInfo* input_info, int* input_offset, const int input_size, unsigned char* output, const ncnn::CudaMatInfo output_info) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int channel = blockIdx.z * blockDim.z + threadIdx.z; if (column >= output_info.w || row >= output_info.h || channel >= output_info.c) { return; } //find input for this output int input_matrix_index = 0; int i=0; while (input_offset[i] <= row && i < input_size) { input_matrix_index = i; ++i; } const unsigned char* current_input = reinterpret_cast<const unsigned char*>(*(reinterpret_cast<const long long *> (static_cast<const unsigned char*>(inputs)+ input_matrix_index * sizeof(const char *)))); const ncnn::CudaMatInfo& current_input_info = input_info[input_matrix_index]; const int input_index = channel * input_info[input_matrix_index].cstep * input_info[input_matrix_index].elemsize + (row - input_offset[input_matrix_index]) * input_info[input_matrix_index].w * output_info.elemsize + column * input_info[input_matrix_index].elemsize; const int output_index = channel * output_info.cstep * output_info.elemsize + row * output_info.w * output_info.elemsize + column * output_info.elemsize; memcpy((void*)(output + output_index), (void*)(current_input + input_index), current_input_info.elemsize); } __global__ void gpu_concat_forward_dim3_width(void* inputs, const ncnn::CudaMatInfo* input_info, int* input_offset, const int input_size, unsigned char* output, const ncnn::CudaMatInfo output_info) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int channel = blockIdx.z * blockDim.z + threadIdx.z; if (column >= output_info.w || row >= output_info.h || channel >= output_info.c) { return; } //find input for this output int input_matrix_index = 0; int i=0; while (input_offset[i] <= column && i < input_size) { input_matrix_index = i; ++i; } const unsigned char* current_input = reinterpret_cast<const unsigned char*>(*(reinterpret_cast<const long long *> (static_cast<const unsigned char*>(inputs)+ input_matrix_index * sizeof(const char *)))); const ncnn::CudaMatInfo& current_input_info = input_info[input_matrix_index]; const int input_index = channel * input_info[input_matrix_index].cstep * 
input_info[input_matrix_index].elemsize + row * input_info[input_matrix_index].w * output_info.elemsize + (column - input_offset[input_matrix_index]) * input_info[input_matrix_index].elemsize; const int output_index = channel * output_info.cstep * output_info.elemsize + row * output_info.w * output_info.elemsize + column * output_info.elemsize; memcpy((void*)(output + output_index), (void*)(current_input + input_index), current_input_info.elemsize); } __global__ void gpu_concat_forward_dim3_channel(void* inputs, const ncnn::CudaMatInfo* input_info, int* input_offset, const int input_size, unsigned char* output, const ncnn::CudaMatInfo output_info) { const int column = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int channel = blockIdx.z * blockDim.z + threadIdx.z; if (column >= output_info.w || row >= output_info.h || channel >= output_info.c) { return; } //find input for this output int input_matrix_index = 0; int i=0; while (input_offset[i] <= channel && i < input_size) { input_matrix_index = i; ++i; } const unsigned char* current_input = reinterpret_cast<const unsigned char*>(*(reinterpret_cast<const long long *> (static_cast<const unsigned char*>(inputs)+ input_matrix_index * sizeof(const char *)))); const ncnn::CudaMatInfo& current_input_info = input_info[input_matrix_index]; const int input_index = (channel - input_offset[input_matrix_index]) * input_info[input_matrix_index].cstep * input_info[input_matrix_index].elemsize + row * input_info[input_matrix_index].w * output_info.elemsize + column * input_info[input_matrix_index].elemsize; const int output_index = channel * output_info.cstep * output_info.elemsize + row * output_info.w * output_info.elemsize + column * output_info.elemsize; memcpy((void*)(output + output_index), (void*)(current_input + input_index), current_input_info.elemsize); } namespace ncnn { int concat_cuda_forward(const std::vector<CudaMat>& bottom_blobs, CudaMat& top_blob, const int axis) { const int dims = bottom_blobs[0].dims; size_t elemsize = bottom_blobs[0].elemsize; const int positive_axis = axis < 0 ? 
dims + axis : axis; const int input_size = (bottom_blobs.size()); std::vector<int> input_offset; std::vector<ncnn::CudaMatInfo> input_info; std::vector<const int *> inputs; int* gpu_input_offset; ncnn::CudaMatInfo* gpu_input_info; int* gpu_inputs; const ncnn::CudaMatInfo output_info{top_blob}; checkCudaErrors(cudaMalloc(&gpu_input_offset, input_size*sizeof(int))); checkCudaErrors(cudaMalloc(&gpu_input_info, input_size*sizeof(ncnn::CudaMatInfo))); checkCudaErrors(cudaMalloc(&gpu_inputs, input_size * sizeof(int*))); if (dims == 1) { int thread_per_block_x = ((top_blob.w - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = 1; const int thread_per_block_z = 1; const int total_number_of_channels = top_blob.c; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); //find input offsets int offset = 0; for (int i=0; i<input_size; ++i) { input_offset.push_back(offset); input_info.push_back(ncnn::CudaMatInfo{bottom_blobs[i]}); inputs.push_back(static_cast<const int*>(bottom_blobs[i].get_craw_data())); offset += bottom_blobs[i].w; } checkCudaErrors(cudaMemcpy(gpu_input_offset, input_offset.data(), input_offset.size()*sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_input_info, input_info.data(), input_info.size()*sizeof(ncnn::CudaMatInfo), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_inputs, inputs.data(), inputs.size() * sizeof(int*), cudaMemcpyHostToDevice)); gpu_concat_forward_dim1<<<grid_size, block_size>>>(gpu_inputs, gpu_input_info, gpu_input_offset, input_size, static_cast<unsigned char*>(top_blob.get_raw_data()), output_info); } if ((dims == 2 && positive_axis == 0) || (dims == 3 && positive_axis == 1)) { int thread_per_block_x = ((top_blob.w - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = top_blob.c; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); //find input offsets int offset_h = 0; for (int i=0; i<input_size; ++i) { input_offset.push_back(offset_h); input_info.push_back(ncnn::CudaMatInfo{bottom_blobs[i]}); inputs.push_back(static_cast<const int*>(bottom_blobs[i].get_craw_data())); offset_h += bottom_blobs[i].h; } checkCudaErrors(cudaMemcpy(gpu_input_offset, input_offset.data(), input_offset.size()*sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_input_info, input_info.data(), input_info.size()*sizeof(ncnn::CudaMatInfo), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_inputs, inputs.data(), inputs.size() * sizeof(int*), cudaMemcpyHostToDevice)); gpu_concat_forward_dim3_height<<<grid_size, block_size>>>(gpu_inputs, gpu_input_info, gpu_input_offset, input_size, static_cast<unsigned char*>(top_blob.get_raw_data()), output_info); } if ((dims == 2 && positive_axis == 1) || (dims == 3 && positive_axis == 2)){ int thread_per_block_x = ((top_blob.w 
- 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = top_blob.c; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); //find input offsets int offset_w = 0; for (int i=0; i<input_size; ++i) { input_offset.push_back(offset_w); input_info.push_back(ncnn::CudaMatInfo{bottom_blobs[i]}); inputs.push_back(static_cast<const int*>(bottom_blobs[i].get_craw_data())); offset_w += bottom_blobs[i].w; } checkCudaErrors(cudaMemcpy(gpu_input_offset, input_offset.data(), input_offset.size()*sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_input_info, input_info.data(), input_info.size()*sizeof(ncnn::CudaMatInfo), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_inputs, inputs.data(), inputs.size() * sizeof(int*), cudaMemcpyHostToDevice)); gpu_concat_forward_dim3_width<<<grid_size, block_size>>>(gpu_inputs, gpu_input_info, gpu_input_offset, input_size, static_cast<unsigned char*>(top_blob.get_raw_data()), output_info); } if (dims == 3 && positive_axis == 0) { int thread_per_block_x = ((top_blob.w - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = top_blob.c; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); //find input offsets int offset_c = 0; for (int i=0; i<input_size; ++i) { input_offset.push_back(offset_c); input_info.push_back(ncnn::CudaMatInfo{bottom_blobs[i]}); inputs.push_back(static_cast<const int*>(bottom_blobs[i].get_craw_data())); offset_c += bottom_blobs[i].c; } checkCudaErrors(cudaMemcpy(gpu_input_offset, input_offset.data(), input_offset.size()*sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_input_info, input_info.data(), input_info.size()*sizeof(ncnn::CudaMatInfo), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(gpu_inputs, inputs.data(), inputs.size() * sizeof(int*), cudaMemcpyHostToDevice)); gpu_concat_forward_dim3_channel<<<grid_size, block_size>>>(gpu_inputs, gpu_input_info, gpu_input_offset, input_size, static_cast<unsigned char*>(top_blob.get_raw_data()), output_info); } checkCudaErrors(cudaFree(gpu_input_offset)); checkCudaErrors(cudaFree(gpu_input_info)); checkCudaErrors(cudaFree(gpu_inputs)); //todo other dims return 0; } }
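// --- Illustrative sketch (not part of the layer) -------------------------------------
// A minimal host-side sketch of the offset lookup used by the concat kernels above:
// input_offset[i] holds the starting coordinate (column, row or channel, depending on
// the concat axis) of blob i in the output, so the source blob for an output coordinate
// is the last entry whose offset is <= that coordinate. This sketch checks the index
// bound before reading the array, which is the safer ordering of the kernel's loop
// condition; the names ConcatSource and locate_source are illustrative, and at least
// one input blob is assumed.
#include <vector>

struct ConcatSource {
  int blob_index;   // which bottom blob the coordinate comes from
  int local_coord;  // coordinate inside that blob along the concat axis
};

ConcatSource locate_source(const std::vector<int>& input_offset, int coord) {
  int blob_index = 0;
  for (size_t i = 0; i < input_offset.size() && input_offset[i] <= coord; ++i) {
    blob_index = static_cast<int>(i);
  }
  return {blob_index, coord - input_offset[blob_index]};
}

// Example: three blobs of widths 4, 2 and 3 concatenated along w give offsets {0, 4, 6};
// output column 5 maps to blob 1, local column 1.
// --------------------------------------------------------------------------------------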
#include <thrust/host_vector.h> #include "hist_util.h" #include "quantile.cuh" #include "device_helpers.cuh" #include "timer.h" #include "../data/device_adapter.cuh" namespace xgboost { namespace common { namespace detail { struct EntryCompareOp { __device__ bool operator()(const Entry& a, const Entry& b) { if (a.index == b.index) { return a.fvalue < b.fvalue; } return a.index < b.index; } }; /** * \brief Extracts the cuts from sorted data. * * \param device The device. * \param cuts_ptr Column pointers to CSC structured cuts * \param sorted_data Sorted entries in segments of columns * \param column_sizes_scan Describes the boundaries of column segments in sorted data * \param out_cuts Output cut values */ void ExtractCutsSparse(int device, common::Span<SketchContainer::OffsetT const> cuts_ptr, Span<Entry const> sorted_data, Span<size_t const> column_sizes_scan, Span<SketchEntry> out_cuts); /** * \brief Extracts the cuts from sorted data, considering weights. * * \param device The device. * \param cuts_ptr Column pointers to CSC structured cuts * \param sorted_data Sorted entries in segments of columns. * \param weights_scan Inclusive scan of weights for each entry in sorted_data. * \param column_sizes_scan Describes the boundaries of column segments in sorted data. * \param cuts Output cuts. */ void ExtractWeightedCutsSparse(int device, common::Span<SketchContainer::OffsetT const> cuts_ptr, Span<Entry> sorted_data, Span<float> weights_scan, Span<size_t> column_sizes_scan, Span<SketchEntry> cuts); // Get column size from adapter batch and for output cuts. template <typename Iter> void GetColumnSizesScan(int device, size_t num_columns, size_t num_cuts_per_feature, Iter batch_iter, data::IsValidFunctor is_valid, size_t begin, size_t end, HostDeviceVector<SketchContainer::OffsetT> *cuts_ptr, dh::caching_device_vector<size_t>* column_sizes_scan) { column_sizes_scan->resize(num_columns + 1, 0); cuts_ptr->SetDevice(device); cuts_ptr->Resize(num_columns + 1, 0); dh::XGBCachingDeviceAllocator<char> alloc; auto d_column_sizes_scan = column_sizes_scan->data().get(); dh::LaunchN(device, end - begin, [=] __device__(size_t idx) { auto e = batch_iter[begin + idx]; if (is_valid(e)) { atomicAdd(&d_column_sizes_scan[e.column_idx], static_cast<size_t>(1)); } }); // Calculate cuts CSC pointer auto cut_ptr_it = dh::MakeTransformIterator<size_t>( column_sizes_scan->begin(), [=] __device__(size_t column_size) { return thrust::min(num_cuts_per_feature, column_size); }); thrust::exclusive_scan(thrust::cuda::par(alloc), cut_ptr_it, cut_ptr_it + column_sizes_scan->size(), cuts_ptr->DevicePointer()); thrust::exclusive_scan(thrust::cuda::par(alloc), column_sizes_scan->begin(), column_sizes_scan->end(), column_sizes_scan->begin()); } inline size_t constexpr BytesPerElement(bool has_weight) { // Double the memory usage for sorting. We need to assign weight for each element, so // sizeof(float) is added to all elements. return (has_weight ? sizeof(Entry) + sizeof(float) : sizeof(Entry)) * 2; } /* \brief Calcuate the length of sliding window. Returns `sketch_batch_num_elements` * directly if it's not 0. 
*/ size_t SketchBatchNumElements(size_t sketch_batch_num_elements, bst_row_t num_rows, bst_feature_t columns, size_t nnz, int device, size_t num_cuts, bool has_weight); // Compute number of sample cuts needed on local node to maintain accuracy // We take more cuts than needed and then reduce them later size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows); /* \brief Estimate required memory for each sliding window. * * It's not precise as to obtain exact memory usage for sparse dataset we need to walk * through the whole dataset first. Also if data is from host DMatrix, we copy the * weight, group and offset on first batch, which is not considered in the function. * * \param num_rows Number of rows in this worker. * \param num_columns Number of columns for this dataset. * \param nnz Number of non-zero element. Put in something greater than rows * * cols if nnz is unknown. * \param num_bins Number of histogram bins. * \param with_weights Whether weight is used, works the same for ranking and other models. * * \return The estimated bytes */ size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz, size_t num_bins, bool with_weights); // Count the valid entries in each column and copy them out. template <typename AdapterBatch, typename BatchIter> void MakeEntriesFromAdapter(AdapterBatch const& batch, BatchIter batch_iter, Range1d range, float missing, size_t columns, size_t cuts_per_feature, int device, HostDeviceVector<SketchContainer::OffsetT>* cut_sizes_scan, dh::caching_device_vector<size_t>* column_sizes_scan, dh::caching_device_vector<Entry>* sorted_entries) { auto entry_iter = dh::MakeTransformIterator<Entry>( thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) { return Entry(batch.GetElement(idx).column_idx, batch.GetElement(idx).value); }); data::IsValidFunctor is_valid(missing); // Work out how many valid entries we have in each column GetColumnSizesScan(device, columns, cuts_per_feature, batch_iter, is_valid, range.begin(), range.end(), cut_sizes_scan, column_sizes_scan); size_t num_valid = column_sizes_scan->back(); // Copy current subset of valid elements into temporary storage and sort sorted_entries->resize(num_valid); dh::XGBCachingDeviceAllocator<char> alloc; thrust::copy_if(thrust::cuda::par(alloc), entry_iter + range.begin(), entry_iter + range.end(), sorted_entries->begin(), is_valid); } void SortByWeight(dh::XGBCachingDeviceAllocator<char>* alloc, dh::caching_device_vector<float>* weights, dh::caching_device_vector<Entry>* sorted_entries); } // namespace detail // Compute sketch on DMatrix. // sketch_batch_num_elements 0 means autodetect. Only modify this for testing. 
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins, size_t sketch_batch_num_elements = 0); template <typename AdapterBatch> void ProcessSlidingWindow(AdapterBatch const& batch, int device, size_t columns, size_t begin, size_t end, float missing, SketchContainer* sketch_container, int num_cuts) { // Copy current subset of valid elements into temporary storage and sort dh::caching_device_vector<Entry> sorted_entries; dh::caching_device_vector<size_t> column_sizes_scan; auto batch_iter = dh::MakeTransformIterator<data::COOTuple>( thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) { return batch.GetElement(idx); }); HostDeviceVector<SketchContainer::OffsetT> cuts_ptr; detail::MakeEntriesFromAdapter(batch, batch_iter, {begin, end}, missing, columns, num_cuts, device, &cuts_ptr, &column_sizes_scan, &sorted_entries); dh::XGBCachingDeviceAllocator<char> alloc; thrust::sort(thrust::cuda::par(alloc), sorted_entries.begin(), sorted_entries.end(), detail::EntryCompareOp()); auto const& h_cuts_ptr = cuts_ptr.ConstHostVector(); auto d_cuts_ptr = cuts_ptr.ConstDeviceSpan(); dh::caching_device_vector<SketchEntry> cuts(h_cuts_ptr.back()); // Extract the cuts from all columns concurrently detail::ExtractCutsSparse(device, d_cuts_ptr, dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan), dh::ToSpan(cuts)); sorted_entries.clear(); sorted_entries.shrink_to_fit(); sketch_container->Push(cuts_ptr.ConstDeviceSpan(), &cuts); } template <typename Batch> void ProcessWeightedSlidingWindow(Batch batch, MetaInfo const& info, int num_cuts_per_feature, bool is_ranking, float missing, int device, size_t columns, size_t begin, size_t end, SketchContainer *sketch_container) { dh::XGBCachingDeviceAllocator<char> alloc; dh::safe_cuda(cudaSetDevice(device)); info.weights_.SetDevice(device); auto weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); auto d_group_ptr = dh::ToSpan(group_ptr); auto batch_iter = dh::MakeTransformIterator<data::COOTuple>( thrust::make_counting_iterator(0llu), [=] __device__(size_t idx) { return batch.GetElement(idx); }); dh::caching_device_vector<Entry> sorted_entries; dh::caching_device_vector<size_t> column_sizes_scan; HostDeviceVector<SketchContainer::OffsetT> cuts_ptr; detail::MakeEntriesFromAdapter(batch, batch_iter, {begin, end}, missing, columns, num_cuts_per_feature, device, &cuts_ptr, &column_sizes_scan, &sorted_entries); data::IsValidFunctor is_valid(missing); dh::caching_device_vector<float> temp_weights(sorted_entries.size()); auto d_temp_weights = dh::ToSpan(temp_weights); if (is_ranking) { auto const weight_iter = dh::MakeTransformIterator<float>( thrust::make_constant_iterator(0lu), [=]__device__(size_t idx) -> float { auto ridx = batch.GetElement(idx).row_idx; bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx); return weights[group_idx]; }); auto retit = thrust::copy_if(thrust::cuda::par(alloc), weight_iter + begin, weight_iter + end, batch_iter + begin, d_temp_weights.data(), // output is_valid); CHECK_EQ(retit - d_temp_weights.data(), d_temp_weights.size()); } else { CHECK_EQ(batch.NumRows(), weights.size()); auto const weight_iter = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0lu), [=]__device__(size_t idx) -> float { return weights[batch.GetElement(idx).row_idx]; }); auto retit = thrust::copy_if(thrust::cuda::par(alloc), weight_iter + begin, weight_iter + end, batch_iter + begin, d_temp_weights.data(), // output is_valid); CHECK_EQ(retit - d_temp_weights.data(), 
d_temp_weights.size()); } detail::SortByWeight(&alloc, &temp_weights, &sorted_entries); auto const& h_cuts_ptr = cuts_ptr.ConstHostVector(); auto d_cuts_ptr = cuts_ptr.ConstDeviceSpan(); // Extract cuts dh::caching_device_vector<SketchEntry> cuts(h_cuts_ptr.back()); detail::ExtractWeightedCutsSparse(device, d_cuts_ptr, dh::ToSpan(sorted_entries), dh::ToSpan(temp_weights), dh::ToSpan(column_sizes_scan), dh::ToSpan(cuts)); sorted_entries.clear(); sorted_entries.shrink_to_fit(); // add cuts into sketches sketch_container->Push(cuts_ptr.ConstDeviceSpan(), &cuts); } /* * \brief Perform sketching on GPU. * * \param batch A batch from adapter. * \param num_bins Bins per column. * \param info Metainfo used for sketching. * \param missing Floating point value that represents invalid value. * \param sketch_container Container for output sketch. * \param sketch_batch_num_elements Number of element per-sliding window, use it only for * testing. */ template <typename Batch> void AdapterDeviceSketch(Batch batch, int num_bins, MetaInfo const& info, float missing, SketchContainer* sketch_container, size_t sketch_batch_num_elements = 0) { size_t num_rows = batch.NumRows(); size_t num_cols = batch.NumCols(); size_t num_cuts_per_feature = detail::RequiredSampleCutsPerColumn(num_bins, num_rows); int32_t device = sketch_container->DeviceIdx(); bool weighted = info.weights_.Size() != 0; if (weighted) { sketch_batch_num_elements = detail::SketchBatchNumElements( sketch_batch_num_elements, num_rows, num_cols, std::numeric_limits<size_t>::max(), device, num_cuts_per_feature, true); for (auto begin = 0ull; begin < batch.Size(); begin += sketch_batch_num_elements) { size_t end = std::min(batch.Size(), size_t(begin + sketch_batch_num_elements)); ProcessWeightedSlidingWindow(batch, info, num_cuts_per_feature, CutsBuilder::UseGroup(info), missing, device, num_cols, begin, end, sketch_container); } } else { sketch_batch_num_elements = detail::SketchBatchNumElements( sketch_batch_num_elements, num_rows, num_cols, std::numeric_limits<size_t>::max(), device, num_cuts_per_feature, false); for (auto begin = 0ull; begin < batch.Size(); begin += sketch_batch_num_elements) { size_t end = std::min(batch.Size(), size_t(begin + sketch_batch_num_elements)); ProcessSlidingWindow(batch, device, num_cols, begin, end, missing, sketch_container, num_cuts_per_feature); } } } } // namespace common } // namespace xgboost #endif // COMMON_HIST_UTIL_CUH_
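// --- Illustrative sketch (not part of the library) ------------------------------------
// A minimal sketch of the batching arithmetic used by AdapterDeviceSketch above:
// detail::BytesPerElement doubles the per-entry cost to leave room for sorting (plus
// sizeof(float) per entry when weights are used), and the adapter is then walked in
// windows of sketch_batch_num_elements entries. kEntryBytes is an assumed size for
// Entry (index + fvalue), and process_window stands in for ProcessSlidingWindow /
// ProcessWeightedSlidingWindow; both are placeholders for illustration only.
#include <algorithm>
#include <cstddef>
#include <functional>

constexpr std::size_t kEntryBytes = 8;  // assumed sizeof(Entry): 32-bit index + 32-bit fvalue

constexpr std::size_t BytesPerElementSketch(bool has_weight) {
  // Doubled to account for the temporary copy made while sorting.
  return (has_weight ? kEntryBytes + sizeof(float) : kEntryBytes) * 2;
}

// Walk [0, total) in windows of `window` elements (assumed > 0), mirroring the
// begin/end loop in AdapterDeviceSketch.
void ForEachWindow(std::size_t total, std::size_t window,
                   const std::function<void(std::size_t, std::size_t)>& process_window) {
  for (std::size_t begin = 0; begin < total; begin += window) {
    const std::size_t end = std::min(total, begin + window);
    process_window(begin, end);
  }
}
// --------------------------------------------------------------------------------------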
\brief Convolution 2D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "conv2d_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace cutlass::library; namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kConv2d, { {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"}, {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, }, { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } ) { description_ = " Conv2d operation. 
Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)"; } /// Destructor Conv2dOperationProfiler::~Conv2dOperationProfiler() { } /// Prints usage statement for the math function void Conv2dOperationProfiler::print_usage(std::ostream &out) const { out << "Conv2d" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Conv2dOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular convolution (specify all the convolution parameters):\n" << " $ cutlass_profiler --operation=Conv2d" " --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32" " --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3" " --pad_h=1 --pad_w=1" " --stride::h=1 --stride::w=1" " --dilation::h=1 --dilation::w=1\n\n"; } #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Total number of bytes loaded int64_t Conv2dOperationProfiler::Conv2dProblem::bytes( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); // Input bytes read and Output bytes written for the gemm problem int64_t bytes_ = int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); } return bytes_; } /// Total number of flops computed int64_t Conv2dOperationProfiler::Conv2dProblem::flops( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; // Adjust mainloop flop for dgrad strided if (operation_desc.conv_kind == library::ConvKind::kDgrad) { flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w); } int64_t flops_total_ = flops_mainloop_ + flops_epilogue_; //complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_total_ *=4; break; default: break; } return flops_total_; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Conv2dOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(operation->description()); if (!arg_as_int(problem_.n, "n", problem_space, problem)) { // default value problem_.n = 1; } if (!arg_as_int(problem_.h, "h", problem_space, problem)) { // default value 
problem_.h = 16; } if (!arg_as_int(problem_.w, "w", problem_space, problem)) { // default value problem_.w = 16; } if (!arg_as_int(problem_.c, "c", problem_space, problem)) { // default value problem_.c = 64; } if (!arg_as_int(problem_.k, "k", problem_space, problem)) { // default value problem_.k = 64; } if (!arg_as_int(problem_.r, "r", problem_space, problem)) { // default value problem_.r = 3; } if (!arg_as_int(problem_.s, "s", problem_space, problem)) { // default value problem_.s = 3; } if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { // default value problem_.pad_h = 1; } if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { // default value problem_.pad_w = 1; } if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { // default value problem_.stride_h = 1; } if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { // default value problem_.stride_w = 1; } if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { // default value problem_.dilation_h = 1; } if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { // default value problem_.dilation_w = 1; } //////////////////////// Convolution output dimensions p and q //////////////////////// // Cutlass convolutions support arbitrary output sizes and not constriant by // // input, filter, padding, striding, dilation sizes. // // cuDNN sets the output dimensions (p, q) using following equations: // // // // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // // where; div_up(a, b) : (a - 1)/b + 1 // // // // Thus, when output p and q dimensions are unspecified by the user // // cutlass profiler sets p and q which are cuDNN compliant. // // // //////////////////////////////////////////////////////////////////////////////////////// // set convolution output p if (!arg_as_int(problem_.p, "p", problem_space, problem)) { // default value (set using cudnn formula for output height, when p is not provided) problem_.p = ( problem_.h + 2 * problem_.pad_h - ((problem_.r - 1) * problem_.dilation_h + 1) ) / (problem_.stride_h) + 1; } // set convolution output q if (!arg_as_int(problem_.q, "q", problem_space, problem)) { // default value (set using cudnn formula for output width, when q is not provided) problem_.q = ( problem_.w + 2 * problem_.pad_w - ((problem_.s - 1) * problem_.dilation_w + 1) ) / (problem_.stride_w) + 1; } ///////////////////////////////////////////////////////////////////////////////////////// if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { // default value problem_.split_k_mode = library::SplitKMode::kSerial; } if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { // default value problem_.split_k_slices = 1; } if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { // default value problem_.conv_mode = library::ConvModeID::kCrossCorrelation; } if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { // default value problem_.eq_gemm_provider = library::Provider::kNone; } if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) 
{ return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( problem_.alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( problem_.beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } // initialize library::Conv2dConfiguration conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize( int(problem_.n), int(problem_.h), int(problem_.w), int(problem_.c), int(problem_.k), int(problem_.r), int(problem_.s), int(problem_.p), int(problem_.q), int(problem_.pad_h), int(problem_.pad_w), int(problem_.stride_h), int(problem_.stride_w), int(problem_.dilation_h), int(problem_.dilation_w), static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)), int(problem_.split_k_slices), 1 // groups ); conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode)); conv_workspace_.set_stride_vector( problem_, operation_desc.conv_kind, operation_desc.A.layout, operation_desc.B.layout, operation_desc.C.layout); // initialize library::ConvArguments conv_workspace_.arguments.A = nullptr; conv_workspace_.arguments.B = nullptr; conv_workspace_.arguments.C = nullptr; conv_workspace_.arguments.D = nullptr; conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // initialize reduction operation for parallel splitKMode if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { return Status::kErrorInternal; } } initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); } /// Initializes the performance result void Conv2dOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; result.arguments.resize(problem_space.rank()); set_argument(result, "Activation", problem_space, std::string(library::to_string(operation_desc.activation().element)) + ":" + library::to_string(operation_desc.activation().layout)); set_argument(result, "Filter", problem_space, std::string(library::to_string(operation_desc.filter().element)) + ":" + library::to_string(operation_desc.filter().layout)); set_argument(result, "Output", problem_space, std::string(library::to_string(operation_desc.output().element)) + ":" + library::to_string(operation_desc.output().layout)); set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm))); 
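// --- Illustrative sketch (not part of the profiler) ------------------------------------
// A minimal sketch of the cuDNN-compliant default used above for the output dimensions
// p and q when they are not specified on the command line:
//   output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride)
// with div_up(a, b) = (a - 1) / b + 1, which reduces to the integer expression used in
// initialize_configuration: p = (h + 2*pad_h - ((r - 1)*dilation_h + 1)) / stride_h + 1.
// The helper name below is illustrative only.
inline int conv_output_dim(int input, int pad, int filter, int dilation, int stride) {
  return (input + 2 * pad - ((filter - 1) * dilation + 1)) / stride + 1;
}
// Example with the profiler's defaults (h = 16, pad_h = 1, r = 3, dilation_h = 1,
// stride_h = 1): p = (16 + 2 - 3) / 1 + 1 = 16.
// ----------------------------------------------------------------------------------------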
set_argument(result, "n", problem_space, problem_.n); set_argument(result, "h", problem_space, problem_.h); set_argument(result, "w", problem_space, problem_.w); set_argument(result, "c", problem_space, problem_.c); set_argument(result, "k", problem_space, problem_.k); set_argument(result, "r", problem_space, problem_.r); set_argument(result, "s", problem_space, problem_.s); set_argument(result, "p", problem_space, problem_.p); set_argument(result, "q", problem_space, problem_.q); set_argument(result, "pad_h", problem_space, problem_.pad_h); set_argument(result, "pad_w", problem_space, problem_.pad_w); set_argument(result, "stride_h", problem_space, problem_.stride_h); set_argument(result, "stride_w", problem_space, problem_.stride_w); set_argument(result, "dilation_h", problem_space, problem_.dilation_h); set_argument(result, "dilation_w", problem_space, problem_.dilation_w); set_argument(result, "split_k_mode", problem_space, std::string(library::to_string(problem_.split_k_mode))); set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); set_argument(result, "conv_mode", problem_space, std::string(library::to_string(problem_.conv_mode))); set_argument(result, "alpha", problem_space, library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); set_argument(result, "eq_gemm_provider", problem_space, std::string(library::to_string(problem_.eq_gemm_provider))); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Bytes of activation, filter, and output tensors int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) * conv_workspace_.configuration.problem_size.activation_size(); int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) * conv_workspace_.configuration.problem_size.filter_size(); int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) * conv_workspace_.configuration.problem_size.output_size(); // Bytes of activation, filter, and output tensors result.bytes = problem_.bytes(operation_desc); // Theoritical flops required for the computation result.flops = problem_.flops(operation_desc); // Measured runtime result.runtime = 0; } /// Initialize reduction problem dimenstions and library::Operation bool Conv2dOperationProfiler::initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); library::ConvKind const &conv_kind = conv_desc.conv_kind; if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { return false; } if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { return false; } /// This chooses the appropriate stride element of the row-major C tensor. int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 
2 : 0); /// intialize library::ReductionConfiguration conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; // find reduction operation library::ReductionFunctionalKey reduction_key( library::Provider::kCUTLASS, conv_desc.tile_description.math_instruction.element_accumulator, // element workspace conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator conv_desc.C.element, // element output conv_desc.element_epilogue // element compute ); #if 0// debug print to check which reduction instance is selected std::cout << reduction_key << "\n"; #endif auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { return false; } // initialize reduction operation required for parallel split-k conv2d operator reduction_op_ = reduction_it->second; // reduction operation found and initialized return true; } /// Initializes workspace Status Conv2dOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(underlying_operation->description()); // Compute the number of copies of the problem to avoid L2 camping. 
if (!options.profiling.workspace_count) { int64_t bytes = problem_.bytes(operation_desc); if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { conv_workspace_.problem_count = 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); } else { conv_workspace_.problem_count = 1; } } else { conv_workspace_.problem_count = options.profiling.workspace_count; } if (options.execution_mode != ExecutionMode::kDryRun) { conv_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, problem_.extent_a(operation_desc.conv_kind), conv_workspace_.configuration.stride_a, conv_workspace_.problem_count ); conv_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count ); conv_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); conv_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = underlying_operation->initialize( &conv_workspace_.configuration, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (status != Status::kSuccess) { return status; } if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); status = reduction_op_->initialize( &conv_workspace_.reduction_configuration, conv_workspace_.reduction_host_workspace.data(), nullptr); if (status != Status::kSuccess) { return status; } } } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kConv2d; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Conv2dOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, 
ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } cudaError_t result; // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// intialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } // // Run the CUTLASS operation // // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { results_.back().disposition = Disposition::kFailed; return false; } } #if 0 std::cout << "profiling : " << std::endl << "conv2d : " << operation->description().name << std::endl << "underlying conv2d : " << underlying_operation->description().name << std::endl << "reduction : " << reduction_op_->description().name << std::endl; #endif // run cutlass conv2d operation results_.back().status = underlying_operation->run( &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { results_.back().status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } } // Synchronize before running device reference result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUDNN // Run verification cudnn reference if (options.verification.provider_enabled(library::Provider::kCUDNN)) { // Guard against unsupported cases 
auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description()); Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); // Initialize reference data to the source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); if (status == Status::kSuccess) { // call cudnn verification if supported verify_with_cudnn_( options, report, device_context, operation, problem_space, problem); } else if (status == Status::kErrorInvalidProblem) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; } else { // set verification map for cudnn to not supported results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUDNN // Run verification device reference if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_device_reference_( options, report, device_context, operation, problem_space, problem); } // Run verification host reference if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_host_reference_( options, report, device_context, operation, problem_space, problem); } // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find host reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( library::Provider::kReferenceHost, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); #if 0 // debug print to check which host refererence instance is selected std::cout << conv2d_key << "\n"; #endif auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == 
operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // host refernce has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Copy input tensors A, B, and C from device to host buffers // conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); // // Initialize structure containing Conv2d arguments // conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Intialize host reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // // Run host reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; return true; } // // Copy host reference output to device memory for equality check on device // conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); // // Verify results // results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceHost); } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find device reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( library::Provider::kReferenceDevice, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, 
conv_desc.element_epilogue); auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // conv2d device reference minimum cc is 50 and no iterator algorithm library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // device refernce has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Intialize device reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run device reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified; return true; } // // Verify results // results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceDevice); } // Return true means continue profiling return true; } /// Measures performance results bool Conv2dOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction 
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// intialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data() ); } return true; } /// Method to profile a CUTLASS Operation Status Conv2dOperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments); if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { // Setup rotating workspace int workspace_idx = options.profiling.warmup_iterations + iteration; int problem_idx = (workspace_idx % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// intialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { // Setup rotating workspace int problem_idx = (iteration % conv_workspace_.problem_count); conv_arguments->A = 
conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// intialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool Conv2dOperationProfiler::verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); // // Construct cudnn operators // CudnnCreate handle; cudnnStatus_t status = handle.get_cudnn_create_status(); if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Initialize state // // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // cuDNN does not support four tensor arguments, so we copy the tensor C data into // tensor D. 
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); conv_workspace_.arguments.C = conv_workspace_.arguments.D; try { // // Construct dispatcher to cudnn operator // detail::cudnnConvDispatcher conv_op( conv_desc, conv_workspace_.configuration, conv_workspace_.arguments, handle ); if (conv_op.status != Status::kSuccess) { if (conv_op.status == Status::kErrorNotSupported) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } else { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } return true; } status = conv_op(handle); // Handle errors if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { save_workspace( device_context, options, conv_desc, library::Provider::kCUTLASS, library::Provider::kCUDNN); } } catch (...) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } // Return true means continue profiling return true; } #endif // #if CUTLASS_ENABLE_CUDNN ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
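// ---------------------------------------------------------------------------
// [Illustrative sketch, not part of the profiler above.] The measurement path
// in Conv2dOperationProfiler::profile_cutlass_() follows a common pattern:
// run warmup iterations whose results are discarded, start a GPU timer, run
// the timed iterations, then report the elapsed time divided by the iteration
// count (timer.duration(iteration)). The self-contained CUDA example below
// reproduces that pattern with plain CUDA events and a trivial kernel;
// saxpy_kernel, time_kernel_ms and the launch configuration are stand-ins,
// not CUTLASS APIs, and the real profiler also rotates workspaces and handles
// split-K reductions, which are omitted here.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void saxpy_kernel(int n, float a, const float* x, float* y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = a * x[i] + y[i];
}

// Returns the average runtime of one kernel launch in milliseconds.
static double time_kernel_ms(int n, int warmup_iterations, int iterations) {
  float *x = nullptr, *y = nullptr;
  cudaMalloc(&x, n * sizeof(float));   // contents are irrelevant for timing
  cudaMalloc(&y, n * sizeof(float));

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // Warmup loop: stabilizes clocks and caches, results are discarded.
  for (int i = 0; i < warmup_iterations; ++i)
    saxpy_kernel<<<grid, block>>>(n, 2.0f, x, y);

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i)
    saxpy_kernel<<<grid, block>>>(n, 2.0f, x, y);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);          // wait for completion before reading the timer

  float elapsed_ms = 0.0f;
  cudaEventElapsedTime(&elapsed_ms, start, stop);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(x);
  cudaFree(y);

  // Average runtime per iteration, mirroring timer.duration(iteration).
  return static_cast<double>(elapsed_ms) / iterations;
}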
#include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/iterator/counting_iterator.h> namespace cudf { namespace strings { namespace detail { namespace { /** * @brief Compute string sizes, string validities, and concatenate strings functor. * * This functor is executed twice. In the first pass, the sizes and validities of the output strings * will be computed. In the second pass, this will concatenate the strings within each list element * of the given lists column and apply the separator. The null-replacement string scalar * `string_narep_dv` (if valid) is used in place of any null string. * * @tparam Functor The functor which can check for validity of the input list at a given list index * as well as access to the separator corresponding to the list index. */ template <class Functor> struct compute_size_and_concatenate_fn { Functor const func; column_device_view const lists_dv; offset_type const* const list_offsets; column_device_view const strings_dv; string_scalar_device_view const string_narep_dv; separator_on_nulls const separate_nulls; output_if_empty_list const empty_list_policy; offset_type* d_offsets{nullptr}; // If d_chars == nullptr: only compute sizes and validities of the output strings. // If d_chars != nullptr: only concatenate strings. char* d_chars{nullptr}; [[nodiscard]] __device__ bool output_is_null(size_type const idx, size_type const start_idx, size_type const end_idx) const noexcept { if (func.is_null_list(lists_dv, idx)) { return true; } return empty_list_policy == output_if_empty_list::NULL_ELEMENT && start_idx == end_idx; } __device__ void operator()(size_type const idx) const noexcept { // If this is the second pass, and the row `idx` is known to be a null or empty string if (d_chars && (d_offsets[idx] == d_offsets[idx + 1])) { return; } // Indices of the strings within the list row auto const start_idx = list_offsets[idx]; auto const end_idx = list_offsets[idx + 1]; if (!d_chars && output_is_null(idx, start_idx, end_idx)) { d_offsets[idx] = 0; return; } auto const separator = func.separator(idx); char* output_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr; bool write_separator = false; auto size_bytes = size_type{0}; bool has_valid_element = false; for (size_type str_idx = start_idx; str_idx < end_idx; ++str_idx) { bool null_element = strings_dv.is_null(str_idx); has_valid_element = has_valid_element || !null_element; if (!d_chars && (null_element && !string_narep_dv.is_valid())) { size_bytes = 0; break; } if (write_separator && (separate_nulls == separator_on_nulls::YES || !null_element)) { if (output_ptr) output_ptr = detail::copy_string(output_ptr, separator); size_bytes += separator.size_bytes(); write_separator = false; } auto const d_str = null_element ? string_narep_dv.value() : strings_dv.element<string_view>(str_idx); if (output_ptr) output_ptr = detail::copy_string(output_ptr, d_str); size_bytes += d_str.size_bytes(); write_separator = write_separator || (separate_nulls == separator_on_nulls::YES) || !null_element; } // If there are all null elements, the output should be the same as having an empty list input: // a null or an empty string if (!d_chars) { d_offsets[idx] = has_valid_element ? size_bytes : 0; } } }; /** * @brief Functor accompanying with `compute_size_and_concatenate_fn` for computing output string * sizes, output string validities, and concatenating strings within list elements; used when the * separator is a string scalar. 
*/ struct scalar_separator_fn { string_scalar_device_view const d_separator; [[nodiscard]] __device__ bool is_null_list(column_device_view const& lists_dv, size_type const idx) const noexcept { return lists_dv.is_null(idx); } [[nodiscard]] __device__ string_view separator(size_type const) const noexcept { return d_separator.value(); } }; template <typename CompFn> struct validities_fn { CompFn comp_fn; validities_fn(CompFn comp_fn) : comp_fn(comp_fn) {} __device__ bool operator()(size_type idx) { auto const start_idx = comp_fn.list_offsets[idx]; auto const end_idx = comp_fn.list_offsets[idx + 1]; bool valid_output = !comp_fn.output_is_null(idx, start_idx, end_idx); if (valid_output) { bool check_elements = false; for (size_type str_idx = start_idx; str_idx < end_idx; ++str_idx) { bool const valid_element = comp_fn.strings_dv.is_valid(str_idx); check_elements = check_elements || valid_element; // if an element is null and narep is invalid, the output row is null if (!valid_element && !comp_fn.string_narep_dv.is_valid()) { return false; } } // handle empty-list-as-null output policy setting valid_output = check_elements || comp_fn.empty_list_policy == output_if_empty_list::EMPTY_STRING; } return valid_output; } }; } // namespace std::unique_ptr<column> join_list_elements(lists_column_view const& lists_strings_column, string_scalar const& separator, string_scalar const& narep, separator_on_nulls separate_nulls, output_if_empty_list empty_list_policy, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(lists_strings_column.child().type().id() == type_id::STRING, "The input column must be a column of lists of strings"); CUDF_EXPECTS(separator.is_valid(stream), "Parameter separator must be a valid string_scalar"); auto const num_rows = lists_strings_column.size(); if (num_rows == 0) { return make_empty_column(type_id::STRING); } // Accessing the child strings column of the lists column must be done by calling `child()` on the // lists column, not `get_sliced_child()`. This is because calling to `offsets_begin()` on the // lists column returns a pointer to the offsets of the original lists column, which may not start // from `0`. auto const strings_col = strings_column_view(lists_strings_column.child()); auto const lists_dv_ptr = column_device_view::create(lists_strings_column.parent(), stream); auto const strings_dv_ptr = column_device_view::create(strings_col.parent(), stream); auto const sep_dv = get_scalar_device_view(const_cast<string_scalar&>(separator)); auto const string_narep_dv = get_scalar_device_view(const_cast<string_scalar&>(narep)); auto const func = scalar_separator_fn{sep_dv}; auto const comp_fn = compute_size_and_concatenate_fn<decltype(func)>{func, *lists_dv_ptr, lists_strings_column.offsets_begin(), *strings_dv_ptr, string_narep_dv, separate_nulls, empty_list_policy}; auto [offsets_column, chars_column] = make_strings_children(comp_fn, num_rows, stream, mr); auto [null_mask, null_count] = cudf::detail::valid_if(thrust::counting_iterator<size_type>(0), thrust::counting_iterator<size_type>(num_rows), validities_fn{comp_fn}, stream, mr); return make_strings_column( num_rows, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask)); } namespace { /** * @brief Functor accompanying with `compute_size_and_concatenate_fn` for computing output string * sizes, output string validities, and concatenating strings within list elements; used when the * separators are given as a strings column. 
*/ struct column_separators_fn { column_device_view const separators_dv; string_scalar_device_view const sep_narep_dv; [[nodiscard]] __device__ bool is_null_list(column_device_view const& lists_dv, size_type const idx) const noexcept { return lists_dv.is_null(idx) || (separators_dv.is_null(idx) && !sep_narep_dv.is_valid()); } [[nodiscard]] __device__ string_view separator(size_type const idx) const noexcept { return separators_dv.is_valid(idx) ? separators_dv.element<string_view>(idx) : sep_narep_dv.value(); } }; } // namespace std::unique_ptr<column> join_list_elements(lists_column_view const& lists_strings_column, strings_column_view const& separators, string_scalar const& separator_narep, string_scalar const& string_narep, separator_on_nulls separate_nulls, output_if_empty_list empty_list_policy, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(lists_strings_column.child().type().id() == type_id::STRING, "The input column must be a column of lists of strings"); CUDF_EXPECTS(lists_strings_column.size() == separators.size(), "Separators column should be the same size as the lists columns"); auto const num_rows = lists_strings_column.size(); if (num_rows == 0) { return make_empty_column(type_id::STRING); } // Accessing the child strings column of the lists column must be done by calling `child()` on the // lists column, not `get_sliced_child()`. This is because calling to `offsets_begin()` on the // lists column returns a pointer to the offsets of the original lists column, which may not start // from `0`. auto const strings_col = strings_column_view(lists_strings_column.child()); auto const lists_dv_ptr = column_device_view::create(lists_strings_column.parent(), stream); auto const strings_dv_ptr = column_device_view::create(strings_col.parent(), stream); auto const string_narep_dv = get_scalar_device_view(const_cast<string_scalar&>(string_narep)); auto const sep_dv_ptr = column_device_view::create(separators.parent(), stream); auto const sep_narep_dv = get_scalar_device_view(const_cast<string_scalar&>(separator_narep)); auto const func = column_separators_fn{*sep_dv_ptr, sep_narep_dv}; auto const comp_fn = compute_size_and_concatenate_fn<decltype(func)>{func, *lists_dv_ptr, lists_strings_column.offsets_begin(), *strings_dv_ptr, string_narep_dv, separate_nulls, empty_list_policy}; auto [offsets_column, chars_column] = make_strings_children(comp_fn, num_rows, stream, mr); auto [null_mask, null_count] = cudf::detail::valid_if(thrust::counting_iterator<size_type>(0), thrust::counting_iterator<size_type>(num_rows), validities_fn{comp_fn}, stream, mr); return make_strings_column( num_rows, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask)); } } // namespace detail std::unique_ptr<column> join_list_elements(lists_column_view const& lists_strings_column, string_scalar const& separator, string_scalar const& narep, separator_on_nulls separate_nulls, output_if_empty_list empty_list_policy, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::join_list_elements(lists_strings_column, separator, narep, separate_nulls, empty_list_policy, rmm::cuda_stream_default, mr); } std::unique_ptr<column> join_list_elements(lists_column_view const& lists_strings_column, strings_column_view const& separators, string_scalar const& separator_narep, string_scalar const& string_narep, separator_on_nulls separate_nulls, output_if_empty_list empty_list_policy, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return 
detail::join_list_elements(lists_strings_column, separators, separator_narep, string_narep, separate_nulls, empty_list_policy, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
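// ---------------------------------------------------------------------------
// [Illustrative sketch, separate from the cudf code above.] make_strings_children
// invokes compute_size_and_concatenate_fn twice: in the first pass d_chars is
// nullptr and operator() only records each row's output byte count in
// d_offsets; the offsets are then scanned and the chars buffer allocated, and
// in the second pass the same operator() writes the concatenated bytes at
// d_chars + d_offsets[idx]. The host-side C++ sketch below mirrors that
// two-pass contract for joining rows of strings with a separator; join_row and
// build_column are made-up names, and the null handling / narep substitution
// of the real functor is omitted.
// ---------------------------------------------------------------------------
#include <cstring>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

struct join_row {
  const std::vector<std::vector<std::string>>* rows;
  std::string separator;
  std::vector<int>* offsets;  // size rows->size() + 1
  char* chars = nullptr;      // pass 1: nullptr (sizes only), pass 2: output buffer

  void operator()(size_t idx) const {
    const auto& row = (*rows)[idx];
    char* out = chars ? chars + (*offsets)[idx] : nullptr;
    int bytes = 0;
    for (size_t i = 0; i < row.size(); ++i) {
      if (i != 0) {  // separator goes between elements only
        if (out) { std::memcpy(out, separator.data(), separator.size()); out += separator.size(); }
        bytes += static_cast<int>(separator.size());
      }
      if (out) { std::memcpy(out, row[i].data(), row[i].size()); out += row[i].size(); }
      bytes += static_cast<int>(row[i].size());
    }
    if (!chars) (*offsets)[idx] = bytes;  // first pass records the row size only
  }
};

inline std::pair<std::vector<int>, std::vector<char>> build_column(
    const std::vector<std::vector<std::string>>& rows, const std::string& sep) {
  std::vector<int> offsets(rows.size() + 1, 0);
  join_row fn{&rows, sep, &offsets, nullptr};
  for (size_t i = 0; i < rows.size(); ++i) fn(i);  // pass 1: per-row sizes
  // Exclusive scan turns sizes into offsets; the trailing 0 becomes the total.
  std::exclusive_scan(offsets.begin(), offsets.end(), offsets.begin(), 0);
  std::vector<char> chars(static_cast<size_t>(offsets.back()));
  fn.chars = chars.data();
  for (size_t i = 0; i < rows.size(); ++i) fn(i);  // pass 2: write the bytes
  return {std::move(offsets), std::move(chars)};
}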
extern "C" { #include <ccv.h> #include <ccv_internal.h> #include <nnc/ccv_nnc.h> #include <nnc/ccv_nnc_easy.h> #include <nnc/ccv_nnc_internal.h> } #include <nnc/gpu/ccv_nnc_compat.h> #ifdef HAVE_CUDA static int _ccv_nnc_gemm_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) { assert(input_size >= 2); const ccv_nnc_tensor_view_t* a = (const ccv_nnc_tensor_view_t*)inputs[0]; const ccv_nnc_tensor_view_t* w = (const ccv_nnc_tensor_view_t*)inputs[1]; const ccv_nnc_tensor_view_t* bias = input_size > 2 ? (const ccv_nnc_tensor_view_t*)inputs[2] : 0; assert(output_size == 1); ccv_nnc_tensor_view_t* b = (ccv_nnc_tensor_view_t*)outputs[0]; assert(!bias || (bias->info.dim[1] == 0 || bias->info.dim[2] == 0 || bias->info.dim[3] == 0)); // It is a 1-d array int a_batch_size, a_rows, a_cols, a_batch_inc, a_rows_inc, a_cols_inc; int w_batch_size, w_rows, w_cols, w_batch_inc, w_rows_inc, w_cols_inc; int b_batch_size, b_rows, b_cols, b_batch_inc, b_rows_inc, b_cols_inc; const static int no_transpose[2] = {}; ccv_nnc_tensor_get_matrix_params(a->info, CCV_IS_TENSOR_VIEW(a) ? a->inc : a->info.dim, cmd.info.blas.transpose_a, &a_batch_size, &a_rows, &a_cols, &a_batch_inc, &a_rows_inc, &a_cols_inc); ccv_nnc_tensor_get_matrix_params(w->info, CCV_IS_TENSOR_VIEW(w) ? w->inc : w->info.dim, cmd.info.blas.transpose_b, &w_batch_size, &w_rows, &w_cols, &w_batch_inc, &w_rows_inc, &w_cols_inc); ccv_nnc_tensor_get_matrix_params(b->info, CCV_IS_TENSOR_VIEW(b) ? b->inc : b->info.dim, no_transpose, &b_batch_size, &b_rows, &b_cols, &b_batch_inc, &b_rows_inc, &b_cols_inc); assert(a_batch_size == b_batch_size); assert(a_batch_size == b_batch_size || a_batch_size == 1); if (a_batch_size == 1 && b_batch_size > 1) a_batch_inc = 0; assert(w_batch_size == a_batch_size || w_batch_size == 1); if (w_batch_size == 1 && b_batch_size > 1) w_batch_inc = 0; assert(a_rows == b_rows); assert(a_cols == w_rows); assert(w_cols == b_cols); cublasHandle_t cublas = ccv_nnc_stream_context_get_cublas(stream_context); static const float one = 1; static const float zero = 0; const int transpose_a = ccv_nnc_is_matrix_transpose(a->info, cmd.info.blas.transpose_a); const int transpose_w = ccv_nnc_is_matrix_transpose(w->info, cmd.info.blas.transpose_b); if (bias) { int bias_batch_size, bias_rows, bias_cols, bias_batch_inc, bias_rows_inc, bias_cols_inc; ccv_nnc_tensor_get_matrix_params(bias->info, CCV_IS_TENSOR_VIEW(bias) ? bias->inc : bias->info.dim, no_transpose, &bias_batch_size, &bias_rows, &bias_cols, &bias_batch_inc, &bias_rows_inc, &bias_cols_inc); assert(bias_batch_size == b_batch_size || bias_batch_size == 1); if (bias_batch_size == 1 && b_batch_size > 1) bias_batch_inc = 0; assert(bias_cols == b_cols); const void* const device_ones = ccv_nnc_stream_context_get_ones(stream_context, b_rows, b->info.datatype); if (b_batch_size == 1) { CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, b_cols, b_rows, 1, &one, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, device_ones, ccv_nnc_cuda_datatype(b->info.datatype), 1, &zero, b->data.u8, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); const cublasOperation_t transa = transpose_w ? CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transb = transpose_a ? 
CUBLAS_OP_T : CUBLAS_OP_N; const int lda_inc = transpose_w ? w_cols_inc : w_rows_inc; const int ldb_inc = transpose_a ? a_cols_inc : a_rows_inc; CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, transb, b_cols, b_rows, a_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &one, b->data.u8, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { #if CUDA_VERSION >= 9100 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, b_cols, b_rows, 1, &one, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, bias_batch_inc, device_ones, ccv_nnc_cuda_datatype(b->info.datatype), 1, 0, &zero, b->data.u8, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, b_batch_inc, b_batch_size, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else int i; for (i = 0; i < b_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, b_cols, b_rows, 1, &one, bias->data.u8 + CCV_GET_DATA_TYPE_SIZE(bias->info.datatype) * i * bias_batch_inc, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, device_ones, ccv_nnc_cuda_datatype(b->info.datatype), 1, &zero, b->data.u8 + CCV_GET_DATA_TYPE_SIZE(b->info.datatype) * i * b_batch_inc, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif const cublasOperation_t transa = transpose_w ? CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transb = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N; const int lda_inc = transpose_w ? w_cols_inc : w_rows_inc; const int ldb_inc = transpose_a ? a_cols_inc : a_rows_inc; #if CUDA_VERSION >= 9100 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, transa, transb, b_cols, b_rows, a_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, w_batch_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, a_batch_inc, &one, b->data.u8, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, b_batch_inc, b_batch_size, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else for (i = 0; i < b_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, transb, b_cols, b_rows, a_cols, &one, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &one, b->data.u8 + CCV_GET_DATA_TYPE_SIZE(b->info.datatype) * i * b_batch_inc, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif } } else { if (b_batch_size == 1) { const cublasOperation_t transa = transpose_w ? CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transb = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N; const int lda_inc = transpose_w ? w_cols_inc : w_rows_inc; const int ldb_inc = transpose_a ? a_cols_inc : a_rows_inc; CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, transb, b_cols, b_rows, a_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &zero, b->data.u8, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { const cublasOperation_t transa = transpose_w ? 
CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transb = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N; const int lda_inc = transpose_w ? w_cols_inc : w_rows_inc; const int ldb_inc = transpose_a ? a_cols_inc : a_rows_inc; #if CUDA_VERSION >= 9100 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, transa, transb, b_cols, b_rows, a_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, w_batch_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, a_batch_inc, &zero, b->data.u8, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, b_batch_inc, b_batch_size, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else int i; for (i = 0; i < b_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, transb, b_cols, b_rows, a_cols, &one, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &zero, b->data.u8 + CCV_GET_DATA_TYPE_SIZE(b->info.datatype) * i * b_batch_inc, ccv_nnc_cuda_datatype(b->info.datatype), b_rows_inc, ccv_nnc_cuda_compute_datatype(b->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #endif } } return CCV_NNC_EXEC_SUCCESS; } static int _ccv_nnc_gemm_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context) { // inputs: gradient, forw prop input, [w] // outputs: [output gradient], weight updates, bias updates assert(input_size >= 2 && output_size >= 2); const ccv_nnc_tensor_view_t* g = (const ccv_nnc_tensor_view_t*)inputs[0]; ccv_nnc_tensor_view_t* dw = (ccv_nnc_tensor_view_t*)outputs[1]; ccv_nnc_tensor_view_t* bias = output_size > 2 ? (ccv_nnc_tensor_view_t*)outputs[2] : 0; assert(!bias || (bias->info.dim[1] == 0 || bias->info.dim[2] == 0 || bias->info.dim[3] == 0)); // It is a 2-d or 3-d array. static const float one = 1; static const float zero = 0; cublasHandle_t cublas = ccv_nnc_stream_context_get_cublas(stream_context); int g_batch_size, g_rows, g_cols, g_batch_inc, g_rows_inc, g_cols_inc; const static int no_transpose[2] = {}; ccv_nnc_tensor_get_matrix_params(g->info, CCV_IS_TENSOR_VIEW(g) ? g->inc : g->info.dim, no_transpose, &g_batch_size, &g_rows, &g_cols, &g_batch_inc, &g_rows_inc, &g_cols_inc); int i; if (bias) { int bias_batch_size, bias_rows, bias_cols, bias_batch_inc, bias_rows_inc, bias_cols_inc; ccv_nnc_tensor_get_matrix_params(bias->info, CCV_IS_TENSOR_VIEW(bias) ? 
bias->inc : bias->info.dim, no_transpose, &bias_batch_size, &bias_rows, &bias_cols, &bias_batch_inc, &bias_rows_inc, &bias_cols_inc); assert(bias_cols == g_cols); assert(bias_batch_size == 1 || bias_batch_size == g_batch_size); if (bias_batch_size == 1 && g_batch_size > 1) bias_batch_inc = 0; const void* const device_ones = ccv_nnc_stream_context_get_ones(stream_context, g_rows, bias->info.datatype); if (g_batch_size > 1 && bias_batch_size == g_batch_size) { #if CUDA_VERSION >= 9100 if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, 0, &zero, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, bias_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, 0, &one, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, bias_batch_inc, bias_batch_size, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else for (i = 0; i < g_batch_size; i++) { if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, &zero, bias->data.u8 + CCV_GET_DATA_TYPE_SIZE(bias->info.datatype) * i * bias_batch_inc, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, &one, bias->data.u8 + CCV_GET_DATA_TYPE_SIZE(bias->info.datatype) * i * bias_batch_inc, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } #endif } else { if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, &zero, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, &one, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // We cannot use strided batched 
alternative because on write, the data could race to the same position for (i = 1; i < g_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, CUBLAS_OP_N, bias_cols, bias_rows, g_rows, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, device_ones, ccv_nnc_cuda_datatype(bias->info.datatype), g_rows, &one, bias->data.u8, ccv_nnc_cuda_datatype(bias->info.datatype), bias_rows_inc, ccv_nnc_cuda_compute_datatype(bias->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } } if (dw) { const ccv_nnc_tensor_view_t* a = (const ccv_nnc_tensor_view_t*)inputs[1]; const int transpose_a = ccv_nnc_is_matrix_transpose(a->info, cmd.info.blas.transpose_a); const int transpose_w = ccv_nnc_is_matrix_transpose(dw->info, cmd.info.blas.transpose_b); int a_batch_size, a_rows, a_cols, a_batch_inc, a_rows_inc, a_cols_inc; int dw_batch_size, dw_rows, dw_cols, dw_batch_inc, dw_rows_inc, dw_cols_inc; ccv_nnc_tensor_get_matrix_params(a->info, CCV_IS_TENSOR_VIEW(a) ? a->inc : a->info.dim, cmd.info.blas.transpose_a, &a_batch_size, &a_rows, &a_cols, &a_batch_inc, &a_rows_inc, &a_cols_inc); ccv_nnc_tensor_get_matrix_params(dw->info, CCV_IS_TENSOR_VIEW(dw) ? dw->inc : dw->info.dim, cmd.info.blas.transpose_b, &dw_batch_size, &dw_rows, &dw_cols, &dw_batch_inc, &dw_rows_inc, &dw_cols_inc); assert(a_rows == g_rows); assert(a_cols == dw_rows); assert(dw_cols == g_cols); assert(a_batch_size == g_batch_size || a_batch_size == 1); if (a_batch_size == 1 && g_batch_size > 1) a_batch_inc = 0; assert(dw_batch_size == g_batch_size || dw_batch_size == 1); if (dw_batch_size == 1 && g_batch_size > 1) dw_batch_inc = 0; if (g_batch_size > 1 && g_batch_size == dw_batch_size) { if (transpose_w) { const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N; const int lda_inc = transpose_a ? 
a_cols_inc : a_rows_inc; #if CUDA_VERSION >= 9100 if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, a_batch_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, &zero, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, dw_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, a_batch_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, &one, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, dw_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else for (i = 0; i < g_batch_size; i++) { if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &zero, dw->data.u8 + CCV_GET_DATA_TYPE_SIZE(dw->info.datatype) * i * dw_batch_inc, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &one, dw->data.u8 + CCV_GET_DATA_TYPE_SIZE(dw->info.datatype) * i * dw_batch_inc, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } #endif } else { const cublasOperation_t transb = transpose_a ? CUBLAS_OP_N : CUBLAS_OP_T; const int ldb_inc = transpose_a ? 
a_cols_inc : a_rows_inc; #if CUDA_VERSION >= 9100 if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, a_batch_inc, &zero, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, dw_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, a_batch_inc, &one, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, dw_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else for (i = 0; i < g_batch_size; i++) { if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &zero, dw->data.u8 + CCV_GET_DATA_TYPE_SIZE(dw->info.datatype) * i * dw_batch_inc, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &one, dw->data.u8 + CCV_GET_DATA_TYPE_SIZE(dw->info.datatype) * i * dw_batch_inc, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } #endif } } else { if (transpose_w) { const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N; const int lda_inc = transpose_a ? 
a_cols_inc : a_rows_inc; if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &zero, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &one, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); for (i = 1; i < g_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_T, dw_rows, dw_cols, a_rows, &one, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), lda_inc, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &one, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_cols_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { const cublasOperation_t transb = transpose_a ? CUBLAS_OP_N : CUBLAS_OP_T; const int ldb_inc = transpose_a ? a_cols_inc : a_rows_inc; if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &zero, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, a->data.u8, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &one, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); for (i = 1; i < g_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_N, transb, dw_cols, dw_rows, a_rows, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, a->data.u8 + CCV_GET_DATA_TYPE_SIZE(a->info.datatype) * i * a_batch_inc, ccv_nnc_cuda_datatype(a->info.datatype), ldb_inc, &one, dw->data.u8, ccv_nnc_cuda_datatype(dw->info.datatype), dw_rows_inc, ccv_nnc_cuda_compute_datatype(dw->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } } } ccv_nnc_tensor_view_t* h = (ccv_nnc_tensor_view_t*)outputs[0]; if (h) { const int transpose_h = ccv_nnc_is_matrix_transpose(h->info, cmd.info.blas.transpose_a); const ccv_nnc_tensor_view_t* w = (const ccv_nnc_tensor_view_t*)inputs[2]; const int transpose_w = ccv_nnc_is_matrix_transpose(w->info, cmd.info.blas.transpose_b); int h_batch_size, h_rows, h_cols, h_batch_inc, h_rows_inc, h_cols_inc; int w_batch_size, w_rows, w_cols, w_batch_inc, w_rows_inc, w_cols_inc; ccv_nnc_tensor_get_matrix_params(h->info, CCV_IS_TENSOR_VIEW(h) ? 
h->inc : h->info.dim, cmd.info.blas.transpose_a, &h_batch_size, &h_rows, &h_cols, &h_batch_inc, &h_rows_inc, &h_cols_inc); ccv_nnc_tensor_get_matrix_params(w->info, CCV_IS_TENSOR_VIEW(w) ? w->inc : w->info.dim, cmd.info.blas.transpose_b, &w_batch_size, &w_rows, &w_cols, &w_batch_inc, &w_rows_inc, &w_cols_inc); assert(h_rows == g_rows); assert(h_cols == w_rows); assert(w_cols == g_cols); assert(h_batch_size == g_batch_size || h_batch_size == 1); if (h_batch_size == 1 && g_batch_size > 1) h_batch_inc = 0; assert(w_batch_size == g_batch_size || w_batch_size == 1); if (w_batch_size == 1 && g_batch_size > 1) w_batch_inc = 0; if (g_batch_size > 1 && g_batch_size == h_batch_size) { if (transpose_h) { const cublasOperation_t transb = transpose_w ? CUBLAS_OP_T : CUBLAS_OP_N; const int ldb_inc = transpose_w ? w_cols_inc : w_rows_inc; #if CUDA_VERSION >= 9100 if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, w_batch_inc, &zero, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, h_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, w_batch_inc, &one, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, h_batch_inc, g_batch_size, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else for (i = 0; i < g_batch_size; i++) { if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, &zero, h->data.u8 + CCV_GET_DATA_TYPE_SIZE(h->info.datatype) * i * h_batch_inc, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, &one, h->data.u8 + CCV_GET_DATA_TYPE_SIZE(h->info.datatype) * i * h_batch_inc, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } #endif } else { const cublasOperation_t transa = transpose_w ? CUBLAS_OP_N : CUBLAS_OP_T; const int lda_inc = transpose_w ? 
w_cols_inc : w_rows_inc; #if CUDA_VERSION >= 9100 if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, w_batch_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, &zero, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, h_batch_inc, h_batch_size, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, w_batch_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, g_batch_inc, &one, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, h_batch_inc, h_batch_size, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #else for (i = 0; i < g_batch_size; i++) { if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &zero, h->data.u8 + CCV_GET_DATA_TYPE_SIZE(h->info.datatype) * i * h_batch_inc, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &one, h->data.u8 + CCV_GET_DATA_TYPE_SIZE(h->info.datatype) * i * h_batch_inc, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } #endif } } else { if (transpose_h) { const cublasOperation_t transb = transpose_w ? CUBLAS_OP_T : CUBLAS_OP_N; const int ldb_inc = transpose_w ? 
w_cols_inc : w_rows_inc; if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, &zero, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, &one, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); for (i = 1; i < g_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, CUBLAS_OP_T, transb, h_rows, h_cols, g_cols, &one, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), ldb_inc, &one, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_cols_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { const cublasOperation_t transa = transpose_w ? CUBLAS_OP_N : CUBLAS_OP_T; const int lda_inc = transpose_w ? w_cols_inc : w_rows_inc; if (!(flags & CCV_NNC_ACCUMULATE_OUTPUT)) // reset the gradients to 0 CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &zero, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); else CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, g->data.u8, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &one, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); for (i = 1; i < g_batch_size; i++) CUBLAS_ENFORCE(cublasGemmEx(cublas, transa, CUBLAS_OP_N, h_cols, h_rows, g_cols, &one, w->data.u8 + CCV_GET_DATA_TYPE_SIZE(w->info.datatype) * i * w_batch_inc, ccv_nnc_cuda_datatype(w->info.datatype), lda_inc, g->data.u8 + CCV_GET_DATA_TYPE_SIZE(g->info.datatype) * i * g_batch_inc, ccv_nnc_cuda_datatype(g->info.datatype), g_rows_inc, &one, h->data.u8, ccv_nnc_cuda_datatype(h->info.datatype), h_rows_inc, ccv_nnc_cuda_compute_datatype(h->info.datatype), CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } } } return CCV_NNC_EXEC_SUCCESS; } #endif REGISTER_COMMAND_BACKEND(CCV_NNC_GEMM_FORWARD, CCV_NNC_BACKEND_GPU_CUBLAS)(ccv_nnc_cmd_backend_registry_t* const registry) { #ifdef HAVE_CUDA registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC; registry->tensor_datatypes = CCV_32F | CCV_16F; registry->tensor_memory = CCV_TENSOR_GPU_MEMORY; registry->algorithms = 1; registry->exec = _ccv_nnc_gemm_forw; #endif } REGISTER_COMMAND_BACKEND(CCV_NNC_GEMM_BACKWARD, CCV_NNC_BACKEND_GPU_CUBLAS)(ccv_nnc_cmd_backend_registry_t* const registry) { #ifdef HAVE_CUDA registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC; registry->tensor_datatypes = CCV_32F | CCV_16F; 
	registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
	registry->algorithms = 1;
	registry->exec = _ccv_nnc_gemm_back;
#endif
}
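// ---------------------------------------------------------------------------
// [Illustrative sketch, independent of the backend above.] Every GEMM issued
// by _ccv_nnc_gemm_forw / _ccv_nnc_gemm_back drives column-major cuBLAS with
// row-major tensors by swapping the operand order: to compute row-major
// B = A * W it asks cuBLAS for B^T = W^T * A^T, passing W as the first
// operand, the dimensions as (b_cols, b_rows, a_cols), and the row strides as
// leading dimensions. The standalone helper below shows that mapping for
// plain FP32 device buffers; it assumes cuBLAS 11+ (CUBLAS_COMPUTE_32F --
// older toolkits pass CUDA_R_32F there instead) and leaves out the batching,
// tensor views and FP16 paths handled by the real backend.
// ---------------------------------------------------------------------------
#include <cublas_v2.h>
#include <cuda_runtime.h>

// Row-major C[m x n] = A[m x k] * W[k x n]; all pointers are device memory.
static cublasStatus_t row_major_gemm_f32(cublasHandle_t handle,
                                         int m, int n, int k,
                                         const float* A, const float* W, float* C) {
  const float one = 1.f, zero = 0.f;
  // Column-major view of the same memory: C^T (n x m) = W^T (n x k) * A^T (k x m),
  // so W becomes the first GEMM operand and A the second; the leading dimensions
  // are simply the row strides of the row-major buffers.
  return cublasGemmEx(handle,
                      CUBLAS_OP_N, CUBLAS_OP_N,
                      n, m, k,
                      &one,
                      W, CUDA_R_32F, n,   // ld = row stride of W
                      A, CUDA_R_32F, k,   // ld = row stride of A
                      &zero,
                      C, CUDA_R_32F, n,   // ld = row stride of C
                      CUBLAS_COMPUTE_32F,
                      CUBLAS_GEMM_DEFAULT);
}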
//------------------------------------------------------------------------ // Common op attribute parser. static __host__ void parseOpAttributes(OpKernelConstruction* ctx, TextureKernelParams& p) { // Mip and filter modes. OP_REQUIRES_OK(ctx, ctx->GetAttr("filter_mode", &p.filterMode)); OP_REQUIRES(ctx, p.filterMode >= 0 && p.filterMode < TEX_MODE_COUNT, errors::InvalidArgument("filter_mode unsupported")); p.enableMip = (p.filterMode == TEX_MODE_LINEAR_MIPMAP_NEAREST || p.filterMode == TEX_MODE_LINEAR_MIPMAP_LINEAR); // Mip level clamp. if (p.enableMip) { OP_REQUIRES_OK(ctx, ctx->GetAttr("max_mip_level", &p.mipLevelLimit)); OP_REQUIRES(ctx, p.mipLevelLimit >= -1, errors::InvalidArgument("invalid max_mip_level")); ctx->GetAttr("tex_const", &p.texConst); // Only available in forward op. } // Boundary mode. OP_REQUIRES_OK(ctx, ctx->GetAttr("boundary_mode", &p.boundaryMode)); OP_REQUIRES(ctx, p.boundaryMode >= 0 && p.boundaryMode < TEX_BOUNDARY_MODE_COUNT, errors::InvalidArgument("boundary_mode unsupported")); } //------------------------------------------------------------------------ // Forward TensorFlow op. struct TextureFwdOp : public OpKernel { TextureKernelParams m_attribs; PersistentTensor m_persistentMipTensor; // Used if texture is constant and mips are enabled. bool m_persistentMipTensorInitialized; TextureFwdOp(OpKernelConstruction* ctx): OpKernel(ctx) { memset(&m_attribs, 0, sizeof(m_attribs)); m_persistentMipTensorInitialized = false; parseOpAttributes(ctx, m_attribs); } void Compute(OpKernelContext* ctx) { TextureKernelParams& p = m_attribs; cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream(); bool cube_mode = (p.boundaryMode == TEX_BOUNDARY_MODE_CUBE); // Get input. const Tensor& tex = ctx->input(0); const Tensor& uv = ctx->input(1); const Tensor& uv_da = ctx->input(p.enableMip ? 2 : 1); // Extract input dimensions. p.n = (uv.dims() > 0) ? uv.dim_size(0) : 0; p.imgHeight = (uv.dims() > 1) ? uv.dim_size(1) : 0; p.imgWidth = (uv.dims() > 2) ? uv.dim_size(2) : 0; p.texDepth = (tex.dims() > 0) ? tex.dim_size(0) : 0; if (!cube_mode) { p.texHeight = (tex.dims() > 1) ? tex.dim_size(1) : 0; p.texWidth = (tex.dims() > 2) ? tex.dim_size(2) : 0; p.channels = (tex.dims() > 3) ? tex.dim_size(3) : 0; } else { p.texHeight = (tex.dims() > 2) ? tex.dim_size(2) : 0; p.texWidth = (tex.dims() > 3) ? tex.dim_size(3) : 0; p.channels = (tex.dims() > 4) ? tex.dim_size(4) : 0; } // Sanity checks. 
if (!cube_mode) { OP_REQUIRES(ctx, tex.dims() == 4 && tex.dim_size(0) > 0 && tex.dim_size(1) > 0 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0, errors::InvalidArgument("tex must have shape[>0, >0, >0, >0]")); OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 2, errors::InvalidArgument("uv must have shape [>0, >0, >0, 2]")); } else { OP_REQUIRES(ctx, tex.dims() == 5 && tex.dim_size(0) > 0 && tex.dim_size(1) == 6 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0 && tex.dim_size(4) > 0, errors::InvalidArgument("tex must have shape[>0, 6, >0, >0, >0] in cube map mode")); OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 3, errors::InvalidArgument("uv must have shape [>0, >0, >0, 3] in cube map mode")); OP_REQUIRES(ctx, tex.dim_size(2) == tex.dim_size(3), errors::InvalidArgument("texture shape must be square in cube map mode")); } OP_REQUIRES(ctx, tex.dim_size(0) == 1 || tex.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs tex, uv")); OP_REQUIRES(ctx, p.texWidth <= (1 << TEX_MAX_MIP_LEVEL) && p.texHeight <= (1 << TEX_MAX_MIP_LEVEL), errors::InvalidArgument("texture size too large")); if (p.enableMip) { if (!cube_mode) OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 4, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 4]")); else OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 6, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 6] in cube map mode")); } // Get input pointers. p.tex[0] = tex.flat<float>().data(); p.uv = uv.flat<float>().data(); p.uvDA = p.enableMip ? uv_da.flat<float>().data() : 0; // Allocate output tensor. Tensor* out_tensor = NULL; TensorShape out_shape; out_shape.AddDim(p.n); out_shape.AddDim(p.imgHeight); out_shape.AddDim(p.imgWidth); out_shape.AddDim(p.channels); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out_tensor)); p.out = out_tensor->flat<float>().data(); // Choose kernel variants based on channel count. void* args[] = {&p}; int channel_div_idx = 0; if (!(p.channels & 3)) channel_div_idx = 2; // Channel count divisible by 4. else if (!(p.channels & 1)) channel_div_idx = 1; // Channel count divisible by 2. // Mip-related setup. float* pmip = 0; if (p.enableMip) { // Generate mip offsets. int mipOffsets[TEX_MAX_MIP_LEVEL]; int mipTotal = calculateMipInfo(ctx, p, mipOffsets); // Mip output tensor. Tensor* mip_tensor = NULL; TensorShape mip_shape; mip_shape.AddDim(mipTotal); // If texture is constant, calculate mip stack only once. bool computeMip = true; if (p.texConst) { // First execution? if (!m_persistentMipTensorInitialized) { // Allocate a persistent mip tensor. OP_REQUIRES_OK(ctx, ctx->allocate_persistent(DT_FLOAT, mip_shape, &m_persistentMipTensor, &mip_tensor)); m_persistentMipTensorInitialized = true; } else { // Reuse the persistent tensor, do not recompute mip levels. mip_tensor = m_persistentMipTensor.AccessTensor(ctx); computeMip = false; } // Set as output tensor as well. ctx->set_output(1, *mip_tensor); } else { // Allocate an output tensor as usual. OP_REQUIRES_OK(ctx, ctx->allocate_output(1, mip_shape, &mip_tensor)); } pmip = mip_tensor->flat<float>().data(); // Pointer to data. 
for (int i=1; i <= p.mipLevelMax; i++) p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels. // Build mip levels if needed. if (computeMip) { for (int i=1; i <= p.mipLevelMax; i++) { int2 ms = mipLevelSize(p, i); int3 sz = make_int3(ms.x, ms.y, p.texDepth); dim3 blockSize = getLaunchBlockSize(TEX_FWD_MAX_MIP_KERNEL_BLOCK_WIDTH, TEX_FWD_MAX_MIP_KERNEL_BLOCK_HEIGHT, sz.x, sz.y); dim3 gridSize = getLaunchGridSize(blockSize, sz.x, sz.y, sz.z * (cube_mode ? 6 : 1)); p.mipLevelOut = i; void* build_func_tbl[3] = { (void*)MipBuildKernel1, (void*)MipBuildKernel2, (void*)MipBuildKernel4 }; OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(build_func_tbl[channel_div_idx], gridSize, blockSize, args, 0, stream)); } } } // Verify that buffers are aligned to allow float2/float4 operations. Unused pointers are zero so always aligned. if (!cube_mode) OP_REQUIRES(ctx, !((uintptr_t)p.uv & 7), errors::Internal("uv input tensor not aligned to float2")); if ((p.channels & 3) == 0) { OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 15), errors::Internal("tex input tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)p.out & 15), errors::Internal("out output tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)pmip & 15), errors::Internal("mip output tensor not aligned to float4")); } if ((p.channels & 1) == 0) { OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 7), errors::Internal("tex input tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)p.out & 7), errors::Internal("out output tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)pmip & 7), errors::Internal("mip output tensor not aligned to float2")); } if (!cube_mode) OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 15), errors::Internal("uv_da input tensor not aligned to float4")); else OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 7), errors::Internal("uv_da input tensor not aligned to float2")); // Choose launch parameters for texture lookup kernel. dim3 blockSize = getLaunchBlockSize(TEX_FWD_MAX_KERNEL_BLOCK_WIDTH, TEX_FWD_MAX_KERNEL_BLOCK_HEIGHT, p.imgWidth, p.imgHeight); dim3 gridSize = getLaunchGridSize(blockSize, p.imgWidth, p.imgHeight, p.n); // Choose kernel based on filter mode, cube mode, and datatype. void* func_tbl[TEX_MODE_COUNT * 3 * 2] = { (void*)TextureFwdKernelNearest1, (void*)TextureFwdKernelNearest2, (void*)TextureFwdKernelNearest4, (void*)TextureFwdKernelLinear1, (void*)TextureFwdKernelLinear2, (void*)TextureFwdKernelLinear4, (void*)TextureFwdKernelLinearMipmapNearest1, (void*)TextureFwdKernelLinearMipmapNearest2, (void*)TextureFwdKernelLinearMipmapNearest4, (void*)TextureFwdKernelLinearMipmapLinear1, (void*)TextureFwdKernelLinearMipmapLinear2, (void*)TextureFwdKernelLinearMipmapLinear4, (void*)TextureFwdKernelCubeNearest1, (void*)TextureFwdKernelCubeNearest2, (void*)TextureFwdKernelCubeNearest4, (void*)TextureFwdKernelCubeLinear1, (void*)TextureFwdKernelCubeLinear2, (void*)TextureFwdKernelCubeLinear4, (void*)TextureFwdKernelCubeLinearMipmapNearest1, (void*)TextureFwdKernelCubeLinearMipmapNearest2, (void*)TextureFwdKernelCubeLinearMipmapNearest4, (void*)TextureFwdKernelCubeLinearMipmapLinear1, (void*)TextureFwdKernelCubeLinearMipmapLinear2, (void*)TextureFwdKernelCubeLinearMipmapLinear4, }; // Function index. int func_idx = p.filterMode; if (cube_mode) func_idx += TEX_MODE_COUNT; func_idx = func_idx * 3 + channel_div_idx; // Launch kernel. 
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func_tbl[func_idx], gridSize, blockSize, args, 0, stream)); } }; REGISTER_OP("TextureFwd") .Input ("tex: float") .Input ("uv: float") .Output ("out: float") .Attr ("filter_mode: int") .Attr ("boundary_mode: int"); REGISTER_OP("TextureFwdMip") .Input ("tex: float") .Input ("uv: float") .Input ("uv_da: float") .Output ("out: float") .Output ("mip: float") .Attr ("filter_mode: int") .Attr ("boundary_mode: int") .Attr ("tex_const: int") .Attr ("max_mip_level: int"); REGISTER_KERNEL_BUILDER(Name("TextureFwd") .Device(DEVICE_GPU), TextureFwdOp); REGISTER_KERNEL_BUILDER(Name("TextureFwdMip").Device(DEVICE_GPU), TextureFwdOp); //------------------------------------------------------------------------ // Gradient TensorFlow op. struct TextureGradOp : public OpKernel { TextureKernelParams m_attribs; TextureGradOp(OpKernelConstruction* ctx): OpKernel(ctx) { memset(&m_attribs, 0, sizeof(m_attribs)); parseOpAttributes(ctx, m_attribs); } void Compute(OpKernelContext* ctx) { TextureKernelParams& p = m_attribs; cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream(); bool cube_mode = (p.boundaryMode == TEX_BOUNDARY_MODE_CUBE); // Get input. const Tensor& tex = ctx->input(0); const Tensor& uv = ctx->input(1); const Tensor& dy = ctx->input(2); const Tensor& uv_da = ctx->input(p.enableMip ? 3 : 2); const Tensor& mip = ctx->input(p.enableMip ? 4 : 2); // Extract input dimensions. p.n = (uv.dims() > 0) ? uv.dim_size(0) : 0; p.imgHeight = (uv.dims() > 1) ? uv.dim_size(1) : 0; p.imgWidth = (uv.dims() > 2) ? uv.dim_size(2) : 0; p.texDepth = (tex.dims() > 0) ? tex.dim_size(0) : 0; if (!cube_mode) { p.texHeight = (tex.dims() > 1) ? tex.dim_size(1) : 0; p.texWidth = (tex.dims() > 2) ? tex.dim_size(2) : 0; p.channels = (tex.dims() > 3) ? tex.dim_size(3) : 0; } else { p.texHeight = (tex.dims() > 2) ? tex.dim_size(2) : 0; p.texWidth = (tex.dims() > 3) ? tex.dim_size(3) : 0; p.channels = (tex.dims() > 4) ? tex.dim_size(4) : 0; } // Sanity checks. 
if (!cube_mode) { OP_REQUIRES(ctx, tex.dims() == 4 && tex.dim_size(0) > 0 && tex.dim_size(1) > 0 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0, errors::InvalidArgument("tex must have shape[>0, >0, >0, >0]")); OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 2, errors::InvalidArgument("uv must have shape [>0, >0, >0, 2]")); } else { OP_REQUIRES(ctx, tex.dims() == 5 && tex.dim_size(0) > 0 && tex.dim_size(1) == 6 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0 && tex.dim_size(4) > 0, errors::InvalidArgument("tex must have shape[>0, 6, >0, >0, >0] in cube map mode")); OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 3, errors::InvalidArgument("uv must have shape [>0, >0, >0, 3] in cube map mode")); OP_REQUIRES(ctx, tex.dim_size(2) == tex.dim_size(3), errors::InvalidArgument("texture shape must be square in cube map mode")); } OP_REQUIRES(ctx, tex.dim_size(0) == 1 || tex.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs tex, uv")); OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) == p.n && dy.dim_size(1) == p.imgHeight && dy.dim_size(2) == p.imgWidth && dy.dim_size(3) == p.channels, errors::InvalidArgument("dy must have shape [minibatch_size, height, width, channels]")); if (p.enableMip) { if (!cube_mode) OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 4, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 4]")); else OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 6, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 6] in cube map mode")); } // Get input pointers. p.tex[0] = tex.flat<float>().data(); p.uv = uv.flat<float>().data(); p.dy = dy.flat<float>().data(); p.uvDA = p.enableMip ? uv_da.flat<float>().data() : 0; float* pmip = p.enableMip ? (float*)mip.flat<float>().data() : 0; // Allocate output tensor for tex gradient. Tensor* grad_tex_tensor = NULL; TensorShape grad_tex_shape; grad_tex_shape.AddDim(p.texDepth); if (cube_mode) grad_tex_shape.AddDim(6); grad_tex_shape.AddDim(p.texHeight); grad_tex_shape.AddDim(p.texWidth); grad_tex_shape.AddDim(p.channels); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, grad_tex_shape, &grad_tex_tensor)); p.gradTex[0] = grad_tex_tensor->flat<float>().data(); // Allocate output tensor for uv gradient. if (p.filterMode != TEX_MODE_NEAREST) { TensorShape grad_uv_shape; Tensor* grad_uv_tensor = NULL; grad_uv_shape.AddDim(p.n); grad_uv_shape.AddDim(p.imgHeight); grad_uv_shape.AddDim(p.imgWidth); grad_uv_shape.AddDim(uv.dim_size(3)); OP_REQUIRES_OK(ctx, ctx->allocate_output(1, grad_uv_shape, &grad_uv_tensor)); p.gradUV = grad_uv_tensor->flat<float>().data(); // Allocate output tensor for uv_da gradient. if (p.filterMode == TEX_MODE_LINEAR_MIPMAP_LINEAR) { Tensor* grad_uv_da_tensor = NULL; grad_uv_shape.set_dim(3, uv_da.dim_size(3)); OP_REQUIRES_OK(ctx, ctx->allocate_output(2, grad_uv_shape, &grad_uv_da_tensor)); p.gradUVDA = grad_uv_da_tensor->flat<float>().data(); } } // Choose kernel variants based on channel count. int channel_div_idx = 0; if (!(p.channels & 3)) channel_div_idx = 2; // Channel count divisible by 4. else if (!(p.channels & 1)) channel_div_idx = 1; // Channel count divisible by 2. // Mip-related setup. 
Tensor grad_mip_tensor; float* pgradMip = 0; if (p.enableMip) { // Generate mip offsets. int mipOffsets[TEX_MAX_MIP_LEVEL]; int mipTotal = calculateMipInfo(ctx, p, mipOffsets); // Get space for temporary mip gradients. TensorShape grad_mip_shape; grad_mip_shape.AddDim(mipTotal); ctx->allocate_temp(DT_FLOAT, grad_mip_shape, &grad_mip_tensor); pgradMip = grad_mip_tensor.flat<float>().data(); for (int i=1; i <= p.mipLevelMax; i++) { p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels. p.gradTex[i] = pgradMip + mipOffsets[i]; // Pointers to mip gradients. } // Clear mip gradients. OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(pgradMip, 0, mipTotal * sizeof(float), stream)); } // Initialize texture gradients to zero. int texBytes = p.texHeight * p.texWidth * p.texDepth * p.channels * sizeof(float); if (cube_mode) texBytes *= 6; OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.gradTex[0], 0, texBytes, stream)); // Verify that buffers are aligned to allow float2/float4 operations. Unused pointers are zero so always aligned. if (!cube_mode) { OP_REQUIRES(ctx, !((uintptr_t)p.uv & 7), errors::Internal("uv input tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)p.gradUV & 7), errors::Internal("grad_uv output tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 15), errors::Internal("uv_da input tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)p.gradUVDA & 15), errors::Internal("grad_uv_da output tensor not aligned to float4")); } else { OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 7), errors::Internal("uv_da input tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)p.gradUVDA & 7), errors::Internal("grad_uv_da output tensor not aligned to float2")); } if ((p.channels & 3) == 0) { OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 15), errors::Internal("tex input tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)p.gradTex[0] & 15), errors::Internal("grad_tex output tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)p.dy & 15), errors::Internal("dy input tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)pmip & 15), errors::Internal("mip input tensor not aligned to float4")); OP_REQUIRES(ctx, !((uintptr_t)pgradMip & 15), errors::Internal("internal mip gradient tensor not aligned to float4")); } if ((p.channels & 1) == 0) { OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 7), errors::Internal("tex input tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)p.gradTex[0] & 7), errors::Internal("grad_tex output tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)p.dy & 7), errors::Internal("dy output tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)pmip & 7), errors::Internal("mip input tensor not aligned to float2")); OP_REQUIRES(ctx, !((uintptr_t)pgradMip & 7), errors::Internal("internal mip gradient tensor not aligned to float2")); } // Choose launch parameters for main gradient kernel. void* args[] = {&p}; dim3 blockSize = getLaunchBlockSize(TEX_GRAD_MAX_KERNEL_BLOCK_WIDTH, TEX_GRAD_MAX_KERNEL_BLOCK_HEIGHT, p.imgWidth, p.imgHeight); dim3 gridSize = getLaunchGridSize(blockSize, p.imgWidth, p.imgHeight, p.n); void* func_tbl[TEX_MODE_COUNT * 2] = { (void*)TextureGradKernelNearest, (void*)TextureGradKernelLinear, (void*)TextureGradKernelLinearMipmapNearest, (void*)TextureGradKernelLinearMipmapLinear, (void*)TextureGradKernelCubeNearest, (void*)TextureGradKernelCubeLinear, (void*)TextureGradKernelCubeLinearMipmapNearest, (void*)TextureGradKernelCubeLinearMipmapLinear, }; // Function index. 
int func_idx = p.filterMode; if (cube_mode) func_idx += TEX_MODE_COUNT; // Launch main gradient kernel. OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func_tbl[func_idx], gridSize, blockSize, args, 0, stream)); // Launch kernel to pull gradients from mip levels. if (p.enableMip) { dim3 blockSize = getLaunchBlockSize(TEX_GRAD_MAX_MIP_KERNEL_BLOCK_WIDTH, TEX_GRAD_MAX_MIP_KERNEL_BLOCK_HEIGHT, p.texWidth, p.texHeight); dim3 gridSize = getLaunchGridSize(blockSize, p.texWidth, p.texHeight, p.texDepth * (cube_mode ? 6 : 1)); int sharedBytes = blockSize.x * blockSize.y * p.channels * sizeof(float); void* mip_grad_func_tbl[3] = { (void*)MipGradKernel1, (void*)MipGradKernel2, (void*)MipGradKernel4 }; OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(mip_grad_func_tbl[channel_div_idx], gridSize, blockSize, args, sharedBytes, stream)); } } }; REGISTER_OP("TextureGradNearest") .Input ("tex: float") .Input ("uv: float") .Input ("dy: float") .Output ("grad_tex: float") .Attr ("filter_mode: int") .Attr ("boundary_mode: int"); REGISTER_OP("TextureGradLinear") .Input ("tex: float") .Input ("uv: float") .Input ("dy: float") .Output ("grad_tex: float") .Output ("grad_uv: float") .Attr ("filter_mode: int") .Attr ("boundary_mode: int"); REGISTER_OP("TextureGradLinearMipmapNearest") .Input ("tex: float") .Input ("uv: float") .Input ("dy: float") .Input ("uv_da: float") .Input ("mip: float") .Output ("grad_tex: float") .Output ("grad_uv: float") .Attr ("filter_mode: int") .Attr ("boundary_mode: int") .Attr ("max_mip_level: int"); REGISTER_OP("TextureGradLinearMipmapLinear") .Input ("tex: float") .Input ("uv: float") .Input ("dy: float") .Input ("uv_da: float") .Input ("mip: float") .Output ("grad_tex: float") .Output ("grad_uv: float") .Output ("grad_uv_da: float") .Attr ("filter_mode: int") .Attr ("boundary_mode: int") .Attr ("max_mip_level: int"); REGISTER_KERNEL_BUILDER(Name("TextureGradNearest") .Device(DEVICE_GPU), TextureGradOp); REGISTER_KERNEL_BUILDER(Name("TextureGradLinear") .Device(DEVICE_GPU), TextureGradOp); REGISTER_KERNEL_BUILDER(Name("TextureGradLinearMipmapNearest").Device(DEVICE_GPU), TextureGradOp); REGISTER_KERNEL_BUILDER(Name("TextureGradLinearMipmapLinear") .Device(DEVICE_GPU), TextureGradOp); //------------------------------------------------------------------------
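// --- Illustrative, host-only sketch (not nvdiffrast API): reproduces the function-table indexing
// used by TextureFwdOp above. The 24-entry forward table is laid out as
// [cube?][filter_mode][channel_variant], where the channel variant is 2 when the channel count is
// divisible by 4, 1 when divisible by 2, and 0 otherwise. TEX_MODE_COUNT = 4 is inferred from the
// table size (24 = 2 * 4 * 3); the filter-mode ordering is assumed to match the table as written.
#include <cassert>
#include <cstdio>

constexpr int TEX_MODE_COUNT_SKETCH = 4;   // nearest, linear, mipmap-nearest, mipmap-linear

static int channel_variant(int channels) {
    if ((channels & 3) == 0) return 2;     // float4 loads/stores possible
    if ((channels & 1) == 0) return 1;     // float2 loads/stores possible
    return 0;                              // scalar path
}

static int texture_func_index(int filter_mode, bool cube_mode, int channels) {
    int idx = filter_mode;
    if (cube_mode) idx += TEX_MODE_COUNT_SKETCH;
    return idx * 3 + channel_variant(channels);
}

int main() {
    // Linear filtering, 2D texture, 4 channels -> entry 1*3 + 2 = 5, i.e. TextureFwdKernelLinear4.
    assert(texture_func_index(/*filter_mode=*/1, /*cube_mode=*/false, /*channels=*/4) == 5);
    // Cube map, nearest filtering, 3 channels -> entry (0+4)*3 + 0 = 12, i.e. TextureFwdKernelCubeNearest1.
    assert(texture_func_index(0, true, 3) == 12);
    printf("texture kernel table indexing checks passed\n");
    return 0;
}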
the_stack
#define NVBIO_CUDA_DEBUG #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/condition.h> #include <cub/cub.cuh> namespace nvbio { namespace condition { __global__ void scan_kernel(const uint32 n_tile_grids, cuda::condition_set_view conditions, uint32* block_values, const uint32 initial_state) { const uint32 prev_block = blockIdx.x ? blockIdx.x-1 : gridDim.x-1; uint32 prev_state = blockIdx.x ? initial_state+1 : initial_state; for (uint32 i = 0; i < n_tile_grids; ++i, ++prev_state) { // compute the tile number const uint32 tile_idx = blockIdx.x + i * gridDim.x; if (threadIdx.x == 0) { if (tile_idx) { // wait on the previous block to post its value conditions[ prev_block ].wait( prev_state ); block_values[tile_idx] = block_values[tile_idx-1] + 1; } else { // set the value for the first tile block_values[0] = 0; } // release the condition for this block conditions[ blockIdx.x ].signal(); // equivalent to set( initial_state+i+1 ) } } } __global__ void chained_kernel(const uint32 n_tile_grids, cuda::condition_set_view conditions, const uint32 initial_state) { const uint32 prev_block = blockIdx.x ? blockIdx.x-1 : gridDim.x-1; uint32 prev_state = blockIdx.x ? initial_state+1 : initial_state; for (uint32 i = 0; i < n_tile_grids; ++i, ++prev_state) { // compute the tile number const uint32 tile_idx = blockIdx.x + i * gridDim.x; if (threadIdx.x == 0) { if (tile_idx) { // wait on the previous block to be ready conditions[ prev_block ].wait( prev_state ); } // release the condition for this block conditions[ blockIdx.x ].signal(); // equivalent to set( initial_state+i+1 ) } } } template <uint32 BLOCKDIM, bool DO_WORK> __global__ void fast_scan_kernel(const uint32 n_tile_grids, cuda::condition_set_view conditions, volatile uint32* partials, volatile uint32* prefixes, const uint32 initial_state) { // // This scan skeleton performs longer range chaining rather than chaining each CTA // to its direct predecessor: the idea is that each CTA writes out its partial, // and then checks for the availability of prefix of predecessor (without blocking), // if not available, it waits for the previous BLOCKDIM-1 partials and the prefix BLOCKDIM // CTAs away, and uses all its threads to reduce them. // Hence, CTA[i] depends on the prefix of a CTA[i-BLOCKDIM]. 
// typedef cub::BlockReduce<uint32,BLOCKDIM> BlockReduce; const uint32 PARTIAL_READY = initial_state + 1; const uint32 PREFIX_READY = initial_state + 2; __shared__ typename BlockReduce::TempStorage smem_storage; __shared__ uint32 previous_done; const bool is_thread0 = (threadIdx.x == 0); for (uint32 i = 0; i < n_tile_grids; ++i) { //__syncthreads(); // compute the tile number const uint32 tile_idx = blockIdx.x + i * gridDim.x; // write out the partial for this tile if (DO_WORK) { if (is_thread0) partials[tile_idx] = 1; } if (is_thread0 && tile_idx) previous_done = conditions[tile_idx-1].test( PREFIX_READY ); __syncthreads(); if (tile_idx == 0) { // set the value for the first tile if (DO_WORK) { if (is_thread0) prefixes[0] = 1; } } else if (previous_done) { // sum to previous prefix if (DO_WORK) { if (is_thread0) prefixes[tile_idx] = prefixes[tile_idx-1] + 1; } } else { // release the condition variable for the partial if (is_thread0) conditions[tile_idx].set( PARTIAL_READY ); int32 prefix = 0; int32 last_tile = tile_idx; int32 prefix_tile = tile_idx; // keep looking back until we find a 'ready' prefix do { // // lookback up to BLOCKDIM predecessors in parallel, check if any // of them is done (i.e. their prefix is ready), and otherwise // wait on their partial to arrive. // previous_done = 0; __syncthreads(); // compute the first tile in this batch prefix_tile = nvbio::max( int32( prefix_tile - blockDim.x ), 0 ); // check if the any of the predecessors in this block is done if (prefix_tile + threadIdx.x < last_tile) { if (conditions[ prefix_tile + threadIdx.x ].test( PREFIX_READY )) previous_done = prefix_tile + threadIdx.x; } __syncthreads(); // let all threads update the prefix tile if (previous_done) prefix_tile = previous_done; int32 partial = 0; // lookback the predecessors in parallel if (prefix_tile + threadIdx.x < last_tile) { if (previous_done && threadIdx.x == 0) { // let thread0 read the ready prefix if (DO_WORK) partial = prefixes[ prefix_tile ]; } else { // wait on the partials conditions[ prefix_tile + threadIdx.x ].wait( PARTIAL_READY ); if (DO_WORK) partial = partials[ prefix_tile + threadIdx.x ]; } } if (DO_WORK) { // reduce the prefixes prefix += BlockReduce( smem_storage ).Sum( partial ); } last_tile = prefix_tile; } while (prefix_tile && !previous_done); if (DO_WORK) { if (is_thread0) { // write out the final values prefixes[tile_idx] = prefix + 1; } } } // release the condition for the scanned value for this tile if (is_thread0) conditions[tile_idx].set( PREFIX_READY ); } } } // condition namespace int condition_test() { const uint32 n_tile_grids = 100; log_info( stderr, "condition test... started\n" ); const uint32 blockdim = 128; const uint32 n_blocks = (uint32)cuda::max_active_blocks( condition::scan_kernel, blockdim, 0u ); cuda::condition_set_storage condition_st( n_blocks ); cuda::condition_set_view condition_set = condition_st.get(); log_info( stderr, " %u blocks\n", n_blocks ); thrust::device_vector<uint32> dvalues( n_tile_grids*n_blocks ); uint32* dvalues_ptr = thrust::raw_pointer_cast( &dvalues.front() ); thrust::host_vector<uint32> hvalues; log_info( stderr, " correctness test... 
started\n" ); for (uint32 i = 0; i < 20; ++i) { // call the testing kernel condition::scan_kernel<<<n_blocks,blockdim>>>( n_tile_grids, condition_set, dvalues_ptr, i*n_tile_grids ); cudaDeviceSynchronize(); nvbio::cuda::thrust_copy_vector(hvalues, dvalues); for (uint32 n = 0; n < n_tile_grids*n_blocks; ++n) { const uint32 val = hvalues[n]; if (val != n) { log_error( stderr, " found %u at position %u, launch %u\n", val, n, i ); return 1; } } } log_info( stderr, " correctness test... done\n" ); const uint32 n_tests = 20; log_info( stderr, " speed test... started\n" ); condition_st.set(0); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) condition::chained_kernel<<<n_blocks,blockdim>>>( n_tile_grids, condition_set, i*n_tile_grids ); cudaDeviceSynchronize(); timer.stop(); const float time = timer.seconds() / float(n_tests*n_tile_grids); log_info( stderr, " speed test... done:\n %.3f ns\n %.3f ns/CTA\n %.1fM CTAs retired/s\n", time * 1.0e6f, 1.0e6f * (time/float(n_blocks)), 1.0e-6f * (float(n_blocks)/time) ); { const uint32 blockdim = 128; const uint32 n_blocks = (uint32)cuda::max_active_blocks( condition::fast_scan_kernel<blockdim,true>, blockdim, 0u ); cuda::condition_set_storage condition_st( n_blocks*n_tile_grids ); cuda::condition_set_view condition_set = condition_st.get(); cudaDeviceSynchronize(); log_info( stderr, " fast scan... started (%u CTAs)\n", n_blocks ); thrust::device_vector<uint32> dpartials( n_tile_grids*n_blocks ); uint32* dpartials_ptr = thrust::raw_pointer_cast( &dpartials.front() ); for (uint32 i = 0; i < 20; ++i) { thrust::fill( dpartials.begin(), dpartials.end(), 0 ); thrust::fill( dvalues.begin(), dvalues.end(), 0 ); condition::fast_scan_kernel<blockdim,true><<<n_blocks,blockdim>>>( n_tile_grids, condition_set, dpartials_ptr, dvalues_ptr, i*2 ); cudaDeviceSynchronize(); nvbio::cuda::thrust_copy_vector(hvalues, dvalues); for (uint32 n = 0; n < n_tile_grids*n_blocks; ++n) { const uint32 val = hvalues[n]; if (val != n+1) { log_error( stderr, " found %u at position %u, launch %u\n", val, n, i ); return 1; } } } condition_st.set(0); cudaDeviceSynchronize(); Timer timer; timer.start(); for (uint32 i = 0; i < n_tests; ++i) condition::fast_scan_kernel<blockdim,true><<<n_blocks,blockdim>>>( n_tile_grids, condition_set, dpartials_ptr, dvalues_ptr, i*2 ); cudaDeviceSynchronize(); timer.stop(); { const float time = timer.seconds() / float(n_tests*n_tile_grids); log_info( stderr, " fast scan test... done:\n %.3f ns\n %.3f ns/CTA\n %.1fM CTAs retired/s\n", time * 1.0e6f, 1.0e6f * (time/float(n_blocks)), 1.0e-6f * (float(n_blocks)/time) ); } log_info( stderr, " fast chaining... started\n" ); condition_st.set(0); cudaDeviceSynchronize(); timer.start(); for (uint32 i = 0; i < n_tests; ++i) condition::fast_scan_kernel<blockdim,false><<<n_blocks,blockdim>>>( n_tile_grids, condition_set, NULL, NULL, i*2 ); cudaDeviceSynchronize(); timer.stop(); { const float time = timer.seconds() / float(n_tests*n_tile_grids); log_info( stderr, " fast chaining test... done:\n %.3f ns\n %.3f ns/CTA\n %.1fM CTAs retired/s\n", time * 1.0e6f, 1.0e6f * (time/float(n_blocks)), 1.0e-6f * (float(n_blocks)/time) ); } } log_info( stderr, "condition test... done\n" ); return 0; } } // namespace nvbio
the_stack
* This file contains an implementation of some batched sparse matrix * operations in Compressed Sparse Row representation. * * Important: the implementation is designed to give good performance on * large batches of relatively small matrices (typically one or two * elements per row). In other use cases it might be slower than using * the dense counterparts! */ #pragma once #include <cuml/common/utils.hpp> #include <raft/cudart_utils.h> #include <raft/linalg/cusolver_wrappers.h> #include <linalg/batched/matrix.cuh> #include <raft/matrix/matrix.hpp> #include <rmm/device_uvector.hpp> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <algorithm> #include <cstddef> #include <memory> #include <vector> namespace MLCommon { namespace Sparse { namespace Batched { /** * Kernel to construct batched CSR sparse matrices from batched dense matrices * * @note This kernel is intended to give decent performance for large batches * of small matrices. For larger matrices you might want to store a COO * representation of the matrices and assign threads to the non-zero * elements of each matrix * * @param[in] dense Batched dense matrices. Size: m * n * batch_size * @param[in] col_index CSR column index. Size: nnz * @param[in] row_index CSR row index. Size: m + 1 * @param[out] values CSR values array. Size: nnz * batch_size * @param[in] batch_size Number of matrices in the batch * @param[in] m Number of rows per matrix * @param[in] n Number of columns per matrix * @param[in] nnz Number of non-zero elements in each matrix */ template <typename T> static __global__ void dense_to_csr_kernel(const T* dense, const int* col_index, const int* row_index, T* values, int batch_size, int m, int n, int nnz) { int bid = blockIdx.x * blockDim.x + threadIdx.x; if (bid < batch_size) { int stride = m * n; for (int i = 0; i < m; i++) { for (int idx = row_index[i]; idx < row_index[i + 1]; idx++) { int j = col_index[idx]; values[bid * nnz + idx] = dense[bid * stride + j * m + i]; } } } } /** * Kernel to construct batched dense matrices from batched CSR sparse matrices * * @note This kernel is intended to give decent performance for large batches * of small matrices. * * @param[out] dense Batched dense matrices. Size: m * n * batch_size * @param[in] col_index CSR column index. Size: nnz * @param[in] row_index CSR row index. Size: m + 1 * @param[in] values CSR values array. Size: nnz * batch_size * @param[in] batch_size Number of matrices in the batch * @param[in] m Number of rows per matrix * @param[in] n Number of columns per matrix * @param[in] nnz Number of non-zero elements in each matrix */ template <typename T> static __global__ void csr_to_dense_kernel(T* dense, const int* col_index, const int* row_index, const T* values, int batch_size, int m, int n, int nnz) { int bid = blockIdx.x * blockDim.x + threadIdx.x; if (bid < batch_size) { int stride = m * n; for (int i = 0; i < m; i++) { for (int idx = row_index[i]; idx < row_index[i + 1]; idx++) { int j = col_index[idx]; dense[bid * stride + j * m + i] = values[bid * nnz + idx]; } } } } /** * @brief The Batched::CSR class provides storage and a few operations for * a batch of matrices in Compressed Sparse Row representation, that * share a common structure (index arrays) but different values. 
* * @note Most of the operations are asynchronous, using the stream that * is given in the constructor (or, if constructing from a dense matrix, * the stream attached to this matrix) */ template <typename T> class CSR { public: using shape_type = std::pair<std::size_t, std::size_t>; /** * @brief Constructor that leaves the matrix uninitialized * * @param[in] m Number of rows per matrix * @param[in] n Number of columns per matrix * @param[in] nnz Number of non-zero elements per matrix * @param[in] batch_size Number of matrices in the batch * @param[in] cublasHandle cuBLAS handle * @param[in] cusolverSpHandle cuSOLVER sparse handle * @param[in] stream CUDA stream */ CSR(std::size_t m, std::size_t n, std::size_t nnz, std::size_t batch_size, cublasHandle_t cublasHandle, cusolverSpHandle_t cusolverSpHandle, cudaStream_t stream) : m_batch_size(batch_size), m_cublasHandle(cublasHandle), m_cusolverSpHandle(cusolverSpHandle), m_stream(stream), m_shape(m, n), m_nnz(nnz), m_values(nnz * batch_size, stream), m_col_index(nnz, stream), m_row_index(m + 1, stream), d_values(m_values.data()), d_row_index(m_row_index.data()), d_col_index(m_col_index.data()) { } /** * @brief Constructor from pre-allocated memory; leaves the matrix uninitialized * * @param[in] m Number of rows per matrix * @param[in] n Number of columns per matrix * @param[in] nnz Number of non-zero elements per matrix * @param[in] batch_size Number of matrices in the batch * @param[in] cublasHandle cuBLAS handle * @param[in] cusolverSpHandle cuSOLVER sparse handle * @param[in] d_values Pre-allocated values array * @param[in] d_col_index Pre-allocated column index array * @param[in] d_row_index Pre-allocated row index array * @param[in] stream CUDA stream */ CSR(std::size_t m, std::size_t n, std::size_t nnz, std::size_t batch_size, cublasHandle_t cublasHandle, cusolverSpHandle_t cusolverSpHandle, T* d_values, int* d_col_index, int* d_row_index, cudaStream_t stream) : m_batch_size(batch_size), m_cublasHandle(cublasHandle), m_cusolverSpHandle(cusolverSpHandle), m_stream(stream), m_shape(m, n), m_nnz(nnz), m_values(nnz * batch_size, stream), m_col_index(nnz, stream), m_row_index(m + 1, stream), d_values(d_values), d_col_index(d_col_index), d_row_index(d_row_index) { } //! Destructor: nothing to destroy explicitely ~CSR() {} //! Copy constructor CSR(const CSR<T>& other) : m_batch_size(other.m_batch_size), m_cublasHandle(other.m_cublasHandle), m_cusolverSpHandle(other.m_cusolverSpHandle), m_stream(other.m_stream), m_shape(other.m_shape), m_nnz(other.m_nnz), m_values(other.m_nnz * other.m_batch_size, other.m_stream), m_col_index(other.m_nnz, other.m_stream), m_row_index(other.m_shape.first + 1, other.m_stream), d_values(m_values.data()), d_row_index(m_row_index.data()), d_col_index(m_col_index.data()) { // Copy the raw data raft::copy(get_values(), other.get_values(), m_nnz * m_batch_size, m_stream); raft::copy(get_col_index(), other.get_col_index(), m_nnz, m_stream); raft::copy(get_row_index(), other.get_row_index(), m_shape.first + 1, m_stream); } //! 
Copy assignment operator CSR<T>& operator=(const CSR<T>& other) { m_batch_size = other.m_batch_size; m_shape = other.m_shape; m_nnz = other.m_nnz; m_values.resize(m_nnz * m_batch_size, m_stream); m_col_index.resize(m_nnz, m_stream); m_row_index.resize(m_shape.first + 1, m_stream); d_values = m_values.data(); d_col_index = m_col_index.data(); d_row_index = m_row_index.data(); // Copy the raw data raft::copy(get_values(), other.get_values(), m_nnz * m_batch_size, m_stream); raft::copy(get_col_index(), other.get_col_index(), m_nnz, m_stream); raft::copy(get_row_index(), other.get_row_index(), m_shape.first + 1, m_stream); return *this; } /** * @brief Construct from a dense batched matrix and its mask * * @param[in] dense Dense batched matrix * @param[in] mask Col-major host device matrix containing a mask of the * non-zero values common to all matrices in the batch. * Note: the point of using a mask is that some values * might be zero in a few matrices but not generally in * the batch so we shouldn't rely on a single matrix to * get the mask * @param[in] cusolverSpHandle cusolver sparse handle * @param[in] d_values Optional pre-allocated values array * @param[in] d_col_index Optional pre-allocated column index array * @param[in] d_row_index Optional pre-allocated row index array * @return Batched CSR matrix */ static CSR<T> from_dense(const LinAlg::Batched::Matrix<T>& dense, const std::vector<bool>& mask, cusolverSpHandle_t cusolverSpHandle, T* d_values = nullptr, int* d_col_index = nullptr, int* d_row_index = nullptr) { auto shape = dense.shape(); // Create the index arrays from the mask std::vector<int> h_col_index; std::vector<int> h_row_index = std::vector<int>(shape.first + 1); int nnz = 0; for (std::size_t i = 0; i < shape.first; i++) { h_row_index[i] = nnz; for (std::size_t j = 0; j < shape.second; j++) { if (mask[j * shape.first + i]) { h_col_index.push_back(j); nnz++; } } } h_row_index[shape.first] = nnz; CSR<T> out = (d_values == nullptr) ? CSR<T>(shape.first, shape.second, nnz, dense.batches(), dense.cublasHandle(), cusolverSpHandle, dense.stream()) : CSR<T>(shape.first, shape.second, nnz, dense.batches(), dense.cublasHandle(), cusolverSpHandle, d_values, d_col_index, d_row_index, dense.stream()); // Copy the host index arrays to the device raft::copy(out.get_col_index(), h_col_index.data(), nnz, out.stream()); raft::copy(out.get_row_index(), h_row_index.data(), shape.first + 1, out.stream()); // Copy the data from the dense matrix to its sparse representation constexpr int TPB = 256; dense_to_csr_kernel<<<raft::ceildiv<int>(out.batches(), TPB), TPB, 0, out.stream()>>>( dense.raw_data(), out.get_col_index(), out.get_row_index(), out.get_values(), out.batches(), shape.first, shape.second, nnz); CUDA_CHECK(cudaPeekAtLastError()); return out; } /** * @brief Construct a dense batched matrix * * @return Batched::Matrix representing the same data as this object */ LinAlg::Batched::Matrix<T> to_dense() { LinAlg::Batched::Matrix<T> dense( m_shape.first, m_shape.second, m_batch_size, m_cublasHandle, m_stream, true); // Copy the data from the sparse to the dense representation constexpr int TPB = 256; csr_to_dense_kernel<<<raft::ceildiv<int>(m_batch_size, TPB), TPB, 0, m_stream>>>( dense.raw_data(), get_col_index(), get_row_index(), get_values(), m_batch_size, m_shape.first, m_shape.second, m_nnz); CUDA_CHECK(cudaPeekAtLastError()); return dense; } //! Return batch size std::size_t batches() const { return m_batch_size; } //! 
Return number of non-zero elements std::size_t nnz() const { return m_nnz; } //! Return cublas handle cublasHandle_t cublasHandle() const { return m_cublasHandle; } //! Return cusolver sparse handle cusolverSpHandle_t cusolverSpHandle() const { return m_cusolverSpHandle; } //! Return stream cudaStream_t stream() const { return m_stream; } //! Return shape const shape_type& shape() const { return m_shape; } //! Return values array T* get_values() { return d_values; } const T* get_values() const { return d_values; } //! Return columns index array int* get_col_index() { return d_col_index; } const int* get_col_index() const { return d_col_index; } //! Return rows index array int* get_row_index() { return d_row_index; } const int* get_row_index() const { return d_row_index; } protected: //! Shape (rows, cols) of matrices. shape_type m_shape; //! Number of non-zero values per matrix std::size_t m_nnz; //! Array(pointer) to the values in all the batched matrices. rmm::device_uvector<T> m_values; T* d_values; //! Array(pointer) to the column index of the CSR. rmm::device_uvector<int> m_col_index; int* d_col_index; //! Array(pointer) to the row index of the CSR. rmm::device_uvector<int> m_row_index; int* d_row_index; //! Number of matrices in batch std::size_t m_batch_size; cublasHandle_t m_cublasHandle; cusolverSpHandle_t m_cusolverSpHandle; cudaStream_t m_stream; }; /** * Kernel to compute a batched SpMV: alpha*A*x + beta*y * (where A is a sparse matrix, x and y dense vectors) * * @note One thread per batch (this is intended for very large batches) * Rows don't have the same number of non-zero elements, so an approach * to parallelize on the rows would lead to divergence * * @param[in] alpha Scalar alpha * @param[in] A_col_index CSR column index of batched matrix A * @param[in] A_row_index CSR row index of batched matrix A * @param[in] A_values Values of the non-zero elements of A * @param[in] x Dense vector x * @param[in] beta Scalar beta * @param[in,out] y Dense vector y * @param[in] m Number of rows of A * @param[in] n Number of columns of A * @param[in] batch_size Number of individual matrices in the batch */ template <typename T> __global__ void batched_spmv_kernel(T alpha, const int* A_col_index, const int* A_row_index, const T* A_values, const T* x, T beta, T* y, int m, int n, int batch_size) { int bid = blockIdx.x * blockDim.x + threadIdx.x; if (bid < batch_size) { int nnz = A_row_index[m]; for (int i = 0; i < m; i++) { T acc = 0.0; for (int idx = A_row_index[i]; idx < A_row_index[i + 1]; idx++) { int j = A_col_index[idx]; acc += A_values[bid * nnz + idx] * x[bid * n + j]; } y[bid * m + i] = alpha * acc + (beta == 0.0 ? 
0.0 : beta * y[bid * m + i]); } } } /** * Compute a batched SpMV: alpha*A*x + beta*y * (where A is a sparse matrix, x and y dense vectors) * * @note Not supporting transpose yet for simplicity as it isn't needed * Also currently the strides between batched vectors are assumed to * be exactly the dimensions of the problem * * @param[in] alpha Scalar alpha * @param[in] A Batched sparse matrix (CSR) * @param[in] x Batched dense vector x * @param[in] beta Scalar beta * @param[in,out] y Batched dense vector y */ template <typename T> void b_spmv(T alpha, const CSR<T>& A, const LinAlg::Batched::Matrix<T>& x, T beta, LinAlg::Batched::Matrix<T>& y) { auto m = A.shape().first; auto n = A.shape().second; // A few checks ASSERT(std::min(x.shape().first, x.shape().second) == 1 && std::max(x.shape().first, x.shape().second) == n, "SpMV: Dimension mismatch: x"); ASSERT(std::min(y.shape().first, y.shape().second) == 1 && std::max(y.shape().first, y.shape().second) == m, "SpMV: Dimension mismatch: y"); ASSERT(A.batches() == x.batches(), "SpMV: A and x must have the same batch size"); ASSERT(A.batches() == y.batches(), "SpMV: A and y must have the same batch size"); // Execute the kernel constexpr int TPB = 256; batched_spmv_kernel<<<raft::ceildiv<int>(A.batches(), TPB), TPB, 0, A.stream()>>>( alpha, A.get_col_index(), A.get_row_index(), A.get_values(), x.raw_data(), beta, y.raw_data(), m, n, A.batches()); CUDA_CHECK(cudaPeekAtLastError()); } /** * Kernel to compute a batched SpMM: alpha*A*B + beta*C * (where A is a sparse matrix, B and C dense matrices) * * @note Parallelized over the batch and the columns of individual matrices * * @param[in] alpha Scalar alpha * @param[in] A_col_index CSR column index of batched matrix A * @param[in] A_row_index CSR row index of batched matrix A * @param[in] A_values Values of the non-zero elements of A * @param[in] B Dense matrix B * @param[in] beta Scalar beta * @param[in,out] C Dense matrix C * @param[in] m Number of rows of A and C * @param[in] k Number of columns of A, rows of B * @param[in] n Number of columns of B and C * @param[in] batch_size Number of individual matrices in the batch * @param[in] threads_per_bid Number of threads per batch index */ template <typename T> __global__ void batched_spmm_kernel(T alpha, const int* A_col_index, const int* A_row_index, const T* A_values, const T* B, T beta, T* C, int m, int k, int n, int batch_size, int threads_per_bid) { int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; int bid = thread_idx / threads_per_bid; if (bid < batch_size) { int nnz = A_row_index[m]; const T* b_A_values = A_values + bid * nnz; const T* b_B = B + bid * k * n; for (int j = thread_idx % threads_per_bid; j < n; j += threads_per_bid) { for (int i = 0; i < m; i++) { T acc = 0.0; for (int idx = A_row_index[i]; idx < A_row_index[i + 1]; idx++) { int ik = A_col_index[idx]; acc += b_A_values[idx] * b_B[j * k + ik]; } int ci = bid * m * n + j * m + i; C[ci] = alpha * acc + (beta == 0.0 ? 
0.0 : beta * C[ci]); } } } } /** * Kernel to compute a batched SpMM: alpha*A*B + beta*C * (where A is a sparse matrix, B and C dense matrices) * * @note: this is more performant when the matrices are large enough and * assuming that almost all elements of B need to be read * * @param[in] alpha Scalar alpha * @param[in] A_col_index CSR column index of batched matrix A * @param[in] A_row_index CSR row index of batched matrix A * @param[in] A_values Values of the non-zero elements of A * @param[in] B Dense matrix B * @param[in] beta Scalar beta * @param[in,out] C Dense matrix C * @param[in] m Number of rows of A and C * @param[in] k Number of columns of A, rows of B * @param[in] n Number of columns of B and C * @param[in] nnz Number of non-zero elements per matrix */ template <typename T> __global__ void batched_spmm_kernel_shared_mem(T alpha, const int* A_col_index, const int* A_row_index, const T* A_values, const T* B, T beta, T* C, int m, int k, int n, int nnz) { int bid = blockIdx.x; int j = threadIdx.x; // Using dynamic shared memory extern __shared__ int8_t shared_mem[]; // Mapping arrays to shared mem ; note: T before int for alignment! T* s_A_values = (T*)shared_mem; T* s_B = (T*)(shared_mem + nnz * sizeof(T)); int* s_A_col_index = (int*)(shared_mem + (nnz + k * n) * sizeof(T)); int* s_A_row_index = (int*)(shared_mem + (nnz + k * n) * sizeof(T) + nnz * sizeof(int)); // Load A in shared memory const T* b_A_values = A_values + bid * nnz; for (int i_nnz = j; i_nnz < nnz; i_nnz += blockDim.x) { s_A_col_index[i_nnz] = A_col_index[i_nnz]; s_A_values[i_nnz] = b_A_values[i_nnz]; } for (int i_m = j; i_m < m; i_m += blockDim.x) { s_A_row_index[i_m] = A_row_index[i_m]; } if (j == 0) s_A_row_index[m] = nnz; // Load B in shared memory const T* b_B = B + bid * k * n; for (int i_kn = j; i_kn < k * n; i_kn += blockDim.x) { s_B[i_kn] = b_B[i_kn]; } __syncthreads(); for (int i = 0; i < m; i++) { T acc = 0.0; for (int idx = s_A_row_index[i]; idx < s_A_row_index[i + 1]; idx++) { int ik = s_A_col_index[idx]; acc += s_A_values[idx] * s_B[j * k + ik]; } int ci = bid * m * n + j * m + i; C[ci] = alpha * acc + (beta == 0.0 ? 
0.0 : beta * C[ci]); } } /** * Compute a batched SpMM: alpha*A*B + beta*C * (where A is a sparse matrix, B and C dense matrices) * * @note Not supporting transpose yet for simplicity as it isn't needed * Also not supporting leading dim different than the problem dimensions * * @param[in] alpha Scalar alpha * @param[in] A Batched sparse matrix (CSR) * @param[in] B Batched dense matrix B * @param[in] beta Scalar beta * @param[inout] C Batched dense matrix C * @param[in] use_shared_mem use shared memory based implementation or not */ template <typename T> void b_spmm(T alpha, const CSR<T>& A, const LinAlg::Batched::Matrix<T>& B, T beta, LinAlg::Batched::Matrix<T>& C, bool use_shared_mem = true) { auto m = A.shape().first; auto n = B.shape().second; auto k = A.shape().second; auto nb = A.batches(); auto nnz = A.nnz(); // Check the parameters ASSERT(B.batches() == nb, "SpMM: A and B must have the same batch size"); ASSERT(C.batches() == nb, "SpMM: A and C must have the same batch size"); ASSERT(B.shape().first == k, "SpMM: Dimension mismatch: A and B"); ASSERT(C.shape().first == m && C.shape().second == n, "SpMM: Dimension mismatch: C"); // Execute the kernel if (use_shared_mem) { // Shared memory kernel (large matrices) size_t shared_mem_size = (nnz + m + 1) * sizeof(int) + (nnz + k * n) * sizeof(T); batched_spmm_kernel_shared_mem<<<nb, n, shared_mem_size, A.stream()>>>(alpha, A.get_col_index(), A.get_row_index(), A.get_values(), B.raw_data(), beta, C.raw_data(), m, k, n, nnz); CUDA_CHECK(cudaPeekAtLastError()); } else { // No shared memory (small matrices) constexpr int TPB = 256; int threads_per_bid = nb <= 1024 ? 8 : (nb <= 2048 ? 4 : (nb <= 4096 ? 2 : 1)); batched_spmm_kernel<<<raft::ceildiv<int>(nb * threads_per_bid, TPB), TPB, 0, A.stream()>>>( alpha, A.get_col_index(), A.get_row_index(), A.get_values(), B.raw_data(), beta, C.raw_data(), m, k, n, nb, threads_per_bid); CUDA_CHECK(cudaPeekAtLastError()); } } } // namespace Batched } // namespace Sparse } // namespace MLCommon
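// --- Illustrative, host-only sketch of the index construction performed by CSR<T>::from_dense
// above: a column-major boolean mask of shape (m, n) is walked row by row to build the shared CSR
// row/column index arrays. Plain C++ with no cuML dependency, purely to show the expected layout.
#include <cstdio>
#include <vector>

int main() {
    const std::size_t m = 3, n = 3;
    // Column-major mask of a 3x3 bidiagonal structure (main diagonal + superdiagonal).
    std::vector<bool> mask = {
        true,  false, false,   // column 0
        true,  true,  false,   // column 1
        false, true,  true     // column 2
    };

    std::vector<int> col_index;
    std::vector<int> row_index(m + 1);
    int nnz = 0;
    for (std::size_t i = 0; i < m; i++) {
        row_index[i] = nnz;
        for (std::size_t j = 0; j < n; j++) {
            if (mask[j * m + i]) {       // column-major indexing, exactly as in from_dense
                col_index.push_back((int)j);
                nnz++;
            }
        }
    }
    row_index[m] = nnz;

    printf("nnz = %d\nrow_index:", nnz);
    for (int r : row_index) printf(" %d", r);
    printf("\ncol_index:");
    for (int c : col_index) printf(" %d", c);
    printf("\n");
    // Expected: nnz = 5, row_index = 0 2 4 5, col_index = 0 1 1 2 2
    return 0;
}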
the_stack
#include <ATen/cuda/CUDAApplyUtils.cuh> typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const threadsPerBlock = 512; int const maxGridDim = 50000; } // namespace __device__ __forceinline__ static void reduceMax(float *address, float val) { int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old || __int_as_float(old) < val); } __device__ __forceinline__ static void reduceMax(double *address, double val) { unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong(fmax(val, __longlong_as_double(assumed)))); } while (assumed != old || __longlong_as_double(old) < val); } // get rid of meaningless warnings when compiling host code #ifdef __CUDA_ARCH__ __device__ __forceinline__ static void reduceAdd(float *address, float val) { #if (__CUDA_ARCH__ < 200) #warning \ "compute capability lower than 2.x. fall back to use CAS version of atomicAdd for float32" int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(val + __int_as_float(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } __device__ __forceinline__ static void reduceAdd(double *address, double val) { #if (__CUDA_ARCH__ < 600) #warning \ "compute capability lower than 6.x. 
fall back to use CAS version of atomicAdd for float64" unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } #endif template <typename T> __global__ void feats_reduce_kernel(const T *feats, const int32_t *coors_map, T *reduced_feats, // shall be 0 at initialization const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) continue; const T *feats_offset = feats + x * num_feats; T *reduced_feats_offset = reduced_feats + reduce_to * num_feats; if (reduce_type == reduce_t::MAX) { for (int i = 0; i < num_feats; i++) { reduceMax(&reduced_feats_offset[i], feats_offset[i]); } } else { for (int i = 0; i < num_feats; i++) { reduceAdd(&reduced_feats_offset[i], feats_offset[i]); } } } } template <typename T> __global__ void add_reduce_traceback_grad_kernel( T *grad_feats, const T *grad_reduced_feats, const int32_t *coors_map, const int32_t *reduce_count, const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) { continue; } const int input_offset = x * num_feats; T *grad_feats_offset = grad_feats + input_offset; const int reduced_offset = reduce_to * num_feats; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; if (reduce_type == reduce_t::SUM) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i]; } } else if (reduce_type == reduce_t::MEAN) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i] / static_cast<T>(reduce_count[reduce_to]); } } } } template <typename T> __global__ void max_reduce_traceback_scatter_idx_kernel( const T *feats, const T *reduced_feats, int32_t *reduce_from, const int32_t *coors_map, const int num_input, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; const int input_offset = x * num_feats; const T *feats_offset = feats + input_offset; if (reduce_to == -1) { continue; } const int reduced_offset = reduce_to * num_feats; const T *reduced_feats_offset = reduced_feats + reduced_offset; int32_t *reduce_from_offset = reduce_from + reduced_offset; for (int i = 0; i < num_feats; i++) { if (feats_offset[i] == reduced_feats_offset[i]) { atomicMin(&reduce_from_offset[i], static_cast<int32_t>(x)); } } } } template <typename T> __global__ void max_reduce_scatter_grad_kernel(T *grad_feats, const T *grad_reduced_feats, const int32_t *reduce_from, const int num_reduced, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_reduced; x += gridDim.x * blockDim.x) { const int reduced_offset = x * num_feats; const int32_t *scatter_to_offset = reduce_from + reduced_offset; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; for (int i = 0; i < num_feats; i++) { grad_feats[scatter_to_offset[i] * num_feats + i] = grad_reduced_feats_offset[i]; } } } namespace voxelization { std::vector<at::Tensor> dynamic_point_to_voxel_forward_gpu( const at::Tensor 
&feats, const at::Tensor &coors, const reduce_t reduce_type) { CHECK_INPUT(feats); CHECK_INPUT(coors); const int num_input = feats.size(0); const int num_feats = feats.size(1); if (num_input == 0) return {feats.clone().detach(), coors.clone().detach(), coors.new_empty({0}, torch::kInt32), coors.new_empty({0}, torch::kInt32)}; at::Tensor out_coors; at::Tensor coors_map; at::Tensor reduce_count; auto coors_clean = coors.masked_fill(coors.lt(0).any(-1, true), -1); std::tie(out_coors, coors_map, reduce_count) = at::unique_dim(coors_clean, 0, true, true, true); if (out_coors.index({0, 0}).lt(0).item<bool>()) { // the first element of out_coors (-1,-1,-1) and should be removed out_coors = out_coors.slice(0, 1); reduce_count = reduce_count.slice(0, 1); coors_map = coors_map - 1; } coors_map = coors_map.to(torch::kInt32); reduce_count = reduce_count.to(torch::kInt32); auto reduced_feats = at::empty({out_coors.size(0), num_feats}, feats.options()); AT_DISPATCH_FLOATING_TYPES( feats.scalar_type(), "feats_reduce_kernel", ([&] { if (reduce_type == reduce_t::MAX) reduced_feats.fill_(-std::numeric_limits<scalar_t>::infinity()); else reduced_feats.fill_(static_cast<scalar_t>(0)); dim3 blocks(std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); feats_reduce_kernel<<<blocks, threads>>>( feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduced_feats.data_ptr<scalar_t>(), num_input, num_feats, reduce_type); if (reduce_type == reduce_t::MEAN) reduced_feats /= reduce_count.unsqueeze(-1).to(reduced_feats.dtype()); })); AT_CUDA_CHECK(cudaGetLastError()); return {reduced_feats, out_coors, coors_map, reduce_count}; } void dynamic_point_to_voxel_backward_gpu(at::Tensor &grad_feats, const at::Tensor &grad_reduced_feats, const at::Tensor &feats, const at::Tensor &reduced_feats, const at::Tensor &coors_map, const at::Tensor &reduce_count, const reduce_t reduce_type) { CHECK_INPUT(grad_feats); CHECK_INPUT(grad_reduced_feats); CHECK_INPUT(feats); CHECK_INPUT(reduced_feats); CHECK_INPUT(coors_map); CHECK_INPUT(reduce_count); const int num_input = feats.size(0); const int num_reduced = reduced_feats.size(0); const int num_feats = feats.size(1); grad_feats.fill_(0); // copy voxel grad to points if (num_input == 0 || num_reduced == 0) return; if (reduce_type == reduce_t::MEAN || reduce_type == reduce_t::SUM) { AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "add_reduce_traceback_grad_kernel", ([&] { dim3 blocks(std::min( at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); add_reduce_traceback_grad_kernel<<<blocks, threads>>>( grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduce_count.data_ptr<int32_t>(), num_input, num_feats, reduce_type); })); AT_CUDA_CHECK(cudaGetLastError()); } else { auto reduce_from = at::full({num_reduced, num_feats}, num_input, coors_map.options().dtype(torch::kInt32)); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks(std::min( at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); max_reduce_traceback_scatter_idx_kernel<<<blocks, threads>>>( feats.data_ptr<scalar_t>(), reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), coors_map.data_ptr<int32_t>(), num_input, num_feats); })); AT_CUDA_CHECK(cudaGetLastError()); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), 
"max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks(std::min( at::cuda::ATenCeilDiv(num_reduced, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); max_reduce_scatter_grad_kernel<<<blocks, threads>>>( grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), num_reduced, num_feats); })); AT_CUDA_CHECK(cudaGetLastError()); } return; } } // namespace voxelization
the_stack
#include <torch/extension.h> #include <cstdint> #include "cuda_util.cuh" #include "data_spec_packed.cuh" namespace { namespace device { __global__ void sample_grid_sh_kernel( PackedSparseGridSpec grid, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, // Output torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> out) { CUDA_GET_THREAD_ID(tid, points.size(0) * grid.sh_data_dim); const int idx = tid % grid.sh_data_dim; const int pid = tid / grid.sh_data_dim; float point[3] = {points[pid][0], points[pid][1], points[pid][2]}; transform_coord(point, grid._scaling, grid._offset); int32_t l[3]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { point[i] = fminf(fmaxf(point[i], 0.f), grid.size[i] - 1.f); l[i] = min((int32_t)point[i], (int32_t)(grid.size[i] - 2)); point[i] -= l[i]; } const int offy = grid.size[2], offx = grid.size[1] * grid.size[2]; const int32_t* __restrict__ link_ptr = &grid.links[l[0] * offx + l[1] * offy + l[2]]; #define MAYBE_READ_LINK(u) ((link_ptr[u] >= 0) ? grid.sh_data[ \ link_ptr[u] * size_t(grid.sh_data_dim) + idx] : 0.f) const float ix0y0 = lerp(MAYBE_READ_LINK(0), MAYBE_READ_LINK(1), point[2]); const float ix0y1 = lerp(MAYBE_READ_LINK(offy), MAYBE_READ_LINK(offy + 1), point[2]); const float ix0 = lerp(ix0y0, ix0y1, point[1]); const float ix1y0 = lerp(MAYBE_READ_LINK(offx), MAYBE_READ_LINK(offx + 1), point[2]); const float ix1y1 = lerp(MAYBE_READ_LINK(offy + offx), MAYBE_READ_LINK(offy + offx + 1), point[2]); const float ix1 = lerp(ix1y0, ix1y1, point[1]); out[pid][idx] = lerp(ix0, ix1, point[0]); } #undef MAYBE_READ_LINK __global__ void sample_grid_density_kernel( PackedSparseGridSpec grid, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, // Output torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> out) { CUDA_GET_THREAD_ID(tid, points.size(0)); float point[3] = {points[tid][0], points[tid][1], points[tid][2]}; transform_coord(point, grid._scaling, grid._offset); int32_t l[3]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { point[i] = fminf(fmaxf(point[i], 0.f), grid.size[i] - 1.f); l[i] = min((int32_t)point[i], grid.size[i] - 2); point[i] -= l[i]; } const int offy = grid.size[2], offx = grid.size[1] * grid.size[2]; const int32_t* __restrict__ link_ptr = &grid.links[l[0] * offx + l[1] * offy + l[2]]; #define MAYBE_READ_LINK_D(u) ((link_ptr[u] >= 0) ? 
grid.density_data[link_ptr[u]] : 0.f) const float ix0y0 = lerp(MAYBE_READ_LINK_D(0), MAYBE_READ_LINK_D(1), point[2]); const float ix0y1 = lerp(MAYBE_READ_LINK_D(offy), MAYBE_READ_LINK_D(offy + 1), point[2]); const float ix0 = lerp(ix0y0, ix0y1, point[1]); const float ix1y0 = lerp(MAYBE_READ_LINK_D(offx), MAYBE_READ_LINK_D(offx + 1), point[2]); const float ix1y1 = lerp(MAYBE_READ_LINK_D(offy + offx), MAYBE_READ_LINK_D(offy + offx + 1), point[2]); const float ix1 = lerp(ix1y0, ix1y1, point[1]); out[tid][0] = lerp(ix0, ix1, point[0]); } #undef MAYBE_READ_LINK_D __global__ void sample_grid_sh_backward_kernel( PackedSparseGridSpec grid, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> grad_out, // Output torch::PackedTensorAccessor64<float, 2, torch::RestrictPtrTraits> grad_data) { CUDA_GET_THREAD_ID(tid, points.size(0) * grid.sh_data_dim); const int idx = tid % grid.sh_data_dim; const int pid = tid / grid.sh_data_dim; float point[3] = {points[pid][0], points[pid][1], points[pid][2]}; transform_coord(point, grid._scaling, grid._offset); int32_t l[3]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { point[i] = fminf(fmaxf(point[i], 0.f), grid.size[i] - 1.f); l[i] = min((int32_t)point[i], grid.size[i] - 2); point[i] -= l[i]; } const int offy = grid.size[2], offx = grid.size[1] * grid.size[2]; const int32_t* __restrict__ link_ptr = &grid.links[l[0] * offx + l[1] * offy + l[2]]; const float go = grad_out[pid][idx]; const float xb = point[0], yb = point[1], zb = point[2]; const float xa = 1.f - point[0], ya = 1.f - point[1], za = 1.f - point[2]; #define MAYBE_ADD_GRAD_LINK_PTR(u, content) if (link_ptr[u] >= 0) \ atomicAdd(&grad_data[link_ptr[u]][idx], content) const float xago = xa * go; float tmp = ya * xago; MAYBE_ADD_GRAD_LINK_PTR(0, tmp * za); MAYBE_ADD_GRAD_LINK_PTR(1, tmp * zb); tmp = yb * xago; MAYBE_ADD_GRAD_LINK_PTR(offy, tmp * za); MAYBE_ADD_GRAD_LINK_PTR(offy + 1, tmp * zb); const float xbgo = xb * go; tmp = ya * xbgo; MAYBE_ADD_GRAD_LINK_PTR(offx, tmp * za); MAYBE_ADD_GRAD_LINK_PTR(offx + 1, tmp * zb); tmp = yb * xbgo; MAYBE_ADD_GRAD_LINK_PTR(offx + offy, tmp * za); MAYBE_ADD_GRAD_LINK_PTR(offx + offy + 1, tmp * zb); } #undef MAYBE_ADD_GRAD_LINK_PTR __global__ void sample_grid_density_backward_kernel( PackedSparseGridSpec grid, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points, const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> grad_out, // Output torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> grad_data) { CUDA_GET_THREAD_ID(tid, points.size(0)); float point[3] = {points[tid][0], points[tid][1], points[tid][2]}; transform_coord(point, grid._scaling, grid._offset); int32_t l[3]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { point[i] = fminf(fmaxf(point[i], 0.f), grid.size[i] - 1.f); l[i] = min((int32_t)point[i], grid.size[i] - 2); point[i] -= l[i]; } const int offy = grid.size[2], offx = grid.size[1] * grid.size[2]; const int32_t* __restrict__ link_ptr = &grid.links[l[0] * offx + l[1] * offy + l[2]]; const float go = grad_out[tid][0]; const float xb = point[0], yb = point[1], zb = point[2]; const float xa = 1.f - point[0], ya = 1.f - point[1], za = 1.f - point[2]; #define MAYBE_ADD_GRAD_LINK_PTR_D(u, content) if (link_ptr[u] >= 0) \ atomicAdd(grad_data[link_ptr[u]].data(), content) const float xago = xa * go; float tmp = ya * xago; MAYBE_ADD_GRAD_LINK_PTR_D(0, tmp * za); MAYBE_ADD_GRAD_LINK_PTR_D(1, tmp * zb); tmp = yb * 
xago; MAYBE_ADD_GRAD_LINK_PTR_D(offy, tmp * za); MAYBE_ADD_GRAD_LINK_PTR_D(offy + 1, tmp * zb); const float xbgo = xb * go; tmp = ya * xbgo; MAYBE_ADD_GRAD_LINK_PTR_D(offx, tmp * za); MAYBE_ADD_GRAD_LINK_PTR_D(offx + 1, tmp * zb); tmp = yb * xbgo; MAYBE_ADD_GRAD_LINK_PTR_D(offx + offy, tmp * za); MAYBE_ADD_GRAD_LINK_PTR_D(offx + offy + 1, tmp * zb); } } // namespace device } // namespace std::tuple<torch::Tensor, torch::Tensor> sample_grid(SparseGridSpec& grid, torch::Tensor points, bool want_colors) { DEVICE_GUARD(points); grid.check(); CHECK_INPUT(points); TORCH_CHECK(points.ndimension() == 2); const auto Q = points.size(0) * grid.sh_data.size(1); const int cuda_n_threads = std::min<int>(Q, CUDA_MAX_THREADS); const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads); const int blocks_density = CUDA_N_BLOCKS_NEEDED(points.size(0), cuda_n_threads); torch::Tensor result_density = torch::empty({points.size(0), grid.density_data.size(1)}, points.options()); torch::Tensor result_sh = torch::empty({want_colors ? points.size(0) : 0, grid.sh_data.size(1)}, points.options()); cudaStream_t stream_1, stream_2; cudaStreamCreate(&stream_1); cudaStreamCreate(&stream_2); device::sample_grid_density_kernel<<<blocks_density, cuda_n_threads, 0, stream_1>>>( grid, points.packed_accessor32<float, 2, torch::RestrictPtrTraits>(), // Output result_density.packed_accessor32<float, 2, torch::RestrictPtrTraits>()); if (want_colors) { device::sample_grid_sh_kernel<<<blocks, cuda_n_threads, 0, stream_2>>>( grid, points.packed_accessor32<float, 2, torch::RestrictPtrTraits>(), // Output result_sh.packed_accessor32<float, 2, torch::RestrictPtrTraits>()); } cudaStreamSynchronize(stream_1); cudaStreamSynchronize(stream_2); CUDA_CHECK_ERRORS; return std::tuple<torch::Tensor, torch::Tensor>{result_density, result_sh}; } void sample_grid_backward( SparseGridSpec& grid, torch::Tensor points, torch::Tensor grad_out_density, torch::Tensor grad_out_sh, torch::Tensor grad_density_out, torch::Tensor grad_sh_out, bool want_colors) { DEVICE_GUARD(points); grid.check(); CHECK_INPUT(points); CHECK_INPUT(grad_out_density); CHECK_INPUT(grad_out_sh); CHECK_INPUT(grad_density_out); CHECK_INPUT(grad_sh_out); TORCH_CHECK(points.ndimension() == 2); TORCH_CHECK(grad_out_density.ndimension() == 2); TORCH_CHECK(grad_out_sh.ndimension() == 2); const auto Q = points.size(0) * grid.sh_data.size(1); const int cuda_n_threads = std::min<int>(Q, CUDA_MAX_THREADS); const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads); const int blocks_density = CUDA_N_BLOCKS_NEEDED(points.size(0), cuda_n_threads); cudaStream_t stream_1, stream_2; cudaStreamCreate(&stream_1); cudaStreamCreate(&stream_2); device::sample_grid_density_backward_kernel<<<blocks_density, cuda_n_threads, 0, stream_1>>>( grid, points.packed_accessor32<float, 2, torch::RestrictPtrTraits>(), grad_out_density.packed_accessor32<float, 2, torch::RestrictPtrTraits>(), // Output grad_density_out.packed_accessor32<float, 2, torch::RestrictPtrTraits>()); if (want_colors) { device::sample_grid_sh_backward_kernel<<<blocks, cuda_n_threads, 0, stream_2>>>( grid, points.packed_accessor32<float, 2, torch::RestrictPtrTraits>(), grad_out_sh.packed_accessor32<float, 2, torch::RestrictPtrTraits>(), // Output grad_sh_out.packed_accessor64<float, 2, torch::RestrictPtrTraits>()); } cudaStreamSynchronize(stream_1); cudaStreamSynchronize(stream_2); CUDA_CHECK_ERRORS; }
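Both sampling kernels evaluate a trilinear interpolation by chaining lerp over z, then y, then x, and the backward kernels scatter the incoming gradient with the eight (1-x|x)(1-y|y)(1-z|z) corner products. The standalone host sketch below (illustrative only; it does not depend on the grid types above) checks that the two formulations agree for a single cell.

// --- Supplementary reference sketch (not part of the original file) ---
#include <cstdio>

static float lerp_ref(float a, float b, float t) { return a + t * (b - a); }

int main() {
  const float c[2][2][2] = {{{1.f, 2.f}, {3.f, 4.f}},
                            {{5.f, 6.f}, {7.f, 8.f}}};  // corner values c[x][y][z]
  const float x = 0.25f, y = 0.5f, z = 0.75f;           // fractional position inside the cell
  const float xa = 1.f - x, ya = 1.f - y, za = 1.f - z;

  // Forward: the lerp chain used in sample_grid_sh_kernel / sample_grid_density_kernel.
  const float ix0 = lerp_ref(lerp_ref(c[0][0][0], c[0][0][1], z),
                             lerp_ref(c[0][1][0], c[0][1][1], z), y);
  const float ix1 = lerp_ref(lerp_ref(c[1][0][0], c[1][0][1], z),
                             lerp_ref(c[1][1][0], c[1][1][1], z), y);
  const float via_lerp = lerp_ref(ix0, ix1, x);

  // Equivalent weighted sum; these eight weights are what the backward
  // kernels atomically add to each corner, scaled by the output gradient.
  const float wx[2] = {xa, x}, wy[2] = {ya, y}, wz[2] = {za, z};
  float via_weights = 0.f;
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      for (int k = 0; k < 2; ++k)
        via_weights += wx[i] * wy[j] * wz[k] * c[i][j][k];

  std::printf("lerp chain: %f, weighted sum: %f\n", via_lerp, via_weights);
  return 0;
}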
namespace AggMIS { bool CheckCudaError(cudaError_t code, const char* file, int line) { if (code != cudaSuccess) { std::cout << "\n***************** CUDA Error detected ***************\n"; std::cout << "Error: " << cudaGetErrorString(code) << "\n"; std::cout << "In file " << file << " line " << line << "\n"; std::cout << "\n*****************************************************\n"; } code = cudaGetLastError(); if (code != cudaSuccess) { std::cout << "\n*************** Past CUDA Error detected ************\n"; std::cout << "Error: " << cudaGetErrorString(code) << "\n"; std::cout << "In file " << file << " line " << line << "\n"; std::cout << "\n*****************************************************\n"; } return false; } namespace Types { // My timer implementation JTimer::JTimer() { cudaEventCreate(&startTimeCuda); cudaEventCreate(&endTimeCuda); started = false; stopped = false; startTimeHost = endTimeHost = 0.; } JTimer::~JTimer() {} void JTimer::start() { cudaEventRecord(startTimeCuda, 0); startTimeHost = CLOCK(); started = true; stopped = false; } void JTimer::stop() { if (started && !stopped) { cudaEventRecord(endTimeCuda, 0); cudaEventSynchronize(endTimeCuda); endTimeHost = CLOCK(); stopped = true; } } double JTimer::getElapsedTimeInSec(bool host) { if (!started || !stopped) { printf("Error: elapsed time requested when not valid.\n"); return -1.0; } if (!host) { cudaEventElapsedTime(&elapsedCudaTime, startTimeCuda, endTimeCuda); return (double) elapsedCudaTime / 1000.0; } return 0.; } double JTimer::getElapsedTimeInMilliSec(bool host) { if (!host) { cudaEventElapsedTime(&elapsedCudaTime, startTimeCuda, endTimeCuda); return (double) elapsedCudaTime; } return 0.; } // Graph_d members Graph_d::Graph_d(IntVector_d& indices, IntVector_d& adjacency) { this->indices = new IntVector_d(indices); this->adjacency = new IntVector_d(adjacency); willClean = true; } Graph_d::Graph_d(IntVector_h& indices, IntVector_h& adjacency) { this->indices = new IntVector_d(indices); this->adjacency = new IntVector_d(adjacency); willClean = true; } Graph_d::Graph_d(IntVector_d* indices, IntVector_d* adjacency) { this->indices = indices; this->adjacency = adjacency; willClean = false; } Graph_d::Graph_d(Graph_h& graph) { indices = new IntVector_d(*(graph.indices)); adjacency = new IntVector_d(*(graph.adjacency)); willClean = true; } Graph_d::Graph_d() { indices = new IntVector_d(); adjacency = new IntVector_d(); willClean = true; } Graph_d::~Graph_d() { if (willClean) { indices->clear(); adjacency->clear(); delete indices; delete adjacency; } } int Graph_d::Size() { return indices->size() - 1; } DGraph Graph_d::GetD() { return DGraph(Size(), indStart(), adjStart()); } int* Graph_d::indStart() { return thrust::raw_pointer_cast(indices->data()); } int* Graph_d::adjStart() { return thrust::raw_pointer_cast(adjacency->data()); } // Graph_h members Graph_h::Graph_h(IntVector_d& indices, IntVector_d& adjacency) { this->indices = new IntVector_h(indices); this->adjacency = new IntVector_h(adjacency); willClean = true; } Graph_h::Graph_h(IntVector_h& indices, IntVector_h& adjacency) { this->indices = new IntVector_h(indices); this->adjacency = new IntVector_h(adjacency); willClean = true; } Graph_h::Graph_h(IntVector_h* indices, IntVector_h* adjacency) { this->indices = indices; this->adjacency = adjacency; willClean = false; } Graph_h::Graph_h(Graph_d& graph) { indices = new IntVector_h(*(graph.indices)); adjacency = new IntVector_h(*(graph.adjacency)); willClean = true; } Graph_h::Graph_h() { indices = new IntVector_h(); 
adjacency = new IntVector_h(); willClean = true; } Graph_h::~Graph_h() { if (willClean) { indices->resize(0); adjacency->resize(0); delete indices; delete adjacency; } } int Graph_h::Size() { return indices->size() - 1; } int* Graph_h::nStart(int node) { return &((*adjacency)[(*indices)[node]]); } int* Graph_h::nEnd(int node) { return &((*adjacency)[(*indices)[node + 1]]); } // Functions int* StartOf(IntVector_d &target) { return thrust::raw_pointer_cast(target.data()); } int* StartOf(IntVector_d *target) { return thrust::raw_pointer_cast(target->data()); } namespace Compare { bool AreEqual(IntVector_h& a, IntVector_h& b, bool verbose) { bool good = true; if (a.size() != b.size()) { if (verbose) printf("Vectors to compare differ in size: a.size()=%d b.size=%d\n", a.size(), b.size()); return false; } for (int i = 0; i < a.size(); i++) if (a[i] != b[i]) { if (verbose) printf("Difference found: a[%d]=%d b[%d]=%d\n", i, a[i], i, b[i]); good = false; } return good; } bool AreEqual(IntVector_d& a, IntVector_d& b, bool verbose) { IntVector_h tempA(a); IntVector_h tempB(b); bool result = AreEqual(tempA, tempB, verbose); tempA.clear(); tempB.clear(); return result; } bool AreEqual(IntVector_h& a, IntVector_d& b, bool verbose) { IntVector_h temp(b); bool result = AreEqual(a, temp, verbose); temp.clear(); return result; } bool AreEqual(IntVector_d& a, IntVector_h& b, bool verbose) { return AreEqual(b, a, verbose); } bool AreEqual(std::vector<std::vector<int> > &a, std::vector<std::vector<int> > &b, bool verbose) { // Check that main containers have matching sizes if (a.size() != b.size()) { if (verbose) printf("Sizes of base vectors do not match! a=%d b=%d\n", a.size(), b.size()); return false; } // Check that sizes of nested containers match for (int i = 0; i < a.size(); i++) { if (a[i].size() != b[i].size()) { if (verbose) { printf("Sizes of secondary vectors %d do not match!\n", i); printf("a[%d].size()=%d b[%d].size()=%d\n", i, a[i].size(), i, b[i].size()); std::stringstream ss; ss << "Contents of A[" << i << "]"; AggMIS::Types::Display::Print(a[i], ss.str()); ss.str("Contents of B["); ss << i << "]"; AggMIS::Types::Display::Print(b[i], ss.str()); } return false; } } // Check that all entries are equal for (int i = 0; i < a.size(); i++) { for (int j = 0; j < a[i].size(); j++) { if (a[i][j] != b[i][j]) { if (verbose) { printf("Element[%d][%d] does not match!\n", i, j); std::stringstream ss; ss << "Contents of A[" << i << "]"; AggMIS::Types::Display::Print(a[i], ss.str()); ss.str("Contents of B["); ss << i << "]"; AggMIS::Types::Display::Print(b[i], ss.str()); } return false; } } } return true; } bool AreEqual(AggMIS::Types::Graph_h& a, AggMIS::Types::Graph_h& b, bool verbose) { bool indicesMatch = AreEqual(*(a.indices), *(b.indices), verbose); bool adjacencyMatch = AreEqual(*(a.adjacency), *(b.adjacency), verbose); if (!indicesMatch && verbose) printf("Indices of graphs differ!\n"); if (!adjacencyMatch && verbose) printf("Adjacency lists of graphs differ!\n"); return indicesMatch && adjacencyMatch; } bool AreEqual(AggMIS::Types::Graph_d& a, AggMIS::Types::Graph_d& b, bool verbose) { bool indicesMatch = AreEqual(*(a.indices), *(b.indices), verbose); bool adjacencyMatch = AreEqual(*(a.adjacency), *(b.adjacency), verbose); if (!indicesMatch && verbose) printf("Indices of graphs differ!\n"); if (!adjacencyMatch && verbose) printf("Adjacency lists of graphs differ!\n"); return indicesMatch && adjacencyMatch; } bool AreEqual(Graph_h& a, Graph_d& b, bool verbose) { bool indicesMatch = 
AreEqual(*(a.indices), *(b.indices), verbose); bool adjacencyMatch = AreEqual(*(a.adjacency), *(b.adjacency), verbose); if (!indicesMatch && verbose) printf("Indices of graphs differ!\n"); if (!adjacencyMatch && verbose) printf("Adjacency lists of graphs differ!\n"); return indicesMatch && adjacencyMatch; } bool AreEqual(AggMIS::Types::Graph_d& a, AggMIS::Types::Graph_h& b, bool verbose) { bool indicesMatch = AreEqual(*(a.indices), *(b.indices), verbose); bool adjacencyMatch = AreEqual(*(a.adjacency), *(b.adjacency), verbose); if (!indicesMatch && verbose) printf("Indices of graphs differ!\n"); if (!adjacencyMatch && verbose) printf("Adjacency lists of graphs differ!\n"); return indicesMatch && adjacencyMatch; } } namespace Display { void Print(AggMIS::Types::IntVector_h& toPrint, int start, int end, std::string message) { printf("%s:\n", message.c_str()); printf("\n %8d: ", 0); for (int i = start; i < end; i++) { if ((i-start) % 10 == 0 && (i-start) > 0) printf("\n %8d: ", i); int value = toPrint[i]; printf(" %8d", value); } printf("\n"); } void Print(IntVector_d& toPrint, int start, int end, std::string message) { AggMIS::Types::IntVector_h temp(toPrint); Print(temp, start, end, message); temp.clear(); } void Print(AggMIS::Types::IntVector_d& toPrint, std::string message) { AggMIS::Types::IntVector_h temp(toPrint); Print(temp, 0, temp.size(), message); temp.clear(); } void Print(AggMIS::Types::IntVector_h& toPrint, std::string message) { Print(toPrint, 0, toPrint.size(), message); } void Print(std::vector<std::vector<std::vector<int> > >& toPrint, std::string message) { // Print out general info: printf("Triple vector %s has %d entries:\n", message.c_str(), toPrint.size()); for (int i = 0; i < toPrint.size(); i++) { std::cout << message << "[" << i << "]: "; for (int z = 0; z < toPrint[i].size(); z++) { std::cout << "("; for (int zz = 0; zz < toPrint[i][z].size(); zz++) { std::cout << toPrint[i][z][zz]; if (zz < toPrint[i][z].size() -1) std::cout << " "; } std::cout << ") "; } std::cout << "\n"; } std::cout << "\n"; } void Print(std::vector<std::vector<int> >& toPrint, std::string message) { printf("%s:\n", message.c_str()); for (int j = 0; j < toPrint.size(); j++) { printf("\n %4d: ", j); for (int i = 0; i < toPrint[j].size(); i++) { if (i % 10 == 0 && i > 0) printf("\n %4d: ", j); int value = toPrint[j][i]; printf(" %4d", value); } } printf("\n"); } void Print(std::vector<int>& toPrint, int start, int end, std::string message) { IntVector_h temp(toPrint.begin(), toPrint.end()); Print(temp, start, end, message); } void Print(std::vector<int>& toPrint, std::string message) { Print(toPrint, 0, toPrint.size(), message); } } } }
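A minimal usage sketch for the utilities above, assuming IntVector_h/IntVector_d are thrust host/device vector typedefs (as the raw_pointer_cast calls suggest) and that the declarations live in a header named AggMIS_Types.h; the header name and build setup are assumptions, not part of the original code.

// --- Supplementary usage sketch (not part of the original file) ---
#include <cstdio>
#include <thrust/fill.h>

#include "AggMIS_Types.h"  // hypothetical header declaring AggMIS::Types

int main() {
  using namespace AggMIS::Types;

  // Two host vectors that differ in a single element.
  IntVector_h a(1000, 1), b(1000, 1);
  b[500] = 2;

  // Time a host-to-device copy plus a device-side fill with JTimer.
  JTimer timer;
  timer.start();
  IntVector_d onDevice(a);  // copy to the device via thrust
  thrust::fill(onDevice.begin(), onDevice.end(), 7);
  timer.stop();

  std::printf("device fill took %f ms\n", timer.getElapsedTimeInMilliSec(false));
  std::printf("a == b ? %s\n", Compare::AreEqual(a, b, true) ? "yes" : "no");
  return 0;
}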