#include "Host/Algorithm.hpp" //xlib::upper_bound_left #include "Device/DataMovement/RegReordering.cuh" //xlib::shuffle_reordering #include "Device/Util/Basic.cuh" //xlib::sync #include "Device/Util/DeviceProperties.cuh" //xlib::WARP_SIZE #include "Host/Metaprogramming.hpp" //xlib::get_arity namespace xlib { template<unsigned ITEMS_PER_BLOCK, typename T> __global__ void binarySearchLBPartition(const T* __restrict__ d_prefixsum, int prefixsum_size, int* __restrict__ d_partitions, int num_partitions) { int id = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = id; i < num_partitions; i += stride) { T searched = static_cast<T>(i) * ITEMS_PER_BLOCK; d_partitions[i] = xlib::upper_bound_left(d_prefixsum, prefixsum_size, searched); } if (id == 0) d_partitions[num_partitions] = prefixsum_size - 2; } //============================================================================== //============================================================================== template<unsigned BLOCK_SIZE, bool LAST_BLOCK, unsigned ITEMS_PER_THREAD, typename T> __device__ __forceinline__ void blockBinarySearchLB(const T* __restrict__ d_prefixsum, int block_search_low, T* __restrict__ smem_prefix, int smem_size, int (&reg_pos)[ITEMS_PER_THREAD], T (&reg_offset)[ITEMS_PER_THREAD]) { T searched = block_search_low + static_cast<T>(threadIdx.x) * ITEMS_PER_THREAD; auto smem_tmp = smem_prefix + threadIdx.x; auto d_tmp = d_prefixsum + threadIdx.x; for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) { *smem_tmp = *d_tmp; smem_tmp += BLOCK_SIZE; d_tmp += BLOCK_SIZE; } // ALTERNATIVE 1 //for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) // smem_prefix[i] = d_prefixsum[i]; // ALTERNATIVE 2 /*auto smem_tmp = smem_prefix + threadIdx.x; #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { const int INDEX = i * BLOCK_SIZE; bool pred = INDEX + threadIdx.x < smem_size; smem_tmp[INDEX] = d_tmp[(pred) ? INDEX : smem_size - 1]; if (INDEX >= smem_size) break; }*/ xlib::sync<BLOCK_SIZE>(); int smem_pos = xlib::upper_bound_left(smem_prefix, smem_size, searched); T next = smem_prefix[smem_pos + 1]; T offset = searched - smem_prefix[smem_pos]; T limit = smem_prefix[smem_size - 1]; const int LOWEST = xlib::numeric_limits<int>::lowest; #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { reg_pos[i] = (!LAST_BLOCK || searched < limit) ? smem_pos : LOWEST; reg_offset[i] = offset; searched++; bool pred = (searched == next); offset = (pred) ? 0 : offset + 1; smem_pos = (pred) ? 
smem_pos + 1 : smem_pos; next = smem_prefix[smem_pos + 1]; } xlib::sync<BLOCK_SIZE>(); } //============================================================================== //============================================================================== template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, bool LAST_BLOCK, typename T, typename Lambda> __device__ __forceinline__ void binarySearchLB2(const int* __restrict__ d_partitions, int num_partitions, const T* __restrict__ d_prefixsum, int prefixsum_size, void* __restrict__ smem, const Lambda& lambda) { static_assert(xlib::get_arity<Lambda>() == 2, "binarySearchLB2 must have " "lambda expression with two arguments"); const unsigned ITEMS_PER_WARP = xlib::WARP_SIZE * ITEMS_PER_THREAD; const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * ITEMS_PER_THREAD; int reg_pos [ITEMS_PER_THREAD]; T reg_offset[ITEMS_PER_THREAD]; int block_start_pos = d_partitions[ blockIdx.x ]; int block_end_pos = d_partitions[ blockIdx.x + 1 ]; int smem_size = block_end_pos - block_start_pos + 2; int block_search_low = blockIdx.x * ITEMS_PER_BLOCK; auto smem_prefix = static_cast<T*>(smem); blockBinarySearchLB<BLOCK_SIZE, LAST_BLOCK> (d_prefixsum + block_start_pos, block_search_low, smem_prefix, smem_size, reg_pos, reg_offset); xlib::smem_reordering(reg_pos, smem_prefix); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { reg_pos[i] += block_start_pos; assert(reg_pos[i] < prefixsum_size); } int id = blockIdx.x * BLOCK_SIZE + threadIdx.x; int index = (id / xlib::WARP_SIZE) * ITEMS_PER_WARP + xlib::lane_id(); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { if (!LAST_BLOCK || reg_pos[i] >= 0) { assert(reg_pos[i] < prefixsum_size); lambda(reg_pos[i], index + i * xlib::WARP_SIZE); } } } template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, bool LAST_BLOCK, typename T, typename Lambda> __device__ __forceinline__ void binarySearchLB3(const int* __restrict__ d_partitions, int num_partitions, const T* __restrict__ d_prefixsum, int prefixsum_size, void* __restrict__ smem, const Lambda& lambda) { static_assert(xlib::get_arity<Lambda>() == 3, "binarySearchLB3 must have " "lambda expression with three arguments"); const unsigned ITEMS_PER_WARP = xlib::WARP_SIZE * ITEMS_PER_THREAD; const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * ITEMS_PER_THREAD; int reg_pos [ITEMS_PER_THREAD]; T reg_offset[ITEMS_PER_THREAD]; int block_start_pos = d_partitions[ blockIdx.x ]; int block_end_pos = d_partitions[ blockIdx.x + 1 ]; int smem_size = block_end_pos - block_start_pos + 2; int block_search_low = blockIdx.x * ITEMS_PER_BLOCK; auto smem_prefix = static_cast<T*>(smem); blockBinarySearchLB<BLOCK_SIZE, LAST_BLOCK> (d_prefixsum + block_start_pos, block_search_low, smem_prefix, smem_size, reg_pos, reg_offset); xlib::smem_reordering(reg_pos, reg_offset, smem_prefix); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { reg_pos[i] += block_start_pos; assert(reg_pos[i] < prefixsum_size); } int id = blockIdx.x * BLOCK_SIZE + threadIdx.x; int index = (id / xlib::WARP_SIZE) * ITEMS_PER_WARP + xlib::lane_id(); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { if (!LAST_BLOCK || reg_pos[i] >= 0) { assert(reg_pos[i] < prefixsum_size); lambda(reg_pos[i], reg_offset[i], index + i * xlib::WARP_SIZE); } } } //============================================================================== //============================================================================== /* template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, bool LAST_BLOCK = true, typename T> __device__ 
__forceinline__ void blockBinarySearchLB3(const T* __restrict__ d_prefixsum, int smem_size, int block_search_low, T* __restrict__ smem_prefix, int (&reg_pos)[ITEMS_PER_THREAD]) { T searched = block_search_low + static_cast<T>(threadIdx.x) * ITEMS_PER_THREAD; auto smem_tmp = smem_prefix + threadIdx.x; auto d_tmp = d_prefixsum + threadIdx.x; for (int i = threadIdx.x; i < smem_size; i += BLOCK_SIZE) { *smem_tmp = *d_tmp; smem_tmp += BLOCK_SIZE; d_tmp += BLOCK_SIZE; } xlib::sync<BLOCK_SIZE>(); int smem_pos = xlib::upper_bound_left(smem_prefix, smem_size, searched); T next = smem_prefix[smem_pos + 1]; T limit = smem_prefix[smem_size - 1]; #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { reg_pos[i] = (!LAST_BLOCK || searched < limit) ? smem_pos : smem_size; searched++; bool pred = (searched == next); smem_pos = (pred) ? smem_pos + 1 : smem_pos; next = smem_prefix[smem_pos + 1]; } xlib::sync<BLOCK_SIZE>(); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) smem_prefix[threadIdx.x * ITEMS_PER_THREAD + i] = reg_pos[i]; xlib::sync<BLOCK_SIZE>(); } template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, bool LAST_BLOCK = true, typename T, typename Lambda> __device__ __forceinline__ void binarySearchLB3(const int* __restrict__ d_partitions, int num_partitions, const T* __restrict__ d_prefixsum, int prefixsum_size, void* __restrict__ smem, const Lambda& lambda) { const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * ITEMS_PER_THREAD; int reg_pos [ITEMS_PER_THREAD]; int reg_indices[ITEMS_PER_THREAD]; T reg_offset [ITEMS_PER_THREAD]; int block_start_pos = d_partitions[ blockIdx.x ]; int block_end_pos = d_partitions[ blockIdx.x + 1 ]; int smem_size = block_end_pos - block_start_pos + 2; int block_search_low = blockIdx.x * ITEMS_PER_BLOCK; auto smem_buffer = static_cast<T*>(smem);// + ITEMS_PER_BLOCK; blockBinarySearchLB3<BLOCK_SIZE, ITEMS_PER_THREAD, LAST_BLOCK> (d_prefixsum + block_start_pos, smem_size, block_search_low, smem_buffer, reg_pos); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { int index = threadIdx.x + i * BLOCK_SIZE; reg_pos[i] = smem_buffer[index]; reg_indices[i] = block_search_low + index; } #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) reg_pos[i] += block_start_pos; #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; i++) { if (!LAST_BLOCK || reg_pos[i] < block_start_pos + smem_size) lambda(reg_pos[i], reg_offset[i], reg_indices[i]); } }*/ } // namespace xlib
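//==============================================================================
// Usage sketch (not part of this header): how the pieces above are typically
// combined. binarySearchLBPartition and binarySearchLB2 are the functions
// defined above; the consuming kernel, the grid sizing, and the shared-memory
// bound are illustrative assumptions, not values taken from the library.
//==============================================================================
namespace example {

template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD>
__global__
void consumeLB(const int* __restrict__ d_partitions, int num_partitions,
               const int* __restrict__ d_prefixsum,  int prefixsum_size,
               int* __restrict__ d_segment_of_item) {
    extern __shared__ int smem[];                  // per-block prefix-sum slice
    xlib::binarySearchLB2<BLOCK_SIZE, ITEMS_PER_THREAD, true>
        (d_partitions, num_partitions, d_prefixsum, prefixsum_size, smem,
         [=] (int pos, int index) {
             // pos:   segment owning this work-item (index into d_prefixsum)
             // index: global rank of the work-item
             d_segment_of_item[index] = pos;
         });
}

inline void launchLB(const int* d_prefixsum, int prefixsum_size, int total_work,
                     int* d_partitions,       // must hold num_partitions + 1 ints
                     int* d_segment_of_item) {
    constexpr unsigned BLOCK_SIZE       = 128;
    constexpr unsigned ITEMS_PER_THREAD = 4;
    constexpr unsigned ITEMS_PER_BLOCK  = BLOCK_SIZE * ITEMS_PER_THREAD;

    int num_partitions = (total_work + ITEMS_PER_BLOCK - 1) / ITEMS_PER_BLOCK;
    xlib::binarySearchLBPartition<ITEMS_PER_BLOCK>
        <<< (num_partitions + 255) / 256, 256 >>>
        (d_prefixsum, prefixsum_size, d_partitions, num_partitions);

    // one block per partition; (ITEMS_PER_BLOCK + 2) entries is a conservative
    // guess for the largest prefix-sum slice a block stages in shared memory
    size_t smem_bytes = (ITEMS_PER_BLOCK + 2) * sizeof(int);
    consumeLB<BLOCK_SIZE, ITEMS_PER_THREAD>
        <<< num_partitions, BLOCK_SIZE, smem_bytes >>>
        (d_partitions, num_partitions, d_prefixsum, prefixsum_size,
         d_segment_of_item);
}

} // namespace example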
the_stack
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include "utilits.h" #include "easywave.h" #include "cOgrd.h" #include "cOkadaEarthquake.h" // CUDA kernels #include "kernels.cuh" double diff(timespec start, timespec end) { timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return (double)((double)temp.tv_nsec / 1000000000.0 + (double)temp.tv_sec); } int commandLineHelp( void ); int main( int argc, char **argv ) { char buf[1024]; int ierr = 0; int argn; long int elapsed; int lastProgress,lastPropagation; int loop; // reading parameters from a file FILE *fp; char fileLabel[5]; unsigned short shval; int isBin,i,j,m,k; float fval; double dval; printf(HEADER); Err.setchannel(MSG_OUTFILE); // Read parameters from command line and use default struct EWPARAMS Par; // Bathymetry if( ( argn = utlCheckCommandLineOption( argc, argv, "grid", 4 ) ) != 0 ) { /* TODO: strdup not necessary here because all arguments in argv reside until program exit -> memory leak */ Par.fileBathymetry = strdup( argv[argn+1] ); } else return commandLineHelp(); // Source: Okada faults or Surfer grid if( ( argn = utlCheckCommandLineOption( argc, argv, "source", 6 ) ) != 0 ) { Par.fileSource = strdup( argv[argn+1] ); } else return commandLineHelp(); // Simulation time, [sec] if( ( argn = utlCheckCommandLineOption( argc, argv, "time", 4 ) ) != 0 ) { Par.timeMax = atoi( argv[argn+1] ); Par.timeMax *= 60; } else return commandLineHelp(); // Optional parameters or their default values // Model name if( ( argn = utlCheckCommandLineOption( argc, argv, "label", 3 ) ) != 0 ) { Par.modelName = strdup( argv[argn+1] ); } else Par.modelName = strdup( "eWave" ); // Deactivate logging if( ( argn = utlCheckCommandLineOption( argc, argv, "nolog", 5 ) ) != 0 ) ; else { Log.start( "easywave.log" ); Log.timestamp_disable(); } // Use Coriolis force //if( ( argn = utlCheckCommandLineOption( argc, argv, "coriolis", 3 ) ) != 0 ) // Par.coriolis = 1; //else Par.coriolis = 0; // Periodic dumping of mariograms and cumulative 2D-plots (wavemax, arrival times), [sec] if( ( argn = utlCheckCommandLineOption( argc, argv, "dump", 4 ) ) != 0 ) Par.outDump = atoi( argv[argn+1] ); else Par.outDump = 0; // Reporting simulation progress, [sec model time] if( ( argn = utlCheckCommandLineOption( argc, argv, "progress", 4 ) ) != 0 ) Par.outProgress = (int)(atof(argv[argn+1])*60); else Par.outProgress = 600; // 2D-wave propagation output, [sec model time] if( ( argn = utlCheckCommandLineOption( argc, argv, "propagation", 4 ) ) != 0 ) Par.outPropagation = (int)(atof(argv[argn+1])*60); else Par.outPropagation = 300; // minimal calculation depth, [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "min_depth", 9 ) ) != 0 ) Par.dmin = (float)atof(argv[argn+1]); else Par.dmin = 10.; // timestep, [sec] if( ( argn = utlCheckCommandLineOption( argc, argv, "step", 4 ) ) != 0 ) Par.dt = atoi(argv[argn+1]); else Par.dt = 0; // will be estimated automatically // Initial uplift: relative threshold if( ( argn = utlCheckCommandLineOption( argc, argv, "ssh0_rel", 8 ) ) != 0 ) Par.ssh0ThresholdRel = (float)atof(argv[argn+1]); else Par.ssh0ThresholdRel = 0.01; // Initial uplift: absolute threshold, [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "ssh0_abs", 8 ) ) != 0 ) Par.ssh0ThresholdAbs = (float)atof(argv[argn+1]); else 
Par.ssh0ThresholdAbs = 0.0; // Threshold for 2-D arrival time (0 - do not calculate), [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "ssh_arrival", 9 ) ) != 0 ) Par.sshArrivalThreshold = (float)atof(argv[argn+1]); else Par.sshArrivalThreshold = 0.001; // Threshold for clipping of expanding computational area, [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "ssh_clip", 8 ) ) != 0 ) Par.sshClipThreshold = (float)atof(argv[argn+1]); else Par.sshClipThreshold = 1.e-4; // Threshold for resetting the small ssh (keep expanding area from unnesessary growing), [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "ssh_zero", 8 ) ) != 0 ) Par.sshZeroThreshold = (float)atof(argv[argn+1]); else Par.sshZeroThreshold = 1.e-5; // Threshold for transparency (for png-output), [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "ssh_transparency", 8 ) ) != 0 ) Par.sshTransparencyThreshold = (float)atof(argv[argn+1]); else Par.sshTransparencyThreshold = 0.0; // Points Of Interest (POIs) input file if( ( argn = utlCheckCommandLineOption( argc, argv, "poi", 3 ) ) != 0 ) { Par.filePOIs = strdup( argv[argn+1] ); } else Par.filePOIs = NULL; // POI fitting: max search distance, [km] if( ( argn = utlCheckCommandLineOption( argc, argv, "poi_search_dist", 15 ) ) != 0 ) Par.poiDistMax = (float)atof(argv[argn+1]); else Par.poiDistMax = 10.0; Par.poiDistMax *= 1000.; // POI fitting: min depth, [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "poi_min_depth", 13 ) ) != 0 ) Par.poiDepthMin = (float)atof(argv[argn+1]); else Par.poiDepthMin = 1.0; // POI fitting: max depth, [m] if( ( argn = utlCheckCommandLineOption( argc, argv, "poi_max_depth", 13 ) ) != 0 ) Par.poiDepthMax = (float)atof(argv[argn+1]); else Par.poiDepthMax = 10000.0; // report of POI loading if( ( argn = utlCheckCommandLineOption( argc, argv, "poi_report", 7 ) ) != 0 ) Par.poiReport = 1; else Par.poiReport = 0; // POI output interval, [sec] if( ( argn = utlCheckCommandLineOption( argc, argv, "poi_dt_out", 10 ) ) != 0 ) Par.poiDt = atoi(argv[argn+1]); else Par.poiDt = 30; if( ( argn = utlCheckCommandLineOption( argc, argv, "gpu", 3 ) ) != 0 ) Par.gpu = true; else Par.gpu = false; if( ( argn = utlCheckCommandLineOption( argc, argv, "adjust_ztop", 11 ) ) != 0 ) Par.adjustZtop = true; else Par.adjustZtop = false; if( ( argn = utlCheckCommandLineOption( argc, argv, "verbose", 7 ) ) != 0 ) Par.verbose = true; else Par.verbose = false; // Log command line sprintf( buf, "Command line: " ); for( argn=1; argn<argc; argn++ ) { strcat( buf, " " ); strcat( buf, argv[argn] ); } Log.print( "%s", buf ); Log.print( "Loading bathymetry from %s", Par.fileBathymetry ); // check if bathymetry file is in ascii or binary format if( (fp=fopen(Par.fileBathymetry,"rb")) == NULL ) return Err.post( Err.msgOpenFile(Par.fileBathymetry) ); memset( fileLabel, 0, 5 ); ierr = fread( fileLabel, 4, 1, fp ); if( !strcmp( fileLabel,"DSAA" ) ) isBin = 0; else if( !strcmp( fileLabel,"DSBB" ) ) isBin = 1; else return Err.post( "%s: not GRD-file!", Par.fileBathymetry ); fclose(fp); // set the values of NLon and NLat int NLon, NLat; double LonMin, LatMin; double LonMax, LatMax; double DLon, DLat; double Dx, Dy; if( isBin ) { fp = fopen( Par.fileBathymetry, "rb" ); ierr = fread( fileLabel, 4, 1, fp ); ierr = fread( &shval, sizeof(unsigned short), 1, fp ); NLon = shval; ierr = fread( &shval, sizeof(unsigned short), 1, fp ); NLat = shval; } else { fp = fopen( Par.fileBathymetry, "rt" ); ierr = fscanf( fp, "%s", fileLabel ); ierr = fscanf( fp, " %d %d ", &NLon, &NLat ); } 
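/* For reference, the Surfer GRD header exactly as consumed by the reads above
 * and below (explanatory summary only):
 *   ASCII  ("DSAA"): label, "NLon NLat", "LonMin LonMax", "LatMin LatMax",
 *                    "zmin zmax", then NLon*NLat values in text form
 *   binary ("DSBB"): 4-byte label, two uint16 (NLon, NLat), six doubles
 *                    (LonMin, LonMax, LatMin, LatMax, zmin, zmax),
 *                    then NLon*NLat float32 values                          */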
// set the values of min/max Lon and Lat if( isBin ) { ierr = fread( &LonMin, sizeof(double), 1, fp ); ierr = fread( &LonMax, sizeof(double), 1, fp ); ierr = fread( &LatMin, sizeof(double), 1, fp ); ierr = fread( &LatMax, sizeof(double), 1, fp ); ierr = fread( &dval, sizeof(double), 1, fp ); ierr = fread( &dval, sizeof(double), 1, fp ); // zmin zmax } else { ierr = fscanf( fp, " %lf %lf ", &LonMin, &LonMax ); ierr = fscanf( fp, " %lf %lf ", &LatMin, &LatMax ); ierr = fscanf( fp, " %*s %*s " ); // zmin, zmax } DLon = (LonMax - LonMin)/(NLon - 1); // in degrees DLat = (LatMax - LatMin)/(NLat - 1); Dx = Re * g2r( DLon ); // in m along the equator Dy = Re * g2r( DLat ); // allocate memory for GRIDNODE structure and for caching arrays float* node = (float*) malloc(sizeof(float)*NLon*NLat*MAX_VARS_PER_NODE); if (node == NULL) return Err.post( Err.msgAllocateMem() ); float* R6 = (float*) malloc( sizeof(float) * (NLat+1) ); if (R6 == NULL) return Err.post( Err.msgAllocateMem() ); float* C1 = (float*) malloc( sizeof(float) * (NLon+1) ); if (C1 == NULL) return Err.post( Err.msgAllocateMem() ); float* C3 = (float*) malloc( sizeof(float) * (NLon+1) ); if (C3 == NULL) return Err.post( Err.msgAllocateMem() ); float* C2 = (float*) malloc( sizeof(float) * (NLat+1) ); if (C2 == NULL) return Err.post( Err.msgAllocateMem() ); float* C4 = (float*) malloc( sizeof(float) * (NLat+1) ); if (C4 == NULL) return Err.post( Err.msgAllocateMem() ); if( isBin ) { /* NOTE: optimal would be reading everything in one step, but that does not work because rows and columns are transposed * (only possible with binary data at all) - use temporary buffer for now (consumes additional memory!) */ float *buf = new float[ NLat*NLon ]; ierr = fread( buf, sizeof(float), NLat*NLon, fp ); for( i=1; i<=NLon; i++ ) { for( j=1; j<=NLat; j++ ) { m = idx(j,i); if( isBin ) fval = buf[ (j-1) * NLon + (i-1) ]; //ierr = fread( &fval, sizeof(float), 1, fp ); Node(m, iTopo) = fval; Node(m, iTime) = -1; Node(m, iD) = -fval; if( Node(m, iD) < 0 ) { Node(m, iD) = 0.0f; } else if( Node(m, iD) < Par.dmin ) { Node(m, iD) = Par.dmin; } } } delete[] buf; } else { for( j=1; j<=NLat; j++ ) { for( i=1; i<=NLon; i++ ) { m = idx(j,i); ierr = fscanf( fp, " %f ", &fval ); Node(m, iTopo) = fval; Node(m, iTime) = -1; Node(m, iD) = -fval; if( Node(m, iD) < 0 ) { Node(m, iD) = 0.0f; } else if( Node(m, iD) < Par.dmin ) { Node(m, iD) = Par.dmin; } } } } for( k=1; k<MAX_VARS_PER_NODE-2; k++ ) { for( int i=1; i<=NLon; i++ ) { for( int j=1; j<=NLat; j++ ) { Node(idx(j,i), k) = 0; } } } fclose( fp ); if( !Par.dt ) { // time step not explicitly defined // Make bathymetry from topography. Compute stable time step. 
double dtLoc=RealMax; for( i=1; i<=NLon; i++ ) { for( j=1; j<=NLat; j++ ) { m = idx(j,i); if( Node(m, iD) == 0.0f ) continue; dtLoc = My_min( dtLoc, 0.8 * (Dx*cosdeg(getLat(j))) / sqrt(Gravity*Node(m, iD)) ); } } if( dtLoc > 15 ) Par.dt = 15; else if( dtLoc > 10 ) Par.dt = 10; else if( dtLoc > 5 ) Par.dt = 5; else if( dtLoc > 2 ) Par.dt = 2; else if( dtLoc > 1 ) Par.dt = 1; else return Err.post("Bathymetry requires too small time step (<1sec)"); Log.print("Stable CFL time step: %g sec", dtLoc); } // Correct bathymetry for edge artefacts for( i=1; i<=NLon; i++ ) { if( Node(idx(1,i), iD) != 0 && Node(idx(2,i), iD) == 0 ) Node(idx(1,i), iD) = 0.; if( Node(idx(NLat,i), iD) != 0 && Node(idx(NLat-1,i), iD) == 0 ) Node(idx(NLat,i), iD) = 0.; } for( j=1; j<=NLat; j++ ) { if( Node(idx(j,1), iD) != 0 && Node(idx(j,2), iD) == 0 ) Node(idx(j,1), iD) = 0.; if( Node(idx(j,NLon), iD) != 0 && Node(idx(j,NLon-1), iD) == 0 ) Node(idx(j,NLon), iD) = 0.; } // Calculate caching grid parameters for speedup for( j=1; j<=NLat; j++ ) { R6[j] = cosdeg( LatMin + (j-0.5)*DLat ); } for( i=1; i<=NLon; i++ ) { for( j=1; j<=NLat; j++ ) { m = idx(j,i); if( Node(m, iD) == 0 ) continue; Node(m, iR1) = Par.dt/Dy/R6[j]; if( i != NLon ) { if( Node(m+NLat, iD) != 0 ) { Node(m, iR2) = 0.5*Gravity*Par.dt/Dy/R6[j]*(Node(m, iD)+Node(m+NLat, iD)); Node(m, iR3) = 0.5*Par.dt*Omega*sindeg( LatMin + (j-0.5)*DLat ); } } else { Node(m, iR2) = 0.5*Gravity*Par.dt/Dy/R6[j]*Node(m, iD)*2; Node(m, iR3) = 0.5*Par.dt*Omega*sindeg( LatMin + (j-0.5)*DLat ); } if( j != NLat ) { if( Node(m+1, iD) != 0 ) { Node(m, iR4) = 0.5*Gravity*Par.dt/Dy*(Node(m, iD)+Node(m+1, iD)); Node(m, iR5) = 0.5*Par.dt*Omega*sindeg( LatMin + j*DLat ); } } /* FIXME: Bug? */ else { Node(m, iR2) = 0.5*Gravity*Par.dt/Dy*Node(m, iD)*2; Node(m, iR3) = 0.5*Par.dt*Omega*sindeg( LatMin + j*DLat ); } } } for( i=1; i<=NLon; i++ ) { C1[i] = 0; if( Node(idx(1,i), iD) != 0 ) C1[i] = 1./sqrt(Gravity*Node(idx(1,i), iD)); C3[i] = 0; if( Node(idx(NLat,i), iD) != 0 ) C3[i] = 1./sqrt(Gravity*Node(idx(NLat,i), iD)); } for( j=1; j<=NLat; j++ ) { C2[j] = 0; if( Node(idx(j,1), iD) != 0 ) C2[j] = 1./sqrt(Gravity*Node(idx(j,1), iD)); C4[j] = 0; if( Node(idx(j,NLon), iD) != 0 ) C4[j] = 1./sqrt(Gravity*Node(idx(j,NLon), iD)); } int NPOIs = 0; // read first record and get idea about the input type char record[256], /*buf[256],*/id[64]; FILE *fpAcc,*fpRej; int i0,j0,imin,imax,jmin,jmax,flag,it,n; int rad,nmin; double d2,d2min,lenLon,lenLat,depth; //double POIdistMax,POIdepthMin,POIdepthMax; double lon, lat; char **idPOI; long* idxPOI; int* flagRunupPOI; float **sshPOI; int* timePOI = NULL; int NtPOI; // Read points of interest if( Par.filePOIs != NULL ) { Log.print("Loading POIs from %s", Par.filePOIs ); int MaxPOIs = utlGetNumberOfRecords( Par.filePOIs ); if( !MaxPOIs ) return Err.post( "Empty POIs file" ); idPOI = new char*[MaxPOIs]; if( !idPOI ) return Err.post( Err.msgAllocateMem() ); idxPOI = new long[MaxPOIs]; if( !idxPOI ) return Err.post( Err.msgAllocateMem() ); flagRunupPOI = new int[MaxPOIs]; if( !flagRunupPOI ) return Err.post( Err.msgAllocateMem() ); sshPOI = new float*[MaxPOIs]; if( !sshPOI ) return Err.post( Err.msgAllocateMem() ); fp = fopen( Par.filePOIs, "rt" ); int line = 0; utlReadNextRecord( fp, record, &line ); int itype = sscanf( record, "%s %s %s", buf, buf, buf ); fclose( fp ); if( itype == 2 ) { // poi-name and grid-index fp = fopen( Par.filePOIs, "rt" ); line = NPOIs = 0; while( utlReadNextRecord( fp, record, &line ) != EOF ) { i = sscanf( record, "%s %d", id, &nmin ); if( i 
!= 2 ) { Log.print( "! Bad POI record: %s", record ); continue; } idPOI[NPOIs] = strdup(id); idxPOI[NPOIs] = nmin; flagRunupPOI[NPOIs] = 1; NPOIs++; } fclose( fp ); Log.print( "%d POIs of %d loaded successfully; %d POIs rejected", NPOIs, MaxPOIs, (MaxPOIs-NPOIs) ); } else if( itype == 3 ) { // poi-name and coordinates if( Par.poiReport ) { fpAcc = fopen( "poi_accepted.lst", "wt" ); fprintf( fpAcc, "ID lon lat lonIJ latIJ depthIJ dist[km]\n" ); fpRej = fopen( "poi_rejected.lst", "wt" ); } lenLat = My_PI*Re/180; fp = fopen( Par.filePOIs, "rt" ); line = NPOIs = 0; while( utlReadNextRecord( fp, record, &line ) != EOF ) { i = sscanf( record, "%s %lf %lf %d", id, &lon, &lat, &flag ); if( i == 3 ) flag = 1; else if( i == 4 ) ; else { Log.print( "! Bad POI record: %s", record ); if( Par.poiReport ) fprintf( fpRej, "%s\n", record ); continue; } // find the closest water grid node. Local distances could be // treated as cartesian (2 min cell distortion at 60 degrees is only about 2 meters or 0.2%) i0 = (int)((lon - LonMin)/DLon) + 1; j0 = (int)((lat - LatMin)/DLat) + 1; if( i0<1 || i0>NLon || j0<1 || j0>NLat ) { Log.print( "!POI out of grid: %s", record ); if( Par.poiReport ) fprintf( fpRej, "%s\n", record ); continue; } lenLon = lenLat * R6[j0]; for( nmin=-1,rad=0; rad<NLon && rad<NLat; rad++ ) { d2min = RealMax; imin = i0-rad; if( imin < 1 ) imin = 1; imax = i0+rad+1; if( imax > NLon ) imax = NLon; jmin = j0-rad; if( jmin < 1 ) jmin = 1; jmax = j0+rad+1; if( jmax > NLat ) jmax = NLat; for( i=imin; i<=imax; i++ ) for( j=jmin; j<=jmax; j++ ) { if( i != imin && i != imax && j != jmin && j != jmax ) continue; n = idx(j,i); depth = Node(n, iD); if( depth < Par.poiDepthMin || depth > Par.poiDepthMax ) continue; d2 = pow( lenLon*(lon-getLon(i)), 2. ) + pow( lenLat*(lat-getLat(j)), 2. ); if( d2 < d2min ) { d2min = d2; nmin = n; } } if( nmin > 0 ) break; } if( sqrt(d2min) > Par.poiDistMax ) { Log.print( "! 
Closest water node too far: %s", record ); if( Par.poiReport ) fprintf( fpRej, "%s\n", record ); continue; } idPOI[NPOIs] = strdup(id); idxPOI[NPOIs] = nmin; flagRunupPOI[NPOIs] = flag; NPOIs++; i = nmin/NLat + 1; j = nmin - (i-1)*NLat + 1; if( Par.poiReport ) fprintf( fpAcc, "%s %.4f %.4f %.4f %.4f %.1f %.3f\n", id, lon, lat, getLon(i), getLat(j), Node(nmin, iD), sqrt(d2min)/1000 ); } fclose( fp ); Log.print( "%d POIs of %d loaded successfully; %d POIs rejected", NPOIs, MaxPOIs, (MaxPOIs-NPOIs) ); if( Par.poiReport ) { fclose( fpAcc ); fclose( fpRej ); } } // if mareograms if( Par.poiDt ) { NtPOI = Par.timeMax/Par.poiDt + 1; timePOI = new int[NtPOI]; for( it=0; it<NtPOI; it++ ) timePOI[it] = -1; for( n=0; n<NPOIs; n++ ) { sshPOI[n] = new float[NtPOI]; for( it=0; it<NtPOI; it++ ) sshPOI[n][it] = 0.; } } } // Init tsunami with faults or uplift-grid //ierr = ewSource(); if(ierr) return ierr; char dsaa_label[8]; int srcType; double dz,absuzmax,absuzmin; cOkadaEarthquake eq; cOgrd uZ; // check input file type: GRD or fault if( (fp = fopen( Par.fileSource, "rb" )) == NULL ) return Err.post( Err.msgOpenFile(Par.fileSource) ); memset( dsaa_label, 0, 5 ); ierr = fread( dsaa_label, 4, 1, fp ); if( !strcmp( dsaa_label,"DSAA" ) || !strcmp( dsaa_label,"DSBB" ) ) srcType = 1; else srcType = 2; fclose(fp); // load GRD file if( srcType == 1) { ierr = uZ.readGRD( Par.fileSource ); if(ierr) return ierr; } // read fault(s) from file if( srcType == 2) { int effSymSource = 0; //long l; double dist,energy,factLat,effRad,effMax; ierr = eq.read( Par.fileSource ); if(ierr) return ierr; if( Par.adjustZtop ) { // check fault parameters Err.disable(); ierr = eq.finalizeInput(); while( ierr ) { i = ierr/10; ierr = ierr - 10*i; if( ierr == FLT_ERR_STRIKE ) { Log.print( "No strike on input: Employing effective symmetric source model" ); if( eq.nfault > 1 ) { Err.enable(); return Err.post("Symmetric source assumes only 1 fault"); } eq.fault[0].strike = 0.; effSymSource = 1; } else if( ierr == FLT_ERR_ZTOP ) { Log.print( "Automatic depth correction to fault top @ 10 km" ); eq.fault[i].depth = eq.fault[i].width/2 * sindeg(eq.fault[i].dip) + 10.e3; } else { Err.enable(); return ierr; } ierr = eq.finalizeInput(); } Err.enable(); } else { // check fault parameters Err.disable(); ierr = eq.finalizeInput(); if( ierr ) { i = ierr/10; ierr = ierr - 10*i; if( ierr != FLT_ERR_STRIKE ) { Err.enable(); ierr = eq.finalizeInput(); return ierr; } Log.print( "No strike on input: Employing effective symmetric source model" ); Err.enable(); if( eq.nfault > 1 ) return Err.post("symmetric source assumes only 1 fault"); eq.fault[0].strike = 0.; effSymSource = 1; ierr = eq.finalizeInput(); if(ierr) return ierr; } Err.enable(); } // calculate uplift on a rectangular grid // set grid resolution, grid dimensions will be set automatically uZ.dx = DLon; uZ.dy = DLat; ierr = eq.calculate( uZ ); if(ierr) return ierr; if( effSymSource ) { // integrate for tsunami energy energy = 0.; for( j=0; j<uZ.ny; j++ ) { factLat = Dx*cosdeg(uZ.getY(0,j))*Dy; for( i=0; i<uZ.nx; i++ ) energy += pow(uZ(i,j),2.)*factLat; } energy *= (1000*9.81/2); effRad = eq.fault[0].length/sqrt(2*M_PI); effMax = 1./effRad / sqrt(M_PI/2) / sqrt(1000*9.81/2) * sqrt(energy); Log.print( "Effective source radius: %g km, max height: %g m", effRad/1000, effMax ); // transfer uplift onto tsunami grid and define deformed area for acceleration for( i=0; i<uZ.nx; i++ ) { for( j=0; j<uZ.ny; j++ ) { dist = GeoDistOnSphere( uZ.getX(i,j),uZ.getY(i,j), eq.fault[0].lon,eq.fault[0].lat ) * 1000; 
if( dist < effRad ) uZ(i,j) = effMax*cos(M_PI/2*dist/effRad); else uZ(i,j) = 0.; } } } // effective source } // src_type == fault // remove noise in the source absuzmax = uZ.getMaxAbsVal(); if( (Par.ssh0ThresholdRel + Par.ssh0ThresholdAbs) != 0 ) { absuzmin = RealMax; if( Par.ssh0ThresholdRel != 0 ) absuzmin = Par.ssh0ThresholdRel*absuzmax; if( Par.ssh0ThresholdAbs != 0 && Par.ssh0ThresholdAbs < absuzmin ) absuzmin = Par.ssh0ThresholdAbs; for( i=0; i<uZ.nx; i++ ) { for( j=0; j<uZ.ny; j++ ) { if( fabs(uZ(i,j)) < absuzmin ) uZ(i,j) = 0; } } } // calculated (if needed) arrival threshold (negative value means it is relative) if( Par.sshArrivalThreshold < 0 ) Par.sshArrivalThreshold = absuzmax * fabs(Par.sshArrivalThreshold); // transfer uplift onto tsunami grid and define deformed area for acceleration // set initial min and max values int Imin = NLon; int Imax = 1; int Jmin = NLat; int Jmax = 1; /* FIXME: change loops */ for( i=1; i<=NLon; i++ ) { for( j=1; j<=NLat; j++ ) { lon = getLon(i); lat = getLat(j); if( Node(idx(j,i), iD) != 0. ) dz = Node(idx(j,i), iH) = uZ.getVal( lon,lat ); else dz = Node(idx(j,i), iH) = 0.; if( fabs(dz) > Par.sshClipThreshold ) { Imin = My_min( Imin, i ); Imax = My_max( Imax, i ); Jmin = My_min( Jmin, j ); Jmax = My_max( Jmax, j ); } } } if( Imin == NLon ) return Err.post( "Zero initial displacement" ); Imin = My_max( Imin - 2, 2 ); Imax = My_min( Imax + 2, NLon-1 ); Jmin = My_max( Jmin - 2, 2 ); Jmax = My_min( Jmax + 2, NLat-1 ); Log.print( "Read source from %s", Par.fileSource ); // Write model parameters into the log Log.print("\nModel parameters for this simulation:"); Log.print("timestep: %d sec", Par.dt); Log.print("max time: %g min", (float)Par.timeMax/60); Log.print("poi_dt_out: %d sec", Par.poiDt); Log.print("poi_report: %s", (Par.poiReport ? "yes" : "no") ); Log.print("poi_search_dist: %g km", Par.poiDistMax/1000.); Log.print("poi_min_depth: %g m", Par.poiDepthMin); Log.print("poi_max_depth: %g m", Par.poiDepthMax); //Log.print("coriolis: %s", (Par.coriolis ? 
"yes" : "no") ); Log.print("min_depth: %g m", Par.dmin); Log.print("ssh0_rel: %g", Par.ssh0ThresholdRel); Log.print("ssh0_abs: %g m", Par.ssh0ThresholdAbs); Log.print("ssh_arrival: %g m", Par.sshArrivalThreshold); Log.print("ssh_clip: %g m", Par.sshClipThreshold); Log.print("ssh_zero: %g m", Par.sshZeroThreshold); Log.print("ssh_transparency: %g m\n", Par.sshTransparencyThreshold); int Nrec2DOutput; char* IndexFile; if( Par.outPropagation ) { // start index file sprintf( buf, "%s.2D.idx", Par.modelName ); IndexFile = strdup(buf); fp = fopen( IndexFile, "wt" ); fprintf( fp, "%g %g %d %g %g %d\n", LonMin, LonMax, NLon, LatMin, LatMax, NLat ); fclose( fp ); Nrec2DOutput = 0; } short nOutI; short nOutJ; double lonOutMin; double lonOutMax; double latOutMin; double latOutMax; double dtmp; float ftmp; Log.print("Starting main loop..."); timespec start, inter, end; clock_gettime(CLOCK_MONOTONIC, &start); float* d_node; hipMalloc((void**)&d_node, sizeof(float)*NLat*NLon*MAX_VARS_PER_NODE); hipMemcpyAsync(d_node, node, sizeof(float)*NLat*NLon*MAX_VARS_PER_NODE, hipMemcpyHostToDevice, 0); float* d_R6; hipMalloc((void**)&d_R6, sizeof(float)*(NLat+1)); hipMemcpyAsync(d_R6, R6, sizeof(float)*(NLat+1), hipMemcpyHostToDevice, 0); float* d_C2; hipMalloc((void**)&d_C2, sizeof(float)*(NLat+1)); hipMemcpyAsync(d_C2, C2, sizeof(float)*(NLat+1), hipMemcpyHostToDevice, 0); float* d_C4; hipMalloc((void**)&d_C4, sizeof(float)*(NLat+1)); hipMemcpyAsync(d_C4, C4, sizeof(float)*(NLat+1), hipMemcpyHostToDevice, 0); float* d_C1; hipMalloc((void**)&d_C1, sizeof(float)*(NLon+1)); hipMemcpyAsync(d_C1, C1, sizeof(float)*(NLon+1), hipMemcpyHostToDevice, 0); float* d_C3; hipMalloc((void**)&d_C3, sizeof(float)*(NLon+1)); hipMemcpyAsync(d_C3, C3, sizeof(float)*(NLon+1), hipMemcpyHostToDevice, 0); int* d_Imin; hipMalloc((void**)&d_Imin, sizeof(int)); int* d_Imax; hipMalloc((void**)&d_Imax, sizeof(int)); int* d_Jmin; hipMalloc((void**)&d_Jmin, sizeof(int)); int* d_Jmax; hipMalloc((void**)&d_Jmax, sizeof(int)); for( Par.time=0,loop=1,lastProgress=Par.outProgress,lastPropagation=Par.outPropagation; Par.time<=Par.timeMax; loop++,Par.time+=Par.dt,lastProgress+=Par.dt,lastPropagation+=Par.dt ) { /* FIXME: check if Par.poiDt can be used for those purposes */ if( Par.filePOIs && Par.poiDt && ((Par.time/Par.poiDt)*Par.poiDt == Par.time) ) { // SavePOIs hipMemcpy(node, d_node, sizeof(float)*NLat*NLon*MAX_VARS_PER_NODE, hipMemcpyDeviceToHost); it = Par.time / Par.poiDt; timePOI[it] = Par.time; for( n=0; n<NPOIs; n++ ) { float ampFactor = 1.; if( flagRunupPOI[n] ) ampFactor = pow( Node(idxPOI[n], iD), 0.25 ); sshPOI[n][it] = ampFactor * Node(idxPOI[n], iH); } } // sea floor topography (mass conservation) dim3 grids((Jmax-Jmin+16)/16, (Imax-Imin+16)/16); dim3 threads(16, 16); hipLaunchKernelGGL(kernel, grids, threads, 0, 0, d_node, d_R6, Imin, Jmin, Imax, Jmax, NLat, Par.sshZeroThreshold, Par.sshArrivalThreshold, Par.time); hipLaunchKernelGGL(kernel2, dim3(1), dim3(1), 0, 0, d_node, d_C1, d_C2, d_C3, d_C4, Imin, Jmin, Imax, Jmax, NLat, NLon); hipLaunchKernelGGL(kernel3, grids, threads, 0, 0, d_node, d_R6, Imin, Jmin, Imax, Jmax, NLat); hipMemcpy(d_Imin, &Imin, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_Imax, &Imax, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_Jmin, &Jmin, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_Jmax, &Jmax, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(kernel4, dim3(1), dim3(1), 0, 0, d_node, d_C1, d_C2, d_C3, d_C4, d_Imin, d_Jmin, d_Imax, d_Jmax, NLat, NLon, Par.sshClipThreshold); 
hipMemcpy(&Imin, d_Imin, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&Imax, d_Imax, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&Jmin, d_Jmin, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&Jmax, d_Jmax, sizeof(int), hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &inter); elapsed = diff(start, inter) * 1000; if( Par.outProgress ) { if( lastProgress >= Par.outProgress ) { printf( "Model time = %s, elapsed: %ld msec\n", utlTimeSplitString(Par.time), elapsed ); Log.print( "Model time = %s, elapsed: %ld msec", utlTimeSplitString(Par.time), elapsed ); lastProgress = 0; } } fflush(stdout); if( Par.outPropagation ) { if( lastPropagation >= Par.outPropagation ) { Nrec2DOutput++; fp = fopen( IndexFile, "at" ); fprintf( fp, "%3.3d %s %d %d %d %d\n", Nrec2DOutput, utlTimeSplitString(Par.time), Imin, Imax, Jmin, Jmax ); fclose( fp ); lastPropagation = 0; } } } // main loop hipMemcpy(node, d_node, sizeof(float)*NLat*NLon*MAX_VARS_PER_NODE, hipMemcpyDeviceToHost); hipFree(d_node); hipFree(d_C1); hipFree(d_C2); hipFree(d_C3); hipFree(d_C4); hipFree(d_R6); hipFree(d_Imin); hipFree(d_Imax); hipFree(d_Jmin); hipFree(d_Jmax); clock_gettime(CLOCK_MONOTONIC, &end); Log.print("Finishing main loop"); // Final output Log.print("Final dump..."); if (NPOIs != 0) { // Dump POIs if( Par.poiDt ) { // Time series sprintf( buf, "%s.poi.ssh", Par.modelName ); fp = fopen( buf, "wt" ); fprintf( fp, "Minute" ); for( n=0; n<NPOIs; n++ ) fprintf( fp, " %s", idPOI[n] ); fprintf( fp, "\n" ); for( it=0; (timePOI[it] != -1 && it < NtPOI); it++ ) { fprintf( fp, "%6.2f", (double)timePOI[it]/60 ); for( n=0; n<NPOIs; n++ ) fprintf( fp, " %7.3f", sshPOI[n][it] ); fprintf( fp, "\n" ); } fclose( fp ); } // EAT EWH sprintf( buf, "%s.poi.summary", Par.modelName ); fp = fopen( buf, "wt" ); fprintf( fp, "ID ETA EWH\n" ); for( n=0; n<NPOIs; n++ ) { fprintf( fp, "%s", idPOI[n] ); float dbuf = Node(idxPOI[n], iTime)/60; if( dbuf < 0. 
) dbuf = -1.; fprintf( fp, " %6.2f", dbuf ); float ampFactor = 1.; if( flagRunupPOI[n] ) ampFactor = pow( Node(idxPOI[n], iD), 0.25 ); fprintf( fp, " %6.3f\n", (ampFactor * Node(idxPOI[n], iHmax)) ); } fclose( fp ); } //ewDump2D(); nOutI = Imax-Imin+1; lonOutMin = getLon(Imin); lonOutMax = getLon(Imax); nOutJ = Jmax-Jmin+1; latOutMin = getLat(Jmin); latOutMax = getLat(Jmax); // write ssh max sprintf( record, "%s.2D.sshmax", Par.modelName ); fp = fopen( record, "wb" ); fwrite( "DSBB", 4, 1, fp ); fwrite( &nOutI, sizeof(short), 1, fp ); fwrite( &nOutJ, sizeof(short), 1, fp ); fwrite( &lonOutMin, sizeof(double), 1, fp ); fwrite( &lonOutMax, sizeof(double), 1, fp ); fwrite( &latOutMin, sizeof(double), 1, fp ); fwrite( &latOutMax, sizeof(double), 1, fp ); dtmp = 0.; fwrite( &dtmp, sizeof(double), 1, fp ); dtmp = 1.; fwrite( &dtmp, sizeof(double), 1, fp ); for( j=Jmin; j<=Jmax; j++ ) { for( i=Imin; i<=Imax; i++ ) { ftmp = (float)Node(idx(j,i), iHmax); fwrite( &ftmp, sizeof(float), 1, fp ); } } fclose( fp ); // write arrival times sprintf( record, "%s.2D.time", Par.modelName ); fp = fopen( record, "wb" ); fwrite( "DSBB", 4, 1, fp ); fwrite( &nOutI, sizeof(short), 1, fp ); fwrite( &nOutJ, sizeof(short), 1, fp ); fwrite( &lonOutMin, sizeof(double), 1, fp ); fwrite( &lonOutMax, sizeof(double), 1, fp ); fwrite( &latOutMin, sizeof(double), 1, fp ); fwrite( &latOutMax, sizeof(double), 1, fp ); dtmp = 0.; fwrite( &dtmp, sizeof(double), 1, fp ); dtmp = 1.; fwrite( &dtmp, sizeof(double), 1, fp ); for( j=Jmin; j<=Jmax; j++ ) { for( i=Imin; i<=Imax; i++ ) { ftmp = (float)Node(idx(j,i), iTime) / 60; // -1/60 //printf("%f\n", ftmp); fwrite( &ftmp, sizeof(float), 1, fp ); } } fclose( fp ); free( node ); free( R6 ); free( C1 ); free( C2 ); free( C3 ); free( C4 ); printf_v("Runtime: %.3lf\n", diff(start, end) * 1000.0); return 0; } //======================================================================== int commandLineHelp( void ) { printf( "Usage: easywave -grid ... -source ... -time ... [optional parameters]\n" ); printf( "-grid ... bathymetry in GoldenSoftware(C) GRD format (text or binary)\n" ); printf( "-source ... input wave either als GRD-file or file with Okada faults\n" ); printf( "-time ... simulation time in [min]\n" ); printf( "Optional parameters:\n" ); printf( "-step ... simulation time step, default- estimated from bathymetry\n" ); printf( "-coriolis use Coriolis force, default- no\n" ); printf( "-poi ... POIs file\n" ); printf( "-label ... model name, default- 'eWave'\n" ); printf( "-progress ... show simulation progress each ... minutes, default- 10\n" ); printf( "-propagation ... write wave propagation grid each ... minutes, default- 5\n" ); printf( "-dump ... make solution dump each ... physical seconds, default- 0\n" ); printf( "-nolog deactivate logging\n" ); printf( "-poi_dt_out ... output time step for mariograms in [sec], default- 30\n" ); printf( "-poi_search_dist ... in [km], default- 10\n" ); printf( "-poi_min_depth ... in [m], default- 1\n" ); printf( "-poi_max_depth ... in [m], default- 10 000\n" ); printf( "-poi_report enable POIs loading report, default- disabled\n" ); printf( "-ssh0_rel ... relative threshold for initial wave, default- 0.01\n" ); printf( "-ssh0_abs ... absolute threshold for initial wave in [m], default- 0\n" ); printf( "-ssh_arrival ... 
threshold for arrival times in [m], default- 0.001\n" ); printf( " negative value considered as relative threshold\n" ); printf( "-gpu start GPU version of EasyWave (requires a CUDA capable device)\n" ); printf( "-verbose generate verbose output on stdout\n" ); printf( "\nExample:\n" ); printf( "\t easyWave -grid gebcoIndonesia.grd -source fault.inp -time 120\n\n" ); return -1; }
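//========================================================================
/* A self-contained sketch (function and parameter names are illustrative, not
 * taken from easyWave) of the CFL criterion used above to auto-select the time
 * step: the shallow-water wave speed is sqrt(g*h), the narrowest cell along a
 * latitude row is Dx*cos(lat), so dt <= 0.8 * cell_width / wave_speed.       */
#include <cmath>
#include <algorithm>

static double estimateStableTimeStep( const float *depth, int nLon, int nLat,
                                      double dxEquator /*[m]*/,
                                      double latMin /*[deg]*/, double dLat /*[deg]*/ )
{
  const double g = 9.81;
  const double deg2rad = 3.14159265358979323846 / 180.;
  double dt = 1.e30;
  for( int j=0; j<nLat; j++ ) {
    double cellWidth = dxEquator * cos( (latMin + j*dLat) * deg2rad );
    for( int i=0; i<nLon; i++ ) {
      double h = depth[j*nLon + i];      // this sketch stores depth row-major
      if( h <= 0. ) continue;            // dry/land node
      dt = std::min( dt, 0.8 * cellWidth / sqrt( g*h ) );
    }
  }
  return dt;  // easyWave then snaps this down to 15/10/5/2/1 seconds
}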
the_stack
#include <cuco/detail/pair.cuh> namespace cuco { namespace detail { namespace cg = cooperative_groups; /** * @brief Initializes each slot in the flat `slots` storage to contain `k` and `v`. * * Each space in `slots` that can hold a key value pair is initialized to a * `pair_atomic_type` containing the key `k` and the value `v`. * * @tparam atomic_key_type Type of the `Key` atomic container * @tparam atomic_mapped_type Type of the `Value` atomic container * @tparam Key key type * @tparam Value value type * @tparam pair_atomic_type key/value pair type * @param slots Pointer to flat storage for the map's key/value pairs * @param k Key to which all keys in `slots` are initialized * @param v Value to which all values in `slots` are initialized * @param size Size of the storage pointed to by `slots` */ template <typename atomic_key_type, typename atomic_mapped_type, typename Key, typename Value, typename pair_atomic_type> __global__ void initialize(pair_atomic_type* const slots, Key k, Value v, std::size_t size) { auto tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < size) { new (&slots[tid].first) atomic_key_type{k}; new (&slots[tid].second) atomic_mapped_type{v}; tid += gridDim.x * blockDim.x; } } /** * @brief Inserts all key/value pairs in the range `[first, last)`. * * Uses the CUDA Cooperative Groups API to leverage groups of multiple threads to perform each * key/value insertion. This provides a significant boost in throughput compared to the non * Cooperative Group `insert` at moderate to high load factors. * * @tparam block_size The size of the thread block * @tparam tile_size The number of threads in the Cooperative Groups used to perform * inserts * @tparam InputIt Device accessible random access input iterator where * `std::is_convertible<std::iterator_traits<InputIt>::value_type, * static_multimap<K, V>::value_type>` is `true` * @tparam viewT Type of device view allowing access of hash map storage * * @param first Beginning of the sequence of key/value pairs * @param last End of the sequence of key/value pairs * @param view Mutable device view used to access the hash map's slot storage */ template <uint32_t block_size, uint32_t tile_size, typename InputIt, typename viewT> __global__ void insert(InputIt first, InputIt last, viewT view) { auto tile = cg::tiled_partition<tile_size>(cg::this_thread_block()); auto tid = block_size * blockIdx.x + threadIdx.x; auto it = first + tid / tile_size; while (it < last) { // force conversion to value_type typename viewT::value_type const insert_pair{*it}; view.insert(tile, insert_pair); it += (gridDim.x * block_size) / tile_size; } } /** * @brief Inserts key/value pairs in the range `[first, first + n)` if `pred` of the * corresponding stencil returns true. * * The key/value pair `*(first + i)` is inserted if `pred( *(stencil + i) )` returns true. * * Uses the CUDA Cooperative Groups API to leverage groups of multiple threads to perform each * key/value insertion. This provides a significant boost in throughput compared to the non * Cooperative Group `insert` at moderate to high load factors. 
* * @tparam block_size The size of the thread block * @tparam tile_size The number of threads in the Cooperative Groups used to perform * inserts * @tparam InputIt Device accessible random access input iterator where * `std::is_convertible<std::iterator_traits<InputIt>::value_type, * static_multimap<K, V>::value_type>` is `true` * @tparam StencilIt Device accessible random access iterator whose value_type is * convertible to Predicate's argument type * @tparam viewT Type of device view allowing access of hash map storage * @tparam Predicate Unary predicate callable whose return type must be convertible to `bool` and * argument type is convertible from `std::iterator_traits<StencilIt>::value_type`. * @param first Beginning of the sequence of key/value pairs * @param s Beginning of the stencil sequence * @param n Number of elements to insert * @param view Mutable device view used to access the hash map's slot storage * @param pred Predicate to test on every element in the range `[s, s + n)` */ template <uint32_t block_size, uint32_t tile_size, typename InputIt, typename StencilIt, typename viewT, typename Predicate> __global__ void insert_if_n(InputIt first, StencilIt s, std::size_t n, viewT view, Predicate pred) { auto tile = cg::tiled_partition<tile_size>(cg::this_thread_block()); auto const tid = block_size * blockIdx.x + threadIdx.x; auto i = tid / tile_size; while (i < n) { if (pred(*(s + i))) { typename viewT::value_type const insert_pair{*(first + i)}; // force conversion to value_type view.insert(tile, insert_pair); } i += (gridDim.x * block_size) / tile_size; } } /** * @brief Indicates whether the keys in the range `[first, last)` are contained in the map. * * Stores `true` or `false` to `(output + i)` indicating if the key `*(first + i)` exists in the * map. * * Uses the CUDA Cooperative Groups API to leverage groups of multiple threads to perform the * contains operation for each key. This provides a significant boost in throughput compared * to the non Cooperative Group `contains` at moderate to high load factors. * * @tparam block_size The size of the thread block * @tparam tile_size The number of threads in the Cooperative Groups * @tparam InputIt Device accessible input iterator whose `value_type` is * convertible to the map's `key_type` * @tparam OutputIt Device accessible output iterator whose `value_type` is convertible from `bool` * @tparam viewT Type of device view allowing access of hash map storage * @tparam KeyEqual Binary callable type * @param first Beginning of the sequence of keys * @param last End of the sequence of keys * @param output_begin Beginning of the sequence of booleans for the presence of each key * @param view Device view used to access the hash map's slot storage * @param key_equal The binary function to compare two keys for equality */ template <uint32_t block_size, uint32_t tile_size, typename InputIt, typename OutputIt, typename viewT, typename KeyEqual> __global__ void contains( InputIt first, InputIt last, OutputIt output_begin, viewT view, KeyEqual key_equal) { auto tile = cg::tiled_partition<tile_size>(cg::this_thread_block()); auto tid = block_size * blockIdx.x + threadIdx.x; auto key_idx = tid / tile_size; __shared__ bool writeBuffer[block_size]; while (first + key_idx < last) { auto key = *(first + key_idx); auto found = view.contains(tile, key, key_equal); /* * The ld.relaxed.gpu instruction used in view.find causes L1 to * flush more frequently, causing increased sector stores from L2 to global memory. 
* By writing results to shared memory and then synchronizing before writing back * to global, we no longer rely on L1, preventing the increase in sector stores from * L2 to global and improving performance. */ if (tile.thread_rank() == 0) { writeBuffer[threadIdx.x / tile_size] = found; } __syncthreads(); if (tile.thread_rank() == 0) { *(output_begin + key_idx) = writeBuffer[threadIdx.x / tile_size]; } key_idx += (gridDim.x * block_size) / tile_size; } } /** * @brief Counts the occurrences of keys in `[first, last)` contained in the multimap. * * For each key, `k = *(first + i)`, counts all matching keys, `k'`, as determined by `key_equal(k, * k')` and stores the sum of all matches for all keys to `num_matches`. If `k` does not have any * matches, it contributes 1 to the final sum only if `is_outer` is true. * * @tparam block_size The size of the thread block * @tparam tile_size The number of threads in the Cooperative Groups used to perform counts * @tparam uses_vector_load Boolean flag indicating whether vector loads are used or not * @tparam is_outer Boolean flag indicating whether non-matches are counted * @tparam InputIt Device accessible input iterator whose `value_type` is convertible to the map's * `key_type` * @tparam atomicT Type of atomic storage * @tparam viewT Type of device view allowing access of hash map storage * @tparam KeyEqual Binary callable * @param first Beginning of the sequence of keys to count * @param last End of the sequence of keys to count * @param num_matches The number of all the matches for a sequence of keys * @param view Device view used to access the hash map's slot storage * @param key_equal Binary function to compare two keys for equality */ template <uint32_t block_size, uint32_t tile_size, bool is_outer, typename InputIt, typename atomicT, typename viewT, typename KeyEqual> __global__ void count( InputIt first, InputIt last, atomicT* num_matches, viewT view, KeyEqual key_equal) { auto tile = cg::tiled_partition<tile_size>(cg::this_thread_block()); auto tid = block_size * blockIdx.x + threadIdx.x; auto key_idx = tid / tile_size; typedef cub::BlockReduce<std::size_t, block_size> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; std::size_t thread_num_matches = 0; while (first + key_idx < last) { auto key = *(first + key_idx); if constexpr (is_outer) { thread_num_matches += view.count_outer(tile, key, key_equal); } else { thread_num_matches += view.count(tile, key, key_equal); } key_idx += (gridDim.x * block_size) / tile_size; } // compute number of successfully inserted elements for each block // and atomically add to the grand total std::size_t block_num_matches = BlockReduce(temp_storage).Sum(thread_num_matches); if (threadIdx.x == 0) { num_matches->fetch_add(block_num_matches, cuda::std::memory_order_relaxed); } } /** * @brief Counts the occurrences of key/value pairs in `[first, last)` contained in the multimap. * * For pair, `p = *(first + i)`, counts all matching pairs, `p'`, as determined by `pair_equal(p, * p')` and stores the sum of all matches for all pairs to `num_matches`. If `p` does not have any * matches, it contributes 1 to the final sum only if `is_outer` is true. 
* * @tparam block_size The size of the thread block * @tparam tile_size The number of threads in the Cooperative Groups used to perform counts * @tparam is_outer Boolean flag indicating whether non-matches are counted * @tparam InputIt Device accessible random access input iterator where * `std::is_convertible<std::iterator_traits<InputIt>::value_type, * static_multimap<K, V>::value_type>` is `true` * @tparam atomicT Type of atomic storage * @tparam viewT Type of device view allowing access of hash map storage * @tparam PairEqual Binary callable * @param first Beginning of the sequence of pairs to count * @param last End of the sequence of pairs to count * @param num_matches The number of all the matches for a sequence of pairs * @param view Device view used to access the hash map's slot storage * @param pair_equal Binary function to compare two pairs for equality */ template <uint32_t block_size, uint32_t tile_size, bool is_outer, typename InputIt, typename atomicT, typename viewT, typename PairEqual> __global__ void pair_count( InputIt first, InputIt last, atomicT* num_matches, viewT view, PairEqual pair_equal) { auto tile = cg::tiled_partition<tile_size>(cg::this_thread_block()); auto tid = block_size * blockIdx.x + threadIdx.x; auto pair_idx = tid / tile_size; typedef cub::BlockReduce<std::size_t, block_size> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; std::size_t thread_num_matches = 0; while (first + pair_idx < last) { typename viewT::value_type const pair = *(first + pair_idx); if constexpr (is_outer) { thread_num_matches += view.pair_count_outer(tile, pair, pair_equal); } else { thread_num_matches += view.pair_count(tile, pair, pair_equal); } pair_idx += (gridDim.x * block_size) / tile_size; } // compute number of successfully inserted elements for each block // and atomically add to the grand total std::size_t block_num_matches = BlockReduce(temp_storage).Sum(thread_num_matches); if (threadIdx.x == 0) { num_matches->fetch_add(block_num_matches, cuda::std::memory_order_relaxed); } } /** * @brief Retrieves all the values corresponding to all keys in the range `[first, last)`. * * For key `k = *(first + i)` existing in the map, copies `k` and all associated values to * unspecified locations in `[output_begin, output_end)`. If `k` does not have any matches, copies * `k` and `empty_value_sentinel()` into the output only if `is_outer` is true. * * Behavior is undefined if the total number of matching keys exceeds `std::distance(output_begin, * output_begin + *num_matches - 1)`. Use `count()` to determine the size of the output range. 
* * @tparam block_size The size of the thread block * @tparam flushing_cg_size The size of the CG used to flush output buffers * @tparam probing_cg_size The size of the CG for parallel retrievals * @tparam buffer_size Size of the output buffer * @tparam is_outer Boolean flag indicating whether non-matches are included in the output * @tparam InputIt Device accessible input iterator whose `value_type` is * convertible to the map's `key_type` * @tparam OutputIt Device accessible output iterator whose `value_type` is * constructible from the map's `value_type` * @tparam atomicT Type of atomic storage * @tparam viewT Type of device view allowing access of hash map storage * @tparam KeyEqual Binary callable type * @param first Beginning of the sequence of keys * @param last End of the sequence of keys * @param output_begin Beginning of the sequence of values retrieved for each key * @param num_matches Size of the output sequence * @param view Device view used to access the hash map's slot storage * @param key_equal The binary function to compare two keys for equality */ template <uint32_t block_size, uint32_t flushing_cg_size, uint32_t probing_cg_size, uint32_t buffer_size, bool is_outer, typename InputIt, typename OutputIt, typename atomicT, typename viewT, typename KeyEqual> __global__ void retrieve(InputIt first, InputIt last, OutputIt output_begin, atomicT* num_matches, viewT view, KeyEqual key_equal) { using pair_type = typename viewT::value_type; constexpr uint32_t num_flushing_cgs = block_size / flushing_cg_size; const uint32_t flushing_cg_id = threadIdx.x / flushing_cg_size; auto flushing_cg = cg::tiled_partition<flushing_cg_size>(cg::this_thread_block()); auto probing_cg = cg::tiled_partition<probing_cg_size>(cg::this_thread_block()); auto tid = block_size * blockIdx.x + threadIdx.x; auto key_idx = tid / probing_cg_size; __shared__ pair_type output_buffer[num_flushing_cgs][buffer_size]; // TODO: replace this with shared memory cuda::atomic variables once the dynamiic initialization // warning issue is solved __shared__ atomicT counter[num_flushing_cgs][buffer_size]; __shared__ uint32_t flushing_cg_counter[num_flushing_cgs]; if (flushing_cg.thread_rank() == 0) { flushing_cg_counter[flushing_cg_id] = 0; } while (flushing_cg.any(first + key_idx < last)) { bool active_flag = first + key_idx < last; auto active_flushing_cg = cg::binary_partition<flushing_cg_size>(flushing_cg, active_flag); if (active_flag) { auto key = *(first + key_idx); if constexpr (is_outer) { view.retrieve_outer<buffer_size>(active_flushing_cg, probing_cg, key, &flushing_cg_counter[flushing_cg_id], output_buffer[flushing_cg_id], num_matches, output_begin, key_equal); } else { view.retrieve<buffer_size>(active_flushing_cg, probing_cg, key, &flushing_cg_counter[flushing_cg_id], output_buffer[flushing_cg_id], num_matches, output_begin, key_equal); } } key_idx += (gridDim.x * block_size) / probing_cg_size; } // Final flush of output buffer if (flushing_cg_counter[flushing_cg_id] > 0) { view.flush_output_buffer(flushing_cg, flushing_cg_counter[flushing_cg_id], output_buffer[flushing_cg_id], num_matches, output_begin); } } /** * @brief Retrieves all pairs matching the input probe pair in the range `[first, last)`. * * If pair_equal(*(first + i), slot[j]) returns true, then *(first+i) is stored to unspecified * locations in `probe_output_begin`, and slot[j] is stored to unspecified locations in * `contained_output_begin`. 
If the given pair has no matches in the map, copies *(first + i) in * `probe_output_begin` and a pair of `empty_key_sentinel` and `empty_value_sentinel` in * `contained_output_begin` only when `is_outer` is `true`. * * Behavior is undefined if the total number of matching pairs exceeds `std::distance(output_begin, * output_begin + *num_matches - 1)`. Use `pair_count()` to determine the size of the output range. * * @tparam block_size The size of the thread block * @tparam flushing_cg_size The size of the CG used to flush output buffers * @tparam probing_cg_size The size of the CG for parallel retrievals * @tparam buffer_size Size of the output buffer * @tparam is_outer Boolean flag indicating whether non-matches are included in the output * @tparam InputIt Device accessible random access input iterator where * `std::is_convertible<std::iterator_traits<InputIt>::value_type, * static_multimap<K, V>::value_type>` is `true` * @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from * `InputIt`s `value_type`. * @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from * the map's `value_type`. * @tparam atomicT Type of atomic storage * @tparam viewT Type of device view allowing access of hash map storage * @tparam PairEqual Binary callable type * @param first Beginning of the sequence of keys * @param last End of the sequence of keys * @param probe_output_begin Beginning of the sequence of the matched probe pairs * @param contained_output_begin Beginning of the sequence of the matched contained pairs * @param num_matches Size of the output sequence * @param view Device view used to access the hash map's slot storage * @param pair_equal The binary function to compare two pairs for equality */ template <uint32_t block_size, uint32_t flushing_cg_size, uint32_t probing_cg_size, uint32_t buffer_size, bool is_outer, typename InputIt, typename OutputIt1, typename OutputIt2, typename atomicT, typename viewT, typename PairEqual> __global__ void pair_retrieve(InputIt first, InputIt last, OutputIt1 probe_output_begin, OutputIt2 contained_output_begin, atomicT* num_matches, viewT view, PairEqual pair_equal) { using pair_type = typename viewT::value_type; constexpr uint32_t num_flushing_cgs = block_size / flushing_cg_size; const uint32_t flushing_cg_id = threadIdx.x / flushing_cg_size; auto flushing_cg = cg::tiled_partition<flushing_cg_size>(cg::this_thread_block()); auto probing_cg = cg::tiled_partition<probing_cg_size>(cg::this_thread_block()); auto tid = block_size * blockIdx.x + threadIdx.x; auto pair_idx = tid / probing_cg_size; __shared__ pair_type probe_output_buffer[num_flushing_cgs][buffer_size]; __shared__ pair_type contained_output_buffer[num_flushing_cgs][buffer_size]; // TODO: replace this with shared memory cuda::atomic variables once the dynamiic initialization // warning issue is solved __shared__ atomicT counter[num_flushing_cgs][buffer_size]; __shared__ uint32_t flushing_cg_counter[num_flushing_cgs]; if (flushing_cg.thread_rank() == 0) { flushing_cg_counter[flushing_cg_id] = 0; } while (flushing_cg.any(first + pair_idx < last)) { bool active_flag = first + pair_idx < last; auto active_flushing_cg = cg::binary_partition<flushing_cg_size>(flushing_cg, active_flag); if (active_flag) { pair_type pair = *(first + pair_idx); if constexpr (is_outer) { view.pair_retrieve_outer<buffer_size>(active_flushing_cg, probing_cg, pair, &flushing_cg_counter[flushing_cg_id], probe_output_buffer[flushing_cg_id], 
contained_output_buffer[flushing_cg_id], num_matches, probe_output_begin, contained_output_begin, pair_equal); } else { view.pair_retrieve<buffer_size>(active_flushing_cg, probing_cg, pair, &flushing_cg_counter[flushing_cg_id], probe_output_buffer[flushing_cg_id], contained_output_buffer[flushing_cg_id], num_matches, probe_output_begin, contained_output_begin, pair_equal); } } pair_idx += (gridDim.x * block_size) / probing_cg_size; } // Final flush of output buffer if (flushing_cg_counter[flushing_cg_id] > 0) { view.flush_output_buffer(flushing_cg, flushing_cg_counter[flushing_cg_id], probe_output_buffer[flushing_cg_id], contained_output_buffer[flushing_cg_id], num_matches, probe_output_begin, contained_output_begin); } } } // namespace detail } // namespace cuco
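/*
 * Editor's sketch (not part of cuco): the retrieve / pair_retrieve kernels above stage
 * matches in a per-flushing-CG shared-memory buffer and publish them to the global output
 * with a single atomic fetch-add on num_matches per flush. The standalone kernel below is
 * a minimal, hypothetical illustration of that buffer-and-flush pattern for plain ints;
 * the names buffered_copy_if, BUFFER_SIZE and the "even value" predicate are invented for
 * this sketch and do not exist in cuco. Launch with BLOCK_SIZE threads per block and a
 * zero-initialized d_num_out counter.
 */
#include <cuda_runtime.h>

constexpr int BLOCK_SIZE  = 128;
constexpr int BUFFER_SIZE = 256;   // BUFFER_SIZE >= BLOCK_SIZE keeps the staging buffer from overflowing

__global__ void buffered_copy_if(const int* d_in, int n, int* d_out, int* d_num_out) {
  __shared__ int buffer[BUFFER_SIZE];
  __shared__ int buffer_count;   // matches currently staged in shared memory
  __shared__ int flush_offset;   // base position in d_out claimed for the current flush

  if (threadIdx.x == 0) { buffer_count = 0; }
  __syncthreads();

  // Block-uniform grid-stride loop so every thread reaches the same __syncthreads() calls.
  for (int base = blockIdx.x * blockDim.x; base < n; base += gridDim.x * blockDim.x) {
    int  i        = base + threadIdx.x;
    bool is_match = (i < n) && (d_in[i] % 2 == 0);   // stand-in for the real predicate
    if (is_match) {
      int slot     = atomicAdd(&buffer_count, 1);    // claim a staging slot
      buffer[slot] = d_in[i];
    }
    __syncthreads();
    // Flush before the buffer could overflow on the next iteration
    // (each iteration adds at most blockDim.x new entries).
    if (buffer_count > BUFFER_SIZE - BLOCK_SIZE) {
      if (threadIdx.x == 0) { flush_offset = atomicAdd(d_num_out, buffer_count); }
      __syncthreads();
      for (int j = threadIdx.x; j < buffer_count; j += blockDim.x) {
        d_out[flush_offset + j] = buffer[j];
      }
      __syncthreads();
      if (threadIdx.x == 0) { buffer_count = 0; }
    }
    __syncthreads();
  }
  // Final flush of whatever is still staged, mirroring the tail flush in retrieve above.
  if (buffer_count > 0) {
    if (threadIdx.x == 0) { flush_offset = atomicAdd(d_num_out, buffer_count); }
    __syncthreads();
    for (int j = threadIdx.x; j < buffer_count; j += blockDim.x) {
      d_out[flush_offset + j] = buffer[j];
    }
  }
}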
namespace lightseq { namespace cuda { template <OperationType OpType_> MoeDecoder<OpType_>::MoeDecoder(int max_batch_size, const int* p_d_padding_mask, const _DataType* p_d_encoder_output, int* p_d_result, MoeWeight<OpType_>& tw, cudaStream_t stream, cublasHandle_t hd, bool output_topk, const int* p_d_lang_id) : _max_batch_size(max_batch_size), _max_thread_per_block(1024), _h_can_num_batch(0), _cub_sort_buffer_bytes(max_batch_size * tw._beam_size * tw._trg_vocab_size * sizeof(_DataType)), _p_d_padding_mask(p_d_padding_mask), _p_d_encoder_output(p_d_encoder_output), _p_d_result(p_d_result), _p_d_trg_emb_wei(tw.get_trg_emb_wei()), _p_d_dec_wei(tw.get_dec_wei()), _tw(tw), _stream(stream), _hd(hd), _output_topk(output_topk), _p_d_lang_id(p_d_lang_id), // source token id _layer_size_encdec_k(max_batch_size * tw._max_step * tw._hidden_size), _layer_size_self_k(max_batch_size * tw._max_step * tw._hidden_size * tw._beam_size), _type_one(1.f), _type_zero(0.f), _fzero(0.f), _atten_scaler(sqrt(1.f / tw._dim_per_head)), _logit_scaler(_tw._no_scale_embedding ? 1.f : sqrt(1.f / tw._hidden_size)), _h_alive_seq_probs(max_batch_size * tw._beam_size, min_log_probability / 2), _h_length_norm(tw._max_step, 1.f), _h_unfinished(1), _gate_weight_offset(0), _p_d_dec_gate_wei(tw.get_dec_gate_wei()), _max_step_token_num(max_batch_size * tw._beam_size) { for (int i = 0; i < _h_alive_seq_probs.size(); i += tw._beam_size) { _h_alive_seq_probs[i] = 0.f; } if (tw._length_penalty >= 0) { for (int i = 0; i < _h_length_norm.size(); i++) { _h_length_norm[i] = length_norm(i + 1, tw._length_penalty); } } return; } /** Compute GPU memory size needed by moe decoder, to see how these memory is used, checkout init_buffer() for detail */ template <OperationType OpType_> long MoeDecoder<OpType_>::compute_buffer_bytesize() { long cache_bytesize = 4 * _tw._n_dec_layer * _layer_size_self_k + 2 * _tw._n_dec_layer * _layer_size_encdec_k + _max_batch_size * _tw._beam_size * _tw._hidden_size; cache_bytesize *= sizeof(_DataType); long decode_buffer_bytesize = _max_batch_size * _tw._beam_size * _tw._hidden_size * 4 + _max_batch_size * _tw._beam_size * max(_tw._hidden_size, _tw._inner_size) + _max_batch_size * _tw._head_num * _tw._beam_size * _tw._max_step + _max_step_token_num * _tw._expert_num_decoder * (_tw._hidden_size + _tw._inner_size + 1); decode_buffer_bytesize *= sizeof(_DataType); decode_buffer_bytesize += (_max_step_token_num * _tw._expert_num_decoder * sizeof(float) + _tw._moe_topk_decoder * _max_step_token_num * sizeof(int)); long sf = _max_batch_size * _tw._beam_size * _tw._trg_vocab_size * 2 + _max_batch_size * _tw._beam_size * 2; long si = _max_batch_size * _tw._beam_size * _tw._max_step * 2 + _max_batch_size * _tw._beam_size * _tw._trg_vocab_size + _max_batch_size * _tw._beam_size + 1; long beam_buffer_bytesize = sf * sizeof(float) + si * sizeof(int); return cache_bytesize + max(decode_buffer_bytesize, beam_buffer_bytesize); } /** Init the GPU memory pointer which point to the memory buffer needed by decoder. 
These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void MoeDecoder<OpType_>::init_buffer(void* pbuf) { std::cout << "decoder buffer init start" << std::endl; _DataType* curp = reinterpret_cast<_DataType*>(pbuf); for (int i = 0; i < _tw._n_dec_layer; i++) { // encoder ouput after project, the "key" of enc_dec attention _p_d_encdec_k_bgeem.push_back(curp); curp += _layer_size_encdec_k; } for (int i = 0; i < _tw._n_dec_layer; i++) { // encoder ouput after project, the "value" of enc_dec attention _p_d_encdec_v_bgeem.push_back(curp); curp += _layer_size_encdec_k; } // reused buffer with _p_d_self_k_bgeem _p_d_self_v_bgeem, // no need to add curp because _p_d_encoder_out_buf is smaller // and no need to use it any more after get _p_d_encdec_k_bgeem // and _p_d_encdec_v_bgeem _p_d_encoder_out_buf = curp; for (int i = 0; i < _tw._n_dec_layer * 2; i++) { // the "key" of decoder self attention, we need to maintain it by twice // one for current step's "key", one for "key" of beam_search cache // after finishing current step's search, we will copy the first one // to the second one to refresh beam_search cache // based on the selected beam id _p_d_self_k_bgeem.push_back(curp); curp += _layer_size_self_k; } for (int i = 0; i < _tw._n_dec_layer * 2; i++) { // the "value" of decoder self attention, we need to maintain it by twice // one for current step's "value", one for "value" of beam_search cache // after finishing current step's search, we will copy the first one // to the second one to refresh beam_search cache // based on the selected beam id _p_d_self_v_bgeem.push_back(curp); curp += _layer_size_self_k; } _p_d_self_k_bgeem1 = _p_d_self_k_bgeem.data(); _p_d_self_k_bgeem2 = _p_d_self_k_bgeem.data() + _tw._n_dec_layer; _p_d_self_v_bgeem1 = _p_d_self_v_bgeem.data(); _p_d_self_v_bgeem2 = _p_d_self_v_bgeem.data() + _tw._n_dec_layer; // GPU memory buffer to save "query", // In all layers, using the same buffer _p_d_cur_step_query = curp; curp += _max_batch_size * _tw._beam_size * _tw._hidden_size; // we can use the same buffer for decoder network computation // and beam search, since they're serial. 
_DataType* reuse_p = curp; // for decode network computation _p_d_self_step_qkv = curp; // [q, k, v], result of gemm curp += _max_batch_size * _tw._beam_size * _tw._hidden_size * 3; _p_d_query_buf1 = curp; // "query" buffer curp += _max_batch_size * _tw._beam_size * _tw._hidden_size; _p_d_query_buf2 = curp; // "query" buffer curp += _max_batch_size * _tw._beam_size * max(_tw._hidden_size, _tw._inner_size); _p_d_c = curp; // correlation(attention score) buffer curp += _max_batch_size * _tw._head_num * _tw._beam_size * _tw._max_step; _p_d_gate = curp; // moe gate buffer curp += _tw._expert_num_decoder * _max_step_token_num; _p_d_moe_input_buf = curp; // moe input buffer curp += _tw._expert_num_decoder * _max_step_token_num * _tw._hidden_size; _p_d_moe_inner_buf = curp; // moe ffns buffer curp += _tw._expert_num_decoder * _max_step_token_num * _tw._inner_size; _p_d_score_routed = reinterpret_cast<float*>(curp); // expert routing score // ids of routed experts in moe _p_d_expert_id_routed = reinterpret_cast<int*>( _p_d_score_routed + _max_step_token_num * _tw._expert_num_decoder); // for beam search curp = reuse_p; _p_d_logit_buf = curp; // vocab ligit curp += _max_batch_size * _tw._beam_size * _tw._trg_vocab_size; // always be float float* fcurp = (float*)curp; // seq score ended with every target token for current step _p_d_can_score = fcurp; fcurp += _max_batch_size * _tw._beam_size * _tw._trg_vocab_size; _p_d_alive_seq_probs = fcurp; // alive seq probability fcurp += _max_batch_size * _tw._beam_size; _p_d_alive_seq_score = fcurp; // alive seq score fcurp += _max_batch_size * _tw._beam_size; int* pint = reinterpret_cast<int*>(fcurp); // FIXME std::vector<int> start_id_vec( _max_batch_size * _tw._beam_size * _tw._max_step * 2, _tw._start_id); usleep(3000); CHECK_GPU_ERROR(cudaMemcpyAsync(pint, start_id_vec.data(), sizeof(int) * start_id_vec.size(), cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); // token id of alive seq, we need to maintain it by twice // one for current step, one for beam_search cache // after finishing current step's search, we will copy the first one // to the second one to refresh beam_search cache // based on the selected beam id _p_d_alive_seq = pint; pint += _max_batch_size * _tw._beam_size * _tw._max_step; _p_d_alive_seq_buf = pint; pint += _max_batch_size * _tw._beam_size * _tw._max_step; // candidate token id for every beam, selected by rough top-beam_size op _p_d_can_idx = pint; pint += _max_batch_size * _tw._beam_size * _tw._trg_vocab_size; // candidate token number for every beam, selected by rough top-beam_size op _p_d_can_num = pint; pint += _max_batch_size * _tw._beam_size + 1; CHECK_GPU_ERROR(cudaMalloc((void**)&_p_d_sample_unfinished, sizeof(int))); CHECK_GPU_ERROR(cudaMalloc((void**)&_p_d_curandstate, _max_batch_size * sizeof(curandState))); ker_curand_setup<<<_max_batch_size, 1, 0, _stream>>>(_p_d_curandstate); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); CHECK_GPU_ERROR(cudaGetLastError()); std::cout << "decoder buffer init succeed" << std::endl; return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string MoeDecoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { std::cout << "decoder_dim_per_head:" << _tw._dim_per_head << std::endl; return "violate dim_per_head % 2 = 0"; } if 
(_tw._multilg_type == 0 && _p_d_trg_emb_wei.size() != 7) { return "violate p_d_trg_emb_wei.size() = 7"; } if (_tw._multilg_type != 0 && _p_d_trg_emb_wei.size() != 8) { return "violate p_d_trg_emb_wei.size() = 8"; } if (_p_d_dec_wei.size() != _tw._weight_per_dec_layer * _tw._n_dec_layer) { return "violate p_d_dec_wei.size() = weight_per_dec_layer * n_dec_layer"; } bool btmp = false; for (int i = 1; i < 64; i *= 2) { if (i == _tw._beam_size) { btmp = true; break; } } if (!btmp) { return "wrong beam_size, should be 1, 2, 4, 8, 16 or 32"; } std::string sampling_method = _tw._sampling_method; if (kSamplingMethods.find(sampling_method) == kSamplingMethods.end()) { return std::string("unsupported sampling_method: ") + sampling_method; } if (sampling_method == "topk" || sampling_method == "topp") { _output_topk = false; } if (sampling_method == "topk_greedy") { _output_topk = true; } if (_tw._multilg_type != 0 && _p_d_lang_id == nullptr) { return "lang id should not be null when multilg"; } if (_tw._moe_topk_decoder != 1 && _tw._moe_topk_decoder != 2) { return "moe topk should be 1 or 2"; } if (_tw._expert_num_decoder > 1024) { return "number of moe expert should not be greater than 1024"; } return ""; } /** Decoder inference */ template <OperationType OpType_> void MoeDecoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { if (batch_size > _max_batch_size) { throw std::runtime_error("batch size of input greater than max_batch_size"); } if (batch_seq_len > _tw._max_step) { throw std::runtime_error("seq len of input greater than max_step"); } /* ---step1. init--- */ _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; _step_token_num = batch_size * _tw._beam_size; _batch_max_decode_length = min(_tw._max_step, batch_seq_len + _tw._extra_decode_length) - 1; _is_sampling = (_tw._sampling_method == "topk" || _tw._sampling_method == "topp" || _tw._sampling_method == "topk_greedy"); if (_is_sampling) { _batch_max_decode_length = _tw._max_step; } project_encoder_output(); // project encoder output // init the first step's token id with target start_id CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_alive_seq_probs, _h_alive_seq_probs.data(), sizeof(float) * _batch_size * _tw._beam_size, cudaMemcpyHostToDevice, _stream)); /* ---step2. autoregressive decoding--- */ for (_cur_step = 0; _cur_step < _batch_max_decode_length - 1; _cur_step++) { #ifdef DEBUG_RESULT std::cout << "*** run step " << _cur_step << " ***" << std::endl; #endif if (run_step()) { // one step break; } } /* ---step3. 
output the decoding result--- */ if (_output_topk || _is_sampling) { if (_cur_step == _batch_max_decode_length) { _cur_step -= 1; } ker_write_topk_result<<<_batch_size * _tw._beam_size, _cur_step + 1, 0, _stream>>>( _p_d_alive_seq, _p_d_alive_seq_score, _p_d_result, _tw._trg_vocab_size, _tw._max_step, _tw._beam_size, _tw._end_id); return; } if (_tw._length_penalty >= 0.f || _cur_step == _batch_max_decode_length) { ker_write_trg_tokenid_pos_penalty<<<_batch_size, _cur_step + 1, 0, _stream>>>( _p_d_alive_seq, _p_d_alive_seq_score, _p_d_result, _tw._max_step, _tw._beam_size); } else { ker_write_trg_tokenid_neg_penalty<<<_batch_size, _cur_step + 1, 0, _stream>>>( _p_d_alive_seq, _p_d_alive_seq_score, _p_d_result, _tw._max_step, _tw._beam_size, _tw._trg_vocab_size, _tw._end_id); } #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { print_vec(_p_d_result + i * (_cur_step + 1), "finial res", _cur_step + 1); } #endif return; } /** Project encoder output */ template <OperationType OpType_> void MoeDecoder<OpType_>::project_encoder_output() { int kv_dim = _tw._hidden_size * 2 * _tw._n_dec_layer; #ifdef DEBUG_RESULT CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); print_vec(_p_d_encoder_output, "_p_d_encoder_output(head):", 5); print_vec(_p_d_encoder_output + _batch_token_num * _tw._hidden_size - 5, "_p_d_encoder_output(tail)", 5); print_vec(_p_d_trg_emb_wei[4], "encoder project(head):", 10); #endif CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, kv_dim, _batch_token_num, _tw._hidden_size, &_type_one, _p_d_trg_emb_wei[4], _AType, kv_dim, _p_d_encoder_output, _BType, _tw._hidden_size, &_type_zero, _p_d_encoder_out_buf, _CType, kv_dim, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // _p_d_encoder_out_buf: [batch_size, batch_seq_len, layer_num, 2, // hidden_size] #ifdef DEBUG_RESULT CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); print_vec(_p_d_encoder_out_buf, "encoder out(head):", 5); print_vec(_p_d_encoder_out_buf + _batch_token_num * _tw._hidden_size * _tw._n_dec_layer - 5, "encoder out(tail):", 5); #endif ker_arrange_encdec_kv_launcher<_DataType>( _batch_token_num, _tw._n_dec_layer, _tw._hidden_size, _stream, _p_d_encoder_out_buf, _p_d_trg_emb_wei[5], _p_d_encdec_k_bgeem[0], _p_d_encdec_v_bgeem[0], _layer_size_encdec_k, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); return; } /** Decode one step */ template <OperationType OpType_> bool MoeDecoder<OpType_>::run_step() { embedding(); decoder_stack(); /* --- Project hidden states to vocab logits--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._trg_vocab_size, _step_token_num, _tw._hidden_size, &_logit_scaler, _p_d_trg_emb_wei[0], _AType, _tw._trg_vocab_size, _p_d_cur_step_query, _BType, _tw._hidden_size, //&_type_zero, _p_d_logit_buf, _CType, _tw._trg_vocab_size, _computeType, &_fzero, _p_d_logit_buf, _CType, _tw._trg_vocab_size, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _tw._beam_size; j++) { // beam_id std::cout << "decoder output: batch-" << i << ", beam-" << j << std::endl; print_vec(_p_d_cur_step_query + i * _tw._beam_size * _tw._hidden_size + j * _tw._hidden_size, "hidden", 10); print_vec(_p_d_logit_buf + i * _tw._beam_size * _tw._trg_vocab_size + j * _tw._trg_vocab_size, "logits", 10); } } #endif if (_tw._sampling_method == "topk") { return sample(); } else if (_tw._sampling_method == "topp") { return sample(); } else if (_tw._sampling_method == "topk_greedy") { return 
topk_greedy_search(); } else if (_tw._sampling_method == "beam_search") { return beam_search(); } else { throw std::runtime_error("not supported sampling_method"); } } // namespace cuda /** Decode embedding */ template <OperationType OpType_> void MoeDecoder<OpType_>::embedding() { // _p_d_trg_emb_wei: {token_emb, position_emb, norm_scale, norm_bias, // enc_out_kernel_kv, enc_out_bias_kv, logit_bias} launch_dec_emb<_DataType>(_p_d_trg_emb_wei[0], _p_d_trg_emb_wei[1], _p_d_alive_seq, _p_d_trg_emb_wei[7], _p_d_lang_id, _p_d_cur_step_query, _batch_size, _tw._beam_size, _tw._hidden_size, _tw._trg_vocab_size, _cur_step, _tw._max_step, _tw._multilg_type, _stream); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _tw._beam_size; j++) { // beam_id std::cout << "decoder emb: batch-" << i << ", beam-" << j << std::endl; print_vec(_p_d_cur_step_query + i * _tw._beam_size * _tw._hidden_size + j * _tw._hidden_size, "emb", 10); } } #endif return; } /** Decoder feedforward, composed by self_atten, enc-dec-atten, ffn */ template <OperationType OpType_> void MoeDecoder<OpType_>::decoder_stack() { // _p_d_dec_wei = {self_norm_scale, self_norm_bias, // self_qkv_kernel, self_qkv_bias, self_output_kernel, self_output_bias // encdec_norm_scale, encdec_norm_bias, // encdec_q_kernel, encdec_q_bias, encdec_output_kernel, encdec_output_bias // ffn_norm_scale, ffn_norm_bias, ffn_first_kernel, ffn_first_bias, // ffn_second_kernel, ffn_second_bias} * encoder_layer_num _gate_weight_offset = 0; for (_layer_id = 0; _layer_id < _tw._n_dec_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_dec_layer; self_attention(); encdec_attention(); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_cur_step_query, _p_d_trg_emb_wei[2], _p_d_trg_emb_wei[3], _max_thread_per_block); return; } /** Decoder self attention */ template <OperationType OpType_> void MoeDecoder<OpType_>::self_attention() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_cur_step_query, _p_d_query_buf1, _p_d_dec_wei[_weight_offset], _p_d_dec_wei[_weight_offset + 1], _p_d_dec_wei[_weight_offset + 5], _max_thread_per_block, _tw._is_post_ln); #ifdef DEBUG_RESULT print_vec(_p_d_query_buf1, "self attn ln(head): ", 5); print_vec(_p_d_query_buf1 + _step_token_num * _tw._hidden_size - 5, "self attn ln(tail): ", 5); #endif /* ---step 1. 
qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_query_buf1, _BType, _tw._hidden_size, &_type_zero, _p_d_self_step_qkv, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_self_step_qkv, "self qkv(head): ", 5); print_vec(_p_d_self_step_qkv + _step_token_num * _tw._hidden_size * 3 - 5, "self qkv(tail): ", 5); #endif // get q, k, v by split and reshape qkv ker_arrange_decself_qkv_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_self_step_qkv, _p_d_dec_wei[_weight_offset + 3], _p_d_query_buf1, _p_d_self_k_bgeem1[_layer_id], _p_d_self_v_bgeem1[_layer_id], _tw._head_num, _tw._dim_per_head, _tw._max_step, _cur_step, _max_thread_per_block); #ifdef DEBUG_RESULT print_vec(_p_d_query_buf1, "self attn q(head): ", 5); print_vec(_p_d_query_buf1 + _step_token_num * _tw._hidden_size - 5, "self attn q(tail): ", 5); print_vec(_p_d_self_k_bgeem1[_layer_id] + _cur_step * _tw._hidden_size / _tw._head_num, "self attn k(head): ", 5); print_vec( _p_d_self_k_bgeem1[_layer_id] + _step_token_num * _tw._hidden_size * _tw._max_step - ((_tw._max_step - _cur_step - 1) * _tw._hidden_size / _tw._head_num) - 5, "self attn k(tail): ", 5); print_vec(_p_d_self_v_bgeem1[_layer_id] + _cur_step * _tw._hidden_size / _tw._head_num, "self attn v(head): ", 5); print_vec( _p_d_self_v_bgeem1[_layer_id] + _step_token_num * _tw._hidden_size * _tw._max_step - ((_tw._max_step - _cur_step - 1) * _tw._hidden_size / _tw._head_num) - 5, "self attn v(tail): ", 5); #endif /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _cur_step + 1, 1, _tw._dim_per_head, &_atten_scaler, _p_d_self_k_bgeem1[_layer_id], _AType, _tw._dim_per_head, _tw._max_step * _tw._dim_per_head, _p_d_query_buf1, _BType, _tw._dim_per_head, _tw._dim_per_head, &_type_zero, _p_d_c, _CType, _cur_step + 1, _cur_step + 1, _step_token_num * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_decself_launcher(_step_token_num * _tw._head_num, _cur_step + 1, _stream, _p_d_c); #ifdef DEBUG_RESULT print_vec(_p_d_c, "self attn corr(head): ", 5); print_vec(_p_d_c + _step_token_num * _tw._head_num * (_cur_step + 1) - 5, "self attn corr(tail): ", 5); #endif /* ---step 3. new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, 1, _cur_step + 1, &_type_one, _p_d_self_v_bgeem1[_layer_id], _AType, _tw._dim_per_head, _tw._max_step * _tw._dim_per_head, _p_d_c, _BType, _cur_step + 1, _cur_step + 1, &_type_zero, _p_d_query_buf1, _CType, _tw._dim_per_head, _tw._dim_per_head, _step_token_num * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_query_buf1, "self attn before ffn(head): ", 5); print_vec(_p_d_query_buf1 + _step_token_num * _tw._hidden_size - 5, "self attn before ffn(tail): ", 5); #endif /* ---step 4. 
new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_query_buf1, _BType, _tw._hidden_size, &_type_one, _p_d_cur_step_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_cur_step_query, "self attn out(head): ", 3); print_vec(_p_d_cur_step_query + _step_token_num * _tw._hidden_size - 3, "self attn out(tail): ", 3); #endif } /** Encode-Decoder attention */ template <OperationType OpType_> void MoeDecoder<OpType_>::encdec_attention() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_cur_step_query, _p_d_query_buf1, _p_d_dec_wei[_weight_offset + 6], _p_d_dec_wei[_weight_offset + 7], _p_d_dec_wei[_weight_offset + 11], _max_thread_per_block, _tw._is_post_ln); #ifdef DEBUG_RESULT print_vec(_p_d_query_buf1, "encdec attn ln(head): ", 5); print_vec(_p_d_query_buf1 + _step_token_num * _tw._hidden_size - 5, "encdec attn ln(tail): ", 5); #endif /* ---step 1. new_q = ori_q * q_wei + bias, reshape new_q for multi-head * gemm--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_wei[_weight_offset + 8], _AType, _tw._hidden_size, _p_d_query_buf1, _BType, _tw._hidden_size, &_type_zero, _p_d_query_buf2, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_arrange_encdec_q_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_query_buf2, _p_d_dec_wei[_weight_offset + 9], _p_d_query_buf1, _tw._beam_size, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _tw._beam_size, _tw._dim_per_head, &_atten_scaler, _p_d_encdec_k_bgeem[_layer_id], _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_query_buf1, _BType, _tw._dim_per_head, _tw._beam_size * _tw._dim_per_head, &_type_zero, _p_d_c, _CType, _batch_seq_len, _tw._beam_size * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_encdec_launcher<_DataType>( _batch_size, _tw._head_num * _tw._beam_size, _batch_seq_len, _stream, _p_d_c, _p_d_padding_mask); /* ---step 3. new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _tw._beam_size, _batch_seq_len, &_type_one, _p_d_encdec_v_bgeem[_layer_id], _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _tw._beam_size * _batch_seq_len, &_type_zero, _p_d_query_buf1, _CType, _tw._dim_per_head, _tw._beam_size * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_arrange_atten_output_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_query_buf1, _p_d_query_buf2, _tw._beam_size, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 4. 
new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_query_buf2, _BType, _tw._hidden_size, &_type_one, _p_d_cur_step_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void MoeDecoder<OpType_>::ffn_add_norm() { if (_tw._is_moe_layer_decoder[_layer_id]) { moe_fw(); ++_gate_weight_offset; } else { ffn(); } return; } template <OperationType OpType_> void MoeDecoder<OpType_>::ffn() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_cur_step_query, _p_d_query_buf1, _p_d_dec_wei[_weight_offset + 12], _p_d_dec_wei[_weight_offset + 13], _p_d_dec_wei[_weight_offset + 17], _max_thread_per_block, _tw._is_post_ln); #ifdef DEBUG_RESULT print_vec(_p_d_query_buf1, "ffn ln(head): ", 5); print_vec(_p_d_query_buf1 + _step_token_num * _tw._hidden_size - 5, "ffn ln(tail): ", 5); #endif /* ---step 1. first ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_wei[_weight_offset + 14], _AType, _tw._inner_size, _p_d_query_buf1, _BType, _tw._hidden_size, &_type_zero, _p_d_query_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (_tw._use_gelu) { ker_bias_gelu_launcher<_DataType>( _step_token_num, _max_thread_per_block, _stream, _p_d_query_buf2, _p_d_dec_wei[_weight_offset + 15], _tw._inner_size); } else { ker_bias_relu_launcher<_DataType>( _step_token_num, _max_thread_per_block, _stream, _p_d_query_buf2, _p_d_dec_wei[_weight_offset + 15], _tw._inner_size); } /* ---step 2. 
second ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _step_token_num, _tw._inner_size, &_type_one, _p_d_dec_wei[_weight_offset + 16], _AType, _tw._hidden_size, _p_d_query_buf2, _BType, _tw._inner_size, &_type_one, _p_d_cur_step_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void MoeDecoder<OpType_>::moe_fw() { ker_norm_layer_prepost_launcher<_DataType>( _step_token_num, _tw._hidden_size, _stream, _p_d_cur_step_query, _p_d_query_buf1, _p_d_dec_wei[_weight_offset + 12], _p_d_dec_wei[_weight_offset + 13], _max_thread_per_block, _tw._is_post_ln); CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._expert_num_decoder, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_gate_wei[_gate_weight_offset], _AType, _tw._expert_num_decoder, _p_d_query_buf1, _BType, _tw._hidden_size, &_type_zero, _p_d_gate, _CType, _tw._expert_num_decoder, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_softmax_topk_router_launcher<_DataType>( _step_token_num, _tw._expert_num_decoder, _max_step_token_num, _tw._moe_topk_decoder, _stream, _p_d_gate, _p_d_score_routed, _p_d_expert_id_routed); ker_reorder_tokens_launcher<_DataType>( _step_token_num, _tw._expert_num_decoder, _max_step_token_num, _tw._hidden_size, _max_thread_per_block, _stream, _p_d_query_buf1, _p_d_score_routed, _p_d_moe_input_buf); CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _step_token_num, _tw._hidden_size, &_type_one, _p_d_dec_wei[_weight_offset + 14], _AType, _tw._inner_size, _tw._hidden_size * _tw._inner_size, _p_d_moe_input_buf, _BType, _tw._hidden_size, _tw._hidden_size * _max_step_token_num, &_type_zero, _p_d_moe_inner_buf, _CType, _tw._inner_size, _tw._inner_size * _max_step_token_num, _tw._expert_num_decoder, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (_tw._use_gelu) { ker_strided_bias_gelu_launcher<_DataType>( _step_token_num, _tw._expert_num_decoder, _max_step_token_num, _tw._inner_size, _max_thread_per_block, _stream, _p_d_moe_inner_buf, _p_d_dec_wei[_weight_offset + 15]); } else { ker_strided_bias_relu_launcher<_DataType>( _step_token_num, _tw._expert_num_decoder, _max_step_token_num, _tw._inner_size, _max_thread_per_block, _stream, _p_d_moe_inner_buf, _p_d_dec_wei[_weight_offset + 15]); } CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _step_token_num, _tw._inner_size, &_type_one, _p_d_dec_wei[_weight_offset + 16], _AType, _tw._hidden_size, _tw._hidden_size * _tw._inner_size, _p_d_moe_inner_buf, _BType, _tw._inner_size, _tw._inner_size * _max_step_token_num, &_type_zero, _p_d_moe_input_buf, _CType, _tw._hidden_size, _tw._hidden_size * _max_step_token_num, _tw._expert_num_decoder, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_bias_redirect_residual_launcher<_DataType>( _tw._hidden_size, _max_step_token_num, _tw._moe_topk_decoder, _step_token_num, _max_thread_per_block, _stream, _p_d_moe_input_buf, _p_d_dec_wei[_weight_offset + 17], _p_d_score_routed, _p_d_expert_id_routed, _p_d_cur_step_query); } template <OperationType OpType_> bool MoeDecoder<OpType_>::sample() { CHECK_GPU_ERROR( cudaMemsetAsync(_p_d_sample_unfinished, 0, sizeof(int), _stream)); /* --- Sample new tokens from logits --- */ if (_tw._sampling_method == "topk") { ker_topk_sample_launcher<_DataType>( _batch_size, (_cur_step + 1), _tw._max_step, 1, _max_thread_per_block, _stream, _p_d_logit_buf, _p_d_trg_emb_wei[6], _p_d_alive_seq, 
_p_d_alive_seq_buf, _tw._trg_vocab_size, _tw._topk, _p_d_sample_unfinished, _p_d_curandstate, _tw._end_id); } else { ker_topp_sample_launcher<_DataType>( _batch_size, (_cur_step + 1), _tw._max_step, 1, _max_thread_per_block, _stream, _p_d_logit_buf, _p_d_trg_emb_wei[6], _p_d_alive_seq, _p_d_alive_seq_buf, _tw._trg_vocab_size, _tw._topp, _p_d_sample_unfinished, _p_d_curandstate, _tw._end_id); } #ifdef DEBUG_RESULT print_vec(_p_d_sample_unfinished, "unfinished flag", 1); for (int ii = 0; ii < _batch_size; ii++) { print_vec(_p_d_alive_seq + ii * _tw._max_step, "Batch token ids: ", _cur_step + 2); } #endif CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_sample_unfinished, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); return _h_unfinished == 1 ? false : true; } template <OperationType OpType_> bool MoeDecoder<OpType_>::beam_search() { /* step 1. logits bias and softmax, select rough topk candidate for every batch item, record the candidate's beam_id, vocab_id and probability */ update_new_seq_probs(); /* ---step 2. sort the candidate with their probability--- */ CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_can_num_batch, _p_d_can_num, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); if (_tw._diverse_lambda != 0) { if (_h_can_num_batch < _cub_sort_buffer_bytes / 160) { CHECK_GPU_ERROR(cub::DeviceRadixSort::SortPairsDescending( (float*)_p_d_logit_buf, _cub_sort_buffer_bytes, _p_d_can_score, _p_d_can_score, _p_d_can_idx, _p_d_can_idx, _h_can_num_batch, 0, sizeof(float) * 8, _stream)); } else { thrust::sort_by_key(thrust::cuda::par.on(_stream), _p_d_can_score, _p_d_can_score + _h_can_num_batch, _p_d_can_idx, thrust::greater<float>()); } ker_diverse_beam_search_launcher(_p_d_can_score, _p_d_can_idx, _p_d_can_num, _step_token_num, _max_thread_per_block, _stream, _tw._beam_size, _tw._diverse_lambda, _tw._trg_vocab_size); } thrust::sort_by_key(thrust::cuda::par.on(_stream), _p_d_can_score, _p_d_can_score + _h_can_num_batch, _p_d_can_idx, thrust::greater<float>()); #ifdef DEBUG_RESULT print_vec(_p_d_can_score, "can score", _h_can_num_batch); print_vec(_p_d_can_idx, "can idx", _h_can_num_batch); #endif /* step 3. refresh alive_seq, seq_probs, seq_score, num_finish_beam based on sorted candidate. Deciding whether early stop based on num_finish_beam */ CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_can_num, 0, sizeof(int), _stream)); ker_refresh_result<<<dim3(_batch_size, _tw._beam_size), _tw._max_step, 0, _stream>>>( _p_d_can_idx, _p_d_can_score, _p_d_can_num + 1, _p_d_alive_seq, _p_d_alive_seq_buf, _p_d_alive_seq_probs, _p_d_alive_seq_score, _p_d_can_num, _tw._trg_vocab_size, _cur_step, _h_length_norm[_cur_step], _tw._diverse_lambda, _tw._end_id); int* tmp = _p_d_alive_seq_buf; _p_d_alive_seq_buf = _p_d_alive_seq; _p_d_alive_seq = tmp; CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_can_num_batch, _p_d_can_num, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); #ifdef DEBUG_RESULT for (int ii = 0; ii < _batch_size; ii++) { for (int jj = 0; jj < _tw._beam_size; jj++) { print_vec(_p_d_alive_seq + (ii * _tw._beam_size + jj) * _tw._max_step, "Batch token ids: ", _cur_step + 2); print_vec(_p_d_alive_seq_probs + ii * _tw._beam_size + jj, "Batch probs: ", 1); print_vec(_p_d_alive_seq_score + ii * _tw._beam_size + jj, "Batch scores: ", 1); } } #endif if (_h_can_num_batch == _step_token_num) { #ifdef DEBUG_RESULT std::cout << "early stop beam search!" 
<< std::endl; #endif return true; } /* ---step 4. refresh cache: k, v for decoder self attention--- */ if (_cur_step > 0) { ker_refresh_cache_launcher<_DataType>( _tw._n_dec_layer * (_cur_step + 1), _step_token_num * 2, _max_thread_per_block, _stream, _p_d_can_num + 1, _p_d_can_idx, _p_d_self_k_bgeem1[0], _p_d_self_v_bgeem1[0], _p_d_self_k_bgeem2[0], _p_d_self_v_bgeem2[0], _layer_size_self_k, _tw._beam_size, _tw._dim_per_head, _tw._head_num, _tw._trg_vocab_size, _cur_step, _tw._max_step, _tw._diverse_lambda != 0, _tw._end_id); _DataType** ftmp = _p_d_self_k_bgeem2; _p_d_self_k_bgeem2 = _p_d_self_k_bgeem1; _p_d_self_k_bgeem1 = ftmp; ftmp = _p_d_self_v_bgeem2; _p_d_self_v_bgeem2 = _p_d_self_v_bgeem1; _p_d_self_v_bgeem1 = ftmp; } return false; } /** Logits bias and softmax. Select rough topk candidate for every batch item. Record the candidate's beam_id, vocab_id and probability */ template <OperationType OpType_> void MoeDecoder<OpType_>::update_new_seq_probs() { CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_can_num, 0, sizeof(int), _stream)); select_beam_rough_topk_launcher( _p_d_logit_buf, _p_d_trg_emb_wei[6], _p_d_alive_seq_probs, _p_d_alive_seq_score, _p_d_alive_seq, _p_d_can_idx, _p_d_can_score, _p_d_can_num, _tw._trg_vocab_size, _tw._max_step, _h_length_norm[_cur_step], _cur_step, _step_token_num, _max_thread_per_block, _stream, _tw._beam_size, _tw._diverse_lambda, _tw._end_id); thrust::exclusive_scan(thrust::cuda::par.on(_stream), _p_d_can_num + 1, _p_d_can_num + 1 + _step_token_num, _p_d_can_num + 1); return; } template <OperationType OpType_> bool MoeDecoder<OpType_>::topk_greedy_search() { _tw._diverse_lambda = 0; if (_cur_step == 0) { return beam_search(); } CHECK_GPU_ERROR( cudaMemsetAsync(_p_d_sample_unfinished, 0, sizeof(int), _stream)); /* --- Sample new tokens from logits --- */ ker_topk_sample_launcher<_DataType>( _step_token_num, (_cur_step + 1), _tw._max_step, 1, _max_thread_per_block, _stream, _p_d_logit_buf, _p_d_trg_emb_wei[6], _p_d_alive_seq, _p_d_alive_seq_buf, _tw._trg_vocab_size, 1, _p_d_sample_unfinished, _p_d_curandstate, _tw._end_id); #ifdef DEBUG_RESULT print_vec(_p_d_sample_unfinished, "unfinished flag", 1); for (int ii = 0; ii < _batch_size; ii++) { for (int jj = 0; jj < _tw._beam_size; jj++) { print_vec(_p_d_alive_seq + (ii * _tw._beam_size + jj) * _tw._max_step, "Batch token ids: ", _cur_step + 2); } } #endif CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_sample_unfinished, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); return _h_unfinished == 1 ? false : true; } template class MoeDecoder<OperationType::FP16>; template class MoeDecoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
namespace at { namespace native { namespace { #define MAX_THREADS 512 // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest3d_out_frame( const scalar_t* input, size_t dim_b, size_t dim_c, size_t src_dim_d, size_t src_dim_h, size_t src_dim_w, size_t dst_dim_d, size_t dst_dim_h, size_t dst_dim_w, scalar_t* output, float depth_scale, float height_scale, float width_scale) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w) return; int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w; int src_c_stride = src_dim_d * src_dim_h * src_dim_w; int c = (dst_idx / (dst_c_stride)) % dim_c; int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d; int src_z = nearest_neighbor_compute_source_index(depth_scale, dst_z, src_dim_d); int dst_y = (dst_idx / dst_dim_w) % dst_dim_h; int src_y = nearest_neighbor_compute_source_index(height_scale, dst_y, src_dim_h); int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_compute_source_index(width_scale, dst_x, src_dim_w); int src_idx = c * src_c_stride + src_z * src_dim_h * src_dim_w + src_y * src_dim_w + src_x; for (int b = 0; b < dim_b; b++) { output[dst_idx] = input[src_idx]; src_idx += dim_c * src_c_stride; dst_idx += dim_c * dst_c_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] // Backward operation template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest3d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_d, size_t src_dim_h, size_t src_dim_w, size_t dst_dim_d, size_t dst_dim_h, size_t dst_dim_w, scalar_t* grad_i, float depth_scale, float height_scale, float width_scale) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w) return; int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w; int src_c_stride = src_dim_d * src_dim_h * src_dim_w; int c = (dst_idx / (dst_c_stride)) % dim_c; int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d; // note that we do not want to clamp src_z to src_dim_z, since we might // intentionally want to skip in case of scale_factor < 1.0 int src_z = nearest_neighbor_bw_compute_source_index(depth_scale, dst_z, src_dim_d); int src_z_up = nearest_neighbor_bw_compute_source_index(depth_scale, dst_z+1, src_dim_d); int dst_y = (dst_idx / dst_dim_w) % dst_dim_h; // note that we do not want to clamp src_y to src_dim_y, since we might // intentionally want to skip in case of scale_factor < 1.0 int src_y = nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h); int src_y_up = nearest_neighbor_bw_compute_source_index(height_scale, dst_y+1, src_dim_h); int dst_x = dst_idx % dst_dim_w; // note that we do not want to clamp src_x to src_dim_w, since we might // intentionally want to skip in case of scale_factor < 1.0 int src_x = nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w); int src_x_up = nearest_neighbor_bw_compute_source_index(width_scale, dst_x+1, src_dim_w); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; for (int z = src_z; z < src_z_up; z++) { for (int y = src_y; y < src_y_up; y++) { for (int x = src_x; x < src_x_up; x++) { int src_idx = b * dim_c * src_c_stride + c * src_c_stride + z * src_dim_h * src_dim_w + y * src_dim_w + x; grad += grad_o[src_idx]; } } } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_c_stride; } } static void 
upsample_nearest3d_out_cuda_template( const Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); // TODO: remove this when the cuda kernel is updated to support the channels_last memory format. // This is a temporary hack to prevent a silence correctness issue when calling this kernel // with tensors in channels_last format. auto output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options()); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input_.size(0); int channels = input_.size(1); int input_depth = input_.size(2); int input_height = input_.size(3); int input_width = input_.size(4); Tensor input = input_.contiguous(); if (input.numel() == 0) { return; } // upsample_nearest3d meta call makes sure `nbatch != 0` unsigned int n = output.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{ceil_div(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(output.numel() <= std::numeric_limits<int32_t>::max()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte,input.scalar_type(), "upsample_nearest3d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output_c.data_ptr<scalar_t>(); const float depth_scale = compute_scales_value<float>(scales_d, input_depth, output_depth); const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height); const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width); upsample_nearest3d_out_frame<scalar_t><<<gdim, bdim, 0, stream>>>( idata, nbatch, channels, input_depth, input_height, input_width, output_depth, output_height, output_width, odata, depth_scale, height_scale, width_scale); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); if (!output.is_contiguous()) { output.copy_(output_c); } } static void upsample_nearest3d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( __func__, {grad_output_arg, grad_input_arg}); int output_depth = output_size[0]; int output_height = output_size[1]; int output_width = output_size[2]; int nbatch = input_size[0]; int channels = input_size[1]; int input_depth = input_size[2]; int input_height = input_size[3]; int input_width = input_size[4]; Tensor grad_output = grad_output_.contiguous(); if (grad_input.numel() == 0) { return; } // upsample_nearest3d meta call makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{ceil_div(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 
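// Editor's note (hypothetical sizes, illustration only): with input_size = {2, 3, 8, 16, 16},
// grad_input has 2*3*8*16*16 = 12288 elements, so n = 12288 / 2 = 6144 work items
// (the kernel loops over the batch dimension internally), bdim.x = min(1024, MAX_THREADS) = 512
// on a device reporting maxThreadsPerBlock = 1024, and gdim.x = ceil_div(6144, 512) = 12 blocks.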
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest3d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); float depth_scale = compute_scales_value_backwards<float>(scales_d, output_depth, input_depth); float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height); float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width); upsample_nearest3d_backward_out_frame<scalar_t, accscalar_t> <<<gdim, bdim, 0, stream>>>( odata, nbatch, channels, output_depth, output_height, output_width, input_depth, input_height, input_width, idata, depth_scale, height_scale, width_scale); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_nearest3d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, const Tensor& output) { upsample_nearest3d_out_cuda_template(output, input, output_size, scales_d, scales_h, scales_w); } TORCH_IMPL_FUNC(upsample_nearest3d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, const Tensor& grad_input) { upsample_nearest3d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_d, scales_h, scales_w); } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_nearest3d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, c10::optional<ArrayRef<double>> scale_factors) { auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_d = get_scale_value(scale_factors, 0); auto scale_h = get_scale_value(scale_factors, 1); auto scale_w = get_scale_value(scale_factors, 2); return at::upsample_nearest3d(input, osize, scale_d, scale_h, scale_w); } // when structured kernels can handle QuantizedCPU, update these overloads to be CompositeExplicitAutograd Tensor upsample_nearest3d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors) { auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_d = get_scale_value(scale_factors, 0); auto scale_h = get_scale_value(scale_factors, 1); auto scale_w = get_scale_value(scale_factors, 2); return at::upsample_nearest3d_backward(grad_output, osize, input_size, scale_d, scale_h, scale_w); } } // namespace native } // namespace at
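/*
 * Editor's sketch (not the ATen implementation): both kernels above map coordinates between
 * the input and output grids via nearest_neighbor_compute_source_index /
 * nearest_neighbor_bw_compute_source_index, which are defined in ATen's UpSample headers.
 * The function below, with an invented name, only illustrates the basic "scale the
 * destination index and clamp into range" idea; the real helpers also handle how `scale`
 * is derived from the optional scale factors and the backward window semantics.
 */
#include <algorithm>
#include <cmath>

inline int nearest_source_index_sketch(float scale, int dst_index, int src_size) {
  const int src_index = static_cast<int>(std::floor(dst_index * scale));
  return std::min(src_index, src_size - 1);   // clamp so the last destination rows stay in range
}
// In the backward kernel, the indices computed for dst and dst+1 bound the half-open window
// of grad_output elements whose nearest source is this grad_input element; the triple loop
// in upsample_nearest3d_backward_out_frame accumulates the gradient over that window.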
#include <thrust/copy.h> #include <thrust/fill.h> #include <amg_solver.h> #include "amg_signal.h" #include <matrix_io.h> #include "amgx_c.h" #include "amgxP_c.h" #include "../../core/include/version.h" #include "distributed/distributed_manager.h" #include "distributed/comms_mpi_gpudirect.h" #include "distributed/comms_mpi_hostbuffer_stream.h" #include "distributed/distributed_arranger.h" #include "distributed/distributed_io.h" #include "resources.h" #include "matrix_distribution.h" #include <amgx_timer.h> #include "util.h" #include "reorder_partition.h" #include <algorithm> #include <solvers/solver.h> #include <matrix.h> #include <vector.h> #include <thrust_wrapper.h> #include "amgx_types/util.h" #include "amgx_types/rand.h" #include "amgx_c_wrappers.inl" #include "amgx_c_common.h" #include "multiply.h" namespace amgx { AMGX_RC getResourcesFromSolverHandle(AMGX_solver_handle slv, Resources **resources) { AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_solver_handle>(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ *resources = get_mode_object_from<CASE,AMG_Solver,AMGX_solver_handle>(slv)->getResources();\ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, NULL); } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, NULL) return AMGX_RC_OK; } AMGX_RC getResourcesFromMatrixHandle(AMGX_matrix_handle mtx, Resources **resources) { AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ *resources = get_mode_object_from<CASE,Matrix,AMGX_matrix_handle>(mtx)->getResources(); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, NULL); } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, NULL) return AMGX_RC_OK; } AMGX_RC getResourcesFromVectorHandle(AMGX_vector_handle vec, Resources **resources) { AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ *resources = get_mode_object_from<CASE,Vector,AMGX_vector_handle>(vec)->getResources(); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, NULL); } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, NULL) return AMGX_RC_OK; } } namespace amgx { //function object (functor) for thrust calls (it is a binary subtraction operator) template<typename T> class subtract_op { public: subtract_op() {} __host__ __device__ T operator()(const T &x, const T &y) const { return y - x; } }; //function object (functor) for thrust calls (it is a unary operator to add a constant) template<typename T> class add_constant_op { const T c; public: add_constant_op(T _c) : c(_c) {} __host__ __device__ T operator()(const T &x) const { return x + c; } }; #ifdef AMGX_WITH_MPI template<class TConfig> int create_part_offsets(int &root, int &rank, MPI_Comm &mpicm, Matrix<TConfig> *nv_mtx) { /* WARNING: Notice that part_offsets_h & part_offsets have type int64_t. Therefore we need to use MPI_INT64_T (or MPI_LONG_LONG) in MPI_Allgather. Also, we need the send & recv buffers to be of the same type, therefore we will create a temporary variable n64 of the correct type below. 
*/ //create TConfig64, which is the same as TConfig, but with index type being int64_t typedef typename TConfig::template setVecPrec<AMGX_vecInt64>::Type TConfig64; typedef typename TConfig64::VecPrec t_VecPrec; //t_VecPrec = int64_t int n, offset, mpist; int nranks = 0; //nv_mtx->manager->get_num_partitions(); if (nv_mtx->manager != NULL) { //some initializations nv_mtx->getOffsetAndSizeForView(OWNED, &offset, &n); MPI_Comm_size(mpicm, &nranks); nv_mtx->manager->part_offsets_h.resize(nranks + 1); //printf("[%d,%d]: n=%d\n",rank,nranks,n); //gather the number of rows per partition on the host (on all ranks) t_VecPrec n64 = n; nv_mtx->manager->part_offsets_h[0] = 0; //first element is zero (the # of rows is gathered afterwards) if (typeid(t_VecPrec) == typeid(int64_t)) { mpist = MPI_Allgather(&n64, 1, MPI_INT64_T, nv_mtx->manager->part_offsets_h.raw() + 1, 1, MPI_INT64_T, mpicm); } else { FatalError("MPI_Gatherv of the vector has failed - incorrect vector data type", AMGX_ERR_CORE); } if (mpist != MPI_SUCCESS) { FatalError("MPI_Gatherv of the vector has failed - detected incorrect MPI return code", AMGX_ERR_CORE); } //perform a prefix sum thrust::inclusive_scan(nv_mtx->manager->part_offsets_h.begin(), nv_mtx->manager->part_offsets_h.end(), nv_mtx->manager->part_offsets_h.begin()); //create the corresponding array on device (this is important) nv_mtx->manager->part_offsets.resize(nranks + 1); thrust::copy(nv_mtx->manager->part_offsets_h.begin(), nv_mtx->manager->part_offsets_h.end(), nv_mtx->manager->part_offsets.begin()); } return 0; } template<class TConfig> int construct_global_matrix(int &root, int &rank, Matrix<TConfig> *nv_mtx, Matrix<TConfig> &gA, int &partition_vector_size, const int *partition_vector) { typedef typename TConfig::IndPrec t_IndPrec; typedef typename TConfig::MatPrec t_MatPrec; int n, nnz, offset, l, k, i; int start, end, shift; int mpist; MPI_Comm mpicm; //MPI call parameters t_IndPrec *rc_ptr, *di_ptr; t_IndPrec *hli_ptr, *hgi_ptr; t_MatPrec *hlv_ptr, *hgv_ptr; thrust::host_vector<t_IndPrec> rc; thrust::host_vector<t_IndPrec> di; //unpacked local matrix on the device and host device_vector_alloc<t_IndPrec> Bp; device_vector_alloc<t_IndPrec> Bi; device_vector_alloc<t_MatPrec> Bv; thrust::host_vector<t_IndPrec> hBp; thrust::host_vector<t_IndPrec> hBi; thrust::host_vector<t_MatPrec> hBv; //constructed global matrix on the host thrust::host_vector<t_IndPrec> hAp; thrust::host_vector<t_IndPrec> hAi; thrust::host_vector<t_MatPrec> hAv; //WARNING: this routine currently supports matrix only with block size =1 (it can be generalized in the future, though) //initialize the defaults root = 0; rank = 0; mpist = MPI_SUCCESS; mpicm = MPI_COMM_WORLD; if (nv_mtx->manager != NULL) { // some initializations rank = nv_mtx->manager->global_id(); if (nv_mtx->manager->getComms() != NULL) { mpicm = *(nv_mtx->getResources()->getMpiComm()); } nv_mtx->getOffsetAndSizeForView(OWNED, &offset, &n); nv_mtx->getNnzForView(OWNED, &nnz); if (nv_mtx->manager->part_offsets_h.size() == 0) // create part_offsets_h & part_offsets { create_part_offsets(root, rank, mpicm, nv_mtx); // (if needed for aggregation path) } l = nv_mtx->manager->part_offsets_h.size() - 1; // number of partitions k = nv_mtx->manager->part_offsets_h[l]; // global number of rows //some allocations/resizing Bp.resize(n + 1); Bi.resize(nnz); Bv.resize(nnz); hBp.resize(n + 1); hBi.resize(nnz); hBv.resize(nnz); if (rank == root) { hAp.resize(k + 1); // extra +1 is needed because row_offsets have one extra element at the end 
//hAi.resize(global nnz); //not known yet //hAv.resize(global nnz); //not known yet rc.resize(l); di.resize(l); } cudaCheckError(); //--- unpack the matrix --- nv_mtx->manager->unpack_partition(thrust::raw_pointer_cast(Bp.data()), thrust::raw_pointer_cast(Bi.data()), thrust::raw_pointer_cast(Bv.data())); cudaCheckError(); //copy to host (should be able to optimize this out later on) hBp = Bp; hBi = Bi; hBv = Bv; cudaCheckError(); // --- construct global matrix --- //Step 1. construct global row pointers //compute recvcounts and displacements for MPI_Gatherv if (rank == root) { thrust::transform(nv_mtx->manager->part_offsets_h.begin(), nv_mtx->manager->part_offsets_h.end() - 1, nv_mtx->manager->part_offsets_h.begin() + 1, rc.begin(), subtract_op<t_IndPrec>()); cudaCheckError(); //thrust::copy(nv_mtx->manager->part_offsets_h.begin(),nv_mtx->manager->part_offsets_h.end(),di.begin()); thrust::transform(nv_mtx->manager->part_offsets_h.begin(), nv_mtx->manager->part_offsets_h.begin() + l, di.begin(), add_constant_op<t_IndPrec>(1)); cudaCheckError(); } //alias raw pointers to thrust vector data (see thrust example unwrap_pointer for details) rc_ptr = thrust::raw_pointer_cast(rc.data()); di_ptr = thrust::raw_pointer_cast(di.data()); hli_ptr = thrust::raw_pointer_cast(hBp.data()); hgi_ptr = thrust::raw_pointer_cast(hAp.data()); cudaCheckError(); //gather (on the host) if (typeid(t_IndPrec) == typeid(int)) { mpist = MPI_Gatherv(hli_ptr + 1, n, MPI_INT, hgi_ptr, rc_ptr, di_ptr, MPI_INT, root, mpicm); } else { FatalError("MPI_Gatherv of the vector has failed - incorrect vector data type", AMGX_ERR_CORE); } if (mpist != MPI_SUCCESS) { FatalError("MPI_Gatherv of the vector has failed - detected incorrect MPI return code", AMGX_ERR_CORE); } //Step 2. adjust row pointers, construct global column indices and values (recvcounts and displacements were computed above) if (rank == root) { //adjust global row pointers and setup the recvcounts & displacements for subsequent MPI calls for (i = 0; i < l; i++) { start = nv_mtx->manager->part_offsets_h[i]; end = nv_mtx->manager->part_offsets_h[i + 1]; shift = hAp[start]; //if (rank == 0) printf("# %d %d %d\n",start,end,shift); thrust::transform(hAp.begin() + start + 1, hAp.begin() + end + 1, hAp.begin() + start + 1, add_constant_op<t_IndPrec>(shift)); cudaCheckError(); di[i] = shift; rc[i] = hAp[end] - hAp[start]; //if (rank == 0) printf("& %d %d %d\n",hAp[start],hAp[end],hAp[end]-hAp[start]); } //some allocations/resizing hAi.resize(hAp[k]); //now we know global nnz and can allocate storage hAv.resize(hAp[k]); //now we know global nnz and can allocate storage } //alias raw pointers to thrust vector data (see thrust example unwrap_pointer for details) rc_ptr = thrust::raw_pointer_cast(rc.data()); di_ptr = thrust::raw_pointer_cast(di.data()); hli_ptr = thrust::raw_pointer_cast(hBi.data()); hgi_ptr = thrust::raw_pointer_cast(hAi.data()); hlv_ptr = thrust::raw_pointer_cast(hBv.data()); hgv_ptr = thrust::raw_pointer_cast(hAv.data()); cudaCheckError(); //gather (on the host) //columns indices if (typeid(t_IndPrec) == typeid(int)) { mpist = MPI_Gatherv(hli_ptr, nnz, MPI_INT, hgi_ptr, rc_ptr, di_ptr, MPI_INT, root, mpicm); } else { FatalError("MPI_Gatherv of the vector has failed - incorrect vector data type", AMGX_ERR_CORE); } if (mpist != MPI_SUCCESS) { FatalError("MPI_Gatherv of the vector has failed - detected incorrect MPI return code", AMGX_ERR_CORE); } //values if (typeid(t_MatPrec) == typeid(float)) { mpist = MPI_Gatherv(hlv_ptr, nnz, MPI_FLOAT, hgv_ptr, rc_ptr, 
di_ptr, MPI_FLOAT, root, mpicm); } else if (typeid(t_MatPrec) == typeid(double)) { mpist = MPI_Gatherv(hlv_ptr, nnz, MPI_DOUBLE, hgv_ptr, rc_ptr, di_ptr, MPI_DOUBLE, root, mpicm); } else { FatalError("MPI_Gatherv of the vector has failed - incorrect vector data type", AMGX_ERR_CORE); } if (mpist != MPI_SUCCESS) { FatalError("MPI_Gatherv of the vector has failed - detected incorrect MPI return code", AMGX_ERR_CORE); } if (rank == root) { if (partition_vector != NULL) { //sanity check if (partition_vector_size != (hAp.size() - 1)) { FatalError("partition_vector_size does not match the global vector size", AMGX_ERR_CORE); } //construct a map (based on partition vector) int i, j, nranks; MPI_Comm_size(mpicm, &nranks); thrust::host_vector<t_IndPrec> c(nranks, 0); thrust::host_vector<t_IndPrec> map(hAp.size() - 1); thrust::host_vector<t_IndPrec> imap(hAp.size() - 1); for (i = 0; i < (hAp.size() - 1); i++) { j = partition_vector[i]; map[i] = nv_mtx->manager->part_offsets_h[j] + c[j]; imap[map[i]] = i; c[j]++; } //permute rows according to map during copy (host -> host or device depending on vector type) hBp.resize(hAp.size()); hBi.resize(hAi.size()); hBv.resize(hAv.size()); reorder_partition_host<t_IndPrec, t_MatPrec, true, true> (hAp.size() - 1, hAi.size(), thrust::raw_pointer_cast(hAp.data()), thrust::raw_pointer_cast(hAi.data()), thrust::raw_pointer_cast(hAv.data()), thrust::raw_pointer_cast(hBp.data()), thrust::raw_pointer_cast(hBi.data()), thrust::raw_pointer_cast(hBv.data()), imap.size(), thrust::raw_pointer_cast(imap.data())); cudaCheckError(); gA.addProps(CSR); //need to add this property, so that row_offsets, col_indices & values are resized appropriately in the next call gA.resize(hBp.size() - 1, hBp.size() - 1, hBi.size()); thrust::copy(hBp.begin(), hBp.end(), gA.row_offsets.begin()); thrust::copy(hBi.begin(), hBi.end(), gA.col_indices.begin()); thrust::copy(hBv.begin(), hBv.end(), gA.values.begin()); cudaCheckError(); } else { //copy (host -> host or device depending on matrix type) gA.addProps(CSR); //need to add this property, so that row_offsets, col_indices & values are resized appropriately in the next call gA.resize(hAp.size() - 1, hAp.size() - 1, hAi.size()); thrust::copy(hAp.begin(), hAp.end(), gA.row_offsets.begin()); thrust::copy(hAi.begin(), hAi.end(), gA.col_indices.begin()); thrust::copy(hAv.begin(), hAv.end(), gA.values.begin()); cudaCheckError(); } } } else { /* ASSUMPTION: when manager has not been allocated you are running on a single rank */ gA.addProps(CSR); //need to add this property, so that row_offsets, col_indices & values are resized appropriately in the next call gA.resize(nv_mtx->row_offsets.size() - 1, nv_mtx->row_offsets.size() - 1, nv_mtx->col_indices.size()); thrust::copy(nv_mtx->row_offsets.begin(), nv_mtx->row_offsets.end(), gA.row_offsets.begin()); thrust::copy(nv_mtx->col_indices.begin(), nv_mtx->col_indices.end(), gA.col_indices.begin()); thrust::copy(nv_mtx->values.begin(), nv_mtx->values.end(), gA.values.begin()); cudaCheckError(); } return 0; } template<class TConfig> int construct_global_vector(int &root, int &rank, Matrix<TConfig> *nv_mtx, Vector<TConfig> *nv_vec, Vector<TConfig> &gvec, int &partition_vector_size, const int *partition_vector) { typedef typename TConfig::IndPrec t_IndPrec; typedef typename TConfig::VecPrec t_VecPrec; int n, nnz, offset, l; int mpist; MPI_Comm mpicm; //MPI call parameters t_IndPrec *rc_ptr, *di_ptr; t_VecPrec *hv_ptr, *hg_ptr; thrust::host_vector<t_IndPrec> rc; thrust::host_vector<t_IndPrec> di; //unreordered 
local vector on the host thrust::host_vector<t_VecPrec> hv; //constructed global vector on the host thrust::host_vector<t_VecPrec> hg; //WARNING: this routine currently supports vectors only with block size =1 (it can be generalized in the future, though) //initialize the defaults root = 0; rank = 0; mpist = MPI_SUCCESS; mpicm = MPI_COMM_WORLD; if (nv_mtx->manager != NULL) { // some initializations rank = nv_mtx->manager->global_id(); if (nv_mtx->manager->getComms() != NULL) { mpicm = *(nv_mtx->getResources()->getMpiComm()); } nv_mtx->getOffsetAndSizeForView(OWNED, &offset, &n); nv_mtx->getNnzForView(OWNED, &nnz); if (nv_mtx->manager->part_offsets_h.size() == 0) // create part_offsets_h & part_offsets { create_part_offsets(root, rank, mpicm, nv_mtx); // (if needed for aggregation path) } l = nv_mtx->manager->part_offsets_h.size() - 1; // number of partitions //some allocations/resizing hv.resize(nv_vec->size()); // host copy of nv_vec if (rank == root) { hg.resize(nv_mtx->manager->part_offsets_h[l]); // host copy of gvec rc.resize(l); di.resize(l); } cudaCheckError(); //--- unreorder the vector back (just like you did with the matrix, but only need to undo the interior-boundary reordering, because others do not apply) --- //Approach 1: just copy the vector (host or device depending on vector type -> host) //thrust::copy(nv_vec->begin(),nv_vec->end(),hv.begin()); //Approach 2: unreorder and copy the vector thrust::copy(thrust::make_permutation_iterator(nv_vec->begin(), nv_vec->getManager()->inverse_renumbering.begin() ), thrust::make_permutation_iterator(nv_vec->begin(), nv_vec->getManager()->inverse_renumbering.begin() + n), hv.begin()); cudaCheckError(); // --- construct global vector (rhs/sol) --- //compute recvcounts and displacements for MPI_Gatherv if (rank == root) { thrust::transform(nv_mtx->manager->part_offsets_h.begin(), nv_mtx->manager->part_offsets_h.end() - 1, nv_mtx->manager->part_offsets_h.begin() + 1, rc.begin(), subtract_op<t_IndPrec>()); cudaCheckError(); thrust::copy(nv_mtx->manager->part_offsets_h.begin(), nv_mtx->manager->part_offsets_h.begin() + l, di.begin()); cudaCheckError(); } //alias raw pointers to thrust vector data (see thrust example unwrap_pointer for details) rc_ptr = thrust::raw_pointer_cast(rc.data()); di_ptr = thrust::raw_pointer_cast(di.data()); hv_ptr = thrust::raw_pointer_cast(hv.data()); hg_ptr = thrust::raw_pointer_cast(hg.data()); cudaCheckError(); //gather (on the host) if (typeid(t_VecPrec) == typeid(float)) { mpist = MPI_Gatherv(hv_ptr, n, MPI_FLOAT, hg_ptr, rc_ptr, di_ptr, MPI_FLOAT, root, mpicm); } else if (typeid(t_VecPrec) == typeid(double)) { mpist = MPI_Gatherv(hv_ptr, n, MPI_DOUBLE, hg_ptr, rc_ptr, di_ptr, MPI_DOUBLE, root, mpicm); } else { FatalError("MPI_Gatherv of the vector has failed - incorrect vector data type", AMGX_ERR_CORE); } if (mpist != MPI_SUCCESS) { FatalError("MPI_Gatherv of the vector has failed - detected incorrect MPI return code", AMGX_ERR_CORE); } if (rank == root) { if (partition_vector != NULL) { //sanity check if (partition_vector_size != hg.size()) { FatalError("partition_vector_size does not match the global vector size", AMGX_ERR_CORE); } //construct a map (based on partition vector) int i, j, nranks; MPI_Comm_size(mpicm, &nranks); thrust::host_vector<t_IndPrec> c(nranks, 0); thrust::host_vector<t_IndPrec> map(hg.size()); thrust::host_vector<t_IndPrec> imap(hg.size()); for (i = 0; i < hg.size(); i++) { j = partition_vector[i]; map[i] = nv_mtx->manager->part_offsets_h[j] + c[j]; imap[map[i]] = i; c[j]++; } 
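//Illustrative worked example (added for clarity; values are hypothetical):
//for partition_vector = {1,0,1,0} and part_offsets_h = {0,2,4} the loop above yields
//  map  = {2,0,3,1}   (map[i] = part_offsets_h[j] + running count of rows already seen
//                      for partition j, i.e. row i's slot in a rank-blocked ordering)
//  imap = {1,3,0,2}   (the inverse permutation of map)
//The copy below gathers through imap: dereferencing the k-th element of
//  thrust::make_permutation_iterator(hg.begin(), imap.begin())
//reads hg[imap[k]], so gvec[k] = hg[imap[k]] for k = 0..hg.size()-1.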
//permute according to map during copy (host -> host or device depending on vector type) gvec.resize(hg.size()); thrust::copy(thrust::make_permutation_iterator(hg.begin(), imap.begin()), thrust::make_permutation_iterator(hg.begin(), imap.end()), gvec.begin()); cudaCheckError(); } else { //copy (host -> host or device depending on vector type) gvec.resize(hg.size()); thrust::copy(hg.begin(), hg.end(), gvec.begin()); cudaCheckError(); } } } else { /* ASSUMPTION: when manager has not been allocated you are running on a single rank */ gvec.resize(nv_vec->size()); thrust::copy(nv_vec->begin(), nv_vec->end(), gvec.begin()); cudaCheckError(); } return 0; } #endif typedef CWrapHandle<AMGX_config_handle, AMG_Configuration> ConfigW; typedef CWrapHandle<AMGX_resources_handle, Resources> ResourceW; typedef CWrapHandle<AMGX_distribution_handle, MatrixDistribution> MatrixDistributionW; namespace { template<AMGX_Mode CASE, template<typename> class SolverType, template<typename> class MatrixType> inline AMGX_ERROR set_solver_with(AMGX_solver_handle slv, AMGX_matrix_handle mtx, Resources *resources, AMGX_ERROR (SolverType<typename TemplateMode<CASE>::Type>::*memf)(MatrixType<typename TemplateMode<CASE>::Type> &)) { typedef SolverType<typename TemplateMode<CASE>::Type> SolverLetterT; typedef CWrapHandle<AMGX_solver_handle, SolverLetterT> SolverW; typedef MatrixType<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); SolverW wrapSolver(slv); SolverLetterT &solver = *wrapSolver.wrapped(); if (wrapA.mode() != wrapSolver.mode() ) { FatalError("Error: mismatch between Matrix mode and Solver Mode.\n", AMGX_ERR_BAD_PARAMETERS); } if (A.getResources() != solver.getResources()) { FatalError("Error: matrix and solver use different resources object, exiting", AMGX_ERR_BAD_PARAMETERS); } cudaSetDevice(solver.getResources()->getDevice(0)); return (solver.*memf)(A); } template<AMGX_Mode CASE, template<typename> class SolverType, template<typename> class MatrixType> inline AMGX_ERROR set_solver_with_shared(AMGX_solver_handle slv, AMGX_matrix_handle mtx, Resources *resources, AMGX_ERROR (SolverType<typename TemplateMode<CASE>::Type>::*memf)(std::shared_ptr<MatrixType<typename TemplateMode<CASE>::Type>>)) { typedef SolverType<typename TemplateMode<CASE>::Type> SolverLetterT; typedef CWrapHandle<AMGX_solver_handle, SolverLetterT> SolverW; typedef MatrixType<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); SolverW wrapSolver(slv); SolverLetterT &solver = *wrapSolver.wrapped(); if (wrapA.mode() != wrapSolver.mode() ) { FatalError("Error: mismatch between Matrix mode and Solver Mode.\n", AMGX_ERR_BAD_PARAMETERS); } if (A.getResources() != solver.getResources()) { FatalError("Error: matrix and solver use different resources object, exiting", AMGX_ERR_BAD_PARAMETERS); } cudaSetDevice(solver.getResources()->getDevice(0)); return (solver.*memf)(wrapA.wrapped()); } template<AMGX_Mode CASE, template<typename> class SolverType, template<typename> class VectorType> inline AMGX_ERROR solve_with(AMGX_solver_handle slv, AMGX_vector_handle rhs, AMGX_vector_handle sol, Resources *resources, bool xIsZero = false) { typedef SolverType<typename TemplateMode<CASE>::Type> SolverLetterT; typedef CWrapHandle<AMGX_solver_handle, SolverLetterT> SolverW; typedef VectorType<typename TemplateMode<CASE>::Type> 
VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; SolverW wrapSolver(slv); SolverLetterT &solver = *wrapSolver.wrapped(); //AMGX_STATUS& slv_stat = wrapSolver.last_solve_status(slv); VectorW wrapRhs(rhs); VectorLetterT &b = *wrapRhs.wrapped(); VectorW wrapSol(sol); VectorLetterT &x = *wrapSol.wrapped(); if (wrapRhs.mode() != wrapSolver.mode()) { FatalError("Error: mismatch between RHS mode and Solver Mode.\n", AMGX_ERR_BAD_PARAMETERS); } if (wrapRhs.mode() != wrapSol.mode()) { FatalError("Error: mismatch between RHS mode and Sol Mode.\n", AMGX_ERR_BAD_PARAMETERS); } if ((b.getResources() != solver.getResources()) || (x.getResources() != solver.getResources())) { FatalError("Error: Inconsistency between solver and rhs/sol resources object, exiting", AMGX_ERR_BAD_PARAMETERS); } cudaSetDevice(solver.getResources()->getDevice(0)); AMGX_ERROR ret = solver.solve(b, x, wrapSolver.last_solve_status(), xIsZero); return ret; } template<AMGX_Mode CASE, template<typename> class MatrixType, template<typename> class VectorType> inline AMGX_ERROR matrix_vector_multiply(AMGX_matrix_handle mtx, AMGX_vector_handle x, AMGX_vector_handle rhs, Resources *resources) { typedef MatrixType<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); typedef VectorType<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; //AMGX_STATUS& slv_stat = wrapSolver.last_solve_status(slv); VectorW wrapRhs(rhs); VectorLetterT &v_rhs = *wrapRhs.wrapped(); VectorW wrapX(x); VectorLetterT &v_x = *wrapX.wrapped(); if (wrapX.mode() != wrapA.mode()) { FatalError("Error: mismatch between vector x mode and matrix mode.\n", AMGX_ERR_BAD_PARAMETERS); } if (wrapX.mode() != wrapRhs.mode()) { FatalError("Error: mismatch between vector y mode and vector x mode.\n", AMGX_ERR_BAD_PARAMETERS); } if ((A.getResources() != v_rhs.getResources()) || (A.getResources() != v_x.getResources())) { FatalError("Error: Inconsistency between matrix and vectors resources object, exiting", AMGX_ERR_BAD_PARAMETERS); } cudaSetDevice(resources->getDevice(0)); // latency hiding disable /*if (A.getManager() != NULL) { A.manager->exchange_halo_wait(v_x, v_x.tag); v_x.dirtybit = 0; }*/ multiply(A, v_x, v_rhs); return AMGX_OK; } template<AMGX_Mode CASE, template<typename> class MatrixType, template<typename> class VectorType, template<typename> class SolverType> inline AMGX_ERROR solver_calculate_residual_norm( AMGX_solver_handle slv, AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle x, Resources *resources, void *norm_data) { typedef MatrixType<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef VectorType<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef SolverType<typename TemplateMode<CASE>::Type> SolverLetterT; typedef CWrapHandle<AMGX_solver_handle, SolverLetterT> SolverW; SolverW wrapSolver(slv); SolverLetterT &solver = *wrapSolver.wrapped(); MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); VectorW wrapRhs(rhs); VectorLetterT &v_rhs = *wrapRhs.wrapped(); VectorW wrapX(x); VectorLetterT &v_x = *wrapX.wrapped(); if (wrapX.mode() != wrapA.mode()) { FatalError("Error: mismatch between vector x mode and matrix mode.\n", AMGX_ERR_BAD_PARAMETERS); } if (wrapX.mode() != wrapRhs.mode()) { FatalError("Error: 
mismatch between vector y mode and vector x mode.\n", AMGX_ERR_BAD_PARAMETERS); } if ((A.getResources() != v_rhs.getResources()) || (A.getResources() != v_x.getResources())) { FatalError("Error: Inconsistency between matrix and vectors resources object, exiting", AMGX_ERR_BAD_PARAMETERS); } cudaSetDevice(resources->getDevice(0)); solver.getSolverObject()->compute_residual_norm_external(A, v_rhs, v_x, (typename amgx::types::PODTypes<typename VectorLetterT::value_type>::type *)norm_data); return AMGX_OK; } template<AMGX_Mode CASE> inline AMGX_RC matrix_upload_all(AMGX_matrix_handle mtx, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag_data, Resources *resources) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); if (!wrapA.is_valid() || n < 1 || nnz < 0 || block_dimx < 1 || block_dimy < 1) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) //FatalError("Error: Failure in matrix_upload_all().\n", AMGX_ERR_BAD_PARAMETERS); typedef typename MatPrecisionMap<AMGX_GET_MODE_VAL(AMGX_MatPrecision, CASE)>::Type ValueType; A.set_initialized(0); cudaSetDevice(A.getResources()->getDevice(0)); A.addProps(CSR); A.setColsReorderedByColor(false); A.delProps(COO); A.delProps(DIAG); if (diag_data) { A.addProps(DIAG); } /*If manager doesn't exist (single GPU), then upload matrix, otherwise call manager*/ if (A.manager == NULL) { int _t = A.resize(n, n, nnz, block_dimx, block_dimy); cudaMemcpy(A.row_offsets.raw(), row_ptrs, sizeof(int) * (n + 1), cudaMemcpyDefault); cudaMemcpy(A.col_indices.raw(), col_indices, sizeof(int) * nnz, cudaMemcpyDefault); cudaMemcpy(A.values.raw(), data, sizeof(ValueType) * nnz * block_dimx * block_dimy, cudaMemcpyDefault); if (diag_data) { cudaMemcpy(A.values.raw() + A.diagOffset()*A.get_block_size(), diag_data, sizeof(ValueType) * n * block_dimx * block_dimy, cudaMemcpyDefault); } else { A.computeDiagonal(); } cudaCheckError(); } else { A.manager->uploadMatrix(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag_data, A); } /* if (A.manager != NULL) A.manager->printToFile("M_clf_ua",""); */ /* A.printToFile("A_clf_ua","",-1,-1); */ A.set_initialized(1); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC matrix_replace_coefficients(AMGX_matrix_handle mtx, int n, int nnz, const void *data, const void *diag_data, Resources *resources) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); cudaSetDevice(A.getResources()->getDevice(0)); typedef typename MatPrecisionMap<AMGX_GET_MODE_VAL(AMGX_MatPrecision, CASE)>::Type ValueType; if (A.manager != NULL && (A.manager->isFineLevelConsolidated() && A.manager->getFineLevelComms()->halo_coloring != LAST || !A.manager->isFineLevelConsolidated() && A.manager->getComms()->halo_coloring != LAST) ) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); } else if (A.manager != NULL && (A.manager->isFineLevelConsolidated() || A.manager->isFineLevelGlued())) { A.manager->replaceMatrixCoefficientsWithCons(n, nnz, (const ValueType *)data, (const ValueType *)diag_data); } else if (A.manager != NULL && !A.is_matrix_singleGPU()) { A.manager->replaceMatrixCoefficientsNoCons(n, nnz, (const ValueType *)data, (const ValueType *)diag_data); } else { if (n != A.get_num_rows()) { 
std::string err = "Data passed to replace_coefficients doesn't correspond matrix object"; amgx_output(err.c_str(), err.length()); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) } if (data) { cudaMemcpy(A.values.raw(), (ValueType *)data, sizeof(ValueType) * (nnz * A.get_block_size()), cudaMemcpyDefault); } if (diag_data) { cudaMemcpy(A.values.raw() + nnz * A.get_block_size(), (ValueType *)diag_data, sizeof(ValueType) * (n * A.get_block_size()), cudaMemcpyDefault); } cudaCheckError(); } return AMGX_RC_OK; } template<AMGX_Mode CASE> inline void matrix_attach_geometry(AMGX_matrix_handle mtx, double *geox, double *geoy, double *geoz, int n, int dimension) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef typename TemplateMode<CASE>::Type TConfig; typedef TemplateConfig<AMGX_host, TConfig::vecPrec, TConfig::matPrec, TConfig::indPrec> TConfig_h; typedef typename Matrix<TConfig_h>::MVector Vector_h; typedef typename Matrix<TConfig>::MVector VVector; MatrixLetterT *obj = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); cudaSetDevice(obj->getResources()->getDevice(0)); Vector_h hgeo_x, hgeo_y, hgeo_z; VVector *geo_x = new VVector; VVector *geo_y = new VVector; hgeo_x.resize(n); hgeo_y.resize(n); if (dimension == 3) { VVector *geo_z = new VVector; hgeo_z.resize(n); for (int i = 0; i < n; i++) { hgeo_x[i] = geox[i]; hgeo_y[i] = geoy[i]; hgeo_z[i] = geoz[i]; } *geo_z = hgeo_z; obj->template setParameterPtr< VVector >("geo.z", geo_z); } else if (dimension == 2) { for (int i = 0; i < n; i++) { hgeo_x[i] = geox[i]; hgeo_y[i] = geoy[i]; } } *geo_y = hgeo_y; *geo_x = hgeo_x; obj->template setParameter<int>("dim", dimension); obj->template setParameter<int>("geo_size", n); obj->template setParameterPtr< VVector >("geo.x", geo_x); obj->template setParameterPtr< VVector >("geo.y", geo_y); } template<AMGX_Mode CASE> inline void matrix_attach_coloring(AMGX_matrix_handle mtx, int *row_coloring, int num_rows, int num_colors) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef typename TemplateMode<CASE>::Type TConfig; typedef TemplateConfig<AMGX_host, TConfig::vecPrec, TConfig::matPrec, TConfig::indPrec> TConfig_h; typedef typename Matrix<TConfig_h>::IVector IVector_h; MatrixLetterT *obj = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); cudaSetDevice(obj->getResources()->getDevice(0)); IVector_h *row_colors = new IVector_h; row_colors->resize(num_rows); for (int i = 0; i < num_rows; i++) { (*row_colors)[i] = row_coloring[i]; } obj->template setParameter<int>("coloring_size", num_rows); obj->template setParameter<int>("colors_num", num_colors); obj->template setParameterPtr< IVector_h >("coloring", row_colors); } template<AMGX_Mode CASE> inline AMGX_RC matrix_sort(AMGX_matrix_handle mtx) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; MatrixLetterT &A = *get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); cudaSetDevice(A.getResources()->getDevice(0)); if (A.get_block_size() == 1) { A.sortByRowAndColumn(); return AMGX_RC_OK; } else { return AMGX_RC_NOT_SUPPORTED_BLOCKSIZE; } } template<AMGX_Mode CASE> inline AMGX_RC vector_upload(AMGX_vector_handle vec, int n, int block_dim, const void *data) { typedef Vector<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename VecPrecisionMap<AMGX_GET_MODE_VAL(AMGX_VecPrecision, CASE)>::Type ValueTypeB; VectorW wrapV(vec); VectorLetterT &v = *wrapV.wrapped(); 
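/* Illustrative caller-side sketch (added for clarity; "rsrc" and "A" are hypothetical
   handles created elsewhere). A host array of n*block_dim values, laid out block by
   block, reaches this routine through the public C API; without a distributed manager
   the data is simply resized into the vector and cudaMemcpy'd below, otherwise it is
   routed through transformAndUploadVector():

       double h_x[6] = { 1.0, 2.0,  3.0, 4.0,  5.0, 6.0 };  // n = 3 blocks, block_dim = 2
       AMGX_vector_handle x;
       AMGX_vector_create(&x, rsrc, AMGX_mode_dDDI);
       AMGX_vector_bind(x, A);             // optional: adopt A's distributed manager
       AMGX_vector_upload(x, 3, 2, h_x);
*/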
cudaSetDevice(v.getResources()->getDevice(0)); v.set_block_dimx(1); v.set_block_dimy(block_dim); if (v.getManager() != NULL) { v.dirtybit = 1; } if (v.is_transformed()) { v.unset_transformed(); } if (v.getManager() != NULL && !v.is_transformed()) { v.getManager()->transformAndUploadVector(v, data, n, block_dim); } else { v.resize(n * block_dim); cudaMemcpy(v.raw(), data, sizeof(ValueTypeB) * n * block_dim, cudaMemcpyDefault); cudaCheckError(); } return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC vector_set_zero(AMGX_vector_handle vec, int n, int block_dim, Resources *resources) { typedef Vector<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename VecPrecisionMap<AMGX_GET_MODE_VAL(AMGX_VecPrecision, CASE)>::Type ValueTypeB; VectorW wrapV(vec); VectorLetterT &v = *wrapV.wrapped(); if (!wrapV.is_valid() || n < 0 || block_dim < 1) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) cudaSetDevice(v.getResources()->getDevice(0)); v.resize(n * block_dim); v.set_block_dimy(block_dim); thrust::fill(v.begin(), v.end(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC vector_set_random(AMGX_vector_handle vec, int n, Resources *resources) { typedef Vector<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename VecPrecisionMap<AMGX_GET_MODE_VAL(AMGX_VecPrecision, CASE)>::Type ValueTypeB; VectorW wrapV(vec); VectorLetterT &v = *wrapV.wrapped(); if (!wrapV.is_valid() || n < 0) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) cudaSetDevice(v.getResources()->getDevice(0)); Vector<typename VectorLetterT::TConfig_h> t_vec(n); for (int i = 0; i < n; ++i) { t_vec[i] = types::get_rand<ValueTypeB>(); } v = t_vec; cudaCheckError(); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC vector_download_impl(const AMGX_vector_handle vec, void *data) { typedef Vector<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename VecPrecisionMap<AMGX_GET_MODE_VAL(AMGX_VecPrecision, CASE)>::Type ValueTypeB; VectorW wrapV(vec); VectorLetterT &v = *wrapV.wrapped(); /*if (!wrapV.is_valid() || n < 0 || block_dim < 1) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources)*/ cudaSetDevice(v.getResources()->getDevice(0)); if (v.getManager() != NULL) { int n, nnz; int block_dimy = v.get_block_dimy(); v.getManager()->getView(OWNED, n, nnz); if (v.is_transformed() || v.getManager()->isFineLevelGlued()) { v.getManager()->revertAndDownloadVector(v, data, n, block_dimy); } else { cudaMemcpy((ValueTypeB *)data, v.raw(), n * block_dimy * sizeof(ValueTypeB), cudaMemcpyDefault); cudaCheckError(); } } else { cudaMemcpy((ValueTypeB *)data, v.raw(), v.size() * sizeof(ValueTypeB), cudaMemcpyDefault); cudaCheckError(); } return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC vector_get_size(AMGX_vector_handle vec, int *n, int *block_dim) { typedef Vector<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename VecPrecisionMap<AMGX_GET_MODE_VAL(AMGX_VecPrecision, CASE)>::Type ValueTypeB; VectorW wrapV(vec); VectorLetterT &v = *wrapV.wrapped(); //if (!wrapV.is_valid()) // AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) if (v.getManager() != NULL && (v.getManager()->isFineLevelConsolidated() || v.getManager()->isFineLevelGlued() ) ) { *n = 
v.get_unconsolidated_size() / v.get_block_dimy(); } else { *n = v.size() / v.get_block_dimy(); } *block_dim = v.get_block_dimy(); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC read_system(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename, unsigned int props, int block_convert, AMG_Config &amgx_cfg, AMGX_ERROR &read_error) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef MatrixIO<TConfig> MatrixIOLetterT; typedef Vector<TConfig> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename TConfig::template setMemSpace<AMGX_host>::Type TConfig_h; typedef Matrix<TConfig_h> MatrixLetterT0; typedef MatrixIO<TConfig_h> MatrixIOLetterT0; typedef Vector<TConfig_h> VectorLetterT0; MatrixLetterT *mtx_ptr = NULL; VectorLetterT *rhs_ptr = NULL; VectorLetterT *sol_ptr = NULL; //if ((mtx == NULL) || (rhs == NULL) || (sol == NULL)) if (mtx != NULL) { mtx_ptr = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); } else { return AMGX_RC_BAD_PARAMETERS; } if (rhs != NULL) { rhs_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(rhs); } if (sol != NULL) { sol_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(sol); } //typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_value_type_h; //typedef typename Vector<ivec_value_type_h> IVector_h; switch (AMGX_GET_MODE_VAL(AMGX_MemorySpace, CASE)) { case (AMGX_device) : { MatrixLetterT0 Ah; MatrixLetterT0 Ahc; Ah.setResources(mtx_ptr->getResources()); Ahc.setResources(mtx_ptr->getResources()); VectorLetterT0 bh; VectorLetterT0 xh; read_error = MatrixIOLetterT0::readSystem(filename, Ah, bh, xh, amgx_cfg, props); if (mtx != NULL) { if (block_convert != 0) { Ahc.convert(Ah, Ah.getProps(), block_convert, block_convert); Ah.set_initialized(0); Ah = Ahc; Ah.set_initialized(1); if (rhs != NULL) { bh.set_block_dimy(block_convert); } if (sol != NULL) { xh.set_block_dimy(block_convert); } } mtx_ptr->set_initialized(0); *mtx_ptr = Ah; mtx_ptr->set_initialized(1); } if (rhs != NULL) { *rhs_ptr = bh; } if (sol != NULL) { *sol_ptr = xh; } } break; case (AMGX_host) : { std::shared_ptr<MatrixLetterT> Ah; std::shared_ptr<VectorLetterT> bh; std::shared_ptr<VectorLetterT> xh; if (mtx == NULL) { Ah.reset(new MatrixLetterT()); } else { MatrixW wMtx(mtx); Ah = wMtx.wrapped(); } if (rhs == NULL) { bh.reset(new VectorLetterT()); } else { VectorW wRhs(rhs); bh = wRhs.wrapped(); } if (sol == NULL) { xh.reset(new VectorLetterT()); } else { VectorW wSol(sol); xh = wSol.wrapped(); } read_error = MatrixIOLetterT::readSystem((char *)filename, *Ah, *bh, *xh, amgx_cfg, props); if (block_convert != 0) { if (mtx != NULL) { MatrixLetterT0 Ahc; Ahc.convert(*Ah, Ah->getProps(), block_convert, block_convert); Ah->set_initialized(0); *Ah = Ahc; Ah->set_initialized(1); } if (rhs != NULL) { bh->set_block_dimy(block_convert); } if (sol != NULL) { xh->set_block_dimy(block_convert); } } } break; } return AMGX_RC_OK; } #ifdef AMGX_WITH_MPI template<AMGX_Mode CASE> inline AMGX_RC mpi_write_system_distributed(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, const AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector, AMGX_ERROR &rc) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef 
CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef Vector<TConfig> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; if (mtx == NULL && rhs == NULL && sol == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } MatrixLetterT *mtx_ptr = NULL; VectorLetterT *rhs_ptr = NULL; VectorLetterT *sol_ptr = NULL; if (mtx != NULL) { mtx_ptr = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); } if (rhs != NULL) { rhs_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(rhs); } if (sol != NULL) { sol_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(sol); } /*global objects*/ MatrixLetterT gA; VectorLetterT grhs; VectorLetterT gsol; int root = 0, rank = 0; if (mtx != NULL) { gA.setResources(mtx_ptr->getResources()); construct_global_matrix<TConfig>(root, rank, mtx_ptr, gA, partition_vector_size, partition_vector); } if (rhs != NULL) { grhs.setResources(rhs_ptr->getResources()); construct_global_vector<TConfig>(root, rank, mtx_ptr, rhs_ptr, grhs, partition_vector_size, partition_vector); } if (sol != NULL) { gsol.setResources(sol_ptr->getResources()); construct_global_vector<TConfig>(root, rank, mtx_ptr, sol_ptr, gsol, partition_vector_size, partition_vector); } if (rank == root) { if (mtx_ptr) { cudaSetDevice(mtx_ptr->getResources()->getDevice(0)); } else if (rhs_ptr) { cudaSetDevice(rhs_ptr->getResources()->getDevice(0)); } else { cudaSetDevice(sol_ptr->getResources()->getDevice(0)); } rc = MatrixIO<TConfig>::writeSystem(filename, &gA, &grhs, &gsol); } return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC read_system_distributed(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector, std::stringstream &msg, int &num_ranks, Resources *resources, int part, unsigned int props, AMGX_ERROR &read_error) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef Vector<TConfig> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename TConfig::template setMemSpace<AMGX_host>::Type TConfig_h; typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_value_type_h; typedef Vector<ivec_value_type_h> IVector_h; IVector_h partitionVec; IVector_h partSize; MatrixLetterT *mtx_ptr = NULL; VectorLetterT *rhs_ptr = NULL; VectorLetterT *sol_ptr = NULL; if (mtx != NULL) { mtx_ptr = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); } if (rhs != NULL) { rhs_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(rhs); } if (sol != NULL) { sol_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(sol); } MPI_Comm *mpi_comm; if (mtx != NULL) { mpi_comm = mtx_ptr->getResources()->getMpiComm(); } else if (rhs != NULL) { mpi_comm = rhs_ptr->getResources()->getMpiComm(); } else if (sol != NULL) { mpi_comm = sol_ptr->getResources()->getMpiComm(); } MPI_Comm_size(*mpi_comm, &num_ranks); MPI_Comm_rank(*mpi_comm, &part); if (partition_vector != NULL) { if (partition_sizes != NULL && num_partitions != num_ranks) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) partitionVec.resize(partition_vector_size); thrust::copy(partition_vector, partition_vector + partition_vector_size, partitionVec.begin()); cudaCheckError(); if (num_partitions == 0) { num_partitions = num_ranks; } if (num_partitions % 
num_ranks != 0) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) partSize.resize(num_ranks); if (partition_sizes != NULL) { thrust::copy(partition_sizes, partition_sizes + num_partitions, partSize.begin()); cudaCheckError(); } else { int partsPerRank = num_partitions / num_ranks; thrust::fill(partSize.begin(), partSize.end(), 0); cudaCheckError(); for (int i = 0; i < partitionVec.size(); i++) { int p = partitionVec[i] / partsPerRank; partitionVec[i] = p; partSize[p]++; } } msg << "Read consolidated partition sizes: "; for (int i = 0; i < num_ranks; i++) { msg << partSize[i] << " "; } msg << "n"; } else { num_partitions = num_ranks; } switch (AMGX_GET_MODE_VAL(AMGX_MemorySpace, CASE)) { case (AMGX_device) : { if (rhs != NULL && sol != NULL) { read_error = DistributedRead<TConfig>::distributedRead((char *)filename, *mtx_ptr, *rhs_ptr, *sol_ptr, allocated_halo_depth, part, num_ranks, partSize, partitionVec, props); } else if (rhs != NULL) { read_error = DistributedRead<TConfig>::distributedRead((char *)filename, *mtx_ptr, *rhs_ptr, allocated_halo_depth, part, num_ranks, partSize, partitionVec, props); } else { read_error = DistributedRead<TConfig>::distributedRead((char *)filename, *mtx_ptr, *sol_ptr, allocated_halo_depth, part, num_ranks, partSize, partitionVec, props); } } break; case (AMGX_host) : { //local effect only, no need to be allocated on the pool // std::shared_ptr<MatrixLetterT> Ah; std::shared_ptr<VectorLetterT> bh; std::shared_ptr<VectorLetterT> xh; if (mtx == NULL) { Ah.reset(new MatrixLetterT()); } else { MatrixW wMtx(mtx); Ah = wMtx.wrapped(); } if (rhs == NULL) { bh.reset(new VectorLetterT()); } else { VectorW wRhs(rhs); bh = wRhs.wrapped(); } if (sol == NULL) { xh.reset(new VectorLetterT()); } else { VectorW wSol(sol); xh = wSol.wrapped(); } read_error = DistributedRead<TConfig>::distributedRead((char *)filename, *Ah, *bh, *xh, allocated_halo_depth, part, num_partitions, partSize, partitionVec, props); } break; } return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC generate_distributed_poisson_7pt(AMGX_matrix_handle mtx, AMGX_vector_handle rhs_, AMGX_vector_handle sol_, int allocated_halo_depth, int num_import_rings, int nx, int ny, int nz, int px, int py, int pz) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef Vector<TConfig> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; typedef typename Vector<TConfig>::value_type ValueTypeB; MatrixW wrapA(mtx); MatrixLetterT &A_part = *wrapA.wrapped(); VectorW wrapRhs(rhs_); VectorLetterT &rhs = *wrapRhs.wrapped(); VectorW wrapSol(sol_); VectorLetterT &sol = *wrapSol.wrapped(); cudaSetDevice(A_part.getResources()->getDevice(0)); MPI_Comm *mpi_comm = A_part.getResources()->getMpiComm(); int num_ranks; MPI_Comm_size(*mpi_comm, &num_ranks); if ((px * py * pz != num_ranks) || (px * py * pz == 0)) { amgx_printf(" Invalid number of processors or processor topologyn "); return AMGX_RC_BAD_PARAMETERS; } /* Create distributed manager */ if (A_part.manager != NULL) { delete A_part.manager; A_part.manager = NULL; } A_part.manager = new DistributedManager<TConfig>(A_part); A_part.setManagerExternal(); /* Generate 7pt Poisson matrix */ A_part.manager->generatePoisson7pt(nx, ny, nz, px, py, pz); /* Create B2L_maps for comm */ A_part.manager->renumberMatrixOneRing(); /* Exchange 1 ring halo rows (for d2 interp) */ if (num_import_rings == 2) { A_part.manager->createOneRingHaloRows(); } 
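/* Parameter sketch (added for clarity; the rank -> brick mapping shown is only
   illustrative, the actual placement is decided inside generatePoisson7pt()):
   nx, ny, nz describe the brick generated per rank and px, py, pz the processor grid,
   so the check above requires px*py*pz == num_ranks and the assembled problem spans a
   (nx*px) x (ny*py) x (nz*pz) grid with nx*ny*nz rows owned by each rank, e.g.

       int rx =  rank %  px;
       int ry = (rank /  px) % py;
       int rz =  rank / (px * py);
*/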
A_part.manager->getComms()->set_neighbors(A_part.manager->num_neighbors()); A_part.setView(OWNED); A_part.set_initialized(1); /* Create rhs and solution */ rhs.resize(A_part.get_num_rows()); thrust::fill(rhs.begin(), rhs.end(), types::util<ValueTypeB>::get_one()); sol.resize(A_part.get_num_rows()); thrust::fill(sol.begin(), sol.end(), types::util<ValueTypeB>::get_one()); cudaCheckError(); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC matrix_upload_distributed(AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, AMGX_distribution_handle dist) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef typename MatPrecisionMap<AMGX_GET_MODE_VAL(AMGX_MatPrecision, CASE)>::Type ValueType; MatrixDistributionW wrapDist(dist); MatrixDistribution &mdist = *wrapDist.wrapped(); MatrixW wrapA(mtx); MatrixLetterT &A_part = *wrapA.wrapped(); cudaSetDevice(A_part.getResources()->getDevice(0)); MPI_Comm *mpi_comm = A_part.getResources()->getMpiComm(); int num_ranks; MPI_Comm_size(*mpi_comm, &num_ranks); /* Create distributed manager */ if (A_part.manager != NULL) { delete A_part.manager; A_part.manager = NULL; } A_part.manager = new DistributedManager<TConfig>(A_part); A_part.setManagerExternal(); /* Load distributed matrix Choose correct overload based on column index type */ if (mdist.get32BitColIndices()) { A_part.manager->loadDistributedMatrix(n, nnz, block_dimx, block_dimy, row_ptrs, (int *)col_indices_global, (ValueType *)data, num_ranks, n_global, diag_data, mdist); } else { A_part.manager->loadDistributedMatrix(n, nnz, block_dimx, block_dimy, row_ptrs, (int64_t *)col_indices_global, (ValueType *)data, num_ranks, n_global, diag_data, mdist); } /* Create B2L_maps for comm */ A_part.manager->renumberMatrixOneRing(); /* Exchange 1 ring halo rows (for d2 interp) */ if (mdist.getNumImportRings() == 2) { A_part.manager->createOneRingHaloRows(); } A_part.manager->getComms()->set_neighbors(A_part.manager->num_neighbors()); A_part.setView(OWNED); /* if (A_part.manager != NULL) A_part.manager->printToFile("M_clf_gua",""); */ /* A_part.printToFile("A_clf_gua","",-1,-1); */ A_part.set_initialized(1); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC matrix_upload_all_global(AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, int allocated_halo_depth, // TODO: unused parameter int num_import_rings, const int *partition_vector) { AMGX_distribution_handle dist; AMGX_distribution_create(&dist, NULL); MatrixDistributionW wrapDist(dist); MatrixDistribution &mdist = *wrapDist.wrapped(); mdist.setPartitionVec(partition_vector); mdist.setNumImportRings(num_import_rings); auto rc = matrix_upload_distributed<CASE>(mtx, n_global, n, nnz, block_dimx, block_dimy, row_ptrs, col_indices_global, data, diag_data, dist); AMGX_distribution_destroy(dist); return rc; } template<AMGX_Mode CASE> inline AMGX_RC matrix_upload_all_global_32(AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, int allocated_halo_depth, // TODO: unused parameter int num_import_rings, const int *partition_vector) { AMGX_distribution_handle dist; 
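/* Note (added for clarity): this legacy *_upload_all_global entry point is a thin shim
   that packs its partitioning arguments into a temporary AMGX_distribution_handle and
   forwards to matrix_upload_distributed<CASE>() above. A caller using the
   distribution-based C API directly would do roughly the following (hedged sketch;
   "cfg" and "partition_offsets" are caller-provided, see amgx_c.h for exact signatures):

       AMGX_distribution_handle d;
       AMGX_distribution_create(&d, cfg);
       AMGX_distribution_set_partition_data(d, AMGX_DIST_PARTITION_OFFSETS, partition_offsets);
       AMGX_matrix_upload_distributed(A, n_global, n, nnz, block_dimx, block_dimy,
                                      row_ptrs, col_indices_global, data, diag_data, d);
       AMGX_distribution_destroy(d);
*/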
AMGX_distribution_create(&dist, NULL); MatrixDistributionW wrapDist(dist); MatrixDistribution &mdist = *wrapDist.wrapped(); mdist.setPartitionVec(partition_vector); mdist.setNumImportRings(num_import_rings); mdist.set32BitColIndices(true); auto rc = matrix_upload_distributed<CASE>(mtx, n_global, n, nnz, block_dimx, block_dimy, row_ptrs, col_indices_global, data, diag_data, dist); AMGX_distribution_destroy(dist); return rc; } #endif template<AMGX_Mode CASE> inline AMGX_RC matrix_comm_from_maps(AMGX_matrix_handle mtx, int allocated_halo_depth, int num_import_rings, int max_num_neighbors, const int *neighbors, const int *send_ptrs, int const *send_maps, const int *recv_ptrs, int const *recv_maps) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A_part = *wrapA.wrapped(); cudaSetDevice(A_part.getResources()->getDevice(0)); if (allocated_halo_depth > 1) { amgx_printf("Allocated_halo_depth > 1 currently not supported"); return AMGX_RC_BAD_PARAMETERS; } if (num_import_rings > 1) { amgx_printf("num_import_rings > 1 currently not supported"); return AMGX_RC_BAD_PARAMETERS; } if (allocated_halo_depth != num_import_rings) { amgx_printf("num_import_rings != allocated_halo_depth currently not supported"); return AMGX_RC_BAD_PARAMETERS; } if (A_part.manager != NULL) { delete A_part.manager; A_part.manager = NULL; } if (max_num_neighbors > 0) { A_part.manager = new DistributedManager<TConfig>(A_part, allocated_halo_depth, num_import_rings, max_num_neighbors, neighbors); A_part.manager->cacheMaps(send_maps, send_ptrs, recv_maps, recv_ptrs); A_part.setManagerExternal(); A_part.manager->createComms(A_part.getResources()); } return AMGX_RC_OK; } template<AMGX_Mode CASE> inline AMGX_RC write_system(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, const AMGX_vector_handle sol, const char *filename, AMGX_ERROR &rc) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef Vector<TConfig> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; if (mtx == NULL && rhs == NULL && sol == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } MatrixLetterT *mtx_ptr = NULL; VectorLetterT *rhs_ptr = NULL; VectorLetterT *sol_ptr = NULL; if (mtx != NULL) { mtx_ptr = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx); } if (rhs != NULL) { rhs_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(rhs); } if (sol != NULL) { sol_ptr = get_mode_object_from<CASE, Vector, AMGX_vector_handle>(sol); } if (mtx_ptr) { cudaSetDevice(mtx_ptr->getResources()->getDevice(0)); } else if (rhs_ptr) { cudaSetDevice(rhs_ptr->getResources()->getDevice(0)); } else { cudaSetDevice(sol_ptr->getResources()->getDevice(0)); } rc = MatrixIO<TConfig>::writeSystem(filename, mtx_ptr, rhs_ptr, sol_ptr); return AMGX_RC_OK; } template<AMGX_Mode CASE> inline void solver_get_iterations_number(AMGX_solver_handle slv, int *n) { auto *solver = get_mode_object_from<CASE, AMG_Solver, AMGX_solver_handle>(slv); cudaSetDevice(solver->getResources()->getDevice(0)); *n = solver->get_num_iters(); } template<AMGX_Mode CASE> inline AMGX_RC solver_get_iteration_residual(AMGX_solver_handle slv, int it, int idx, double *res) { auto *solver = get_mode_object_from<CASE, AMG_Solver, AMGX_solver_handle>(slv); cudaSetDevice(solver->getResources()->getDevice(0)); if (idx < 0 || 
idx >= solver->get_residual(it).size()) { amgx_printf("Incorrect block index"); return AMGX_RC_BAD_PARAMETERS; } *res = (double)solver->get_residual(it)[idx]; return AMGX_RC_OK; } template<AMGX_Mode CASE> inline void solver_get_status(AMGX_solver_handle slv, AMGX_SOLVE_STATUS *st) { typedef AMG_Solver<typename TemplateMode<CASE>::Type> SolverLetterT; typedef CWrapHandle<AMGX_solver_handle, SolverLetterT> SolverW; SolverW wrapSolver(slv); switch (wrapSolver.last_solve_status()) { case AMGX_ST_CONVERGED: *st = AMGX_SOLVE_SUCCESS; break; case AMGX_ST_DIVERGED: *st = AMGX_SOLVE_DIVERGED; break; case AMGX_ST_NOT_CONVERGED: *st = AMGX_SOLVE_NOT_CONVERGED; break; default: *st = AMGX_SOLVE_FAILED; } } template<AMGX_Mode CASE> inline void matrix_download_all(const AMGX_matrix_handle mtx, int *row_ptrs, int *col_indices, void *data, void **diag_data) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); typedef typename MatPrecisionMap<AMGX_GET_MODE_VAL(AMGX_MatPrecision, CASE)>::Type ValueType; cudaSetDevice(A.getResources()->getDevice(0)); int n, nnz, block_size; n = A.get_num_rows(); block_size = A.get_block_size(); nnz = A.get_num_nz(); if (A.hasProps(DIAG)) { int sizeof_m_val = ((AMGX_GET_MODE_VAL(AMGX_MatPrecision, CASE) == AMGX_matDouble)) ? sizeof(double) : sizeof(float); *diag_data = get_c_arr_mem_manager().allocate(n * block_size * sizeof_m_val); cudaMemcpy((ValueType *)(*diag_data), A.values.raw() + nnz * block_size, n * block_size * sizeof(ValueType), cudaMemcpyDefault); } else { *diag_data = NULL; } cudaMemcpy(row_ptrs, A.row_offsets.raw(), A.row_offsets.size()*sizeof(int), cudaMemcpyDefault); cudaMemcpy(col_indices, A.col_indices.raw(), A.col_indices.size()*sizeof(int), cudaMemcpyDefault); cudaMemcpy(data, A.values.raw(), nnz * block_size * sizeof(ValueType), cudaMemcpyDefault); cudaCheckError(); } template<AMGX_Mode CASE> inline void vector_bind(AMGX_vector_handle vec, const AMGX_matrix_handle mtx) { typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; typedef Vector<typename TemplateMode<CASE>::Type> VectorLetterT; typedef CWrapHandle<AMGX_vector_handle, VectorLetterT> VectorW; VectorW wrapV(vec); VectorLetterT &x = *wrapV.wrapped(); MatrixW wrapA(mtx); MatrixLetterT &A = *wrapA.wrapped(); cudaSetDevice(A.getResources()->getDevice(0)); if (A.getResources() != x.getResources()) { FatalError("Matrix and vector don't use same resources, exiting", AMGX_ERR_CONFIGURATION); } if (A.manager != NULL) { x.setManager(*(A.manager)); } cudaCheckError(); } template<AMGX_Mode CASE> inline void read_system_maps_one_ring_impl( const AMGX_matrix_handle A_part, int *num_neighbors, int **neighbors, int **btl_sizes, int ***btl_maps, int **lth_sizes, int ***lth_maps, int64_t **local_to_global_map) { typedef typename TemplateMode<CASE>::Type TConfig; typedef typename TConfig::template setMemSpace<AMGX_device>::Type SType; typedef Matrix<SType> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(A_part); MatrixLetterT &A = *wrapA.wrapped(); cudaSetDevice(A.getResources()->getDevice(0)); A.manager->malloc_export_maps(btl_maps, btl_sizes, lth_maps, lth_sizes); *num_neighbors = A.manager->num_neighbors(); *neighbors = (int *)get_c_arr_mem_manager().allocate((*num_neighbors) * sizeof(int)); A.manager->export_neighbors(*neighbors); /* check if we're in 
read_system_global */ if (local_to_global_map != NULL) //??? maybe meant == NULL ? { /* now setup local to global maps */ int map_size = A.manager->local_to_global_map.size(); *local_to_global_map = (int64_t *)get_c_arr_mem_manager().allocate(map_size * sizeof(int64_t)); for (int i = 0; i < map_size; i++) { (*local_to_global_map)[i] = A.manager->local_to_global_map[i]; } } } template<AMGX_Mode CASE> inline AMGX_RC matrix_comm_from_maps_one_ring(AMGX_matrix_handle mtx, int allocated_halo_depth, int max_num_neighbors, const int *neighbors, const int *send_sizes, int const **send_maps, const int *recv_sizes, int const **recv_maps, Resources *resources) { typedef typename TemplateMode<CASE>::Type TConfig; typedef Matrix<TConfig> MatrixLetterT; typedef CWrapHandle<AMGX_matrix_handle, MatrixLetterT> MatrixW; MatrixW wrapA(mtx); MatrixLetterT &A_part = *wrapA.wrapped(); cudaSetDevice(A_part.getResources()->getDevice(0)); if (allocated_halo_depth > 1) { amgx_printf("Allocated_halo_depth > 1 currently not supported"); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) } if (A_part.manager != NULL) { delete A_part.manager; A_part.manager = NULL; } if (max_num_neighbors > 0) { // Create a new manager, and save neighbor list A_part.manager = new DistributedManager<TConfig>(A_part, 1, 1, max_num_neighbors, neighbors); // save boundary and halo lists to manager A_part.manager->cacheMapsOneRing(send_maps, send_sizes, recv_maps, recv_sizes); A_part.setManagerExternal(); // Create comms module, "communicator" config string either MPI or MPI_DIRECT A_part.manager->createComms(A_part.getResources()); } return AMGX_RC_OK; } }//end unnamed namespace AMGX_RC write_system_preamble(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, const AMGX_vector_handle sol, Resources *&resources, AMGX_Mode &mode) { if (mtx == NULL && rhs == NULL && sol == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } mode = AMGX_unset; AMGX_Mode m_mode = AMGX_unset; AMGX_Mode r_mode = AMGX_unset; AMGX_Mode s_mode = AMGX_unset; resources = NULL; if (mtx != NULL) { AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) //if (!c_mtx || !c_mtx->is_valid()) return AMGX_RC_BAD_PARAMETERS; mode = m_mode = get_mode_from<AMGX_matrix_handle>(mtx); } if (rhs != NULL) { if (resources == NULL) AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(rhs, &resources)), NULL) //no need to check validity here, //it's done elsewhere via: //get_mode_object_from<...>(...)-> //CWrapHandle(Envelope) cstrctr -> //is_valid() // //if (!wrapRhs.is_valid()) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); mode = r_mode = get_mode_from<AMGX_vector_handle>(rhs); } if (sol != NULL) { if (resources == NULL) AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(sol, &resources)), NULL) //no need to check validity here, //it's done elsewhere via: //get_mode_object_from<...>(...)-> //CWrapHandle(Envelope) cstrctr -> //is_valid() // //if (!wrapSol.is_valid()) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); mode = s_mode = get_mode_from<AMGX_vector_handle>(sol); } if (mtx != NULL) { if (m_mode != mode) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); } } if (rhs != NULL) { if (r_mode != mode) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); } } if (sol != NULL) { if (s_mode != mode) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); } } return AMGX_RC_OK; } AMGX_RC read_system_preamble(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, 
const AMGX_vector_handle sol, Resources *&resources, AMGX_Mode &mode, unsigned int &props, bool try_any = false) { if (mtx == NULL && rhs == NULL && sol == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } mode = AMGX_unset; resources = NULL; props = io_config::NONE; if (mtx != NULL) { AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) io_config::addProps(io_config::MTX, props); mode = get_mode_from<AMGX_matrix_handle>(mtx); } else if (!try_any) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) // there are no valid resources without Matrix object if (rhs != NULL) { io_config::addProps(io_config::RHS, props); if (mode == AMGX_unset) { mode = get_mode_from<AMGX_vector_handle>(rhs); } if (try_any) { if (resources == NULL) AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(rhs, &resources)), NULL) } //no need to check validity here, //it's done elsewhere via: //get_mode_object_from<...>(...)-> //CWrapHandle(Envelope) cstrctr -> //is_valid() // //if (!wrapRhs.is_valid()) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); } if (sol != NULL) { io_config::addProps(io_config::SOLN, props); if (mode == AMGX_unset) { mode = get_mode_from<AMGX_vector_handle>(sol); } if (try_any) { if (resources == NULL) AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(sol, &resources)), NULL) } //no need to check validity here, //it's done elsewhere via: //get_mode_object_from<...>(...)-> //CWrapHandle(Envelope) cstrctr -> //is_valid() // //if (!wrapSol.is_valid()) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); } if (props == io_config::NONE) { return AMGX_RC_BAD_PARAMETERS; } return AMGX_RC_OK; } } // end namespace using namespace amgx; extern "C" { void AMGX_API AMGX_abort(AMGX_resources_handle rsc, int err) { Resources *resources = NULL; if (rsc != NULL) { AMGX_ERROR rc = AMGX_OK; try { ///amgx::CWrapper<AMGX_resources_handle> *c_resources= (amgx::CWrapper<AMGX_resources_handle>*)rsc; ResourceW c_r(rsc); ///if (!c_resources) if (!c_r.wrapped()) { fprintf(stderr, "AMGX_abort warning: provided wrong resources, using defaults"); amgx_error_exit(NULL, err); } ///resources = (Resources*)(c_resources->hdl); resources = c_r.wrapped().get(); } AMGX_CATCHES(rc) if (AMGX_OK != rc) { fprintf(stderr, "AMGX_abort warning: catched %d\n",rc); } } amgx_error_exit(resources, err); } AMGX_RC AMGX_API AMGX_initialize() { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_initialize " ); AMGX_CHECK_API_ERROR(amgx::initialize(), NULL); return AMGX_RC_OK; //return getCAPIerror(amgx::initialize()); } AMGX_RC AMGX_API AMGX_initialize_plugins() { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_initialize_plugins " ); return getCAPIerror_x(amgx::initializePlugins()); } AMGX_RC AMGX_API AMGX_finalize() { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_finalize " ); AMGX_ERROR rc = AMGX_OK; try { amgx::finalize(); } AMGX_CATCHES(rc) //AMGX_CHECK_API_ERROR(rc, NULL); //return AMGX_RC_OK; return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_finalize_plugins() { AMGX_CPU_PROFILER( "AMGX_finalize_plugins " ); AMGX_ERROR rc = AMGX_OK; try { amgx::finalizePlugins(); } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, NULL); return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_config_create(AMGX_config_handle *cfg_h, const char *options) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_config_create " ); AMGX_ERROR rc = AMGX_OK; AMGX_ERROR err; try { auto *cfg0 = create_managed_object<AMG_Configuration, AMGX_config_handle>(cfg_h); err = 
cfg0->wrapped()->parseParameterString(options); } AMGX_CATCHES(rc) if (rc != AMGX_OK) AMGX_CHECK_API_ERROR(rc, NULL) //return getCAPIerror(rc); else AMGX_CHECK_API_ERROR(err, NULL) //return getCAPIerror(err); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_config_create_from_file(AMGX_config_handle *cfg_h, const char *param_file) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_config_create_from_file " ); AMGX_ERROR rc = AMGX_OK; AMGX_ERROR err; ConfigW *cfg = nullptr; try { ///cfg = get_mem_manager<ConfigW>().allocate<ConfigW>(AMG_Configuration()).get(); cfg = create_managed_object<AMG_Configuration, AMGX_config_handle>(cfg_h); ///err = (*(AMG_Configuration *)cfg->hdl()).parseFile(param_file); err = cfg->wrapped()->parseFile(param_file); ///*cfg_h = (AMGX_config_handle)cfg; } AMGX_CATCHES(rc) if (rc != AMGX_OK) AMGX_CHECK_API_ERROR(rc, NULL) //return getCAPIerror(rc); else { AMGX_CHECK_API_ERROR(err, NULL); } //return getCAPIerror(err); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_config_create_from_file_and_string(AMGX_config_handle *cfg_h, const char *param_file, const char *options) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_config_create_from_file_and_string " ); AMGX_ERROR rc = AMGX_OK; AMGX_ERROR err; ConfigW *cfg = nullptr; try { //use create_*() // ///cfg = get_mem_manager<ConfigW>().allocate<ConfigW>(AMG_Configuration()).get(); cfg = create_managed_object<AMG_Configuration, AMGX_config_handle>(cfg_h); err = cfg->wrapped()->parseParameterStringAndFile(options, param_file); *cfg_h = (AMGX_config_handle)cfg;//need (AMGX_config_handle)(cfg->handle().get()) ??? No! } AMGX_CATCHES(rc) if (rc != AMGX_OK) AMGX_CHECK_API_ERROR(rc, NULL) //return getCAPIerror(rc); else { AMGX_CHECK_API_ERROR(err, NULL); } //return getCAPIerror(err); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_config_add_parameters(AMGX_config_handle *cfg_h, const char *options) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_config_add_parameters" ); AMGX_ERROR rc = AMGX_OK; AMGX_ERROR err; ///CWrapper<AMGX_config_handle>* cfg; try { ///cfg = (CWrapper<AMGX_config_handle>*)(*cfg_h); //cfg = (ConfigW*)(*cfg_h); ConfigW cfg(*cfg_h); ///((AMG_Configuration *)cfg->hdl())->setAllowConfigurationMod(1); cfg.wrapped()->setAllowConfigurationMod(1); ///err = ((AMG_Configuration *)cfg->hdl())->parseParameterString(options); err = cfg.wrapped()->parseParameterString(options); ///((AMG_Configuration *)cfg->hdl())->setAllowConfigurationMod(0); cfg.wrapped()->setAllowConfigurationMod(0); } AMGX_CATCHES(rc) if (rc != AMGX_OK) AMGX_CHECK_API_ERROR(rc, NULL) //return getCAPIerror(rc); else AMGX_CHECK_API_ERROR(err, NULL) //return getCAPIerror(err); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_config_get_default_number_of_rings(AMGX_config_handle cfg_h, int *num_import_rings) { nvtxRange nvrf(__func__); std::string s_scope, s_value, p_scope, p_value; AlgorithmType s_algorithm, p_algorithm; AMGX_ERROR rc = AMGX_OK; try { ///CWrapper<AMGX_config_handle>* cfg = (CWrapper<AMGX_config_handle> *)cfg_h; ConfigW cfg(cfg_h); ///AMG_Config *cfg_obj = ((AMG_Configuration *)(cfg->hdl))->getConfigObject(); AMG_Config *cfg_obj = cfg.wrapped()->getConfigObject(); if (cfg_obj != NULL) { // find out what solver and preconditioner are being used /* WARNING: notice that there is no need to check the smoother, because in order to use a smoother you must have selected, either solver or preconditioner to be AMG[CLASSICAL|AGGREGATION]. 
*/ cfg_obj->getParameter<std::string>("solver", s_value, "default", s_scope); cfg_obj->getParameter<std::string>("preconditioner", p_value, s_scope, p_scope); s_algorithm = cfg_obj->getParameter<AlgorithmType>("algorithm", s_scope); p_algorithm = cfg_obj->getParameter<AlgorithmType>("algorithm", p_scope); /* WARNING: Two assumptions: (i) this routine assumes that you can not mix CLASSICAL and AGGREGATION AMG in the same config string, because they require different number of rings. For example, you can not solve AGGREGATION AMG coarse level with CLASSICAL AMG, and vice-versa. It seems to be a reasonable assumption. (ii) we are only checking two levels of hierarchy, so that if you use CG, preconditioned by CG, preconditioned by AMG, this routine will not check the AMG in the third level of precodnitioning. */ if (s_value.compare("AMG") == 0) { // if solver is AMG than simply check whether // classical or aggregation path is selected if (s_algorithm == CLASSICAL) { *num_import_rings = 2; } else //(s_alg == AGGREGATION) { *num_import_rings = 1; } } else { // if solver is not AMG than check preconditioner if (p_value.compare("AMG") == 0) { if (p_algorithm == CLASSICAL) { *num_import_rings = 2; } else //(p_alg == AGGREGATION) { *num_import_rings = 1; } } else { //neither solver nor precondiioner are AMG *num_import_rings = 1; } } } else { *num_import_rings = 0; return AMGX_RC_BAD_CONFIGURATION; } } AMGX_CATCHES(rc); return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_config_destroy(AMGX_config_handle cfg_h) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_config_destroy " ); AMGX_ERROR rc = AMGX_OK; try { if (!remove_managed_object<AMGX_config_handle, AMG_Configuration>(cfg_h)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, NULL) } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, NULL); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_solver_create(AMGX_solver_handle *slv, AMGX_resources_handle rsc, AMGX_Mode mode, const AMGX_config_handle cfg_h) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_create" ); AMGX_ERROR rc = AMGX_OK; AMGX_RC rc_solver; Resources *resources = NULL; try { ResourceW c_r(rsc); ConfigW cfg(cfg_h); if (!c_r.wrapped()) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } resources = c_r.wrapped().get(); cudaSetDevice(resources->getDevice(0)); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ auto* solver = create_managed_mode_object<CASE,AMG_Solver,AMGX_solver_handle>(slv, mode, resources, cfg.wrapped().get()); \ solver->set_last_solve_status(AMGX_ST_ERROR); \ rc_solver = solver->is_valid() ? 
AMGX_RC_OK : AMGX_RC_UNKNOWN; \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources); return rc_solver; } AMGX_RC AMGX_API AMGX_solver_calculate_residual_norm(AMGX_solver_handle solver, AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle x, void *norm_vector) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_solve " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ AMGX_ERROR rcs = solver_calculate_residual_norm<CASE, Matrix, Vector, AMG_Solver>(solver, mtx, rhs, x, resources, norm_vector); \ AMGX_CHECK_API_ERROR(rcs, resources); break;\ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_solver_destroy(AMGX_solver_handle slv) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_destroy " ); //cudaSetDevice(...) is called below because //device deallocator must be invoked //to free device resources Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_solver_handle>(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ cudaSetDevice(get_mode_object_from<CASE,AMG_Solver,AMGX_solver_handle>(slv)->getResources()->getDevice(0));\ remove_managed_object<CASE,AMG_Solver,AMGX_solver_handle>(slv); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_solver_setup(AMGX_solver_handle slv, AMGX_matrix_handle mtx) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_setup " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_solver_handle>(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ typedef TemplateMode<CASE>::Type TConfig; \ AMGX_ERROR rcs = set_solver_with_shared<CASE,AMG_Solver,Matrix>(slv, mtx, resources, &AMG_Solver<TConfig>::setup_capi); \ AMGX_CHECK_API_ERROR(rcs, resources); \ break;\ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) \ } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_solver_resetup(AMGX_solver_handle slv, AMGX_matrix_handle mtx) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_resetup " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_solver_handle>(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ typedef TemplateMode<CASE>::Type TConfig; \ AMGX_ERROR rcs = set_solver_with_shared<CASE,AMG_Solver,Matrix>(slv, mtx, resources, &AMG_Solver<TConfig>::resetup_capi); \ AMGX_CHECK_API_ERROR(rcs, resources); \ break;\ } 
AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) \ } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_solver_solve(AMGX_solver_handle slv, AMGX_vector_handle rhs, AMGX_vector_handle sol) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_solve " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_solver_handle>(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ AMGX_ERROR rcs = solve_with<CASE,AMG_Solver,Vector>(slv, rhs, sol, resources, false); \ AMGX_CHECK_API_ERROR(rcs, resources); break;\ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_solver_solve_with_0_initial_guess(AMGX_solver_handle slv, AMGX_vector_handle rhs, AMGX_vector_handle sol) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_solve_with_0_initial_guess " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_solver_handle>(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ AMGX_ERROR rcs = solve_with<CASE,AMG_Solver,Vector>(slv, rhs, sol, resources, true); \ AMGX_CHECK_API_ERROR(rcs, resources); break;\ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_matrix_create_impl(AMGX_matrix_handle *mtx, AMGX_resources_handle rsc, AMGX_Mode mode) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_create " ); AMGX_ERROR rc = AMGX_OK; AMGX_ERROR rc_mtx = AMGX_OK; Resources *resources = NULL; try { ResourceW c_r(rsc); if (!c_r.wrapped() ) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } resources = c_r.wrapped().get(); cudaSetDevice(resources->getDevice(0)); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ auto* wmtx = create_managed_mode_object<CASE,Matrix,AMGX_matrix_handle>(mtx, mode); \ rc_mtx = wmtx->is_valid() ? 
AMGX_OK : AMGX_ERR_UNKNOWN; \ wmtx->wrapped()->setResources(resources);\ }\ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc_mtx, resources) return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_matrix_create(AMGX_matrix_handle *mtx, AMGX_resources_handle rsc, AMGX_Mode mode) { nvtxRange nvrf(__func__); return AMGX_matrix_create_impl(mtx, rsc, mode); } AMGX_RC AMGX_API AMGX_matrix_vector_multiply(AMGX_matrix_handle mtx, AMGX_vector_handle x, AMGX_vector_handle y) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_solver_solve " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ AMGX_ERROR rcs = matrix_vector_multiply<CASE,Matrix,Vector>(mtx, x, y, resources); \ AMGX_CHECK_API_ERROR(rcs, resources); break;\ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_matrix_destroy_impl(AMGX_matrix_handle mtx) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_destroy " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ cudaSetDevice(get_mode_object_from<CASE,Matrix,AMGX_matrix_handle>(mtx)->getResources()->getDevice(0));\ remove_managed_matrix<CASE>(mtx); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_destroy(AMGX_matrix_handle mtx) { nvtxRange nvrf(__func__); return AMGX_matrix_destroy_impl(mtx); } AMGX_RC AMGX_API AMGX_matrix_upload_all_impl(AMGX_matrix_handle mtx, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag_data) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_upload_all " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) // should change to the convert(). this routine will catch possible memory exceptions and return corresponding errors. temporary catch. 
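/* Layout expected from the caller for the CSR arrays (the same sizes are used when
   AMGX_read_system_maps_one_ring, further below, allocates them): row_ptrs holds
   n + 1 ints, col_indices holds nnz ints, data holds nnz * block_dimx * block_dimy
   values, and diag_data, when not NULL, holds the separately stored block diagonal.
   A minimal sketch for a 2x2 system with 1x1 blocks, assuming mtx was created with a
   matching double-precision mode:

       int    row_ptrs[]    = { 0, 2, 4 };
       int    col_indices[] = { 0, 1, 0, 1 };
       double values[]      = { 4.0, -1.0, -1.0, 4.0 };
       AMGX_matrix_upload_all(mtx, 2, 4, 1, 1, row_ptrs, col_indices, values, NULL);
*/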
AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE:{ \ rc0 = matrix_upload_all<CASE>(mtx,n,nnz,block_dimx,block_dimy,row_ptrs,col_indices,data,diag_data,resources); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_upload_all(AMGX_matrix_handle mtx, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const int *col_indices, const void *data, const void *diag_data) { nvtxRange nvrf(__func__); return AMGX_matrix_upload_all_impl(mtx, n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag_data); } AMGX_RC AMGX_API AMGX_matrix_replace_coefficients(AMGX_matrix_handle mtx, int n, int nnz, const void *data, const void *diag_data) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_replace_coefficients " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); //if (!c_mtx || !c_mtx->is_valid()) return AMGX_RC_BAD_PARAMETERS; switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ rc0 = matrix_replace_coefficients<CASE>(mtx,n,nnz,data,diag_data,resources); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_matrix_get_size_impl(const AMGX_matrix_handle mtx, int *n, int *block_dimx, int *block_dimy) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_get_size " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ typedef Matrix<typename TemplateMode<CASE>::Type> MatrixLetterT; \ MatrixLetterT* mtx_ptr = get_mode_object_from<CASE,Matrix,AMGX_matrix_handle>(mtx); \ if (mtx_ptr->manager != NULL) \ { \ if (mtx_ptr->manager->isFineLevelGlued()) \ { \ *n = mtx_ptr->manager->halo_offsets_before_glue[0]; \ } \ else \ { \ *n = mtx_ptr->get_num_rows(); \ } \ } \ else \ { \ *n = mtx_ptr->get_num_rows(); \ } \ *block_dimx = mtx_ptr->get_block_dimx(); \ *block_dimy = mtx_ptr->get_block_dimy(); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) // return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_get_size(const AMGX_matrix_handle mtx, int *n, int *block_dimx, int *block_dimy) { nvtxRange nvrf(__func__); return AMGX_matrix_get_size_impl(mtx, n, block_dimx, block_dimy); } AMGX_RC AMGX_API AMGX_matrix_attach_geometry( AMGX_matrix_handle mtx, double *geox, double *geoy, double *geoz, int n) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_attach_geometry " ); Resources *resources; 
AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) int dimension = (geoz == NULL ? 2 : 3); AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ matrix_attach_geometry<CASE>(mtx, geox, geoy, geoz, n,dimension); \ break; \ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) //AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_attach_coloring( AMGX_matrix_handle mtx, int *row_coloring, int num_rows, int num_colors) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_attach_coloring " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ matrix_attach_coloring<CASE>(mtx, row_coloring, num_rows, num_colors); \ break; \ } AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_sort(AMGX_matrix_handle mtx) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_matrix_sort " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from<AMGX_matrix_handle>(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ rc0 = matrix_sort<CASE>(mtx); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } // previously: AMGX_vector_create(AMGX_vector_handle *ret, AMGX_Mode mode) AMGX_RC AMGX_vector_create_impl(AMGX_vector_handle *vec, AMGX_resources_handle rsc, AMGX_Mode mode) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_create " ); Resources *resources = NULL; AMGX_ERROR rc = AMGX_OK; AMGX_ERROR rc_vec = AMGX_OK; try { ResourceW c_r(rsc); if (!c_r.wrapped()) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } resources = c_r.wrapped().get(); cudaSetDevice(resources->getDevice(0)); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ auto* wvec = create_managed_mode_object<CASE,Vector,AMGX_vector_handle>(vec, mode); \ rc_vec = wvec->is_valid() ? 
AMGX_OK : AMGX_ERR_UNKNOWN; \ wvec->wrapped()->setResources(resources); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) AMGX_FORINTVEC_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc_vec, resources) return getCAPIerror_x(rc); } AMGX_RC AMGX_API AMGX_vector_create(AMGX_vector_handle *vec, AMGX_resources_handle rsc, AMGX_Mode mode) { nvtxRange nvrf(__func__); return AMGX_vector_create_impl(vec, rsc, mode); } AMGX_RC AMGX_vector_destroy_impl(AMGX_vector_handle vec) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_destroy " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(vec, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ cudaSetDevice(get_mode_object_from<CASE,Vector,AMGX_vector_handle>(vec)->getResources()->getDevice(0));\ remove_managed_object<CASE,Vector,AMGX_vector_handle>(vec);\ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) AMGX_FORINTVEC_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_vector_destroy(AMGX_vector_handle vec) { nvtxRange nvrf(__func__); return AMGX_vector_destroy_impl(vec); } AMGX_RC AMGX_vector_upload_impl(AMGX_vector_handle vec, int n, int block_dim, const void *data) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_upload " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(vec, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: {\ rc0 = vector_upload<CASE>(vec, n, block_dim, data); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_vector_upload(AMGX_vector_handle vec, int n, int block_dim, const void *data) { nvtxRange nvrf(__func__); return AMGX_vector_upload_impl(vec, n, block_dim, data); } AMGX_RC AMGX_vector_set_zero_impl(AMGX_vector_handle vec, int n, int block_dim) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_set_zero " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(vec, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; //since resize falls directly into the thrust we can only catch bad_alloc here: try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ rc0 = vector_set_zero<CASE>(vec, n, block_dim, resources); \ }\ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_vector_set_zero(AMGX_vector_handle vec, int n, int block_dim) { nvtxRange nvrf(__func__); return AMGX_vector_set_zero_impl(vec, n, block_dim); } AMGX_RC AMGX_API 
AMGX_vector_set_random(AMGX_vector_handle vec, int n) { nvtxRange nvrf(__func__); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(vec, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; //since resize falls directly into the thrust we can only catch bad_alloc here: try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ rc0 = vector_set_random<CASE>(vec, n, resources); \ }\ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; } AMGX_RC AMGX_vector_download_impl(const AMGX_vector_handle vec, void *data) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_download " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(vec, &resources)), NULL) //if (!c_vec || !c_vec->is_valid()) return AMGX_RC_BAD_PARAMETERS; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: {\ rc0 = vector_download_impl<CASE>(vec, data);\ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_vector_download(const AMGX_vector_handle vec, void *data) { nvtxRange nvrf(__func__); return AMGX_vector_download_impl(vec, data); } AMGX_RC AMGX_vector_get_size_impl(const AMGX_vector_handle vec, int *n, int *block_dim) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_get_size " ); Resources *resources; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromVectorHandle(vec, &resources)), NULL) //if (!c_vec || !c_vec->is_valid()) return AMGX_RC_BAD_PARAMETERS; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from<AMGX_vector_handle>(vec); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: {\ rc0 = vector_get_size<CASE>(vec, n, block_dim); \ }\ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_vector_get_size(const AMGX_vector_handle vec, int *n, int *block_dim) { nvtxRange nvrf(__func__); return AMGX_vector_get_size_impl(vec, n, block_dim); } #ifdef AMGX_WITH_MPI AMGX_RC AMGX_API AMGX_write_system_distributed(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, const AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_write_full_system " ); AMGX_Mode mode; Resources *resources = NULL; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { write_system_preamble(mtx, rhs, sol, resources, mode); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: {\ rc0 = mpi_write_system_distributed<CASE>(mtx, rhs, sol, filename, allocated_halo_depth, num_partitions, partition_sizes, partition_vector_size, partition_vector, 
rc); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } #else AMGX_RC AMGX_API AMGX_write_system_distributed(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, const AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector) { nvtxRange nvrf(__func__); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); return AMGX_RC_OK; } #endif AMGX_RC AMGX_API AMGX_write_system(const AMGX_matrix_handle mtx, const AMGX_vector_handle rhs, const AMGX_vector_handle sol, const char *filename) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_write_full_system " ); AMGX_Mode mode; Resources *resources = NULL; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { write_system_preamble(mtx, rhs, sol, resources, mode); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: {\ rc0 = write_system<CASE>(mtx, rhs, sol, filename, rc); \ } break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) //return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_solver_get_iterations_number(AMGX_solver_handle slv, int *n) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_get_iterations_number " ); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL) //if (!c_solver || !c_solver->is_valid()) return AMGX_RC_BAD_PARAMETERS; AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ solver_get_iterations_number<CASE>(slv, n); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: *n = -1; AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_solver_get_iteration_residual(AMGX_solver_handle slv, int it, int idx, double *res) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_get_iteration_residual " ); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL) //if (!c_solver || !c_solver->is_valid()) return AMGX_RC_BAD_PARAMETERS; *res = -1.; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ rc0 = solver_get_iteration_residual<CASE>(slv, it, idx, res); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_get_build_info_strings(char **version, char **date, char **time) { nvtxRange nvrf(__func__); *version = const_cast<char *>(__AMGX_BUILD_ID__); *date = const_cast<char *>(__AMGX_BUILD_DATE__); *time = const_cast<char *>(__AMGX_BUILD_TIME__); return AMGX_RC_OK; } AMGX_RC AMGX_pin_memory_impl(void *ptr, unsigned int bytes) { AMGX_CPU_PROFILER( 
"AMGX_pin_memory " ); if (ptr == 0) { return AMGX_RC_OK; } cudaError_t rc = cudaHostRegister(ptr, bytes, cudaHostRegisterMapped); if (cudaSuccess == rc) { return AMGX_RC_OK; } else AMGX_CHECK_API_ERROR(AMGX_ERR_CUDA_FAILURE, NULL) return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_pin_memory(void *ptr, unsigned int bytes) { nvtxRange nvrf(__func__); return AMGX_pin_memory_impl(ptr, bytes); } AMGX_RC AMGX_unpin_memory_impl(void *ptr) { AMGX_CPU_PROFILER( "AMGX_unpin_memory " ); if (ptr == 0) { return AMGX_RC_OK; } cudaError_t rc = cudaHostUnregister(ptr); if (cudaSuccess == rc) { return AMGX_RC_OK; } else AMGX_CHECK_API_ERROR(AMGX_ERR_CUDA_FAILURE, NULL) return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_unpin_memory(void *ptr) { nvtxRange nvrf(__func__); return AMGX_unpin_memory_impl(ptr); } AMGX_RC AMGX_API AMGX_solver_get_status(AMGX_solver_handle slv, AMGX_SOLVE_STATUS *st) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromSolverHandle(slv, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(slv); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ solver_get_status<CASE>(slv, st); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) return getCAPIerror_x(rc); } AMGX_RC AMGX_solver_register_print_callback(AMGX_print_callback func) { nvtxRange nvrf(__func__); return AMGX_register_print_callback(func); } AMGX_RC AMGX_register_print_callback(AMGX_print_callback func) { nvtxRange nvrf(__func__); amgx_output = func; return AMGX_RC_OK; } AMGX_RC AMGX_get_error_string(AMGX_RC err, char *buf, int buf_len) { nvtxRange nvrf(__func__); AMGX_GetErrorString(getAMGXerror(err), buf, buf_len); return AMGX_RC_OK; } AMGX_RC AMGX_install_signal_handler() { nvtxRange nvrf(__func__); SignalHandler::hook(); return AMGX_RC_OK; } AMGX_RC AMGX_reset_signal_handler() { nvtxRange nvrf(__func__); SignalHandler::unhook(); return AMGX_RC_OK; } AMGX_RC AMGX_get_api_version(int *major, int *minor) { nvtxRange nvrf(__func__); *major = __AMGX_API_VERSION_MAJOR; *minor = __AMGX_API_VERSION_MINOR; return AMGX_RC_OK; } AMGX_RC AMGX_matrix_get_nnz_impl(const AMGX_matrix_handle mtx, int *nnz) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) //if (!c_mtx || !c_mtx->is_valid()) return AMGX_RC_BAD_PARAMETERS; AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ *nnz = get_mode_object_from<CASE,Matrix,AMGX_matrix_handle>(mtx)->get_num_nz();} \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_get_nnz(const AMGX_matrix_handle mtx, int *nnz) { nvtxRange nvrf(__func__); return AMGX_matrix_get_nnz_impl(mtx, nnz); } AMGX_RC AMGX_matrix_download_all_impl(const AMGX_matrix_handle mtx, int *row_ptrs, int *col_indices, void *data, void **diag_data) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) //if (!c_mtx || !c_mtx->is_valid()) return AMGX_RC_BAD_PARAMETERS; AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { 
#define AMGX_CASE_LINE(CASE) case CASE: { \ matrix_download_all<CASE>(mtx, row_ptrs, col_indices, data, diag_data); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_download_all(const AMGX_matrix_handle mtx, int *row_ptrs, int *col_indices, void *data, void **diag_data) { nvtxRange nvrf(__func__); return AMGX_matrix_download_all_impl(mtx, row_ptrs, col_indices, data, diag_data); } AMGX_RC AMGX_API AMGX_vector_bind_impl(AMGX_vector_handle vec, const AMGX_matrix_handle matrix) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(matrix, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(matrix); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ vector_bind<CASE>(vec, matrix); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_vector_bind(AMGX_vector_handle vec, const AMGX_matrix_handle mtx) { nvtxRange nvrf(__func__); return AMGX_vector_bind_impl(vec, mtx); } #ifdef AMGX_WITH_MPI AMGX_RC AMGX_read_system_distributed_impl(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_read_system_distributed " ); std::stringstream msg; AMGX_Mode mode = AMGX_unset; unsigned int props = io_config::NONE; Resources *resources = NULL; AMGX_ERROR read_error = AMGX_OK; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { rc0 = read_system_preamble(mtx, rhs, sol, resources, mode, props); if (rc0 != AMGX_RC_OK) { return rc0; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) //WARNING: num_partitions (= # of ranks) might be set without anything else being set. Removing this error check for now. 
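/* partition_vector, when provided, assigns each global row to its owning rank:
   entry i is the rank of row i, partition_vector_size is the global number of rows,
   and (when partition_sizes is NULL) the number of partitions is recovered below as
   max(entry) + 1. For example, four rows split across two ranks would be
   { 0, 0, 1, 1 }. */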
//if (partition_sizes == NULL && num_partitions != 0 || partition_sizes != NULL && num_partitions == 0) // AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) if (partition_vector == NULL && partition_vector_size != 0 || partition_vector != NULL && partition_vector_size == 0) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) if (partition_vector == NULL && partition_sizes != NULL) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) int num_ranks = 1, part = 0; if (partition_sizes == NULL && partition_vector != NULL) { // solve for partition sizes, if they are not provided //num_partitions = std::max_element(partition_vector, partition_vector + partition_vector_size); num_partitions = 0; for (int i = 0; i < partition_vector_size; i++) { num_partitions = max(num_partitions, partition_vector[i]); } num_partitions++; msg << "Processing partition vector, consisting of " << partition_vector_size << " rows and " << num_partitions << " partitions\n"; //printf("Processing partition vector, consisting of %d rows and %d partitions\n", partition_vector_size, num_partitions); // change output } rc = AMGX_OK; rc0 = AMGX_RC_OK; try { switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: { \ rc0 = read_system_distributed<CASE>(mtx, rhs, sol, filename, allocated_halo_depth, num_partitions, partition_sizes, partition_vector_size, partition_vector, msg, num_ranks, resources, part, props, read_error); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) amgx_distributed_output(msg.str().c_str(), msg.str().length()); AMGX_CHECK_API_ERROR(rc, resources) AMGX_CHECK_API_ERROR(read_error, resources) return rc0; } #else AMGX_RC AMGX_read_system_distributed_impl(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector) { nvtxRange nvrf(__func__); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); return AMGX_RC_OK; } #endif AMGX_RC AMGX_matrix_set_boundary_separation_impl(AMGX_matrix_handle mtx, int boundary_separation) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) //if (!c_mtx) return AMGX_RC_BAD_PARAMETERS; AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ get_mode_object_from<CASE,Matrix,AMGX_matrix_handle>(mtx)->set_allow_boundary_separation(boundary_separation);\ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_set_boundary_separation(AMGX_matrix_handle mtx, int boundary_separation) { nvtxRange nvrf(__func__); return AMGX_matrix_set_boundary_separation_impl(mtx, boundary_separation); } AMGX_RC AMGX_read_system_maps_one_ring_impl(int *n, int *nnz, int *block_dimx, int *block_dimy, int **row_ptrs, int **col_indices, void **data, void **diag_data, void **rhs, void **sol, int *num_neighbors, int **neighbors, int **btl_sizes, int ***btl_maps, int **lth_sizes, int ***lth_maps, AMGX_resources_handle rsc, AMGX_Mode mode, const char *filename, int allocated_halo_depth, int 
num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector, int64_t **local_to_global_map) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_ERROR rc_rs = AMGX_OK; try { ResourceW c_r(rsc); if (!c_r.wrapped()) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } else { resources = c_r.wrapped().get(); } } AMGX_CATCHES(rc_rs) AMGX_CHECK_API_ERROR(rc_rs, resources) std::string solver_scope, solver_value; std::string precond_scope, precond_value; AlgorithmType algorithm_s, algorithm_p; resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope); algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope); resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope); algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope); if ((local_to_global_map == NULL) // means we're in AMGX_read_system_one_ring not in AMGX_read_system_global && algorithm_s == CLASSICAL && algorithm_p == CLASSICAL) { std::stringstream msg; msg << "CLASSICAL is not supported in AMGX_read_system_maps_one_ring.\n"; amgx_distributed_output(msg.str().c_str(), msg.str().length()); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) } // import data AMGX_matrix_handle A_part; AMGX_vector_handle b_dev, x_dev; AMGX_RC rc = AMGX_matrix_create_impl(&A_part, rsc, mode); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_matrix_set_boundary_separation_impl(A_part, 0); // switch off reordering, since will download later if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_vector_create_impl(&x_dev, rsc, mode); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_vector_create_impl(&b_dev, rsc, mode); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_read_system_distributed_impl(A_part, b_dev, x_dev, filename, allocated_halo_depth, num_partitions, partition_sizes, partition_vector_size, partition_vector); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_matrix_get_nnz_impl(A_part, nnz); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_matrix_get_size_impl(A_part, n, block_dimx, block_dimy); if (rc != AMGX_RC_OK) { return rc; } int x_sz, x_block_dim; rc = AMGX_vector_get_size_impl(x_dev, &x_sz, &x_block_dim); if (rc != AMGX_RC_OK) { return rc; } if (x_sz == 0) { std::stringstream msg; msg << "Initializing solution vector with zeroes...\n"; amgx_distributed_output(msg.str().c_str(), msg.str().length()); rc = AMGX_vector_set_zero_impl(x_dev, *n, *block_dimy); if (rc != AMGX_RC_OK) { return rc; } } int sizeof_m_val = ((AMGX_GET_MODE_VAL(AMGX_MatPrecision, mode) == AMGX_matDouble)) ? sizeof(double) : sizeof(float); int sizeof_v_val = ((AMGX_GET_MODE_VAL(AMGX_VecPrecision, mode) == AMGX_vecDouble)) ? 
sizeof(double) : sizeof(float); *sol = get_c_arr_mem_manager().allocate(sizeof_v_val * (*n) * (*block_dimx)); *rhs = get_c_arr_mem_manager().allocate(sizeof_v_val * (*n) * (*block_dimy)); rc = AMGX_vector_download_impl(b_dev, *rhs); if (rc != AMGX_RC_OK) { return rc; } //save partitioned vectors on host rc = AMGX_vector_download_impl(x_dev, *sol); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_vector_destroy_impl(x_dev); if (rc != AMGX_RC_OK) { return rc; } rc = AMGX_vector_destroy_impl(b_dev); if (rc != AMGX_RC_OK) { return rc; } int block_size = (*block_dimx) * (*block_dimy); *row_ptrs = (int *)get_c_arr_mem_manager().allocate((*n + 1) * sizeof(int)); *col_indices = (int *)get_c_arr_mem_manager().allocate((*nnz) * sizeof(int)); *data = get_c_arr_mem_manager().allocate((*nnz) * block_size * sizeof_m_val); *diag_data = NULL; // will be allocated in AMGX_matrix_download_all if the matrix has DIAG property // save matrix before reordering. rc = AMGX_matrix_download_all_impl(A_part, *row_ptrs, *col_indices, *data, diag_data); if (rc != AMGX_RC_OK) { return rc; } AMGX_ERROR nvrc = AMGX_OK; try { switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ read_system_maps_one_ring_impl<CASE>(A_part, num_neighbors, neighbors, btl_sizes, btl_maps, lth_sizes, lth_maps, local_to_global_map); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(nvrc) AMGX_CHECK_API_ERROR(nvrc, resources) rc = AMGX_matrix_destroy_impl(A_part); if (rc != AMGX_RC_OK) { return rc; } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_read_system_maps_one_ring(int *n, int *nnz, int *block_dimx, int *block_dimy, int **row_ptrs, int **col_indices, void **data, void **diag_data, void **rhs, void **sol, int *num_neighbors, int **neighbors, int **btl_sizes, int ***btl_maps, int **lth_sizes, int ***lth_maps, AMGX_resources_handle rsc, AMGX_Mode mode, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector) { nvtxRange nvrf(__func__); int64_t **local_to_global_map = NULL; return AMGX_read_system_maps_one_ring_impl(n, nnz, block_dimx, block_dimy, row_ptrs, col_indices, data, diag_data, rhs, sol, num_neighbors, neighbors, btl_sizes, btl_maps, lth_sizes, lth_maps, rsc, mode, filename, allocated_halo_depth, num_partitions, partition_sizes, partition_vector_size, partition_vector, local_to_global_map); } AMGX_RC AMGX_API AMGX_read_system_global(int *n, int *nnz, int *block_dimx, int *block_dimy, int **row_ptrs, void **col_indices_global, void **data, void **diag_data, void **rhs, void **sol, AMGX_resources_handle rsc, AMGX_Mode mode, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector) { nvtxRange nvrf(__func__); // TODO: we can avoid one-ring construction since we don't need it in this function // although the overhead is probably small and this won't be benchmarked anyways // so it's more convenient to just reuse AMGX_read_system_maps_one_ring int *col_indices; int *btl_sizes = NULL; int **btl_maps = NULL; int *lth_sizes = NULL; int **lth_maps = NULL; int num_neighbors; int *neighbors = NULL; int64_t *local_to_global_map = NULL; // set to flag the following function that we're coming from read_system_global (i.e. 
we need local_to_global_map values) AMGX_RC rc = AMGX_read_system_maps_one_ring_impl(n, nnz, block_dimx, block_dimy, row_ptrs, &col_indices, data, diag_data, rhs, sol, &num_neighbors, &neighbors, &btl_sizes, &btl_maps, &lth_sizes, &lth_maps, rsc, mode, filename, allocated_halo_depth, num_partitions, partition_sizes, partition_vector_size, partition_vector, &local_to_global_map); Resources *resources = NULL; AMGX_ERROR rc_rs = AMGX_OK; try { ResourceW c_r(rsc); if (!c_r.wrapped()) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } else { resources = c_r.wrapped().get(); } } AMGX_CATCHES(rc_rs) AMGX_CHECK_API_ERROR(rc_rs, resources) // get global number of rows int num_rows_global; int my_id; #ifdef AMGX_WITH_MPI MPI_Comm_rank(*resources->getMpiComm(), &my_id); MPI_Allreduce(&(*n), &num_rows_global, 1, MPI_INT, MPI_SUM, *resources->getMpiComm()); #else my_id = 0; num_rows_global = (*n); #endif // setup partition vector int num_ranks; #ifdef AMGX_WITH_MPI MPI_Comm_size(*resources->getMpiComm(), &num_ranks); #else num_ranks = 1; #endif int *partitionVec = (int *)get_c_arr_mem_manager().allocate(num_rows_global * sizeof(int)); if (partition_vector == NULL) { // initialize equal partitioning int *scanPartSize = (int *)get_c_arr_mem_manager().allocate((num_ranks + 1) * sizeof(int)); for (int p = 0; p < num_ranks; p++) { scanPartSize[p] = p * num_rows_global / num_ranks; } scanPartSize[num_ranks] = num_rows_global; int p = 0; for (int i = 0; i < num_rows_global; i++) { if (i >= scanPartSize[p + 1]) { p++; } partitionVec[i] = p; } } else { // use existing partition info for (int i = 0; i < num_rows_global; i++) { partitionVec[i] = partition_vector[i]; } } // allocate global indices array *col_indices_global = get_c_arr_mem_manager().allocate((*nnz) * sizeof(int64_t)); // global to local mapping for non-halo (interior) // compute partition offsets (based on number of elements per partition) int64_t *partition_offsets = get_c_arr_mem_manager().callocate<int64_t>(num_ranks + 1); for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; partition_offsets[pvi + 1]++; } thrust::inclusive_scan(partition_offsets, partition_offsets + num_ranks + 1, partition_offsets); // compute partition map (which tells you how the global elements are mapped into the partitions) int64_t *partition_map = get_c_arr_mem_manager().callocate<int64_t>(num_rows_global); for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; int64_t poi = partition_offsets[pvi]; partition_map[poi] = i; //increment used offset/counter for the next iteration partition_offsets[pvi]++; } // restore the offsets back to their original setting for (int i = 0; i < num_rows_global; i++) { int pvi = partitionVec[i]; partition_offsets[pvi]--; } // find global column indices, simply use local_to_global map for (int i = 0; i < (*nnz); i++) { int col = col_indices[i]; if (col >= (*n)) { ((int64_t *)*col_indices_global)[i] = partition_map[local_to_global_map[col - (*n)]]; } else { ((int64_t *)*col_indices_global)[i] = partition_map[partition_offsets[my_id] + col]; } } // free (temporary) host memory get_c_arr_mem_manager().free(partitionVec); get_c_arr_mem_manager().free(partition_offsets); get_c_arr_mem_manager().free(partition_map); return rc; } AMGX_RC AMGX_free_system_maps_one_ring_impl(int *row_ptrs, int *col_indices, void *data, void *diag_data, void *rhs, void *sol, int num_neighbors, int *neighbors, int *btl_sizes, int **btl_maps, int *lth_sizes, int **lth_maps) { nvtxRange nvrf(__func__); if (row_ptrs != NULL) { 
get_c_arr_mem_manager().free(row_ptrs); } if (col_indices != NULL) { get_c_arr_mem_manager().free(col_indices); } if (neighbors != NULL) { get_c_arr_mem_manager().free(neighbors); } if (btl_maps != NULL) { for (int i = 0; i < num_neighbors; i++) if (btl_maps[i] != NULL) { free(btl_maps[i]); } free(btl_maps); } if (lth_maps != NULL) { for (int i = 0; i < num_neighbors; i++) if (lth_maps[i] != NULL) { free(lth_maps[i]); } free(lth_maps); } if (btl_sizes != NULL) { free(btl_sizes); } if (lth_sizes != NULL) { free(lth_sizes); } if (data != NULL) { get_c_arr_mem_manager().free(data); } if (diag_data != NULL) { get_c_arr_mem_manager().free(diag_data); } if (rhs != NULL) { get_c_arr_mem_manager().free(rhs); } if (sol != NULL) { get_c_arr_mem_manager().free(sol); } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_free_system_maps_one_ring(int *row_ptrs, int *col_indices, void *data, void *diag_data, void *rhs, void *sol, int num_neighbors, int *neighbors, int *btl_sizes, int **btl_maps, int *lth_sizes, int **lth_maps) { nvtxRange nvrf(__func__); return AMGX_free_system_maps_one_ring_impl(row_ptrs, col_indices, data, diag_data, rhs, sol, num_neighbors, neighbors, btl_sizes, btl_maps, lth_sizes, lth_maps); } AMGX_RC AMGX_matrix_comm_from_maps_one_ring_impl( AMGX_matrix_handle mtx, int allocated_halo_depth, int max_num_neighbors, const int *neighbors, const int *send_sizes, int const **send_maps, const int *recv_sizes, int const **recv_maps) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; std::string solver_scope, solver_value; std::string precond_scope, precond_value; AlgorithmType algorithm_s, algorithm_p; resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope); algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope); resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope); algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope); if (algorithm_s == CLASSICAL && algorithm_p == CLASSICAL) { std::stringstream msg; msg << "CLASSICAL is not supported in AMGX_read_system_maps_one_ring.\n"; amgx_distributed_output(msg.str().c_str(), msg.str().length()); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources) } AMGX_RC rc0; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ rc0 = matrix_comm_from_maps_one_ring<CASE>(mtx, allocated_halo_depth, max_num_neighbors, neighbors, send_sizes, send_maps, recv_sizes, recv_maps, resources); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_comm_from_maps_one_ring(AMGX_matrix_handle mtx, int allocated_halo_depth, int max_num_neighbors, const int *neighbors, const int *send_sizes, int const **send_maps, const int *recv_sizes, int const **recv_maps) { nvtxRange nvrf(__func__); return AMGX_matrix_comm_from_maps_one_ring_impl(mtx, allocated_halo_depth, max_num_neighbors, neighbors, send_sizes, send_maps, recv_sizes, recv_maps); } #ifdef AMGX_WITH_MPI AMGX_RC AMGX_API AMGX_read_system_distributed(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, 
AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector)
{
    nvtxRange nvrf(__func__);
    return AMGX_read_system_distributed_impl(mtx, rhs, sol, filename, allocated_halo_depth, num_partitions, partition_sizes, partition_vector_size, partition_vector);
}
#else
AMGX_RC AMGX_API AMGX_read_system_distributed(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename, int allocated_halo_depth, int num_partitions, const int *partition_sizes, int partition_vector_size, const int *partition_vector)
{
    nvtxRange nvrf(__func__);
    AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL);
    return AMGX_RC_OK;
}
#endif

#ifdef AMGX_WITH_MPI
AMGX_RC AMGX_API AMGX_generate_distributed_poisson_7pt( AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, int allocated_halo_depth, int num_import_rings, int nx, int ny, int nz, int px, int py, int pz)
{
    nvtxRange nvrf(__func__);
    /* This routine creates a 3D (7-point) discretization of the Poisson operator.
       The discretization is performed on a 3D domain consisting of nx, ny and nz points in the x-, y- and z-dimension, respectively.
       This domain is then replicated px, py and pz times in the x-, y- and z-dimension, creating a large "cube" composed of
       smaller "sub-cubes", each of which is handled by a separate rank/processor. Later on, p, q and r indicate the position
       of a rank's "sub-cube" within the "cube". Finally, the rhs and the solution are set to vectors of ones and zeros, respectively. */
    AMGX_ERROR rc = AMGX_OK;
    AMGX_RC rc0 = AMGX_RC_OK;

    try
    {
        AMGX_Mode mode = get_mode_from(mtx);

        switch (mode)
        {
#define AMGX_CASE_LINE(CASE) case CASE: \
    { \
        rc0 = generate_distributed_poisson_7pt<CASE>(mtx, rhs, sol, allocated_halo_depth, num_import_rings, nx, ny, nz, px, py, pz); \
    } \
    break;
                AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
                AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE

            default:
                return AMGX_RC_BAD_MODE;
        }
    }

    AMGX_CATCHES(rc)
    return AMGX_OK != rc ? getCAPIerror_x(rc) : rc0;
}

AMGX_RC AMGX_API AMGX_matrix_upload_all_global( AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, int allocated_halo_depth, int num_import_rings, const int *partition_vector)
{
    nvtxRange nvrf(__func__);
    AMGX_ERROR rc = AMGX_OK;
    AMGX_RC rc0 = AMGX_RC_OK;

    try
    {
        AMGX_Mode mode = get_mode_from(mtx);

        switch (mode)
        {
#define AMGX_CASE_LINE(CASE) case CASE: \
    { \
        rc0 = matrix_upload_all_global<CASE>(mtx, n_global, n, nnz, block_dimx, block_dimy, row_ptrs, col_indices_global, data, diag_data, allocated_halo_depth, num_import_rings, partition_vector); \
    } \
    break;
                AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
                AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE

            default:
                return AMGX_RC_BAD_MODE;
        }
    }

    AMGX_CATCHES(rc)
    return AMGX_OK != rc ?
getCAPIerror_x(rc) : rc0; } AMGX_RC AMGX_API AMGX_matrix_upload_all_global_32(AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, int allocated_halo_depth, int num_import_rings, const int *partition_vector) { nvtxRange nvrf(__func__); AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ rc0 = matrix_upload_all_global_32<CASE>(mtx, n_global, n, nnz, block_dimx, block_dimy, row_ptrs, col_indices_global, data, diag_data, allocated_halo_depth, num_import_rings, partition_vector); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) return AMGX_OK != rc ? getCAPIerror_x(rc) : rc0; } AMGX_RC AMGX_API AMGX_matrix_upload_distributed(AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, AMGX_distribution_handle dist) { nvtxRange nvrf(__func__); AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ rc0 = matrix_upload_distributed<CASE>(mtx, n_global, n, nnz, block_dimx, block_dimy, row_ptrs, col_indices_global, data, diag_data, dist); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: return AMGX_RC_BAD_MODE; } } AMGX_CATCHES(rc) return AMGX_OK != rc ? getCAPIerror_x(rc) : rc0; } #else AMGX_RC AMGX_API AMGX_generate_distributed_poisson_7pt(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, int allocated_halo_depth, int num_import_rings, int nx, int ny, int nz, int px, int py, int pz) { nvtxRange nvrf(__func__); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_matrix_upload_all_global(AMGX_matrix_handle mtx, const int n_global, const int n, const int nnz, const int block_dimx, const int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, int allocated_halo_depth, int num_import_rings, const int *partition_vector) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_matrix_upload_all_global_int(AMGX_matrix_handle mtx, const int n_global, const int n, const int nnz, const int block_dimx, const int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, int allocated_halo_depth, int num_import_rings, const int *partition_vector) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_matrix_upload_distributed(AMGX_matrix_handle mtx, int n_global, int n, int nnz, int block_dimx, int block_dimy, const int *row_ptrs, const void *col_indices_global, const void *data, const void *diag_data, AMGX_distribution_handle distribution) { nvtxRange nvrf(__func__); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); return AMGX_RC_OK; } #endif AMGX_RC AMGX_API AMGX_matrix_comm_from_maps(AMGX_matrix_handle mtx, int allocated_halo_depth, int num_import_rings, int max_num_neighbors, const int *neighbors, const int *send_ptrs, int const *send_maps, const int *recv_ptrs, int const *recv_maps) { nvtxRange nvrf(__func__); Resources *resources = 
NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ rc0 = matrix_comm_from_maps<CASE>(mtx, allocated_halo_depth, num_import_rings, max_num_neighbors, neighbors, send_ptrs, send_maps, recv_ptrs, recv_maps); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return rc0; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_matrix_get_size_neighbors(const AMGX_matrix_handle mtx, int *num_neighbors) { nvtxRange nvrf(__func__); Resources *resources = NULL; AMGX_CHECK_API_ERROR(getAMGXerror(getResourcesFromMatrixHandle(mtx, &resources)), NULL) AMGX_ERROR rc = AMGX_OK; try { AMGX_Mode mode = get_mode_from(mtx); switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ { \ *num_neighbors = get_mode_object_from<CASE, Matrix, AMGX_matrix_handle>(mtx)->manager->num_neighbors(); \ } \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources) } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources) return AMGX_RC_OK; //return getCAPIerror(rc); } AMGX_RC AMGX_API AMGX_resources_create(AMGX_resources_handle *rsc, AMGX_config_handle cfg_h, void *comm, int device_num, const int *devices) { nvtxRange nvrf(__func__); if (rsc == NULL || devices == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } AMGX_ERROR rc = AMGX_OK; try { ConfigW cfg(cfg_h); auto *resources = create_managed_object<Resources, AMGX_resources_handle>(rsc, cfg.wrapped().get(), comm, device_num, devices); } AMGX_CATCHES(rc) if (rc != AMGX_OK) { AMGX_CHECK_API_ERROR(rc, NULL) } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_resources_create_simple(AMGX_resources_handle *rsc, AMGX_config_handle cfg_h) { nvtxRange nvrf(__func__); const size_t num_devices = 1; const int devices[1] = { 0 }; AMGX_ERROR rc = AMGX_OK; try { ConfigW cfg(cfg_h); auto *resources = create_managed_object<Resources, AMGX_resources_handle>(rsc, cfg.wrapped().get(), nullptr, num_devices, devices); } AMGX_CATCHES(rc) if (rc != AMGX_OK) { AMGX_CHECK_API_ERROR(rc, NULL) } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_resources_destroy(AMGX_resources_handle rsc) { nvtxRange nvrf(__func__); AMGX_ERROR rc = AMGX_OK; try { bool found = remove_managed_object<AMGX_resources_handle, Resources>(rsc); } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, NULL); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_distribution_create(AMGX_distribution_handle *dist, AMGX_config_handle cfg) { nvtxRange nvrf(__func__); AMGX_ERROR rc = AMGX_OK; try { auto *mdist = create_managed_object<MatrixDistribution, AMGX_distribution_handle>(dist); if (cfg != NULL) { int ring; rc = getAMGXerror(AMGX_config_get_default_number_of_rings(cfg, &ring)); mdist->wrapped()->setAllocatedHaloDepth(ring); mdist->wrapped()->setNumImportRings(ring); } } AMGX_CATCHES(rc); if (rc != AMGX_OK) { AMGX_CHECK_API_ERROR(rc, NULL); } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_distribution_destroy(AMGX_distribution_handle dist) { nvtxRange nvrf(__func__); AMGX_ERROR rc = AMGX_OK; try { if (!remove_managed_object<AMGX_distribution_handle, MatrixDistribution>(dist)) { rc = AMGX_ERR_BAD_PARAMETERS; } } AMGX_CATCHES(rc); if (rc != AMGX_OK) { AMGX_CHECK_API_ERROR(rc, NULL); } 
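/* Destroying the distribution ends the life cycle sketched below; all of these entry
   points are defined in this file, and the offsets array is the caller's per-rank
   global row offsets (length num_ranks + 1), a sketch rather than a prescription:

       AMGX_distribution_handle dist;
       AMGX_distribution_create(&dist, cfg);
       AMGX_distribution_set_partition_data(dist, AMGX_DIST_PARTITION_OFFSETS, offsets);
       AMGX_matrix_upload_distributed(A, n_global, n, nnz, 1, 1,
                                      row_ptrs, col_indices_global, values, NULL, dist);
       AMGX_distribution_destroy(dist);
*/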
return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_distribution_set_partition_data(AMGX_distribution_handle dist, AMGX_DIST_PARTITION_INFO info, const void *partition_data) { nvtxRange nvrf(__func__); if (dist == NULL || partition_data == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } typedef CWrapHandle<AMGX_distribution_handle, MatrixDistribution> MatrixDistributionW; MatrixDistributionW wrapDist(dist); MatrixDistribution &mdist = *wrapDist.wrapped(); switch (info) { case AMGX_DIST_PARTITION_VECTOR: mdist.setPartitionVec((const int*)partition_data); break; case AMGX_DIST_PARTITION_OFFSETS: mdist.setPartitionOffsets(partition_data); break; default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); break; } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_distribution_set_32bit_colindices(AMGX_distribution_handle dist, int use32bit) { nvtxRange nvrf(__func__); if (dist == NULL) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL); } typedef CWrapHandle<AMGX_distribution_handle, MatrixDistribution> MatrixDistributionW; MatrixDistributionW wrapDist(dist); MatrixDistribution &mdist = *wrapDist.wrapped(); mdist.set32BitColIndices(use32bit); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_read_system(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_read_system " ); Resources *resources = NULL; AMGX_Mode mode = AMGX_unset; unsigned int props = io_config::NONE; AMGX_ERROR read_error = AMGX_OK; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { rc0 = read_system_preamble(mtx, rhs, sol, resources, mode, props, true); if (rc0 != AMGX_RC_OK) { AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); //return AMGX_RC_BAD_PARAMETERS; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources); std::string solver_value, solver_scope; resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope); int rhs_from_a; resources->getResourcesConfig()->getParameter<int>("rhs_from_a", rhs_from_a, "default", solver_scope); if (rhs_from_a == 1) { io_config::addProps(io_config::GEN_RHS, props); } // reading solution approximation is not available now through C API int block_convert; resources->getResourcesConfig()->getParameter<int>("block_convert", block_convert, "default", solver_scope); AMG_Configuration t_amgx_cfg; AMG_Config *amgx_cfg = t_amgx_cfg.getConfigObject(); rc = AMGX_OK; rc0 = AMGX_RC_OK; try { switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ rc0 = read_system<CASE>(mtx, rhs, sol, filename, props, block_convert, *amgx_cfg, read_error); \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources); } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources); AMGX_CHECK_API_ERROR(read_error, resources); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_write_parameters_description(char *filename, AMGX_GET_PARAMS_DESC_FLAG mode) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_write_parameters_description " ); switch (mode) { case AMGX_GET_PARAMS_DESC_JSON_TO_FILE: // handles all exceptions inside AMGX_CHECK_API_ERROR(AMG_Config::write_parameters_description_json(filename), NULL); break; default: AMGX_CHECK_API_ERROR(AMGX_ERR_NOT_IMPLEMENTED, NULL); } return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_timer_create(const char *label, unsigned int flags) // create new timer { nvtxRange nvrf(__func__); if (getTimers().createTimer(label, flags)) 
AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //return AMGX_RC_BAD_PARAMETERS; return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_timer_start(const char *label) // start a timer if it's not started yet { nvtxRange nvrf(__func__); if (getTimers().startTimer(label)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //return AMGX_RC_BAD_PARAMETERS; return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_timer_elapsed(const char *label, double *sec) // timer continues to run, just get elapsed time since last start() call { nvtxRange nvrf(__func__); *sec = getTimers().elapsedTimer(label); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_timer_get_total(const char *label, double *sec) // retrieves timer's accumulated value { nvtxRange nvrf(__func__); *sec = getTimers().getTotalTime(label); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_timer_stop(const char *label, double *sec) // timer stops, get time since last start() call { nvtxRange nvrf(__func__); *sec = getTimers().stopTimer(label); return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_read_geometry( const char *fname, double **geo_x, double **geo_y, double **geo_z, int *dim, int *numrows) { nvtxRange nvrf(__func__); printf("Reading geometry from file: '%s'\n", fname); FILE *fin = fopen(fname, "r"); if (!fin) { printf("Error opening file '%s'\n", fname); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //exit(1); } int n, dimension; if (2 != fscanf(fin, "%d %d\n", &n, &dimension)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //exit(1); //errAndExit("Bad format\n"); *geo_x = (double *)get_c_arr_mem_manager().allocate(n * sizeof(double)); *geo_y = (double *)get_c_arr_mem_manager().allocate(n * sizeof(double)); if (dimension == 3) { *geo_z = (double *)get_c_arr_mem_manager().allocate(n * sizeof(double)); /* z coordinates are needed in 3D */ for (int i = 0; i < n; i ++) if (3 != fscanf(fin, "%lf %lf %lf\n", *geo_x + i, *geo_y + i, *geo_z + i)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //exit(1); //errAndExit("Bad format\n"); } else if (dimension == 2) { for (int i = 0; i < n; i ++) if ( 2 != fscanf(fin, "%lf %lf\n", *geo_x + i, *geo_y + i)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //exit(1); //errAndExit("Bad format\n"); } *dim = dimension; *numrows = n; return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_read_coloring( const char *fname, int **row_coloring, int *colored_rows, int *num_colors) { nvtxRange nvrf(__func__); printf("Reading coloring from file: '%s'\n", fname); FILE *fin = fopen(fname, "r"); if (!fin) { printf("Error opening file '%s'\n", fname); AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) } int n, colors_num; if (2 != fscanf(fin, "%d %d\n", &n, &colors_num)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //errAndExit("Bad format\n"); *row_coloring = (int *)get_c_arr_mem_manager().allocate(n * sizeof(int)); for (int i = 0; i < n; i ++) if ( 1 != fscanf(fin, "%d\n", *row_coloring + i)) AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, NULL) //errAndExit("Bad format\n"); *colored_rows = n; *num_colors = colors_num; return AMGX_RC_OK; } AMGX_RC AMGX_API AMGX_read_system_with_cfg(AMGX_matrix_handle mtx, AMGX_vector_handle rhs, AMGX_vector_handle sol, const char *filename, const AMGX_config_handle cfg_h) { nvtxRange nvrf(__func__); AMGX_CPU_PROFILER( "AMGX_vector_read_system " ); Resources *resources = NULL; AMGX_Mode mode = AMGX_unset; unsigned int props = io_config::NONE; AMGX_ERROR read_error = AMGX_OK; AMGX_ERROR rc = AMGX_OK; AMGX_RC rc0 = AMGX_RC_OK; try { rc0 = read_system_preamble(mtx, rhs, sol, resources, mode, props, true); if (rc0 != AMGX_RC_OK) {
AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_PARAMETERS, resources); //return AMGX_RC_BAD_PARAMETERS; } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources); std::string solver_value, solver_scope; resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope); int rhs_from_a; resources->getResourcesConfig()->getParameter<int>("rhs_from_a", rhs_from_a, "default", solver_scope); if (rhs_from_a == 1) { io_config::addProps(io_config::GEN_RHS, props); } // reading solution approximation is not available now through C API int block_convert; resources->getResourcesConfig()->getParameter<int>("block_convert", block_convert, "default", solver_scope); ConfigW cfg(cfg_h); AMG_Config *amgx_cfg = cfg.wrapped().get()->getConfigObject(); rc = AMGX_OK; rc0 = AMGX_RC_OK; try { switch (mode) { #define AMGX_CASE_LINE(CASE) case CASE: \ rc0 = read_system<CASE>(mtx, rhs, sol, filename, props, block_convert, *amgx_cfg, read_error); \ break; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE default: AMGX_CHECK_API_ERROR(AMGX_ERR_BAD_MODE, resources); } } AMGX_CATCHES(rc) AMGX_CHECK_API_ERROR(rc, resources); AMGX_CHECK_API_ERROR(read_error, resources); return AMGX_RC_OK; } int AMGX_Debug_get_resource_count(AMGX_resources_handle rsc) { return ((ResourceW *)rsc)->wrapped().use_count(); } }//extern "C"
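/*
 * Usage sketch (not part of the AMGX sources above): a minimal, hedged example of
 * how the timer and reader entry points defined in this file fit together.
 * The function name example_timed_read and its parameters are hypothetical;
 * the matrix/vector handles are assumed to have been created against a live
 * AMGX_resources_handle elsewhere, since AMGX_read_system() recovers the
 * Resources object from the matrix handle itself (see read_system_preamble).
 */
static AMGX_RC example_timed_read(AMGX_matrix_handle A,
                                  AMGX_vector_handle b,
                                  AMGX_vector_handle x,
                                  const char *matrix_file)
{
    double elapsed = 0.0;

    /* the timer wrappers above return AMGX_RC_OK and report failures through
       AMGX_CHECK_API_ERROR; lookup is by label via getTimers() */
    AMGX_timer_create("read_system", 0);
    AMGX_timer_start("read_system");

    AMGX_RC rc = AMGX_read_system(A, b, x, matrix_file);

    AMGX_timer_stop("read_system", &elapsed);
    printf("AMGX_read_system('%s'): rc = %d, %.3f s\n", matrix_file, (int)rc, elapsed);
    return rc;
}

/*
 * Companion sketch for the resources lifecycle implemented above: a single-GPU
 * Resources object bound to device 0 via AMGX_resources_create_simple(), torn
 * down with AMGX_resources_destroy(). The config handle is assumed to come
 * from the usual AMGX configuration routines defined elsewhere.
 */
static void example_resources_lifecycle(AMGX_config_handle cfg)
{
    AMGX_resources_handle rsc = NULL;
    if (AMGX_resources_create_simple(&rsc, cfg) == AMGX_RC_OK)
    {
        /* ... create matrices/vectors against rsc and solve here ... */
        AMGX_resources_destroy(rsc);
    }
}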
/*
* sha-512 kernel implementation. * * ==========================(LICENSE BEGIN)============================ * * Copyright (c) 2014 djm34 * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * ===========================(LICENSE END)============================= * * @author phm <phm@inbox.com> */ #include <stdio.h> #include <memory.h> #define USE_SHARED 1 #define SPH_C64(x) ((uint64_t)(x ## ULL)) #include "cuda_helper.h" __constant__ uint32_t pTarget[8]; __constant__ uint32_t c_data[48]; static uint32_t *d_found[MAX_GPUS]; #define SWAP64(u64) cuda_swab64(u64) #define SPH_ROTL32(x, n) ROTL32(x, n) #define SPH_ROTR32(x, n) ROTR32(x, n) static __constant__ uint64_t H_512[8] = { SPH_C64(0x6A09E667F3BCC908), SPH_C64(0xBB67AE8584CAA73B), SPH_C64(0x3C6EF372FE94F82B), SPH_C64(0xA54FF53A5F1D36F1), SPH_C64(0x510E527FADE682D1), SPH_C64(0x9B05688C2B3E6C1F), SPH_C64(0x1F83D9ABFB41BD6B), SPH_C64(0x5BE0CD19137E2179) }; static __constant__ uint64_t K_512[80] = { SPH_C64(0x428A2F98D728AE22), SPH_C64(0x7137449123EF65CD), SPH_C64(0xB5C0FBCFEC4D3B2F), SPH_C64(0xE9B5DBA58189DBBC), SPH_C64(0x3956C25BF348B538), SPH_C64(0x59F111F1B605D019), SPH_C64(0x923F82A4AF194F9B), SPH_C64(0xAB1C5ED5DA6D8118), SPH_C64(0xD807AA98A3030242), SPH_C64(0x12835B0145706FBE), SPH_C64(0x243185BE4EE4B28C), SPH_C64(0x550C7DC3D5FFB4E2), SPH_C64(0x72BE5D74F27B896F), SPH_C64(0x80DEB1FE3B1696B1), SPH_C64(0x9BDC06A725C71235), SPH_C64(0xC19BF174CF692694), SPH_C64(0xE49B69C19EF14AD2), SPH_C64(0xEFBE4786384F25E3), SPH_C64(0x0FC19DC68B8CD5B5), SPH_C64(0x240CA1CC77AC9C65), SPH_C64(0x2DE92C6F592B0275), SPH_C64(0x4A7484AA6EA6E483), SPH_C64(0x5CB0A9DCBD41FBD4), SPH_C64(0x76F988DA831153B5), SPH_C64(0x983E5152EE66DFAB), SPH_C64(0xA831C66D2DB43210), SPH_C64(0xB00327C898FB213F), SPH_C64(0xBF597FC7BEEF0EE4), SPH_C64(0xC6E00BF33DA88FC2), SPH_C64(0xD5A79147930AA725), SPH_C64(0x06CA6351E003826F), SPH_C64(0x142929670A0E6E70), SPH_C64(0x27B70A8546D22FFC), SPH_C64(0x2E1B21385C26C926), SPH_C64(0x4D2C6DFC5AC42AED), SPH_C64(0x53380D139D95B3DF), SPH_C64(0x650A73548BAF63DE), SPH_C64(0x766A0ABB3C77B2A8), SPH_C64(0x81C2C92E47EDAEE6), SPH_C64(0x92722C851482353B), SPH_C64(0xA2BFE8A14CF10364), SPH_C64(0xA81A664BBC423001), SPH_C64(0xC24B8B70D0F89791), SPH_C64(0xC76C51A30654BE30), SPH_C64(0xD192E819D6EF5218), SPH_C64(0xD69906245565A910), SPH_C64(0xF40E35855771202A), SPH_C64(0x106AA07032BBD1B8), SPH_C64(0x19A4C116B8D2D0C8), SPH_C64(0x1E376C085141AB53), SPH_C64(0x2748774CDF8EEB99), SPH_C64(0x34B0BCB5E19B48A8), SPH_C64(0x391C0CB3C5C95A63), SPH_C64(0x4ED8AA4AE3418ACB), 
SPH_C64(0x5B9CCA4F7763E373), SPH_C64(0x682E6FF3D6B2B8A3), SPH_C64(0x748F82EE5DEFB2FC), SPH_C64(0x78A5636F43172F60), SPH_C64(0x84C87814A1F0AB72), SPH_C64(0x8CC702081A6439EC), SPH_C64(0x90BEFFFA23631E28), SPH_C64(0xA4506CEBDE82BDE9), SPH_C64(0xBEF9A3F7B2C67915), SPH_C64(0xC67178F2E372532B), SPH_C64(0xCA273ECEEA26619C), SPH_C64(0xD186B8C721C0C207), SPH_C64(0xEADA7DD6CDE0EB1E), SPH_C64(0xF57D4F7FEE6ED178), SPH_C64(0x06F067AA72176FBA), SPH_C64(0x0A637DC5A2C898A6), SPH_C64(0x113F9804BEF90DAE), SPH_C64(0x1B710B35131C471B), SPH_C64(0x28DB77F523047D84), SPH_C64(0x32CAAB7B40C72493), SPH_C64(0x3C9EBE0A15C9BEBC), SPH_C64(0x431D67C49C100D4C), SPH_C64(0x4CC5D4BECB3E42B6), SPH_C64(0x597F299CFC657E2A), SPH_C64(0x5FCB6FAB3AD6FAEC), SPH_C64(0x6C44198C4A475817) }; //#define BSG5_0(x) (ROTR64(x, 28) ^ ROTR64(x, 34) ^ ROTR64(x, 39)) #define BSG5_0(x) xor3(ROTR64(x, 28),ROTR64(x, 34),ROTR64(x, 39)) //#define BSG5_1(x) (ROTR64(x, 14) ^ ROTR64(x, 18) ^ ROTR64(x, 41)) #define BSG5_1(x) xor3(ROTR64(x, 14),ROTR64(x, 18),ROTR64(x, 41)) //#define SSG5_0(x) (ROTR64(x, 1) ^ ROTR64(x, 8) ^ SPH_T64((x) >> 7)) #define SSG5_0(x) xor3(ROTR64(x, 1),ROTR64(x, 8),shr_t64(x,7)) //#define SSG5_1(x) (ROTR64(x, 19) ^ ROTR64(x, 61) ^ SPH_T64((x) >> 6)) #define SSG5_1(x) xor3(ROTR64(x, 19),ROTR64(x, 61),shr_t64(x,6)) //#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z)) #define CH(x, y, z) xandx(x,y,z) //#define MAJ(X, Y, Z) (((X) & (Y)) | (((X) | (Y)) & (Z))) #define MAJ(x, y, z) andor(x,y,z) #define SHA3_STEP(ord,r,i) { \ uint64_t T1, T2; \ int a = 8-ord; \ T1 = r[(7+a)&7] + BSG5_1(r[(4+a)&7]) + CH(r[(4+a)&7], r[(5+a)&7], r[(6+a)&7]) + K_512[i] + W[i]; \ T2 = BSG5_0(r[(0+a)&7]) + MAJ(r[(0+a)&7], r[(1+a)&7], r[(2+a)&7]); \ r[(3+a)&7] = r[(3+a)&7] + T1; \ r[(7+a)&7] = T1 + T2; \ } __device__ __forceinline__ uint64_t Tone(const uint64_t* sharedMemory, uint64_t r[8], uint64_t W[80], uint32_t a, uint32_t i) { uint64_t e = r[(4 + a) & 7]; //uint64_t BSG51 = ROTR64(e, 14) ^ ROTR64(e, 18) ^ ROTR64(e, 41); uint64_t BSG51 = xor3(ROTR64(e, 14), ROTR64(e, 18), ROTR64(e, 41)); //uint64_t CHl = (((f) ^ (g)) & (e)) ^ (g); uint64_t CHl = xandx(e, r[(5 + a) & 7], r[(6 + a) & 7]); uint64_t result = r[(7 + a) & 7] + BSG51 + CHl + sharedMemory[i] + W[i]; return result; } #define SHA3_STEP2(truc,ord,r,i) { \ uint64_t T1, T2; \ int a = 8-ord; \ T1 = Tone(truc,r,W,a,i); \ T2 = BSG5_0(r[(0+a)&7]) + MAJ(r[(0+a)&7], r[(1+a)&7], r[(2+a)&7]); \ r[(3+a)&7] = r[(3+a)&7] + T1; \ r[(7+a)&7] = T1 + T2; \ } #define TPB 128 __global__ __launch_bounds__(TPB, 6) void x17_sha512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint64_t *g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); // if (thread < threads) { uint64_t *inpHash = &g_hash[8 * thread]; uint64_t hash[8]; #pragma unroll for (int i = 0; i<8; i++) { hash[i] = inpHash[i]; } uint64_t W[80] = { 0 }; uint64_t r[8]; #pragma unroll for (int i = 0; i < 8; i++) { W[i] = SWAP64(hash[i]); r[i] = H_512[i]; } W[8] = 0x8000000000000000; W[15] = 0x0000000000000200; #pragma unroll 64 for (int i = 16; i < 80; i++) W[i] = SSG5_1(W[i - 2]) + W[i - 7] + SSG5_0(W[i - 15]) + W[i - 16]; #pragma unroll 10 for (int i = 0; i < 80; i += 8) { #pragma unroll 8 for (int ord = 0; ord<8; ord++) { SHA3_STEP2(K_512, ord, r, i + ord); } } #pragma unroll 8 for (int i = 0; i < 8; i++) { r[i] = r[i] + H_512[i]; } #pragma unroll 8 for (int i = 0; i<8; i++) { hash[i] = SWAP64(r[i]); } #pragma unroll 16 for (int u = 0; u < 8; u++) { inpHash[u] = hash[u]; } } } __host__ void x17_sha512_cpu_init(int thr_id, uint32_t 
threads) { // cudaMemcpyToSymbol(K_512,K512,80*sizeof(uint64_t),0, cudaMemcpyHostToDevice); // cudaMemcpyToSymbol(H_512,H512,sizeof(H512),0, cudaMemcpyHostToDevice); } __host__ void x17_sha512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash) { const uint32_t threadsperblock = TPB; dim3 grid((threads + threadsperblock-1)/threadsperblock); dim3 block(threadsperblock); x17_sha512_gpu_hash_64<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash ); } static const __constant__ uint64_t K512[80] = { 0x428A2F98D728AE22UL, 0x7137449123EF65CDUL, 0xB5C0FBCFEC4D3B2FUL, 0xE9B5DBA58189DBBCUL, 0x3956C25BF348B538UL, 0x59F111F1B605D019UL, 0x923F82A4AF194F9BUL, 0xAB1C5ED5DA6D8118UL, 0xD807AA98A3030242UL, 0x12835B0145706FBEUL, 0x243185BE4EE4B28CUL, 0x550C7DC3D5FFB4E2UL, 0x72BE5D74F27B896FUL, 0x80DEB1FE3B1696B1UL, 0x9BDC06A725C71235UL, 0xC19BF174CF692694UL, 0xE49B69C19EF14AD2UL, 0xEFBE4786384F25E3UL, 0x0FC19DC68B8CD5B5UL, 0x240CA1CC77AC9C65UL, 0x2DE92C6F592B0275UL, 0x4A7484AA6EA6E483UL, 0x5CB0A9DCBD41FBD4UL, 0x76F988DA831153B5UL, 0x983E5152EE66DFABUL, 0xA831C66D2DB43210UL, 0xB00327C898FB213FUL, 0xBF597FC7BEEF0EE4UL, 0xC6E00BF33DA88FC2UL, 0xD5A79147930AA725UL, 0x06CA6351E003826FUL, 0x142929670A0E6E70UL, 0x27B70A8546D22FFCUL, 0x2E1B21385C26C926UL, 0x4D2C6DFC5AC42AEDUL, 0x53380D139D95B3DFUL, 0x650A73548BAF63DEUL, 0x766A0ABB3C77B2A8UL, 0x81C2C92E47EDAEE6UL, 0x92722C851482353BUL, 0xA2BFE8A14CF10364UL, 0xA81A664BBC423001UL, 0xC24B8B70D0F89791UL, 0xC76C51A30654BE30UL, 0xD192E819D6EF5218UL, 0xD69906245565A910UL, 0xF40E35855771202AUL, 0x106AA07032BBD1B8UL, 0x19A4C116B8D2D0C8UL, 0x1E376C085141AB53UL, 0x2748774CDF8EEB99UL, 0x34B0BCB5E19B48A8UL, 0x391C0CB3C5C95A63UL, 0x4ED8AA4AE3418ACBUL, 0x5B9CCA4F7763E373UL, 0x682E6FF3D6B2B8A3UL, 0x748F82EE5DEFB2FCUL, 0x78A5636F43172F60UL, 0x84C87814A1F0AB72UL, 0x8CC702081A6439ECUL, 0x90BEFFFA23631E28UL, 0xA4506CEBDE82BDE9UL, 0xBEF9A3F7B2C67915UL, 0xC67178F2E372532BUL, 0xCA273ECEEA26619CUL, 0xD186B8C721C0C207UL, 0xEADA7DD6CDE0EB1EUL, 0xF57D4F7FEE6ED178UL, 0x06F067AA72176FBAUL, 0x0A637DC5A2C898A6UL, 0x113F9804BEF90DAEUL, 0x1B710B35131C471BUL, 0x28DB77F523047D84UL, 0x32CAAB7B40C72493UL, 0x3C9EBE0A15C9BEBCUL, 0x431D67C49C100D4CUL, 0x4CC5D4BECB3E42B6UL, 0x597F299CFC657E2AUL, 0x5FCB6FAB3AD6FAECUL, 0x6C44198C4A475817UL }; static const __constant__ uint64_t SHA512_INIT[8] = { 0x6A09E667F3BCC908UL, 0xBB67AE8584CAA73BUL, 0x3C6EF372FE94F82BUL, 0xA54FF53A5F1D36F1UL, 0x510E527FADE682D1UL, 0x9B05688C2B3E6C1FUL, 0x1F83D9ABFB41BD6BUL, 0x5BE0CD19137E2179UL }; //#define ROTR64(x, y) rotate((x), 64UL - (y)) #define nvidia_bitalign(src0,src1,src2) (((src0) << (src2)) | ((src1) >> (32-(src2)))) //uint64_t FAST_ROTR64_LO(const uint2 x, const uint32_t y) { return(devectorize(nvidia_bitalign(x.y, x.x, y))); } //uint64_t FAST_ROTR64_HI(const uint2 x, const uint32_t y) { return(devectorize(nvidia_bitalign(x.x, x.y, (y - 32)))); } /* #define BSG5_0(x) (FAST_ROTR64_LO(x, 28) ^ FAST_ROTR64_HI(x, 34) ^ FAST_ROTR64_HI(x, 39)) #define BSG5_1(x) (FAST_ROTR64_LO(x, 14) ^ FAST_ROTR64_LO(x, 18) ^ ROTR64(x, 41)) #define SSG5_0(x) (FAST_ROTR64_LO(x, 1) ^ FAST_ROTR64_LO(x, 8) ^ ((x) >> 7)) #define SSG5_1(x) (FAST_ROTR64_LO(x, 19) ^ FAST_ROTR64_HI(x, 61) ^ ((x) >> 6)) */ //#define BSG5_0(x) (FAST_ROTR64_LO(as_uint2(x), 28) ^ FAST_ROTR64_HI(as_uint2(x), 34) ^ FAST_ROTR64_HI(as_uint2(x), 39)) //#define BSG5_1(x) (FAST_ROTR64_LO(as_uint2(x), 14) ^ FAST_ROTR64_LO(as_uint2(x), 18) ^ FAST_ROTR64_HI(as_uint2(x), 41)) //#define SSG5_0(x) (FAST_ROTR64_LO(as_uint2(x), 1) ^ 
FAST_ROTR64_LO(as_uint2(x), 8) ^ ((x) >> 7)) //#define SSG5_1(x) (FAST_ROTR64_LO(as_uint2(x), 19) ^ FAST_ROTR64_HI(as_uint2(x), 61) ^ ((x) >> 6)) //#define CH(X, Y, Z) bitselect(Z, Y, X) //#define MAJ(X, Y, Z) CH((X ^ Z), Y, Z) __device__ __forceinline__ void SHA2_512_STEP2(const uint64_t *W, uint64_t ord, uint64_t *r, int i) { uint64_t T1; int x = 8 - ord; uint64_t a = r[x & 7], b = r[(x + 1) & 7], c = r[(x + 2) & 7], d = r[(x + 3) & 7]; uint64_t e = r[(x + 4) & 7], f = r[(x + 5) & 7], g = r[(x + 6) & 7], h = r[(x + 7) & 7]; T1 = h + BSG5_1(e) + CH(e, f, g) + W[i] + K512[i]; r[(3 + x) & 7] = d + T1; r[(7 + x) & 7] = T1 + BSG5_0(a) + MAJ(a, b, c); } __device__ __forceinline__ void SHA512Block(uint64_t *data, uint64_t *buf) { uint64_t W[80], r[8]; for (int i = 0; i < 8; ++i) r[i] = buf[i]; for (int i = 0; i < 16; ++i) W[i] = data[i]; #pragma unroll 4 for (int i = 16; i < 80; ++i) W[i] = SSG5_1(W[i - 2]) + W[i - 7] + SSG5_0(W[i - 15]) + W[i - 16]; #pragma unroll 1 for (int i = 0; i < 80; i += 8) { #pragma unroll for (int j = 0; j < 8; ++j) { SHA2_512_STEP2(W, j, r, i + j); } } for (int i = 0; i < 8; ++i) buf[i] += r[i]; } #define RIPEMD160_IN(x) W[x] // Round functions for RIPEMD-128 and RIPEMD-160. #define F1(x, y, z) ((x) ^ (y) ^ (z)) #define F2(x, y, z) ((((y) ^ (z)) & (x)) ^ (z)) #define F3(x, y, z) (((x) | ~(y)) ^ (z)) #define F4(x, y, z) ((((x) ^ (y)) & (z)) ^ (y)) #define F5(x, y, z) ((x) ^ ((y) | ~(z))) #define K11 0x00000000 #define K12 0x5A827999 #define K13 0x6ED9EBA1 #define K14 0x8F1BBCDC #define K15 0xA953FD4E #define K21 0x50A28BE6 #define K22 0x5C4DD124 #define K23 0x6D703EF3 #define K24 0x7A6D76E9 #define K25 0x00000000 const __constant__ uint32_t RMD160_IV[5] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 }; #define RR(a, b, c, d, e, f, s, r, k) do { \ const uint32_t rrtmp = a + f(b, c, d) + r + k; \ a = nvidia_bitalign(rrtmp, rrtmp, 32U - (uint32_t)s) + e; \ c = nvidia_bitalign(c, c, 32U - 10U); \ } while (0) #define ROUND1(a, b, c, d, e, f, s, r, k) \ RR(a ## 1, b ## 1, c ## 1, d ## 1, e ## 1, f, s, r, K1 ## k) #define ROUND2(a, b, c, d, e, f, s, r, k) \ RR(a ## 2, b ## 2, c ## 2, d ## 2, e ## 2, f, s, r, K2 ## k) /* * This macro defines the body for a RIPEMD-160 compression function * implementation. The "in" parameter should evaluate, when applied to a * numerical input parameter from 0 to 15, to an expression which yields * the corresponding input block. The "h" parameter should evaluate to * an array or pointer expression designating the array of 5 words which * contains the input and output of the compression function. 
*/ //#define RIPEMD160_ROUND_BODY(in, h) do { \ uint A1, B1, C1, D1, E1; \ uint A2, B2, C2, D2, E2; \ uint tmp; \ \ A1 = A2 = (h)[0]; \ B1 = B2 = (h)[1]; \ C1 = C2 = (h)[2]; \ D1 = D2 = (h)[3]; \ E1 = E2 = (h)[4]; \ \ ROUND1(A, B, C, D, E, F1, 11, (in)[ 0], 1); \ ROUND1(E, A, B, C, D, F1, 14, (in)[ 1], 1); \ ROUND1(D, E, A, B, C, F1, 15, (in)[ 2], 1); \ ROUND1(C, D, E, A, B, F1, 12, (in)[ 3], 1); \ ROUND1(B, C, D, E, A, F1, 5, (in)[ 4], 1); \ ROUND1(A, B, C, D, E, F1, 8, (in)[ 5], 1); \ ROUND1(E, A, B, C, D, F1, 7, (in)[ 6], 1); \ ROUND1(D, E, A, B, C, F1, 9, (in)[ 7], 1); \ ROUND1(C, D, E, A, B, F1, 11, (in)[ 8], 1); \ ROUND1(B, C, D, E, A, F1, 13, (in)[ 9], 1); \ ROUND1(A, B, C, D, E, F1, 14, (in)[10], 1); \ ROUND1(E, A, B, C, D, F1, 15, (in)[11], 1); \ ROUND1(D, E, A, B, C, F1, 6, (in)[12], 1); \ ROUND1(C, D, E, A, B, F1, 7, (in)[13], 1); \ ROUND1(B, C, D, E, A, F1, 9, (in)[14], 1); \ ROUND1(A, B, C, D, E, F1, 8, (in)[15], 1); \ \ ROUND1(E, A, B, C, D, F2, 7, (in)[ 7], 2); \ ROUND1(D, E, A, B, C, F2, 6, (in)[ 4], 2); \ ROUND1(C, D, E, A, B, F2, 8, (in)[13], 2); \ ROUND1(B, C, D, E, A, F2, 13, (in)[ 1], 2); \ ROUND1(A, B, C, D, E, F2, 11, (in)[10], 2); \ ROUND1(E, A, B, C, D, F2, 9, (in)[ 6], 2); \ ROUND1(D, E, A, B, C, F2, 7, (in)[15], 2); \ ROUND1(C, D, E, A, B, F2, 15, (in)[ 3], 2); \ ROUND1(B, C, D, E, A, F2, 7, (in)[12], 2); \ ROUND1(A, B, C, D, E, F2, 12, (in)[ 0], 2); \ ROUND1(E, A, B, C, D, F2, 15, (in)[ 9], 2); \ ROUND1(D, E, A, B, C, F2, 9, (in)[ 5], 2); \ ROUND1(C, D, E, A, B, F2, 11, (in)[ 2], 2); \ ROUND1(B, C, D, E, A, F2, 7, (in)[14], 2); \ ROUND1(A, B, C, D, E, F2, 13, (in)[11], 2); \ ROUND1(E, A, B, C, D, F2, 12, (in)[ 8], 2); \ \ ROUND1(D, E, A, B, C, F3, 11, (in)[ 3], 3); \ ROUND1(C, D, E, A, B, F3, 13, (in)[10], 3); \ ROUND1(B, C, D, E, A, F3, 6, (in)[14], 3); \ ROUND1(A, B, C, D, E, F3, 7, (in)[ 4], 3); \ ROUND1(E, A, B, C, D, F3, 14, (in)[ 9], 3); \ ROUND1(D, E, A, B, C, F3, 9, (in)[15], 3); \ ROUND1(C, D, E, A, B, F3, 13, (in)[ 8], 3); \ ROUND1(B, C, D, E, A, F3, 15, (in)[ 1], 3); \ ROUND1(A, B, C, D, E, F3, 14, (in)[ 2], 3); \ ROUND1(E, A, B, C, D, F3, 8, (in)[ 7], 3); \ ROUND1(D, E, A, B, C, F3, 13, (in)[ 0], 3); \ ROUND1(C, D, E, A, B, F3, 6, (in)[ 6], 3); \ ROUND1(B, C, D, E, A, F3, 5, (in)[13], 3); \ ROUND1(A, B, C, D, E, F3, 12, (in)[11], 3); \ ROUND1(E, A, B, C, D, F3, 7, (in)[ 5], 3); \ ROUND1(D, E, A, B, C, F3, 5, (in)[12], 3); \ \ ROUND1(C, D, E, A, B, F4, 11, (in)[ 1], 4); \ ROUND1(B, C, D, E, A, F4, 12, (in)[ 9], 4); \ ROUND1(A, B, C, D, E, F4, 14, (in)[11], 4); \ ROUND1(E, A, B, C, D, F4, 15, (in)[10], 4); \ ROUND1(D, E, A, B, C, F4, 14, (in)[ 0], 4); \ ROUND1(C, D, E, A, B, F4, 15, (in)[ 8], 4); \ ROUND1(B, C, D, E, A, F4, 9, (in)[12], 4); \ ROUND1(A, B, C, D, E, F4, 8, (in)[ 4], 4); \ ROUND1(E, A, B, C, D, F4, 9, (in)[13], 4); \ ROUND1(D, E, A, B, C, F4, 14, (in)[ 3], 4); \ ROUND1(C, D, E, A, B, F4, 5, (in)[ 7], 4); \ ROUND1(B, C, D, E, A, F4, 6, (in)[15], 4); \ ROUND1(A, B, C, D, E, F4, 8, (in)[14], 4); \ ROUND1(E, A, B, C, D, F4, 6, (in)[ 5], 4); \ ROUND1(D, E, A, B, C, F4, 5, (in)[ 6], 4); \ ROUND1(C, D, E, A, B, F4, 12, (in)[ 2], 4); \ \ ROUND1(B, C, D, E, A, F5, 9, (in)[ 4], 5); \ ROUND1(A, B, C, D, E, F5, 15, (in)[ 0], 5); \ ROUND1(E, A, B, C, D, F5, 5, (in)[ 5], 5); \ ROUND1(D, E, A, B, C, F5, 11, (in)[ 9], 5); \ ROUND1(C, D, E, A, B, F5, 6, (in)[ 7], 5); \ ROUND1(B, C, D, E, A, F5, 8, (in)[12], 5); \ ROUND1(A, B, C, D, E, F5, 13, (in)[ 2], 5); \ ROUND1(E, A, B, C, D, F5, 12, (in)[10], 5); \ ROUND1(D, E, A, B, C, F5, 5, (in)[14], 5); \ 
ROUND1(C, D, E, A, B, F5, 12, (in)[ 1], 5); \ ROUND1(B, C, D, E, A, F5, 13, (in)[ 3], 5); \ ROUND1(A, B, C, D, E, F5, 14, (in)[ 8], 5); \ ROUND1(E, A, B, C, D, F5, 11, (in)[11], 5); \ ROUND1(D, E, A, B, C, F5, 8, (in)[ 6], 5); \ ROUND1(C, D, E, A, B, F5, 5, (in)[15], 5); \ ROUND1(B, C, D, E, A, F5, 6, (in)[13], 5); \ \ ROUND2(A, B, C, D, E, F5, 8, (in)[ 5], 1); \ ROUND2(E, A, B, C, D, F5, 9, (in)[14], 1); \ ROUND2(D, E, A, B, C, F5, 9, (in)[ 7], 1); \ ROUND2(C, D, E, A, B, F5, 11, (in)[ 0], 1); \ ROUND2(B, C, D, E, A, F5, 13, (in)[ 9], 1); \ ROUND2(A, B, C, D, E, F5, 15, (in)[ 2], 1); \ ROUND2(E, A, B, C, D, F5, 15, (in)[11], 1); \ ROUND2(D, E, A, B, C, F5, 5, (in)[ 4], 1); \ ROUND2(C, D, E, A, B, F5, 7, (in)[13], 1); \ ROUND2(B, C, D, E, A, F5, 7, (in)[ 6], 1); \ ROUND2(A, B, C, D, E, F5, 8, (in)[15], 1); \ ROUND2(E, A, B, C, D, F5, 11, (in)[ 8], 1); \ ROUND2(D, E, A, B, C, F5, 14, (in)[ 1], 1); \ ROUND2(C, D, E, A, B, F5, 14, (in)[10], 1); \ ROUND2(B, C, D, E, A, F5, 12, (in)[ 3], 1); \ ROUND2(A, B, C, D, E, F5, 6, (in)[12], 1); \ \ ROUND2(E, A, B, C, D, F4, 9, (in)[ 6], 2); \ ROUND2(D, E, A, B, C, F4, 13, (in)[11], 2); \ ROUND2(C, D, E, A, B, F4, 15, (in)[ 3], 2); \ ROUND2(B, C, D, E, A, F4, 7, (in)[ 7], 2); \ ROUND2(A, B, C, D, E, F4, 12, (in)[ 0], 2); \ ROUND2(E, A, B, C, D, F4, 8, (in)[13], 2); \ ROUND2(D, E, A, B, C, F4, 9, (in)[ 5], 2); \ ROUND2(C, D, E, A, B, F4, 11, (in)[10], 2); \ ROUND2(B, C, D, E, A, F4, 7, (in)[14], 2); \ ROUND2(A, B, C, D, E, F4, 7, (in)[15], 2); \ ROUND2(E, A, B, C, D, F4, 12, (in)[ 8], 2); \ ROUND2(D, E, A, B, C, F4, 7, (in)[12], 2); \ ROUND2(C, D, E, A, B, F4, 6, (in)[ 4], 2); \ ROUND2(B, C, D, E, A, F4, 15, (in)[ 9], 2); \ ROUND2(A, B, C, D, E, F4, 13, (in)[ 1], 2); \ ROUND2(E, A, B, C, D, F4, 11, (in)[ 2], 2); \ \ ROUND2(D, E, A, B, C, F3, 9, (in)[15], 3); \ ROUND2(C, D, E, A, B, F3, 7, (in)[ 5], 3); \ ROUND2(B, C, D, E, A, F3, 15, (in)[ 1], 3); \ ROUND2(A, B, C, D, E, F3, 11, (in)[ 3], 3); \ ROUND2(E, A, B, C, D, F3, 8, (in)[ 7], 3); \ ROUND2(D, E, A, B, C, F3, 6, (in)[14], 3); \ ROUND2(C, D, E, A, B, F3, 6, (in)[ 6], 3); \ ROUND2(B, C, D, E, A, F3, 14, (in)[ 9], 3); \ ROUND2(A, B, C, D, E, F3, 12, (in)[11], 3); \ ROUND2(E, A, B, C, D, F3, 13, (in)[ 8], 3); \ ROUND2(D, E, A, B, C, F3, 5, (in)[12], 3); \ ROUND2(C, D, E, A, B, F3, 14, (in)[ 2], 3); \ ROUND2(B, C, D, E, A, F3, 13, (in)[10], 3); \ ROUND2(A, B, C, D, E, F3, 13, (in)[ 0], 3); \ ROUND2(E, A, B, C, D, F3, 7, (in)[ 4], 3); \ ROUND2(D, E, A, B, C, F3, 5, (in)[13], 3); \ \ ROUND2(C, D, E, A, B, F2, 15, (in)[ 8], 4); \ ROUND2(B, C, D, E, A, F2, 5, (in)[ 6], 4); \ ROUND2(A, B, C, D, E, F2, 8, (in)[ 4], 4); \ ROUND2(E, A, B, C, D, F2, 11, (in)[ 1], 4); \ ROUND2(D, E, A, B, C, F2, 14, (in)[ 3], 4); \ ROUND2(C, D, E, A, B, F2, 14, (in)[11], 4); \ ROUND2(B, C, D, E, A, F2, 6, (in)[15], 4); \ ROUND2(A, B, C, D, E, F2, 14, (in)[ 0], 4); \ ROUND2(E, A, B, C, D, F2, 6, (in)[ 5], 4); \ ROUND2(D, E, A, B, C, F2, 9, (in)[12], 4); \ ROUND2(C, D, E, A, B, F2, 12, (in)[ 2], 4); \ ROUND2(B, C, D, E, A, F2, 9, (in)[13], 4); \ ROUND2(A, B, C, D, E, F2, 12, (in)[ 9], 4); \ ROUND2(E, A, B, C, D, F2, 5, (in)[ 7], 4); \ ROUND2(D, E, A, B, C, F2, 15, (in)[10], 4); \ ROUND2(C, D, E, A, B, F2, 8, (in)[14], 4); \ \ ROUND2(B, C, D, E, A, F1, 8, (in)[12], 5); \ ROUND2(A, B, C, D, E, F1, 5, (in)[15], 5); \ ROUND2(E, A, B, C, D, F1, 12, (in)[10], 5); \ ROUND2(D, E, A, B, C, F1, 9, (in)[ 4], 5); \ ROUND2(C, D, E, A, B, F1, 12, (in)[ 1], 5); \ ROUND2(B, C, D, E, A, F1, 5, (in)[ 5], 5); \ ROUND2(A, B, C, D, E, F1, 14, (in)[ 8], 
5); \ ROUND2(E, A, B, C, D, F1, 6, (in)[ 7], 5); \ ROUND2(D, E, A, B, C, F1, 8, (in)[ 6], 5); \ ROUND2(C, D, E, A, B, F1, 13, (in)[ 2], 5); \ ROUND2(B, C, D, E, A, F1, 6, (in)[13], 5); \ ROUND2(A, B, C, D, E, F1, 5, (in)[14], 5); \ ROUND2(E, A, B, C, D, F1, 15, (in)[ 0], 5); \ ROUND2(D, E, A, B, C, F1, 13, (in)[ 3], 5); \ ROUND2(C, D, E, A, B, F1, 11, (in)[ 9], 5); \ ROUND2(B, C, D, E, A, F1, 11, (in)[11], 5); \ \ tmp = (h)[1] + C1 + D2; \ (h)[1] = (h)[2] + D1 + E2; \ (h)[2] = (h)[3] + E1 + A2; \ (h)[3] = (h)[4] + A1 + B2; \ (h)[4] = (h)[0] + B1 + C2; \ (h)[0] = tmp; \ } while (0) void __device__ __forceinline__ RIPEMD160_ROUND_BODY(uint32_t *in, uint32_t *h) { uint32_t A1, B1, C1, D1, E1; uint32_t A2, B2, C2, D2, E2; uint32_t tmp; A1 = A2 = (h)[0]; B1 = B2 = (h)[1]; C1 = C2 = (h)[2]; D1 = D2 = (h)[3]; E1 = E2 = (h)[4]; ROUND1(A, B, C, D, E, F1, 11, (in)[0], 1); ROUND1(E, A, B, C, D, F1, 14, (in)[1], 1); ROUND1(D, E, A, B, C, F1, 15, (in)[2], 1); ROUND1(C, D, E, A, B, F1, 12, (in)[3], 1); ROUND1(B, C, D, E, A, F1, 5, (in)[4], 1); ROUND1(A, B, C, D, E, F1, 8, (in)[5], 1); ROUND1(E, A, B, C, D, F1, 7, (in)[6], 1); ROUND1(D, E, A, B, C, F1, 9, (in)[7], 1); ROUND1(C, D, E, A, B, F1, 11, (in)[8], 1); ROUND1(B, C, D, E, A, F1, 13, (in)[9], 1); ROUND1(A, B, C, D, E, F1, 14, (in)[10], 1); ROUND1(E, A, B, C, D, F1, 15, (in)[11], 1); ROUND1(D, E, A, B, C, F1, 6, (in)[12], 1); ROUND1(C, D, E, A, B, F1, 7, (in)[13], 1); ROUND1(B, C, D, E, A, F1, 9, (in)[14], 1); ROUND1(A, B, C, D, E, F1, 8, (in)[15], 1); ROUND1(E, A, B, C, D, F2, 7, (in)[7], 2); ROUND1(D, E, A, B, C, F2, 6, (in)[4], 2); ROUND1(C, D, E, A, B, F2, 8, (in)[13], 2); ROUND1(B, C, D, E, A, F2, 13, (in)[1], 2); ROUND1(A, B, C, D, E, F2, 11, (in)[10], 2); ROUND1(E, A, B, C, D, F2, 9, (in)[6], 2); ROUND1(D, E, A, B, C, F2, 7, (in)[15], 2); ROUND1(C, D, E, A, B, F2, 15, (in)[3], 2); ROUND1(B, C, D, E, A, F2, 7, (in)[12], 2); ROUND1(A, B, C, D, E, F2, 12, (in)[0], 2); ROUND1(E, A, B, C, D, F2, 15, (in)[9], 2); ROUND1(D, E, A, B, C, F2, 9, (in)[5], 2); ROUND1(C, D, E, A, B, F2, 11, (in)[2], 2); ROUND1(B, C, D, E, A, F2, 7, (in)[14], 2); ROUND1(A, B, C, D, E, F2, 13, (in)[11], 2); ROUND1(E, A, B, C, D, F2, 12, (in)[8], 2); ROUND1(D, E, A, B, C, F3, 11, (in)[3], 3); ROUND1(C, D, E, A, B, F3, 13, (in)[10], 3); ROUND1(B, C, D, E, A, F3, 6, (in)[14], 3); ROUND1(A, B, C, D, E, F3, 7, (in)[4], 3); ROUND1(E, A, B, C, D, F3, 14, (in)[9], 3); ROUND1(D, E, A, B, C, F3, 9, (in)[15], 3); ROUND1(C, D, E, A, B, F3, 13, (in)[8], 3); ROUND1(B, C, D, E, A, F3, 15, (in)[1], 3); ROUND1(A, B, C, D, E, F3, 14, (in)[2], 3); ROUND1(E, A, B, C, D, F3, 8, (in)[7], 3); ROUND1(D, E, A, B, C, F3, 13, (in)[0], 3); ROUND1(C, D, E, A, B, F3, 6, (in)[6], 3); ROUND1(B, C, D, E, A, F3, 5, (in)[13], 3); ROUND1(A, B, C, D, E, F3, 12, (in)[11], 3); ROUND1(E, A, B, C, D, F3, 7, (in)[5], 3); ROUND1(D, E, A, B, C, F3, 5, (in)[12], 3); ROUND1(C, D, E, A, B, F4, 11, (in)[1], 4); ROUND1(B, C, D, E, A, F4, 12, (in)[9], 4); ROUND1(A, B, C, D, E, F4, 14, (in)[11], 4); ROUND1(E, A, B, C, D, F4, 15, (in)[10], 4); ROUND1(D, E, A, B, C, F4, 14, (in)[0], 4); ROUND1(C, D, E, A, B, F4, 15, (in)[8], 4); ROUND1(B, C, D, E, A, F4, 9, (in)[12], 4); ROUND1(A, B, C, D, E, F4, 8, (in)[4], 4); ROUND1(E, A, B, C, D, F4, 9, (in)[13], 4); ROUND1(D, E, A, B, C, F4, 14, (in)[3], 4); ROUND1(C, D, E, A, B, F4, 5, (in)[7], 4); ROUND1(B, C, D, E, A, F4, 6, (in)[15], 4); ROUND1(A, B, C, D, E, F4, 8, (in)[14], 4); ROUND1(E, A, B, C, D, F4, 6, (in)[5], 4); ROUND1(D, E, A, B, C, F4, 5, (in)[6], 4); ROUND1(C, D, E, A, B, 
F4, 12, (in)[2], 4); ROUND1(B, C, D, E, A, F5, 9, (in)[4], 5); ROUND1(A, B, C, D, E, F5, 15, (in)[0], 5); ROUND1(E, A, B, C, D, F5, 5, (in)[5], 5); ROUND1(D, E, A, B, C, F5, 11, (in)[9], 5); ROUND1(C, D, E, A, B, F5, 6, (in)[7], 5); ROUND1(B, C, D, E, A, F5, 8, (in)[12], 5); ROUND1(A, B, C, D, E, F5, 13, (in)[2], 5); ROUND1(E, A, B, C, D, F5, 12, (in)[10], 5); ROUND1(D, E, A, B, C, F5, 5, (in)[14], 5); ROUND1(C, D, E, A, B, F5, 12, (in)[1], 5); ROUND1(B, C, D, E, A, F5, 13, (in)[3], 5); ROUND1(A, B, C, D, E, F5, 14, (in)[8], 5); ROUND1(E, A, B, C, D, F5, 11, (in)[11], 5); ROUND1(D, E, A, B, C, F5, 8, (in)[6], 5); ROUND1(C, D, E, A, B, F5, 5, (in)[15], 5); ROUND1(B, C, D, E, A, F5, 6, (in)[13], 5); ROUND2(A, B, C, D, E, F5, 8, (in)[5], 1); ROUND2(E, A, B, C, D, F5, 9, (in)[14], 1); ROUND2(D, E, A, B, C, F5, 9, (in)[7], 1); ROUND2(C, D, E, A, B, F5, 11, (in)[0], 1); ROUND2(B, C, D, E, A, F5, 13, (in)[9], 1); ROUND2(A, B, C, D, E, F5, 15, (in)[2], 1); ROUND2(E, A, B, C, D, F5, 15, (in)[11], 1); ROUND2(D, E, A, B, C, F5, 5, (in)[4], 1); ROUND2(C, D, E, A, B, F5, 7, (in)[13], 1); ROUND2(B, C, D, E, A, F5, 7, (in)[6], 1); ROUND2(A, B, C, D, E, F5, 8, (in)[15], 1); ROUND2(E, A, B, C, D, F5, 11, (in)[8], 1); ROUND2(D, E, A, B, C, F5, 14, (in)[1], 1); ROUND2(C, D, E, A, B, F5, 14, (in)[10], 1); ROUND2(B, C, D, E, A, F5, 12, (in)[3], 1); ROUND2(A, B, C, D, E, F5, 6, (in)[12], 1); ROUND2(E, A, B, C, D, F4, 9, (in)[6], 2); ROUND2(D, E, A, B, C, F4, 13, (in)[11], 2); ROUND2(C, D, E, A, B, F4, 15, (in)[3], 2); ROUND2(B, C, D, E, A, F4, 7, (in)[7], 2); ROUND2(A, B, C, D, E, F4, 12, (in)[0], 2); ROUND2(E, A, B, C, D, F4, 8, (in)[13], 2); ROUND2(D, E, A, B, C, F4, 9, (in)[5], 2); ROUND2(C, D, E, A, B, F4, 11, (in)[10], 2); ROUND2(B, C, D, E, A, F4, 7, (in)[14], 2); ROUND2(A, B, C, D, E, F4, 7, (in)[15], 2); ROUND2(E, A, B, C, D, F4, 12, (in)[8], 2); ROUND2(D, E, A, B, C, F4, 7, (in)[12], 2); ROUND2(C, D, E, A, B, F4, 6, (in)[4], 2); ROUND2(B, C, D, E, A, F4, 15, (in)[9], 2); ROUND2(A, B, C, D, E, F4, 13, (in)[1], 2); ROUND2(E, A, B, C, D, F4, 11, (in)[2], 2); ROUND2(D, E, A, B, C, F3, 9, (in)[15], 3); ROUND2(C, D, E, A, B, F3, 7, (in)[5], 3); ROUND2(B, C, D, E, A, F3, 15, (in)[1], 3); ROUND2(A, B, C, D, E, F3, 11, (in)[3], 3); ROUND2(E, A, B, C, D, F3, 8, (in)[7], 3); ROUND2(D, E, A, B, C, F3, 6, (in)[14], 3); ROUND2(C, D, E, A, B, F3, 6, (in)[6], 3); ROUND2(B, C, D, E, A, F3, 14, (in)[9], 3); ROUND2(A, B, C, D, E, F3, 12, (in)[11], 3); ROUND2(E, A, B, C, D, F3, 13, (in)[8], 3); ROUND2(D, E, A, B, C, F3, 5, (in)[12], 3); ROUND2(C, D, E, A, B, F3, 14, (in)[2], 3); ROUND2(B, C, D, E, A, F3, 13, (in)[10], 3); ROUND2(A, B, C, D, E, F3, 13, (in)[0], 3); ROUND2(E, A, B, C, D, F3, 7, (in)[4], 3); ROUND2(D, E, A, B, C, F3, 5, (in)[13], 3); ROUND2(C, D, E, A, B, F2, 15, (in)[8], 4); ROUND2(B, C, D, E, A, F2, 5, (in)[6], 4); ROUND2(A, B, C, D, E, F2, 8, (in)[4], 4); ROUND2(E, A, B, C, D, F2, 11, (in)[1], 4); ROUND2(D, E, A, B, C, F2, 14, (in)[3], 4); ROUND2(C, D, E, A, B, F2, 14, (in)[11], 4); ROUND2(B, C, D, E, A, F2, 6, (in)[15], 4); ROUND2(A, B, C, D, E, F2, 14, (in)[0], 4); ROUND2(E, A, B, C, D, F2, 6, (in)[5], 4); ROUND2(D, E, A, B, C, F2, 9, (in)[12], 4); ROUND2(C, D, E, A, B, F2, 12, (in)[2], 4); ROUND2(B, C, D, E, A, F2, 9, (in)[13], 4); ROUND2(A, B, C, D, E, F2, 12, (in)[9], 4); ROUND2(E, A, B, C, D, F2, 5, (in)[7], 4); ROUND2(D, E, A, B, C, F2, 15, (in)[10], 4); ROUND2(C, D, E, A, B, F2, 8, (in)[14], 4); ROUND2(B, C, D, E, A, F1, 8, (in)[12], 5); ROUND2(A, B, C, D, E, F1, 5, (in)[15], 5); ROUND2(E, A, B, 
C, D, F1, 12, (in)[10], 5); ROUND2(D, E, A, B, C, F1, 9, (in)[4], 5); ROUND2(C, D, E, A, B, F1, 12, (in)[1], 5); ROUND2(B, C, D, E, A, F1, 5, (in)[5], 5); ROUND2(A, B, C, D, E, F1, 14, (in)[8], 5); ROUND2(E, A, B, C, D, F1, 6, (in)[7], 5); ROUND2(D, E, A, B, C, F1, 8, (in)[6], 5); ROUND2(C, D, E, A, B, F1, 13, (in)[2], 5); ROUND2(B, C, D, E, A, F1, 6, (in)[13], 5); ROUND2(A, B, C, D, E, F1, 5, (in)[14], 5); ROUND2(E, A, B, C, D, F1, 15, (in)[0], 5); ROUND2(D, E, A, B, C, F1, 13, (in)[3], 5); ROUND2(C, D, E, A, B, F1, 11, (in)[9], 5); ROUND2(B, C, D, E, A, F1, 11, (in)[11], 5); tmp = (h)[1] + C1 + D2; (h)[1] = (h)[2] + D1 + E2; (h)[2] = (h)[3] + E1 + A2; (h)[3] = (h)[4] + A1 + B2; (h)[4] = (h)[0] + B1 + C2; (h)[0] = tmp; } #define ROL32(x, y) ROTL32(x,y) //rotate(x, y ## U) #define SHR(x, y) (x >> y) //#define SWAP32(a) (as_uint(as_uchar4(a).wzyx)) #define S0(x) (ROL32(x, 25) ^ ROL32(x, 14) ^ SHR(x, 3)) #define S1(x) (ROL32(x, 15) ^ ROL32(x, 13) ^ SHR(x, 10)) #define S2(x) (ROL32(x, 30) ^ ROL32(x, 19) ^ ROL32(x, 10)) #define S3(x) (ROL32(x, 26) ^ ROL32(x, 21) ^ ROL32(x, 7)) #define P(a,b,c,d,e,f,g,h,x,K) \ { \ temp1 = h + S3(e) + F1(e,f,g) + (K + x); \ d += temp1; h = temp1 + S2(a) + F0(a,b,c); \ } #define bitselect(a, b, c) ((a) ^ ((c) & ((b) ^ (a)))) #define F0(y, x, z) bitselect(z, y, z ^ x) //#define F1(x, y, z) bitselect(z, y, x) #define R0 (W0 = S1(W14) + W9 + S0(W1) + W0) #define R1 (W1 = S1(W15) + W10 + S0(W2) + W1) #define R2 (W2 = S1(W0) + W11 + S0(W3) + W2) #define R3 (W3 = S1(W1) + W12 + S0(W4) + W3) #define R4 (W4 = S1(W2) + W13 + S0(W5) + W4) #define R5 (W5 = S1(W3) + W14 + S0(W6) + W5) #define R6 (W6 = S1(W4) + W15 + S0(W7) + W6) #define R7 (W7 = S1(W5) + W0 + S0(W8) + W7) #define R8 (W8 = S1(W6) + W1 + S0(W9) + W8) #define R9 (W9 = S1(W7) + W2 + S0(W10) + W9) #define R10 (W10 = S1(W8) + W3 + S0(W11) + W10) #define R11 (W11 = S1(W9) + W4 + S0(W12) + W11) #define R12 (W12 = S1(W10) + W5 + S0(W13) + W12) #define R13 (W13 = S1(W11) + W6 + S0(W14) + W13) #define R14 (W14 = S1(W12) + W7 + S0(W15) + W14) #define R15 (W15 = S1(W13) + W8 + S0(W0) + W15) #define RD14 (S1(W12) + W7 + S0(W15) + W14) #define RD15 (S1(W13) + W8 + S0(W0) + W15) struct uint8 { uint32_t s0; uint32_t s1; uint32_t s2; uint32_t s3; uint32_t s4; uint32_t s5; uint32_t s6; uint32_t s7; }; struct uint16 { uint32_t s0; uint32_t s1; uint32_t s2; uint32_t s3; uint32_t s4; uint32_t s5; uint32_t s6; uint32_t s7; uint32_t s8; uint32_t s9; uint32_t sA; uint32_t sB; uint32_t sC; uint32_t sD; uint32_t sE; uint32_t sF; }; __device__ __forceinline__ uint8 sha256_round(uint16 data, uint8 buf) { uint32_t temp1; uint8 res; uint32_t W0 = (data.s0); uint32_t W1 = (data.s1); uint32_t W2 = (data.s2); uint32_t W3 = (data.s3); uint32_t W4 = (data.s4); uint32_t W5 = (data.s5); uint32_t W6 = (data.s6); uint32_t W7 = (data.s7); uint32_t W8 = (data.s8); uint32_t W9 = (data.s9); uint32_t W10 = (data.sA); uint32_t W11 = (data.sB); uint32_t W12 = (data.sC); uint32_t W13 = (data.sD); uint32_t W14 = (data.sE); uint32_t W15 = (data.sF); uint32_t v0 = buf.s0; uint32_t v1 = buf.s1; uint32_t v2 = buf.s2; uint32_t v3 = buf.s3; uint32_t v4 = buf.s4; uint32_t v5 = buf.s5; uint32_t v6 = buf.s6; uint32_t v7 = buf.s7; P(v0, v1, v2, v3, v4, v5, v6, v7, W0, 0x428A2F98); P(v7, v0, v1, v2, v3, v4, v5, v6, W1, 0x71374491); P(v6, v7, v0, v1, v2, v3, v4, v5, W2, 0xB5C0FBCF); P(v5, v6, v7, v0, v1, v2, v3, v4, W3, 0xE9B5DBA5); P(v4, v5, v6, v7, v0, v1, v2, v3, W4, 0x3956C25B); P(v3, v4, v5, v6, v7, v0, v1, v2, W5, 0x59F111F1); P(v2, v3, v4, v5, v6, v7, v0, v1, 
W6, 0x923F82A4); P(v1, v2, v3, v4, v5, v6, v7, v0, W7, 0xAB1C5ED5); P(v0, v1, v2, v3, v4, v5, v6, v7, W8, 0xD807AA98); P(v7, v0, v1, v2, v3, v4, v5, v6, W9, 0x12835B01); P(v6, v7, v0, v1, v2, v3, v4, v5, W10, 0x243185BE); P(v5, v6, v7, v0, v1, v2, v3, v4, W11, 0x550C7DC3); P(v4, v5, v6, v7, v0, v1, v2, v3, W12, 0x72BE5D74); P(v3, v4, v5, v6, v7, v0, v1, v2, W13, 0x80DEB1FE); P(v2, v3, v4, v5, v6, v7, v0, v1, W14, 0x9BDC06A7); P(v1, v2, v3, v4, v5, v6, v7, v0, W15, 0xC19BF174); P(v0, v1, v2, v3, v4, v5, v6, v7, R0, 0xE49B69C1); P(v7, v0, v1, v2, v3, v4, v5, v6, R1, 0xEFBE4786); P(v6, v7, v0, v1, v2, v3, v4, v5, R2, 0x0FC19DC6); P(v5, v6, v7, v0, v1, v2, v3, v4, R3, 0x240CA1CC); P(v4, v5, v6, v7, v0, v1, v2, v3, R4, 0x2DE92C6F); P(v3, v4, v5, v6, v7, v0, v1, v2, R5, 0x4A7484AA); P(v2, v3, v4, v5, v6, v7, v0, v1, R6, 0x5CB0A9DC); P(v1, v2, v3, v4, v5, v6, v7, v0, R7, 0x76F988DA); P(v0, v1, v2, v3, v4, v5, v6, v7, R8, 0x983E5152); P(v7, v0, v1, v2, v3, v4, v5, v6, R9, 0xA831C66D); P(v6, v7, v0, v1, v2, v3, v4, v5, R10, 0xB00327C8); P(v5, v6, v7, v0, v1, v2, v3, v4, R11, 0xBF597FC7); P(v4, v5, v6, v7, v0, v1, v2, v3, R12, 0xC6E00BF3); P(v3, v4, v5, v6, v7, v0, v1, v2, R13, 0xD5A79147); P(v2, v3, v4, v5, v6, v7, v0, v1, R14, 0x06CA6351); P(v1, v2, v3, v4, v5, v6, v7, v0, R15, 0x14292967); P(v0, v1, v2, v3, v4, v5, v6, v7, R0, 0x27B70A85); P(v7, v0, v1, v2, v3, v4, v5, v6, R1, 0x2E1B2138); P(v6, v7, v0, v1, v2, v3, v4, v5, R2, 0x4D2C6DFC); P(v5, v6, v7, v0, v1, v2, v3, v4, R3, 0x53380D13); P(v4, v5, v6, v7, v0, v1, v2, v3, R4, 0x650A7354); P(v3, v4, v5, v6, v7, v0, v1, v2, R5, 0x766A0ABB); P(v2, v3, v4, v5, v6, v7, v0, v1, R6, 0x81C2C92E); P(v1, v2, v3, v4, v5, v6, v7, v0, R7, 0x92722C85); P(v0, v1, v2, v3, v4, v5, v6, v7, R8, 0xA2BFE8A1); P(v7, v0, v1, v2, v3, v4, v5, v6, R9, 0xA81A664B); P(v6, v7, v0, v1, v2, v3, v4, v5, R10, 0xC24B8B70); P(v5, v6, v7, v0, v1, v2, v3, v4, R11, 0xC76C51A3); P(v4, v5, v6, v7, v0, v1, v2, v3, R12, 0xD192E819); P(v3, v4, v5, v6, v7, v0, v1, v2, R13, 0xD6990624); P(v2, v3, v4, v5, v6, v7, v0, v1, R14, 0xF40E3585); P(v1, v2, v3, v4, v5, v6, v7, v0, R15, 0x106AA070); P(v0, v1, v2, v3, v4, v5, v6, v7, R0, 0x19A4C116); P(v7, v0, v1, v2, v3, v4, v5, v6, R1, 0x1E376C08); P(v6, v7, v0, v1, v2, v3, v4, v5, R2, 0x2748774C); P(v5, v6, v7, v0, v1, v2, v3, v4, R3, 0x34B0BCB5); P(v4, v5, v6, v7, v0, v1, v2, v3, R4, 0x391C0CB3); P(v3, v4, v5, v6, v7, v0, v1, v2, R5, 0x4ED8AA4A); P(v2, v3, v4, v5, v6, v7, v0, v1, R6, 0x5B9CCA4F); P(v1, v2, v3, v4, v5, v6, v7, v0, R7, 0x682E6FF3); P(v0, v1, v2, v3, v4, v5, v6, v7, R8, 0x748F82EE); P(v7, v0, v1, v2, v3, v4, v5, v6, R9, 0x78A5636F); P(v6, v7, v0, v1, v2, v3, v4, v5, R10, 0x84C87814); P(v5, v6, v7, v0, v1, v2, v3, v4, R11, 0x8CC70208); P(v4, v5, v6, v7, v0, v1, v2, v3, R12, 0x90BEFFFA); P(v3, v4, v5, v6, v7, v0, v1, v2, R13, 0xA4506CEB); P(v2, v3, v4, v5, v6, v7, v0, v1, RD14, 0xBEF9A3F7); P(v1, v2, v3, v4, v5, v6, v7, v0, RD15, 0xC67178F2); res.s0 = (v0 + buf.s0); res.s1 = (v1 + buf.s1); res.s2 = (v2 + buf.s2); res.s3 = (v3 + buf.s3); res.s4 = (v4 + buf.s4); res.s5 = (v5 + buf.s5); res.s6 = (v6 + buf.s6); res.s7 = (v7 + buf.s7); return (res); } __global__ void search(uint32_t threads, uint32_t startNounce, uint8 *ctx) { // SHA256 takes 16 uints of input per block - we have 112 bytes to process // 8 * 16 == 64, meaning two block transforms. 
uint32_t SHA256Buf[16]; uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t nounce = (startNounce + thread); uint32_t hashPosition = (nounce - startNounce); // uint32_t gid = 1;// get_global_id(0); // Remember the last four is the nonce - so 108 bytes / 4 bytes per dword #pragma unroll for (int i = 0; i < 16; ++i) SHA256Buf[i] = cuda_swab32(c_data[i]); // SHA256 initialization constants // uint8 outbuf; = (uint8)(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19); uint8 outbuf; outbuf.s0 = 0x6A09E667; outbuf.s1 = 0xBB67AE85; outbuf.s2 = 0x3C6EF372; outbuf.s3 = 0xA54FF53A; outbuf.s4 = 0x510E527F; outbuf.s5 = 0x9B05688C; outbuf.s6 = 0x1F83D9AB; outbuf.s7 = 0x5BE0CD19; #pragma unroll for (int i = 0; i < 3; ++i) { if (i == 1) { #pragma unroll for (int i = 0; i < 11; ++i) SHA256Buf[i] = cuda_swab32(c_data[i + 16]); SHA256Buf[11] = cuda_swab32(hashPosition); SHA256Buf[12] = 0x80000000; SHA256Buf[13] = 0x00000000; SHA256Buf[14] = 0x00000000; SHA256Buf[15] = 0x00000380; } if (i == 2) { ((uint8 *)SHA256Buf)[0] = outbuf; SHA256Buf[8] = 0x80000000; #pragma unroll for (int i = 9; i < 15; ++i) SHA256Buf[i] = 0x00000000; SHA256Buf[15] = 0x00000100; // outbuf = (uint8)(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19); outbuf.s0 = 0x6A09E667; outbuf.s1 = 0xBB67AE85; outbuf.s2 = 0x3C6EF372; outbuf.s3 = 0xA54FF53A; outbuf.s4 = 0x510E527F; outbuf.s5 = 0x9B05688C; outbuf.s6 = 0x1F83D9AB; outbuf.s7 = 0x5BE0CD19; } outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); } /* outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); #pragma unroll for(int i = 0; i < 11; ++i) SHA256Buf[i] = SWAP32(input[i + 16]); SHA256Buf[11] = SWAP32(gid); SHA256Buf[12] = 0x80000000; SHA256Buf[13] = 0x00000000; SHA256Buf[14] = 0x00000000; SHA256Buf[15] = 0x00000380; outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); ((uint8 *)SHA256Buf)[0] = outbuf; SHA256Buf[8] = 0x80000000; for(int i = 9; i < 15; ++i) SHA256Buf[i] = 0x00000000; SHA256Buf[15] = 0x00000100; outbuf = (uint8)(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19); outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); */ /* //outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); //outbuf = sha256_round(((uint16 *)SHA256Buf)[1], outbuf); // outbuf would normall be SWAP32'd here, but it'll need it again // once we use it as input to the next SHA256, so it negates. 
((uint8 *)SHA256Buf)[0] = outbuf; SHA256Buf[8] = 0x80000000; for(int i = 9; i < 15; ++i) SHA256Buf[i] = 0x00000000; SHA256Buf[15] = 0x00000100; outbuf = (uint8)(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19); outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); */ /* outbuf.s0 = cuda_swab32(outbuf.s0); outbuf.s1 = cuda_swab32(outbuf.s1); outbuf.s2 = cuda_swab32(outbuf.s2); outbuf.s3 = cuda_swab32(outbuf.s3); outbuf.s4 = cuda_swab32(outbuf.s4); outbuf.s5 = cuda_swab32(outbuf.s5); outbuf.s6 = cuda_swab32(outbuf.s6); outbuf.s7 = cuda_swab32(outbuf.s7); ctx[hashPosition] = outbuf; */ // ctx[get_global_id(0) - get_global_offset(0)] = outbuf; } } __global__ void search1(uint32_t threads, uint32_t startNounce, uint8 *ctx) { uint64_t W[16] = { 0UL }, SHA512Out[8]; uint32_t SHA256Buf[16]; uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t nounce = (startNounce + thread); uint32_t hashPosition = (nounce - startNounce); uint8 outbuf = ctx[hashPosition]; //[get_global_id(0) - get_global_offset(0)]; ((uint8 *)W)[0] = outbuf; for (int i = 0; i < 4; ++i) W[i] = SWAP64(W[i]); W[4] = 0x8000000000000000UL; W[15] = 0x0000000000000100UL; for (int i = 0; i < 8; ++i) SHA512Out[i] = SHA512_INIT[i]; SHA512Block(W, SHA512Out); for (int i = 0; i < 8; ++i) SHA512Out[i] = SWAP64(SHA512Out[i]); uint32_t RMD160_0[16] = { 0U }; uint32_t RMD160_1[16] = { 0U }; uint32_t RMD160_0_Out[5], RMD160_1_Out[5]; for (int i = 0; i < 4; ++i) { ((uint64_t *)RMD160_0)[i] = SHA512Out[i]; ((uint64_t *)RMD160_1)[i] = SHA512Out[i + 4]; } RMD160_0[8] = RMD160_1[8] = 0x00000080; RMD160_0[14] = RMD160_1[14] = 0x00000100; for (int i = 0; i < 5; ++i) { RMD160_0_Out[i] = RMD160_IV[i]; RMD160_1_Out[i] = RMD160_IV[i]; } RIPEMD160_ROUND_BODY(RMD160_0, RMD160_0_Out); RIPEMD160_ROUND_BODY(RMD160_1, RMD160_1_Out); for (int i = 0; i < 5; ++i) SHA256Buf[i] = cuda_swab32(RMD160_0_Out[i]); for (int i = 5; i < 10; ++i) SHA256Buf[i] = cuda_swab32(RMD160_1_Out[i - 5]); SHA256Buf[10] = 0x80000000; for (int i = 11; i < 15; ++i) SHA256Buf[i] = 0x00000000U; SHA256Buf[15] = 0x00000140; // outbuf = (uint8)(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19); outbuf.s0 = 0x6A09E667; outbuf.s1 = 0xBB67AE85; outbuf.s2 = 0x3C6EF372; outbuf.s3 = 0xA54FF53A; outbuf.s4 = 0x510E527F; outbuf.s5 = 0x9B05688C; outbuf.s6 = 0x1F83D9AB; outbuf.s7 = 0x5BE0CD19; outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); ctx[hashPosition] = outbuf; } } __global__ void search2(uint32_t threads, uint32_t startNounce, uint8 *ctx, uint32_t *d_found) { uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t nounce = (startNounce + thread); uint32_t hashPosition = (nounce - startNounce); uint32_t SHA256Buf[16] = { 0U }; uint8 outbuf = ctx[hashPosition];//get_global_id(0) - get_global_offset(0)]; ((uint8 *)SHA256Buf)[0] = outbuf; SHA256Buf[8] = 0x80000000; for (int i = 9; i < 15; ++i) SHA256Buf[i] = 0x00000000; SHA256Buf[15] = 0x00000100; // outbuf = (uint8)(0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19); outbuf.s0 = 0x6A09E667; outbuf.s1 = 0xBB67AE85; outbuf.s2 = 0x3C6EF372; outbuf.s3 = 0xA54FF53A; outbuf.s4 = 0x510E527F; outbuf.s5 = 0x9B05688C; outbuf.s6 = 0x1F83D9AB; outbuf.s7 = 0x5BE0CD19; outbuf = sha256_round(((uint16 *)SHA256Buf)[0], outbuf); outbuf.s6 = cuda_swab32(outbuf.s6); outbuf.s7 = cuda_swab32(outbuf.s7); uint64_t test = MAKE_ULONGLONG(outbuf.s7, 
outbuf.s6); //if(!(outbuf.s7)) output[atomic_inc(output+0xFF)] = SWAP32(gid); if (test <= ((uint64_t *)pTarget)[3]) { //yai. uint32_t tmp = atomicCAS(d_found, 0xffffffff, nounce); if (tmp != 0xffffffff) d_found[1] = nounce; } // output[atomic_inc(output + 0xFF)] = SWAP32(gid); } } __host__ void lbrcredit_cpu_hash(uint32_t thr_id, int threads, uint32_t startNounce, const uint32_t *const __restrict__ g_hash, uint32_t *h_found) { const int threadsperblock = 256; dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); cudaMemset(d_found[thr_id], 0xffffffff, 2 * sizeof(uint32_t)); search << <grid, block >> >(threads, startNounce, (uint8*)g_hash); search1 << <grid, block >> >(threads, startNounce, (uint8 *)g_hash); search2 << <grid, block >> >(threads, startNounce, (uint8 *)g_hash, d_found[thr_id]); cudaMemcpy(h_found, d_found[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); } __host__ void lbrcredit_setBlockTarget(uint32_t* pdata, const void *target) { unsigned char PaddedMessage[192]; memcpy(PaddedMessage, pdata, 168); memset(PaddedMessage + 168, 0, 24); ((uint32_t*)PaddedMessage)[42] = 0x80000000; ((uint32_t*)PaddedMessage)[47] = 0x0540; CUDA_SAFE_CALL(cudaMemcpyToSymbol(pTarget, target, 8 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_data, PaddedMessage, 48 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); }
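/*
 * Host-side driver sketch (an assumption, not code from the original miner):
 * it shows how lbrcredit_setBlockTarget() and lbrcredit_cpu_hash() defined
 * above are meant to be called from a scanhash loop. Names such as
 * example_lbry_scan_round are hypothetical; d_found[thr_id] is assumed to
 * have been cudaMalloc'd (2 * sizeof(uint32_t)) by an init routine that is
 * not part of this file, and d_hash must hold one 8-word SHA-256 state per
 * thread.
 */
static bool example_lbry_scan_round(uint32_t thr_id, uint32_t threads,
                                    uint32_t start_nonce,
                                    uint32_t *pdata,          /* 168-byte block header */
                                    const uint32_t *ptarget,  /* 8-word target */
                                    uint32_t *d_hash,         /* device buffer: threads * 8 words */
                                    uint32_t *nonce_out)
{
    uint32_t h_found[2];

    /* upload the padded header and target to constant memory (c_data, pTarget) */
    lbrcredit_setBlockTarget(pdata, ptarget);

    /* runs search -> search1 -> search2 and copies d_found back into h_found */
    lbrcredit_cpu_hash(thr_id, (int)threads, start_nonce, d_hash, h_found);

    /* search2 records the first nonce whose final hash meets the target via
       atomicCAS; 0xffffffff means nothing was found in this nonce range */
    if (h_found[0] != 0xffffffffU)
    {
        *nonce_out = h_found[0];
        return true;
    }
    return false;
}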
inline hipError_t checkCuda(hipError_t result, int s){ if (result != hipSuccess) { fprintf(stderr, "HIP Runtime Error at line %d: %s\n", s, hipGetErrorString(result)); assert(result == hipSuccess); } return result; } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbr+fr];//slc; tmp_val = 0; for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; //2PR } if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } tmp = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2= 0; if(fbrS < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbrS+fr];//slc; tmp = 0; for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU2[idx2 * R + r] ; } } for(unsigned int r=laneId; r<R; r+=32) { tmp2 += tmp * dU1[idx1 * R + r] ; } if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } tmp2 = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D(DTYPE * 
vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp; if(fbrS < nFibers - 1){ unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx0 = fbrIdx2[fbr]; tmp = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp += vals[x] * dU1[idx1 * R + r]; //2MR } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp * dU2[idx2 * R + r] * dU3[idx3 * R + r]) ; } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void 
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3, ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){ ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val = 0;; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ ITYPE *dInds2, *dInds3, *dFbrPtr0, *dFbrIdx0, *dFbrPtr1, *dFbrIdx1, *dFbrPtr2, *dFbrIdx2, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // All m same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totSlcPtr += TiledX[m].fbrPtr[0].size() ; totSlcIdx += TiledX[m].fbrIdx[0].size() ; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? 
TiledX[m].fbrPtr[2].size() : 0) ; } // Allocate Tensor on a device checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrPtr0, totSlcPtr * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrIdx0, totSlcIdx * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrPtr1, totFbrPtr * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrIdx1, totFbrIdx * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), __LINE__); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), __LINE__); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), __LINE__); } // device memory copy for tiled parts for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[m].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size() : 0) ; } } if (TiledX[m].totNnz == 0) continue; checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dFbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dFbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dFbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dFbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); if(TiledX[m].ndims == 3){ if(m <= 2) checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); } if(TiledX[m].ndims == 4){ checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), __LINE__); } } //Matrices unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; //Matrices DTYPE *dU; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? 
(U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), __LINE__); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; hipMemset(dU, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), __LINE__); checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), __LINE__); if(TiledX[0].ndims == 4) checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), __LINE__); float GPUTime = 0, allModeGPUTime = 0; dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ GPUTime = 0; dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) U[mode].vals[r * U[mode].nCols + c] = mode + .5; } if(MTTKRPmode == 1){ checkCuda(hipMemcpy(dU, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), __LINE__); hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), __LINE__); hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), __LINE__); hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ; } } if (TiledX[m].totNnz == 0) continue; // time the device execution int BLOCKSIZE; if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: Slc atomics\n" ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ std::cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << std::endl << "hint: increase -b!" 
<< std::endl; return -1; } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: FbrS atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: Fbr atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: nnz atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) hipLaunchKernelGGL(mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D, grid, block, 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dFbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } hipDeviceSynchronize(); } } /* Copying output matrix from GPU to CPU for correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), __LINE__); hipFree(dVals); hipFree(dU); hipFree(dFbrIdx0); hipFree(dFbrIdx1); hipFree(dFbrPtr0); hipFree(dFbrPtr1); hipFree(dFbrLikeSlcInds); if(TiledX[0].ndims == 3) hipFree(dInds2); if(TiledX[0].ndims == 4){ hipFree(dFbrIdx2); hipFree(dFbrPtr2); hipFree(dInds3); } delete[] dULoc; delete[] szDU; int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;; for (int m = 0; m < TiledX[0].ndims; ++m){ if(TiledX[m].totNnz){ if(TiledX[m].ndims == 3){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMIfibers += TiledX[m].fbrPtr[1].size(); totalMInnz += TiledX[m].totNnz; } if(TiledX[m].ndims == 4){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMISfibers += TiledX[m].fbrPtr[1].size(); totalMIfibers += TiledX[m].fbrPtr[2].size(); totalMInnz += TiledX[m].totNnz; } } } std::cout << "Resource usage: " << std::endl; if(TiledX[0].ndims == 3) std::cout << " nSlc:" << totalMIslics << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << std::endl; else if(TiledX[0].ndims == 4) std::cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << std::endl; return 0; }
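// --- Illustrative sketch (added for clarity; not part of the original source) ---
// The fiber-level MTTKRP kernels above decode three indices from the thread id:
//   laneId - lane within a 32-thread warp (low 5 bits of tId)
//   workId - which of the 2^logOfWPC warps cooperating on one fiber this is
//   fbr    - the fiber (or fiber-slice) assigned to that warp group
// A minimal host-side model of that decoding, with hypothetical helper names:
static inline void decodeFiberWork(unsigned tId, unsigned gId, int logOfWPC,
                                   unsigned &laneId, unsigned &workId, unsigned &fbr) {
    laneId = tId & 31;                                    // lane inside the warp
    workId = (tId & ((1u << (5 + logOfWPC)) - 1)) >> 5;   // warp index inside the fiber group
    fbr    = gId >> (5 + logOfWPC);                       // one fiber per group of 2^logOfWPC warps
}
// For example, with logOfWPC = 2 (warpPerFbr = 4) the first 128 threads of the grid
// all map to fbr = 0 and split into workId 0..3, so four warps stride over that
// fiber's work with stride warpPerSlice in the loops above.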
using namespace std; #define MAXNAMESIZE 1024 // max filename length #define SEED 1 /* increase this to reduce probability of random error */ /* increasing it also ups running time of "speedy" part of the code */ /* SP = 1 seems to be fine */ #define SP 1 // number of repetitions of speedy must be >=1 /* higher ITER --> more likely to get correct # of centers */ /* higher ITER also scales the running time almost linearly */ #define ITER 3 // iterate ITER* k log k times; ITER >= 1 //#define PRINTINFO //comment this out to disable output //#define PROFILE_TMP // comment this out to disable instrumentation code //#define ENABLE_THREADS // comment this out to disable threads //#define INSERT_WASTE //uncomment this to insert waste computation into dist function #define CACHE_LINE 512 // cache line in byte /* global */ static char *switch_membership; //whether to switch membership in pgain static bool *is_center; //whether a point is a center static int *center_table; //index table of centers static int nproc; //# of threads /* timing info */ static double serial; static double cpu_gpu_memcpy; static double memcpy_back; static double gpu_malloc; static double kernel_time; static int cnt_speedy; // instrumentation code #ifdef PROFILE_TMP static double gpu_free; double time_local_search; double time_speedy; double time_select_feasible; double time_gain; double time_shuffle; double time_gain_dist; double time_gain_init; double time_FL; #endif double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } void inttofile(int data, char *filename){ FILE *fp = fopen(filename, "w"); fprintf(fp, "%d ", data); fclose(fp); } int isIdentical(float *i, float *j, int D){ // tells whether two points of D dimensions are identical int a = 0; int equal = 1; while (equal && a < D) { if (i[a] != j[a]) equal = 0; else a++; } if (equal) return 1; else return 0; } /* comparator for floating point numbers static int floatcomp(const void *i, const void *j) { float a, b; a = *(float *)(i); b = *(float *)(j); if (a > b) return (1); if (a < b) return (-1); return(0); }*/ /* shuffle points into random order */ void shuffle(Points *points) { #ifdef PROFILE_TMP double t1 = gettime(); #endif long i, j; Point temp; for (i=0;i<points->num-1;i++) { j=(lrand48()%(points->num - i)) + i; temp = points->p[i]; points->p[i] = points->p[j]; points->p[j] = temp; } #ifdef PROFILE_TMP double t2 = gettime(); time_shuffle += t2-t1; #endif } /* shuffle an array of integers */ void intshuffle(int *intarray, int length) { #ifdef PROFILE_TMP double t1 = gettime(); #endif long i, j; int temp; for (i=0;i<length;i++) { j=(lrand48()%(length - i))+i; temp = intarray[i]; intarray[i]=intarray[j]; intarray[j]=temp; } #ifdef PROFILE_TMP double t2 = gettime(); time_shuffle += t2-t1; #endif } #ifdef INSERT_WASTE float waste(float s ) { for( int i =0 ; i< 4; i++ ) { s += pow(s,0.78); } return s; } #endif /* compute Euclidean distance squared between two points */ float dist(Point p1, Point p2, int dim) { int i; float result=0.0; for (i=0;i<dim;i++) result += (p1.coord[i] - p2.coord[i])*(p1.coord[i] - p2.coord[i]); #ifdef INSERT_WASTE float s = waste(result); result += s; result -= s; #endif return(result); } /* run speedy on the points, return total cost of solution */ float pspeedy(Points *points, float z, long *kcenter, int pid, pthread_barrier_t* barrier) { #ifdef PROFILE_TMP double t1 = gettime(); #endif cnt_speedy++; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif //my block long bsize = points->num/nproc; 
long k1 = bsize * pid; long k2 = k1 + bsize; if( pid == nproc-1 ) k2 = points->num; static float totalcost; static bool open = false; static float* costs; //cost for each thread. static int i; #ifdef ENABLE_THREADS static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; #endif #ifdef PRINTINFO if( pid == 0 ){ fprintf(stderr, "Speedy: facility cost %lf\n", z); } #endif /* create center at first point, send it to itself */ for( int k = k1; k < k2; k++ ) { float distance = dist(points->p[k],points->p[0],points->dim); points->p[k].cost = distance * points->p[k].weight; points->p[k].assign=0; } if( pid==0 ) { *kcenter = 1; costs = (float*)malloc(sizeof(float)*nproc); } if( pid != 0 ) { // we are not the master threads. we wait until a center is opened. while(1) { #ifdef ENABLE_THREADS pthread_mutex_lock(&mutex); while(!open) pthread_cond_wait(&cond,&mutex); pthread_mutex_unlock(&mutex); #endif if( i >= points->num ) break; for( int k = k1; k < k2; k++ ) { float distance = dist(points->p[i],points->p[k],points->dim); if( distance*points->p[k].weight < points->p[k].cost ) { points->p[k].cost = distance * points->p[k].weight; points->p[k].assign=i; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); pthread_barrier_wait(barrier); #endif } } else { // I am the master thread. I decide whether to open a center and notify others if so. for(i = 1; i < points->num; i++ ) { bool to_open = ((float)lrand48()/(float)INT_MAX)<(points->p[i].cost/z); //--cambine: what standard? if( to_open ) { (*kcenter)++; #ifdef ENABLE_THREADS pthread_mutex_lock(&mutex); #endif open = true; #ifdef ENABLE_THREADS pthread_mutex_unlock(&mutex); pthread_cond_broadcast(&cond); #endif for( int k = k1; k < k2; k++ ) { //--cambine: for a new open, compute new cost and center. 
float distance = dist(points->p[i],points->p[k],points->dim); if( distance*points->p[k].weight < points->p[k].cost ) { points->p[k].cost = distance * points->p[k].weight; points->p[k].assign=i; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif open = false; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif } } #ifdef ENABLE_THREADS pthread_mutex_lock(&mutex); #endif open = true; #ifdef ENABLE_THREADS pthread_mutex_unlock(&mutex); pthread_cond_broadcast(&cond); #endif } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif open = false; float mytotal = 0; for( int k = k1; k < k2; k++ ) { mytotal += points->p[k].cost; } costs[pid] = mytotal; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif // aggregate costs from each thread if( pid == 0 ) { totalcost=z*(*kcenter); for( int i = 0; i < nproc; i++ ) { totalcost += costs[i]; } free(costs); } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif #ifdef PRINTINFO if( pid == 0 ) { fprintf(stderr, "Speedy opened %d facilities for total cost %lf\n", *kcenter, totalcost); fprintf(stderr, "Distance Cost %lf\n", totalcost - z*(*kcenter)); } #endif #ifdef PROFILE_TMP double t2 = gettime(); if( pid== 0 ) { time_speedy += t2 -t1; } #endif return(totalcost); } /* facility location on the points using local search */ /* z is the facility cost, returns the total cost and # of centers */ /* assumes we are seeded with a reasonable solution */ /* cost should represent this solution's cost */ /* halt if there is < e improvement after iter calls to gain */ /* feasible is an array of numfeasible points which may be centers */ float pFL(Points *points, int *feasible, int numfeasible, float z, long *k, int kmax, float cost, long iter, float e, int pid, pthread_barrier_t* barrier) { #ifdef PROFILE_TMP double t1 = gettime(); #endif #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif long i; long x; float change; change = cost; /* continue until we run iter iterations without improvement */ /* stop instead if improvement is less than e */ while (change/cost > 1.0*e) { change = 0.0; /* randomize order in which centers are considered */ if( pid == 0 ) { intshuffle(feasible, numfeasible); } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif // allocate device buffer here. for (i=0;i<iter;i++) { x = i%numfeasible; //printf("--cambine: feasible x=%ld, z=%f, k=%ld, kmax=%d\n", x, z, *k, kmax); change += pgain(feasible[x], points, z, k, kmax, is_center, center_table, switch_membership, &serial, &cpu_gpu_memcpy, &memcpy_back, &gpu_malloc, &kernel_time); } cost -= change; #ifdef PRINTINFO if( pid == 0 ) { fprintf(stderr, "%d centers, cost %lf, total distance %lf\n", *k, cost, cost - z*(*k)); } #endif #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif } #ifdef PROFILE_TMP double t2 = gettime(); time_FL += t2 - t1; #endif return(cost); } int selectfeasible_fast(Points *points, int **feasible, int kmin, int pid, pthread_barrier_t* barrier) { #ifdef PROFILE_TMP double t1 = gettime(); #endif int numfeasible = points->num; if (numfeasible > (ITER*kmin*log((float)kmin))) numfeasible = (int)(ITER*kmin*log((float)kmin)); *feasible = (int *)malloc(numfeasible*sizeof(int)); float* accumweight; float totalweight; /* Calcuate my block. For now this routine does not seem to be the bottleneck, so it is not parallelized. When necessary, this can be parallelized by setting k1 and k2 to proper values and calling this routine from all threads ( it is called only by thread 0 for now ). 
Note that when parallelized, the randomization might not be the same and it might not be difficult to measure the parallel speed-up for the whole program. */ // long bsize = numfeasible; long k1 = 0; long k2 = numfeasible; float w; int l,r,k; /* not many points, all will be feasible */ if (numfeasible == points->num) { for (int i=k1;i<k2;i++) (*feasible)[i] = i; return numfeasible; } accumweight= (float*)malloc(sizeof(float)*points->num); accumweight[0] = points->p[0].weight; totalweight=0; for( int i = 1; i < points->num; i++ ) { accumweight[i] = accumweight[i-1] + points->p[i].weight; } totalweight=accumweight[points->num-1]; for(int i=k1; i<k2; i++ ) { w = (lrand48()/(float)INT_MAX)*totalweight; //binary search l=0; r=points->num-1; if( accumweight[0] > w ) { (*feasible)[i]=0; continue; } while( l+1 < r ) { k = (l+r)/2; if( accumweight[k] > w ) { r = k; } else { l=k; } } (*feasible)[i]=r; } free(accumweight); #ifdef PROFILE_TMP double t2 = gettime(); time_select_feasible += t2-t1; #endif return numfeasible; } /* compute approximate kmedian on the points */ float pkmedian(Points *points, long kmin, long kmax, long* kfinal, int pid, pthread_barrier_t* barrier ) { int i; float cost; float lastcost; float hiz, loz, z; static long k; static int *feasible; static int numfeasible; static float* hizs; if( pid==0 ) hizs = (float*)calloc(nproc,sizeof(float)); hiz = loz = 0.0; long ptDimension = points->dim; //my block long bsize = points->num/nproc; long k1 = bsize * pid; long k2 = k1 + bsize; if( pid == nproc-1 ) k2 = points->num; #ifdef PRINTINFO if( pid == 0 ) { printf("Starting Kmedian procedure\n"); printf("%i points in %i dimensions\n", points->num, ptDimension); } #endif #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif float myhiz = 0; for (long kk=k1;kk < k2; kk++ ) { myhiz += dist(points->p[kk], points->p[0], ptDimension)*points->p[kk].weight; } hizs[pid] = myhiz; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif for( int i = 0; i < nproc; i++ ) { hiz += hizs[i]; } loz=0.0; z = (hiz+loz)/2.0; /* NEW: Check whether more centers than points! */ if (points->num <= kmax) { //--cambine: just ignore for the timebeing /* just return all points as facilities */ for (long kk=k1;kk<k2;kk++) { points->p[kk].assign = kk; points->p[kk].cost = 0; } cost = 0; if( pid== 0 ) { free(hizs); *kfinal = k; } return cost; } if( pid == 0 ) shuffle(points); //--cambine: why need shuffle? cost = pspeedy(points, z, &k, pid, barrier); #ifdef PRINTINFO if( pid == 0 ) printf("thread %d: Finished first call to speedy, cost=%lf, k=%i\n",pid,cost,k); #endif i=0; /* give speedy SP chances to get at least kmin/2 facilities */ while ((k < kmin)&&(i<SP)) { cost = pspeedy(points, z, &k, pid, barrier); i++; } #ifdef PRINTINFO if( pid==0) printf("thread %d: second call to speedy, cost=%lf, k=%d\n",pid,cost,k); #endif /* if still not enough facilities, assume z is too high */ while (k < kmin) { #ifdef PRINTINFO if( pid == 0 ) { printf("%lf %lf\n", loz, hiz); printf("Speedy indicates we should try lower z\n"); } #endif if (i >= SP) {hiz=z; z=(hiz+loz)/2.0; i=0;} if( pid == 0 ) shuffle(points); cost = pspeedy(points, z, &k, pid, barrier); i++; } /* now we begin the binary search for real */ /* must designate some points as feasible centers */ /* this creates more consistancy between FL runs */ /* helps to guarantee correct # of centers at the end */ if( pid == 0 ) { numfeasible = selectfeasible_fast(points,&feasible,kmin,pid,barrier); //--cambine? 
for( int i = 0; i< points->num; i++ ) { is_center[points->p[i].assign]= true; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif while(1) { #ifdef PRINTINFO if( pid==0 ) { printf("loz = %lf, hiz = %lf\n", loz, hiz); printf("Running Local Search...\n"); } #endif /* first get a rough estimate on the FL solution */ // pthread_barrier_wait(barrier); lastcost = cost; cost = pFL(points, feasible, numfeasible, z, &k, kmax, cost, (long)(ITER*kmax*log((float)kmax)), 0.1, pid, barrier); /* if number of centers seems good, try a more accurate FL */ if (((k <= (1.1)*kmax)&&(k >= (0.9)*kmin))|| ((k <= kmax+2)&&(k >= kmin-2))) { #ifdef PRINTINFO if( pid== 0) { printf("Trying a more accurate local search...\n"); } #endif /* may need to run a little longer here before halting without improvement */ cost = pFL(points, feasible, numfeasible, z, &k, kmax, cost, (long)(ITER*kmax*log((float)kmax)), 0.001, pid, barrier); } if (k > kmax) { /* facilities too cheap */ /* increase facility cost and up the cost accordingly */ loz = z; z = (hiz+loz)/2.0; cost += (z-loz)*k; } if (k < kmin) { /* facilities too expensive */ /* decrease facility cost and reduce the cost accordingly */ hiz = z; z = (hiz+loz)/2.0; cost += (z-hiz)*k; } /* if k is good, return the result */ /* if we're stuck, just give up and return what we have */ if (((k <= kmax)&&(k >= kmin))||((loz >= (0.999)*hiz)) ) { break; } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif } //clean up... if( pid==0 ) { free(feasible); free(hizs); *kfinal = k; } return cost; } /* compute the means for the k clusters */ int contcenters(Points *points) { long i, ii; float relweight; for (i=0;i<points->num;i++) { /* compute relative weight of this point to the cluster */ if (points->p[i].assign != i) { relweight=points->p[points->p[i].assign].weight + points->p[i].weight; relweight = points->p[i].weight/relweight; for (ii=0;ii<points->dim;ii++) { points->p[points->p[i].assign].coord[ii]*=1.0-relweight; points->p[points->p[i].assign].coord[ii]+= points->p[i].coord[ii]*relweight; } points->p[points->p[i].assign].weight += points->p[i].weight; } } return 0; } /* copy centers from points to centers */ void copycenters(Points *points, Points* centers, long* centerIDs, long offset) { long i; long k; bool *is_a_median = (bool *) calloc(points->num, sizeof(bool)); /* mark the centers */ for ( i = 0; i < points->num; i++ ) { is_a_median[points->p[i].assign] = 1; } k=centers->num; /* count how many */ for ( i = 0; i < points->num; i++ ) { if ( is_a_median[i] ) { memcpy( centers->p[k].coord, points->p[i].coord, points->dim * sizeof(float)); centers->p[k].weight = points->p[i].weight; centerIDs[k] = i + offset; k++; } } centers->num = k; free(is_a_median); } void* localSearchSub(void* arg_) { pkmedian_arg_t* arg= (pkmedian_arg_t*)arg_; pkmedian(arg->points,arg->kmin,arg->kmax,arg->kfinal,arg->pid,arg->barrier); return NULL; } void localSearch( Points* points, long kmin, long kmax, long* kfinal ) { #ifdef PROFILE_TMP double t1 = gettime(); #endif pthread_barrier_t barrier; #ifdef ENABLE_THREADS pthread_barrier_init(&barrier,NULL,nproc); #endif pthread_t* threads = new pthread_t[nproc]; pkmedian_arg_t* arg = new pkmedian_arg_t[nproc]; for( int i = 0; i < nproc; i++ ) { arg[i].points = points; arg[i].kmin = kmin; arg[i].kmax = kmax; arg[i].pid = i; arg[i].kfinal = kfinal; arg[i].barrier = &barrier; #ifdef ENABLE_THREADS pthread_create(threads+i,NULL,localSearchSub,(void*)&arg[i]); #else localSearchSub(&arg[0]); #endif } for ( int i = 0; i < nproc; i++) { #ifdef 
ENABLE_THREADS pthread_join(threads[i],NULL); #endif } delete[] threads; delete[] arg; #ifdef ENABLE_THREADS pthread_barrier_destroy(&barrier); #endif #ifdef PROFILE_TMP double t2 = gettime(); time_local_search += t2-t1; #endif } void outcenterIDs( Points* centers, long* centerIDs, char* outfile ) { FILE* fp = fopen(outfile, "w"); if( fp==NULL ) { fprintf(stderr, "error opening %s\n",outfile); exit(1); } int* is_a_median = (int*)calloc( sizeof(int), centers->num ); for( int i =0 ; i< centers->num; i++ ) { is_a_median[centers->p[i].assign] = 1; } for( int i = 0; i < centers->num; i++ ) { if( is_a_median[i] ) { fprintf(fp, "%ld\n", centerIDs[i]); fprintf(fp, "%lf\n", centers->p[i].weight); for( int k = 0; k < centers->dim; k++ ) { fprintf(fp, "%lf ", centers->p[i].coord[k]); } fprintf(fp,"\n\n"); } } fclose(fp); } void streamCluster( PStream* stream, long kmin, long kmax, int dim, long chunksize, long centersize, char* outfile ) { float* block = (float*)malloc( chunksize*dim*sizeof(float) ); float* centerBlock = (float*)malloc(centersize*dim*sizeof(float) ); long* centerIDs = (long*)malloc(centersize*dim*sizeof(long)); if( block == NULL ) { fprintf(stderr,"not enough memory for a chunk!\n"); exit(1); } Points points; points.dim = dim; points.num = chunksize; points.p = (Point *)malloc(chunksize*sizeof(Point)); for( int i = 0; i < chunksize; i++ ) { points.p[i].coord = &block[i*dim]; } Points centers; centers.dim = dim; centers.p = (Point *)malloc(centersize*sizeof(Point)); centers.num = 0; for( int i = 0; i< centersize; i++ ) { centers.p[i].coord = &centerBlock[i*dim]; centers.p[i].weight = 1.0; } long IDoffset = 0; long kfinal; while(1) { size_t numRead = stream->read(block, dim, chunksize ); fprintf(stderr,"read %zu points\n",numRead); if( stream->ferror() || (numRead < (unsigned int)chunksize && !stream->feof()) ) { fprintf(stderr, "error reading data!\n"); exit(1); } points.num = numRead; for( int i = 0; i < points.num; i++ ) { points.p[i].weight = 1.0; } switch_membership = (char*)malloc(points.num*sizeof(char)); is_center = (bool*)calloc(points.num,sizeof(bool)); center_table = (int*)malloc(points.num*sizeof(int)); localSearch(&points,kmin, kmax,&kfinal); fprintf(stderr,"finish local search\n"); contcenters(&points); if( kfinal + centers.num > centersize ) { //here we don't handle the situation where # of centers gets too large. fprintf(stderr,"oops! 
no more space for centers\n"); exit(1); } #ifdef PRINTINFO printf("finish cont center\n"); #endif copycenters(&points, &centers, centerIDs, IDoffset); IDoffset += numRead; #ifdef PRINTINFO printf("finish copy centers\n"); #endif free(is_center); free(switch_membership); free(center_table); if( stream->feof() ) { break; } } //finally cluster all temp centers switch_membership = (char*)malloc(centers.num*sizeof(char)); is_center = (bool*)calloc(centers.num,sizeof(bool)); center_table = (int*)malloc(centers.num*sizeof(int)); localSearch( &centers, kmin, kmax ,&kfinal ); contcenters(&centers); outcenterIDs( &centers, centerIDs, outfile); } int main(int argc, char **argv) { char *outfilename = new char[MAXNAMESIZE]; char *infilename = new char[MAXNAMESIZE]; long kmin, kmax, n, chunksize, clustersize; int dim; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_streamcluster); #endif if (argc<9) { fprintf(stderr,"usage: %s k1 k2 d n chunksize clustersize infile outfile nproc\n", argv[0]); fprintf(stderr," k1: Min. number of centers allowed\n"); fprintf(stderr," k2: Max. number of centers allowed\n"); fprintf(stderr," d: Dimension of each data point\n"); fprintf(stderr," n: Number of data points\n"); fprintf(stderr," chunksize: Number of data points to handle per step\n"); fprintf(stderr," clustersize: Maximum number of intermediate centers\n"); fprintf(stderr," infile: Input file (if n<=0)\n"); fprintf(stderr," outfile: Output file\n"); fprintf(stderr," nproc: Number of threads to use\n"); fprintf(stderr,"\n"); fprintf(stderr, "if n > 0, points will be randomly generated instead of reading from infile.\n"); exit(1); } kmin = atoi(argv[1]); kmax = atoi(argv[2]); dim = atoi(argv[3]); n = atoi(argv[4]); chunksize = atoi(argv[5]); clustersize = atoi(argv[6]); strcpy(infilename, argv[7]); strcpy(outfilename, argv[8]); nproc = atoi(argv[9]); srand48(SEED); PStream* stream; if( n > 0 ) { stream = new SimStream(n); } else { stream = new FileStream(infilename); } #ifdef PROFILE_TMP double t1 = gettime(); #endif #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef PROFILE_TMP serial = 0.0; cpu_gpu_memcpy = 0.0; gpu_malloc = 0.0; gpu_free = 0.0; kernel_time = 0.0; time_FL = 0.0; cnt_speedy = 0; #endif double sc_start = gettime(); streamCluster(stream, kmin, kmax, dim, chunksize, clustersize, outfilename ); double sc_end = gettime(); printf("Streamcluster time = %lf (s)\n", sc_end-sc_start); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif #ifdef PROFILE_TMP gpu_free = gettime(); #endif free(coord_h); free(gl_lower); free(work_mem_h); free(p_h); cudaFree(work_mem_d); cudaFree(coord_d); cudaFree(center_table_d); cudaFree(switch_membership_d); cudaFree(p_d); #ifdef PROFILE_TMP gpu_free = gettime() - gpu_free; #endif #ifdef PROFILE_TMP double t2 = gettime(); printf("time = %lf\n",t2-t1); #endif delete stream; #ifdef PROFILE_TMP printf("time pgain = %lf\n", time_gain); printf("time pgain_dist = %lf\n", time_gain_dist); printf("time pgain_init = %lf\n", time_gain_init); printf("time pselect = %lf\n", time_select_feasible); printf("time pspeedy = %lf\n", time_speedy); printf("time pshuffle = %lf\n", time_shuffle); printf("time FL = %lf\n", time_FL); printf("time localSearch = %lf\n", time_local_search); printf("\n"); 
  printf("====GPU Timing info====\n");
  printf("time serial = %lf\n", serial);
  printf("time CPU to GPU memory copy = %lf\n", cpu_gpu_memcpy);
  printf("time GPU to CPU memory copy back = %lf\n", memcpy_back);
  printf("time GPU malloc = %lf\n", gpu_malloc);
  printf("time GPU free = %lf\n", gpu_free);
  printf("time kernel = %lf\n", kernel_time);
  FILE *fp = fopen("PD.txt", "w");
  fprintf(fp, "%lf, %lf, %lf, %lf, %lf, %lf\n", time_FL, cpu_gpu_memcpy, memcpy_back, kernel_time, gpu_malloc, gpu_free);
  fclose(fp);
#endif
#ifdef ENABLE_PARSEC_HOOKS
  __parsec_bench_end();
#endif
  return 0;
}
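/* --- Illustrative sketch (added for clarity; not part of the original benchmark) ---
 * selectfeasible_fast() above samples candidate centers with probability
 * proportional to point weight: it builds a prefix sum of weights, draws a
 * uniform value w in [0, totalweight), and binary-searches for the first
 * prefix entry exceeding w. A self-contained model of that one sampling step
 * (the function name is hypothetical; <cstdlib>/<climits> are assumed to be
 * available, since the original already uses lrand48() and INT_MAX):
 */
static int sampleIndexByWeight(const float *accumweight, int num, float totalweight) {
  float w = (lrand48() / (float)INT_MAX) * totalweight;  // same RNG convention as the code above
  if (accumweight[0] > w) return 0;
  int l = 0, r = num - 1;                                // bisect on the prefix sums
  while (l + 1 < r) {
    int k = (l + r) / 2;
    if (accumweight[k] > w) r = k;
    else l = k;
  }
  return r;                                              // chosen roughly in proportion to weight[r]
}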
#include "optimization/optimization.h" #include "util/mirrored_memory.h" #include "kernel_common.h" namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_selfIntersectionCount(const float4 * testSites, const int nSites, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * potentialIntersection, int * nCollisions) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float d = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (d < 0) { // collision detected atomicAdd(nCollisions,1); return; } } } } } template <bool dbgErr> __global__ void gpu_normEqnsSelfIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const int * potentialIntersection, float * result, float * debugError) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } // if (dbgErr) { debugError[index] = NAN; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> & dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * J = &s[threadIdx.x*(dims-6)]; // dims-6 because self-intersection doesn't depend on global transform const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(J,v_m,srcFrame,dstSdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); float * JTr = result; float * JTJ = &result[dims-6]; float * e = &result[dims-6 + JTJSize(dims-6)]; computeSquaredLossResult(dims-6,residual,J,e,JTr, JTJ); if (dbgErr) { debugError[index] = residual*residual; } return; } } } } } __global__ void gpu_normEqnsSelfIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const float * dtheta_dalpha, const int * potentialIntersection, float * result) { extern 
__shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { //printf("*"); // collision detected float * de_dtheta = &s[threadIdx.x*(fullDims-6+redDims-6)]; // redDims-6 because self-intersection doesn't depend on global transform float * J = &s[threadIdx.x*(fullDims-6+redDims-6) + fullDims-6]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(de_dtheta,v_m,srcFrame,dstSdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); // reduction doPoseGradientReductionArticulationOnly(J,de_dtheta,dtheta_dalpha,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims-6]; float * e = &result[redDims-6 + JTJSize(redDims-6)]; computeSquaredLossResult(redDims-6,residual,J,e,JTr,JTJ); return; } } } } } __global__ void gpu_normEqnsSelfIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const int * dMapping, const int * potentialIntersection, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames[srcGrid]; v_src_f.w = 1; const float4 v_m = T_mfs[srcFrame]*v_src_f; for (int dstGrid=0; dstGrid<nSdfs; ++dstGrid) { if (potentialIntersection[dstGrid + srcGrid*nSdfs]) { const int dstFrame = sdfFrames[dstGrid]; const float4 v_dst_f = T_fms[dstFrame]*v_m; const Grid3D<float> &dstSdf = sdfs[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { //printf("*"); // collision detected float * de_dtheta = &s[threadIdx.x*(fullDims-6+redDims-6)]; // redDims-6 because self-intersection doesn't depend on global transform float * J = &s[threadIdx.x*(fullDims-6+redDims-6) + fullDims-6]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_m = make_float3(SE3Rotate(T_mfs[dstFrame],make_float4(dstSdfGrad_dst_f,0))); getErrorJacobianOfModelPointArticulationOnly(de_dtheta,v_m,srcFrame,dstSdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); doParamMappingArticulationOnly(J,de_dtheta,dMapping,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims-6]; float * e = &result[redDims-6 + JTJSize(redDims-6)]; computeSquaredLossResult(redDims-6,residual,J,e,JTr,JTJ); return; } } } } } __global__ void 
gpu_initDebugIntersectionError(float * debugError, const int nSites) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } debugError[index] = NAN; } __global__ void gpu_intersectionCount(const float4 * testSites, const int nSites, const SE3 T_ds, const SE3 * T_mfs_src, const int * sdfFrames_src, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, int * nCollisions) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); //printf("%f %f %f ",v_dst_g.x,v_dst_g.y,v_dst_g.z); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float d = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (d < 0) { // collision detected atomicAdd(nCollisions,1); return; } } } } template <bool dbgErr> __global__ void gpu_normEquationsIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, float * result, float * debugError) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * J = &s[tid*dims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(J,v_src_m,srcFrame,dstSdfGrad_src_m,dims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); float * JTr = result; float * JTJ = &result[dims]; float * e = &result[dims + JTJSize(dims)]; computeSquaredLossResult(dims,residual,J,e,JTr,JTJ); if (dbgErr) { debugError[index] += (residual*residual); } return; } } } } __global__ void gpu_normEqnsIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, 
const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, const float * dtheta_dalpha_src, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims)+fullDims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); getErrorJacobianOfModelPoint(de_dtheta,v_src_m,srcFrame,dstSdfGrad_src_m,fullDims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); doPoseGradientReduction(J,de_dtheta,dtheta_dalpha_src,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ); return; } } } } __global__ void gpu_normEqnsIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const SE3 * T_mfs_src, const SE3 * T_fms_src, const int * sdfFrames_src, const SE3 * T_mfs_dst, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst, const int * dependencies_src, const JointType * jointTypes_src, const float3 * jointAxes_src, const int * dMapping_src, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + threadIdx.y*blockDim.x; // overflow if (index >= nSites) { return; } float4 v_src_f = testSites[index]; const int srcGrid = round(v_src_f.w); const int srcFrame = sdfFrames_src[srcGrid]; v_src_f.w = 1; const float4 v_src_m = T_mfs_src[srcFrame]*v_src_f; const float4 v_dst_m = T_ds*v_src_m; for (int dstGrid=0; dstGrid<nSdfs_dst; ++dstGrid) { const int dstFrame = sdfFrames_dst[dstGrid]; const float4 v_dst_f = T_fms_dst[dstFrame]*v_dst_m; const Grid3D<float> & dstSdf = sdfs_dst[dstGrid]; const float3 v_dst_g = dstSdf.getGridCoords(make_float3(v_dst_f)); if (dstSdf.isInBoundsGradientInterp(v_dst_g)) { const float residual = dstSdf.getValueInterpolated(v_dst_g)*dstSdf.resolution; if (residual < 0) { // collision detected float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims)+fullDims]; const float3 dstSdfGrad_dst_f = dstSdf.getGradientInterpolated(v_dst_g); const float3 dstSdfGrad_dst_m = make_float3(SE3Rotate(T_mfs_dst[dstFrame],make_float4(dstSdfGrad_dst_f,0))); const float3 dstSdfGrad_src_m = SE3Rotate(T_sd,dstSdfGrad_dst_m); 
getErrorJacobianOfModelPoint(de_dtheta,v_src_m,srcFrame,dstSdfGrad_src_m,fullDims,dependencies_src,jointTypes_src,jointAxes_src,T_fms_src,T_mfs_src); doParamMapping(J,de_dtheta,dMapping_src,fullDims,redDims); float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; computeSquaredLossResult(redDims,residual,J,e,JTr,JTJ); return; } } } } __global__ void gpu_intersectionCheckRigidObjInHand(const float4 * testSites, const int nSites, const SE3 T_ho, const SE3 T_oh, const SE3 * T_mfs_h, const SE3 * T_fms_h, const int * sdfFrames_h, const Grid3D<float> * sdfs_h, const int nSdfs_h, float * result) { extern __shared__ float s[]; const int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_o = testSites[index]; v_o.w = 1; const float4 v_h = T_ho*v_o; for (int hGrid=0; hGrid<nSdfs_h; ++hGrid) { const int hFrame = sdfFrames_h[hGrid]; const float4 v_f = T_fms_h[hFrame]*v_h; const Grid3D<float> &hSdf = sdfs_h[hGrid]; const float3 v_g = hSdf.getGridCoords(make_float3(v_f)); if (hSdf.isInBoundsGradientInterp(v_g)) { const float d = hSdf.getValueInterpolated(v_g)*hSdf.resolution; if (d < 0) { // collision detected float * J = &s[tid*12]; const float3 hSdfGrad_f = hSdf.getGradientInterpolated(v_g); const float3 hSdfGrad_h = SE3Rotate(T_mfs_h[hFrame],hSdfGrad_f); const float3 hSdfGrad_o = SE3Rotate(T_oh,hSdfGrad_h); // hand derivative J[0] = dot(hSdfGrad_h,make_float3(-1, 0, 0)); J[1] = dot(hSdfGrad_h,make_float3( 0,-1, 0)); J[2] = dot(hSdfGrad_h,make_float3( 0, 0,-1)); J[3] = dot(hSdfGrad_h,make_float3( 0, v_h.z,-v_h.y)); J[4] = dot(hSdfGrad_h,make_float3(-v_h.z, 0, v_h.x)); J[5] = dot(hSdfGrad_h,make_float3( v_h.y,-v_h.x, 0)); // object derivative J[6] = dot(hSdfGrad_o,make_float3(-1, 0, 0)); J[7] = dot(hSdfGrad_o,make_float3( 0,-1, 0)); J[8] = dot(hSdfGrad_o,make_float3( 0, 0,-1)); J[9] = dot(hSdfGrad_o,make_float3( 0, v_o.z,-v_o.y)); J[10] = dot(hSdfGrad_o,make_float3(-v_o.z, 0, v_o.x)); J[11] = dot(hSdfGrad_o,make_float3( v_o.y,-v_o.x, 0)); float * eJ = result; float * JTJ = &result[12]; float * e = &result[12 + JTJSize(12)]; for (int i=0; i<12; ++i) { if (J[i] == 0.0f) { continue; } float eJval = -d*-J[i]; atomicAdd(&eJ[i],eJval); for (int j=0; j<=i; ++j) { float JTJval = J[i]*J[j]; atomicAdd(&JTJ[((i*(i+1))>>1) + j],JTJval); } } atomicAdd(e,d*d); return; } } } } __global__ void gpu_getDistanceToSdf(const float4 * testSites, const int nSites, const SE3 T_ds, const Grid3D<float> * sdf_dst, float * distances) { const int index = blockIdx.x*blockDim.x + threadIdx.x; // overflow if (index >= nSites) { return; } float4 v_src = testSites[index]; v_src.w = 1.0; float4 v_dst = T_ds*v_src; const float3 v_dst_g = sdf_dst->getGridCoords(make_float3(v_dst)); if (!sdf_dst->isInBoundsGradientInterp(v_dst_g)) { distances[index] = 1e20; // printf("%f ",sdf_dst->resolution); } else { distances[index] = sdf_dst->getValueInterpolated(v_dst_g)*sdf_dst->resolution; } } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- int countSelfIntersections(const float4 * testSites, const int nSites, const SE3 * T_mfs, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const int * potentialIntersection) { dim3 block(128,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); MirroredVector<int> nCollisions(1); cudaMemset(nCollisions.devicePtr(),0,sizeof(int)); gpu_selfIntersectionCount<<<grid,block>>>(testSites,nSites,T_mfs,T_fms, sdfFrames,sdfs,nSdfs,potentialIntersection, 
nCollisions.devicePtr()); nCollisions.syncDeviceToHost(); return nCollisions.hostPtr()[0]; } int countIntersections(const float4 * testSites, const int nSites, const SE3 & T_ds, const SE3 * T_mfs_src, const int * sdfFrames_src, const SE3 * T_fms_dst, const int * sdfFrames_dst, const Grid3D<float> * sdfs_dst, const int nSdfs_dst) { dim3 block(128,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); MirroredVector<int> nCollisions(1); cudaMemset(nCollisions.devicePtr(),0,sizeof(int)); gpu_intersectionCount<<<grid,block>>>(testSites,nSites,T_ds,T_mfs_src,sdfFrames_src, T_fms_dst,sdfFrames_dst,sdfs_dst,nSdfs_dst, nCollisions.devicePtr()); nCollisions.syncDeviceToHost(); return nCollisions.hostPtr()[0]; } void normEqnsSelfIntersection(const float4 * testSites, const int nSites, const int dims, const MirroredModel & model, const int * potentialIntersection, float * result, float * debugError) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((dims-6)+JTJSize(dims-6)+1)*sizeof(float)); if (debugError == 0) { gpu_normEqnsSelfIntersection<false><<<grid,block,64*(dims-6)*sizeof(float)>>>(testSites, nSites, dims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), potentialIntersection, result, debugError); } else { gpu_normEqnsSelfIntersection<true><<<grid,block,64*(dims-6)*sizeof(float)>>>(testSites, nSites, dims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), potentialIntersection, result, debugError); } } void normEqnsIntersection(const float4 * testSites, const int nSites, const int dims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, float * result, float * debugError) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((dims)+JTJSize(dims)+1)*sizeof(float)); if (debugError == 0) { gpu_normEquationsIntersection<false><<<grid,block,64*(dims)*sizeof(float)>>>(testSites, nSites, dims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), result, debugError); } else { gpu_normEquationsIntersection<true><<<grid,block,64*(dims)*sizeof(float)>>>(testSites, nSites, dims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), result, debugError); } } void normEqnsSelfIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const MirroredModel & model, const float * dtheta_dalpha, const int * potentialIntersection, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); 
cudaMemset(result,0,((redDims-6)+JTJSize(redDims-6)+1)*sizeof(float)); gpu_normEqnsSelfIntersectionReduced<<<grid,block,64*(fullDims-6+redDims-6)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dtheta_dalpha, potentialIntersection, result); } void normEqnsSelfIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const MirroredModel & model, const int * dMapping, const int * potentialIntersection, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((redDims-6)+JTJSize(redDims-6)+1)*sizeof(float)); gpu_normEqnsSelfIntersectionParamMap<<<grid,block,64*(fullDims-6+redDims-6)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, model.getDeviceTransformsFrameToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), dMapping, potentialIntersection, result); } void normEqnsIntersectionReduced(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, const float * dtheta_dalpha_src, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((redDims)+JTJSize(redDims)+1)*sizeof(float)); gpu_normEqnsIntersectionReduced<<<grid,block,64*(fullDims+redDims)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), dtheta_dalpha_src, result); } void normEqnsIntersectionParamMap(const float4 * testSites, const int nSites, const int fullDims, const int redDims, const SE3 T_ds, const SE3 T_sd, const MirroredModel & srcModel, const MirroredModel & dstModel, const int * dMapping_src, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,((redDims)+JTJSize(redDims)+1)*sizeof(float)); gpu_normEqnsIntersectionParamMap<<<grid,block,64*(fullDims+redDims)*sizeof(float)>>>(testSites, nSites, fullDims, redDims, T_ds, T_sd, srcModel.getDeviceTransformsFrameToModel(), srcModel.getDeviceTransformsModelToFrame(), srcModel.getDeviceSdfFrames(), dstModel.getDeviceTransformsFrameToModel(), dstModel.getDeviceTransformsModelToFrame(), dstModel.getDeviceSdfFrames(), dstModel.getDeviceSdfs(), dstModel.getNumSdfs(), srcModel.getDeviceDependencies(), srcModel.getDeviceJointTypes(), srcModel.getDeviceJointAxes(), dMapping_src, result); } void intersectionCheckRigidObjInHand(const float4 * testSites, const int nSites, const SE3 T_ho, const SE3 T_oh, const SE3 * T_mfs_h, const SE3 * T_fms_h, const int * sdfFrames_h, const Grid3D<float> * sdfs_h, const int nSdfs_h, float * result) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); cudaMemset(result,0,(12+JTJSize(12)+1)*sizeof(float)); 
gpu_intersectionCheckRigidObjInHand<<<grid,block,64*12*sizeof(float)>>>(testSites, nSites, T_ho, T_oh, T_mfs_h, T_fms_h, sdfFrames_h, sdfs_h, nSdfs_h, result); } void getDistanceToSdf(const float4 * testSites, const int nSites, const SE3 T_ds, const Grid3D<float> * sdf_dst, float * distances, const cudaStream_t stream) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); gpu_getDistanceToSdf<<<grid,block,0,stream>>>(testSites,nSites,T_ds,sdf_dst,distances); } void initDebugIntersectionError(float * debugError, const int nSites) { dim3 block(64,1,1); dim3 grid(ceil(nSites/(float)block.x),1,1); gpu_initDebugIntersectionError<<<grid,block>>>(debugError, nSites); } }
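// The host wrappers above all repeat the same two steps before launching their
// kernels: a 1-D launch configuration of ceil(nSites / block.x) blocks of 64 (or
// 128) threads, and a cudaMemset that clears a result buffer of
// (dims + JTJSize(dims) + 1) floats. The helpers below are a minimal sketch of
// that pattern, not part of the original API; packedJTJSize() is a hypothetical
// stand-in assuming JTJSize(n) counts the packed triangle of a symmetric n x n
// J^T J matrix, and the buffer-layout comment is likewise an assumption.
#include <cuda_runtime.h>

// Hypothetical stand-in for JTJSize(): entries in the packed triangle of an
// n x n symmetric matrix (assumption, for illustration only).
static inline int packedJTJSize(int n) { return n * (n + 1) / 2; }

// One thread per test site: integer-ceiling equivalent of the
// dim3 grid(ceil(nSites/(float)block.x), 1, 1) used in the wrappers above.
static inline dim3 siteGrid(int nSites, int blockSize) {
    return dim3((nSites + blockSize - 1) / blockSize, 1, 1);
}

// Zero the accumulation buffer sized as in the wrappers above
// (assumed layout: gradient terms, packed J^T J, plus one extra scalar).
static inline cudaError_t clearNormEqnsResult(float *devResult, int dims) {
    const size_t nFloats = static_cast<size_t>(dims) + packedJTJSize(dims) + 1;
    return cudaMemset(devResult, 0, nFloats * sizeof(float));
}

// A wrapper such as normEqnsIntersection could then call
// clearNormEqnsResult(result, dims) and launch with siteGrid(nSites, 64);
// this is a refactoring sketch only, not the project's existing interface.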
#include <algorithm> #include <ctime> #include <limits> #include <random> #include "best_splits.h" #include "continuous_tree_grower.h" #include "cub/cub.cuh" #include "cuda_helpers.h" #include "gain.cuh" #include "garden.h" #include "hist_tree_grower.h" #include "histogram.h" #include "model_helper.h" #include "objective.h" #include "param.h" #include "split.cuh" namespace arboretum { namespace core { using namespace thrust; using namespace thrust::cuda; using thrust::device_vector; using thrust::host_vector; template <typename T> __global__ void set_segment(T *row2Node, const unsigned *count_prefix_sum, int shift, int level, const size_t n) { for (unsigned i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { T leaf = row2Node[i]; unsigned segment = linear_search_2steps(count_prefix_sum, i, unsigned(1 << level) + 1, unsigned(leaf >> (shift + 1)) << 1); row2Node[i] = (segment << shift) | (leaf & 1 << (shift - 1)); } } template <typename SUM_T, typename NODE_T> __global__ void update_by_last_tree(float *y, const SUM_T *best_sum, const unsigned *best_count, const SUM_T *sum_prefix_sum, const unsigned *count_prefix_sum, const NODE_T *row2Node, const TreeParam param, const size_t n) { for (size_t i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { NODE_T leaf = row2Node[i]; unsigned segment = leaf >> 2; float delta = 0.0; const SUM_T left_sum = best_sum[segment]; const SUM_T right_sum = sum_prefix_sum[segment + 1] - sum_prefix_sum[segment] - left_sum; const unsigned left_count = best_count[segment]; const unsigned right_count = count_prefix_sum[segment + 1] - count_prefix_sum[segment] - left_count; if (leaf % 2 == 0) { delta = Weight(left_sum, left_count, param) * param.eta; } else { delta = Weight(right_sum, right_count, param) * param.eta; } assert(isfinite(delta)); y[i] += delta; } } template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T, typename TREE_GROWER> class ContinuousGardenBuilder : public GardenBuilderBase { public: ContinuousGardenBuilder(const TreeParam &param, io::DataMatrix *data, const InternalConfiguration &config, ApproximatedObjectiveBase *objective, const bool verbose) : verbose(verbose), rnd(config.seed), overlap_depth(config.overlap), param(param), gain_param(param.min_leaf_size, param.min_child_weight, param.gamma_absolute, param.gamma_relative, param.lambda, param.alpha, param.max_leaf_weight), objective(static_cast<ApproximatedObjective<GRAD_T> *>(objective)), best(1 << param.depth, config.hist_size), features_histograms(1 << param.depth, config.hist_size, data->columns_dense) { active_fids.resize(data->columns); row2Node.resize(data->rows, 0); partitioning_index.resize(data->rows, 0); _bestSplit.resize(1 << (param.depth - 2)); _nodeStat.resize(1 << (param.depth - 2)); grad.resize(data->rows); grad_buffer.resize(data->rows); data->y_hat.resize(data->rows, objective->IntoInternal(param.initial_y)); y_hat_d = data->y_hat; y_d = data->y; y_buffer.resize(data->rows); growers = new TREE_GROWER *[overlap_depth]; for (size_t i = 0; i < overlap_depth; ++i) { growers[i] = new TREE_GROWER(data->rows, param.depth, config.hist_size, &best, &features_histograms, &config); } } virtual ~ContinuousGardenBuilder() { for (auto i = 0; i < overlap_depth; ++i) { delete growers[i]; } delete[] growers; } virtual void InitGrowingTree(const size_t columns) override { int take = (int)(param.colsample_bytree * columns); if (take == 0) { printf("colsample_bytree is too small %f for %ld columns \n", 
param.colsample_bytree, columns); throw "colsample_bytree is too small"; } take = (int)(param.colsample_bytree * param.colsample_bylevel * columns); if (take == 0) { printf( "colsample_bytree and colsample_bylevel are too small %f %f for " "%ld columns \n", param.colsample_bytree, param.colsample_bylevel, columns); throw "colsample_bytree and colsample_bylevel are too small"; } for (size_t i = 0; i < columns; ++i) { active_fids[i] = i; } shuffle(active_fids.begin(), active_fids.end(), rnd); thrust::fill(row2Node.begin(), row2Node.end(), 0); for (size_t i = 0; i < _nodeStat.size(); ++i) { _nodeStat[i].Clean(); } for (size_t i = 0; i < _bestSplit.size(); ++i) { _bestSplit[i].Clean(); } this->features_histograms.Clear(); OK(cudaDeviceSynchronize()); for (size_t i = 0; i < overlap_depth; i++) { OK(cudaStreamSynchronize(growers[i]->stream)); } } virtual void InitTreeLevel(const int level, const size_t columns) override { int take = (int)(param.colsample_bytree * columns); shuffle(active_fids.begin(), active_fids.begin() + take, rnd); } virtual void GrowTree(RegTree *tree, io::DataMatrix *data, const unsigned short label) override { grad_slice = const_cast<GRAD_T *>( thrust::raw_pointer_cast(grad.data() + label * data->rows)); InitGrowingTree(data->columns); for (unsigned int i = 0; (i + 1) < param.depth; ++i) { InitTreeLevel(i, data->columns); UpdateNodeStat(i, data, tree); FindBestSplits(i, data); UpdateTree(i, tree, data); } for (size_t i = 0; i < overlap_depth; i++) { OK(cudaStreamSynchronize(growers[i]->stream)); } OK(cudaDeviceSynchronize()); OK(cudaGetLastError()); UpdateLeafWeight(tree); for (size_t i = 0; i < overlap_depth; i++) { OK(cudaStreamSynchronize(growers[i]->stream)); } OK(cudaDeviceSynchronize()); OK(cudaGetLastError()); UpdateByLastTree(data); } void UpdateByLastTree(io::DataMatrix *data) { int gridSize = 0; int blockSize = 0; compute1DInvokeConfig(data->rows, &gridSize, &blockSize, update_by_last_tree<SUM_T, NODE_T>); update_by_last_tree<SUM_T, NODE_T><<<gridSize, blockSize>>>( thrust::raw_pointer_cast(y_hat_d.data()), thrust::raw_pointer_cast(this->best.sum.data()), thrust::raw_pointer_cast(this->best.count.data()), thrust::raw_pointer_cast(this->best.parent_node_sum.data()), thrust::raw_pointer_cast(this->best.parent_node_count.data()), thrust::raw_pointer_cast(row2Node.data()), param, data->rows); } virtual void PredictByGrownTree( RegTree *tree, io::DataMatrix *data, thrust::host_vector<float> &out) const override { // tree->Predict(data, _rowIndex2Node, out); } virtual void UpdateGrad() override { objective->UpdateGrad(grad, y_hat_d, y_d); } private: bool verbose; std::default_random_engine rnd; std::vector<unsigned int> active_fids; const unsigned short overlap_depth; const TreeParam param; const GainFunctionParameters gain_param; GRAD_T *grad_slice; ApproximatedObjective<GRAD_T> *objective; std::vector<NodeStat<SUM_T>> _nodeStat; std::vector<Split<SUM_T>> _bestSplit; device_vector<NODE_T> row2Node; device_vector<unsigned> partitioning_index; size_t temp_bytes_per_rec = 0; TREE_GROWER **growers; BestSplit<SUM_T> best; Histogram<SUM_T> features_histograms; device_vector<GRAD_T> grad; device_vector<GRAD_T> grad_buffer; device_vector<float> y_buffer; device_vector<float> y_d; device_vector<float> y_hat_d; void FindBestSplits(const unsigned int level, io::DataMatrix *data) { unsigned length = 1 << level; unsigned int take = (unsigned int)(param.colsample_bylevel * param.colsample_bytree * data->columns); if (level != 0) { this->best.NextLevel(length); int gridSize = 0; int 
blockSize = 0; compute1DInvokeConfig(data->rows, &gridSize, &blockSize, set_segment<NODE_T>); set_segment<NODE_T><<<gridSize, blockSize, 0, growers[0]->stream>>>( thrust::raw_pointer_cast(row2Node.data()), thrust::raw_pointer_cast(this->best.parent_node_count.data()), param.depth - level, level, data->rows); growers[0]->CreatePartitioningIndexes(partitioning_index, row2Node, this->best.parent_node_count, level, param.depth); growers[0]->template PartitionByIndex<GRAD_T>( thrust::raw_pointer_cast(grad_buffer.data()), thrust::raw_pointer_cast(grad.data()), partitioning_index); OK(cudaStreamSynchronize(growers[0]->stream)); grad.swap(grad_buffer); growers[0]->template PartitionByIndex<float>( thrust::raw_pointer_cast(y_buffer.data()), thrust::raw_pointer_cast(y_hat_d.data()), partitioning_index); OK(cudaStreamSynchronize(growers[0]->stream)); y_buffer.swap(y_hat_d); growers[0]->template PartitionByIndex<float>( thrust::raw_pointer_cast(y_buffer.data()), thrust::raw_pointer_cast(y_d.data()), partitioning_index); OK(cudaStreamSynchronize(growers[0]->stream)); y_buffer.swap(y_d); } OK(cudaStreamSynchronize(growers[0]->stream)); OK(cudaStreamSynchronize(growers[0]->copy_d2h_stream)); for (size_t j = 0; j < data->columns; ++j) { for (size_t i = 0; i < overlap_depth && (j + i) < data->columns; ++i) { if (j != 0 && (i + 1) < overlap_depth) { continue; } size_t active_fid = active_fids[j + i]; size_t circular_fid = (j + i) % overlap_depth; if (active_fid < data->columns_dense) { ProcessDenseFeature(active_fid, circular_fid, level, data, (j + i) >= take); } else { ProcessCategoryFeature(active_fid - data->columns_dense, circular_fid, level, data); } } size_t circular_fid = j % overlap_depth; if (active_fids[j] < data->columns_dense) { GetBestSplitForDenseFeature(active_fids[j], circular_fid, level, data->data_reduced_mapping[active_fids[j]], data->reduced_size[active_fids[j]], j >= take); } else { if ((data->category_size[active_fids[j] - data->columns_dense] + level) < sizeof(unsigned char) * CHAR_BIT) { GetBestSplitForCategoryFeature<unsigned char>( active_fids[j] - data->columns_dense, data->columns_dense, circular_fid, length, data); } else if ((data->category_size[active_fids[j] - data->columns_dense] + level) < sizeof(unsigned short) * CHAR_BIT) { GetBestSplitForCategoryFeature<unsigned short>( active_fids[j] - data->columns_dense, data->columns_dense, circular_fid, length, data); } else if ((data->category_size[active_fids[j] - data->columns_dense] + level) < sizeof(unsigned int) * CHAR_BIT) { GetBestSplitForCategoryFeature<unsigned int>( active_fids[j] - data->columns_dense, data->columns_dense, circular_fid, length, data); } else { GetBestSplitForCategoryFeature<NODE_T>( active_fids[j] - data->columns_dense, data->columns_dense, circular_fid, length, data); } } } } inline void GetBestSplitForDenseFeature( const int active_fid, const size_t circular_fid, const unsigned level, const std::vector<float> &data_reduced_mapping, const unsigned reduced_size, const bool partition_only) { if (!partition_only) { const unsigned length = 1 << level; OK(cudaStreamSynchronize(growers[circular_fid]->stream)); growers[circular_fid]->FindBest( this->best, this->row2Node, this->best.parent_node_sum, this->best.parent_node_count, active_fid, level, param.depth, length); } OK(cudaStreamSynchronize(growers[circular_fid]->stream)); OK(cudaStreamSynchronize(growers[circular_fid]->copy_d2h_stream)); } template <typename NODE_VALUE_T> inline void GetBestSplitForCategoryFeature(const int active_fid, const size_t 
columns_dense, const size_t circular_fid, const size_t lenght, const io::DataMatrix *data) {} void ProcessDenseFeature(const size_t active_fid, const size_t circular_fid, const size_t level, io::DataMatrix *data, const bool partition_only) { growers[circular_fid]->ProcessDenseFeature( partitioning_index, row2Node, grad, data->GetDeviceData<BIN_T>(active_fid), thrust::raw_pointer_cast(data->GetHostData<BIN_T>(active_fid).data()), this->best.parent_node_sum, this->best.parent_node_count, data->reduced_size[active_fid], level, param.depth, gain_param, partition_only, active_fid); } inline void ProcessCategoryFeature(const size_t active_fid, const size_t circular_fid, const size_t level, const io::DataMatrix *data) { if ((data->category_size[active_fid] + level) < sizeof(unsigned char) * CHAR_BIT) { growers[circular_fid]->template ProcessCategoryFeature<unsigned char>( row2Node, grad, data->data_category_device[active_fid], data->data_categories[active_fid], this->best.parent_node_sum, this->best.parent_node_count, data->category_size[active_fid], level, gain_param); } else if ((data->category_size[active_fid] + level) < sizeof(unsigned short) * CHAR_BIT) { growers[circular_fid]->template ProcessCategoryFeature<unsigned short>( row2Node, grad, data->data_category_device[active_fid], data->data_categories[active_fid], this->best.parent_node_sum, this->best.parent_node_count, data->category_size[active_fid], level, gain_param); } else if ((data->category_size[active_fid] + level) < sizeof(unsigned int) * CHAR_BIT) { growers[circular_fid]->template ProcessCategoryFeature<unsigned int>( row2Node, grad, data->data_category_device[active_fid], data->data_categories[active_fid], this->best.parent_node_sum, this->best.parent_node_count, data->category_size[active_fid], level, gain_param); } else { growers[circular_fid]->template ProcessCategoryFeature<NODE_T>( row2Node, grad, data->data_category_device[active_fid], data->data_categories[active_fid], this->best.parent_node_sum, this->best.parent_node_count, data->category_size[active_fid], level, gain_param); } } void UpdateNodeStat(const int level, const io::DataMatrix *data, const RegTree *tree) { const unsigned len = 1 << level; best.Clear(len); if (level == 0) { SUM_T zero; init(zero); best.parent_node_count[0] = 0; best.parent_node_count[1] = unsigned(data->rows); best.parent_node_sum[0] = zero; OK(cub::DeviceReduce::Sum( this->growers[0]->temp_bytes, this->growers[0]->temp_bytes_allocated, thrust::raw_pointer_cast(grad.data()), thrust::raw_pointer_cast(&best.parent_node_sum[1]), data->rows)); OK(cudaDeviceSynchronize()); } for (unsigned i = 0; i < len; ++i) { _nodeStat[i].gain = 0.0; // todo: gain_func(_nodeStat[i].count, _nodeStat[i].sum_grad); _bestSplit[i].Clean(); } } void UpdateTree(const int level, RegTree *tree, io::DataMatrix *data) { const unsigned int offset = Node::HeapOffset(level); const size_t len = 1 << level; OK(cudaDeviceSynchronize()); best.Sync(1 << level); OK(cudaDeviceSynchronize()); for (unsigned i = 0; i < len; ++i) { const unsigned quantized = best.split_value_h[i]; const my_atomics &gain_feature = best.gain_feature_h[i]; _bestSplit[i].quantized = quantized; _bestSplit[i].count = best.count_h[i]; _bestSplit[i].fid = gain_feature.Feature(); _bestSplit[i].sum_grad = best.sum_h[i]; if (gain_feature.Feature() != -1) { _bestSplit[i].split_value = quantized >= data->data_reduced_mapping[gain_feature.Feature()].size() ? 
std::numeric_limits<float>::infinity() : data->data_reduced_mapping[gain_feature.Feature()][quantized]; } else { _bestSplit[i].gain = 0.0; _bestSplit[i].fid = 0; _bestSplit[i].split_value = std::numeric_limits<float>::infinity(); _bestSplit[i].count = best.parent_node_count_h[i + 1] - best.parent_node_count_h[i]; _bestSplit[i].sum_grad = best.parent_node_sum_h[i + 1] - best.parent_node_sum_h[i]; } const Split<SUM_T> &best = _bestSplit[i]; tree->nodes[i + offset].threshold = best.split_value; tree->nodes[i + offset].category = best.category; tree->nodes[i + offset].fid = best.fid < 0 ? 0 : best.fid; tree->nodes[i + offset].quantized = best.quantized; } } void UpdateLeafWeight(RegTree *tree) const { const unsigned int offset_1 = Node::HeapOffset(tree->depth - 2); const unsigned int offset = Node::HeapOffset(tree->depth - 1); for (unsigned int i = 0, len = (1 << (tree->depth - 2)); i < len; ++i) { const Split<SUM_T> &split = _bestSplit[i]; tree->weights[tree->ChildNode(i + offset_1, true) - offset] = split.LeafWeight(param) * param.eta; tree->weights[tree->ChildNode(i + offset_1, false) - offset] = split.LeafWeight( this->best.parent_node_count_h[i + 1] - this->best.parent_node_count_h[i], this->best.parent_node_sum_h[i + 1] - this->best.parent_node_sum_h[i], param) * param.eta; } } }; template <typename NODE_TYPE, typename GRAD_TYPE, typename SUM_TYPE> GardenBuilderBase *chained(const Configuration &cfg, io::DataMatrix *data, ApproximatedObjectiveBase *objective) { if (cfg.method == Exact) return new ContinuousGardenBuilder< NODE_TYPE, unsigned int, GRAD_TYPE, SUM_TYPE, ContinuousTreeGrower<NODE_TYPE, unsigned int, GRAD_TYPE, SUM_TYPE>>( cfg.tree_param, data, cfg.internal, objective, cfg.verbose.booster); else if (cfg.internal.hist_size < (1 << sizeof(unsigned char) * CHAR_BIT)) return new ContinuousGardenBuilder< NODE_TYPE, unsigned char, GRAD_TYPE, SUM_TYPE, HistTreeGrower<NODE_TYPE, unsigned char, GRAD_TYPE, SUM_TYPE>>( cfg.tree_param, data, cfg.internal, objective, cfg.verbose.booster); else return new ContinuousGardenBuilder< NODE_TYPE, unsigned short, GRAD_TYPE, SUM_TYPE, HistTreeGrower<NODE_TYPE, unsigned short, GRAD_TYPE, SUM_TYPE>>( cfg.tree_param, data, cfg.internal, objective, cfg.verbose.booster); } template <typename GRAD_TYPE, typename SUM_TYPE> GardenBuilderBase *chained(const Configuration &cfg, io::DataMatrix *data, ApproximatedObjectiveBase *objective) { if (cfg.tree_param.depth < sizeof(unsigned char) * CHAR_BIT) return chained<unsigned char, GRAD_TYPE, SUM_TYPE>(cfg, data, objective); else if (cfg.tree_param.depth < sizeof(unsigned short) * CHAR_BIT) return chained<unsigned short, GRAD_TYPE, SUM_TYPE>(cfg, data, objective); else if (cfg.tree_param.depth < sizeof(unsigned int) * CHAR_BIT) return chained<unsigned int, GRAD_TYPE, SUM_TYPE>(cfg, data, objective); else throw "unsupported depth"; } template <typename GRAD_TYPE> GardenBuilderBase *chained(const Configuration &cfg, io::DataMatrix *data, ApproximatedObjectiveBase *objective); template <> GardenBuilderBase *chained<float>(const Configuration &cfg, io::DataMatrix *data, ApproximatedObjectiveBase *objective) { if (cfg.internal.double_precision) return chained<float, double>(cfg, data, objective); else return chained<float, float>(cfg, data, objective); } template <> GardenBuilderBase *chained<float2>(const Configuration &cfg, io::DataMatrix *data, ApproximatedObjectiveBase *objective) { if (cfg.internal.double_precision) return chained<float2, mydouble2>(cfg, data, objective); else return chained<float2, float2>(cfg, 
data, objective); } GardenBuilderBase *chained(const Configuration &cfg, io::DataMatrix *data, ApproximatedObjectiveBase *objective) { if (cfg.objective == LinearRegression) return chained<float>(cfg, data, objective); else return chained<float2>(cfg, data, objective); } Garden::Garden(const Configuration cfg) : cfg(cfg), _init(false), _builder(nullptr), _objective(nullptr) { switch (cfg.objective) { case LinearRegression: _objective = new RegressionObjective(cfg.tree_param.initial_y); break; case LogisticRegression: _objective = new LogisticRegressionObjective(cfg.tree_param.initial_y); break; default: throw "Unknown objective function " + cfg.objective; } } void Garden::GrowTree(io::DataMatrix *data, float *grad) { if (cfg.method == Method::Exact) data->InitExact(cfg.verbose.data); else data->InitHist(cfg.internal.hist_size, cfg.verbose.data); if (!_init) { _builder = chained(cfg, data, _objective); size_t total; size_t free; cudaMemGetInfo(&free, &total); if (cfg.verbose.gpu) { printf("Total bytes %ld avaliable %ld \n", total, free); } if (cfg.internal.upload_features) data->TransferToGPU(free * 9 / 10, cfg.verbose.gpu); _init = true; } if (grad == NULL) { _builder->UpdateGrad(); } else { // todo: fix // data->grad = std::vector<float>(grad, grad + data->rows); } for (unsigned short i = 0; i < cfg.tree_param.labels_count; ++i) { RegTree *tree = new RegTree(cfg.tree_param.depth, i); _builder->GrowTree(tree, data, i); _trees.push_back(tree); if (grad == NULL) { // tree->PredictByQuantized(data, data->y_internal); // _builder->PredictByGrownTree(tree, data, data->y_internal); } } } void Garden::UpdateByLastTree(io::DataMatrix *data) { if (data->y_hat.size() == 0) data->y_hat.resize(data->rows * cfg.tree_param.labels_count, _objective->IntoInternal(cfg.tree_param.initial_y)); for (auto it = _trees.end() - cfg.tree_param.labels_count; it != _trees.end(); ++it) { (*it)->Predict(data, data->y_hat); } } void Garden::GetY(arboretum::io::DataMatrix *data, std::vector<float> &out) const { out.resize(data->y_hat.size()); _objective->FromInternal(data->y_hat, out); } void Garden::Predict(const arboretum::io::DataMatrix *data, std::vector<float> &out, const int n_rounds) const { out.resize(data->rows * cfg.tree_param.labels_count); thrust::host_vector<float> tmp(data->rows * cfg.tree_param.labels_count); thrust::fill(tmp.begin(), tmp.end(), _objective->IntoInternal(cfg.tree_param.initial_y)); auto size = min(n_rounds, int(_trees.size())); for (auto i = 0; i < size; ++i) { _trees[i]->Predict(data, tmp); } _objective->FromInternal(tmp, out); } const char *Garden::GetModel() const { std::vector<DecisionTree> tmp; for (size_t i = 0; i < this->_trees.size(); ++i) { tmp.push_back(*this->_trees[i]); } return DumpModel(this->cfg, tmp); } void Garden::Restore(const char *json_model) { std::vector<DecisionTree> trees = LoadModel(json_model); for (auto it = trees.begin(); it != trees.end(); ++it) { // TODO: get label RegTree *tree = new RegTree(cfg.tree_param.depth, 0); tree->weights = it->weights; tree->nodes = it->nodes; _trees.push_back(tree); } } Garden::~Garden() { if (_builder) delete _builder; if (_objective) delete _objective; for (size_t i = 0; i < _trees.size(); ++i) { delete _trees[i]; } } } // namespace core } // namespace arboretum
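// chained() above narrows NODE_T (unsigned char/short/int) from the tree depth
// and BIN_T from hist_size so that per-row bookkeeping stays as small as
// possible. The template below is a minimal, generic sketch of that dispatch
// idiom with hypothetical names (dispatch_by_bits, make_builder); it simply
// hands the selected unsigned type to a callable as a tag value, requires C++14
// return-type deduction, and is not part of arboretum itself.
#include <climits>

template <typename F>
auto dispatch_by_bits(unsigned bits, F &&f) {
  // Pick the narrowest unsigned type whose bit width exceeds `bits`,
  // mirroring the depth checks in chained() above.
  if (bits < sizeof(unsigned char) * CHAR_BIT) return f(static_cast<unsigned char>(0));
  if (bits < sizeof(unsigned short) * CHAR_BIT) return f(static_cast<unsigned short>(0));
  if (bits < sizeof(unsigned int) * CHAR_BIT) return f(static_cast<unsigned int>(0));
  throw "unsupported depth";  // same error-handling style as above
}

// Usage sketch (make_builder is hypothetical):
//   auto *builder = dispatch_by_bits(cfg.tree_param.depth, [&](auto tag) {
//     using NodeT = decltype(tag);
//     return make_builder<NodeT>(cfg, data, objective);
//   });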
#include <iostream> #include <algorithm> #include <chrono> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" ////////////////////////////// // forward ////////////////////////////// #define IM2COL_BORDER_CONSTANT 0 #define IM2COL_BORDER_REFLECT 1 #define IM2COL_BORDER_REFLECT_101 2 #define IM2COL_BORDER_REPLICATE 3 #define IM2COL_BORDER_WRAP 4 __device__ __forceinline__ bool device_Im2Col_Border(int mode, int &x, int &y, int w, int h) { switch ( mode ) { case IM2COL_BORDER_REFLECT: if ( x < 0 ) { x = -x - 1; } if ( y < 0 ) { y = -y - 1; } if ( x >= w ) { x = (w - 1) - (x - w); } if ( y >= h ) { y = (h - 1) - (y - h); } return true; case IM2COL_BORDER_REFLECT_101: if ( x < 0 ) { x = -x; } if ( y < 0 ) { y = -y; } if ( x >= w ) { x = (w - 2) - (x - w); } if ( y >= h ) { y = (h - 2) - (y - h); } return true; case IM2COL_BORDER_REPLICATE: if ( x < 0 ) { x = 0; } if ( y < 0 ) { y = 0; } if ( x >= w ) { x = w - 1; } if ( y >= h ) { y = h - 1; } return true; case IM2COL_BORDER_WRAP: if ( x < 0 ) { x += w; } if ( y < 0 ) { y += h; } if ( x >= w ) { x -= w; } if ( y >= h ) { y -= h; } return true; } return false; } __global__ void kernal_fp32_Im2Col_Forward( float const *x_buf, float *y_buf, int x_stride, int y_stride, int x_offset, int y_offset, int input_frame_stride, int input_w_size, int input_h_size, int output_frame_size, int output_frame_stride, int output_w_size, int output_size, int border_mode, float border_value ) { int filter_w_size = blockDim.y; int filter_h_size = blockDim.z; int output_frame = blockDim.x * blockIdx.x + threadIdx.x; if ( output_frame < output_frame_size ) { int fx = threadIdx.y; int fy = threadIdx.z; int c = blockIdx.y; int input_frame = output_frame / output_size; int f = output_frame % output_size; int iy = (f / output_w_size) * y_stride - y_offset + fy; int ix = (f % output_w_size) * x_stride - x_offset + fx; float x = border_value; if ( iy >= 0 && iy < input_h_size && ix >= 0 && ix < input_w_size ) { int input_node = (c * input_h_size + iy) * input_w_size + ix; x = x_buf[input_node * input_frame_stride + input_frame]; } else { if ( device_Im2Col_Border(border_mode, ix, iy, input_w_size, input_h_size) ) { int input_node = (c * input_h_size + iy) * input_w_size + ix; x = x_buf[input_node * input_frame_stride + input_frame]; } } int output_node = (c * filter_h_size + fy) * filter_w_size + fx; y_buf[output_node * output_frame_stride + output_frame] = x; } } BBCU_DLL_EXPORT int bbcu_fp32_Im2Col_Forward ( float const *dev_x_buf, float *dev_y_buf, int x_stride, int y_stride, int x_offset, int y_offset, int input_frame_size, int input_frame_stride, int input_w_size, int input_h_size, int input_c_size, int output_w_size, int output_h_size, int output_frame_stride, int filter_w_size, int filter_h_size, int border_mode, float border_value, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); int output_c_size = input_c_size; int output_size = output_w_size * output_h_size; int output_frame_size = input_frame_size * output_size; int frame_unit = 1024; while ( frame_unit * filter_w_size * filter_h_size > 1024 ) { frame_unit /= 2; } BBCU_ASSERT(frame_unit > 0); dim3 block(frame_unit, filter_w_size, filter_h_size); dim3 grid((output_frame_size + (frame_unit-1))/frame_unit, output_c_size); kernal_fp32_Im2Col_Forward<<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, x_stride, y_stride, x_offset, y_offset, input_frame_stride, input_w_size, input_h_size, output_frame_size, output_frame_stride, 
output_w_size, output_size, border_mode, border_value ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } __global__ void kernal_bit_Im2Col_Forward( int const *x_buf, int *y_buf, int x_stride, int y_stride, int x_offset, int y_offset, int input_frame_stride, int input_w_size, int input_h_size, int output_frame_size, int output_frame_stride, int output_w_size, int output_size, int border_mode ) { int output_frame_unit = blockDim.x * blockIdx.x + threadIdx.x; if ( output_frame_unit < output_frame_stride ) { int filter_w_size = blockDim.y; int filter_h_size = blockDim.z; int fx = threadIdx.y; int fy = threadIdx.z; int c = blockIdx.y; int output_node = (c * filter_h_size + fy) * filter_w_size + fx; int y = 0; for ( int i = 0; i < 32; ++i ) { int output_frame = output_frame_unit * 32 + i; if ( output_frame < output_frame_size ) { int input_frame = output_frame / output_size; int f = output_frame % output_size; int iy = (f / output_w_size) * y_stride - y_offset + fy; int ix = (f % output_w_size) * x_stride - x_offset + fx; if ( iy >= 0 && iy < input_h_size && ix >= 0 && ix < input_w_size ) { int input_node = (c * input_h_size + iy) * input_w_size + ix; int const *x_ptr = &x_buf[input_node * input_frame_stride]; int x = ((x_ptr[input_frame / 32] >> (input_frame % 32)) & 1); y |= (x << i); } else { if ( device_Im2Col_Border(border_mode, ix, iy, input_w_size, input_h_size) ) { int input_node = (c * input_h_size + iy) * input_w_size + ix; int const *x_ptr = &x_buf[input_node * input_frame_stride]; int x = ((x_ptr[input_frame / 32] >> (input_frame % 32)) & 1); y |= (x << i); } } } } int *y_ptr = &y_buf[output_node * output_frame_stride]; y_ptr[output_frame_unit] = y; } } BBCU_DLL_EXPORT int bbcu_bit_Im2Col_Forward ( int const *dev_x_buf, int *dev_y_buf, int x_stride, int y_stride, int x_offset, int y_offset, int input_frame_size, int input_frame_stride, int input_w_size, int input_h_size, int input_c_size, int output_w_size, int output_h_size, int output_frame_stride, int filter_w_size, int filter_h_size, int border_mode, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); int output_c_size = input_c_size; int output_size = output_w_size * output_h_size; int output_frame_size = input_frame_size * output_size; int output_frame_unit = (output_frame_size + 31) / 32; int frame_unit = 16; dim3 grid((output_frame_unit + (frame_unit-1))/frame_unit, output_c_size); dim3 block(frame_unit, filter_w_size, filter_h_size); kernal_bit_Im2Col_Forward<<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, x_stride, y_stride, x_offset, y_offset, input_frame_stride, input_w_size, input_h_size, output_frame_size, output_frame_stride, output_w_size, output_size, border_mode ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////////////////// // backward ////////////////////////////// __global__ void kernal_fp32_Im2Col_Backward( float const *dy_buf, float *dx_buf, int x_stride, int y_stride, int x_offset, int y_offset, int input_frame_size, int input_frame_stride, int input_w_size, int input_h_size, int input_c_size, int output_frame_size, int output_frame_stride, int output_w_size, int output_h_size, int filter_w_size, int filter_h_size ) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int c = blockDim.z * blockIdx.z + threadIdx.z; if ( x < input_w_size && y < input_h_size && c < input_c_size ) { float const *dy_ptr = &dy_buf[c * filter_h_size * filter_w_size * output_frame_stride]; int iy_limit = (output_h_size - 1) * y_stride; int ix_limit = (output_w_size - 
1) * x_stride; int x_align = x % x_stride; int y_align = y % y_stride; for ( int input_frame = 0; input_frame < input_frame_size; ++input_frame ) { float dx = 0; for (int fy = y_align; fy < filter_h_size; fy += y_stride ) { int iy = y - fy + y_offset; if ( iy >= 0 && iy <= iy_limit ) { for (int fx = x_align; fx < filter_w_size; fx += x_stride) { int ix = x - fx + x_offset; if (ix >= 0 && ix <= ix_limit ) { int output_frame = (input_frame * output_h_size + (iy/y_stride)) * output_w_size + (ix/x_stride); int output_node = fy * filter_w_size + fx; dx += dy_ptr[output_node * output_frame_stride + output_frame]; } } } } dx_buf[((c * input_h_size + y) * input_w_size + x) * input_frame_stride + input_frame] = dx; } } } BBCU_DLL_EXPORT int bbcu_fp32_Im2Col_Backward ( float const *dev_dy_buf, float *dev_dx_buf, int x_stride, int y_stride, int x_offset, int y_offset, int input_frame_size, int input_frame_stride, int input_w_size, int input_h_size, int input_c_size, int output_w_size, int output_h_size, int output_frame_stride, int filter_w_size, int filter_h_size, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); // int output_c_size = input_c_size; // int output_w_size = input_w_size - filter_w_size + 1; // int output_h_size = input_h_size - filter_h_size + 1; int output_size = output_w_size * output_h_size; int output_frame_size = input_frame_size * output_size; dim3 block(1024, 1, 1); while ( (int)block.x / 2 >= input_w_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= input_h_size ) { block.y /= 2; block.z *= 2; } while ( (int)block.z / 2 >= input_c_size ) { block.z /= 2; } block.z = std::min(64, (int)block.z); dim3 grid; grid.x = (input_w_size + block.x - 1) / block.x; grid.y = (input_h_size + block.y - 1) / block.y; grid.z = (input_c_size + block.z - 1) / block.z; // dim3 grid(input_w_size, input_h_size, 1); // dim3 block(1, 1, input_c_size); kernal_fp32_Im2Col_Backward<<<grid, block, 0, streamId>>>( dev_dy_buf, dev_dx_buf, x_stride, y_stride, x_offset, y_offset, input_frame_size, input_frame_stride, input_w_size, input_h_size, input_c_size, output_frame_size, output_frame_stride, output_w_size, output_h_size, filter_w_size, filter_h_size ); BB_CUDA_CHECK_LAST_ERROR(); return 0; }
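// kernal_fp32_Im2Col_Forward flattens each (channel, filter-tap) pair into an
// output node and each (input frame, output pixel) pair into an output frame,
// storing value(node, frame) at buf[node * frame_stride + frame]. The plain CPU
// routine below is a reference for that index mapping only (constant-border
// case, i.e. IM2COL_BORDER_CONSTANT); the function name is illustrative and the
// code is not part of bbcu.
void im2col_forward_reference(const float *x, float *y,
                              int x_stride, int y_stride,
                              int x_offset, int y_offset,
                              int input_frames, int input_frame_stride,
                              int in_w, int in_h, int channels,
                              int out_w, int out_h, int output_frame_stride,
                              int filter_w, int filter_h, float border_value) {
    const int out_size = out_w * out_h;
    const int output_frames = input_frames * out_size;
    for (int of = 0; of < output_frames; ++of) {
        const int input_frame = of / out_size;   // which input sample
        const int f  = of % out_size;            // which output pixel
        const int oy = f / out_w;
        const int ox = f % out_w;
        for (int c = 0; c < channels; ++c) {
            for (int fy = 0; fy < filter_h; ++fy) {
                for (int fx = 0; fx < filter_w; ++fx) {
                    const int iy = oy * y_stride - y_offset + fy;
                    const int ix = ox * x_stride - x_offset + fx;
                    float v = border_value;      // out-of-range taps read the border value
                    if (iy >= 0 && iy < in_h && ix >= 0 && ix < in_w) {
                        const int in_node = (c * in_h + iy) * in_w + ix;
                        v = x[in_node * input_frame_stride + input_frame];
                    }
                    const int out_node = (c * filter_h + fy) * filter_w + fx;
                    y[out_node * output_frame_stride + of] = v;
                }
            }
        }
    }
}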
namespace oneflow { namespace { class LayerNormCudnnBnCtx final { public: LayerNormCudnnBnCtx(const ShapeView& data_shape, const ShapeView& param_shape, DataType data_type) { const int64_t cudnn_c = param_shape.elem_cnt(); CHECK_EQ(data_shape.elem_cnt() % cudnn_c, 0); const int64_t cudnn_w = data_shape.elem_cnt() / cudnn_c; CHECK_LT(cudnn_c, GetMaxVal<int32_t>()); CHECK_LT(cudnn_w, GetMaxVal<int32_t>()); data_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, data_type, 1, static_cast<int32_t>(cudnn_c), 1, static_cast<int32_t>(cudnn_w))); DataType param_dtype = data_type == DataType::kFloat16 ? DataType::kFloat : data_type; param_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, param_dtype, 1, static_cast<int32_t>(cudnn_c), 1, 1)); #if (CUDNN_VERSION >= 7000) mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif } ~LayerNormCudnnBnCtx() = default; const cudnnTensorDescriptor_t& data_tensor_desc() const { return data_tensor_desc_->Get(); } const cudnnTensorDescriptor_t& param_tensor_desc() const { return param_tensor_desc_->Get(); } cudnnBatchNormMode_t mode() const { return mode_; }; private: std::unique_ptr<CudnnTensorDesc> data_tensor_desc_; std::unique_ptr<CudnnTensorDesc> param_tensor_desc_; cudnnBatchNormMode_t mode_; }; template<typename T, bool do_scale, bool do_center> __global__ void InstanceScaleCenterGpu(const int64_t elem_cnt, const int64_t instance_size, const T* in, const T* gamma, const T* beta, T* out) { CUDA_1D_KERNEL_LOOP_T(int64_t, i, elem_cnt) { const int64_t elem_id = i % instance_size; T v = in[i]; if (do_scale) { v *= gamma[elem_id]; } if (do_center) { v += beta[elem_id]; } out[i] = v; } } template<bool do_scale, bool do_center> __global__ void InstanceScaleCenterH2Gpu(const int64_t h2_elem_cnt, const int64_t h2_instance_size, const half* in, const half* gamma, const half* beta, half* out) { const auto* in_h2 = reinterpret_cast<const half2*>(in); const auto* gamma_h2 = reinterpret_cast<const half2*>(gamma); const auto* beta_h2 = reinterpret_cast<const half2*>(beta); auto* out_h2 = reinterpret_cast<half2*>(out); CUDA_1D_KERNEL_LOOP_T(int64_t, i, h2_elem_cnt) { const int64_t elem_id = i % h2_instance_size; half2 v2 = in_h2[i]; if (do_scale) { v2 = __hmul2(v2, gamma_h2[elem_id]); } if (do_center) { v2 = __hadd2(v2, beta_h2[elem_id]); } out_h2[i] = v2; } } template<typename T> void InstanceScaleCenter(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size, const T* in, const T* gamma, const T* beta, T* out) { const int64_t elem_cnt = batch_size * instance_size; if (beta != nullptr && gamma != nullptr) { // scale and center InstanceScaleCenterGpu<T, true, true> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, instance_size, in, gamma, beta, out); } else if (gamma != nullptr) { // scale only InstanceScaleCenterGpu<T, true, false> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, instance_size, in, gamma, nullptr, out); } else if (beta != nullptr) { // center only InstanceScaleCenterGpu<T, false, true> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, instance_size, in, nullptr, beta, out); } else { UNIMPLEMENTED(); } } void InstanceScaleCenterH2(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size, const half* in, const half* gamma, const half* beta, half* out) { CHECK_EQ(instance_size % 2, 0); const int64_t elem_cnt_h2 = batch_size * instance_size / 2; 
const int64_t instance_size_h2 = instance_size / 2; if (beta != nullptr && gamma != nullptr) { // scale and center InstanceScaleCenterH2Gpu<true, true> <<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt_h2, instance_size_h2, in, gamma, beta, out); } else if (gamma != nullptr) { // scale only InstanceScaleCenterH2Gpu<true, false> <<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt_h2, instance_size_h2, in, gamma, nullptr, out); } else if (beta != nullptr) { // center only InstanceScaleCenterH2Gpu<false, true> <<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt_h2, instance_size_h2, in, nullptr, beta, out); } else { UNIMPLEMENTED(); } } template<> void InstanceScaleCenter<float16>(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size, const float16* in, const float16* gamma, const float16* beta, float16* out) { if (instance_size % 2 == 0) { InstanceScaleCenterH2(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in), reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(beta), reinterpret_cast<half*>(out)); } else { InstanceScaleCenter<half>(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in), reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(beta), reinterpret_cast<half*>(out)); } } constexpr int64_t kLayerNormForwardGpuBlockSize = 256; template<typename T> struct LayerNormUtil { using ComputeType = T; __device__ static ComputeType ToComputeType(T v) { return v; } __device__ static T FromComputeType(ComputeType v) { return v; } }; template<> struct LayerNormUtil<half> { using ComputeType = float; __device__ static ComputeType ToComputeType(half v) { return __half2float(v); } __device__ static half FromComputeType(ComputeType v) { return __float2half(v); } }; template<typename T> int GetForwardDynamicSharedMemorySize(const int norm_size) { return norm_size * sizeof(typename LayerNormUtil<T>::ComputeType); } int GetLayerNormForwardBlockSize() { return kLayerNormForwardGpuBlockSize; } int GetLayerNormForwardNumBlocks(const int num_instances) { return std::min(static_cast<int>(num_instances), kCudaMaxBlocksNum); } template<typename T, typename ComputeType> __global__ void LayerNormForwardImpl(const int num_instances, const int norm_size, const double epsilon, const T* x, const T* gamma, const T* beta, ComputeType* mean, ComputeType* inv_variance, T* normalized, T* y) { using LU = LayerNormUtil<T>; extern __shared__ __align__(sizeof(double)) unsigned char fw_shared_buf[]; auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf); __shared__ ComputeType row_mean_shared; __shared__ ComputeType row_inv_var_shared; typedef cub::BlockReduce<ComputeType, kLayerNormForwardGpuBlockSize> BlockReduce; __shared__ typename BlockReduce::TempStorage cub_mean_reduce_tmp_storage; __shared__ typename BlockReduce::TempStorage cub_variance_reduce_tmp_storage; ComputeType inv_norm_size = static_cast<ComputeType>(1.0) / static_cast<ComputeType>(norm_size); for (int row = blockIdx.x; row < num_instances; row += gridDim.x) { const int row_offset = row * norm_size; const T* x_row = x + row_offset; ComputeType thread_sum = 0; ComputeType thread_square_sum = 0; const int tid = threadIdx.x; for (int col = tid; col < norm_size; col += blockDim.x) { const ComputeType val = LU::ToComputeType(x_row[col]); compute_buf[col] = val; thread_sum += val; thread_square_sum += val * val; } __syncthreads(); ComputeType 
block_sum = BlockReduce(cub_mean_reduce_tmp_storage).Reduce(thread_sum, cub::Sum()); ComputeType block_square_sum = BlockReduce(cub_variance_reduce_tmp_storage).Reduce(thread_square_sum, cub::Sum()); if (tid == 0) { ComputeType row_mean = block_sum * inv_norm_size; row_mean_shared = row_mean; mean[row] = row_mean; ComputeType row_variance = max(block_square_sum * inv_norm_size - row_mean * row_mean, static_cast<ComputeType>(0)); ComputeType row_inv_var = rsqrt(row_variance + static_cast<ComputeType>(epsilon)); row_inv_var_shared = row_inv_var; inv_variance[row] = row_inv_var; } __syncthreads(); ComputeType mean = row_mean_shared; ComputeType inv_var = row_inv_var_shared; for (int col = threadIdx.x; col < norm_size; col += blockDim.x) { int offset = row_offset + col; ComputeType val = compute_buf[col]; val = (val - mean) * inv_var; if (gamma != nullptr || beta != nullptr) { int elem_id = col; if (gamma != nullptr) { normalized[offset] = LU::FromComputeType(val); val *= LU::ToComputeType(gamma[elem_id]); } if (beta != nullptr) { val += LU::ToComputeType(beta[elem_id]); } } y[offset] = LU::FromComputeType(val); } } } template<typename T> void LayerNormForwardGpu(DeviceCtx* ctx, const int num_instances, const int norm_size, const double epsilon, const T* x_ptr, const T* gamma_ptr, const T* beta_ptr, T* normalized_ptr, T* y_ptr, user_op::Tensor* mean, user_op::Tensor* inv_variance) { LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType> <<<GetLayerNormForwardNumBlocks(num_instances), GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<T>(norm_size), ctx->cuda_stream()>>>( num_instances, norm_size, epsilon, x_ptr, gamma_ptr, beta_ptr, mean->mut_dptr<typename LayerNormUtil<T>::ComputeType>(), inv_variance->mut_dptr<typename LayerNormUtil<T>::ComputeType>(), normalized_ptr, y_ptr); } template<> void LayerNormForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int norm_size, const double epsilon, const float16* x_ptr, const float16* gamma_ptr, const float16* beta_ptr, float16* normalized_ptr, float16* y_ptr, user_op::Tensor* mean, user_op::Tensor* inv_variance) { LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType> <<<GetLayerNormForwardNumBlocks(num_instances), GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<half>(norm_size), ctx->cuda_stream()>>>( num_instances, norm_size, epsilon, reinterpret_cast<const half*>(x_ptr), reinterpret_cast<const half*>(gamma_ptr), reinterpret_cast<const half*>(beta_ptr), mean->mut_dptr<typename LayerNormUtil<half>::ComputeType>(), inv_variance->mut_dptr<typename LayerNormUtil<half>::ComputeType>(), reinterpret_cast<half*>(normalized_ptr), reinterpret_cast<half*>(y_ptr)); } int GetForwardFusedKernelMinNormSize() { return 64; } template<typename T> int GetForwardFusedKernelMaxActiveBlocks(const int32_t norm_size) { int max_active_blocks; OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>, GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<T>(norm_size))); return max_active_blocks; } template<> int GetForwardFusedKernelMaxActiveBlocks<float16>(const int32_t norm_size) { int max_active_blocks; OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>, GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<half>(norm_size))); return max_active_blocks; } template<typename T> bool 
IsForwardFusedKernelSupported(const int32_t norm_size, const int32_t instance_size) { if (norm_size >= GetForwardFusedKernelMinNormSize() && norm_size % 32 == 0 && GetForwardFusedKernelMaxActiveBlocks<T>(norm_size) > 0 && (instance_size == 0 || norm_size == instance_size)) { return true; } else { return false; } } constexpr int64_t kLayerNormParamGradGpuBlockSize = 512; int64_t GetLayerNormParamGradBlockSize() { return kLayerNormParamGradGpuBlockSize; } int64_t GetLayerNormParamGradNumBlocks(const int64_t elem_cnt) { return std::min(static_cast<int>((elem_cnt + kLayerNormParamGradGpuBlockSize - 1) / kLayerNormParamGradGpuBlockSize), 256); } template<typename T> int64_t GetParamGradDynamicSharedMemorySize(const int64_t instance_size) { return 2 * instance_size * sizeof(T); } template<> int64_t GetParamGradDynamicSharedMemorySize<float16>(const int64_t instance_size) { return 2 * instance_size * sizeof(float); } template<typename T, typename I> __global__ void LayerNormParamGradImpl(const I n, const I instance_size, const T* dy, const T* normalized, const T* gamma, T* gamma_diff, T* beta_diff, T* normalized_diff) { extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[]; auto* gamma_diff_sum_buf = reinterpret_cast<T*>(bw_shared_buf); auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size; const I tid = threadIdx.x; for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) { gamma_diff_sum_buf[elem_id] = 0; beta_diff_sum_buf[elem_id] = 0; } __syncthreads(); CUDA_1D_KERNEL_LOOP_T(I, i, n) { const I elem_id = i % instance_size; T dy_val = dy[i]; T normalized_val = normalized[i]; cuda::atomic::Add(&gamma_diff_sum_buf[elem_id], dy_val * normalized_val); cuda::atomic::Add(&beta_diff_sum_buf[elem_id], dy_val); T gamma_val = gamma[elem_id]; normalized_diff[i] = gamma_val * dy_val; } __syncthreads(); for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) { cuda::atomic::Add(gamma_diff + elem_id, gamma_diff_sum_buf[elem_id]); cuda::atomic::Add(beta_diff + elem_id, beta_diff_sum_buf[elem_id]); } } template<typename I> __global__ void LayerNormParamGradHalfImpl(const I n, const I instance_size, const half* dy, const half* normalized, const half* gamma, half* tmp_gamma_diff, half* tmp_beta_diff, half* normalized_diff) { extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[]; auto* gamma_diff_sum_buf = reinterpret_cast<float*>(bw_shared_buf); auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size; const I tid = threadIdx.x; for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) { gamma_diff_sum_buf[elem_id] = 0; beta_diff_sum_buf[elem_id] = 0; } __syncthreads(); CUDA_1D_KERNEL_LOOP_T(I, i, n) { const I elem_id = i % instance_size; half dy_val = dy[i]; half normalized_val = normalized[i]; cuda::atomic::Add(&gamma_diff_sum_buf[elem_id], __half2float(dy_val) * __half2float(normalized_val)); cuda::atomic::Add(&beta_diff_sum_buf[elem_id], __half2float(dy_val)); half gamma_val = gamma[elem_id]; normalized_diff[i] = __hmul(gamma_val, dy_val); } __syncthreads(); for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) { const I offset = blockIdx.x * instance_size + elem_id; tmp_gamma_diff[offset] = __float2half(gamma_diff_sum_buf[elem_id]); tmp_beta_diff[offset] = __float2half(beta_diff_sum_buf[elem_id]); } } } // namespace template<typename T, typename BNParamT> class LayerNormGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: LayerNormGpuKernel() = default; 
~LayerNormGpuKernel() = default; private: using user_op::OpKernel::Compute; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0); user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); const bool scale = ctx->Attr<bool>("scale"); const bool center = ctx->Attr<bool>("center"); user_op::Tensor* normalized = scale ? ctx->Tensor4ArgNameAndIndex("normalized", 0) : y; const double epsilon = ctx->Attr<double>("epsilon"); CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON); const int32_t num_instances = mean->shape().elem_cnt(); const int32_t norm_size = x->shape().elem_cnt() / num_instances; int32_t instance_size = 0; const T* gamma_ptr = nullptr; const T* beta_ptr = nullptr; if (scale || center) { if (scale) { const user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); instance_size = gamma->shape().elem_cnt(); gamma_ptr = gamma->dptr<T>(); } if (center) { const user_op::Tensor* beta = ctx->Tensor4ArgNameAndIndex("beta", 0); if (gamma_ptr) { CHECK_EQ(beta->shape().elem_cnt(), instance_size); } else { instance_size = beta->shape().elem_cnt(); } beta_ptr = beta->dptr<T>(); } CHECK_EQ(y->shape().elem_cnt() % instance_size, 0); } if (IsForwardFusedKernelSupported<T>(norm_size, instance_size)) { LayerNormForwardGpu<T>(ctx->device_ctx(), num_instances, norm_size, epsilon, x->dptr<T>(), gamma_ptr, beta_ptr, normalized->mut_dptr<T>(), y->mut_dptr<T>(), mean, inv_variance); } else { LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type()); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const size_t aligned_buffer_size = GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type())); char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>(); char* cudnn_bn_bias_zeros_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size; NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(), static_cast<BNParamT>(1), reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr)); NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(), static_cast<BNParamT>(0), reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr)); OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining( ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(), normalized->mut_dptr<T>(), bn_ctx.param_tensor_desc(), reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr), reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr), 1.0, nullptr, nullptr, epsilon, mean->mut_dptr(), inv_variance->mut_dptr())); if (scale || center) { const int64_t batch_size = y->shape().elem_cnt() / instance_size; InstanceScaleCenter<T>(ctx->device_ctx(), batch_size, instance_size, normalized->dptr<T>(), gamma_ptr, beta_ptr, y->mut_dptr<T>()); } } }; }; #define REGISTER_LAYER_NORM_GPU_KERNEL(dtype, bn_param_dtype) \ REGISTER_USER_KERNEL("layer_norm") \ .SetCreateFn<LayerNormGpuKernel<dtype, bn_param_dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \ user_op::TensorDesc* mean = ctx->OutputTensorDesc("mean", 0); \ const DataType& data_type = 
mean->data_type(); \ const int64_t elem_cnt = mean->shape().elem_cnt(); \ return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 2; \ }); REGISTER_LAYER_NORM_GPU_KERNEL(float, float) REGISTER_LAYER_NORM_GPU_KERNEL(double, double) REGISTER_LAYER_NORM_GPU_KERNEL(float16, float) template<typename T, typename BNParamT> class LayerNormGradGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: LayerNormGradGpuKernel() = default; ~LayerNormGradGpuKernel() = default; private: using user_op::OpKernel::Compute; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0); const user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0); user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const size_t aligned_buffer_size = GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type())); char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>(); char* cudnn_bn_scale_diff_buf_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size; char* cudnn_bn_bias_diff_buf_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size; NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(), static_cast<BNParamT>(1), reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr)); const void* sp_alpha = CudnnSPOnePtr<T>(); const void* sp_beta; if (ctx->has_input("_add_to_output", 0)) { const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0); CHECK_EQ(add_to_output->data_type(), dx->data_type()); CHECK_EQ(add_to_output->shape(), dx->shape()); Memcpy<DeviceType::kGPU>( ctx->device_ctx(), dx->mut_dptr<void>(), add_to_output->dptr<void>(), add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type())); sp_beta = CudnnSPOnePtr<T>(); } else { sp_beta = CudnnSPZeroPtr<T>(); } const double epsilon = ctx->Attr<double>("epsilon"); CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON); LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type()); OF_CUDNN_CHECK(cudnnBatchNormalizationBackward( ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), sp_alpha, sp_beta, CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(), bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(), dy->dptr<T>(), bn_ctx.data_tensor_desc(), dx->mut_dptr<T>(), bn_ctx.param_tensor_desc(), reinterpret_cast<const BNParamT*>(cudnn_bn_scale_ones_dptr), reinterpret_cast<BNParamT*>(cudnn_bn_scale_diff_buf_dptr), reinterpret_cast<BNParamT*>(cudnn_bn_bias_diff_buf_dptr), epsilon, mean->dptr(), inv_variance->dptr())); }; }; #define REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(dtype, bn_param_dtype) \ REGISTER_USER_KERNEL("layer_norm_grad") \ .SetCreateFn<LayerNormGradGpuKernel<dtype, bn_param_dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \ const user_op::TensorDesc& mean = ctx->InputTensorDesc("mean", 0); \ const DataType& data_type = mean.data_type(); \ const int64_t elem_cnt = mean.shape().elem_cnt(); \ return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 3; \ }) \ .SetInplaceProposalFn([](const user_op::InferContext& 
ctx, \ user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \ if (ctx.has_input("_add_to_output", 0)) { \ OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "_add_to_output", 0, true)); \ } \ return Maybe<void>::Ok(); \ }); REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float, float) REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(double, double) REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float16, float) template<typename T> class LayerNormParamGradGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: LayerNormParamGradGpuKernel() = default; ~LayerNormParamGradGpuKernel() = default; private: using user_op::OpKernel::Compute; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } void Compute(user_op::KernelComputeContext* ctx) const override { using NdUtil = NdarrayUtil<DeviceType::kGPU, T>; auto Val = NdUtil::GetValNdarrayBuilder(); auto Var = NdUtil::GetVarNdarrayBuilder(); const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0); user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0); user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0); user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const bool has_beta_diff = beta_diff != nullptr; const bool has_gamma_diff = gamma_diff != nullptr; const bool has_normalized_diff = normalized_diff != nullptr; const bool has_gamma = gamma != nullptr; const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis"); const int64_t elem_cnt = dy->shape().elem_cnt(); const int64_t m = dy->shape().Count(begin_params_axis); int max_active_blocks; OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, LayerNormParamGradImpl<T, int64_t>, GetLayerNormParamGradBlockSize(), GetParamGradDynamicSharedMemorySize<T>(m))); if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) { const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0); Memset<DeviceType::kGPU>(ctx->device_ctx(), gamma_diff->mut_dptr<T>(), 0, gamma_diff->shape().elem_cnt() * sizeof(T)); Memset<DeviceType::kGPU>(ctx->device_ctx(), beta_diff->mut_dptr<T>(), 0, beta_diff->shape().elem_cnt() * sizeof(T)); if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) { LayerNormParamGradImpl<T, int64_t> <<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(), GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream()>>>( elem_cnt, m, dy->dptr<T>(), normalized->dptr<T>(), gamma->dptr<T>(), gamma_diff->mut_dptr<T>(), beta_diff->mut_dptr<T>(), normalized_diff->mut_dptr<T>()); } else { LayerNormParamGradImpl<T, int32_t> <<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(), GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream()>>>( static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<T>(), normalized->dptr<T>(), gamma->dptr<T>(), gamma_diff->mut_dptr<T>(), beta_diff->mut_dptr<T>(), normalized_diff->mut_dptr<T>()); } } else { if (has_beta_diff) { user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0); CHECK_EQ(m, beta_diff->shape().elem_cnt()); CHECK_EQ(dy->shape().elem_cnt() % m, 0); const int64_t n = dy->shape().elem_cnt() / m; NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<T>()), Val({n, m}, dy->dptr<T>()), Var({n, m}, reduce_buf->mut_dptr<T>())); } if (has_gamma_diff) { const user_op::Tensor* normalized = 
ctx->Tensor4ArgNameAndIndex("normalized", 0); user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0); CHECK_EQ(m, gamma_diff->shape().elem_cnt()); CHECK_EQ(dy->shape().elem_cnt() % m, 0); const int64_t n = dy->shape().elem_cnt() / m; NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<T>()), Val({n, m}, normalized->dptr<T>()), Val({n, m}, dy->dptr<T>())); NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<T>()), Val({n, m}, reduce_buf->dptr<T>()), Var({n, m}, reduce_buf->mut_dptr<T>())); } if (has_normalized_diff) { if (has_gamma) { CHECK_EQ(m, gamma->shape().elem_cnt()); CHECK_EQ(dy->shape().elem_cnt() % m, 0); const int64_t n = dy->shape().elem_cnt() / m; NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<T>()), Val({n, m}, dy->dptr<T>()), Val({1, m}, gamma->dptr<T>())); } else { Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(), dy->dptr<void>(), dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type())); } } } }; }; #define REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("layer_norm_param_grad") \ .SetCreateFn<LayerNormParamGradGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value)); REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(float) REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(double) class LayerNormParamGradGpuHalfKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: LayerNormParamGradGpuHalfKernel() = default; ~LayerNormParamGradGpuHalfKernel() = default; private: using user_op::OpKernel::Compute; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } void Compute(user_op::KernelComputeContext* ctx) const override { using NdUtil = NdarrayUtil<DeviceType::kGPU, float16>; auto Val = NdUtil::GetValNdarrayBuilder(); auto Var = NdUtil::GetVarNdarrayBuilder(); const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0); user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0); user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0); user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0); const bool has_beta_diff = beta_diff != nullptr; const bool has_gamma_diff = gamma_diff != nullptr; const bool has_normalized_diff = normalized_diff != nullptr; const bool has_gamma = gamma != nullptr; const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis"); const int64_t elem_cnt = dy->shape().elem_cnt(); const int64_t m = dy->shape().Count(begin_params_axis); int max_active_blocks; OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &max_active_blocks, LayerNormParamGradHalfImpl<int64_t>, GetLayerNormParamGradBlockSize(), GetParamGradDynamicSharedMemorySize<float16>(m))); if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) { const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const int64_t num_blocks = GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt()); const size_t tmp_diff_size = GetCudaAlignedSize(num_blocks * m * sizeof(float16)); float16* tmp_gamma_diff = tmp_buffer->mut_dptr<float16>(); float16* tmp_beta_diff = reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + tmp_diff_size); float16* tmp_reduce_buf = 
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + 2 * tmp_diff_size); CHECK_GE(tmp_buffer->shape().elem_cnt(), 3 * tmp_diff_size); if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) { LayerNormParamGradHalfImpl<int64_t> <<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(), GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream()>>>( elem_cnt, m, dy->dptr<half>(), normalized->dptr<half>(), gamma->dptr<half>(), reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff), normalized_diff->mut_dptr<half>()); } else { LayerNormParamGradHalfImpl<int32_t> <<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(), GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream()>>>( static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<half>(), normalized->dptr<half>(), gamma->dptr<half>(), reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff), normalized_diff->mut_dptr<half>()); } NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()), Val({num_blocks, m}, tmp_gamma_diff), Var({num_blocks, m}, tmp_reduce_buf)); NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()), Val({num_blocks, m}, tmp_beta_diff), Var({num_blocks, m}, tmp_reduce_buf)); } else { if (has_beta_diff) { user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0); CHECK_EQ(m, beta_diff->shape().elem_cnt()); CHECK_EQ(dy->shape().elem_cnt() % m, 0); const int64_t n = dy->shape().elem_cnt() / m; NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()), Val({n, m}, dy->dptr<float16>()), Var({n, m}, reduce_buf->mut_dptr<float16>())); } if (has_gamma_diff) { const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0); user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0); CHECK_EQ(m, gamma_diff->shape().elem_cnt()); CHECK_EQ(dy->shape().elem_cnt() % m, 0); const int64_t n = dy->shape().elem_cnt() / m; NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<float16>()), Val({n, m}, normalized->dptr<float16>()), Val({n, m}, dy->dptr<float16>())); NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()), Val({n, m}, reduce_buf->dptr<float16>()), Var({n, m}, reduce_buf->mut_dptr<float16>())); } if (has_normalized_diff) { if (has_gamma) { CHECK_EQ(m, gamma->shape().elem_cnt()); CHECK_EQ(dy->shape().elem_cnt() % m, 0); const int64_t n = dy->shape().elem_cnt() / m; NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<float16>()), Val({n, m}, dy->dptr<float16>()), Val({1, m}, gamma->dptr<float16>())); } else { Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(), dy->dptr<void>(), dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type())); } } } } }; REGISTER_USER_KERNEL("layer_norm_param_grad") .SetCreateFn<LayerNormParamGradGpuHalfKernel>() .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") & (user_op::HobDataType("dy", 0) == DataType::kFloat16)) .SetInferTmpSizeFn([](user_op::InferContext* ctx) { const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis"); const bool has_gamma_diff = ctx->has_output("gamma_diff", 0); const bool has_beta_diff = ctx->has_output("beta_diff", 0); const bool has_normalized_diff = ctx->has_output("normalized_diff", 0); const auto& dy = ctx->InputTensorDesc("dy", 0); const int64_t instance_size = 
dy.shape().Count(begin_params_axis); size_t tmp_buffer_size = 0; if (has_gamma_diff && has_beta_diff && has_normalized_diff) { const size_t tmp_gamma_diff = GetCudaAlignedSize(GetLayerNormParamGradNumBlocks(dy.shape().elem_cnt()) * instance_size * sizeof(float16)); const size_t tmp_beta_diff = tmp_gamma_diff; const size_t tmp_reduce_buf = tmp_gamma_diff; tmp_buffer_size = tmp_gamma_diff + tmp_beta_diff + tmp_reduce_buf; } else { tmp_buffer_size = 0; } return tmp_buffer_size; }); } // namespace oneflow
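// ---------------------------------------------------------------------------
// Illustrative sketch (not part of either file above or below): how the
// tmp_buffer sized by SetInferTmpSizeFn above is expected to be carved up by
// the half-precision param-grad kernel -- three equally sized, CUDA-aligned
// regions holding per-block partial gamma gradients, per-block partial beta
// gradients, and reduction scratch. The 256-byte alignment constant and the
// helper names (AlignedRegionBytes, ParamGradTmpLayout) are assumptions made
// for this sketch, not OneFlow API.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>

namespace sketch {

constexpr std::size_t kCudaAlignSize = 256;  // assumed alignment used by GetCudaAlignedSize

inline std::size_t AlignedRegionBytes(std::size_t bytes) {
  // Round up to a multiple of kCudaAlignSize.
  return (bytes + kCudaAlignSize - 1) / kCudaAlignSize * kCudaAlignSize;
}

struct ParamGradTmpLayout {
  std::size_t gamma_diff_off;  // per-block partial gamma gradients (num_blocks x m halves)
  std::size_t beta_diff_off;   // per-block partial beta gradients  (num_blocks x m halves)
  std::size_t reduce_buf_off;  // scratch for the two ReduceSum calls
  std::size_t total_bytes;     // what the InferTmpSizeFn should return
};

inline ParamGradTmpLayout MakeParamGradTmpLayout(std::int64_t num_blocks, std::int64_t m) {
  const std::size_t region = AlignedRegionBytes(
      static_cast<std::size_t>(num_blocks) * static_cast<std::size_t>(m) * sizeof(std::uint16_t));
  return ParamGradTmpLayout{0, region, 2 * region, 3 * region};
}

}  // namespace sketch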
// TODO: Have 2 groups of 16 threads collaborate // TODO: Add support for outside diagonal // TODO: Add support for unsorted rows #define EXPERIMENTAL_LU_FACTORS #define EXPERIMENTAL_LU_FORWARD #define EXPERIMENTAL_LU_BACKWARD using namespace std; namespace amgx { namespace multicolor_ilu_solver { // ----------- // Kernels // ----------- #ifdef EXPERIMENTAL_LU_FORWARD template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int CtaSize, int bsize, bool ROW_MAJOR, bool hasDiag> __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CtaSize, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CtaSize, 16 ) #endif void LU_forward_4x4_kernel_warp( const IndexType *LU_row_offsets, const IndexType *LU_smaller_color_offsets, const IndexType *LU_column_indices, const ValueTypeA *LU_nonzero_values, const IndexType *A_row_offsets, const IndexType *A_column_indices, const ValueTypeA *A_nonzero_values, const IndexType *A_dia_indices, const ValueTypeB *x, const ValueTypeB *b, ValueTypeB *delta, const int *sorted_rows_by_color, const int num_rows_per_color, const int current_color, bool xIsZero ) { const int nHalfWarps = CtaSize / 16; // Number of half warps per Cta const int warpId = utils::warp_id(); const int laneId = utils::lane_id(); const int halfWarpId = threadIdx.x / 16; const int halfLaneId = threadIdx.x % 16; const int halfLaneId_div_4 = halfLaneId / 4; const int halfLaneId_mod_4 = halfLaneId % 4; const int upperHalf = 16 * (laneId / 16); // Shared memory needed to exchange X and delta. __shared__ volatile ValueTypeB s_mem[CtaSize]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId]; // Iterate over the rows of the matrix. One warp per row. for ( int aRowIt = blockIdx.x * nHalfWarps + halfWarpId ; aRowIt < num_rows_per_color ; aRowIt += gridDim.x * nHalfWarps ) { int aRowId = sorted_rows_by_color[aRowIt]; // Load one block of B. ValueTypeB my_bmAx(0); unsigned int active_mask = utils::activemask(); if ( ROW_MAJOR ) { if ( halfLaneId_mod_4 == 0 ) { my_bmAx = __cachingLoad(&b[4 * aRowId + halfLaneId_div_4]); } } else { if ( halfLaneId_div_4 == 0 ) { my_bmAx = __cachingLoad(&b[4 * aRowId + halfLaneId_mod_4]); } } // Don't do anything if X is zero. if ( !xIsZero ) { int aColBegin = A_row_offsets[aRowId ]; int aColEnd = A_row_offsets[aRowId + 1]; int aColMax = aColEnd; if ( hasDiag ) { ++aColMax; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( aColBegin < aColMax, active_mask ) ; aColBegin += 16 ) { int aColIt = aColBegin + halfLaneId; // Get the ID of the column. int aColId = -1; if ( aColIt < aColEnd ) { aColId = A_column_indices[aColIt]; } if ( hasDiag && aColIt == aColEnd ) { aColId = aRowId; } // Count the number of active columns. int vote = utils::ballot(aColId != -1, active_mask); // The number of iterations. int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < nCols ; k += 4 ) { int my_k = k + halfLaneId_div_4; // Load 8 blocks of X. int waColId = utils::shfl( aColId, upperHalf + my_k, warpSize, active_mask ); ValueTypeB my_x(0); if ( waColId != -1 ) { my_x = __cachingLoad(&x[4 * waColId + halfLaneId_mod_4]); } my_s_mem[halfLaneId] = my_x; // Load 8 blocks of A. 
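// Block layout reminder: each half-warp owns one 4x4 block row, and its 16 lanes map
// one-to-one onto the 16 entries of a 4x4 block (halfLaneId_div_4 = local row,
// halfLaneId_mod_4 = local column for row-major storage; the roles swap for column-major).
// The loop below consumes four column blocks per iteration: the matching x sub-vectors
// were just staged in my_s_mem by the shuffle above, and each lane accumulates its entry
// of the block-row/vector product into my_bmAx.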
#pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { int w_aColTmp = aColBegin + k + i, w_aColIt = -1; if ( w_aColTmp < aColEnd ) { w_aColIt = w_aColTmp; } if ( hasDiag && w_aColTmp == aColEnd ) { w_aColIt = A_dia_indices[aRowId]; } ValueTypeA my_val(0); if ( w_aColIt != -1 ) { my_val = A_nonzero_values[16 * w_aColIt + halfLaneId]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[4 * i + halfLaneId_mod_4]; } else { my_bmAx -= my_val * my_s_mem[4 * i + halfLaneId_div_4]; } } } // Loop over k } // Loop over aColIt } // if xIsZero // Contribution from each nonzero column that has color less than yours if ( current_color != 0 ) { // TODO: Use constant or texture here int aColBegin = LU_row_offsets[aRowId]; int aColEnd = LU_smaller_color_offsets[aRowId]; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( aColBegin < aColEnd, active_mask ) ; aColBegin += 16 ) { int aColIt = aColBegin + halfLaneId; int aColId = -1; if ( aColIt < aColEnd ) { aColId = LU_column_indices[aColIt]; } // Count the number of active columns. int vote = utils::ballot(aColId != -1, active_mask); // The number of iterations. int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); for ( int k = 0 ; k < nCols ; k += 4 ) { int my_k = k + halfLaneId_div_4; // Load 8 blocks of X. int waColId = utils::shfl( aColId, upperHalf + my_k, warpSize, active_mask ); ValueTypeB my_delta(0); if ( waColId != -1 ) { my_delta = delta[4 * waColId + halfLaneId_mod_4]; } my_s_mem[halfLaneId] = my_delta; utils::syncwarp(); // making sure smem write propagated // Update b-Ax. #pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { int w_aColTmp = aColBegin + k + i, w_aColIt = -1; if ( w_aColTmp < aColEnd ) { w_aColIt = w_aColTmp; } ValueTypeA my_val(0); if ( w_aColIt != -1 ) { my_val = LU_nonzero_values[16 * w_aColIt + halfLaneId]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[4 * i + halfLaneId_mod_4]; } else { my_bmAx -= my_val * my_s_mem[4 * i + halfLaneId_div_4]; } } } // Loop over k } // Loop over aColIt } // If current_color != 0 // Reduce bmAx terms. if ( ROW_MAJOR ) { my_bmAx += utils::shfl_xor( my_bmAx, 1, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 2, warpSize, active_mask ); } else { my_bmAx += utils::shfl_xor( my_bmAx, 4, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 8, warpSize, active_mask ); } // Store the results. 
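// The butterfly shuffles above (xor 1,2 for row-major; xor 4,8 for column-major) have
// summed the per-lane partial products, so the lanes selected below (halfLaneId_mod_4 == 0
// for row-major, halfLaneId_div_4 == 0 otherwise) now hold the full entries of
// b - A*x - L*delta for this block row, which the forward sweep writes out as delta.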
if ( ROW_MAJOR ) { if ( halfLaneId_mod_4 == 0 ) { delta[4 * aRowId + halfLaneId_div_4] = my_bmAx; } } else { if ( halfLaneId_div_4 == 0 ) { delta[4 * aRowId + halfLaneId_mod_4] = my_bmAx; } } } } #else template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int blockrows_per_warp, int bsize, bool ROW_MAJOR> __global__ void LU_forward_4x4_kernel(const IndexType *LU_row_offsets, const IndexType *LU_smaller_color_offsets, const IndexType *LU_column_indices, const ValueTypeA *LU_nonzero_values, const IndexType *A_row_offsets, const IndexType *A_column_indices, const ValueTypeA *A_nonzero_values, const ValueTypeB *x, const ValueTypeB *b, ValueTypeB *delta, const int *sorted_rows_by_color, const int num_rows_per_color, const int current_color, bool xIsZero) { int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x & 31; // padding row blocks to fit in a single warp if ( warp_thread_id >= blockrows_per_warp * bsize ) { return; } // new thread id with padding int tid = warp_id * blockrows_per_warp * bsize + warp_thread_id; // Here we use one thread per row (not block row) int cta_blockrow_id = (tid) / bsize; int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id; const int vec_entry_index = tid - cta_blockrow_id * bsize; volatile __shared__ ValueTypeB s_delta_temp[ bsize * blockrows_per_cta]; int offset, s_offset, i; ValueTypeB bmAx, temp[bsize]; while (blockrow_id < num_rows_per_color && cta_blockrow_id < blockrows_per_cta) { i = sorted_rows_by_color[blockrow_id]; // Load RHS and x offset = i * bsize + vec_entry_index; bmAx = b[offset]; if (!xIsZero) { int jmin = A_row_offsets[i]; int jmax = A_row_offsets[i + 1]; //TODO: Assumes inside diagonal for (int jind = jmin; jind < jmax; jind++) { IndexType jcol = A_column_indices[jind]; offset = jcol * bsize + vec_entry_index; s_delta_temp[tid] = x[offset]; // Load nonzero_values if (ROW_MAJOR) { offset = jind * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(A_nonzero_values + offset, temp); } else { offset = jind * bsize * bsize + vec_entry_index; #pragma unroll for (int m = 0; m < bsize; m++) { temp[m] = A_nonzero_values[offset + bsize * m]; } } // Do matrix multiply s_offset = cta_blockrow_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx -= temp[m] * s_delta_temp[s_offset++]; } } } // Contribution from each nonzero column that has color less than yours if (current_color != 0) { int jmin = LU_row_offsets[i]; int jmax = LU_smaller_color_offsets[i]; for (int jind = jmin; jind < jmax; jind++) { IndexType jcol = LU_column_indices[jind]; offset = jcol * bsize + vec_entry_index; s_delta_temp[tid] = ld_cg(delta + offset); // Load nonzero_values if (ROW_MAJOR) { offset = jind * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(LU_nonzero_values + offset, temp); } else { offset = jind * bsize * bsize + vec_entry_index; #pragma unroll for (int m = 0; m < bsize; m++) { temp[m] = LU_nonzero_values[offset + bsize * m]; } } // Do matrix multiply s_offset = cta_blockrow_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx -= temp[m] * s_delta_temp[s_offset++]; } } } delta[i * bsize + vec_entry_index] = bmAx; blockrow_id += blockrows_per_cta * gridDim.x; } } #endif #ifdef EXPERIMENTAL_LU_BACKWARD template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int CtaSize, bool ROW_MAJOR > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CtaSize, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CtaSize, 16 ) 
#endif void LU_backward_4x4_kernel_warp( const IndexType *row_offsets, const IndexType *larger_color_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeB *delta, ValueTypeB *Delta, ValueTypeB *x, const int *sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int num_colors, const ValueTypeB weight, bool xIsZero ) { const int nHalfWarps = CtaSize / 16; // Number of half warps per CTA. const int warpId = utils::warp_id(); const int laneId = utils::lane_id(); const int halfWarpId = threadIdx.x / 16; const int halfLaneId = threadIdx.x % 16; const int halfLaneId_div_4 = halfLaneId / 4; const int halfLaneId_mod_4 = halfLaneId % 4; const int upperHalf = 16 * (laneId / 16); // Shared memory needed to exchange X and delta. __shared__ volatile ValueTypeB s_mem[CtaSize]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId]; // Iterate over the rows of the matrix. One warp per two rows. for ( int aRowIt = blockIdx.x * nHalfWarps + halfWarpId ; aRowIt < num_rows_per_color ; aRowIt += gridDim.x * nHalfWarps ) { int aRowId = sorted_rows_by_color[aRowIt]; unsigned int active_mask = utils::activemask(); // Load one block of B. ValueTypeB my_bmAx(0); if ( ROW_MAJOR ) { if ( halfLaneId_mod_4 == 0 ) { my_bmAx = delta[4 * aRowId + halfLaneId_div_4]; } } else { if ( halfLaneId_div_4 == 0 ) { my_bmAx = delta[4 * aRowId + halfLaneId_mod_4]; } } // Don't do anything if the color is not the interesting one. if ( current_color != num_colors - 1 ) { // The range of the rows. int aColBegin = larger_color_offsets[aRowId], aColEnd = row_offsets[aRowId + 1]; // Each warp load column indices of 16 nonzero blocks for ( ; utils::any( aColBegin < aColEnd, active_mask ) ; aColBegin += 16 ) { int aColIt = aColBegin + halfLaneId; // Get the ID of the column. int aColId = -1; if ( aColIt < aColEnd ) { aColId = column_indices[aColIt]; } // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + halfLaneId_div_4; // Exchange column indices. int waColId = utils::shfl( aColId, upperHalf + my_k, warpSize, active_mask ); // Load 8 blocks of X if needed. ValueTypeB *my_ptr = Delta; if ( xIsZero ) { my_ptr = x; } ValueTypeB my_x(0); if ( waColId != -1 ) { my_x = my_ptr[4 * waColId + halfLaneId_mod_4]; } my_s_mem[halfLaneId] = my_x; utils::syncwarp(); // Load 8 blocks of A. #pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { int w_aColTmp = aColBegin + k + i, w_aColIt = -1; if ( w_aColTmp < aColEnd ) { w_aColIt = w_aColTmp; } ValueTypeA my_val(0); if ( w_aColIt != -1 ) { my_val = nonzero_values[16 * w_aColIt + halfLaneId]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[4 * i + halfLaneId_mod_4]; } else { my_bmAx -= my_val * my_s_mem[4 * i + halfLaneId_div_4]; } } } // Loop over k } // Loop over aColIt // Reduce bmAx terms. if ( ROW_MAJOR ) { my_bmAx += utils::shfl_xor( my_bmAx, 1, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 2, warpSize, active_mask ); } else { my_bmAx += utils::shfl_xor( my_bmAx, 4, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 8, warpSize, active_mask ); } } // if current_color != num_colors-1 // Update the shared terms. if ( ROW_MAJOR ) { if ( halfLaneId_mod_4 == 0 ) { my_s_mem[halfLaneId_div_4] = my_bmAx; } } else { if ( halfLaneId_div_4 == 0 ) { my_s_mem[halfLaneId_mod_4] = my_bmAx; } } // Update the diagonal term. 
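// Note: the diagonal block fetched below is not the original A_ii. During factorization
// (compute_LU_factors_*) every diagonal block is overwritten in place with its inverse,
// so the backward sweep only needs a 4x4 block-vector product here rather than a small
// triangular solve. At this point my_s_mem[0..3] holds the reduced right-hand side for
// this block row.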
int w_aColIt = dia_indices[aRowId]; ValueTypeA my_val(0); utils::syncwarp(); if ( w_aColIt != -1 ) { my_val = nonzero_values[16 * w_aColIt + halfLaneId]; } if ( ROW_MAJOR ) { my_bmAx = my_val * my_s_mem[halfLaneId_mod_4]; } else { my_bmAx = my_val * my_s_mem[halfLaneId_div_4]; } // Regroup results. if ( ROW_MAJOR ) { my_bmAx += utils::shfl_xor( my_bmAx, 1 ); my_bmAx += utils::shfl_xor( my_bmAx, 2 ); } else { my_bmAx += utils::shfl_xor( my_bmAx, 4 ); my_bmAx += utils::shfl_xor( my_bmAx, 8 ); } // Store the results. if ( ROW_MAJOR ) { ValueTypeB my_x(0); if ( !xIsZero && halfLaneId_mod_4 == 0 ) { my_x = x[4 * aRowId + halfLaneId_div_4]; } my_x += weight * my_bmAx; if ( !xIsZero && halfLaneId_mod_4 == 0 ) { Delta[4 * aRowId + halfLaneId_div_4] = my_bmAx; } if ( halfLaneId_mod_4 == 0 ) { x[4 * aRowId + halfLaneId_div_4] = my_x; } } else { ValueTypeB my_x(0); if ( !xIsZero && halfLaneId_div_4 == 0 ) { my_x = x[4 * aRowId + halfLaneId_mod_4]; } my_x += weight * my_bmAx; if ( !xIsZero && halfLaneId_div_4 == 0 ) { Delta[4 * aRowId + halfLaneId_mod_4] = my_bmAx; } if ( halfLaneId_div_4 == 0 ) { x[4 * aRowId + halfLaneId_mod_4] = my_x; } } } } #else template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int blockrows_per_warp, int bsize, bool ROW_MAJOR> __global__ void LU_backward_4x4_kernel(const IndexType *row_offsets, const IndexType *larger_color_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeB *delta, ValueTypeB *Delta, ValueTypeB *x, const int *sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int num_colors, const ValueTypeB weight, bool xIsZero) { int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x & 31; // padding row blocks to fit in a single warp if ( warp_thread_id >= blockrows_per_warp * bsize ) { return; } // new thread id with padding int tid = warp_id * blockrows_per_warp * bsize + warp_thread_id; // Here we use one thread per row (not block row) int cta_blockrow_id = (tid) / bsize; int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id; const int vec_entry_index = tid - cta_blockrow_id * bsize; volatile __shared__ ValueTypeB s_x_temp[ bsize * blockrows_per_cta]; int offset, s_offset, i; ValueTypeB bmAx, temp[bsize]; while (blockrow_id < num_rows_per_color && cta_blockrow_id < blockrows_per_cta) { i = sorted_rows_by_color[blockrow_id]; // Load RHS and x offset = i * bsize + vec_entry_index; bmAx = delta[offset]; // Contribution from each nonzero column that has color less than yours if (current_color != num_colors) { int jmin = larger_color_offsets[i]; int jmax = row_offsets[i + 1]; for (int jind = jmin; jind < jmax; jind++) { IndexType jcol = column_indices[jind]; offset = jcol * bsize + vec_entry_index; if (xIsZero) { s_x_temp[tid] = ld_cg(x + offset); } else { s_x_temp[tid] = ld_cg(Delta + offset); } // Load nonzero_values if (ROW_MAJOR) { offset = jind * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(nonzero_values + offset, temp); } else { offset = jind * bsize * bsize + vec_entry_index; #pragma unroll for (int m = 0; m < bsize; m++) { temp[m] = nonzero_values[offset + bsize * m]; } } // Do matrix multiply s_offset = cta_blockrow_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx -= temp[m] * s_x_temp[s_offset++]; } } } s_x_temp[tid] = bmAx; bmAx = 0.; // Load diagonals (which store the inverse) if (ROW_MAJOR) { offset = dia_indices[i] * bsize * bsize + vec_entry_index * bsize; 
loadAsVector<bsize>(nonzero_values + offset, temp); } else { offset = dia_indices[i] * bsize * bsize + vec_entry_index; #pragma unroll for (int m = 0; m < bsize; m++) { temp[m] = nonzero_values[offset + bsize * m]; } } // Do matrix-vector multiply s_offset = cta_blockrow_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx += temp[m] * s_x_temp[s_offset++]; } offset = i * bsize + vec_entry_index; if (xIsZero) { x[offset] = weight * bmAx; } else { Delta[offset] = bmAx; x[offset] += weight * bmAx ; } blockrow_id += blockrows_per_cta * gridDim.x; } } #endif // Assumptions: // CtaSize must be multiple of 32 // SMemSize should be larger than the maximum number of columns in the matrix // Matrix B is superset of matrix A template< int CtaSize, int SMemSize> __global__ __launch_bounds__( CtaSize ) void computeAtoLUmapping_kernel( int A_nRows, const int *__restrict A_row_offsets, const int *__restrict A_col_indices, const int *__restrict B_row_offsets, const int *__restrict B_col_indices, int *__restrict AtoBmapping, int *wk_returnValue ) { const int nWarps = CtaSize / 32; // Number of warps per Cta const int warpId = utils::warp_id(); const int laneId = utils::lane_id(); // Rows are stored in SMEM. Linear storage. __shared__ volatile int s_colInd[nWarps][SMemSize]; // The row this warp is responsible for int aRowId = blockIdx.x * nWarps + warpId; // Loop over rows of A. for ( ; aRowId < A_nRows ; aRowId += nWarps * gridDim.x ) { // Insert all the column indices of matrix B in the shared memory table int bColBeg = B_row_offsets[aRowId]; int bColEnd = B_row_offsets[aRowId + 1]; // The number of columns. const int nCols = bColEnd - bColBeg; //TODO: Add fallback for cases where number of nonzeros exceed SMemSize if ( nCols > SMemSize ) { wk_returnValue[0] = 1; return; } // Fill-in the local table. const int NUM_STEPS = SMemSize / 32; #pragma unroll for ( int step = 0, k = laneId ; step < NUM_STEPS ; ++step, k += 32 ) { int bColIt = bColBeg + k; int bColId = -1; if ( bColIt < bColEnd ) { bColId = B_col_indices[bColIt]; } s_colInd[warpId][k] = bColId; } // Now load column indices of current row of A int aColIt = A_row_offsets[aRowId]; int aColEnd = A_row_offsets[aRowId + 1]; for ( aColIt += laneId ; utils::any(aColIt < aColEnd) ; aColIt += 32 ) { // The column. int aColId = -1; if ( aColIt < aColEnd ) { aColId = A_col_indices[aColIt]; } // Each thread searches for its column id, and gets the corresponding bColIt // TODO: Try binary search or using hash table int foundOffset = -1; if ( aColId == -1 ) { foundOffset = -2; } for ( int i = 0 ; i < nCols && utils::any(foundOffset == -1) ; ++i ) if ( foundOffset == -1 && s_colInd[warpId][i] == aColId ) { foundOffset = i; } // Store the result. if ( aColIt < aColEnd ) { AtoBmapping[aColIt] = bColBeg + foundOffset; } } } // if RowId < A_nRows; } template< int CtaSize, int SMemSize> __global__ __launch_bounds__( CtaSize ) void computeAtoLUmappingExtDiag_kernel( int A_nRows, const int *__restrict A_row_offsets, const int *__restrict A_col_indices, const int *__restrict A_dia_indices, const int *__restrict B_row_offsets, const int *__restrict B_col_indices, int *__restrict AtoBmapping, int *wk_returnValue ) { const int nWarps = CtaSize / 32; // Number of warps per Cta const int warpId = utils::warp_id(); const int laneId = utils::lane_id(); // Rows are stored in SMEM. Linear storage. __shared__ volatile int s_colInd[nWarps][SMemSize]; // The row this warp is responsible for int aRowId = blockIdx.x * nWarps + warpId; // Loop over rows of A. 
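// Same warp-cooperative search as computeAtoLUmapping_kernel above, except that the
// external (outside) diagonal is handled explicitly: each row gets one virtual extra
// column equal to the row index (the aColIt == aColEnd case), and its mapping is written
// at A_dia_indices[aRowId] so the outside-diagonal value lands on the in-row diagonal
// slot of LU.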
for ( ; aRowId < A_nRows ; aRowId += nWarps * gridDim.x ) { // Insert all the column indices of matrix B in the shared memory table int bColBeg = B_row_offsets[aRowId]; int bColEnd = B_row_offsets[aRowId + 1]; // The number of columns. const int nCols = bColEnd - bColBeg; //TODO: Add fallback for cases where number of nonzeros exceed SMemSize if ( nCols > SMemSize ) { wk_returnValue[0] = 1; return; } // Fill-in the local table. const int NUM_STEPS = SMemSize / 32; #pragma unroll for ( int step = 0, k = laneId ; step < NUM_STEPS ; ++step, k += 32 ) { int bColIt = bColBeg + k; int bColId = -1; if ( bColIt < bColEnd ) { bColId = B_col_indices[bColIt]; } s_colInd[warpId][k] = bColId; } // Now load column indices of current row of A int aColIt = A_row_offsets[aRowId]; int aColEnd = A_row_offsets[aRowId + 1]; for ( aColIt += laneId ; utils::any(aColIt <= aColEnd) ; aColIt += 32 ) { // The column. int aColId = -1; if ( aColIt < aColEnd ) { aColId = A_col_indices[aColIt]; } if ( aColIt == aColEnd ) { aColId = aRowId; } // Each thread searches for its column id, and gets the corresponding bColIt // TODO: Try binary search or using hash table int foundOffset = -1; if ( aColId == -1 ) { foundOffset = -2; } for ( int i = 0 ; i < nCols && utils::any(foundOffset == -1) ; ++i ) if ( foundOffset == -1 && s_colInd[warpId][i] == aColId ) { foundOffset = i; } // Store the result. int aDst = -1; if ( aColIt < aColEnd ) { aDst = aColIt; } if ( aColIt == aColEnd ) { aDst = A_dia_indices[aRowId]; } if ( aDst != -1 ) { AtoBmapping[aDst] = bColBeg + foundOffset; } } } } #ifdef EXPERIMENTAL_LU_FACTORS template< typename ValueTypeA, int CtaSize, int SMemSize, bool ROW_MAJOR > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CtaSize, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CtaSize, 12 ) #endif void compute_LU_factors_4x4_kernel_warp( int A_nRows, const int *__restrict A_row_offsets, const int *__restrict A_col_indices, const int *__restrict A_dia_indices, ValueTypeA *__restrict A_nonzero_values, const int *__restrict A_smaller_color_offsets, const int *__restrict A_larger_color_offsets, const int *sorted_rows_by_color, const int num_rows_per_color, const int current_color, int *wk_returnValue ) { const int nWarps = CtaSize / 32; // Number of warps per Cta const int warpId = utils::warp_id(); const int laneId = utils::lane_id(); // Lane ID in the 2 16-wide segments. const int lane_id_div_16 = laneId / 16; const int lane_id_mod_16 = laneId % 16; // Coordinates inside a 4x4 block of the matrix. const int idx_i = lane_id_mod_16 / 4; const int idx_j = lane_id_mod_16 % 4; int globalWarpId = blockIdx.x * nWarps + warpId; // Shared memory to store the blocks to process __shared__ volatile ValueTypeA s_C_mtx[nWarps][32]; __shared__ volatile ValueTypeA s_F_mtx[nWarps][16]; // Shared memory to store the proposed column to load __shared__ volatile int s_aColSrc[nWarps][32]; // Shared memory to store the column indices of the current row __shared__ volatile int s_keys[nWarps][SMemSize]; while (globalWarpId < num_rows_per_color) { int storedRowId[2]; int I = 0; for (; I < 2 && globalWarpId < num_rows_per_color ; I++) { int aRowId = sorted_rows_by_color[globalWarpId]; storedRowId[I] = aRowId; int aColBeg = A_row_offsets[aRowId + 0]; int aColEnd = A_row_offsets[aRowId + 1]; int aColSmaller = A_smaller_color_offsets[aRowId]; // The number of columns. 
const int nCols = aColEnd - aColBeg; //TODO: Add fallback for cases where number of nonzeros exceed SMemSize if ( nCols > SMemSize ) { wk_returnValue[0] = 1; return; } // Fill-in the local table. const int NUM_STEPS = SMemSize / 32; #pragma unroll for ( int step = 0, k = laneId ; step < NUM_STEPS ; ++step, k += 32 ) { int aColIt = aColBeg + k; int aColId = -1; if ( aColIt < aColEnd ) { aColId = A_col_indices[aColIt]; } s_keys[warpId][k] = aColId; } // Now load all column indices of neighbours that have colors smaller than yours for ( int aColIt = aColBeg; aColIt < aColSmaller ; aColIt++) { unsigned int active_mask = utils::activemask(); // Read the row to process, should be a broadcast int waRowId = s_keys[warpId][aColIt - aColBeg]; // Compute multiplicative factor, load C_jj in first half, C_ij in second half int aColIdx = aColIt; if ( lane_id_div_16 == 0 ) { aColIdx = A_dia_indices[waRowId]; } s_C_mtx[warpId][laneId] = A_nonzero_values[16 * aColIdx + lane_id_mod_16]; // Threads 0-15 perform the matrix product ValueTypeA tmp(0); if (ROW_MAJOR) { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_C_mtx[warpId][16 + 4 * idx_i + m] * s_C_mtx[warpId][4 * m + idx_j]; } } else { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_C_mtx[warpId][16 + 4 * m + idx_j] * s_C_mtx[warpId][4 * idx_i + m]; } } if ( lane_id_div_16 == 0 ) { s_F_mtx[warpId][laneId] = tmp; A_nonzero_values[16 * aColIt + laneId] = tmp; } int waColIt = ld_cg(A_larger_color_offsets + waRowId); int waColEnd = ld_cg(A_row_offsets + waRowId + 1); // Load the first 32 columns of waRowId for (waColIt += laneId ; utils::any(waColIt < waColEnd, active_mask ); waColIt += 32 ) { // Each thread loads its column id int waColId = -1; if ( waColIt < waColEnd ) { waColId = A_col_indices[waColIt]; } // Find the right column. int found_aColIt = -1; #pragma unroll 4 for ( int i = 0, num_keys = aColEnd - aColBeg ; i < num_keys ; ++i ) if ( s_keys[warpId][i] == waColId ) { found_aColIt = i; } if ( found_aColIt != -1 ) { found_aColIt += aColBeg; } // Store all the columns that have been found const int pred = found_aColIt != -1; int vote = utils::ballot( pred, active_mask ); const int idst = __popc(vote & utils::lane_mask_lt()); if (pred) { s_aColSrc[warpId][idst] = laneId; } utils::syncwarp(active_mask); const int n_cols = __popc( vote ); // Process all columns that have been found for ( int k = 0 ; k < n_cols ; k += 2 ) { const int my_k = k + lane_id_div_16; // Where to get columns from. int a_col_it = -1, w_col_it = -1; // Load column to load a_col_it = utils::shfl(found_aColIt, s_aColSrc[warpId][my_k], warpSize, active_mask); w_col_it = utils::shfl(waColIt, s_aColSrc[warpId][my_k], warpSize, active_mask); if ( my_k >= n_cols ) { a_col_it = -1; w_col_it = -1; } ValueTypeA my_C(0); if ( w_col_it != -1 ) { my_C = A_nonzero_values[16 * w_col_it + lane_id_mod_16]; } s_C_mtx[warpId][laneId] = my_C; // Run the matrix-matrix product. 
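// The product below is the fill-respecting ILU update. s_F_mtx holds the factor
// F = A_ij * inv(A_jj) computed above (the diagonal block of the already-factored row
// j = waRowId stores its inverse), and s_C_mtx holds up to two 4x4 blocks taken from the
// larger-color segment of row j. F * A_jk is subtracted from A_ik only for columns k
// already present in row i (located via the s_keys lookup), so no new fill is introduced
// at this stage.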
ValueTypeA tmp(0); utils::syncwarp( active_mask ); if (ROW_MAJOR) { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_F_mtx[warpId][4 * idx_i + m] * s_C_mtx[warpId][16 * lane_id_div_16 + 4 * m + idx_j]; } } else { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_F_mtx[warpId][4 * m + idx_j] * s_C_mtx[warpId][16 * lane_id_div_16 + 4 * idx_i + m]; } } if ( a_col_it != -1 ) { A_nonzero_values[16 * a_col_it + lane_id_mod_16] -= tmp; } } // Loop over columns that have a match (for k=0;k<n_cols) } // Loop over the columns of waRowId //} // Loop j=0;j<32 } // Loop over the columns of aRowId globalWarpId += nWarps * gridDim.x; } // end of loop over I // Now compute the inverse of the block C_jj if ( lane_id_div_16 == 0 || I == 2 ) { const int offset = 16 * A_dia_indices[storedRowId[lane_id_div_16]] + lane_id_mod_16; s_C_mtx[warpId][laneId] = A_nonzero_values[offset]; utils::syncwarp(utils::activemask()); if (ROW_MAJOR) { compute_block_inverse_row_major4x4_formula2<int, ValueTypeA, 4, true>( s_C_mtx[warpId], 16 * lane_id_div_16, offset, idx_i, idx_j, A_nonzero_values ); } else { compute_block_inverse_col_major4x4_formula2<int, ValueTypeA, 4, true>( s_C_mtx[warpId], 16 * lane_id_div_16, offset, idx_i, idx_j, A_nonzero_values ); } } // End of if statement } // End of while loop } #else template< typename ValueTypeA, int CtaSize, int SMemSize, bool ROW_MAJOR> __global__ __launch_bounds__( CtaSize ) void computeLUFactors_4x4_kernel( int A_nRows, const int *__restrict A_row_offsets, const int *__restrict A_col_indices, const int *__restrict A_dia_indices, ValueTypeA *__restrict A_nonzero_values, const int *__restrict A_smaller_color_offsets, const int *__restrict A_larger_color_offsets, const int *sorted_rows_by_color, const int num_rows_per_color, const int current_color, int *wk_returnValue ) { const int nWarps = CtaSize / 32; // Number of warps per Cta const int warpId = utils::warp_id(); const int laneId = utils::lane_id(); int lane_mask_lt = utils::lane_mask_lt(); // Lane ID in the 2 16-wide segments. const int lane_id_div_16 = laneId / 16; const int lane_id_mod_16 = laneId % 16; // Coordinates inside a 4x4 block of the matrix. const int idx_i = lane_id_mod_16 / 4; const int idx_j = lane_id_mod_16 % 4; int globalWarpId = blockIdx.x * nWarps + warpId; // Shared memory to store the blocks to process __shared__ volatile ValueTypeA s_C_mtx[nWarps][32]; // Shared memory to store the proposed column to load __shared__ volatile int s_aColItToLoad[nWarps][32]; __shared__ volatile int s_waColItToLoad[nWarps][32]; // Shared memory to store the proposed column to load __shared__ volatile unsigned s_aColIds[nWarps][32]; // The size of the hash table (one per warp - shared memory). __shared__ volatile int s_size[nWarps][2]; // Shared memory to store the column indices of the current row __shared__ volatile int s_keys[nWarps][SMemSize]; while (globalWarpId < num_rows_per_color) { int aRowId = sorted_rows_by_color[globalWarpId]; // Insert all the column indices in shared memory // TODO: Use texture here int aColBeg = A_row_offsets[aRowId]; int aColEnd = A_row_offsets[aRowId + 1]; int aColIt = aColBeg; // Check if number of nonzeros will fit in shared memory if ( (aColEnd - aColBeg) > SMemSize ) { wk_returnValue[0] = 1; return; } // Load the all the column indices of row into shared memory for ( aColIt += laneId ; utils::any( aColIt < aColEnd ) ; aColIt += 32 ) { int aColId = aColIt < aColEnd ? 
(int) A_col_indices[aColIt] : -1; s_keys[warpId][aColIt - aColBeg] = aColId; } // Now load all column indices of neighbours that have colors smaller than yours aColIt = aColBeg; int aColSmaller = A_smaller_color_offsets[aRowId]; for ( ; utils::any( (aColIt + laneId) < aColSmaller ) ; aColIt += 32 ) { int aColId = (aColIt + laneId) < aColSmaller ? (int) A_col_indices[aColIt + laneId] : -1; // Each thread pushes its column s_aColIds[warpId][laneId] = aColId; // Have warp collaborate to load each row for ( int j = 0; j < 32; j++) { // Check if row to load is valid if ( ( aColIt + j ) >= aColSmaller ) { break; } // Read the row to process, should be a broadcast int waRowId = s_aColIds[warpId][j]; // Compute multiplicative factor, load C_jj in first half, C_ij in second half if (lane_id_div_16 == 0) { s_C_mtx[warpId][laneId] = A_nonzero_values[ 16 * A_dia_indices[waRowId] + lane_id_mod_16 ]; } else { s_C_mtx[warpId][laneId] = A_nonzero_values[ 16 * (aColIt + j) + lane_id_mod_16 ]; } // Threads 0-15 perform the matrix product utils::syncwarp(); if (lane_id_div_16 == 0) { ValueTypeA tmp(0); if (ROW_MAJOR) { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_C_mtx[warpId][16 + 4 * idx_i + m] * s_C_mtx[warpId][4 * m + idx_j]; } } else { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_C_mtx[warpId][16 + 4 * m + idx_j] * s_C_mtx[warpId][4 * idx_i + m]; } } s_C_mtx[warpId][laneId] = tmp; A_nonzero_values[16 * (aColIt + j) + laneId] = tmp; } int waColIt = A_larger_color_offsets[waRowId]; int waColEnd = A_row_offsets[waRowId + 1]; //// Load the first 32 columns of waRowId for (waColIt += laneId ; utils::any(waColIt < waColEnd ); waColIt += 32 ) { // Each thread loads its column id int waColId = waColIt < waColEnd ? A_col_indices[waColIt] : int (-1); // TODO: Try binary search if columns are ordered int found_aColIt = -1; //TODO: if invalid waColId, don't search for (int i = 0 ; utils::any(found_aColIt == -1) && i < aColEnd - aColBeg ; i++) { if (s_keys[warpId][i] == waColId) { found_aColIt = aColBeg + i; } } // Store all the columns that have been found const int pred = found_aColIt != -1; const int vote = utils::ballot( pred ); const int idst = __popc(vote & lane_mask_lt); if (pred) { s_aColItToLoad [warpId][idst] = found_aColIt; s_waColItToLoad[warpId][idst] = waColIt; } const int n_cols = __popc( vote ); // Process all columns that have been found for ( int k = 0 ; k < n_cols ; k++ ) { // Load column to load const int a_col_it = k < n_cols ? s_aColItToLoad [warpId][k] : -1; const int w_col_it = k < n_cols ? s_waColItToLoad[warpId][k] : -1; if (lane_id_div_16 == 1) { s_C_mtx[warpId][laneId] = A_nonzero_values[16 * w_col_it + lane_id_mod_16]; // Run the matrix-matrix product. 
ValueTypeA tmp(0); utils::syncwarp(utils::activemask()); if (ROW_MAJOR) { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_C_mtx[warpId][4 * idx_i + m] * s_C_mtx[warpId][16 + 4 * m + idx_j]; } } else { #pragma unroll for ( int m = 0 ; m < 4 ; ++m ) { tmp += s_C_mtx[warpId][4 * m + idx_j] * s_C_mtx[warpId][16 + 4 * idx_i + m]; } } A_nonzero_values[16 * a_col_it + lane_id_mod_16] -= tmp; } } // Loop over columns that have a match (for k=0;k<n_cols) } // Loop over the columns of waRowId } // Loop j=0;j<32 } // Loop over the columns of aRowId // TODO: Have one warp deal with two rows // Now compute the inverse of the block C_jj if (lane_id_div_16 == 0) { const int offset = 16 * A_dia_indices[aRowId] + lane_id_mod_16; s_C_mtx[warpId][laneId] = A_nonzero_values[offset]; utils::syncwarp(utils::activemask()); if (ROW_MAJOR) { compute_block_inverse_row_major<int, ValueTypeA, 0, 4, 16> (s_C_mtx[warpId], 0, offset, idx_i, idx_j, A_nonzero_values); } else { compute_block_inverse_col_major<int, ValueTypeA, 0, 4, 16> (s_C_mtx[warpId], 0, offset, idx_i, idx_j, A_nonzero_values); } } globalWarpId += nWarps * gridDim.x; } // if RowId < A_nRows; } #endif // ---------- // Methods // ---------- // Constructor template<class T_Config> MulticolorILUSolver_Base<T_Config>::MulticolorILUSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope) { m_sparsity_level = cfg.AMG_Config::getParameter<int>("ilu_sparsity_level", cfg_scope); m_weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope); this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::getParameter<int>("reorder_cols_by_color", cfg_scope) != 0); this->m_insert_diagonal_desired = (cfg.AMG_Config::getParameter<int>("insert_diag_while_reordering", cfg_scope) != 0); if (cfg.AMG_Config::getParameter<int>("use_bsrxmv", cfg_scope)) { this->m_use_bsrxmv = 1; } else { this->m_use_bsrxmv = 0; } if (m_weight == ValueTypeB(0.)) { m_weight = 1.; amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Multicolor DILU smoother\n"); } } // Destructor template<class T_Config> MulticolorILUSolver_Base<T_Config>::~MulticolorILUSolver_Base() { m_LU.set_initialized(0); m_A_to_LU_mapping.clear(); m_A_to_LU_mapping.shrink_to_fit(); m_LU.resize(0, 0, 0, 1); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAtoLUmapping() { FatalError("Haven't implemented Multicolor ILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAtoLUmapping() { Matrix<TConfig_d> &m_A = *this->m_explicit_A; const int CtaSize = 128; // Number of threads per CTA const int SMemSize = 128; // per warp const int nWarps = CtaSize / 32; int GridSize = std::min( AMGX_GRID_MAX_SIZE, ( this->m_explicit_A->get_num_rows( ) + nWarps - 1 ) / nWarps ); // Global memory workspaces device_vector_alloc<int> returnValue(1); returnValue[0] = 0; if (this->m_explicit_A->hasProps(DIAG)) { computeAtoLUmappingExtDiag_kernel<CtaSize, SMemSize> <<< GridSize, CtaSize >>> ( m_A.get_num_rows( ), thrust::raw_pointer_cast( &m_A.row_offsets[0] ), thrust::raw_pointer_cast( &m_A.col_indices[0] ), thrust::raw_pointer_cast( &m_A.diag[0] ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), 
thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_A_to_LU_mapping[0] ), thrust::raw_pointer_cast( &returnValue[0] )); } else { computeAtoLUmapping_kernel<CtaSize, SMemSize> <<< GridSize, CtaSize >>> ( m_A.get_num_rows( ), thrust::raw_pointer_cast( &m_A.row_offsets[0] ), thrust::raw_pointer_cast( &m_A.col_indices[0] ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_A_to_LU_mapping[0] ), thrust::raw_pointer_cast( &returnValue[0] )); } cudaCheckError(); // fallback path that allows 1024 nonzeros per row if (returnValue[0] == 1) { returnValue[0] = 0; const int SMemSize2 = 1024 ; // per warp if (this->m_explicit_A->hasProps(DIAG)) { computeAtoLUmappingExtDiag_kernel<CtaSize, SMemSize2> <<< GridSize, CtaSize >>> ( m_A.get_num_rows( ), thrust::raw_pointer_cast( &m_A.row_offsets[0] ), thrust::raw_pointer_cast( &m_A.col_indices[0] ), thrust::raw_pointer_cast( &m_A.diag[0] ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_A_to_LU_mapping[0] ), thrust::raw_pointer_cast( &returnValue[0] )); } else { computeAtoLUmapping_kernel<CtaSize, SMemSize2> <<< GridSize, CtaSize >>> ( m_A.get_num_rows( ), thrust::raw_pointer_cast( &m_A.row_offsets[0] ), thrust::raw_pointer_cast( &m_A.col_indices[0] ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_A_to_LU_mapping[0] ), thrust::raw_pointer_cast( &returnValue[0] )); } cudaCheckError(); } if (returnValue[0] == 1) { FatalError( "Number of nonzeros per row exceeds allocated shared memory", AMGX_ERR_NO_MEMORY); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::fillLUValuesWithAValues() { FatalError("Haven't implemented Multicolor ILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::fillLUValuesWithAValues() { if (this->m_sparsity_level == 0) { this->m_LU.values = this->m_explicit_A->values; } else { // TODO: Should probably store the inverse mapping of AtoLUmapping instead // This will allow to use unpermuteVector and have coalesced writes // instead of coalesced reads thrust::fill(this->m_LU.values.begin(), this->m_LU.values.end(), 0.); cudaCheckError(); if (this->m_explicit_A->hasProps(DIAG)) { amgx::permuteVector(this->m_explicit_A->values, this->m_LU.values, this->m_A_to_LU_mapping, (this->m_explicit_A->get_num_nz() + this->m_explicit_A->get_num_rows())*this->m_explicit_A->get_block_size()); } else { amgx::permuteVector(this->m_explicit_A->values, this->m_LU.values, this->m_A_to_LU_mapping, this->m_explicit_A->get_num_nz()*this->m_explicit_A->get_block_size()); } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeLUSparsityPattern() { FatalError("Haven't implemented Multicolor ILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void 
MulticolorILUSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeLUSparsityPattern() { // ILU0 if (this->m_sparsity_level == 0) { // Copy everything except the values this->m_LU.copy_structure(*this->m_explicit_A); } // ILU1 else if (this->m_sparsity_level == 1) { this->sparsity_wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *this->m_cfg, "default" ); CSR_Multiply<TConfig_d>::csr_sparsity_ilu1( *this->m_explicit_A, this->m_LU, this->sparsity_wk ); CSR_Multiply<TConfig_d>::csr_workspace_delete( this->sparsity_wk ); if (this->m_use_bsrxmv) { this->m_LU.set_initialized(0); this->m_LU.computeDiagonal(); this->m_LU.set_initialized(1); } this->m_LU.setMatrixColoring(&(this->m_explicit_A->getMatrixColoring())); } else { FatalError("Haven't implemented Multicolor ILU smoother for this sparsity level. ", AMGX_ERR_NOT_IMPLEMENTED); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeLUFactors() { FatalError("Haven't implemented Multicolor ILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeLUFactors() { const int CtaSize = 128; // Number of threads per CTA const int SMemSize = 128; const int nWarps = CtaSize / 32; device_vector_alloc<int> returnValue(1); returnValue[0] = 0; int num_colors = this->m_LU.getMatrixColoring().getNumColors(); const IndexType *LU_sorted_rows_by_color_ptr = this->m_LU.getMatrixColoring().getSortedRowsByColor().raw(); for (int i = 0; i < num_colors; i++) { const IndexType color_offset = this->m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i]; const IndexType num_rows_per_color = this->m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset; #ifdef EXPERIMENTAL_LU_FACTORS int GridSize = std::min( 2048, ( num_rows_per_color + nWarps - 1 ) / nWarps ); if ( GridSize == 0 ) { continue; // if perfect coloring (color 0 has no vertices) } if ( this->m_LU.get_block_dimx() == 4 && this->m_LU.get_block_dimy() == 4 ) { if ( this->m_explicit_A->getBlockFormat() == ROW_MAJOR ) { compute_LU_factors_4x4_kernel_warp<ValueTypeA, CtaSize, SMemSize, true> <<< GridSize, CtaSize>>>( this->m_LU.get_num_rows( ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_LU.diag[0] ), thrust::raw_pointer_cast( &this->m_LU.values[0] ), thrust::raw_pointer_cast( &this->m_LU.m_smaller_color_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.m_larger_color_offsets[0] ), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, thrust::raw_pointer_cast( &returnValue[0] ) ); } else { compute_LU_factors_4x4_kernel_warp<ValueTypeA, CtaSize, SMemSize, false> <<< GridSize, CtaSize>>>( this->m_LU.get_num_rows( ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_LU.diag[0] ), thrust::raw_pointer_cast( &this->m_LU.values[0] ), thrust::raw_pointer_cast( &this->m_LU.m_smaller_color_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.m_larger_color_offsets[0] ), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, thrust::raw_pointer_cast( &returnValue[0] ) ); } cudaCheckError(); } else { FatalError("Unsupported block 
size for Multicolor ILU solver, computeLUFactors", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } #else int GridSize = std::min( AMGX_GRID_MAX_SIZE, ( num_rows_per_color + nWarps - 1 ) / nWarps ); if ( GridSize == 0 ) { continue; // if perfect coloring (color 0 has no vertices) } if ( this->m_LU.get_block_dimx() == 4 && this->m_LU.get_block_dimy() == 4 ) { //computeLUFactors_4x4_kernel<ValueTypeA,CtaSize,SMemSize> <<< GridSize, CtaSize>>> ( if (this->m_explicit_A->getBlockFormat() == ROW_MAJOR) { computeLUFactors_4x4_kernel<ValueTypeA, CtaSize, SMemSize, true> <<< GridSize, CtaSize>>> ( this->m_LU.get_num_rows( ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_LU.diag[0] ), thrust::raw_pointer_cast( &this->m_LU.values[0] ), thrust::raw_pointer_cast( &this->m_LU.m_smaller_color_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.m_larger_color_offsets[0] ), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, thrust::raw_pointer_cast( &returnValue[0] ) ); } else { computeLUFactors_4x4_kernel<ValueTypeA, CtaSize, SMemSize, false> <<< GridSize, CtaSize>>> ( this->m_LU.get_num_rows( ), thrust::raw_pointer_cast( &this->m_LU.row_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.col_indices[0] ), thrust::raw_pointer_cast( &this->m_LU.diag[0] ), thrust::raw_pointer_cast( &this->m_LU.values[0] ), thrust::raw_pointer_cast( &this->m_LU.m_smaller_color_offsets[0] ), thrust::raw_pointer_cast( &this->m_LU.m_larger_color_offsets[0] ), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, thrust::raw_pointer_cast( &returnValue[0] ) ); } cudaCheckError(); } else { FatalError("Unsupported block size for Multicolor ILU solver, computeLUFactors", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } #endif } // Check returnValue flag if ( returnValue[0] == 1 ) { FatalError( "Number of nonzeros per row exceeds allocated shared memory", AMGX_ERR_NO_MEMORY); } } // Solver pre-setup template<class T_Config> void MulticolorILUSolver_Base<T_Config>::pre_setup() { // Check if matrix is colored if (this->m_explicit_A->getColoringLevel() < m_sparsity_level + 1) { FatalError("Matrix must be colored with coloring_level > sparsity_level for the multicolorILUsolver", AMGX_ERR_CONFIGURATION); } // Compute extended sparsity pattern based on coloring and matrix A computeLUSparsityPattern(); if (this->m_LU.hasProps(DIAG)) { FatalError("Multicolor ILU smoother does not support outside diagonal. Try setting reorder_cols_by_color=1 and insert_diag_while_reordering=1 in the multicolor_ilu solver scope in configuration file", AMGX_ERR_NOT_IMPLEMENTED); } if (m_sparsity_level == 0 && !this->m_LU.getColsReorderedByColor()) { FatalError("Multicolor ILU smoother requires matrix to be reordered by color with ILU0 solver. 
Try setting reorder_cols_by_color=1 and insert_diag_while_reordering=1 in the multicolor_ilu solver scope in configuration file", AMGX_ERR_NOT_IMPLEMENTED); } // Reorder the columns of LU by color if (m_sparsity_level != 0) { // Reorder columns of LU by color m_LU.reorderColumnsByColor(false); // Compute mapping between entries in A and entries in LU if (this->m_explicit_A->hasProps(DIAG)) { m_A_to_LU_mapping.resize(this->m_explicit_A->get_num_nz() + this->m_explicit_A->get_num_rows()); } else { m_A_to_LU_mapping.resize(this->m_explicit_A->get_num_nz()); } computeAtoLUmapping(); } int N = this->m_LU.get_num_rows() * this->m_LU.get_block_dimy(); m_delta.resize(N); m_Delta.resize(N); m_Delta.set_block_dimy(this->m_explicit_A->get_block_dimy()); m_Delta.set_block_dimx(1); m_delta.set_block_dimy(this->m_explicit_A->get_block_dimy()); m_delta.set_block_dimx(1); } template<class T_Config> void MulticolorILUSolver_Base<T_Config>::printSolverParameters() const { std::cout << "relaxation_factor = " << this->m_weight << std::endl; std::cout << "use_bsrxmv = " << this->m_use_bsrxmv << std::endl; std::cout << "ilu_sparsity_level = " << this->m_sparsity_level << std::endl; } // Solver setup template<class T_Config> void MulticolorILUSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { this->m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!this->m_explicit_A) { FatalError("MulticolorILUSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } if (this->m_explicit_A->getColoringLevel() < 1) { FatalError("Matrix must be colored to use multicolor ilu solver. Try setting: coloring_level=1 or coloring_level=2 in the configuration file", AMGX_ERR_NOT_IMPLEMENTED); } if (!reuse_matrix_structure) { this->pre_setup(); } // Fill LU sparsity pattern fillLUValuesWithAValues(); // Compute LU factors in place (update LU.values) computeLUFactors(); } // template<class T_Config> void MulticolorILUSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) {} // Solve one iteration template<class T_Config> bool MulticolorILUSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { if ( !m_use_bsrxmv && (this->m_LU.get_block_dimx() == 4 && this->m_LU.get_block_dimy() == 4) ) { smooth_4x4(b, x, xIsZero); } else { smooth_bxb(b, x, xIsZero); } // Do we converge ? return this->converged(b, x); } template<class T_Config> void MulticolorILUSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const VVector &b, VVector &x, bool xIsZero) { FatalError("Haven't implemented Multicolor DILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const VVector &b, VVector &x, bool xIsZero) { Matrix<TConfig_d> &m_LU = this->m_LU; Matrix<TConfig_d> &m_A = *this->m_explicit_A; int N = m_LU.get_num_rows() * m_LU.get_block_dimy(); cudaCheckError(); if (!m_LU.getColsReorderedByColor()) { FatalError("ILU solver currently only works if columns are reordered by color. 
Try setting reordering_cols_by_color=1 in the multicolor_ilu solver scope in the configuration file", AMGX_ERR_NOT_IMPLEMENTED); } // --------------------------------------------------------- // Solving Lower triangular system, with identity diagonal // --------------------------------------------------------- const IndexType *LU_sorted_rows_by_color_ptr = m_LU.getMatrixColoring().getSortedRowsByColor().raw(); int num_colors = this->m_LU.getMatrixColoring().getNumColors(); for (int i = 0; i < num_colors; i++) { const IndexType color_offset = m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i]; const IndexType num_rows_per_color = m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset; #ifdef EXPERIMENTAL_LU_FORWARD const int CtaSize = 128; // Number of threads per CTA const int nHalfWarps = CtaSize / 16; int GridSize = std::min( 2048, ( num_rows_per_color + nHalfWarps - 1 ) / nHalfWarps ); if ( GridSize == 0 ) { continue; // if perfect coloring (color 0 has no vertices) } if ( this->m_explicit_A->getBlockFormat() == ROW_MAJOR ) { if (m_A.hasProps(DIAG)) { LU_forward_4x4_kernel_warp<IndexType, ValueTypeA, ValueTypeB, CtaSize, 4, true, true> <<< GridSize, CtaSize>>>( m_LU.row_offsets.raw(), m_LU.m_smaller_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.values.raw(), m_A.row_offsets.raw(), m_A.col_indices.raw(), m_A.values.raw(), m_A.diag.raw(), x.raw(), b.raw(), this->m_delta.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, xIsZero ); } else { LU_forward_4x4_kernel_warp<IndexType, ValueTypeA, ValueTypeB, CtaSize, 4, true, false> <<< GridSize, CtaSize>>>( m_LU.row_offsets.raw(), m_LU.m_smaller_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.values.raw(), m_A.row_offsets.raw(), m_A.col_indices.raw(), m_A.values.raw(), m_A.diag.raw(), x.raw(), b.raw(), this->m_delta.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, xIsZero ); } } else { // COL_MAJOR if (m_A.hasProps(DIAG)) { LU_forward_4x4_kernel_warp<IndexType, ValueTypeA, ValueTypeB, CtaSize, 4, false, true> <<< GridSize, CtaSize>>>( m_LU.row_offsets.raw(), m_LU.m_smaller_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.values.raw(), m_A.row_offsets.raw(), m_A.col_indices.raw(), m_A.values.raw(), m_A.diag.raw(), x.raw(), b.raw(), this->m_delta.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, xIsZero ); } else { LU_forward_4x4_kernel_warp<IndexType, ValueTypeA, ValueTypeB, CtaSize, 4, false, false> <<< GridSize, CtaSize>>>( m_LU.row_offsets.raw(), m_LU.m_smaller_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.values.raw(), m_A.row_offsets.raw(), m_A.col_indices.raw(), m_A.values.raw(), m_A.diag.raw(), x.raw(), b.raw(), this->m_delta.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, xIsZero ); } } #else const int CtaSize = 128; const int blockrows_per_cta = CtaSize / 4; const int GridSize = min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color + blockrows_per_cta - 1) / blockrows_per_cta); if ( GridSize == 0 ) { continue; // if perfect coloring (color 0 has no vertices) } if (this->m_explicit_A->hasProps(DIAG)) { FatalError("this implementation of LU forward solve does not support A with external diagonal", AMGX_ERR_NOT_IMPLEMENTED); } if (this->m_explicit_A->getBlockFormat() == ROW_MAJOR) { LU_forward_4x4_kernel<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 8, 4, true> <<< GridSize, CtaSize>>> (m_LU.row_offsets.raw(), m_LU.m_smaller_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.values.raw(), m_A.row_offsets.raw(), 
m_A.col_indices.raw(), m_A.values.raw(), x.raw(), b.raw(), delta.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, xIsZero); } else { LU_forward_4x4_kernel<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 8, 4, false> <<< GridSize, CtaSize>>> (m_LU.row_offsets.raw(), m_LU.m_smaller_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.values.raw(), m_A.row_offsets.raw(), m_A.col_indices.raw(), m_A.values.raw(), x.raw(), b.raw(), delta.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, xIsZero); } #endif cudaCheckError(); } // -------------------- // Backward Sweep // -------------------- for (int i = num_colors - 1; i >= 0; i--) { const IndexType color_offset = m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i]; const IndexType num_rows_per_color = m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset; #ifdef EXPERIMENTAL_LU_BACKWARD const int CtaSize = 128; // Number of threads per CTA const int nHalfWarps = CtaSize / 16; int GridSize = std::min( 2048, ( num_rows_per_color + nHalfWarps - 1 ) / nHalfWarps ); if ( GridSize == 0 ) { continue; // if perfect coloring (color 0 has no vertices) } if (this->m_explicit_A->getBlockFormat() == ROW_MAJOR) { LU_backward_4x4_kernel_warp<IndexType, ValueTypeA, ValueTypeB, CtaSize, true> <<< GridSize, CtaSize>>>( m_LU.row_offsets.raw(), m_LU.m_larger_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.diag.raw(), m_LU.values.raw(), this->m_delta.raw(), this->m_Delta.raw(), x.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, num_colors, this->m_weight, xIsZero); } else { LU_backward_4x4_kernel_warp<IndexType, ValueTypeA, ValueTypeB, CtaSize, false> <<< GridSize, CtaSize>>>( m_LU.row_offsets.raw(), m_LU.m_larger_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.diag.raw(), m_LU.values.raw(), this->m_delta.raw(), this->m_Delta.raw(), x.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, num_colors, this->m_weight, xIsZero); } #else const int CtaSize = 128; const int blockrows_per_cta = CtaSize / 4; const int GridSize = min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color + blockrows_per_cta - 1) / blockrows_per_cta); if ( GridSize == 0 ) { continue; // if perfect coloring (color 0 has no vertices) } if (this->m_explicit_A->getBlockFormat() == ROW_MAJOR) { LU_backward_4x4_kernel<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 8, 4, true> <<< GridSize, CtaSize>>> (m_LU.row_offsets.raw(), m_LU.m_larger_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.diag.raw(), m_LU.values.raw(), this->m_delta.raw(), this->m_Delta.raw(), x.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, num_colors, this->m_weight, xIsZero); } else { LU_backward_4x4_kernel<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 8, 4, false> <<< GridSize, CtaSize>>> (m_LU.row_offsets.raw(), m_LU.m_larger_color_offsets.raw(), m_LU.col_indices.raw(), m_LU.diag.raw(), m_LU.values.raw(), this->m_delta.raw(), this->m_Delta.raw(), x.raw(), LU_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, i, num_colors, this->m_weight, xIsZero); } #endif } cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_bxb(const VVector &b, VVector &x, bool xIsZero) { FatalError("Haven't implemented Multicolor DILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, 
AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MulticolorILUSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_bxb(const VVector &b, VVector &x, bool xIsZero) { Matrix<TConfig_d> &m_LU = this->m_LU; Matrix<TConfig_d> &m_A = *this->m_explicit_A; int N = m_LU.get_num_rows() * m_LU.get_block_dimy(); if (!m_LU.getColsReorderedByColor()) { FatalError("ILU solver currently only works if columns are reordered by color. Try setting reorder_cols_by_color=1 in the multicolor_ilu solver scope in the configuration file", AMGX_ERR_NOT_IMPLEMENTED); } if (this->m_explicit_A->getBlockFormat() == COL_MAJOR) { FatalError("ILU solver for arbitrary block sizes only works with ROW_MAJOR matrices", AMGX_ERR_NOT_IMPLEMENTED); } // --------------------------------------------------------- // Solving Lower triangular system, with identity diagonal // --------------------------------------------------------- const IndexType *LU_sorted_rows_by_color_ptr = m_LU.getMatrixColoring().getSortedRowsByColor().raw(); int num_colors = this->m_LU.getMatrixColoring().getNumColors(); //delta = b; thrust::copy(b.begin(), b.end(), this->m_delta.begin()); //delta = delta - Ax; Cusparse::bsrmv((ValueTypeA) - 1.0, m_A, x, (ValueTypeA)1.0, this->m_delta); cudaCheckError(); // Setting Delta to zero thrust::fill(this->m_Delta.begin(), this->m_Delta.end(), (ValueTypeB)0.0f); cudaCheckError(); bool skipped_end = false; for (int i = 0; i < num_colors; i++) { const IndexType color_offset = m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i]; const IndexType num_rows_per_color = m_LU.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset; if (num_rows_per_color == 0) { continue; } // if perfect coloring (color 0 has no vertices) if (skipped_end) { // delta = delta - LU*Delta smaller colors Cusparse::bsrmv(Cusparse::SMALLER_COLORS, i, (ValueTypeA) - 1.0f, m_LU, this->m_delta, (ValueTypeA)1.0f, this->m_delta); } if (num_rows_per_color > 0) { skipped_end = true; } } cudaCheckError(); skipped_end = false; // -------------------- // Backward Sweep // -------------------- for (int i = num_colors - 1; i >= 0; i--) { // delta = delta - LU*Delta larger colors Cusparse::bsrmv(Cusparse::LARGER_COLORS, i, (ValueTypeA) - 1.0f, m_LU, this->m_Delta, (ValueTypeA)1.0f, this->m_delta); // Multiple by inverse stored on diagonal Cusparse::bsrmv(Cusparse::DIAG_COL, i, (ValueTypeA) 1.0f, m_LU, this->m_delta, 0.0f, this->m_Delta); } cudaCheckError(); axpy(this->m_Delta, x, this->m_weight, 0, x.size()); cudaCheckError(); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MulticolorILUSolver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MulticolorILUSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } // namespace amgx
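/*
 * A minimal host-side sketch (not part of AMGX) of the multicolor ILU
 * smoothing step that the device kernels above implement, written for the
 * scalar (1x1 block) case for clarity.  Assumptions: `L` holds the strictly
 * lower ILU factor with an implicit unit diagonal, `U` the strictly upper
 * factor, `Dinv` the inverted diagonal, all in CSR; rows are grouped by
 * color so that for any row the L part references only smaller colors and
 * the U part only larger colors (the role of getSortedRowsByColor()).
 * All names below are illustrative, not AMGX API.
 */
#include <vector>

struct CsrSketch { std::vector<int> row_off, col; std::vector<double> val; };

// delta = b - A*x; forward sweep (I+L)*delta = delta; backward sweep
// Delta = Dinv*(delta - U*Delta); then the relaxed update x += w*Delta.
inline void multicolorIluSmoothSketch(
    const CsrSketch& A, const CsrSketch& L, const CsrSketch& U,
    const std::vector<double>& Dinv,
    const std::vector<std::vector<int>>& rows_by_color,
    const std::vector<double>& b, std::vector<double>& x, double w)
{
    const int n = static_cast<int>(x.size());
    std::vector<double> delta(n), Delta(n, 0.0);
    for (int r = 0; r < n; ++r) {                       // residual delta = b - A*x
        double s = b[r];
        for (int k = A.row_off[r]; k < A.row_off[r + 1]; ++k)
            s -= A.val[k] * x[A.col[k]];
        delta[r] = s;
    }
    for (const auto& color : rows_by_color)             // forward: smaller colors already done
        for (int r : color)
            for (int k = L.row_off[r]; k < L.row_off[r + 1]; ++k)
                delta[r] -= L.val[k] * delta[L.col[k]];
    for (auto it = rows_by_color.rbegin(); it != rows_by_color.rend(); ++it)
        for (int r : *it) {                             // backward: larger colors already done
            double s = delta[r];
            for (int k = U.row_off[r]; k < U.row_off[r + 1]; ++k)
                s -= U.val[k] * Delta[U.col[k]];
            Delta[r] = Dinv[r] * s;
        }
    for (int r = 0; r < n; ++r) x[r] += w * Delta[r];   // weighted correction
}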
/******************************************************************************
* Test of BlockScan utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <iostream> #include <limits> #include <typeinfo> #include <cub/block/block_scan.cuh> #include <cub/block/block_load.cuh> #include <cub/block/block_store.cuh> #include <cub/util_ptx.cuh> #include <cub/util_allocator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; int g_repeat = 0; CachingDeviceAllocator g_allocator(true); /** * Primitive variant to test */ enum TestMode { BASIC, AGGREGATE, PREFIX, }; /** * Scan mode to test */ enum ScanMode { EXCLUSIVE, INCLUSIVE }; /** * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) */ template<typename OpT> struct WrapperFunctor { OpT op; WrapperFunctor(OpT op) : op(op) {} template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return op(a, b); } }; /** * Stateful prefix functor */ template < typename T, typename ScanOpT> struct BlockPrefixCallbackOp { int linear_tid; T prefix; ScanOpT scan_op; __device__ __forceinline__ BlockPrefixCallbackOp(int linear_tid, T prefix, ScanOpT scan_op) : linear_tid(linear_tid), prefix(prefix), scan_op(scan_op) {} __device__ __forceinline__ T operator()(T block_aggregate) { // For testing purposes T retval = (linear_tid == 0) ? prefix : T(); prefix = scan_op(prefix, block_aggregate); return retval; } }; //--------------------------------------------------------------------- // Exclusive scan //--------------------------------------------------------------------- /// Exclusive scan (BASIC, 1) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { block_scan.ExclusiveScan(data[0], data[0], initial_value, scan_op); } /// Exclusive scan (BASIC, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, int ITEMS_PER_THREAD, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { block_scan.ExclusiveScan(data, data, initial_value, scan_op); } /// Exclusive scan (AGGREGATE, 1) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { block_scan.ExclusiveScan(data[0], data[0], initial_value, scan_op, block_aggregate); } /// Exclusive scan (AGGREGATE, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, int ITEMS_PER_THREAD, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT 
&block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { block_scan.ExclusiveScan(data, data, initial_value, scan_op, block_aggregate); } /// Exclusive scan (PREFIX, 1) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, IsPrimitiveT is_primitive) { block_scan.ExclusiveScan(data[0], data[0], scan_op, prefix_op); } /// Exclusive scan (PREFIX, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, int ITEMS_PER_THREAD, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, IsPrimitiveT is_primitive) { block_scan.ExclusiveScan(data, data, scan_op, prefix_op); } //--------------------------------------------------------------------- // Exclusive sum //--------------------------------------------------------------------- /// Exclusive sum (BASIC, 1) template <typename BlockScanT, typename T, typename PrefixCallbackOp> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { block_scan.ExclusiveSum(data[0], data[0]); } /// Exclusive sum (BASIC, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename PrefixCallbackOp, int ITEMS_PER_THREAD> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { block_scan.ExclusiveSum(data, data); } /// Exclusive sum (AGGREGATE, 1) template <typename BlockScanT, typename T, typename PrefixCallbackOp> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { block_scan.ExclusiveSum(data[0], data[0], block_aggregate); } /// Exclusive sum (AGGREGATE, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename PrefixCallbackOp, int ITEMS_PER_THREAD> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { block_scan.ExclusiveSum(data, data, block_aggregate); } /// Exclusive sum (PREFIX, 1) template <typename BlockScanT, typename T, typename PrefixCallbackOp> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, Int2Type<true> is_primitive) { block_scan.ExclusiveSum(data[0], data[0], 
prefix_op); } /// Exclusive sum (PREFIX, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename PrefixCallbackOp, int ITEMS_PER_THREAD> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<EXCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, Int2Type<true> is_primitive) { block_scan.ExclusiveSum(data, data, prefix_op); } //--------------------------------------------------------------------- // Inclusive scan //--------------------------------------------------------------------- /// Inclusive scan (BASIC, 1) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { block_scan.InclusiveScan(data[0], data[0], scan_op); } /// Inclusive scan (BASIC, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, int ITEMS_PER_THREAD, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { block_scan.InclusiveScan(data, data, scan_op); } /// Inclusive scan (AGGREGATE, 1) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { block_scan.InclusiveScan(data[0], data[0], scan_op, block_aggregate); } /// Inclusive scan (AGGREGATE, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, int ITEMS_PER_THREAD, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { block_scan.InclusiveScan(data, data, scan_op, block_aggregate); } /// Inclusive scan (PREFIX, 1) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, IsPrimitiveT is_primitive) { block_scan.InclusiveScan(data[0], data[0], scan_op, prefix_op); } /// Inclusive scan (PREFIX, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename ScanOpT, typename PrefixCallbackOp, int ITEMS_PER_THREAD, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, IsPrimitiveT is_primitive) { block_scan.InclusiveScan(data, data, scan_op, prefix_op); } 
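/*
 * The DeviceTest overloads above are selected entirely at compile time via
 * cub::Int2Type tag dispatch.  A stand-alone sketch of the same pattern
 * (IntTag/RunVariant/Run are illustrative names, not part of CUB):
 */
#include <cstdio>

template <int VALUE> struct IntTag { enum { kValue = VALUE }; };   // like cub::Int2Type

enum SketchMode { SKETCH_BASIC = 0, SKETCH_AGGREGATE = 1 };

inline void RunVariant(IntTag<SKETCH_BASIC>)     { printf("basic variant\n"); }
inline void RunVariant(IntTag<SKETCH_AGGREGATE>) { printf("aggregate variant\n"); }

template <SketchMode MODE>
inline void Run() { RunVariant(IntTag<MODE>()); }  // overload picked by the tag's type

// Usage: Run<SKETCH_BASIC>();  Run<SKETCH_AGGREGATE>();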
//--------------------------------------------------------------------- // Inclusive sum //--------------------------------------------------------------------- /// Inclusive sum (BASIC, 1) template <typename BlockScanT, typename T, typename PrefixCallbackOp> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { block_scan.InclusiveSum(data[0], data[0]); } /// Inclusive sum (BASIC, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename PrefixCallbackOp, int ITEMS_PER_THREAD> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { block_scan.InclusiveSum(data, data); } /// Inclusive sum (AGGREGATE, 1) template <typename BlockScanT, typename T, typename PrefixCallbackOp> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { block_scan.InclusiveSum(data[0], data[0], block_aggregate); } /// Inclusive sum (AGGREGATE, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename PrefixCallbackOp, int ITEMS_PER_THREAD> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { block_scan.InclusiveSum(data, data, block_aggregate); } /// Inclusive sum (PREFIX, 1) template <typename BlockScanT, typename T, typename PrefixCallbackOp> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, Int2Type<true> is_primitive) { block_scan.InclusiveSum(data[0], data[0], prefix_op); } /// Inclusive sum (PREFIX, ITEMS_PER_THREAD) template <typename BlockScanT, typename T, typename PrefixCallbackOp, int ITEMS_PER_THREAD> __device__ __forceinline__ void DeviceTest( BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, Int2Type<INCLUSIVE> scan_mode, Int2Type<PREFIX> test_mode, Int2Type<true> is_primitive) { block_scan.InclusiveSum(data, data, prefix_op); } //--------------------------------------------------------------------- // Test kernels //--------------------------------------------------------------------- /** * BlockScan test kernel. 
*/ template < int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_DIM_Z, int ITEMS_PER_THREAD, ScanMode SCAN_MODE, TestMode TEST_MODE, BlockScanAlgorithm ALGORITHM, typename T, typename ScanOpT> __launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) __global__ void BlockScanKernel( T *d_in, T *d_out, T *d_aggregate, ScanOpT scan_op, T initial_value, clock_t *d_elapsed) { const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; // Parameterize BlockScan type for our thread block typedef BlockScan<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockScanT; // Allocate temp storage in shared memory __shared__ typename BlockScanT::TempStorage temp_storage; int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); // Per-thread tile data T data[ITEMS_PER_THREAD]; LoadDirectBlocked(linear_tid, d_in, data); __threadfence_block(); // workaround to prevent clock hoisting clock_t start = clock(); __threadfence_block(); // workaround to prevent clock hoisting // Test scan T block_aggregate; BlockScanT block_scan(temp_storage); BlockPrefixCallbackOp<T, ScanOpT> prefix_op(linear_tid, initial_value, scan_op); DeviceTest(block_scan, data, initial_value, scan_op, block_aggregate, prefix_op, Int2Type<SCAN_MODE>(), Int2Type<TEST_MODE>(), Int2Type<Traits<T>::PRIMITIVE>()); // Stop cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t stop = clock(); __threadfence_block(); // workaround to prevent clock hoisting // Store output StoreDirectBlocked(linear_tid, d_out, data); // Store block_aggregate if (TEST_MODE != BASIC) d_aggregate[linear_tid] = block_aggregate; // Store prefix if (TEST_MODE == PREFIX) { if (linear_tid == 0) d_out[TILE_SIZE] = prefix_op.prefix; } // Store time if (linear_tid == 0) *d_elapsed = (start > stop) ? start - stop : stop - start; } //--------------------------------------------------------------------- // Host utility subroutines //--------------------------------------------------------------------- /** * Initialize exclusive-scan problem (and solution) */ template <typename T, typename ScanOpT> T Initialize( GenMode gen_mode, T *h_in, T *h_reference, int num_items, ScanOpT scan_op, T initial_value, Int2Type<EXCLUSIVE>) { InitValue(gen_mode, h_in[0], 0); T block_aggregate = h_in[0]; h_reference[0] = initial_value; T inclusive = scan_op(initial_value, h_in[0]); for (int i = 1; i < num_items; ++i) { InitValue(gen_mode, h_in[i], i); h_reference[i] = inclusive; inclusive = scan_op(inclusive, h_in[i]); block_aggregate = scan_op(block_aggregate, h_in[i]); } return block_aggregate; } /** * Initialize inclusive-scan problem (and solution) */ template <typename T, typename ScanOpT> T Initialize( GenMode gen_mode, T *h_in, T *h_reference, int num_items, ScanOpT scan_op, T initial_value, Int2Type<INCLUSIVE>) { InitValue(gen_mode, h_in[0], 0); T block_aggregate = h_in[0]; T inclusive = scan_op(initial_value, h_in[0]); h_reference[0] = inclusive; for (int i = 1; i < num_items; ++i) { InitValue(gen_mode, h_in[i], i); inclusive = scan_op(inclusive, h_in[i]); block_aggregate = scan_op(block_aggregate, h_in[i]); h_reference[i] = inclusive; } return block_aggregate; } /** * Test thread block scan. 
(Specialized for sufficient resources) */ template < int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_DIM_Z, int ITEMS_PER_THREAD, ScanMode SCAN_MODE, TestMode TEST_MODE, BlockScanAlgorithm ALGORITHM, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value, Int2Type<true> sufficient_resources) { const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; // Allocate host arrays T *h_in = new T[TILE_SIZE]; T *h_reference = new T[TILE_SIZE]; T *h_aggregate = new T[BLOCK_THREADS]; // Initialize problem T block_aggregate = Initialize( gen_mode, h_in, h_reference, TILE_SIZE, scan_op, initial_value, Int2Type<SCAN_MODE>()); // Test reference block_aggregate is returned in all threads for (int i = 0; i < BLOCK_THREADS; ++i) { h_aggregate[i] = block_aggregate; } // Run kernel printf("Test-mode %d, gen-mode %d, policy %d, %s %s BlockScan, %d (%d,%d,%d) thread block threads, %d items per thread, %d tile size, %s (%d bytes) elements:\n", TEST_MODE, gen_mode, ALGORITHM, (SCAN_MODE == INCLUSIVE) ? "Inclusive" : "Exclusive", typeid(ScanOpT).name(), BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, TILE_SIZE, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Initialize/clear device arrays T *d_in = NULL; T *d_out = NULL; T *d_aggregate = NULL; clock_t *d_elapsed = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * (TILE_SIZE + 2))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_aggregate, sizeof(T) * BLOCK_THREADS)); CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * TILE_SIZE, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * (TILE_SIZE + 1))); CubDebugExit(cudaMemset(d_aggregate, 0, sizeof(T) * BLOCK_THREADS)); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < TILE_SIZE; i++) { std::cout << CoutCast(h_in[i]) << ", "; } printf("\n\n"); } // Run block_aggregate/prefix kernel dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); BlockScanKernel<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, ALGORITHM><<<1, block_dims>>>( d_in, d_out, d_aggregate, scan_op, initial_value, d_elapsed); CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); // Copy out and display results printf("\tScan results: "); int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); if (TEST_MODE == AGGREGATE) { // Copy out and display block_aggregate printf("\tScan block aggregate: "); compare = CompareDeviceResults(h_aggregate, d_aggregate, BLOCK_THREADS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); } if (TEST_MODE == PREFIX) { // Copy out and display updated prefix printf("\tScan running total: "); T running_total = scan_op(initial_value, block_aggregate); compare = CompareDeviceResults(&running_total, d_out + TILE_SIZE, 1, g_verbose, g_verbose); printf("%s\n", compare ? 
"FAIL" : "PASS"); AssertEquals(0, compare); } printf("\tElapsed clocks: "); DisplayDeviceResults(d_elapsed, 1); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_aggregate) delete[] h_aggregate; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_aggregate) CubDebugExit(g_allocator.DeviceFree(d_aggregate)); if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); } /** * Test thread block scan. (Specialized for insufficient resources) */ template < int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_DIM_Z, int ITEMS_PER_THREAD, ScanMode SCAN_MODE, TestMode TEST_MODE, BlockScanAlgorithm ALGORITHM, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value, Int2Type<false> sufficient_resources) {} /** * Test thread block scan. */ template < int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_DIM_Z, int ITEMS_PER_THREAD, ScanMode SCAN_MODE, TestMode TEST_MODE, BlockScanAlgorithm ALGORITHM, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value) { // Check size of smem storage for the target arch to make sure it will fit typedef BlockScan<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockScanT; enum { #if defined(SM100) || defined(SM110) || defined(SM130) sufficient_smem = (sizeof(typename BlockScanT::TempStorage) <= 16 * 1024), sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512), #else sufficient_smem = (sizeof(typename BlockScanT::TempStorage) <= 16 * 1024), sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024), #endif #if defined(_WIN32) || defined(_WIN64) // Accommodate ptxas crash bug (access violation) on Windows special_skip = ((TEST_ARCH <= 130) && (Equals<T, TestBar>::VALUE) && (BLOCK_DIM_Z > 1)), #else special_skip = false, #endif sufficient_resources = (sufficient_smem && sufficient_threads && !special_skip), }; Test<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, ALGORITHM>( gen_mode, scan_op, initial_value, Int2Type<sufficient_resources>()); } /** * Run test for different thread block dimensions */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, ScanMode SCAN_MODE, TestMode TEST_MODE, BlockScanAlgorithm ALGORITHM, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value) { Test<BLOCK_THREADS, 1, 1, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, ALGORITHM>(gen_mode, scan_op, initial_value); Test<BLOCK_THREADS, 2, 2, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, ALGORITHM>(gen_mode, scan_op, initial_value); } /** * Run test for different policy types */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, ScanMode SCAN_MODE, TestMode TEST_MODE, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value) { #ifdef TEST_RAKING Test<BLOCK_THREADS, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, BLOCK_SCAN_RAKING>(gen_mode, scan_op, initial_value); #endif #ifdef TEST_RAKING_MEMOIZE Test<BLOCK_THREADS, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, BLOCK_SCAN_RAKING_MEMOIZE>(gen_mode, scan_op, initial_value); #endif #ifdef TEST_WARP_SCANS Test<BLOCK_THREADS, ITEMS_PER_THREAD, SCAN_MODE, TEST_MODE, BLOCK_SCAN_WARP_SCANS>(gen_mode, scan_op, initial_value); #endif } /** * Run tests for different primitive variants */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T identity, T initial_value) { // Exclusive (use identity as initial value 
because it will dispatch to *Sum variants that don't take initial values) Test<BLOCK_THREADS, ITEMS_PER_THREAD, EXCLUSIVE, BASIC>(gen_mode, scan_op, identity); Test<BLOCK_THREADS, ITEMS_PER_THREAD, EXCLUSIVE, AGGREGATE>(gen_mode, scan_op, identity); Test<BLOCK_THREADS, ITEMS_PER_THREAD, EXCLUSIVE, PREFIX>(gen_mode, scan_op, identity); // Exclusive (non-specialized, so we can use initial-value) Test<BLOCK_THREADS, ITEMS_PER_THREAD, EXCLUSIVE, BASIC>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); Test<BLOCK_THREADS, ITEMS_PER_THREAD, EXCLUSIVE, AGGREGATE>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); Test<BLOCK_THREADS, ITEMS_PER_THREAD, EXCLUSIVE, PREFIX>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); // Inclusive Test<BLOCK_THREADS, ITEMS_PER_THREAD, INCLUSIVE, BASIC>(gen_mode, scan_op, identity); // This scan doesn't take an initial value Test<BLOCK_THREADS, ITEMS_PER_THREAD, INCLUSIVE, AGGREGATE>(gen_mode, scan_op, identity); // This scan doesn't take an initial value Test<BLOCK_THREADS, ITEMS_PER_THREAD, INCLUSIVE, PREFIX>(gen_mode, scan_op, initial_value); } /** * Run tests for different problem-generation options */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename ScanOpT, typename T> void Test( ScanOpT scan_op, T identity, T initial_value) { Test<BLOCK_THREADS, ITEMS_PER_THREAD>(UNIFORM, scan_op, identity, initial_value); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(INTEGER_SEED, scan_op, identity, initial_value); // Don't test randomly-generated floats b/c of stability if (Traits<T>::CATEGORY != FLOATING_POINT) Test<BLOCK_THREADS, ITEMS_PER_THREAD>(RANDOM, scan_op, identity, initial_value); } /** * Run tests for different data types and scan ops */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD> void Test() { // Get ptx version int ptx_version; CubDebugExit(PtxVersion(ptx_version)); // primitive Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), (unsigned char) 0, (unsigned char) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), (unsigned short) 0, (unsigned short) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), (unsigned int) 0, (unsigned int) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), (unsigned long long) 0, (unsigned long long) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), (float) 0, (float) 99); // primitive (alternative scan op) Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Max(), std::numeric_limits<char>::min(), (char) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Max(), std::numeric_limits<short>::min(), (short) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Max(), std::numeric_limits<int>::min(), (int) 99); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Max(), std::numeric_limits<long long>::min(), (long long) 99); if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Max(), std::numeric_limits<double>::max() * -1, (double) 99); // vec-1 Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_uchar1(0), make_uchar1(17)); // vec-2 Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_uchar2(0, 0), make_uchar2(17, 21)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_ushort2(0, 0), make_ushort2(17, 21)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_uint2(0, 0), make_uint2(17, 21)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_ulonglong2(0, 0), make_ulonglong2(17, 21)); // vec-4 Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_char4(0, 0, 0, 0), make_char4(17, 21, 32, 85)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_short4(0, 0, 0, 0), make_short4(17, 21, 
32, 85)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_int4(0, 0, 0, 0), make_int4(17, 21, 32, 85)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), make_longlong4(0, 0, 0, 0), make_longlong4(17, 21, 32, 85)); // complex Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), TestFoo::MakeTestFoo(0, 0, 0, 0), TestFoo::MakeTestFoo(17, 21, 32, 85)); Test<BLOCK_THREADS, ITEMS_PER_THREAD>(Sum(), TestBar(0, 0), TestBar(17, 21)); } /** * Run tests for different items per thread */ template <int BLOCK_THREADS> void Test() { Test<BLOCK_THREADS, 1>(); Test<BLOCK_THREADS, 2>(); Test<BLOCK_THREADS, 9>(); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("repeat", g_repeat); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--repeat=<repetitions of entire test suite>]" "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); #ifdef QUICK_TEST Test<128, 1, 1, 1, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_WARP_SCANS>(UNIFORM, Sum(), int(0)); // Compile/run quick tests Test<128, 1, 1, 4, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_WARP_SCANS>(UNIFORM, Sum(), int(0)); Test<128, 1, 1, 4, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_RAKING>(UNIFORM, Sum(), int(0)); Test<128, 1, 1, 4, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_RAKING_MEMOIZE>(UNIFORM, Sum(), int(0)); Test<128, 1, 1, 2, INCLUSIVE, PREFIX, BLOCK_SCAN_RAKING>(INTEGER_SEED, Sum(), TestFoo::MakeTestFoo(17, 21, 32, 85)); Test<128, 1, 1, 1, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_WARP_SCANS>(UNIFORM, Sum(), make_longlong4(17, 21, 32, 85)); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Run tests for different thread block sizes Test<17>(); Test<32>(); Test<62>(); Test<65>(); // Test<96>(); // TODO: file bug for UNREACHABLE error for Test<96, 9, BASIC, BLOCK_SCAN_RAKING>(UNIFORM, Sum(), NullType(), make_ulonglong2(17, 21)); Test<128>(); } #endif return 0; }
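/*
 * For reference, a minimal self-contained use of cub::BlockScan outside the
 * test harness above: one 128-thread block computes an exclusive prefix sum
 * of one int per thread.  A sketch; the kernel name and buffers are
 * illustrative, not part of the test suite.
 */
#include <cub/block/block_scan.cuh>

__global__ void MinimalExclusiveSum(const int *d_in, int *d_out)
{
    typedef cub::BlockScan<int, 128> BlockScanT;              // default BLOCK_SCAN_RAKING algorithm
    __shared__ typename BlockScanT::TempStorage temp_storage; // required shared memory

    int thread_data = d_in[threadIdx.x];                      // one item per thread
    BlockScanT(temp_storage).ExclusiveSum(thread_data, thread_data);
    d_out[threadIdx.x] = thread_data;                         // d_out[i] = d_in[0] + ... + d_in[i-1]
}
// Launch as: MinimalExclusiveSum<<<1, 128>>>(d_in, d_out);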
namespace faiss { namespace gpu { // Kernel responsible for calculating distance from residual vector to // each product quantizer code centroid template <typename OutCodeT, typename CentroidT, int DimsPerSubQuantizer, bool L2Distance> __global__ void __launch_bounds__(288, 4) pqCodeDistances(Tensor<float, 2, true> queries, int queriesPerBlock, Tensor<CentroidT, 2, true> coarseCentroids, Tensor<float, 3, true> pqCentroids, Tensor<int, 2, true> topQueryToCentroid, // (query id)(coarse)(subquantizer)(code) -> dist Tensor<OutCodeT, 4, true> outCodeDistances) { const auto numSubQuantizers = pqCentroids.getSize(0); const auto dimsPerSubQuantizer = pqCentroids.getSize(1); assert(DimsPerSubQuantizer == dimsPerSubQuantizer); const auto codesPerSubQuantizer = pqCentroids.getSize(2); bool isLoadingThread = threadIdx.x >= codesPerSubQuantizer; int loadingThreadId = threadIdx.x - codesPerSubQuantizer; extern __shared__ float smem[]; // Each thread calculates a single code float subQuantizerData[DimsPerSubQuantizer]; auto code = threadIdx.x; auto subQuantizer = blockIdx.y; // Each thread will load the pq centroid data for the code that it // is processing if(!isLoadingThread) { #pragma unroll for (int i = 0; i < DimsPerSubQuantizer; ++i) { subQuantizerData[i] = pqCentroids[subQuantizer][i][code].ldg(); } } // Where we store our query vector float* smemQuery = smem; // Where we store our residual vector; this is double buffered so we // can be loading the next one while processing the current one float* smemResidual1 = &smemQuery[DimsPerSubQuantizer]; float* smemResidual2 = &smemResidual1[DimsPerSubQuantizer]; // Where we pre-load the coarse centroid IDs int* coarseIds = (int*) &smemResidual2[DimsPerSubQuantizer]; // Each thread is calculating the distance for a single code, // performing the reductions locally // Handle multiple queries per block auto startQueryId = blockIdx.x * queriesPerBlock; auto numQueries = queries.getSize(0) - startQueryId; if (numQueries > queriesPerBlock) { numQueries = queriesPerBlock; } for (int query = 0; query < numQueries; ++query) { auto queryId = startQueryId + query; auto querySubQuantizer = queries[queryId][subQuantizer * DimsPerSubQuantizer].data(); // Load current query vector for (int i = threadIdx.x; i < DimsPerSubQuantizer; i += blockDim.x) { smemQuery[i] = querySubQuantizer[i]; } // Load list of coarse centroids found for (int i = threadIdx.x; i < topQueryToCentroid.getSize(1); i += blockDim.x) { coarseIds[i] = topQueryToCentroid[queryId][i]; } // We need coarseIds below // FIXME: investigate loading separately, so we don't need this __syncthreads(); // Preload first buffer of residual data if (isLoadingThread) { for (int i = loadingThreadId; i < DimsPerSubQuantizer; i += blockDim.x - codesPerSubQuantizer) { auto coarseId = coarseIds[0]; // In case NaNs were in the original query data coarseId = coarseId == -1 ? 
0 : coarseId; auto coarseCentroidSubQuantizer = coarseCentroids[coarseId][subQuantizer * dimsPerSubQuantizer].data(); if (L2Distance) { smemResidual1[i] = smemQuery[i] - ConvertTo<float>::to(coarseCentroidSubQuantizer[i]); } else { smemResidual1[i] = ConvertTo<float>::to(coarseCentroidSubQuantizer[i]); } } } // The block walks the list for a single query for (int coarse = 0; coarse < topQueryToCentroid.getSize(1); ++coarse) { // Wait for smemResidual1 to be loaded __syncthreads(); if (isLoadingThread) { // Preload second buffer of residual data for (int i = loadingThreadId; i < DimsPerSubQuantizer; i += blockDim.x - codesPerSubQuantizer) { // FIXME: try always making this centroid id 0 so we can // terminate if (coarse != (topQueryToCentroid.getSize(1) - 1)) { auto coarseId = coarseIds[coarse + 1]; // In case NaNs were in the original query data coarseId = coarseId == -1 ? 0 : coarseId; auto coarseCentroidSubQuantizer = coarseCentroids[coarseId] [subQuantizer * dimsPerSubQuantizer].data(); if (L2Distance) { smemResidual2[i] = smemQuery[i] - ConvertTo<float>::to(coarseCentroidSubQuantizer[i]); } else { smemResidual2[i] = ConvertTo<float>::to(coarseCentroidSubQuantizer[i]); } } } } else { // These are the processing threads float dist = 0.0f; constexpr int kUnroll = 4; constexpr int kRemainder = DimsPerSubQuantizer % kUnroll; constexpr int kRemainderBase = DimsPerSubQuantizer - kRemainder; float vals[kUnroll]; // Calculate residual - pqCentroid for each dim that we're // processing // Unrolled loop if (L2Distance) { #pragma unroll for (int i = 0; i < DimsPerSubQuantizer / kUnroll; ++i) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] = smemResidual1[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] -= subQuantizerData[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] *= vals[j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { dist += vals[j]; } } } else { // Inner product: query slice against the reconstructed sub-quantizer // for this coarse cell (query o (centroid + subQCentroid)) #pragma unroll for (int i = 0; i < DimsPerSubQuantizer / kUnroll; ++i) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] = smemResidual1[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] += subQuantizerData[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] *= smemQuery[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { dist += vals[j]; } } } // Remainder loop if (L2Distance) { #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] = smemResidual1[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] -= subQuantizerData[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] *= vals[j]; } } else { // Inner product // Inner product: query slice against the reconstructed sub-quantizer // for this coarse cell (query o (centroid + subQCentroid)) #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] = smemResidual1[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] += subQuantizerData[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] *= smemQuery[kRemainderBase + j]; } } #pragma unroll for (int j = 0; j < kRemainder; ++j) { dist += vals[j]; } // We have the distance for our code; write it out outCodeDistances[queryId][coarse][subQuantizer][code] = ConvertTo<OutCodeT>::to(dist); } // !isLoadingThread // Swap residual 
buffers float* tmp = smemResidual1; smemResidual1 = smemResidual2; smemResidual2 = tmp; } } } template <typename CentroidT> __global__ void residualVector(Tensor<float, 2, true> queries, Tensor<CentroidT, 2, true> coarseCentroids, Tensor<int, 2, true> topQueryToCentroid, int numSubDim, // output is transposed: // (sub q)(query id)(centroid id)(sub dim) Tensor<float, 4, true> residual) { // block x is query id // block y is centroid id // thread x is dim auto queryId = blockIdx.x; auto centroidId = blockIdx.y; int realCentroidId = topQueryToCentroid[queryId][centroidId]; for (int dim = threadIdx.x; dim < queries.getSize(1); dim += blockDim.x) { float q = queries[queryId][dim]; float c = ConvertTo<float>::to(coarseCentroids[realCentroidId][dim]); residual[dim / numSubDim][queryId][centroidId][dim % numSubDim] = q - c; } } template <typename CentroidT> void runResidualVector(Tensor<float, 3, true>& pqCentroids, Tensor<float, 2, true>& queries, Tensor<CentroidT, 2, true>& coarseCentroids, Tensor<int, 2, true>& topQueryToCentroid, Tensor<float, 4, true>& residual, cudaStream_t stream) { auto grid = dim3(topQueryToCentroid.getSize(0), topQueryToCentroid.getSize(1)); auto block = dim3(std::min(queries.getSize(1), getMaxThreadsCurrentDevice())); residualVector<<<grid, block, 0, stream>>>( queries, coarseCentroids, topQueryToCentroid, pqCentroids.getSize(1), residual); CUDA_TEST_ERROR(); } template <typename CentroidT> void runPQCodeDistancesMM(Tensor<float, 3, true>& pqCentroids, Tensor<float, 2, true>& queries, Tensor<CentroidT, 2, true>& coarseCentroids, Tensor<int, 2, true>& topQueryToCentroid, NoTypeTensor<4, true>& outCodeDistances, bool useFloat16Lookup, DeviceMemory& mem, cublasHandle_t handle, cudaStream_t stream) { // Calculate (q - c) residual vector // (sub q)(query id)(centroid id)(sub dim) DeviceTensor<float, 4, true> residual( mem, {pqCentroids.getSize(0), topQueryToCentroid.getSize(0), topQueryToCentroid.getSize(1), pqCentroids.getSize(1)}, stream); runResidualVector(pqCentroids, queries, coarseCentroids, topQueryToCentroid, residual, stream); // Calculate ||q - c||^2 DeviceTensor<float, 1, true> residualNorms( mem, {pqCentroids.getSize(0) * topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1)}, stream); auto residualView2 = residual.view<2>( {pqCentroids.getSize(0) * topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1), pqCentroids.getSize(1)}); runL2Norm(residualView2, true, residualNorms, true, stream); // Perform a batch MM: // (sub q) x {(q * c)(sub dim) x (sub dim)(code)} => // (sub q) x {(q * c)(code)} auto residualView3 = residual.view<3>( {pqCentroids.getSize(0), topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1), pqCentroids.getSize(1)}); DeviceTensor<float, 3, true> residualDistance( mem, {pqCentroids.getSize(0), topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1), pqCentroids.getSize(2)}, stream); runIteratedMatrixMult(residualDistance, false, residualView3, false, pqCentroids, false, -2.0f, 0.0f, handle, stream); // Sum ||q - c||^2 along rows auto residualDistanceView2 = residualDistance.view<2>( {pqCentroids.getSize(0) * topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1), pqCentroids.getSize(2)}); runSumAlongRows(residualNorms, residualDistanceView2, false, stream); Tensor<float, 4, true> outCodeDistancesF; DeviceTensor<float, 4, true> outCodeDistancesFloatMem; if (useFloat16Lookup) { outCodeDistancesFloatMem = DeviceTensor<float, 4, true>( mem, {outCodeDistances.getSize(0), outCodeDistances.getSize(1), 
outCodeDistances.getSize(2), outCodeDistances.getSize(3)}, stream); outCodeDistancesF = outCodeDistancesFloatMem; } else { outCodeDistancesF = outCodeDistances.toTensor<float>(); } // Transpose -2(sub q)(q * c)(code) to -2(q * c)(sub q)(code) (which // is where we build our output distances) auto outCodeDistancesView = outCodeDistancesF.view<3>( {topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1), outCodeDistances.getSize(2), outCodeDistances.getSize(3)}); runTransposeAny(residualDistance, 0, 1, outCodeDistancesView, stream); // Calculate code norms per each sub-dim // (sub q)(sub dim)(code) is pqCentroids // transpose to (sub q)(code)(sub dim) DeviceTensor<float, 3, true> pqCentroidsTranspose( mem, {pqCentroids.getSize(0), pqCentroids.getSize(2), pqCentroids.getSize(1)}, stream); runTransposeAny(pqCentroids, 1, 2, pqCentroidsTranspose, stream); auto pqCentroidsTransposeView = pqCentroidsTranspose.view<2>( {pqCentroids.getSize(0) * pqCentroids.getSize(2), pqCentroids.getSize(1)}); DeviceTensor<float, 1, true> pqCentroidsNorm( mem, {pqCentroids.getSize(0) * pqCentroids.getSize(2)}, stream); runL2Norm(pqCentroidsTransposeView, true, pqCentroidsNorm, true, stream); // View output as (q * c)(sub q * code), and add centroid norm to // each row auto outDistancesCodeViewCols = outCodeDistancesView.view<2>( {topQueryToCentroid.getSize(0) * topQueryToCentroid.getSize(1), outCodeDistances.getSize(2) * outCodeDistances.getSize(3)}); runSumAlongColumns(pqCentroidsNorm, outDistancesCodeViewCols, stream); #ifdef FAISS_USE_FLOAT16 if (useFloat16Lookup) { // Need to convert back auto outCodeDistancesH = outCodeDistances.toTensor<half>(); convertTensor<float, half, 4>(stream, outCodeDistancesF, outCodeDistancesH); } #endif } template <typename CentroidT> void runPQCodeDistances(Tensor<float, 3, true>& pqCentroids, Tensor<float, 2, true>& queries, Tensor<CentroidT, 2, true>& coarseCentroids, Tensor<int, 2, true>& topQueryToCentroid, NoTypeTensor<4, true>& outCodeDistances, bool l2Distance, bool useFloat16Lookup, cudaStream_t stream) { const auto numSubQuantizers = pqCentroids.getSize(0); const auto dimsPerSubQuantizer = pqCentroids.getSize(1); const auto codesPerSubQuantizer = pqCentroids.getSize(2); // FIXME: tune // Reuse of pq centroid data is based on both # of queries * nprobe, // and we should really be tiling in both dimensions constexpr int kQueriesPerBlock = 8; auto grid = dim3(utils::divUp(queries.getSize(0), kQueriesPerBlock), numSubQuantizers); // Reserve one block of threads for double buffering // FIXME: probably impractical for large # of dims? 
auto loadingThreads = utils::roundUp(dimsPerSubQuantizer, kWarpSize); auto block = dim3(codesPerSubQuantizer + loadingThreads); auto smem = (3 * dimsPerSubQuantizer) * sizeof(float) + topQueryToCentroid.getSize(1) * sizeof(int); #ifdef FAISS_USE_FLOAT16 #define RUN_CODE(DIMS, L2) \ do { \ if (useFloat16Lookup) { \ auto outCodeDistancesT = outCodeDistances.toTensor<half>(); \ \ pqCodeDistances<half, CentroidT, DIMS, L2><<<grid, block, smem, stream>>>( \ queries, kQueriesPerBlock, \ coarseCentroids, pqCentroids, \ topQueryToCentroid, outCodeDistancesT); \ } else { \ auto outCodeDistancesT = outCodeDistances.toTensor<float>(); \ \ pqCodeDistances<float, CentroidT, DIMS, L2><<<grid, block, smem, stream>>>( \ queries, kQueriesPerBlock, \ coarseCentroids, pqCentroids, \ topQueryToCentroid, outCodeDistancesT); \ } \ } while (0) #else #define RUN_CODE(DIMS, L2) \ do { \ auto outCodeDistancesT = outCodeDistances.toTensor<float>(); \ pqCodeDistances<float, CentroidT, DIMS, L2><<<grid, block, smem, stream>>>( \ queries, kQueriesPerBlock, \ coarseCentroids, pqCentroids, \ topQueryToCentroid, outCodeDistancesT); \ } while (0) #endif #define CODE_L2(DIMS) \ do { \ if (l2Distance) { \ RUN_CODE(DIMS, true); \ } else { \ RUN_CODE(DIMS, false); \ } \ } while (0) switch (dimsPerSubQuantizer) { case 1: CODE_L2(1); break; case 2: CODE_L2(2); break; case 3: CODE_L2(3); break; case 4: CODE_L2(4); break; case 6: CODE_L2(6); break; case 8: CODE_L2(8); break; case 10: CODE_L2(10); break; case 12: CODE_L2(12); break; case 16: CODE_L2(16); break; case 20: CODE_L2(20); break; case 24: CODE_L2(24); break; case 28: CODE_L2(28); break; case 32: CODE_L2(32); break; // FIXME: larger sizes require too many registers - we need the // MM implementation working default: FAISS_THROW_MSG("Too many dimensions (>32) per subquantizer " "not currently supported"); } #undef RUN_CODE #undef CODE_L2 CUDA_TEST_ERROR(); } } } // namespace
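/*
 * A host-side reference (not part of Faiss) of what runPQCodeDistances
 * produces in the L2 case: for every (query, probed coarse cell,
 * sub-quantizer, code) entry, the squared distance between the query
 * residual restricted to that sub-quantizer and the corresponding PQ
 * centroid.  Layouts mirror the tensors above: pqCentroids is
 * [subQ][dimPerSubQ][code], coarseCentroids is [cell][dim], and the output
 * is [query][probe][subQ][code], all row-major and flattened.
 */
#include <vector>

inline void pqCodeDistancesRef(
    const std::vector<float>& queries, int numQueries, int dim,
    const std::vector<float>& coarseCentroids,
    const std::vector<int>& topQueryToCentroid, int nprobe,
    const std::vector<float>& pqCentroids, int numSubQ, int dimPerSubQ, int codes,
    std::vector<float>& out)   // size numQueries * nprobe * numSubQ * codes
{
    for (int q = 0; q < numQueries; ++q)
        for (int p = 0; p < nprobe; ++p) {
            int cell = topQueryToCentroid[q * nprobe + p];
            if (cell == -1) cell = 0;                  // mirrors the NaN guard in the kernel
            for (int sq = 0; sq < numSubQ; ++sq)
                for (int c = 0; c < codes; ++c) {
                    float dist = 0.0f;
                    for (int d = 0; d < dimPerSubQ; ++d) {
                        const int fullDim = sq * dimPerSubQ + d;
                        const float residual = queries[q * dim + fullDim]
                                             - coarseCentroids[cell * dim + fullDim];
                        const float diff = residual
                                         - pqCentroids[(sq * dimPerSubQ + d) * codes + c];
                        dist += diff * diff;
                    }
                    out[((q * nprobe + p) * numSubQ + sq) * codes + c] = dist;
                }
        }
}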
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/limits.hpp" #include "opencv2/core/cuda/vec_distance.hpp" #include "opencv2/core/cuda/datamov_utils.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" namespace cv { namespace cuda { namespace device { namespace bf_knnmatch { /////////////////////////////////////////////////////////////////////////////// // Reduction template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, float* s_distance, int* s_trainIdx) { #if __CUDA_ARCH__ >= 300 CV_UNUSED(s_distance); CV_UNUSED(s_trainIdx); float d1, d2; int i1, i2; #pragma unroll for (int i = BLOCK_SIZE / 2; i >= 1; i /= 2) { d1 = shfl_down(bestDistance1, i, BLOCK_SIZE); d2 = shfl_down(bestDistance2, i, BLOCK_SIZE); i1 = shfl_down(bestTrainIdx1, i, BLOCK_SIZE); i2 = shfl_down(bestTrainIdx2, i, BLOCK_SIZE); if (bestDistance1 < d1) { if (d1 < bestDistance2) { bestDistance2 = d1; bestTrainIdx2 = i1; } } else { bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestDistance1 = d1; bestTrainIdx1 = i1; if (d2 < bestDistance2) { bestDistance2 = d2; bestTrainIdx2 = i2; } } } #else float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; #endif } template <int BLOCK_SIZE> __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2, float* s_distance, int* s_trainIdx, int* s_imgIdx) { #if __CUDA_ARCH__ >= 300 CV_UNUSED(s_distance); CV_UNUSED(s_trainIdx); CV_UNUSED(s_imgIdx); float d1, d2; int i1, i2; int j1, j2; #pragma unroll for (int i = BLOCK_SIZE / 2; i >= 1; i /= 2) { d1 = shfl_down(bestDistance1, i, BLOCK_SIZE); d2 = shfl_down(bestDistance2, i, BLOCK_SIZE); i1 = shfl_down(bestTrainIdx1, i, BLOCK_SIZE); i2 = shfl_down(bestTrainIdx2, i, BLOCK_SIZE); j1 = shfl_down(bestImgIdx1, i, BLOCK_SIZE); j2 = shfl_down(bestImgIdx2, i, BLOCK_SIZE); if (bestDistance1 < d1) { if (d1 < bestDistance2) { bestDistance2 = d1; bestTrainIdx2 = i1; bestImgIdx2 = j1; } } else { bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx2 = bestImgIdx1; bestDistance1 = d1; bestTrainIdx1 = i1; bestImgIdx1 = j1; if (d2 < bestDistance2) { bestDistance2 = d2; bestTrainIdx2 = i2; bestImgIdx2 = j2; } } } #else float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 
= numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; s_distance += threadIdx.y * BLOCK_SIZE; s_trainIdx += threadIdx.y * BLOCK_SIZE; s_imgIdx += threadIdx.y * BLOCK_SIZE; s_distance[threadIdx.x] = bestDistance1; s_trainIdx[threadIdx.x] = bestTrainIdx1; s_imgIdx[threadIdx.x] = bestImgIdx1; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance1) { myBestDistance2 = myBestDistance1; myBestTrainIdx2 = myBestTrainIdx1; myBestImgIdx2 = myBestImgIdx1; myBestDistance1 = val; myBestTrainIdx1 = s_trainIdx[i]; myBestImgIdx1 = s_imgIdx[i]; } else if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } __syncthreads(); s_distance[threadIdx.x] = bestDistance2; s_trainIdx[threadIdx.x] = bestTrainIdx2; s_imgIdx[threadIdx.x] = bestImgIdx2; __syncthreads(); if (threadIdx.x == 0) { #pragma unroll for (int i = 0; i < BLOCK_SIZE; ++i) { float val = s_distance[i]; if (val < myBestDistance2) { myBestDistance2 = val; myBestTrainIdx2 = s_trainIdx[i]; myBestImgIdx2 = s_imgIdx[i]; } } } bestDistance1 = myBestDistance1; bestDistance2 = myBestDistance2; bestTrainIdx1 = myBestTrainIdx1; bestTrainIdx2 = myBestTrainIdx2; bestImgIdx1 = myBestImgIdx1; bestImgIdx2 = myBestImgIdx2; #endif } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled Cached template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> __device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query) { #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? 
query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < train.cols) { T val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? 
MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN); loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? 
MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match Unrolled template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const 
PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Match template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __device__ void loop(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask, typename Dist::value_type* s_query, typename Dist::value_type* s_train, float& bestDistance1, float& bestDistance2, int& bestTrainIdx1, int& bestTrainIdx2, int& bestImgIdx1, int& bestImgIdx2) { for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t) { Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; 
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; if (loadX < query.cols) { T val; ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val); s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val; ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val); s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } typename Dist::result_type distVal = dist; const int trainIdx = t * BLOCK_SIZE + threadIdx.x; if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx)) { if (distVal < bestDistance1) { bestImgIdx2 = bestImgIdx1; bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx1 = imgIdx; bestDistance1 = distVal; bestTrainIdx1 = trainIdx; } else if (distVal < bestDistance2) { bestImgIdx2 = imgIdx; bestDistance2 = distVal; bestTrainIdx2 = trainIdx; } } } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; int myBestTrainIdx2 = -1; loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2); __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance) { extern __shared__ int smem[]; const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); float myBestDistance1 = numeric_limits<float>::max(); float myBestDistance2 = numeric_limits<float>::max(); int myBestTrainIdx1 = -1; 
int myBestTrainIdx2 = -1; int myBestImgIdx1 = -1; int myBestImgIdx2 = -1; Mask m = mask; for (int imgIdx = 0; imgIdx < n; ++imgIdx) { const PtrStepSz<T> train = trains[imgIdx]; m.next(); loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2); } __syncthreads(); float* s_distance = (float*)(smem); int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE); int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE); findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx); if (queryIdx < query.rows && threadIdx.x == 0) { bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2); bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2); bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2); } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // knnMatch 2 dispatcher template <typename Dist, typename T, typename Mask> void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); }*/ else { match<16, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream); } } template <typename Dist, typename T, typename Mask> void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (query.cols <= 64) { matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 128) { matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, 
static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } /*else if (query.cols <= 256) { matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 512) { matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } else if (query.cols <= 1024) { matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); }*/ else { match<16, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream); } } /////////////////////////////////////////////////////////////////////////////// // Calc distance kernel template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> __global__ void calcDistanceUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = (typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; #pragma unroll for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> void calcDistanceUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> __global__ void calcDistance(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist) { extern __shared__ int smem[]; const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y; const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x; typename Dist::value_type* s_query = 
(typename Dist::value_type*)(smem); typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE); Dist dist; for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i) { const int loadX = threadIdx.x + i * BLOCK_SIZE; if (loadX < query.cols) { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX]; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX]; } else { s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0; s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0; } __syncthreads(); #pragma unroll for (int j = 0; j < BLOCK_SIZE; ++j) dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]); __syncthreads(); } if (queryIdx < query.rows && trainIdx < train.rows) { float distVal = numeric_limits<float>::max(); if (mask(queryIdx, trainIdx)) distVal = (typename Dist::result_type)dist; allDist.ptr(queryIdx)[trainIdx] = distVal; } } template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> void calcDistance(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, BLOCK_SIZE); const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE)); const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int); calcDistance<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////////////////////// // Calc Distance dispatcher template <typename Dist, typename T, typename Mask> void calcDistanceDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream) { if (query.cols <= 64) { calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 128) { calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream); } /*else if (query.cols <= 256) { calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 512) { calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream); } else if (query.cols <= 1024) { calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream); }*/ else { calcDistance<16, Dist>(query, train, mask, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // find knn match kernel template <int BLOCK_SIZE> __global__ void findBestMatch(PtrStepSzf allDist, int i, PtrStepi trainIdx, PtrStepf distance) { const int SMEM_SIZE = BLOCK_SIZE > 64 ? 
BLOCK_SIZE : 64; __shared__ float s_dist[SMEM_SIZE]; __shared__ int s_trainIdx[SMEM_SIZE]; const int queryIdx = blockIdx.x; float* allDistRow = allDist.ptr(queryIdx); float dist = numeric_limits<float>::max(); int bestIdx = -1; for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE) { float reg = allDistRow[i]; if (reg < dist) { dist = reg; bestIdx = i; } } s_dist[threadIdx.x] = dist; s_trainIdx[threadIdx.x] = bestIdx; __syncthreads(); reduceKeyVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<float>()); if (threadIdx.x == 0) { if (dist < numeric_limits<float>::max()) { allDistRow[bestIdx] = numeric_limits<float>::max(); trainIdx.ptr(queryIdx)[i] = bestIdx; distance.ptr(queryIdx)[i] = dist; } } } template <int BLOCK_SIZE> void findKnnMatch(int k, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSzf& allDist, cudaStream_t stream) { const dim3 block(BLOCK_SIZE, 1, 1); const dim3 grid(trainIdx.rows, 1, 1); for (int i = 0; i < k; ++i) { findBestMatch<BLOCK_SIZE><<<grid, block, 0, stream>>>(allDist, i, trainIdx, distance); cudaSafeCall( cudaGetLastError() ); } if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void findKnnMatchDispatcher(int k, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { findKnnMatch<256>(k, static_cast<PtrStepSzi>(trainIdx), static_cast<PtrStepSzf>(distance), allDist, stream); } /////////////////////////////////////////////////////////////////////////////// // knn match Dispatcher template <typename Dist, typename T, typename Mask> void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, int k, const Mask& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (k == 2) { match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, stream); } else { calcDistanceDispatcher<Dist>(query, train, mask, allDist, stream); findKnnMatchDispatcher(k, trainIdx, distance, allDist, stream); } } /////////////////////////////////////////////////////////////////////////////// // knn match caller template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (mask.data) matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& 
distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (mask.data) matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } //template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream) { if (mask.data) matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, stream); else matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, stream); } template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchHamming_gpu<ushort>(const 
PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); //template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, cudaStream_t stream); template <typename T> void match2L1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (masks.data) match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } template void match2L1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template <typename T> void match2L2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (masks.data) match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } //template void match2L2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const 
PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2L2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2L2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template <typename T> void match2Hamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream) { if (masks.data) match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, stream); else match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, stream); } template void match2Hamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2Hamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2Hamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); //template void match2Hamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); template void match2Hamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, cudaStream_t stream); } // namespace bf_knnmatch }}} // namespace cv { namespace cuda { namespace cudev { #endif /* CUDA_DISABLER */
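// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original bf_knnmatch code): every
// matching kernel above keeps, per query descriptor, the two smallest
// distances together with their train/image indices. The host-side reference
// below restates that 2-NN update rule and can be used to validate kernel
// output; the function name knn2ReferenceUpdate is hypothetical and exists
// only for this example.
static inline void knn2ReferenceUpdate(float distVal, int trainIdx, int imgIdx,
                                       float& bestDistance1, float& bestDistance2,
                                       int& bestTrainIdx1, int& bestTrainIdx2,
                                       int& bestImgIdx1, int& bestImgIdx2)
{
    if (distVal < bestDistance1)
    {
        // New overall best match: the previous best is demoted to second best.
        bestDistance2 = bestDistance1; bestTrainIdx2 = bestTrainIdx1; bestImgIdx2 = bestImgIdx1;
        bestDistance1 = distVal;       bestTrainIdx1 = trainIdx;      bestImgIdx1 = imgIdx;
    }
    else if (distVal < bestDistance2)
    {
        // Improves only the second-best match.
        bestDistance2 = distVal; bestTrainIdx2 = trainIdx; bestImgIdx2 = imgIdx;
    }
}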
#include "super_scaler.h" #include <cstdint> #include <cstdio> #include <cstdlib> #include <mutex> #include <queue> #include <thread> #include "mpi.h" #include "nccl.h" #include "unistd.h" #define NNSCALER_MPICHECK(cmd) \ do \ { \ int e = cmd; \ if (e != MPI_SUCCESS) \ { \ printf("Failed: MPI error %s:%d '%d'\n", __FILE__, __LINE__, e); \ exit(EXIT_FAILURE); \ } \ } while (0) #define NNSCALER_CUDACHECK(cmd) \ do \ { \ cudaError_t e = cmd; \ if (e != cudaSuccess) \ { \ printf("Failed: Cuda error %s:%d '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ } while (0) #define NNSCALER_NCCLCHECK(cmd) \ do \ { \ ncclResult_t r = cmd; \ if (r != ncclSuccess) \ { \ printf("Failed, NCCL error %s:%d '%s'\n", __FILE__, __LINE__, ncclGetErrorString(r)); \ exit(EXIT_FAILURE); \ } \ } while (0) class TaskQueue { public: TaskQueue() { task_enable = true; monitor = std::thread(&TaskQueue::run, this); } void push(std::thread&& thread) { mtx.lock(); threads.push(move(thread)); mtx.unlock(); } bool is_empty() { bool isempty; mtx.lock(); isempty = threads.empty(); mtx.unlock(); return isempty; } void run() { while (task_enable) { if (!is_empty()) { mtx.lock(); auto& task = threads.front(); mtx.unlock(); task.join(); threads.pop(); } else { std::this_thread::sleep_for(std::chrono::milliseconds(10)); } } } void start() { monitor.detach(); } void end() { task_enable = false; } private: std::queue<std::thread> threads; std::mutex mtx; std::thread monitor; bool task_enable; } task_queue; // Context of super_scaler instance static int super_scaler_myRank = -1; static int super_scaler_nRanks = -1; static int super_scaler_localRank = -1; static int nDev = 1; static cudaStream_t* stream; static ncclUniqueId id; static ncclComm_t* comm; static uint32_t align_to_block_size(uint32_t threads, uint32_t block_size) { if (threads > (1u << 31) - 1) { throw std::runtime_error("Cuda can't handle threads > 2^31 - 1."); } uint32_t r = (threads + block_size - 1) / block_size; return r; } static uint64_t super_scaler_getHostHash(const char* string) { // Based on DJB2, result = result * 33 + char uint64_t result = 5381; for (int c = 0; string[c] != '\0'; c++) { result = ((result << 5) + result) + string[c]; } return result; } static void super_scaler_getHostName(char* hostname, int maxlen) { gethostname(hostname, maxlen); for (int i = 0; i < maxlen; i++) { if (hostname[i] == '.') { hostname[i] = '\0'; return; } } } __global__ static void gradientsAverage(float* gradients, int size, int super_scaler_nRanks) { uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { gradients[tid] /= super_scaler_nRanks; } } void super_scaler_initialization() { //initializing MPI NNSCALER_MPICHECK(MPI_Init(NULL, NULL)); NNSCALER_MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &super_scaler_myRank)); NNSCALER_MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &super_scaler_nRanks)); //calculating super_scaler_localRank which is used in selecting a GPU uint64_t hostHashs[super_scaler_nRanks]; char hostname[1024]; super_scaler_getHostName(hostname, 1024); hostHashs[super_scaler_myRank] = super_scaler_getHostHash(hostname); NNSCALER_MPICHECK(MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, hostHashs, sizeof(uint64_t), MPI_BYTE, MPI_COMM_WORLD)); //printf("[SuperScaler:Info] TotalRank = %d; MyRank = %d; HostName = %s; HostHash = %u;\n", // super_scaler_nRanks, super_scaler_myRank, hostname, hostHashs[super_scaler_myRank]); auto dev_no = getenv("NNFUSION_DEV_NO"); if(dev_no == nullptr) { super_scaler_localRank = 0; for (int p = 0; p 
< super_scaler_nRanks; p++) { if (p == super_scaler_myRank) { break; } if (hostHashs[p] == hostHashs[super_scaler_myRank]) { super_scaler_localRank++; } } } else { super_scaler_localRank = atoi(dev_no); } int dev_cnt; cudaGetDeviceCount(&dev_cnt); if(super_scaler_localRank >= dev_cnt) { // printf("[SuperScaler:Warning] localRand is more than device count %d >= %d.\n", // super_scaler_localRank, dev_cnt); super_scaler_localRank = 0; } // printf("[SuperScaler:Info] Choose device: %d.\n", super_scaler_localRank); NNSCALER_CUDACHECK(cudaSetDevice(super_scaler_localRank)); // Stream per device; stream = new cudaStream_t; NNSCALER_CUDACHECK(cudaStreamCreate(stream)); //generating NCCL unique ID at one process and broadcasting it to all if (super_scaler_myRank == 0) ncclGetUniqueId(&id); NNSCALER_MPICHECK(MPI_Bcast((void*)&id, sizeof(id), MPI_BYTE, 0, MPI_COMM_WORLD)); //initializing NCCL, group API is required around ncclCommInitRank as it is called across multiple GPUs in each thread/process comm = new ncclComm_t; NNSCALER_NCCLCHECK(ncclGroupStart()); NNSCALER_NCCLCHECK(ncclCommInitRank(comm, super_scaler_nRanks, id, super_scaler_myRank)); NNSCALER_NCCLCHECK(ncclGroupEnd()); //todo: how to know how many processes issued and sync? task_queue.start(); } void super_scaler_finalization() { task_queue.end(); MPI_Barrier(MPI_COMM_WORLD); //finalizing NCCL NNSCALER_NCCLCHECK(ncclCommDestroy(*comm)); NNSCALER_CUDACHECK(cudaStreamDestroy(*stream)); //finalizing MPI NNSCALER_MPICHECK(MPI_Finalize()); delete comm; delete stream; } void super_scaler_sync() { while (!task_queue.is_empty()) std::this_thread::sleep_for(std::chrono::milliseconds(10)); MPI_Barrier(MPI_COMM_WORLD); } void super_scaler_all_reduce(float* gradients, float* out_gradients, int size, void* exestream, void (*callback)(void*), void* callback_context) { cudaStream_t* run_on_stream = exestream==nullptr?stream:(cudaStream_t*)exestream; //get gradients after allreduce if(super_scaler_nRanks>1) { uint32_t block_size_x = 512; size_t block_cnt = align_to_block_size(size, block_size_x); //todo: Could this change device for main thread? gradientsAverage<<<block_cnt, block_size_x, 0, *run_on_stream>>>( gradients, size, super_scaler_nRanks); } //calling NCCL communication API. Group API is required when using multiple devices per thread/process NNSCALER_NCCLCHECK(ncclGroupStart()); NNSCALER_NCCLCHECK(ncclAllReduce((const void*)gradients, (void*)out_gradients, size, ncclFloat, ncclSum, *comm, *run_on_stream)); NNSCALER_NCCLCHECK(ncclGroupEnd()); //call back if (callback != nullptr) (*callback)(callback_context); } /* void super_scaler_all_reduce_async(float* gradients, float* out_gradients, int size, cudaStream_t* exestream, void (*callback)(void*), void* callback_context) { std::thread NNSCALER_allreduce( super_scaler_all_reduce, gradients, out_gradients, size, exestream, callback, callback_context); task_queue.push(move(NNSCALER_allreduce)); } */ int super_scaler_get_localrank() { return super_scaler_localRank; }
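// ----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original super_scaler source): a
// minimal stand-alone host program that averages a local gradient buffer
// across all ranks through the API defined above. The buffer size, the use of
// the library's internal stream (exestream == nullptr) and the absence of a
// completion callback are assumptions made only for this example.
#include "super_scaler.h"
#include <cuda_runtime.h>

int main()
{
    super_scaler_initialization();          // MPI_Init + NCCL communicator, selects a GPU

    const int size = 1 << 20;
    float* d_grad = nullptr;
    float* d_avg = nullptr;
    cudaMalloc(&d_grad, size * sizeof(float));
    cudaMalloc(&d_avg, size * sizeof(float));
    // ... fill d_grad with this rank's local gradients ...

    // Scales d_grad by 1/nRanks and all-reduces (sum) into d_avg, so d_avg
    // ends up holding the cross-rank average of the gradients.
    super_scaler_all_reduce(d_grad, d_avg, size, /*exestream=*/nullptr,
                            /*callback=*/nullptr, /*callback_context=*/nullptr);
    cudaDeviceSynchronize();                // wait for the enqueued NCCL work

    super_scaler_sync();                    // drain the task queue + MPI barrier
    cudaFree(d_grad);
    cudaFree(d_avg);
    super_scaler_finalization();            // destroy NCCL comm and stream, MPI_Finalize
    return 0;
}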
* \file dnn/src/cuda/cutlass/util.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) #include <complex> #include <iosfwd> #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wreorder" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wunused-parameter" #include "cutlass/complex.h" #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/numeric_types.h" #include "src/cuda/cutlass/util.h" #pragma GCC diagnostic pop ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; Provider enumerant; } Provider_enumerants[] = { {"none", "None", Provider::kNone}, {"cutlass", "CUTLASS", Provider::kCUTLASS}, {"host", "reference_host", Provider::kReferenceHost}, {"device", "reference_device", Provider::kReferenceDevice}, {"cublas", "cuBLAS", Provider::kCUBLAS}, {"cudnn", "cuDNN", Provider::kCUDNN}, }; /// Converts a Provider enumerant to a string char const* to_string(Provider provider, bool pretty) { for (auto const& possible : Provider_enumerants) { if (provider == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Parses a Provider enumerant from a string template <> Provider from_string<Provider>(std::string const& str) { for (auto const& possible : Provider_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return Provider::kInvalid; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; GemmKind enumerant; } GemmKind_enumerants[] = { {"gemm", "<Gemm>", GemmKind::kGemm}, {"spgemm", "<Sparse>", GemmKind::kSparse}, {"universal", "<Universal>", GemmKind::kUniversal}, {"planar_complex", "<PlanarComplex>", GemmKind::kPlanarComplex}, {"planar_complex_array", "<PlanarComplexArray>", GemmKind::kPlanarComplexArray}, }; /// Converts a GemmKind enumerant to a string char const* to_string(GemmKind type, bool pretty) { for (auto const& possible : GemmKind_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; OperationKind enumerant; } OperationKind_enumerants[] = { {"eq_gemm", "EqGemm", OperationKind::kEqGemm}, {"gemm", "Gemm", OperationKind::kGemm}, {"conv2d", "Conv2d", OperationKind::kConv2d}, {"conv3d", "Conv3d", OperationKind::kConv3d}, {"spgemm", "SparseGemm", OperationKind::kSparseGemm}, }; /// Converts a Status enumerant to a string char const* to_string(OperationKind enumerant, bool pretty) { for (auto const& possible : OperationKind_enumerants) { if (enumerant == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Converts a Status enumerant from a string template <> OperationKind from_string<OperationKind>(std::string const& str) { for (auto const& possible : OperationKind_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return OperationKind::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; Status enumerant; } Status_enumerants[] = { {"success", "Success", Status::kSuccess}, {"misaligned_operand", "Error: misaligned operand", Status::kErrorMisalignedOperand}, {"invalid_problem", "Error: invalid problem", Status::kErrorInvalidProblem}, {"not_supported", "Error: not supported", Status::kErrorNotSupported}, {"internal", "Error: internal", Status::kErrorInternal}}; /// Converts a Status enumerant to a string char const* to_string(Status status, bool pretty) { for (auto const& possible : Status_enumerants) { if (status == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /// Converts a Status enumerant from a string template <> Status from_string<Status>(std::string const& str) { for (auto const& possible : Status_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return Status::kInvalid; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; NumericTypeID enumerant; } NumericTypeID_enumerants[] = { {"unknown", "<unkown>", NumericTypeID::kUnknown}, {"void", "Void", NumericTypeID::kVoid}, {"b1", "B1", NumericTypeID::kB1}, {"u2", "U2", NumericTypeID::kU2}, {"u4", "U4", NumericTypeID::kU4}, {"u8", "U8", NumericTypeID::kU8}, {"u16", "U16", NumericTypeID::kU16}, {"u32", "U32", NumericTypeID::kU32}, {"u64", "U64", NumericTypeID::kU64}, {"s2", "S2", NumericTypeID::kS2}, {"s4", "S4", NumericTypeID::kS4}, {"s8", "S8", NumericTypeID::kS8}, {"s16", "S16", NumericTypeID::kS16}, {"s32", "S32", NumericTypeID::kS32}, {"s64", "S64", NumericTypeID::kS64}, {"f16", "F16", NumericTypeID::kF16}, {"bf16", "BF16", NumericTypeID::kBF16}, {"f32", "F32", NumericTypeID::kF32}, {"tf32", "TF32", NumericTypeID::kTF32}, {"f64", "F64", NumericTypeID::kF64}, {"cf16", "CF16", NumericTypeID::kCF16}, {"cbf16", "CBF16", NumericTypeID::kCBF16}, {"cf32", "CF32", NumericTypeID::kCF32}, {"ctf32", "CTF32", NumericTypeID::kCTF32}, {"cf64", "CF64", NumericTypeID::kCF64}, {"cu2", "CU2", NumericTypeID::kCU2}, {"cu4", "CU4", NumericTypeID::kCU4}, {"cu8", "CU8", NumericTypeID::kCU8}, {"cu16", "CU16", NumericTypeID::kCU16}, {"cu32", "CU32", NumericTypeID::kCU32}, {"cu64", "CU64", NumericTypeID::kCU64}, {"cs2", "CS2", NumericTypeID::kCS2}, {"cs4", "CS4", NumericTypeID::kCS4}, {"cs8", "CS8", NumericTypeID::kCS8}, {"cs16", "CS16", NumericTypeID::kCS16}, {"cs32", "CS32", NumericTypeID::kCS32}, {"cs64", "CS64", NumericTypeID::kCS64}, {"*", "<unkown/enumerate all>", NumericTypeID::kUnknown}}; /// Converts a NumericTypeID enumerant to a string char const* to_string(NumericTypeID type, bool pretty) { for (auto const& possible : NumericTypeID_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /// Parses a NumericTypeID enumerant from a string template <> NumericTypeID from_string<NumericTypeID>(std::string const& str) { for (auto const& possible : NumericTypeID_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return NumericTypeID::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns the size of a data type in bits int sizeof_bits(NumericTypeID type) { switch (type) { case NumericTypeID::kF16: return 16; case NumericTypeID::kBF16: return 16; case NumericTypeID::kTF32: return 32; case NumericTypeID::kF32: return 32; case NumericTypeID::kF64: return 64; case NumericTypeID::kCF16: return 32; case NumericTypeID::kCBF16: return 32; case NumericTypeID::kCF32: return 64; case NumericTypeID::kCTF32: return 64; case NumericTypeID::kCF64: return 128; case NumericTypeID::kS2: return 2; case NumericTypeID::kS4: return 4; case NumericTypeID::kS8: return 8; case NumericTypeID::kS16: return 16; case NumericTypeID::kS32: return 32; case NumericTypeID::kS64: return 64; case NumericTypeID::kU2: return 2; case NumericTypeID::kU4: return 4; case NumericTypeID::kU8: return 8; case NumericTypeID::kU16: return 16; case NumericTypeID::kU32: return 32; case NumericTypeID::kU64: return 64; case NumericTypeID::kB1: return 1; default: break; } return 0; } /// Returns true if the numeric type is a complex data type or false if /// real-valued. bool is_complex_type(NumericTypeID type) { switch (type) { case NumericTypeID::kCF16: return true; case NumericTypeID::kCF32: return true; case NumericTypeID::kCF64: return true; case NumericTypeID::kCBF16: return true; case NumericTypeID::kCTF32: return true; default: break; } return false; } /// Returns the field underlying a complex valued type NumericTypeID get_real_type(NumericTypeID type) { switch (type) { case NumericTypeID::kCF16: return NumericTypeID::kF16; case NumericTypeID::kCF32: return NumericTypeID::kF32; case NumericTypeID::kCF64: return NumericTypeID::kF64; case NumericTypeID::kCBF16: return NumericTypeID::kBF16; case NumericTypeID::kCTF32: return NumericTypeID::kTF32; default: break; } return type; } /// Returns true if numeric type is integer bool is_integer_type(NumericTypeID type) { switch (type) { case NumericTypeID::kS2: return true; case NumericTypeID::kS4: return true; case NumericTypeID::kS8: return true; case NumericTypeID::kS16: return true; case NumericTypeID::kS32: return true; case NumericTypeID::kS64: return true; case NumericTypeID::kU2: return true; case NumericTypeID::kU4: return true; case NumericTypeID::kU8: return true; case NumericTypeID::kU16: return true; case NumericTypeID::kU32: return true; case NumericTypeID::kU64: return true; default: break; } return false; } /// Returns true if numeric type is signed bool is_signed_type(NumericTypeID type) { switch (type) { case NumericTypeID::kF16: return true; case NumericTypeID::kBF16: return true; case NumericTypeID::kTF32: return true; case NumericTypeID::kF32: return true; case NumericTypeID::kF64: return true; case NumericTypeID::kS2: return true; case NumericTypeID::kS4: return true; case NumericTypeID::kS8: return true; case NumericTypeID::kS16: return true; case NumericTypeID::kS32: return true; case NumericTypeID::kS64: return true; default: break; } return false; } /// Returns true if numeric type is a signed integer bool is_signed_integer(NumericTypeID type) { return is_integer_type(type) && 
is_signed_type(type); } /// returns true if numeric type is an unsigned integer bool is_unsigned_integer(NumericTypeID type) { return is_integer_type(type) && !is_signed_type(type); } /// Returns true if numeric type is floating-point type bool is_float_type(NumericTypeID type) { switch (type) { case NumericTypeID::kF16: return true; case NumericTypeID::kBF16: return true; case NumericTypeID::kTF32: return true; case NumericTypeID::kF32: return true; case NumericTypeID::kF64: return true; case NumericTypeID::kCF16: return true; case NumericTypeID::kCBF16: return true; case NumericTypeID::kCTF32: return true; case NumericTypeID::kCF32: return true; case NumericTypeID::kCF64: return true; default: break; } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { LayoutTypeID layout; char const* alias; } layout_aliases[] = { {LayoutTypeID::kUnknown, "unknown"}, {LayoutTypeID::kRowMajor, "row"}, {LayoutTypeID::kRowMajor, "t"}, {LayoutTypeID::kColumnMajor, "column"}, {LayoutTypeID::kColumnMajor, "col"}, {LayoutTypeID::kColumnMajor, "n"}, {LayoutTypeID::kColumnMajorInterleavedK2, "nk2"}, {LayoutTypeID::kRowMajorInterleavedK2, "tk2"}, {LayoutTypeID::kColumnMajorInterleavedK4, "nk4"}, {LayoutTypeID::kRowMajorInterleavedK4, "tk4"}, {LayoutTypeID::kColumnMajorInterleavedK16, "nk16"}, {LayoutTypeID::kRowMajorInterleavedK16, "tk16"}, {LayoutTypeID::kColumnMajorInterleavedK32, "nk32"}, {LayoutTypeID::kRowMajorInterleavedK32, "tk32"}, {LayoutTypeID::kColumnMajorInterleavedK64, "nk64"}, {LayoutTypeID::kRowMajorInterleavedK64, "tk64"}, {LayoutTypeID::kTensorNCHW, "nchw"}, {LayoutTypeID::kTensorNCDHW, "ncdhw"}, {LayoutTypeID::kTensorNHWC, "nhwc"}, {LayoutTypeID::kTensorNDHWC, "ndhwc"}, {LayoutTypeID::kTensorNC4HW4, "nc4hw4"}, {LayoutTypeID::kTensorNC8HW8, "nc8hw8"}, {LayoutTypeID::kTensorNC16HW16, "nc16hw16"}, {LayoutTypeID::kTensorNC32HW32, "nc32hw32"}, {LayoutTypeID::kTensorNC64HW64, "nc64hw64"}, {LayoutTypeID::kTensorC4RSK4, "c4rsk4"}, {LayoutTypeID::kTensorC8RSK8, "c8rsk8"}, {LayoutTypeID::kTensorC16RSK16, "c16rsk16"}, {LayoutTypeID::kTensorC32RSK32, "c32rsk32"}, {LayoutTypeID::kTensorC64RSK64, "c64rsk64"}, {LayoutTypeID::kTensorK4RSC4, "k4rsc4"}, {LayoutTypeID::kTensorCK4RS4, "ck4rs4"}, {LayoutTypeID::kTensorCK8RS8, "ck8rs8"}, {LayoutTypeID::kTensorCK16RS16, "ck16rs16"}, {LayoutTypeID::kUnknown, "*"}, {LayoutTypeID::kInvalid, nullptr}}; /// Converts a LayoutTypeID enumerant to a string char const* to_string(LayoutTypeID layout, bool pretty) { for (auto const& alias : layout_aliases) { if (alias.layout == layout) { return alias.alias; } } return pretty ? 
"Invalid" : "invalid"; } /// Parses a LayoutTypeID enumerant from a string template <> LayoutTypeID from_string<LayoutTypeID>(std::string const& str) { for (auto const& alias : layout_aliases) { if (str.compare(alias.alias) == 0) { return alias.layout; } } return LayoutTypeID::kInvalid; } /// Gets stride rank for the layout_id (static function) int get_layout_stride_rank(LayoutTypeID layout_id) { switch (layout_id) { case LayoutTypeID::kColumnMajor: return cutlass::layout::ColumnMajor::kStrideRank; case LayoutTypeID::kRowMajor: return cutlass::layout::RowMajor::kStrideRank; case LayoutTypeID::kColumnMajorInterleavedK2: return cutlass::layout::ColumnMajorInterleaved<2>::kStrideRank; case LayoutTypeID::kRowMajorInterleavedK2: return cutlass::layout::RowMajorInterleaved<2>::kStrideRank; case LayoutTypeID::kColumnMajorInterleavedK4: return cutlass::layout::ColumnMajorInterleaved<4>::kStrideRank; case LayoutTypeID::kRowMajorInterleavedK4: return cutlass::layout::RowMajorInterleaved<4>::kStrideRank; case LayoutTypeID::kColumnMajorInterleavedK16: return cutlass::layout::ColumnMajorInterleaved<16>::kStrideRank; case LayoutTypeID::kRowMajorInterleavedK16: return cutlass::layout::RowMajorInterleaved<16>::kStrideRank; case LayoutTypeID::kColumnMajorInterleavedK32: return cutlass::layout::ColumnMajorInterleaved<32>::kStrideRank; case LayoutTypeID::kRowMajorInterleavedK32: return cutlass::layout::RowMajorInterleaved<32>::kStrideRank; case LayoutTypeID::kColumnMajorInterleavedK64: return cutlass::layout::ColumnMajorInterleaved<64>::kStrideRank; case LayoutTypeID::kRowMajorInterleavedK64: return cutlass::layout::RowMajorInterleaved<64>::kStrideRank; case LayoutTypeID::kTensorNCHW: return cutlass::layout::TensorNCHW::kStrideRank; case LayoutTypeID::kTensorNHWC: return cutlass::layout::TensorNHWC::kStrideRank; case LayoutTypeID::kTensorNDHWC: return cutlass::layout::TensorNDHWC::kStrideRank; case LayoutTypeID::kTensorNC32HW32: return cutlass::layout::TensorNCxHWx<32>::kStrideRank; case LayoutTypeID::kTensorNC64HW64: return cutlass::layout::TensorNCxHWx<64>::kStrideRank; case LayoutTypeID::kTensorC32RSK32: return cutlass::layout::TensorCxRSKx<32>::kStrideRank; case LayoutTypeID::kTensorC64RSK64: return cutlass::layout::TensorCxRSKx<64>::kStrideRank; default: throw std::runtime_error( "Unsupported LayoutTypeID in LayoutType::get_stride_rank"); } } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; OpcodeClassID enumerant; } OpcodeClassID_enumerants[] = { {"simt", "<simt>", OpcodeClassID::kSimt}, {"tensorop", "<tensorop>", OpcodeClassID::kTensorOp}, {"wmmatensorop", "<wmmatensorop>", OpcodeClassID::kWmmaTensorOp}, {"wmma", "<wmma>", OpcodeClassID::kWmmaTensorOp}, }; /// Converts a OpcodeClassID enumerant to a string char const* to_string(OpcodeClassID type, bool pretty) { for (auto const& possible : OpcodeClassID_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /// Converts a OpcodeClassID enumerant from a string template <> OpcodeClassID from_string<OpcodeClassID>(std::string const& str) { for (auto const& possible : OpcodeClassID_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return OpcodeClassID::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; ComplexTransform enumerant; } ComplexTransform_enumerants[] = { {"n", "none", ComplexTransform::kNone}, {"c", "conj", ComplexTransform::kConjugate}}; /// Converts a ComplexTransform enumerant to a string char const* to_string(ComplexTransform type, bool pretty) { for (auto const& possible : ComplexTransform_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Converts a ComplexTransform enumerant from a string template <> ComplexTransform from_string<ComplexTransform>(std::string const& str) { for (auto const& possible : ComplexTransform_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return ComplexTransform::kInvalid; } static struct { char const* text; char const* pretty; SplitKMode enumerant; } SplitKMode_enumerants[] = { {"none", "<none>", SplitKMode::kNone}, {"serial", "<serial>", SplitKMode::kSerial}, {"parallel", "<parallel>", SplitKMode::kParallel}, }; /// Converts a SplitKMode enumerant to a string char const* to_string(SplitKMode type, bool pretty) { for (auto const& possible : SplitKMode_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Converts a SplitKMode enumerant from a string template <> SplitKMode from_string<SplitKMode>(std::string const& str) { for (auto const& possible : SplitKMode_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return SplitKMode::kInvalid; } ///////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; ConvModeID enumerant; } ConvModeID_enumerants[] = { {"cross", "<cross>", ConvModeID::kCrossCorrelation}, {"conv", "<conv>", ConvModeID::kConvolution}, }; /// Converts a ConvModeID enumerant to a string char const* to_string(ConvModeID type, bool pretty) { for (auto const& possible : ConvModeID_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /// Converts a ConvModeID enumerant from a string template <> ConvModeID from_string<ConvModeID>(std::string const& str) { for (auto const& possible : ConvModeID_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return ConvModeID::kInvalid; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; IteratorAlgorithmID enumerant; } IteratorAlgorithmID_enumerants[] = { {"none", "<none>", IteratorAlgorithmID::kNone}, {"analytic", "<analytic>", IteratorAlgorithmID::kAnalytic}, {"optimized", "<optimized>", IteratorAlgorithmID::kOptimized}, }; /// Converts a ConvModeID enumerant to a string char const* to_string(IteratorAlgorithmID type, bool pretty) { for (auto const& possible : IteratorAlgorithmID_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Converts a ConvModeID enumerant from a string template <> IteratorAlgorithmID from_string<IteratorAlgorithmID>(std::string const& str) { for (auto const& possible : IteratorAlgorithmID_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return IteratorAlgorithmID::kInvalid; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; ConvKind enumerant; } ConvKind_enumerants[] = { {"unknown", "<unkown>", ConvKind::kUnknown}, {"fprop", "<fprop>", ConvKind::kFprop}, {"dgrad", "<dgrad>", ConvKind::kDgrad}, {"wgrad", "<wgrad>", ConvKind::kWgrad}, }; /// Converts a ConvKind enumerant to a string char const* to_string(ConvKind type, bool pretty) { for (auto const& possible : ConvKind_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /// Converts a ConvKind enumerant from a string template <> ConvKind from_string<ConvKind>(std::string const& str) { for (auto const& possible : ConvKind_enumerants) { if ((str.compare(possible.text) == 0) || (str.compare(possible.pretty) == 0)) { return possible.enumerant; } } return ConvKind::kInvalid; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Lexical cast a string to a byte array. Returns true if cast is successful or /// false if invalid. 
bool lexical_cast( std::vector<uint8_t>& bytes, NumericTypeID type, std::string const& str) { int size_bytes = sizeof_bits(type) / 8; if (!size_bytes) { return false; } bytes.resize(size_bytes, 0); std::stringstream ss; ss << str; switch (type) { case NumericTypeID::kU8: { ss >> *reinterpret_cast<uint8_t*>(bytes.data()); } break; case NumericTypeID::kU16: { ss >> *reinterpret_cast<uint16_t*>(bytes.data()); } break; case NumericTypeID::kU32: { ss >> *reinterpret_cast<uint32_t*>(bytes.data()); } break; case NumericTypeID::kU64: { ss >> *reinterpret_cast<uint64_t*>(bytes.data()); } break; case NumericTypeID::kS8: { ss >> *reinterpret_cast<int8_t*>(bytes.data()); } break; case NumericTypeID::kS16: { ss >> *reinterpret_cast<int16_t*>(bytes.data()); } break; case NumericTypeID::kS32: { ss >> *reinterpret_cast<int32_t*>(bytes.data()); } break; case NumericTypeID::kS64: { ss >> *reinterpret_cast<int64_t*>(bytes.data()); } break; case NumericTypeID::kF16: { float tmp; ss >> tmp; *reinterpret_cast<half_t*>(bytes.data()) = static_cast<half_t>(tmp); } break; case NumericTypeID::kBF16: { float tmp; ss >> tmp; *reinterpret_cast<bfloat16_t*>(bytes.data()) = static_cast<bfloat16_t>(tmp); } break; case NumericTypeID::kTF32: { float tmp; ss >> tmp; *reinterpret_cast<tfloat32_t*>(bytes.data()) = static_cast<tfloat32_t>(tmp); } break; case NumericTypeID::kF32: { ss >> *reinterpret_cast<float*>(bytes.data()); } break; case NumericTypeID::kF64: { ss >> *reinterpret_cast<double*>(bytes.data()); } break; case NumericTypeID::kCF16: { std::complex<float> tmp; ss >> tmp; cutlass::complex<cutlass::half_t>* x = reinterpret_cast<cutlass::complex<half_t>*>(bytes.data()); x->real() = static_cast<half_t>(std::real(tmp)); x->imag() = static_cast<half_t>(std::imag(tmp)); } break; case NumericTypeID::kCBF16: { std::complex<float> tmp; ss >> tmp; cutlass::complex<cutlass::bfloat16_t>* x = reinterpret_cast<cutlass::complex<bfloat16_t>*>(bytes.data()); x->real() = static_cast<bfloat16_t>(std::real(tmp)); x->imag() = static_cast<bfloat16_t>(std::imag(tmp)); } break; case NumericTypeID::kCF32: { ss >> *reinterpret_cast<std::complex<float>*>(bytes.data()); } break; case NumericTypeID::kCTF32: { std::complex<float> tmp; ss >> tmp; cutlass::complex<cutlass::tfloat32_t>* x = reinterpret_cast<cutlass::complex<tfloat32_t>*>(bytes.data()); x->real() = static_cast<tfloat32_t>(std::real(tmp)); x->imag() = static_cast<tfloat32_t>(std::imag(tmp)); } break; case NumericTypeID::kCF64: { ss >> *reinterpret_cast<std::complex<double>*>(bytes.data()); } break; default: return false; } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// std::string lexical_cast(int64_t int_value) { std::stringstream ss; ss << int_value; return ss.str(); } /// Lexical cast TO a string FROM a byte array. Returns true if cast is /// successful or false if invalid. 
std::string lexical_cast(std::vector<uint8_t>& bytes, NumericTypeID type) { size_t size_bytes = sizeof_bits(type) / 8; if (!size_bytes || size_bytes != bytes.size()) { return "<invalid>"; } bytes.resize(size_bytes, 0); std::stringstream ss; switch (type) { case NumericTypeID::kU8: { ss << *reinterpret_cast<uint8_t*>(bytes.data()); } break; case NumericTypeID::kU16: { ss << *reinterpret_cast<uint16_t*>(bytes.data()); } break; case NumericTypeID::kU32: { ss << *reinterpret_cast<uint32_t*>(bytes.data()); } break; case NumericTypeID::kU64: { ss << *reinterpret_cast<uint64_t*>(bytes.data()); } break; case NumericTypeID::kS8: { ss << *reinterpret_cast<int8_t*>(bytes.data()); } break; case NumericTypeID::kS16: { ss << *reinterpret_cast<int16_t*>(bytes.data()); } break; case NumericTypeID::kS32: { ss << *reinterpret_cast<int32_t*>(bytes.data()); } break; case NumericTypeID::kS64: { ss << *reinterpret_cast<int64_t*>(bytes.data()); } break; case NumericTypeID::kF16: { float tmp = *reinterpret_cast<half_t*>(bytes.data()); ss << tmp; } break; case NumericTypeID::kBF16: { float tmp = *reinterpret_cast<bfloat16_t*>(bytes.data()); ; ss << tmp; } break; case NumericTypeID::kTF32: { float tmp = *reinterpret_cast<tfloat32_t*>(bytes.data()); ; ss << tmp; } break; case NumericTypeID::kF32: { ss << *reinterpret_cast<float*>(bytes.data()); } break; case NumericTypeID::kF64: { ss << *reinterpret_cast<double*>(bytes.data()); } break; case NumericTypeID::kCF16: { cutlass::complex<half_t> const* x = reinterpret_cast<cutlass::complex<half_t> const*>(bytes.data()); ss << float(x->real()); if (x->imag() != cutlass::half_t()) { ss << "+i" << float(x->imag()); } } break; case NumericTypeID::kCBF16: { cutlass::complex<bfloat16_t> const* x = reinterpret_cast<cutlass::complex<bfloat16_t> const*>(bytes.data()); ss << float(x->real()); if (x->imag() != cutlass::bfloat16_t()) { ss << "+i" << float(x->imag()); } } break; case NumericTypeID::kCF32: { cutlass::complex<float> const* x = reinterpret_cast<cutlass::complex<float> const*>(bytes.data()); ss << x->real(); if (x->imag() != float()) { ss << "+i" << x->imag(); } } break; case NumericTypeID::kCTF32: { cutlass::complex<tfloat32_t> const* x = reinterpret_cast<cutlass::complex<tfloat32_t> const*>(bytes.data()); ss << float(x->real()); if (x->imag() != tfloat32_t()) { ss << "+i" << float(x->imag()); } } break; case NumericTypeID::kCF64: { cutlass::complex<double> const* x = reinterpret_cast<cutlass::complex<double> const*>(bytes.data()); ss << x->real(); if (x->imag() != double()) { ss << "+i" << x->imag(); } } break; default: return "<unknown>"; } return ss.str(); } /// Casts from a signed int64 to the destination type. Returns true if /// successful. 
bool cast_from_int64(std::vector<uint8_t>& bytes, NumericTypeID type, int64_t src) { int size_bytes = sizeof_bits(type) / 8; if (!size_bytes) { return false; } bytes.resize(size_bytes, 0); switch (type) { case NumericTypeID::kU8: { *reinterpret_cast<uint8_t*>(bytes.data()) = static_cast<uint8_t>(src); } break; case NumericTypeID::kU16: { *reinterpret_cast<uint16_t*>(bytes.data()) = static_cast<uint16_t>(src); } break; case NumericTypeID::kU32: { *reinterpret_cast<uint32_t*>(bytes.data()) = static_cast<uint32_t>(src); } break; case NumericTypeID::kU64: { *reinterpret_cast<uint64_t*>(bytes.data()) = static_cast<uint64_t>(src); } break; case NumericTypeID::kS8: { *reinterpret_cast<int8_t*>(bytes.data()) = static_cast<int8_t>(src); } break; case NumericTypeID::kS16: { *reinterpret_cast<int16_t*>(bytes.data()) = static_cast<int16_t>(src); } break; case NumericTypeID::kS32: { *reinterpret_cast<int32_t*>(bytes.data()) = static_cast<int32_t>(src); } break; case NumericTypeID::kS64: { *reinterpret_cast<int64_t*>(bytes.data()) = static_cast<int64_t>(src); } break; case NumericTypeID::kF16: { *reinterpret_cast<half_t*>(bytes.data()) = static_cast<half_t>(float(src)); } break; case NumericTypeID::kBF16: { *reinterpret_cast<bfloat16_t*>(bytes.data()) = static_cast<bfloat16_t>(float(src)); } break; case NumericTypeID::kTF32: { *reinterpret_cast<tfloat32_t*>(bytes.data()) = static_cast<tfloat32_t>(float(src)); } break; case NumericTypeID::kF32: { *reinterpret_cast<float*>(bytes.data()) = static_cast<float>(src); } break; case NumericTypeID::kF64: { *reinterpret_cast<double*>(bytes.data()) = double(src); } break; case NumericTypeID::kCF16: { cutlass::complex<cutlass::half_t>* x = reinterpret_cast<cutlass::complex<half_t>*>(bytes.data()); x->real() = static_cast<half_t>(float(src)); x->imag() = static_cast<half_t>(float(0)); } break; case NumericTypeID::kCF32: { *reinterpret_cast<cutlass::complex<float>*>(bytes.data()) = cutlass::complex<float>(float(src), float(0)); } break; case NumericTypeID::kCF64: { *reinterpret_cast<cutlass::complex<double>*>(bytes.data()) = cutlass::complex<double>(double(src), double(0)); } break; default: return false; } return true; } /// Casts from an unsigned int64 to the destination type. Returns true if /// successful. 
bool cast_from_uint64(std::vector<uint8_t>& bytes, NumericTypeID type, uint64_t src) { int size_bytes = sizeof_bits(type) / 8; if (!size_bytes) { return false; } bytes.resize(size_bytes, 0); switch (type) { case NumericTypeID::kU8: { *reinterpret_cast<uint8_t*>(bytes.data()) = static_cast<uint8_t>(src); } break; case NumericTypeID::kU16: { *reinterpret_cast<uint16_t*>(bytes.data()) = static_cast<uint16_t>(src); } break; case NumericTypeID::kU32: { *reinterpret_cast<uint32_t*>(bytes.data()) = static_cast<uint32_t>(src); } break; case NumericTypeID::kU64: { *reinterpret_cast<uint64_t*>(bytes.data()) = static_cast<uint64_t>(src); } break; case NumericTypeID::kS8: { *reinterpret_cast<int8_t*>(bytes.data()) = static_cast<int8_t>(src); } break; case NumericTypeID::kS16: { *reinterpret_cast<int16_t*>(bytes.data()) = static_cast<int16_t>(src); } break; case NumericTypeID::kS32: { *reinterpret_cast<int32_t*>(bytes.data()) = static_cast<int32_t>(src); } break; case NumericTypeID::kS64: { *reinterpret_cast<int64_t*>(bytes.data()) = static_cast<int64_t>(src); } break; case NumericTypeID::kF16: { *reinterpret_cast<half_t*>(bytes.data()) = static_cast<half_t>(float(src)); } break; case NumericTypeID::kBF16: { *reinterpret_cast<bfloat16_t*>(bytes.data()) = static_cast<bfloat16_t>(float(src)); } break; case NumericTypeID::kTF32: { *reinterpret_cast<tfloat32_t*>(bytes.data()) = static_cast<tfloat32_t>(float(src)); } break; case NumericTypeID::kF32: { *reinterpret_cast<float*>(bytes.data()) = static_cast<float>(src); } break; case NumericTypeID::kF64: { *reinterpret_cast<double*>(bytes.data()) = double(src); } break; case NumericTypeID::kCF16: { cutlass::complex<cutlass::half_t>* x = reinterpret_cast<cutlass::complex<half_t>*>(bytes.data()); x->real() = static_cast<half_t>(float(src)); x->imag() = static_cast<half_t>(float(0)); } break; case NumericTypeID::kCF32: { *reinterpret_cast<std::complex<float>*>(bytes.data()) = std::complex<float>(float(src), float(0)); } break; case NumericTypeID::kCF64: { *reinterpret_cast<std::complex<double>*>(bytes.data()) = std::complex<double>(double(src), double(0)); } break; default: return false; } return true; } /// Lexical cast a string to a byte array. Returns true if cast is successful or /// false if invalid. 
bool cast_from_double(std::vector<uint8_t>& bytes, NumericTypeID type, double src) { int size_bytes = sizeof_bits(type) / 8; if (!size_bytes) { return false; } bytes.resize(size_bytes, 0); switch (type) { case NumericTypeID::kU8: { *reinterpret_cast<uint8_t*>(bytes.data()) = static_cast<uint8_t>(src); } break; case NumericTypeID::kU16: { *reinterpret_cast<uint16_t*>(bytes.data()) = static_cast<uint16_t>(src); } break; case NumericTypeID::kU32: { *reinterpret_cast<uint32_t*>(bytes.data()) = static_cast<uint32_t>(src); } break; case NumericTypeID::kU64: { *reinterpret_cast<uint64_t*>(bytes.data()) = static_cast<uint64_t>(src); } break; case NumericTypeID::kS8: { *reinterpret_cast<int8_t*>(bytes.data()) = static_cast<int8_t>(src); } break; case NumericTypeID::kS16: { *reinterpret_cast<int16_t*>(bytes.data()) = static_cast<int16_t>(src); } break; case NumericTypeID::kS32: { *reinterpret_cast<int32_t*>(bytes.data()) = static_cast<int32_t>(src); } break; case NumericTypeID::kS64: { *reinterpret_cast<int64_t*>(bytes.data()) = static_cast<int64_t>(src); } break; case NumericTypeID::kF16: { *reinterpret_cast<half_t*>(bytes.data()) = static_cast<half_t>(float(src)); } break; case NumericTypeID::kBF16: { *reinterpret_cast<bfloat16_t*>(bytes.data()) = static_cast<bfloat16_t>(float(src)); } break; case NumericTypeID::kTF32: { *reinterpret_cast<tfloat32_t*>(bytes.data()) = static_cast<tfloat32_t>(float(src)); } break; case NumericTypeID::kF32: { *reinterpret_cast<float*>(bytes.data()) = static_cast<float>(src); } break; case NumericTypeID::kF64: { *reinterpret_cast<double*>(bytes.data()) = src; } break; case NumericTypeID::kCF16: { cutlass::complex<cutlass::half_t>* x = reinterpret_cast<cutlass::complex<half_t>*>(bytes.data()); x->real() = static_cast<half_t>(float(src)); x->imag() = static_cast<half_t>(float(0)); } break; case NumericTypeID::kCBF16: { cutlass::complex<cutlass::bfloat16_t>* x = reinterpret_cast<cutlass::complex<bfloat16_t>*>(bytes.data()); x->real() = static_cast<bfloat16_t>(bfloat16_t(src)); x->imag() = static_cast<bfloat16_t>(bfloat16_t(0)); } break; case NumericTypeID::kCF32: { *reinterpret_cast<cutlass::complex<float>*>(bytes.data()) = cutlass::complex<float>(float(src), float()); } break; case NumericTypeID::kCTF32: { *reinterpret_cast<cutlass::complex<tfloat32_t>*>(bytes.data()) = cutlass::complex<tfloat32_t>(tfloat32_t(src), tfloat32_t()); } break; case NumericTypeID::kCF64: { *reinterpret_cast<cutlass::complex<double>*>(bytes.data()) = cutlass::complex<double>(src, double()); } break; default: return false; } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; conv::Operator enumerant; } ConvOperator_enumerants[] = { {"fprop", "Fprop", conv::Operator::kFprop}, {"dgrad", "Dgrad", conv::Operator::kDgrad}, {"wgrad", "Wgrad", conv::Operator::kWgrad}, }; /// Converts a conv::Operator enumerant to a string char const* to_string(conv::Operator conv_op, bool pretty) { for (auto const& possible : ConvOperator_enumerants) { if (conv_op == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; conv::ConvType enumerant; } ConvType_enumerants[] = { {"convolution", "Convolution", conv::ConvType::kConvolution}, {"batch_convolution", "BatchConvolution", conv::ConvType::kBatchConvolution}, {"local", "Local", conv::ConvType::kLocal}, {"local_share", "LocalShare", conv::ConvType::kLocalShare}, }; /// Converts a ConvType enumerant to a string char const* to_string(conv::ConvType type, bool pretty) { for (auto const& possible : ConvType_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; ArchTagID enumerant; } ArchTagID_enumerants[] = { {"sm_50", "Sm50", ArchTagID::kSm50}, {"sm_60", "Sm60", ArchTagID::kSm60}, {"sm_61", "Sm61", ArchTagID::kSm61}, {"sm_70", "Sm70", ArchTagID::kSm70}, {"sm_72", "Sm72", ArchTagID::kSm72}, {"sm_75", "Sm75", ArchTagID::kSm75}, {"sm_80", "Sm80", ArchTagID::kSm80}, {"sm_86", "Sm86", ArchTagID::kSm86}, }; /// Converts an ArchTagID enumerant to a string char const* to_string(ArchTagID tag, bool pretty) { for (auto const& possible : ArchTagID_enumerants) { if (tag == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; epilogue::EpilogueType enumerant; } EpilogueType_enumerants[] = { {"bias_add_linear_combination", "BiasAddLinearCombination", epilogue::EpilogueType::kBiasAddLinearCombination}, {"bias_add_linear_combination_clamp", "BiasAddLinearCombinationClamp", epilogue::EpilogueType::kBiasAddLinearCombinationClamp}, {"bias_add_linear_combination_hswish", "BiasAddLinearCombinationHSwish", epilogue::EpilogueType::kBiasAddLinearCombinationHSwish}, {"bias_add_linear_combination_hswish_clamp", "BiasAddLinearCombinationHSwishClamp", epilogue::EpilogueType::kBiasAddLinearCombinationHSwishClamp}, {"bias_add_linear_combination_relu", "BiasAddLinearCombinationRelu", epilogue::EpilogueType::kBiasAddLinearCombinationRelu}, {"bias_add_linear_combination_relu_clamp", "BiasAddLinearCombinationReluClamp", epilogue::EpilogueType::kBiasAddLinearCombinationReluClamp}, {"conversion", "Conversion", epilogue::EpilogueType::kConversion}, {"linear_combination", "LinearCombination", epilogue::EpilogueType::kLinearCombination}, {"linear_combination_clamp", "LinearCombination_clamp", epilogue::EpilogueType::kLinearCombinationClamp}, {"linear_combination_planar_complex", "LinearCombinationPlanarComplex", epilogue::EpilogueType::kLinearCombinationPlanarComplex}, {"linear_combination_relu", "LinearCombinationRelu", epilogue::EpilogueType::kLinearCombinationRelu}, {"linear_combination_sigmoid", "LinearCombinationSigmoid", epilogue::EpilogueType::kLinearCombinationSigmoid}, }; /// Converts an EpilogueType enumerant to a string char const* to_string(epilogue::EpilogueType type, bool pretty) { for (auto const& possible : EpilogueType_enumerants) { if (type == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; ThreadblockSwizzleID enumerant; } ThreadblockSwizzleID_enumerants[] = { {"gemm_identity", "GemmIdentityThreadblockSwizzle", ThreadblockSwizzleID::kGemmIdentity}, {"gemm_horizontal", "GemmHorizontalThreadblockSwizzle", ThreadblockSwizzleID::kGemmHorizontal}, {"gemm_batched_identity", "GemmBatchedIdentityThreadblockSwizzle", ThreadblockSwizzleID::kGemmBatchedIdentity}, {"gemm_split_k_identity", "GemmSplitKIdentityThreadblockSwizzle", ThreadblockSwizzleID::kGemmSplitKIdentity}, {"gemm_split_k_horizontal", "GemmSplitKHorizontalThreadblockSwizzle", ThreadblockSwizzleID::kGemmSplitKHorizontal}, {"gemv_batched_strided_default", "GemvBatchedStridedThreadblockDefaultSwizzle", ThreadblockSwizzleID::kGemvBatchedStridedDefault}, {"gemv_batched_strided_reduction", "GemvBatchedStridedThreadblockReductionSwizzle", ThreadblockSwizzleID::kGemvBatchedStridedReduction}, {"convolution_fprop_cxrskx", "ConvolutionFpropCxRSKxThreadblockSwizzle", ThreadblockSwizzleID::kConvolutionFpropCxRSKx}, {"convolution_dgrad_cxrskx", "ConvolutionDgradCxRSKxThreadblockSwizzle", ThreadblockSwizzleID::kConvolutionDgradCxRSKx}, {"convolution_fprop_ncxhwx", "ConvolutionFpropNCxHWxThreadblockSwizzle", ThreadblockSwizzleID::kConvolutionFpropNCxHWx}, {"convolution_fprop_nhwc", "ConvolutionFpropTransThreadblockSwizzle", ThreadblockSwizzleID::kConvolutionFpropTrans}, {"convolution_dgrad_ncxhwx", "ConvolutionDgradNCxHWxThreadblockSwizzle", ThreadblockSwizzleID::kConvolutionDgradNCxHWx}, {"convolution_dgrad_ncxhwx", "ConvolutionDgradTransThreadblockSwizzle", ThreadblockSwizzleID::kConvolutionDgradTrans}, }; /// Converts a ThreadblockSwizzleID enumerant to a string char const* to_string(ThreadblockSwizzleID threadblock_swizzle, bool pretty) { for (auto const& possible : ThreadblockSwizzleID_enumerants) { if (threadblock_swizzle == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Converts a bool value to a string char const* to_string(bool val, bool pretty) { if (val) { return pretty ? "True" : "true"; } else { return pretty ? "False" : "false"; } } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; MathOperationID enumerant; } MathOperationID_enumerants[] = { {"add", "Add", MathOperationID::kAdd}, {"multiply_add", "MultiplyAdd", MathOperationID::kMultiplyAdd}, {"multiply_add_saturate", "MultiplyAddSaturate", MathOperationID::kMultiplyAddSaturate}, {"multiply_add_fast_bf16", "MultiplyAddFastBF16", MathOperationID::kMultiplyAddFastBF16}, {"multiply_add_fast_f16", "MultiplyAddFastF16", MathOperationID::kMultiplyAddFastF16}, {"multiply_add_complex", "MultiplyAddComplex", MathOperationID::kMultiplyAddComplex}, {"multiply_add_gaussian_complex", "MultiplyAddGaussianComplex", MathOperationID::kMultiplyAddGaussianComplex}, {"xor_popc", "XorPopc", MathOperationID::kXorPopc}, }; /// Converts a MathOperationID enumerant to a string char const* to_string(MathOperationID math_op, bool pretty) { for (auto const& possible : MathOperationID_enumerants) { if (math_op == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? 
"Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; conv::SpecialOptimizeDesc enumerant; } SpecialOptimizeDesc_enumerants[] = { {"none_special_opt", "NoneSpecialOpt", conv::SpecialOptimizeDesc::NONE}, {"conv_filter_unity", "ConvFilterUnity", conv::SpecialOptimizeDesc::CONV_FILTER_UNITY}, {"deconv_double_upsampling", "DeconvDoubleUpsampling", conv::SpecialOptimizeDesc::DECONV_DOUBLE_UPSAMPLING}, }; /// Converts an SpecialOptimizeDesc enumerant to a string char const* to_string(conv::SpecialOptimizeDesc special_opt, bool pretty) { for (auto const& possible : SpecialOptimizeDesc_enumerants) { if (special_opt == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// static struct { char const* text; char const* pretty; conv::ImplicitGemmMode enumerant; } ImplicitGemmMode_enumerants[] = { {"gemm_nt", "GemmNT", conv::ImplicitGemmMode::GEMM_NT}, {"gemm_tn", "GemmTN", conv::ImplicitGemmMode::GEMM_TN}, }; /// Converts an ImplicitGemmMode enumerant to a string char const* to_string(conv::ImplicitGemmMode mode, bool pretty) { for (auto const& possible : ImplicitGemmMode_enumerants) { if (mode == possible.enumerant) { if (pretty) { return possible.pretty; } else { return possible.text; } } } return pretty ? "Invalid" : "invalid"; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// #endif
* \brief Implements helper routines for PME gather and spline routines. * * \author Aleksei Iupinov <a.yupinov@gmail.com> */ #include "gmxpre.h" #include <cassert> #include "gromacs/gpu_utils/cuda_kernel_utils.cuh" #include "gromacs/gpu_utils/vectype_ops.cuh" #include "pme.cuh" #include "pme_grid.h" /*! \internal \brief * Gets a base of the unique index to an element in a spline parameter buffer (theta/dtheta), * which is laid out for GPU spread/gather kernels. The base only corresponds to the atom index within the execution block. * Feed the result into getSplineParamIndex() to get a full index. * TODO: it's likely that both parameters can be just replaced with a single atom index, as they are derived from it. * Do that, verifying that the generated code is not bloated, and/or revise the spline indexing scheme. * Removing warp dependency would also be nice (and would probably coincide with removing c_pmeSpreadGatherAtomsPerWarp). * * \tparam order PME order * \tparam atomsPerWarp Number of atoms processed by a warp * \param[in] warpIndex Warp index wrt the block. * \param[in] atomWarpIndex Atom index wrt the warp (from 0 to atomsPerWarp - 1). * * \returns Index into theta or dtheta array using GPU layout. */ template<int order, int atomsPerWarp> int __device__ __forceinline__ getSplineParamIndexBase(int warpIndex, int atomWarpIndex) { assert((atomWarpIndex >= 0) && (atomWarpIndex < atomsPerWarp)); const int dimIndex = 0; const int splineIndex = 0; // The zeroes are here to preserve the full index formula for reference return (((splineIndex + order * warpIndex) * DIM + dimIndex) * atomsPerWarp + atomWarpIndex); } /*! \internal \brief * Gets a unique index to an element in a spline parameter buffer (theta/dtheta), * which is laid out for GPU spread/gather kernels. The index is wrt to the execution block, * in range(0, atomsPerBlock * order * DIM). * This function consumes result of getSplineParamIndexBase() and adjusts it for \p dimIndex and \p splineIndex. * * \tparam order PME order * \tparam atomsPerWarp Number of atoms processed by a warp * \param[in] paramIndexBase Must be result of getSplineParamIndexBase(). * \param[in] dimIndex Dimension index (from 0 to 2) * \param[in] splineIndex Spline contribution index (from 0 to \p order - 1) * * \returns Index into theta or dtheta array using GPU layout. */ template<int order, int atomsPerWarp> int __device__ __forceinline__ getSplineParamIndex(int paramIndexBase, int dimIndex, int splineIndex) { assert((dimIndex >= XX) && (dimIndex < DIM)); assert((splineIndex >= 0) && (splineIndex < order)); return (paramIndexBase + (splineIndex * DIM + dimIndex) * atomsPerWarp); } /*! \internal \brief * An inline CUDA function for skipping the zero-charge atoms. * * \returns Non-0 if atom should be processed, 0 otherwise. * \param[in] coefficient The atom charge. * * This is called from the spline_and_spread and gather PME kernels. */ bool __device__ __forceinline__ pme_gpu_check_atom_charge(const float coefficient) { assert(isfinite(coefficient)); return c_skipNeutralAtoms ? (coefficient != 0.0F) : true; } //! Controls if the atom and charge data is prefeched into shared memory or loaded per thread from global static const bool c_useAtomDataPrefetch = true; /*! \brief Asserts if the argument is finite. * * The function works for any data type, that can be casted to float. Note that there is also * a specialized implementation for float3 data type. * * \param[in] arg Argument to check. 
*/ template<typename T> __device__ inline void assertIsFinite(T arg); template<> __device__ inline void assertIsFinite(float3 gmx_unused arg) { assert(isfinite(static_cast<float>(arg.x))); assert(isfinite(static_cast<float>(arg.y))); assert(isfinite(static_cast<float>(arg.z))); } template<typename T> __device__ inline void assertIsFinite(T gmx_unused arg) { assert(isfinite(static_cast<float>(arg))); } /*! \brief * General purpose function for loading atom-related data from global to shared memory. * * \tparam T Data type (float/int/...) * \tparam atomsPerBlock Number of atoms processed by a block - should be accounted for in * the size of the shared memory array. * \tparam dataCountPerAtom Number of data elements per single atom (e.g. DIM for an rvec * coordinates array). * \param[out] sm_destination Shared memory array for output. * \param[in] gm_source Global memory array for input. */ template<typename T, int atomsPerBlock, int dataCountPerAtom> __device__ __forceinline__ void pme_gpu_stage_atom_data(T* __restrict__ sm_destination, const T* __restrict__ gm_source) { const int blockIndex = blockIdx.y * gridDim.x + blockIdx.x; const int threadLocalIndex = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x) + threadIdx.x; const int localIndex = threadLocalIndex; const int globalIndexBase = blockIndex * atomsPerBlock * dataCountPerAtom; const int globalIndex = globalIndexBase + localIndex; if (localIndex < atomsPerBlock * dataCountPerAtom) { assertIsFinite(gm_source[globalIndex]); sm_destination[localIndex] = gm_source[globalIndex]; } } /*! \brief * PME GPU spline parameter and gridline indices calculation. * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines(). * First stage of the whole kernel. * * \tparam order PME interpolation order. * \tparam atomsPerBlock Number of atoms processed by a block - should be accounted for * in the sizes of the shared memory arrays. * \tparam atomsPerWarp Number of atoms processed by a warp * \tparam writeSmDtheta Bool controlling if the theta derivative should be written to * shared memory. Enables calculation of dtheta if set. * \tparam writeGlobal A boolean which tells if the theta values and gridlines should * be written to global memory. Enables calculation of dtheta if * set. * \tparam numGrids The number of grids using the splines. * \param[in] kernelParams Input PME CUDA data in constant memory. * \param[in] atomIndexOffset Starting atom index for the execution block w.r.t. global memory. * \param[in] atomX Atom coordinate of atom processed by thread. * \param[in] atomCharge Atom charge/coefficient of atom processed by thread. * \param[out] sm_theta Atom spline values in the shared memory. * \param[out] sm_dtheta Derivative of atom spline values in shared memory. * \param[out] sm_gridlineIndices Atom gridline indices in the shared memory. 
*/ template<int order, int atomsPerBlock, int atomsPerWarp, bool writeSmDtheta, bool writeGlobal, int numGrids> __device__ __forceinline__ void calculate_splines(const PmeGpuCudaKernelParams kernelParams, const int atomIndexOffset, const float3 atomX, const float atomCharge, float* __restrict__ sm_theta, float* __restrict__ sm_dtheta, int* __restrict__ sm_gridlineIndices) { assert(numGrids == 1 || numGrids == 2); assert(numGrids == 1 || c_skipNeutralAtoms == false); /* Global memory pointers for output */ float* __restrict__ gm_theta = kernelParams.atoms.d_theta; float* __restrict__ gm_dtheta = kernelParams.atoms.d_dtheta; int* __restrict__ gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices; /* Fractional coordinates */ __shared__ float sm_fractCoords[atomsPerBlock * DIM]; /* Thread index w.r.t. block */ const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; /* Warp index w.r.t. block - could probably be obtained easier? */ const int warpIndex = threadLocalId / warp_size; /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */ const int atomWarpIndex = threadIdx.z % atomsPerWarp; /* Atom index w.r.t. block/shared memory */ const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex; /* Spline contribution index in one dimension */ const int threadLocalIdXY = (threadIdx.y * blockDim.x) + threadIdx.x; const int orderIndex = threadLocalIdXY / DIM; /* Dimension index */ const int dimIndex = threadLocalIdXY % DIM; /* Multi-purpose index of rvec/ivec atom data */ const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex; float splineData[order]; const int localCheck = (dimIndex < DIM) && (orderIndex < 1); /* we have 4 threads per atom, but can only use 3 here for the dimensions */ if (localCheck) { /* Indices interpolation */ if (orderIndex == 0) { int tableIndex, tInt; float n, t; assert(atomIndexLocal < DIM * atomsPerBlock); /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset * puts them into local memory(!) instead of accessing the constant memory directly. * That's the reason for the switch, to unroll explicitly. * The commented parts correspond to the 0 components of the recipbox. 
*/ switch (dimIndex) { case XX: tableIndex = kernelParams.grid.tablesOffsets[XX]; n = kernelParams.grid.realGridSizeFP[XX]; t = atomX.x * kernelParams.current.recipBox[dimIndex][XX] + atomX.y * kernelParams.current.recipBox[dimIndex][YY] + atomX.z * kernelParams.current.recipBox[dimIndex][ZZ]; break; case YY: tableIndex = kernelParams.grid.tablesOffsets[YY]; n = kernelParams.grid.realGridSizeFP[YY]; t = /*atomX.x * kernelParams.current.recipBox[dimIndex][XX] + */ atomX.y * kernelParams.current.recipBox[dimIndex][YY] + atomX.z * kernelParams.current.recipBox[dimIndex][ZZ]; break; case ZZ: tableIndex = kernelParams.grid.tablesOffsets[ZZ]; n = kernelParams.grid.realGridSizeFP[ZZ]; t = /*atomX.x * kernelParams.current.recipBox[dimIndex][XX] + atomX.y * kernelParams.current.recipBox[dimIndex][YY] + */ atomX .z * kernelParams.current.recipBox[dimIndex][ZZ]; break; } const float shift = c_pmeMaxUnitcellShift; /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */ t = (t + shift) * n; tInt = static_cast<int>(t); assert(sharedMemoryIndex < atomsPerBlock * DIM); sm_fractCoords[sharedMemoryIndex] = t - tInt; tableIndex += tInt; assert(tInt >= 0); assert(tInt < c_pmeNeighborUnitcellCount * n); // TODO have shared table for both parameters to share the fetch, as index is always same? // TODO compare texture/LDG performance sm_fractCoords[sharedMemoryIndex] += fetchFromParamLookupTable( kernelParams.grid.d_fractShiftsTable, kernelParams.fractShiftsTableTexture, tableIndex); sm_gridlineIndices[sharedMemoryIndex] = fetchFromParamLookupTable(kernelParams.grid.d_gridlineIndicesTable, kernelParams.gridlineIndicesTableTexture, tableIndex); if (writeGlobal) { gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex]; } } /* B-spline calculation */ const int chargeCheck = pme_gpu_check_atom_charge(atomCharge); /* With FEP (numGrids == 2), we might have 0 charge in state A, but !=0 in state B, so we always calculate splines */ if (numGrids == 2 || chargeCheck) { float div; int o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1 const float dr = sm_fractCoords[sharedMemoryIndex]; assert(isfinite(dr)); /* dr is relative offset from lower cell limit */ splineData[order - 1] = 0.0F; splineData[1] = dr; splineData[0] = 1.0F - dr; #pragma unroll for (int k = 3; k < order; k++) { div = 1.0F / (k - 1.0F); splineData[k - 1] = div * dr * splineData[k - 2]; #pragma unroll for (int l = 1; l < (k - 1); l++) { splineData[k - l - 1] = div * ((dr + l) * splineData[k - l - 2] + (k - l - dr) * splineData[k - l - 1]); } splineData[0] = div * (1.0F - dr) * splineData[0]; } const int thetaIndexBase = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex); const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order; /* only calculate dtheta if we are saving it to shared or global memory */ if (writeSmDtheta || writeGlobal) { /* Differentiation and storing the spline derivatives (dtheta) */ #pragma unroll for (o = 0; o < order; o++) { const int thetaIndex = getSplineParamIndex<order, atomsPerWarp>(thetaIndexBase, dimIndex, o); const float dtheta = ((o > 0) ? 
splineData[o - 1] : 0.0F) - splineData[o]; assert(isfinite(dtheta)); assert(thetaIndex < order * DIM * atomsPerBlock); if (writeSmDtheta) { sm_dtheta[thetaIndex] = dtheta; } if (writeGlobal) { const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex; gm_dtheta[thetaGlobalIndex] = dtheta; } } } div = 1.0F / (order - 1.0F); splineData[order - 1] = div * dr * splineData[order - 2]; #pragma unroll for (int k = 1; k < (order - 1); k++) { splineData[order - k - 1] = div * ((dr + k) * splineData[order - k - 2] + (order - k - dr) * splineData[order - k - 1]); } splineData[0] = div * (1.0F - dr) * splineData[0]; /* Storing the spline values (theta) */ #pragma unroll for (o = 0; o < order; o++) { const int thetaIndex = getSplineParamIndex<order, atomsPerWarp>(thetaIndexBase, dimIndex, o); assert(thetaIndex < order * DIM * atomsPerBlock); sm_theta[thetaIndex] = splineData[o]; assert(isfinite(sm_theta[thetaIndex])); if (writeGlobal) { const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex; gm_theta[thetaGlobalIndex] = splineData[o]; } } } } }
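/* --- Hypothetical illustration (not part of the original source) ---
 * Shows how the two spline-parameter index helpers defined earlier compose
 * into a single theta/dtheta index. The order and atomsPerWarp values below
 * are assumptions chosen only for this sketch.
 */
int __device__ __forceinline__ exampleSplineThetaIndex(int warpIndex,
                                                       int atomWarpIndex,
                                                       int dimIndex,
                                                       int splineIndex)
{
    constexpr int order        = 4; /* assumed PME interpolation order */
    constexpr int atomsPerWarp = 2; /* assumed atoms processed per warp */
    const int base = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex);
    /* Equivalent to (((splineIndex + order * warpIndex) * DIM + dimIndex) * atomsPerWarp + atomWarpIndex) */
    return getSplineParamIndex<order, atomsPerWarp>(base, dimIndex, splineIndex);
}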
// thread block size #define BLOCK_SIZE 256 // integration direction #define integrateXcoord 1 #define integrateYcoord 0 __global__ void pad_projections_kernel( double* d_img, const int nDetXMap, const int nDetYMap, const int nElem, const int np) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < nElem) d_img[(np*nDetYMap *nDetXMap) + (gid*nDetYMap)] = 0; } __global__ void map_boudaries_kernel( double* d_pBound, const int nElem, const double valueLeftBound, const double sizeElem, const double offset) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < nElem) d_pBound[gid] = (gid - valueLeftBound) * sizeElem + offset; } __global__ void rot_detector_kernel( double* __restrict__ d_pRdetY, double* __restrict__ d_pRdetZ, const double* __restrict__ d_pYcoord, const double* __restrict__ d_pZcoord, const double yOffset, const double zOffset, const double phi, const int nElem) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < nElem) { // cos and sin are in measured in radians. d_pRdetY[gid] = ((d_pYcoord[gid] - yOffset) * cos(phi) - (d_pZcoord[gid] - zOffset) * sin(phi)) + yOffset; d_pRdetZ[gid] = ((d_pYcoord[gid] - yOffset) * sin(phi) + (d_pZcoord[gid] - zOffset) * cos(phi)) + zOffset; } } __global__ void mapDet2Slice_kernel( double* __restrict__ const pXmapp, double* __restrict__ const pYmapp, double tubeX, double tubeY, double tubeZ, const double* __restrict__ const pXcoord, const double* __restrict__ const pYcoord, const double* __restrict__ const pZcoord, const double* __restrict__ const pZSlicecoord, const int nDetXMap, const int nDetYMap, const int nz) { const int px = blockIdx.x * blockDim.x + threadIdx.x; const int py = blockIdx.y * blockDim.y + threadIdx.y; if (px < nDetYMap && py < nDetXMap) { const int pos = py * nDetYMap + px; pXmapp[pos] = ((pXcoord[py] - tubeX)*(pZSlicecoord[nz] - pZcoord[px]) - (pXcoord[py] * tubeZ) + (pXcoord[py] * pZcoord[px])) / (-tubeZ + pZcoord[px]); if (py == 0) pYmapp[px] = ((pYcoord[px] - tubeY)*(pZSlicecoord[nz] - pZcoord[px]) - (pYcoord[px] * tubeZ) + (pYcoord[px] * pZcoord[px])) / (-tubeZ + pZcoord[px]); } } __global__ void img_integration_kernel( double* d_img, const int nPixX, const int nPixY, const bool direction, const int offsetX, const int offsetY, const int nSlices) { /* Integration of 2D slices over the whole volume (S.1.Integration. 
- Liu et al(2017)) Perform an inclusive scan */ const int tx = blockIdx.x * blockDim.x + threadIdx.x; const int ty = blockIdx.y * blockDim.y + threadIdx.y; const int px = tx + offsetX; const int py = ty + offsetY; const int pz = blockIdx.z * blockDim.z + threadIdx.z; if (px >= nPixY || py >= nPixX || pz >= nSlices) return; if (direction == integrateXcoord) { for (int s = 1; s <= blockDim.y; s *= 2) { int spot = ty - s; double val = 0; if (spot >= 0) { val = d_img[(pz*nPixY*nPixX) + (offsetY + spot) * nPixY + px]; } if (spot >= 0) { d_img[(pz*nPixY*nPixX) + (py * nPixY) + px] += val; } } } else { for (int s = 1; s <= blockDim.x; s *= 2) { int spot = tx - s; double val = 0; if (spot >= 0) { val = d_img[(pz*nPixY*nPixX) + py * nPixY + spot + offsetX]; } if (spot >= 0) { d_img[(pz*nPixY*nPixX) + (py * nPixY) + px] += val; } } } } __global__ void bilinear_interpolation_kernel( double* __restrict__ d_sliceI, const double* __restrict__ d_pProj, const double* __restrict__ d_pObjX, const double* __restrict__ d_pObjY, const double* __restrict__ d_pDetmX, const double* __restrict__ d_pDetmY, const int nPixXMap, const int nPixYMap, const int nDetXMap, const int nDetYMap, const int nDetX, const int nDetY, const int np) { const int px = blockIdx.x * blockDim.x + threadIdx.x; const int py = blockIdx.y * blockDim.y + threadIdx.y; // Make sure we don't try and access memory outside the detector // by having any threads mapped there return early if (px >= nPixYMap || py >= nPixXMap) return; // S.2. Interpolation - Liu et al (2017) // Adjust the mapped coordinates to cross the range of (0-nDetX).*duMap // Divide by pixelSize to get a unitary pixel size const double xNormData = nDetX - d_pObjX[py] / d_pDetmX[0]; const int xData = floor(xNormData); const double alpha = xNormData - xData; // Adjust the mapped coordinates to cross the range of (0-nDetY).*dyMap // Divide by pixelSize to get a unitary pixel size const double yNormData = (d_pObjY[px] / d_pDetmX[0]) - (d_pDetmY[0] / d_pDetmX[0]); const int yData = floor(yNormData); const double beta = yNormData - yData; double d00, d01, d10, d11; if (((xNormData) >= 0) && ((xNormData) <= nDetX) && ((yNormData) >= 0) && ((yNormData) <= nDetY)) d00 = d_pProj[(np*nDetYMap*nDetXMap) + (xData*nDetYMap + yData)]; else d00 = 0.0; if (((xData + 1) > 0) && ((xData + 1) <= nDetX) && ((yNormData) >= 0) && ((yNormData) <= nDetY)) d10 = d_pProj[(np*nDetYMap*nDetXMap) + ((xData + 1)*nDetYMap + yData)]; else d10 = 0.0; if (((xNormData) >= 0) && ((xNormData) <= nDetX) && ((yData + 1) > 0) && ((yData + 1) <= nDetY)) d01 = d_pProj[(np*nDetYMap*nDetXMap) + (xData*nDetYMap + yData + 1)]; else d01 = 0.0; if (((xData + 1) > 0) && ((xData + 1) <= nDetX) && ((yData + 1) > 0) && ((yData + 1) <= nDetY)) d11 = d_pProj[(np*nDetYMap*nDetXMap) + ((xData + 1)*nDetYMap + yData + 1)]; else d11 = 0.0; double result_temp1 = alpha * d10 + (-d00 * alpha + d00); double result_temp2 = alpha * d11 + (-d01 * alpha + d01); d_sliceI[py * nPixYMap + px] = beta * result_temp2 + (-result_temp1 * beta + result_temp1); } __global__ void differentiation_kernel( double* __restrict__ d_pVolume, const double* __restrict__ d_sliceI, double tubeX, double rtubeY, double rtubeZ, const double* __restrict__ const d_pObjX, const double* __restrict__ const d_pObjY, const double* __restrict__ const d_pObjZ, const int nPixX, const int nPixY, const int nPixXMap, const int nPixYMap, const double du, const double dv, const double dx, const double dy, const double dz, const int nz) { const int px = blockIdx.x * blockDim.x + 
threadIdx.x; const int py = blockIdx.y * blockDim.y + threadIdx.y; /* S.3. Differentiation - Eq. 24 - Liu et al (2017) Detector integral projection ___________ |_A_|_B_|___| |_C_|_D_|___| |___|___|___| (px,py) ________________ |_A_|__B__|_____| |_C_|(0,0)|(0,1)| |___|(1,0)|(1,1)| Threads are lauched from D up to nPixX (py) and nPixY (px) i.e., they are running on the detector image. Thread (0,0) is on D. Coordinates on intergal projection: A = py * nPixYMap + px B = ((py+1) * nPixYMap) + px C = py * nPixYMap + px + 1 D = ((py+1) * nPixYMap) + px + 1 */ if (px < nPixY && py < nPixX) { const int pos = (nPixX*nPixY*nz) + (py * nPixY) + px; int coordA = py * nPixYMap + px; int coordB = ((py + 1) * nPixYMap) + px; int coordC = coordA + 1; int coordD = coordB + 1; // x - ray angle in X coord double gamma = atan((d_pObjX[py] + (dx / 2.0) - tubeX) / (rtubeZ - d_pObjZ[nz])); // x - ray angle in Y coord double alpha = atan((d_pObjY[px] + (dy / 2.0) - rtubeY) / (rtubeZ - d_pObjZ[nz])); double dA, dB, dC, dD; dA = d_sliceI[coordA]; dB = d_sliceI[coordB]; dC = d_sliceI[coordC]; dD = d_sliceI[coordD]; // Treat border of interpolated integral detector if (dC == 0 && dD == 0) { dC = dA; dD = dB; } // S.3.Differentiation - Eq. 24 - Liu et al(2017) d_pVolume[pos] += ((dD - dC - dB + dA)*(du*dv*dz / (cos(alpha)*cos(gamma)*dx*dy))); } } __global__ void division_kernel( double* d_img, const int nPixX, const int nPixY, const int nSlices, const int nProj) { const int px = blockIdx.x * blockDim.x + threadIdx.x; const int py = blockIdx.y * blockDim.y + threadIdx.y; const int pz = blockIdx.z * blockDim.z + threadIdx.z; if (px < nPixY && py < nPixX && pz < nSlices) { const int pos = (nPixX*nPixY*pz) + (py * nPixY) + px; d_img[pos] /= (double) nProj; } } // Branchless distance-driven backprojection void backprojectionDDb( double* const h_pVolume, const double* const h_pProj, const double* const h_pTubeAngle, const double* const h_pDetAngle, const int idXProj, const int nProj, const int nPixX, const int nPixY, const int nSlices, const int nDetX, const int nDetY, const double dx, const double dy, const double dz, const double du, const double dv, const double DSD, const double DDR, const double DAG) { // Number of mapped detectors const int nDetXMap = nDetX + 1; const int nDetYMap = nDetY + 1; // Number of mapped pixels const int nPixXMap = nPixX + 1; const int nPixYMap = nPixY + 1; double* d_pProj; double* d_sliceI; double* d_pVolume; hipMalloc((void **)&d_pProj, nDetXMap*nDetYMap*nProj * sizeof(double)); hipMalloc((void **)&d_sliceI, nPixXMap*nPixYMap * sizeof(double)); hipMalloc((void **)&d_pVolume, nPixX*nPixY*nSlices * sizeof(double)); // Will reuse grid configurations dim3 threadsPerBlock (1,1,1); dim3 blockSize (1,1,1); const int maxThreadsPerBlock = BLOCK_SIZE; // Copy projection data padding with zeros for image integation // Initialize first column and row with zeros const double* h_pProj_tmp; double* d_pProj_tmp; threadsPerBlock.x = maxThreadsPerBlock; blockSize.x = (nDetXMap / maxThreadsPerBlock) + 1; for (int np = 0; np < nProj; np++) { // Pad on X coord direction hipLaunchKernelGGL(pad_projections_kernel, blockSize, threadsPerBlock, 0, 0, d_pProj, nDetXMap, nDetYMap, nDetXMap, np); // Pad on Y coord direction d_pProj_tmp = d_pProj + (nDetXMap*nDetYMap*np) + 1; hipMemset(d_pProj_tmp, 0, nPixY * sizeof(double)); } // Copy projections data from host to device for (int np = 0; np < nProj; np++) for (int c = 0; c < nDetX; c++) { h_pProj_tmp = h_pProj + (c * nDetY) + (nDetX*nDetY*np); d_pProj_tmp = d_pProj + 
(((c + 1) * nDetYMap) + 1) + (nDetXMap*nDetYMap*np); hipMemcpy(d_pProj_tmp, h_pProj_tmp, nDetY * sizeof(double), hipMemcpyHostToDevice); } // device memory for projections coordinates double* d_pDetX; double* d_pDetY; double* d_pDetZ; double* d_pObjX; double* d_pObjY; double* d_pObjZ; hipMalloc((void **)&d_pDetX, nDetXMap * sizeof(double)); hipMalloc((void **)&d_pDetY, nDetYMap * sizeof(double)); hipMalloc((void **)&d_pDetZ, nDetYMap * sizeof(double)); hipMalloc((void **)&d_pObjX, nPixXMap * sizeof(double)); hipMalloc((void **)&d_pObjY, nPixYMap * sizeof(double)); hipMalloc((void **)&d_pObjZ, nSlices * sizeof(double)); // device memory for mapped coordinates double* d_pDetmY; double* d_pDetmX; hipMalloc((void **)&d_pDetmY, nDetYMap * sizeof(double)); hipMalloc((void **)&d_pDetmX, nDetYMap * nDetXMap * sizeof(double)); // device memory for rotated detector coords double* d_pRdetY; double* d_pRdetZ; hipMalloc((void **)&d_pRdetY, nDetYMap * sizeof(double)); hipMalloc((void **)&d_pRdetZ, nDetYMap * sizeof(double)); // Generate detector and object boudaries threadsPerBlock.x = maxThreadsPerBlock; blockSize.x = (nDetX / maxThreadsPerBlock) + 1; hipLaunchKernelGGL(map_boudaries_kernel, blockSize, threadsPerBlock, 0, 0, d_pDetX, nDetXMap, (double)nDetX, -du, 0.0); blockSize.x = (nDetY / maxThreadsPerBlock) + 1; hipLaunchKernelGGL(map_boudaries_kernel, blockSize, threadsPerBlock, 0, 0, d_pDetY, nDetYMap, nDetY / 2.0, dv, 0.0); blockSize.x = (nPixX / maxThreadsPerBlock) + 1; hipLaunchKernelGGL(map_boudaries_kernel, blockSize, threadsPerBlock, 0, 0, d_pObjX, nPixXMap, (double)nPixX, -dx, 0.0); blockSize.x = (nPixY / maxThreadsPerBlock) + 1; hipLaunchKernelGGL(map_boudaries_kernel, blockSize, threadsPerBlock, 0, 0, d_pObjY, nPixYMap, nPixY / 2.0, dy, 0.0); blockSize.x = (nSlices / maxThreadsPerBlock) + 1; hipLaunchKernelGGL(map_boudaries_kernel, blockSize, threadsPerBlock, 0, 0, d_pObjZ, nSlices, 0.0, dz, DAG + (dz / 2.0)); // Initiate variables value with 0 hipMemset(d_pDetZ, 0, nDetYMap * sizeof(double)); hipMemset(d_pVolume, 0, nPixX * nPixY * nSlices * sizeof(double)); // X - ray tube initial position double tubeX = 0; double tubeY = 0; double tubeZ = DSD; // Iso - center position double isoY = 0; double isoZ = DDR; // Integration of 2D projection over the whole projections // (S.1.Integration. 
- Liu et al(2017)) // Naive integration o the X coord threadsPerBlock.x = 8; threadsPerBlock.y = 4; threadsPerBlock.z = 8; blockSize.x = (int)ceilf((float)nDetYMap / (threadsPerBlock.x - 1)); blockSize.y = 1; blockSize.z = (int)ceilf((float)nProj / threadsPerBlock.z); int Xk = (int)ceilf((float)nDetXMap / (threadsPerBlock.x - 1)); for (int k = 0; k < Xk; k++) { hipLaunchKernelGGL(img_integration_kernel, blockSize, threadsPerBlock, 0, 0, d_pProj, nDetXMap, nDetYMap, integrateXcoord, 0, k * 9, nProj); } // Naive integration o the Y coord threadsPerBlock.x = 4; threadsPerBlock.y = 8; threadsPerBlock.z = 8; blockSize.x = 1; blockSize.y = (int)ceilf((float)nDetXMap / (threadsPerBlock.y - 1)); blockSize.z = (int)ceilf((float)nProj / threadsPerBlock.z); int Yk = (int)ceilf((float)nDetYMap / (threadsPerBlock.y - 1)); for (int k = 0; k < Yk; k++) { hipLaunchKernelGGL(img_integration_kernel, blockSize, threadsPerBlock, 0, 0, d_pProj, nDetXMap, nDetYMap, integrateYcoord, k * 9, 0, nProj); } double* d_pDetmX_tmp = d_pDetmX + (nDetYMap * (nDetXMap-2)); int projIni, projEnd, nProj2Run; if (idXProj == -1) { projIni = 0; projEnd = nProj; nProj2Run = nProj; } else { projIni = idXProj; projEnd = idXProj + 1; nProj2Run = 1; } // For each projection for (int p = projIni; p < projEnd; p++) { // Get specif tube angle for the projection double theta = h_pTubeAngle[p] * M_PI / 180.0; // Get specif detector angle for the projection double phi = h_pDetAngle[p] * M_PI / 180.0; //printf("Tube angle:%f Det angle:%f\n", theta, phi); // Tube rotation double rtubeY = ((tubeY - isoY)*cos(theta) - (tubeZ - isoZ)*sin(theta)) + isoY; double rtubeZ = ((tubeY - isoY)*sin(theta) + (tubeZ - isoZ)*cos(theta)) + isoZ; //printf("R tube Y:%f R tube Z:%f\n", rtubeY, rtubeZ); // Detector rotation threadsPerBlock.x = maxThreadsPerBlock; threadsPerBlock.y = 1; threadsPerBlock.z = 1; blockSize.x = (nDetYMap / maxThreadsPerBlock) + 1; blockSize.y = 1; blockSize.z = 1; hipLaunchKernelGGL(rot_detector_kernel, blockSize, threadsPerBlock, 0, 0, d_pRdetY, d_pRdetZ, d_pDetY, d_pDetZ, isoY, isoZ, phi, nDetYMap); threadsPerBlock.x = 16; threadsPerBlock.y = 16; threadsPerBlock.z = 1; // For each slice for (int nz = 0; nz < nSlices; nz++) { // Map detector onto XY plane(Inside proj loop in case detector rotates) blockSize.x = (nDetYMap / threadsPerBlock.x) + 1; blockSize.y = (nDetXMap / threadsPerBlock.y) + 1; blockSize.z = 1; hipLaunchKernelGGL(mapDet2Slice_kernel, blockSize, threadsPerBlock, 0, 0, d_pDetmX, d_pDetmY, tubeX, rtubeY, rtubeZ, d_pDetX, d_pRdetY, d_pRdetZ, d_pObjZ, nDetXMap, nDetYMap, nz); // S.2. Interpolation - Liu et al (2017) blockSize.x = (nPixYMap / threadsPerBlock.x) + 1; blockSize.y = (nPixXMap / threadsPerBlock.y) + 1; hipLaunchKernelGGL(bilinear_interpolation_kernel, blockSize, threadsPerBlock, 0, 0, d_sliceI, d_pProj, d_pObjX, d_pObjY, d_pDetmX_tmp, d_pDetmY, nPixXMap, nPixYMap, nDetXMap, nDetYMap, nDetX, nDetY, p); // S.3. Differentiation - Eq. 
24 - Liu et al (2017) blockSize.x = (nPixY / threadsPerBlock.x) + 1; blockSize.y = (nPixX / threadsPerBlock.y) + 1; hipLaunchKernelGGL(differentiation_kernel, blockSize, threadsPerBlock, 0, 0, d_pVolume, d_sliceI, tubeX, rtubeY, rtubeZ, d_pObjX, d_pObjY, d_pObjZ, nPixX, nPixY, nPixXMap, nPixYMap, du, dv, dx, dy, dz, nz); } // Loop end slices } // Loop end Projections // Normalize volume dividing by the number of projections threadsPerBlock.x = 8; threadsPerBlock.y = 8; threadsPerBlock.z = 4; blockSize.x = (nPixY / threadsPerBlock.x) + 1; blockSize.y = (nPixX / threadsPerBlock.y) + 1; blockSize.z = (nSlices / threadsPerBlock.z) + 1; hipLaunchKernelGGL(division_kernel, blockSize, threadsPerBlock, 0, 0, d_pVolume, nPixX, nPixY, nSlices, nProj2Run); hipMemcpy(h_pVolume, d_pVolume, nSlices* nPixX * nPixY * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_pProj); hipFree(d_sliceI); hipFree(d_pVolume); hipFree(d_pDetX); hipFree(d_pDetY); hipFree(d_pDetZ); hipFree(d_pObjX); hipFree(d_pObjY); hipFree(d_pObjZ); hipFree(d_pDetmY); hipFree(d_pDetmX); hipFree(d_pRdetY); hipFree(d_pRdetZ); } int main() { // image voxel density const int nPixX = 1996; // number of voxels const int nPixY = 2457; // number of voxels const int nSlices = 78; // detector panel pixel density const int nDetX = 1664; // number of pixels const int nDetY = 2048; // number of pixels const int nProj = 15; // number of projections const int idXProj = -1; // loop over all projections const double dx = 0.112; // single voxel size (mm) const double dy = 0.112; const double dz = 1.0; const double du = 0.14; // single detector size (mm) const double dv = 0.14; const double DSD = 700; // distance from source to detector (mm) const double DDR = 0.0; // distance from detector to pivot (mm) const double DAG = 25.0; // distance of air gap (mm) const size_t pixVol = nPixX * nPixY * nSlices; const size_t detVol = nDetX * nDetY * nProj; double *h_pVolume = (double*) malloc (pixVol * sizeof(double)); double *h_pProj = (double*) malloc (detVol * sizeof(double)); double *h_pTubeAngle = (double*) malloc (nProj * sizeof(double)); double *h_pDetAngle = (double*) malloc (nProj * sizeof(double)); // tube angles in degrees for (int i = 0; i < nProj; i++) h_pTubeAngle[i] = -7.5 + i * 15.0/nProj; // detector angles in degrees for (int i = 0; i < nProj; i++) h_pDetAngle[i] = -2.1 + i * 4.2/nProj; // random values srand(123); for (size_t i = 0; i < pixVol; i++) h_pVolume[i] = (double)rand() / (double)RAND_MAX; for (size_t i = 0; i < detVol; i++) h_pProj[i] = (double)rand() / (double)RAND_MAX; backprojectionDDb( h_pVolume, h_pProj, h_pTubeAngle, h_pDetAngle, idXProj, nProj, nPixX, nPixY, nSlices, nDetX, nDetY, dx, dy, dz, du, dv, DSD, DDR, DAG); double checkSum = 0; for (size_t i = 0; i < pixVol; i++) checkSum += h_pVolume[i]; printf("checksum = %lf\n", checkSum); free(h_pVolume); free(h_pTubeAngle); free(h_pDetAngle); free(h_pProj); return 0; }
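// --- Hypothetical host-side reference (not in the original source) ---
// The img_integration_kernel launches above are intended to build an integral
// image per projection, i.e. inclusive prefix sums along X and then Y
// (S.1 Integration - Liu et al (2017)). A serial sketch of that intent, useful
// for validating the GPU result on small inputs. The row-major layout
// [py * nPixY + px] matches the kernels above; the function name is illustrative.
static void integral_image_reference(double* img, const int nPixX, const int nPixY)
{
  // inclusive scan along the X coordinate (index py)
  for (int py = 1; py < nPixX; py++)
    for (int px = 0; px < nPixY; px++)
      img[py * nPixY + px] += img[(py - 1) * nPixY + px];

  // inclusive scan along the Y coordinate (index px)
  for (int py = 0; py < nPixX; py++)
    for (int px = 1; px < nPixY; px++)
      img[py * nPixY + px] += img[py * nPixY + px - 1];
}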
extern "C"{ #endif #include "reduce_kernel.h" #include <stdio.h> #define CUDA_1D_KERNEL_LOOP(i,n) \ for (int i=blockIdx.x*blockDim.x + threadIdx.x;i<n; \ i += blockDim.x * gridDim.x) __global__ void SoftmaxForward( const int nthreads, const float * input_data, const int64_t * offsets_data, float * output_data, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int k = index%n_dim; // dim_idx int s = offsets_data[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets_data[sample_idx+1]; // get max float v_max = input_data[s*n_dim+k]; for (int j=s+1;j<e;++j) { if (input_data[j*n_dim+k]>v_max) { v_max = input_data[j*n_dim+k]; } } // subtract max and exp, accumulate sum float sum = 0; for (int j=s;j<e;++j) { output_data[j*n_dim+k] = exp(input_data[j*n_dim+k]-v_max); sum += output_data[j*n_dim+k]; } // divide sum for (int j=s;j<e;++j) { output_data[j*n_dim+k] /= sum; } } } __global__ void ReduceMaxForward( const int nthreads, const float * input, const int64_t * offsets, float * output, int64_t * buffer, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; output[index] = input[s*n_dim+dim_idx]; buffer[index] = s*n_dim+dim_idx; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; if (input[input_idx]>output[index]) { output[index] = input[input_idx]; buffer[index] = input_idx; } } } } __global__ void ReduceMeanForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; output[index] += input[input_idx]; } output[index] = output[index]/(e-s); } } __global__ void ReduceForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; output[index] += input[input_idx]; } } } int SoftmaxForwardLauncher( const float * input_data, const int64_t * offsets_data, float * output_data, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func SoftmaxForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input_data, offsets_data, output_data, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMaxForwardLauncher( const float * input, const int64_t * offsets, float * output, int64_t * buffer, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func 
ReduceMaxForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, buffer, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMeanForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceMeanForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void SoftmaxBackward( const int nthreads, const float * output_grad, const int64_t * offsets_data, const float * output_data, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int k = index%n_dim; // dim_idx int s = offsets_data[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets_data[sample_idx+1]; for (int j=s;j<e;++j) { input_grad[j*n_dim+k] = output_grad[j*n_dim+k]; for (int l=s;l<e;++l) { input_grad[j*n_dim+k] -= output_grad[l*n_dim+k]*output_data[l*n_dim+k]; } input_grad[j*n_dim+k] *= output_data[j*n_dim+k]; } } } __global__ void ReduceMaxBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int64_t * buffer, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; //int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = buffer[index]; input_grad[input_idx] = output_grad[index]; } } } __global__ void ReduceMeanBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; input_grad[input_idx] = output_grad[index]/(e-s); } } } __global__ void ReduceBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = 
offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; input_grad[input_idx] = output_grad[index]; } } } int SoftmaxBackwardLauncher( const float * output_grad, const int64_t * offsets_data, const float * output_data, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func SoftmaxBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets_data, output_data, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMaxBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, const int64_t * buffer, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceMaxBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, buffer, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMeanBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceMeanBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ReplicateForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int o_idx = i*n_dim+dim_idx; output[o_idx] = input[index]; } } } int ReplicateForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReplicateForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { 
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ReplicateBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int o_idx = i*n_dim+dim_idx; input_grad[index] += output_grad[o_idx]; } } } int ReplicateBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReplicateBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
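// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source). A minimal host-side usage
// example for the segmented launchers defined above. The offsets array holds
// the start row of each segment; the last segment runs to n_feat. Note that
// ReduceForward and ReduceMeanForward accumulate into output with "+=", so the
// output buffer has to be zeroed by the caller first. The function name
// example_segmented_mean is hypothetical.
void example_segmented_mean()
{
    const int n_feat = 5, n_dim = 3, n_sample = 2;   // segments: rows [0,2) and [2,5)
    float   h_in[n_feat * n_dim];
    int64_t h_off[n_sample] = {0, 2};
    float   h_out[n_sample * n_dim];
    for (int i = 0; i < n_feat * n_dim; ++i) h_in[i] = 1.0f;

    float *d_in, *d_out; int64_t *d_off;
    cudaMalloc(&d_in,  sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMalloc(&d_off, sizeof(h_off));
    cudaMemcpy(d_in,  h_in,  sizeof(h_in),  cudaMemcpyHostToDevice);
    cudaMemcpy(d_off, h_off, sizeof(h_off), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(h_out));             // required: the kernel accumulates

    ReduceMeanForwardLauncher(d_in, d_off, d_out, n_feat, n_dim, n_sample, 0);

    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    // Each of the 3 output dims per segment now holds that segment's mean (1.0f here).
    cudaFree(d_in); cudaFree(d_out); cudaFree(d_off);
}
// ---------------------------------------------------------------------------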
#include <cuda.h> #include <math_constants.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include <builtin_types.h> #include <vector_types.h> #include <vector_functions.h> #define CHARACTER_MAP_DIGITS_OFFSET (16) #define CHARACTER_MAP_X (56) #define CHARACTER_MAP_SPACE (0) #define CHARACTER_MAP_PLUS (11) #define CHARACTER_MAP_MINUS (13) #define CHARACTER_MAP_DOT (14) #define CHARACTER_MAP_a (65) #define CHARACTER_MAP_E (37) #define CHARACTER_MAP_f (70) #define CHARACTER_MAP_I (41) #define CHARACTER_MAP_N (46) #define CHARACTER_MAP_n (78) #define DATA_TYPE_FLOAT 0 #define DATA_TYPE_INTEGER 1 extern "C" { __constant__ unsigned int* D_CANVAS; __constant__ unsigned int* D_CHARACTER_MAP; __constant__ int D_CHARACTER_MAP_NB_CHARS; __constant__ int* D_VALUES_INTEGER; __constant__ float* D_VALUES_FLOAT; __constant__ int D_NB_VALUES; __constant__ int D_START_COL; __constant__ int D_LENGTH_COL; __constant__ int D_START_ROW; __constant__ int D_LENGTH_ROW; __constant__ int D_TEXTURE_WIDTH; __constant__ int D_TEXTURE_HEIGHT; __constant__ int D_CHARACTER_WIDTH; __constant__ int D_CHARACTER_HEIGHT; __constant__ int D_CHARACTER_SIZE; __constant__ int D_MATRIX_COLS; __constant__ int D_MATRIX_ROWS; __constant__ int D_NB_CHARACTER_PER_BOX; __constant__ int D_NB_DECIMALS; __device__ float roundToNDecimals(float number, int nbDecimals); __device__ void regularValue(int characterId, float value, int& characterMapAddress); __device__ void regularValueNormalDisplay(int characterId, float value, int& characterMapAddress); __device__ void regularValueScientificDisplay(int characterId, float value, int& characterMapAddress); __device__ void nanValue(int characterId, int& characterMapAddress, unsigned int& rgbColor); __device__ void infinityValue(int characterId, float value, int& characterMapAddress, unsigned int& rgbColor); //kernel code __global__ void DrawMatrixKernel(int dataType) { int pixelId = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; if (pixelId >= D_TEXTURE_WIDTH * D_TEXTURE_HEIGHT) return; int boxId = pixelId / (D_NB_CHARACTER_PER_BOX * D_CHARACTER_SIZE); if (boxId >= D_NB_VALUES) // In case where we dont have a perfect rectangle, some boxes must be left empty { // TODO Write spaces return; } int col = D_START_COL + boxId % D_LENGTH_COL; int row = D_START_ROW + boxId / D_LENGTH_COL; int valueIndex = row * D_MATRIX_COLS + col; float value; switch (dataType) { case DATA_TYPE_FLOAT: value = D_VALUES_FLOAT[valueIndex]; break; case DATA_TYPE_INTEGER: value = D_VALUES_INTEGER[valueIndex]; break; default: // Shouldn't happen value = 0; break; } // Find the character id // // Example: for drawing +1.75e2, we need to draw 5 characters: // Syntax + 1 . 
7 5 e 2 _ // Index 0 1 2 3 4 5 6 7 int inBoxPixelId = pixelId % (D_NB_CHARACTER_PER_BOX * D_CHARACTER_SIZE); int characterId = inBoxPixelId / D_CHARACTER_SIZE; int inCharacterPixelId = inBoxPixelId % D_CHARACTER_SIZE; unsigned int rgbColor = 0x0; // Black; // Find the character to draw int characterMapAddress; if (isinf(value)) infinityValue(characterId, value, characterMapAddress, rgbColor); else if (isnan(value)) nanValue(characterId, characterMapAddress, rgbColor); else regularValue(characterId, value, characterMapAddress); if (characterMapAddress < 0) return; int inCharacterPixelPositionX = inCharacterPixelId % D_CHARACTER_WIDTH; int inCharacterPixelPositionY = inCharacterPixelId / D_CHARACTER_WIDTH; int inDigitMapPositionX = characterMapAddress * D_CHARACTER_WIDTH + inCharacterPixelPositionX; int inDigitMapPositionY = inCharacterPixelPositionY; unsigned int characterMapIndex = inDigitMapPositionX + inDigitMapPositionY * (D_CHARACTER_MAP_NB_CHARS * D_CHARACTER_WIDTH); float factor = D_CHARACTER_MAP[characterMapIndex]; unsigned int red = (float)(rgbColor >> 16) * factor + 255.0 * (1.0 - factor); unsigned int green = (float)((rgbColor & 0x00FF00) >> 8) * factor + 255.0 * (1.0 - factor); unsigned int blue = (float)(rgbColor & 0x0000FF) * factor + 255.0 * (1.0 - factor); unsigned int pixelColor = 0xFF000000 + (red << 16) + (green << 8) + (blue); int canvasPositionX = ((boxId % D_LENGTH_COL) * (D_NB_CHARACTER_PER_BOX + 1) + characterId) * D_CHARACTER_WIDTH + inCharacterPixelPositionX; int canvasPositionY = (boxId / D_LENGTH_COL) * D_CHARACTER_HEIGHT + inCharacterPixelPositionY; D_CANVAS[canvasPositionX + canvasPositionY * D_TEXTURE_WIDTH] = pixelColor; } __device__ float roundToNDecimals(float number, int nbDecimals) { float scale = pow((double)10, (double)nbDecimals); return round(number * scale) / scale; } __device__ void regularValue(int characterId, float value, int& characterMapAddress) { if (value != 0) value = roundToNDecimals(value, D_NB_DECIMALS); // 0 is a special value if (abs(value) < pow((double)10, (double)(-D_NB_DECIMALS))) { if (characterId == 1) characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + 0; // 0 else if (D_NB_DECIMALS > 0) { if (characterId == 2) characterMapAddress = CHARACTER_MAP_DOT; else if (characterId > 0 && characterId <= 2 + D_NB_DECIMALS) characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + 0; // 0 else characterMapAddress = CHARACTER_MAP_SPACE; } else { characterMapAddress = CHARACTER_MAP_SPACE; } } else { // Different of 0 if (characterId == 0) // Sign { if (value >= 0) characterMapAddress = CHARACTER_MAP_SPACE; else characterMapAddress = CHARACTER_MAP_MINUS; } else { if (value < 0) value = -value; if (1 + floor(log10(value)) + ((D_NB_DECIMALS > 0) ? 
(D_NB_DECIMALS + 1) : 0) < D_NB_CHARACTER_PER_BOX) { // Enough space to write the plain value regularValueNormalDisplay(characterId, value, characterMapAddress); } else { // Not enough space, we write it in scientific mode regularValueScientificDisplay(characterId, value, characterMapAddress); } } } } __device__ void regularValueNormalDisplay(int characterId, float value, int& characterMapAddress) { int scale = floor(log10(value)); if (scale >= 0) { if (characterId <= scale + 1) { // Integer part int digit = (int)floor((value / pow((double)10, scale - characterId + 1))) % 10; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + digit; // Digit address } else if (characterId > scale + 2) { // Decimal part int decimalNum = characterId - scale - 2; if (decimalNum <= D_NB_DECIMALS) { int digit = (int)floor(value * pow((double)10, decimalNum)) % 10; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + digit; // Digit address } else { characterMapAddress = CHARACTER_MAP_SPACE; } } else { // Dot if (D_NB_DECIMALS > 0) characterMapAddress = CHARACTER_MAP_DOT; else characterMapAddress = CHARACTER_MAP_SPACE; } } else { if (characterId == 1) characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + 0; // 0 else if (characterId == 2) characterMapAddress = CHARACTER_MAP_DOT; else if (characterId - 2 <= D_NB_DECIMALS) { // Decimal part int decimalNum = characterId - 2; if (decimalNum <= D_NB_DECIMALS) { int digit = (int)floor(value * pow((double)10, decimalNum)) % 10; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + digit; // Digit address } else { characterMapAddress = CHARACTER_MAP_SPACE; } } else characterMapAddress = CHARACTER_MAP_SPACE; } } __device__ void regularValueScientificDisplay(int characterId, float value, int& characterMapAddress) { if (D_NB_DECIMALS > 0 && characterId == 2) // Dot { characterMapAddress = CHARACTER_MAP_DOT; } else if (characterId == D_NB_CHARACTER_PER_BOX - 4) // E { characterMapAddress = CHARACTER_MAP_E; } else { int power; if (value != 0) power = floor(log10(value)); else power = 0; if (characterId == D_NB_CHARACTER_PER_BOX - 2) // 1st power digit { if (power < 0) power = -power; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + power / 10; // Digit address } else if (characterId == D_NB_CHARACTER_PER_BOX - 1) // 2nd power digit { if (power < 0) power = -power; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + power % 10; // Digit address } else if (characterId == D_NB_CHARACTER_PER_BOX - 3) // power sign { if (power >= 0) characterMapAddress = CHARACTER_MAP_PLUS; else characterMapAddress = CHARACTER_MAP_MINUS; } else { float number = value / pow((double)10, (double)power); if (characterId == 1) // Mantisse integer { int firstDigit = number; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + firstDigit; // Digit address } else // Mantisse decimal { int digit = (int)(number * pow((double)(10), (double)(characterId - 2))) % 10; characterMapAddress = CHARACTER_MAP_DIGITS_OFFSET + digit; } } } } __device__ void nanValue(int characterId, int& characterMapAddress, unsigned int& rgbColor) { rgbColor = 0xFFFF0000; if (characterId == 0 || characterId == 2) { characterMapAddress = CHARACTER_MAP_N; } else if (characterId == 1) { characterMapAddress = CHARACTER_MAP_a; } else { characterMapAddress = CHARACTER_MAP_SPACE; } } __device__ void infinityValue(int characterId, float value, int& characterMapAddress, unsigned int& rgbColor) { rgbColor = 0xFFFF0000; if (characterId == 0) // Sign { if (value >= 0) characterMapAddress = CHARACTER_MAP_PLUS; else characterMapAddress = 
CHARACTER_MAP_MINUS; } else if (characterId == 1) { characterMapAddress = CHARACTER_MAP_I; } else if (characterId == 2) { characterMapAddress = CHARACTER_MAP_n; } else if (characterId == 3) { characterMapAddress = CHARACTER_MAP_f; } else { characterMapAddress = CHARACTER_MAP_SPACE; } } }
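// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source). DrawMatrixKernel above
// shades each glyph pixel by blending the requested text color with a white
// background, weighted by the coverage factor read from D_CHARACTER_MAP, and
// packs the result as 0xAARRGGBB. The helper below restates that blend on the
// host, with an explicit 0xFF mask on each channel; blendGlyphPixel is a
// hypothetical name.
static inline unsigned int blendGlyphPixel(unsigned int rgbColor, float factor)
{
    // factor == 1 -> pure text color, factor == 0 -> white background.
    unsigned int red   = (float)((rgbColor >> 16) & 0xFF) * factor + 255.0f * (1.0f - factor);
    unsigned int green = (float)((rgbColor >>  8) & 0xFF) * factor + 255.0f * (1.0f - factor);
    unsigned int blue  = (float)( rgbColor        & 0xFF) * factor + 255.0f * (1.0f - factor);
    return 0xFF000000u + (red << 16) + (green << 8) + blue;
}
// Example: blendGlyphPixel(0xFFFF0000, 1.0f) returns 0xFFFF0000 (the red used for NaN/Inf),
// while blendGlyphPixel(0xFFFF0000, 0.0f) returns 0xFFFFFFFF (white).
// ---------------------------------------------------------------------------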
// Use 1024 threads per block, which requires cuda sm_2x or above const int CUDA_NUM_THREADS = 512; const int BLOCK_SIZE_LIMIT = 32768; typedef unsigned long long int DegreeType; // CUDA: number of blocks for threads. inline int GET_BLOCKS(const int N) { int ret = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; return (ret > BLOCK_SIZE_LIMIT) ? BLOCK_SIZE_LIMIT : ret; } __global__ void load_kernel(V_ID nv, Vertex* old_pr_fb, const Vertex* old_pr_zc) { for (V_ID i = blockIdx.x * blockDim.x + threadIdx.x; i < nv; i+= blockDim.x * gridDim.x) { old_pr_fb[i] = old_pr_zc[i]; } } __device__ __forceinline__ void process_edge_dense(const EdgeStruct* colIdxs, Vertex* myNewPrFb, E_ID colIdx, V_ID myRowLeft, Vertex newLabel) { EdgeStruct dst = cub::ThreadLoad<cub::LOAD_CG>(colIdxs + colIdx); Vertex oldLabel = cub::ThreadLoad<cub::LOAD_CG>(myNewPrFb + dst - myRowLeft); if (newLabel > oldLabel) { //atomicMin(&myNewPrFb[dst - myRowLeft], newLabel); atomicMax(myNewPrFb + dst - myRowLeft, newLabel); } } __device__ __forceinline__ bool process_edge_sparse(const EdgeStruct* colIdxs, Vertex* myOldPrFb, Vertex* myNewPrFb, E_ID colIdx, V_ID myRowLeft, Vertex newLabel, V_ID &dstVtx) { EdgeStruct es = cub::ThreadLoad<cub::LOAD_CG>(colIdxs + colIdx); dstVtx = es; Vertex oldLabel = cub::ThreadLoad<cub::LOAD_CG>(myNewPrFb + dstVtx - myRowLeft); if (newLabel > oldLabel) { Vertex lastLabel = cub::ThreadLoad<cub::LOAD_CG>(myOldPrFb + dstVtx - myRowLeft); Vertex actOldLabel = atomicMax(myNewPrFb + dstVtx - myRowLeft, newLabel); if (actOldLabel == lastLabel) return true; } return false; } __global__ void cc_pull_kernel(V_ID rowLeft, V_ID rowRight, E_ID colLeft, const NodeStruct* row_ptrs, const EdgeStruct2* col_idxs, Vertex* old_pr_fb, Vertex* new_pr_fb) { typedef cub::BlockScan<E_ID, CUDA_NUM_THREADS> BlockScan; __shared__ BlockScan::TempStorage temp_storage; __shared__ E_ID blkColStart; for (V_ID blkRowStart = blockIdx.x * blockDim.x + rowLeft; blkRowStart <= rowRight; blkRowStart += blockDim.x * gridDim.x) { E_ID myNumEdges = 0, scratchOffset, totalNumEdges = 0; V_ID curVtx = blkRowStart + threadIdx.x; if (curVtx <= rowRight) { NodeStruct ns = row_ptrs[curVtx - rowLeft]; E_ID start_col_idx, end_col_idx = ns.index; if (curVtx == rowLeft) start_col_idx = colLeft; else start_col_idx = row_ptrs[curVtx - rowLeft - 1].index; myNumEdges = end_col_idx - start_col_idx; if (threadIdx.x == 0) blkColStart = start_col_idx; new_pr_fb[curVtx - rowLeft] = old_pr_fb[curVtx]; } __syncthreads(); BlockScan(temp_storage).ExclusiveSum(myNumEdges, scratchOffset, totalNumEdges); E_ID done = 0; while (totalNumEdges > 0) { if (threadIdx.x < totalNumEdges) { EdgeStruct2 es = col_idxs[blkColStart + done + threadIdx.x - colLeft]; Vertex srcLabel = old_pr_fb[es.src]; atomicMax(new_pr_fb + es.dst - rowLeft, srcLabel); } done += CUDA_NUM_THREADS; totalNumEdges -= (totalNumEdges > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : totalNumEdges; } __syncthreads(); } } __global__ void cc_push_kernel(V_ID inRowLeft, V_ID inRowRight, V_ID myRowLeft, E_ID colLeft, const NodeStruct* row_ptrs, const EdgeStruct* col_idxs, char* old_fq_fb, char* new_fq_fb, const Vertex* in_old_pr_zc, Vertex* my_old_pr_fb, Vertex* my_new_pr_fb, bool oldDense, bool newDense, V_ID maxNumNodes) { typedef cub::BlockScan<E_ID, CUDA_NUM_THREADS> BlockScan; __shared__ BlockScan::TempStorage temp_storage; __shared__ Vertex srcLabels[CUDA_NUM_THREADS]; __shared__ E_ID offset[CUDA_NUM_THREADS], edgeOffset[CUDA_NUM_THREADS]; __shared__ int queueIdx; char *oldBitmap = NULL; V_ID *oldQueue = NULL, *newQueue = NULL; V_ID *numNodes = NULL; if (!newDense) { FrontierHeader* header = (FrontierHeader*) new_fq_fb; numNodes = &(header->numNodes); newQueue = (V_ID*)(new_fq_fb + sizeof(FrontierHeader)); } if (oldDense) oldBitmap = old_fq_fb + sizeof(FrontierHeader); else oldQueue = (V_ID*)(old_fq_fb + sizeof(FrontierHeader)); for (V_ID blkRowStart = blockIdx.x * blockDim.x + inRowLeft; blkRowStart <= inRowRight; blkRowStart += blockDim.x * gridDim.x) { E_ID myOffset = 0, myNumEdges = 0, scratchOffset, totalNumEdges = 0; V_ID curIdx = blkRowStart + threadIdx.x; if (curIdx <= inRowRight) { V_ID curVtx = (oldDense) ? curIdx : oldQueue[curIdx]; NodeStruct ns = row_ptrs[curVtx]; E_ID start_col_idx, end_col_idx = ns.index; if (curVtx == 0) start_col_idx = colLeft; else start_col_idx = row_ptrs[curVtx - 1].index; if (oldDense) { V_ID pos = (curVtx - inRowLeft) / 8; V_ID off = (curVtx - inRowLeft) % 8; if (oldBitmap[pos] & (1<<off)) { myNumEdges = end_col_idx - start_col_idx; myOffset = start_col_idx; srcLabels[threadIdx.x] = in_old_pr_zc[curVtx]; } } else { myNumEdges = end_col_idx - start_col_idx; myOffset = start_col_idx; srcLabels[threadIdx.x] = in_old_pr_zc[curVtx]; } } __syncthreads(); BlockScan(temp_storage).ExclusiveSum(myNumEdges, scratchOffset, totalNumEdges); offset[threadIdx.x] = scratchOffset; edgeOffset[threadIdx.x] = myOffset; __syncthreads(); E_ID done = 0; int srcIdx = 0; if (newDense) { while (totalNumEdges > 0) { if (threadIdx.x < totalNumEdges) { while (srcIdx + 1 < CUDA_NUM_THREADS && done + threadIdx.x >= offset[srcIdx + 1]) srcIdx ++; E_ID colIdx = edgeOffset[srcIdx] + done + threadIdx.x - offset[srcIdx] - colLeft; process_edge_dense(col_idxs, my_new_pr_fb, colIdx, myRowLeft, srcLabels[srcIdx]); } done += CUDA_NUM_THREADS; totalNumEdges -= (totalNumEdges > CUDA_NUM_THREADS) ? CUDA_NUM_THREADS : totalNumEdges; } } else { while (totalNumEdges > 0) { E_ID myCnt = 0, myOffset, totalCnt; V_ID dstVtx; __syncthreads(); if (threadIdx.x < totalNumEdges) { while (srcIdx + 1 < CUDA_NUM_THREADS && done + threadIdx.x >= offset[srcIdx + 1]) srcIdx ++; E_ID colIdx = edgeOffset[srcIdx] + done + threadIdx.x - offset[srcIdx] - colLeft; if (process_edge_sparse(col_idxs, my_old_pr_fb, my_new_pr_fb, colIdx, myRowLeft, srcLabels[srcIdx], dstVtx)) { myCnt = 1; } } __syncthreads(); BlockScan(temp_storage).ExclusiveSum(myCnt, myOffset, totalCnt); if (threadIdx.x == 0) { queueIdx = atomicAdd(numNodes, (V_ID)totalCnt); } __syncthreads(); if (myCnt == 1) { if (queueIdx + myOffset < maxNumNodes) newQueue[queueIdx + myOffset] = dstVtx; } done += CUDA_NUM_THREADS; totalNumEdges -= (totalNumEdges > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : totalNumEdges; } } } } __global__ void bitmap_kernel(V_ID rowLeft, V_ID rowRight, char* new_fq_fb, Vertex* old_pr_fb, Vertex* new_pr_fb) { typedef cub::BlockScan<V_ID, CUDA_NUM_THREADS> BlockScan; __shared__ BlockScan::TempStorage temp_storage; FrontierHeader* header = (FrontierHeader*) new_fq_fb; char* bitmap = new_fq_fb + sizeof(FrontierHeader); V_ID *numNodes = &(header->numNodes); for (V_ID blkStart = blockIdx.x * blockDim.x; blkStart * 8 + rowLeft <= rowRight; blkStart += blockDim.x * gridDim.x) { V_ID idx = blkStart + threadIdx.x; char bit = 0; V_ID cnt = 0, totalCnt = 0; for (int i = 0; i < 8; i ++) { V_ID curVtx = idx * 8 + rowLeft + i; if (curVtx <= rowRight) if (old_pr_fb[curVtx - rowLeft] != new_pr_fb[curVtx - rowLeft]) { bit = bit | (1 << i); cnt = cnt + 1; } } bitmap[idx] = bit; __syncthreads(); BlockScan(temp_storage).ExclusiveSum(cnt, cnt, totalCnt); if (threadIdx.x == 0) atomicAdd(numNodes, totalCnt); __syncthreads(); } } __global__ void convert_d2s_kernel(V_ID rowLeft, V_ID rowRight, char* old_fq_fb, char* new_fq_fb) { typedef cub::BlockScan<V_ID, CUDA_NUM_THREADS> BlockScan; __shared__ BlockScan::TempStorage temp_storage; __shared__ V_ID queueIdx; const char* oldBitmap = (const char*) old_fq_fb + sizeof(FrontierHeader); V_ID* newQueue = (V_ID*)(new_fq_fb + sizeof(FrontierHeader)); FrontierHeader* header = (FrontierHeader*) new_fq_fb; V_ID* numNodes = &(header->numNodes); for (V_ID blkRowStart = blockIdx.x * blockDim.x + rowLeft; blkRowStart <= rowRight; blkRowStart += blockDim.x * gridDim.x) { V_ID curVtx = blkRowStart + threadIdx.x; V_ID cnt = 0, offset, totalCnt; if (curVtx <= rowRight) { V_ID pos = (curVtx - rowLeft) / 8; V_ID off = (curVtx - rowLeft) % 8; if (oldBitmap[pos] & (1<<off)) cnt = 1; } __syncthreads(); BlockScan(temp_storage).ExclusiveSum(cnt, offset, totalCnt); if (threadIdx.x == 0) queueIdx = atomicAdd(numNodes, totalCnt); __syncthreads(); if (cnt == 1) newQueue[queueIdx + offset] = curVtx; } } __global__ void copy_kernel(V_ID numNodes, V_ID rowLeft, char* new_fq_fb, Vertex* new_pr_fb, Vertex* new_pr_zc) { V_ID* newQueue = (V_ID*)(new_fq_fb + sizeof(FrontierHeader)); for (V_ID idx = blockIdx.x * blockDim.x + threadIdx.x; idx < numNodes; idx += blockDim.x * gridDim.x) { if (idx < numNodes) { V_ID curVtx = newQueue[idx]; new_pr_zc[curVtx - rowLeft] = new_pr_fb[curVtx - rowLeft]; } } } V_ID push_app_task_impl(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 8); assert(task->regions.size() == 8); const Graph* graph = (Graph*) task->args; const GraphPiece *piece = (GraphPiece*) task->local_args; const AccessorRO<NodeStruct, 1> acc_pull_row_ptr(regions[0], FID_DATA); const AccessorRO<EdgeStruct2, 1> acc_pull_col_idx(regions[1], FID_DATA); const AccessorRO<NodeStruct, 1> acc_push_row_ptr(regions[2], FID_DATA); const AccessorRO<EdgeStruct, 1> acc_push_col_idx(regions[3], FID_DATA); const AccessorRO<char, 1> acc_old_fq(regions[4], FID_DATA); const AccessorWO<char, 1> acc_new_fq(regions[5], FID_DATA); const AccessorRO<Vertex, 1> acc_old_pr(regions[6], FID_DATA); const AccessorWO<Vertex, 1> acc_new_pr(regions[7], FID_DATA); Rect<1> rect_pull_row_ptr = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<1> rect_pull_col_idx = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<1> rect_push_row_ptr = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<1> rect_push_col_idx = 
runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<1> rect_old_fq = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); Rect<1> rect_new_fq = runtime->get_index_space_domain( ctx, task->regions[5].region.get_index_space()); Rect<1> rect_old_pr = runtime->get_index_space_domain( ctx, task->regions[6].region.get_index_space()); Rect<1> rect_new_pr = runtime->get_index_space_domain( ctx, task->regions[7].region.get_index_space()); assert(acc_pull_row_ptr.accessor.is_dense_arbitrary(rect_pull_row_ptr)); assert(acc_pull_col_idx.accessor.is_dense_arbitrary(rect_pull_col_idx)); assert(acc_push_row_ptr.accessor.is_dense_arbitrary(rect_push_row_ptr)); assert(acc_push_col_idx.accessor.is_dense_arbitrary(rect_push_col_idx)); assert(acc_old_fq.accessor.is_dense_arbitrary(rect_old_fq)); assert(acc_new_fq.accessor.is_dense_arbitrary(rect_new_fq)); assert(acc_old_pr.accessor.is_dense_arbitrary(rect_old_pr)); assert(acc_new_pr.accessor.is_dense_arbitrary(rect_new_pr)); assert(rect_push_col_idx == rect_pull_col_idx); const NodeStruct* pull_row_ptrs = acc_pull_row_ptr.ptr(rect_pull_row_ptr); const EdgeStruct2* pull_col_idxs = acc_pull_col_idx.ptr(rect_pull_col_idx); const NodeStruct* push_row_ptrs = acc_push_row_ptr.ptr(rect_push_row_ptr); const EdgeStruct* push_col_idxs = acc_push_col_idx.ptr(rect_push_col_idx); const char* old_fq = acc_old_fq.ptr(rect_old_fq); char* new_fq = acc_new_fq.ptr(rect_new_fq); const Vertex* old_pr = acc_old_pr.ptr(rect_old_pr); Vertex* new_pr = acc_new_pr.ptr(rect_new_pr); V_ID rowLeft = rect_new_pr.lo[0], rowRight = rect_new_pr.hi[0]; E_ID colLeft = rect_pull_col_idx.lo[0], colRight = rect_pull_col_idx.hi[0]; V_ID fqLeft = rect_new_fq.lo[0], fqRight = rect_new_fq.hi[0]; double ts_start = Realm::Clock::current_time_in_microseconds(); // Copy piece->newPrFb to piece->oldPrFb by swaping the two pointers checkCUDA(cudaMemcpy(piece->oldPrFb, piece->newPrFb, sizeof(Vertex) * (rowRight - rowLeft + 1), cudaMemcpyDeviceToDevice)); // Decide whether we should use sparse/dense frontier int denseParts = 0, sparseParts = 0; V_ID oldFqSize = 0; for (int i = 0; i < graph->numParts; i++) { FrontierHeader* header = (FrontierHeader*)(old_fq + graph->fqLeft[i]); oldFqSize += header->numNodes; if (header->type == FrontierHeader::DENSE_BITMAP) denseParts ++; else if (header->type == FrontierHeader::SPARSE_QUEUE) sparseParts ++; else assert(false); } FrontierHeader* newFqHeader = (FrontierHeader*) new_fq; bool denseFq = (denseParts >= sparseParts) ? 
true : false; assert((fqRight - fqLeft + 1 - sizeof(FrontierHeader)) % sizeof(V_ID) == 0); V_ID maxNumNodes = (fqRight - fqLeft + 1 - sizeof(FrontierHeader)) / sizeof(V_ID); // Initialize new frontier queue checkCUDA(cudaMemset(piece->newFqFb, 0, sizeof(FrontierHeader))); double cp0 = Realm::Clock::current_time_in_microseconds(); if (oldFqSize > graph->nv / 16) { // If oldFqSize too large, use pull model denseFq = true; // Always use dense frontier queue for pull model load_kernel<<<GET_BLOCKS(graph->nv), CUDA_NUM_THREADS>>>( graph->nv, piece->oldAllPrFb, old_pr); cc_pull_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>( rowLeft, rowRight, colLeft, pull_row_ptrs, pull_col_idxs, piece->oldAllPrFb, piece->newPrFb); } else { // Otherwise use push model for (int i = 0; i < graph->numParts; i ++) { FrontierHeader* old_header = (FrontierHeader*)(old_fq + graph->fqLeft[i]); if (old_header->type == FrontierHeader::DENSE_BITMAP) { checkCUDA(cudaMemcpyAsync(piece->oldFqFb + graph->fqLeft[i], old_fq + graph->fqLeft[i], (graph->rowRight[i] - graph->rowLeft[i]) / 8 + 1 + sizeof(FrontierHeader), cudaMemcpyHostToDevice, piece->streams[i])); int numBlocks = GET_BLOCKS(graph->rowRight[i] - graph->rowLeft[i] + 1); //printf("push_row_ptrs(%llx) push_col_idxs(%llx) oldFqFb(%llx) newFqFb(%llx) oldPrFb(%llx) newPrFb(%llx) oldPrZC(%llx) oldFqZC(%llx)\n", // push_row_ptrs, push_col_idxs, piece->oldFqFb+graph->fqLeft[i], piece->newFqFb, piece->oldPrFb, piece->newPrFb, old_pr, old_fq); cc_push_kernel<<<numBlocks, CUDA_NUM_THREADS, 0, piece->streams[i]>>>( graph->rowLeft[i], graph->rowRight[i], rowLeft, colLeft, push_row_ptrs, push_col_idxs, piece->oldFqFb + graph->fqLeft[i], piece->newFqFb, old_pr, piece->oldPrFb, piece->newPrFb, true/*old_dense*/, denseFq, maxNumNodes); } else if (old_header->type == FrontierHeader::SPARSE_QUEUE) { checkCUDA(cudaMemcpyAsync(piece->oldFqFb + graph->fqLeft[i], old_fq + graph->fqLeft[i], old_header->numNodes * sizeof(V_ID) + sizeof(FrontierHeader), cudaMemcpyHostToDevice, piece->streams[i])); int numBlocks = GET_BLOCKS(old_header->numNodes); // Avoid launching empty kernel if (numBlocks > 0) { cc_push_kernel<<<numBlocks, CUDA_NUM_THREADS, 0, piece->streams[i]>>>( 0, old_header->numNodes - 1, rowLeft, colLeft, push_row_ptrs, push_col_idxs, piece->oldFqFb + graph->fqLeft[i], piece->newFqFb, old_pr, piece->oldPrFb, piece->newPrFb, false/*old_dense*/, denseFq, maxNumNodes); } } else { // Must be either dense or sparse frontier queue assert(false); } } }// else if checkCUDA(cudaDeviceSynchronize()); double cp1 = Realm::Clock::current_time_in_microseconds(); if (denseFq) { int numBlocks = GET_BLOCKS((rowRight - rowLeft) / 8 + 1); bitmap_kernel<<<numBlocks, CUDA_NUM_THREADS>>>( rowLeft, rowRight, piece->newFqFb, piece->oldPrFb, piece->newPrFb); checkCUDA(cudaDeviceSynchronize()); checkCUDA(cudaMemcpy(newFqHeader, piece->newFqFb, sizeof(FrontierHeader), cudaMemcpyDeviceToHost)); if (newFqHeader->numNodes < maxNumNodes) { // copy piece->newFqFb to piece->oldFqFb checkCUDA(cudaMemcpy(piece->oldFqFb, piece->newFqFb, fqRight - fqLeft + 1, cudaMemcpyDeviceToDevice)); checkCUDA(cudaMemset(piece->newFqFb, 0, sizeof(FrontierHeader))); numBlocks = GET_BLOCKS(rowRight - rowLeft + 1); denseFq = false; convert_d2s_kernel<<<numBlocks, CUDA_NUM_THREADS>>>( rowLeft, rowRight, piece->oldFqFb, piece->newFqFb); checkCUDA(cudaMemcpy(newFqHeader, piece->newFqFb, sizeof(FrontierHeader), cudaMemcpyDeviceToHost)); } } else { checkCUDA(cudaMemcpy(newFqHeader, piece->newFqFb, sizeof(FrontierHeader), 
cudaMemcpyDeviceToHost)); if (newFqHeader->numNodes >= maxNumNodes) { denseFq = true; int numBlocks = GET_BLOCKS((rowRight - rowLeft) / 8 + 1); bitmap_kernel<<<numBlocks, CUDA_NUM_THREADS>>>( rowLeft, rowRight, piece->newFqFb, piece->oldPrFb, piece->newPrFb); } } // Copy piece->newFqFb to new_fq // Copy piece->newPrFb to new_pr checkCUDA(cudaDeviceSynchronize()); double cp2 = Realm::Clock::current_time_in_microseconds(); if (denseFq) { checkCUDA(cudaMemcpy(new_fq, piece->newFqFb, rect_new_fq.hi[0] - rect_new_fq.lo[0] + 1, cudaMemcpyDeviceToHost)); checkCUDA(cudaMemcpy(new_pr, piece->newPrFb, (rowRight - rowLeft + 1) * sizeof(Vertex), cudaMemcpyDeviceToHost)); } else { checkCUDA(cudaMemcpy(new_fq + sizeof(FrontierHeader), piece->newFqFb + sizeof(FrontierHeader), sizeof(Vertex) * newFqHeader->numNodes, cudaMemcpyDeviceToHost)); checkCUDA(cudaMemcpy(new_pr, piece->newPrFb, (rowRight - rowLeft + 1) * sizeof(Vertex), cudaMemcpyDeviceToHost)); } checkCUDA(cudaDeviceSynchronize()); double ts_end = Realm::Clock::current_time_in_microseconds(); newFqHeader->type = denseFq ? FrontierHeader::DENSE_BITMAP : FrontierHeader::SPARSE_QUEUE; if (graph->verbose) printf("rowLeft(%u) activeNodes(%u) loadTime(%.0lf) compTime(%.0lf) updateTime(%.0lf)\n", rowLeft, newFqHeader->numNodes, cp0 - ts_start, cp1 - cp0, ts_end - cp1); //for (V_ID n = 0; n < 5; n++) printf("oldPr[%u]: %u\n", n + rowLeft, old_pr[n + rowLeft]); //for (V_ID n = 0; n < 5; n++) printf("newPr[%u]: %u\n", n + rowLeft, new_pr[n]); return newFqHeader->numNodes; } __global__ void init_pull_kernel(V_ID rowLeft, V_ID rowRight, E_ID colLeft, NodeStruct* pull_row_ptrs, EdgeStruct2* pull_col_idxs, const E_ID* raw_rows, const V_ID* raw_cols) { for (V_ID n = blockIdx.x * blockDim.x + threadIdx.x; n + rowLeft <= rowRight; n += blockDim.x * gridDim.x) { E_ID startColIdx, endColIdx = raw_rows[n]; if (n == 0) startColIdx = colLeft; else startColIdx = raw_rows[n - 1]; pull_row_ptrs[n].index = endColIdx; for (E_ID e = startColIdx; e < endColIdx; e++) { pull_col_idxs[e - colLeft].src = raw_cols[e - colLeft]; pull_col_idxs[e - colLeft].dst = n + rowLeft; } } } __global__ void init_push_row_ptrs(V_ID nv, E_ID colLeft, DegreeType* outDegrees, NodeStruct* push_row_ptrs) { for (V_ID n = blockIdx.x * blockDim.x + threadIdx.x; n == 0; n += blockDim.x * gridDim.x) { E_ID partialSum = colLeft; for (V_ID i = 0; i < nv; i++) { partialSum += outDegrees[i]; push_row_ptrs[i].index = partialSum; } } } __global__ void init_push_col_idxs(V_ID rowLeft, V_ID rowRight, E_ID colLeft, DegreeType* outDegrees, NodeStruct* push_row_ptrs, EdgeStruct* push_col_idxs, const E_ID* raw_rows, const V_ID* raw_cols) { for (V_ID n = blockIdx.x * blockDim.x + threadIdx.x; n + rowLeft <= rowRight; n += blockDim.x * gridDim.x) { E_ID startColIdx, endColIdx = raw_rows[n]; if (n == 0) startColIdx = colLeft; else startColIdx = raw_rows[n - 1]; for (E_ID e = startColIdx; e < endColIdx; e++) { V_ID src = raw_cols[e - colLeft]; E_ID colIdx = (src == 0) ? 
0 : push_row_ptrs[src-1].index; E_ID offset = atomicAdd(outDegrees + src, 1); push_col_idxs[colIdx + offset - colLeft] = n + rowLeft; } } } __global__ void init_push_kernel(E_ID colLeft, E_ID colRight, DegreeType* outDegree, const V_ID* raw_cols) { for (E_ID n = blockIdx.x * blockDim.x + threadIdx.x; n + colLeft <= colRight; n += blockDim.x * gridDim.x) { V_ID src = raw_cols[n]; atomicAdd(outDegree + src, 1); } } //static inline bool compareLess(const EdgeStruct2& a, const EdgeStruct2& b) //{ // return a.src < b.src; //} GraphPiece push_init_task_impl(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 8); assert(task->regions.size() == 8); const Graph *graph = (Graph*) task->args; const AccessorWO<NodeStruct, 1> acc_pull_row_ptr(regions[0], FID_DATA); const AccessorWO<EdgeStruct2, 1> acc_pull_col_idx(regions[1], FID_DATA); const AccessorWO<NodeStruct, 1> acc_push_row_ptr(regions[2], FID_DATA); const AccessorWO<EdgeStruct, 1> acc_push_col_idx(regions[3], FID_DATA); const AccessorWO<char, 1> acc_frontier(regions[4], FID_DATA); const AccessorWO<Vertex, 1> acc_new_pr(regions[5], FID_DATA); const AccessorRO<E_ID, 1> acc_raw_rows(regions[6], FID_DATA); const AccessorRO<V_ID, 1> acc_raw_cols(regions[7], FID_DATA); Rect<1> rect_pull_row_ptr = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); Rect<1> rect_pull_col_idx = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); Rect<1> rect_push_row_ptr = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<1> rect_push_col_idx = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<1> rect_frontier = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); Rect<1> rect_new_pr = runtime->get_index_space_domain( ctx, task->regions[5].region.get_index_space()); Rect<1> rect_raw_rows = runtime->get_index_space_domain( ctx, task->regions[6].region.get_index_space()); Rect<1> rect_raw_cols = runtime->get_index_space_domain( ctx, task->regions[7].region.get_index_space()); assert(graph->nv == rect_push_row_ptr.hi[0] - rect_push_row_ptr.lo[0] + 1); assert(acc_pull_row_ptr.accessor.is_dense_arbitrary(rect_pull_row_ptr)); assert(acc_pull_col_idx.accessor.is_dense_arbitrary(rect_pull_col_idx)); assert(acc_push_row_ptr.accessor.is_dense_arbitrary(rect_push_row_ptr)); assert(acc_push_col_idx.accessor.is_dense_arbitrary(rect_push_col_idx)); assert(acc_frontier.accessor.is_dense_arbitrary(rect_frontier)); assert(acc_new_pr.accessor.is_dense_arbitrary(rect_new_pr)); assert(acc_raw_rows.accessor.is_dense_arbitrary(rect_raw_rows)); assert(acc_raw_cols.accessor.is_dense_arbitrary(rect_raw_cols)); assert(rect_pull_col_idx == rect_push_col_idx); NodeStruct* pull_row_ptrs = acc_pull_row_ptr.ptr(rect_pull_row_ptr); EdgeStruct2* pull_col_idxs = acc_pull_col_idx.ptr(rect_pull_col_idx); NodeStruct* push_row_ptrs = acc_push_row_ptr.ptr(rect_push_row_ptr); EdgeStruct* push_col_idxs = acc_push_col_idx.ptr(rect_push_col_idx); char* frontier = acc_frontier.ptr(rect_frontier); Vertex* new_pr = acc_new_pr.ptr(rect_new_pr); const E_ID* raw_rows = acc_raw_rows.ptr(rect_raw_rows); const V_ID* raw_cols = acc_raw_cols.ptr(rect_raw_cols); V_ID rowLeft = rect_raw_rows.lo[0], rowRight = rect_raw_rows.hi[0]; E_ID colLeft = rect_pull_col_idx.lo[0], colRight = rect_pull_col_idx.hi[0]; // Init pull_row_ptrs and pull_col_idxs init_pull_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), 
CUDA_NUM_THREADS>>>( rowLeft, rowRight, colLeft, pull_row_ptrs, pull_col_idxs, raw_rows, raw_cols); checkCUDA(cudaDeviceSynchronize()); // Init push_row_ptrs and push_col_idxs std::set<Memory> memFB; regions[0].get_memories(memFB); assert(memFB.size() == 1); assert(memFB.begin()->kind() == Memory::GPU_FB_MEM); Realm::MemoryImpl* memImpl = Realm::get_runtime()->get_memory_impl(*memFB.begin()); Realm::Cuda::GPUFBMemory* memFBImpl = (Realm::Cuda::GPUFBMemory*) memImpl; off_t offset = memFBImpl->alloc_bytes(sizeof(DegreeType) * graph->nv); assert(offset >= 0); DegreeType* outDegrees = (DegreeType*) memFBImpl->get_direct_ptr(offset, 0); checkCUDA(cudaMemset(outDegrees, 0, sizeof(DegreeType) * graph->nv)); init_push_kernel<<<GET_BLOCKS(colRight - colLeft + 1), CUDA_NUM_THREADS>>>( colLeft, colRight, outDegrees, raw_cols); init_push_row_ptrs<<<GET_BLOCKS(1), CUDA_NUM_THREADS>>>( graph->nv, colLeft, outDegrees, push_row_ptrs); checkCUDA(cudaDeviceSynchronize()); checkCUDA(cudaMemset(outDegrees, 0, sizeof(DegreeType) * graph->nv)); init_push_col_idxs<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>( rowLeft, rowRight, colLeft, outDegrees, push_row_ptrs, push_col_idxs, raw_rows, raw_cols); memFBImpl->free_bytes(offset, sizeof(DegreeType) * graph->nv); //std::vector<EdgeStruct2> edges(colRight - colLeft + 1); //E_ID startColIdx = colLeft; //for (V_ID n = rowLeft; n <= rowRight; n++) { // E_ID endColIdx = raw_rows[n - rowLeft]; // for (E_ID e = startColIdx; e < endColIdx; e++) { // edges[e - colLeft].src = raw_cols[e - colLeft]; // edges[e - colLeft].dst = n; // } // startColIdx = endColIdx; //} //std::sort(edges.begin(), edges.end(), compareLess); // Allocate nodes on the same memory as new_pr //std::set<Memory> memZC; //regions[5].get_memories(memZC); //assert(memZC.size() == 1); //assert(memZC.begin()->kind() == Memory::Z_COPY_MEM); //Realm::MemoryImpl* memImpl = // Realm::get_runtime()->get_memory_impl(*memZC.begin()); //Realm::Cuda::GPUZCMemory* memZCImpl = (Realm::Cuda::GPUZCMemory*) memImpl; //off_t offset = memZCImpl->alloc_bytes(sizeof(NodeStruct) * graph->nv); //assert(offset >= 0); //NodeStruct* nodes = (NodeStruct*) memZCImpl->get_direct_ptr(offset, 0); //off_t offset2 = memZCImpl->alloc_bytes(sizeof(EdgeStruct) * (colRight - colLeft + 1)); //assert(offset2 >= 0); //EdgeStruct* dsts = (EdgeStruct*) memZCImpl->get_direct_ptr(offset2, 0); //E_ID cur = colLeft; //for (V_ID n = 0; n < graph->nv; n++) { // while ((cur <= colRight ) && (edges[cur - colLeft].src <= n)) // cur ++; // nodes[n].index = cur; //} //checkCUDA(cudaMemcpy(push_row_ptrs, nodes, sizeof(NodeStruct) * graph->nv, // cudaMemcpyHostToDevice)); //for (E_ID e = colLeft; e <= colRight; e++) // dsts[e - colLeft] = edges[e - colLeft].dst; //checkCUDA(cudaMemcpy(push_col_idxs, dsts, // sizeof(EdgeStruct) * (colRight - colLeft + 1), // cudaMemcpyHostToDevice)); //memZCImpl->free_bytes(offset, sizeof(NodeStruct) * graph->nv); //memZCImpl->free_bytes(offset2, sizeof(EdgeStruct) * (colRight - colLeft + 1)); FrontierHeader* header = (FrontierHeader*) frontier; header->type = FrontierHeader::DENSE_BITMAP; header->numNodes = rowRight - rowLeft + 1; char* bitmap = frontier + sizeof(FrontierHeader); memset(bitmap, 0xFF, (rowRight - rowLeft) / 8 + 1); for (V_ID n = rowLeft; n <= rowRight; n++) new_pr[n - rowLeft] = n; GraphPiece piece; piece.nv = graph->nv; piece.ne = graph->ne; // Allocate oldPrFb/newPrFb on the same memory as row_ptr offset = memFBImpl->alloc_bytes(sizeof(Vertex) * (rowRight - rowLeft + 1)); assert(offset >= 0); 
piece.oldPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0); offset = memFBImpl->alloc_bytes(sizeof(Vertex) * (rowRight - rowLeft + 1)); assert(offset >= 0); piece.newPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0); offset = memFBImpl->alloc_bytes(graph->frontierSize); assert(offset >= 0); piece.oldFqFb = (char*) memFBImpl->get_direct_ptr(offset, 0); offset = memFBImpl->alloc_bytes(rect_frontier.hi[0] - rect_frontier.lo[0] + 1); assert(offset >= 0); piece.newFqFb = (char*) memFBImpl->get_direct_ptr(offset, 0); offset = memFBImpl->alloc_bytes(sizeof(Vertex) * graph->nv); assert(offset >= 0); piece.oldAllPrFb = (Vertex*) memFBImpl->get_direct_ptr(offset, 0); // Initialize newPrFb checkCUDA(cudaMemcpy(piece.newPrFb, new_pr, sizeof(Vertex) * (rowRight - rowLeft + 1), cudaMemcpyHostToDevice)); for (int i = 0; i < graph->numParts; i++) checkCUDA(cudaStreamCreate(&(piece.streams[i]))); return piece; } __global__ void check_kernel(V_ID rowLeft, V_ID rowRight, E_ID colLeft, const NodeStruct* pull_row_ptrs, const EdgeStruct2* pull_col_idxs, const Vertex* labels, V_ID* numMistakes) { for (V_ID n = blockIdx.x * blockDim.x + threadIdx.x; n + rowLeft <= rowRight; n += blockDim.x * gridDim.x) { E_ID startColIdx, endColIdx = pull_row_ptrs[n].index; V_ID dstVtx = n + rowLeft; if (n == 0) startColIdx = colLeft; else startColIdx = pull_row_ptrs[n - 1].index; for (E_ID e = startColIdx; e < endColIdx; e++) { V_ID srcVtx = pull_col_idxs[e - colLeft].src; if (labels[dstVtx] < labels[srcVtx]) atomicAdd(numMistakes, 1); } } } void check_task_impl(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3); assert(regions.size() == 3); const Graph* graph = (Graph*) task->args; const GraphPiece *piece = (GraphPiece*) task->local_args; const AccessorRO<NodeStruct, 1> acc_pull_row_ptr(regions[0], FID_DATA); const AccessorRO<EdgeStruct2, 1> acc_pull_col_idx(regions[1], FID_DATA); const AccessorRO<Vertex, 1> acc_pr(regions[2], FID_DATA); Rect<1> rect_pull_row_ptr = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<1> rect_pull_col_idx = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<1> rect_pr = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); assert(acc_pull_row_ptr.accessor.is_dense_arbitrary(rect_pull_row_ptr)); assert(acc_pull_col_idx.accessor.is_dense_arbitrary(rect_pull_col_idx)); assert(acc_pr.accessor.is_dense_arbitrary(rect_pr)); const NodeStruct* pull_row_ptrs = acc_pull_row_ptr.ptr(rect_pull_row_ptr); const EdgeStruct2* pull_col_idxs = acc_pull_col_idx.ptr(rect_pull_col_idx); const Vertex* pr = acc_pr.ptr(rect_pr); V_ID rowLeft = rect_pull_row_ptr.lo[0], rowRight = rect_pull_row_ptr.hi[0]; E_ID colLeft = rect_pull_col_idx.lo[0], colRight = rect_pull_col_idx.hi[0]; V_ID *numMistakes; checkCUDA(cudaHostAlloc(&numMistakes, sizeof(V_ID), cudaHostAllocPortable | cudaHostAllocMapped)); *numMistakes = 0; load_kernel<<<GET_BLOCKS(graph->nv), CUDA_NUM_THREADS>>>( graph->nv, piece->oldAllPrFb, pr); // check correctness check_kernel<<<GET_BLOCKS(rowRight - rowLeft + 1), CUDA_NUM_THREADS>>>( rowLeft, rowRight, colLeft, pull_row_ptrs, pull_col_idxs, piece->oldAllPrFb, numMistakes); checkCUDA(cudaDeviceSynchronize()); if (*numMistakes == 0) printf("[PASS] Check task: rowLeft(%u) numMistakes(%u)\n", rowLeft, *numMistakes); else printf("[FAIL] Check task: rowLeft(%u) numMistakes(%u)\n", rowLeft, *numMistakes); }
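// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source). The push/bitmap kernels
// above store a dense frontier as a FrontierHeader followed by one bit per
// vertex of the partition, least-significant bit first within each byte. The
// helpers below restate that addressing on the host; frontier_set_bit and
// frontier_test_bit are hypothetical names.
static inline void frontier_set_bit(char* bitmap, V_ID v, V_ID rowLeft)
{
    bitmap[(v - rowLeft) / 8] |= (char)(1 << ((v - rowLeft) % 8));
}

static inline bool frontier_test_bit(const char* bitmap, V_ID v, V_ID rowLeft)
{
    return (bitmap[(v - rowLeft) / 8] & (1 << ((v - rowLeft) % 8))) != 0;
}
// Usage: the bitmap region starts at frontier + sizeof(FrontierHeader), exactly
// as in push_init_task_impl above, and spans (rowRight - rowLeft) / 8 + 1 bytes.
// ---------------------------------------------------------------------------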
#include "UnpackGPU.h" namespace ecal { namespace raw { __forceinline__ __device__ void print_raw_buffer(uint8_t const* const buffer, uint32_t const nbytes, uint32_t const nbytes_per_row = 20) { for (uint32_t i = 0; i < nbytes; i++) { if (i % nbytes_per_row == 0 && i > 0) printf("\n"); printf("%02X ", buffer[i]); } } __forceinline__ __device__ void print_first3bits(uint64_t const* buffer, uint32_t size) { for (uint32_t i = 0; i < size; ++i) { uint8_t const b61 = (buffer[i] >> 61) & 0x1; uint8_t const b62 = (buffer[i] >> 62) & 0x1; uint8_t const b63 = (buffer[i] >> 63) & 0x1; printf("[word: %u] %u%u%u\n", i, b63, b62, b61); } } __forceinline__ __device__ bool is_barrel(uint8_t dccid) { return dccid >= ElectronicsIdGPU::MIN_DCCID_EBM && dccid <= ElectronicsIdGPU::MAX_DCCID_EBP; } __forceinline__ __device__ uint8_t fed2dcc(int fed) { return static_cast<uint8_t>(fed - 600); } __forceinline__ __device__ int zside_for_eb(ElectronicsIdGPU const& eid) { int dcc = eid.dccId(); return ((dcc >= ElectronicsIdGPU::MIN_DCCID_EBM && dcc <= ElectronicsIdGPU::MAX_DCCID_EBM)) ? -1 : 1; } __forceinline__ __device__ bool is_synced_towerblock(uint16_t const dccbx, uint16_t const bx, uint16_t const dccl1, uint16_t const l1) { bool const bxsync = (bx == 0 && dccbx == 3564) || (bx == dccbx && dccbx != 3564); bool const l1sync = (l1 == ((dccl1 - 1) & 0xfff)); return bxsync && l1sync; } __forceinline__ __device__ bool right_tower_for_eb(int tower) { // for EB, two types of tower (LVRB top/bottom) if ((tower > 12 && tower < 21) || (tower > 28 && tower < 37) || (tower > 44 && tower < 53) || (tower > 60 && tower < 69)) return true; else return false; } __forceinline__ __device__ uint32_t compute_ebdetid(ElectronicsIdGPU const& eid) { // as in Geometry/EcalMaping/.../EcalElectronicsMapping auto const dcc = eid.dccId(); auto const tower = eid.towerId(); auto const strip = eid.stripId(); auto const xtal = eid.xtalId(); int smid = 0; int iphi = 0; bool EBPlus = (zside_for_eb(eid) > 0); bool EBMinus = !EBPlus; if (zside_for_eb(eid) < 0) { smid = dcc + 19 - ElectronicsIdGPU::DCCID_PHI0_EBM; iphi = (smid - 19) * ElectronicsIdGPU::kCrystalsInPhi; iphi += 5 * ((tower - 1) % ElectronicsIdGPU::kTowersInPhi); } else { smid = dcc + 1 - ElectronicsIdGPU::DCCID_PHI0_EBP; iphi = (smid - 1) * ElectronicsIdGPU::kCrystalsInPhi; iphi += 5 * (ElectronicsIdGPU::kTowersInPhi - ((tower - 1) % ElectronicsIdGPU::kTowersInPhi) - 1); } bool RightTower = right_tower_for_eb(tower); int ieta = 5 * ((tower - 1) / ElectronicsIdGPU::kTowersInPhi) + 1; if (RightTower) { ieta += (strip - 1); if (strip % 2 == 1) { if (EBMinus) iphi += (xtal - 1) + 1; else iphi += (4 - (xtal - 1)) + 1; } else { if (EBMinus) iphi += (4 - (xtal - 1)) + 1; else iphi += (xtal - 1) + 1; } } else { ieta += 4 - (strip - 1); if (strip % 2 == 1) { if (EBMinus) iphi += (4 - (xtal - 1)) + 1; else iphi += (xtal - 1) + 1; } else { if (EBMinus) iphi += (xtal - 1) + 1; else iphi += (4 - (xtal - 1)) + 1; } } if (zside_for_eb(eid) < 0) ieta = -ieta; DetId did{DetId::Ecal, EcalBarrel}; return did.rawId() | ((ieta > 0) ? 
(0x10000 | (ieta << 9)) : ((-ieta) << 9)) | (iphi & 0x1FF); } __forceinline__ __device__ int adc(uint16_t sample) { return sample & 0xfff; } __forceinline__ __device__ int gainId(uint16_t sample) { return (sample >> 12) & 0x3; } template <int NTHREADS> __global__ void kernel_unpack_test(unsigned char const* __restrict__ data, uint32_t const* __restrict__ offsets, int const* __restrict__ feds, uint16_t* samplesEB, uint16_t* samplesEE, uint32_t* idsEB, uint32_t* idsEE, uint32_t* pChannelsCounterEBEE, uint32_t const* eid2did, uint32_t const nbytesTotal) { // indices auto const ifed = blockIdx.x; // offset in bytes auto const offset = offsets[ifed]; // fed id auto const fed = feds[ifed]; auto const isBarrel = is_barrel(static_cast<uint8_t>(fed - 600)); // size auto const size = ifed == gridDim.x - 1 ? nbytesTotal - offset : offsets[ifed + 1] - offset; auto* samples = isBarrel ? samplesEB : samplesEE; auto* ids = isBarrel ? idsEB : idsEE; auto* pChannelsCounter = isBarrel ? &pChannelsCounterEBEE[0] : &pChannelsCounterEBEE[1]; // offset to the right raw buffer uint64_t const* buffer = reinterpret_cast<uint64_t const*>(data + offset); // dump first 3 bits for each 64-bit word //print_first3bits(buffer, size / 8); // // fed header // auto const fed_header = buffer[0]; uint32_t bx = (fed_header >> 20) & 0xfff; uint32_t lv1 = (fed_header >> 32) & 0xffffff; // 9 for fed + dcc header // 36 for 4 EE TCC blocks or 18 for 1 EB TCC block // 6 for SR block size // dcc header w2 auto const w2 = buffer[2]; uint8_t const fov = (w2 >> 48) & 0xf; // // print Tower block headers // uint8_t ntccblockwords = isBarrel ? 18 : 36; auto const* tower_blocks_start = buffer + 9 + ntccblockwords + 6; auto const* trailer = buffer + (size / 8 - 1); auto const* current_tower_block = tower_blocks_start; while (current_tower_block != trailer) { auto const w = *current_tower_block; uint8_t ttid = w & 0xff; uint16_t bxlocal = (w >> 16) & 0xfff; uint16_t lv1local = (w >> 32) & 0xfff; uint16_t block_length = (w >> 48) & 0x1ff; uint16_t const dccbx = bx & 0xfff; uint16_t const dccl1 = lv1 & 0xfff; // fov>=1 is required to support simulated data for which bx==bxlocal==0 if (fov >= 1 && !is_synced_towerblock(dccbx, bxlocal, dccl1, lv1local)) { current_tower_block += block_length; continue; } // go through all the channels // get the next channel coordinates uint32_t nchannels = (block_length - 1) / 3; // 1 threads per channel in this block for (uint32_t ich = 0; ich < nchannels; ich += NTHREADS) { auto const i_to_access = ich + threadIdx.x; // threads outside of the range -> leave the loop if (i_to_access >= nchannels) break; // inc the channel's counter and get the pos where to store auto const wdata = current_tower_block[1 + i_to_access * 3]; uint8_t const stripid = wdata & 0x7; uint8_t const xtalid = (wdata >> 4) & 0x7; ElectronicsIdGPU eid{fed2dcc(fed), ttid, stripid, xtalid}; auto const didraw = isBarrel ? 
compute_ebdetid(eid) : eid2did[eid.linearIndex()]; // FIXME: what kind of channels are these guys if (didraw == 0) continue; // get samples uint16_t sampleValues[10]; sampleValues[0] = (wdata >> 16) & 0x3fff; sampleValues[1] = (wdata >> 32) & 0x3fff; sampleValues[2] = (wdata >> 48) & 0x3fff; auto const wdata1 = current_tower_block[2 + i_to_access * 3]; sampleValues[3] = wdata1 & 0x3fff; sampleValues[4] = (wdata1 >> 16) & 0x3fff; sampleValues[5] = (wdata1 >> 32) & 0x3fff; sampleValues[6] = (wdata1 >> 48) & 0x3fff; auto const wdata2 = current_tower_block[3 + i_to_access * 3]; sampleValues[7] = wdata2 & 0x3fff; sampleValues[8] = (wdata2 >> 16) & 0x3fff; sampleValues[9] = (wdata2 >> 32) & 0x3fff; // check gain bool isSaturation = true; short firstGainZeroSampID{-1}, firstGainZeroSampADC{-1}; for (uint32_t si = 0; si < 10; si++) { if (gainId(sampleValues[si]) == 0) { firstGainZeroSampID = si; firstGainZeroSampADC = adc(sampleValues[si]); break; } } if (firstGainZeroSampID != -1) { unsigned int plateauEnd = std::min(10u, (unsigned int)(firstGainZeroSampID + 5)); for (unsigned int s = firstGainZeroSampID; s < plateauEnd; s++) { if (gainId(sampleValues[s]) == 0 && adc(sampleValues[s]) == firstGainZeroSampADC) { ; } else { isSaturation = false; break; } //it's not saturation } // get rid of channels which are stuck in gain0 if (firstGainZeroSampID < 3) { isSaturation = false; } if (!isSaturation) continue; } else { // there is no zero gainId sample // gain switch check short numGain = 1; bool gainSwitchError = false; for (unsigned int si = 1; si < 10; si++) { if ((gainId(sampleValues[si - 1]) > gainId(sampleValues[si])) && numGain < 5) gainSwitchError = true; if (gainId(sampleValues[si - 1]) == gainId(sampleValues[si])) numGain++; else numGain = 1; } if (gainSwitchError) continue; } auto const pos = atomicAdd(pChannelsCounter, 1); // store to global ids[pos] = didraw; samples[pos * 10] = sampleValues[0]; samples[pos * 10 + 1] = sampleValues[1]; samples[pos * 10 + 2] = sampleValues[2]; samples[pos * 10 + 3] = sampleValues[3]; samples[pos * 10 + 4] = sampleValues[4]; samples[pos * 10 + 5] = sampleValues[5]; samples[pos * 10 + 6] = sampleValues[6]; samples[pos * 10 + 7] = sampleValues[7]; samples[pos * 10 + 8] = sampleValues[8]; samples[pos * 10 + 9] = sampleValues[9]; } current_tower_block += block_length; } } void entryPoint(InputDataCPU const& inputCPU, InputDataGPU& inputGPU, OutputDataGPU& outputGPU, ScratchDataGPU& scratchGPU, OutputDataCPU& outputCPU, ConditionsProducts const& conditions, cudaStream_t cudaStream, uint32_t const nfedsWithData, uint32_t const nbytesTotal) { // transfer cudaCheck(cudaMemcpyAsync(inputGPU.data.get(), inputCPU.data.get(), nbytesTotal * sizeof(unsigned char), cudaMemcpyHostToDevice, cudaStream)); cudaCheck(cudaMemcpyAsync(inputGPU.offsets.get(), inputCPU.offsets.get(), nfedsWithData * sizeof(uint32_t), cudaMemcpyHostToDevice, cudaStream)); cudaCheck(cudaMemsetAsync(scratchGPU.pChannelsCounter.get(), 0, sizeof(uint32_t) * 2, // EB + EE cudaStream)); cudaCheck(cudaMemcpyAsync( inputGPU.feds.get(), inputCPU.feds.get(), nfedsWithData * sizeof(int), cudaMemcpyHostToDevice, cudaStream)); kernel_unpack_test<32><<<nfedsWithData, 32, 0, cudaStream>>>(inputGPU.data.get(), inputGPU.offsets.get(), inputGPU.feds.get(), outputGPU.digisEB.data.get(), outputGPU.digisEE.data.get(), outputGPU.digisEB.ids.get(), outputGPU.digisEE.ids.get(), scratchGPU.pChannelsCounter.get(), conditions.eMappingProduct.eid2did, nbytesTotal); cudaCheck(cudaGetLastError()); // transfer the counters for 
how many eb and ee channels we got cudaCheck(cudaMemcpyAsync(outputCPU.nchannels.get(), scratchGPU.pChannelsCounter.get(), sizeof(uint32_t) * 2, cudaMemcpyDeviceToHost, cudaStream)); } } // namespace raw } // namespace ecal
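// ---------------------------------------------------------------------------
// A minimal, self-contained sketch (not part of the unpacker above) of the
// slot-reservation pattern that kernel_unpack_test relies on: each thread
// that accepts a channel reserves a unique output slot with atomicAdd on a
// shared counter and then writes its id and 10 samples there. All names below
// (toyStoreChannels, kSamplesPerChannel, the even-id filter) are hypothetical
// and used only for illustration.
// ---------------------------------------------------------------------------
#include <cstdint>

constexpr int kSamplesPerChannel = 10;

__global__ void toyStoreChannels(uint32_t const* __restrict__ candidateIds,
                                 uint16_t const* __restrict__ candidateSamples,
                                 int nCandidates,
                                 uint32_t* __restrict__ outIds,
                                 uint16_t* __restrict__ outSamples,
                                 uint32_t* __restrict__ counter) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nCandidates)
    return;
  // In the real unpacker a channel may be rejected (zero detid, gain errors);
  // here an arbitrary even-id filter stands in for that control flow.
  if (candidateIds[i] % 2 != 0)
    return;
  // Reserve one output slot; atomicAdd returns the previous counter value, so
  // every accepted channel gets a unique, densely packed position.
  uint32_t pos = atomicAdd(counter, 1u);
  outIds[pos] = candidateIds[i];
  for (int s = 0; s < kSamplesPerChannel; ++s)
    outSamples[pos * kSamplesPerChannel + s] =
        candidateSamples[i * kSamplesPerChannel + s];
}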
the_stack
#include "OtsuForThree.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" // 宏: DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 存储在常量内存中的概率集和均值集 __constant__ float dev_W[256]; __constant__ float dev_U[256]; // Kernel 函数:_OtsuForThree_ForwardKer(前向三值化) static __global__ void // Kernel 函数无返回值 _OtsuForThree_ForwardKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 unsigned char thresholda, // 阈值1 unsigned char thresholdb // 阈值2 ); // Kernel 函数:_OtsuForThree_ForwardKer(前向三值化) static __global__ void _OtsuForThree_ForwardKer(ImageCuda inimg, ImageCuda outimg, unsigned char thresholda, unsigned char thresholdb) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线 程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像 素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理, 一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素。 // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。线程中处理的第一个点。 if (intemp <= thresholda) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp > thresholda && intemp <= thresholdb) { outimg.imgMeta.imgData[outidx] = thresholdb; } else if (intemp > thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = 255; } // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。 if (intemp <= thresholda) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp > thresholda && intemp <= thresholdb) { outimg.imgMeta.imgData[outidx] = thresholdb; } else if (intemp > thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = 255; } } } // Kernel 函数:_OtsuForThree_BackwardKer(后向三值化) static __global__ void // Kernel 函数无返回值 _OtsuForThree_BackwardKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 unsigned char thresholda, // 阈值1 unsigned char thresholdb // 阈值2 ); // Kernel 函数:_OtsuForThree_BackwardKer(后向三值化) static __global__ void _OtsuForThree_BackwardKer(ImageCuda inimg, ImageCuda outimg, unsigned char thresholda, unsigned char thresholdb) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线 程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像 素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理, 一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素。 // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。线程中处理的第一个点。 if (intemp < thresholda) { outimg.imgMeta.imgData[outidx] = 0; } else if (intemp >= thresholda && intemp < thresholdb) { 
outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp >= thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = thresholdb; } // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。 if (intemp < thresholda) { outimg.imgMeta.imgData[outidx] = 0; } else if (intemp >= thresholda && intemp < thresholdb) { outimg.imgMeta.imgData[outidx] = thresholda; } else if (intemp >= thresholdb && intemp <= 255) { outimg.imgMeta.imgData[outidx] = thresholdb; } } } // Kernel 函数:_CalcuVarianceKer (计算最小类内方差) static __global__ void // Kernel 函数无返回值 _CalcuVarianceKer( float * thres ); // Kernel 函数:_CalcuVarianceKer (计算最小类内方差) static __global__ void _CalcuVarianceKer(float * thres) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线 程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理, 一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= 128 || r >= 128) return; int index = c * 128 + r; int counti = c; int countj = r + 128; float vara, varb, varc; // 每个线程计算一种分割情况下的类内方差总和,并通过对应关系,存储在相应下标的 // 数组元素中。计算时,分别计算(0-t1)、(t1-t2)、(t2-255)三个类内方差。 // 计算(0-t1)的类内方差 vara float Wk, Uk; Wk = dev_W[counti] - dev_W[0]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[counti] - dev_U[0]) / Wk; vara = 0.0; for (int count = 1; count <= counti; count++) { vara += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // 计算(t1-t2)的类内方差 varb Wk = dev_W[countj] - dev_W[counti]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[countj] - dev_U[counti]) / Wk; varb = 0.0; for (int count = counti; count <= countj; count++) { if (count < 1) continue; varb += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // 计算(t2-255)的类内方差varc Wk = dev_W[255] - dev_W[countj]; if (Wk == 0.0) Uk = 0.0; else Uk = (dev_U[255] - dev_U[countj]) / Wk; varc = 0.0; for (int count = countj; count <= 255; count++) { varc += abs(count - Uk) * abs(count - Uk) * (dev_W[count] - dev_W[count - 1]); } // 将计算得到的方差和存储在数组中。 thres[index] = vara + varb + varc; } // Host 成员方法:OtsuForThree(最佳二值化自动生成) __host__ int OtsuForThree::otsuForThree(Image *inimg, Image *outimg) { // 检查输入图像和输出图像是否为 NULL,如果为 NULL 直接报错返回 。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice (outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败), 则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长, 宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) 
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 调用直方图,获取图像的像素信息 Histogram h; // 图像的像素信息 unsigned int his[256]; h.histogram(inimg, his, true); // 图像总像素数 int sumpixel = 0; for (int i = 0; i < 256; i++) { sumpixel += his[i]; } // 计算图像的概率信息、有聚合度的概率集和有聚合度的均值集合。 float P[256]; float W[256]; float U[256]; P[0] = (float)his[0] / (float)sumpixel; W[0] = P[0]; U[0] = 0.0; for(int i = 1; i < 256; i++) { P[i] = (float)his[i] / (float)sumpixel; W[i] = P[i] + W[i-1]; U[i] = i * P[i] + U[i-1]; } // 将概率集和均值集复制到常量内存中 cudaMemcpyToSymbol(dev_W, W, sizeof(float) * 256); cudaMemcpyToSymbol(dev_U, U, sizeof(float) * 256); // 存储128×128个类内方差总和的数组 float *hostthresholds = new float[16384]; float *devthreshlods; // 为标记数组分配大小。 errcode = cudaMalloc((void **)&devthreshlods, 16384 * sizeof (float)); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 为标记数组设定初值。 errcode = cudaMemset(devthreshlods, 0, 16384 * sizeof (float)); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 将数组复制至 device 端。 errcode = cudaMemcpy(devthreshlods, hostthresholds, 16384 * sizeof (float), cudaMemcpyHostToDevice); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (128 + blocksize.x - 1) / blocksize.x; gridsize.y = (128 + blocksize.y - 1) / blocksize.y; // 调用核函数,计算128×128种分割方式下的方差集合 _CalcuVarianceKer<<<gridsize, blocksize>>>(devthreshlods); // 将数组复制至 host 端。 errcode = cudaMemcpy(hostthresholds, devthreshlods, 16384 * sizeof (float), cudaMemcpyDeviceToHost); if (errcode != cudaSuccess) { cudaFree(devthreshlods); return errcode; } // 串行计算,找出128×128个方差元素中的最小值 float min = 10000.0; int thresa = 0; int thresb = 0; // 计算数组的最小值 for (int i = 0; i < 16384; i++) { if (min > hostthresholds[i]) { min = hostthresholds[i]; // 通过对应成二维数组,得到两个对应的阈值 thresa = i / 128; thresb = i % 128 + 128; } } // 将阈值进行类型转换。 unsigned char thresholda = (unsigned char)thresa; unsigned char thresholdb = (unsigned char)thresb; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用核函数,使用最佳阈值对图像进行二值化 if (this-> isForward) { _OtsuForThree_ForwardKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud,thresholda, thresholdb); } else { _OtsuForThree_BackwardKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud,thresholda, thresholdb); } if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; }
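// ---------------------------------------------------------------------------
// A small host-side sketch (not part of the class above) of the
// index <-> (t1, t2) mapping used by _CalcuVarianceKer and by the argmin scan
// in otsuForThree: the 128 x 128 table restricts the first threshold t1 to
// [0, 127] and the second threshold t2 to [128, 255], with entry i
// corresponding to t1 = i / 128 and t2 = i % 128 + 128. The helper names below
// are hypothetical.
// ---------------------------------------------------------------------------
#include <utility>

static std::pair<int, int> indexToThresholds(int i) {
    return { i / 128, i % 128 + 128 };   // (t1, t2)
}

static int thresholdsToIndex(int t1, int t2) {
    return t1 * 128 + (t2 - 128);        // inverse of the mapping above
}

// Linear scan for the split with the smallest total within-class variance,
// mirroring the serial loop at the end of otsuForThree.
static std::pair<int, int> argminSplit(const float *variances /* 16384 entries */) {
    int best = 0;
    for (int i = 1; i < 128 * 128; ++i)
        if (variances[i] < variances[best])
            best = i;
    return indexToThresholds(best);
}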
the_stack
//Util device functions ///////////////////////////////////////////////////////////// //WARP LEVEL NO SYNC FUNCTIONS, USE WITH CAUTION /*********************************8 * * shared memory pointer naming conventions: * * [v][type]sm[OffsetDescriptor][Usage] * * v: if pointer name starts with a v it is declared a volatile to guarantee warp safe execution * * type: (optional, only used for temporary type casts) can be i=int, f=float, d=double,... * * OffsetDesriptor: if none given: e.g. sm, ism, the pointer points to a unique address/offset used only by the current thread * Base: points to the first element of the per thread/per Warp memory. all warps in block point to the same address. * Warp: (sometimes shortened to W) pointer points to the first element of a buffer of warp size used only by the the threads in the current warp * * Usage: suffix do indicate special use or different sm layout with num elements != num threads * * * Examples: * ism: pointer to thread specific address casted to int* * vsmWarpTrace address pointing to the first element of a trace-buffer specific to the current warp which his declared volatile * smWarp pointer to the first element of a buffer of warp size used only by the the threads in the current warp * * volatile int * vism = (int*)&smWarp[threadIdx.x]; * assigns the int* casted address for the current thread to a volatile declared pointer * * * */ // Accumulates sm values of blockDim.x and stores them in sm value of thread 0; // does not distribute results in sm, hence // all values in sm for threads with Idx.x > 0 are garbage afterwards // only works if blockDim.x has a value of 2^n <= warpsize (32) since no sync is used. // smP[0] will contain correct value template<typename T> __device__ inline void WarpSumNoSync(T * sm) { volatile T * vsm = sm; int offset = blockDim.x >> 1; while(offset >= 1){ if(threadIdx.x < offset) *vsm += *(vsm + offset); offset = offset >> 1; } } //sums only up for groups of n where n has to be a warpsize/(2^k) template<typename T> __device__ inline void WarpSumNoSync(T * sm, int n) { volatile T * vsm = sm; int tid = threadIdx.x % n; int offset = n >> 1; while(offset >= 1){ if(tid < offset) *vsm += *(vsm + offset); offset = offset >> 1; } *vsm = *(vsm-tid); //distribute to all threads sm address } //sums up shared memory from sm[0] to sm[threadIdx.x] template<typename T> __device__ inline T WarpRunningSumToCurrentThread(T * smWarp) { volatile T * vsmWarp = smWarp; T s = 0; for(int i = 0 ; i<blockDim.x; i++) s += (i<=threadIdx.x)?(vsmWarp[i]):0; return s; } //accumulates one trace with nframes per thread into the output Trace by accumulaton //the input races frame by frame in shared memory. all operations are warp based and rely on //blockDim.x == warp size // output trace buffer has to be initialized with 0 since accumulated value gets added to current value. // template<typename T> //blockDim.x == warpsize !! 
sm buffer must be at least smBase[numframes] __device__ inline void WarpTraceAccum(T* outTrace, T*smWarp, T*localTrace, const int nframes) { volatile float * vsmW = smWarp; volatile float * vOut = outTrace; for(int f=0; f<nframes; f++){ vsmW[threadIdx.x] = localTrace[f]; // each thread of the warp writes one frame WarpSumNoSync(&smWarp[threadIdx.x]); if(threadIdx.x == 0) vOut[f] += vsmW[0]; } } //accumulates one frame of trace held //validTrace is option and defaults to true, if some traces are not part of the accumulation for those threads/traces validTrace has to be set to false template<typename T> //blockDim.x == warpsize !! sm buffer must be at least smBase[numframes] __device__ inline void WarpTraceAccumSingleFrame(T* smWarpTrace, const int frame, T*smWarp, const T localFrameValue, const bool validTrace) { //volatile float * vsmW = smWarp; volatile float * vsm = smWarp + threadIdx.x; volatile float * vOut = smWarpTrace; *vsm = (validTrace)?(localFrameValue):(0.0f); WarpSumNoSync(vsm); if(threadIdx.x == 0) vOut[frame] += *vsm; } //overloaded from above, where input trace instead of just the frame value is passed //validTrace is option and defaults to true, if some traces are not part of the accumulation for those threads/traces validTrace has to be set to false template<typename T> __device__ inline void WarpTraceAccumSingleFrame(T* smWarpTrace, const int frame, T*smWarp, const T * localTrace, const bool validTrace) { WarpTraceAccumSingleFrame( smWarpTrace, frame, smWarp, localTrace[frame], validTrace); } //same as above but each thread also passes a valid trace flag which tells if there is a trace to accumulate //the function will also accumulate the number of valid traces and return the value //blockDim.x == warpsize !! sm buffer must be at least smBase[numframes] //validTrace is option and defaults to true, if some traces are not part of the accumulation for those threads/traces validTrace has to be set to false template<typename T> __device__ inline int WarpTraceAccumCount(T* smWarpTrace, const int nframes, T*smWarp, const T*localTrace, const bool validTrace) { volatile int * vismW = (int*)smWarp; volatile float * vsmW = smWarp; volatile int*vism = vismW+threadIdx.x; /// !!!!! *vism = (validTrace)?(1):(0); WarpSumNoSync(vism); int numTraces = vismW[0]; if(numTraces > 0){ volatile float * vOut = smWarpTrace; for(int f=0; f<nframes; f++){ vsmW[threadIdx.x] = (validTrace)?(localTrace[f]):(0); // each thread of the warp writes one frame WarpSumNoSync(&(smWarp[threadIdx.x])); if(threadIdx.x == 0) vOut[f] += vsmW[0]; } } return numTraces; } // END WARP LEVEL FUNCTIONS //////////////////////////////////////// //BLOCK LEVEL REDUCTION //reduces shared memory and returns sum, //no sync at the end make sure to sync before using smem after function call template<typename T> __device__ inline T ReduceSharedMemory(T* smBase, T*sm) { WarpSumNoSync(sm); //Synchronize to allow to sum up partial sums from all warps within block __syncthreads(); T sum = 0; //calculate num live beads for whole region in all threads for(size_t i=0; i<blockDim.y; i++){ sum += smBase[i*blockDim.x]; //all threads get correct sum from SM base pointer } return sum; } //accumulates num Warps traces with nframes which are stored consecutively in shared memory into one final trace //parameters: outTrace: points to the first frame of a buffer of length nframes or larger // smTracesBase: points to the first frame of a buffer containing numWarps traces of length nframes. // nframes: number of frames. 
template<typename T> __device__ inline void BlockTraceAccumfromWarps(T* outTrace, const T*smBaseTraces, const int nframes, const int maxCompFrames) { int accumFrame = threadIdx.x + blockDim.x * threadIdx.y; int blockSize = blockDim.x * blockDim.y; __syncthreads(); while (accumFrame < nframes){ T accum = 0.0f; for(int i=0; i<blockDim.y; i++) accum += smBaseTraces[accumFrame+maxCompFrames*i]; outTrace[accumFrame] = accum; accumFrame += blockSize; } } //same as above but result will be written to the first warps trace in shared memory. template<typename T> __device__ inline void BlockTraceAccumfromWarpsInplace(T*smBaseTraces, const int nframes, const int maxCompFrames) { int offset = threadIdx.x + blockDim.x * threadIdx.y; int blockSize = blockDim.x * blockDim.y; __syncthreads(); while (offset < nframes){ for(int i=1; i<blockDim.y; i++) smBaseTraces[offset] += smBaseTraces[offset+maxCompFrames*i]; offset += blockSize; } } //as above but then stores the result to a global address // atomicGlobalAccum: true does an atomic accumulation in global memory, if false value gets saved to global memory and current value get's overwritten. template<typename T> __device__ inline void BlockTraceAccumfromWarpsInplaceToGlobal( T*gTrace,const size_t outFrameStride, T*smBaseTraces, const int nframes, const int maxCompFrames, const bool atomicGlobalAccum) { int accumFrame = threadIdx.x + blockDim.x * threadIdx.y; int blockSize = blockDim.x * blockDim.y; gTrace += accumFrame * outFrameStride; volatile T * vsmBaseTrace = smBaseTraces; __syncthreads(); while (accumFrame < nframes){ for(int i=1; i<blockDim.y; i++) vsmBaseTrace[accumFrame] += vsmBaseTrace[accumFrame+maxCompFrames*i]; if(atomicGlobalAccum) atomicAdd(gTrace,vsmBaseTrace[accumFrame]); else *gTrace = vsmBaseTrace[accumFrame]; accumFrame += blockSize; gTrace += blockSize * outFrameStride; } } template<typename T> __device__ inline T BlockAccumPerThreadValuesAcrossWarpsSharedMem(T*smBase) { __syncthreads(); T sum = 0; for(int i=0; i<blockDim.y;i++) sum += smBase[threadIdx.x + i*blockDim.x]; return sum; } //per warp value at each sm_warp_base: smBase[0],[warpsize],[2*warpsize].... will be accumulated in smBase[0] and then stored to global // atomicGlobalAccum: true does an atomic accumulation in global memory, if false value gets saved to global memory and current value get's overwritten. template<typename T> __device__ inline void BlockAccumValuePerWarpToGlobal( T*gValue, T*smBase, const bool atomicGlobalAccum) { T sum = BlockAccumPerThreadValuesAcrossWarpsSharedMem(smBase); if (threadIdx.x==0 && threadIdx.y ==0){ if(atomicGlobalAccum) atomicAdd(gValue,sum); else *gValue=sum; } } ///////////////////////////////////////////////////////// //checks if the passed bit is set in the mask //for more than one bit passed //default: match any, returns true if at least one of the bits is matched //if matchAll == true: returns true only if all bits passed in type are set in mask __host__ __device__ inline bool Match(const unsigned short * mask, unsigned short type, bool matchAll) { unsigned short afterAnd = LDG_LOAD(mask) & type; if(!matchAll) return (afterAnd ? true : false ); // at least one bit matches else return afterAnd == type; // all bits matched } template<typename T> __device__ inline void clampT ( T &x, T a, T b) { // Clamps x between a and b x = (x < a ? a : (x > b ? 
b : x)); } //checks mask if well can be used as a empty reference __device__ inline bool useForEmpty(const unsigned short *bfmask) { //ToDo add Duds if needed return ( Match(bfmask,(unsigned short)MaskReference) && !Match(bfmask,(unsigned int)(MaskPinned | MaskIgnore)) // && !Match(bfmask,(MaskType)MaskIgnore) ); } //interpolate and correct template<typename T> __device__ inline float iPc(const T lvalue, const T rvalue, const float frac, const float c) { //return (1-frac)*lvalue + frac*rvalue - c; return (float)((rvalue - lvalue)*frac + lvalue - c); } //uncompresses trace from a buffer with a stride of framestride into the buffer uncompTrace with consecutive elements (stride = 1) of length CfP.getUmcompressedFrames template<typename T> __device__ inline void GetUncompressTrace(float * uncompTrace, const ConstantFrameParams & CfP, const T * compTrace, const int frameStride ) { if(CfP.getRawFrames() < CfP.getUncompFrames()){ int my_frame = 0; float prev=*compTrace; float next=0.0f; *uncompTrace = prev; for ( my_frame=1; my_frame<CfP.getUncompFrames(); my_frame++ ) { // need to make this faster!!! int interf= CfP.interpolatedFrames[my_frame]; next = compTrace[frameStride*interf]; prev = compTrace[frameStride*(interf-1)]; // interpolate float mult = CfP.interpolatedMult[my_frame]; uncompTrace[my_frame] = ( prev-next ) *mult + next; } }else{ // the rare "uncompressed" case for ( int my_frame=0; my_frame<CfP.getUncompFrames(); my_frame++ ) { uncompTrace[my_frame] = compTrace[my_frame*frameStride]; } } } template<typename T> __device__ inline float ComputeDcOffsetForUncompressedTrace ( const T *bPtr, const int uncompressedFrames, const float t_start, const float t_end ) { float cnt = 0.0001f; float dc_zero = 0.000f; int above_t_start = ( int ) ceil ( t_start ); int below_t_end = ( int ) floor ( t_end ); //ToDo: figure out how to propagate back to CPU //assert (0 <= above_t_start) && (above_t_start-1 < imgFrames) && (0 <= below_t_end+1) && (below_t_end < imgFrames)); for ( int pt = above_t_start; pt <= below_t_end; pt++ ) { dc_zero += ( float ) ( bPtr[pt] ); cnt += 1.0f; } // include values surrounding t_start & t_end weighted by overhang if ( above_t_start > 0 ) { float overhang = ( above_t_start-t_start ); dc_zero = dc_zero + bPtr[above_t_start-1]*overhang; cnt += overhang; } if ( below_t_end < ( uncompressedFrames-1 ) ) { float overhang = ( t_end-below_t_end ); dc_zero = dc_zero + bPtr[below_t_end+1]* ( t_end-below_t_end ); cnt += overhang; } dc_zero /= cnt; return dc_zero; } template<typename T> __device__ inline float ComputeDcOffsetForCompressedTrace ( const T * fgPtr, const size_t frameStride, const float* frameNumber, const float t_start, const float t_end, const int numFrames ) { float dc_zero = 0.0f; float cnt = 0.0f; int pt; int pt1 = 0; int pt2 = 0; // TODO: is this really "rezero frames before pH step start?" // this should be compatible with i_start from the nuc rise - which may change if we change the shape??? for (pt = 0;frameNumber[pt] < t_end;pt++) { pt2 = pt+1; if (frameNumber[pt]>t_start) { if (pt1 == 0) pt1 = pt; // set to first point above t_start dc_zero += (float) (fgPtr[pt*frameStride]); cnt += 1.0f; // should this be frames_per_point???? //cnt += time_cp->frames_per_point[pt]; // this somehow makes it worse???? 
} } // include values surrounding t_start & t_end weighted by overhang if (pt1 > 0) { // timecp->frameNumber[pt1-1] < t_start <= timecp->frameNumber[pt1] // normalize to a fraction in the spirit of "this somehow makes it worse" float den = (frameNumber[pt1]-frameNumber[pt1-1]); if ( den > 0 ) { float overhang = (frameNumber[pt1] - t_start)/den; dc_zero = dc_zero + fgPtr[(pt1-1)*frameStride]*overhang; cnt += overhang; } } if ( (pt2 < numFrames) && (pt2>0) ) { // timecp->frameNumber[pt2-1] <= t_end < timecp->frameNumber[pt2] // normalize to a fraction in the spirit of "this somehow makes it worse float den = (frameNumber[pt2]-frameNumber[pt2-1]); if ( den > 0 ) { float overhang = (t_end - frameNumber[pt2-1])/den; dc_zero = dc_zero + fgPtr[pt2*frameStride]*overhang; cnt += overhang; } } return (cnt > 0.0f )?(dc_zero/cnt):(0.0f); } //empty trace handling __device__ inline void TShiftAndPseudoCompression ( float *fbkg, const float *bg, const float * frameNumber,const float tshift, const int npts, int const uncompFrames, const float dcoffset ) { //fprintf(stdout, "tshift %f\n", tshift); for (int i=0;i < npts;i++){ // get the frame number of this data point (might be fractional because this point could be // the average of several frames of data. This number is the average time of all the averaged // data points float t=frameNumber[i]; float fn=t-tshift; if (fn < 0.0f) fn = 0.0f; if (fn > (ConstFrmP.getUncompFrames()-2)) fn = ConstFrmP.getUncompFrames()-2; int ifn= (int) fn; float frac = fn - ifn; if(dcoffset == 0) fbkg[i] = ( (1-frac) *bg[ifn] + frac*bg[ifn+1]); else fbkg[i] = ( (1-frac) *bg[ifn] + frac*bg[ifn+1]) - dcoffset; //= ((tmp == tmp)?(tmp):(0)); //nan check //assert ( !isnan(fbkg[i]) ); } } __device__ inline void TShiftAndPseudoCompressionOneFrame ( //tshift always left shift float *fbkg, const float *bg, const float * frameNumber,const float tshift, const int thisFrame, const float dcoffset ) { //printf("tshift %f\n", tshift); //for (int i=0;i < npts;i++){ int i = thisFrame; // get the frame number of this data point (might be fractional because this point could be // the average of several frames of data. 
This number is the average time of all the averaged // data points float t=frameNumber[i]; float fn=t-tshift; if (fn < 0.0f) fn = 0.0f; if (fn > (ConstFrmP.getUncompFrames()-2)) fn = ConstFrmP.getUncompFrames()-2; int ifn= (int) fn; float frac = fn - ifn; if(dcoffset == 0) fbkg[i] = ( (1-frac) *bg[ifn] + frac*bg[ifn+1]); else fbkg[i] = ( (1-frac) *bg[ifn] + frac*bg[ifn+1]) - dcoffset; //= ((tmp == tmp)?(tmp):(0)); //nan check //assert ( !isnan(fbkg[i]) ); } //Util Kernels //input width and height template<typename TDest, typename TSrc> __global__ void transposeDataKernel(TDest *dest, TSrc *source, int width, int height) { __shared__ float tile[32][32+1]; if(sizeof(TDest) > sizeof(float)){ //printf ("TRANSPOSE KERNEL ERROR: destination type cannot be larger than %d bytes\n", sizeof(float)); return; } TDest * smPtr; int xIndexIn = blockIdx.x * 32 + threadIdx.x; int yIndexIn = blockIdx.y * 32 + threadIdx.y; int Iindex = xIndexIn + (yIndexIn)*width; int xIndexOut = blockIdx.y * 32 + threadIdx.x; int yIndexOut = blockIdx.x * 32 + threadIdx.y; int Oindex = xIndexOut + (yIndexOut)*height; smPtr = (TDest*) &tile[threadIdx.y][threadIdx.x]; if(xIndexIn < width && yIndexIn < height) *smPtr = (TDest)source[Iindex]; smPtr = (TDest*) &tile[threadIdx.x][threadIdx.y]; __syncthreads(); if(xIndexOut < height && yIndexOut < width) dest[Oindex] = *smPtr; } // Reduces shared memory of power of 2 size (max 512 elements) and puts the sum in sm[0] template<typename T> __device__ inline void ReduceAndAvgAtBlockLevel(T *sm, int N, bool avg) { if (blockDim.x >= 512) { if (threadIdx.x < 256) sm[threadIdx.x] += sm[threadIdx.x + 256]; } __syncthreads(); if (blockDim.x >= 256) { if (threadIdx.x < 128) sm[threadIdx.x] += sm[threadIdx.x + 128]; } __syncthreads(); if (blockDim.x >= 128) { if (threadIdx.x < 64) sm[threadIdx.x] += sm[threadIdx.x + 64]; } __syncthreads(); if (threadIdx.x < 32) { if (blockDim.x >= 64) sm[threadIdx.x] += sm[threadIdx.x + 32]; if (blockDim.x >= 32) sm[threadIdx.x] += sm[threadIdx.x + 16]; if (blockDim.x >= 16) sm[threadIdx.x] += sm[threadIdx.x + 8]; if (blockDim.x >= 8) sm[threadIdx.x] += sm[threadIdx.x + 4]; if (blockDim.x >= 4) sm[threadIdx.x] += sm[threadIdx.x + 2]; if (blockDim.x >= 2) sm[threadIdx.x] += sm[threadIdx.x + 1]; } if (threadIdx.x == 0 && avg) sm[0] /= (T)N; __syncthreads(); } template<typename T> __device__ inline void SimplestReductionAndAverage(T *sm, int N, bool avg) { T sum = 0.0f; if (threadIdx.x == 0) { for (int i=0; i<N; ++i) sum += sm[i]; if (avg) sm[0] = sum / (T)N; else sm[0] = sum; } __syncthreads(); } template __global__ void transposeDataKernel<float,float>(float*,float*,int,int); template __global__ void transposeDataKernel<short,short>(short*,short*,int,int); template __global__ void transposeDataKernel<float,short>(float*,short*,int,int);
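// ---------------------------------------------------------------------------
// The WarpSumNoSync helpers above rely on implicit warp-synchronous execution
// through volatile shared memory, which the file itself flags as "USE WITH
// CAUTION" and which is fragile under independent thread scheduling (Volta and
// later). A minimal sketch of an alternative warp sum using warp shuffles is
// shown below; it is not part of this file, and it is only equivalent when the
// full warp participates and T is a shuffle-compatible type.
// ---------------------------------------------------------------------------
template<typename T>
__device__ inline T WarpSumShfl(T val)
{
    const unsigned fullMask = 0xffffffffu;
    // Tree reduction within the warp: after the loop, lane 0 holds the sum.
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        val += __shfl_down_sync(fullMask, val, offset);
    // Broadcast lane 0's result so every lane returns the same value, matching
    // the "distribute to all threads" behaviour of WarpSumNoSync(T*, int).
    return __shfl_sync(fullMask, val, 0);
}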
the_stack
#pragma once #include <map> #include <unordered_map> #include <set> namespace gunrock { namespace app { namespace louvain { /****************************************************************************** * Housekeeping Routines ******************************************************************************/ cudaError_t UseParameters_test(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(parameters.Use<uint32_t>( "omp-threads", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0, "Number of threads for parallel omp louvain implementation; 0 for " "default.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "omp-runs", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 1, "Number of runs for parallel omp louvain implementation.", __FILE__, __LINE__)); return retval; } /** * @brief Displays the community detection result (i.e. communities of vertices) * @tparam T Type of values to display * @tparam SizeT Type of size counters * @param[in] community for each node. * @param[in] num_nodes Number of nodes in the graph. */ template <typename T, typename SizeT> void DisplaySolution(T *array, SizeT length) { if (length > 40) length = 40; util::PrintMsg("[", true, false); for (SizeT i = 0; i < length; ++i) { util::PrintMsg(std::to_string(i) + ":" + std::to_string(array[i]) + " ", true, false); } util::PrintMsg("]"); } /****************************************************************************** * Louvain Testing Routines *****************************************************************************/ template <typename GraphT, typename ValueT = typename GraphT::ValueT> ValueT Get_Modularity(const GraphT &graph, typename GraphT::VertexT *communities = NULL) { typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; SizeT nodes = graph.nodes; ValueT *w_v2 = new ValueT[nodes]; ValueT *w_c = new ValueT[nodes]; ValueT m2 = 0; ValueT w_in = 0; for (VertexT v = 0; v < nodes; v++) { w_v2[v] = 0; w_c[v] = 0; } //#pragma omp parallel for //reduction(+:m) for (VertexT v = 0; v < nodes; v++) { SizeT start_e = graph.GetNeighborListOffset(v); SizeT degree = graph.GetNeighborListLength(v); VertexT c_v = (communities == NULL) ? v : communities[v]; for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = graph.GetEdgeDest(e); ValueT w = graph.edge_values[e]; w_v2[v] += w; w_c[c_v] += w; VertexT c_u = (communities == NULL) ? u : communities[u]; if (c_v != c_u) continue; w_in += w; } } ValueT q = 0; for (VertexT v = 0; v < nodes; v++) { m2 += w_v2[v]; if (w_c[v] != 0) q += w_c[v] * w_c[v]; } delete[] w_v2; w_v2 = NULL; delete[] w_c; w_c = NULL; return (w_in - q / m2) / m2; /* q = 0; ValueT w1 = 0, w2 = 0; //#pragma omp parallel for //reduction(+:q) for (VertexT v = 0; v < nodes; v++) { VertexT comm_v = (communities == NULL) ? v : communities[v]; q_v[v] = 0; SizeT start_e = graph.GetNeighborListOffset(v); SizeT degree = graph.GetNeighborListLength(v); ValueT w_v2_v = w_v2[v]; std::unordered_map<VertexT, ValueT> w_v2v; for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = graph.GetEdgeDest(e); if (comm_v != ((communities == NULL) ? 
u : communities[u])) continue; ValueT w = graph.edge_values[e]; auto it = w_v2v.find(u); if (it == w_v2v.end()) w_v2v[u] = w; else it -> second += w; } auto &comm = comms[comm_v]; for (auto it = comm.begin(); it != comm.end(); it++) { VertexT u = *it; auto it2 = w_v2v.find(u); ValueT w = 0; if (it2 != w_v2v.end()) w = w_v2v[u]; w1 += w; w2 += w_v2_v * w_v2[u]; q_v[v] += (w - w_v2_v * w_v2[u] / m); } //q += q_v; w_v2v.clear(); } for (VertexT v = 0; v < nodes; v++) q += q_v[v]; util::PrintMsg("w1 = " + std::to_string(w1) + + ", w2 = " + std::to_string(w2) + ", w_c^2 / m = " + std::to_string(w1 - q)); q /= m; delete[] q_v ; q_v = NULL; delete[] w_2v; w_2v = NULL; delete[] w_v2; w_v2 = NULL; return q; */ } /** * @brief Simple CPU-based reference Louvain Community Detection implementation * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Input parameters * @param[in] graph Input graph * @param[out] communities Community IDs for each vertex * \return double Time taken for the Louvain implementation */ template <typename GraphT, typename ValueT = typename GraphT::ValueT> double CPU_Reference(util::Parameters &parameters, GraphT &graph, typename GraphT::VertexT *communities, std::vector<std::vector<typename GraphT::VertexT> *> *pass_communities = NULL, std::vector<GraphT *> *pass_graphs = NULL) { typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::CsrT CsrT; VertexT max_passes = parameters.Get<VertexT>("max-passes"); VertexT max_iters = parameters.Get<VertexT>("max-iters"); bool pass_stats = parameters.Get<bool>("pass-stats"); bool iter_stats = parameters.Get<bool>("iter-stats"); ValueT pass_gain_threshold = parameters.Get<ValueT>("pass-th"); ValueT iter_gain_threshold = parameters.Get<ValueT>("iter-th"); bool has_pass_communities = false; if (pass_communities != NULL) has_pass_communities = true; else pass_communities = new std::vector<std::vector<VertexT> *>; pass_communities->clear(); bool has_pass_graphs = false; if (pass_graphs != NULL) has_pass_graphs = true; ValueT q = Get_Modularity(graph); ValueT *w_v2c = new ValueT[graph.nodes]; VertexT *neighbor_comms = new VertexT[graph.nodes]; VertexT *comm_convert = new VertexT[graph.nodes]; ValueT *w_v2self = new ValueT[graph.nodes]; ValueT *w_v2 = new ValueT[graph.nodes]; ValueT *w_c2 = new ValueT[graph.nodes]; typedef std::pair<VertexT, ValueT> pairT; std::vector<pairT> *w_c2c = new std::vector<pairT>[graph.nodes]; GraphT temp_graph; temp_graph.Allocate(graph.nodes, graph.edges, util::HOST); auto &temp_row_offsets = temp_graph.CsrT::row_offsets; auto &temp_column_indices = temp_graph.CsrT::column_indices; auto &temp_edge_values = temp_graph.CsrT::edge_values; auto c_graph = &graph; auto n_graph = c_graph; n_graph = NULL; ValueT m2 = 0; for (SizeT e = 0; e < graph.edges; e++) m2 += graph.CsrT::edge_values[e]; for (SizeT v = 0; v < graph.nodes; v++) w_v2c[v] = util::PreDefinedValues<ValueT>::InvalidValue; util::CpuTimer cpu_timer, pass_timer, iter_timer; cpu_timer.Start(); int pass_num = 0; while (pass_num < max_passes) { // Pass initialization if (pass_stats) pass_timer.Start(); if (iter_stats) iter_timer.Start(); auto &current_graph = *c_graph; SizeT nodes = current_graph.nodes; auto c_communities = new std::vector<VertexT>; auto &current_communities = *c_communities; current_communities.reserve(nodes); for (VertexT v = 0; v < nodes; v++) { current_communities[v] = v; w_v2[v] = 0; w_v2self[v] = 0; SizeT start_e = 
current_graph.GetNeighborListOffset(v); SizeT degree = current_graph.GetNeighborListLength(v); for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = current_graph.GetEdgeDest(e); ValueT w = current_graph.edge_values[e]; w_v2[v] += w; if (u == v) w_v2self[v] += w; } w_c2[v] = w_v2[v]; } if (iter_stats) iter_timer.Stop(); util::PrintMsg("pass " + std::to_string(pass_num) + ", pre-iter, elapsed = " + std::to_string(iter_timer.ElapsedMillis()), iter_stats); // Modulation Optimization int iter_num = 0; ValueT pass_gain = 0; while (iter_num < max_iters) { if (iter_stats) iter_timer.Start(); ValueT iter_gain = 0; for (VertexT v = 0; v < nodes; v++) { SizeT start_e = current_graph.GetNeighborListOffset(v); SizeT degree = current_graph.GetNeighborListLength(v); VertexT num_neighbor_comms = 0; for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = current_graph.GetEdgeDest(e); ValueT w = current_graph.edge_values[e]; VertexT c = current_communities[u]; if (!util::isValid(w_v2c[c])) { w_v2c[c] = w; neighbor_comms[num_neighbor_comms] = c; num_neighbor_comms++; } else w_v2c[c] += w; } VertexT org_comm = current_communities[v]; VertexT new_comm = org_comm; ValueT w_v2c_org = 0; if (util::isValid(w_v2c[org_comm])) w_v2c_org = w_v2c[org_comm]; ValueT w_v2_v = w_v2[v]; ValueT gain_base = w_v2self[v] - w_v2c_org - (w_v2[v] - w_c2[org_comm]) * w_v2_v / m2; ValueT max_gain = 0; for (VertexT i = 0; i < num_neighbor_comms; i++) { VertexT c = neighbor_comms[i]; if (c == org_comm) { w_v2c[c] = util::PreDefinedValues<ValueT>::InvalidValue; continue; } ValueT w_v2c_c = w_v2c[c]; w_v2c[c] = util::PreDefinedValues<ValueT>::InvalidValue; ValueT gain = gain_base + w_v2c_c - w_c2[c] * w_v2_v / m2; if (gain > max_gain) { max_gain = gain; new_comm = c; } } if (max_gain > 0 && new_comm != current_communities[v]) { iter_gain += max_gain; current_communities[v] = new_comm; w_c2[new_comm] += w_v2[v]; w_c2[org_comm] -= w_v2[v]; } } iter_num++; iter_gain *= 2; iter_gain /= m2; q += iter_gain; pass_gain += iter_gain; if (iter_stats) iter_timer.Stop(); util::PrintMsg( "pass " + std::to_string(pass_num) + ", iter " + std::to_string(iter_num) + ", q = " + std::to_string(q) + ", iter_gain = " + std::to_string(iter_gain) + ", pass_gain = " + std::to_string(pass_gain) + ", elapsed = " + std::to_string(iter_timer.ElapsedMillis()), iter_stats); if (iter_gain < iter_gain_threshold) break; } // Community Aggregation if (iter_stats) iter_timer.Start(); VertexT num_comms = 0; for (VertexT v = 0; v < nodes; v++) comm_convert[v] = 0; for (VertexT v = 0; v < nodes; v++) comm_convert[current_communities[v]] = 1; for (VertexT v = 0; v < nodes; v++) { if (comm_convert[v] == 0) continue; comm_convert[v] = num_comms; num_comms++; } for (VertexT v = 0; v < nodes; v++) current_communities[v] = comm_convert[current_communities[v]]; pass_communities->push_back(c_communities); for (VertexT v = 0; v < nodes; v++) { SizeT start_e = current_graph.GetNeighborListOffset(v); SizeT degree = current_graph.GetNeighborListLength(v); VertexT comm_v = current_communities[v]; auto &w_c2c_c = w_c2c[comm_v]; for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = current_graph.GetEdgeDest(e); ValueT w = current_graph.edge_values[e]; VertexT comm_u = current_communities[u]; w_c2c_c.push_back(std::make_pair(comm_u, w)); } } SizeT num_edges = 0; auto &w_2c = w_v2c; temp_row_offsets[0] = 0; for (VertexT c = 0; c < num_comms; c++) { auto &w_c2c_c = w_c2c[c]; VertexT num_neighbor_comms = 0; for (auto it = w_c2c_c.begin(); 
it != w_c2c_c.end(); it++) { VertexT u_c = it->first; ValueT w = it->second; if (!util::isValid(w_2c[u_c])) { w_2c[u_c] = w; neighbor_comms[num_neighbor_comms] = u_c; num_neighbor_comms++; } else w_2c[u_c] += w; } w_c2c_c.clear(); for (VertexT i = 0; i < num_neighbor_comms; i++) { VertexT u_c = neighbor_comms[i]; ValueT w = w_2c[u_c]; temp_column_indices[num_edges + i] = u_c; temp_edge_values[num_edges + i] = w; w_2c[u_c] = util::PreDefinedValues<ValueT>::InvalidValue; } num_edges += num_neighbor_comms; temp_row_offsets[c + 1] = num_edges; } n_graph = new GraphT; auto &next_graph = *n_graph; if (has_pass_graphs) pass_graphs->push_back(n_graph); next_graph.Allocate(num_comms, num_edges, util::HOST); memcpy(next_graph.CsrT::row_offsets + 0, temp_row_offsets + 0, sizeof(SizeT) * (num_comms + 1)); memcpy(next_graph.CsrT::column_indices + 0, temp_column_indices + 0, sizeof(VertexT) * num_edges); memcpy(next_graph.CsrT::edge_values + 0, temp_edge_values + 0, sizeof(ValueT) * num_edges); if (iter_stats) iter_timer.Stop(); util::PrintMsg("pass " + std::to_string(pass_num) + ", graph compaction, elapsed = " + std::to_string(iter_timer.ElapsedMillis()), iter_stats); if (pass_stats) pass_timer.Stop(); util::PrintMsg( "pass " + std::to_string(pass_num) + ", #v = " + std::to_string(nodes) + " -> " + std::to_string(num_comms) + ", #e = " + std::to_string(current_graph.edges) + " -> " + std::to_string(num_edges) + ", #iter = " + std::to_string(iter_num) + ", q = " + std::to_string(q) + ", pass_gain = " + std::to_string(pass_gain) + ", elapsed = " + std::to_string(pass_timer.ElapsedMillis()), pass_stats); if (pass_num != 0 && !has_pass_graphs) { current_graph.Release(util::HOST); delete c_graph; } c_graph = n_graph; n_graph = NULL; pass_num++; if (pass_gain < pass_gain_threshold) break; } // Assining communities to vertices in original graph for (VertexT v = 0; v < graph.nodes; v++) communities[v] = v; pass_num = 0; for (auto it = pass_communities->begin(); it != pass_communities->end(); it++) { auto &v2c = *(*it); for (VertexT v = 0; v < graph.nodes; v++) { communities[v] = v2c[communities[v]]; } pass_num++; } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); // Clearn-up if (!has_pass_communities) { for (auto it = pass_communities->begin(); it != pass_communities->end(); it++) { (*it)->clear(); delete *it; } pass_communities->clear(); delete pass_communities; pass_communities = NULL; } temp_graph.Release(); delete[] comm_convert; comm_convert = NULL; delete[] w_c2c; w_c2c = NULL; delete[] w_v2self; w_v2self = NULL; delete[] w_v2; w_v2 = NULL; delete[] w_c2; w_c2 = NULL; delete[] w_v2c; w_v2c = NULL; delete[] neighbor_comms; neighbor_comms = NULL; return elapsed; } /** * @brief Simple CPU-based reference Louvain Community Detection implementation * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Input parameters * @param[in] graph Input graph * @param[out] communities Community IDs for each vertex * \return double Time taken for the Louvain implementation */ template <typename GraphT, typename ValueT = typename GraphT::ValueT> double OMP_Reference(util::Parameters &parameters, GraphT &graph, typename GraphT::VertexT *communities, std::vector<std::vector<typename GraphT::VertexT> *> *pass_communities = NULL, std::vector<GraphT *> *pass_graphs = NULL) { typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::CsrT CsrT; VertexT max_passes = parameters.Get<VertexT>("max-passes"); VertexT max_iters = 
parameters.Get<VertexT>("max-iters"); bool pass_stats = parameters.Get<bool>("pass-stats"); bool iter_stats = parameters.Get<bool>("iter-stats"); ValueT pass_gain_threshold = parameters.Get<ValueT>("pass-th"); ValueT iter_gain_threshold = parameters.Get<ValueT>("iter-th"); int num_threads = parameters.Get<int>("omp-threads"); ValueT first_threshold = parameters.Get<ValueT>("1st-th"); #pragma omp parallel { if (num_threads == 0) num_threads = omp_get_num_threads(); } util::PrintMsg("#threads = " + std::to_string(num_threads) + ", 1st-th = " + std::to_string(first_threshold)); bool has_pass_communities = false; if (pass_communities != NULL) has_pass_communities = true; else pass_communities = new std::vector<std::vector<VertexT> *>; pass_communities->clear(); bool has_pass_graphs = false; if (pass_graphs != NULL) has_pass_graphs = true; ValueT q = Get_Modularity(graph); ValueT **w_v2cs = new ValueT *[num_threads]; VertexT **neighbor_commss = new VertexT *[num_threads]; ValueT *iter_gains = new ValueT[num_threads]; VertexT *comm_convert = new VertexT[graph.nodes]; ValueT *w_v2self = new ValueT[graph.nodes]; ValueT *w_v2 = new ValueT[graph.nodes]; ValueT *w_c2 = new ValueT[graph.nodes]; VertexT *comm_counts = new VertexT[num_threads]; typedef std::pair<VertexT, ValueT> pairT; std::vector<pairT> **w_c2cs = new std::vector<pairT> *[num_threads]; #pragma omp parallel num_threads(num_threads) { int thread_num = omp_get_thread_num(); w_v2cs[thread_num] = new ValueT[graph.nodes]; auto &w_v2c = w_v2cs[thread_num]; for (SizeT v = 0; v < graph.nodes; v++) w_v2c[v] = util::PreDefinedValues<ValueT>::InvalidValue; neighbor_commss[thread_num] = new VertexT[graph.nodes]; w_c2cs[thread_num] = new std::vector<pairT>[graph.nodes]; } // GraphT temp_graph; // temp_graph.Allocate(graph.nodes, graph.edges, util::HOST); // auto &temp_row_offsets = temp_graph.CsrT::row_offsets; // auto &temp_column_indices = temp_graph.CsrT::column_indices; // auto &temp_edge_values = temp_graph.CsrT::edge_values; SizeT *temp_row_offsets = new SizeT[graph.nodes + 1]; std::vector<SizeT> *temp_column_indicess = new std::vector<SizeT>[num_threads]; std::vector<ValueT> *temp_edge_valuess = new std::vector<ValueT>[num_threads]; auto c_graph = &graph; auto n_graph = c_graph; n_graph = NULL; ValueT m2 = 0; //#pragma omp parallel for num_threads(num_threads) reduction(+:m2) for (SizeT e = 0; e < graph.edges; e++) m2 += graph.CsrT::edge_values[e]; util::CpuTimer cpu_timer, pass_timer, iter_timer; cpu_timer.Start(); int pass_num = 0; while (pass_num < max_passes) { // if (pass_num > 1) // num_threads = 1; // Pass initialization if (pass_stats) pass_timer.Start(); if (iter_stats) iter_timer.Start(); auto &current_graph = *c_graph; SizeT nodes = current_graph.nodes; auto c_communities = new std::vector<VertexT>; auto &current_communities = *c_communities; current_communities.reserve(nodes); #pragma omp parallel for num_threads(num_threads) for (VertexT v = 0; v < nodes; v++) { current_communities[v] = v; w_v2[v] = 0; w_v2self[v] = 0; SizeT start_e = current_graph.GetNeighborListOffset(v); SizeT degree = current_graph.GetNeighborListLength(v); for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = current_graph.GetEdgeDest(e); ValueT w = current_graph.edge_values[e]; w_v2[v] += w; if (u == v) w_v2self[v] += w; } w_c2[v] = w_v2[v]; } if (iter_stats) iter_timer.Stop(); util::PrintMsg("pass " + std::to_string(pass_num) + ", pre-iter, elapsed = " + std::to_string(iter_timer.ElapsedMillis()), iter_stats); // Modulation Optimization 
int iter_num = 0; ValueT pass_gain = 0; ValueT iter_gain = 0; bool to_continue = true; //#pragma omp parallel num_threads(num_threads) // while (iter_num < max_iters) { while (to_continue) { // int thread_num = omp_get_thread_num(); //#pragma omp single { if (iter_stats) iter_timer.Start(); iter_gain = 0; } for (int t = 0; t < num_threads; t++) iter_gains[t] = 0; // iter_gains[thread_num] = 0; // auto &w_v2c = w_v2cs[thread_num]; // auto &neighbor_comms = neighbor_commss[thread_num]; // auto &iter_gain = iter_gains[thread_num]; #pragma omp parallel for num_threads(num_threads) //#pragma omp for for (VertexT v = 0; v < nodes; v++) //#pragma omp parallel num_threads(num_threads) { int thread_num = omp_get_thread_num(); // iter_gains[thread_num] = 0; // VertexT start_v = nodes / num_threads * thread_num; // VertexT end_v = nodes / num_threads * (thread_num + 1); auto &w_v2c = w_v2cs[thread_num]; auto &neighbor_comms = neighbor_commss[thread_num]; // auto &iter_gain = iter_gains[thread_num]; // if (thread_num == 0) // start_v = 0; // if (thread_num == num_threads - 1) // end_v = nodes; // for (VertexT v = start_v; v < end_v; v++) { SizeT start_e = current_graph.GetNeighborListOffset(v); SizeT degree = current_graph.GetNeighborListLength(v); VertexT num_neighbor_comms = 0; for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = current_graph.GetEdgeDest(e); ValueT w = current_graph.edge_values[e]; VertexT c = current_communities[u]; if (!util::isValid(w_v2c[c])) { w_v2c[c] = w; neighbor_comms[num_neighbor_comms] = c; num_neighbor_comms++; } else w_v2c[c] += w; } VertexT org_comm = current_communities[v]; VertexT new_comm = org_comm; ValueT w_v2c_org = 0; if (util::isValid(w_v2c[org_comm])) w_v2c_org = w_v2c[org_comm]; ValueT w_v2_v = w_v2[v]; ValueT gain_base = w_v2self[v] - w_v2c_org - (w_v2_v - w_c2[org_comm]) * w_v2_v / m2; //- w_v2_v * w_v2_v / m2; ValueT max_gain = 0; for (VertexT i = 0; i < num_neighbor_comms; i++) { VertexT c = neighbor_comms[i]; if (c == org_comm) { w_v2c[c] = util::PreDefinedValues<ValueT>::InvalidValue; continue; } ValueT w_v2c_c = w_v2c[c]; w_v2c[c] = util::PreDefinedValues<ValueT>::InvalidValue; ValueT gain = gain_base + w_v2c_c - w_c2[c] * w_v2_v / m2; if (gain > max_gain) { max_gain = gain; new_comm = c; } } if (new_comm != current_communities[v]) { if (max_gain > 0) { current_communities[v] = new_comm; #pragma omp atomic w_c2[new_comm] += w_v2[v]; #pragma omp atomic w_c2[org_comm] -= w_v2[v]; iter_gains[thread_num] += max_gain; } } } } //#pragma omp barrier //#pragma omp single { iter_gain = 0; for (int t = 0; t < num_threads; t++) { iter_gain += iter_gains[t]; } iter_gain *= 2; iter_gain /= m2; q += iter_gain; pass_gain += iter_gain; if (iter_stats) { iter_timer.Stop(); util::PrintMsg( "pass " + std::to_string(pass_num) + ", iter " + std::to_string(iter_num) + ", q = " + std::to_string(q) + ", iter_gain = " + std::to_string(iter_gain) + ", pass_gain = " + std::to_string(pass_gain) + ", elapsed = " + std::to_string(iter_timer.ElapsedMillis()), iter_stats); } iter_num++; if ((pass_num != 0 && iter_gain < iter_gain_threshold) || (pass_num == 0 && iter_gain < first_threshold) || iter_num >= max_iters) to_continue = false; } } // end of while (to_continue) } // end of omp parallel // Community Aggregation if (iter_stats) iter_timer.Start(); // util::CpuTimer timer1, timer2, timer3, timer4; // timer1.Start(); VertexT num_comms = 0; SizeT num_edges = 0; #pragma omp parallel num_threads(num_threads) { int thread_num = omp_get_thread_num(); VertexT 
start_v = nodes / num_threads * thread_num; VertexT end_v = nodes / num_threads * (thread_num + 1); if (thread_num == 0) start_v = 0; if (thread_num == num_threads - 1) end_v = nodes; #pragma omp for for (VertexT v = 0; v < nodes; v++) // for (VertexT v = start_v; v < end_v; v++) comm_convert[v] = 0; #pragma omp barrier // util::PrintMsg(std::to_string(thread_num) + " 0"); #pragma omp for for (VertexT v = 0; v < nodes; v++) // for (VertexT v = start_v; v < end_v; v++) comm_convert[current_communities[v]] = 1; #pragma omp barrier // util::PrintMsg(std::to_string(thread_num) + " 1"); auto &comm_count = comm_counts[thread_num]; comm_count = 1; for (VertexT v = start_v; v < end_v; v++) { if (comm_convert[v] == 0) continue; comm_convert[v] = comm_count; comm_count++; } #pragma omp barrier // util::PrintMsg(std::to_string(thread_num) + " 2"); #pragma omp single { num_comms = 0; for (int t = 0; t < num_threads; t++) { VertexT temp = comm_counts[t] - 1; comm_counts[t] = num_comms; num_comms += temp; } } //#pragma omp for // for (VertexT v = 0; v < nodes; v++) for (VertexT v = start_v; v < end_v; v++) { if (comm_convert[v] != 0) { comm_convert[v]--; comm_convert[v] += comm_counts[thread_num]; } } #pragma omp barrier // util::PrintMsg(std::to_string(thread_num) + " 3"); #pragma omp for for (VertexT v = 0; v < nodes; v++) // for (VertexT v = start_v; v < end_v; v++) current_communities[v] = comm_convert[current_communities[v]]; //} #pragma omp single { pass_communities->push_back(c_communities); // timer1.Stop(); // timer2.Start(); } //#pragma omp parallel for num_threads(num_threads) ////reduction(+:iter_gain) for (VertexT v = 0; v < nodes; v++) //{ // int thread_num = omp_get_thread_num(); // VertexT start_v = nodes / num_threads * thread_num; // VertexT end_v = nodes / num_threads * (thread_num + 1); // if (thread_num == 0) // start_v = 0; // if (thread_num == num_threads - 1) // end_v = nodes; #pragma omp for for (VertexT v = 0; v < nodes; v++) // for (VertexT v = start_v; v < end_v; v++) { int thread_num = omp_get_thread_num(); auto &w_c2c = w_c2cs[thread_num]; SizeT start_e = current_graph.GetNeighborListOffset(v); SizeT degree = current_graph.GetNeighborListLength(v); VertexT comm_v = current_communities[v]; auto &w_c2c_c = w_c2c[comm_v]; for (SizeT k = 0; k < degree; k++) { SizeT e = start_e + k; VertexT u = current_graph.GetEdgeDest(e); ValueT w = current_graph.edge_values[e]; VertexT comm_u = current_communities[u]; w_c2c_c.push_back(std::make_pair(comm_u, w)); } } //} //#pragma omp single //{ // timer2.Stop(); // timer3.Start(); //} // temp_row_offsets[0] = 0; //#pragma omp parallel num_threads(num_threads) //{ // int thread_num = omp_get_thread_num(); VertexT start_c = num_comms / num_threads * thread_num; VertexT end_c = num_comms / num_threads * (thread_num + 1); if (thread_num == 0) start_c = 0; if (thread_num == num_threads - 1) end_c = num_comms; auto &temp_column_indices = temp_column_indicess[thread_num]; auto &temp_edge_values = temp_edge_valuess[thread_num]; auto &w_2c = w_v2cs[thread_num]; auto &neighbor_comms = neighbor_commss[thread_num]; for (VertexT c = start_c; c < end_c; c++) { VertexT num_neighbor_comms = 0; for (int t = 0; t < num_threads; t++) { auto &w_c2c_c = w_c2cs[t][c]; for (auto it = w_c2c_c.begin(); it != w_c2c_c.end(); it++) { VertexT u_c = it->first; ValueT w = it->second; if (!util::isValid(w_2c[u_c])) { w_2c[u_c] = w; neighbor_comms[num_neighbor_comms] = u_c; num_neighbor_comms++; } else w_2c[u_c] += w; } w_c2c_c.clear(); } for (VertexT i = 0; i < 
num_neighbor_comms; i++) { VertexT u_c = neighbor_comms[i]; ValueT w = w_2c[u_c]; temp_column_indices.push_back(u_c); temp_edge_values.push_back(w); w_2c[u_c] = util::PreDefinedValues<ValueT>::InvalidValue; } temp_row_offsets[c] = num_neighbor_comms; } #pragma omp barrier #pragma omp single { num_edges = 0; for (VertexT c = 0; c < num_comms; c++) { SizeT temp = temp_row_offsets[c]; temp_row_offsets[c] = num_edges; num_edges += temp; } temp_row_offsets[num_comms] = num_edges; n_graph = new GraphT; // auto &next_graph = *n_graph; if (has_pass_graphs) pass_graphs->push_back(n_graph); n_graph->Allocate(num_comms, num_edges, util::HOST); } memcpy(n_graph->CsrT::column_indices + temp_row_offsets[start_c], temp_column_indices.data(), temp_column_indices.size() * sizeof(VertexT)); memcpy(n_graph->CsrT::edge_values + temp_row_offsets[start_c], temp_edge_values.data(), temp_edge_values.size() * sizeof(ValueT)); memcpy(n_graph->CsrT::row_offsets + start_c, temp_row_offsets + start_c, sizeof(SizeT) * (end_c - start_c + ((thread_num == num_threads - 1) ? 1 : 0))); temp_column_indices.clear(); temp_edge_values.clear(); } // timer3.Stop(); // timer4.Start(); // timer4.Stop(); // util::PrintMsg("Timers = " // + std::to_string(timer1.ElapsedMillis()) + " " // + std::to_string(timer2.ElapsedMillis()) + " " // + std::to_string(timer3.ElapsedMillis()) + " " // + std::to_string(timer4.ElapsedMillis())); if (iter_stats) { iter_timer.Stop(); util::PrintMsg("pass " + std::to_string(pass_num) + ", graph compaction, elapsed = " + std::to_string(iter_timer.ElapsedMillis())); } if (pass_stats) { pass_timer.Stop(); util::PrintMsg( "pass " + std::to_string(pass_num) + ", #v = " + std::to_string(nodes) + " -> " + std::to_string(num_comms) + ", #e = " + std::to_string(current_graph.edges) + " -> " + std::to_string(num_edges) + ", #iter = " + std::to_string(iter_num) + ", q = " + std::to_string(q) + ", pass_gain = " + std::to_string(pass_gain) + ", elapsed = " + std::to_string(pass_timer.ElapsedMillis())); } if (pass_num != 0 && !has_pass_graphs) { current_graph.Release(util::HOST); delete c_graph; } c_graph = n_graph; n_graph = NULL; pass_num++; if (pass_gain < pass_gain_threshold) break; } // Assining communities to vertices in original graph for (VertexT v = 0; v < graph.nodes; v++) communities[v] = v; pass_num = 0; for (auto it = pass_communities->begin(); it != pass_communities->end(); it++) { auto &v2c = *(*it); for (VertexT v = 0; v < graph.nodes; v++) { communities[v] = v2c[communities[v]]; } pass_num++; } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); // Clearn-up if (!has_pass_communities) { for (auto it = pass_communities->begin(); it != pass_communities->end(); it++) { (*it)->clear(); delete *it; } pass_communities->clear(); delete pass_communities; pass_communities = NULL; } // temp_graph.Release(); delete[] temp_row_offsets; temp_row_offsets = NULL; delete[] temp_column_indicess; temp_column_indicess = NULL; delete[] temp_edge_valuess; temp_edge_valuess = NULL; delete[] comm_convert; comm_convert = NULL; delete[] w_v2self; w_v2self = NULL; delete[] w_v2; w_v2 = NULL; delete[] w_c2; w_c2 = NULL; #pragma omp parallel num_threads(num_threads) { int thread_num = omp_get_thread_num(); delete[] w_v2cs[thread_num]; w_v2cs[thread_num] = NULL; delete[] neighbor_commss[thread_num]; neighbor_commss[thread_num] = NULL; delete[] w_c2cs[thread_num]; w_c2cs[thread_num] = 0; } delete[] w_c2cs; w_c2cs = NULL; delete[] w_v2cs; w_v2cs = NULL; delete[] neighbor_commss; neighbor_commss = NULL; return elapsed; } /** 
* @brief Validation of Louvain results * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[in] src The source vertex * @param[in] h_distances Computed distances from the source to each vertex * @param[in] h_preds Computed predecessors for each vertex * @param[in] ref_distances Reference distances from the source to each vertex * @param[in] ref_preds Reference predecessors for each vertex * @param[in] verbose Whether to output detail comparsions * \return GraphT::SizeT Number of errors */ template <typename GraphT, typename ValueT = typename GraphT::ValueT> typename GraphT::SizeT Validate_Results( util::Parameters &parameters, GraphT &graph, typename GraphT::VertexT *communities, typename GraphT::VertexT *ref_communities = NULL, bool verbose = true) { typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::CsrT CsrT; SizeT num_errors = 0; bool quiet = parameters.Get<bool>("quiet"); char *comm_markers = new char[graph.nodes]; for (VertexT v = 0; v < graph.nodes; v++) comm_markers[v] = 0; util::PrintMsg("Community Validity: ", !quiet, false); // Verify the result for (VertexT v = 0; v < graph.nodes; v++) { auto c = communities[v]; if (util::lessThanZero(c) || c >= graph.nodes) { util::PrintMsg("FAIL: communties[" + std::to_string(v) + "] = " + std::to_string(c) + " out of bound", (!quiet) && (num_errors == 0)); num_errors++; continue; } comm_markers[c] = 1; } if (num_errors != 0) { delete[] comm_markers; comm_markers = NULL; util::PrintMsg(std::to_string(num_errors) + " errors occurred.", !quiet); return num_errors; } util::PrintMsg("PASS", !quiet); ValueT num_comms = 0; for (VertexT v = 0; v < graph.nodes; v++) { if (comm_markers[v] != 0) { num_comms++; comm_markers[v] = 0; } } ValueT q = Get_Modularity(graph, communities); util::PrintMsg("Computed: #communities = " + std::to_string(num_comms) + ", modularity = " + std::to_string(q)); if (ref_communities == NULL) { delete[] comm_markers; comm_markers = NULL; return num_errors; } for (VertexT v = 0; v < graph.nodes; v++) { auto c = ref_communities[v]; if (util::lessThanZero(c) || c >= graph.nodes) { num_errors++; continue; } comm_markers[c] = 1; } if (num_errors != 0) { util::PrintMsg("Reference: " + std::to_string(num_errors) + " vertices have communities out of bound.", !quiet); delete[] comm_markers; comm_markers = NULL; return 0; } num_comms = 0; for (VertexT v = 0; v < graph.nodes; v++) if (comm_markers[v] != 0) num_comms++; q = Get_Modularity(graph, ref_communities); util::PrintMsg("Reference: #communities = " + std::to_string(num_comms) + ", modularity = " + std::to_string(q)); delete[] comm_markers; comm_markers = NULL; return num_errors; } } // namespace louvain } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
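// ---------------------------------------------------------------------------
// A minimal host-side sketch (independent of the Gunrock types above) of the
// quantity Get_Modularity computes,
//     Q = ( w_in - sum_c w_c^2 / m2 ) / m2,
// where m2 is the total weight of all directed edge entries (2m for an
// undirected graph stored symmetrically), w_in the weight of intra-community
// entries, and w_c the total weighted degree of community c. The plain
// edge-list representation below is an assumption for illustration only.
// ---------------------------------------------------------------------------
#include <vector>

struct Edge { int src, dst; double w; };

static double ModularityFromEdgeList(const std::vector<Edge> &edges,
                                     const std::vector<int> &community,
                                     int numNodes) {
  std::vector<double> w_c(numNodes, 0.0);  // weighted degree per community
  double m2 = 0.0, w_in = 0.0;
  for (const Edge &e : edges) {            // edges listed in both directions
    m2 += e.w;
    w_c[community[e.src]] += e.w;
    if (community[e.src] == community[e.dst])
      w_in += e.w;
  }
  double sum_wc2 = 0.0;
  for (double w : w_c)
    sum_wc2 += w * w;
  return (w_in - sum_wc2 / m2) / m2;
}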
* Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * CUBLAS provides high-performance matrix multiplication. * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. * */ // Utilities and system includes #include <shrUtils.h> #include <cublas_v2.h> #include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples #include <shrQATest.h> #include <cuda_runtime.h> #include "matrixMul.h" // includes, kernels #include <matrixMul_kernel.cu> static char *sSDKsample = "matrixMul"; //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( cudaError err, const char *file, const int line ) { if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError( const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. 
<<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( cudaSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; } // This function returns the best GPU (with maximum GFLOPS) int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount( &device_count ); // Find the best major SM Architecture GPU device while ( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = MAX(best_SM_arch, deviceProp.major); } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if( compute_perf > max_compute_perf ) { // If we find GPU with SM major > 2, search only these if ( best_SM_arch > 2 ) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } ++current_device; } return max_perf_device; } // Initialization code to find the best CUDA Device int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( devID ) ); checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name); } return devID; } // end of CUDA Helper Functions //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void printDiff(float*, float*, int, int, int, float); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); void inline checkError(cublasStatus_t status, const char* msg) { if(status != CUBLAS_STATUS_SUCCESS){ printf(msg); exit(-1); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { shrQAStart(argc, argv); printf("[ %s ]\n", sSDKsample); //shrSetLogFileName ("matrixMul.txt"); shrLog("%s\n\tStarting (CUDA and CUBLAS tests)...\n\n", argv[0]); runTest(argc, argv); } 
//////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { if(checkCmdLineFlag(argc, (const char**)argv, "device")) { int devID = getCmdLineArgumentInt(argc, (const char **)argv, "device="); if (devID < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { checkCudaErrors( cudaSetDevice(gpuGetMaxGflopsDeviceId()) ); } int devID; cudaDeviceProp props; // get number of SMs on this GPU checkCudaErrors(cudaGetDevice(&devID)); checkCudaErrors(cudaGetDeviceProperties(&props, devID)); // use a larger block size for Fermi and above int block_size = (props.major < 2) ? 16 : 32; printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); // set seed for rand() srand(2006); // Optional Command-line multiplier for matrix sizes unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC; int iSizeMultiple = 5; shrGetCmdLineArgumenti(argc, (const char**)argv, "sizemult", &iSizeMultiple); iSizeMultiple = CLAMP(iSizeMultiple, 1, 10); bool useCublasOnly = false; if(shrCheckCmdLineFlag(argc, (const char**)argv, "cublas")) useCublasOnly = true; // For GPUs with fewer # of SM's, we limit the maximum size of the matrix if (props.multiProcessorCount <= 4) { uiWA = 2 * block_size * iSizeMultiple; uiHA = 4 * block_size * iSizeMultiple; uiWB = 2 * block_size * iSizeMultiple; uiHB = 4 * block_size * iSizeMultiple; uiWC = 2 * block_size * iSizeMultiple; uiHC = 4 * block_size * iSizeMultiple; } else { uiWA = WA * iSizeMultiple; uiHA = HA * iSizeMultiple; uiWB = WB * iSizeMultiple; uiHB = HB * iSizeMultiple; uiWC = WC * iSizeMultiple; uiHC = HC * iSizeMultiple; } shrLog("\nUsing Matrix Sizes: A(%u x %u), B(%u x %u), C(%u x %u)\n\n", uiWA, uiHA, uiWB, uiHB, uiWC, uiHC); // allocate host memory for matrices A and B unsigned int size_A = uiWA * uiHA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = uiWB * uiHB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A, *d_B, *d_C; unsigned int size_C = uiWC * uiHC; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); float* h_CUBLAS = (float*) malloc(mem_size_C); checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A)); checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B)); // copy host memory to device checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ); checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ); checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(block_size, block_size); dim3 grid(uiWC / threads.x, uiHC / threads.y); // kernel warmup if(useCublasOnly) { } else { } // create and start timer shrLog("Runing Kernels...\n\n"); StopWatchInterface * timer_cublas; StopWatchInterface * timer_matrixMul; // execute the kernel int nIter = 30; // CUBLAS version 2.0 { cublasHandle_t handle; checkError(cublasCreate(&handle), "cublasCreate() error!\n"); const float alpha = 1.0f; const float beta = 0.0f; //Perform warmup operation with 
cublas cublasStatus_t ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, uiWB, uiHA, uiWA, &alpha, d_B, uiWB, d_A, uiWA, &beta, d_C, uiWA); checkError(ret, "cublas Sgemm returned an error!\n"); // Start Timing sdkCreateTimer(&timer_cublas); sdkStartTimer(&timer_cublas); for (int j = 0; j < nIter; j++) { //note cublas is column primary! //need to transpose the order cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, uiWB, uiHA, uiWA, &alpha, d_B, uiWB, d_A, uiWA, &beta, d_C, uiWA); } // check if kernel execution generated and error getLastCudaError("CUBLAS Kernel execution failed"); cudaDeviceSynchronize(); // stop and destroy timer sdkStopTimer(&timer_cublas); double dSeconds = sdkGetTimerValue(&timer_cublas)/((double)nIter * 1000.0); double dNumOps = 2.0 * (double)uiWA * (double)uiHA * (double)uiWB; double gflops = 1.0e-9 * dNumOps/dSeconds; //Log througput, etc shrLogEx(LOGBOTH | MASTER, 0, "> CUBLAS %.4f GFlop/s, Time = %.5f s, Size = %.0f Ops\n\n", gflops, dSeconds, dNumOps); sdkDeleteTimer(&timer_cublas); // copy result from device to host checkCudaErrors(cudaMemcpy(h_CUBLAS, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); checkError(cublasDestroy(handle), "cublasDestroy() error!\n"); } // For the case where "-cublas" is not specified, we will run the matrixMul kernel if (!useCublasOnly) { //Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMul<16><<< grid, threads >>>(d_C, d_A, d_B, uiWA, uiWB); } else { matrixMul<32><<< grid, threads >>>(d_C, d_A, d_B, uiWA, uiWB); } cudaDeviceSynchronize(); // Start Timing sdkCreateTimer(&timer_matrixMul); sdkStartTimer(&timer_matrixMul); for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMul<16><<< grid, threads >>>(d_C, d_A, d_B, uiWA, uiWB); } else { matrixMul<32><<< grid, threads >>>(d_C, d_A, d_B, uiWA, uiWB); } } // check if kernel execution generated and error getLastCudaError("CUDA matrixMul Kernel execution failed"); cudaDeviceSynchronize(); // stop and destroy timer sdkStopTimer(&timer_matrixMul); double dSeconds = sdkGetTimerValue(&timer_matrixMul)/((double)nIter * 1000.0); double dNumOps = 2.0 * (double)uiWA * (double)uiHA * (double)uiWB; double gflops = 1.0e-9 * dNumOps/dSeconds; //Log througput, etc shrLogEx(LOGBOTH | MASTER, 0, "> CUDA matrixMul %.4f GFlop/s, Time = %.5f s, Size = %.0f Ops, ", gflops, dSeconds, dNumOps); shrLogEx(LOGBOTH | MASTER, 0, "NumDevsUsed = %d, Workgroup = %u\n", 1, threads.x * threads.y); sdkDeleteTimer(&timer_matrixMul); // copy result from device to host checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); } // compute reference solution shrLog("\nComparing GPU results with Host computation...\n\n"); float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB); // check result (CUBLAS) printf("Comparing CUBLAS & Host results\n"); shrBOOL resCUBLAS = shrCompareL2fe(reference, h_CUBLAS, size_C, 1.0e-6f); if (resCUBLAS != shrTRUE) { printDiff(reference, h_CUBLAS, uiWC, uiHC, 100, 1.0e-5f); } shrLog("CUBLAS compares %s\n\n", (shrTRUE == resCUBLAS) ? "OK" : "FAIL"); // check result (matrixMul) printf("Comparing CUDA matrixMul & Host results\n"); shrBOOL resCUDA = shrCompareL2fe(reference, h_C, size_C, 1.0e-6f); if (resCUDA != shrTRUE) { printDiff(reference, h_C, uiWC, uiHC, 100, 1.0e-5f); } shrLog("CUDA matrixMul compares %s\n\n", (shrTRUE == resCUDA) ? 
"OK" : "FAIL"); // clean up memory free(h_A); free(h_B); free(h_C); free(reference); checkCudaErrors(cudaFree(d_A)); checkCudaErrors(cudaFree(d_B)); checkCudaErrors(cudaFree(d_C)); cudaDeviceReset(); shrQAFinishExit(argc, (const char **)argv, (resCUDA == shrTRUE && resCUBLAS == shrTRUE) ? QA_PASSED : QA_FAILED); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol) { shrLog("Listing first %d Differences > %.6f...\n", iListLength, fListTol); int i,j,k; int error_count=0; for (j = 0; j < height; j++) { if (error_count < iListLength) { shrLog("\n Row %d:\n", j); } for (i = 0; i < width; i++) { k = j * width + i; float fDiff = fabs(data1[k] - data2[k]); if (fDiff > fListTol) { if (error_count < iListLength) { shrLog(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff); } error_count++; } } } shrLog(" \n Total Errors = %d\n\n", error_count); }
#include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cuda.h> #include <cuda_runtime.h> #include <cfloat> #include "common.hpp" using math_ops::Exp; using math_ops::Pow; #define BLOCKSIZE 512 namespace soft_dice_space { template<typename T> class sum_op { public: __device__ __forceinline__ T operator()(T a, T b) const { return a + b; } }; template<template<typename> class Reduction, typename scalar_t> __device__ __forceinline__ void reduce_op( scalar_t* sdata, int blocksize, const Reduction<scalar_t>& oper) { int tid = threadIdx.x; __syncthreads(); for (int s{blocksize / 2}; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = oper(sdata[tid], sdata[tid + s]); } __syncthreads(); } } } // kernel function for forward and backward template<typename scalar_t> __global__ void compute_numer_denor(const int batchsize, const int sample_size, const int n_blockxs_sample, const scalar_t *logits, const scalar_t *labels, scalar_t *numer, scalar_t *denor, const float p) { /* Tips about shared memory: * 1. torch will instantiate the template with three types: double, float, half; * 2. these three types should not share same definitions of shared memory; * 3. so one method is to use static shared memory with memory size explicitly assigned, and another method is to allocate shared memory with same raw type, such as unsigned char here, and then cast the pointer according to different template types */ // method1: use static sized shared memory // __shared__ scalar_t sdata[BLOCKSIZE * 2]; // method2: allocate with raw uchar type and then cast in different kernel extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + threadIdx.y * blockDim.x; int tid = threadIdx.x; int tstrd = blockDim.x * n_blockxs_sample; int bid = threadIdx.y + blockIdx.x * blockDim.y; int bstrd = gridDim.x * blockDim.y; int n_sample_blocks = n_blockxs_sample * batchsize; const scalar_t one(1.); for (int i{bid}; i < n_sample_blocks; i += bstrd) { int sample_start = (i / n_blockxs_sample) * sample_size; int local_tid = (i % n_blockxs_sample) * blockDim.x + tid; scalar_t v_numer{0}, v_denor{0}; for (int j{local_tid}; j < sample_size; j += tstrd) { scalar_t prob = one / (one + Exp(-logits[j + sample_start])); scalar_t lb = labels[j + sample_start]; v_numer += prob * lb * scalar_t(2.); v_denor += Pow(prob, scalar_t(p)) + lb; } __syncthreads(); sdata[tid] = v_numer; __syncthreads(); soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>( sdata, blockDim.x, soft_dice_space::sum_op<scalar_t>()); if (tid == 0) { numer[i] = sdata[0]; } __syncthreads(); sdata[tid] = v_denor; __syncthreads(); soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>( sdata, blockDim.x, soft_dice_space::sum_op<scalar_t>()); if (tid == 0) { denor[i] = sdata[0]; } } } template<typename scalar_t> __global__ void SoftDiceForward(const int batchsize, const int n_blockxs_sample, const scalar_t *numer, const scalar_t *denor, scalar_t *losses, const float smooth) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + threadIdx.y * blockDim.x; int tid = threadIdx.x; int bid = threadIdx.y + blockIdx.x * blockDim.y; int bstrd = gridDim.x * blockDim.y; const scalar_t one(1.); for (int i{bid}; i < batchsize; i += bstrd) { scalar_t v_numer{0}, v_denor{0}; int t_start = i * n_blockxs_sample; for (int j{tid}; j < n_blockxs_sample; j += blockDim.x) { 
v_numer += numer[j + t_start]; v_denor += denor[j + t_start]; } // reduce numer __syncthreads(); sdata[tid] = v_numer; __syncthreads(); soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>( sdata, blockDim.x, soft_dice_space::sum_op<scalar_t>()); v_numer = sdata[0]; // reduce denorm __syncthreads(); sdata[tid] = v_denor; __syncthreads(); soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>( sdata, blockDim.x, soft_dice_space::sum_op<scalar_t>()); v_denor = sdata[0]; if (tid == 0) { losses[bid] = one - (v_numer + smooth) / (v_denor + smooth); } } } template<typename scalar_t> __global__ void reduce_numer_denor(const int batchsize, const int n_blockxs_sample, scalar_t *numer, scalar_t *denor, const float smooth) { extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[]; scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw); sdata = sdata + threadIdx.y * blockDim.x; int tid = threadIdx.x; int bid = threadIdx.y + blockIdx.x * blockDim.y; int bstrd = gridDim.x * blockDim.y; for (int i{bid}; i < batchsize; i += bstrd) { scalar_t v_numer{0}, v_denor{0}; int t_start = i * n_blockxs_sample; for (int j{tid}; j < n_blockxs_sample; j += blockDim.x) { v_numer += numer[j + t_start]; v_denor += denor[j + t_start]; } // reduce numer __syncthreads(); sdata[tid] = v_numer; __syncthreads(); soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>( sdata, blockDim.x, soft_dice_space::sum_op<scalar_t>()); if (tid == 0) { numer[t_start] = sdata[0] + smooth; } // reduce denorm __syncthreads(); sdata[tid] = v_denor; __syncthreads(); soft_dice_space::reduce_op<soft_dice_space::sum_op, scalar_t>( sdata, blockDim.x, soft_dice_space::sum_op<scalar_t>()); if (tid == 0) { denor[t_start] = sdata[0] + smooth; } } } template<typename scalar_t> __global__ void SoftDiceBackward(const int batchsize, const int sample_size, const int n_blockxs_sample, const scalar_t *logits, const scalar_t *labels, const scalar_t *grad, const scalar_t *numer, const scalar_t *denor, scalar_t *grad_logits, const float p) { int tid = threadIdx.x; int tstrd = blockDim.x * n_blockxs_sample; int bid = blockIdx.x * blockDim.y + threadIdx.y; int bstrd = blockDim.y * gridDim.x; const scalar_t one(1.); const scalar_t two(2.); const scalar_t v_p(p); int n_sample_blocks = n_blockxs_sample * batchsize; for (int i{bid}; i < n_sample_blocks; i += bstrd) { int sample_idx = i / n_blockxs_sample; int sample_start = sample_idx * sample_size; int local_tid = (i % n_blockxs_sample) * blockDim.x + tid; scalar_t v_numer = numer[sample_idx * n_blockxs_sample]; scalar_t v_denor = denor[sample_idx * n_blockxs_sample]; scalar_t grad_val = grad[sample_idx]; for (int j{local_tid}; j < sample_size; j += tstrd) { scalar_t prob = one / (one + Exp(-logits[j + sample_start])); scalar_t lb = labels[j + sample_start]; scalar_t term1 = v_p * Pow(prob, scalar_t(p)) * (one - prob) * v_numer / Pow(v_denor, two); scalar_t term2 = two * lb * prob * (one - prob) / v_denor; grad_logits[j + sample_start] = grad_val * (term1 - term2); } } } // cuda forward and backward at::Tensor SoftDice_forward_cuda(const at::Tensor &logits, const at::Tensor &labels, const float p, const float smooth) { // CHECK type and shape AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int batchsize = logits.size(0); const int num_samples = logits.numel(); const int sample_size = num_samples / batchsize; // parallel method for numer/denor int blockx1 = 32; while 
(blockx1 < sample_size) blockx1 *= 2; blockx1 = std::max(32, std::min(BLOCKSIZE, blockx1 / 2)); int n_blockxs_sample = std::max(1, sample_size / blockx1); int blocky1 = std::max(1, BLOCKSIZE / blockx1); if (blocky1 > batchsize) blocky1 = batchsize; int gridx1 = batchsize * n_blockxs_sample / blocky1; gridx1 = std::max(1, std::min(4096, gridx1)); dim3 block1(blockx1, blocky1); dim3 grid1(gridx1); // parallel method for loss int blockx2 = 32; while (blockx2 < n_blockxs_sample) blockx2 *= 2; blockx2 = std::max(32, std::min(BLOCKSIZE, blockx2 / 2)); int blocky2 = std::max(1, BLOCKSIZE / blockx2); int gridx2 = std::min(batchsize / blocky2, 4096); gridx2 = std::max(1, gridx2); dim3 block2(blockx2, blocky2); dim3 grid2(gridx2); // allocate memory and cuda grid/block // Note: should use torch::zeros rather than at::zeros, torch::zeros is variable // and at::zeros is tensor auto losses = torch::empty({batchsize}, logits.options()); auto numer = torch::zeros( {batchsize * n_blockxs_sample}, logits.options()); auto denor = torch::zeros( {batchsize * n_blockxs_sample}, logits.options()); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } // call kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "soft dice forward", [&] { int shm_size = blockx1 * blocky1 * sizeof(scalar_t); compute_numer_denor<scalar_t><<<grid1, block1, shm_size, at::cuda::getCurrentCUDAStream()>>>( batchsize, sample_size, n_blockxs_sample, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<scalar_t>(), numer.contiguous().data_ptr<scalar_t>(), denor.contiguous().data_ptr<scalar_t>(), p ); shm_size = blockx2 * blocky2 * sizeof(scalar_t); SoftDiceForward<scalar_t><<<grid2, block2, shm_size, at::cuda::getCurrentCUDAStream()>>>( batchsize, n_blockxs_sample, numer.contiguous().data_ptr<scalar_t>(), denor.contiguous().data_ptr<scalar_t>(), losses.contiguous().data_ptr<scalar_t>(), smooth ); }); THCudaCheck(cudaGetLastError()); return losses; } at::Tensor SoftDice_backward_cuda(const at::Tensor &grad, const at::Tensor &logits, const at::Tensor &labels, const float p, const float smooth) { // CHECK type and shape AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda"); AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda"); AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda"); const int batchsize = logits.size(0); const int num_samples = logits.numel(); const int sample_size = num_samples / batchsize; // parallel settings for numer/denor int blockx1 = 32; while (blockx1 < sample_size) blockx1 *= 2; blockx1 = std::max(32, std::min(BLOCKSIZE, blockx1 / 2)); int n_blockxs_sample = sample_size / blockx1; int blocky1 = std::max(1, BLOCKSIZE / blockx1); if (blocky1 > batchsize) blocky1 = batchsize; int gridx1 = batchsize * n_blockxs_sample / blocky1; gridx1 = std::max(1, std::min(4096, gridx1)); dim3 block1(blockx1, blocky1); dim3 grid1(gridx1); // parallel settings for reduce numer/denor int blockx2 = 32; while (blockx2 < n_blockxs_sample) blockx2 *= 2; blockx2 = std::max(32, std::min(BLOCKSIZE, blockx2 / 2)); int blocky2 = std::max(1, BLOCKSIZE / blockx2); int gridx2 = std::min(batchsize / blocky2, 4096); gridx2 = std::max(1, gridx2); dim3 block2(blockx2, blocky2); dim3 grid2(gridx2); // allocate memory and cuda grid/block auto grad_logits = torch::empty_like(logits); auto numer = torch::zeros( {batchsize * n_blockxs_sample}, logits.options()); auto denor = torch::zeros( {batchsize * n_blockxs_sample}, logits.options()); // call 
kernel AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "soft dice backwrd", [&] { int shm_size = blockx1 * blocky1 * sizeof(scalar_t); compute_numer_denor<scalar_t><<<grid1, block1, shm_size, at::cuda::getCurrentCUDAStream()>>>( batchsize, sample_size, n_blockxs_sample, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<scalar_t>(), numer.contiguous().data_ptr<scalar_t>(), denor.contiguous().data_ptr<scalar_t>(), p ); shm_size = blockx2 * blocky2 * sizeof(scalar_t); reduce_numer_denor<scalar_t><<<grid2, block2, shm_size, at::cuda::getCurrentCUDAStream()>>>( batchsize, n_blockxs_sample, numer.contiguous().data_ptr<scalar_t>(), denor.contiguous().data_ptr<scalar_t>(), smooth ); SoftDiceBackward<scalar_t><<<grid1, block1, 0, at::cuda::getCurrentCUDAStream()>>>( batchsize, sample_size, n_blockxs_sample, logits.contiguous().data_ptr<scalar_t>(), labels.contiguous().data_ptr<scalar_t>(), grad.contiguous().data_ptr<scalar_t>(), numer.contiguous().data_ptr<scalar_t>(), denor.contiguous().data_ptr<scalar_t>(), grad_logits.contiguous().data_ptr<scalar_t>(), p ); }); THCudaCheck(cudaGetLastError()); return grad_logits; } // python inferface at::Tensor SoftDice_forward(const at::Tensor &logits, const at::Tensor &labels, const float p, const float smooth) { if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this dice loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return SoftDice_forward_cuda(logits, labels, p, smooth); } at::Tensor SoftDice_backward(const at::Tensor &grad, const at::Tensor &logits, const at::Tensor &labels, const float p, const float smooth) { if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) { AT_ERROR("this dice loss only supports gpu mode\n"); } at::DeviceGuard guard(logits.device()); return SoftDice_backward_cuda(grad, logits, labels, p, smooth); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("soft_dice_forward", &SoftDice_forward, "soft-dice forward"); m.def("soft_dice_backward", &SoftDice_backward, "soft-dice backward"); }
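// ---------------------------------------------------------------------------
// A plain C++ reference of the per-sample value the two forward kernels above
// compute:
//   numer = 2 * sum_j sigmoid(logit_j) * label_j
//   denor = sum_j sigmoid(logit_j)^p + label_j
//   loss  = 1 - (numer + smooth) / (denor + smooth)
// Intended only as a CPU sanity check for the CUDA path; it is not part of the
// extension and the function name is an assumption.
#include <cmath>
#include <vector>

inline std::vector<float> soft_dice_forward_ref(const std::vector<float> &logits,
                                                const std::vector<float> &labels,
                                                int batchsize, float p, float smooth) {
    const int sample_size = static_cast<int>(logits.size()) / batchsize;
    std::vector<float> losses(batchsize);
    for (int b = 0; b < batchsize; ++b) {
        double numer = 0.0, denor = 0.0;
        for (int j = 0; j < sample_size; ++j) {
            const int idx = b * sample_size + j;
            const double prob = 1.0 / (1.0 + std::exp(-static_cast<double>(logits[idx])));
            const double lb = labels[idx];
            numer += 2.0 * prob * lb;
            denor += std::pow(prob, static_cast<double>(p)) + lb;
        }
        losses[b] = 1.0f - static_cast<float>((numer + smooth) / (denor + smooth));
    }
    return losses;
}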
// Make sure we don't allow dynamic initialization for device // variables, but accept empty constructors allowed by CUDA. // RUN: %clang_cc1 -verify %s -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 %s #ifdef __clang__ #include "Inputs/cuda.h" #endif // Use the types we share with CodeGen tests. #include "Inputs/cuda-initializers.h" __shared__ int s_v_i = 1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ int d_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ int s_v_f = f(); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ int c_v_f = f(); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T s_t_i = {2}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ EC d_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC s_ec_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ EC d_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC s_ec_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC c_ec_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ETC d_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ETC s_etc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ETC d_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ETC s_etc_i2 = {3}; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ETC c_etc_i2 = {3}; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UC d_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UC c_uc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UD d_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UD s_ud; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UD c_ud; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ ECI d_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ ECI 
s_eci; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ ECI c_eci; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NEC d_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NEC s_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NEC c_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NED d_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NED c_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NCV d_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCV s_ncv; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCV c_ncv; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ VD d_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ VD s_vd; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ VD c_vd; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NCF d_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCF s_ncf; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NCF c_ncf; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __device__ UTC d_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UTC s_utc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ UTC d_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ UTC s_utc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ UTC c_utc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NETC d_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NETC s_netc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ NETC d_netc_i(3); // expected-error@-1 
{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ NETC s_netc_i(3); // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ NETC c_netc_i(3); // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ EC_I_EC1 d_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ EC_I_EC1 s_ec_i_ec1; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ EC_I_EC1 c_ec_i_ec1; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_V_T d_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_V_T s_t_v_t; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_V_T c_t_v_t; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_B_NEC d_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_B_NEC s_t_b_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NEC c_t_b_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_F_NEC d_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_F_NEC s_t_f_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NEC c_t_f_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_FA_NEC d_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_FA_NEC s_t_fa_nec; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_FA_NEC c_t_fa_nec; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_B_NED d_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_B_NED s_t_b_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_B_NED c_t_b_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_F_NED d_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_F_NED s_t_f_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} __constant__ T_F_NED c_t_f_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __device__ T_FA_NED d_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} __shared__ T_FA_NED s_t_fa_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} 
__constant__ T_FA_NED c_t_fa_ned; // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}} // Verify that only __shared__ local variables may be static on device // side and that they are not allowed to be initialized. __device__ void df_sema() { static __shared__ NCFS s_ncfs; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __shared__ UC s_uc; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __shared__ NED s_ned; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} static __device__ int ds; // expected-error@-1 {{within a __device__ function, only __shared__ variables or const variables without device memory qualifier may be marked 'static'}} static __constant__ int dc; // expected-error@-1 {{within a __device__ function, only __shared__ variables or const variables without device memory qualifier may be marked 'static'}} static int v; // expected-error@-1 {{within a __device__ function, only __shared__ variables or const variables without device memory qualifier may be marked 'static'}} static const int cv = 1; static const __device__ int cds = 1; // expected-error@-1 {{within a __device__ function, only __shared__ variables or const variables without device memory qualifier may be marked 'static'}} static const __constant__ int cdc = 1; // expected-error@-1 {{within a __device__ function, only __shared__ variables or const variables without device memory qualifier may be marked 'static'}} } __host__ __device__ void hd_sema() { static int x = 42; #ifdef __CUDA_ARCH__ // expected-error@-2 {{within a __host__ __device__ function, only __shared__ variables or const variables without device memory qualifier may be marked 'static'}} #endif } inline __host__ __device__ void hd_emitted_host_only() { static int x = 42; // no error on device because this is never codegen'ed there. } void call_hd_emitted_host_only() { hd_emitted_host_only(); } // Verify that we also check field initializers in instantiated structs. struct NontrivialInitializer { __host__ __device__ NontrivialInitializer() : x(43) {} int x; }; template <typename T> __global__ void bar() { __shared__ T bad; // expected-error@-1 {{initialization is not supported for __shared__ variables.}} } void instantiate() { bar<NontrivialInitializer><<<1, 1>>>(); // expected-note@-1 {{in instantiation of function template specialization 'bar<NontrivialInitializer>' requested here}} }
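// ---------------------------------------------------------------------------
// For contrast with the rejected declarations above, an illustrative companion
// (not part of the lit test) showing forms the same rules accept: constant
// initializers, no initializer at all, and the static cases listed as legal
// inside df_sema().
__device__   int d_ok_const = 42;   // constant initializer: accepted
__constant__ int c_ok_const = 7;    // constant initializer: accepted
__shared__   int s_ok_uninit;       // no initializer: accepted for __shared__

__device__ void df_ok() {
  static __shared__ int s_ok_local; // static __shared__ without init: accepted
  static const int ok_cv = 1;       // const, no device memory qualifier: accepted
  (void)s_ok_local;
  (void)ok_cv;
}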
#include "../kernels/transformerKernels.h" #include "../kernels/embKernels.h" /** @file Transformer encoder, composed by gemm lib and custom cuda kernel function */ namespace lightseq { namespace cuda { template <OperationType OpType_> Encoder<OpType_>::Encoder(int max_batch_size, int *p_d_token_id, int *p_d_padding_mask, _DataType *p_d_output, const TransformerWeight<OpType_> &tw, cudaStream_t stream, cublasHandle_t hd, const int *p_d_lang_id) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_padding_mask(p_d_padding_mask), _p_d_output(p_d_output), _p_d_lang_id(p_d_lang_id), _tw(tw), _stream(stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024) {} /** Compute GPU memory size needed by transformer encoder, to see how these memory is used, checkout init_buffer() for detail */ template <OperationType OpType_> long Encoder<OpType_>::compute_buffer_bytesize() { long sz1 = _max_batch_dim * 6 + _max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step; long sz2 = _max_batch_dim + _max_batch_size * _tw._max_step * _tw._inner_size; return max(sz1, sz2) * sizeof(_DataType); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void Encoder<OpType_>::init_buffer(void *pbuf) { _DataType *p_d_buf = reinterpret_cast<_DataType *>(pbuf); _p_d_qkv_projected = p_d_buf; _p_d_q = _p_d_qkv_projected + _max_batch_dim * 3; _p_d_k = _p_d_q + _max_batch_dim; _p_d_v = _p_d_k + _max_batch_dim; _p_d_c = _p_d_v + _max_batch_dim; _p_d_ffn_buf1 = p_d_buf; _p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim; // encoder and decoder use the same buffer to save gpu memory useage return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string Encoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_tw._multilg_type == 0 && _p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_tw._multilg_type != 0 && _p_d_src_emb_wei.size() != 5) { return "violate p_d_src_emb_wei.size() = 5"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } if (_tw._multilg_type != 0 && _p_d_lang_id == nullptr) { return "lang id should not be null when multilg"; } return ""; } /** Encoder inference */ template <OperationType OpType_> void Encoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { if (batch_size > _max_batch_size) { throw std::runtime_error("batch size of input greater than max_batch_size"); } if (batch_seq_len > _tw._max_step) { throw std::runtime_error("seq len of input greater than max_step"); } /* ---step1. 
init--- */ _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif /* ---step2. encoder feedforward--- */ launch_enc_emb<_DataType>(_p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask, _tw._padding_id, batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[4], _p_d_lang_id, _tw._multilg_type); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "emb out: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "emb out", 10); } } // not normal print_vec(_p_d_src_emb_wei[0], "token embedding weight", 10); print_vec(_p_d_src_emb_wei[1], "position embedding weight", 10); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); #ifdef DEBUG_RESULT for (int i = 0; i < _batch_size; i++) { // batch_id for (int j = 0; j < _batch_seq_len; j++) { // token_id std::cout << "encoder output: token-" << j << std::endl; print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size + j * _tw._hidden_size, "encoder_output", _tw._dim_per_head); } } // not normal #endif return; } /** Encoder self attention */ template <OperationType OpType_> void Encoder<OpType_>::self_attention() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block, _tw._is_post_ln); /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_encself_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_padding_mask); /* ---step 3. 
new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_output, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void Encoder<OpType_>::ffn_add_norm() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block, _tw._is_post_ln); /* ---step 1. first ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (_tw._use_gelu) { ker_bias_gelu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); } else { ker_bias_relu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); } /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_output, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template class Encoder<OperationType::FP16>; template class Encoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
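// ---------------------------------------------------------------------------
// A hedged usage sketch of the buffer protocol implied by
// compute_buffer_bytesize() / init_buffer() / run_one_infer() above: the caller
// sizes the scratch buffer once, hands it to the encoder, then reuses it for
// every batch. The header name, helper name, and error handling are
// assumptions, not lightseq's actual driver code.
#include <stdexcept>
#include <string>
#include <cuda_runtime.h>
// #include "encoder.h"   // assumed header exposing lightseq::cuda::Encoder

template <lightseq::cuda::OperationType OpType_>
void run_encoder_once(lightseq::cuda::Encoder<OpType_> &encoder,
                      cudaStream_t stream, int batch_size, int batch_seq_len) {
  void *d_buf = nullptr;
  long buf_bytes = encoder.compute_buffer_bytesize();
  cudaMalloc(&d_buf, buf_bytes);        // one scratch allocation, reused per call
  encoder.init_buffer(d_buf);           // carve it into qkv / attention / ffn regions
  std::string err = encoder.check();    // validate sizes and weight layout
  if (!err.empty()) throw std::runtime_error(err);
  encoder.run_one_infer(batch_size, batch_seq_len);  // results land in p_d_output
  cudaStreamSynchronize(stream);
  cudaFree(d_buf);
}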
* An implementation of COO SpMV using prefix scan to implement a * reduce-value-by-row strategy ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <iterator> #include <vector> #include <string> #include <algorithm> #include <stdio.h> #include <cub/cub.cuh> #include "coo_graph.cuh" #include "../test/test_util.h" using namespace cub; using namespace std; /****************************************************************************** * Globals, constants, and typedefs ******************************************************************************/ typedef int VertexId; // uint32s as vertex ids typedef double Value; // double-precision floating point values bool g_verbose = false; int g_timing_iterations = 1; CachingDeviceAllocator g_allocator; /****************************************************************************** * Texture referencing ******************************************************************************/ /** * Templated texture reference type for multiplicand vector */ template <typename Value> struct TexVector { // Texture type to actually use (e.g., because CUDA doesn't load doubles as texture items) typedef typename If<(Equals<Value, double>::VALUE), uint2, Value>::Type CastType; // Texture reference type typedef texture<CastType, cudaTextureType1D, cudaReadModeElementType> TexRef; static TexRef ref; /** * Bind textures */ static void BindTexture(void *d_in, int elements) { cudaChannelFormatDesc tex_desc = cudaCreateChannelDesc<CastType>(); if (d_in) { size_t offset; size_t bytes = sizeof(CastType) * elements; CubDebugExit(cudaBindTexture(&offset, ref, d_in, tex_desc, bytes)); } } /** * Unbind textures */ static void UnbindTexture() { CubDebugExit(cudaUnbindTexture(ref)); } /** * Load */ static __device__ __forceinline__ Value Load(int offset) { Value output; reinterpret_cast<typename TexVector<Value>::CastType &>(output) = tex1Dfetch(TexVector<Value>::ref, offset); return output; } }; // Texture reference definitions template <typename Value> typename TexVector<Value>::TexRef TexVector<Value>::ref = 0; /****************************************************************************** * Utility types ******************************************************************************/ /** * A partial dot-product sum paired with a corresponding row-id */ template <typename VertexId, typename Value> struct PartialProduct { VertexId row; /// Row-id Value partial; /// PartialProduct sum }; /** * A partial dot-product sum paired with a corresponding row-id (specialized for double-int pairings) */ template <> struct PartialProduct<int, double> { long long row; /// Row-id double partial; /// PartialProduct sum }; /** * Reduce-value-by-row scan operator */ struct ReduceByKeyOp { template <typename PartialProduct> __device__ __forceinline__ PartialProduct operator()( const PartialProduct &first, const PartialProduct &second) { PartialProduct retval; retval.partial = (second.row != first.row) ? 
second.partial : first.partial + second.partial; retval.row = second.row; return retval; } }; /** * Stateful block-wide prefix operator for BlockScan */ template <typename PartialProduct> struct BlockPrefixCallbackOp { // Running block-wide prefix PartialProduct running_prefix; /** * Returns the block-wide running_prefix in thread-0 */ __device__ __forceinline__ PartialProduct operator()( const PartialProduct &block_aggregate) ///< The aggregate sum of the BlockScan inputs { ReduceByKeyOp scan_op; PartialProduct retval = running_prefix; running_prefix = scan_op(running_prefix, block_aggregate); return retval; } }; /** * Operator for detecting discontinuities in a list of row identifiers. */ struct NewRowOp { /// Returns true if row_b is the start of a new row template <typename VertexId> __device__ __forceinline__ bool operator()( const VertexId& row_a, const VertexId& row_b) { return (row_a != row_b); } }; /****************************************************************************** * Persistent thread block types ******************************************************************************/ /** * SpMV thread block abstraction for processing a contiguous segment of * sparse COO tiles. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> struct PersistentBlockSpmv { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Head flag type typedef int HeadFlag; // Partial dot product type typedef PartialProduct<VertexId, Value> PartialProduct; // Parameterized BlockScan type for reduce-value-by-row scan typedef BlockScan<PartialProduct, BLOCK_THREADS, BLOCK_SCAN_RAKING_MEMOIZE> BlockScan; // Parameterized BlockExchange type for exchanging rows between warp-striped -> blocked arrangements typedef BlockExchange<VertexId, BLOCK_THREADS, ITEMS_PER_THREAD, true> BlockExchangeRows; // Parameterized BlockExchange type for exchanging values between warp-striped -> blocked arrangements typedef BlockExchange<Value, BLOCK_THREADS, ITEMS_PER_THREAD, true> BlockExchangeValues; // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment typedef BlockDiscontinuity<HeadFlag, BLOCK_THREADS> BlockDiscontinuity; // Shared memory type for this thread block struct TempStorage { union { typename BlockExchangeRows::TempStorage exchange_rows; // Smem needed for BlockExchangeRows typename BlockExchangeValues::TempStorage exchange_values; // Smem needed for BlockExchangeValues struct { typename BlockScan::TempStorage scan; // Smem needed for BlockScan typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for BlockDiscontinuity }; }; VertexId first_block_row; ///< The first row-ID seen by this thread block VertexId last_block_row; ///< The last row-ID seen by this thread block Value first_product; ///< The first dot-product written by this thread block }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- TempStorage &temp_storage; BlockPrefixCallbackOp<PartialProduct> prefix_op; VertexId *d_rows; VertexId *d_columns; Value *d_values; Value *d_vector; Value *d_result; PartialProduct *d_block_partials; int block_offset; int block_end; //--------------------------------------------------------------------- // Operations 
//--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ PersistentBlockSpmv( TempStorage &temp_storage, VertexId *d_rows, VertexId *d_columns, Value *d_values, Value *d_vector, Value *d_result, PartialProduct *d_block_partials, int block_offset, int block_end) : temp_storage(temp_storage), d_rows(d_rows), d_columns(d_columns), d_values(d_values), d_vector(d_vector), d_result(d_result), d_block_partials(d_block_partials), block_offset(block_offset), block_end(block_end) { // Initialize scalar shared memory values if (threadIdx.x == 0) { VertexId first_block_row = d_rows[block_offset]; VertexId last_block_row = d_rows[block_end - 1]; temp_storage.first_block_row = first_block_row; temp_storage.last_block_row = last_block_row; temp_storage.first_product = Value(0); // Initialize prefix_op to identity prefix_op.running_prefix.row = first_block_row; prefix_op.running_prefix.partial = Value(0); } __syncthreads(); } /** * Processes a COO input tile of edges, outputting dot products for each row */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( int block_offset, int guarded_items = 0) { VertexId columns[ITEMS_PER_THREAD]; VertexId rows[ITEMS_PER_THREAD]; Value values[ITEMS_PER_THREAD]; PartialProduct partial_sums[ITEMS_PER_THREAD]; HeadFlag head_flags[ITEMS_PER_THREAD]; // Load a thread block-striped tile of A (sparse row-ids, column-ids, and values) if (FULL_TILE) { // Unguarded loads LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_columns + block_offset, columns); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_values + block_offset, values); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_rows + block_offset, rows); } else { // This is a partial-tile (e.g., the last tile of input). 
Extend the coordinates of the last // vertex for out-of-bound items, but zero-valued LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_columns + block_offset, columns, guarded_items, VertexId(0)); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_values + block_offset, values, guarded_items, Value(0)); LoadDirectWarpStriped<LOAD_DEFAULT>(threadIdx.x, d_rows + block_offset, rows, guarded_items, temp_storage.last_block_row); } // Load the referenced values from x and compute the dot product partials sums #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { #if CUB_PTX_ARCH >= 350 values[ITEM] *= ThreadLoad<LOAD_LDG>(d_vector + columns[ITEM]); #else values[ITEM] *= TexVector<Value>::Load(columns[ITEM]); #endif } // Transpose from warp-striped to blocked arrangement BlockExchangeValues(temp_storage.exchange_values).WarpStripedToBlocked(values); __syncthreads(); // Transpose from warp-striped to blocked arrangement BlockExchangeRows(temp_storage.exchange_rows).WarpStripedToBlocked(rows); // Barrier for smem reuse and coherence __syncthreads(); // FlagT row heads by looking for discontinuities BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( head_flags, // (Out) Head flags rows, // Original row ids NewRowOp(), // Functor for detecting start of new rows prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile // Assemble partial product structures #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { partial_sums[ITEM].partial = values[ITEM]; partial_sums[ITEM].row = rows[ITEM]; } // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan PartialProduct block_aggregate; BlockScan(temp_storage.scan).ExclusiveScan( partial_sums, // Scan input partial_sums, // Scan output ReduceByKeyOp(), // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // Barrier for smem reuse and coherence __syncthreads(); // Scatter an accumulated dot product if it is the head of a valid row #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (head_flags[ITEM]) { d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; // Save off the first partial product that this thread block will scatter if (partial_sums[ITEM].row == temp_storage.first_block_row) { temp_storage.first_product = partial_sums[ITEM].partial; } } } } /** * Iterate over input tiles belonging to this thread block */ __device__ __forceinline__ void ProcessTiles() { // Process full tiles while (block_offset <= block_end - TILE_ITEMS) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; } // Process the last, partially-full tile (if present) int guarded_items = block_end - block_offset; if (guarded_items) { ProcessTile<false>(block_offset, guarded_items); } if (threadIdx.x == 0) { if (gridDim.x == 1) { // Scatter the final aggregate (this kernel contains only 1 thread block) d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; } else { // Write the first and last partial products from this thread block so // that they can be subsequently "fixed up" in the next kernel. 
PartialProduct first_product; first_product.row = temp_storage.first_block_row; first_product.partial = temp_storage.first_product; d_block_partials[blockIdx.x * 2] = first_product; d_block_partials[(blockIdx.x * 2) + 1] = prefix_op.running_prefix; } } } }; /** * Threadblock abstraction for "fixing up" an array of interblock SpMV partial products. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> struct FinalizeSpmvBlock { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Constants enum { TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Head flag type typedef int HeadFlag; // Partial dot product type typedef PartialProduct<VertexId, Value> PartialProduct; // Parameterized BlockScan type for reduce-value-by-row scan typedef BlockScan<PartialProduct, BLOCK_THREADS, BLOCK_SCAN_RAKING_MEMOIZE> BlockScan; // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment typedef BlockDiscontinuity<HeadFlag, BLOCK_THREADS> BlockDiscontinuity; // Shared memory type for this thread block struct TempStorage { typename BlockScan::TempStorage scan; // Smem needed for reduce-value-by-row scan typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for head-flagging VertexId last_block_row; }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- TempStorage &temp_storage; BlockPrefixCallbackOp<PartialProduct> prefix_op; Value *d_result; PartialProduct *d_block_partials; int num_partials; //--------------------------------------------------------------------- // Operations //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ FinalizeSpmvBlock( TempStorage &temp_storage, Value *d_result, PartialProduct *d_block_partials, int num_partials) : temp_storage(temp_storage), d_result(d_result), d_block_partials(d_block_partials), num_partials(num_partials) { // Initialize scalar shared memory values if (threadIdx.x == 0) { VertexId first_block_row = d_block_partials[0].row; VertexId last_block_row = d_block_partials[num_partials - 1].row; temp_storage.last_block_row = last_block_row; // Initialize prefix_op to identity prefix_op.running_prefix.row = first_block_row; prefix_op.running_prefix.partial = Value(0); } __syncthreads(); } /** * Processes a COO input tile of edges, outputting dot products for each row */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( int block_offset, int guarded_items = 0) { VertexId rows[ITEMS_PER_THREAD]; PartialProduct partial_sums[ITEMS_PER_THREAD]; HeadFlag head_flags[ITEMS_PER_THREAD]; // Load a tile of block partials from previous kernel if (FULL_TILE) { // Full tile #if CUB_PTX_ARCH >= 350 LoadDirectBlocked<LOAD_LDG>(threadIdx.x, d_block_partials + block_offset, partial_sums); #else LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums); #endif } else { // Partial tile (extend zero-valued coordinates of the last partial-product for out-of-bounds items) PartialProduct default_sum; default_sum.row = temp_storage.last_block_row; default_sum.partial = Value(0); #if CUB_PTX_ARCH >= 350 LoadDirectBlocked<LOAD_LDG>(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); #else LoadDirectBlocked(threadIdx.x, d_block_partials + 
block_offset, partial_sums, guarded_items, default_sum); #endif } // Copy out row IDs for row-head flagging #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { rows[ITEM] = partial_sums[ITEM].row; } // Flag row heads by looking for discontinuities BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( head_flags, // (Out) Head flags rows, // Original row ids NewRowOp(), // Functor for detecting start of new rows prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan PartialProduct block_aggregate; BlockScan(temp_storage.scan).ExclusiveScan( partial_sums, // Scan input partial_sums, // Scan output ReduceByKeyOp(), // Scan operator block_aggregate, // Block-wide total (unused) prefix_op); // Prefix operator for seeding the block-wide scan with the running total // Scatter an accumulated dot product if it is the head of a valid row #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (head_flags[ITEM]) { d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; } } } /** * Iterate over input tiles belonging to this thread block */ __device__ __forceinline__ void ProcessTiles() { // Process full tiles int block_offset = 0; while (block_offset <= num_partials - TILE_ITEMS) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; } // Process final partial tile (if present) int guarded_items = num_partials - block_offset; if (guarded_items) { ProcessTile<false>(block_offset, guarded_items); } // Scatter the final aggregate (this kernel contains only 1 thread block) if (threadIdx.x == 0) { d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; } } }; /****************************************************************************** * Kernel entrypoints ******************************************************************************/ /** * SpMV kernel whose thread blocks each process a contiguous segment of sparse COO tiles. */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> __launch_bounds__ (BLOCK_THREADS) __global__ void CooKernel( GridEvenShare<int> even_share, PartialProduct<VertexId, Value> *d_block_partials, VertexId *d_rows, VertexId *d_columns, Value *d_values, Value *d_vector, Value *d_result) { // Specialize SpMV thread block abstraction type typedef PersistentBlockSpmv<BLOCK_THREADS, ITEMS_PER_THREAD, VertexId, Value> PersistentBlockSpmv; // Shared memory allocation __shared__ typename PersistentBlockSpmv::TempStorage temp_storage; // Initialize thread block even-share to tell us where to start and stop our tile-processing even_share.BlockInit(); // Construct persistent thread block PersistentBlockSpmv persistent_block( temp_storage, d_rows, d_columns, d_values, d_vector, d_result, d_block_partials, even_share.block_offset, even_share.block_end); // Process input tiles persistent_block.ProcessTiles(); } /** * Kernel for "fixing up" an array of interblock SpMV partial products. 
*/ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, typename VertexId, typename Value> __launch_bounds__ (BLOCK_THREADS, 1) __global__ void CooFinalizeKernel( PartialProduct<VertexId, Value> *d_block_partials, int num_partials, Value *d_result) { // Specialize "fix-up" thread block abstraction type typedef FinalizeSpmvBlock<BLOCK_THREADS, ITEMS_PER_THREAD, VertexId, Value> FinalizeSpmvBlock; // Shared memory allocation __shared__ typename FinalizeSpmvBlock::TempStorage temp_storage; // Construct persistent thread block FinalizeSpmvBlock persistent_block(temp_storage, d_result, d_block_partials, num_partials); // Process input tiles persistent_block.ProcessTiles(); } //--------------------------------------------------------------------- // Host subroutines //--------------------------------------------------------------------- /** * Simple test of device */ template < int COO_BLOCK_THREADS, int COO_ITEMS_PER_THREAD, int COO_SUBSCRIPTION_FACTOR, int FINALIZE_BLOCK_THREADS, int FINALIZE_ITEMS_PER_THREAD, typename VertexId, typename Value> void TestDevice( CooGraph<VertexId, Value>& coo_graph, Value* h_vector, Value* h_reference) { typedef PartialProduct<VertexId, Value> PartialProduct; const int COO_TILE_SIZE = COO_BLOCK_THREADS * COO_ITEMS_PER_THREAD; // SOA device storage VertexId *d_rows; // SOA graph row coordinates VertexId *d_columns; // SOA graph col coordinates Value *d_values; // SOA graph values Value *d_vector; // Vector multiplicand Value *d_result; // Output row PartialProduct *d_block_partials; // Temporary storage for communicating dot product partials between thread blocks // Create SOA version of coo_graph on host int num_edges = coo_graph.coo_tuples.size(); VertexId *h_rows = new VertexId[num_edges]; VertexId *h_columns = new VertexId[num_edges]; Value *h_values = new Value[num_edges]; for (int i = 0; i < num_edges; i++) { h_rows[i] = coo_graph.coo_tuples[i].row; h_columns[i] = coo_graph.coo_tuples[i].col; h_values[i] = coo_graph.coo_tuples[i].val; } // Get CUDA properties Device device_props; CubDebugExit(device_props.Init()); // Determine launch configuration from kernel properties int coo_sm_occupancy; CubDebugExit(device_props.MaxSmOccupancy( coo_sm_occupancy, CooKernel<COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, VertexId, Value>, COO_BLOCK_THREADS)); int max_coo_grid_size = device_props.sm_count * coo_sm_occupancy * COO_SUBSCRIPTION_FACTOR; // Construct an even-share work distribution GridEvenShare<int> even_share(num_edges, max_coo_grid_size, COO_TILE_SIZE); int coo_grid_size = even_share.grid_size; int num_partials = coo_grid_size * 2; // Allocate COO device arrays CubDebugExit(g_allocator.DeviceAllocate((void**)&d_rows, sizeof(VertexId) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_columns, sizeof(VertexId) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * num_edges)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_vector, sizeof(Value) * coo_graph.col_dim)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_result, sizeof(Value) * coo_graph.row_dim)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_block_partials, sizeof(PartialProduct) * num_partials)); // Copy host arrays to device CubDebugExit(cudaMemcpy(d_rows, h_rows, sizeof(VertexId) * num_edges, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_columns, h_columns, sizeof(VertexId) * num_edges, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * num_edges, cudaMemcpyHostToDevice)); 
CubDebugExit(cudaMemcpy(d_vector, h_vector, sizeof(Value) * coo_graph.col_dim, cudaMemcpyHostToDevice)); // Bind textures TexVector<Value>::BindTexture(d_vector, coo_graph.col_dim); // Print debug info printf("CooKernel<%d, %d><<<%d, %d>>>(...), Max SM occupancy: %d\n", COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, coo_grid_size, COO_BLOCK_THREADS, coo_sm_occupancy); if (coo_grid_size > 1) { printf("CooFinalizeKernel<<<1, %d>>>(...)\n", FINALIZE_BLOCK_THREADS); } fflush(stdout); CubDebugExit(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte)); // Run kernel (always run one iteration without timing) GpuTimer gpu_timer; float elapsed_millis = 0.0; for (int i = 0; i <= g_timing_iterations; i++) { gpu_timer.Start(); // Initialize output CubDebugExit(cudaMemset(d_result, 0, coo_graph.row_dim * sizeof(Value))); // Run the COO kernel CooKernel<COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD><<<coo_grid_size, COO_BLOCK_THREADS>>>( even_share, d_block_partials, d_rows, d_columns, d_values, d_vector, d_result); if (coo_grid_size > 1) { // Run the COO finalize kernel CooFinalizeKernel<FINALIZE_BLOCK_THREADS, FINALIZE_ITEMS_PER_THREAD><<<1, FINALIZE_BLOCK_THREADS>>>( d_block_partials, num_partials, d_result); } gpu_timer.Stop(); if (i > 0) elapsed_millis += gpu_timer.ElapsedMillis(); } // Force any kernel stdio to screen CubDebugExit(cudaDeviceSynchronize()); fflush(stdout); // Display timing if (g_timing_iterations > 0) { float avg_elapsed = elapsed_millis / g_timing_iterations; int total_bytes = ((sizeof(VertexId) + sizeof(Value)) * 2 * num_edges) + (sizeof(Value) * coo_graph.row_dim); printf("%d iterations, average elapsed (%.3f ms), utilized bandwidth (%.3f GB/s), GFLOPS(%.3f)\n", g_timing_iterations, avg_elapsed, total_bytes / avg_elapsed / 1000.0 / 1000.0, num_edges * 2 / avg_elapsed / 1000.0 / 1000.0); } // Check results int compare = CompareDeviceResults(h_reference, d_result, coo_graph.row_dim, true, g_verbose); printf("%s\n", compare ? 
"FAIL" : "PASS"); AssertEquals(0, compare); // Cleanup TexVector<Value>::UnbindTexture(); CubDebugExit(g_allocator.DeviceFree(d_block_partials)); CubDebugExit(g_allocator.DeviceFree(d_rows)); CubDebugExit(g_allocator.DeviceFree(d_columns)); CubDebugExit(g_allocator.DeviceFree(d_values)); CubDebugExit(g_allocator.DeviceFree(d_vector)); CubDebugExit(g_allocator.DeviceFree(d_result)); delete[] h_rows; delete[] h_columns; delete[] h_values; } /** * Compute reference answer on CPU */ template <typename VertexId, typename Value> void ComputeReference( CooGraph<VertexId, Value>& coo_graph, Value* h_vector, Value* h_reference) { for (VertexId i = 0; i < coo_graph.row_dim; i++) { h_reference[i] = 0.0; } for (VertexId i = 0; i < coo_graph.coo_tuples.size(); i++) { h_reference[coo_graph.coo_tuples[i].row] += coo_graph.coo_tuples[i].val * h_vector[coo_graph.coo_tuples[i].col]; } } /** * Assign arbitrary values to vector items */ template <typename Value> void AssignVectorValues(Value *vector, int col_dim) { for (int i = 0; i < col_dim; i++) { vector[i] = 1.0; } } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("i", g_timing_iterations); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s\n [--device=<device-id>] [--v] [--iterations=<test iterations>] [--grid-size=<grid-size>]\n" "\t--type=wheel --spokes=<spokes>\n" "\t--type=grid2d --width=<width> [--no-self-loops]\n" "\t--type=grid3d --width=<width> [--no-self-loops]\n" "\t--type=market --file=<file>\n" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Get graph type string type; args.GetCmdLineArgument("type", type); // Generate graph structure CpuTimer timer; timer.Start(); CooGraph<VertexId, Value> coo_graph; if (type == string("grid2d")) { VertexId width; args.GetCmdLineArgument("width", width); bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); printf("Generating %s grid2d width(%d)... ", (self_loops) ? "5-pt" : "4-pt", width); fflush(stdout); if (coo_graph.InitGrid2d(width, self_loops)) exit(1); } else if (type == string("grid3d")) { VertexId width; args.GetCmdLineArgument("width", width); bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); printf("Generating %s grid3d width(%d)... ", (self_loops) ? "7-pt" : "6-pt", width); fflush(stdout); if (coo_graph.InitGrid3d(width, self_loops)) exit(1); } else if (type == string("wheel")) { VertexId spokes; args.GetCmdLineArgument("spokes", spokes); printf("Generating wheel spokes(%d)... ", spokes); fflush(stdout); if (coo_graph.InitWheel(spokes)) exit(1); } else if (type == string("market")) { string filename; args.GetCmdLineArgument("file", filename); printf("Generating MARKET for %s... ", filename.c_str()); fflush(stdout); if (coo_graph.InitMarket(filename)) exit(1); } else { printf("Unsupported graph type\n"); exit(1); } timer.Stop(); printf("Done (%.3fs). 
%d non-zeros, %d rows, %d columns\n", timer.ElapsedMillis() / 1000.0, coo_graph.coo_tuples.size(), coo_graph.row_dim, coo_graph.col_dim); fflush(stdout); if (g_verbose) { cout << coo_graph << "\n"; } // Create vector Value *h_vector = new Value[coo_graph.col_dim]; AssignVectorValues(h_vector, coo_graph.col_dim); if (g_verbose) { printf("Vector[%d]: ", coo_graph.col_dim); DisplayResults(h_vector, coo_graph.col_dim); printf("\n\n"); } // Compute reference answer Value *h_reference = new Value[coo_graph.row_dim]; ComputeReference(coo_graph, h_vector, h_reference); if (g_verbose) { printf("Results[%d]: ", coo_graph.row_dim); DisplayResults(h_reference, coo_graph.row_dim); printf("\n\n"); } // Parameterization for SM35 enum { COO_BLOCK_THREADS = 64, COO_ITEMS_PER_THREAD = 10, COO_SUBSCRIPTION_FACTOR = 4, FINALIZE_BLOCK_THREADS = 256, FINALIZE_ITEMS_PER_THREAD = 4, }; // Run GPU version TestDevice< COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, COO_SUBSCRIPTION_FACTOR, FINALIZE_BLOCK_THREADS, FINALIZE_ITEMS_PER_THREAD>(coo_graph, h_vector, h_reference); // Cleanup delete[] h_vector; delete[] h_reference; return 0; }
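// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original example): a host-side
// reduce-by-key over PartialProduct entries, mirroring the semantics of the
// ReduceByKeyOp scan operator used by PersistentBlockSpmv and
// FinalizeSpmvBlock above -- consecutive partials that share a row id are
// summed, and a different row id starts a new segment. The helper name
// ReduceByRowHost and the use of std::vector are assumptions made for this
// sketch only; it is a reference for reading the device code, not a drop-in
// replacement for the finalize kernel.
// ----------------------------------------------------------------------------
#include <vector>   // needed only for this sketch

template <typename VertexId, typename Value>
std::vector<PartialProduct<VertexId, Value> > ReduceByRowHost(
    const std::vector<PartialProduct<VertexId, Value> >& partials)
{
    std::vector<PartialProduct<VertexId, Value> > reduced;
    for (size_t i = 0; i < partials.size(); ++i)
    {
        if (!reduced.empty() && reduced.back().row == partials[i].row)
            reduced.back().partial += partials[i].partial;  // same row: accumulate
        else
            reduced.push_back(partials[i]);                 // discontinuity: new segment
    }
    return reduced;
}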
#include <ops/declarable/helpers/convolutions.h> #include "cudnnUtils.h" namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// std::tuple<std::unique_ptr<NDArray>, std::unique_ptr<NDArray>> checkConv2dCUDNNPadAsymmetric( const NDArray* input, const NDArray* gradI, const int iH, const int iW, const int oH, const int oW, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const bool isNCHW) { const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH); const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW); const bool isPHasymm = pH != (pHsum - pH); const bool isPWasymm = pW != (pWsum - pW); std::unique_ptr<NDArray> uNewInput = {}, uNewGradI = {}; if (!isPHasymm && !isPWasymm) return std::make_tuple(std::move(uNewInput), std::move(uNewGradI)); std::vector<sd::LongType> newShape = input->getShapeAsVector(); const int iHposition = isNCHW ? 2 : 1; if (isPHasymm) newShape[iHposition] += 1; if (isPWasymm) newShape[iHposition + 1] += 1; uNewInput.reset(new NDArray(input->ordering(), newShape, input->dataType(), input->getContext())); if (isNCHW) (*uNewInput)({0, 0, 0, 0, 0, input->sizeAt(2), 0, input->sizeAt(3)}).assign(input); else (*uNewInput)({0, 0, 0, input->sizeAt(1), 0, input->sizeAt(2), 0, 0}).assign(input); if (gradI != nullptr) uNewGradI.reset(new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext())); return std::make_tuple(std::move(uNewInput), std::move(uNewGradI)); } ////////////////////////////////////////////////////////////////////////// std::tuple<std::unique_ptr<NDArray>, std::unique_ptr<NDArray>> checkConv3dCUDNNPadAsymmetric( const NDArray* input, const NDArray* gradI, const int iD, const int iH, const int iW, const int oD, const int oH, const int oW, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const bool isNCDHW) { const auto pDsum = ((oD - 1) * sD + ((kD - 1) * dD + 1) - iD); const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH); const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW); const bool isPDasymm = pD != (pDsum - pD); const bool isPHasymm = pH != (pHsum - pH); const bool isPWasymm = pW != (pWsum - pW); std::unique_ptr<NDArray> uNewInput = {}, uNewGradI = {}; if (!isPDasymm && !isPHasymm && !isPWasymm) return std::make_tuple(std::move(uNewInput), std::move(uNewGradI)); std::vector<sd::LongType> newShape = input->getShapeAsVector(); const int iDposition = isNCDHW ? 
2 : 1; if (isPDasymm) newShape[iDposition] += 1; if (isPHasymm) newShape[iDposition + 1] += 1; if (isPWasymm) newShape[iDposition + 2] += 1; uNewInput.reset(new NDArray(input->ordering(), newShape, input->dataType(), input->getContext())); if (isNCDHW) (*uNewInput)({0, 0, 0, 0, 0, input->sizeAt(2), 0, input->sizeAt(3), 0, input->sizeAt(4)}).assign(input); else (*uNewInput)({0, 0, 0, input->sizeAt(1), 0, input->sizeAt(2), 0, input->sizeAt(3), 0, 0}).assign(input); if (gradI != nullptr) uNewGradI.reset(new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext())); return std::make_tuple(std::move(uNewInput), std::move(uNewGradI)); } ////////////////////////////////////////////////////////////////////////// void pooling2dCUDNN(const LaunchContext* context, const NDArray* input, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const bool isNCHW, const cudnnPoolingMode_t mode) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; // input descriptor, output descriptor CudnnTensor x, z; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if (output->ews() == 1 && output->ordering() == 'c') z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); // description of pooling PoolingDesc pooling; pooling.set2D(mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input}); // run calculation CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnPoolingForward), cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer())); auto cudaErr = cudaStreamSynchronize(*context->getCudaStream()); if (cudaErr != 0) throw cuda_exception::build("pooling2dCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({output}, {input}); } ////////////////////////////////////////////////////////////////////////// void pooling2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* gradO, NDArray* gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const bool isNCHW, const cudnnPoolingMode_t mode) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; // input and gradI descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // gradO descriptor CudnnTensor dz; if (gradO->ews() == 1 && gradO->ordering() == 'c') dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); // description of pooling PoolingDesc pooling; pooling.set2D(mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI}, {input, gradO}); // run calculation for gradI CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnPoolingBackward), cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer())); auto cudaErr = cudaStreamSynchronize(*context->getCudaStream()); if (cudaErr != 0) throw cuda_exception::build("pooling2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({gradI}, {input, gradO}); } ////////////////////////////////////////////////////////////////////////// void pooling3dCUDNN(const LaunchContext* context, const NDArray* input, NDArray* output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const bool isNCDHW, const cudnnPoolingMode_t mode) { auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); const int numDims = 5; int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width; int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD); const int pSizes[] = {pD, pH, pW}; const int sSizes[] = {sD, sH, sW}; const int kSizes[] = {kD, kH, kW}; const int xShape[] = {bS, iC, iD, iH, iW}; const int zShape[] = {bS, oC, oD, oH, oW}; const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)}; const int zStrides[] = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3), (int)output->strideAt(4)}; cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; // input descriptor, output descriptor CudnnTensor x, z; if (input->ews() == 1 && input->ordering() == 'c') x.setEx(format, cudnnDataType(input->dataType()), numDims, xShape); else x.set(cudnnDataType(input->dataType()), numDims, xShape, xStrides); if (output->ews() == 1 && output->ordering() == 'c') z.setEx(format, cudnnDataType(output->dataType()), numDims, zShape); else z.set(cudnnDataType(output->dataType()), numDims, zShape, zStrides); // description of pooling PoolingDesc pooling; pooling.set(mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input}); // run calculation CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnPoolingForward), cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer())); auto cudaErr = cudaStreamSynchronize(*context->getCudaStream()); if (cudaErr != 0) throw cuda_exception::build("pooling3dCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({output}, {input}); } ////////////////////////////////////////////////////////////////////////// void pooling3dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* gradO, NDArray* gradI, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const bool isNCDHW, const cudnnPoolingMode_t mode) { auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); const int numDims = 5; int bS, iC, iD, iH, iW, oC, oD, oH, oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width; int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD); const int pSizes[] = {pD, pH, pW}; const int sSizes[] = {sD, sH, sW}; const int kSizes[] = {kD, kH, kW}; const int xShape[] = {bS, iC, iD, iH, iW}; const int dzShape[] = {bS, oC, oD, oH, oW}; const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)}; const int dzStrides[] = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3), (int)gradO->strideAt(4)}; cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; // input and gradI descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.setEx(format, cudnnDataType(input->dataType()), numDims, xShape); else x.set(cudnnDataType(input->dataType()), numDims, xShape, xStrides); // gradO descriptor CudnnTensor dz; if (gradO->ews() == 1 && gradO->ordering() == 'c') dz.setEx(format, cudnnDataType(gradO->dataType()), numDims, dzShape); else dz.set(cudnnDataType(gradO->dataType()), numDims, dzShape, dzStrides); // description of pooling PoolingDesc pooling; pooling.set(mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); // cudnn maxpool2d_bp api requires ff output as one of input arguments if (mode == CUDNN_POOLING_MAX) { NDArray temp(gradO); NDArray::prepareSpecialUse({gradI}, {input, gradO, &temp}); // run ff calculation CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnPoolingForward), cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, dz, temp.specialBuffer())); // run bp calculation for gradI CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnPoolingBackward), cudnnPoolingBackward(*handle, pooling, alpha, dz, temp.specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer())); NDArray::registerSpecialUse({gradI}, {input, gradO, &temp}); } else { NDArray::prepareSpecialUse({gradI}, {input, gradO}); // run bp calculation for gradI CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnPoolingBackward), cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer())); NDArray::registerSpecialUse({gradI}, {input, gradO}); } auto cudaErr = cudaStreamSynchronize(*context->getCudaStream()); if (cudaErr != 0) throw cuda_exception::build("pooling3dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); } } // namespace platforms } // namespace ops } // namespace sd
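// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the library source): the asymmetric-padding
// test applied per spatial dimension by checkConv2dCUDNNPadAsymmetric /
// checkConv3dCUDNNPadAsymmetric above. cuDNN descriptors accept a single pad
// value per dimension, so when the total padding implied by the output-size
// relation cannot be split evenly (padBegin != padSum - padBegin), the helpers
// allocate an input enlarged by one element instead. The function name and its
// free-standing form are assumptions made for this sketch only.
// ----------------------------------------------------------------------------
namespace {
inline bool isPadAsymmetric(int inSize, int outSize, int kernel, int stride,
                            int padBegin, int dilation) {
  // Total padding required so that
  //   outSize == (inSize + padBegin + padEnd - ((kernel - 1) * dilation + 1)) / stride + 1
  const int padSum = (outSize - 1) * stride + ((kernel - 1) * dilation + 1) - inSize;
  const int padEnd = padSum - padBegin;
  return padBegin != padEnd;  // true -> pad the input manually before calling cuDNN
}
}  // namespace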
#if !defined(CUDA_VERSION) #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) #define __shared__ __attribute__((shared)) #define __constant__ __attribute__((constant)) typedef unsigned long long uint64_t; #endif // We have to keep all builtins that depend on particular target feature in the // same function, because the codegen will stop after the very first function // that encounters an error, so -verify will not be able to find errors in // subsequent functions. // CHECK-LABEL: nvvm_wmma_m16n16k16 __device__ void nvvm_wmma_m16n16k16(int *src, int *dst, float *fsrc, float *fdst, int ldm) { // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_a' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_a(dst, src+1, ldm, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_b' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_b(dst, src+2, ldm, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_M16: call {{.*}} 
@llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_M16: call {{.*}} 
@llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_M16: call {{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32 // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_M16: call 
{{.*}} @llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite // pre-sm_70-error-re@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature sm_70{{.*}},ptx60{{.*}}}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); } #ifdef PTX61 // CHECK-LABEL: nvvm_wmma_m32n8k16 __device__ void nvvm_wmma_m32n8k16(int *src, int *dst, float *fsrc, float *fdst, int ldm) { // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_a(dst, src+1, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_b(dst, src+2, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f16(dst, src, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_st_c_f16(dst, src, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16 // 
pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite // 
pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m32n8k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m32n8k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); // m8n32k16 variants. 
// CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_a' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_a(dst, src+1, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_b' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_b(dst, src+2, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f16(dst, src, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_ld_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_st_c_f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_st_c_f16(dst, src, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_st_c_f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} 
__hmma_m8n32k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f16f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} 
__hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f16' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32 // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK_M32_M8: call {{.*}} @llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite // pre-ptx61-error-re@+1 {{'__hmma_m8n32k16_mma_f32f32' needs target feature sm_70{{.*}},ptx61{{.*}}}} __hmma_m8n32k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); } #endif
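The test above pins every `__hmma_*` builtin to its `@llvm.nvvm.wmma.*` intrinsic and to the sm_70 / PTX 6.1 feature gate. For orientation only, here is a hedged sketch of the higher-level `nvcuda::wmma` C++ API that ultimately lowers to these builtins; the 16x16x16 f16 x f16 -> f32 tile shape, the row/col layouts, and the leading-dimension parameters are illustrative choices, not something this test exercises.

#include <mma.h>
#include <cuda_fp16.h>
using namespace nvcuda;

// Sketch only: one warp computes a single 16x16 output tile, D = A*B + C,
// assuming sm_70+, 16-aligned matrix dimensions, and leading dimensions
// lda/ldb/ldc supplied by the caller.
__global__ void wmma_tile_gemm(const half* a, const half* b, float* c,
                               int lda, int ldb, int ldc) {
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> acc;

  wmma::fill_fragment(acc, 0.0f);          // zero the accumulator tile
  wmma::load_matrix_sync(a_frag, a, lda);  // warp-collective tile loads
  wmma::load_matrix_sync(b_frag, b, ldb);
  wmma::mma_sync(acc, a_frag, b_frag, acc);
  wmma::store_matrix_sync(c, acc, ldc, wmma::mem_row_major);
}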
#include <utilities/graph_utils.cuh> namespace cugraph { namespace detail { template <typename vertex_t, typename edge_t, typename weight_t> __global__ void attraction_kernel(const vertex_t* restrict row, const vertex_t* restrict col, const weight_t* restrict v, const edge_t e, const float* restrict x_pos, const float* restrict y_pos, float* restrict attract_x, float* restrict attract_y, const int* restrict mass, bool outbound_attraction_distribution, bool lin_log_mode, const float edge_weight_influence, const float coef) { vertex_t i, src, dst; weight_t weight = 1; // For every edge for (i = threadIdx.x + blockIdx.x * blockDim.x; i < e; i += gridDim.x * blockDim.x) { src = row[i]; dst = col[i]; // We only need the lower triangular part if (dst <= src) return; if (v) { weight = v[i]; } weight = pow(weight, edge_weight_influence); float x_dist = x_pos[src] - x_pos[dst]; float y_dist = y_pos[src] - y_pos[dst]; float factor = -coef * weight; if (lin_log_mode) { float distance = pow(x_dist, 2) + pow(y_dist, 2); distance += FLT_EPSILON; distance = sqrt(distance); factor *= log(1 + distance) / distance; } if (outbound_attraction_distribution) factor /= mass[src]; // Force computation atomicAdd(&attract_x[src], x_dist * factor); atomicAdd(&attract_y[src], y_dist * factor); atomicAdd(&attract_x[dst], -x_dist * factor); atomicAdd(&attract_y[dst], -y_dist * factor); } } template <typename vertex_t, typename edge_t, typename weight_t> void apply_attraction(const vertex_t* restrict row, const vertex_t* restrict col, const weight_t* restrict v, const edge_t e, const float* restrict x_pos, const float* restrict y_pos, float* restrict attract_x, float* restrict attract_y, const int* restrict mass, bool outbound_attraction_distribution, bool lin_log_mode, const float edge_weight_influence, const float coef, cudaStream_t stream) { // 0 edge graph. if (!e) return; dim3 nthreads, nblocks; nthreads.x = min(e, CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((e + nthreads.x - 1) / nthreads.x, CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; attraction_kernel<vertex_t, edge_t, weight_t> <<<nblocks, nthreads, 0, stream>>>(row, col, v, e, x_pos, y_pos, attract_x, attract_y, mass, outbound_attraction_distribution, lin_log_mode, edge_weight_influence, coef); CHECK_CUDA(stream); } template <typename vertex_t> __global__ void linear_gravity_kernel(const float* restrict x_pos, const float* restrict y_pos, float* restrict attract_x, float* restrict attract_y, const int* restrict mass, const float gravity, const vertex_t n) { // For every node. for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += gridDim.x * blockDim.x) { float x_dist = x_pos[i]; float y_dist = y_pos[i]; float distance = sqrt(x_dist * x_dist + y_dist * y_dist + FLT_EPSILON); float factor = mass[i] * gravity / distance; attract_x[i] -= x_dist * factor; attract_y[i] -= y_dist * factor; } } template <typename vertex_t> __global__ void strong_gravity_kernel(const float* restrict x_pos, const float* restrict y_pos, float* restrict attract_x, float* restrict attract_y, const int* restrict mass, const float gravity, const float scaling_ratio, const vertex_t n) { // For every node. 
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += gridDim.x * blockDim.x) { float x_dist = x_pos[i]; float y_dist = y_pos[i]; float factor = scaling_ratio * mass[i] * gravity; attract_x[i] -= x_dist * factor; attract_y[i] -= y_dist * factor; } } template <typename vertex_t> void apply_gravity(const float* restrict x_pos, const float* restrict y_pos, float* restrict attract_x, float* restrict attract_y, const int* restrict mass, const float gravity, bool strong_gravity_mode, const float scaling_ratio, const vertex_t n, cudaStream_t stream) { dim3 nthreads, nblocks; nthreads.x = min(n, CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((n + nthreads.x - 1) / nthreads.x, CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; if (strong_gravity_mode) { strong_gravity_kernel<vertex_t><<<nblocks, nthreads, 0, stream>>>( x_pos, y_pos, attract_x, attract_y, mass, gravity, scaling_ratio, n); } else { linear_gravity_kernel<vertex_t> <<<nblocks, nthreads, 0, stream>>>(x_pos, y_pos, attract_x, attract_y, mass, gravity, n); } CHECK_CUDA(stream); } template <typename vertex_t> __global__ void local_speed_kernel(const float* restrict repel_x, const float* restrict repel_y, const float* restrict attract_x, const float* restrict attract_y, const float* restrict old_dx, const float* restrict old_dy, const int* restrict mass, float* restrict swinging, float* restrict traction, const vertex_t n) { // For every node. for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += gridDim.x * blockDim.x) { const float dx = repel_x[i] + attract_x[i]; const float dy = repel_y[i] + attract_y[i]; float node_swinging = mass[i] * sqrt(pow(old_dx[i] - dx, 2) + pow(old_dy[i] - dy, 2)); float node_traction = 0.5 * mass[i] * sqrt(pow(old_dx[i] + dx, 2) + pow(old_dy[i] + dy, 2)); swinging[i] = node_swinging; traction[i] = node_traction; } } template <typename vertex_t> void compute_local_speed(const float* restrict repel_x, const float* restrict repel_y, const float* restrict attract_x, const float* restrict attract_y, float* restrict old_dx, float* restrict old_dy, const int* restrict mass, float* restrict swinging, float* restrict traction, const vertex_t n, cudaStream_t stream) { dim3 nthreads, nblocks; nthreads.x = min(n, CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((n + nthreads.x - 1) / nthreads.x, CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; local_speed_kernel<<<nblocks, nthreads, 0, stream>>>( repel_x, repel_y, attract_x, attract_y, old_dx, old_dy, mass, swinging, traction, n); CHECK_CUDA(stream); } template <typename vertex_t> void adapt_speed(const float jitter_tolerance, float* restrict jt, float* restrict speed, float* restrict speed_efficiency, const float s, const float t, const vertex_t n) { float estimated_jt = 0.05 * sqrt(n); float min_jt = sqrt(estimated_jt); float max_jt = 10; float target_speed; float min_speed_efficiency = 0.05; const float max_rise = 0.5; *jt = jitter_tolerance * max(min_jt, min(max_jt, estimated_jt * t / (n * n))); if (s / t > 2.0) { if (*speed_efficiency > min_speed_efficiency) { *speed_efficiency *= 0.5; } *jt = max(*jt, jitter_tolerance); } if (s == 0) target_speed = FLT_MAX; else target_speed = (*jt * *speed_efficiency * t) / s; if (s > *jt * t) { if (*speed_efficiency > min_speed_efficiency) *speed_efficiency *= .7; } else if (*speed < 1000) *speed_efficiency *= 1.3; *speed = *speed + min(target_speed - *speed, max_rise * *speed); } template <typename vertex_t> __global__ void update_positions_kernel(float* restrict 
x_pos, float* restrict y_pos, const float* restrict repel_x, const float* restrict repel_y, const float* restrict attract_x, const float* restrict attract_y, float* restrict old_dx, float* restrict old_dy, const float* restrict swinging, const float speed, const vertex_t n) { // For every node. for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += gridDim.x * blockDim.x) { const float factor = speed / (1.0 + sqrt(speed * swinging[i])); const float dx = (repel_x[i] + attract_x[i]); const float dy = (repel_y[i] + attract_y[i]); x_pos[i] += dx * factor; y_pos[i] += dy * factor; old_dx[i] = dx; old_dy[i] = dy; } } template <typename vertex_t> void apply_forces(float* restrict x_pos, float* restrict y_pos, const float* restrict repel_x, const float* restrict repel_y, const float* restrict attract_x, const float* restrict attract_y, float* restrict old_dx, float* restrict old_dy, const float* restrict swinging, const float speed, const vertex_t n, cudaStream_t stream) { dim3 nthreads, nblocks; nthreads.x = min(n, CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((n + nthreads.x - 1) / nthreads.x, CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; update_positions_kernel<vertex_t><<<nblocks, nthreads, 0, stream>>>( x_pos, y_pos, repel_x, repel_y, attract_x, attract_y, old_dx, old_dy, swinging, speed, n); CHECK_CUDA(stream); } } // namespace detail } // namespace cugraph
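The host-side helpers above all share the same 1-D launch recipe (threads capped at CUDA_MAX_KERNEL_THREADS, blocks at CUDA_MAX_BLOCKS) and are meant to be strung together once per layout iteration. Below is a hedged sketch of such a driver, not cuGraph's actual one: the buffer names, the thrust reductions that turn per-node swinging/traction into the scalars s and t, and all numeric parameters are illustrative assumptions, and the repulsion pass that fills repel_x/repel_y is assumed to have run elsewhere.

#include <cuda_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>

template <typename vertex_t, typename edge_t, typename weight_t>
void fa2_iteration_sketch(const vertex_t* row, const vertex_t* col, const weight_t* v,
                          edge_t e, vertex_t n,
                          float* x_pos, float* y_pos,
                          float* repel_x, float* repel_y,
                          float* attract_x, float* attract_y,
                          float* old_dx, float* old_dy, const int* mass,
                          float* swinging, float* traction,
                          float* jt, float* speed, float* speed_efficiency,
                          cudaStream_t stream) {
  using namespace cugraph::detail;
  // Edge attraction plus (linear) gravity accumulate into attract_x / attract_y.
  apply_attraction(row, col, v, e, x_pos, y_pos, attract_x, attract_y, mass,
                   true /*outbound_attraction_distribution*/, false /*lin_log_mode*/,
                   1.0f /*edge_weight_influence*/, 1.0f /*coef*/, stream);
  apply_gravity(x_pos, y_pos, attract_x, attract_y, mass, 1.0f /*gravity*/,
                false /*strong_gravity_mode*/, 2.0f /*scaling_ratio*/, n, stream);
  // Per-node swinging / traction, then global sums feed the speed adaptation.
  compute_local_speed(repel_x, repel_y, attract_x, attract_y, old_dx, old_dy,
                      mass, swinging, traction, n, stream);
  const float s = thrust::reduce(thrust::cuda::par.on(stream), swinging, swinging + n);
  const float t = thrust::reduce(thrust::cuda::par.on(stream), traction, traction + n);
  adapt_speed<vertex_t>(1.0f /*jitter_tolerance*/, jt, speed, speed_efficiency, s, t, n);
  // Displace the nodes and remember this iteration's force for the next local-speed pass.
  apply_forces(x_pos, y_pos, repel_x, repel_y, attract_x, attract_y,
               old_dx, old_dy, swinging, *speed, n, stream);
}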
template <class Space> void TestArray2dBasicConstructor(void) { cusp::array2d<float, Space> A(3, 2); ASSERT_EQUAL(A.num_rows, 3); ASSERT_EQUAL(A.num_cols, 2); ASSERT_EQUAL(A.num_entries, 6); ASSERT_EQUAL(A.values.size(), 6); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dBasicConstructor) template <class Space> void TestArray2dFillConstructor(void) { cusp::array2d<float, Space> A(3, 2, 13.0f); ASSERT_EQUAL(A.num_rows, 3); ASSERT_EQUAL(A.num_cols, 2); ASSERT_EQUAL(A.num_entries, 6); ASSERT_EQUAL(A.values.size(), 6); ASSERT_EQUAL(A.values[0], 13.0f); ASSERT_EQUAL(A.values[1], 13.0f); ASSERT_EQUAL(A.values[2], 13.0f); ASSERT_EQUAL(A.values[3], 13.0f); ASSERT_EQUAL(A.values[4], 13.0f); ASSERT_EQUAL(A.values[5], 13.0f); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dFillConstructor) template <class Space> void TestArray2dCopyConstructor(void) { cusp::array2d<float, Space> A(3, 2); A(0,0) = 1; A(0,1) = 2; A(1,0) = 3; A(1,1) = 4; A(2,0) = 5; A(2,1) = 6; cusp::array2d<float, Space> B(A); ASSERT_EQUAL(A.num_rows, B.num_rows); ASSERT_EQUAL(A.num_cols, B.num_cols); ASSERT_EQUAL(A.num_entries, B.num_entries); ASSERT_EQUAL(A.pitch, B.pitch); ASSERT_EQUAL(A.values, B.values); cusp::array2d<float, Space, cusp::column_major> C(A); ASSERT_EQUAL(A.num_rows, C.num_rows); ASSERT_EQUAL(A.num_cols, C.num_cols); ASSERT_EQUAL(A.num_entries, C.num_entries); ASSERT_EQUAL(3, C.pitch); ASSERT_EQUAL(A(0,0), C(0,0)); ASSERT_EQUAL(A(0,1), C(0,1)); ASSERT_EQUAL(A(1,0), C(1,0)); ASSERT_EQUAL(A(1,1), C(1,1)); ASSERT_EQUAL(A(2,0), C(2,0)); ASSERT_EQUAL(A(2,1), C(2,1)); // set pitch to 4 A.resize(3,2,4); thrust::fill(A.values.begin(), A.values.end(), -1); A(0,0) = 1; A(0,1) = 2; A(1,0) = 3; A(1,1) = 4; A(2,0) = 5; A(2,1) = 6; cusp::array2d<float, Space> D(A); ASSERT_EQUAL(A.num_rows, D.num_rows); ASSERT_EQUAL(A.num_cols, D.num_cols); ASSERT_EQUAL(A.num_entries, D.num_entries); ASSERT_EQUAL(A.pitch, D.pitch); ASSERT_EQUAL(A(0,0), D(0,0)); ASSERT_EQUAL(A(0,1), D(0,1)); ASSERT_EQUAL(A(1,0), D(1,0)); ASSERT_EQUAL(A(1,1), D(1,1)); ASSERT_EQUAL(A(2,0), D(2,0)); ASSERT_EQUAL(A(2,1), D(2,1)); cusp::array2d<float, Space, cusp::column_major> E(A); ASSERT_EQUAL(A.num_rows, E.num_rows); ASSERT_EQUAL(A.num_cols, E.num_cols); ASSERT_EQUAL(A.num_entries, E.num_entries); ASSERT_EQUAL(3, E.pitch); ASSERT_EQUAL(A(0,0), E(0,0)); ASSERT_EQUAL(A(0,1), E(0,1)); ASSERT_EQUAL(A(1,0), E(1,0)); ASSERT_EQUAL(A(1,1), E(1,1)); ASSERT_EQUAL(A(2,0), E(2,0)); ASSERT_EQUAL(A(2,1), E(2,1)); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dCopyConstructor) template <class Space> void TestArray2dRowMajor(void) { cusp::array2d<float, Space, cusp::row_major> A(2,3); A(0,0) = 10; A(0,1) = 20; A(0,2) = 30; A(1,0) = 40; A(1,1) = 50; A(1,2) = 60; ASSERT_EQUAL(A(0,0), 10); ASSERT_EQUAL(A(0,1), 20); ASSERT_EQUAL(A(0,2), 30); ASSERT_EQUAL(A(1,0), 40); ASSERT_EQUAL(A(1,1), 50); ASSERT_EQUAL(A(1,2), 60); ASSERT_EQUAL(A.values[0], 10); ASSERT_EQUAL(A.values[1], 20); ASSERT_EQUAL(A.values[2], 30); ASSERT_EQUAL(A.values[3], 40); ASSERT_EQUAL(A.values[4], 50); ASSERT_EQUAL(A.values[5], 60); // test non-trivial pitch A.resize(2,3,4); thrust::fill(A.values.begin(), A.values.end(), 0); A(0,0) = 10; A(0,1) = 20; A(0,2) = 30; A(1,0) = 40; A(1,1) = 50; A(1,2) = 60; ASSERT_EQUAL(A.values[0], 10); ASSERT_EQUAL(A.values[1], 20); ASSERT_EQUAL(A.values[2], 30); ASSERT_EQUAL(A.values[3], 0); ASSERT_EQUAL(A.values[4], 40); ASSERT_EQUAL(A.values[5], 50); ASSERT_EQUAL(A.values[6], 60); ASSERT_EQUAL(A.values[7], 0); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dRowMajor) template <class Space> void 
TestArray2dColumnMajor(void) { cusp::array2d<float, Space, cusp::column_major> A(2,3); A(0,0) = 10; A(0,1) = 20; A(0,2) = 30; A(1,0) = 40; A(1,1) = 50; A(1,2) = 60; ASSERT_EQUAL(A(0,0), 10); ASSERT_EQUAL(A(0,1), 20); ASSERT_EQUAL(A(0,2), 30); ASSERT_EQUAL(A(1,0), 40); ASSERT_EQUAL(A(1,1), 50); ASSERT_EQUAL(A(1,2), 60); ASSERT_EQUAL(A.values[0], 10); ASSERT_EQUAL(A.values[1], 40); ASSERT_EQUAL(A.values[2], 20); ASSERT_EQUAL(A.values[3], 50); ASSERT_EQUAL(A.values[4], 30); ASSERT_EQUAL(A.values[5], 60); // test non-trivial pitch A.resize(2,3,4); thrust::fill(A.values.begin(), A.values.end(), 0); A(0,0) = 10; A(0,1) = 20; A(0,2) = 30; A(1,0) = 40; A(1,1) = 50; A(1,2) = 60; ASSERT_EQUAL(A.values[ 0], 10); ASSERT_EQUAL(A.values[ 1], 40); ASSERT_EQUAL(A.values[ 2], 0); ASSERT_EQUAL(A.values[ 3], 0); ASSERT_EQUAL(A.values[ 4], 20); ASSERT_EQUAL(A.values[ 5], 50); ASSERT_EQUAL(A.values[ 6], 0); ASSERT_EQUAL(A.values[ 7], 0); ASSERT_EQUAL(A.values[ 8], 30); ASSERT_EQUAL(A.values[ 9], 60); ASSERT_EQUAL(A.values[10], 0); ASSERT_EQUAL(A.values[11], 0); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dColumnMajor) template <class Space> void TestArray2dMixedOrientations(void) { cusp::array2d<float, Space, cusp::row_major> R(2,3); cusp::array2d<float, Space, cusp::column_major> C(2,3); R(0,0) = 10; R(0,1) = 20; R(0,2) = 30; R(1,0) = 40; R(1,1) = 50; R(1,2) = 60; C = R; ASSERT_EQUAL(C(0,0), 10); ASSERT_EQUAL(C(0,1), 20); ASSERT_EQUAL(C(0,2), 30); ASSERT_EQUAL(C(1,0), 40); ASSERT_EQUAL(C(1,1), 50); ASSERT_EQUAL(C(1,2), 60); R = C; ASSERT_EQUAL(R(0,0), 10); ASSERT_EQUAL(R(0,1), 20); ASSERT_EQUAL(R(0,2), 30); ASSERT_EQUAL(R(1,0), 40); ASSERT_EQUAL(R(1,1), 50); ASSERT_EQUAL(R(1,2), 60); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dMixedOrientations) template <class Space> void TestArray2dResize(void) { cusp::array2d<float, Space> A; A.resize(3, 2); ASSERT_EQUAL(A.num_rows, 3); ASSERT_EQUAL(A.num_cols, 2); ASSERT_EQUAL(A.pitch, 2); ASSERT_EQUAL(A.num_entries, 6); ASSERT_EQUAL(A.values.size(), 6); A.resize(3, 2, 4); ASSERT_EQUAL(A.num_rows, 3); ASSERT_EQUAL(A.num_cols, 2); ASSERT_EQUAL(A.pitch, 4); ASSERT_EQUAL(A.num_entries, 6); ASSERT_EQUAL(A.values.size(), 12); ASSERT_THROWS(A.resize(3,2,1), cusp::invalid_input_exception); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dResize) template <class Space> void TestArray2dSwap(void) { cusp::array2d<float, Space> A(2,2); cusp::array2d<float, Space> B(3,1); A(0,0) = 10; A(0,1) = 20; A(1,0) = 30; A(1,1) = 40; B(0,0) = 50; B(1,0) = 60; B(2,0) = 70; cusp::array2d<float, Space> A_copy(A); cusp::array2d<float, Space> B_copy(B); A.swap(B); ASSERT_EQUAL(A.num_rows, B_copy.num_rows); ASSERT_EQUAL(A.num_cols, B_copy.num_cols); ASSERT_EQUAL(A.num_entries, B_copy.num_entries); ASSERT_EQUAL(A.values, B_copy.values); ASSERT_EQUAL(B.num_rows, A_copy.num_rows); ASSERT_EQUAL(B.num_cols, A_copy.num_cols); ASSERT_EQUAL(B.num_entries, A_copy.num_entries); ASSERT_EQUAL(B.values, A_copy.values); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dSwap) void TestArray2dRebind(void) { typedef cusp::array2d<float, cusp::host_memory> HostMatrix; typedef HostMatrix::rebind<cusp::device_memory>::type DeviceMatrix; HostMatrix h_A(10,10); DeviceMatrix d_A(h_A); ASSERT_EQUAL(h_A.num_entries, d_A.num_entries); } DECLARE_UNITTEST(TestArray2dRebind); template <typename MemorySpace> void TestArray2dEquality(void) { cusp::array2d<float, MemorySpace, cusp::row_major> A(2,2); cusp::array2d<float, MemorySpace, cusp::column_major> B(2,2); cusp::array2d<float, MemorySpace, cusp::row_major> C(2,2); C.resize(2,2,5); 
thrust::fill(C.values.begin(), C.values.end(), -1); cusp::array2d<float, MemorySpace, cusp::row_major> D(2,3); cusp::array2d<float, MemorySpace, cusp::column_major> E(2,3); cusp::array2d<float, MemorySpace, cusp::column_major> F(2,3); F.resize(2,3,4); thrust::fill(F.values.begin(), F.values.end(), -1); // start with A == B == C and D == E == F A(0,0) = 1; A(0,1) = 2; A(1,0) = 4; A(1,1) = 5; B(0,0) = 1; B(0,1) = 2; B(1,0) = 4; B(1,1) = 5; C(0,0) = 1; C(0,1) = 2; C(1,0) = 4; C(1,1) = 5; D(0,0) = 1; D(0,1) = 2; D(0,2) = 3; D(1,0) = 7; D(1,1) = 5; D(1,2) = 6; E(0,0) = 1; E(0,1) = 2; E(0,2) = 3; E(1,0) = 7; E(1,1) = 5; E(1,2) = 6; F(0,0) = 1; F(0,1) = 2; F(0,2) = 3; F(1,0) = 7; F(1,1) = 5; F(1,2) = 6; ASSERT_EQUAL(A == A, true); ASSERT_EQUAL(B == A, true); ASSERT_EQUAL(C == A, true); ASSERT_EQUAL(D == A, false); ASSERT_EQUAL(E == A, false); ASSERT_EQUAL(F == A, false); ASSERT_EQUAL(A == B, true); ASSERT_EQUAL(B == B, true); ASSERT_EQUAL(C == B, true); ASSERT_EQUAL(D == B, false); ASSERT_EQUAL(E == B, false); ASSERT_EQUAL(F == B, false); ASSERT_EQUAL(A == C, true); ASSERT_EQUAL(B == C, true); ASSERT_EQUAL(C == C, true); ASSERT_EQUAL(D == C, false); ASSERT_EQUAL(E == C, false); ASSERT_EQUAL(F == C, false); ASSERT_EQUAL(A == D, false); ASSERT_EQUAL(B == D, false); ASSERT_EQUAL(C == D, false); ASSERT_EQUAL(D == D, true); ASSERT_EQUAL(E == D, true); ASSERT_EQUAL(F == D, true); ASSERT_EQUAL(A == E, false); ASSERT_EQUAL(B == E, false); ASSERT_EQUAL(C == E, false); ASSERT_EQUAL(D == E, true); ASSERT_EQUAL(E == E, true); ASSERT_EQUAL(F == E, true); ASSERT_EQUAL(A == F, false); ASSERT_EQUAL(B == F, false); ASSERT_EQUAL(C == F, false); ASSERT_EQUAL(D == F, true); ASSERT_EQUAL(E == F, true); ASSERT_EQUAL(F == F, true); // peturb B and E B(1,0) = 9; E(1,0) = 9; ASSERT_EQUAL(A == A, true); ASSERT_EQUAL(B == A, false); ASSERT_EQUAL(C == A, true); ASSERT_EQUAL(D == A, false); ASSERT_EQUAL(E == A, false); ASSERT_EQUAL(F == A, false); ASSERT_EQUAL(A == B, false); ASSERT_EQUAL(B == B, true); ASSERT_EQUAL(C == B, false); ASSERT_EQUAL(D == B, false); ASSERT_EQUAL(E == B, false); ASSERT_EQUAL(F == B, false); ASSERT_EQUAL(A == C, true); ASSERT_EQUAL(B == C, false); ASSERT_EQUAL(C == C, true); ASSERT_EQUAL(D == C, false); ASSERT_EQUAL(E == C, false); ASSERT_EQUAL(F == C, false); ASSERT_EQUAL(A == D, false); ASSERT_EQUAL(B == D, false); ASSERT_EQUAL(C == D, false); ASSERT_EQUAL(D == D, true); ASSERT_EQUAL(E == D, false); ASSERT_EQUAL(F == D, true); ASSERT_EQUAL(A == E, false); ASSERT_EQUAL(B == E, false); ASSERT_EQUAL(C == E, false); ASSERT_EQUAL(D == E, false); ASSERT_EQUAL(E == E, true); ASSERT_EQUAL(F == E, false); ASSERT_EQUAL(A == F, false); ASSERT_EQUAL(B == F, false); ASSERT_EQUAL(C == F, false); ASSERT_EQUAL(D == F, true); ASSERT_EQUAL(E == F, false); ASSERT_EQUAL(F == F, true); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dEquality) template <class Space> void TestArray2dCopySemantics(void) { // check that destination .pitch is respected cusp::array2d<float, Space> A(3, 2); A(0,0) = 1; A(0,1) = 2; A(1,0) = 3; A(1,1) = 4; A(2,0) = 5; A(2,1) = 6; cusp::array2d<float, Space> B; B.resize(3, 2, 4); B = A; ASSERT_EQUAL_QUIET(A, B); ASSERT_EQUAL(B.pitch, 4); cusp::array2d<float, Space> C; C.resize(3, 2, 4); cusp::copy(A, C); ASSERT_EQUAL_QUIET(A, C); ASSERT_EQUAL(C.pitch, 4); } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dCopySemantics) template <class Space> void TestArray2dRowView(void) { // row view of row major matrix { cusp::array2d<float, Space, cusp::row_major> A(3, 2, -1); ASSERT_EQUAL(A.row(0).size(), 2); for (size_t 
i = 0; i < A.num_rows; i++) cusp::blas::fill(A.row(i), i); ASSERT_EQUAL(A(0,0), 0); ASSERT_EQUAL(A(0,1), 0); ASSERT_EQUAL(A(1,0), 1); ASSERT_EQUAL(A(1,1), 1); ASSERT_EQUAL(A(2,0), 2); ASSERT_EQUAL(A(2,1), 2); } // row view of column major matrix { cusp::array2d<float, Space, cusp::column_major> A(3, 2, -1); ASSERT_EQUAL(A.row(0).size(), 2); for (size_t i = 0; i < A.num_rows; i++) cusp::blas::fill(A.row(i), i); ASSERT_EQUAL(A(0,0), 0); ASSERT_EQUAL(A(0,1), 0); ASSERT_EQUAL(A(1,0), 1); ASSERT_EQUAL(A(1,1), 1); ASSERT_EQUAL(A(2,0), 2); ASSERT_EQUAL(A(2,1), 2); } } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dRowView) template <class Space> void TestArray2dColumnView(void) { // column view of column major matrix { cusp::array2d<float, Space, cusp::column_major> A(3, 2, -1); ASSERT_EQUAL(A.column(0).size(), 3); for (size_t i = 0; i < A.num_cols; i++) cusp::blas::fill(A.column(i), i); ASSERT_EQUAL(A(0,0), 0); ASSERT_EQUAL(A(1,0), 0); ASSERT_EQUAL(A(2,0), 0); ASSERT_EQUAL(A(0,1), 1); ASSERT_EQUAL(A(1,1), 1); ASSERT_EQUAL(A(2,1), 1); } // column view of row major matrix { cusp::array2d<float, Space, cusp::row_major> A(3, 2, -1); ASSERT_EQUAL(A.column(0).size(), 3); for (size_t i = 0; i < A.num_cols; i++) cusp::blas::fill(A.column(i), i); ASSERT_EQUAL(A(0,0), 0); ASSERT_EQUAL(A(1,0), 0); ASSERT_EQUAL(A(2,0), 0); ASSERT_EQUAL(A(0,1), 1); ASSERT_EQUAL(A(1,1), 1); ASSERT_EQUAL(A(2,1), 1); } } DECLARE_HOST_DEVICE_UNITTEST(TestArray2dColumnView)
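A small usage sketch (not one of the unit tests above) tying the pitch assertions together: with row_major storage and a pitch of 4, element (i, j) lives at values[i * pitch + j], while column_major storage puts it at values[j * pitch + i]. The host_memory space and the concrete sizes are arbitrary choices for illustration.

#include <cusp/array2d.h>
#include <cusp/print.h>
#include <thrust/fill.h>

int main(void) {
  cusp::array2d<float, cusp::host_memory, cusp::row_major> A;
  A.resize(2, 3, 4);                                     // 2x3 logical, pitch 4 -> 8 stored values
  thrust::fill(A.values.begin(), A.values.end(), 0.0f);
  A(1, 2) = 60.0f;                                       // stored at values[1 * 4 + 2]
  cusp::array2d<float, cusp::host_memory, cusp::column_major> B(A);  // layout-converting copy
  cusp::print(A);
  cusp::print(B);
  return 0;
}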
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Init phase. // Labels start at value 1. __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzb block_conn, cuda::PtrStepSzi block_labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = 2 * row * img.step + 2 * col; unsigned conn_index = row * (block_conn.step / block_conn.elem_size) + col; unsigned labels_index = row * (block_labels.step / block_labels.elem_size) + col; if (row < block_conn.rows && col < block_conn.cols) { unsigned P0 = 0x777; unsigned P = 0; if (img[img_index]) { P |= P0; } if (2 * col + 1 < img.cols) { if (img[img_index + 1]) { P |= (P0 << 1); } if (2 * row + 1 < img.rows && img[img_index + img.step + 1]) { P |= (P0 << 5); } } if (2 * row + 1 < img.rows) { if (img[img_index + img.step]) { P |= (P0 << 4); } } if (col == 0) { P &= 0xEEEE; } if (2 * col + 1 >= img.cols) { P &= 0x3333; } else if (2 * col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (2 * row + 1 >= img.rows) { P &= 0xFF; } else if (2 * row + 2 >= img.rows) { P &= 0xFFF; } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors unsigned char conn_bitmask = 0; if (P > 0) { block_labels[labels_index] = labels_index + 1; if (HasBit(P, 0) && img[img_index - img.step - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { SetBit(conn_bitmask, 1); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 8) && img[img_index + img.step - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 7) && img[img_index + 2]) || (HasBit(P, 11) && img[img_index + img.step + 2])) { SetBit(conn_bitmask, 4); } if (HasBit(P, 12) && img[img_index + 2 * img.step - 1]) { SetBit(conn_bitmask, 5); } if ((HasBit(P, 13) && img[img_index + 2 * img.step]) || (HasBit(P, 14) && img[img_index + 2 * img.step + 1])) { SetBit(conn_bitmask, 6); } if (HasBit(P, 15) && img[img_index + 2 * img.step + 2]) { SetBit(conn_bitmask, 7); } } else { block_labels[labels_index] = 0; } block_conn[conn_index] = conn_bitmask; } } /*__global__ void ExpandConnections(const cuda::PtrStepSzb connections, cuda::PtrStepSzb expansion) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned conn_index = row * (connections.step / connections.elem_size) + col; unsigned exp_index = 3 * row * (expansion.step / expansion.elem_size) + 3 * col; if (row < connections.rows && col < connections.cols) { expansion[exp_index + (expansion.step / expansion.elem_size) + 1] = 2; unsigned char neighbours = connections[conn_index]; if (HasBit(neighbours, 0)) { expansion[exp_index] = 1; } else { expansion[exp_index] = 0; } if (HasBit(neighbours, 1)) { expansion[exp_index + 1] = 1; } else { expansion[exp_index + 1] = 0; } if (HasBit(neighbours, 2)) { expansion[exp_index + 2] = 1; } else { expansion[exp_index 
+ 2] = 0; } if (HasBit(neighbours, 3)) { expansion[exp_index + (expansion.step / expansion.elem_size)] = 1; } else { expansion[exp_index + (expansion.step / expansion.elem_size)] = 0; } if (HasBit(neighbours, 4)) { expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 1; } else { expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 0; } if (HasBit(neighbours, 5)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 0; } if (HasBit(neighbours, 6)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 0; } if (HasBit(neighbours, 7)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 0; } } }*/ __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned char neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; if (HasBit(neighbours, 0)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, labels.data[labels_index - 1]); } if (HasBit(neighbours, 4)) { min = MinLabel(min, labels.data[labels_index + 1]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]); } if (HasBit(neighbours, 6)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]); } return min; } // Scan phase. // The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Scan(cuda::PtrStepSzi labels, cuda::PtrStepSzb connections, char *changes) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned connections_index = row * (connections.step / connections.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char neighbours = connections[connections_index]; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } // Analysis phase. // The pixel associated with current thread is given the minimum label of the neighbours. 
__global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { // Performances are the same as the paper variant unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } // Final Labeling phase // Assigns every pixel of 2x2 blocks the block label __global__ void FinalLabeling(cuda::PtrStepSzi block_labels, cuda::PtrStepSzi labels, const cuda::PtrStepSzb img) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned blocks_index = row * (block_labels.step / block_labels.elem_size) + col; unsigned labels_index = 2 * row * (labels.step / labels.elem_size) + 2 * col; unsigned img_index = 2 * row * (img.step / img.elem_size) + 2 * col; if (row < block_labels.rows && col < block_labels.cols) { unsigned int label = block_labels[blocks_index]; if (img[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (2 * col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (2 * row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (2 * row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BE_TEX : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; cuda::GpuMat d_connections_; cuda::GpuMat d_block_labels_; public: BE_TEX() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); // Extra structures that I would gladly do without d_connections_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_8UC1); d_block_labels_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); grid_size_ = dim3((d_connections_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_connections_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_connections_, d_block_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_block_labels_, d_img_labels_, d_img_); //d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); 
cudaFree(d_changes); d_connections_.release(); d_block_labels_.release(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); d_connections_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_8UC1); d_block_labels_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); cudaFree(d_changes); d_connections_.release(); d_block_labels_.release(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_connections_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_connections_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_connections_, d_block_labels_); // Init blows up // Check what connections contains //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //assert(cudaDeviceSynchronize() == cudaSuccess); //Debug image of the initialization //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } // Debug image of the block labels //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_block_labels_, d_img_labels_, d_img_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE_TEX);
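PerformLabeling and AllScans above share one control pattern: a one-byte device flag is cleared from the host, Scan raises it whenever it lowers a label, and the loop ends on the first pass that leaves it untouched. The following is a stripped-down, hedged sketch of just that pattern in isolation; the do_work kernel is a stand-in, not part of BE_TEX.

#include <cuda_runtime.h>

// Stand-in kernel: decrements positive entries and reports that something changed.
__global__ void do_work(int* data, int n, char* changed) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && data[i] > 0) {
    data[i] -= 1;
    *changed = 1;   // racy but monotone: any write just means "not converged yet"
  }
}

void iterate_until_stable(int* d_data, int n) {
  char h_changed = 0;
  char* d_changed = nullptr;
  cudaMalloc(&d_changed, sizeof(char));
  do {
    h_changed = 0;
    cudaMemcpy(d_changed, &h_changed, sizeof(char), cudaMemcpyHostToDevice);
    do_work<<<(n + 255) / 256, 256>>>(d_data, n, d_changed);
    cudaMemcpy(&h_changed, d_changed, sizeof(char), cudaMemcpyDeviceToHost);
  } while (h_changed);
  cudaFree(d_changed);
}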
#include <assert.h> #include <math.h> // For floor. #include <limits> // We never want positions to go exactly to the border or exactly to the edge // of an occupied piece of geometry. Therefore all rays will be truncated by // a very small amount (hit_margin). const float hit_margin = 1e-5f; const float epsilon = 1e-12f; // Get the integer index of the current voxel. // Manta defines 0.5 as the center of the first cell, you can see this in // manta/source/grid.h Grid::getInterpolated() and the lower level call in // manta/source/util/interpol.h interpol(), where the input position has a // pos - 0.5 applied to it (our interpol function does this as well). __device__ void GetPixelCenter(const CudaVec3& pos, int32_t* ix, int32_t* iy, int32_t* iz) { // Note: you could either calculate (int)round(pos.x - 0.5), or you can // just round down without taking off the 0.5 value. *ix = static_cast<int32_t>(pos.x); *iy = static_cast<int32_t>(pos.y); *iz = static_cast<int32_t>(pos.z); } // Note: IsOutOfDomainReal considers AGAINST the domain to be out of domain. // It also considers the space from the left of the first cell to the center // (even though we don't have samples there) and the space from the right of the // last cell to the border. __device__ bool IsOutOfDomainReal(const CudaVec3& pos, const CudaFlagGrid& flags) { return (pos.x <= 0.0f || // LHS of cell. pos.x >= (float)flags.xsize() || // RHS of cell. pos.y <= 0.0f || pos.y >= (float)flags.ysize() || pos.z <= 0.0f || pos.z >= (float)flags.zsize()); } __device__ bool IsBlockedCell(const CudaFlagGrid& flags, int32_t i, int32_t j, int32_t k, int32_t b) { // Returns true if the cell is blocked. // Shouldn't be called on point outside the domain. if (flags.isOutOfDomain(i, j, k, b)) { // We can't assert in CUDA. Treat out of domain as blocked and hope the CPU // tests catch this. return true; } return !flags.isFluid(i, j, k, b); } __device__ void ClampToDomain(const CudaFlagGrid& flags, int32_t* ix, int32_t* iy, int32_t* iz) { *ix = max(min(*ix, (int32_t)flags.xsize() - (int32_t)1), (int32_t)0); *iy = max(min(*iy, (int32_t)flags.ysize() - (int32_t)1), (int32_t)0); *iz = max(min(*iz, (int32_t)flags.zsize() - (int32_t)1), (int32_t)0); } __device__ void ClampToDomainReal(CudaVec3& pos, const CudaFlagGrid& flags) { // Clamp to a position epsilon inside the simulation domain. pos.x = min(max(pos.x, hit_margin), (float)flags.xsize() - hit_margin); pos.y = min(max(pos.y, hit_margin), (float)flags.ysize() - hit_margin); pos.z = min(max(pos.z, hit_margin), (float)flags.zsize() - hit_margin); } // This version takes in the float position, calculates the current voxel index // and performs the integer lookup on that. __device__ bool IsBlockedCellReal(const CudaFlagGrid& flags, const CudaVec3& pos, int32_t b) { int32_t ix, iy, iz; GetPixelCenter(pos, &ix, &iy, &iz); return IsBlockedCell(flags, ix, iy, iz, b); } #include "quadrants.h" // I HATE doing this, but I used the code from here: // https://github.com/erich666/GraphicsGems/blob/master/gems/RayBox.c // And modified it (there were actually a few numerical precision bugs). // I tested the hell out of it, so it seems to work. // // @param hit_margin - value >= 0 describing margin added to hit to // prevent interpenetration. __device__ bool CudaHitBoundingBox( const float* minB, const float* maxB, // box const float* origin, const float* dir, // ray float* coord) { // hit point. 
char inside = true; Quadrants quadrant[3]; register int i; int whichPlane; float maxT[3]; float candidate_plane[3]; // Find candidate planes; this loop can be avoided if rays cast all from the // eye (assume perpsective view). for (i = 0; i < 3; i++) { if (origin[i] < minB[i]) { quadrant[i] = LEFT; candidate_plane[i] = minB[i]; inside = false; } else if (origin[i] > maxB[i]) { quadrant[i] = RIGHT; candidate_plane[i] = maxB[i]; inside = false; } else { quadrant[i] = MIDDLE; } } // Ray origin inside bounding box. if (inside) { for (i = 0; i < 3; i++) { coord[i] = origin[i]; } return true; } // Calculate T distances to candidate planes. for (i = 0; i < 3; i++) { if (quadrant[i] != MIDDLE && dir[i] != 0.0f) { maxT[i] = (candidate_plane[i] - origin[i]) / dir[i]; } else { maxT[i] = -1.0f; } } // Get largest of the maxT's for final choice of intersection. whichPlane = 0; for (i = 1; i < 3; i++) { if (maxT[whichPlane] < maxT[i]) { whichPlane = i; } } // Check final candidate actually inside box and calculate the coords (if // not). if (maxT[whichPlane] < 0.0f) { return false; } const float err_tol = 1e-6f; for (i = 0; i < 3; i++) { if (whichPlane != i) { coord[i] = origin[i] + maxT[whichPlane] * dir[i]; if (coord[i] < (minB[i] - err_tol) || coord[i] > (maxB[i] + err_tol)) { return false; } } else { coord[i] = candidate_plane[i]; } } return true; } // calcRayBoxIntersection will calculate the intersection point for the ray // starting at pos, and pointing along dt (which should be unit length). // The box is size 1 and is centered at ctr. __device__ bool calcRayBoxIntersection( const CudaVec3& pos, const CudaVec3& dt, const CudaVec3& ctr, const float hit_margin, CudaVec3* ipos) { if (hit_margin < 0) { // We cannot assert on device. Set the pos to (0.5, 0.5, 0.5) return true // and hope that the CPU test catches this. ipos->x = 0.5f; ipos->y = 0.5f; ipos->z = 0.5f; return true; } float box_min[3]; box_min[0] = ctr.x - 0.5f - hit_margin; box_min[1] = ctr.y - 0.5f - hit_margin; box_min[2] = ctr.z - 0.5f - hit_margin; float box_max[3]; box_max[0] = ctr.x + 0.5f + hit_margin; box_max[1] = ctr.y + 0.5f + hit_margin; box_max[2] = ctr.z + 0.5f + hit_margin; bool hit = CudaHitBoundingBox(box_min, box_max, // box &pos.x, &dt.x, // ray &ipos->x); return hit; } // calcRayBorderIntersection will calculate the intersection point for the ray // starting at pos and pointing to next_pos. // // IMPORTANT: This function ASSUMES that the ray actually intersects. Nasty // things will happen if it does not. // EDIT(tompson, 09/25/16): This is so important that we'll actually double // check the input coords anyway. __device__ bool calcRayBorderIntersection( const CudaVec3& pos, const CudaVec3& next_pos, const CudaFlagGrid& flags, const float hit_margin, CudaVec3* ipos) { if (hit_margin <= 0) { // We cannot assert on device. Set the pos to (0.5, 0.5, 0.5) return true // and hope that the CPU test catches this. ipos->x = 0.5f; ipos->y = 0.5f; ipos->z = 0.5f; return true; } // The source location should be INSIDE the boundary. if (IsOutOfDomainReal(pos, flags)) { ipos->x = 0.5f; ipos->y = 0.5f; ipos->z = 0.5f; return true; } // The target location should be OUTSIDE the boundary. if (!IsOutOfDomainReal(next_pos, flags)) { ipos->x = 0.5f; ipos->y = 0.5f; ipos->z = 0.5f; return true; } // Calculate the minimum step length to exit each face and then step that // far. The line equation is: // P = gamma * (next_pos - pos) + pos. // So calculate gamma required to make P < + margin for each dim // independently. 
// P_i = m --> m - pos_i = gamma * (next_pos_i - pos_i) // --> gamma_i = (m - pos_i) / (next_pos_i - pos_i) float min_step = CUDART_INF_F; if (next_pos.x <= hit_margin) { // left face. const float dx = next_pos.x - pos.x; if (std::abs(dx) >= epsilon) { const float xstep = (hit_margin - pos.x) / dx; min_step = min(min_step, xstep); } } if (next_pos.y <= hit_margin) { const float dy = next_pos.y - pos.y; if (std::abs(dy) >= epsilon) { const float ystep = (hit_margin - pos.y) / dy; min_step = min(min_step, ystep); } } if (next_pos.z <= hit_margin) { const float dz = next_pos.z - pos.z; if (std::abs(dz) >= epsilon) { const float zstep = (hit_margin - pos.z) / dz; min_step = min(min_step, zstep); } } // Also calculate the min step to exit a positive face. // P_i = dim - m --> dim - m - pos_i = gamma * (next_pos_i - pos_i) // --> gamma = (dim - m - pos_i) / (next_pos_i - pos_i) if (next_pos.x >= ((float)flags.xsize() - hit_margin)) { // right face. const float dx = next_pos.x - pos.x; if (std::abs(dx) >= epsilon) { const float xstep = ((float)flags.xsize() - hit_margin - pos.x) / dx; min_step = min(min_step, xstep); } } if (next_pos.y >= ((float)flags.ysize() - hit_margin)) { const float dy = next_pos.y - pos.y; if (std::abs(dy) >= epsilon) { const float ystep = ((float)flags.ysize() - hit_margin - pos.y) / dy; min_step = min(min_step, ystep); } } if (next_pos.z >= ((float)flags.zsize() - hit_margin)) { const float dz = next_pos.z - pos.z; if (std::abs(dz) >= epsilon) { const float zstep = ((float)flags.zsize() - hit_margin - pos.z) / dz; min_step = min(min_step, zstep); } } if (min_step < 0 || min_step >= CUDART_INF_F) { return false; } // Take the minimum step. ipos->x = min_step * (next_pos.x - pos.x) + pos.x; ipos->y = min_step * (next_pos.y - pos.y) + pos.y; ipos->z = min_step * (next_pos.z - pos.z) + pos.z; return true; } // The following function performs a line trace along the displacement vector // and returns either: // a) The position 'p + delta' if NO geometry is found on the line trace. or // b) The position at the first geometry blocker along the path. // The search is exhaustive (i.e. O(n) in the length of the displacement vector) // // Note: the returned position is NEVER in geometry or outside the bounds. We // go to great lengths to ensure this. // // TODO(tompsion): This is probably not efficient at all. // It also has the potential to miss geometry along the path if the width // of the geometry is less than 1 grid. // // For real grids values are stored at i+0.5, j+0.5, k+0.5. i.e. the center of // the first cell is (0.5, 0.5, 0.5) so the corner is (0, 0, 0). Likewise the // center of the last cell is (xsize - 1 + 0.5, ...) so the corner is // (xsize, ysize, zsize). // // For MAC grids values are stored at i, j+0.5, k+0.5 for the x component. // So the MAC component for the (i, j, k) index is on the left, bottom and back // faces of the cell respectively (i.e. the negative edge). // // So, if you want to START a line trace at the index (i, j, k) you should add // 0.5 to each component before calling this function as (i, j, k) converted to // real will actually be the (left, bottom, back) side of that cell. __device__ bool calcLineTrace(const CudaVec3& pos, const CudaVec3& delta, const CudaFlagGrid& flags, const int32_t ibatch, CudaVec3* new_pos, const bool do_line_trace) { // We can choose to not do a line trace at all. 
if (!do_line_trace) { (*new_pos) = pos + delta; return false; } // If we're ALREADY in a obstacle segment (or outside the domain) then a lot // of logic below will fail. This function should only be called on fluid // cells! (*new_pos) = pos; const float length = delta.norm(); if (length <= epsilon) { // We're not being asked to step anywhere. Return false and copy the pos. // (copy already done above). return false; } // Figure out the step size in x, y and z for our marching. CudaVec3 dt = delta / length; // Otherwise, we start the line search, by stepping a unit length along the // vector and checking the neighbours. // // A few words about the implementation (because it's complicated and perhaps // needlessly so). We maintain a VERY important loop invariant: new_pos is // NEVER allowed to enter solid geometry or go off the domain. next_pos // is the next step's tentative location, and we will always try and back // it off to the closest non-geometry valid cell before updating new_pos. // // We will also go to great lengths to ensure this loop invariant is // correct (probably at the expense of speed). float cur_length = 0.0f; CudaVec3 next_pos; // Tentative step location. while (cur_length < (length - hit_margin)) { // We haven't stepped far enough. So take a step. float cur_step = min(length - cur_length, 1.0f); next_pos = (*new_pos) + (dt * cur_step); // Check to see if we went too far. // TODO(tompson): This is not correct, we might skip over small // pieces of geometry if the ray brushes against the corner of a // occupied voxel, but doesn't land in it. Fix this (it's very rare though). // There are two possible cases. We've either stepped out of the domain // or entered a blocked cell. if (IsOutOfDomainReal(next_pos, flags)) { // Case 1. 'next_pos' exits the grid. CudaVec3 ipos; const bool hit = calcRayBorderIntersection( *new_pos, next_pos, flags, hit_margin, &ipos); if (!hit) { // This is an EXTREMELY rare case. It happens because either the ray is // almost parallel to the domain boundary, OR floating point round-off // causes the intersection test to fail. // In this case, fall back to simply clamping next_pos inside the domain // boundary. It's not ideal, but better than a hard failure (the reason // why it's wrong is that clamping will bring the point off the ray). ipos = next_pos; ClampToDomainReal(ipos, flags); } if (!IsBlockedCellReal(flags, ipos, ibatch)) { // OK to return here (i.e. we're up against the border and not // in a blocked cell). (*new_pos) = ipos; return true; } else { // Otherwise, we hit the border boundary, but we entered a blocked cell. // Continue on to case 2. next_pos = ipos; } } if (IsBlockedCellReal(flags, next_pos, ibatch)) { // Case 2. next_pos enters a blocked cell. const uint32_t max_count = 4; // TODO(tompson): high enough? // Note: we need to spin here because while we backoff a blocked cell that // is a unit step away, there might be ANOTHER blocked cell along the ray // which is less than a unit step away. for (uint32_t count = 0; count <= max_count; count++) { if (!IsBlockedCellReal(flags, next_pos, ibatch)) { break; } if (count == max_count) { // We cannot assert on device. Set the pos to (0.5, 0.5, 0.5) return // true and hope that the CPU / GPU test catches this. new_pos->x = 0.5f; new_pos->y = 0.5f; new_pos->z = 0.5f; return true; } // Calculate the center of the blocker cell. 
CudaVec3 next_pos_ctr; int32_t ix, iy, iz; GetPixelCenter(next_pos, &ix, &iy, &iz); next_pos_ctr.x = (float)(ix) + 0.5f; next_pos_ctr.y = (float)(iy) + 0.5f; next_pos_ctr.z = (float)(iz) + 0.5f; CudaVec3 ipos; const bool hit = calcRayBoxIntersection(*new_pos, dt, next_pos_ctr, hit_margin, &ipos); if (!hit) { // This can happen in very rare cases if the ray box // intersection test fails because of floating point round off. // It can also happen if the simulation becomes unstable (maybe with a // poorly trained model) and the velocity values are extremely high. // In this case, fall back to simply returning new_pos (for which the // loop invariant guarantees is a valid point). return true; } next_pos = ipos; // There's a nasty corner case here. It's when the cell we were trying // to step to WAS a blocker, but the ray passed through a blocker to get // there (i.e. our step size didn't catch the first blocker). If this is // the case we need to do another intersection test, but this time with // the ray point destination that is the closer cell. // --> There's nothing to do. The outer while loop will try another // intersection for us. } // At this point next_pos is guaranteed to be within the domain and // not within a solid cell. (*new_pos) = next_pos; return true; } // Otherwise, update the position to the current step location. (*new_pos) = next_pos; cur_length += cur_step; } return false; }
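CudaHitBoundingBox above follows the GraphicsGems quadrant formulation. For comparison, here is a hedged sketch of the more common "slab" method for the same ray vs. axis-aligned-box query; it is illustrative only and is not a drop-in replacement tuned to the hit_margin handling used in this file.

#include <cfloat>   // FLT_MAX

// Returns true and the entry point when origin + t*dir (t >= 0) hits [minB, maxB].
__device__ bool RaySlabIntersect(const float minB[3], const float maxB[3],
                                 const float origin[3], const float dir[3],
                                 float coord[3]) {
  float t_near = 0.0f;      // only look forward along the ray
  float t_far  = FLT_MAX;
  for (int i = 0; i < 3; ++i) {
    if (fabsf(dir[i]) < 1e-12f) {
      // Ray parallel to this slab: miss unless the origin lies between the planes.
      if (origin[i] < minB[i] || origin[i] > maxB[i]) return false;
    } else {
      float t0 = (minB[i] - origin[i]) / dir[i];
      float t1 = (maxB[i] - origin[i]) / dir[i];
      if (t0 > t1) { float tmp = t0; t0 = t1; t1 = tmp; }
      t_near = fmaxf(t_near, t0);
      t_far  = fminf(t_far, t1);
      if (t_near > t_far) return false;   // slab intervals no longer overlap
    }
  }
  for (int i = 0; i < 3; ++i) coord[i] = origin[i] + t_near * dir[i];
  return true;   // t_near stays 0 when the origin is already inside the box
}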
#include "caffe/layers/neuron_layer.hpp" #include "caffe/layers/prelu_layer.hpp" namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void PReLULayer<Dtype, MItype, MOtype>::GenerateProgram() { this->device_program_ = this->device_->CreateProgram(); stringstream ss; ss << this->device_program_->setup(); ss << this->device_program_->template define_type<Dtype>("Dtype"); ss << this->device_program_->template define_type<MItype>("MItype"); ss << this->device_program_->template define_type<MOtype>("MOtype"); { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "n", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "in", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "out", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "slope_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "div_factor", KERNEL_ARG_CONST)); ss << this->device_program_->function("PReLUForward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "n"); ss << "int_tp c = (index / dim) % channels / div_factor;" << std::endl; ss << "out[index] = in[index] > (Dtype)0 ? in[index] : in[index]" << " * slope_data[c];" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "n", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "channels", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dim", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "in_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "in_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "out_diff", KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "slope_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "div_factor", KERNEL_ARG_CONST)); ss << this->device_program_->function("PReLUBackward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "n"); ss << "int_tp c = (index / dim) % channels / div_factor;" << std::endl; ss << "out_diff[index] = in_diff[index]" << " * (((in_data[index] > (Dtype)0) ? (Dtype)1 : (Dtype)0)" << " + ((in_data[index] <= (Dtype)0) ? 
(Dtype)1 : (Dtype)0)" << " * slope_data[c]);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } { KernelArgs args; args.push_back(this->device_program_->template create_kernel_arg<uint_tp>( "n", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "rows", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "rowPitch", KERNEL_ARG_CONST)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "in_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "in_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "out_diff", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("PReLUParamBackward", args); ss << this->device_program_->kernel_loop("uint_tp", "index", "n"); ss << "out_diff[index] = in_diff[index] * in_data[index]" << " * ((in_data[index] <= (Dtype)0) ? (Dtype)1: (Dtype)0);" << std::endl; ss << "for (int k = 1; k < rows; k++) {" << std::endl; ss << "out_diff[index] += in_diff[index + k * rowPitch]" << " * in_data[index + k * rowPitch]" << " * ((in_data[index + k * rowPitch] <= (Dtype)0)" << " ? (Dtype)1 : (Dtype)0);" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; } this->device_program_->set_source(ss.str()); this->device_program_->Compile(true, true); } template<typename Dtype, typename MItype, typename MOtype> void PReLULayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { vptr<const Dtype> bottom_data = bottom[0]->gpu_data(); vptr<Dtype> top_data = top[0]->mutable_gpu_data(); const int_tp count = bottom[0]->count(); const int_tp dim = bottom[0]->count(2); const int_tp channels = bottom[0]->shape(1); vptr<const Dtype> slope_data = this->blobs_[0]->gpu_data(); const int_tp div_factor = channel_shared_ ? channels : 1; // For in-place computation if (top[0] == bottom[0]) { this->device_->template copy<Dtype>(count, bottom_data, bottom_memory_.mutable_gpu_data()); } shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("PReLUForward"); kernel->add_arg(&count); kernel->add_arg(&channels); kernel->add_arg(&dim); kernel->add_arg(&bottom_data); kernel->add_arg(&top_data); kernel->add_arg(&slope_data); kernel->add_arg(&div_factor); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } template<typename Dtype, typename MItype, typename MOtype> void PReLULayer<Dtype, MItype, MOtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { vptr<const Dtype> bottom_data = bottom[0]->gpu_data(); vptr<const Dtype> top_diff = top[0]->gpu_diff(); const int_tp count = bottom[0]->count(); const int_tp dim = bottom[0]->count(2); const int_tp channels = bottom[0]->shape(1); // For in-place computation if (top[0] == bottom[0]) { bottom_data = bottom_memory_.gpu_data(); } // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computation), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { vptr<Dtype> slope_diff = this->blobs_[0]->mutable_gpu_diff(); int_tp cdim = channels * dim; vptr<Dtype> backward_buff_diff = backward_buff_.mutable_gpu_diff(); int_tp num = bottom[0]->shape(0); int_tp top_offset = top[0]->offset(1); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("PReLUParamBackward"); kernel->add_arg(&cdim); kernel->add_arg(&num); kernel->add_arg(&top_offset); kernel->add_arg(&top_diff); kernel->add_arg(&bottom_data); kernel->add_arg(&backward_buff_diff); vector<size_t> work_size(1, cdim); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); if (channel_shared_) { Dtype dsum; this->device_->template dot<Dtype>(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &dsum); this->device_->template add_scalar<Dtype>(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } else { this->device_->template gemv<Dtype>(CblasNoTrans, channels, dim, 1., backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } // Propagate to bottom if (propagate_down[0]) { vptr<Dtype> bottom_diff = bottom[0]->mutable_gpu_diff(); vptr<const Dtype> slope_data = this->blobs_[0]->gpu_data(); int_tp div_factor = channel_shared_ ? channels : 1; shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("PReLUBackward"); kernel->add_arg(&count); kernel->add_arg(&channels); kernel->add_arg(&dim); kernel->add_arg(&top_diff); kernel->add_arg(&bottom_data); kernel->add_arg(&bottom_diff); kernel->add_arg(&slope_data); kernel->add_arg(&div_factor); vector<size_t> work_size(1, count); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); } } INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, GenerateProgram, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, GenerateProgram, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, GenerateProgram, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, Forward_gpu, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, Forward_gpu, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, Forward_gpu, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, Backward_gpu, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, Backward_gpu, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(PReLULayer, Backward_gpu, (double), (double), (double)); } // namespace caffe
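// The strings assembled above are compiled at runtime into device kernels.
// As a plain-CUDA reference for what the generated PReLUForward and
// PReLUBackward bodies compute, here is a minimal sketch (hypothetical
// standalone float kernels; they are not part of Caffe's device-program
// machinery and use ordinary grid-stride loops instead of kernel_loop()):
//
//   forward:  out[i]      = in[i] > 0 ? in[i] : in[i] * slope[c]
//   backward: out_diff[i] = in_diff[i] * (in[i] > 0 ? 1 : slope[c])
//   with c = (i / dim) % channels / div_factor
//   (div_factor == channels collapses c to 0 when the slope is shared).
__global__ void prelu_forward_ref(int n, int channels, int dim,
                                  const float* in, float* out,
                                  const float* slope, int div_factor) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x) {
    const int c = (i / dim) % channels / div_factor;
    out[i] = in[i] > 0.f ? in[i] : in[i] * slope[c];
  }
}

__global__ void prelu_backward_ref(int n, int channels, int dim,
                                   const float* in_diff, const float* in_data,
                                   float* out_diff, const float* slope,
                                   int div_factor) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x) {
    const int c = (i / dim) % channels / div_factor;
    out_diff[i] = in_diff[i] * (in_data[i] > 0.f ? 1.f : slope[c]);
  }
}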
* \file * cub::AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. */ #pragma once #include <iterator> #include "../util_type.cuh" #include "../block/block_reduce.cuh" #include "../block/block_scan.cuh" #include "../block/block_exchange.cuh" #include "../config.cuh" #include "../thread/thread_search.cuh" #include "../thread/thread_operators.cuh" #include "../iterator/cache_modified_input_iterator.cuh" #include "../iterator/counting_input_iterator.cuh" CUB_NAMESPACE_BEGIN /****************************************************************************** * Tuning policy ******************************************************************************/ /** * Parameterizable tuning policy type for AgentSpmv */ template < int _BLOCK_THREADS, ///< Threads per thread block int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) CacheLoadModifier _ROW_OFFSETS_SEARCH_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets during search CacheLoadModifier _ROW_OFFSETS_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets CacheLoadModifier _COLUMN_INDICES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR column-indices CacheLoadModifier _VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR values CacheLoadModifier _VECTOR_VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading vector values bool _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (vs. pre-staged through shared memory) BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use struct AgentSpmvPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) DIRECT_LOAD_NONZEROS = _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (pre-staged through shared memory) }; static const CacheLoadModifier ROW_OFFSETS_SEARCH_LOAD_MODIFIER = _ROW_OFFSETS_SEARCH_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets static const CacheLoadModifier ROW_OFFSETS_LOAD_MODIFIER = _ROW_OFFSETS_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets static const CacheLoadModifier COLUMN_INDICES_LOAD_MODIFIER = _COLUMN_INDICES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR column-indices static const CacheLoadModifier VALUES_LOAD_MODIFIER = _VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR values static const CacheLoadModifier VECTOR_VALUES_LOAD_MODIFIER = _VECTOR_VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading vector values static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ template < typename ValueT, ///< Matrix and vector value type typename OffsetT> ///< Signed integer type for sequence offsets struct SpmvParams { const ValueT* d_values; ///< Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix <b>A</b>. const OffsetT* d_row_end_offsets; ///< Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values const OffsetT* d_column_indices; ///< Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix <b>A</b>. 
(Indices are zero-valued.) const ValueT* d_vector_x; ///< Pointer to the array of \p num_cols values corresponding to the dense input vector <em>x</em> ValueT* d_vector_y; ///< Pointer to the array of \p num_rows values corresponding to the dense output vector <em>y</em> int num_rows; ///< Number of rows of matrix <b>A</b>. int num_cols; ///< Number of columns of matrix <b>A</b>. int num_nonzeros; ///< Number of nonzero elements of matrix <b>A</b>. ValueT alpha; ///< Alpha multiplicand ValueT beta; ///< Beta addend-multiplicand }; /** * \brief AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. */ template < typename AgentSpmvPolicyT, ///< Parameterized AgentSpmvPolicy tuning policy type typename ValueT, ///< Matrix and vector value type typename OffsetT, ///< Signed integer type for sequence offsets bool HAS_ALPHA, ///< Whether the input parameter \p alpha is 1 bool HAS_BETA, ///< Whether the input parameter \p beta is 0 int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability struct AgentSpmv { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { BLOCK_THREADS = AgentSpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = AgentSpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; /// 2D merge path coordinate type typedef typename CubVector<OffsetT, 2>::Type CoordinateT; /// Input iterator wrapper types (for applying cache modifiers) typedef CacheModifiedInputIterator< AgentSpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, OffsetT, OffsetT> RowOffsetsSearchIteratorT; typedef CacheModifiedInputIterator< AgentSpmvPolicyT::ROW_OFFSETS_LOAD_MODIFIER, OffsetT, OffsetT> RowOffsetsIteratorT; typedef CacheModifiedInputIterator< AgentSpmvPolicyT::COLUMN_INDICES_LOAD_MODIFIER, OffsetT, OffsetT> ColumnIndicesIteratorT; typedef CacheModifiedInputIterator< AgentSpmvPolicyT::VALUES_LOAD_MODIFIER, ValueT, OffsetT> ValueIteratorT; typedef CacheModifiedInputIterator< AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, ValueT, OffsetT> VectorValueIteratorT; // Tuple type for scanning (pairs accumulated segment-value with segment-index) typedef KeyValuePair<OffsetT, ValueT> KeyValuePairT; // Reduce-value-by-segment scan operator typedef ReduceByKeyOp<cub::Sum> ReduceBySegmentOpT; // BlockReduce specialization typedef BlockReduce< ValueT, BLOCK_THREADS, BLOCK_REDUCE_WARP_REDUCTIONS> BlockReduceT; // BlockScan specialization typedef BlockScan< KeyValuePairT, BLOCK_THREADS, AgentSpmvPolicyT::SCAN_ALGORITHM> BlockScanT; // BlockScan specialization typedef BlockScan< ValueT, BLOCK_THREADS, AgentSpmvPolicyT::SCAN_ALGORITHM> BlockPrefixSumT; // BlockExchange specialization typedef BlockExchange< ValueT, BLOCK_THREADS, ITEMS_PER_THREAD> BlockExchangeT; /// Merge item type (either a non-zero value or a row-end offset) union MergeItem { // Value type to pair with index type OffsetT // (NullType if loading values directly during merge) using MergeValueT = cub::detail::conditional_t< AgentSpmvPolicyT::DIRECT_LOAD_NONZEROS, NullType, ValueT>; OffsetT row_end_offset; MergeValueT nonzero; }; /// Shared memory type required by this thread block struct _TempStorage { CoordinateT tile_coords[2]; union Aliasable { // Smem needed for tile of merge items MergeItem merge_items[ITEMS_PER_THREAD + TILE_ITEMS + 1]; // Smem needed for block exchange typename BlockExchangeT::TempStorage exchange; // Smem needed for block-wide reduction 
typename BlockReduceT::TempStorage reduce; // Smem needed for tile scanning typename BlockScanT::TempStorage scan; // Smem needed for tile prefix sum typename BlockPrefixSumT::TempStorage prefix_sum; } aliasable; }; /// Temporary storage type (unionable) struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- _TempStorage& temp_storage; /// Reference to temp_storage SpmvParams<ValueT, OffsetT>& spmv_params; ValueIteratorT wd_values; ///< Wrapped pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix <b>A</b>. RowOffsetsIteratorT wd_row_end_offsets; ///< Wrapped Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values ColumnIndicesIteratorT wd_column_indices; ///< Wrapped Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix <b>A</b>. (Indices are zero-valued.) VectorValueIteratorT wd_vector_x; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector <em>x</em> VectorValueIteratorT wd_vector_y; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector <em>x</em> //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ AgentSpmv( TempStorage& temp_storage, ///< Reference to temp_storage SpmvParams<ValueT, OffsetT>& spmv_params) ///< SpMV input parameter bundle : temp_storage(temp_storage.Alias()), spmv_params(spmv_params), wd_values(spmv_params.d_values), wd_row_end_offsets(spmv_params.d_row_end_offsets), wd_column_indices(spmv_params.d_column_indices), wd_vector_x(spmv_params.d_vector_x), wd_vector_y(spmv_params.d_vector_y) {} /** * Consume a merge tile, specialized for direct-load of nonzeros */ __device__ __forceinline__ KeyValuePairT ConsumeTile( int tile_idx, CoordinateT tile_start_coord, CoordinateT tile_end_coord, Int2Type<true> is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch { int tile_num_rows = tile_end_coord.x - tile_start_coord.x; int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset; // Gather the row end-offsets for the merge tile into shared memory for (int item = threadIdx.x; item < tile_num_rows + ITEMS_PER_THREAD; item += BLOCK_THREADS) { const OffsetT offset = (cub::min)(static_cast<OffsetT>(tile_start_coord.x + item), static_cast<OffsetT>(spmv_params.num_rows - 1)); s_tile_row_end_offsets[item] = wd_row_end_offsets[offset]; } CTA_SYNC(); // Search for the thread's starting coordinate within the merge tile CountingInputIterator<OffsetT> tile_nonzero_indices(tile_start_coord.y); CoordinateT thread_start_coord; MergePathSearch( OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal s_tile_row_end_offsets, // List A tile_nonzero_indices, // List B tile_num_rows, tile_num_nonzeros, thread_start_coord); CTA_SYNC(); // Perf-sync // Compute the thread's merge path segment CoordinateT thread_current_coord = thread_start_coord; KeyValuePairT scan_segment[ITEMS_PER_THREAD]; ValueT running_total = 0.0; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { OffsetT nonzero_idx = 
CUB_MIN(tile_nonzero_indices[thread_current_coord.y], spmv_params.num_nonzeros - 1); OffsetT column_idx = wd_column_indices[nonzero_idx]; ValueT value = wd_values[nonzero_idx]; ValueT vector_value = wd_vector_x[column_idx]; ValueT nonzero = value * vector_value; OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) { // Move down (accumulate) running_total += nonzero; scan_segment[ITEM].value = running_total; scan_segment[ITEM].key = tile_num_rows; ++thread_current_coord.y; } else { // Move right (reset) scan_segment[ITEM].value = running_total; scan_segment[ITEM].key = thread_current_coord.x; running_total = 0.0; ++thread_current_coord.x; } } CTA_SYNC(); // Block-wide reduce-value-by-segment KeyValuePairT tile_carry; ReduceBySegmentOpT scan_op; KeyValuePairT scan_item; scan_item.value = running_total; scan_item.key = thread_current_coord.x; BlockScanT(temp_storage.aliasable.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); if (tile_num_rows > 0) { if (threadIdx.x == 0) scan_item.key = -1; // Direct scatter #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { if (scan_segment[ITEM].key < tile_num_rows) { if (scan_item.key == scan_segment[ITEM].key) scan_segment[ITEM].value = scan_item.value + scan_segment[ITEM].value; if (HAS_ALPHA) { scan_segment[ITEM].value *= spmv_params.alpha; } if (HAS_BETA) { // Update the output vector element ValueT addend = spmv_params.beta * wd_vector_y[tile_start_coord.x + scan_segment[ITEM].key]; scan_segment[ITEM].value += addend; } // Set the output vector element spmv_params.d_vector_y[tile_start_coord.x + scan_segment[ITEM].key] = scan_segment[ITEM].value; } } } // Return the tile's running carry-out return tile_carry; } /** * Consume a merge tile, specialized for indirect load of nonzeros */ __device__ __forceinline__ KeyValuePairT ConsumeTile( int tile_idx, CoordinateT tile_start_coord, CoordinateT tile_end_coord, Int2Type<false> is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch { int tile_num_rows = tile_end_coord.x - tile_start_coord.x; int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; #if (CUB_PTX_ARCH >= 520) OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset; ValueT* s_tile_nonzeros = &temp_storage.aliasable.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; // Gather the nonzeros for the merge tile into shared memory #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); ValueIteratorT a = wd_values + tile_start_coord.y + nonzero_idx; ColumnIndicesIteratorT ci = wd_column_indices + tile_start_coord.y + nonzero_idx; ValueT* s = s_tile_nonzeros + nonzero_idx; if (nonzero_idx < tile_num_nonzeros) { OffsetT column_idx = *ci; ValueT value = *a; ValueT vector_value = wd_vector_x[column_idx]; ValueT nonzero = value * vector_value; *s = nonzero; } } #else OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset; ValueT* s_tile_nonzeros = &temp_storage.aliasable.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; // Gather the nonzeros for the merge tile into shared memory if (tile_num_nonzeros > 0) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); nonzero_idx = CUB_MIN(nonzero_idx, tile_num_nonzeros - 1); OffsetT column_idx = 
wd_column_indices[tile_start_coord.y + nonzero_idx]; ValueT value = wd_values[tile_start_coord.y + nonzero_idx]; ValueT vector_value = wd_vector_x[column_idx]; ValueT nonzero = value * vector_value; s_tile_nonzeros[nonzero_idx] = nonzero; } } #endif // Gather the row end-offsets for the merge tile into shared memory #pragma unroll 1 for (int item = threadIdx.x; item < tile_num_rows + ITEMS_PER_THREAD; item += BLOCK_THREADS) { const OffsetT offset = (cub::min)(static_cast<OffsetT>(tile_start_coord.x + item), static_cast<OffsetT>(spmv_params.num_rows - 1)); s_tile_row_end_offsets[item] = wd_row_end_offsets[offset]; } CTA_SYNC(); // Search for the thread's starting coordinate within the merge tile CountingInputIterator<OffsetT> tile_nonzero_indices(tile_start_coord.y); CoordinateT thread_start_coord; MergePathSearch( OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal s_tile_row_end_offsets, // List A tile_nonzero_indices, // List B tile_num_rows, tile_num_nonzeros, thread_start_coord); CTA_SYNC(); // Perf-sync // Compute the thread's merge path segment CoordinateT thread_current_coord = thread_start_coord; KeyValuePairT scan_segment[ITEMS_PER_THREAD]; ValueT running_total = 0.0; OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; ValueT nonzero = s_tile_nonzeros[thread_current_coord.y]; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) { // Move down (accumulate) scan_segment[ITEM].value = nonzero; running_total += nonzero; ++thread_current_coord.y; nonzero = s_tile_nonzeros[thread_current_coord.y]; } else { // Move right (reset) scan_segment[ITEM].value = 0.0; running_total = 0.0; ++thread_current_coord.x; row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; } scan_segment[ITEM].key = thread_current_coord.x; } CTA_SYNC(); // Block-wide reduce-value-by-segment KeyValuePairT tile_carry; ReduceBySegmentOpT scan_op; KeyValuePairT scan_item; scan_item.value = running_total; scan_item.key = thread_current_coord.x; BlockScanT(temp_storage.aliasable.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); if (threadIdx.x == 0) { scan_item.key = thread_start_coord.x; scan_item.value = 0.0; } if (tile_num_rows > 0) { CTA_SYNC(); // Scan downsweep and scatter ValueT* s_partials = &temp_storage.aliasable.merge_items[0].nonzero; if (scan_item.key != scan_segment[0].key) { s_partials[scan_item.key] = scan_item.value; } else { scan_segment[0].value += scan_item.value; } #pragma unroll for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) { if (scan_segment[ITEM - 1].key != scan_segment[ITEM].key) { s_partials[scan_segment[ITEM - 1].key] = scan_segment[ITEM - 1].value; } else { scan_segment[ITEM].value += scan_segment[ITEM - 1].value; } } CTA_SYNC(); #pragma unroll 1 for (int item = threadIdx.x; item < tile_num_rows; item += BLOCK_THREADS) { spmv_params.d_vector_y[tile_start_coord.x + item] = s_partials[item]; } } // Return the tile's running carry-out return tile_carry; } /** * Consume input tile */ __device__ __forceinline__ void ConsumeTile( CoordinateT* d_tile_coordinates, ///< [in] Pointer to the temporary array of tile starting coordinates KeyValuePairT* d_tile_carry_pairs, ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block int num_merge_tiles) ///< [in] Number of merge tiles { int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index if (tile_idx >= num_merge_tiles) return; // Read our starting coordinates if (threadIdx.x < 
2) { if (d_tile_coordinates == NULL) { // Search our starting coordinates OffsetT diagonal = (tile_idx + threadIdx.x) * TILE_ITEMS; CoordinateT tile_coord; CountingInputIterator<OffsetT> nonzero_indices(0); // Search the merge path MergePathSearch( diagonal, RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), nonzero_indices, spmv_params.num_rows, spmv_params.num_nonzeros, tile_coord); temp_storage.tile_coords[threadIdx.x] = tile_coord; } else { temp_storage.tile_coords[threadIdx.x] = d_tile_coordinates[tile_idx + threadIdx.x]; } } CTA_SYNC(); CoordinateT tile_start_coord = temp_storage.tile_coords[0]; CoordinateT tile_end_coord = temp_storage.tile_coords[1]; // Consume multi-segment tile KeyValuePairT tile_carry = ConsumeTile( tile_idx, tile_start_coord, tile_end_coord, Int2Type<AgentSpmvPolicyT::DIRECT_LOAD_NONZEROS>()); // Output the tile's carry-out if (threadIdx.x == 0) { if (HAS_ALPHA) { tile_carry.value *= spmv_params.alpha; } tile_carry.key += tile_start_coord.x; if (tile_carry.key >= spmv_params.num_rows) { // FIXME: This works around an invalid memory access in the // fixup kernel. The underlying issue needs to be debugged and // properly fixed, but this hack prevents writes to // out-of-bounds addresses. It doesn't appear to have an effect // on the validity of the results, since this only affects the // carry-over from the last tile in the input. tile_carry.key = spmv_params.num_rows - 1; tile_carry.value = ValueT{}; } d_tile_carry_pairs[tile_idx] = tile_carry; } } }; CUB_NAMESPACE_END
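// ConsumeTile above relies on MergePathSearch to split the (row, nonzero)
// merge lattice evenly across threads: for a given diagonal d it returns the
// coordinate (x, y) with x + y == d at which the merge path crosses that
// diagonal, where list A holds the row-end offsets and list B is the counting
// sequence of nonzero indices. A minimal host-side sketch of that binary
// search follows (illustration only; cub::MergePathSearch is the real thing).
#include <algorithm>

// x counts row-end offsets consumed ("move right / reset" steps above);
// y counts nonzeros consumed ("move down / accumulate" steps above).
static void merge_path_search_sketch(int diagonal, const int* row_end_offsets,
                                     int a_len, int b_len, int* x, int* y) {
  int lo = std::max(diagonal - b_len, 0);  // smallest feasible x on this diagonal
  int hi = std::min(diagonal, a_len);      // largest feasible x on this diagonal
  while (lo < hi) {
    const int pivot = (lo + hi) >> 1;
    // For a counting sequence, B[diagonal - pivot - 1] == diagonal - pivot - 1.
    if (row_end_offsets[pivot] <= diagonal - pivot - 1) {
      lo = pivot + 1;  // row `pivot` ends before that nonzero: path crosses the row boundary
    } else {
      hi = pivot;      // that nonzero still belongs to row `pivot`: path stays on the row
    }
  }
  *x = std::min(lo, a_len);
  *y = diagonal - lo;
}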
* * Benchmark: Iterative solver tests (solver.cpp and solver.cu are identical, the latter being required for compilation using CUDA nvcc) * */ #ifndef NDEBUG #define NDEBUG #endif #include <boost/numeric/ublas/matrix_sparse.hpp> #include <boost/numeric/ublas/io.hpp> #include <boost/numeric/ublas/operation_sparse.hpp> #define VIENNACL_WITH_UBLAS 1 #include "viennacl/scalar.hpp" #include "viennacl/vector.hpp" #include "viennacl/coordinate_matrix.hpp" #include "viennacl/compressed_matrix.hpp" #include "viennacl/ell_matrix.hpp" #include "viennacl/hyb_matrix.hpp" #include "viennacl/linalg/cg.hpp" #include "viennacl/linalg/bicgstab.hpp" #include "viennacl/linalg/gmres.hpp" #include "viennacl/linalg/ilu.hpp" #include "viennacl/linalg/ichol.hpp" #include "viennacl/linalg/jacobi_precond.hpp" #include "viennacl/linalg/row_scaling.hpp" #ifdef VIENNACL_WITH_OPENCL #include "viennacl/linalg/mixed_precision_cg.hpp" #endif #include "viennacl/io/matrix_market.hpp" #include <iostream> #include <vector> #include "benchmark-utils.hpp" #include "io.hpp" using namespace boost::numeric; #define BENCHMARK_RUNS 1 template <typename ScalarType> ScalarType diff_inf(ublas::vector<ScalarType> & v1, viennacl::vector<ScalarType> & v2) { ublas::vector<ScalarType> v2_cpu(v2.size()); viennacl::copy(v2.begin(), v2.end(), v2_cpu.begin()); for (unsigned int i=0;i<v1.size(); ++i) { if ( std::max( fabs(v2_cpu[i]), fabs(v1[i]) ) > 0 ) v2_cpu[i] = fabs(v2_cpu[i] - v1[i]) / std::max( fabs(v2_cpu[i]), fabs(v1[i]) ); else v2_cpu[i] = 0.0; } return norm_inf(v2_cpu); } template <typename ScalarType> ScalarType diff_2(ublas::vector<ScalarType> & v1, viennacl::vector<ScalarType> & v2) { ublas::vector<ScalarType> v2_cpu(v2.size()); viennacl::copy(v2.begin(), v2.end(), v2_cpu.begin()); return norm_2(v1 - v2_cpu) / norm_2(v1); } template <typename MatrixType, typename VectorType, typename SolverTag, typename PrecondTag> void run_solver(MatrixType const & matrix, VectorType const & rhs, VectorType const & ref_result, SolverTag const & solver, PrecondTag const & precond, long ops) { Timer timer; VectorType result(rhs); VectorType residual(rhs); viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { result = viennacl::linalg::solve(matrix, rhs, solver, precond); } viennacl::backend::finish(); double exec_time = timer.get(); std::cout << "Exec. time: " << exec_time << std::endl; std::cout << "Est. "; printOps(ops, exec_time / BENCHMARK_RUNS); residual -= viennacl::linalg::prod(matrix, result); std::cout << "Relative residual: " << viennacl::linalg::norm_2(residual) / viennacl::linalg::norm_2(rhs) << std::endl; std::cout << "Estimated rel. 
residual: " << solver.error() << std::endl; std::cout << "Iterations: " << solver.iters() << std::endl; result -= ref_result; std::cout << "Relative deviation from result: " << viennacl::linalg::norm_2(result) / viennacl::linalg::norm_2(ref_result) << std::endl; } template<typename ScalarType> int run_benchmark() { Timer timer; double exec_time; ScalarType std_factor1 = static_cast<ScalarType>(3.1415); ScalarType std_factor2 = static_cast<ScalarType>(42.0); viennacl::scalar<ScalarType> vcl_factor1(std_factor1); viennacl::scalar<ScalarType> vcl_factor2(std_factor2); ublas::vector<ScalarType> ublas_vec1; ublas::vector<ScalarType> ublas_vec2; ublas::vector<ScalarType> ublas_result; unsigned int solver_iters = 100; unsigned int solver_krylov_dim = 20; double solver_tolerance = 1e-6; if (!readVectorFromFile<ScalarType>("../examples/testdata/rhs65025.txt", ublas_vec1)) { std::cout << "Error reading RHS file" << std::endl; return 0; } std::cout << "done reading rhs" << std::endl; ublas_vec2 = ublas_vec1; if (!readVectorFromFile<ScalarType>("../examples/testdata/result65025.txt", ublas_result)) { std::cout << "Error reading result file" << std::endl; return 0; } std::cout << "done reading result" << std::endl; viennacl::compressed_matrix<ScalarType> vcl_compressed_matrix(ublas_vec1.size(), ublas_vec1.size()); viennacl::coordinate_matrix<ScalarType> vcl_coordinate_matrix(ublas_vec1.size(), ublas_vec1.size()); viennacl::ell_matrix<ScalarType> vcl_ell_matrix; viennacl::hyb_matrix<ScalarType> vcl_hyb_matrix; viennacl::vector<ScalarType> vcl_vec1(ublas_vec1.size()); viennacl::vector<ScalarType> vcl_vec2(ublas_vec1.size()); viennacl::vector<ScalarType> vcl_result(ublas_vec1.size()); ublas::compressed_matrix<ScalarType> ublas_matrix; if (!viennacl::io::read_matrix_market_file(ublas_matrix, "../examples/testdata/mat65k.mtx")) { std::cout << "Error reading Matrix file" << std::endl; return EXIT_FAILURE; } //unsigned int cg_mat_size = cg_mat.size(); std::cout << "done reading matrix" << std::endl; //cpu to gpu: viennacl::copy(ublas_matrix, vcl_compressed_matrix); viennacl::copy(ublas_matrix, vcl_coordinate_matrix); viennacl::copy(ublas_matrix, vcl_ell_matrix); viennacl::copy(ublas_matrix, vcl_hyb_matrix); viennacl::copy(ublas_vec1, vcl_vec1); viennacl::copy(ublas_vec2, vcl_vec2); viennacl::copy(ublas_result, vcl_result); std::cout << "------- Jacobi preconditioner ----------" << std::endl; viennacl::linalg::jacobi_precond< ublas::compressed_matrix<ScalarType> > ublas_jacobi(ublas_matrix, viennacl::linalg::jacobi_tag()); viennacl::linalg::jacobi_precond< viennacl::compressed_matrix<ScalarType> > vcl_jacobi_csr(vcl_compressed_matrix, viennacl::linalg::jacobi_tag()); viennacl::linalg::jacobi_precond< viennacl::coordinate_matrix<ScalarType> > vcl_jacobi_coo(vcl_coordinate_matrix, viennacl::linalg::jacobi_tag()); std::cout << "------- Row-Scaling preconditioner ----------" << std::endl; viennacl::linalg::row_scaling< ublas::compressed_matrix<ScalarType> > ublas_row_scaling(ublas_matrix, viennacl::linalg::row_scaling_tag(1)); viennacl::linalg::row_scaling< viennacl::compressed_matrix<ScalarType> > vcl_row_scaling_csr(vcl_compressed_matrix, viennacl::linalg::row_scaling_tag(1)); viennacl::linalg::row_scaling< viennacl::coordinate_matrix<ScalarType> > vcl_row_scaling_coo(vcl_coordinate_matrix, viennacl::linalg::row_scaling_tag(1)); /////////////////////////////////////////////////////////////////////////////// ////////////////////// Incomplete Cholesky preconditioner ////////////////// 
/////////////////////////////////////////////////////////////////////////////// std::cout << "------- ICHOL0 on CPU (ublas) ----------" << std::endl; timer.start(); viennacl::linalg::ichol0_precond< ublas::compressed_matrix<ScalarType> > ublas_ichol0(ublas_matrix, viennacl::linalg::ichol0_tag()); exec_time = timer.get(); std::cout << "Setup time: " << exec_time << std::endl; timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) ublas_ichol0.apply(ublas_vec1); exec_time = timer.get(); std::cout << "ublas time: " << exec_time << std::endl; std::cout << "------- ICHOL0 with ViennaCL ----------" << std::endl; timer.start(); viennacl::linalg::ichol0_precond< viennacl::compressed_matrix<ScalarType> > vcl_ichol0(vcl_compressed_matrix, viennacl::linalg::ichol0_tag()); exec_time = timer.get(); std::cout << "Setup time: " << exec_time << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_ichol0.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL time: " << exec_time << std::endl; /////////////////////////////////////////////////////////////////////////////// ////////////////////// ILU preconditioner ////////////////// /////////////////////////////////////////////////////////////////////////////// std::cout << "------- ILU0 on with ublas ----------" << std::endl; timer.start(); viennacl::linalg::ilu0_precond< ublas::compressed_matrix<ScalarType> > ublas_ilu0(ublas_matrix, viennacl::linalg::ilu0_tag()); exec_time = timer.get(); std::cout << "Setup time (no level scheduling): " << exec_time << std::endl; timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) ublas_ilu0.apply(ublas_vec1); exec_time = timer.get(); std::cout << "ublas ILU0 substitution time (no level scheduling): " << exec_time << std::endl; std::cout << "------- ILU0 with ViennaCL ----------" << std::endl; timer.start(); viennacl::linalg::ilu0_precond< viennacl::compressed_matrix<ScalarType> > vcl_ilu0(vcl_compressed_matrix, viennacl::linalg::ilu0_tag()); exec_time = timer.get(); std::cout << "Setup time (no level scheduling): " << exec_time << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_ilu0.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL ILU0 substitution time (no level scheduling): " << exec_time << std::endl; timer.start(); viennacl::linalg::ilu0_tag ilu0_with_level_scheduling; ilu0_with_level_scheduling.use_level_scheduling(true); viennacl::linalg::ilu0_precond< viennacl::compressed_matrix<ScalarType> > vcl_ilu0_level_scheduling(vcl_compressed_matrix, ilu0_with_level_scheduling); exec_time = timer.get(); std::cout << "Setup time (with level scheduling): " << exec_time << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_ilu0_level_scheduling.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL ILU0 substitution time (with level scheduling): " << exec_time << std::endl; //////////////////////////////////////////// std::cout << "------- Block-ILU0 with ublas ----------" << std::endl; ublas_vec1 = ublas_vec2; viennacl::copy(ublas_vec1, vcl_vec1); timer.start(); viennacl::linalg::block_ilu_precond< ublas::compressed_matrix<ScalarType>, viennacl::linalg::ilu0_tag> ublas_block_ilu0(ublas_matrix, viennacl::linalg::ilu0_tag()); exec_time = timer.get(); std::cout << "Setup time: " << exec_time << std::endl; timer.start(); for (int 
runs=0; runs<BENCHMARK_RUNS; ++runs) ublas_block_ilu0.apply(ublas_vec1); exec_time = timer.get(); std::cout << "ublas time: " << exec_time << std::endl; std::cout << "------- Block-ILU0 with ViennaCL ----------" << std::endl; timer.start(); viennacl::linalg::block_ilu_precond< viennacl::compressed_matrix<ScalarType>, viennacl::linalg::ilu0_tag> vcl_block_ilu0(vcl_compressed_matrix, viennacl::linalg::ilu0_tag()); exec_time = timer.get(); std::cout << "Setup time: " << exec_time << std::endl; //vcl_block_ilu0.apply(vcl_vec1); //warm-up viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_block_ilu0.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL time: " << exec_time << std::endl; //////////////////////////////////////////// std::cout << "------- ILUT with ublas ----------" << std::endl; ublas_vec1 = ublas_vec2; viennacl::copy(ublas_vec1, vcl_vec1); timer.start(); viennacl::linalg::ilut_precond< ublas::compressed_matrix<ScalarType> > ublas_ilut(ublas_matrix, viennacl::linalg::ilut_tag()); exec_time = timer.get(); std::cout << "Setup time (no level scheduling): " << exec_time << std::endl; timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) ublas_ilut.apply(ublas_vec1); exec_time = timer.get(); std::cout << "ublas ILUT substitution time (no level scheduling): " << exec_time << std::endl; std::cout << "------- ILUT with ViennaCL ----------" << std::endl; timer.start(); viennacl::linalg::ilut_precond< viennacl::compressed_matrix<ScalarType> > vcl_ilut(vcl_compressed_matrix, viennacl::linalg::ilut_tag()); exec_time = timer.get(); std::cout << "Setup time (no level scheduling): " << exec_time << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_ilut.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL ILUT substitution time (no level scheduling): " << exec_time << std::endl; timer.start(); viennacl::linalg::ilut_tag ilut_with_level_scheduling; ilut_with_level_scheduling.use_level_scheduling(true); viennacl::linalg::ilut_precond< viennacl::compressed_matrix<ScalarType> > vcl_ilut_level_scheduling(vcl_compressed_matrix, ilut_with_level_scheduling); exec_time = timer.get(); std::cout << "Setup time (with level scheduling): " << exec_time << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_ilut_level_scheduling.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL ILUT substitution time (with level scheduling): " << exec_time << std::endl; //////////////////////////////////////////// std::cout << "------- Block-ILUT with ublas ----------" << std::endl; ublas_vec1 = ublas_vec2; viennacl::copy(ublas_vec1, vcl_vec1); timer.start(); viennacl::linalg::block_ilu_precond< ublas::compressed_matrix<ScalarType>, viennacl::linalg::ilut_tag> ublas_block_ilut(ublas_matrix, viennacl::linalg::ilut_tag()); exec_time = timer.get(); std::cout << "Setup time: " << exec_time << std::endl; //ublas_block_ilut.apply(ublas_vec1); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) ublas_block_ilut.apply(ublas_vec1); exec_time = timer.get(); std::cout << "ublas time: " << exec_time << std::endl; std::cout << "------- Block-ILUT with ViennaCL ----------" << std::endl; timer.start(); viennacl::linalg::block_ilu_precond< viennacl::compressed_matrix<ScalarType>, viennacl::linalg::ilut_tag> vcl_block_ilut(vcl_compressed_matrix, 
viennacl::linalg::ilut_tag()); exec_time = timer.get(); std::cout << "Setup time: " << exec_time << std::endl; //vcl_block_ilut.apply(vcl_vec1); //warm-up viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) vcl_block_ilut.apply(vcl_vec1); viennacl::backend::finish(); exec_time = timer.get(); std::cout << "ViennaCL time: " << exec_time << std::endl; /////////////////////////////////////////////////////////////////////////////// ////////////////////// CG solver ////////////////// /////////////////////////////////////////////////////////////////////////////// long cg_ops = static_cast<long>(solver_iters * (ublas_matrix.nnz() + 6 * ublas_vec2.size())); viennacl::linalg::cg_tag cg_solver(solver_tolerance, solver_iters); std::cout << "------- CG solver (no preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, viennacl::linalg::no_precond(), cg_ops); std::cout << "------- CG solver (no preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, viennacl::linalg::no_precond(), cg_ops); #ifdef VIENNACL_WITH_OPENCL if (sizeof(ScalarType) == sizeof(double)) { std::cout << "------- CG solver, mixed precision (no preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; viennacl::linalg::mixed_precision_cg_tag mixed_precision_cg_solver(solver_tolerance, solver_iters); run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, mixed_precision_cg_solver, viennacl::linalg::no_precond(), cg_ops); run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, mixed_precision_cg_solver, viennacl::linalg::no_precond(), cg_ops); } #endif std::cout << "------- CG solver (no preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, cg_solver, viennacl::linalg::no_precond(), cg_ops); std::cout << "------- CG solver (no preconditioner) via ViennaCL, ell_matrix ----------" << std::endl; run_solver(vcl_ell_matrix, vcl_vec2, vcl_result, cg_solver, viennacl::linalg::no_precond(), cg_ops); std::cout << "------- CG solver (no preconditioner) via ViennaCL, hyb_matrix ----------" << std::endl; run_solver(vcl_hyb_matrix, vcl_vec2, vcl_result, cg_solver, viennacl::linalg::no_precond(), cg_ops); std::cout << "------- CG solver (ICHOL0 preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_ichol0, cg_ops); std::cout << "------- CG solver (ICHOL0 preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_ichol0, cg_ops); std::cout << "------- CG solver (ILU0 preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_ilu0, cg_ops); std::cout << "------- CG solver (ILU0 preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_ilu0, cg_ops); std::cout << "------- CG solver (Block-ILU0 preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_block_ilu0, cg_ops); std::cout << "------- CG solver (Block-ILU0 preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_block_ilu0, cg_ops); std::cout << "------- CG solver (ILUT 
preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_ilut, cg_ops); std::cout << "------- CG solver (ILUT preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_ilut, cg_ops); std::cout << "------- CG solver (ILUT preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, cg_solver, vcl_ilut, cg_ops); std::cout << "------- CG solver (Block-ILUT preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_block_ilut, cg_ops); std::cout << "------- CG solver (Block-ILUT preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_block_ilut, cg_ops); std::cout << "------- CG solver (Jacobi preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_jacobi, cg_ops); std::cout << "------- CG solver (Jacobi preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_jacobi_csr, cg_ops); std::cout << "------- CG solver (Jacobi preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, cg_solver, vcl_jacobi_coo, cg_ops); std::cout << "------- CG solver (row scaling preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, cg_solver, ublas_row_scaling, cg_ops); std::cout << "------- CG solver (row scaling preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, cg_solver, vcl_row_scaling_csr, cg_ops); std::cout << "------- CG solver (row scaling preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, cg_solver, vcl_row_scaling_coo, cg_ops); /////////////////////////////////////////////////////////////////////////////// ////////////////////// BiCGStab solver ////////////////// /////////////////////////////////////////////////////////////////////////////// long bicgstab_ops = static_cast<long>(solver_iters * (2 * ublas_matrix.nnz() + 13 * ublas_vec2.size())); viennacl::linalg::bicgstab_tag bicgstab_solver(solver_tolerance, solver_iters); std::cout << "------- BiCGStab solver (no preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, bicgstab_solver, viennacl::linalg::no_precond(), bicgstab_ops); std::cout << "------- BiCGStab solver (no preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, bicgstab_solver, viennacl::linalg::no_precond(), bicgstab_ops); std::cout << "------- BiCGStab solver (no preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, bicgstab_solver, viennacl::linalg::no_precond(), bicgstab_ops); std::cout << "------- BiCGStab solver (ILUT preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, bicgstab_solver, ublas_ilut, bicgstab_ops); std::cout << "------- BiCGStab solver (ILUT preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; 
run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_ilut, bicgstab_ops); std::cout << "------- BiCGStab solver (Block-ILUT preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, bicgstab_solver, ublas_block_ilut, bicgstab_ops); #ifdef VIENNACL_WITH_OPENCL std::cout << "------- BiCGStab solver (Block-ILUT preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_block_ilut, bicgstab_ops); #endif // std::cout << "------- BiCGStab solver (ILUT preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; // run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_ilut, bicgstab_ops); std::cout << "------- BiCGStab solver (Jacobi preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, bicgstab_solver, ublas_jacobi, bicgstab_ops); std::cout << "------- BiCGStab solver (Jacobi preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_jacobi_csr, bicgstab_ops); std::cout << "------- BiCGStab solver (Jacobi preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_jacobi_coo, bicgstab_ops); std::cout << "------- BiCGStab solver (row scaling preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, bicgstab_solver, ublas_row_scaling, bicgstab_ops); std::cout << "------- BiCGStab solver (row scaling preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_row_scaling_csr, bicgstab_ops); std::cout << "------- BiCGStab solver (row scaling preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, bicgstab_solver, vcl_row_scaling_coo, bicgstab_ops); /////////////////////////////////////////////////////////////////////////////// /////////////////////// GMRES solver /////////////////// /////////////////////////////////////////////////////////////////////////////// long gmres_ops = static_cast<long>(solver_iters * (ublas_matrix.nnz() + (solver_iters * 2 + 7) * ublas_vec2.size())); viennacl::linalg::gmres_tag gmres_solver(solver_tolerance, solver_iters, solver_krylov_dim); std::cout << "------- GMRES solver (no preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, gmres_solver, viennacl::linalg::no_precond(), gmres_ops); std::cout << "------- GMRES solver (no preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, gmres_solver, viennacl::linalg::no_precond(), gmres_ops); std::cout << "------- GMRES solver (no preconditioner) on GPU, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, gmres_solver, viennacl::linalg::no_precond(), bicgstab_ops); std::cout << "------- GMRES solver (ILUT preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, gmres_solver, ublas_ilut, gmres_ops); std::cout << "------- GMRES solver (ILUT preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, 
gmres_solver, vcl_ilut, gmres_ops); std::cout << "------- GMRES solver (ILUT preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, gmres_solver, vcl_ilut, gmres_ops); std::cout << "------- GMRES solver (Jacobi preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, gmres_solver, ublas_jacobi, gmres_ops); std::cout << "------- GMRES solver (Jacobi preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, gmres_solver, vcl_jacobi_csr, gmres_ops); std::cout << "------- GMRES solver (Jacobi preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, gmres_solver, vcl_jacobi_coo, gmres_ops); std::cout << "------- GMRES solver (row scaling preconditioner) using ublas ----------" << std::endl; run_solver(ublas_matrix, ublas_vec2, ublas_result, gmres_solver, ublas_row_scaling, gmres_ops); std::cout << "------- GMRES solver (row scaling preconditioner) via ViennaCL, compressed_matrix ----------" << std::endl; run_solver(vcl_compressed_matrix, vcl_vec2, vcl_result, gmres_solver, vcl_row_scaling_csr, gmres_ops); std::cout << "------- GMRES solver (row scaling preconditioner) via ViennaCL, coordinate_matrix ----------" << std::endl; run_solver(vcl_coordinate_matrix, vcl_vec2, vcl_result, gmres_solver, vcl_row_scaling_coo, gmres_ops); return EXIT_SUCCESS; } int main() { std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << " Device Info" << std::endl; std::cout << "----------------------------------------------" << std::endl; #ifdef VIENNACL_WITH_OPENCL std::cout << viennacl::ocl::current_device().info() << std::endl; #endif std::cout << "---------------------------------------------------------------------------" << std::endl; std::cout << "---------------------------------------------------------------------------" << std::endl; std::cout << " Benchmark for Execution Times of Iterative Solvers provided with ViennaCL " << std::endl; std::cout << "---------------------------------------------------------------------------" << std::endl; std::cout << " Note that the purpose of this benchmark is not to run solvers until" << std::endl; std::cout << " convergence. Instead, only the execution times of a few iterations are" << std::endl; std::cout << " recorded. Residual errors are only printed for information." << std::endl << std::endl; std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "## Benchmark :: Solver" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; std::cout << " -------------------------------" << std::endl; std::cout << " # benchmarking single-precision" << std::endl; std::cout << " -------------------------------" << std::endl; run_benchmark<float>(); #ifdef VIENNACL_WITH_OPENCL if( viennacl::ocl::current_device().double_support() ) #endif { std::cout << std::endl; std::cout << " -------------------------------" << std::endl; std::cout << " # benchmarking double-precision" << std::endl; std::cout << " -------------------------------" << std::endl; run_benchmark<double>(); } return 0; }
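// printOps() comes from benchmark-utils.hpp and is not shown in this excerpt.
// The *_ops values above are rough per-solver operation-count estimates
// (e.g. cg_ops = iters * (nnz + 6 * n): one sparse matrix-vector product plus
// a few vector updates per CG iteration). The helper below is a hypothetical
// stand-in showing how such a count and a measured run time translate into a
// throughput figure; it is an assumption for illustration, not the actual
// definition of printOps().
#include <iostream>

// Print an estimated throughput in GFLOP/s, assuming ~2 flops (one multiply,
// one add) per counted operation. The real helper may use another convention.
static void print_estimated_throughput(long ops, double seconds) {
  const double gflops = 2.0 * static_cast<double>(ops) / seconds * 1e-9;
  std::cout << "Est. throughput: " << gflops << " GFLOP/s" << std::endl;
}

// Usage mirroring the benchmark loop above:
//   print_estimated_throughput(cg_ops, exec_time / BENCHMARK_RUNS);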
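// The OneFlow optimizer kernels that follow wrap per-element update functors
// (SGDUpdateFunctor, MomentumUpdateFunctor, AdamUpdateFunctor, ...) whose
// definitions live in a separate header and are not shown here. As a point of
// reference for the simplest case, below is a sketch of a vanilla SGD step
// with gradient scaling, L1/L2 regularization and weight decay. It is an
// assumption about the functor's semantics for illustration, not OneFlow's
// actual definition.
template<typename T, typename G>
__host__ __device__ inline void sgd_update_one_sketch(
    const G* model_diff, T* model, T scale, float l1, float l2,
    float weight_decay, float learning_rate) {
  const T w = *model;
  // Scale the raw gradient, then add the L1 (sign) and L2 regularization terms.
  T g = static_cast<T>(*model_diff) * scale;
  g += static_cast<T>(l1) * ((w >= static_cast<T>(0)) ? static_cast<T>(1) : static_cast<T>(-1));
  g += static_cast<T>(l2) * w;
  // Weight decay is applied directly to the parameter value.
  *model = w - static_cast<T>(learning_rate) * (g + static_cast<T>(weight_decay) * w);
}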
namespace oneflow { namespace { template<typename T, typename G> __global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } CUDA_1D_KERNEL_LOOP(i, n) { SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay, learning_rate_val); } } template<typename T, typename K, typename IDX> __global__ void IndexedSlicesSGDUpdateGpu(float weight_decay, const IDX feature_size, const int64_t lower_bound, const int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const K* indices, const T* values, T* model) { const int64_t n = *num_unique_instance * feature_size; const T lr = *learning_rate; CUDA_1D_KERNEL_LOOP_T(IDX, i, n) { const IDX indices_idx = i / feature_size; const IDX inner_idx = i - indices_idx * feature_size; const IDX instance_id = indices[indices_idx]; if (instance_id >= lower_bound && instance_id < upper_bound) { const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx; SGDUpdateFunctor<T, T>()(values + i, model + model_idx, static_cast<T>(1), 0.0, 0.0, weight_decay, lr); } } } template<typename T> __global__ void SumSquares2(int64_t n, const T* src0, T* dst0, const T* src1, T* dst1) { T t_sum0 = 0; T t_sum1 = 0; CUDA_1D_KERNEL_LOOP(i, n) { t_sum0 += src0[i] * src0[i]; t_sum1 += src1[i] * src1[i]; } typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage0; __shared__ typename BlockReduce::TempStorage temp_storage1; T b_sum0 = BlockReduce(temp_storage0).Sum(t_sum0); T b_sum1 = BlockReduce(temp_storage1).Sum(t_sum1); if (threadIdx.x == 0) { cuda::atomic::Add(dst0, b_sum0); cuda::atomic::Add(dst1, b_sum1); } } } // namespace template<typename T, typename G> struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model); }; template<typename T, typename G> void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model) { SGDUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model); } template<typename T> struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model); }; template<typename T> void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model) { SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update( ctx, n, scale, l1, l2, 
weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model); } template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>; template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>; template<typename T, typename K, typename IDX> struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> { static void Update(DeviceCtx* ctx, float weight_decay, int64_t num_indices, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const K* indices, const T* values, T* model); }; template<typename T, typename K, typename IDX> void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update( DeviceCtx* ctx, float weight_decay, int64_t num_indices, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const K* indices, const T* values, T* model) { IndexedSlicesSGDUpdateGpu<T, K, IDX> <<<BlocksNum4ThreadsNum(num_indices * feature_size), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(weight_decay, feature_size, lower_bound, upper_bound, num_unique_instance, learning_rate, indices, values, model); } #define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \ idx_type_pair) \ template struct IndexedSlicesSGDUpdateKernelUtil< \ DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \ OF_PP_PAIR_FIRST(idx_type_pair)>; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ); #undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU namespace { template<typename T, typename G> __global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } CUDA_1D_KERNEL_LOOP(i, n) { MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta, weight_decay, learning_rate_val); } } template<typename T, typename K, typename IDX> __global__ void IndexedSlicesMomentumUpdateGpu(T beta, float weight_decay, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const K* indices, const T* values, T* model, T* momentum) { const int64_t n = *num_unique_instance * feature_size; const T lr = *learning_rate; CUDA_1D_KERNEL_LOOP(i, n) { const IDX indices_idx = i / feature_size; const IDX inner_idx = i - indices_idx * feature_size; const IDX instance_id = indices[indices_idx]; if (instance_id >= lower_bound && instance_id < upper_bound) { const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx; MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx, static_cast<T>(1), 0.0, 0.0, beta, weight_decay, lr); } } } } // namespace template<typename T, typename G> struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay, float learning_rate_val, const float* learning_rate, const T* 
scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum); }; template<typename T, typename G> void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum) { MomentumUpdateGpu<T, G> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, beta, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model, momentum); } template<typename T> struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* momentum); }; template<typename T> void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* momentum) { MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update( ctx, n, scale, l1, l2, beta, weight_decay, learning_rate_val, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, momentum); } template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>; template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>; template<typename T, typename K, typename IDX> struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> { static void Update(DeviceCtx* ctx, T beta, float weight_decay, int64_t num_instance, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const K* indices, const T* values, T* model, T* momentum); }; template<typename T, typename K, typename IDX> void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update( DeviceCtx* ctx, T beta, float weight_decay, int64_t num_instance, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const K* indices, const T* values, T* model, T* momentum) { IndexedSlicesMomentumUpdateGpu<T, K, IDX><<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( beta, weight_decay, feature_size, lower_bound, upper_bound, num_unique_instance, learning_rate, indices, values, model, momentum); } #define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \ val_type_pair, key_type_pair, idx_type_pair) \ template struct IndexedSlicesMomentumMdUpdateKernelUtil< \ DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \ OF_PP_PAIR_FIRST(idx_type_pair)>; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ); #undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU namespace { __global__ void BiasCorrectionFactorKernelGpu(float beta, const int64_t* train_step, float* out) { const auto exponent = static_cast<double>(*train_step + 1); const float 
bias_correction_factor = 1.0 - static_cast<float>(pow(beta, exponent)); *out = bias_correction_factor; } template<typename T, typename G> __global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v, T* max_v) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; } if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; } CUDA_1D_KERNEL_LOOP(i, n) { AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, max_v + i, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, bias_correction1_val, bias_correction2_val, learning_rate_val); } } template<typename T> __global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t, T* beta2_t) { if (skip_if != nullptr && *skip_if != 0) { return; } *beta1_t *= beta1; *beta2_t *= beta2; } template<typename T, typename K, typename IDX> __global__ void IndexedSlicesAdamUpdateGpu( float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float lr, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v, T* max_v) { if (learning_rate != nullptr) { lr = *learning_rate; } float bias_correction1 = 1.0; float bias_correction2 = 1.0; if (bias_correction1_ptr != nullptr) { bias_correction1 = *bias_correction1_ptr; } if (bias_correction2_ptr != nullptr) { bias_correction2 = *bias_correction2_ptr; } const int64_t n = *num_unique_instance * feature_size; CUDA_1D_KERNEL_LOOP(i, n) { const IDX indices_idx = i / feature_size; const IDX inner_idx = i - indices_idx * feature_size; const IDX instance_id = indices[indices_idx]; if (instance_id >= lower_bound && instance_id < upper_bound) { const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx; AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx, max_v + i, static_cast<T>(1), 0, 0, beta1, beta2, epsilon, weight_decay, amsgrad, bias_correction1, bias_correction2, lr); } } } template<typename T, typename G> __global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon, const T* beta1_t, const T* beta2_t, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v) { if (skip_if != nullptr && *skip_if != 0) { return; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } CUDA_1D_KERNEL_LOOP(i, n) { LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i, v + i, scale, l1, l2, beta1, beta2, epsilon); } } template<typename T> __global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate, const int64_t* skip_if, const T* w_norm_2, const T* g_norm_2, const T* beta1_t, const T* beta2_t, const T* adam_diff, T* model) { if (skip_if != 
nullptr && *skip_if != 0) { return; } const float lr = LambLRFunctor<T>()(*learning_rate, w_norm_2, g_norm_2); CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); } } } // namespace template<typename T, typename G> struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v, T* max_v); }; template<typename T, typename G> void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff, T* model, T* m, T* v, T* max_v) { AdamUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, learning_rate, scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model, m, v, max_v); } template<typename T> struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const float16* model_diff, T* model, T* m, T* v, T* max_v); }; template<typename T> void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float learning_rate_val, float bias_correction1_val, float bias_correction2_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const float16* model_diff, T* model, T* m, T* v, T* max_v) { AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update( ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, learning_rate_val, bias_correction1_val, bias_correction2_val, learning_rate, scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr, reinterpret_cast<const half*>(model_diff), model, m, v, max_v); } template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>; template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>; template<typename T, typename G> __global__ void AdagradUpdateGpu(int64_t n, T scale, float l1, float l2, float lr_decay, float epsilon, float weight_decay, float learning_rate_val, int64_t train_step, const float* learning_rate, const int64_t* 
train_step_ptr, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* sum) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (train_step_ptr != nullptr) { train_step = *train_step_ptr + 1; } // train_step_ptr start from zero. if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } learning_rate_val = learning_rate_val / (1 + (train_step - 1) * lr_decay); CUDA_1D_KERNEL_LOOP(i, n) { AdagradUpdateFunctor<T, G>()(model_diff + i, model + i, sum + i, scale, l1, l2, epsilon, weight_decay, learning_rate_val); } } template<typename T, typename G> struct AdagradUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float lr_decay, float epsilon, float weight_decay, float learning_rate_val, int64_t train_step, const float* learning_rate, const int64_t* train_step_ptr, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* sum); }; template<typename T, typename G> void AdagradUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float lr_decay, float epsilon, float weight_decay, float learning_rate_val, int64_t train_step, const float* learning_rate, const int64_t* train_step_ptr, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* sum) { AdagradUpdateGpu<T, G> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, lr_decay, epsilon, weight_decay, learning_rate_val, train_step, learning_rate, train_step_ptr, scale_by_ptr, skip_if, model_diff, model, sum); } template struct AdagradUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct AdagradUpdateKernelUtil<DeviceType::kGPU, double, double>; template<typename T, typename G> struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t); }; template<typename T, typename G> void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t) { AdamUpdateBetaTGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(beta1, beta2, skip_if, beta1_t, beta2_t); LambGradGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, skip_if, model_diff, adam_diff, model, m, v); T* w_norm_2 = norm_buffer; T* g_norm_2 = norm_buffer + 1; Memset<DeviceType::kGPU>(ctx, norm_buffer, 0, 2 * sizeof(T)); SumSquares2<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, model, w_norm_2, adam_diff, g_norm_2); LambUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, weight_decay, learning_rate, skip_if, w_norm_2, g_norm_2, beta1_t, beta2_t, adam_diff, model); } template<typename T> struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> { static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float 
beta1, float beta2, float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t); }; template<typename T> void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update( DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2, float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t, T* beta2_t) { LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update( ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer, beta1_t, beta2_t); } template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>; template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>; template<typename T, typename K, typename IDX> struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> { static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float lr, int64_t num_instance, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v, T* max_v); }; template<typename T, typename K, typename IDX> void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update( DeviceCtx* ctx, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction, float lr, int64_t num_instance, int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate, const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v, T* max_v) { IndexedSlicesAdamUpdateGpu<T, K, IDX><<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, lr, feature_size, lower_bound, upper_bound, num_unique_instance, learning_rate, bias_correction1_ptr, bias_correction2_ptr, indices, values, model, m, v, max_v); } #define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \ idx_type_pair) \ template struct IndexedSlicesAdamMdUpdateKernelUtil< \ DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \ OF_PP_PAIR_FIRST(idx_type_pair)>; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ); #undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU template<> struct BiasCorrectionFactorKernelUtil<DeviceType::kGPU> { static void BiasCorrectionFactorCompute(DeviceCtx* ctx, float beta, const int64_t* train_step, float* out); }; void BiasCorrectionFactorKernelUtil<DeviceType::kGPU>::BiasCorrectionFactorCompute( DeviceCtx* ctx, float beta, const int64_t* train_step, float* out) { BiasCorrectionFactorKernelGpu<<<1, 1, 0, ctx->cuda_stream()>>>(beta, train_step, out); } namespace { template<typename T, typename G, bool 
centered> __global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square, T* mean_gradient, float epsilon, float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model) { if (skip_if != nullptr && *skip_if != 0) { return; } if (learning_rate != nullptr) { learning_rate_val = *learning_rate; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } CUDA_1D_KERNEL_LOOP(i, n) { RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2, mean_square + i, (centered ? mean_gradient + i : nullptr), epsilon, weight_decay, decay_rate, learning_rate_val); } } } // namespace template<typename T, typename G> struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon, float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* mean_square, T* mean_gradient); }; template<typename T, typename G> void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon, float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* mean_square, T* mean_gradient) { if (centered) { RmsPropUpdateGpu<T, G, true> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate, learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model); } else { RmsPropUpdateGpu<T, G, false> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate, learning_rate_val, learning_rate, scale_by_ptr, skip_if, model_diff, model); } } template<typename T> struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon, float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* mean_square, T* mean_gradient); }; template<typename T> void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon, float weight_decay, float decay_rate, float learning_rate_val, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* mean_square, T* mean_gradient) { RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update( ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate_val, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, mean_square, mean_gradient); } template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>; template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>; namespace { template<typename T, typename G> __global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* 
model, T* model_diff_tmp) { if (skip_if != nullptr && *skip_if != 0) { return; } if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; } CUDA_1D_KERNEL_LOOP(i, n) { model_diff_tmp[i] = CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2); } } template<typename T> __global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon, T lars_coefficient, const int64_t* skip_if, T* data_tmp) { if (skip_if != nullptr && *skip_if != 0) { return; } T* model_norm = &data_tmp[0]; T* model_diff_norm = &data_tmp[1]; T* local_learning_rate = &data_tmp[2]; *model_norm = std::sqrt(*model_norm); *model_diff_norm = std::sqrt(*model_diff_norm); T lars = static_cast<T>(1); if (*model_norm > 0 && *model_diff_norm > 0) { lars = lars_coefficient * (*model_norm) / (epsilon + (*model_diff_norm) + weight_decay * (*model_norm)); } *local_learning_rate = *learning_rate * lars; } template<typename T> __global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay, const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp, T* model) { if (skip_if != nullptr && *skip_if != 0) { return; } CUDA_1D_KERNEL_LOOP(i, n) { LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay, *local_learning_rate); } } } // namespace template<typename T, typename G> struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon, float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp); }; template<typename T, typename G> void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon, float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp) { LarsScaleModelDiffGpu<T, G> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp); T* model_norm = data_tmp; T* model_diff_norm = data_tmp + 1; T* local_learning_rate = data_tmp + 2; Memset<DeviceType::kGPU>(ctx, data_tmp, 0, 2 * sizeof(T)); SumSquares2<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, model, model_norm, model_diff_tmp, model_diff_norm); LarsGetLocalLearningRateGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>( learning_rate, weight_decay, epsilon, lars_coefficient, skip_if, data_tmp); LarsUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp, model); } template<typename T> struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> { static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon, float lars_coefficient, float weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp); }; template<typename T> void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update( DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon, float lars_coefficient, float 
weight_decay, const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp) { LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update( ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model, momentum, data_tmp, model_diff_tmp); } template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>; template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>; template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>; } // namespace oneflow
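// --- Illustrative sketch (not part of the original sources) -----------------
// The launch pattern used by every Update() above (BlocksNum4ThreadsNum /
// kCudaThreadsNumPerBlock / CUDA_1D_KERNEL_LOOP) is a plain grid-stride loop
// over n elements. The minimal, self-contained kernel below shows that same
// pattern with a hand-written SGD step. The exact regularization convention of
// SGDUpdateFunctor lives in another header, so the l1/l2/weight_decay terms
// here are an assumption for illustration only, not OneFlow's definition.
#include <cuda_runtime.h>
#include <cstdint>

namespace sketch {

constexpr int kThreadsPerBlock = 256;                 // assumed block size

inline int BlocksFor(int64_t n) {                     // ceil(n / threads)
  return static_cast<int>((n + kThreadsPerBlock - 1) / kThreadsPerBlock);
}

template<typename T>
__global__ void SgdStep(int64_t n, T lr, T l1, T l2, T weight_decay,
                        const T* grad, T* model) {
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += static_cast<int64_t>(gridDim.x) * blockDim.x) {
    const T w = model[i];
    const T sign_w = w > T(0) ? T(1) : (w < T(0) ? T(-1) : T(0));
    const T g = grad[i] + l1 * sign_w + l2 * w;       // assumed regularizer form
    model[i] = w - lr * (g + weight_decay * w);
  }
}

}  // namespace sketch
// Usage: sketch::SgdStep<<<sketch::BlocksFor(n), sketch::kThreadsPerBlock, 0,
//                          stream>>>(n, lr, l1, l2, wd, grad, model);
// -----------------------------------------------------------------------------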
#include <cuda.h> #include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h" #include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" #include "DataFormats/EcalDigi/interface/EcalDataFrame.h" #include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" #include "DataFormats/Math/interface/approx_exp.h" #include "DataFormats/Math/interface/approx_log.h" #include "FWCore/Utilities/interface/CMSUnrollLoop.h" #include "AmplitudeComputationCommonKernels.h" #include "AmplitudeComputationKernels.h" #include "KernelHelpers.h" namespace ecal { namespace multifit { template <typename MatrixType> __device__ __forceinline__ bool update_covariance(EcalPulseCovariance const& pulse_covariance, MatrixType& inverse_cov, SampleVector const& amplitudes) { constexpr int nsamples = SampleVector::RowsAtCompileTime; constexpr int npulses = BXVectorType::RowsAtCompileTime; CMS_UNROLL_LOOP for (unsigned int ipulse = 0; ipulse < npulses; ipulse++) { auto const amplitude = amplitudes.coeff(ipulse); if (amplitude == 0) continue; // FIXME: ipulse - 5 -> ipulse - firstOffset int bx = ipulse - 5; int first_sample_t = std::max(0, bx + 3); int offset = -3 - bx; auto const value_sq = amplitude * amplitude; for (int col = first_sample_t; col < nsamples; col++) { for (int row = col; row < nsamples; row++) { inverse_cov(row, col) += value_sq * __ldg(&pulse_covariance.covval[row + offset][col + offset]); } } } return true; } /// /// launch ctx parameters are (nchannels / block, blocks) /// TODO: trivial impl for now, there must be a way to improve /// /// Conventions: /// - amplitudes -> solution vector, what we are fitting for /// - samples -> raw detector responses /// - passive constraint - satisfied constraint /// - active constraint - unsatisfied (yet) constraint /// __global__ void kernel_minimize(uint32_t const* dids_eb, uint32_t const* dids_ee, SampleMatrix const* __restrict__ noisecov, EcalPulseCovariance const* __restrict__ pulse_covariance, BXVectorType* bxs, SampleVector const* __restrict__ samples, SampleVector* amplitudesEB, SampleVector* amplitudesEE, PulseMatrixType const* __restrict__ pulse_matrix, ::ecal::reco::StorageScalarType* chi2sEB, ::ecal::reco::StorageScalarType* chi2sEE, ::ecal::reco::StorageScalarType* energiesEB, ::ecal::reco::StorageScalarType* energiesEE, char* acState, int nchannels, int max_iterations, uint32_t const offsetForHashes, uint32_t const offsetForInputs) { // FIXME: ecal has 10 samples and 10 pulses.... // but this needs to be properly treated and renamed everywhere constexpr auto NSAMPLES = SampleMatrix::RowsAtCompileTime; constexpr auto NPULSES = SampleMatrix::ColsAtCompileTime; static_assert(NSAMPLES == NPULSES); using DataType = SampleVector::Scalar; extern __shared__ char shrmem[]; DataType* shrMatrixLForFnnlsStorage = reinterpret_cast<DataType*>(shrmem) + calo::multifit::MapSymM<DataType, NPULSES>::total * threadIdx.x; DataType* shrAtAStorage = reinterpret_cast<DataType*>(shrmem) + calo::multifit::MapSymM<DataType, NPULSES>::total * (threadIdx.x + blockDim.x); // channel int idx = threadIdx.x + blockDim.x * blockIdx.x; // ref the right ptr #define ARRANGE(var) auto* var = idx >= offsetForInputs ? var##EE : var##EB ARRANGE(amplitudes); ARRANGE(chi2s); ARRANGE(energies); #undef ARRANGE if (idx < nchannels) { if (static_cast<MinimizationState>(acState[idx]) == MinimizationState::Precomputed) return; // get the hash int const inputCh = idx >= offsetForInputs ? idx - offsetForInputs : idx; auto const* dids = idx >= offsetForInputs ? 
dids_ee : dids_eb; auto const did = DetId{dids[inputCh]}; auto const isBarrel = did.subdetId() == EcalBarrel; auto const hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId()) : offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId()); // inits int iter = 0; int npassive = 0; calo::multifit::ColumnVector<NPULSES, int> pulseOffsets; CMS_UNROLL_LOOP for (int i = 0; i < NPULSES; ++i) pulseOffsets(i) = i; calo::multifit::ColumnVector<NPULSES, DataType> resultAmplitudes; CMS_UNROLL_LOOP for (int counter = 0; counter < NPULSES; counter++) resultAmplitudes(counter) = 0; // inits //SampleDecompLLT covariance_decomposition; //SampleMatrix inverse_cov; // SampleVector::Scalar chi2 = 0, chi2_now = 0; float chi2 = 0, chi2_now = 0; // loop until ocnverge while (true) { if (iter >= max_iterations) break; //inverse_cov = noisecov[idx]; //DataType covMatrixStorage[MapSymM<DataType, NSAMPLES>::total]; DataType* covMatrixStorage = shrMatrixLForFnnlsStorage; calo::multifit::MapSymM<DataType, NSAMPLES> covMatrix{covMatrixStorage}; int counter = 0; CMS_UNROLL_LOOP for (int col = 0; col < NSAMPLES; col++) { CMS_UNROLL_LOOP for (int row = col; row < NSAMPLES; row++) covMatrixStorage[counter++] = __ldg(&noisecov[idx].coeffRef(row, col)); } update_covariance(pulse_covariance[hashedId], covMatrix, resultAmplitudes); // compute actual covariance decomposition //covariance_decomposition.compute(inverse_cov); //auto const& matrixL = covariance_decomposition.matrixL(); DataType matrixLStorage[calo::multifit::MapSymM<DataType, NSAMPLES>::total]; calo::multifit::MapSymM<DataType, NSAMPLES> matrixL{matrixLStorage}; calo::multifit::compute_decomposition_unrolled(matrixL, covMatrix); // L * A = P calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES> A; calo::multifit::solve_forward_subst_matrix(A, pulse_matrix[idx], matrixL); // L b = s float reg_b[NSAMPLES]; calo::multifit::solve_forward_subst_vector(reg_b, samples[idx], matrixL); // FIXME: shared mem //DataType AtAStorage[MapSymM<DataType, NPULSES>::total]; calo::multifit::MapSymM<DataType, NPULSES> AtA{shrAtAStorage}; //SampleMatrix AtA; SampleVector Atb; CMS_UNROLL_LOOP for (int icol = 0; icol < NPULSES; icol++) { float reg_ai[NSAMPLES]; // load column icol CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) reg_ai[counter] = A(counter, icol); // compute diagoanl float sum = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum += reg_ai[counter] * reg_ai[counter]; // store AtA(icol, icol) = sum; // go thru the other columns CMS_UNROLL_LOOP for (int j = icol + 1; j < NPULSES; j++) { // load column j float reg_aj[NSAMPLES]; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) reg_aj[counter] = A(counter, j); // accum float sum = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum += reg_aj[counter] * reg_ai[counter]; // store //AtA(icol, j) = sum; AtA(j, icol) = sum; } // Atb accum float sum_atb = 0.f; CMS_UNROLL_LOOP for (int counter = 0; counter < NSAMPLES; counter++) sum_atb += reg_ai[counter] * reg_b[counter]; // store atb Atb(icol) = sum_atb; } // FIXME: shared mem //DataType matrixLForFnnlsStorage[MapSymM<DataType, NPULSES>::total]; calo::multifit::MapSymM<DataType, NPULSES> matrixLForFnnls{shrMatrixLForFnnlsStorage}; calo::multifit::fnnls(AtA, Atb, //amplitudes[idx], resultAmplitudes, npassive, pulseOffsets, matrixLForFnnls, 1e-11, 500, 16, 2); calo::multifit::calculateChiSq(matrixL, pulse_matrix[idx], resultAmplitudes, samples[idx], chi2_now); auto deltachi2 = 
chi2_now - chi2; chi2 = chi2_now; if (std::abs(deltachi2) < 1e-3) break; //---- AM: TEST //---- it was 3 lines above, now here as in the CPU version ++iter; } // store to global output values // FIXME: amplitudes are used in global directly chi2s[inputCh] = chi2; energies[inputCh] = resultAmplitudes(5); CMS_UNROLL_LOOP for (int counter = 0; counter < NPULSES; counter++) amplitudes[inputCh](counter) = resultAmplitudes(counter); } } namespace v1 { void minimization_procedure(EventInputDataGPU const& eventInputGPU, EventOutputDataGPU& eventOutputGPU, EventDataForScratchGPU& scratch, ConditionsProducts const& conditions, ConfigurationParameters const& configParameters, cudaStream_t cudaStream) { using DataType = SampleVector::Scalar; unsigned int totalChannels = eventInputGPU.ebDigis.size + eventInputGPU.eeDigis.size; // unsigned int threads_min = conf.threads.x; // TODO: configure from python unsigned int threads_min = configParameters.kernelMinimizeThreads[0]; unsigned int blocks_min = threads_min > totalChannels ? 1 : (totalChannels + threads_min - 1) / threads_min; uint32_t const offsetForHashes = conditions.offsetForHashes; uint32_t const offsetForInputs = eventInputGPU.ebDigis.size; auto const nbytesShared = 2 * threads_min * calo::multifit::MapSymM<DataType, SampleVector::RowsAtCompileTime>::total * sizeof(DataType); kernel_minimize<<<blocks_min, threads_min, nbytesShared, cudaStream>>>( eventInputGPU.ebDigis.ids.get(), eventInputGPU.eeDigis.ids.get(), (SampleMatrix*)scratch.noisecov.get(), conditions.pulseCovariances.values, (BXVectorType*)scratch.activeBXs.get(), (SampleVector*)scratch.samples.get(), (SampleVector*)eventOutputGPU.recHitsEB.amplitudesAll.get(), (SampleVector*)eventOutputGPU.recHitsEE.amplitudesAll.get(), (PulseMatrixType*)scratch.pulse_matrix.get(), eventOutputGPU.recHitsEB.chi2.get(), eventOutputGPU.recHitsEE.chi2.get(), eventOutputGPU.recHitsEB.amplitude.get(), eventOutputGPU.recHitsEE.amplitude.get(), scratch.acState.get(), totalChannels, 50, offsetForHashes, offsetForInputs); cudaCheck(cudaGetLastError()); } } // namespace v1 } // namespace multifit } // namespace ecal
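// --- Illustrative sketch (not part of the original sources) -----------------
// kernel_minimize sizes its dynamic shared memory as
//   2 * threads * MapSymM<DataType, NSAMPLES>::total * sizeof(DataType),
// i.e. two packed symmetric N x N matrices per thread. The helper below shows
// the packed lower-triangular layout such a MapSymM-style wrapper is assumed
// to use (column-major walk over the lower triangle); the exact layout of
// calo::multifit::MapSymM may differ, so treat this purely as an assumption.
#include <cstddef>

template <typename T, int N>
struct PackedSymLower {
  static constexpr int total = N * (N + 1) / 2;   // unique elements of a sym. N x N
  T* data;                                        // caller-provided storage (e.g. shared mem)

  // Index (row, col) with row >= col; column c starts after the
  // N + (N-1) + ... + (N-c+1) entries of the earlier columns.
  __host__ __device__ T& operator()(int row, int col) {
    const int start_of_col = col * N - (col * (col - 1)) / 2;
    return data[start_of_col + (row - col)];
  }
};

// Example shared-memory budget for a block of `threads` threads, two matrices each
// (NSAMPLES assumed to be 10, as the FIXME comment in the kernel suggests):
inline std::size_t shared_bytes_per_block(int threads) {
  return 2u * static_cast<std::size_t>(threads) *
         PackedSymLower<float, 10>::total * sizeof(float);
}
// -----------------------------------------------------------------------------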
#define LBANN_LAYER_NORM_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/regularizers/layer_norm.hpp" #include "lbann/utils/gpu/helpers.hpp" #include <thrust/pair.h> namespace lbann { namespace { /** Functor for adding @c thrust::pair objects. */ template <typename Pair> struct pair_sum { __device__ __forceinline__ Pair operator()(const Pair& x, const Pair& y) { return Pair(x.first+y.first, x.second+y.second); } }; /** Accumulate sums and sums of squares for each data sample. * * On input, sums and sqsums are filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (local_sample_size / bsize) x local_num_samples x 1 */ template <size_t bdimx, typename TensorDataType> __global__ void fp_sums_kernel( size_t local_num_samples, size_t local_sample_size, const TensorDataType* __restrict__ vals, size_t vals_ldim, TensorDataType* sums, size_t sums_stride, TensorDataType* sqsums, size_t sqsums_stride) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x + blockDim.x * threadIdx.y; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t i = gidy; i < local_num_samples; i += nthreadsy) { // Accumulate sums and perform block-wide reduction using pair_t = thrust::pair<TensorDataType,TensorDataType>; using pair_sum_t = pair_sum<pair_t>; pair_t sum_sqsum(0,0); for (size_t j = gidx; j < local_sample_size; j += nthreadsx) { const auto& x = vals[i*vals_ldim + j]; sum_sqsum.first += x; sum_sqsum.second += x * x; } sum_sqsum = gpu_lib::block_reduce<bdimx,bdimy,bdimz,pair_t,pair_sum_t>(sum_sqsum); // Output result to global memory if (tid == 0) { gpu_lib::atomic_add(&sums[i*sums_stride], sum_sqsum.first); gpu_lib::atomic_add(&sqsums[i*sqsums_stride], sum_sqsum.second); } } } /** Compute per-sample statistics. * * mean = sum(x_i) / n * * var = ( sum(x_i^2)/n - mean^2 ) * n/(n-1) * * On input, means contains per-sample sums and vars contains * per-sample sums of squares. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (local_num_samples / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void fp_statistics_kernel( unsigned long long sample_size, size_t local_num_samples, TensorDataType* means, size_t means_stride, TensorDataType* vars, size_t vars_stride) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t i = gid; i < local_num_samples; i += nthreads) { const auto sum = means[i*means_stride]; const auto sqsum = vars[i*means_stride]; const TensorDataType sample_size_dt = TensorDataType(sample_size); const auto& mean = sum / sample_size_dt; const auto& sqmean = sqsum / sample_size_dt; const auto& var = (sqmean - mean*mean) * sample_size_dt / TensorDataType(sample_size-1); means[i*means_stride] = mean; vars[i*vars_stride] = gpu_lib::max(var, TensorDataType(0.0)); } } /** Compute outputs. 
* * y_i = (x_i - mean) / sqrt(var + epsilon) * * Block dimensions: bdimx x bdimy x 1 * * Grid dimensions: (local_sample_size / bdimx) x (local_num_samples / bdimy) x 1 */ template <typename TensorDataType> __global__ void fp_output_kernel( size_t local_num_samples, size_t local_sample_size, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* means, size_t means_stride, const TensorDataType* vars, size_t vars_stride) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t i = gidy; i < local_num_samples; i += nthreadsy) { const auto& mean = means[i*means_stride]; const auto& var = vars[i*vars_stride]; const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon); for (size_t j = gidx; j < local_sample_size; j += nthreadsx) { const auto& x = input[i*input_ldim + j]; auto& y = output[i*output_ldim + j]; y = (x - mean) * inv_stdev; } } } /** @brief Forward prop */ template <typename TensorDataType> void fp_impl(lbann_comm& comm, TensorDataType epsilon, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& output, El::AbstractDistMatrix<TensorDataType>& statistics) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(input.LockedMatrix()); auto& local_output = dynamic_cast<GPUMatType&>(output.Matrix()); auto& local_statistics = dynamic_cast<GPUMatType&>(statistics.Matrix()); auto local_means = El::View(local_statistics, El::IR(0), El::ALL); auto local_vars = El::View(local_statistics, El::IR(1), El::ALL); // Dimensions const size_t sample_size = input.Height(); const size_t local_num_samples = local_input.Width(); const size_t local_sample_size = local_input.Height(); // Trivial cases if (local_num_samples < 1) { return; } // Compute sums El::Zero(statistics); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_sample_size + block_size - 1) / block_size; grid_dims.y = local_num_samples; hydrogen::gpu::LaunchKernel( fp_sums_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_num_samples, local_sample_size, local_input.LockedBuffer(), local_input.LDim(), local_means.Buffer(), local_means.LDim(), local_vars.Buffer(), local_vars.LDim()); } comm.allreduce(statistics, statistics.RedundantComm(), El::mpi::SUM); // Compute statistics from sums if (sample_size <= 1) { // local_means already has correct values El::Fill(local_vars, El::TypeTraits<TensorDataType>::One()); } else if (!local_statistics.IsEmpty()) { auto sync_info = gpu::get_sync_info(local_statistics); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_num_samples + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( fp_statistics_kernel<TensorDataType>, grid_dims, block_dims, 0, sync_info, sample_size, local_num_samples, local_means.Buffer(), local_means.LDim(), local_vars.Buffer(), local_vars.LDim()); } // Apply layer norm if (!local_output.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), 
gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_sample_size + block_size - 1) / block_size; grid_dims.y = local_num_samples; hydrogen::gpu::LaunchKernel( fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_num_samples, local_sample_size, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), local_means.LockedBuffer(), local_means.LDim(), local_vars.LockedBuffer(), local_vars.LDim()); } } /** Compute gradients w.r.t. per-sample statistics. * * dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon) * * dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2 * * On input, means_grad and vars_grad are filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (local_sample_size / bsize) x local_num_samples x 1 */ template <size_t bdimx, typename TensorDataType> __global__ void bp_statistics_grad_kernel( size_t local_num_samples, size_t local_sample_size, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ output_grad, size_t output_grad_ldim, const TensorDataType* means, size_t means_stride, const TensorDataType* vars, size_t vars_stride, TensorDataType* means_grad, size_t means_grad_stride, TensorDataType* vars_grad, size_t vars_grad_stride) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x + blockDim.x * threadIdx.y; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t i = gidy; i < local_num_samples; i += nthreadsy) { // Accumulate sums and perform block-wide reduction using pair_t = thrust::pair<TensorDataType,TensorDataType>; using pair_sum_t = pair_sum<pair_t>; pair_t sums(0,0); const auto& mean = means[i*means_stride]; for (size_t j = gidx; j < local_sample_size; j += nthreadsx) { const auto& x = input[i*input_ldim + j]; const auto& dy = output_grad[i*output_grad_ldim + j]; sums.first += dy; sums.second += dy * (x - mean); } sums = gpu_lib::block_reduce<bdimx,bdimy,bdimz,pair_t,pair_sum_t>(sums); // Output result to global memory if (tid == 0) { const auto& var = vars[i*vars_stride]; const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon); const TensorDataType dmean = -sums.first * inv_stdev; const TensorDataType dvar = -sums.second * inv_stdev*inv_stdev*inv_stdev / TensorDataType(2); gpu_lib::atomic_add(&means_grad[i*means_grad_stride], dmean); gpu_lib::atomic_add(&vars_grad[i*vars_grad_stride], dvar); } } } /** Compute gradients w.r.t. input. 
* * dL/dx_i = ( dL/dy_i / sqrt(var+epsilon) * + dL/dmean / n * + dL/dvar * (x_i - mean) * 2/(n-1) ) * * Block dimensions: bdimx x bdimy x 1 * * Grid dimensions: (local_sample_size / bdimx) x (local_num_samples / bdimy) x 1 */ template <typename TensorDataType> __global__ void bp_input_grad_kernel( unsigned long long sample_size, size_t local_num_samples, size_t local_sample_size, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ output_grad, size_t output_grad_ldim, TensorDataType* __restrict__ input_grad, size_t input_grad_ldim, const TensorDataType* __restrict__ means, size_t means_stride, const TensorDataType* __restrict__ vars, size_t vars_stride, const TensorDataType* means_grad, size_t means_grad_stride, const TensorDataType* vars_grad, size_t vars_grad_stride) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t i = gidy; i < local_num_samples; i += nthreadsy) { const auto& mean = means[i*means_stride]; const auto& var = vars[i*vars_stride]; const auto& inv_stdev = gpu_lib::rsqrt(var + epsilon); const auto& dmean = means_grad[i*means_grad_stride]; const auto& dvar = vars_grad[i*vars_grad_stride]; for (size_t j = gidx; j < local_sample_size; j += nthreadsx) { const auto& x = input[i*input_ldim + j]; const auto& dy = output_grad[i*output_grad_ldim + j]; auto& dx = input_grad[i*input_grad_ldim + j]; dx = (dy * inv_stdev + dmean / TensorDataType(sample_size) + dvar * (x - mean) * TensorDataType(2) / TensorDataType(sample_size - 1)); } } } /** @brief Backprop */ template <typename TensorDataType> void bp_impl(lbann_comm& comm, TensorDataType epsilon, const El::AbstractDistMatrix<TensorDataType>& input, const El::AbstractDistMatrix<TensorDataType>& output_grad, El::AbstractDistMatrix<TensorDataType>& input_grad, const El::AbstractDistMatrix<TensorDataType>& statistics, El::AbstractDistMatrix<TensorDataType>& statistics_grad) { using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>; // Local matrices const auto& local_input = dynamic_cast<const GPUMatType&>(input.LockedMatrix()); const auto& local_output_grad = dynamic_cast<const GPUMatType&>(output_grad.LockedMatrix()); auto& local_input_grad = dynamic_cast<GPUMatType&>(input_grad.Matrix()); const auto& local_statistics = dynamic_cast<const GPUMatType&>(statistics.LockedMatrix()); const auto local_means = El::LockedView(local_statistics, El::IR(0), El::ALL); const auto local_vars = El::LockedView(local_statistics, El::IR(1), El::ALL); auto& local_statistics_grad = dynamic_cast<GPUMatType&>(statistics_grad.Matrix()); auto local_means_grad = El::View(local_statistics_grad, El::IR(0), El::ALL); auto local_vars_grad = El::View(local_statistics_grad, El::IR(1), El::ALL); // Dimensions const size_t sample_size = input.Height(); const size_t local_num_samples = local_input.Width(); const size_t local_sample_size = local_input.Height(); // Trivial case if sample size <= 1 // Note: Output is constant, so error signal is zero. if (sample_size <= 1) { El::Zero(input_grad); return; } // Compute gradient w.r.t. 
statistics El::Zero(statistics_grad); if (!local_output_grad.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics_grad), gpu::get_sync_info(local_output_grad), gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_sample_size + block_size - 1) / block_size; grid_dims.y = local_num_samples; hydrogen::gpu::LaunchKernel( bp_statistics_grad_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_num_samples, local_sample_size, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_output_grad.LockedBuffer(), local_output_grad.LDim(), local_means.LockedBuffer(), local_means.LDim(), local_vars.LockedBuffer(), local_vars.LDim(), local_means_grad.Buffer(), local_means_grad.LDim(), local_vars_grad.Buffer(), local_vars_grad.LDim()); } comm.allreduce(statistics_grad, statistics_grad.RedundantComm(), El::mpi::SUM); // Compute gradient w.r.t. input if (!local_input_grad.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_statistics_grad), gpu::get_sync_info(local_output_grad), gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_sample_size + block_size - 1) / block_size; grid_dims.y = local_num_samples; hydrogen::gpu::LaunchKernel( bp_input_grad_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, sample_size, local_num_samples, local_sample_size, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_output_grad.LockedBuffer(), local_output_grad.LDim(), local_input_grad.Buffer(), local_input_grad.LDim(), local_means.LockedBuffer(), local_means.LDim(), local_vars.LockedBuffer(), local_vars.LDim(), local_means_grad.LockedBuffer(), local_means_grad.LDim(), local_vars_grad.LockedBuffer(), local_vars_grad.LDim()); } } } // namespace <anon> // Template instantiation template <typename TensorDataType, data_layout Layout, El::Device Device> void layer_norm_layer<TensorDataType, Layout, Device>::fp_compute() { fp_impl(*this->get_comm(), this->m_epsilon, this->get_prev_activations(), this->get_activations(), *this->m_statistics); } template <typename TensorDataType, data_layout Layout, El::Device Device> void layer_norm_layer<TensorDataType, Layout, Device>::bp_compute() { bp_impl(*this->get_comm(), this->m_epsilon, this->get_prev_activations(), this->get_prev_error_signals(), this->get_error_signals(), *this->m_statistics, *this->m_statistics_gradient); } #define PROTO(T) \ template class layer_norm_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class layer_norm_layer< \ T, data_layout::MODEL_PARALLEL, El::Device::GPU> #include "lbann/macros/instantiate.hpp" } // namespace lbann
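// --- Illustrative sketch (not part of the original sources) -----------------
// A host-side reference for the forward pass computed by fp_sums_kernel,
// fp_statistics_kernel and fp_output_kernel above: per-sample mean, the
// unbiased variance recovered from the sum of squares, and the normalized
// output. Useful for unit-testing the GPU path. The column-major layout
// (one sample per column), the variance clamp at zero, and the epsilon
// placement mirror the kernels; everything else is a plain re-derivation of
// the doc-comment formulas, not LBANN API.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// input is sample_size x num_samples, column-major. Requires sample_size > 1
// (the GPU path special-cases sample_size <= 1 separately).
inline void layer_norm_reference(const std::vector<float>& input,
                                 std::size_t sample_size,
                                 std::size_t num_samples,
                                 float epsilon,
                                 std::vector<float>& output) {
  output.resize(input.size());
  for (std::size_t s = 0; s < num_samples; ++s) {
    const float* x = &input[s * sample_size];
    float* y = &output[s * sample_size];
    double sum = 0.0, sqsum = 0.0;
    for (std::size_t j = 0; j < sample_size; ++j) {
      sum += x[j];
      sqsum += static_cast<double>(x[j]) * x[j];
    }
    const double n = static_cast<double>(sample_size);
    const double mean = sum / n;
    // var = (E[x^2] - mean^2) * n/(n-1), clamped at zero as in fp_statistics_kernel.
    const double var = std::max(0.0, (sqsum / n - mean * mean) * n / (n - 1.0));
    const double inv_stdev = 1.0 / std::sqrt(var + epsilon);
    for (std::size_t j = 0; j < sample_size; ++j) {
      y[j] = static_cast<float>((x[j] - mean) * inv_stdev);
    }
  }
}
// -----------------------------------------------------------------------------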
#ifdef USE_CUBLAS void LMFitCUDA::solve_equation_systems_lup() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); // initialize components of equation systems gpu_data_.copy(gpu_data_.decomposed_hessians_, gpu_data_.hessians_, n_fits_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_); // decompose hessians cublasStatus_t lu_status_decopmposition = DECOMPOSE_LUP( gpu_data_.cublas_handle_, info_.n_parameters_to_fit_, gpu_data_.pointer_decomposed_hessians_, info_.n_parameters_to_fit_, gpu_data_.pivot_vectors_, gpu_data_.solution_info_, n_fits_); // initialize deltas with values of gradients gpu_data_.copy(gpu_data_.deltas_, gpu_data_.gradients_, n_fits_ * info_.n_parameters_to_fit_); // TODO: check solution_info int solution_info; // solve equation systems cublasStatus_t lu_status_solution = SOLVE_LUP( gpu_data_.cublas_handle_, CUBLAS_OP_N, info_.n_parameters_to_fit_, 1, (REAL const **)(gpu_data_.pointer_decomposed_hessians_.data()), info_.n_parameters_to_fit_, gpu_data_.pivot_vectors_, gpu_data_.pointer_deltas_, info_.n_parameters_to_fit_, &solution_info, n_fits_); } #else //USE_CUBLAS void LMFitCUDA::solve_equation_systems_gj() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); int n_parameters_pow2 = 1; while (n_parameters_pow2 < info_.n_parameters_to_fit_) { n_parameters_pow2 *= 2; } //set up to run the Gauss Jordan elimination int const n_equations = info_.n_parameters_to_fit_; int const n_solutions = n_fits_; threads.x = n_equations + 1; threads.y = n_equations; blocks.x = n_solutions; //set the size of the shared memory area for each block int const shared_size = sizeof(REAL) * ((threads.x * threads.y) + n_parameters_pow2 + n_parameters_pow2); //run the Gauss Jordan elimination cuda_gaussjordan <<< blocks, threads, shared_size >>>( gpu_data_.deltas_, gpu_data_.gradients_, gpu_data_.hessians_, gpu_data_.finished_, gpu_data_.solution_info_, info_.n_parameters_to_fit_, n_parameters_pow2); CUDA_CHECK_STATUS(cudaGetLastError()); } #endif // USE_CUBLAS void LMFitCUDA::update_states() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); //set up to update the lm_state_gpu_ variable with the Gauss Jordan results threads.x = std::min(n_fits_, 256); blocks.x = int(std::ceil(REAL(n_fits_) / REAL(threads.x))); //update the gpu_data_.states_ variable cuda_update_state_after_solving <<< blocks, threads >>>( n_fits_, gpu_data_.solution_info_, gpu_data_.finished_, gpu_data_.states_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::scale_hessians() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = info_.n_parameters_to_fit_*info_.n_fits_per_block_; blocks.x = n_fits_ / info_.n_fits_per_block_; cuda_modify_step_widths <<< blocks, threads >>>( gpu_data_.hessians_, gpu_data_.lambdas_, gpu_data_.scaling_vectors_, info_.n_parameters_to_fit_, gpu_data_.iteration_failed_, gpu_data_.finished_, info_.n_fits_per_block_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::project_parameters_to_box() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = info_.n_parameters_to_fit_*info_.n_fits_per_block_; blocks.x = n_fits_ / info_.n_fits_per_block_; cuda_project_parameters_to_box <<< blocks, threads >>>( gpu_data_.parameters_, info_.n_parameters_, info_.n_parameters_to_fit_, gpu_data_.parameters_to_fit_indices_, gpu_data_.constraints_, gpu_data_.constraint_types_, gpu_data_.finished_, info_.n_fits_per_block_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::update_parameters() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = info_.n_parameters_*info_.n_fits_per_block_; 
blocks.x = n_fits_ / info_.n_fits_per_block_; cuda_update_parameters <<< blocks, threads >>>( gpu_data_.parameters_, gpu_data_.prev_parameters_, gpu_data_.deltas_, info_.n_parameters_to_fit_, gpu_data_.parameters_to_fit_indices_, gpu_data_.finished_, info_.n_fits_per_block_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::calc_curve_values() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = info_.n_points_ * info_.n_fits_per_block_ / info_.n_blocks_per_fit_; if (info_.n_blocks_per_fit_ > 1) threads.x += info_.n_points_ % threads.x; blocks.x = n_fits_ / info_.n_fits_per_block_ * info_.n_blocks_per_fit_; cuda_calc_curve_values <<< blocks, threads >>>( gpu_data_.parameters_, n_fits_, info_.n_points_, info_.n_parameters_, gpu_data_.finished_, gpu_data_.values_, gpu_data_.derivatives_, info_.n_fits_per_block_, info_.n_blocks_per_fit_, info_.model_id_, gpu_data_.chunk_index_, gpu_data_.user_info_, info_.user_info_size_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::calc_chi_squares() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = info_.power_of_two_n_points_ * info_.n_fits_per_block_ / info_.n_blocks_per_fit_; blocks.x = n_fits_ / info_.n_fits_per_block_ * info_.n_blocks_per_fit_; int const shared_size = sizeof(REAL) * threads.x; REAL * chi_squares = info_.n_blocks_per_fit_ > 1 ? gpu_data_.subtotals_ : gpu_data_.chi_squares_; cuda_calculate_chi_squares <<< blocks, threads, shared_size >>>( chi_squares, gpu_data_.states_, gpu_data_.data_, gpu_data_.values_, gpu_data_.weights_, info_.n_points_, n_fits_, info_.estimator_id_, gpu_data_.finished_, info_.n_fits_per_block_, gpu_data_.user_info_, info_.user_info_size_); CUDA_CHECK_STATUS(cudaGetLastError()); threads.x = std::min(n_fits_, 256); blocks.x = int(std::ceil(REAL(n_fits_) / REAL(threads.x))); if (info_.n_blocks_per_fit_ > 1) { cuda_sum_chi_square_subtotals <<< blocks, threads >>> ( gpu_data_.chi_squares_, gpu_data_.subtotals_, info_.n_blocks_per_fit_, n_fits_, gpu_data_.finished_); CUDA_CHECK_STATUS(cudaGetLastError()); } cuda_check_fit_improvement <<< blocks, threads >>>( gpu_data_.iteration_failed_, gpu_data_.chi_squares_, gpu_data_.prev_chi_squares_, n_fits_, gpu_data_.finished_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::calc_gradients() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = info_.power_of_two_n_points_ * info_.n_fits_per_block_ / info_.n_blocks_per_fit_; blocks.x = n_fits_ / info_.n_fits_per_block_ * info_.n_blocks_per_fit_; int const shared_size = sizeof(REAL) * threads.x; REAL * gradients = info_.n_blocks_per_fit_ > 1 ? 
gpu_data_.subtotals_ : gpu_data_.gradients_; cuda_calculate_gradients <<< blocks, threads, shared_size >>>( gradients, gpu_data_.data_, gpu_data_.values_, gpu_data_.derivatives_, gpu_data_.weights_, info_.n_points_, n_fits_, info_.n_parameters_, info_.n_parameters_to_fit_, gpu_data_.parameters_to_fit_indices_, info_.estimator_id_, gpu_data_.finished_, gpu_data_.iteration_failed_, info_.n_fits_per_block_, gpu_data_.user_info_, info_.user_info_size_); CUDA_CHECK_STATUS(cudaGetLastError()); if (info_.n_blocks_per_fit_ > 1) { int const gradients_size = n_fits_ * info_.n_parameters_to_fit_; threads.x = std::min(gradients_size, 256); blocks.x = int(std::ceil(REAL(gradients_size) / REAL(threads.x))); cuda_sum_gradient_subtotals <<< blocks, threads >>> ( gpu_data_.gradients_, gpu_data_.subtotals_, info_.n_blocks_per_fit_, n_fits_, info_.n_parameters_to_fit_, gpu_data_.iteration_failed_, gpu_data_.finished_); CUDA_CHECK_STATUS(cudaGetLastError()); } } void LMFitCUDA::calc_hessians() { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); int const n_unique_values = info_.n_parameters_to_fit_ * (info_.n_parameters_to_fit_ + 1) / 2; int n_hessians_per_block = 1; if (info_.n_parameters_to_fit_) { while ((n_hessians_per_block + 1) * n_unique_values < info_.warp_size_) { n_hessians_per_block++; } } int const temp_threads_x = n_unique_values * n_hessians_per_block; threads.x = std::min(temp_threads_x, info_.max_threads_); blocks.y = temp_threads_x / info_.max_threads_ + int((temp_threads_x % info_.max_threads_) > 0); blocks.x = n_fits_ / n_hessians_per_block + int((n_fits_ % n_hessians_per_block) > 0); cuda_calculate_hessians <<< blocks, threads >>>( gpu_data_.hessians_, gpu_data_.data_, gpu_data_.values_, gpu_data_.derivatives_, gpu_data_.weights_, n_fits_, info_.n_points_, info_.n_parameters_, info_.n_parameters_to_fit_, gpu_data_.parameters_to_fit_indices_, info_.estimator_id_, gpu_data_.iteration_failed_, gpu_data_.finished_, gpu_data_.user_info_, info_.user_info_size_); CUDA_CHECK_STATUS(cudaGetLastError()); } void LMFitCUDA::evaluate_iteration(int const iteration) { dim3 threads(1, 1, 1); dim3 blocks(1, 1, 1); threads.x = std::min(n_fits_, 256); blocks.x = int(std::ceil(REAL(n_fits_) / REAL(threads.x))); cuda_check_for_convergence<<< blocks, threads >>>( gpu_data_.finished_, tolerance_, gpu_data_.states_, gpu_data_.chi_squares_, gpu_data_.prev_chi_squares_, iteration, info_.max_n_iterations_, n_fits_); CUDA_CHECK_STATUS(cudaGetLastError()); gpu_data_.set(gpu_data_.all_finished_, 1); cuda_evaluate_iteration<<< blocks, threads >>>( gpu_data_.all_finished_, gpu_data_.n_iterations_, gpu_data_.finished_, iteration, gpu_data_.states_, n_fits_); CUDA_CHECK_STATUS(cudaGetLastError()); gpu_data_.read(&all_finished_, gpu_data_.all_finished_); cuda_prepare_next_iteration<<< blocks, threads >>>( gpu_data_.lambdas_, gpu_data_.chi_squares_, gpu_data_.prev_chi_squares_, gpu_data_.parameters_, gpu_data_.prev_parameters_, n_fits_, info_.n_parameters_); CUDA_CHECK_STATUS(cudaGetLastError()); }
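// --- Illustrative sketch (not part of the original sources) -----------------
// solve_equation_systems_gj() above rounds the parameter count up to the next
// power of two and budgets dynamic shared memory as
//   sizeof(REAL) * (threads.x * threads.y + 2 * n_parameters_pow2).
// The small helpers below reproduce just that sizing arithmetic so it can be
// sanity-checked on the host; REAL is configurable in Gpufit, float is assumed
// here for the example.
#include <cstddef>

using Real = float;   // assumption: Gpufit built with REAL = float

inline int next_pow2(int n) {
  int p = 1;
  while (p < n) { p *= 2; }
  return p;
}

// Shared-memory bytes per block for the Gauss-Jordan kernel: one
// (n_equations + 1) x n_equations augmented system plus two scratch vectors of
// next_pow2(n_equations) entries each.
inline std::size_t gauss_jordan_shared_bytes(int n_equations) {
  const std::size_t pow2 = static_cast<std::size_t>(next_pow2(n_equations));
  const std::size_t threads_x = static_cast<std::size_t>(n_equations) + 1;
  const std::size_t threads_y = static_cast<std::size_t>(n_equations);
  return sizeof(Real) * (threads_x * threads_y + 2u * pow2);
}

// e.g. for a 5-parameter fit: gauss_jordan_shared_bytes(5)
//      = 4 * (6 * 5 + 2 * 8) = 184 bytes per block.
// -----------------------------------------------------------------------------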
#pragma once #include <gunrock/util/array_utils.cuh> #include <gunrock/graph/graph_base.cuh> #include <gunrock/graph/coo.cuh> #include <gunrock/util/binary_search.cuh> #include <gunrock/util/device_intrinsics.cuh> namespace gunrock { namespace graph { /** * @brief CSR data structure which uses Compressed Sparse Row * format to store a graph. It is a compressed way to present * the graph as a sparse matrix. * * @tparam VertexT Vertex identifier type. * @tparam SizeT Graph size type. * @tparam ValueT Associated value type. */ template< typename _VertexT = int, typename _SizeT = _VertexT, typename _ValueT = _VertexT, GraphFlag _FLAG = GRAPH_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault, bool VALID = true> struct Csr : public GraphBase<_VertexT, _SizeT, _ValueT, _FLAG | HAS_CSR, cudaHostRegisterFlag> { typedef _VertexT VertexT; typedef _SizeT SizeT; typedef _ValueT ValueT; static const GraphFlag FLAG = _FLAG | HAS_CSR; static const util::ArrayFlag ARRAY_FLAG = util::If_Val<(FLAG & GRAPH_PINNED) != 0, (FLAG & ARRAY_RESERVE) | util::PINNED, FLAG & ARRAY_RESERVE>::Value; typedef GraphBase<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> BaseGraph; typedef Csr<VertexT, SizeT, ValueT, _FLAG, cudaHostRegisterFlag> CsrT; // Column indices corresponding to all the // non-zero values in the sparse matrix util::Array1D<SizeT, VertexT, ARRAY_FLAG, cudaHostRegisterFlag> column_indices; // List of indices where each row of the // sparse matrix starts util::Array1D<SizeT, SizeT, ARRAY_FLAG, cudaHostRegisterFlag> row_offsets; typedef util::Array1D<SizeT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> Array_ValueT; typedef util::NullArray<SizeT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> Array_NValueT; // List of values attached to edges in the graph typename util::If<(FLAG & HAS_EDGE_VALUES) != 0, Array_ValueT, Array_NValueT>::Type edge_values; // List of values attached to nodes in the graph typename util::If<(FLAG & HAS_NODE_VALUES) != 0, Array_ValueT, Array_NValueT>::Type node_values; /** * @brief CSR Constructor * * @param[in] pinned Use pinned memory for CSR data structure * (default: do not use pinned memory) */ Csr() : BaseGraph() { column_indices.SetName("column_indices"); row_offsets .SetName("row_offsets"); edge_values .SetName("edge_values"); node_values .SetName("node_values"); } /** * @brief CSR destructor */ __host__ __device__ ~Csr() { //Release(); } /** * @brief Deallocates CSR graph */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(row_offsets .Release(target)); GUARD_CU(column_indices.Release(target)); GUARD_CU(node_values .Release(target)); GUARD_CU(edge_values .Release(target)); GUARD_CU(BaseGraph ::Release(target)); return retval; } /** * @brief Allocate memory for CSR graph. 
* * @param[in] nodes Number of nodes in COO-format graph * @param[in] edges Number of edges in COO-format graph */ cudaError_t Allocate(SizeT nodes, SizeT edges, util::Location target = GRAPH_DEFAULT_TARGET) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseGraph ::Allocate(nodes, edges, target)); GUARD_CU(row_offsets .Allocate(nodes + 1 , target)); GUARD_CU(column_indices.Allocate(edges , target)); GUARD_CU(node_values .Allocate(nodes , target)); GUARD_CU(edge_values .Allocate(edges , target)); return retval; } cudaError_t Move( util::Location source, util::Location target, cudaStream_t stream = 0) { cudaError_t retval = cudaSuccess; SizeT invalid_size = util::PreDefinedValues<SizeT>::InvalidValue; GUARD_CU(BaseGraph ::Move(source, target, stream)); GUARD_CU(row_offsets .Move(source, target, invalid_size, 0, stream)); GUARD_CU(column_indices.Move(source, target, invalid_size, 0, stream)); GUARD_CU(edge_values .Move(source, target, invalid_size, 0, stream)); GUARD_CU(node_values .Move(source, target, invalid_size, 0, stream)); return retval; } template < typename VertexT_in, typename SizeT_in, typename ValueT_in, GraphFlag FLAG_in, unsigned int cudaHostRegisterFlag_in> cudaError_t FromCsr( Csr<VertexT_in, SizeT_in, ValueT_in, FLAG_in, cudaHostRegisterFlag_in> &source, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { cudaError_t retval = cudaSuccess; if (target == util::LOCATION_DEFAULT) target = source.row_offsets.GetSetted() | source.row_offsets.GetAllocated(); GUARD_CU(BaseGraph::Set(source)); GUARD_CU(Allocate(source.nodes, source.edges, target)); GUARD_CU(row_offsets .Set(source.row_offsets, this -> nodes + 1, target, stream)); GUARD_CU(column_indices.Set(source.column_indices, this -> edges, target, stream)); GUARD_CU(edge_values .Set(source.edge_values, this -> edges, target, stream)); GUARD_CU(node_values .Set(source.node_values, this -> nodes, target, stream)); return retval; } /** * @brief Build CSR graph from COO graph, sorted or unsorted * * @param[in] output_file Output file to dump the graph topology info * @param[in] coo Pointer to COO-format graph * @param[in] coo_nodes Number of nodes in COO-format graph * @param[in] coo_edges Number of edges in COO-format graph * @param[in] ordered_rows Are the rows sorted? If not, sort them. * @param[in] undirected Is the graph directed or not? * @param[in] reversed Is the graph reversed or not? * @param[in] quiet Don't print out anything. * * Default: Assume rows are not sorted. */ template <typename GraphT> cudaError_t FromCoo( GraphT &source, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, //bool ordered_rows = false, //bool undirected = false, //bool reversed = false, bool quiet = false) { typedef typename GraphT::CooT CooT; //typedef Coo<VertexT_in, SizeT_in, ValueT_in, FLAG_in, // cudaHostRegisterFlag_in> CooT; util::PrintMsg("Converting " + std::to_string(source.CooT::nodes) + " vertices, " + std::to_string(source.CooT::edges) + (source.CooT::directed ? " directed" : " undirected") + " edges (" + (source.CooT::edge_order == BY_ROW_ASCENDING ? 
" ordered" : "unordered") + " tuples) to CSR format...", !quiet, false); time_t mark1 = time(NULL); cudaError_t retval = cudaSuccess; if (target == util::LOCATION_DEFAULT) target = source.CooT::edge_pairs.GetSetted() | source.CooT::edge_pairs.GetAllocated(); /*if (retval = BaseGraph:: template Set<typename CooT::CooT>((typename CooT::CooT)source)) return retval; */ this -> nodes = source.CooT::nodes; this -> edges = source.CooT::edges; this -> directed = source.CooT::directed; GUARD_CU(Allocate(source.CooT::nodes, source.CooT::edges, target)); // Sort COO by row GUARD_CU(source.CooT::Order(BY_ROW_ASCENDING, target, stream)); //source.CooT::Display(); // assign column_indices GUARD_CU(column_indices.ForEach(source.CooT::edge_pairs, []__host__ __device__ (VertexT &column_index, const typename CooT::EdgePairT &edge_pair){ column_index = edge_pair.y;}, this -> edges, target, stream)); // assign edge_values if (FLAG & HAS_EDGE_VALUES) { GUARD_CU(edge_values.ForEach(source.CooT::edge_values, []__host__ __device__ (ValueT &edge_value, const typename CooT::ValueT &edge_value_in){ edge_value = edge_value_in;}, this -> edges, target, stream)); } // assign row_offsets SizeT edges = this -> edges; SizeT nodes = this -> nodes; auto row_edge_compare = [] __host__ __device__ ( const typename CooT::EdgePairT &edge_pair, const VertexT &row){ return edge_pair.x < row; }; GUARD_CU(row_offsets.ForAll(source.CooT::edge_pairs, [nodes, edges, row_edge_compare] __host__ __device__ ( SizeT *row_offsets, const typename CooT::EdgePairT *edge_pairs, const VertexT &row){ if (row <= edge_pairs[0].x) row_offsets[row] = 0; else if (row < nodes) { auto pos = util::BinarySearch_LeftMost(row, edge_pairs, (SizeT)0, edges-1, row_edge_compare, [] (const typename CooT::EdgePairT &pair, const VertexT &row) { return (pair.x == row); }); //if (row > edge_pairs[edges-1].x) // pos = edges; //else { while (pos < edges && row > edge_pairs[pos].x) pos ++; //} //if (pos > edges || row >= edge_pairs[edges-1].x) // printf("Error row_offsets[%d] = %d\n", // row, pos); row_offsets[row] = pos; } else row_offsets[row] = edges; }, this -> nodes + 1, target, stream)); time_t mark2 = time(NULL); util::PrintMsg("Done (" + std::to_string(mark2 - mark1) + "s).", !quiet); //for (SizeT v = 0; v < nodes; v++) //{ // if (row_offsets [v] > row_offsets[v+1]) // { // util::PrintMsg("Error: row_offsets[" // + std::to_string(v) + "] = " + std::to_string(row_offsets[v]) // + " > row_offsets[" + std::to_string(v+1) // + "] = " + std::to_string(row_offsets[v+1])); // continue; // } // // if (row_offsets[v] < 0 || row_offsets[v] > edges) // { // util::PrintMsg("Error: row_offsets[" // + std::to_string(v) + "] = " + std::to_string(row_offsets[v]) // + " > edges = " + std::to_string(edges)); // continue; // } // // SizeT e_start = row_offsets[v]; // SizeT e_end = row_offsets[v+1]; // SizeT degree = e_end - e_start; // for (SizeT e = e_start; e < e_end; e++) // { // if (source.CooT::edge_pairs[e].x != v) // util::PrintMsg("Error: edge_pairs[" + std::to_string(e) // + "] = (" + std::to_string(source.CooT::edge_pairs[e].x) // + ", " + std::to_string(source.CooT::edge_pairs[e].y) // + ") != v " + std::to_string(v)); // } //} return retval; } template <typename GraphT> cudaError_t FromCsc( GraphT &source, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { typedef typename GraphT::CscT CscT; typedef Coo<VertexT, SizeT, ValueT, FLAG | HAS_COO, cudaHostRegisterFlag> CooT; cudaError_t retval = cudaSuccess; CooT coo; 
GUARD_CU(coo.FromCsc(source, target, stream, quiet)); GUARD_CU( FromCoo( coo, target, stream, quiet)); GUARD_CU(coo.Release()); return retval; } /** * @brief Display CSR graph to console * * @param[in] with_edge_value Whether display graph with edge values. */ cudaError_t Display( std::string graph_prefix = "", SizeT nodes_to_show = 40, bool with_edge_values = true) { cudaError_t retval = cudaSuccess; if (nodes_to_show > this -> nodes) nodes_to_show = this -> nodes; util::PrintMsg(graph_prefix + "Graph containing " + std::to_string(this -> nodes) + " vertices, " + std::to_string(this -> edges) + " edges, in CSR format." + " Neighbor list of first " + std::to_string(nodes_to_show) + " nodes :"); for (SizeT node = 0; node < nodes_to_show; node++) { std::string str = "v " + std::to_string(node) + " " + std::to_string(row_offsets[node]) + " : "; for (SizeT edge = row_offsets[node]; edge < row_offsets[node + 1]; edge++) { if (edge - row_offsets[node] > 40) break; str = str + "[" + std::to_string(column_indices[edge]); if (with_edge_values && (FLAG & HAS_EDGE_VALUES)) { str = str + "," + std::to_string(edge_values[edge]); } if (edge - row_offsets[node] != 40 && edge != row_offsets[node+1] -1) str = str + "], "; else str = str + "]"; } if (row_offsets[node + 1] - row_offsets[node] > 40) str = str + "..."; util::PrintMsg(str); } return retval; } /** * @brief Sort CSR graph edges per vertex in ascending order * */ cudaError_t Sort() { cudaError_t retval = cudaSuccess; SizeT num_nodes = this -> nodes; SizeT num_edges = this -> edges; typedef std::pair<VertexT, ValueT> EdgeValPairT; util::Array1D<SizeT, EdgeValPairT> sorted_neighbors; GUARD_CU(sorted_neighbors.Allocate(num_edges, util::HOST)); #pragma omp parallel do { int thread_num = omp_get_thread_num(); int num_threads = omp_get_num_threads(); SizeT node_start = (SizeT)(num_nodes) * thread_num / num_threads; SizeT node_end = (SizeT)(num_nodes) * (thread_num + 1) / num_threads; node_end = (thread_num == (num_threads - 1)) ? 
num_nodes : node_end; for (SizeT node = node_start; node < node_end; node++) { SizeT start_offset = row_offsets[node]; SizeT end_offset = row_offsets[node + 1]; for (SizeT off = start_offset; off < end_offset; off++) { sorted_neighbors[off] = std::make_pair(column_indices[off], edge_values[off]); } std::sort(sorted_neighbors + start_offset, sorted_neighbors + end_offset, [](const EdgeValPairT & a, const EdgeValPairT & b) -> bool { return a.first < b.first; } ); for (SizeT off = start_offset; off < end_offset; off++) { column_indices[off] = sorted_neighbors[off].first; edge_values [off] = sorted_neighbors[off].second; } } }while (false); GUARD_CU(sorted_neighbors.Release(util::HOST)); return cudaSuccess; } __device__ __host__ __forceinline__ SizeT GetNeighborListLength(const VertexT &v) const { if (util::lessThanZero(v) || v >= this -> nodes) return 0; return _ldg(row_offsets + (v+1)) - _ldg(row_offsets + v); } __device__ __host__ __forceinline__ SizeT GetNeighborListOffset(const VertexT &v) const { return _ldg(row_offsets + v); } __device__ __host__ __forceinline__ VertexT GetEdgeSrc(const SizeT &e) const { return util::BinarySearch_RightMost(e, row_offsets + 0, (SizeT)0, this -> nodes); } __device__ __host__ __forceinline__ VertexT GetEdgeDest(const SizeT &e) const { //return _ldg(column_indices + e); return column_indices[e]; } __device__ __host__ __forceinline__ void GetEdgeSrcDest(const SizeT &e, VertexT &src, VertexT &dest) const { src = util::BinarySearch_RightMost(e, row_offsets + 0, (SizeT)0, this -> nodes); dest = column_indices[e]; } __device__ __host__ __forceinline__ SizeT GetSrcDestEdge(const VertexT &src, const VertexT &dest) { return util::BinarySearch(dest, column_indices + 0, row_offsets[src], row_offsets[src + 1] - 1); } /*template <typename Tuple> void CsrToCsc(Csr<VertexId, SizeT, Value> &target, Csr<VertexId, SizeT, Value> &source) { target.nodes = source.nodes; target.edges = source.edges; target.average_degree = source.average_degree; target.average_edge_value = source.average_edge_value; target.average_node_value = source.average_node_value; target.out_nodes = source.out_nodes; { Tuple *coo = (Tuple*)malloc(sizeof(Tuple) * source.edges); int idx = 0; for (int i = 0; i < source.nodes; ++i) { for (int j = source.row_offsets[i]; j < source.row_offsets[i+1]; ++j) { coo[idx].row = source.column_indices[j]; coo[idx].col = i; coo[idx++].val = (source.edge_values == NULL) ? 0 : source.edge_values[j]; } } if (source.edge_values == NULL) target.template FromCoo<false>(NULL, coo, nodes, edges); else target.template FromCoo<true>(NULL, coo, nodes, edges); free(coo); } }*/ /** * * @brief Store graph information into a file. * * @param[in] file_name Original graph file path and name. * @param[in] v Number of vertices in input graph. * @param[in] e Number of edges in input graph. * @param[in] row Row-offsets array store row pointers. * @param[in] col Column-indices array store destinations. * @param[in] edge_values Per edge weight values associated. 
* */ /*void WriteBinary( char *file_name, SizeT v, SizeT e, SizeT *row, VertexId *col, Value *edge_values = NULL) { std::ofstream fout(file_name); if (fout.is_open()) { fout.write(reinterpret_cast<const char*>(&v), sizeof(SizeT)); fout.write(reinterpret_cast<const char*>(&e), sizeof(SizeT)); fout.write(reinterpret_cast<const char*>(row), (v + 1)*sizeof(SizeT)); fout.write(reinterpret_cast<const char*>(col), e * sizeof(VertexId)); if (edge_values != NULL) { fout.write(reinterpret_cast<const char*>(edge_values), e * sizeof(Value)); } fout.close(); } }*/ /* * @brief Write human-readable CSR arrays into 3 files. * Can be easily used for python interface. * * @param[in] file_name Original graph file path and name. * @param[in] v Number of vertices in input graph. * @param[in] e Number of edges in input graph. * @param[in] row_offsets Row-offsets array store row pointers. * @param[in] col_indices Column-indices array store destinations. * @param[in] edge_values Per edge weight values associated. */ /*void WriteCSR( char *file_name, SizeT v, SizeT e, SizeT *row_offsets, VertexId *col_indices, Value *edge_values = NULL) { std::cout << file_name << std::endl; char rows[256], cols[256], vals[256]; sprintf(rows, "%s.rows", file_name); sprintf(cols, "%s.cols", file_name); sprintf(vals, "%s.vals", file_name); std::ofstream rows_output(rows); if (rows_output.is_open()) { std::copy(row_offsets, row_offsets + v + 1, std::ostream_iterator<SizeT>(rows_output, "\n")); rows_output.close(); } std::ofstream cols_output(cols); if (cols_output.is_open()) { std::copy(col_indices, col_indices + e, std::ostream_iterator<VertexId>(cols_output, "\n")); cols_output.close(); } if (edge_values != NULL) { std::ofstream vals_output(vals); if (vals_output.is_open()) { std::copy(edge_values, edge_values + e, std::ostream_iterator<Value>(vals_output, "\n")); vals_output.close(); } } }*/ /* * @brief Write Ligra input CSR arrays into .adj file. * Can be easily used for python interface. * * @param[in] file_name Original graph file path and name. * @param[in] v Number of vertices in input graph. * @param[in] e Number of edges in input graph. * @param[in] row Row-offsets array store row pointers. * @param[in] col Column-indices array store destinations. * @param[in] edge_values Per edge weight values associated. * @param[in] quiet Don't print out anything. 
*/ /*void WriteToLigraFile( const char *file_name, SizeT v, SizeT e, SizeT *row, VertexId *col, Value *edge_values = NULL, bool quiet = false) { char adj_name[256]; sprintf(adj_name, "%s.adj", file_name); if (!quiet) { printf("writing to ligra .adj file.\n"); } std::ofstream fout3(adj_name); if (fout3.is_open()) { fout3 << "AdjacencyGraph" << std::endl << v << std::endl << e << std::endl; for (int i = 0; i < v; ++i) fout3 << row[i] << std::endl; for (int i = 0; i < e; ++i) fout3 << col[i] << std::endl; if (edge_values != NULL) { for (int i = 0; i < e; ++i) fout3 << edge_values[i] << std::endl; } fout3.close(); } } void WriteToMtxFile( const char *file_name, SizeT v, SizeT e, SizeT *row, VertexId *col, Value *edge_values = NULL, bool quiet = false) { char adj_name[256]; sprintf(adj_name, "%s.mtx", file_name); if (!quiet) { printf("writing to .mtx file.\n"); } std::ofstream fout3(adj_name); if (fout3.is_open()) { fout3 << v << " " << v << " " << e << std::endl; for (int i = 0; i < v; ++i) { SizeT begin = row[i]; SizeT end = row[i+1]; for (int j = begin; j < end; ++j) { fout3 << col[j]+1 << " " << i+1; if (edge_values != NULL) { fout3 << " " << edge_values[j] << std::endl; } else { fout3 << " " << rand() % 64 << std::endl; } } } fout3.close(); } }*/ /** * @brief Read from stored row_offsets, column_indices arrays. * * @tparam LOAD_EDGE_VALUES Whether or not to load edge values. * * @param[in] f_in Input file name. * @param[in] quiet Don't print out anything. */ /*template <bool LOAD_EDGE_VALUES> void FromCsr(char *f_in, bool quiet = false) { if (!quiet) { printf(" Reading directly from stored binary CSR arrays ...\n"); } time_t mark1 = time(NULL); std::ifstream input(f_in); SizeT v, e; input.read(reinterpret_cast<char*>(&v), sizeof(SizeT)); input.read(reinterpret_cast<char*>(&e), sizeof(SizeT)); FromScratch<LOAD_EDGE_VALUES, false>(v, e); input.read(reinterpret_cast<char*>(row_offsets), (v + 1)*sizeof(SizeT)); input.read(reinterpret_cast<char*>(column_indices), e * sizeof(VertexId)); if (LOAD_EDGE_VALUES) { input.read(reinterpret_cast<char*>(edge_values), e * sizeof(Value)); } time_t mark2 = time(NULL); if (!quiet) { printf("Done reading (%ds).\n", (int) (mark2 - mark1)); } // compute out_nodes SizeT out_node = 0; for (SizeT node = 0; node < nodes; node++) { if (row_offsets[node + 1] - row_offsets[node] > 0) { ++out_node; } } out_nodes = out_node; }*/ /** * @brief (Specific for SM) Read from stored row_offsets, column_indices arrays. * * @tparam LOAD_NODE_VALUES Whether or not to load node values. * * @param[in] f_in Input graph file name. * @param[in] f_label Input label file name. * @param[in] quiet Don't print out anything. 
*/ /*template <bool LOAD_NODE_VALUES> void FromCsr_SM(char *f_in, char *f_label, bool quiet = false) { if (!quiet) { printf(" Reading directly from stored binary CSR arrays ...\n"); if(LOAD_NODE_VALUES) printf(" Reading directly from stored binary label arrays ...\n"); } time_t mark1 = time(NULL); std::ifstream input(f_in); std::ifstream input_label(f_label); SizeT v, e; input.read(reinterpret_cast<char*>(&v), sizeof(SizeT)); input.read(reinterpret_cast<char*>(&e), sizeof(SizeT)); FromScratch<false, LOAD_NODE_VALUES>(v, e); input.read(reinterpret_cast<char*>(row_offsets), (v + 1)*sizeof(SizeT)); input.read(reinterpret_cast<char*>(column_indices), e * sizeof(VertexId)); if (LOAD_NODE_VALUES) { input_label.read(reinterpret_cast<char*>(node_values), v * sizeof(Value)); } // for(int i=0; i<v; i++) printf("%lld ", (long long)node_values[i]); printf("\n"); time_t mark2 = time(NULL); if (!quiet) { printf("Done reading (%ds).\n", (int) (mark2 - mark1)); } // compute out_nodes SizeT out_node = 0; for (SizeT node = 0; node < nodes; node++) { if (row_offsets[node + 1] - row_offsets[node] > 0) { ++out_node; } } out_nodes = out_node; }*/ /** * \addtogroup PublicInterface * @{ */ /** * @brief Check values. */ /*bool CheckValue() { for (SizeT node = 0; node < nodes; ++node) { for (SizeT edge = row_offsets[node]; edge < row_offsets[node + 1]; ++edge) { int src_node = node; int dst_node = column_indices[edge]; int edge_value = edge_values[edge]; for (SizeT r_edge = row_offsets[dst_node]; r_edge < row_offsets[dst_node + 1]; ++r_edge) { if (column_indices[r_edge] == src_node) { if (edge_values[r_edge] != edge_value) return false; } } } } return true; }*/ /** * @brief Find node with largest neighbor list * @param[in] max_degree Maximum degree in the graph. * * \return int the source node with highest degree */ /*int GetNodeWithHighestDegree(int& max_degree) { int degree = 0; int src = 0; for (SizeT node = 0; node < nodes; node++) { if (row_offsets[node + 1] - row_offsets[node] > degree) { degree = row_offsets[node + 1] - row_offsets[node]; src = node; } } max_degree = degree; return src; }*/ /** * @brief Display the neighbor list of a given node. * * @param[in] node Vertex ID to display. 
*/ /*void DisplayNeighborList(VertexId node) { if (node < 0 || node >= nodes) return; for (SizeT edge = row_offsets[node]; edge < row_offsets[node + 1]; edge++) { util::PrintValue(column_indices[edge]); printf(", "); } printf("\n"); }*/ /** * @brief Get the degrees of all the nodes in graph * * @param[in] node_degrees node degrees to fill in */ /*void GetNodeDegree(unsigned long long *node_degrees) { for(SizeT node=0; node < nodes; ++node) { node_degrees[node] = row_offsets[node+1]-row_offsets[node]; } }*/ /** * @brief Get the average node value in graph */ /*Value GetAverageNodeValue() { if (abs(average_node_value - 0) < 0.001 && node_values != NULL) { double mean = 0, count = 0; for (SizeT node = 0; node < nodes; ++node) { if (node_values[node] < UINT_MAX) { count += 1; mean += (node_values[node] - mean) / count; } } average_node_value = static_cast<Value>(mean); } return average_node_value; }*/ /** * @brief Get the average edge value in graph */ /*Value GetAverageEdgeValue() { if (abs(average_edge_value - 0) < 0.001 && edge_values != NULL) { double mean = 0, count = 0; for (SizeT edge = 0; edge < edges; ++edge) { if (edge_values[edge] < UINT_MAX) { count += 1; mean += (edge_values[edge] - mean) / count; } } } return average_edge_value; }*/ /**@}*/ }; // CSR template< typename VertexT, typename SizeT , typename ValueT , GraphFlag _FLAG , unsigned int cudaHostRegisterFlag> struct Csr<VertexT, SizeT, ValueT, _FLAG, cudaHostRegisterFlag, false> { cudaError_t Release(util::Location target = util::LOCATION_ALL) { return cudaSuccess; } template <typename CooT_in> cudaError_t FromCoo( CooT_in &coo, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { return cudaSuccess; } template <typename CsrT_in> cudaError_t FromCsr( CsrT_in &csr, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { return cudaSuccess; } template <typename CscT_in> cudaError_t FromCsc( CscT_in &csc, util::Location target = util::LOCATION_DEFAULT, cudaStream_t stream = 0, bool quiet = false) { return cudaSuccess; } cudaError_t Sort() { return cudaSuccess; } __device__ __host__ __forceinline__ SizeT GetNeighborListLength(const VertexT &v) const { return 0; } cudaError_t Move( util::Location source, util::Location target, cudaStream_t stream = 0) { return cudaSuccess; } cudaError_t Display( std::string graph_prefix = "", SizeT nodes_to_show = 40, bool with_edge_values = true) { return cudaSuccess; } }; } // namespace graph } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
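// Illustrative sketch, not part of Gunrock: the CSR layout held by the Csr
// struct above for a small directed graph with 4 vertices and 5 edges
// (0->1, 0->2, 1->3, 2->3, 3->0). row_offsets has nodes + 1 entries, and the
// neighbors of vertex v are column_indices[row_offsets[v] .. row_offsets[v+1]).
static const int example_row_offsets[5]    = { 0, 2, 3, 4, 5 };
static const int example_column_indices[5] = { 1, 2, 3, 3, 0 };
// e.g. the degree of vertex 0 is row_offsets[1] - row_offsets[0] == 2, which is
// what GetNeighborListLength(0) returns, and GetEdgeSrc(4) == 3 because edge 4
// falls in [row_offsets[3], row_offsets[4]).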
namespace nvbio { namespace { // anonymous namespace /// convert a DNA+N+$ symbol to its ASCII character /// NVBIO_FORCEINLINE NVBIO_HOST_DEVICE char dna6_to_char(const uint8 c) { return c == 0u ? 'A' : c == 1u ? 'C' : c == 2u ? 'G' : c == 3u ? 'T' : c == 255u ? '$' : 'N'; } /// convert a DNA+N+$ string to an ASCII string /// template <typename SymbolIterator> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void dna6_to_string( const SymbolIterator begin, const uint32 n, char* string) { for (uint32 i = 0; i < n; ++i) string[i] = dna6_to_char( begin[i] ); string[n] = '\0'; } // utility function to convert an unsigned int to a base-10 string representation template <typename T> uint32 itoa(char *buf, T in) { uint32 len = 0; // convert to base10 do { buf[len] = "0123456789"[in % 10]; in /= 10; len++; } while(in); // reverse for(uint32 c = 0; c < len / 2; c++) { char tmp; tmp = buf[c]; buf[c] = buf[len - c - 1]; buf[len - c - 1] = tmp; } // terminate buf[len] = 0; return len; } } // anonymous namespace /// A class to output the BWT to a packed host string /// template <typename BWTWriter, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename word_type> struct FileBWTHandler : public SetBWTHandler, public BWTWriter { static const uint32 WORD_SIZE = uint32( 8u * sizeof(word_type) ); static const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE; /// constructor /// FileBWTHandler() : offset(0) {} /// destructor /// virtual ~FileBWTHandler() {} /// write header /// void write_header() { const char* magic = "PRIB"; // PRImary-Binary BWTWriter::index_write( 4, magic ); } /// process a batch of BWT symbols /// template <uint32 IN_SYMBOL_SIZE> void write_bwt( const uint32 n_suffixes, const uint32* bwt_storage) { typedef PackedStream<const uint32*,uint8,IN_SYMBOL_SIZE,true> input_stream_type; input_stream_type bwt( bwt_storage ); const uint32 n_words = util::round_i( n_suffixes, SYMBOLS_PER_WORD ); // expand our cache if needed if (cache.size() < n_words+2 ) // 2 more guardband words to avoid out-of-bounds accesses cache.resize( n_words+2 ); const uint32 word_offset = offset & (SYMBOLS_PER_WORD-1); uint32 word_rem = 0; uint32 cache_idx = 0; if (word_offset) { // compute how many symbols we still need to encode to fill the current word word_rem = SYMBOLS_PER_WORD - word_offset; // fetch the word in question word_type word = cache_word; for (uint32 i = 0; i < word_rem; ++i) { const uint32 bit_idx = (word_offset + i) * SYMBOL_SIZE; const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx; const word_type symbol = word_type(bwt[i]) << symbol_offset; // set bits word |= symbol; } // write out the cached word cache[0] = word; cache_idx = 1; } #pragma omp parallel for for (int i = word_rem; i < int( n_suffixes ); i += SYMBOLS_PER_WORD) { // encode a word's worth of characters word_type word = 0u; const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, n_suffixes - i ); for (uint32 j = 0; j < n_symbols; ++j) { const uint32 bit_idx = j * SYMBOL_SIZE; const uint32 symbol_offset = BIG_ENDIAN ? 
(WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx; const word_type symbol = word_type(bwt[i + j]) << symbol_offset; // set bits word |= symbol; } // write out the word and advance word_idx const uint32 word_idx = (i - word_rem) / SYMBOLS_PER_WORD; cache[ cache_idx + word_idx ] = word; } // compute how many words we can actually write out const uint32 n_full_words = cache_idx + (n_suffixes - word_rem) / SYMBOLS_PER_WORD; // write out the cache buffer { const uint32 n_bytes = uint32( sizeof(word_type) * n_full_words ); const uint32 n_written = BWTWriter::bwt_write( n_bytes, &cache[0] ); if (n_written != n_bytes) throw nvbio::runtime_error("FileBWTHandler::process() : bwt write failed! (%u/%u bytes written)", n_written, n_bytes); } // save the last (possibly partial) word (hence the +2 guardband) cache_word = cache[ n_full_words ]; } /// process a batch of BWT symbols /// void process( const uint32 n_suffixes, const uint32 bits_per_symbol, const uint32* bwt, const uint32 n_dollars, const uint64* dollar_pos, const uint64* dollar_ids) { if (bits_per_symbol == 2) write_bwt<2>( n_suffixes, bwt ); else if (bits_per_symbol == 4) write_bwt<4>( n_suffixes, bwt ); else if (bits_per_symbol == 8) write_bwt<8>( n_suffixes, bwt ); else throw nvbio::runtime_error("FileBWTHandler::process() : unsupported input format! (%u bits per symbol)", bits_per_symbol); // and write the list to the output if (n_dollars) { if (dollars.size() < n_dollars) dollars.resize( n_dollars ); #pragma omp parallel for for (int32 i = 0; i < int32( n_dollars ); ++i) dollars[i] = std::make_pair( dollar_pos[i], dollar_ids[i] ); const uint32 n_bytes = uint32( sizeof(uint64) * 2 * n_dollars ); const uint32 n_written = BWTWriter::index_write( n_bytes, &dollars[0] ); if (n_written != n_bytes) throw nvbio::runtime_error("FileBWTHandler::process() : index write failed! (%u/%u bytes written)", n_written, n_bytes); } // advance the offset offset += n_suffixes; } /// process a batch of BWT symbols /// void process( const uint32 n_suffixes, const uint8* bwt, const uint32 n_dollars, const uint64* dollar_pos, const uint64* dollar_ids) { const uint32 n_words = util::round_i( n_suffixes, SYMBOLS_PER_WORD ); // expand our cache if needed if (cache.size() < n_words+2 ) // 2 more guardband words to avoid out-of-bounds accesses cache.resize( n_words+2 ); const uint32 word_offset = offset & (SYMBOLS_PER_WORD-1); uint32 word_rem = 0; uint32 cache_idx = 0; if (word_offset) { // compute how many symbols we still need to encode to fill the current word word_rem = SYMBOLS_PER_WORD - word_offset; // fetch the word in question word_type word = cache_word; for (uint32 i = 0; i < word_rem; ++i) { const uint32 bit_idx = (word_offset + i) * SYMBOL_SIZE; const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx; const word_type symbol = word_type(bwt[i]) << symbol_offset; // set bits word |= symbol; } // write out the cached word cache[0] = word; cache_idx = 1; } #pragma omp parallel for for (int i = word_rem; i < int( n_suffixes ); i += SYMBOLS_PER_WORD) { // encode a word's worth of characters word_type word = 0u; const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, n_suffixes - i ); for (uint32 j = 0; j < n_symbols; ++j) { const uint32 bit_idx = j * SYMBOL_SIZE; const uint32 symbol_offset = BIG_ENDIAN ? 
(WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx; const word_type symbol = word_type(bwt[i + j]) << symbol_offset; // set bits word |= symbol; } // write out the word and advance word_idx const uint32 word_idx = (i - word_rem) / SYMBOLS_PER_WORD; cache[ cache_idx + word_idx ] = word; } // compute how many words we can actually write out const uint32 n_full_words = cache_idx + (n_suffixes - word_rem) / SYMBOLS_PER_WORD; // write out the cache buffer { const uint32 n_bytes = uint32( sizeof(word_type) * n_full_words ); const uint32 n_written = BWTWriter::bwt_write( n_bytes, &cache[0] ); if (n_written != n_bytes) throw nvbio::runtime_error("FileBWTHandler::process() : bwt write failed! (%u/%u bytes written)", n_written, n_bytes); } // save the last (possibly partial) word (hence the +2 guardband) cache_word = cache[ n_full_words ]; // and write the list to the output if (n_dollars) { priv::alloc_storage( dollars, n_dollars ); #pragma omp parallel for for (int32 i = 0; i < int32( n_dollars ); ++i) dollars[i] = std::make_pair( dollar_pos[i], dollar_ids[i] ); const uint32 n_bytes = uint32( sizeof(uint64) * 2 * n_dollars ); const uint32 n_written = BWTWriter::index_write( n_bytes, &dollars[0] ); if (n_written != n_bytes) throw nvbio::runtime_error("FileBWTHandler::process() : index write failed! (%u/%u bytes written)", n_written, n_bytes); } // advance the offset offset += n_suffixes; } uint64 offset; std::vector<word_type> cache; word_type cache_word; std::vector< std::pair<uint64,uint64> > dollars; }; /// A class to output the BWT to a ASCII file /// template <typename BWTWriter> struct ASCIIFileBWTHandler : public SetBWTHandler, public BWTWriter { /// constructor /// ASCIIFileBWTHandler() : offset(0) {} /// destructor /// virtual ~ASCIIFileBWTHandler() {} /// write header /// void write_header() { const char* magic = "#PRI\n"; // PRImary-ASCII BWTWriter::index_write( 5, magic ); } void write_dollars( const uint32 n_dollars, const uint64* dollar_pos, const uint64* dollar_ids) { if (n_dollars) { // reserve enough storage to encode 2 very large numbers in base 10 (up to 15 digits), plus a space and a newline priv::alloc_storage( dollar_buffer, n_dollars * 32 ); uint32 output_size = 0; for (uint32 i = 0; i < n_dollars; ++i) { char* buf = &dollar_buffer[ output_size ]; const uint32 len1 = itoa( buf, dollar_pos[i] ); buf[len1] = ' '; const uint32 len2 = itoa( buf + len1 + 1, dollar_ids[i] ); buf[len1 + len2 + 1] = '\n'; const uint32 len = len1 + len2 + 2; output_size += len; } const uint32 n_bytes = output_size; const uint32 n_written = BWTWriter::index_write( n_bytes, &dollar_buffer[0] ); if (n_written != n_bytes) throw nvbio::runtime_error("FileBWTHandler::process() : index write failed! (%u/%u bytes written)", n_written, n_bytes); } } /// process a batch of BWT symbols /// template <typename bwt_iterator> void write( const uint32 n_suffixes, const bwt_iterator bwt, const uint32 n_dollars, const uint64* dollar_pos, const uint64* dollar_ids) { // write out the cache buffer priv::alloc_storage( ascii, n_suffixes + 1 ); // convert to ASCII dna6_to_string( bwt, n_suffixes, &ascii[0] ); { const uint32 n_bytes = uint32( n_suffixes ); const uint32 n_written = BWTWriter::bwt_write( n_bytes, &ascii[0] ); if (n_written != n_bytes) throw nvbio::runtime_error("FileBWTHandler::process() : bwt write failed! 
(%u/%u bytes written)", n_written, n_bytes); } // and write the list to the output write_dollars( n_dollars, dollar_pos, dollar_ids ); // advance the offset offset += n_suffixes; } /// process a batch of BWT symbols /// void process( const uint32 n_suffixes, const uint32 bits_per_symbol, const uint32* bwt, const uint32 n_dollars, const uint64* dollar_pos, const uint64* dollar_ids) { // convert to ASCII if (bits_per_symbol == 2) write( n_suffixes, PackedStream<const uint32*,uint8,2,true>( bwt ), n_dollars, dollar_pos, dollar_ids ); else if (bits_per_symbol == 4) write( n_suffixes, PackedStream<const uint32*,uint8,4,true>( bwt ), n_dollars, dollar_pos, dollar_ids ); else if (bits_per_symbol == 8) write( n_suffixes, PackedStream<const uint32*,uint8,8,true>( bwt ), n_dollars, dollar_pos, dollar_ids ); else throw nvbio::runtime_error("FileBWTHandler::process() : unsupported input format! (%u bits per symbol)", bits_per_symbol); } /// process a batch of BWT symbols /// void process( const uint32 n_suffixes, const uint8* bwt, const uint32 n_dollars, const uint64* dollar_pos, const uint64* dollar_ids) { write( n_suffixes, bwt, n_dollars, dollar_pos, dollar_ids ); } uint64 offset; std::vector<char> ascii; std::vector<char> dollar_buffer; }; /// A class to output the BWT to a binary file /// struct RawBWTWriter { /// constructor /// RawBWTWriter(); /// destructor /// ~RawBWTWriter(); void open(const char* output_name, const char* index_name); /// write to the bwt /// uint32 bwt_write(const uint32 n_bytes, const void* buffer); /// write to the index /// uint32 index_write(const uint32 n_bytes, const void* buffer); /// return whether the file is in a good state /// bool is_ok() const; private: FILE* output_file; FILE* index_file; }; /// A class to output the BWT to a gzipped binary file /// struct BWTGZWriter { /// constructor /// BWTGZWriter(); /// destructor /// ~BWTGZWriter(); void open(const char* output_name, const char* index_name, const char* compression); /// write to the bwt /// uint32 bwt_write(const uint32 n_bytes, const void* buffer); /// write to the index /// uint32 index_write(const uint32 n_bytes, const void* buffer); /// return whether the file is in a good state /// bool is_ok() const; private: void* output_file; void* index_file; }; // constructor // RawBWTWriter::RawBWTWriter() : output_file(NULL), index_file(NULL) {} // destructor // RawBWTWriter::~RawBWTWriter() { fclose( output_file ); fclose( index_file ); } void RawBWTWriter::open(const char* output_name, const char* index_name) { log_verbose(stderr," opening bwt file \"%s\"\n", output_name); log_verbose(stderr," opening index file \"%s\"\n", index_name); output_file = fopen( output_name, "wb" ); index_file = fopen( index_name, "wb" ); } // write to the bwt // uint32 RawBWTWriter::bwt_write(const uint32 n_bytes, const void* buffer) { return fwrite( buffer, sizeof(uint8), n_bytes, output_file ); } // write to the index // uint32 RawBWTWriter::index_write(const uint32 n_bytes, const void* buffer) { return fwrite( buffer, sizeof(uint8), n_bytes, index_file ); } // return whether the file is in a good state // bool RawBWTWriter::is_ok() const { return output_file != NULL || index_file != NULL; } // constructor // BWTGZWriter::BWTGZWriter() : output_file(NULL), index_file(NULL) {} // destructor // BWTGZWriter::~BWTGZWriter() { gzclose( output_file ); gzclose( index_file ); } void BWTGZWriter::open(const char* output_name, const char* index_name, const char* compression) { char comp_string[5]; sprintf( comp_string, "wb%s", 
compression ); log_verbose(stderr," opening bwt file \"%s\" (compression level: %s)\n", output_name, compression); log_verbose(stderr," opening index file \"%s\" (compression level: %s)\n", index_name, compression); output_file = gzopen( output_name, comp_string ); index_file = gzopen( index_name, comp_string ); } // write to the bwt // uint32 BWTGZWriter::bwt_write(const uint32 n_bytes, const void* buffer) { return gzwrite( output_file, buffer, n_bytes ); } // write to the index // uint32 BWTGZWriter::index_write(const uint32 n_bytes, const void* buffer) { return gzwrite( index_file, buffer, n_bytes ); } // return whether the file is in a good state // bool BWTGZWriter::is_ok() const { return output_file != NULL || index_file != NULL; } // open a BWT file // SetBWTHandler* open_bwt_file(const char* output_name, const char* params) { enum OutputFormat { UNKNOWN = 0, TXT = 1, TXTGZ = 2, TXTBGZ = 3, TXTLZ4 = 4, BWT2 = 5, BWT2GZ = 6, BWT2BGZ = 7, BWT2LZ4 = 8, BWT4 = 9, BWT4GZ = 10, BWT4BGZ = 11, BWT4LZ4 = 12, }; OutputFormat format = UNKNOWN; std::string index_string = output_name; // detect the file format from the suffix { const uint32 len = (uint32)strlen( output_name ); // // detect BWT2* variants // if (len >= strlen(".bwt.bgz")) { if (strcmp(&output_name[len - strlen(".bwt.bgz")], ".bwt.bgz") == 0) { format = BWT2BGZ; index_string.replace( index_string.find(".bwt.bgz"), 8u, ".pri.bgz" ); } } if (len >= strlen(".bwt.gz")) { if (strcmp(&output_name[len - strlen(".bwt.gz")], ".bwt.gz") == 0) { format = BWT2GZ; index_string.replace( index_string.find(".bwt.gz"), 7u, ".pri.gz" ); } } if (len >= strlen(".bwt")) { if (strcmp(&output_name[len - strlen(".bwt")], ".bwt") == 0) { format = BWT2; index_string.replace( index_string.find(".bwt"), 4u, ".pri" ); } } // // detect BWT4* variants // if (len >= strlen(".bwt4.bgz")) { if (strcmp(&output_name[len - strlen(".bwt4.bgz")], ".bwt4.bgz") == 0) { format = BWT4BGZ; index_string.replace( index_string.find(".bwt4.bgz"), 9u, ".pri.bgz" ); } } if (len >= strlen(".bwt4.gz")) { if (strcmp(&output_name[len - strlen(".bwt4.gz")], ".bwt4.gz") == 0) { format = BWT4GZ; index_string.replace( index_string.find(".bwt4.gz"), 8u, ".pri.gz" ); } } if (len >= strlen(".bwt4")) { if (strcmp(&output_name[len - strlen(".bwt4")], ".bwt4") == 0) { format = BWT4; index_string.replace( index_string.find(".bwt4"), 5u, ".pri" ); } } // // detect TXT* variants // if (len >= strlen(".txt.gz")) { if (strcmp(&output_name[len - strlen(".txt.gz")], ".txt.gz") == 0) { format = TXTGZ; index_string.replace( index_string.find(".txt.gz"), 7u, ".pri.gz" ); } } if (len >= strlen(".txt.bgz")) { if (strcmp(&output_name[len - strlen(".txt.bgz")], ".txt.bgz") == 0) { format = TXTGZ; index_string.replace( index_string.find(".txt.bgz"), 8u, ".pri.bgz" ); } } if (len >= strlen(".txt")) { if (strcmp(&output_name[len - strlen(".txt")], ".txt") == 0) { format = TXT; index_string.replace( index_string.find(".txt"), 4u, ".pri" ); } } } if (format == BWT2) { // build an output handler FileBWTHandler<RawBWTWriter,2,true,uint32>* file_handler = new FileBWTHandler<RawBWTWriter,2,true,uint32>(); file_handler->open( output_name, index_string.c_str() ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == BWT2BGZ) { // build an output handler FileBWTHandler<BWTBGZWriter,2,true,uint32>* file_handler = new FileBWTHandler<BWTBGZWriter,2,true,uint32>(); 
file_handler->open( output_name, index_string.c_str(), params ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == BWT2GZ) { // build an output handler FileBWTHandler<BWTGZWriter,2,true,uint32>* file_handler = new FileBWTHandler<BWTGZWriter,2,true,uint32>(); file_handler->open( output_name, index_string.c_str(), params ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == BWT4) { // build an output handler FileBWTHandler<RawBWTWriter,4,true,uint32>* file_handler = new FileBWTHandler<RawBWTWriter,4,true,uint32>(); file_handler->open( output_name, index_string.c_str() ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == BWT4BGZ) { // build an output handler FileBWTHandler<BWTBGZWriter,4,true,uint32>* file_handler = new FileBWTHandler<BWTBGZWriter,4,true,uint32>(); file_handler->open( output_name, index_string.c_str(), params ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == BWT4GZ) { // build an output handler FileBWTHandler<BWTGZWriter,4,true,uint32>* file_handler = new FileBWTHandler<BWTGZWriter,4,true,uint32>(); file_handler->open( output_name, index_string.c_str(), params ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == TXT) { // build an output handler ASCIIFileBWTHandler<RawBWTWriter>* file_handler = new ASCIIFileBWTHandler<RawBWTWriter>(); file_handler->open( output_name, index_string.c_str() ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == TXTGZ) { // build an output handler ASCIIFileBWTHandler<BWTGZWriter>* file_handler = new ASCIIFileBWTHandler<BWTGZWriter>(); file_handler->open( output_name, index_string.c_str(), params ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } else if (format == TXTBGZ) { // build an output handler ASCIIFileBWTHandler<BWTBGZWriter>* file_handler = new ASCIIFileBWTHandler<BWTBGZWriter>(); file_handler->open( output_name, index_string.c_str(), params ); if (file_handler->is_ok() == false) { log_error(stderr," unable to open output file \"%s\"\n", output_name); return NULL; } file_handler->write_header(); return file_handler; } log_error(stderr," unknown output format \"%s\"\n", output_name); return NULL; } } // namespace nvbio
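// Illustrative sketch, not part of nvbio: how FileBWTHandler::write_bwt above
// places symbols inside an output word. With word_type = uint32 (WORD_SIZE = 32),
// SYMBOL_SIZE = 2 and BIG_ENDIAN = true, symbol j of a word is shifted to bit
// offset WORD_SIZE - SYMBOL_SIZE - j*SYMBOL_SIZE, so the first symbol lands in
// the most significant bits. pack_2bit_big_endian is a hypothetical standalone
// helper, not an nvbio API.
inline unsigned int pack_2bit_big_endian(const unsigned char* symbols,
                                         unsigned int n_symbols /* <= 16 */)
{
    unsigned int word = 0u;
    for (unsigned int j = 0; j < n_symbols; ++j)
    {
        const unsigned int bit_idx       = j * 2u;             // j * SYMBOL_SIZE
        const unsigned int symbol_offset = 32u - 2u - bit_idx; // WORD_SIZE - SYMBOL_SIZE - bit_idx
        word |= (symbols[j] & 0x3u) << symbol_offset;          // set bits
    }
    return word;
}
// e.g. packing {0, 1, 2, 3} ("ACGT" under dna6_to_char above) fills the top
// 8 bits of the word with 00 01 10 11.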
#include <algorithm> #include <cassert> #include <cmath> #include <limits> #include <thrust/complex.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <typeinfo> #include <isce3/container/RadarGeometry.h> #include <isce3/core/Ellipsoid.h> #include <isce3/core/Linspace.h> #include <isce3/core/LookSide.h> #include <isce3/core/Projections.h> #include <isce3/core/Vector.h> #include <isce3/cuda/container/RadarGeometry.h> #include <isce3/cuda/core/Interp1d.h> #include <isce3/cuda/core/Kernels.h> #include <isce3/cuda/core/Orbit.h> #include <isce3/cuda/core/OrbitView.h> #include <isce3/cuda/core/gpuLUT2d.h> #include <isce3/cuda/except/Error.h> #include <isce3/cuda/geometry/gpuDEMInterpolator.h> #include <isce3/cuda/geometry/gpuGeometry.h> #include <isce3/error/ErrorCode.h> #include <isce3/focus/BistaticDelay.h> using namespace isce3::core; using namespace isce3::cuda::geometry; using isce3::cuda::core::interp1d; using isce3::error::ErrorCode; using isce3::focus::bistaticDelay; using isce3::focus::dryTropoDelayTSX; using HostDEMInterpolator = isce3::geometry::DEMInterpolator; using HostRadarGeometry = isce3::container::RadarGeometry; using DeviceDEMInterpolator = isce3::cuda::geometry::gpuDEMInterpolator; using DeviceOrbitView = isce3::cuda::core::OrbitView; using DeviceRadarGeometry = isce3::cuda::container::RadarGeometry; template<typename T> using DeviceLUT2d = isce3::cuda::core::gpuLUT2d<T>; // clang-format off template<typename T> using HostBartlettKernel = isce3::core::BartlettKernel<T>; template<typename T> using HostChebyKernel = isce3::core::ChebyKernel<T>; template<typename T> using HostKnabKernel = isce3::core::KnabKernel<T>; template<typename T> using HostLinearKernel = isce3::core::LinearKernel<T>; template<typename T> using HostTabulatedKernel = isce3::core::TabulatedKernel<T>; template<typename T> using DeviceBartlettKernel = isce3::cuda::core::BartlettKernel<T>; template<typename T> using DeviceChebyKernel = isce3::cuda::core::ChebyKernel<T>; template<typename T> using DeviceKnabKernel = isce3::cuda::core::KnabKernel<T>; template<typename T> using DeviceLinearKernel = isce3::cuda::core::LinearKernel<T>; template<typename T> using DeviceTabulatedKernel = isce3::cuda::core::TabulatedKernel<T>; // clang-format on namespace isce3 { namespace cuda { namespace focus { namespace { /** * \internal * Interpolate platform position and velocity at a range of uniformly-spaced * timepoints. * * The global error code is set if any thread encounters an error. * * \param[out] pos Interpolated positions (m) * \param[out] vel Interpolated velocities (m/s) * \param[in] orbit Platform orbit * \param[in] t Interpolation times w.r.t. reference epoch (s) * \param[out] errc Error flag */ __global__ void interpolateOrbit(Vec3* pos, Vec3* vel, const DeviceOrbitView orbit, const Linspace<double> t, ErrorCode* errc) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x); // bounds check if (tid >= t.size()) { return; } // interpolate orbit const auto status = orbit.interpolate(&pos[tid], &vel[tid], t[tid]); // check error code if (status != ErrorCode::Success) { *errc = status; } } /** * \internal * Interpolate platform position and velocity at a series of timepoints. * * The global error code is set if any thread encounters an error. 
* * \param[out] pos Interpolated positions (m) * \param[out] vel Interpolated velocities (m/s) * \param[in] orbit Platform orbit * \param[in] t Interpolation times w.r.t. reference epoch (s) * \param[in] n Number of timepoints * \param[out] errc Error flag */ __global__ void interpolateOrbit(Vec3* pos, Vec3* vel, const DeviceOrbitView orbit, const double* t, const size_t n, ErrorCode* errc) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check if (tid >= n) { return; } // interpolate orbit const auto status = orbit.interpolate(&pos[tid], &vel[tid], t[tid]); // check error code if (status != ErrorCode::Success) { *errc = status; } } /** * \internal * Transform a 2D radar grid from radar coordinates (azimuth, range) to * geodetic coordinates (longitude, latitude, height). * * The radar grid is defined by the \p azimuth_time and \p slant_range inputs. * * The global error code is set if any thread encounters an error. * * \param[out] llh_out Lon/lat/hae of each target (deg/deg/m) * \param[in] azimuth_time Azimuth time coordinates w.r.t. reference epoch (s) * \param[in] slant_range Slant range coordinates (m) * \param[in] doppler Doppler model * \param[in] orbit Platform orbit * \param[in] dem DEM sampling interface * \param[in] ellipsoid Reference ellipsoid * \param[in] wvl Radar wavelength (m) * \param[in] side Radar look side * \param[in] h0 Initial height estimate for all targets (m) * \param[in] params Root-finding algorithm parameters * \param[out] errc Error flag */ __global__ void runRdr2Geo(Vec3* llh_out, const Linspace<double> azimuth_time, const Linspace<double> slant_range, const DeviceLUT2d<double> doppler, const DeviceOrbitView orbit, DeviceDEMInterpolator dem, const Ellipsoid ellipsoid, const double wvl, const LookSide side, const double h0, const Rdr2GeoParams params, ErrorCode* errc) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check const auto lines = static_cast<size_t>(azimuth_time.size()); const auto samples = static_cast<size_t>(slant_range.size()); if (tid >= lines * samples) { return; } // convert flat index to 2D array indices const auto j = static_cast<int>(tid / samples); const auto i = static_cast<int>(tid % samples); // evaluate Doppler model at target position const double t = azimuth_time[j]; const double r = slant_range[i]; const double fd = doppler.eval(t, r); // make thread-local variable to store rdr2geo output // set height to initial height guess Vec3 llh; llh[2] = h0; // run rdr2geo const int converged = rdr2geo(t, r, fd, orbit, ellipsoid, dem, llh, wvl, side, params.threshold, params.maxiter, params.extraiter); // check convergence if (converged) { llh_out[tid] = llh; } else { // set output to NaN (?) constexpr static auto nan = std::numeric_limits<double>::quiet_NaN(); llh_out[tid] = {nan, nan, nan}; // set global error flag *errc = ErrorCode::FailedToConverge; } } /** * \internal * Transform each input target position from geodetic coordinates (longitude, * latitude, height) to radar coordinates (azimuth, range). * * The global error code is set if any thread encounters an error. * * \param[out] t_out Azim. time of each target w.r.t. 
reference epoch (s) * \param[out] r_out Slant range of each target (m) * \param[in] llh_in Lon/lat/hae of each target (deg/deg/m) * \param[in] n Number of targets * \param[in] ellipsoid Reference ellipsoid * \param[in] orbit Platform orbit * \param[in] doppler Doppler model * \param[in] wvl Radar wavelength (m) * \param[in] side Radar look side * \param[in] t0 Initial azimuth time estimate for all targets (s) * \param[in] params Root-finding algorithm parameters * \param[out] errc Error flag */ __global__ void runGeo2Rdr(double* t_out, double* r_out, const Vec3* llh_in, const size_t n, const Ellipsoid ellipsoid, const DeviceOrbitView orbit, const DeviceLUT2d<double> doppler, const double wvl, const LookSide side, const double t0, const Geo2RdrParams params, ErrorCode* errc) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check if (tid >= n) { return; } // make thread-local variables to store geo2rdr output range, azimuth time // & set azimuth time to initial guess double t = t0; double r; // run geo2rdr auto llh = llh_in[tid]; int converged = geo2rdr(llh, ellipsoid, orbit, doppler, &t, &r, wvl, side, params.threshold, params.maxiter, params.delta_range); // check convergence if (converged) { t_out[tid] = t; r_out[tid] = r; } else { // set outputs to NaN (?) constexpr static auto nan = std::numeric_limits<double>::quiet_NaN(); t_out[tid] = nan; r_out[tid] = nan; // set global error flag *errc = ErrorCode::FailedToConverge; } } /** * \internal * Transform target coordinates from LLH to ECEF, given some reference ellipsoid * * \param[out] xyz ECEF coordinates of each target (m) * \param[in] llh Lon/lat/hae coordinates of each target (deg/deg/m) * \param[in] n Number of targets * \param[in] ellipsoid Reference ellipsoid */ __global__ void llh2ecef(Vec3* xyz, const Vec3* llh, const size_t n, const Ellipsoid ellipsoid) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check if (tid >= n) { return; } // transform coordinates from LLH to ECEF xyz[tid] = ellipsoid.lonLatToXyz(llh[tid]); } /** * \internal * Estimate dry troposphere delay for one or more targets using the TerraSAR-X * model. * * \param[out] tau_atm Dry troposphere delay for each target (s) * \param[in] p Platform position at target's azimuth time (m) * \param[in] llh Lon/lat/hae coordinates of each target (deg/deg/m) * \param[in] n Number of targets * \param[in] ellipsoid Reference ellipsoid */ __global__ void estimateDryTropoDelayTSX(double* tau_atm, const Vec3* p, const Vec3* llh, const size_t n, const Ellipsoid ellipsoid) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check if (tid >= n) { return; } // estimate dry troposphere delay tau_atm[tid] = dryTropoDelayTSX(p[tid], llh[tid], ellipsoid); } /** * \internal * Estimate coherent processing window bounds for one or more targets. * * Returns the indices of the first pulse and one past the last pulse to * coherently integrate for each target. * * \param[out] kstart_out Processing window start pulse (inclusive) * \param[out] kstop_out Processing window end pulse (exclusive) * \param[in] t_in Azim. time of each target w.r.t. 
reference epoch (s) * \param[in] r_in Slant range of each target (m) * \param[in] x_in Position of each target in ECEF coords (m) * \param[in] p_in Platform position at each target's azimuth time (m) * \param[in] v_in Platform velocity at each target's azimuth time (m) * \param[in] n Number of targets * \param[in] azimuth_time Azim. time of each pulse w.r.t. reference epoch (s) * \param[in] wvl Radar wavelength (m) * \param[in] ds Desired azimuth resolution (m) */ __global__ void getCPIBounds(int* kstart_out, int* kstop_out, const double* t_in, const double* r_in, const Vec3* x_in, const Vec3* p_in, const Vec3* v_in, const size_t n, const Linspace<double> azimuth_time, const double wvl, const double ds) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check if (tid >= n) { return; } // load inputs const double t = t_in[tid]; const double r = r_in[tid]; const Vec3 p = p_in[tid]; const Vec3 v = v_in[tid]; const Vec3 x = x_in[tid]; // estimate synthetic aperture length required to achieve the desired // azimuth resolution const double l = wvl * r * (p.norm() / x.norm()) / (2. * ds); // approximate CPI duration (assuming constant platform velocity) const double cpi = l / v.norm(); // get coherent processing window start & end time const double tstart = t - 0.5 * cpi; const double tstop = t + 0.5 * cpi; // convert CPI bounds to pulse indices const double t0 = azimuth_time.first(); const double dt = azimuth_time.spacing(); const auto kstart = static_cast<int>(std::floor((tstart - t0) / dt)); const auto kstop = static_cast<int>(std::ceil((tstop - t0) / dt)); kstart_out[tid] = std::max(kstart, 0); kstop_out[tid] = std::min(kstop, azimuth_time.size()); } /** * \internal * Backprojection core processing loop * * Compress the radar return from each input target in azimuth by coherently * integrating the echos from a range of pulses. * * Operates on pulses from a batch of the full range-compressed swath * bounded by [ \p batch_start , \p batch_stop ). The result for each target is * added to the previous pixel value, which may contain partially-compressed * data from previous batches. Therefore, the output array should be initialized * to zero prior to batch processing. 
* * \param[in,out] out Output focused image data for each target * \param[in] rc Range-compressed signal data batch * \param[in] pos Platform position at each pulse (m) * \param[in] vel Platform velocity at each pulse (m/s) * \param[in] sampling_window Range sampling window (s) * \param[in] x_in ECEF position of each target (m) * \param[in] tau_atm_in Dry troposphere delay for each target (s) * \param[in] kstart_in First pulse in CPI for each target * \param[in] kstop_in One past the last pulse in the CPI for each target * \param[in] n Number of targets * \param[in] fc Center frequency (Hz) * \param[in] kernel 1D interpolation kernel * \param[in] batch_start First pulse in batch of range-compressed data * \param[in] batch_stop One past the last pulse in the rc data batch */ template<class Kernel> __global__ void sumCoherentBatch( thrust::complex<float>* out, const thrust::complex<float>* rc, const Vec3* __restrict__ pos, const Vec3* __restrict__ vel, const Linspace<double> sampling_window, const Vec3* __restrict__ x_in, const double* __restrict__ tau_atm_in, const int* __restrict__ kstart_in, const int* __restrict__ kstop_in, const size_t n, const double fc, const Kernel kernel, const int batch_start, const int batch_stop) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; // bounds check if (tid >= n) { return; } // cache some inputs const Vec3 x = x_in[tid]; const double tau_atm = tau_atm_in[tid]; const int kstart = kstart_in[tid]; const int kstop = kstop_in[tid]; // get range sampling window start, spacing, number of samples const double tau0 = sampling_window.first(); const double dtau = sampling_window.spacing(); const auto samples = static_cast<size_t>(sampling_window.size()); // loop over lines in batch thrust::complex<double> batch_sum = {0., 0.}; for (int k = batch_start; k < batch_stop; ++k) { // check if pulse is within CPI bounds if (k < kstart or k >= kstop) { continue; } // compute round-trip delay to target const double tau = tau_atm + bistaticDelay(pos[k], vel[k], x); // interpolate range-compressed data const auto* rc_line = &rc[(k - batch_start) * samples]; const double u = (tau - tau0) / dtau; thrust::complex<double> z = interp1d(kernel, rc_line, samples, 1, u); // apply phase migration compensation double sin_phi, cos_phi; ::sincospi(2. 
* fc * tau, &sin_phi, &cos_phi); z *= thrust::complex<double>(cos_phi, sin_phi); batch_sum += z; } // add batch sum to total out[tid] += thrust::complex<float>(batch_sum); } } // namespace template<class Kernel> void backproject(std::complex<float>* out, const DeviceRadarGeometry& out_geometry, const std::complex<float>* in, const DeviceRadarGeometry& in_geometry, DeviceDEMInterpolator& dem, double fc, double ds, const Kernel& kernel, DryTroposphereModel dry_tropo_model, const Rdr2GeoParams& rdr2geo_params, const Geo2RdrParams& geo2rdr_params, int batch) { // XXX input reference epoch must match output reference epoch if (out_geometry.referenceEpoch() != in_geometry.referenceEpoch()) { std::string errmsg = "input reference epoch must match output " "reference epoch"; throw isce3::except::RuntimeError(ISCE_SRCINFO(), errmsg); } // init device variable to return error codes from device code thrust::device_vector<ErrorCode> errc(1, ErrorCode::Success); // get input & output radar grid azimuth time & slant range coordinates const Linspace<double> in_azimuth_time = in_geometry.sensingTime(); const Linspace<double> in_slant_range = in_geometry.slantRange(); const Linspace<double> out_azimuth_time = out_geometry.sensingTime(); const Linspace<double> out_slant_range = out_geometry.slantRange(); // interpolate platform position & velocity at each pulse int in_lines = in_azimuth_time.size(); thrust::device_vector<Vec3> pos(in_lines); thrust::device_vector<Vec3> vel(in_lines); { const unsigned block = 256; const unsigned grid = (in_lines + block - 1) / block; interpolateOrbit<<<grid, block>>>(pos.data().get(), vel.data().get(), in_geometry.orbit(), in_azimuth_time, errc.data().get()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // range sampling window static constexpr double c = isce3::core::speed_of_light; const double swst = 2. * in_slant_range.first() / c; const double dtau = 2. 
* in_slant_range.spacing() / c; const int in_samples = in_slant_range.size(); const auto sampling_window = Linspace<double>(swst, dtau, in_samples); // reference ellipsoid const int epsg = dem.epsgCode(); const Ellipsoid ellipsoid = makeProjection(epsg)->ellipsoid(); // carrier wavelength const double wvl = c / fc; // run rdr2geo using output geometry to get LLH position of each target in // output grid const size_t out_grid_size = out_geometry.gridLength() * out_geometry.gridWidth(); thrust::device_vector<Vec3> llh(out_grid_size); { const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; const double h0 = 0.; runRdr2Geo<<<grid, block>>>(llh.data().get(), out_azimuth_time, out_slant_range, out_geometry.doppler(), out_geometry.orbit(), dem, ellipsoid, wvl, out_geometry.lookSide(), h0, rdr2geo_params, errc.data().get()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // run geo2rdr using input geometry to estimate the center of the coherent // processing window for each target thrust::device_vector<double> t(out_grid_size); thrust::device_vector<double> r(out_grid_size); { const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; const double t0 = in_geometry.radarGrid().sensingMid(); runGeo2Rdr<<<grid, block>>>( t.data().get(), r.data().get(), llh.data().get(), out_grid_size, ellipsoid, in_geometry.orbit(), in_geometry.doppler(), wvl, in_geometry.lookSide(), t0, geo2rdr_params, errc.data().get()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // transform each target position from LLH to ECEF coordinates thrust::device_vector<Vec3> x(out_grid_size); { const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; llh2ecef<<<grid, block>>>(x.data().get(), llh.data().get(), out_grid_size, ellipsoid); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // get platform position & velocity at center of CPI for each target thrust::device_vector<Vec3> p(out_grid_size); thrust::device_vector<Vec3> v(out_grid_size); { const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; interpolateOrbit<<<grid, block>>>(p.data().get(), v.data().get(), in_geometry.orbit(), t.data().get(), out_grid_size, errc.data().get()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // estimate dry troposphere delay thrust::device_vector<double> tau_atm(out_grid_size); if (dry_tropo_model == DryTroposphereModel::NoDelay) { checkCudaErrors(cudaMemset(tau_atm.data().get(), 0, out_grid_size * sizeof(double))); } else if (dry_tropo_model == DryTroposphereModel::TSX) { const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; estimateDryTropoDelayTSX<<<grid, block>>>( tau_atm.data().get(), p.data().get(), llh.data().get(), out_grid_size, ellipsoid); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } else { std::string errmsg = "unexpected dry troposphere model"; throw isce3::except::InvalidArgument(ISCE_SRCINFO(), errmsg); } // get coherent integration bounds (pulse indices) for each target thrust::device_vector<int> kstart(out_grid_size); thrust::device_vector<int> kstop(out_grid_size); { const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; getCPIBounds<<<grid, block>>>( kstart.data().get(), 
kstop.data().get(), t.data().get(), r.data().get(), x.data().get(), p.data().get(), v.data().get(), out_grid_size, in_azimuth_time, wvl, ds); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // init device buffer for output focused image data thrust::device_vector<thrust::complex<float>> img(out_grid_size); // XXX not sure if img data is default initialized to zero checkCudaErrors(cudaMemset(img.data().get(), 0, img.size() * sizeof(thrust::complex<float>))); // the full range-compressed swath may exceed device memory limitations // so we process out-of-core in batches of range-compressed pulses const size_t width = in_geometry.gridWidth(); thrust::device_vector<thrust::complex<float>> rc(batch * width); // get the min & max processing window bounds from among all targets const int kstart_min = thrust::reduce(kstart.begin(), kstart.end(), std::numeric_limits<int>::max(), thrust::minimum<int>()); const int kstop_max = thrust::reduce(kstop.begin(), kstop.end(), std::numeric_limits<int>::min(), thrust::maximum<int>()); // iterate over batches of range-compressed data for (int k = kstart_min; k < kstop_max; k += batch) { // actual size of current batch const int curr_batch = std::min(batch, kstop_max - k); // copy batch of range-compressed data to device memory checkCudaErrors( cudaMemcpy(rc.data().get(), &in[k * width], curr_batch * width * sizeof(std::complex<float>), cudaMemcpyHostToDevice)); // integrate pulses const unsigned block = 256; const unsigned grid = (out_grid_size + block - 1) / block; using KV = typename Kernel::view_type; sumCoherentBatch<KV><<<grid, block>>>( img.data().get(), rc.data().get(), pos.data().get(), vel.data().get(), sampling_window, x.data().get(), tau_atm.data().get(), kstart.data().get(), kstop.data().get(), out_grid_size, fc, kernel, k, k + curr_batch); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cudaStreamDefault)); } // copy output back to the host checkCudaErrors(cudaMemcpy(out, img.data().get(), out_grid_size * sizeof(std::complex<float>), cudaMemcpyDeviceToHost)); // check for errors from device code if (errc[0] != ErrorCode::Success) { std::string errmsg = isce3::error::getErrorString(errc[0]); throw isce3::except::RuntimeError(ISCE_SRCINFO(), errmsg); } } void backproject(std::complex<float>* out, const HostRadarGeometry& out_geometry, const std::complex<float>* in, const HostRadarGeometry& in_geometry, const HostDEMInterpolator& dem, double fc, double ds, const Kernel<float>& kernel, DryTroposphereModel dry_tropo_model, const Rdr2GeoParams& rdr2geo_params, const Geo2RdrParams& geo2rdr_params, int batch) { // copy inputs to device const DeviceRadarGeometry d_out_geometry(out_geometry); const DeviceRadarGeometry d_in_geometry(in_geometry); DeviceDEMInterpolator d_dem(dem); if (typeid(kernel) == typeid(HostBartlettKernel<float>)) { const DeviceBartlettKernel<float> d_kernel( dynamic_cast<const HostBartlettKernel<float>&>(kernel)); backproject(out, d_out_geometry, in, d_in_geometry, d_dem, fc, ds, d_kernel, dry_tropo_model, rdr2geo_params, geo2rdr_params); } else if (typeid(kernel) == typeid(HostLinearKernel<float>)) { const DeviceLinearKernel<float> d_kernel( dynamic_cast<const HostLinearKernel<float>&>(kernel)); backproject(out, d_out_geometry, in, d_in_geometry, d_dem, fc, ds, d_kernel, dry_tropo_model, rdr2geo_params, geo2rdr_params); } else if (typeid(kernel) == typeid(HostKnabKernel<float>)) { const DeviceKnabKernel<float> d_kernel( dynamic_cast<const 
HostKnabKernel<float>&>(kernel)); backproject(out, d_out_geometry, in, d_in_geometry, d_dem, fc, ds, d_kernel, dry_tropo_model, rdr2geo_params, geo2rdr_params); } else if (typeid(kernel) == typeid(HostTabulatedKernel<float>)) { const DeviceTabulatedKernel<float> d_kernel( dynamic_cast<const HostTabulatedKernel<float>&>(kernel)); backproject(out, d_out_geometry, in, d_in_geometry, d_dem, fc, ds, d_kernel, dry_tropo_model, rdr2geo_params, geo2rdr_params); } else if (typeid(kernel) == typeid(HostChebyKernel<float>)) { const DeviceChebyKernel<float> d_kernel( dynamic_cast<const HostChebyKernel<float>&>(kernel)); backproject(out, d_out_geometry, in, d_in_geometry, d_dem, fc, ds, d_kernel, dry_tropo_model, rdr2geo_params, geo2rdr_params); } else { throw isce3::except::RuntimeError(ISCE_SRCINFO(), "not implemented"); } } }}} // namespace isce3::cuda::focus
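//==============================================================================
// A minimal host-side sketch of the pulse-index arithmetic used by
// getCPIBounds() above: a coherent processing interval [tstart, tstop] is
// mapped onto indices of a uniformly sampled azimuth-time axis described by
// its start t0, spacing dt, and number of pulses. The free-function form and
// the name cpiToPulseIndices are illustrative assumptions, not isce3 API.
//==============================================================================
#include <algorithm>
#include <cmath>
#include <utility>

// Returns the half-open pulse range [kstart, kstop), with kstart clamped to 0
// and kstop clamped to the number of pulses.
std::pair<int, int> cpiToPulseIndices(double tstart, double tstop,
                                      double t0, double dt, int size)
{
    // floor/ceil so that the requested time window is fully covered
    const int kstart = static_cast<int>(std::floor((tstart - t0) / dt));
    const int kstop  = static_cast<int>(std::ceil((tstop - t0) / dt));
    return {std::max(kstart, 0), std::min(kstop, size)};
}
// Example: a 2 s CPI centered on t = 10 s with t0 = 0 s and dt = 1e-3 s
// yields the pulse range [9000, 11000).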
namespace xgboost { template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : index_(-1), device_(-1), start_(0), on_d_(false), vec_(nullptr) {} static size_t ShardStart(size_t size, int ndevices, int index) { size_t portion = dh::DivRoundUp(size, ndevices); size_t begin = index * portion; begin = begin > size ? size : begin; return begin; } static size_t ShardSize(size_t size, int ndevices, int index) { size_t portion = dh::DivRoundUp(size, ndevices); size_t begin = index * portion, end = (index + 1) * portion; begin = begin > size ? size : begin; end = end > size ? size : end; return end - begin; } void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; index_ = vec_->devices_.Index(device); size_t size_h = vec_->Size(); int ndevices = vec_->devices_.Size(); start_ = ShardStart(size_h, ndevices, index_); size_t size_d = ShardSize(size_h, ndevices, index_); dh::safe_cuda(cudaSetDevice(device_)); data_.resize(size_d); on_d_ = !vec_->on_h_; } void ScatterFrom(const T* begin) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(); dh::safe_cuda(cudaSetDevice(device_)); dh::safe_cuda(cudaMemcpy(data_.data().get(), begin + start_, data_.size() * sizeof(T), cudaMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(); dh::safe_cuda(cudaSetDevice(device_)); dh::safe_cuda(cudaMemcpy(begin.get() + start_, data_.data().get(), data_.size() * sizeof(T), cudaMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(); dh::safe_cuda(cudaSetDevice(device_)); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(); other->LazySyncDevice(); dh::safe_cuda(cudaSetDevice(device_)); dh::safe_cuda(cudaMemcpy(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), cudaMemcpyDefault)); } void LazySyncHost() { dh::safe_cuda(cudaSetDevice(device_)); thrust::copy(data_.begin(), data_.end(), vec_->data_h_.begin() + start_); on_d_ = false; } void LazySyncDevice() { if (on_d_) { return; } // data is on the host size_t size_h = vec_->data_h_.size(); int ndevices = vec_->devices_.Size(); start_ = ShardStart(size_h, ndevices, index_); size_t size_d = ShardSize(size_h, ndevices, index_); dh::safe_cuda(cudaSetDevice(device_)); data_.resize(size_d); thrust::copy(vec_->data_h_.begin() + start_, vec_->data_h_.begin() + start_ + size_d, data_.begin()); on_d_ = true; // this may cause a race condition if LazySyncDevice() is called // from multiple threads in parallel; // however, the race condition is benign, and will not cause problems vec_->on_h_ = false; vec_->size_d_ = vec_->data_h_.size(); } int index_; int device_; thrust::device_vector<T> data_; size_t start_; // true if there is an up-to-date copy of data on device, false otherwise bool on_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, GPUSet devices) : devices_(devices), on_h_(devices.IsEmpty()), size_d_(0) { if (!devices.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // Init can be std::vector<T> or std::initializer_list<T> template <class Init> HostDeviceVectorImpl(const Init& init, GPUSet devices) : devices_(devices), on_h_(devices.IsEmpty()), size_d_(0) { if (!devices.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int 
ndevices = devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, devices_[i]); }); } HostDeviceVectorImpl(const HostDeviceVectorImpl<T>&) = delete; HostDeviceVectorImpl(HostDeviceVectorImpl<T>&&) = delete; void operator=(const HostDeviceVectorImpl<T>&) = delete; void operator=(HostDeviceVectorImpl<T>&&) = delete; size_t Size() const { return on_h_ ? data_h_.size() : size_d_; } GPUSet Devices() const { return devices_; } T* DevicePointer(int device) { CHECK(devices_.Contains(device)); LazySyncDevice(device); return shards_[devices_.Index(device)].data_.data().get(); } size_t DeviceSize(int device) { CHECK(devices_.Contains(device)); LazySyncDevice(device); return shards_[devices_.Index(device)].data_.size(); } size_t DeviceStart(int device) { CHECK(devices_.Contains(device)); LazySyncDevice(device); return shards_[devices_.Index(device)].start_; } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (on_h_) { thrust::copy(begin, end, data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (on_h_) { thrust::copy(data_h_.begin(), data_h_.end(), begin); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { if (on_h_) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); if (on_h_ && other->on_h_) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); } else { CHECK(devices_ == other->devices_); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_[i]); }); } } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (on_h_) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (on_h_) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(); return data_h_; } void Reshard(GPUSet new_devices) { if (devices_ == new_devices) return; CHECK(devices_.IsEmpty()); devices_ = new_devices; InitShards(); } void Resize(size_t new_size, T v) { if (new_size == Size()) return; if (Size() == 0 && !devices_.IsEmpty()) { // fast on-device resize on_h_ = false; size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(); data_h_.resize(new_size, v); } } void LazySyncHost() { if (on_h_) return; if (data_h_.size() != size_d_) data_h_.resize(size_d_); dh::ExecuteShards(&shards_, [&](DeviceShard& shard) { shard.LazySyncHost(); }); on_h_ = true; } void LazySyncDevice(int device) { CHECK(devices_.Contains(device)); shards_[devices_.Index(device)].LazySyncDevice(); } std::vector<T> data_h_; bool on_h_; // the total size of the data stored on the devices size_t size_d_; GPUSet 
devices_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, GPUSet devices) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, devices); } template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, GPUSet devices) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, devices); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, GPUSet devices) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, devices); } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { HostDeviceVectorImpl<T>* tmp = impl_; impl_ = nullptr; delete tmp; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { impl_->ScatterFrom(begin, end); } template <typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(HostDeviceVector<T>* other) { impl_->Copy(other->impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> void HostDeviceVector<T>::Reshard(GPUSet new_devices) { impl_->Reshard(new_devices); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<unsigned int>; } // namespace xgboost
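//==============================================================================
// A condensed, single-device sketch of the lazy synchronization scheme behind
// HostDeviceVectorImpl above: the data lives on the host or on the device and
// is copied across only when the other side is accessed. Multi-GPU sharding
// and the separate host/device validity flags are omitted; the class name
// LazyVector is an illustrative assumption, not xgboost code.
//==============================================================================
#include <cuda_runtime.h>
#include <cstddef>
#include <vector>

template <typename T>
class LazyVector {
 public:
  explicit LazyVector(std::size_t n) : host_(n), on_host_(true) {
    cudaMalloc(&device_, n * sizeof(T));
  }
  ~LazyVector() { cudaFree(device_); }
  LazyVector(const LazyVector&) = delete;
  LazyVector& operator=(const LazyVector&) = delete;

  // Host access: pull the data back if the device copy is the fresh one.
  std::vector<T>& HostVector() {
    if (!on_host_) {
      cudaMemcpy(host_.data(), device_, host_.size() * sizeof(T),
                 cudaMemcpyDeviceToHost);
      on_host_ = true;
    }
    return host_;
  }

  // Device access: push the data out if the host copy is the fresh one.
  T* DevicePointer() {
    if (on_host_) {
      cudaMemcpy(device_, host_.data(), host_.size() * sizeof(T),
                 cudaMemcpyHostToDevice);
      on_host_ = false;
    }
    return device_;
  }

 private:
  std::vector<T> host_;
  T* device_ = nullptr;
  bool on_host_;
};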
#include "AffineTrans.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include <npp.h> #include <nppdefs.h> #include <nppcore.h> #include <nppi.h> // 宏:M_PI // π值。对于某些操作系统,M_PI可能没有定义,这里补充定义 M_PI。 #ifndef M_PI #define M_PI 3.14159265359 #endif // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 结构体:AffineTransParam(旋转仿射变换的内部参数) // 该结构体定义了旋转仿射变换的内部参数。它的作用在于简化参数传递的形式,在调用 // 算法函数的时候,Host 代码会首先根据类的成员变量和用户的参数计算出这个形似的 // 内部参数,然后再将这个内部参数传递给 Kernel,进而由 Kernel 完成对图像的并行 // 处理。 typedef struct AffineTransParam_st { float x0, y0; // 旋转前平移的向量 float cosalpha, sinalpha; // 旋转角度对应的余弦和正弦值 float x1, y1; // 旋转后平移的向量 } AffineTransParam; // 全局变量:_hardIplInimgTex(作为输入图像的纹理内存引用) // 纹理内存只能用于全局变量,因此将硬件插值的旋转变换的 Kernel 函数的输入图像列 // 于此处。 static texture<unsigned char, 2, cudaReadModeElementType> _hardIplInimgTex; // Kernel 函数:_hardRotateKer(利用硬件插值实现的旋转变换) // 利用纹理内存提供的硬件插值功能,实现的并行旋转变换。没有输入图像的参数,是因 // 为输入图像通过纹理内存来读取数据,纹理内存只能声明为全局变量。 static __global__ void // Kernel 函数无返回值。 _hardRotateKer( ImageCuda outimg, // 输出图像 AffineTransParam param // 旋转变换的参数 ); // Kernel 函数:_softRotateKer(利用软件件插值实现的旋转变换) // 利用 Fanczos 软件硬件插值算法,实现的并行旋转变换。 static __global__ void // Kernel 函数无返回值。 _softRotateKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 AffineTransParam param // 旋转变换的参数 ); // Host 函数:_rotateNpp(基于 NPP 的旋转变换实现) // 由于调用 NPP 支持库中的函数同 Runtime API 的 CUDA Kernel 调用具有较大的差 // 别,这里我们单独将 NPP 的旋转变换实现单独提出来作为一个函数以方便代码阅读。 // 注意,这个函数没有对输入输出图像进行前后处理工作,因此,必须要求输入输出图像 // 在当前 Device 上合法可用的数据空间,否则会带来不可预知的错误。 static __host__ int // 返回值:函数是否正确执行,若函数正确执行,返回 // NO_ERROR。 _rotateNpp( Image *inimg, // 输入图像,要求该图像必须在当前 Device 上有数 // 据。 Image *outimg, // 输出图像,要求该图像必须在当前 Device 上有数 // 据。 AffineTransParam atp // 旋转变换参数 ); // Host 函数:_rotateGeneral(通用旋转变换) // 作为整个旋转仿射变换的枢纽函数,所有的上层函数调用都会汇聚于此,并游该函数分 // 配调度下一层调度。在这个函数中包含了如下的功能:(1)对输入和输出图像进行数 // 据的准备工作,包括申请当前 Device 存储空间等;(2)针对不同的实现类型,对图 // 像数据进行个别的加工,如对于 NPP 实现调用 _rotateNpp 函数,对于硬插值实现调 // 纹理内存绑定操作等。 static __host__ int // 返回值:函数是否正确执行,若函数正确执行,返 // 回 NO_ERROR。 _rotateGeneral( Image *inimg, // 输入图像 Image *outimg, // 输出图像 AffineTransParam atp, // 旋转变换参数 int imptype // 实现方式 ); // 函数:_calcRotateCenterParam(按照中心点旋转计算内部参数) // 根据给定的 AffineTrans 类,根据其中的成员变量,计算处内部的参数,内部参数随 // 后用于调用 Kernel 函数。 static __host__ __device__ int // 返回值:函数是否正确执行,若函数正确执行,返 // 回 NO_ERROR。 _calcRotateCenterParam( AffineTrans *at, // 输入参数,需要计算内部参数的类 Image *inimg, // 输入参数,用于考虑 ROI 子图像的问题。 Image *outimg, // 输出参数,用于考虑 ROI 子图像的问题。 AffineTransParam *atp // 输出参数,转换出来的内部参数,参数中原来的数 // 据将会被抹除。 ); // 函数:calcRotateShiftParam(按照平移旋转计算内部参数) // 根据给定的 AffineTrans 类,根据其中的成员变量,计算处内部的参数,内部参数随 // 后用于调用 Kernel 函数。 static __host__ __device__ int // 返回值:函数是否正确执行,若函数正确执行,返 // 回 NO_ERROR。 _calcRotateShiftParam( AffineTrans *at, // 输入参数,需要计算内部参数的类 Image *inimg, // 输入参数,用于考虑 ROI 子图像的问题。 Image *outimg, // 输出参数,用于考虑 ROI 子图像的问题。 AffineTransParam *atp // 输出参数,转换出来的内部参数,参数中原来的数 ); // Kernel 函数:_hardRotateKer(利用硬件插值实现的旋转变换) static __global__ void _hardRotateKer(ImageCuda outimg, AffineTransParam param) { // 计算想成对应的输出点的位置,其中 dstc 和 dstr 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并 // 行度缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 // 4 行上,因此,对于 dstr 需要进行乘 4 计算。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return; // 计算第一个输出坐标点对应的图像数据数组下标。 int dstidx = dstr * outimg.pitchBytes + dstc; // 
声明目标图像输出像素对应的源图像中的坐标点,由于计算会得到小数结果,因此 // 使用浮点型存储该做标。 float srcc, srcr; // 由于是通过目标坐标点反推回源图像中的坐标点,因此这里实用的是逆向的旋转变 // 换。首先进行的是旋转后的平移,由于是逆向操作,这里是减法。 int tmpc = dstc - param.x1; int tmpr = dstr - param.y1; // 利用旋转矩阵,进行旋转变换,由于是逆向操作,这里的旋转矩阵也是正向变换的 // 旋转矩阵的逆矩阵。最后,进行旋转前的平移,同样也是逆向操作,故用减法。 srcc = tmpc * param.cosalpha - tmpr * param.sinalpha - param.x0; srcr = tmpc * param.sinalpha + tmpr * param.cosalpha - param.y0; // 通过上面的步骤,求出了第一个输出坐标对应的源图像坐标。这里利用纹理内存的 // 硬件插值功能,直接使用浮点型的坐标读取相应的源图像“像素”值,并赋值给目 // 标图像。这里没有进行对源图像读取的越界检查,这是因为纹理内存硬件插值功能 // 可以处理越界访问的情况,越界访问会按照事先的设置得到一个相对合理的像素颜 // 色值,不会引起错误。 outimg.imgMeta.imgData[dstidx] = tex2D(_hardIplInimgTex, srcc, srcr); // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各点 // 之间没有变化,故不用检查。 if (++dstr >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 dstidx += outimg.pitchBytes; // 根据上一个源坐标位置计算当前的源坐标位置。由于只有 y 分量增加 1,因 // 此,对应的源坐标只有在涉及到 dstr 的项上有变化,从而消除了一系列乘法 // 计算,而通过两个源坐标的差值进行简单的加减法而得。 srcc -= param.sinalpha; srcr += param.cosalpha; // 将对应的源坐标位置出的插值像素写入到目标图像的当前像素点中。 outimg.imgMeta.imgData[dstidx] = tex2D(_hardIplInimgTex, srcc, srcr); } } // Kernel 函数:_softRotateKer(利用软件插值实现的旋转变换) static __global__ void _softRotateKer(ImageCuda inimg, ImageCuda outimg, AffineTransParam param) { // 计算想成对应的输出点的位置,其中 dstc 和 dstr 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并 // 行度缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 // 4 行上,因此,对于 dstr 需要进行乘 4 计算。 int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height) return; // 计算第一个输出坐标点对应的图像数据数组下标。 int dstidx = dstr * outimg.pitchBytes + dstc; // 声明目标图像输出像素对应的源图像中的坐标点,由于计算会得到小数结果,因此 // 使用浮点型存储该做标。 float srcc, srcr; // 由于是通过目标坐标点反推回源图像中的坐标点,因此这里实用的是逆向的旋转变 // 换。首先进行的是旋转后的平移,由于是逆向操作,这里是减法。 int tmpc = dstc - param.x1; int tmpr = dstr - param.y1; // 利用旋转矩阵,进行旋转变换,由于是逆向操作,这里的旋转矩阵也是正向变换的 // 旋转矩阵的逆矩阵。最后,进行旋转前的平移,同样也是逆向操作,故用减法。 srcc = tmpc * param.cosalpha - tmpr * param.sinalpha - param.x0; srcr = tmpc * param.sinalpha + tmpr * param.cosalpha - param.y0; // 调用 Fanczos 软件插值算法实现,获得源图像中对应坐标下的插值值。由于插值 // 算法实现函数处理了越界的情况,因此这里可以安全的把一些问题丢给插值算法实 // 现函数来处理。 outimg.imgMeta.imgData[dstidx] = _fanczosInterpoDev(inimg, srcc, srcr); // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各点 // 之间没有变化,故不用检查。 if (++dstr >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 dstidx += outimg.pitchBytes; // 根据上一个源坐标位置计算当前的源坐标位置。由于只有 y 分量增加 1,因 // 此,对应的源坐标只有在涉及到 dstr 的项上有变化,从而消除了一系列乘法 // 计算,而通过两个源坐标的差值进行简单的加减法而得。 srcc -= param.sinalpha; srcr += param.cosalpha; // 将对应的源坐标位置出的插值像素写入到目标图像的当前像素点中。 outimg.imgMeta.imgData[dstidx] = _fanczosInterpoDev(inimg, srcc, srcr); } } // Host 函数:_rotateNpp(基于 NPP 的旋转变换实现) static __host__ int _rotateNpp(Image *inimg, Image *outimg, AffineTransParam atp) { // 检查输入和输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 获得输入输出图像对应的 ImageCuda 型指针。 ImageCuda *inimgCud = IMAGE_CUDA(inimg); ImageCuda *outimgCud = IMAGE_CUDA(outimg); NppiRect srcroi; // 输入图像的 ROI srcroi.x = inimg->roiX1; srcroi.y = inimg->roiY1; srcroi.width = inimg->roiX2 - inimg->roiX1; srcroi.height = inimg->roiY2 - inimg->roiY1; // 计算出 4 个基准点的坐标变换,这四个基本坐标选择了源图像 ROI 上的四个角 // 点。 double 
afquad[4][2]; // 存放 4 个基准点在目标图像中的坐标。 // NPP 函数仅支持旋转后图像平移,因此需要根据内部参数中的旋转前平移和旋转后 // 平移推算出整体的平移量。 double xshift = atp.x0 * atp.cosalpha + atp.y0 * atp.sinalpha + atp.x1; double yshift = atp.y0 * atp.cosalpha - atp.x0 * atp.sinalpha + atp.y1; // 旋转角度,为了软件工程的美观,以及 double 类型的需求,这里是通过计算重新 // 计算角度,当然,也可以通过改造 AffineTransParam 来减去这一计算。 double alpha = asin(atp.sinalpha) * 180.0f / M_PI; // 调用 NPP 函数获取 ROI 四点对应到目标图像中的坐标。 NppStatus nppstatus; nppstatus = nppiGetRotateQuad(srcroi, afquad, alpha, xshift, yshift); if (nppstatus < NPP_SUCCESS) // 这里使用小于号的原因是 NPP 的错误码中,正数 return CUDA_ERROR; // 表示无错误的警告,0 表示无错误,负数才表示 // 真正发生了错误。 // 利用 NPP 函数求出旋转变换对应的仿射矩阵。利用这个矩阵可以将旋转变换的实 // 现,转化为调用仿射函数实行仿射变换。 double afcoeff[2][3]; nppstatus = nppiGetAffineTransform(srcroi, afquad, afcoeff); if (nppstatus < NPP_SUCCESS) return CUDA_ERROR; // 为调用 NPP 仿射函数做一些数据准备工作。由于 NPP 很好的支持了 ROI,所以, // 这里没有使用 ROI 子图像,而直接使用了整幅图像和 ROI 信息。 Npp8u *psrc = (Npp8u *)(inimg->imgData); // 输入图像的指针 Npp8u *pdst = (Npp8u *)(outimg->imgData); // 输出图像的指针 Npp32s srcstep = inimgCud->pitchBytes; // 输入图像的 Pitch Npp32s dststep = outimgCud->pitchBytes; // 输出图像的 Pitch NppiSize srcsize; // 输入图像的总尺寸 srcsize.width = inimg->width; // 宽 srcsize.height = inimg->height; // 高 NppiRect dstroi; // 输出图像的 ROI,这里输入图像的 ROI 已在前面完 dstroi.x = outimg->roiX1; // 成了赋值,此处无需再赋值。 dstroi.y = outimg->roiY1; dstroi.width = outimg->roiX2 - outimg->roiX1; dstroi.height = outimg->roiY2 - outimg->roiY1; int iplmode = NPPI_INTER_LINEAR; // 插值方式(这里我们采用了线性插值) // 调用 NPP 的仿射变换函数完成图像的旋转变换。 nppstatus = nppiWarpAffine_8u_C1R(psrc, srcsize, srcstep, srcroi, pdst, dststep, dstroi, afcoeff, iplmode); // 现已确定,现行的 NPP 版本并不算稳定,在某些 ROI 的情况下会出现莫名其妙的 // 无法处理的情况,这时会报告 NPP_ERROR 错误码,但是这个错误码的具体含义 // NVIDIA 并未给出一个明确的说法。显然这时由于 NPP 内部不稳定造成的。在 NPP // 文档的第 644 页关于函数 nppiWarpAffine_8u_C1R 的介绍中并未说明该函数会产 // 生 NPP_ERROR 的错误。希望未来的 NPP 版本可以解决不稳定的问题。 if (nppstatus < NPP_SUCCESS) return CUDA_ERROR; // 处理完毕返回。 return NO_ERROR; } // Host 函数:_rotateGeneral(通用旋转变换) static __host__ int _rotateGeneral(Image *inimg, Image *outimg, AffineTransParam atp, int imptype) { // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为输 // 入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图像 // 尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->width, inimg->height); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 如果实现方式为调用 NPP 支持库,由于实现方式同其他 CUDA Kernel 的实现法方 // 法差别较大,则在此直接转入 NPP 处理函数。 if (imptype == AFFINE_NVIDIA_LIB) return _rotateNpp(inimg, outimg, atp); // 提取输入图像的 ROI 子图像。 ImageCuda *inimgCud = IMAGE_CUDA(inimg); // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 针对不同的实现类型,选择不同的路径进行处理。 cudaError_t cuerrcode; switch (imptype) { // 使用硬件插值实现旋转变换: case AFFINE_HARD_IPL: // 设置数据通道描述符,因为只有一个颜色通道(灰度图),因此描述符中只有 // 第一个分量含有数据。概述据通道描述符用于纹理内存的绑定操作。 struct cudaChannelFormatDesc chndesc; chndesc = cudaCreateChannelDesc(sizeof (unsigned char) * 8, 0, 0, 0, cudaChannelFormatKindUnsigned); // 将输入图像的 ROI 子图像绑定到纹理内存。 
cuerrcode = cudaBindTexture2D( NULL, &_hardIplInimgTex, inimg->imgData, &chndesc, inimg->width, inimg->height, inimgCud->pitchBytes); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 调用 Kernel 函数,完成实际的图像旋转变换。 _hardRotateKer<<<gridsize, blocksize>>>(outsubimgCud, atp); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; break; // 使用软件插值实现旋转变换: case AFFINE_SOFT_IPL: // 调用 Kernel 函数,完成实际的图像旋转变换。 _softRotateKer<<<gridsize, blocksize>>>(*inimgCud, outsubimgCud, atp); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; break; // 其他方式情况下,直接返回非法数据错误。由于 NPP 实现已在前面跳转入了相应 // 的其他函数,该 switch-case 语句中未包含对 NPP 实现的处理。 default: return INVALID_DATA; } // 处理完毕,退出。 return NO_ERROR; } // 函数:calcRotateCenterParam(按照中心点旋转计算内部参数) static __host__ __device__ int _calcRotateCenterParam( AffineTrans *at, Image *inimg, Image *outimg, AffineTransParam *atp) { // 如果两个参数都为 NULL 则报错。如果 at 为 NULL,则无法计算;如果 atp 为 // NULL,则无法保存计算结果。 if (at == NULL || atp == NULL || inimg == NULL || outimg == NULL) return NULL_POINTER; // 获取图像旋转的中心点 int xc = at->getX(); int yc = at->getY(); // 如果旋转中心点角度在输入图像之外,则报错退出。 if (xc < 0 || xc >= inimg->width || yc < 0 || yc >= inimg->height) return INVALID_DATA; // 设置旋转前平移向量。基于中心的旋转相当于先将旋转中心移动到原点,在旋转后 // 再将图像移动回去。 atp->x0 = -xc; atp->y0 = -yc; // 计算旋转角度的余弦和正弦的值。 float alpharad = at->getAlpha() * M_PI / 180.0f; atp->cosalpha = cos(alpharad); atp->sinalpha = sin(alpharad); // 设置旋转后平移向量。 atp->x1 = xc ; atp->y1 = yc; // 针对 ROI 信息调整平移, AffineTrans 中的基准坐标是相对于整幅图像而言的, // 因此这里需要进行一下差值,从相对于整幅图像的基准坐标计算得到相对于 ROI // 子图像的坐标。注意,在 NPP 实现中,由于 NPP 具有对 ROI 的处理能力,因 // 此,对于 NPP 实现不需要对旋转中心点作出调整。 if (at->getImpType() != AFFINE_NVIDIA_LIB) { // 由于输入图像不需要考虑 ROI 范围,因此,我们在实现过程中,撇开了输入 // 图像的 ROI 区域,直接使用更加容易计算的整幅图像作为数据来源。 //atp->x0 += inimg->roiX1; //atp->y0 += inimg->roiY1; atp->x1 -= outimg->roiX1; atp->y1 -= outimg->roiY1; } // 处理完毕,成功返回。 return NO_ERROR; } // Host 成员方法:rotateCenter(基于中心的旋转) __host__ int AffineTrans::rotateCenter(Image *inimg, Image *outimg) { // 检查输入和输出图像,若有一个为 NULL,则报错。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 转换参数,将类内的参数转换成 Kernel 函数使用的内部参数。 AffineTransParam atp; int errcode; errcode = _calcRotateCenterParam(this, inimg, outimg, &atp); if (errcode != NO_ERROR) { // 如果在参数转换过程中发生错误,则记录 stateFlag 并退出。 this->stateFlag = errcode; return errcode; } // 交由枢纽函数 _rotateGeneral 进行后续的实际旋转变换。 errcode = _rotateGeneral(inimg, outimg, atp, this->impType); if (errcode != NO_ERROR) { // 如果在枢纽函数处理过程中发生错误,则记录 stateFlag 并退出。 this->stateFlag = errcode; return errcode; } // 处理完毕退出。 return NO_ERROR; } // 函数:calcRotateShiftParam(按照平移旋转计算内部参数) static __host__ __device__ int _calcRotateShiftParam( AffineTrans *at, Image *inimg, Image *outimg, AffineTransParam *atp) { // 如果两个参数都为 NULL 则报错。如果 at 为 NULL,则无法计算;如果 atp 为 // NULL,则无法保存计算结果。 if (at == NULL || atp == NULL || inimg == NULL || outimg == NULL) return NULL_POINTER; // 获取图像旋转的中心点 int xc = inimg->width / 2; int yc = inimg->height / 2; // 设置旋转前平移向量。基于中心的旋转相当于先将旋转中心移动到原点,在旋转后 // 再将图像移动回去。 atp->x0 = -xc + at->getX(); atp->y0 = -yc + at->getY(); // 计算旋转角度的余弦和正弦的值。 float alpharad = at->getAlpha() * M_PI / 180.0f; atp->cosalpha = cos(alpharad); atp->sinalpha = sin(alpharad); // 设置旋转后平移向量。 atp->x1 = xc ; atp->y1 = yc; // 针对 ROI 信息调整平移, AffineTrans 中的基准坐标是相对于整幅图像而言的, // 因此这里需要进行一下差值,从相对于整幅图像的基准坐标计算得到相对于 ROI // 子图像的坐标。注意,在 NPP 实现中,由于 NPP 具有对 ROI 的处理能力,因 // 此,对于 NPP 实现不需要对旋转中心点作出调整。 if (at->getImpType() != AFFINE_NVIDIA_LIB) { atp->x0 += inimg->roiX1; atp->y0 += inimg->roiY1; atp->x1 -= outimg->roiX1; atp->y1 -= outimg->roiY1; } // 处理完毕,成功返回。 return 
NO_ERROR; } // Host 成员方法:rotateShift(基于平移的旋转) __host__ int AffineTrans::rotateShift(Image *inimg, Image *outimg) { // 检查输入和输出图像,若有一个为 NULL,则报错。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 转换参数,将类内的参数转换成 Kernel 函数使用的内部参数。 AffineTransParam atp; int errcode; errcode = _calcRotateShiftParam(this, inimg, outimg, &atp); if (errcode != NO_ERROR) { // 如果在参数转换过程中发生错误,则记录 stateFlag 并退出。 this->stateFlag = errcode; return errcode; } // 交由枢纽函数 _rotateGeneral 进行后续的实际旋转变换。 errcode = _rotateGeneral(inimg, outimg, atp, this->impType); if (errcode != NO_ERROR) { // 如果在枢纽函数处理过程中发生错误,则记录 stateFlag 并退出。 this->stateFlag = errcode; return errcode; } // 处理完毕退出。 return NO_ERROR; }
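//==============================================================================
// A standalone host-side sketch of the inverse (destination-to-source) mapping
// used by _hardRotateKer and _softRotateKer above: subtract the post-rotation
// shift (x1, y1), apply the inverse rotation matrix, then subtract the
// pre-rotation shift (x0, y0). The interpolation step and ROI handling are
// left out; the struct and function names below are illustrative only.
//==============================================================================
struct RotateParam {
    float x0, y0;              // translation applied before the rotation
    float cosalpha, sinalpha;  // cosine and sine of the rotation angle
    float x1, y1;              // translation applied after the rotation
};

// Map an output pixel (dstc, dstr) back to fractional source-image coordinates.
inline void destToSource(const RotateParam& p, int dstc, int dstr,
                         float* srcc, float* srcr)
{
    const float tmpc = dstc - p.x1;
    const float tmpr = dstr - p.y1;
    *srcc = tmpc * p.cosalpha - tmpr * p.sinalpha - p.x0;
    *srcr = tmpc * p.sinalpha + tmpr * p.cosalpha - p.y0;
}
// The kernels exploit the fact that moving one row down in the output changes
// tmpr by exactly 1, so the next source coordinate follows from two additions
// (srcc -= sinalpha; srcr += cosalpha) instead of a full transform.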
#include <torch/extension.h> #include <cuda_runtime.h> #include <iostream> #include <stdio.h> #define TensorAccessor5D torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,int32_t> /* #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else static __inline__ __device__ double atomicAdd(double *address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; if (val==0.0) return __longlong_as_double(old); do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif */ template <typename scalar_t> __global__ void sa_weight_forward_kernel( const TensorAccessor5D query, const TensorAccessor5D key, TensorAccessor5D weight,int B,int T,int C,int H,int W,int radius,int dilation){ int w = blockIdx.x * blockDim.x + threadIdx.x;//col int h = blockIdx.y * blockDim.y + threadIdx.y;//row int time = blockIdx.z;//time int diameter=2*radius+1; //query B*T*C*H*W //key B*T*C*H*W //weight B*T*9T*H*W if(w<W&&h<H&&time<T){ for(int batch=0;batch<B;++batch){ for(int cal_time=0;cal_time<T;++cal_time){ for(int dh=-radius*dilation;dh<=radius*dilation;dh+=dilation){ for(int dw=-radius*dilation;dw<=radius*dilation;dw+=dilation){ scalar_t sum=0.0; if(h+dh<H&&h+dh>=0&&w+dw<W&&w+dw>=0){ for(int c=0;c<C;++c){ scalar_t q=query[batch][time][c][h][w]; scalar_t k=key[batch][cal_time][c][h+dh][w+dw]; sum+=q*k; } } weight[batch][time][cal_time*diameter*diameter+(dh/dilation+radius)*(2*radius+1)+(dw/dilation+radius)][h][w]=sum; } } } } } } template <typename scalar_t> __global__ void sa_map_forward_kernel( const TensorAccessor5D weight, const TensorAccessor5D proj, TensorAccessor5D out,int B,int T,int C,int H,int W,int radius,int dilation){ int w = blockIdx.x * blockDim.x + threadIdx.x;//col int h = blockIdx.y * blockDim.y + threadIdx.y;//row int time = blockIdx.z;//time int diameter=2*radius+1; //weight B*T*9T*H*W //proj B*T*C*H*W //out B*T*C*H*W if(w<W&&h<H&&time<T){ for(int batch=0;batch<B;++batch){ for(int c=0;c<C;++c){ scalar_t sum=0.0; for(int cal_time=0;cal_time<T;++cal_time){ for(int dh=-radius*dilation;dh<=radius*dilation;dh+=dilation){ for(int dw=-radius*dilation;dw<=radius*dilation;dw+=dilation){ if(h+dh<H&&h+dh>=0&&w+dw<W&&w+dw>=0){ scalar_t weight_temp=weight[batch][time][cal_time*diameter*diameter+(dh/dilation+radius)*(2*radius+1)+(dw/dilation+radius)][h][w]; scalar_t proj_value=proj[batch][cal_time][c][h+dh][w+dw]; sum+=weight_temp*proj_value; } } } } out[batch][time][c][h][w]=sum; } } } } template <typename scalar_t> __global__ void sa_weight_backward_kernel_query( const TensorAccessor5D dweight, const TensorAccessor5D key, TensorAccessor5D dquery,int B,int T,int C,int H,int W,int radius,int dilation){ int w = blockIdx.x * blockDim.x + threadIdx.x;//col int h = blockIdx.y * blockDim.y + threadIdx.y;//row int time = blockIdx.z;//time int diameter=2*radius+1; //weight B*T*9T*H*W //proj B*T*C*H*W //out B*T*C*H*W if(w<W&&h<H&&time<T){ for(int batch=0;batch<B;++batch){ for(int c=0;c<C;++c){ scalar_t sum=0.0; for(int cal_time=0;cal_time<T;++cal_time){ for(int dh=-radius*dilation;dh<=radius*dilation;dh+=dilation){ for(int dw=-radius*dilation;dw<=radius*dilation;dw+=dilation){ if(h+dh<H&&h+dh>=0&&w+dw<W&&w+dw>=0){ scalar_t _dweight=dweight[batch][time][cal_time*diameter*diameter+(dh/dilation+radius)*(2*radius+1)+(dw/dilation+radius)][h][w]; scalar_t _key=key[batch][cal_time][c][h+dh][w+dw]; 
sum+=_dweight*_key; } } } } dquery[batch][time][c][h][w]=sum; } } } } template <typename scalar_t> __global__ void sa_weight_backward_kernel_key( const TensorAccessor5D dweight, const TensorAccessor5D query, TensorAccessor5D dkey,int B,int T,int C,int H,int W,int radius,int dilation){ int w = blockIdx.x * blockDim.x + threadIdx.x;//col int h = blockIdx.y * blockDim.y + threadIdx.y;//row int time = blockIdx.z;//time int diameter=2*radius+1; //weight B*T*9T*H*W //proj B*T*C*H*W //out B*T*C*H*W if(w<W&&h<H&&time<T){ for(int batch=0;batch<B;++batch){ for(int c=0;c<C;++c){ for(int cal_time=0;cal_time<T;++cal_time){ for(int dh=-radius*dilation;dh<=radius*dilation;dh+=dilation){ for(int dw=-radius*dilation;dw<=radius*dilation;dw+=dilation){ if(h+dh<H&&h+dh>=0&&w+dw<W&&w+dw>=0){ scalar_t _dweight=dweight[batch][time][cal_time*diameter*diameter+(dh/dilation+radius)*(2*radius+1)+(dw/dilation+radius)][h][w]; scalar_t _query=query[batch][time][c][h][w]; atomicAdd(&dkey[batch][cal_time][c][h+dh][w+dw],_dweight*_query); } } } } } } } } template <typename scalar_t> __global__ void sa_map_backward_kernel_weight( const TensorAccessor5D dout, const TensorAccessor5D proj, TensorAccessor5D dweight,int B,int T,int C,int H,int W,int radius,int dilation){ int w = blockIdx.x * blockDim.x + threadIdx.x;//col int h = blockIdx.y * blockDim.y + threadIdx.y;//row int time = blockIdx.z;//time int diameter=2*radius+1; //weight B*T*9T*H*W //proj B*T*C*H*W //out B*T*C*H*W if(w<W&&h<H&&time<T){ for(int batch=0;batch<B;++batch){ for(int cal_time=0;cal_time<T;++cal_time){ for(int dh=-radius*dilation;dh<=radius*dilation;dh+=dilation){ for(int dw=-radius*dilation;dw<=radius*dilation;dw+=dilation){ scalar_t sum=0.0; for(int c=0;c<C;++c){ if(h+dh<H&&h+dh>=0&&w+dw<W&&w+dw>=0){ scalar_t _proj=proj[batch][cal_time][c][h+dh][w+dw]; scalar_t _dout=dout[batch][time][c][h][w]; sum+=_dout*_proj; } } dweight[batch][time][cal_time*diameter*diameter+(dh/dilation+radius)*(2*radius+1)+(dw/dilation+radius)][h][w]=sum; } } } } } } template <typename scalar_t> __global__ void sa_map_backward_kernel_proj( const TensorAccessor5D dout, const TensorAccessor5D weight, TensorAccessor5D dproj,int B,int T,int C,int H,int W,int radius,int dilation){ int w = blockIdx.x * blockDim.x + threadIdx.x;//col int h = blockIdx.y * blockDim.y + threadIdx.y;//row int time = blockIdx.z;//time int diameter=2*radius+1; //weight B*T*9T*H*W //proj B*T*C*H*W //out B*T*C*H*W if(w<W&&h<H&&time<T){ for(int batch=0;batch<B;++batch){ for(int c=0;c<C;++c){ for(int cal_time=0;cal_time<T;++cal_time){ for(int dh=-radius*dilation;dh<=radius*dilation;dh+=dilation){ for(int dw=-radius*dilation;dw<=radius*dilation;dw+=dilation){ if(h+dh<H&&h+dh>=0&&w+dw<W&&w+dw>=0){ scalar_t weight_temp=weight[batch][time][cal_time*diameter*diameter+(dh/dilation+radius)*(2*radius+1)+(dw/dilation+radius)][h][w]; scalar_t _dout=dout[batch][time][c][h][w]; atomicAdd(&dproj[batch][cal_time][c][h+dh][w+dw],_dout*weight_temp); } } } } } } } } void _sa_weight_forward_cuda(const torch::Tensor& query,const torch::Tensor& key,torch::Tensor& weight,int B,int T,int C,int H,int W,int radius,int dilation){ dim3 threads(16,16); dim3 blocks((W+threads.x-1)/threads.x,(H+threads.y-1)/threads.y,T); AT_DISPATCH_FLOATING_TYPES(weight.scalar_type(), "sa_weight_forward_cuda", ([&] { sa_weight_forward_kernel<scalar_t><<<blocks, threads>>>( query.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(), key.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(), 
weight.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(),B,T,C,H,W,radius,dilation); })); } void _sa_map_forward_cuda(const torch::Tensor& weight,const torch::Tensor& proj,torch::Tensor& out,int B,int T,int C,int H,int W,int radius,int dilation){ dim3 threads(16,16); dim3 blocks((W+threads.x-1)/threads.x,(H+threads.y-1)/threads.y,T); AT_DISPATCH_FLOATING_TYPES(weight.scalar_type(), "sa_map_forward_cuda", ([&] { sa_map_forward_kernel<scalar_t><<<blocks, threads>>>( weight.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(), proj.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(), out.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(),B,T,C,H,W,radius,dilation); })); } void _sa_weight_backward_cuda(const torch::Tensor& dw,const torch::Tensor& query, const torch::Tensor& key,torch::Tensor& dquery,torch::Tensor& dkey, int B,int T,int C,int H,int W,int radius,int dilation){ dim3 threads(16,16); dim3 blocks((W+threads.x-1)/threads.x,(H+threads.y-1)/threads.y,T); AT_DISPATCH_FLOATING_TYPES(dw.scalar_type(), "sa_weight_backward_cuda", ([&] { const TensorAccessor5D dw_acc=dw.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); const TensorAccessor5D query_acc=query.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); const TensorAccessor5D key_acc=key.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); TensorAccessor5D dquery_acc=dquery.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); TensorAccessor5D dkey_acc=dkey.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); sa_weight_backward_kernel_query<scalar_t><<<blocks, threads>>>(dw_acc,key_acc,dquery_acc,B,T,C,H,W,radius,dilation); sa_weight_backward_kernel_key<scalar_t><<<blocks, threads>>>(dw_acc,query_acc,dkey_acc,B,T,C,H,W,radius,dilation); })); } void _sa_map_backward_cuda(const torch::Tensor& dout, const torch::Tensor& weight, const torch::Tensor& proj,torch::Tensor& dweight,torch::Tensor& dproj, int B,int T,int C,int H,int W,int radius,int dilation){ dim3 threads(16,16); dim3 blocks((W+threads.x-1)/threads.x,(H+threads.y-1)/threads.y,T); AT_DISPATCH_FLOATING_TYPES(dout.scalar_type(), "sa_map_backward_cuda", ([&] { const TensorAccessor5D dout_acc=dout.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); const TensorAccessor5D weight_acc=weight.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); const TensorAccessor5D proj_acc=proj.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); TensorAccessor5D dweight_acc=dweight.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); TensorAccessor5D dproj_acc=dproj.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,int32_t>(); sa_map_backward_kernel_weight<scalar_t><<<blocks, threads>>>(dout_acc,proj_acc,dweight_acc,B,T,C,H,W,radius,dilation); sa_map_backward_kernel_proj<scalar_t><<<blocks, threads>>>(dout_acc,weight_acc,dproj_acc,B,T,C,H,W,radius,dilation); })); }
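//==============================================================================
// A small sketch of the weight-channel indexing shared by every kernel above:
// for T frames and a dilated spatial neighbourhood of diameter 2*radius+1, the
// attention weight of frame cal_time at offset (dh, dw) lives in channel
//   cal_time*diameter*diameter + (dh/dilation+radius)*diameter + (dw/dilation+radius)
// of the B x T x (T*diameter^2) x H x W weight tensor. The helper name
// weightChannel is an illustrative assumption, not part of this extension.
//==============================================================================
#include <cassert>

inline int weightChannel(int cal_time, int dh, int dw, int radius, int dilation)
{
    const int diameter = 2 * radius + 1;
    // dh and dw are expected to be multiples of `dilation` in
    // [-radius*dilation, radius*dilation], exactly as the kernel loops produce.
    assert(dh % dilation == 0 && dw % dilation == 0);
    return cal_time * diameter * diameter
         + (dh / dilation + radius) * diameter
         + (dw / dilation + radius);
}
// Example: radius = 1, dilation = 1 gives diameter = 3, so frame 2's weight at
// offset (dh, dw) = (0, 1) is stored in channel 2*9 + 1*3 + 2 = 23.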
* It also shows how to correctly templatize dynamically allocated shared * memory arrays. * Host code. */ #include <shrUtils.h> // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples #include <shrQATest.h> // This is for automated testing output (--qatest) #include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> // includes, kernels #include "simpleTemplates_kernel.cu" int g_TotalFailures = 0; //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. 
<<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); } checkCudaErrors( cudaSetDevice(devID) ); printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, deviceProp.name); return devID; } // This function returns the best GPU (with maximum GFLOPS) int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount( &device_count ); // Find the best major SM Architecture GPU device while (current_device < device_count) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = MAX(best_SM_arch, deviceProp.major); } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if( compute_perf > max_compute_perf ) { // If we find GPU with SM major > 2, search only these if ( best_SM_arch > 2 ) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } ++current_device; } return max_perf_device; } // Initialization code to find the best CUDA Device int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameter\n "); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( devID ) ); checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } return devID; } // end of CUDA Helper Functions //////////////////////////////////////////////////////////////////////////////// // declaration, forward template <class T> void runTest( int argc, char** argv, int len); template<class T> void computeGold( T* reference, T* idata, const unsigned int len) { const T T_len = static_cast<T>( len); for( unsigned int i = 0; i < len; ++i) { reference[i] = idata[i] * T_len; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { shrQAStart(argc, argv); printf("> runTest<float,32>\n"); runTest<float>( argc, argv, 32); printf("> runTest<int,64>\n"); runTest<int>( argc, argv, 64); printf("\n[simpleTemplates] -> Test Results: %d Failures\n", g_TotalFailures); cudaDeviceReset(); shrQAFinishExit(argc, 
(const char **)argv, (g_TotalFailures == 0) ? QA_PASSED : QA_FAILED); } // To completely templatize runTest (below) with cutil, we need to use // template specialization to wrap up CUTIL's array comparison and file writing // functions for different types. // Here's the generic wrapper for cutCompare* template<class T> class ArrayComparator { public: bool compare( const T* reference, T* data, unsigned int len) { fprintf(stderr, "Error: no comparison function implemented for this type\n"); return false; } }; // Here's the specialization for ints: template<> class ArrayComparator<int> { public: bool compare( const int* reference, int* data, unsigned int len) { return compareData(reference, data, len, 0.15f, 0.0f); } }; // Here's the specialization for floats: template<> class ArrayComparator<float> { public: bool compare( const float* reference, float* data, unsigned int len) { return compareData(reference, data, len, 0.15f, 0.15f); } }; // Here's the generic wrapper for cutWriteFile* template<class T> class ArrayFileWriter { public: bool write(const char* filename, T* data, unsigned int len, float epsilon) { fprintf(stderr, "Error: no file write function implemented for this type\n"); return false; } }; // Here's the specialization for ints: template<> class ArrayFileWriter<int> { public: bool write(const char* filename, int* data, unsigned int len, float epsilon) { return sdkWriteFile(filename, data, len, epsilon, false); } }; // Here's the specialization for floats: template<> class ArrayFileWriter<float> { public: bool write(const char* filename, float* data, unsigned int len, float epsilon) { return sdkWriteFile(filename, data, len, epsilon, false); } }; //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// template<class T> void runTest( int argc, char** argv, int len) { int devID; cudaDeviceProp deviceProps; devID = findCudaDevice(argc, (const char**)argv); // get number of SMs on this GPU checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID)); printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount); StopWatchInterface *timer = NULL; sdkCreateTimer( &timer ); sdkStartTimer ( &timer ); unsigned int num_threads = len; unsigned int mem_size = sizeof( float) * num_threads; // allocate host memory T* h_idata = (T*) malloc( mem_size); // initalize the memory for( unsigned int i = 0; i < num_threads; ++i) { h_idata[i] = (T) i; } // allocate device memory T* d_idata; checkCudaErrors( cudaMalloc( (void**) &d_idata, mem_size)); // copy host memory to device checkCudaErrors( cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) ); // allocate device memory for result T* d_odata; checkCudaErrors( cudaMalloc( (void**) &d_odata, mem_size)); // setup execution parameters dim3 grid( 1, 1, 1); dim3 threads( num_threads, 1, 1); // execute the kernel testKernel<T><<< grid, threads, mem_size >>>( d_idata, d_odata); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); // allocate mem for the result on host side T* h_odata = (T*) malloc( mem_size); // copy result from device to host checkCudaErrors( cudaMemcpy( h_odata, d_odata, sizeof(T) * num_threads, cudaMemcpyDeviceToHost) ); sdkStopTimer( &timer ); printf( "Processing time: %f (ms)\n", sdkGetTimerValue( &timer )); sdkDeleteTimer( &timer ); // compute reference solution T* reference = (T*) malloc( mem_size); 
computeGold<T>( reference, h_idata, num_threads); ArrayComparator<T> comparator; ArrayFileWriter<T> writer; // check result if( checkCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test writer.write( "./data/regression.dat", h_odata, num_threads, 0.0f ); } else { // custom output handling when no regression test running // in this case check if the result is equivalent to the expected soluion bool res = comparator.compare( reference, h_odata, num_threads); printf( "Compare %s\n\n", (1 == res) ? "OK" : "MISMATCH"); g_TotalFailures += (1 != res); } // cleanup memory free( h_idata); free( h_odata); free( reference); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); cudaDeviceReset(); }
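//==============================================================================
// The kernel file included above (simpleTemplates_kernel.cu) is not part of
// this listing. The usual way to templatize dynamically allocated shared
// memory -- the technique this sample demonstrates -- is a per-type
// specialization of a small wrapper, sketched below together with a kernel in
// the spirit of testKernel<T>: each element is scaled by the array length
// after staging through shared memory, matching computeGold() above. This is
// the standard CUDA idiom, not a verbatim copy of the SDK kernel.
//==============================================================================
template <typename T>
struct SharedMemory;  // left undefined: extern __shared__ needs one array per type

template <>
struct SharedMemory<int> {
    __device__ int* getPointer() {
        extern __shared__ int s_int[];
        return s_int;
    }
};

template <>
struct SharedMemory<float> {
    __device__ float* getPointer() {
        extern __shared__ float s_float[];
        return s_float;
    }
};

template <typename T>
__global__ void scaleByLength(const T* g_idata, T* g_odata)
{
    SharedMemory<T> shared;
    T* sdata = shared.getPointer();        // size given by the launch's 3rd parameter

    const unsigned int tid = threadIdx.x;
    const unsigned int num_threads = blockDim.x;

    sdata[tid] = g_idata[tid];             // load into shared memory
    __syncthreads();

    sdata[tid] = static_cast<T>(num_threads) * sdata[tid];  // scale by length
    __syncthreads();

    g_odata[tid] = sdata[tid];             // write the result back
}
// Launched as scaleByLength<T><<<1, len, len * sizeof(T)>>>(d_idata, d_odata),
// mirroring the grid/threads/mem_size setup in runTest() above.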
using namespace std; typedef uint8_t uint8; typedef unsigned int uint32; typedef unsigned long long int uint64; #define STREAM_BLOCK 16 #define BLOCK_SIZE 32 #define BLOCK_D_SIZE 64 #define INTEGRAL_BLOCK_SIZE 8 #define XDIM_MAX_THREADS 1024 #define XDIM_H_THREADS 512 #define XDIM_Q_THREADS 256 #define SHARED_MEMORY 49152 #define INIT_BLOCK 8 __global__ void NisterPrecompute(uint64 * Al,uint64 * Ar,double* Cl, double* Cr, const uint64* l_integral, const uint64* r_integral, const unsigned long long * l_sq_integral, const unsigned long long* r_sq_integral, const int rows , const int cols, const int wsize, const int wc, const int sqwin ){ extern __shared__ unsigned long long integralparts[]; const int Row = threadIdx.x; const int Col = blockIdx.x; const int o_index = (Row+wc)*cols+Col+wc; const int top = (Row)*4; const int bottom = (Row+wsize)*4; const int tr = (Row)*cols+Col+wsize; const int tl = Row*cols+Col; if(Row<(rows+1)&& Col < cols-wsize){ integralparts[Row*4] =l_integral[tr]-l_integral[tl]; integralparts[Row*4+1] =r_integral[tr] -r_integral[tl]; integralparts[Row*4+2] =l_sq_integral[tr] -l_sq_integral[tl]; integralparts[Row*4+3] =r_sq_integral[tr] -r_sq_integral[tl]; } __syncthreads(); if(Row < rows-wsize && Col < cols-wsize){ uint32 al = integralparts[bottom] - integralparts[top]; uint32 ar = integralparts[bottom+1] - integralparts[top+1]; unsigned long long Bl = integralparts[bottom+2]- integralparts[top+2]; unsigned long long Br = integralparts[bottom+3]- integralparts[top+3]; Al[o_index] = al; Ar[o_index] = ar; Cl[ o_index ] = 1/(sqrt(sqwin*Bl - (double)( al )*( al ) )); Cr[ o_index ] = 1/(sqrt(sqwin*Br - (double)( ar )*( ar) )); } } __global__ void NisterMatch(const float* left, const float* right, double* integral_vol, const int rows, const int cols, const int integrrows , const int integrcols , const int ndisp,const int offset ){ extern __shared__ float row_slice[]; int Col = threadIdx.x+offset; int Row = blockIdx.x; if(Col <cols && Row<rows ){ row_slice[threadIdx.x] = left[Row*cols+Col]; row_slice[blockDim.x+ndisp+threadIdx.x] = right[Row*cols+Col]; } float rp = ceil( (float)ndisp/blockDim.x ); for(int b=0; b<rp; b++){ if(blockIdx.x > 0 && (threadIdx.x+b*blockDim.x) < ndisp && (int)(Col -(ndisp-b*blockDim.x))>=0 ){ row_slice[blockDim.x+(threadIdx.x+b*blockDim.x)] = right[Row*cols+(Col -(ndisp-b*blockDim.x))]; } } __syncthreads(); for(int d=0; d<ndisp; d++){ if(Row < rows && Col < cols && Col-d >=0 ){ integral_vol[d*integrrows*integrcols+Row*integrcols + Col] = row_slice[threadIdx.x] * row_slice[(blockDim.x+ndisp)+threadIdx.x-d]; } } } __global__ void NCC(const double* integral_vol, double* slice, const uint64 * Al,const uint64 * Ar,const double* Cl, const double* Cr, const int integrrows , const int integrcols , const int rows , const int cols, const int wsize, const int wc,const int sqwin,const int ndisp, const int warpwidth){ extern __shared__ __align__(sizeof(double)) unsigned char ncc_shared[]; uint64 * Ar_sm = reinterpret_cast<uint64 *>(&ncc_shared[0]); double * Cr_sm = reinterpret_cast<double *>(&ncc_shared[(XDIM_Q_THREADS+ndisp)*sizeof(double)]); const int Row = blockIdx.y; const int Col =blockIdx.x*blockDim.x + threadIdx.x; int threaddispl = 0; if(blockIdx.x >0){ threaddispl=ndisp; } float rp = ceil( (float)ndisp/blockDim.x ); for(int b=0; b<rp; b++){ if(blockIdx.x > 0 && (threadIdx.x+b*blockDim.x) < ndisp && (int) (Col -(ndisp-b*blockDim.x)) >=0 ){ Ar_sm[(threadIdx.x+b*blockDim.x)] = Ar[(Row+wc)*cols + (Col -(ndisp-b*blockDim.x)+wc) ]; 
Cr_sm[(threadIdx.x+b*blockDim.x)] = Cr[(Row+wc)*cols + (Col -(ndisp-b*blockDim.x)+wc) ]; } } if(Row < rows-wsize && Col < cols-wsize){ const int index = (Row+wc)*cols+(Col+wc); const uint64 al = Al[index]; const double cl = Cl[index]; Ar_sm[threaddispl+ threadIdx.x ] = Ar[index ]; Cr_sm[threaddispl+ threadIdx.x ] = Cr[index ]; __syncthreads(); #pragma unroll for (int d=0; d< ndisp; d++){ const int dindex = threaddispl+threadIdx.x-d; const int disp = d*integrrows*integrcols; double ncccost = 2; if(Col < cols-wsize && dindex >=0 && (int)Col-d>=0){ const double lD = integral_vol[disp+(Row+wsize)*integrcols + (Col+wsize)] - integral_vol[disp+(Row)*integrcols + (Col+wsize)] - integral_vol[disp+(Row+wsize)*integrcols + Col] + integral_vol[disp+Row*integrcols + Col]; if( isfinite(cl) && isfinite(Cr_sm[ dindex ])){ ncccost = 1-((double)(sqwin*lD- al * Ar_sm[dindex] )*cl*Cr_sm[ dindex ]) ; } } slice[d*rows*cols+(Row+wc)*cols + (Col+wc)] = ncccost; } } } __global__ void square( const float* input, uint64 * output, const int height, const int width,const int oHeigth, const int oWidth){ const int index = blockIdx.x*blockDim.x+threadIdx.x; if(index< height*width){ uint64 val = (uint64)input[index]; val = val*val; __syncthreads(); output[index] = val; } } void usage(void){ std::cout << "NCC generic CUDA implementation" << std::endl; std::cout << "Arguments" << std::endl; std::cout << "-l:\t\t Left image | File containing names of the left images" << std::endl; std::cout << "-r:\t\t Right image | File containing the names of the right images" << std::endl; std::cout << "-ndisp:\t\t Number of Disparities" << std::endl; std::cout << "-wsize:\t\t Window size" << std::endl; std::cout << "-dopost:\t Default false. If set, activates sgm cost optimization" << std::endl; std::cout << "-list:\t\t Default is single file. If set, left and right files should be lists of images." << std::endl; std::cout << "-out:\t\t Output directory for disparity images." << std::endl; std::cout << "-out_type:\t Output image type. Supports pgm|pfm|png|disp(uint16 png format)." << std::endl; std::cout << "-postconf:\t Optional configuration file for post-processing."
<< std::endl; std::cout << "-h:\t\t Prints this help" << std::endl; } int main(int argc, char* argv[]){ string leftfile; string rightfile; string out=string("."); string out_t=string("disp"); int wsize=9; int ndisp=256; bool post=false; bool single=true; int argsassigned = 0; int required=0; postparams params; //sgm params params.pi1=1.32; params.pi2=24.25; params.tau_so=1; params.alpha1=2; params.sgm_q1=3; params.sgm_q2=2; params.alpha2=6; params.sigma = 5.99; params.kernel_size=5; int direction =-1; for(int i=0; i<argc; i++){ if( !strcmp(argv[i], "-l") ){ leftfile = string(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i],"-r") ){ rightfile = string(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i],"-ndisp") ){ ndisp= atoi(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i],"-wsize") ){ wsize= atoi(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i], "-dopost") ){ post= true; argsassigned++; }else if(!strcmp(argv[i],"-list")){ single=false; argsassigned++; }else if(!strcmp(argv[i],"-out")){ out=string(argv[++i]); argsassigned++; }else if(!strcmp(argv[i],"-out_type")){ out_t=string(argv[++i]); argsassigned++; }else if(!strcmp(argv[i],"-postconf")){ parseConf(params ,string(argv[++i])); argsassigned++; }else if(!strcmp(argv[i],"-h")){ usage(); return 0; } } if(argsassigned == 0){ usage(); return 0; } if(argsassigned ==1){ leftfile = string("../../leftimg.txt"); rightfile = string("../../rightimg.txt"); } else if( required < 4 ){ usage(); return 0; } std::vector<string> limg; std::vector<string> rimg; if (single){ limg.push_back(leftfile); rimg.push_back(rightfile); }else{ limg = getImages(leftfile); rimg = getImages(rightfile); } imgio* imgutil = new imgio(); imgutil->read_image_meta(limg[0].c_str()); //######################### Allocate memory on the device ###########################################// float* imgl; size_t ibytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float); cudaMallocHost( (void**) &imgl, ibytes ); float* imgr; cudaMallocHost( (void**) &imgr, ibytes ); int width = imgutil->getWidth(); int height = imgutil->getHeight(); int wdiv = ceil((float)width/32); const int warpwidth = wdiv*32; cudaStream_t stream1; cudaStream_t stream2; //cudaError_t strerr; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); const int wc = wsize/2; double* cost_d; size_t bytes = height*width*ndisp*sizeof(double); cudaMalloc( (void**) &cost_d, bytes ); double* post_cost_d; cudaMalloc( (void**) &post_cost_d, bytes ); float* disp_h; size_t dbytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float); cudaMallocHost( (void**) &disp_h, dbytes ); float * disp_d; cudaMalloc(&disp_d, dbytes); float * disp_tmp; cudaMalloc(&disp_tmp, dbytes); float* imgl_d; cudaMalloc(&imgl_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float)); float* imgr_d; cudaMalloc(&imgr_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float)); uint64* l_integral_d; uint64* r_integral_d; uint64* l_integral_d_t; uint64* r_integral_d_t; cudaMalloc(&l_integral_d, height*width*sizeof(uint64)); cudaMemsetAsync(l_integral_d, 0,height*width*sizeof(uint64),stream1); cudaMalloc(&r_integral_d, height*width*sizeof(uint64)); cudaMemsetAsync(r_integral_d, 0,height*width*sizeof(uint64),stream2); cudaMalloc(&l_integral_d_t, height*width*sizeof(uint64)); cudaMemsetAsync(l_integral_d_t, 0,height*width*sizeof(uint64),stream1); cudaMalloc(&r_integral_d_t, height*width*sizeof(uint64)); cudaMemsetAsync(r_integral_d_t, 0,height*width*sizeof(uint64),stream2); unsigned long 
long int * l_sq_integral_d; unsigned long long int * r_sq_integral_d; //missing kernel cudaMalloc(&l_sq_integral_d, height*width*sizeof(unsigned long long int)); cudaMemsetAsync(l_sq_integral_d, 0,height*width*sizeof(unsigned long long int),stream1); cudaMalloc(&r_sq_integral_d, height*width*sizeof(unsigned long long int)); cudaMemsetAsync(r_sq_integral_d, 0,height*width*sizeof(unsigned long long int),stream2); uint64 * Al_d; uint64 * Ar_d; double* Cl_d; double* Cr_d; cudaMalloc(&Al_d, height*width*sizeof(uint64)); cudaMalloc(&Ar_d, height*width*sizeof(uint64)); cudaMalloc(&Cl_d, height*width*sizeof(double)); cudaMalloc(&Cr_d, height*width*sizeof(double)); cudaMemsetAsync(Al_d,0,height*width*sizeof(uint64)); cudaMemsetAsync(Ar_d,0,height*width*sizeof(uint64)); cudaMemsetAsync(Cl_d,0,height*width*sizeof(double)); cudaMemsetAsync(Cr_d,0,height*width*sizeof(double)); int size1 = height*ndisp; int size2 = width*ndisp; dim3 argGridSGM1((size1 - 1) / ndisp + 1,width); dim3 argGridSGM2((size2 - 1) / ndisp + 1,height); float * tmp_d; cudaMalloc(&tmp_d, width*ndisp*sizeof(float)); cudaMemsetAsync(tmp_d,0 , width*ndisp*sizeof(float),0); float* left_cross; cudaMalloc(&left_cross, 4*height*width*sizeof(float)); cudaMemsetAsync(left_cross,0 , 4*height*width*sizeof(float),0); float* right_cross; cudaMalloc(&right_cross, 4*height*width*sizeof(float)); cudaMemsetAsync(right_cross,0 , 4*height*width*sizeof(float),0); int kr = ceil(params.sigma*3); int ks = kr*2+1; float * kernel = (float*)calloc(ks*ks,sizeof(float)); for (int i=0; i<ks; i++){ for(int j=0; j<ks; j++){ int y= (i-1)-kr; int x= (j-1)-kr; kernel[i*ks+j] = exp( -(x*x+y*y)/(2*params.sigma*params.sigma) ); } } float *kernel_d; cudaMalloc(&kernel_d, ks*ks*sizeof(float)); cudaMemcpy( kernel_d, kernel, ks*ks*sizeof(float), cudaMemcpyHostToDevice); int vthreads = XDIM_MAX_THREADS; if(height < XDIM_Q_THREADS) vthreads=XDIM_Q_THREADS; else if(height < XDIM_H_THREADS) vthreads=XDIM_H_THREADS; int vreps = ceil((float) height/vthreads ); dim3 integraldim2Grid(1,width,1 ); dim3 integraldim1Grid(1,height,1 ); dim3 integraldim1Griddisp(1,height,ndisp ); int hthreads = XDIM_MAX_THREADS; if(width < XDIM_Q_THREADS) hthreads=XDIM_Q_THREADS; else if(width<XDIM_H_THREADS) hthreads=XDIM_H_THREADS; int hreps = ceil((float)width/hthreads); dim3 hintegralGrid(height,1, 1 ); dim3 preCompBlock(XDIM_MAX_THREADS); dim3 preCompGrid(width); dim3 MatchGrid(height); dim3 vintegralGriddisp(1,width,ndisp ); dim3 swapBlock(BLOCK_D_SIZE,16,1); dim3 swapGrid(ceil((float)imgutil->getWidth()*imgutil->getHeight()/BLOCK_D_SIZE),ceil((float) ndisp/BLOCK_D_SIZE )); dim3 transBlock(BLOCK_D_SIZE,16,1); dim3 transGrid(ceil((float)imgutil->getWidth()/BLOCK_D_SIZE ),ceil((float)imgutil->getHeight()/BLOCK_D_SIZE)); dim3 transinvGrid(ceil((float)imgutil->getHeight()/BLOCK_D_SIZE),ceil((float)imgutil->getWidth()/BLOCK_D_SIZE )); dim3 argBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 argGrid(ceil((float) imgutil->getWidth() / BLOCK_SIZE),ceil( (float)imgutil->getHeight()/ BLOCK_SIZE)); dim3 argBlockSq(XDIM_MAX_THREADS); dim3 argGridSq(ceil((float)imgutil->getWidth()*imgutil->getHeight()/(XDIM_MAX_THREADS))); dim3 dimBlockNCC(XDIM_Q_THREADS); dim3 dimGridNCC(ceil((float) imgutil->getWidth() / XDIM_Q_THREADS),imgutil->getHeight()-wsize); //########################################################################################################################################// for(size_t i=0; i<limg.size(); i++){ imgutil->read_image(limg[i],imgl); imgutil->read_image(rimg[i],imgr); cudaMemcpyAsync( imgl_d, imgl,
width*height*sizeof(float), cudaMemcpyHostToDevice,stream1); cudaMemcpyAsync( imgr_d, imgr, width*height*sizeof(float), cudaMemcpyHostToDevice,stream2); cudaMemsetAsync(cost_d,2 , height*width*ndisp*sizeof(double),stream1); cudaMemsetAsync(post_cost_d,2 , width*height*ndisp*sizeof(double),stream2); for (int r =0; r<hreps; r++){ int tthreads=hthreads; if(r >0 && r==hreps-1) tthreads = width-r*(hthreads-1); HorizontalIntegralKernel_outofplace<<<hintegralGrid, tthreads, tthreads*sizeof(uint64) >>>(l_integral_d, imgl_d, height , width , 1,r*(hthreads-1)); HorizontalIntegralKernel_outofplace<<<hintegralGrid, tthreads, tthreads*sizeof(uint64) >>>(r_integral_d, imgr_d, height , width , 1,r*(hthreads-1)); } transpose<<< transGrid, transBlock >>>( l_integral_d, l_integral_d_t,height,width); transpose<<< transGrid, transBlock >>>( r_integral_d, r_integral_d_t,height,width); for(int r=0;r<vreps;r++){ int tthreads=vthreads; if(r >=0 && r==vreps-1) tthreads=height-r*(vthreads-1); IntegralKernel<<<integraldim2Grid, tthreads, 32*sizeof(uint64) >>>(l_integral_d_t,width, height,1,r*(vthreads-1) ); IntegralKernel<<<integraldim2Grid, tthreads, 32*sizeof(uint64) >>>(r_integral_d_t,width, height,1,r*(vthreads-1) ); } transpose<<< transinvGrid, transBlock >>>(l_integral_d_t, l_integral_d, width,height); transpose<<< transinvGrid, transBlock >>>(r_integral_d_t, r_integral_d, width,height); square<<< argGridSq,argBlockSq >>>( imgl_d, l_sq_integral_d, height,width, height, width); square<<< argGridSq,argBlockSq >>>( imgr_d, r_sq_integral_d, height,width, height, width); for (int r =0; r<hreps; r++){ int tthreads=hthreads; if(r >0 && r==hreps-1) tthreads = width-r*(hthreads-1); IntegralKernel<<<integraldim1Grid, tthreads, 32*sizeof(uint64) >>>(l_sq_integral_d, height, width,1,r*(hthreads-1) ); IntegralKernel<<<integraldim1Grid, tthreads, 32*sizeof(uint64) >>>(r_sq_integral_d, height, width,1,r*(hthreads-1) ); } transpose<<< transGrid, transBlock >>>( l_sq_integral_d, l_integral_d_t,height,width); transpose<<< transGrid, transBlock >>>( r_sq_integral_d, r_integral_d_t,height,width); for(int r=0;r<vreps;r++){ int tthreads=vthreads; if(r >=0 && r==vreps-1) tthreads=height-r*(vthreads-1); IntegralKernel<<<integraldim2Grid, tthreads, 32*sizeof(uint64) >>>(l_integral_d_t,width, height,1,r*(vthreads-1) ); IntegralKernel<<<integraldim2Grid, tthreads, 32*sizeof(uint64) >>>(r_integral_d_t,width, height,1,r*(vthreads-1) ); } transpose<<< transinvGrid, transBlock >>>( l_integral_d_t, l_sq_integral_d,width,height); transpose<<< transinvGrid, transBlock >>>( r_integral_d_t, r_sq_integral_d,width,height); NisterPrecompute<<< preCompGrid,preCompBlock,XDIM_MAX_THREADS*4*sizeof(unsigned long long int) >>>(Al_d, Ar_d, Cl_d, Cr_d, l_integral_d , r_integral_d , l_sq_integral_d, r_sq_integral_d, height , width,wsize,wc,wsize*wsize ); for (int r =0; r<hreps; r++){ int tthreads=hthreads; if(r >0 && r==hreps-1) tthreads = width-r*hthreads; NisterMatch<<<MatchGrid, tthreads, (2*tthreads+ndisp)*sizeof(float)>>>(imgl_d,imgr_d,post_cost_d,height , width,height,width,ndisp,r*hthreads); } for (int r =0; r<hreps; r++){ int tthreads=hthreads; if(r >0 && r==hreps-1) tthreads = width-r*(hthreads-1); IntegralKernel<<<integraldim1Griddisp, tthreads, 32*sizeof(uint64) >>>(post_cost_d, height, width,ndisp,r*(hthreads-1) ); } for(int r=0;r<vreps;r++){ int tthreads=vthreads; if(r >=0 && r==vreps-1) tthreads=height-r*(vthreads-1); VerticalIntegralKernel<<<vintegralGriddisp, tthreads,32*sizeof(double) >>>(post_cost_d,height,width,1,r*(vthreads-1)); } 
NCC<<<dimGridNCC, dimBlockNCC,2*(XDIM_Q_THREADS+ndisp)*sizeof(double)>>>(post_cost_d,cost_d, Al_d, Ar_d, Cl_d, Cr_d, height,width,height,width ,wsize,wc,wsize*wsize,ndisp,warpwidth); if(post){ swap_axis<<< swapGrid, swapBlock >>>( cost_d, post_cost_d,height,width,ndisp ); cudaMemset(cost_d,0 , height*width*ndisp*sizeof(double)); for (int step = 0; step < width; step++) { sgm_loop<0><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } for (int step = 0; step < width; step++) { sgm_loop<1><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } for (int step = 0; step < height; step++) { sgm_loop<2><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } for (int step = 0; step < height; step++) { sgm_loop<3><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } argmin<<<argGrid, argBlock>>>( disp_d, cost_d, height, width,ndisp ); subpixel_enchancement<<<(height*width - 1) / TB + 1, TB>>>( disp_d, cost_d, disp_tmp, height*width, height*width, ndisp); median2d<<<(height*width - 1) / TB + 1, TB>>>( disp_tmp, disp_d, height*width, height, width, params.kernel_size / 2); mean2d<<<(height*width - 1) / TB + 1, TB>>>( disp_d, kernel_d, disp_tmp, height*width, ks / 2, height, width, params.alpha2); }else{ argmin_d<<<argGrid, argBlock>>>( disp_tmp, cost_d, height, width,ndisp ); } cudaMemcpy( disp_h, disp_tmp, height*width*sizeof(float), cudaMemcpyDeviceToHost ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); imgutil->write_image(out + string("/") +limg[i].substr(limg[i].find_last_of("/")+1) ,disp_h,out_t); } cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); cudaFree(left_cross); cudaFree(right_cross); cudaFree(tmp_d); cudaFreeHost(imgl); cudaFreeHost(imgr); cudaFreeHost(disp_h); cudaFree(disp_d); cudaFree(disp_tmp); cudaFree(imgl_d); cudaFree(imgr_d); cudaFree(cost_d); cudaFree(post_cost_d); cudaFree(l_integral_d); cudaFree(r_integral_d); cudaFree(l_sq_integral_d); cudaFree(r_sq_integral_d); cudaFree(Al_d); cudaFree(Ar_d); cudaFree(Cl_d); cudaFree(Cr_d); delete imgutil; }
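// A minimal host-side sketch of the cost formula that NisterPrecompute and the NCC kernel
// above assemble from integral images: Cl = 1/sqrt(W*sum(l^2) - (sum l)^2) and
// cost = 1 - (W*sum(l*r) - sum(l)*sum(r)) * Cl * Cr over a W = wsize*wsize window.
// The helper below is illustrative only (its name and the window-sum arguments are not part
// of the original program); it recomputes the same value directly from raw window sums.
#include <cmath>

static double ncc_window_cost(double sl, double sr,      // sum(left), sum(right)
                              double sll, double srr,    // sum(left^2), sum(right^2)
                              double slr, int wsize)     // sum(left*right), window side
{
    const double W  = static_cast<double>(wsize) * wsize;   // "sqwin" in the kernels
    const double cl = 1.0 / std::sqrt(W * sll - sl * sl);   // Cl from NisterPrecompute
    const double cr = 1.0 / std::sqrt(W * srr - sr * sr);   // Cr from NisterPrecompute
    if (!std::isfinite(cl) || !std::isfinite(cr))
        return 2.0;                                          // same sentinel the NCC kernel stores
    return 1.0 - (W * slr - sl * sr) * cl * cr;              // 1 - NCC, so 0 is a perfect match
}
// The kernels obtain these five window sums in O(1) per pixel and disparity from the integral
// images built in main(), instead of summing the wsize*wsize patch explicitly as implied here.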
the_stack
#define NUM_NL_ITERS 5 //#define DEBUG namespace amgx { template <class Handle> struct CWrapper { AMGX_Mode mode; Handle hdl; }; // parameter is used as test name DECLARE_UNITTEST_BEGIN(TestMemoryUse); std::string base_keywords() { return "stress"; } struct TestCase { std::vector<std::string> file_names; std::string config_string; double max_mem_usage; double max_mem_leak; }; double get_memory_usage() { size_t free_mem, total_mem; cudaMemGetInfo( &free_mem, &total_mem ); size_t used_mem = total_mem - free_mem; used_mem /= size_t(1024); double used_mem_f = double(used_mem) / 1024.0; return used_mem_f; } void check_memory_usage(const char *msg, double &mem_before, TestCase &test_case, int it) { std::stringstream mem_msg; mem_msg << "Maximum used memory exceeds limit set to " << test_case.max_mem_usage << " for matrix " << test_case.file_names[it]; double used_mem_f = get_memory_usage(); UNITTEST_ASSERT_TRUE_DESC(mem_msg.str().c_str(), (used_mem_f - mem_before) < test_case.max_mem_usage); #ifdef DEBUG std::cout << msg << ", memory usage =: " << used_mem_f - mem_before << " Mb" << std::endl; #endif } void load_data(TestCase &test_case, double &mem_before, int it, int &num_rows, int &num_nz, int &bsize_x, int &bsize_y, int &bsize, std::vector<int> &row_offsets, std::vector<int> &col_indices, std::vector<double> &off_dia_values, std::vector<double> &dia_values, std::vector<double> &x_vec, std::vector<double> &b_vec) { // -------------------------------------- // Create matrix arrays from file // -------------------------------------- Matrix_h Atemp; Vector_h btemp, xtemp; // ------------------------------------------- // Read the matrix // ------------------------------------------- std::string fail_msg = "Cannot open " + test_case.file_names[it]; this->PrintOnFail(fail_msg.c_str()); UNITTEST_ASSERT_TRUE(this->read_system(test_case.file_names[it].c_str(), Atemp, btemp, xtemp)); bool hasDiag = Atemp.hasProps(DIAG); num_rows = Atemp.get_num_rows(); num_nz = Atemp.get_num_nz(); bsize_x = Atemp.get_block_dimx(); bsize_y = Atemp.get_block_dimy(); bsize = bsize_x * bsize_y; // Create row_offsets, col_indices, off_dia_values and dia_values arrays from the matrix just rea if (xtemp.size() == 0) { xtemp.resize(num_rows * bsize_y, 0.); } row_offsets.resize(num_rows + 1); col_indices.resize(num_nz); off_dia_values.resize(num_nz * bsize); if (hasDiag) { dia_values.resize(num_rows * bsize); } x_vec.resize(num_rows * bsize_y); b_vec.resize(num_rows * bsize_x); // Fill vectors int *raw_row_ptr = Atemp.row_offsets.raw(); int *raw_col_ptr = Atemp.col_indices.raw(); double *raw_val_ptr = Atemp.values.raw(); // Row offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] = raw_row_ptr[i]; } // Column indices for (int i = 0; i < num_nz; i++) { col_indices[i] = raw_col_ptr[i]; } // Off-diagonal values for (int i = 0; i < num_nz; i++) for (int j = 0; j < bsize; j++) { off_dia_values[i * bsize + j] = raw_val_ptr[i * bsize + j]; } // Diagonal values if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] = raw_val_ptr[num_nz * bsize + i * bsize + j]; } } } // RHS double *b_raw_ptr = btemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_x; j++) { b_vec[i * bsize_x + j] = b_raw_ptr[i * bsize_x + j]; } //b_vec[i*bsize_x+j] = b_raw_ptr[i*bsize_x+j]+(1.0*rand()/RAND_MAX); // x vector double *x_raw_ptr = xtemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_y; j++) { x_vec[i * bsize_y + j] = x_raw_ptr[i * bsize_y + j]; } 
//x_vec[i*bsize_y+j] = x_raw_ptr[i*bsize_y+j]+(1.0*rand()/RAND_MAX); } void mem_test_main(TestCase &test_case, double &mem_before) { int mat_it = 0; check_memory_usage("before initialize", mem_before, test_case, mat_it); AMGX_initialize(); AMGX_initialize_plugins(); check_memory_usage("after initialize", mem_before, test_case, mat_it); AMGX_config_handle rsrc_cfg = NULL; UNITTEST_ASSERT_EQUAL(AMGX_config_create(&rsrc_cfg, ""), AMGX_OK); // Choosing device 0 int device = 0; AMGX_resources_handle rsrc = NULL; UNITTEST_ASSERT_EQUAL(AMGX_resources_create(&rsrc, rsrc_cfg, NULL, 1, &device), AMGX_OK); // query device pool size amgx::CWrapper<AMGX_resources_handle> *c_resources = (amgx::CWrapper<AMGX_resources_handle> *)rsrc; double old_max_mem_usage = test_case.max_mem_usage; test_case.max_mem_usage += ((Resources *)c_resources->hdl)->getPoolSize() / 1024.0 / 1024.0; check_memory_usage("after resources ", mem_before, test_case, mat_it); int num_rows, num_nz; int bsize_x, bsize_y, bsize; std::vector<int> row_offsets; std::vector<int> col_indices; std::vector<double> off_dia_values; std::vector<double> dia_values; std::vector<double> x_vec; std::vector<double> b_vec; // All of these should create the same result std::string option_string = test_case.config_string; bool fine_level_recreated = true; AMGX_matrix_handle matrix = NULL; AMGX_vector_handle b = NULL; AMGX_vector_handle x = NULL; // run multiple non-linear iterations for ( int it = 0; it < NUM_NL_ITERS; it++ ) { mat_it = it % test_case.file_names.size(); load_data(test_case, mem_before, mat_it, num_rows, num_nz, bsize_x, bsize_y, bsize, row_offsets, col_indices, off_dia_values, dia_values, x_vec, b_vec); if ( fine_level_recreated ) { if ( matrix != NULL ) { check_memory_usage("before matrix destroy", mem_before, test_case, mat_it); AMGX_matrix_destroy( matrix ); check_memory_usage("after matrix destroy", mem_before, test_case, mat_it); } if ( b != NULL ) { check_memory_usage("before vector destroy", mem_before, test_case, mat_it); AMGX_vector_destroy( b ); check_memory_usage("after vector destroy", mem_before, test_case, mat_it); } if ( x != NULL ) { check_memory_usage("before vector destroy", mem_before, test_case, mat_it); AMGX_vector_destroy( x ); check_memory_usage("after vector destroy", mem_before, test_case, mat_it); } check_memory_usage("before matrix create", mem_before, test_case, mat_it); AMGX_matrix_create( &matrix, rsrc, AMGX_mode_dDDI ); check_memory_usage("after matrix create", mem_before, test_case, mat_it); check_memory_usage("before vector create", mem_before, test_case, mat_it); AMGX_vector_create( &b, rsrc, AMGX_mode_dDDI ); check_memory_usage("after vector create", mem_before, test_case, mat_it); check_memory_usage("before vector create", mem_before, test_case, mat_it); AMGX_vector_create( &x, rsrc, AMGX_mode_dDDI ); check_memory_usage("after vector create", mem_before, test_case, mat_it); } // fill matrix check_memory_usage("before matrix upload", mem_before, test_case, mat_it); if ( fine_level_recreated ) { AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], &dia_values[0]); } else { AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], &dia_values[0]); } check_memory_usage("after matrix upload", mem_before, test_case, mat_it); // fill b check_memory_usage("before vector-b upload", mem_before, test_case, mat_it); AMGX_vector_upload( b, num_rows, bsize_y, &b_vec[0] ); check_memory_usage("after vector-b upload", mem_before, 
test_case, mat_it); // fill x check_memory_usage("before vector-x upload", mem_before, test_case, mat_it); AMGX_vector_upload( x, num_rows, bsize_x, &x_vec[0] ); check_memory_usage("after vector-x upload", mem_before, test_case, mat_it); double used_mem_before = get_memory_usage(); AMGX_config_handle cfg; check_memory_usage("before config create", mem_before, test_case, mat_it); UNITTEST_ASSERT_EQUAL(AMGX_config_create( &cfg, option_string.c_str() ), AMGX_OK); check_memory_usage("after config create", mem_before, test_case, mat_it); // verbose = 2 AMGX_solver_handle solver; check_memory_usage("before solver create", mem_before, test_case, mat_it); AMGX_solver_create( &solver, rsrc, AMGX_mode_dDDI, cfg); check_memory_usage("after solver create", mem_before, test_case, mat_it); // solver setup check_memory_usage("before solver setup", mem_before, test_case, mat_it); AMGX_solver_setup( solver, matrix ); check_memory_usage("after solver setup", mem_before, test_case, mat_it); // solver solve check_memory_usage("before solver solve", mem_before, test_case, mat_it); AMGX_solver_solve( solver, b, x ); check_memory_usage("after solver solve", mem_before, test_case, mat_it); // copy solution vector check_memory_usage("before vector copy", mem_before, test_case, mat_it); AMGX_vector_download( x, &x_vec[0] ); check_memory_usage("after vector copy", mem_before, test_case, mat_it); // read the number of iterations int num_iterations = 0; AMGX_solver_get_iterations_number( solver, &num_iterations ); check_memory_usage("after get iteration number", mem_before, test_case, mat_it); // read the residuals and check for NaNs double res[] = { 0.0, 0.0, 0.0, 0.0 }; AMGX_solver_get_iteration_residual( solver, 0, 0, &res[0] ); AMGX_solver_get_iteration_residual( solver, 0, 1, &res[1] ); AMGX_solver_get_iteration_residual( solver, 0, 2, &res[2] ); AMGX_solver_get_iteration_residual( solver, 0, 3, &res[3] ); check_memory_usage("after get iteration residual", mem_before, test_case, mat_it); check_memory_usage("before solver destroy", mem_before, test_case, mat_it); AMGX_solver_destroy( solver ); check_memory_usage("after solver destroy", mem_before, test_case, mat_it); check_memory_usage("before cfg destroy", mem_before, test_case, mat_it); AMGX_config_destroy( cfg ); check_memory_usage("after cfg destroy", mem_before, test_case, mat_it); double used_mem_after = get_memory_usage(); if (it == 0) { int num_rows; int block_dimx; int block_dimy; AMGX_matrix_get_size(matrix, &num_rows, &block_dimx, &block_dimy); // get size of the coloring check_memory_usage("before coloring resize", mem_before, test_case, it); typename Matrix_d::IVector coloring; coloring.resize(num_rows * 2); check_memory_usage("after coloring resize", mem_before, test_case, it); double coloring_size = (get_memory_usage() - used_mem_after); check_memory_usage("before coloring clear and shrink-to-fit", mem_before, test_case, it); coloring.clear(); coloring.shrink_to_fit(); check_memory_usage("after coloring clear and shrink-to-fit", mem_before, test_case, it); used_mem_after -= coloring_size; // account for memory pool creation used_mem_after -= 32; #ifdef DEBUG std::cout << "Coloring size= " << coloring_size << std::endl; //std::cout << "num_rows= " << num_rows << std::endl; #endif } fine_level_recreated = false; double memLeak = used_mem_after - used_mem_before; std::stringstream msg; msg << "Memory leak after " << it << " iteration: " << memLeak << " Mb exceed threshold set to " << test_case.max_mem_leak ; #ifdef DEBUG std::cout << "Mem leak = " << 
memLeak << ", after " << it << " iteration " << std::endl; << std::endl; #endif UNITTEST_ASSERT_TRUE_DESC(msg.str().c_str(), memLeak - 100 <= test_case.max_mem_leak); } // clean-up matrix and vectors if ( matrix != NULL ) { check_memory_usage("before matrix destroy", mem_before, test_case, mat_it); AMGX_matrix_destroy( matrix ); check_memory_usage("after matrix destroy", mem_before, test_case, mat_it); } if ( b != NULL ) { check_memory_usage("before vector destroy", mem_before, test_case, mat_it); AMGX_vector_destroy( b ); check_memory_usage("after vector destroy", mem_before, test_case, mat_it); } if ( x != NULL ) { check_memory_usage("before vector destroy", mem_before, test_case, mat_it); AMGX_vector_destroy( x ); check_memory_usage("after vector destroy", mem_before, test_case, mat_it); } test_case.max_mem_usage = old_max_mem_usage; check_memory_usage("before resources free", mem_before, test_case, mat_it); AMGX_config_destroy(rsrc_cfg); AMGX_resources_destroy(rsrc); check_memory_usage("after resources free", mem_before, test_case, mat_it); check_memory_usage("before finalize", mem_before, test_case, mat_it); AMGX_finalize_plugins(); AMGX_finalize(); check_memory_usage("after finalize", mem_before, test_case, mat_it); } void run() { std::vector<TestCase> test_cases; TestCase temp_case; // List test cases std::ostringstream cfg_options; cfg_options << "config_version=1,"; cfg_options << "algorithm=AGGREGATION,"; cfg_options << "coarseAgenerator=LOW_DEG,"; cfg_options << "coloring_level=1,"; cfg_options << "convergence=RELATIVE_MAX,"; cfg_options << "cycle=V,"; cfg_options << "determinism_flag=1,"; cfg_options << "matrix_coloring_scheme=MIN_MAX,"; cfg_options << "max_iters=30,"; cfg_options << "max_levels=21,"; cfg_options << "min_block_rows=20,"; cfg_options << "norm=L1,"; cfg_options << "postsweeps=3,"; cfg_options << "presweeps=0,"; cfg_options << "selector=ONE_PHASE_HANDSHAKING,"; cfg_options << "smoother=MULTICOLOR_DILU,"; cfg_options << "smoother_weight=0.9,"; cfg_options << "tolerance=0.1,"; cfg_options << "monitor_residual=1,"; cfg_options << "print_solve_stats=1,"; cfg_options << "store_res_history=1,"; temp_case.config_string = cfg_options.str(); temp_case.file_names.clear(); temp_case.file_names.push_back("Public/florida/atmosdd.mtx"); temp_case.max_mem_usage = 2066; // Mb temp_case.max_mem_leak = 0; test_cases.push_back(temp_case); SignalHandler::hook(); AMGX_finalize_plugins(); AMGX_finalize(); UnitTest::amgx_intialized = false; // Empty kernel call to initialize cuda context size_t free_mem, total_mem; cudaMemGetInfo(&free_mem, &total_mem); size_t context_buffer = 10000000; thrust::device_vector<double> test_vector; int vec_size = (free_mem - context_buffer) / 8; test_vector.resize(vec_size); test_vector.clear(); test_vector.shrink_to_fit(); for (int i = 0; i < test_cases.size(); i++) { double used_mem_before = get_memory_usage(); mem_test_main(test_cases[i], used_mem_before); double used_mem_after = get_memory_usage(); double memLeak = used_mem_after - used_mem_before; #ifdef DEBUG std::cout << "Mem leak = " << memLeak << std::endl; #endif std::stringstream msg; msg << "Memory leak: " << memLeak << " Mb exceed threshold set to " << test_cases[i].max_mem_leak ; UNITTEST_ASSERT_TRUE_DESC(msg.str().c_str(), memLeak <= test_cases[i].max_mem_leak); } AMGX_initialize(); AMGX_initialize_plugins(); UnitTest::amgx_intialized = true; } DECLARE_UNITTEST_END(TestMemoryUse); // if you want to be able run this test for all available configs you can write this: //#define 
AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE // or run for all device configs //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE //TestMemoryUse <TemplateMode<AMGX_mode_dDDI>::Type> TestMemoryUse_instance_mode_dDDI; // or you can specify several desired configs //TemplateTest <TemplateMode<AMGX_mode_hDFI>::Type> TemplateTest_hDFI; //TemplateTest <TemplateMode<AMGX_mode_dDFI>::Type> TemplateTest_dDFI; }
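// A standalone sketch of the cudaMemGetInfo before/after pattern that get_memory_usage() and
// check_memory_usage() in the test above rely on. The 64 MiB probe allocation and the printed
// labels are illustrative choices, not taken from the test; only the measurement pattern is.
#include <cstdio>
#include <cuda_runtime.h>

static double used_device_mib()
{
    size_t free_mem = 0, total_mem = 0;
    cudaMemGetInfo(&free_mem, &total_mem);       // driver-reported free/total device memory
    return double(total_mem - free_mem) / (1024.0 * 1024.0);
}

int main()
{
    cudaFree(0);                                 // create the CUDA context before measuring
    const double before = used_device_mib();

    float *buf = nullptr;
    cudaMalloc(&buf, size_t(64) << 20);          // 64 MiB probe allocation
    const double during = used_device_mib();
    cudaFree(buf);
    const double after = used_device_mib();

    std::printf("while allocated: +%.1f MiB, after free: %+.1f MiB\n",
                during - before, after - before);
    return 0;
}
// As in the unit test, a nonzero residual after freeing points at either a leak or pool growth
// inside the library under measurement, which the test compensates for explicitly.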
the_stack
__device__ __constant__ float EPS2; __device__ __constant__ float DT_TICK; struct ds64 { union { float2 val; double dbl; }; __device__ ds64() {} __device__ ds64(float x) : val(make_float2(x, x)) {} __device__ ds64 operator+=(const float x) { const float vx = val.x + x; const float vy = val.y - ((vx - val.x) - x); val = make_float2(vx, vy); return *this; } __device__ double to_double() const { return (double)val.x + (double)val.y; } }; template<class REAL> struct cuvec3 { REAL x, y, z; __host__ __device__ cuvec3() {} __host__ __device__ cuvec3(const REAL v) : x(v), y(v), z(v) {} __host__ __device__ cuvec3(const REAL _x, const REAL _y, const REAL _z) : x(_x), y(_y), z(_z) {} __host__ __device__ cuvec3 operator=(const cuvec3<float> v) {x = v.x; y = v.y; z = v.z; return *this;}; __host__ __device__ cuvec3 operator=(const cuvec3<double > v) {x = v.x; y = v.y; z = v.z; return *this;}; __host__ __device__ REAL operator*(const cuvec3<REAL> v) const {return (x*v.x + y*v.y + z*v.z);} __host__ __device__ cuvec3 operator*(const REAL v) const {return cuvec3(x*v, y*v, z*v);} // __host__ __device__ cuvec3 operator+(const cuvec3<REAL> v) const {return cuvec3(x+v.x, y+v.y, z+v.z);} __host__ __device__ cuvec3 operator-(const cuvec3<REAL> v) const {return cuvec3(x-v.x, y-v.y, z-v.z);} __host__ __device__ cuvec3 operator%(const cuvec3<REAL> v) const {return cuvec3(x*v.y - y*v.x, y*v.z-z*v.y, z*v.x - x*v.z);} __host__ __device__ cuvec3 operator-() const {return cuvec3(-x, -y, -z);} __host__ __device__ cuvec3 operator+(const cuvec3<float> v) const {return cuvec3(x+v.x, y+v.y, z+v.z);} __host__ __device__ cuvec3 operator+(const cuvec3<double > v) const {return cuvec3(x+v.x, y+v.y, z+v.z);} __host__ __device__ cuvec3 operator += (const cuvec3<REAL> v) { *this = *this + v; return *this; } __host__ __device__ cuvec3 operator -= (const cuvec3<REAL> v) { *this = *this - v; return *this; } __host__ __device__ cuvec3 operator *= (const REAL s) { *this = *this * s; return *this; } __host__ __device__ friend cuvec3 operator * (const REAL s ,const cuvec3<REAL> v) { return v*s; } __host__ __device__ REAL norm2() const {return (*this)*(*this);}; }; typedef cuvec3<double > dcuvec3; typedef cuvec3<float> fcuvec3; __device__ float sqr(const float x) { return x*x; } /****************************/ /****************************/ /****************************/ template<class T> struct ADDOP { __device__ static inline T identity() {return (T)(0);} __device__ static inline T apply(T a, T b) {return (T)(a + b);}; __device__ static inline T unapply(T a, T b) {return (T)(a - b);}; __device__ static inline T mask(bool flag, T b) {return (T)(-(int)(flag) & b);}; }; template<class OP, class T> __device__ __forceinline__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx ) { const unsigned int lane = idx & 31; if (lane >= 1) ptr[idx] = mysum = OP::apply(ptr[idx - 1], mysum); if (lane >= 2) ptr[idx] = mysum = OP::apply(ptr[idx - 2], mysum); if (lane >= 4) ptr[idx] = mysum = OP::apply(ptr[idx - 4], mysum); if (lane >= 8) ptr[idx] = mysum = OP::apply(ptr[idx - 8], mysum); if (lane >= 16) ptr[idx] = mysum = OP::apply(ptr[idx - 16], mysum); return ptr[idx]; } template<class OP, class T> __device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx) { const unsigned int lane = idx & 31; const unsigned int warpid = idx >> 5; T mysum = ptr[idx]; __syncthreads(); // step 1: Intra-warp scan in each warp T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx); __syncthreads(); // step 2: Collect per-warp particle results 
if (lane == 31) ptr[warpid] = ptr[idx]; __syncthreads(); mysum = ptr[idx]; // step 3: Use 1st warp to scan per-warp results if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx); __syncthreads(); // step 4: Accumulate results from Steps 1 and 3; if (warpid > 0) val = OP::apply(ptr[warpid - 1], val); __syncthreads(); // Step 5: Write and return the final result ptr[idx] = val; __syncthreads(); return val; //ptr[blockDim.x - 1]; } template<class OP, class T> __device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx) { T y = OP::identity(); volatile T *ptr = ptr_global; for (int p = 0; p < N; p += blockDim.x) { ptr = &ptr_global[p]; inclusive_scan_block<OP, T>(ptr, idx); ptr[idx] = OP::apply(ptr[idx], y); __syncthreads(); y = ptr[blockDim.x - 1]; __syncthreads(); } return y; } /****************************/ /****************************/ /****************************/ struct dev_particle { dcuvec3 pos; // 6 fcuvec3 vel; // 9 fcuvec3 acc; // 12 fcuvec3 jrk; // 15 float mass; // 16 float h2; // 17 unsigned int time; // 18 int id; // 19 int iPad; // 20 int iPadX[12]; __host__ __device__ dev_particle() {} __host__ dev_particle(const regf4::Particle&); }; #define PTCL_LEN (sizeof(dev_particle) / sizeof(float4)) struct dev_predictor { fcuvec3 pos; // 3 fcuvec3 vel; // 6 union { float mass; // 7 float dt; }; float h2; // 8 }; #define PRED_LEN (sizeof(dev_predictor) / sizeof(float4)) struct dev_force { ds64 accx, accy, accz; // 6 fcuvec3 jrk; // 9 float h2; // 10 int nngb; // 11 int iPad; // 12 __device__ dev_force() : accx(0.0f), accy(0.0f), accz(0.0f), jrk(0.0f), nngb(0) {} }; /********************************/ /********************************/ /********************************/ __global__ void dev_predict_ptcl( const int ni, const unsigned int tsys, const dev_particle *ptcl_in, __out dev_predictor *pred_out, __out float *dt_out) { const int id = blockIdx.x*blockDim.x + threadIdx.x; const int addr = id < ni ? 
id : ni-1; const dev_particle ip = ptcl_in[addr]; dev_predictor ipred; const float dt = DT_TICK*(tsys - ip.time); const float dt2 = dt*(1.0f/2.0f); const float dt3 = dt*(1.0f/3.0f); ipred.pos = ip.pos + dt*(ip.vel + dt2*(ip.acc + dt3*ip.jrk)); ipred.vel = ip.vel + dt*(ip.acc + dt2* ip.jrk); ipred.mass = ip.mass; ipred.h2 = ip.h2; if (id < ni) { pred_out[addr] = ipred; dt_out [addr] = dt; } } /********************************/ /********************************/ /********************************/ template<int NGB_PER_BLOCK> __forceinline__ __device__ dev_force dev_regfij( const unsigned int jidx, const dev_predictor pi, const dev_predictor pj, __out dev_force fi, __out unsigned int *ngb_list) { const fcuvec3 dr = pj.pos - pi.pos; const fcuvec3 dv = pj.vel - pi.vel; const float r2 = dr*dr; const float r2p = fminf(r2, (dr + pi.dt*dv).norm2()); if (r2p < pi.h2) { if (pj.mass > 0.0f) { ngb_list[fi.nngb & (NGB_PER_BLOCK-1)] = jidx; fi.nngb += (r2 > 0.0f); } } else { const float rv = dr*dv; const float rinv1 = rsqrt(r2 + EPS2); const float rinv2 = rinv1*rinv1; const float rinv3 = pj.mass*(rinv1*rinv2); const float alpha = rv*rinv2; const fcuvec3 Aij = rinv3*dr; const fcuvec3 Jij = rinv3*dv - Aij*(3.0f*alpha); fi.accx += Aij.x; fi.accy += Aij.y; fi.accz += Aij.z; fi.jrk += Jij; } return fi; } /********************************/ template<int NTHREAD, int NJBLOCK, int NJBLOCK2, int NGB_PER_BLOCK> __global__ void #if 0 __launch_bounds__ (NTHREAD, 1) #endif dev_regf( const int ni, const int nj_per_block, const int *active_list, const dev_predictor *pred_in, const float *dt_in, __out dev_force *force_out, __out unsigned int *ngb_out) { __shared__ dev_predictor jpshared[NTHREAD]; // compute iblock & jblock offset const int iblock = blockIdx.x*NTHREAD; const int jblock = blockIdx.y; const int tid = threadIdx.x; // read i-particle into registers const int idx = iblock + tid; const int addr = active_list[idx < ni ? 
idx : ni - 1]; dev_predictor ipred = pred_in[addr]; ipred.dt = dt_in[addr]; // initialize i-particle's force dev_force iforce; // obtain beginning & end of j particles for this block const int jbeg = jblock*nj_per_block; const int jend = jbeg + nj_per_block; unsigned int *ingb_ptr = ngb_out + NGB_PER_BLOCK*(jblock + NJBLOCK*idx); for (int j = jbeg; j < jend; j += NTHREAD) { #if 0 jpshared[tid] = pred_in[j + tid]; #else float4 *src = (float4*)&pred_in[j]; float4 *dst = (float4*)jpshared; #pragma unroll for (int it = 0; it < PRED_LEN; it++) { dst[tid] = src[tid]; dst += NTHREAD; src += NTHREAD; } #endif __syncthreads(); if (idx < ni) { #pragma unroll 8 for (int jj = 0; jj < NTHREAD; jj++) iforce = dev_regfij<NGB_PER_BLOCK>(j+jj, ipred, jpshared[jj], iforce, ingb_ptr); } __syncthreads(); } if (idx < ni) { iforce.h2 = ipred.h2; force_out[jblock + idx*NJBLOCK2] = iforce; } } /********************************/ /********************************/ /********************************/ template<class OP, class T, int NTHREAD> __device__ T reduce_block(volatile T *ptr, T mySum, const unsigned int tid) { ptr[tid] = mySum; __syncthreads(); if (NTHREAD >= 512) { if (tid < 256) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+256]); } __syncthreads(); } if (NTHREAD >= 256) { if (tid < 128) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+128]); } __syncthreads(); } if (NTHREAD >= 128) { if (tid < 64) { ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 64]); } __syncthreads(); } if (tid < 32) { if (NTHREAD >= 64) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+32]); if (NTHREAD >= 32) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+16]); if (NTHREAD >= 16) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 8]); if (NTHREAD >= 8) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 4]); if (NTHREAD >= 4) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 2]); if (NTHREAD >= 2) ptr[tid] = mySum = OP::apply(mySum, ptr[tid+ 1]); } __syncthreads(); return ptr[0]; } // here each particle is assigned to a single block... // for 60 active blocks in dev_regf, and 64 threads the max efficiency is 60/64... 
template<int NTHREAD, int NJBLOCK, int NJBLOCK2> __global__ void dev_reduce_regf( const dev_force *force_in, __out int2 *ngb_offset, __out dev_force *force_out) { // we use parallel prefix sum to obtain reduce forces const int idx = blockIdx.x; // body id const int tid = threadIdx.x; // block id __shared__ float shdata[2*NTHREAD]; double *shdbl = (double*)shdata; dev_force iforce; if (tid < NJBLOCK) iforce = force_in[tid + idx*NJBLOCK2]; iforce.accx.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accx.to_double(), tid); iforce.accy.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accy.to_double(), tid); iforce.accz.dbl = reduce_block<ADDOP<double>, double, NTHREAD>(shdbl, iforce.accz.to_double(), tid); iforce.jrk.x = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.x, tid); iforce.jrk.y = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.y, tid); iforce.jrk.z = reduce_block<ADDOP<float>, float, NTHREAD>(shdata, iforce.jrk.z, tid); int *shint = (int*)shdata; shint[tid] = iforce.nngb; inclusive_scan_block<ADDOP<int>, int>(shint, tid); const int nngb = shint[NTHREAD-1]; /* #ngb in a block, memory offset */ #if 0 if (idx == 0) { for (int t = 0; t < NTHREAD; t++) { __syncthreads(); if (t == tid) printf(" nnbb= %d offset= %d addr= %d tid= %d NJBLOCK= %d\n", iforce.nngb, shint[tid] - iforce.nngb, idx + tid*NJBLOCK, tid, NJBLOCK); } } #endif if (tid < NJBLOCK) ngb_offset[tid + idx*NJBLOCK] = (int2){iforce.nngb, shint[tid] - iforce.nngb}; if (tid == 0) { iforce.nngb = nngb; force_out[idx] = iforce; } } /********************************/ template<int NTHREAD, int NJBLOCK, int NGB_PER_BLOCK, int NGB_MAX> __global__ void dev_reduce_ngb( const int2 *ngb_offset, const unsigned int *ngb_in, __out unsigned int *ngb_out ) { const int idx = blockIdx.x; // body id const int tid = threadIdx.x; for (int i = 0; i < NJBLOCK; i++) { const int2 ingb = ngb_offset[i + idx*NJBLOCK]; const int nngb = ingb.x; const int offset = ingb.y; if (tid < nngb) { #if 0 if (idx == 0) { for (int t = 0; t < NTHREAD; t++) { __syncthreads(); if (tid == t) printf("block= %d tid= %d: addr= %d offset= %d nngb= %d newx= %d\n", i, tid, idx*NGB_MAX+offset+tid, offset, nngb ,offset+nngb); } } #endif const int offset_tot = min(offset+tid, NGB_MAX-1); ngb_out[idx*NGB_MAX+offset_tot] = ngb_in[NGB_PER_BLOCK*(i + NJBLOCK*idx)+tid]; } } } /********************************/ __global__ void dev_move_particles( const int nj, const int *addr_in, const dev_particle *ptcl_in, __out dev_particle *ptcl_out) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= nj) return; const int addr = addr_in[idx]; ptcl_out[addr] = ptcl_in[idx]; } /********************************/ struct gpot_struct { dcuvec3 pos; float mass; }; template<int BLOCKSIZE> __global__ void dev_compute_potential( const int ni, const int nj, const dev_particle *ptcl_in, __out float *gpot_out) { const int idx = blockDim.x*blockIdx.x + threadIdx.x; const int addr = idx < ni ? idx : ni - 1; const int tid = threadIdx.x; __shared__ gpot_struct shmem[BLOCKSIZE]; ds64 gpot(0.0f); const dcuvec3 ipos = ptcl_in[addr].pos; for (int j = 0; j < nj; j += BLOCKSIZE) { dev_particle pj = ptcl_in[j+tid]; shmem[tid].pos = pj.pos; shmem[tid].mass = pj.mass; __syncthreads(); #pragma unroll for (int jj = 0; jj < BLOCKSIZE; jj++) { const dcuvec3 jpos = shmem[jj].pos; const float jmass = shmem[jj].mass; const fcuvec3 dr = fcuvec3(jpos.x - ipos.x, jpos.y - ipos.y, jpos.z - ipos.z); const float r2 = dr*dr; const float rinv = (r2 > 0.0f) ? 
rsqrt(r2 + EPS2) : 0.0f; gpot += jmass * rinv; } __syncthreads(); } if (idx < ni) gpot_out[idx] = -gpot.to_double(); }
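// A host-side mirror of the ds64 accumulator defined above: one float carries the running sum
// and a second float carries the rounding error recovered at each step, so long reductions
// (such as summing many per-body contributions) lose far less precision than a plain float sum.
// The struct name and the driver below are a sketch; only the operator+= arithmetic is taken from ds64.
#include <cstdio>

struct ds64_host {
    float hi = 0.0f, lo = 0.0f;
    void add(float x) {
        const float vx = hi + x;
        const float vy = lo - ((vx - hi) - x);   // rounding error of the add, folded into the low word
        hi = vx;
        lo = vy;
    }
    double value() const { return double(hi) + double(lo); }   // same idea as ds64::to_double()
};

int main()
{
    ds64_host comp;
    float naive = 0.0f;
    for (int i = 0; i < 10000000; ++i) {
        comp.add(1e-4f);
        naive += 1e-4f;
    }
    // The true total is ~1000; the compensated sum typically stays much closer to it than the
    // naive float accumulation, which loses low-order bits once the sum grows large.
    std::printf("naive float sum: %.4f\ncompensated sum: %.4f\n", naive, comp.value());
    return 0;
}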
the_stack
* @file * The cub::WarpExchange class provides [<em>collective</em>](index.html#sec0) * methods for rearranging data partitioned across a CUDA warp. */ #pragma once #include <cub/config.cuh> #include <cub/util_ptx.cuh> #include <cub/util_type.cuh> CUB_NAMESPACE_BEGIN /** * @brief The WarpExchange class provides [<em>collective</em>](index.html#sec0) * methods for rearranging data partitioned across a CUDA warp. * @ingroup WarpModule * * @tparam T * The data type to be exchanged. * * @tparam ITEMS_PER_THREAD * The number of items partitioned onto each thread. * * @tparam LOGICAL_WARP_THREADS * <b>[optional]</b> The number of threads per "logical" warp (may be less * than the number of hardware warp threads). Default is the warp size of the * targeted CUDA compute-capability (e.g., 32 threads for SM86). Must be a * power of two. * * @tparam PTX_ARCH * <b>[optional]</b> \ptxversion * * @par Overview * - It is commonplace for a warp of threads to rearrange data items between * threads. For example, the global memory accesses prefer patterns where * data items are "striped" across threads (where consecutive threads access * consecutive items), yet most warp-wide operations prefer a "blocked" * partitioning of items across threads (where consecutive items belong to a * single thread). * - WarpExchange supports the following types of data exchanges: * - Transposing between [<em>blocked</em>](index.html#sec5sec3) and * [<em>striped</em>](index.html#sec5sec3) arrangements * - Scattering ranked items to a * [<em>striped arrangement</em>](index.html#sec5sec3) * * @par A Simple Example * @par * The code snippet below illustrates the conversion from a "blocked" to a * "striped" arrangement of 64 integer items partitioned across 16 threads where * each thread owns 4 items. * @par * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * constexpr int warps_per_block = block_threads / warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each * using WarpExchangeT = * cub::WarpExchange<int, items_per_thread, warp_threads>; * * // Allocate shared memory for WarpExchange * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; * * // Load a tile of data striped across threads * int thread_data[items_per_thread]; * // ... * * // Collectively exchange data into a blocked arrangement across threads * WarpExchangeT(temp_storage[warp_id]).StripedToBlocked(thread_data, thread_data); * @endcode * @par * Suppose the set of striped input @p thread_data across the block of threads * is <tt>{ [0,16,32,48], [1,17,33,49], ..., [15, 32, 47, 63] }</tt>. * The corresponding output @p thread_data in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [60,61,62,63] }</tt>. 
*/ template <typename InputT, int ITEMS_PER_THREAD, int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, int PTX_ARCH = CUB_PTX_ARCH> class WarpExchange { static_assert(PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE, "LOGICAL_WARP_THREADS must be a power of two"); constexpr static int ITEMS_PER_TILE = ITEMS_PER_THREAD * LOGICAL_WARP_THREADS + 1; constexpr static bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH); constexpr static int LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH); // Insert padding if the number of items per thread is a power of two // and > 4 (otherwise we can typically use 128b loads) constexpr static bool INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo<ITEMS_PER_THREAD>::VALUE); constexpr static int PADDING_ITEMS = INSERT_PADDING ? (ITEMS_PER_TILE >> LOG_SMEM_BANKS) : 0; union _TempStorage { InputT items_shared[ITEMS_PER_TILE + PADDING_ITEMS]; }; // union TempStorage /// Shared storage reference _TempStorage &temp_storage; const unsigned int lane_id; const unsigned int warp_id; const unsigned int member_mask; public: /// \smemstorage{WarpExchange} struct TempStorage : Uninitialized<_TempStorage> {}; /*************************************************************************//** * @name Collective constructors ****************************************************************************/ //@{ WarpExchange() = delete; /** * @brief Collective constructor using the specified memory allocation as * temporary storage. */ explicit __device__ __forceinline__ WarpExchange(TempStorage &temp_storage) : temp_storage(temp_storage.Alias()) , lane_id(IS_ARCH_WARP ? LaneId() : (LaneId() % LOGICAL_WARP_THREADS)) , warp_id(IS_ARCH_WARP ? 0 : (LaneId() / LOGICAL_WARP_THREADS)) , member_mask(WarpMask<LOGICAL_WARP_THREADS>(warp_id)) { } //@} end member group /*************************************************************************//** * @name Data movement ****************************************************************************/ //@{ /** * @brief Transposes data items from <em>blocked</em> arrangement to * <em>striped</em> arrangement. * * @par * \smemreuse * * @par Snippet * The code snippet below illustrates the conversion from a "blocked" to a * "striped" arrangement of 64 integer items partitioned across 16 threads * where each thread owns 4 items. * @par * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * constexpr int warps_per_block = block_threads / warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each * using WarpExchangeT = cub::WarpExchange<int, items_per_thread, warp_threads>; * * // Allocate shared memory for WarpExchange * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[items_per_thread]; * // ... * * // Collectively exchange data into a striped arrangement across threads * WarpExchangeT(temp_storage[warp_id]).BlockedToStriped(thread_data, thread_data); * @endcode * @par * Suppose the set of striped input @p thread_data across the block of threads * is <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [60,61,62,63] }</tt>. 
* The corresponding output @p thread_data in those threads will be * <tt>{ [0,16,32,48], [1,17,33,49], ..., [15, 32, 47, 63] }</tt>. * * @param[in] input_items * Items to exchange, converting between <em>blocked</em> and * <em>striped</em> arrangements. * * @param[out] output_items * Items from exchange, converting between <em>striped</em> and * <em>blocked</em> arrangements. May be aliased to @p input_items. */ template <typename OutputT> __device__ __forceinline__ void BlockedToStriped(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) { for (int item = 0; item < ITEMS_PER_THREAD; item++) { const int idx = ITEMS_PER_THREAD * lane_id + item; temp_storage.items_shared[idx] = input_items[item]; } WARP_SYNC(member_mask); for (int item = 0; item < ITEMS_PER_THREAD; item++) { const int idx = LOGICAL_WARP_THREADS * item + lane_id; output_items[item] = temp_storage.items_shared[idx]; } } /** * @brief Transposes data items from <em>striped</em> arrangement to * <em>blocked</em> arrangement. * * @par * \smemreuse * * @par Snippet * The code snippet below illustrates the conversion from a "striped" to a * "blocked" arrangement of 64 integer items partitioned across 16 threads * where each thread owns 4 items. * @par * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * constexpr int warps_per_block = block_threads / warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each * using WarpExchangeT = cub::WarpExchange<int, items_per_thread, warp_threads>; * * // Allocate shared memory for WarpExchange * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; * * // Load a tile of data striped across threads * int thread_data[items_per_thread]; * // ... * * // Collectively exchange data into a blocked arrangement across threads * WarpExchangeT(temp_storage[warp_id]).StripedToBlocked(thread_data, thread_data); * @endcode * @par * Suppose the set of striped input @p thread_data across the block of threads * is <tt>{ [0,16,32,48], [1,17,33,49], ..., [15, 32, 47, 63] }</tt>. * The corresponding output @p thread_data in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [60,61,62,63] }</tt>. * * @param[in] input_items * Items to exchange * * @param[out] output_items * Items from exchange. May be aliased to @p input_items. */ template <typename OutputT> __device__ __forceinline__ void StripedToBlocked(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) { for (int item = 0; item < ITEMS_PER_THREAD; item++) { const int idx = LOGICAL_WARP_THREADS * item + lane_id; temp_storage.items_shared[idx] = input_items[item]; } WARP_SYNC(member_mask); for (int item = 0; item < ITEMS_PER_THREAD; item++) { const int idx = ITEMS_PER_THREAD * lane_id + item; output_items[item] = temp_storage.items_shared[idx]; } } /** * @brief Exchanges valid data items annotated by rank * into <em>striped</em> arrangement. * * @par * \smemreuse * * @par Snippet * The code snippet below illustrates the conversion from a "scatter" to a * "striped" arrangement of 64 integer items partitioned across 16 threads * where each thread owns 4 items. 
* @par * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * constexpr int warps_per_block = block_threads / warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each * using WarpExchangeT = cub::WarpExchange<int, items_per_thread, warp_threads>; * * // Allocate shared memory for WarpExchange * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[items_per_thread]; * int thread_ranks[items_per_thread]; * // ... * * // Collectively exchange data into a striped arrangement across threads * WarpExchangeT(temp_storage[warp_id]).ScatterToStriped( * thread_data, thread_ranks); * @endcode * @par * Suppose the set of input @p thread_data across the block of threads * is `{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }`, and the set of * @p thread_ranks is `{ [63,62,61,60], ..., [7,6,5,4], [3,2,1,0] }`. The * corresponding output @p thread_data in those threads will be * `{ [63, 47, 31, 15], [62, 46, 30, 14], ..., [48, 32, 16, 0] }`. * * @tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets * * @param[in,out] items Items to exchange * @param[in] ranks Corresponding scatter ranks */ template <typename OffsetT> __device__ __forceinline__ void ScatterToStriped(InputT (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) { ScatterToStriped(items, items, ranks); } /** * @brief Exchanges valid data items annotated by rank * into <em>striped</em> arrangement. * * @par * \smemreuse * * @par Snippet * The code snippet below illustrates the conversion from a "scatter" to a * "striped" arrangement of 64 integer items partitioned across 16 threads * where each thread owns 4 items. * @par * @code * #include <cub/cub.cuh> // or equivalently <cub/warp/warp_exchange.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * constexpr int warp_threads = 16; * constexpr int block_threads = 256; * constexpr int items_per_thread = 4; * constexpr int warps_per_block = block_threads / warp_threads; * const int warp_id = static_cast<int>(threadIdx.x) / warp_threads; * * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each * using WarpExchangeT = cub::WarpExchange<int, items_per_thread, warp_threads>; * * // Allocate shared memory for WarpExchange * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_input[items_per_thread]; * int thread_ranks[items_per_thread]; * // ... * * // Collectively exchange data into a striped arrangement across threads * int thread_output[items_per_thread]; * WarpExchangeT(temp_storage[warp_id]).ScatterToStriped( * thread_input, thread_output, thread_ranks); * @endcode * @par * Suppose the set of input @p thread_input across the block of threads * is `{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }`, and the set of * @p thread_ranks is `{ [63,62,61,60], ..., [7,6,5,4], [3,2,1,0] }`. The * corresponding @p thread_output in those threads will be * `{ [63, 47, 31, 15], [62, 46, 30, 14], ..., [48, 32, 16, 0] }`. 
* * @tparam OffsetT <b>[inferred]</b> Signed integer type for local offsets * * @param[in] input_items * Items to exchange * * @param[out] output_items * Items from exchange. May be aliased to @p input_items. * * @param[in] ranks * Corresponding scatter ranks */ template <typename OutputT, typename OffsetT> __device__ __forceinline__ void ScatterToStriped(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (INSERT_PADDING) { ranks[ITEM] = SHR_ADD(ranks[ITEM], LOG_SMEM_BANKS, ranks[ITEM]); } temp_storage.items_shared[ranks[ITEM]] = input_items[ITEM]; } WARP_SYNC(member_mask); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { int item_offset = (ITEM * LOGICAL_WARP_THREADS) + lane_id; if (INSERT_PADDING) { item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); } output_items[ITEM] = temp_storage.items_shared[item_offset]; } } //@} end member group }; CUB_NAMESPACE_END
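//==============================================================================
// Hedged usage sketch for the WarpExchange documentation above: a minimal
// kernel that loads a blocked arrangement, transposes it to a striped one with
// BlockedToStriped(), and writes it back out so the permutation can be checked
// on the host. Assumes a CUB version that ships cub::WarpExchange (CUB 1.14 /
// CUDA 11.5 or newer); the kernel and buffer names are illustrative only.
#include <cub/warp/warp_exchange.cuh>

__global__ void WarpExchangeBlockedToStripedDemo(const int *d_in, int *d_out)
{
    constexpr int warp_threads     = 16;
    constexpr int items_per_thread = 4;
    constexpr int block_threads    = 256;
    constexpr int warps_per_block  = block_threads / warp_threads;
    using WarpExchangeT = cub::WarpExchange<int, items_per_thread, warp_threads>;

    __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block];

    const int warp_id = static_cast<int>(threadIdx.x) / warp_threads;
    const int lane_id = static_cast<int>(threadIdx.x) % warp_threads;
    const int tile    = (blockIdx.x * warps_per_block + warp_id) *
                        warp_threads * items_per_thread;

    // Blocked load: lane 0 owns items {0,1,2,3}, lane 1 owns {4,5,6,7}, ...
    int thread_data[items_per_thread];
    for (int i = 0; i < items_per_thread; ++i)
        thread_data[i] = d_in[tile + lane_id * items_per_thread + i];

    // After the exchange lane 0 owns {0,16,32,48}, lane 1 owns {1,17,33,49},
    // and so on -- the arrangement described in the snippet above. Aliasing
    // the input and output registers is explicitly allowed by the interface.
    WarpExchangeT(temp_storage[warp_id]).BlockedToStriped(thread_data, thread_data);

    // Striped store, one item per logical-warp stride.
    for (int i = 0; i < items_per_thread; ++i)
        d_out[tile + i * warp_threads + lane_id] = thread_data[i];
}
// Launched with <<<grid, 256>>> over 64 items per virtual warp, d_out ends up
// identical to d_in, which is the usual round-trip check for this pattern.
//==============================================================================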
namespace at { namespace native { template <typename scalar_t> struct TopKTypeConfig {}; template <> struct TopKTypeConfig<float> { typedef uint32_t RadixType; // Converts a float to an integer representation with the same // sorting; i.e., for floats f1, f2: // if f1 < f2 then convert(f1) < convert(f2) // We use this to enable radix selection of floating-point values. // This also gives a relative order for NaNs, but that's ok, as they // will all be adjacent // neg inf: signbit=1 exp=ff fraction=0 --> radix = 0 00 ff.. // pos inf: signbit=0 exp=ff fraction=0 --> radix = 1 ff 00.. // pos nan: signbit=0 exp=ff fraction>0 --> radix = 1 ff x>0 // neg nan: signbit=1 exp=ff fraction>0 --> radix = 0 00 x<ff... static inline __device__ RadixType convert(float v) { RadixType x = __float_as_int(v); RadixType mask = (x & 0x80000000) ? 0xffffffff : 0x80000000; return (v == v) ? (x ^ mask) : 0xffffffff; } static inline __device__ float deconvert(RadixType v) { RadixType mask = (v & 0x80000000) ? 0x80000000 : 0xffffffff; return __int_as_float(v ^ mask); } }; template <> struct TopKTypeConfig<uint8_t> { typedef uint32_t RadixType; static inline __device__ RadixType convert(uint8_t v) { return v; } static inline __device__ uint8_t deconvert(RadixType v) { return v; } }; template <> struct TopKTypeConfig<int8_t> { typedef uint32_t RadixType; static inline __device__ RadixType convert(int8_t v) { return 128u + v; } static inline __device__ int8_t deconvert(RadixType v) { return v - 128; } }; template <> struct TopKTypeConfig<int16_t> { typedef uint32_t RadixType; static inline __device__ RadixType convert(int16_t v) { static_assert(sizeof(short) == 2, ""); return 32768u + v; } static inline __device__ int16_t deconvert(RadixType v) { return v - 32768; } }; template <> struct TopKTypeConfig<int32_t> { typedef uint32_t RadixType; static inline __device__ RadixType convert(int32_t v) { static_assert(sizeof(int) == 4, ""); return 2147483648u + v; } static inline __device__ int32_t deconvert(RadixType v) { return v - 2147483648u; } }; template <> struct TopKTypeConfig<int64_t> { typedef uint64_t RadixType; static inline __device__ RadixType convert(int64_t v) { static_assert(sizeof(int64_t) == 8, ""); return 9223372036854775808ull + v; } static inline __device__ int64_t deconvert(RadixType v) { return v - 9223372036854775808ull; } }; template <> struct TopKTypeConfig<double> { typedef uint64_t RadixType; static inline __device__ RadixType convert(double v) { RadixType x = __double_as_longlong(v); RadixType mask = -((x >> 63)) | 0x8000000000000000; return (v == v) ? (x ^ mask) : 0xffffffffffffffff; } static inline __device__ double deconvert(RadixType v) { RadixType mask = ((v >> 63) - 1) | 0x8000000000000000; return __longlong_as_double(v ^ mask); } }; template <> struct TopKTypeConfig<at::Half> { typedef uint32_t RadixType; static inline __device__ RadixType convert(at::Half v) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) RadixType x = __half_as_ushort(v); RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000; return (v == v) ? (x ^ mask) : 0xffff; #else assert(false); return 0u; #endif } static inline __device__ at::Half deconvert(RadixType v) { #if defined(__CUDA_ARCH__) || defined(__HIP_PLATFORM_HCC__) RadixType mask = (v & 0x00008000) ? 
0x00008000 : 0x0000ffff; return __ushort_as_half(v ^ mask); #else assert(false); return static_cast<at::Half>(0); #endif } }; template <> struct TopKTypeConfig<at::BFloat16> { typedef uint32_t RadixType; static inline __device__ RadixType convert(at::BFloat16 v) { RadixType x = v.x; RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000; return (v == v) ? (x ^ mask) : 0xffff; } static inline __device__ at::BFloat16 deconvert(RadixType v) { RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff; at::BFloat16 r; r.x = (v ^ mask); return r; } }; // This function counts the distribution of all input values in a // slice we are selecting by radix digit at `radixDigitPos`, but only // those that pass the filter `((v & desiredMask) == desired)`. // This produces and broadcasts the seen counts for a single block only. // `smem` must have at least `RadixSize` elements. template < typename scalar_t, typename bitwise_t, typename index_t, typename CountType, int RadixSize, int RadixBits> __device__ void countRadixUsingMask( CountType counts[RadixSize], CountType* smem, bitwise_t desired, bitwise_t desiredMask, int radixDigitPos, index_t sliceSize, index_t withinSliceStride, scalar_t* data) { // Clear out per-thread counts from a previous round #pragma unroll for (int i = 0; i < RadixSize; ++i) { counts[i] = 0; } if (threadIdx.x < RadixSize) { smem[threadIdx.x] = 0; } __syncthreads(); // Scan over all the data. Upon a read, the warp will accumulate // counts per each digit in the radix using warp voting. for (index_t i = threadIdx.x; i < sliceSize; i += blockDim.x) { bitwise_t val = TopKTypeConfig<scalar_t>::convert(doLdg(&data[i * withinSliceStride])); bool hasVal = ((val & desiredMask) == desired); bitwise_t digitInRadix = Bitfield<bitwise_t>::getBitfield(val, radixDigitPos, RadixBits); #pragma unroll for (uint32_t j = 0; j < RadixSize; ++j) { bool vote = hasVal && (digitInRadix == j); #if defined(__HIP_PLATFORM_HCC__) counts[j] += __popcll(WARP_BALLOT(vote)); #else counts[j] += __popc(WARP_BALLOT(vote, ACTIVE_MASK())); #endif } } // Now, for each warp, sum values if (getLaneId() == 0) { #pragma unroll for (uint32_t i = 0; i < RadixSize; ++i) { gpuAtomicAdd(&smem[i], counts[i]); } } __syncthreads(); // For each thread, read in the total counts #pragma unroll for (uint32_t i = 0; i < RadixSize; ++i) { counts[i] = smem[i]; } __syncthreads(); } // Over what radix we are selecting values constexpr int RADIX_BITS = 2; // digits are base-(2 ^ RADIX_BITS) constexpr int RADIX_SIZE = 4; // 2 ^ RADIX_BITS constexpr int RADIX_MASK = (RADIX_SIZE - 1); // This finds the unique value `v` that matches the pattern // ((v & desired) == desiredMask) in our sorted int format template <typename scalar_t, typename bitwise_t, typename index_t> __device__ scalar_t findPattern( scalar_t* smem, scalar_t* data, index_t sliceSize, index_t withinSliceStride, bitwise_t desired, bitwise_t desiredMask) { if (threadIdx.x < 2) { smem[threadIdx.x] = static_cast<scalar_t>(0); } __syncthreads(); // All threads participate in the loop, in order to sync on the flag index_t numIterations = THCRoundUp(sliceSize, static_cast<index_t>(blockDim.x)); for (index_t i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < sliceSize); scalar_t v = inRange ? 
doLdg(&data[i * withinSliceStride]) : static_cast<scalar_t>(0); if (inRange && ((TopKTypeConfig<scalar_t>::convert(v) & desiredMask) == desired)) { // There should not be conflicts if we are using findPattern, // since the result is unique smem[0] = static_cast<scalar_t>(1); smem[1] = v; // can't use val as the flag, since it could be 0 } __syncthreads(); scalar_t found = smem[0]; scalar_t val = smem[1]; __syncthreads(); // Check to see if a thread found the value if (THCNumerics<scalar_t>::ne(found, static_cast<scalar_t>(0))) { // all threads return this value return val; } } // should not get here assert(false); return static_cast<scalar_t>(0); } // Returns the top-Kth element found in the data using radix selection template <typename scalar_t, typename bitwise_t, typename index_t, bool Order> __device__ void radixSelect( scalar_t* data, index_t k, index_t sliceSize, index_t withinSliceStride, int* smem, scalar_t* topK) { // Per-thread buckets into which we accumulate digit counts in our // radix int counts[RADIX_SIZE]; // We only consider elements x such that (x & desiredMask) == desired // Initially, we consider all elements of the array, so the above // statement is true regardless of input. bitwise_t desired = 0; bitwise_t desiredMask = 0; // We are looking for the top kToFind-th element when iterating over // digits; this count gets reduced by elimination when counting // successive digits int kToFind = k; // We start at the most significant digit in our radix, scanning // through to the least significant digit #pragma unroll for (int digitPos = sizeof(scalar_t) * 8 - RADIX_BITS; digitPos >= 0; digitPos -= RADIX_BITS) { // Count radix distribution for the current position and reduce // across all threads countRadixUsingMask< scalar_t, bitwise_t, index_t, int, RADIX_SIZE, RADIX_BITS>( counts, smem, desired, desiredMask, digitPos, sliceSize, withinSliceStride, data); auto found_unique = [&](int i, int count) -> bool { /* All threads have the same value in counts here, so all */ /* threads will return from the function. */ if (count == 1 && kToFind == 1) { /* There is a unique answer. */ desired = Bitfield<bitwise_t>::setBitfield(desired, i, digitPos, RADIX_BITS); desiredMask = Bitfield<bitwise_t>::setBitfield( desiredMask, RADIX_MASK, digitPos, RADIX_BITS); /* The answer is now the unique element v such that: */ /* (v & desiredMask) == desired */ /* However, we do not yet know what the actual element is. We */ /* need to perform a search through the data to find the */ /* element that matches this pattern. 
*/ *topK = findPattern<scalar_t, bitwise_t, index_t>( (scalar_t*)smem, data, sliceSize, withinSliceStride, desired, desiredMask); return true; } return false; }; auto found_non_unique = [&](int i, int count) -> bool { if (count >= kToFind) { desired = Bitfield<bitwise_t>::setBitfield(desired, i, digitPos, RADIX_BITS); desiredMask = Bitfield<bitwise_t>::setBitfield( desiredMask, RADIX_MASK, digitPos, RADIX_BITS); /* The top-Kth element v must now be one such that: */ /* (v & desiredMask == desired) */ /* but we haven't narrowed it down; we must check the next */ /* least-significant digit */ return true; } kToFind -= count; return false; // continue the loop }; // All threads participate in the comparisons below to know the // final result if (Order) { // Process in descending order #pragma unroll for (int i = RADIX_SIZE - 1; i >= 0; --i) { int count = counts[i]; if (found_unique(i, count)) { return; } if (found_non_unique(i, count)) { break; } } } else { // Process in ascending order #pragma unroll for (int i = 0; i < RADIX_SIZE; ++i) { int count = counts[i]; if (found_unique(i, count)) { return; } if (found_non_unique(i, count)) { break; } } } } // end digitPos for // There is no unique result, but there is a non-unique result // matching `desired` exactly *topK = TopKTypeConfig<scalar_t>::deconvert(desired); } } // namespace native } // namespace at
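//==============================================================================
// Hedged illustration of the order-preserving conversion used by
// TopKTypeConfig<float> above, restated as a standalone kernel outside the
// at::native namespace so the monotonicity property can be checked in
// isolation. The function and kernel names here are illustrative and are not
// part of ATen.
#include <cstdint>

__device__ __forceinline__ uint32_t float_to_radix(float v) {
  uint32_t x = __float_as_uint(v);
  // Positive values get their sign bit set; negative values are bit-flipped,
  // so unsigned integer order matches the floating-point order.
  uint32_t mask = (x & 0x80000000u) ? 0xffffffffu : 0x80000000u;
  return (v == v) ? (x ^ mask) : 0xffffffffu;  // NaNs map to the largest key
}

// Expects `vals` sorted ascending with no NaNs; clears *ok if the radix images
// are not ascending as well, i.e. if the mapping broke the ordering.
__global__ void check_radix_monotonic(const float *vals, int n, int *ok) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i + 1 < n && float_to_radix(vals[i]) > float_to_radix(vals[i + 1]))
    atomicExch(ok, 0);
}
//==============================================================================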
#define __fadd_rn(a,b) ((a)+(b)) #define __fsub_rn(a,b) ((a)-(b)) #define __frsqrt_rn(a) (1.f / sqrtf(a)) union un { float f; unsigned int ui; }; __device__ __host__ __forceinline__ void svd( float a11, float a12, float a13, float a21, float a22, float a23, float a31, float a32, float a33, // input A float &u11, float &u12, float &u13, float &u21, float &u22, float &u23, float &u31, float &u32, float &u33, // output U float &s11, //float &s12, float &s13, float &s21, float &s22, //float &s23, float &s31, float &s32, float &s33, // output S float &v11, float &v12, float &v13, float &v21, float &v22, float &v23, float &v31, float &v32, float &v33 // output V ) { un Sa11, Sa21, Sa31, Sa12, Sa22, Sa32, Sa13, Sa23, Sa33; un Su11, Su21, Su31, Su12, Su22, Su32, Su13, Su23, Su33; un Sv11, Sv21, Sv31, Sv12, Sv22, Sv32, Sv13, Sv23, Sv33; un Sc, Ss, Sch, Ssh; un Stmp1, Stmp2, Stmp3, Stmp4, Stmp5; un Ss11, Ss21, Ss31, Ss22, Ss32, Ss33; un Sqvs, Sqvvx, Sqvvy, Sqvvz; Sa11.f = a11; Sa12.f = a12; Sa13.f = a13; Sa21.f = a21; Sa22.f = a22; Sa23.f = a23; Sa31.f = a31; Sa32.f = a32; Sa33.f = a33; //########################################################### // Compute normal equations matrix //########################################################### Ss11.f = Sa11.f*Sa11.f; Stmp1.f = Sa21.f*Sa21.f; Ss11.f = __fadd_rn(Stmp1.f, Ss11.f); Stmp1.f = Sa31.f*Sa31.f; Ss11.f = __fadd_rn(Stmp1.f, Ss11.f); Ss21.f = Sa12.f*Sa11.f; Stmp1.f = Sa22.f*Sa21.f; Ss21.f = __fadd_rn(Stmp1.f, Ss21.f); Stmp1.f = Sa32.f*Sa31.f; Ss21.f = __fadd_rn(Stmp1.f, Ss21.f); Ss31.f = Sa13.f*Sa11.f; Stmp1.f = Sa23.f*Sa21.f; Ss31.f = __fadd_rn(Stmp1.f, Ss31.f); Stmp1.f = Sa33.f*Sa31.f; Ss31.f = __fadd_rn(Stmp1.f, Ss31.f); Ss22.f = Sa12.f*Sa12.f; Stmp1.f = Sa22.f*Sa22.f; Ss22.f = __fadd_rn(Stmp1.f, Ss22.f); Stmp1.f = Sa32.f*Sa32.f; Ss22.f = __fadd_rn(Stmp1.f, Ss22.f); Ss32.f = Sa13.f*Sa12.f; Stmp1.f = Sa23.f*Sa22.f; Ss32.f = __fadd_rn(Stmp1.f, Ss32.f); Stmp1.f = Sa33.f*Sa32.f; Ss32.f = __fadd_rn(Stmp1.f, Ss32.f); Ss33.f = Sa13.f*Sa13.f; Stmp1.f = Sa23.f*Sa23.f; Ss33.f = __fadd_rn(Stmp1.f, Ss33.f); Stmp1.f = Sa33.f*Sa33.f; Ss33.f = __fadd_rn(Stmp1.f, Ss33.f); Sqvs.f = 1.f; Sqvvx.f = 0.f; Sqvvy.f = 0.f; Sqvvz.f = 0.f; //########################################################### // Solve symmetric eigenproblem using Jacobi iteration //########################################################### for (int i = 0; i < 4; i++) { Ssh.f = Ss21.f * 0.5f; Stmp5.f = __fsub_rn(Ss11.f, Ss22.f); Stmp2.f = Ssh.f*Ssh.f; Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0; Ssh.ui = Stmp1.ui&Ssh.ui; Sch.ui = Stmp1.ui&Stmp5.ui; Stmp2.ui = ~Stmp1.ui&gone; Sch.ui = Sch.ui | Stmp2.ui; Stmp1.f = Ssh.f*Ssh.f; Stmp2.f = Sch.f*Sch.f; Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp4.f = __frsqrt_rn(Stmp3.f); Ssh.f = Stmp4.f*Ssh.f; Sch.f = Stmp4.f*Sch.f; Stmp1.f = gfour_gamma_squared*Stmp1.f; Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 
0xffffffff : 0; Stmp2.ui = gsine_pi_over_eight&Stmp1.ui; Ssh.ui = ~Stmp1.ui&Ssh.ui; Ssh.ui = Ssh.ui | Stmp2.ui; Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui; Sch.ui = ~Stmp1.ui&Sch.ui; Sch.ui = Sch.ui | Stmp2.ui; Stmp1.f = Ssh.f * Ssh.f; Stmp2.f = Sch.f * Sch.f; Sc.f = __fsub_rn(Stmp2.f, Stmp1.f); Ss.f = Sch.f * Ssh.f; Ss.f = __fadd_rn(Ss.f, Ss.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f); #endif //########################################################### // Perform the actual Givens conjugation //########################################################### Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f); Ss33.f = Ss33.f * Stmp3.f; Ss31.f = Ss31.f * Stmp3.f; Ss32.f = Ss32.f * Stmp3.f; Ss33.f = Ss33.f * Stmp3.f; Stmp1.f = Ss.f * Ss31.f; Stmp2.f = Ss.f * Ss32.f; Ss31.f = Sc.f * Ss31.f; Ss32.f = Sc.f * Ss32.f; Ss31.f = __fadd_rn(Stmp2.f, Ss31.f); Ss32.f = __fsub_rn(Ss32.f, Stmp1.f); Stmp2.f = Ss.f*Ss.f; Stmp1.f = Ss22.f*Stmp2.f; Stmp3.f = Ss11.f*Stmp2.f; Stmp4.f = Sc.f*Sc.f; Ss11.f = Ss11.f*Stmp4.f; Ss22.f = Ss22.f*Stmp4.f; Ss11.f = __fadd_rn(Ss11.f, Stmp1.f); Ss22.f = __fadd_rn(Ss22.f, Stmp3.f); Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f); Stmp2.f = __fadd_rn(Ss21.f, Ss21.f); Ss21.f = Ss21.f*Stmp4.f; Stmp4.f = Sc.f*Ss.f; Stmp2.f = Stmp2.f*Stmp4.f; Stmp5.f = Stmp5.f*Stmp4.f; Ss11.f = __fadd_rn(Ss11.f, Stmp2.f); Ss21.f = __fsub_rn(Ss21.f, Stmp5.f); Ss22.f = __fsub_rn(Ss22.f, Stmp2.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("%.20g\n", Ss11.f); printf("%.20g %.20g\n", Ss21.f, Ss22.f); printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f); #endif //########################################################### // Compute the cumulative rotation, in quaternion form //########################################################### Stmp1.f = Ssh.f*Sqvvx.f; Stmp2.f = Ssh.f*Sqvvy.f; Stmp3.f = Ssh.f*Sqvvz.f; Ssh.f = Ssh.f*Sqvs.f; Sqvs.f = Sch.f*Sqvs.f; Sqvvx.f = Sch.f*Sqvvx.f; Sqvvy.f = Sch.f*Sqvvy.f; Sqvvz.f = Sch.f*Sqvvz.f; Sqvvz.f = __fadd_rn(Sqvvz.f, Ssh.f); Sqvs.f = __fsub_rn(Sqvs.f, Stmp3.f); Sqvvx.f = __fadd_rn(Sqvvx.f, Stmp2.f); Sqvvy.f = __fsub_rn(Sqvvy.f, Stmp1.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f); #endif ////////////////////////////////////////////////////////////////////////// // (1->3) ////////////////////////////////////////////////////////////////////////// Ssh.f = Ss32.f * 0.5f; Stmp5.f = __fsub_rn(Ss22.f, Ss33.f); Stmp2.f = Ssh.f * Ssh.f; Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0; Ssh.ui = Stmp1.ui&Ssh.ui; Sch.ui = Stmp1.ui&Stmp5.ui; Stmp2.ui = ~Stmp1.ui&gone; Sch.ui = Sch.ui | Stmp2.ui; Stmp1.f = Ssh.f * Ssh.f; Stmp2.f = Sch.f * Sch.f; Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp4.f = __frsqrt_rn(Stmp3.f); Ssh.f = Stmp4.f * Ssh.f; Sch.f = Stmp4.f * Sch.f; Stmp1.f = gfour_gamma_squared * Stmp1.f; Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 
0xffffffff : 0; Stmp2.ui = gsine_pi_over_eight&Stmp1.ui; Ssh.ui = ~Stmp1.ui&Ssh.ui; Ssh.ui = Ssh.ui | Stmp2.ui; Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui; Sch.ui = ~Stmp1.ui&Sch.ui; Sch.ui = Sch.ui | Stmp2.ui; Stmp1.f = Ssh.f * Ssh.f; Stmp2.f = Sch.f * Sch.f; Sc.f = __fsub_rn(Stmp2.f, Stmp1.f); Ss.f = Sch.f*Ssh.f; Ss.f = __fadd_rn(Ss.f, Ss.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f); #endif //########################################################### // Perform the actual Givens conjugation //########################################################### Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f); Ss11.f = Ss11.f * Stmp3.f; Ss21.f = Ss21.f * Stmp3.f; Ss31.f = Ss31.f * Stmp3.f; Ss11.f = Ss11.f * Stmp3.f; Stmp1.f = Ss.f*Ss21.f; Stmp2.f = Ss.f*Ss31.f; Ss21.f = Sc.f*Ss21.f; Ss31.f = Sc.f*Ss31.f; Ss21.f = __fadd_rn(Stmp2.f, Ss21.f); Ss31.f = __fsub_rn(Ss31.f, Stmp1.f); Stmp2.f = Ss.f*Ss.f; Stmp1.f = Ss33.f*Stmp2.f; Stmp3.f = Ss22.f*Stmp2.f; Stmp4.f = Sc.f * Sc.f; Ss22.f = Ss22.f * Stmp4.f; Ss33.f = Ss33.f * Stmp4.f; Ss22.f = __fadd_rn(Ss22.f, Stmp1.f); Ss33.f = __fadd_rn(Ss33.f, Stmp3.f); Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f); Stmp2.f = __fadd_rn(Ss32.f, Ss32.f); Ss32.f = Ss32.f*Stmp4.f; Stmp4.f = Sc.f*Ss.f; Stmp2.f = Stmp2.f*Stmp4.f; Stmp5.f = Stmp5.f*Stmp4.f; Ss22.f = __fadd_rn(Ss22.f, Stmp2.f); Ss32.f = __fsub_rn(Ss32.f, Stmp5.f); Ss33.f = __fsub_rn(Ss33.f, Stmp2.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("%.20g\n", Ss11.f); printf("%.20g %.20g\n", Ss21.f, Ss22.f); printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f); #endif //########################################################### // Compute the cumulative rotation, in quaternion form //########################################################### Stmp1.f = Ssh.f*Sqvvx.f; Stmp2.f = Ssh.f*Sqvvy.f; Stmp3.f = Ssh.f*Sqvvz.f; Ssh.f = Ssh.f*Sqvs.f; Sqvs.f = Sch.f*Sqvs.f; Sqvvx.f = Sch.f*Sqvvx.f; Sqvvy.f = Sch.f*Sqvvy.f; Sqvvz.f = Sch.f*Sqvvz.f; Sqvvx.f = __fadd_rn(Sqvvx.f, Ssh.f); Sqvs.f = __fsub_rn(Sqvs.f, Stmp1.f); Sqvvy.f = __fadd_rn(Sqvvy.f, Stmp3.f); Sqvvz.f = __fsub_rn(Sqvvz.f, Stmp2.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f); #endif #if 1 ////////////////////////////////////////////////////////////////////////// // 1 -> 2 ////////////////////////////////////////////////////////////////////////// Ssh.f = Ss31.f * 0.5f; Stmp5.f = __fsub_rn(Ss33.f, Ss11.f); Stmp2.f = Ssh.f*Ssh.f; Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0; Ssh.ui = Stmp1.ui&Ssh.ui; Sch.ui = Stmp1.ui&Stmp5.ui; Stmp2.ui = ~Stmp1.ui&gone; Sch.ui = Sch.ui | Stmp2.ui; Stmp1.f = Ssh.f*Ssh.f; Stmp2.f = Sch.f*Sch.f; Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp4.f = __frsqrt_rn(Stmp3.f); Ssh.f = Stmp4.f*Ssh.f; Sch.f = Stmp4.f*Sch.f; Stmp1.f = gfour_gamma_squared*Stmp1.f; Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 
0xffffffff : 0; Stmp2.ui = gsine_pi_over_eight&Stmp1.ui; Ssh.ui = ~Stmp1.ui&Ssh.ui; Ssh.ui = Ssh.ui | Stmp2.ui; Stmp2.ui = gcosine_pi_over_eight&Stmp1.ui; Sch.ui = ~Stmp1.ui&Sch.ui; Sch.ui = Sch.ui | Stmp2.ui; Stmp1.f = Ssh.f*Ssh.f; Stmp2.f = Sch.f*Sch.f; Sc.f = __fsub_rn(Stmp2.f, Stmp1.f); Ss.f = Sch.f*Ssh.f; Ss.f = __fadd_rn(Ss.f, Ss.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f); #endif //########################################################### // Perform the actual Givens conjugation //########################################################### Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f); Ss22.f = Ss22.f * Stmp3.f; Ss32.f = Ss32.f * Stmp3.f; Ss21.f = Ss21.f * Stmp3.f; Ss22.f = Ss22.f * Stmp3.f; Stmp1.f = Ss.f*Ss32.f; Stmp2.f = Ss.f*Ss21.f; Ss32.f = Sc.f*Ss32.f; Ss21.f = Sc.f*Ss21.f; Ss32.f = __fadd_rn(Stmp2.f, Ss32.f); Ss21.f = __fsub_rn(Ss21.f, Stmp1.f); Stmp2.f = Ss.f*Ss.f; Stmp1.f = Ss11.f*Stmp2.f; Stmp3.f = Ss33.f*Stmp2.f; Stmp4.f = Sc.f*Sc.f; Ss33.f = Ss33.f*Stmp4.f; Ss11.f = Ss11.f*Stmp4.f; Ss33.f = __fadd_rn(Ss33.f, Stmp1.f); Ss11.f = __fadd_rn(Ss11.f, Stmp3.f); Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f); Stmp2.f = __fadd_rn(Ss31.f, Ss31.f); Ss31.f = Ss31.f*Stmp4.f; Stmp4.f = Sc.f*Ss.f; Stmp2.f = Stmp2.f*Stmp4.f; Stmp5.f = Stmp5.f*Stmp4.f; Ss33.f = __fadd_rn(Ss33.f, Stmp2.f); Ss31.f = __fsub_rn(Ss31.f, Stmp5.f); Ss11.f = __fsub_rn(Ss11.f, Stmp2.f); #ifdef DEBUG_JACOBI_CONJUGATE printf("%.20g\n", Ss11.f); printf("%.20g %.20g\n", Ss21.f, Ss22.f); printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f); #endif //########################################################### // Compute the cumulative rotation, in quaternion form //########################################################### Stmp1.f = Ssh.f*Sqvvx.f; Stmp2.f = Ssh.f*Sqvvy.f; Stmp3.f = Ssh.f*Sqvvz.f; Ssh.f = Ssh.f*Sqvs.f; Sqvs.f = Sch.f*Sqvs.f; Sqvvx.f = Sch.f*Sqvvx.f; Sqvvy.f = Sch.f*Sqvvy.f; Sqvvz.f = Sch.f*Sqvvz.f; Sqvvy.f = __fadd_rn(Sqvvy.f, Ssh.f); Sqvs.f = __fsub_rn(Sqvs.f, Stmp2.f); Sqvvz.f = __fadd_rn(Sqvvz.f, Stmp1.f); Sqvvx.f = __fsub_rn(Sqvvx.f, Stmp3.f); #endif } //########################################################### // Normalize quaternion for matrix V //########################################################### Stmp2.f = Sqvs.f*Sqvs.f; Stmp1.f = Sqvvx.f*Sqvvx.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = Sqvvy.f*Sqvvy.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = Sqvvz.f*Sqvvz.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5f; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Sqvs.f = Sqvs.f*Stmp1.f; Sqvvx.f = Sqvvx.f*Stmp1.f; Sqvvy.f = Sqvvy.f*Stmp1.f; Sqvvz.f = Sqvvz.f*Stmp1.f; //########################################################### // Transform quaternion to matrix V //########################################################### Stmp1.f = Sqvvx.f*Sqvvx.f; Stmp2.f = Sqvvy.f*Sqvvy.f; Stmp3.f = Sqvvz.f*Sqvvz.f; Sv11.f = Sqvs.f*Sqvs.f; Sv22.f = __fsub_rn(Sv11.f, Stmp1.f); Sv33.f = __fsub_rn(Sv22.f, Stmp2.f); Sv33.f = __fadd_rn(Sv33.f, Stmp3.f); Sv22.f = __fadd_rn(Sv22.f, Stmp2.f); Sv22.f = __fsub_rn(Sv22.f, Stmp3.f); Sv11.f = __fadd_rn(Sv11.f, Stmp1.f); Sv11.f = __fsub_rn(Sv11.f, Stmp2.f); Sv11.f = __fsub_rn(Sv11.f, Stmp3.f); Stmp1.f = __fadd_rn(Sqvvx.f, Sqvvx.f); Stmp2.f = __fadd_rn(Sqvvy.f, Sqvvy.f); Stmp3.f = __fadd_rn(Sqvvz.f, Sqvvz.f); Sv32.f = Sqvs.f*Stmp1.f; Sv13.f = 
Sqvs.f*Stmp2.f; Sv21.f = Sqvs.f*Stmp3.f; Stmp1.f = Sqvvy.f*Stmp1.f; Stmp2.f = Sqvvz.f*Stmp2.f; Stmp3.f = Sqvvx.f*Stmp3.f; Sv12.f = __fsub_rn(Stmp1.f, Sv21.f); Sv23.f = __fsub_rn(Stmp2.f, Sv32.f); Sv31.f = __fsub_rn(Stmp3.f, Sv13.f); Sv21.f = __fadd_rn(Stmp1.f, Sv21.f); Sv32.f = __fadd_rn(Stmp2.f, Sv32.f); Sv13.f = __fadd_rn(Stmp3.f, Sv13.f); ///########################################################### // Multiply (from the right) with V //########################################################### Stmp2.f = Sa12.f; Stmp3.f = Sa13.f; Sa12.f = Sv12.f*Sa11.f; Sa13.f = Sv13.f*Sa11.f; Sa11.f = Sv11.f*Sa11.f; Stmp1.f = Sv21.f*Stmp2.f; Sa11.f = __fadd_rn(Sa11.f, Stmp1.f); Stmp1.f = Sv31.f*Stmp3.f; Sa11.f = __fadd_rn(Sa11.f, Stmp1.f); Stmp1.f = Sv22.f*Stmp2.f; Sa12.f = __fadd_rn(Sa12.f, Stmp1.f); Stmp1.f = Sv32.f*Stmp3.f; Sa12.f = __fadd_rn(Sa12.f, Stmp1.f); Stmp1.f = Sv23.f*Stmp2.f; Sa13.f = __fadd_rn(Sa13.f, Stmp1.f); Stmp1.f = Sv33.f*Stmp3.f; Sa13.f = __fadd_rn(Sa13.f, Stmp1.f); Stmp2.f = Sa22.f; Stmp3.f = Sa23.f; Sa22.f = Sv12.f*Sa21.f; Sa23.f = Sv13.f*Sa21.f; Sa21.f = Sv11.f*Sa21.f; Stmp1.f = Sv21.f*Stmp2.f; Sa21.f = __fadd_rn(Sa21.f, Stmp1.f); Stmp1.f = Sv31.f*Stmp3.f; Sa21.f = __fadd_rn(Sa21.f, Stmp1.f); Stmp1.f = Sv22.f*Stmp2.f; Sa22.f = __fadd_rn(Sa22.f, Stmp1.f); Stmp1.f = Sv32.f*Stmp3.f; Sa22.f = __fadd_rn(Sa22.f, Stmp1.f); Stmp1.f = Sv23.f*Stmp2.f; Sa23.f = __fadd_rn(Sa23.f, Stmp1.f); Stmp1.f = Sv33.f*Stmp3.f; Sa23.f = __fadd_rn(Sa23.f, Stmp1.f); Stmp2.f = Sa32.f; Stmp3.f = Sa33.f; Sa32.f = Sv12.f*Sa31.f; Sa33.f = Sv13.f*Sa31.f; Sa31.f = Sv11.f*Sa31.f; Stmp1.f = Sv21.f*Stmp2.f; Sa31.f = __fadd_rn(Sa31.f, Stmp1.f); Stmp1.f = Sv31.f*Stmp3.f; Sa31.f = __fadd_rn(Sa31.f, Stmp1.f); Stmp1.f = Sv22.f*Stmp2.f; Sa32.f = __fadd_rn(Sa32.f, Stmp1.f); Stmp1.f = Sv32.f*Stmp3.f; Sa32.f = __fadd_rn(Sa32.f, Stmp1.f); Stmp1.f = Sv23.f*Stmp2.f; Sa33.f = __fadd_rn(Sa33.f, Stmp1.f); Stmp1.f = Sv33.f*Stmp3.f; Sa33.f = __fadd_rn(Sa33.f, Stmp1.f); //########################################################### // Permute columns such that the singular values are sorted //########################################################### Stmp1.f = Sa11.f*Sa11.f; Stmp4.f = Sa21.f*Sa21.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp4.f = Sa31.f*Sa31.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp2.f = Sa12.f*Sa12.f; Stmp4.f = Sa22.f*Sa22.f; Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f); Stmp4.f = Sa32.f*Sa32.f; Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f); Stmp3.f = Sa13.f*Sa13.f; Stmp4.f = Sa23.f*Sa23.f; Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f); Stmp4.f = Sa33.f*Sa33.f; Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f); // Swap columns 1-2 if necessary Stmp4.ui = (Stmp1.f < Stmp2.f) ? 
0xffffffff : 0; Stmp5.ui = Sa11.ui^Sa12.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa11.ui = Sa11.ui^Stmp5.ui; Sa12.ui = Sa12.ui^Stmp5.ui; Stmp5.ui = Sa21.ui^Sa22.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa21.ui = Sa21.ui^Stmp5.ui; Sa22.ui = Sa22.ui^Stmp5.ui; Stmp5.ui = Sa31.ui^Sa32.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa31.ui = Sa31.ui^Stmp5.ui; Sa32.ui = Sa32.ui^Stmp5.ui; Stmp5.ui = Sv11.ui^Sv12.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv11.ui = Sv11.ui^Stmp5.ui; Sv12.ui = Sv12.ui^Stmp5.ui; Stmp5.ui = Sv21.ui^Sv22.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv21.ui = Sv21.ui^Stmp5.ui; Sv22.ui = Sv22.ui^Stmp5.ui; Stmp5.ui = Sv31.ui^Sv32.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv31.ui = Sv31.ui^Stmp5.ui; Sv32.ui = Sv32.ui^Stmp5.ui; Stmp5.ui = Stmp1.ui^Stmp2.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Stmp1.ui = Stmp1.ui^Stmp5.ui; Stmp2.ui = Stmp2.ui^Stmp5.ui; // If columns 1-2 have been swapped, negate 2nd column of A and V so that V is still a rotation Stmp5.f = -2.f; Stmp5.ui = Stmp5.ui&Stmp4.ui; Stmp4.f = 1.f; Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f); Sa12.f = Sa12.f*Stmp4.f; Sa22.f = Sa22.f*Stmp4.f; Sa32.f = Sa32.f*Stmp4.f; Sv12.f = Sv12.f*Stmp4.f; Sv22.f = Sv22.f*Stmp4.f; Sv32.f = Sv32.f*Stmp4.f; // Swap columns 1-3 if necessary Stmp4.ui = (Stmp1.f < Stmp3.f) ? 0xffffffff : 0; Stmp5.ui = Sa11.ui^Sa13.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa11.ui = Sa11.ui^Stmp5.ui; Sa13.ui = Sa13.ui^Stmp5.ui; Stmp5.ui = Sa21.ui^Sa23.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa21.ui = Sa21.ui^Stmp5.ui; Sa23.ui = Sa23.ui^Stmp5.ui; Stmp5.ui = Sa31.ui^Sa33.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa31.ui = Sa31.ui^Stmp5.ui; Sa33.ui = Sa33.ui^Stmp5.ui; Stmp5.ui = Sv11.ui^Sv13.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv11.ui = Sv11.ui^Stmp5.ui; Sv13.ui = Sv13.ui^Stmp5.ui; Stmp5.ui = Sv21.ui^Sv23.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv21.ui = Sv21.ui^Stmp5.ui; Sv23.ui = Sv23.ui^Stmp5.ui; Stmp5.ui = Sv31.ui^Sv33.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv31.ui = Sv31.ui^Stmp5.ui; Sv33.ui = Sv33.ui^Stmp5.ui; Stmp5.ui = Stmp1.ui^Stmp3.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Stmp1.ui = Stmp1.ui^Stmp5.ui; Stmp3.ui = Stmp3.ui^Stmp5.ui; // If columns 1-3 have been swapped, negate 1st column of A and V so that V is still a rotation Stmp5.f = -2.f; Stmp5.ui = Stmp5.ui&Stmp4.ui; Stmp4.f = 1.f; Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f); Sa11.f = Sa11.f*Stmp4.f; Sa21.f = Sa21.f*Stmp4.f; Sa31.f = Sa31.f*Stmp4.f; Sv11.f = Sv11.f*Stmp4.f; Sv21.f = Sv21.f*Stmp4.f; Sv31.f = Sv31.f*Stmp4.f; // Swap columns 2-3 if necessary Stmp4.ui = (Stmp2.f < Stmp3.f) ? 
0xffffffff : 0; Stmp5.ui = Sa12.ui^Sa13.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa12.ui = Sa12.ui^Stmp5.ui; Sa13.ui = Sa13.ui^Stmp5.ui; Stmp5.ui = Sa22.ui^Sa23.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa22.ui = Sa22.ui^Stmp5.ui; Sa23.ui = Sa23.ui^Stmp5.ui; Stmp5.ui = Sa32.ui^Sa33.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sa32.ui = Sa32.ui^Stmp5.ui; Sa33.ui = Sa33.ui^Stmp5.ui; Stmp5.ui = Sv12.ui^Sv13.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv12.ui = Sv12.ui^Stmp5.ui; Sv13.ui = Sv13.ui^Stmp5.ui; Stmp5.ui = Sv22.ui^Sv23.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv22.ui = Sv22.ui^Stmp5.ui; Sv23.ui = Sv23.ui^Stmp5.ui; Stmp5.ui = Sv32.ui^Sv33.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Sv32.ui = Sv32.ui^Stmp5.ui; Sv33.ui = Sv33.ui^Stmp5.ui; Stmp5.ui = Stmp2.ui^Stmp3.ui; Stmp5.ui = Stmp5.ui&Stmp4.ui; Stmp2.ui = Stmp2.ui^Stmp5.ui; Stmp3.ui = Stmp3.ui^Stmp5.ui; // If columns 2-3 have been swapped, negate 3rd column of A and V so that V is still a rotation Stmp5.f = -2.f; Stmp5.ui = Stmp5.ui&Stmp4.ui; Stmp4.f = 1.f; Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f); Sa13.f = Sa13.f*Stmp4.f; Sa23.f = Sa23.f*Stmp4.f; Sa33.f = Sa33.f*Stmp4.f; Sv13.f = Sv13.f*Stmp4.f; Sv23.f = Sv23.f*Stmp4.f; Sv33.f = Sv33.f*Stmp4.f; //########################################################### // Construct QR factorization of A*V (=U*D) using Givens rotations //########################################################### Su11.f = 1.f; Su12.f = 0.f; Su13.f = 0.f; Su21.f = 0.f; Su22.f = 1.f; Su23.f = 0.f; Su31.f = 0.f; Su32.f = 0.f; Su33.f = 1.f; Ssh.f = Sa21.f*Sa21.f; Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0; Ssh.ui = Ssh.ui&Sa21.ui; Stmp5.f = 0.f; Sch.f = __fsub_rn(Stmp5.f, Sa11.f); Sch.f = fmaxf(Sch.f, Sa11.f); Sch.f = fmaxf(Sch.f, gsmall_number); Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0; Stmp1.f = Sch.f*Sch.f; Stmp2.f = Ssh.f*Ssh.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5f; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Stmp1.f = Stmp1.f*Stmp2.f; Sch.f = __fadd_rn(Sch.f, Stmp1.f); Stmp1.ui = ~Stmp5.ui&Ssh.ui; Stmp2.ui = ~Stmp5.ui&Sch.ui; Sch.ui = Stmp5.ui&Sch.ui; Ssh.ui = Stmp5.ui&Ssh.ui; Sch.ui = Sch.ui | Stmp1.ui; Ssh.ui = Ssh.ui | Stmp2.ui; Stmp1.f = Sch.f*Sch.f; Stmp2.f = Ssh.f*Ssh.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5f; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Sch.f = Sch.f*Stmp1.f; Ssh.f = Ssh.f*Stmp1.f; Sc.f = Sch.f*Sch.f; Ss.f = Ssh.f*Ssh.f; Sc.f = __fsub_rn(Sc.f, Ss.f); Ss.f = Ssh.f*Sch.f; Ss.f = __fadd_rn(Ss.f, Ss.f); //########################################################### // Rotate matrix A //########################################################### Stmp1.f = Ss.f*Sa11.f; Stmp2.f = Ss.f*Sa21.f; Sa11.f = Sc.f*Sa11.f; Sa21.f = Sc.f*Sa21.f; Sa11.f = __fadd_rn(Sa11.f, Stmp2.f); Sa21.f = __fsub_rn(Sa21.f, Stmp1.f); Stmp1.f = Ss.f*Sa12.f; Stmp2.f = Ss.f*Sa22.f; Sa12.f = Sc.f*Sa12.f; Sa22.f = Sc.f*Sa22.f; Sa12.f = __fadd_rn(Sa12.f, Stmp2.f); Sa22.f = __fsub_rn(Sa22.f, Stmp1.f); Stmp1.f = Ss.f*Sa13.f; Stmp2.f = Ss.f*Sa23.f; Sa13.f = Sc.f*Sa13.f; Sa23.f = Sc.f*Sa23.f; Sa13.f = __fadd_rn(Sa13.f, Stmp2.f); Sa23.f = __fsub_rn(Sa23.f, Stmp1.f); //########################################################### // Update matrix U //########################################################### Stmp1.f = Ss.f*Su11.f; Stmp2.f = 
Ss.f*Su12.f; Su11.f = Sc.f*Su11.f; Su12.f = Sc.f*Su12.f; Su11.f = __fadd_rn(Su11.f, Stmp2.f); Su12.f = __fsub_rn(Su12.f, Stmp1.f); Stmp1.f = Ss.f*Su21.f; Stmp2.f = Ss.f*Su22.f; Su21.f = Sc.f*Su21.f; Su22.f = Sc.f*Su22.f; Su21.f = __fadd_rn(Su21.f, Stmp2.f); Su22.f = __fsub_rn(Su22.f, Stmp1.f); Stmp1.f = Ss.f*Su31.f; Stmp2.f = Ss.f*Su32.f; Su31.f = Sc.f*Su31.f; Su32.f = Sc.f*Su32.f; Su31.f = __fadd_rn(Su31.f, Stmp2.f); Su32.f = __fsub_rn(Su32.f, Stmp1.f); // Second Givens rotation Ssh.f = Sa31.f*Sa31.f; Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0; Ssh.ui = Ssh.ui&Sa31.ui; Stmp5.f = 0.f; Sch.f = __fsub_rn(Stmp5.f, Sa11.f); Sch.f = fmaxf(Sch.f, Sa11.f); Sch.f = fmaxf(Sch.f, gsmall_number); Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0; Stmp1.f = Sch.f*Sch.f; Stmp2.f = Ssh.f*Ssh.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Stmp1.f = Stmp1.f*Stmp2.f; Sch.f = __fadd_rn(Sch.f, Stmp1.f); Stmp1.ui = ~Stmp5.ui&Ssh.ui; Stmp2.ui = ~Stmp5.ui&Sch.ui; Sch.ui = Stmp5.ui&Sch.ui; Ssh.ui = Stmp5.ui&Ssh.ui; Sch.ui = Sch.ui | Stmp1.ui; Ssh.ui = Ssh.ui | Stmp2.ui; Stmp1.f = Sch.f*Sch.f; Stmp2.f = Ssh.f*Ssh.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5f; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Sch.f = Sch.f*Stmp1.f; Ssh.f = Ssh.f*Stmp1.f; Sc.f = Sch.f*Sch.f; Ss.f = Ssh.f*Ssh.f; Sc.f = __fsub_rn(Sc.f, Ss.f); Ss.f = Ssh.f*Sch.f; Ss.f = __fadd_rn(Ss.f, Ss.f); //########################################################### // Rotate matrix A //########################################################### Stmp1.f = Ss.f*Sa11.f; Stmp2.f = Ss.f*Sa31.f; Sa11.f = Sc.f*Sa11.f; Sa31.f = Sc.f*Sa31.f; Sa11.f = __fadd_rn(Sa11.f, Stmp2.f); Sa31.f = __fsub_rn(Sa31.f, Stmp1.f); Stmp1.f = Ss.f*Sa12.f; Stmp2.f = Ss.f*Sa32.f; Sa12.f = Sc.f*Sa12.f; Sa32.f = Sc.f*Sa32.f; Sa12.f = __fadd_rn(Sa12.f, Stmp2.f); Sa32.f = __fsub_rn(Sa32.f, Stmp1.f); Stmp1.f = Ss.f*Sa13.f; Stmp2.f = Ss.f*Sa33.f; Sa13.f = Sc.f*Sa13.f; Sa33.f = Sc.f*Sa33.f; Sa13.f = __fadd_rn(Sa13.f, Stmp2.f); Sa33.f = __fsub_rn(Sa33.f, Stmp1.f); //########################################################### // Update matrix U //########################################################### Stmp1.f = Ss.f*Su11.f; Stmp2.f = Ss.f*Su13.f; Su11.f = Sc.f*Su11.f; Su13.f = Sc.f*Su13.f; Su11.f = __fadd_rn(Su11.f, Stmp2.f); Su13.f = __fsub_rn(Su13.f, Stmp1.f); Stmp1.f = Ss.f*Su21.f; Stmp2.f = Ss.f*Su23.f; Su21.f = Sc.f*Su21.f; Su23.f = Sc.f*Su23.f; Su21.f = __fadd_rn(Su21.f, Stmp2.f); Su23.f = __fsub_rn(Su23.f, Stmp1.f); Stmp1.f = Ss.f*Su31.f; Stmp2.f = Ss.f*Su33.f; Su31.f = Sc.f*Su31.f; Su33.f = Sc.f*Su33.f; Su31.f = __fadd_rn(Su31.f, Stmp2.f); Su33.f = __fsub_rn(Su33.f, Stmp1.f); // Third Givens Rotation Ssh.f = Sa32.f*Sa32.f; Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0; Ssh.ui = Ssh.ui&Sa32.ui; Stmp5.f = 0.f; Sch.f = __fsub_rn(Stmp5.f, Sa22.f); Sch.f = fmaxf(Sch.f, Sa22.f); Sch.f = fmaxf(Sch.f, gsmall_number); Stmp5.ui = (Sa22.f >= Stmp5.f) ? 
0xffffffff : 0; Stmp1.f = Sch.f*Sch.f; Stmp2.f = Ssh.f*Ssh.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5f; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Stmp1.f = Stmp1.f*Stmp2.f; Sch.f = __fadd_rn(Sch.f, Stmp1.f); Stmp1.ui = ~Stmp5.ui&Ssh.ui; Stmp2.ui = ~Stmp5.ui&Sch.ui; Sch.ui = Stmp5.ui&Sch.ui; Ssh.ui = Stmp5.ui&Ssh.ui; Sch.ui = Sch.ui | Stmp1.ui; Ssh.ui = Ssh.ui | Stmp2.ui; Stmp1.f = Sch.f*Sch.f; Stmp2.f = Ssh.f*Ssh.f; Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f); Stmp1.f = __frsqrt_rn(Stmp2.f); Stmp4.f = Stmp1.f*0.5f; Stmp3.f = Stmp1.f*Stmp4.f; Stmp3.f = Stmp1.f*Stmp3.f; Stmp3.f = Stmp2.f*Stmp3.f; Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f); Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f); Sch.f = Sch.f*Stmp1.f; Ssh.f = Ssh.f*Stmp1.f; Sc.f = Sch.f*Sch.f; Ss.f = Ssh.f*Ssh.f; Sc.f = __fsub_rn(Sc.f, Ss.f); Ss.f = Ssh.f*Sch.f; Ss.f = __fadd_rn(Ss.f, Ss.f); //########################################################### // Rotate matrix A //########################################################### Stmp1.f = Ss.f*Sa21.f; Stmp2.f = Ss.f*Sa31.f; Sa21.f = Sc.f*Sa21.f; Sa31.f = Sc.f*Sa31.f; Sa21.f = __fadd_rn(Sa21.f, Stmp2.f); Sa31.f = __fsub_rn(Sa31.f, Stmp1.f); Stmp1.f = Ss.f*Sa22.f; Stmp2.f = Ss.f*Sa32.f; Sa22.f = Sc.f*Sa22.f; Sa32.f = Sc.f*Sa32.f; Sa22.f = __fadd_rn(Sa22.f, Stmp2.f); Sa32.f = __fsub_rn(Sa32.f, Stmp1.f); Stmp1.f = Ss.f*Sa23.f; Stmp2.f = Ss.f*Sa33.f; Sa23.f = Sc.f*Sa23.f; Sa33.f = Sc.f*Sa33.f; Sa23.f = __fadd_rn(Sa23.f, Stmp2.f); Sa33.f = __fsub_rn(Sa33.f, Stmp1.f); //########################################################### // Update matrix U //########################################################### Stmp1.f = Ss.f*Su12.f; Stmp2.f = Ss.f*Su13.f; Su12.f = Sc.f*Su12.f; Su13.f = Sc.f*Su13.f; Su12.f = __fadd_rn(Su12.f, Stmp2.f); Su13.f = __fsub_rn(Su13.f, Stmp1.f); Stmp1.f = Ss.f*Su22.f; Stmp2.f = Ss.f*Su23.f; Su22.f = Sc.f*Su22.f; Su23.f = Sc.f*Su23.f; Su22.f = __fadd_rn(Su22.f, Stmp2.f); Su23.f = __fsub_rn(Su23.f, Stmp1.f); Stmp1.f = Ss.f*Su32.f; Stmp2.f = Ss.f*Su33.f; Su32.f = Sc.f*Su32.f; Su33.f = Sc.f*Su33.f; Su32.f = __fadd_rn(Su32.f, Stmp2.f); Su33.f = __fsub_rn(Su33.f, Stmp1.f); v11 = Sv11.f; v12 = Sv12.f; v13 = Sv13.f; v21 = Sv21.f; v22 = Sv22.f; v23 = Sv23.f; v31 = Sv31.f; v32 = Sv32.f; v33 = Sv33.f; u11 = Su11.f; u12 = Su12.f; u13 = Su13.f; u21 = Su21.f; u22 = Su22.f; u23 = Su23.f; u31 = Su31.f; u32 = Su32.f; u33 = Su33.f; s11 = Sa11.f; //s12 = Sa12.f; s13 = Sa13.f; s21 = Sa21.f; s22 = Sa22.f; //s23 = Sa23.f; s31 = Sa31.f; s32 = Sa32.f; s33 = Sa33.f; } __global__ void svd3_SOA(const float*__restrict input, float*__restrict output, int testsize) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= testsize) return; svd( input[tid + 0 * testsize], input[tid + 1 * testsize], input[tid + 2 * testsize], input[tid + 3 * testsize], input[tid + 4 * testsize], input[tid + 5 * testsize], input[tid + 6 * testsize], input[tid + 7 * testsize], input[tid + 8 * testsize], output[tid + 0 * testsize], output[tid + 1 * testsize], output[tid + 2 * testsize], output[tid + 3 * testsize], output[tid + 4 * testsize], output[tid + 5 * testsize], output[tid + 6 * testsize], output[tid + 7 * testsize], output[tid + 8 * testsize], output[tid + 9 * testsize], output[tid + 10 * testsize], output[tid + 11 * testsize], output[tid + 12 * testsize], output[tid + 13 * testsize], output[tid + 14 * testsize], output[tid + 15 * testsize], output[tid + 
16 * testsize], output[tid + 17 * testsize], output[tid + 18 * testsize], output[tid + 19 * testsize], output[tid + 20 * testsize] ); }
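//==============================================================================
// Hedged host-side sketch of the structure-of-arrays layout consumed by
// svd3_SOA above: element k of matrix t must live at input[t + k * testsize],
// i.e. each of the nine matrix entries forms its own contiguous array. The
// sketch assumes the kernel compiles in this translation unit (the g*
// constants such as gone, gtiny_number and gsine_pi_over_eight are defined
// earlier in the original file); the helper name and block size below are
// illustrative only.
#include <cuda_runtime.h>
#include <vector>

void run_svd3_SOA(const std::vector<float> &A_rowmajor,  // 9 * n floats
                  std::vector<float> &out,               // 21 * n floats
                  int n) {
  // Transpose row-major 3x3 matrices into the SoA layout read by the kernel.
  std::vector<float> h_in(9 * n);
  for (int t = 0; t < n; ++t)
    for (int k = 0; k < 9; ++k)
      h_in[t + k * n] = A_rowmajor[9 * t + k];

  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, sizeof(float) * 9 * n);
  cudaMalloc(&d_out, sizeof(float) * 21 * n);
  cudaMemcpy(d_in, h_in.data(), sizeof(float) * 9 * n, cudaMemcpyHostToDevice);

  const int block = 256;
  svd3_SOA<<<(n + block - 1) / block, block>>>(d_in, d_out, n);

  // Following the parameter order of svd(), out[t + j * n] holds: j = 0..8 the
  // entries of U, j = 9..11 the three singular values, j = 12..20 the entries
  // of V, matching the 21 output slots wired up in the kernel above.
  out.resize(21 * n);
  cudaMemcpy(out.data(), d_out, sizeof(float) * 21 * n, cudaMemcpyDeviceToHost);
  cudaFree(d_in);
  cudaFree(d_out);
}
//==============================================================================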
#include <thrust/sequence.h> #include <boost/config.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/connected_components.hpp> #include "impl.cuh" #include "polygon.h" namespace { using namespace manifold; using namespace thrust::placeholders; struct ToSphere { float length; __host__ __device__ void operator()(glm::vec3& v) { v = glm::cos(glm::half_pi<float>() * (1.0f - v)); v = length * glm::normalize(v); if (isnan(v.x)) v = glm::vec3(0.0); } }; struct UpdateTriBary { const int nextBary; __host__ __device__ BaryRef operator()(BaryRef ref) { ref.vertBary += nextBary; return ref; } }; struct UpdateHalfedge { const int nextVert; const int nextEdge; const int nextFace; __host__ __device__ Halfedge operator()(Halfedge edge) { edge.startVert += nextVert; edge.endVert += nextVert; edge.pairedHalfedge += nextEdge; edge.face += nextFace; return edge; } }; int ConnectedComponents(VecDH<int>& components, int numVert, const VecDH<Halfedge>& halfedges) { boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS> graph( numVert); for (int i = 0; i < halfedges.size(); ++i) { const Halfedge halfedge = halfedges.H()[i]; if (halfedge.IsForward()) { boost::add_edge(halfedge.startVert, halfedge.endVert, graph); } } components.resize(numVert); int numComponent = boost::connected_components(graph, components.H().data()); return numComponent; } struct Equals { int val; __host__ __device__ bool operator()(int x) { return x == val; } }; struct RemoveFace { const Halfedge* halfedge; const int* vertLabel; const int keepLabel; __host__ __device__ bool operator()(int face) { return vertLabel[halfedge[3 * face].startVert] != keepLabel; } }; } // namespace namespace manifold { /* * Constructs a smooth version of the input mesh by creating tangents; this * method will throw if you have supplied tangnets with your mesh already. The * actual triangle resolution is unchanged; use the Refine() method to * interpolate to a higher-resolution curve. * * By default, every edge is calculated for maximum smoothness (very much * approximately), attempting to minimize the maximum mean curvature magnitude. * No higher-order derivatives are considered, as the interpolation is * independent per triangle, only sharing constraints on their boundaries. * * If desired, you can supply a vector of sharpened halfedges, which should in * general be a small subset of all halfedges. Order of entries doesn't matter, * as each one specifies the desired smoothness (between zero and one, with one * the default for all unspecified halfedges) and the halfedge index (3 * * triangle index + [0,1,2] where 0 is the edge between triVert 0 and 1, etc). * * At a smoothness value of zero, a sharp crease is made. The smoothness is * interpolated along each edge, so the specified value should be thought of as * an average. Where exactly two sharpened edges meet at a vertex, their * tangents are rotated to be colinear so that the sharpened edge can be * continuous. Vertices with only one sharpened edge are completely smooth, * allowing sharpened edges to smoothly vanish at termination. A single vertex * can be sharpened by sharping all edges that are incident on it, allowing * cones to be formed. 
*/ Manifold Manifold::Smooth(const Mesh& mesh, const std::vector<Smoothness>& sharpenedEdges) { ALWAYS_ASSERT( mesh.halfedgeTangent.empty(), std::runtime_error, "when supplying tangents, the normal constructor should be used " "rather than Smooth()."); Manifold manifold(mesh); manifold.pImpl_->CreateTangents(sharpenedEdges); return manifold; } /** * Constructs a tetrahedron centered at the origin with one vertex at (1,1,1) * and the rest at similarly symmetric points. */ Manifold Manifold::Tetrahedron() { Manifold tetrahedron; tetrahedron.pImpl_ = std::make_unique<Impl>(Impl::Shape::TETRAHEDRON); return tetrahedron; } /** * Constructs a unit cube (edge lengths all one), by default in the first * octant, touching the origin. Set center to true to shift the center to the * origin. */ Manifold Manifold::Cube(glm::vec3 size, bool center) { Manifold cube; cube.pImpl_ = std::make_unique<Impl>(Impl::Shape::CUBE); cube.Scale(size); if (center) cube.Translate(-size / 2.0f); return cube; } /** * A convenience constructor for the common case of extruding a circle. Can also * form cones if both radii are specified. Set center to true to center the * manifold vertically on the origin (default places the bottom on the origin). */ Manifold Manifold::Cylinder(float height, float radiusLow, float radiusHigh, int circularSegments, bool center) { float scale = radiusHigh >= 0.0f ? radiusHigh / radiusLow : 1.0f; float radius = max(radiusLow, radiusHigh); int n = circularSegments > 2 ? circularSegments : GetCircularSegments(radius); Polygons circle(1); float dPhi = 360.0f / n; for (int i = 0; i < n; ++i) { circle[0].push_back( {radiusLow * glm::vec2(cosd(dPhi * i), sind(dPhi * i)), 0}); } Manifold cylinder = Manifold::Extrude(circle, height, 0, 0.0f, glm::vec2(scale)); if (center) cylinder.Translate(glm::vec3(0.0f, 0.0f, -height / 2.0f)); return cylinder; } /** * Constructs a sphere of a given radius and number of segments along its * diameter. This number will always be rounded up to the nearest factor of * four, as this sphere is constructed by refining an octahedron. This means * there are a circle of vertices on all three of the axis planes. */ Manifold Manifold::Sphere(float radius, int circularSegments) { int n = circularSegments > 0 ? (circularSegments + 3) / 4 : GetCircularSegments(radius) / 4; Manifold sphere; sphere.pImpl_ = std::make_unique<Impl>(Impl::Shape::OCTAHEDRON); sphere.pImpl_->Subdivide(n); thrust::for_each_n(sphere.pImpl_->vertPos_.beginD(), sphere.NumVert(), ToSphere({radius})); sphere.pImpl_->Finish(); // Ignore preceding octahedron. sphere.pImpl_->ReinitializeReference(); return sphere; } /** * Constructs a manifold from a set of polygons by extruding them along the * Z-axis. The overall height and the scale at the top (X and Y independently) * can be specified, as can a twist, to be applied linearly. In the case of * twist, it can also be helpful to specify nDivisions, which specifies the * quantization of the triangles vertically. If the scale is {0,0}, a pure cone * is formed with only a single vertex at the top. 
*/ Manifold Manifold::Extrude(Polygons crossSection, float height, int nDivisions, float twistDegrees, glm::vec2 scaleTop) { ALWAYS_ASSERT(scaleTop.x >= 0 && scaleTop.y >= 0, userErr, "scale values cannot be negative"); Manifold extrusion; ++nDivisions; auto& vertPos = extrusion.pImpl_->vertPos_.H(); VecDH<glm::ivec3> triVertsDH; auto& triVerts = triVertsDH.H(); int nCrossSection = 0; bool isCone = scaleTop.x == 0.0 && scaleTop.y == 0.0; int idx = 0; for (auto& poly : crossSection) { nCrossSection += poly.size(); for (PolyVert& polyVert : poly) { vertPos.push_back({polyVert.pos.x, polyVert.pos.y, 0.0f}); polyVert.idx = idx++; } } for (int i = 1; i < nDivisions + 1; ++i) { float alpha = i / float(nDivisions); float phi = alpha * twistDegrees; glm::mat2 transform(cosd(phi), sind(phi), -sind(phi), cosd(phi)); glm::vec2 scale = glm::mix(glm::vec2(1.0f), scaleTop, alpha); transform = transform * glm::mat2(scale.x, 0.0f, 0.0f, scale.y); int j = 0; int idx = 0; for (const auto& poly : crossSection) { for (int vert = 0; vert < poly.size(); ++vert) { int offset = idx + nCrossSection * i; int thisVert = vert + offset; int lastVert = (vert == 0 ? poly.size() : vert) - 1 + offset; if (i == nDivisions && isCone) { triVerts.push_back({nCrossSection * i + j, lastVert - nCrossSection, thisVert - nCrossSection}); } else { glm::vec2 pos = transform * poly[vert].pos; vertPos.push_back({pos.x, pos.y, height * alpha}); triVerts.push_back({thisVert, lastVert, thisVert - nCrossSection}); triVerts.push_back( {lastVert, lastVert - nCrossSection, thisVert - nCrossSection}); } } ++j; idx += poly.size(); } } if (isCone) for (int j = 0; j < crossSection.size(); ++j) // Duplicate vertex for Genus vertPos.push_back({0.0f, 0.0f, height}); std::vector<glm::ivec3> top = Triangulate(crossSection); for (const glm::ivec3& tri : top) { triVerts.push_back({tri[0], tri[2], tri[1]}); if (!isCone) triVerts.push_back(tri + nCrossSection * nDivisions); } extrusion.pImpl_->CreateHalfedges(triVertsDH); extrusion.pImpl_->Finish(); extrusion.pImpl_->InitializeNewReference(); extrusion.pImpl_->MergeCoplanarRelations(); return extrusion; } /** * Constructs a manifold from a set of polygons by revolving this cross-section * around its Y-axis and then setting this as the Z-axis of the resulting * manifold. If the polygons cross the Y-axis, only the part on the positive X * side is used. Geometrically valid input will result in geometrically valid * output. */ Manifold Manifold::Revolve(const Polygons& crossSection, int circularSegments) { float radius = 0.0f; for (const auto& poly : crossSection) { for (const auto& vert : poly) { radius = max(radius, vert.pos.x); } } int nDivisions = circularSegments > 2 ? circularSegments : GetCircularSegments(radius); Manifold revoloid; auto& vertPos = revoloid.pImpl_->vertPos_.H(); VecDH<glm::ivec3> triVertsDH; auto& triVerts = triVertsDH.H(); float dPhi = 360.0f / nDivisions; for (const auto& poly : crossSection) { int start = -1; for (int polyVert = 0; polyVert < poly.size(); ++polyVert) { if (poly[polyVert].pos.x <= 0) { start = polyVert; break; } } if (start == -1) { // poly all positive for (int polyVert = 0; polyVert < poly.size(); ++polyVert) { int startVert = vertPos.size(); int lastStart = startVert + (polyVert == 0 ? nDivisions * (poly.size() - 1) : -nDivisions); for (int slice = 0; slice < nDivisions; ++slice) { int lastSlice = (slice == 0 ? 
nDivisions : slice) - 1; float phi = slice * dPhi; glm::vec2 pos = poly[polyVert].pos; vertPos.push_back({pos.x * cosd(phi), pos.x * sind(phi), pos.y}); triVerts.push_back({startVert + slice, startVert + lastSlice, lastStart + lastSlice}); triVerts.push_back( {lastStart + lastSlice, lastStart + slice, startVert + slice}); } } } else { // poly crosses zero int polyVert = start; glm::vec2 pos = poly[polyVert].pos; do { glm::vec2 lastPos = pos; polyVert = (polyVert + 1) % poly.size(); pos = poly[polyVert].pos; if (pos.x > 0) { if (lastPos.x <= 0) { float a = pos.x / (pos.x - lastPos.x); vertPos.push_back({0.0f, 0.0f, glm::mix(pos.y, lastPos.y, a)}); } int startVert = vertPos.size(); for (int slice = 0; slice < nDivisions; ++slice) { int lastSlice = (slice == 0 ? nDivisions : slice) - 1; float phi = slice * dPhi; glm::vec2 pos = poly[polyVert].pos; vertPos.push_back({pos.x * cosd(phi), pos.x * sind(phi), pos.y}); if (lastPos.x > 0) { triVerts.push_back({startVert + slice, startVert + lastSlice, startVert - nDivisions + lastSlice}); triVerts.push_back({startVert - nDivisions + lastSlice, startVert - nDivisions + slice, startVert + slice}); } else { triVerts.push_back( {startVert - 1, startVert + slice, startVert + lastSlice}); } } } else if (lastPos.x > 0) { int startVert = vertPos.size(); float a = pos.x / (pos.x - lastPos.x); vertPos.push_back({0.0f, 0.0f, glm::mix(pos.y, lastPos.y, a)}); for (int slice = 0; slice < nDivisions; ++slice) { int lastSlice = (slice == 0 ? nDivisions : slice) - 1; triVerts.push_back({startVert, startVert - nDivisions + lastSlice, startVert - nDivisions + slice}); } } } while (polyVert != start); } } revoloid.pImpl_->CreateHalfedges(triVertsDH); revoloid.pImpl_->Finish(); revoloid.pImpl_->InitializeNewReference(); revoloid.pImpl_->MergeCoplanarRelations(); return revoloid; } /** * Constructs a new manifold from a vector of other manifolds. This is a purely * topological operation, so care should be taken to avoid creating * geometrically-invalid results. 
*/ Manifold Manifold::Compose(const std::vector<Manifold>& manifolds) { int numVert = 0; int numEdge = 0; int numTri = 0; int numBary = 0; for (const Manifold& manifold : manifolds) { numVert += manifold.NumVert(); numEdge += manifold.NumEdge(); numTri += manifold.NumTri(); numBary += manifold.pImpl_->meshRelation_.barycentric.size(); } Manifold out; Impl& combined = *(out.pImpl_); combined.vertPos_.resize(numVert); combined.halfedge_.resize(2 * numEdge); combined.faceNormal_.resize(numTri); combined.halfedgeTangent_.resize(2 * numEdge); combined.meshRelation_.barycentric.resize(numBary); combined.meshRelation_.triBary.resize(numTri); int nextVert = 0; int nextEdge = 0; int nextTri = 0; int nextBary = 0; for (const Manifold& manifold : manifolds) { const Impl& impl = *(manifold.pImpl_); impl.ApplyTransform(); thrust::copy(impl.vertPos_.beginD(), impl.vertPos_.endD(), combined.vertPos_.beginD() + nextVert); thrust::copy(impl.faceNormal_.beginD(), impl.faceNormal_.endD(), combined.faceNormal_.beginD() + nextTri); thrust::copy(impl.halfedgeTangent_.beginD(), impl.halfedgeTangent_.endD(), combined.halfedgeTangent_.beginD() + nextEdge); thrust::copy(impl.meshRelation_.barycentric.beginD(), impl.meshRelation_.barycentric.endD(), combined.meshRelation_.barycentric.beginD() + nextBary); thrust::transform(impl.meshRelation_.triBary.beginD(), impl.meshRelation_.triBary.endD(), combined.meshRelation_.triBary.beginD() + nextTri, UpdateTriBary({nextBary})); thrust::transform(impl.halfedge_.beginD(), impl.halfedge_.endD(), combined.halfedge_.beginD() + nextEdge, UpdateHalfedge({nextVert, nextEdge, nextTri})); nextVert += manifold.NumVert(); nextEdge += 2 * manifold.NumEdge(); nextTri += manifold.NumTri(); nextBary += impl.meshRelation_.barycentric.size(); } combined.DuplicateMeshIDs(); combined.Finish(); return out; } /** * This operation returns a copy of this manifold, but as a vector of meshes * that are topologically disconnected. */ std::vector<Manifold> Manifold::Decompose() const { VecDH<int> vertLabel; int numLabel = ConnectedComponents(vertLabel, NumVert(), pImpl_->halfedge_); if (numLabel == 1) { std::vector<Manifold> meshes(1); meshes[0] = *this; return meshes; } std::vector<Manifold> meshes(numLabel); for (int i = 0; i < numLabel; ++i) { meshes[i].pImpl_->vertPos_.resize(NumVert()); VecDH<int> vertNew2Old(NumVert()); int nVert = thrust::copy_if( zip(pImpl_->vertPos_.beginD(), countAt(0)), zip(pImpl_->vertPos_.endD(), countAt(NumVert())), vertLabel.beginD(), zip(meshes[i].pImpl_->vertPos_.beginD(), vertNew2Old.beginD()), Equals({i})) - zip(meshes[i].pImpl_->vertPos_.beginD(), countAt(0)); meshes[i].pImpl_->vertPos_.resize(nVert); VecDH<int> faceNew2Old(NumTri()); thrust::sequence(faceNew2Old.beginD(), faceNew2Old.endD()); int nFace = thrust::remove_if( faceNew2Old.beginD(), faceNew2Old.endD(), RemoveFace({pImpl_->halfedge_.cptrD(), vertLabel.cptrD(), i})) - faceNew2Old.beginD(); faceNew2Old.resize(nFace); meshes[i].pImpl_->GatherFaces(*pImpl_, faceNew2Old); meshes[i].pImpl_->ReindexVerts(vertNew2Old, pImpl_->NumVert()); meshes[i].pImpl_->Finish(); meshes[i].pImpl_->transform_ = pImpl_->transform_; } return meshes; } } // namespace manifold
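//==============================================================================
// Hedged usage sketch for the constructors defined in this file: build a
// twisted square extrusion, compose it with a translated sphere, and split the
// result back into its connected components. It assumes the same headers as
// the file above (glm, Polygons/PolyVert as used in Manifold::Cylinder) and
// that Manifold objects are copyable, as the copy assignment in Decompose()
// suggests; the namespace and function names below are illustrative only.
#include <vector>

namespace manifold_example {

inline std::vector<manifold::Manifold> TwistedBarAndSphere() {
  using namespace manifold;

  // A unit-square cross-section in the XY plane; the trailing 0 is the
  // PolyVert index field, as in Manifold::Cylinder above.
  Polygons square(1);
  square[0].push_back({glm::vec2(-0.5f, -0.5f), 0});
  square[0].push_back({glm::vec2(0.5f, -0.5f), 0});
  square[0].push_back({glm::vec2(0.5f, 0.5f), 0});
  square[0].push_back({glm::vec2(-0.5f, 0.5f), 0});

  // Extrude along Z with a 90-degree twist, quantized into 16 divisions.
  Manifold bar =
      Manifold::Extrude(square, /*height=*/4.0f, /*nDivisions=*/16,
                        /*twistDegrees=*/90.0f, /*scaleTop=*/glm::vec2(1.0f));

  // A sphere moved away from the bar so the two parts stay disconnected.
  Manifold ball = Manifold::Sphere(1.0f, 32);
  ball.Translate(glm::vec3(5.0f, 0.0f, 0.0f));

  // Compose() is purely topological; Decompose() recovers the two pieces.
  Manifold both = Manifold::Compose({bar, ball});
  return both.Decompose();
}

}  // namespace manifold_example
//==============================================================================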
template<typename T, int BS> __device__ T min_dist_all (T x, T y, T z, T L[3][3], T Linv[3][3], T images[27][3]) { int tid = threadIdx.x; T u0 = Linv[0][0]*x + Linv[0][1]*y + Linv[0][2]*z; T u1 = Linv[1][0]*x + Linv[1][1]*y + Linv[1][2]*z; T u2 = Linv[2][0]*x + Linv[2][1]*y + Linv[2][2]*z; u0 -= rintf(u0); u1 -= rintf(u1); u2 -= rintf(u2); x = L[0][0]*u0 + L[0][1]*u1 + L[0][2]*u2; y = L[1][0]*u0 + L[1][1]*u1 + L[1][2]*u2; z = L[2][0]*u0 + L[2][1]*u1 + L[2][2]*u2; __shared__ T dist2[27]; if (tid < 27) { x += images[tid][0]; y += images[tid][1]; z += images[tid][2]; dist2[tid] = x*x + y*y + z*z; } __syncthreads(); for (int s=BS>>1; s>0; s>>=1) { if (tid < s && (tid+s) < 27) dist2[tid] = min(dist2[tid+s],dist2[tid]); __syncthreads(); } return sqrtf(dist2[0]); } template<typename T> __device__ T min_dist_only (T x, T y, T z, T L[3][3], T Linv[3][3]) { T u0 = Linv[0][0]*x + Linv[0][1]*y + Linv[0][2]*z; T u1 = Linv[1][0]*x + Linv[1][1]*y + Linv[1][2]*z; T u2 = Linv[2][0]*x + Linv[2][1]*y + Linv[2][2]*z; u0 -= rintf(u0); u1 -= rintf(u1); u2 -= rintf(u2); x = L[0][0]*u0 + L[0][1]*u1 + L[0][2]*u2; y = L[1][0]*u0 + L[1][1]*u1 + L[1][2]*u2; z = L[2][0]*u0 + L[2][1]*u1 + L[2][2]*u2; return sqrtf(x*x + y*y + z*z); } // This should be okay for all but the smallest of primitive cells. // That is, the r_c for the core should be smaller than the simulation // cell radius template<typename T> __device__ T min_dist (T &x, T &y, T &z, T L[3][3], T Linv[3][3]) { T u0 = Linv[0][0]*x + Linv[0][1]*y + Linv[0][2]*z; T u1 = Linv[1][0]*x + Linv[1][1]*y + Linv[1][2]*z; T u2 = Linv[2][0]*x + Linv[2][1]*y + Linv[2][2]*z; u0 -= rintf(u0); u1 -= rintf(u1); u2 -= rintf(u2); x = L[0][0]*u0 + L[0][1]*u1 + L[0][2]*u2; y = L[1][0]*u0 + L[1][1]*u1 + L[1][2]*u2; z = L[2][0]*u0 + L[2][1]*u1 + L[2][2]*u2; return sqrtf(x*x + y*y + z*z); } template<typename T, int BS> __global__ void find_core_electrons_PBC_kernel(T **R, int numElec, T *I, int firstIon, int lastIon, T rcut, T *L_global, T *Linv_global, int2 **pairs, T **dist, int *numPairs) { int tid = threadIdx.x; __shared__ T *myR, *mydist; __shared__ int2 *mypairs; if (tid == 0) { myR = R[blockIdx.x]; mydist = dist[blockIdx.x]; mypairs = pairs[blockIdx.x]; } __shared__ T L[3][3], Linv[3][3]; if (tid < 9) { L[0][tid] = L_global[tid]; Linv[0][tid] = Linv_global[tid]; } __syncthreads(); // int i0 = tid / 9; // int i1 = (tid - 9*i0)/3; // int i2 = (tid - 9*i0 - 3*i1); // __syncthreads(); int numIon = lastIon - firstIon + 1; int numElecBlocks = numElec/BS + ((numElec % BS) ? 1 : 0); int numIonBlocks = numIon /BS + ((numIon % BS) ? 1 : 0); __shared__ T r[BS][3]; __shared__ T i[BS][3]; __shared__ T d[BS]; __shared__ int2 blockpairs[BS]; __shared__ T blockdist[BS]; int npairs=0, index=0, blockNum=0; for (int iBlock=0; iBlock<numIonBlocks; iBlock++) { for (int dim=0; dim<3; dim++) if (dim*BS+tid < 3*numIon) i[0][dim*BS+tid] = I[3*BS*iBlock + 3*firstIon + dim*BS+tid]; int ionEnd = ((iBlock+1)*BS < numIon) ? BS : (numIon - iBlock*BS); for (int eBlock=0; eBlock<numElecBlocks; eBlock++) { int elecEnd = ((eBlock+1)*BS < numElec) ? 
BS : (numElec - eBlock*BS); for (int dim=0; dim<3; dim++) if (dim*BS+tid < 3*numElec) r[0][dim*BS+tid] = myR[3*BS*eBlock + dim*BS+tid]; for (int ion=0; ion<ionEnd; ion++) { d[tid] = min_dist_only(r[tid][0]-i[ion][0], r[tid][1]-i[ion][1], r[tid][2]-i[ion][2], L, Linv); for (int elec=0; elec<elecEnd; elec++) { if (d[elec] < rcut) { if (index == BS) { mypairs[blockNum*BS+tid] = blockpairs[tid]; mydist[blockNum*BS+tid] = blockdist[tid]; blockNum++; index = 0; } if (tid == 0) { blockpairs[index].x = iBlock*BS+ion; blockpairs[index].y = eBlock*BS+elec; blockdist[index] = d[tid]; } index++; npairs++; } } } __syncthreads(); } __syncthreads(); } // Write pairs and distances remaining the final block if (tid < index) { mypairs[blockNum*BS+tid] = blockpairs[tid]; mydist[blockNum*BS+tid] = blockdist[tid]; } if (tid == 0) numPairs[blockIdx.x] = npairs; } void find_core_electrons_PBC (float *R[], int numElec, float I[], int firstIon, int lastIon, float rcut, float L[], float Linv[], int2 *pairs[], float *dist[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_PBC_kernel<float,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, L, Linv, pairs, dist, numPairs); } void find_core_electrons_PBC (double *R[], int numElec, double I[], int firstIon, int lastIon, double rcut, double L[], double Linv[], int2 *pairs[], double *dist[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_PBC_kernel<double,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, L, Linv, pairs, dist, numPairs); } template<typename T, int BS> __global__ void find_core_electrons_PBC_kernel(T **R, int numElec, T *I, int firstIon, int lastIon, T rcut, T *L_global, T *Linv_global, T *quadPoints, int numQuadPoints, int **elecs, T **ratioPos, T **dist_list, T **cosTheta_list, int *numPairs) { int tid = threadIdx.x; __shared__ T *myR, *myRatioPos, *myDist, *myCosTheta; __shared__ int *myElecs; __shared__ T qp[BS][3]; for (int i=0; i<3; i++) if (i*BS + tid < 3*numQuadPoints) qp[0][i*BS+tid] = quadPoints[i*BS+tid]; if (tid == 0) { myR = R[blockIdx.x]; myElecs = elecs[blockIdx.x]; myRatioPos = ratioPos[blockIdx.x]; myDist = dist_list[blockIdx.x]; myCosTheta = cosTheta_list[blockIdx.x]; } __shared__ T L[3][3], Linv[3][3]; if (tid < 9) { L[0][tid] = L_global[tid]; Linv[0][tid] = Linv_global[tid]; } __syncthreads(); int numIon = lastIon - firstIon + 1; int numElecBlocks = numElec/BS + ((numElec % BS) ? 1 : 0); int numIonBlocks = numIon /BS + ((numIon % BS) ? 1 : 0); __shared__ T r[BS][3]; __shared__ T i[BS][3]; __shared__ int blockElecs[BS]; __shared__ T blockPos[BS][3]; __shared__ T dist[BS], disp[BS][3]; __shared__ T blockDist[BS], blockCosTheta[BS]; int posIndex=0, posBlockNum = 0; int npairs=0, index=0, blockNum=0; for (int iBlock=0; iBlock<numIonBlocks; iBlock++) { for (int dim=0; dim<3; dim++) if ((3*iBlock+dim)*BS+tid < 3*numIon) i[0][dim*BS+tid] = I[3*BS*iBlock + 3*firstIon + dim*BS+tid]; int ionEnd = ((iBlock+1)*BS < numIon) ? BS : (numIon - iBlock*BS); for (int eBlock=0; eBlock<numElecBlocks; eBlock++) { int elecEnd = ((eBlock+1)*BS < numElec) ? 
BS : (numElec - eBlock*BS); for (int dim=0; dim<3; dim++) if ((3*eBlock+dim)*BS+tid < 3*numElec) r[0][dim*BS+tid] = myR[3*BS*eBlock + dim*BS+tid]; __syncthreads(); for (int ion=0; ion<ionEnd; ion++) { disp[tid][0] = r[tid][0]-i[ion][0]; disp[tid][1] = r[tid][1]-i[ion][1]; disp[tid][2] = r[tid][2]-i[ion][2]; dist[tid] = min_dist<T>(disp[tid][0], disp[tid][1], disp[tid][2], L, Linv); for (int elec=0; elec<elecEnd; elec++) { __syncthreads(); if (dist[elec] < rcut) { // First, write quadrature points if (numQuadPoints + posIndex <= BS) { if (tid < numQuadPoints) { blockPos[posIndex+tid][0] = r[elec][0] - disp[elec][0] /*i[ion][0]*/ + dist[elec]*qp[tid][0]; blockPos[posIndex+tid][1] = r[elec][1] - disp[elec][1] /*i[ion][1]*/ + dist[elec]*qp[tid][1]; blockPos[posIndex+tid][2] = r[elec][2] - disp[elec][2] /*i[ion][2]*/ + dist[elec]*qp[tid][2]; blockCosTheta[posIndex+tid] = (disp[elec][0]*qp[tid][0] + disp[elec][1]*qp[tid][1] + disp[elec][2]*qp[tid][2]) / dist[elec]; } posIndex += numQuadPoints; } else { // Write whatever will fit in the shared buffer int numWrite = BS - posIndex; if (tid < numWrite) { blockPos[posIndex+tid][0] = r[elec][0] - disp[elec][0] /*i[ion][0]*/ + dist[elec]*qp[tid][0]; blockPos[posIndex+tid][1] = r[elec][1] - disp[elec][1] /*i[ion][1]*/ + dist[elec]*qp[tid][1]; blockPos[posIndex+tid][2] = r[elec][2] - disp[elec][2] /*i[ion][2]*/ + dist[elec]*qp[tid][2]; blockCosTheta[posIndex+tid] = (disp[elec][0]*qp[tid][0] + disp[elec][1]*qp[tid][1] + disp[elec][2]*qp[tid][2]) / dist[elec]; } __syncthreads(); // dump the full buffer to global memory for (int j=0; j<3; j++) myRatioPos[(posBlockNum*3+j)*BS+tid] = blockPos[0][j*BS+tid]; myCosTheta[posBlockNum*BS+tid] = blockCosTheta[tid]; posBlockNum++; __syncthreads(); // Write the remainder into shared memory if (tid < (numQuadPoints - numWrite)) { blockPos[tid][0] = r[elec][0] - disp[elec][0] /*i[ion][0]*/ + dist[elec]*qp[tid+numWrite][0]; blockPos[tid][1] = r[elec][1] - disp[elec][1] /*i[ion][1]*/ + dist[elec]*qp[tid+numWrite][1]; blockPos[tid][2] = r[elec][2] - disp[elec][2] /*i[ion][2]*/ + dist[elec]*qp[tid+numWrite][2]; blockCosTheta[tid] = (disp[elec][0]*qp[tid+numWrite][0] + disp[elec][1]*qp[tid+numWrite][1] + disp[elec][2]*qp[tid+numWrite][2]) / dist[elec]; } posIndex = numQuadPoints - numWrite; } // Now, write electron IDs if (index == BS) { myElecs[blockNum*BS+tid] = blockElecs[tid]; myDist[blockNum*BS+tid] = blockDist[tid]; blockNum++; index = 0; } if (tid == 0) { blockElecs[index] = eBlock*BS+elec; blockDist[index] = dist[elec]; } index++; npairs++; __syncthreads(); } } } } } for (int j=0; j<3; j++) if (j*BS + tid < 3*posIndex) myRatioPos[(posBlockNum*3+j)*BS + tid] = blockPos[0][j*BS+tid]; if (tid < posIndex) myCosTheta[posBlockNum*BS+tid] = blockCosTheta[tid]; // Write pairs and distances remaining the final block if (tid < index) { myElecs[blockNum*BS+tid] = blockElecs[tid]; myDist[blockNum*BS+tid] = blockDist[tid]; } if (tid == 0) numPairs[blockIdx.x] = npairs; } void find_core_electrons_PBC (float *R[], int numElec, float I[], int firstIon, int lastIon, float rcut, float L[], float Linv[], float quadPoints[], int numQuadPoints, int *elecs[], float *ratioPos[], float *dist[], float *cosTheta[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_PBC_kernel<float,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, L, Linv, quadPoints, numQuadPoints, elecs, ratioPos, dist, cosTheta, numPairs); } void find_core_electrons_PBC (double *R[], int 
numElec, double I[], int firstIon, int lastIon, double rcut, double L[], double Linv[], double quadPoints[], int numQuadPoints, int *elecs[], double *ratioPos[], double *dist[], double *cosTheta[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_PBC_kernel<double,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, L, Linv, quadPoints, numQuadPoints, elecs, ratioPos, dist, cosTheta, numPairs); } ////////////////////// // Non-PBC versions // ////////////////////// template<typename T, int BS> __global__ void find_core_electrons_kernel(T **R, int numElec, T *I, int firstIon, int lastIon, T rcut, int2 **pairs, T **dist, int *numPairs) { int tid = threadIdx.x; __shared__ T *myR, *mydist; __shared__ int2 *mypairs; if (tid == 0) { myR = R[blockIdx.x]; mydist = dist[blockIdx.x]; mypairs = pairs[blockIdx.x]; } __syncthreads(); // int i0 = tid / 9; // int i1 = (tid - 9*i0)/3; // int i2 = (tid - 9*i0 - 3*i1); // __syncthreads(); int numIon = lastIon - firstIon + 1; int numElecBlocks = numElec/BS + ((numElec % BS) ? 1 : 0); int numIonBlocks = numIon /BS + ((numIon % BS) ? 1 : 0); __shared__ T r[BS][3]; __shared__ T i[BS][3]; __shared__ T d[BS]; __shared__ int2 blockpairs[BS]; __shared__ T blockdist[BS]; int npairs=0, index=0, blockNum=0; for (int iBlock=0; iBlock<numIonBlocks; iBlock++) { for (int dim=0; dim<3; dim++) if (dim*BS+tid < 3*numIon) i[0][dim*BS+tid] = I[3*BS*iBlock + 3*firstIon + dim*BS+tid]; int ionEnd = ((iBlock+1)*BS < numIon) ? BS : (numIon - iBlock*BS); for (int eBlock=0; eBlock<numElecBlocks; eBlock++) { int elecEnd = ((eBlock+1)*BS < numElec) ? BS : (numElec - eBlock*BS); for (int dim=0; dim<3; dim++) if (dim*BS+tid < 3*numElec) r[0][dim*BS+tid] = myR[3*BS*eBlock + dim*BS+tid]; for (int ion=0; ion<ionEnd; ion++) { d[tid] = sqrtf((r[tid][0]-i[ion][0])*(r[tid][0]-i[ion][0]) + (r[tid][0]-i[ion][1])*(r[tid][1]-i[ion][1]) + (r[tid][0]-i[ion][2])*(r[tid][2]-i[ion][2])); for (int elec=0; elec<elecEnd; elec++) { if (d[elec] < rcut) { if (index == BS) { mypairs[blockNum*BS+tid] = blockpairs[tid]; mydist[blockNum*BS+tid] = blockdist[tid]; blockNum++; index = 0; } if (tid == 0) { blockpairs[index].x = iBlock*BS+ion; blockpairs[index].y = eBlock*BS+elec; blockdist[index] = d[tid]; } index++; npairs++; } } } __syncthreads(); } __syncthreads(); } // Write pairs and distances remaining the final block if (tid < index) { mypairs[blockNum*BS+tid] = blockpairs[tid]; mydist[blockNum*BS+tid] = blockdist[tid]; } if (tid == 0) numPairs[blockIdx.x] = npairs; } void find_core_electrons (float *R[], int numElec, float I[], int firstIon, int lastIon, float rcut, int2 *pairs[], float *dist[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_kernel<float,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, pairs, dist, numPairs); } void find_core_electrons (double *R[], int numElec, double I[], int firstIon, int lastIon, double rcut, int2 *pairs[], double *dist[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_kernel<double,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, pairs, dist, numPairs); } template<typename T, int BS> __global__ void find_core_electrons_kernel(T **R, int numElec, T *I, int firstIon, int lastIon, T rcut, T *quadPoints, int numQuadPoints, int **elecs, T **ratioPos, T **dist_list, T **cosTheta_list, int *numPairs) { int tid = 
threadIdx.x; __shared__ T *myR, *myRatioPos, *myDist, *myCosTheta; __shared__ int *myElecs; __shared__ T qp[BS][3]; for (int i=0; i<3; i++) if (i*BS + tid < 3*numQuadPoints) qp[0][i*BS+tid] = quadPoints[i*BS+tid]; if (tid == 0) { myR = R[blockIdx.x]; myElecs = elecs[blockIdx.x]; myRatioPos = ratioPos[blockIdx.x]; myDist = dist_list[blockIdx.x]; myCosTheta = cosTheta_list[blockIdx.x]; } __syncthreads(); int numIon = lastIon - firstIon + 1; int numElecBlocks = numElec/BS + ((numElec % BS) ? 1 : 0); int numIonBlocks = numIon /BS + ((numIon % BS) ? 1 : 0); __shared__ T r[BS][3]; __shared__ T i[BS][3]; __shared__ int blockElecs[BS]; __shared__ T blockPos[BS][3]; __shared__ T dist[BS], disp[BS][3]; __shared__ T blockDist[BS], blockCosTheta[BS]; int posIndex=0, posBlockNum = 0; int npairs=0, index=0, blockNum=0; for (int iBlock=0; iBlock<numIonBlocks; iBlock++) { for (int dim=0; dim<3; dim++) if ((3*iBlock+dim)*BS+tid < 3*numIon) i[0][dim*BS+tid] = I[3*BS*iBlock + 3*firstIon + dim*BS+tid]; int ionEnd = ((iBlock+1)*BS < numIon) ? BS : (numIon - iBlock*BS); for (int eBlock=0; eBlock<numElecBlocks; eBlock++) { int elecEnd = ((eBlock+1)*BS < numElec) ? BS : (numElec - eBlock*BS); for (int dim=0; dim<3; dim++) if ((3*eBlock+dim)*BS+tid < 3*numElec) r[0][dim*BS+tid] = myR[3*BS*eBlock + dim*BS+tid]; __syncthreads(); for (int ion=0; ion<ionEnd; ion++) { disp[tid][0] = r[tid][0]-i[ion][0]; disp[tid][1] = r[tid][1]-i[ion][1]; disp[tid][2] = r[tid][2]-i[ion][2]; dist[tid] = sqrtf(disp[tid][0]*disp[tid][0] + disp[tid][1]*disp[tid][1] + disp[tid][2]*disp[tid][2]); for (int elec=0; elec<elecEnd; elec++) { __syncthreads(); if (dist[elec] < rcut) { // First, write quadrature points if (numQuadPoints + posIndex <= BS) { if (tid < numQuadPoints) { blockPos[posIndex+tid][0] = r[elec][0] - disp[elec][0] /*i[ion][0]*/ + dist[elec]*qp[tid][0]; blockPos[posIndex+tid][1] = r[elec][1] - disp[elec][1] /*i[ion][1]*/ + dist[elec]*qp[tid][1]; blockPos[posIndex+tid][2] = r[elec][2] - disp[elec][2] /*i[ion][2]*/ + dist[elec]*qp[tid][2]; blockCosTheta[posIndex+tid] = (disp[elec][0]*qp[tid][0] + disp[elec][1]*qp[tid][1] + disp[elec][2]*qp[tid][2]) / dist[elec]; } posIndex += numQuadPoints; } else { // Write whatever will fit in the shared buffer int numWrite = BS - posIndex; if (tid < numWrite) { blockPos[posIndex+tid][0] = r[elec][0] - disp[elec][0] /*i[ion][0]*/ + dist[elec]*qp[tid][0]; blockPos[posIndex+tid][1] = r[elec][1] - disp[elec][1] /*i[ion][1]*/ + dist[elec]*qp[tid][1]; blockPos[posIndex+tid][2] = r[elec][2] - disp[elec][2] /*i[ion][2]*/ + dist[elec]*qp[tid][2]; blockCosTheta[posIndex+tid] = (disp[elec][0]*qp[tid][0] + disp[elec][1]*qp[tid][1] + disp[elec][2]*qp[tid][2]) / dist[elec]; } __syncthreads(); // dump the full buffer to global memory for (int j=0; j<3; j++) myRatioPos[(posBlockNum*3+j)*BS+tid] = blockPos[0][j*BS+tid]; myCosTheta[posBlockNum*BS+tid] = blockCosTheta[tid]; posBlockNum++; __syncthreads(); // Write the remainder into shared memory if (tid < (numQuadPoints - numWrite)) { blockPos[tid][0] = r[elec][0] - disp[elec][0] /*i[ion][0]*/ + dist[elec]*qp[tid+numWrite][0]; blockPos[tid][1] = r[elec][1] - disp[elec][1] /*i[ion][1]*/ + dist[elec]*qp[tid+numWrite][1]; blockPos[tid][2] = r[elec][2] - disp[elec][2] /*i[ion][2]*/ + dist[elec]*qp[tid+numWrite][2]; blockCosTheta[tid] = (disp[elec][0]*qp[tid+numWrite][0] + disp[elec][1]*qp[tid+numWrite][1] + disp[elec][2]*qp[tid+numWrite][2]) / dist[elec]; } posIndex = numQuadPoints - numWrite; } // Now, write electron IDs if (index == BS) { myElecs[blockNum*BS+tid] 
= blockElecs[tid]; myDist[blockNum*BS+tid] = blockDist[tid]; blockNum++; index = 0; } if (tid == 0) { blockElecs[index] = eBlock*BS+elec; blockDist[index] = dist[elec]; } index++; npairs++; __syncthreads(); } } } } } for (int j=0; j<3; j++) if (j*BS + tid < 3*posIndex) myRatioPos[(posBlockNum*3+j)*BS + tid] = blockPos[0][j*BS+tid]; if (tid < posIndex) myCosTheta[posBlockNum*BS+tid] = blockCosTheta[tid]; // Write pairs and distances remaining the final block if (tid < index) { myElecs[blockNum*BS+tid] = blockElecs[tid]; myDist[blockNum*BS+tid] = blockDist[tid]; } if (tid == 0) numPairs[blockIdx.x] = npairs; } void find_core_electrons (float *R[], int numElec, float I[], int firstIon, int lastIon, float rcut, float quadPoints[], int numQuadPoints, int *elecs[], float *ratioPos[], float *dist[], float *cosTheta[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_kernel<float,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, quadPoints, numQuadPoints, elecs, ratioPos, dist, cosTheta, numPairs); } void find_core_electrons (double *R[], int numElec, double I[], int firstIon, int lastIon, double rcut, double quadPoints[], int numQuadPoints, int *elecs[], double *ratioPos[], double *dist[], double *cosTheta[], int numPairs[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); find_core_electrons_kernel<double,BS><<<dimGrid,dimBlock>>> (R, numElec, I, firstIon, lastIon, rcut, quadPoints, numQuadPoints, elecs, ratioPos, dist, cosTheta, numPairs); } // Maximum quadrature points of 32; // This kernel assumes that the pair are sorted according to ion // number template<typename T, int BS> __global__ void make_work_list_kernel (int2 **pairs, T **dist, int *numPairs, T *I, int numIons, T *quadPoints, int numQuadPoints, T **ratio_pos) { __shared__ T qp[BS][3]; __shared__ T *myPairs, *myDist, *myRatioPos; __shared__ int np; int tid = threadIdx.x; if (tid == 0) { myPairs = pairs[blockIdx.x]; myDist = dist[blockIdx.x]; myRatioPos = ratio_pos[blockIdx.x]; np = numPairs[blockIdx.x]; } for (int i=0; i<3; i++) if (i*BS+tid < 3*numQuadPoints) qp[0][i*BS + tid] = quadPoints[i*BS + tid]; __syncthreads(); __shared__ int2 sharedPairs[BS]; __shared__ T i[BS][3]; int iBlock = 0; int numPairBlocks = np/BS + ((np % BS) ? 1 : 0); for (int pairBlock=0; pairBlock<numPairBlocks; pairBlock++) { if (pairBlock*BS + tid < np) sharedPairs[tid] = myPairs[pairBlock*BS+tid]; int end = ((pairBlock+1)*BS < np) ? BS : (np - pairBlock*BS); for (int ip=0; ip<end; ip++) { if (iBlock*BS < sharedPairs[ip].x) { while ((iBlock++*BS) < sharedPairs[ip].x); for (int dim=0; dim<3; dim++) if (dim*BS+tid < 3*numIons) i[0][dim*BS+tid] = I[3*BS*iBlock + dim*BS+tid]; } } } }
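// Host-side reference for the minimum-image distance used by min_dist /
// min_dist_only above; a sketch for checking the device kernels, not part of
// the original source. L maps reduced (fractional) coordinates to Cartesian
// ones with the same L[row][col] indexing as above, and Linv is its inverse.
#include <cmath>

template <typename T>
T min_image_dist_host(T x, T y, T z, const T L[3][3], const T Linv[3][3]) {
  // Separation vector in reduced coordinates.
  T u0 = Linv[0][0]*x + Linv[0][1]*y + Linv[0][2]*z;
  T u1 = Linv[1][0]*x + Linv[1][1]*y + Linv[1][2]*z;
  T u2 = Linv[2][0]*x + Linv[2][1]*y + Linv[2][2]*z;
  // Wrap each component to the nearest periodic image (analogue of rintf above).
  u0 -= std::rint(u0);
  u1 -= std::rint(u1);
  u2 -= std::rint(u2);
  // Back to Cartesian coordinates; return the wrapped separation length.
  T xm = L[0][0]*u0 + L[0][1]*u1 + L[0][2]*u2;
  T ym = L[1][0]*u0 + L[1][1]*u1 + L[1][2]*u2;
  T zm = L[2][0]*u0 + L[2][1]*u1 + L[2][2]*u2;
  return std::sqrt(xm*xm + ym*ym + zm*zm);
}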
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> // #include <THC/THC.h> #include <THC/THCAtomics.cuh> // #include <THC/THCDeviceUtils.cuh> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename scalar_t> __device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t *bottom_data, const int height, const int width, const int nheads, const int channels, scalar_t h, scalar_t w, const int m, const int c) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) { int ptr1 = h_low * width * nheads * channels + w_low * nheads * channels + m * channels + c; v1 = bottom_data[ptr1]; } scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) { int ptr2 = h_low * width * nheads * channels + w_high * nheads * channels + m * channels + c; v2 = bottom_data[ptr2]; } scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) { int ptr3 = h_high * width * nheads * channels + w_low * nheads * channels + m * channels + c; v3 = bottom_data[ptr3]; } scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) { int ptr4 = h_high * width * nheads * channels + w_high * nheads * channels + m * channels + c; v4 = bottom_data[ptr4]; } scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t ms_deform_attn_get_gradient_weight(scalar_t h, scalar_t w, const int gh, const int gw, const int height, const int width) { if (h <= -1 || h >= height || w <= -1 || w >= width) { //empty return 0; } int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t weight = 0; if (gh == h_low && gw == w_low) weight = (gh + 1 - h) * (gw + 1 - w); if (gh == h_low && gw == w_high) weight = (gh + 1 - h) * (w + 1 - gw); if (gh == h_high && gw == w_low) weight = (h + 1 - gh) * (gw + 1 - w); if (gh == h_high && gw == w_high) weight = (h + 1 - gh) * (w + 1 - gw); return weight; } template <typename scalar_t> __device__ scalar_t ms_deform_attn_get_coordinate_weight(scalar_t h, scalar_t w, const int m, const int c, const int height, const int width, const int nheads, const int channels, const scalar_t *bottom_data, const int bp_dir) { if (h <= -1 || h >= height || w <= -1 || w >= width) { //empty return 0; } int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t weight = 0; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) { int ptr1 = h_low * width * nheads * channels + w_low * nheads * channels + m * channels + c; v1 = bottom_data[ptr1]; } scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) { int ptr2 = h_low * width * nheads * channels + w_high * nheads * channels + m * channels + c; v2 = bottom_data[ptr2]; } scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) { int ptr3 = h_high * width * nheads * channels + w_low * nheads * channels + m * channels + c; v3 = bottom_data[ptr3]; } scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) { int ptr4 = h_high * width * nheads * channels + w_high * nheads * channels + m * channels + c; v4 = bottom_data[ptr4]; } if (bp_dir == 1) { if (h_low >= 0 && w_low >= 0) weight += -1 * 
(w_low + 1 - w) * v1; if (h_low >= 0 && w_high <= width - 1) weight += -1 * (w - w_low) * v2; if (h_high <= height - 1 && w_low >= 0) weight += (w_low + 1 - w) * v3; if (h_high <= height - 1 && w_high <= width - 1) weight += (w - w_low) * v4; } else if (bp_dir == 0) { if (h_low >= 0 && w_low >= 0) weight += -1 * (h_low + 1 - h) * v1; if (h_low >= 0 && w_high <= width - 1) weight += (h_low + 1 - h) * v2; if (h_high <= height - 1 && w_low >= 0) weight += -1 * (h - h_low) * v3; if (h_high <= height - 1 && w_high <= width - 1) weight += (h - h_low) * v4; } return weight; } template <typename scalar_t> __global__ void ms_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *data_col) { // launch batch_size * num_levels * num_query * num_point * channels cores // data_value: batch_size, spatial_size, num_heads, channels // data_sampling_loc: batch_size, num_query, num_heads, num_levels, num_point, 2 // data_attn_weight: batch_size, num_query, num_heads, num_levels, num_point // data_col: num_levels*num_point, batch_size, num_query, num_heads, channels CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int c_col = index % channels; const int p_col = (index / channels) % num_point; const int q_col = (index / channels / num_point) % num_query; const int l_col = (index / channels / num_point / num_query) % num_levels; const int b_col = index / channels / num_point / num_query / num_levels; const int level_start_id = data_level_start_index[l_col]; const int spatial_h = data_spatial_shapes[l_col * 2]; const int spatial_w = data_spatial_shapes[l_col * 2 + 1]; // num_heads, channels scalar_t *data_col_ptr = data_col + ( c_col + channels * 0 + channels * num_heads * q_col + channels * num_heads * num_query * b_col + channels * num_heads * num_query * batch_size * p_col + channels * num_heads * num_query * batch_size * num_point * l_col); // spatial_h, spatial_w, num_heads, channels const scalar_t *data_value_ptr = data_value + (b_col * spatial_size * num_heads * channels + level_start_id * num_heads * channels); // num_heads, num_levels, num_point, 2 const scalar_t *data_sampling_loc_ptr = data_sampling_loc + ( b_col * num_query * num_heads * num_levels * num_point * 2 + q_col * num_heads * num_levels * num_point * 2); // num_heads, num_levels, num_point const scalar_t *data_attn_weight_ptr = data_attn_weight + ( b_col * num_query * num_heads * num_levels * num_point + q_col * num_heads * num_levels * num_point); for (int i = 0; i < num_heads; ++i) { const int data_loc_h_ptr = i * num_levels * num_point * 2 + l_col * num_point * 2 + p_col * 2 + 1; const int data_loc_w_ptr = i * num_levels * num_point * 2 + l_col * num_point * 2 + p_col * 2; const int data_weight_ptr = i * num_levels * num_point + l_col * num_point + p_col; const scalar_t loc_h = data_sampling_loc_ptr[data_loc_h_ptr]; const scalar_t loc_w = data_sampling_loc_ptr[data_loc_w_ptr]; const scalar_t weight = data_attn_weight_ptr[data_weight_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = loc_h * spatial_h - 0.5; const scalar_t w_im = loc_w * spatial_w - 0.5; if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { val = ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, 
spatial_w, num_heads, channels, h_im, w_im, i, c_col); } *data_col_ptr = val * weight; data_col_ptr += channels; } } } template <typename scalar_t> __global__ void ms_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *grad_value) { // launch batch_size * num_levels * num_query * num_point * num_heads * channels cores // grad_value: batch_size, spatial_size, num_heads, channels // data_sampling_loc: batch_size, num_query, num_heads, num_levels, num_point, 2 // data_attn_weight: batch_size, num_query, num_heads, num_levels, num_point // data_col: batch_size, num_query, num_heads, channels CUDA_KERNEL_LOOP(index, n) { const int c_col = index % channels; const int m_col = (index / channels) % num_heads; const int p_col = (index / channels / num_heads) % num_point; const int q_col = (index / channels / num_heads / num_point) % num_query; const int l_col = (index / channels / num_heads / num_point / num_query) % num_levels; const int b_col = index / channels / num_heads / num_point / num_query / num_levels; const int level_start_id = data_level_start_index[l_col]; const int spatial_h = data_spatial_shapes[l_col * 2]; const int spatial_w = data_spatial_shapes[l_col * 2 + 1]; const scalar_t col = data_col[ c_col + channels * m_col + channels * num_heads * q_col + channels * num_heads * num_query * b_col]; int sampling_ptr = b_col * num_query * num_heads * num_levels * num_point + q_col * num_heads * num_levels * num_point + m_col * num_levels * num_point + l_col * num_point + p_col; const scalar_t sampling_x = data_sampling_loc[2 * sampling_ptr] * spatial_w - 0.5; const scalar_t sampling_y = data_sampling_loc[2 * sampling_ptr + 1] * spatial_h - 0.5; const scalar_t attn_weight = data_attn_weight[sampling_ptr]; const scalar_t cur_top_grad = col * attn_weight; const int cur_h = (int)sampling_y; const int cur_w = (int)sampling_x; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < spatial_h && cur_w + dx >= 0 && cur_w + dx < spatial_w && abs(sampling_y - (cur_h + dy)) < 1 && abs(sampling_x - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = b_col * spatial_size * num_heads * channels + (level_start_id + (cur_h+dy)*spatial_w + (cur_w+dx)) * num_heads * channels + m_col * channels + c_col; scalar_t weight = ms_deform_attn_get_gradient_weight(sampling_y, sampling_x, cur_h + dy, cur_w + dx, spatial_h, spatial_w); atomicAdd(grad_value + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void ms_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) { // sampling_loc: batch_size, num_query, num_heads, num_levels, num_point, 2 // attn_weight: batch_size, num_query, num_heads, num_levels, num_point // column: batch_size, num_query, num_heads, channels // value: batch_size, spatial_size, num_heads, channels // 
num_kernels = batch_size * num_query * num_heads * num_levels * num_point * 2 CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, wval = 0; const int loc_c = index % 2; const int k = (index / 2) % num_point; const int l = (index / 2 / num_point) % num_levels; const int m = (index / 2 / num_point / num_levels) % num_heads; const int q = (index / 2 / num_point / num_levels / num_heads) % num_query; const int b = index / 2 / num_point / num_levels / num_heads / num_query; const int level_start_id = data_level_start_index[l]; const int spatial_h = data_spatial_shapes[l * 2]; const int spatial_w = data_spatial_shapes[l * 2 + 1]; const scalar_t *data_col_ptr = data_col +( m * channels + q * channels * num_heads + b * channels * num_heads * num_query); const scalar_t *data_value_ptr = data_value + ( 0 * channels + level_start_id * channels * num_heads + b * channels * num_heads * spatial_size); scalar_t sampling_x = data_sampling_loc[(index / 2) * 2] * spatial_w - 0.5; scalar_t sampling_y = data_sampling_loc[(index / 2) * 2 + 1] * spatial_h - 0.5; const scalar_t attn_weight = data_attn_weight[index / 2]; for (int col_c = 0; col_c < channels; col_c += 1) { const scalar_t col = data_col_ptr[col_c]; if (sampling_x <= -1 || sampling_y <= -1 || sampling_x >= spatial_w || sampling_y >= spatial_h) { sampling_x = sampling_y = -2; } else { wval += col * ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, sampling_y, sampling_x, m, col_c); } const scalar_t weight = ms_deform_attn_get_coordinate_weight( sampling_y, sampling_x, m, col_c, spatial_h, spatial_w, num_heads, channels, data_value_ptr, loc_c); val += weight * col * attn_weight; } if (loc_c == 0) val *= spatial_w; else if (loc_c == 1) val *= spatial_h; grad_sampling_loc[index] = val; if (loc_c % 2 == 0) grad_attn_weight[index / 2] = wval; } } template <typename scalar_t> void ms_deformable_im2col_cuda(cudaStream_t stream, const scalar_t* data_value, const int64_t* data_spatial_shapes, const int64_t* data_level_start_index, const scalar_t* data_sampling_loc, const scalar_t* data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t* data_col) { // num_axes should be smaller than block size const int num_kernels = batch_size * num_levels * num_query * num_point * channels; ms_deformable_im2col_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void ms_deformable_col2im_cuda(cudaStream_t stream, const scalar_t* data_col, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t* grad_value) { const int num_kernels = batch_size * num_levels * num_query * num_point * num_heads * channels; ms_deformable_col2im_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_spatial_shapes, data_level_start_index, 
data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void ms_deformable_col2im_coord_cuda(cudaStream_t stream, const scalar_t* data_col, const scalar_t *data_value, const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, const int batch_size, const int spatial_size, const int num_heads, const int channels, const int num_levels, const int num_query, const int num_point, scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) { // data_sampling_loc: batch_size, num_query, num_heads, num_levels, num_point, 2 // data_attn_weight: batch_size, num_query, num_heads, num_levels, num_point const int num_kernels = batch_size * num_query * num_heads * num_levels * num_point * 2; ms_deformable_col2im_coord_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_col, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_sampling_loc, grad_attn_weight); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in ms_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } }
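// Host-side sketch of the bilinear sampling done by ms_deform_attn_im2col_bilinear
// above; not part of the original source, intended only as a readable reference.
// `data` is laid out as (height, width, num_heads, channels), (h, w) are the
// already-shifted sampling coordinates, and corners falling outside the feature
// map contribute zero, mirroring the device code.
#include <cmath>

template <typename T>
T bilinear_sample_host(const T* data, int height, int width, int nheads,
                       int channels, T h, T w, int m, int c) {
  const int h_low = static_cast<int>(std::floor(h));
  const int w_low = static_cast<int>(std::floor(w));
  const int h_high = h_low + 1, w_high = w_low + 1;
  const T lh = h - h_low, lw = w - w_low;   // fractional offsets
  const T hh = 1 - lh, hw = 1 - lw;         // complementary weights
  auto corner = [&](int hc, int wc) -> T {
    if (hc < 0 || hc > height - 1 || wc < 0 || wc > width - 1) return T(0);
    return data[((hc * width + wc) * nheads + m) * channels + c];
  };
  return hh * hw * corner(h_low, w_low) + hh * lw * corner(h_low, w_high) +
         lh * hw * corner(h_high, w_low) + lh * lw * corner(h_high, w_high);
}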
namespace surfelwarp { namespace device { enum { jtj_blk_size = 36, warp_size = 32, num_warps = 4, thread_blk_size = num_warps * warp_size, }; __device__ __forceinline__ void computeScalarJtJBlock( const float jacobian[6], float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian[5] * jacobian[jac_row]; } } __device__ __forceinline__ void computeSmoothJtJBlock( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned typed_term, unsigned encoded_pair, float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { //Check the validity of this term const auto validity = term2jacobian.validity_indicator[typed_term]; if(validity == 0) { #pragma unroll for (auto i = 0; i < jtj_blk_size; i++) { jtj_blk[i] = 0.0f; } return; } const ushort2 node_ij = term2jacobian.node_graph[typed_term]; unsigned node_i, node_j; decode_nodepair(encoded_pair, node_i, node_j); //Explicit compute jacobian const float3 r = term2jacobian.Ti_xj[typed_term]; const float3 s = term2jacobian.Tj_xj[typed_term]; TwistGradientOfScalarCost twist_gradient_i, twist_gradient_j; //The order of two terms const float* jacobian_encoded_i; const float* jacobian_encoded_j; if(node_i == node_ij.x) { jacobian_encoded_i = (const float*)(&twist_gradient_i); jacobian_encoded_j = (const float*)(&twist_gradient_j); } else { jacobian_encoded_i = (const float*)(&twist_gradient_j); jacobian_encoded_j = (const float*)(&twist_gradient_i); } //The first iteration assign { twist_gradient_i.rotation = make_float3(0.0f, r.z, -r.y); twist_gradient_i.translation = make_float3(1.0f, 0.0f, 0.0f); twist_gradient_j.rotation = make_float3(0.0f, -s.z, s.y); twist_gradient_j.translation = make_float3(-1.0f, 0.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } //The next two iterations, plus { twist_gradient_i.rotation = make_float3(-r.z, 0.0f, r.x); twist_gradient_i.translation = make_float3(0.0f, 1.0f, 0.0f); twist_gradient_j.rotation = make_float3(s.z, 0.0f, -s.x); twist_gradient_j.translation = make_float3( 0.0f, -1.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * 
jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } { twist_gradient_i.rotation = make_float3(r.y, -r.x, 0.0f); twist_gradient_i.translation = make_float3(0.0f, 0.0f, 1.0f); twist_gradient_j.rotation = make_float3(-s.y, s.x, 0.0f); twist_gradient_j.translation = make_float3(0.0f, 0.0f, -1.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } } __device__ __forceinline__ void computeChannelledJtJBlock( const float jacobian_channelled[18], float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { //The first iteration: assign const float* jacobian = jacobian_channelled; #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian[5] * jacobian[jac_row]; } //The next 2 iterations: plus for(auto channel = 1; channel < 3; channel++) { jacobian = &(jacobian_channelled[channel * 6]); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian[5] * jacobian[jac_row]; } } } //The deprecated method __device__ __forceinline__ void computeSmoothJtJBlockOnline( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned typed_term, unsigned encoded_pair, float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto xj4 = term2jacobian.reference_node_coords[node_ij.y]; DualQuaternion dq_i = term2jacobian.node_se3[node_ij.x]; DualQuaternion dq_j = term2jacobian.node_se3[node_ij.y]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); unsigned node_i, node_j; decode_nodepair(encoded_pair, node_i, node_j); //Explicit compute jacobian const float3 xj = make_float3(xj4.x, xj4.y, xj4.z); const float3 r = Ti.rot * xj + Ti.trans; const float3 s = Tj.rot * xj + Tj.trans; TwistGradientOfScalarCost twist_gradient_i, twist_gradient_j; //The order of two terms const float* jacobian_encoded_i; const float* jacobian_encoded_j; if(node_i == node_ij.x) { jacobian_encoded_i = (const 
float*)(&twist_gradient_i); jacobian_encoded_j = (const float*)(&twist_gradient_j); } else { jacobian_encoded_i = (const float*)(&twist_gradient_j); jacobian_encoded_j = (const float*)(&twist_gradient_i); } //The first iteration assign { twist_gradient_i.rotation = make_float3(0.0f, r.z, -r.y); twist_gradient_i.translation = make_float3(1.0f, 0.0f, 0.0f); twist_gradient_j.rotation = make_float3(0.0f, -s.z, s.y); twist_gradient_j.translation = make_float3(-1.0f, 0.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } //The next two iterations, plus { twist_gradient_i.rotation = make_float3(-r.z, 0.0f, r.x); twist_gradient_i.translation = make_float3(0.0f, 1.0f, 0.0f); twist_gradient_j.rotation = make_float3(s.z, 0.0f, -s.x); twist_gradient_j.translation = make_float3( 0.0f, -1.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } { twist_gradient_i.rotation = make_float3(r.y, -r.x, 0.0f); twist_gradient_i.translation = make_float3(0.0f, 0.0f, 1.0f); twist_gradient_j.rotation = make_float3(-s.y, s.x, 0.0f); twist_gradient_j.translation = make_float3(0.0f, 0.0f, -1.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } } __global__ void computeJtJNonDiagonalBlockNoSyncKernel( const NodePair2TermsIndex::NodePair2TermMap nodepair2term, const Term2JacobianMaps term2jacobian, float* jtj_blks, const PenaltyConstants constants = PenaltyConstants() ) { const auto nodepair_idx = blockIdx.x; const auto encoded_pair = nodepair2term.encoded_nodepair[nodepair_idx]; const auto term_begin = nodepair2term.nodepair_term_range[nodepair_idx].x; const auto term_end = nodepair2term.nodepair_term_range[nodepair_idx].y; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 
1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The shared memory for reduction __shared__ float shared_blks[jtj_blk_size][num_warps]; //Zero out the elements for(auto iter = threadIdx.x; iter < jtj_blk_size * num_warps; iter += thread_blk_size) { shared_blks[iter % jtj_blk_size][iter / jtj_blk_size] = 0.0f; } __syncthreads(); for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //The memory for store the JtResidual result of each threads float local_blks[jtj_blk_size]; if(iter < term_size) { const auto term_idx = nodepair2term.nodepair_term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, nodepair2term.term_offset, term_type, typed_term_idx); switch (term_type) { case TermType::DenseImage: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.dense_depth_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.DenseDepthSquared() * nodepair_weight); } break; case TermType::Smooth: computeSmoothJtJBlock(term2jacobian.smooth_term, typed_term_idx, encoded_pair, local_blks, constants.SmoothSquared()); break; /*case TermType::DensityMap: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.density_map_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.DensitySquared() * nodepair_weight); } break;*/ case TermType::Foreground: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.foreground_mask_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.ForegroundSquared() * nodepair_weight); } break; case TermType::Feature: { float term_jacobian[18] = {0}; float nodepair_weight = 0; computeFeatureJtJBlockJacobian(term2jacobian.sparse_feature_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeChannelledJtJBlock(term_jacobian, local_blks, constants.SparseFeatureSquared() * nodepair_weight); } break; default: term_valid = false; break; } } //__syncthreads(); //Do a reduction for (int i = 0; i < jtj_blk_size; i++) { float data = (iter < term_size && term_valid) ? 
local_blks[i] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_blks[i][warp_id] += data; } } } __syncthreads(); //Write to output for(auto iter = threadIdx.x; iter < jtj_blk_size; iter += thread_blk_size) jtj_blks[jtj_blk_size * nodepair_idx + iter] = (shared_blks[iter][0] + shared_blks[iter][1] + shared_blks[iter][2] + shared_blks[iter][3]); } } // namespace device } // namespace surfelwarp void surfelwarp::JtJMaterializer::computeNonDiagonalBlocksNoSync(cudaStream_t stream) { //Correct the size of node pairs const auto num_nodepairs = m_nodepair2term_map.encoded_nodepair.Size(); SURFELWARP_CHECK_EQ(num_nodepairs, m_nodepair2term_map.nodepair_term_range.Size()); m_nondiag_blks.ResizeArrayOrException(num_nodepairs * device::jtj_blk_size); //Invoke the kernel dim3 blk(device::thread_blk_size); dim3 grid(num_nodepairs); device::computeJtJNonDiagonalBlockNoSyncKernel<<<grid, blk, 0, stream>>>( m_nodepair2term_map, m_term2jacobian_map, m_nondiag_blks.Ptr(), m_penalty_constants ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif //Do a sanity check //nonDiagonalBlocksSanityCheck(); }
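// Host-side sketch of the weighted 6x6 block produced by computeScalarJtJBlock
// above (not part of the original source): for one scalar residual with twist
// gradient J (6 entries), the block is weight^2 * J * J^T, stored in the same
// flattened jtj[6*row + col] = w2 * J[col] * J[row] order as the device code.
inline void scalar_jtj_block_host(const float jacobian[6], float jtj[36],
                                  float weight_square = 1.0f) {
  for (int row = 0; row < 6; ++row)
    for (int col = 0; col < 6; ++col)
      jtj[6 * row + col] = weight_square * jacobian[col] * jacobian[row];
}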
#include <algorithm> #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "io_iterator.h" #include "pwu_kernel_parameter.h" #include "rpucuda_pulsed_device.h" namespace RPU { /****************************************************************************************************************/ /* PULSEDWEIGHTUPDATER */ /******************************************************************************************************************/ template <typename T> PulsedWeightUpdater<T>::PulsedWeightUpdater(CudaContext *c, int x_size, int d_size) : context_{c}, x_size_{x_size}, d_size_{d_size} { blm_ = RPU::make_unique<BitLineMaker<T>>(c, x_size, d_size); up_context_ = nullptr; is_async_update_ = false; }; template <typename T> pwukpvec_t<T> PulsedWeightUpdater<T>::getValidUpdateKernels( PulsedRPUDeviceCudaBase<T> *rpucuda_device, int m_batch, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; for (int use_bo64 : {1, 0}) { // omit 2 (ie bo64 translation) for (int out_trans : {true, false}) { pwukpvec_t<T> v2 = rpucuda_device->getUpdateKernels(m_batch, up.getNK32Default(), use_bo64, out_trans, up); for (int i = 0; i < v2.size(); i++) { if (v2[i]->isValid()) { v.push_back(v2[i]); } } } if (v.size() > 0 && (m_batch >= 1000)) { break; // prefer bo64 for large batch if possible } } return v; } template <typename T> void PulsedWeightUpdater<T>::makeUpdateAsync() { if (!is_async_update_) { is_async_update_ = true; up_context_ = RPU::make_unique<CudaContext>(context_->getGPUId()); } } template <typename T> void PulsedWeightUpdater<T>::waitForUpdateCalculations() { if (is_async_update_) { // use the up_context event for it because context_ might be shared context_->recordWaitEvent(up_context_->getStream(), up_context_->getEvent()); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::executeUpdate( pwukp_t<T> kpars, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { T pc_lr = rpucuda_device->getPulseCountLearningRate(lr); blm_->makeCounts( x_in, d_in, up, rpucuda_device->getWeightGranularity(), pc_lr, m_batch, x_trans_in, d_trans_in, kpars->getOutTrans(), kpars->getUseBo64(), kpars->getImplicitPulses()); CudaContext *c = context_; if (is_async_update_) { up_context_->recordWaitEvent(context_->getStream(), context_->getEvent()); c = &*up_context_; } // the original learninig rate needs to be passed rpucuda_device->runUpdateKernel( kpars, c, dev_weights, m_batch, &*blm_, up, lr, c->getRandomStates(kpars->getNStates())); } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::tuneUpdate( pwukp_t<T> &opt_kernel_pars, pwukpvec_t<T> &v, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { bool is_async_update = is_async_update_; is_async_update_ = false; CUDA_TIMING_INIT; int nrepeats = 3; CudaArray<T> dev_tmp_weights(context_, x_size_ * d_size_); auto *tmp_device = rpucuda_device->clone(); PulsedUpdateMetaParameter<T> up_tuning(up); up_tuning._currently_tuning = true; dev_tmp_weights.assignFromDevice(dev_weights); context_->synchronizeDevice(); // maybe other streams exist. 
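// The loop below times each candidate kernel nrepeats times on the scratch
// weight copy and the cloned device (so tuning never disturbs the real weights
// or device state) and keeps the kernel with the smallest per-call time.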
T min_timing = FLT_MAX; int min_i = 0; for (int k = 0; k < v.size(); k++) { CUDA_TIMING_START(*context_); for (int i = 0; i < nrepeats; i++) { this->executeUpdate( v[k], x_in, d_in, dev_tmp_weights.getData(), tmp_device, up_tuning, lr, m_batch, x_trans_in, d_trans_in); } CUDA_TIMING_STOP_NO_OUTPUT(*context_); v[k]->timing = milliseconds / nrepeats; if (v[k]->timing < min_timing) { min_timing = v[k]->timing; min_i = k; } } CUDA_TIMING_DESTROY; is_async_update_ = is_async_update; opt_kernel_pars = v[min_i]; delete tmp_device; DEBUG_OUT( "UpdateTuner: Using " << opt_kernel_pars->getName() << " for PWU [" << opt_kernel_pars->timing << "].\n"); DEBUG_CALL(opt_kernel_pars->print()); } template <typename T> template <typename InputIteratorT> const T *PulsedWeightUpdater<T>::copyIterator2Buffer( InputIteratorT vec, std::shared_ptr<CudaArray<T>> &buffer, int size) { if ((buffer == nullptr) || (buffer->getSize() < size)) { buffer = std::shared_ptr<CudaArray<T>>(new CudaArray<T>(context_, size)); } RPU::math::copyWithIterator(context_, buffer->getData(), vec, size); return buffer->getDataConst(); } template <> template <> const float *PulsedWeightUpdater<float>::copyIterator2Buffer( const float *vec, std::shared_ptr<CudaArray<float>> &buffer, int size) { return vec; } #ifdef RPU_USE_DOUBLE template <> template <> const double *PulsedWeightUpdater<double>::copyIterator2Buffer( const double *vec, std::shared_ptr<CudaArray<double>> &buffer, int size) { return vec; } #endif template <typename T> void PulsedWeightUpdater<T>::setSharedBuffer( int m_batch, std::shared_ptr<CudaArray<T>> x_buffer, std::shared_ptr<CudaArray<T>> d_buffer) { if (x_buffer) { dev_fpx_buffer_ = x_buffer; if (dev_fpx_buffer_->getSize() < m_batch * x_size_) { RPU_FATAL("X batch buffer size too small."); } } if (d_buffer) { dev_fpd_buffer_ = d_buffer; if (dev_fpd_buffer_->getSize() < m_batch * d_size_) { RPU_FATAL("D batch buffer size too small."); } } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doFPupdate( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (m_batch == 1 && beta == 1.0) { RPU::math::ger<T>(context_, d_size_, x_size_, -lr, d_out, 1, x_out, 1, dev_weights, d_size_); } else { RPU::math::gemm<T>( context_, d_trans, !x_trans, d_size_, // M x_size_, // N m_batch, // K -lr, d_out, d_trans ? m_batch : d_size_, x_out, x_trans ? 
m_batch : x_size_, beta, dev_weights, d_size_); } } template <typename T> void PulsedWeightUpdater<T>::checkBuffers(int m_batch) { // make sure shared buffers are constructed if ((dev_fpx_buffer_ == nullptr) || (dev_fpx_buffer_->getSize() < x_size_ * m_batch)) { dev_fpx_buffer_ = std::make_shared<CudaArray<T>>(context_, x_size_ * m_batch); } if ((dev_fpd_buffer_ == nullptr) || (dev_fpd_buffer_->getSize() < d_size_ * m_batch)) { dev_fpd_buffer_ = std::make_shared<CudaArray<T>>(context_, d_size_ * m_batch); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doDirectUpdate( XInputIteratorT x_in, DInputIteratorT d_in, AbstractRPUDeviceCuda<T> *rpucuda_device, T *dev_weights, const T lr, const PulsedUpdateMetaParameter<T> &up, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { checkBuffers(m_batch); // make sure they are created (we need them also for float * iterator) const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (!rpucuda_device->hasDirectUpdate()) { RPU_FATAL("Device does not support a direct update"); } rpucuda_device->doDirectUpdate( x_out, d_out, dev_weights, lr, m_batch, x_trans, d_trans, beta, up, dev_fpx_buffer_->getData(), // this might overrite x_out dev_fpd_buffer_->getData()); } template <typename T> bool PulsedWeightUpdater<T>::checkForFPUpdate( AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up) { if (rpucuda_device_in == nullptr) { return true; } if (rpucuda_device_in->implements() == DeviceUpdateType::FloatingPoint) { return true; } if (rpucuda_device_in->isPulsedDevice() && up.pulse_type == PulseType::None) { return true; } if (rpucuda_device_in->hasDirectUpdate()) { // also FP has direct, but that is handled above return false; } // omitting !isPulsedDevice return false; } #define FORCE_TUNING_THRES 0 template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::update( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans, const bool d_trans) { // FP update if no device is given if (rpucuda_device_in != nullptr && rpucuda_device_in->hasDirectUpdate()) { doDirectUpdate(x_in, d_in, rpucuda_device_in, dev_weights, lr, up, m_batch, x_trans, d_trans); return; } else if ( checkForFPUpdate(rpucuda_device_in, up) || (up.pulse_type == PulseType::NoneWithDevice)) { doFPupdate(x_in, d_in, dev_weights, lr, m_batch, x_trans, d_trans); if (up.pulse_type == PulseType::NoneWithDevice) { // apply bounds rpucuda_device_in->clipWeights(dev_weights, -1.0); } return; } // safe because of isPulsedDevice PulsedRPUDeviceCudaBase<T> *rpucuda_device = static_cast<PulsedRPUDeviceCudaBase<T> *>(rpucuda_device_in); bool force_tuning = false; // check need for init (or re-init) DeviceUpdateType update_type = rpucuda_device->implements(); if (update_type != update_type_) //|| (!blm_->checkBuffer(m_batch,BL))) { // we do not check for change in x_size/d_size, but they are assumed to be constant as well! 
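// Rebuild the candidate kernel list for this device/batch configuration, take
// the first valid kernel as the default, and let the tuning pass below replace
// it (unless a debug kernel index pins the choice).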
force_tuning = true; update_type_ = update_type; update_count_ = 0; // init kernels valid_kernels_ = getValidUpdateKernels(rpucuda_device, m_batch, up); if (valid_kernels_.size() == 0) { RPU_FATAL("Cannot find valid update kernels"); } kernel_pars_ = valid_kernels_[0]; // this will be modified if tuned if (up._debug_kernel_index >= 0) { // set default for debugging // just get a valid kpars (will be overwritten if tuning is used below) force_tuning = false; int kidx = up._debug_kernel_index; if (up._debug_kernel_index >= valid_kernels_.size()) { std::cout << "DEBUG WARNING: kernel index out of range " << valid_kernels_.size() << std::endl; kidx = 0; } kernel_pars_ = valid_kernels_[kidx]; if (kernel_pars_->getUseBo64() == 1) { std::cout << "DEBUG WARNING: cannot test BO64 direct. Set to translate " << std::endl; kernel_pars_->forceBo64Translate(); } if (kidx == 0) { kernel_pars_->force32(); // debug hack: might break kernel in the worst case kernel_pars_->forceNonTrans(); // debug hack: might break kernel in the worst case std::cout << "DEBUG WARNING: Kernel index 0: FORCED 32 and non-trans" << std::endl; } std::cout << "Selected kernel index " << kidx << " out of " << valid_kernels_.size() << std::endl; kernel_pars_->print(); } } if (update_count_ < FORCE_TUNING_THRES) { // only once again update_count_ += 1; force_tuning = force_tuning || (update_count_ == FORCE_TUNING_THRES); } // tune if requested if (force_tuning) { this->tuneUpdate( kernel_pars_, valid_kernels_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } // do update this->executeUpdate( kernel_pars_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } #define RPU_PWU_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \ template void PulsedWeightUpdater<NUM_T>::update( \ XITERT, DITERT, NUM_T *, AbstractRPUDeviceCuda<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::doFPupdate( \ XITERT, DITERT, NUM_T *, const NUM_T, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::doDirectUpdate( \ XITERT, DITERT, AbstractRPUDeviceCuda<NUM_T> *, NUM_T *, const NUM_T, \ const PulsedUpdateMetaParameter<NUM_T> &, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::tuneUpdate( \ pwukp_t<NUM_T> &, pwukpvec_t<NUM_T> &, XITERT, DITERT, NUM_T *, \ PulsedRPUDeviceCudaBase<NUM_T> *, const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, \ const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::executeUpdate( \ pwukp_t<NUM_T>, XITERT, DITERT, NUM_T *, PulsedRPUDeviceCudaBase<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); #define TRANSFLOAT(TRANS) TRANS, float template class PulsedWeightUpdater<float>; RPU_PWU_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, const float *, const float *); RPU_PWU_ITER_TEMPLATE( float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE( float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>); RPU_PWU_ITER_TEMPLATE( float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>); 
RPU_PWU_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>); RPU_PWU_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>); RPU_PWU_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *); #undef TRANSFLOAT #ifdef RPU_USE_DOUBLE #define TRANSDOUBLE(TRANS) TRANS, double template class PulsedWeightUpdater<double>; RPU_PWU_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, const double *, const double *); RPU_PWU_ITER_TEMPLATE( double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE( double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, SliceInputIterator<TRANSDOUBLE(true)>); RPU_PWU_ITER_TEMPLATE( double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, SliceInputIterator<TRANSDOUBLE(false)>); RPU_PWU_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>); RPU_PWU_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>); RPU_PWU_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *); #undef TRANSDOUBLE #endif #undef RPU_PWU_ITER_TEMPLATE } // namespace RPU
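// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical names Updater / RPU_TOY_ITER_TEMPLATE, not
// part of the RPU library): the RPU_PWU_ITER_TEMPLATE macro above explicitly
// instantiates member templates of PulsedWeightUpdater for each
// (x-iterator, d-iterator) combination, because their definitions live in this
// .cu translation unit rather than in a header. The miniature below shows the
// same pattern: declare the member template, define it out of line, then stamp
// out the combinations other translation units are allowed to link against.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <vector>

template <typename T>
class Updater {
public:
  template <typename XIt, typename DIt>
  T dot(XIt x, DIt d, std::size_t n) const; // defined below, out of line
};

// Out-of-line definition of the member template.
template <typename T>
template <typename XIt, typename DIt>
T Updater<T>::dot(XIt x, DIt d, std::size_t n) const {
  T acc = T(0);
  for (std::size_t i = 0; i < n; ++i) {
    acc += x[i] * d[i];
  }
  return acc;
}

// Stamp out the iterator combinations, mirroring RPU_PWU_ITER_TEMPLATE.
#define RPU_TOY_ITER_TEMPLATE(NUM_T, XITERT, DITERT)                           \
  template NUM_T Updater<NUM_T>::dot(XITERT, DITERT, std::size_t) const;

template class Updater<float>;
RPU_TOY_ITER_TEMPLATE(float, const float *, const float *);
RPU_TOY_ITER_TEMPLATE(float, std::vector<float>::const_iterator, const float *);
#undef RPU_TOY_ITER_TEMPLATE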
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]); THCudaLongTensor_copyLong(state, indices_, indices); THCTensor_(indexCopy)(state, dst, dim, indices_, src); THCudaLongTensor_free(state, indices_); } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); long dims = THCTensor_(nDimension)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimension)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); long srcDims = THCTensor_(nDimension)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, "expecting vector of indices"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices"); int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. 
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); long dstCopyDimSize = THCTensor_(size)(state, dst, dim); ptrdiff_t sliceSize = srcTotalSize / numIndices; int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexCopyLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128)); if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) && TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) { TensorInfo<real, unsigned int> dstInfo = getTensorInfo<THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<long, unsigned int> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(real, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(real, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(real, unsigned int, 3, 3, -2); } else { SMALL_INDEX(real, unsigned int, -1, -1, -1); } } else { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(real, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { LARGE_INDEX(real, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { LARGE_INDEX(real, unsigned int, 3, 3, -2); } else { LARGE_INDEX(real, unsigned int, -1, -1, -1); } } } else { TensorInfo<real, unsigned long> dstInfo = getTensorInfo<THCTensor, unsigned long>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<real, unsigned long> srcInfo = getTensorInfo<THCTensor, unsigned long>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<long, unsigned long> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(real, unsigned long, -1, -1, -1); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]); THCudaLongTensor_copyLong(state, indices_, indices); THCTensor_(indexAdd)(state, dst, 
dim, indices_, src); THCudaLongTensor_free(state, indices_); } void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); long dims = THCTensor_(nDimension)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimension)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); long srcDims = THCTensor_(nDimension)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, "expecting vector of indices"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices"); int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); long dstAddDimSize = THCTensor_(size)(state, dst, dim); ptrdiff_t sliceSize = srcTotalSize / numIndices; int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstAddDim, srcAddDim, sliceSize, dstAddDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstAddDim, srcAddDim, sliceSize, dstAddDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128)); if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) && TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) { TensorInfo<real, unsigned int> dstInfo = getTensorInfo<THCTensor, unsigned int>(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<THCTensor, unsigned int>(state, src); int srcAddDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); TensorInfo<long, unsigned int> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(real, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(real, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 
&& indContig) { SMALL_INDEX(real, unsigned int, 3, 3, -2); } else { SMALL_INDEX(real, unsigned int, -1, -1, -1); } } else { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(real, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { LARGE_INDEX(real, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { LARGE_INDEX(real, unsigned int, 3, 3, -2); } else { LARGE_INDEX(real, unsigned int, -1, -1, -1); } } } else { TensorInfo<real, unsigned long> dstInfo = getTensorInfo<THCTensor, unsigned long>(state, dst); int dstAddDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstAddDim); TensorInfo<real, unsigned long> srcInfo = getTensorInfo<THCTensor, unsigned long>(state, src); int srcAddDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcAddDim); TensorInfo<long, unsigned long> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(real, unsigned long, -1, -1, -1); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, real val) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]); THCudaLongTensor_copyLong(state, indices_, indices); THCTensor_(indexFill)(state, dst, dim, indices_, val); THCudaLongTensor_free(state, indices_); } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); long dims = THCTensor_(nDimension)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); long srcDims = THCTensor_(nDimension)(state, dst); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, "expecting vector of indices"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. 
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); long dstFillDimSize = THCTensor_(size)(state, dst, dim); ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize; int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) { TensorInfo<real, unsigned int> dstInfo = getTensorInfo<THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<long, unsigned int> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(real, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(real, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(real, unsigned int, 3, -2); } else { SMALL_INDEX(real, unsigned int, -1, -1); } } else { if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(real, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { LARGE_INDEX(real, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { LARGE_INDEX(real, unsigned int, 3, -2); } else { LARGE_INDEX(real, unsigned int, -1, -1); } } } else { TensorInfo<real, unsigned long> dstInfo = getTensorInfo<THCTensor, unsigned long>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<long, unsigned long> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(real, unsigned long, -1, -1); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THLongTensor *indices) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector"); THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]); THCudaLongTensor_copyLong(state, indices_, indices); THCTensor_(indexSelect)(state, dst, src, dim, indices_); THCudaLongTensor_free(state, indices_); } void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices)); long dims = THCTensor_(nDimension)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimension)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= 
MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); long srcDims = THCTensor_(nDimension)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, "expecting vector of indices"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); THLongStorage *newSize = THCTensor_(newSizeOf)(state, src); THLongStorage_set(newSize, dim, numIndices); THCTensor_(resize)(state, dst, newSize, NULL); THLongStorage_free(newSize); int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); long srcSelectDimSize = THCTensor_(size)(state, src, dim); ptrdiff_t sliceSize = dstTotalSize / numIndices; int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, dstTotalSize, sliceSize, srcSelectDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) && TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) && TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) { TensorInfo<real, unsigned int> dstInfo = getTensorInfo<THCTensor, unsigned int>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<THCTensor, unsigned int>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<long, unsigned int> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(real, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(real, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(real, unsigned int, 3, 3, -2); } else { SMALL_INDEX(real, unsigned int, -1, -1, -1); } } else { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(real, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { LARGE_INDEX(real, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { LARGE_INDEX(real, unsigned int, 3, 3, -2); } else { 
LARGE_INDEX(real, unsigned int, -1, -1, -1); } } } else { TensorInfo<real, unsigned long> dstInfo = getTensorInfo<THCTensor, unsigned long>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<real, unsigned long> srcInfo = getTensorInfo<THCTensor, unsigned long>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<long, unsigned long> indicesInfo = getTensorInfo<THCudaLongTensor, unsigned long>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(real, unsigned long, -1, -1, -1); } #undef SMALL_INDEX #undef LARGE_INDEX } #endif
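// ---------------------------------------------------------------------------
// Reference sketch (hypothetical helper, not part of cutorch): every index
// kernel above sizes its launch the same way -- 128-thread blocks, a grid of
// ceil(work / 128) capped at 8 blocks per SM, and the `numIndices <= 16` test
// choosing between the slice-parallel "small index" kernels and the
// element-parallel "large index" kernels. The host helper below reproduces
// that arithmetic.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>

struct LaunchDims {
  long grid;
  long block;
};

static long ceil_div_l(long a, long b) { return (a + b - 1) / b; }

// sliceSize: elements per indexed slice; totalSize: total elements;
// numIndices: length of the index vector; mpc: multiProcessorCount.
static LaunchDims pick_index_launch(long sliceSize, long totalSize,
                                    long numIndices, int mpc,
                                    bool *use_small_index) {
  *use_small_index = (numIndices <= 16); // few indices: each thread walks them
  long work = *use_small_index ? sliceSize : totalSize;
  LaunchDims dims;
  dims.block = std::min(work, 128L);
  dims.grid = std::min(ceil_div_l(work, 128L), (long)mpc * 8); // occupancy cap
  return dims;
}

int main() {
  bool small = false;
  LaunchDims dims = pick_index_launch(/*sliceSize=*/4096, /*totalSize=*/32768,
                                      /*numIndices=*/8, /*mpc=*/80, &small);
  std::printf("small=%d grid=%ld block=%ld\n", (int)small, dims.grid, dims.block);
  return 0;
}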
// implementation based on "A new Direct Connected Component Labeling and Analysis Algorithms for // GPUs" // https://ieeexplore.ieee.org/document/8596835 #include <vector> #include "connected_component.h" #include "thrust/for_each.h" #include "thrust/iterator/counting_iterator.h" namespace mmdeploy { __device__ int start_distance(unsigned pixels, int tx) { unsigned v = ~(pixels << (32 - tx)); return __clz(reinterpret_cast<int&>(v)); } __device__ int end_distance(unsigned pixels, int tx) { unsigned v = ~(pixels >> (tx + 1)); return __ffs(reinterpret_cast<int&>(v)); } template <typename T> __device__ void swap(T& x, T& y) { T tmp = x; x = y; y = tmp; } __device__ void merge(int* label, int u, int v) { // find root of u while (u != v && u != label[u]) { u = label[u]; } // find root of v while (u != v && v != label[v]) { v = label[v]; } while (u != v) { // post-condition: u > v if (u < v) swap(u, v); // try to set label[u] = v auto w = atomicMin(label + u, v); // if u is modified by other threads, try again u = u == w ? v : w; } } __host__ __device__ int div_up(int x, int y) { return (x + y - 1) / y; } __host__ __device__ int round_up(int x, int y) { return div_up(x, y) * y; } template <int block_w, int block_h> __global__ void LabelStripsKernel(const uint8_t* mask, int h, int w, int* label) { __shared__ unsigned shared_pixels[block_h]; auto tx = static_cast<int>(threadIdx.x); auto ty = static_cast<int>(threadIdx.y); auto x0 = tx + static_cast<int>(blockIdx.x * blockDim.x); auto y0 = ty + static_cast<int>(blockIdx.y * blockDim.y); auto w_32 = round_up(w, 32); for (auto y = y0; y < h; y += blockDim.y * gridDim.y) { //* 0 -> current line //* 1 -> line above int distance0 = 0; int distance1 = 0; for (auto x = x0; x < w_32; x += blockDim.x * gridDim.x) { unsigned active = __ballot_sync(0xffffffff, x < w); if (x < w) { auto key = y * w + x; auto p0 = mask[y * w + x]; auto pixels0 = __ballot_sync(active, p0); auto s_dist0 = start_distance(pixels0, tx); if (p0 && s_dist0 == 0) { auto l = tx ? key : key - distance0; label[y * w + x] = static_cast<int>(l); } if (tx == 0) { shared_pixels[ty] = pixels0; } __syncthreads(); auto pixels1 = ty ? shared_pixels[ty - 1] : 0; int p1 = (pixels1 & (1 << tx)); int s_dist1 = start_distance(pixels1, tx); if (tx == 0) { s_dist0 = distance0; s_dist1 = distance1; } if (p0 && p1 && (s_dist0 == 0 || s_dist1 == 0)) { int label0 = key - s_dist0; int label1 = key - w - s_dist1; merge(label, label0, label1); } auto d1 = start_distance(pixels1, 32); distance1 = d1 == 32 ? d1 + distance1 : d1; auto d0 = start_distance(pixels0, 32); distance0 = d0 == 32 ? 
d0 + distance0 : d0; } } } } __global__ void MergeStripsKernel(const uint8_t* mask, int h, int w, int* label) { auto tx = threadIdx.x; auto ty = threadIdx.y; auto x0 = tx + blockIdx.x * blockDim.x; auto y0 = ty + blockIdx.y * blockDim.y; auto w_32 = round_up(w, 32); for (auto y = y0; y < h; y += blockDim.y * gridDim.y) { if (y > 0) { for (auto x = x0; x < w_32; x += blockDim.x * gridDim.x) { unsigned active = __ballot_sync(0xffffffff, x < w); if (x < w) { auto key0 = y * w + x; auto key1 = key0 - w; auto p0 = mask[key0]; auto p1 = mask[key1]; auto pixels0 = __ballot_sync(active, p0); auto pixels1 = __ballot_sync(active, p1); if (p0 && p1) { auto s_dist0 = start_distance(pixels0, tx); auto s_dist1 = start_distance(pixels1, tx); if (s_dist0 == 0 || s_dist1 == 0) { merge(label, key0 - s_dist0, key1 - s_dist1); } } } } } } } __device__ int encode(int label) { return -2 - label; } __device__ int decode(int label) { return -2 - label; } struct _discretize_label_op { int* label; int* n_comp; __device__ void operator()(int index) const { if (label[index] == index) { auto comp = atomicAdd(n_comp, 1); label[index] = encode(comp); } } }; struct _decode_label_op { const int* label; int* output; __device__ void operator()(int index) const { auto comp = label[index]; output[index] = comp < -1 ? decode(comp) + 1 : 0; } }; __global__ void RelabelStripsKernel(const uint8_t* mask, int h, int w, int* label) { auto tx = threadIdx.x; auto ty = threadIdx.y; auto x0 = tx + blockIdx.x * blockDim.x; auto y0 = ty + blockIdx.y * blockDim.y; const auto stride_x = static_cast<int>(blockDim.x * gridDim.x); const auto stride_y = static_cast<int>(blockDim.y * gridDim.y); const auto w_32 = round_up(w, 32); for (auto y = y0; y < h; y += stride_y) { for (auto x = x0; x < w_32; x += stride_x) { unsigned active = __ballot_sync(0xffffffff, x < w); if (x < w) { auto k = y * w + x; auto p = mask[k]; auto pixels = __ballot_sync(active, p); auto s_dist = start_distance(pixels, tx); auto idx = 0; if (p && s_dist == 0) { idx = label[k]; while (idx > 0) { idx = label[idx]; } } idx = __shfl_sync(active, idx, tx - s_dist); if (p) { label[k] = idx; } } } } } __global__ void ComputeStatsKernel_v2(const uint8_t* mask, const int* label, const float* score, int h, int w, float* comp_score, int* comp_area) { auto tx = threadIdx.x; auto ty = threadIdx.y; auto x0 = tx + blockIdx.x * blockDim.x; auto y0 = ty + blockIdx.y * blockDim.y; const auto stride_x = static_cast<int>(blockDim.x * gridDim.x); const auto stride_y = static_cast<int>(blockDim.y * gridDim.y); const auto w_32 = round_up(w, 32); for (auto y = y0; y < h; y += stride_y) { for (auto x = x0; x < w_32; x += stride_x) { unsigned active = __ballot_sync(0xffffffff, x < w); if (x < w) { auto k = y * w + x; auto p = mask[k]; auto pixels = __ballot_sync(active, p); auto s_dist = start_distance(pixels, tx); auto count = end_distance(pixels, tx); float s = p ? score[k] : 0; for (int offset = 16; offset > 0; offset /= 2) { auto v = __shfl_down_sync(active, s, offset); // mask out past-the-end items s += offset < count ? 
v : 0.f; } if (p && s_dist == 0) { auto idx = decode(label[k]); atomicAdd(comp_area + idx, count); atomicAdd(comp_score + idx, s); } } } } } __global__ void GetContoursKernel(const int* label, int h, int w, int2* contour, int* size) { const auto x0 = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x); const auto y0 = static_cast<int>(threadIdx.y + blockIdx.y * blockDim.y); const auto stride_x = static_cast<int>(blockDim.x * gridDim.x); const auto stride_y = static_cast<int>(blockDim.y * gridDim.y); for (auto y = y0; y < h; y += stride_y) { for (auto x = x0; x < w; x += stride_x) { const auto index = y * w + x; // encoded label const auto comp = label[index]; if (comp < -1) { // non-linear filters const auto l = x > 0 && label[index - 1] == comp; const auto t = y > 0 && label[index - w] == comp; const auto r = x < w - 1 && label[index + 1] == comp; const auto b = y < h - 1 && label[index + w] == comp; const auto tl = y > 0 && x > 0 && label[index - w - 1] == comp; const auto tr = y > 0 && x < w - 1 && label[index - w + 1] == comp; const auto bl = y < h - 1 && x > 0 && label[index + w - 1] == comp; const auto br = y < h - 1 && x < w - 1 && label[index + w + 1] == comp; if (!((l && r) || (t && b) || (tl && br) || (tr && bl))) { const auto p = atomicAdd(size, 1); contour[p] = {index, decode(comp)}; } } } } } struct ConnectedComponents::Impl { public: explicit Impl(cudaStream_t stream); void Resize(int height, int width); int GetComponents(const uint8_t* d_mask, int* h_label); void GetContours(std::vector<std::vector<cv::Point>>& corners); void GetStats(const uint8_t* d_mask, const float* d_score, std::vector<float>& scores, std::vector<int>& areas); ~Impl(); int* d_label_{nullptr}; float* d_comp_score_{nullptr}; int* d_comp_area_{nullptr}; int* d_contour_{nullptr}; // int2 int* d_contour_size_{nullptr}; int* d_n_comp_{nullptr}; int n_comp_{0}; int height_{0}; int width_{0}; size_t size_{0}; size_t capacity_{0}; double growth_factor_{1.1}; cudaStream_t stream_{nullptr}; bool owned_stream_{false}; }; int ConnectedComponents::Impl::GetComponents(const uint8_t* d_mask, int* h_label) { { dim3 threads(32, 4); dim3 blocks(1, div_up(height_, (int)threads.y)); cudaMemsetAsync(d_label_, -1, sizeof(int) * size_, stream_); LabelStripsKernel<32, 4><<<blocks, threads, 0, stream_>>>(d_mask, height_, width_, d_label_); } { dim3 threads(32, 4); dim3 blocks(div_up(width_, (int)threads.x), div_up(height_, (int)threads.y)); MergeStripsKernel<<<blocks, threads, 0, stream_>>>(d_mask, height_, width_, d_label_); cudaMemsetAsync(d_n_comp_, 0, sizeof(int), stream_); thrust::for_each_n(thrust::cuda::par.on(stream_), thrust::counting_iterator<int>(0), height_ * width_, _discretize_label_op{d_label_, d_n_comp_}); RelabelStripsKernel<<<blocks, threads, 0, stream_>>>(d_mask, height_, width_, d_label_); } cudaMemcpyAsync(&n_comp_, d_n_comp_, sizeof(int), cudaMemcpyDefault, stream_); if (h_label) { dim3 threads(32, 4); dim3 blocks(div_up(width_, (int)threads.x), div_up(height_, (int)threads.y)); // reuse d_comp_area_, which is also an int buffer thrust::for_each_n(thrust::cuda::par.on(stream_), thrust::counting_iterator<int>(0), height_ * width_, _decode_label_op{d_label_, d_comp_area_}); cudaMemcpyAsync(h_label, d_comp_area_, sizeof(int) * size_, cudaMemcpyDefault, stream_); } cudaStreamSynchronize(stream_); return n_comp_; } void ConnectedComponents::Impl::GetStats(const uint8_t* d_mask, const float* d_score, std::vector<float>& scores, std::vector<int>& areas) { cudaMemsetAsync(d_comp_score_, 0, sizeof(float) * 
size_, stream_); cudaMemsetAsync(d_comp_area_, 0, sizeof(int) * size_, stream_); dim3 threads(32, 4); dim3 blocks(div_up(width_, (int)threads.x), div_up(height_, (int)threads.y)); ComputeStatsKernel_v2<<<blocks, threads, 0, stream_>>>(d_mask, d_label_, d_score, height_, width_, d_comp_score_, d_comp_area_); scores.resize(n_comp_); areas.resize(n_comp_); cudaMemcpyAsync(scores.data(), d_comp_score_, sizeof(float) * n_comp_, cudaMemcpyDefault, stream_); cudaMemcpyAsync(areas.data(), d_comp_area_, sizeof(int) * n_comp_, cudaMemcpyDefault, stream_); cudaStreamSynchronize(stream_); } void ConnectedComponents::Impl::GetContours(std::vector<std::vector<cv::Point>>& corners) { cudaMemsetAsync(d_contour_size_, 0, sizeof(int), stream_); auto d_contour = reinterpret_cast<int2*>(d_contour_); { dim3 threads(32, 4); dim3 blocks(div_up(width_, (int)threads.x), div_up(height_, (int)threads.y)); GetContoursKernel<<<blocks, threads, 0, stream_>>>(d_label_, height_, width_, d_contour, d_contour_size_); } int contour_size{}; cudaMemcpyAsync(&contour_size, d_contour_size_, sizeof(int), cudaMemcpyDefault, stream_); cudaStreamSynchronize(stream_); std::vector<int2> index_comp(contour_size); cudaMemcpyAsync(index_comp.data(), d_contour_, sizeof(int2) * contour_size, cudaMemcpyDefault, stream_); cudaStreamSynchronize(stream_); corners.resize(n_comp_); for (const auto& p : index_comp) { auto comp = p.y; assert(0 <= comp && comp < n_comp_); corners[comp].emplace_back(p.x % width_, p.x / width_); } } void ConnectedComponents::Impl::Resize(int height, int width) { size_t size = height * width; if (size > capacity_) { if (!capacity_) { capacity_ = size; } else { while (capacity_ < size) { capacity_ *= growth_factor_; } } cudaFree(d_label_); cudaFree(d_comp_score_); cudaFree(d_comp_area_); cudaFree(d_contour_); cudaMalloc(&d_label_, sizeof(int) * capacity_); cudaMalloc(&d_comp_score_, sizeof(float) * capacity_); cudaMalloc(&d_comp_area_, sizeof(int) * capacity_); cudaMalloc(&d_contour_, sizeof(int2) * capacity_); } if (!d_contour_size_) { cudaMalloc(&d_contour_size_, sizeof(int)); } if (!d_n_comp_) { cudaMalloc(&d_n_comp_, sizeof(int)); } height_ = height; width_ = width; size_ = size; } ConnectedComponents::Impl::Impl(cudaStream_t stream) : stream_(stream) { if (!stream_) { cudaStreamCreate(&stream_); owned_stream_ = true; } } ConnectedComponents::Impl::~Impl() { cudaFree(d_label_); cudaFree(d_comp_score_); cudaFree(d_comp_area_); cudaFree(d_contour_); cudaFree(d_contour_size_); cudaFree(d_n_comp_); if (owned_stream_) { cudaStreamDestroy(stream_); } } ConnectedComponents::ConnectedComponents(void* stream) : impl_(std::make_unique<Impl>((cudaStream_t)stream)) {} ConnectedComponents::~ConnectedComponents() = default; void ConnectedComponents::Resize(int height, int width) { impl_->Resize(height, width); } int ConnectedComponents::GetComponents(const uint8_t* d_mask, int* h_label) { return impl_->GetComponents(d_mask, h_label); } void ConnectedComponents::GetContours(std::vector<std::vector<cv::Point>>& corners) { return impl_->GetContours(corners); } void ConnectedComponents::GetStats(const uint8_t* d_mask, const float* d_score, std::vector<float>& scores, std::vector<int>& areas) { return impl_->GetStats(d_mask, d_score, scores, areas); } } // namespace mmdeploy
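// ---------------------------------------------------------------------------
// Usage sketch (assumes connected_component.h declares the public class whose
// methods are wrapped above; buffer names here are illustrative): labels are
// produced by GetComponents, and GetStats / GetContours read those labels, so
// both must be called after GetComponents on the same instance.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>
#include "connected_component.h"

void label_mask_example(const uint8_t *d_mask, const float *d_score,
                        int height, int width, cudaStream_t stream) {
  mmdeploy::ConnectedComponents cc(stream); // nullptr: an internal stream is created
  cc.Resize(height, width);                 // (re)allocates device buffers as needed

  std::vector<int> h_label(static_cast<std::size_t>(height) * width);
  int n_comp = cc.GetComponents(d_mask, h_label.data()); // nullptr skips the copy-back

  std::vector<float> scores; // per-component sum of d_score over its pixels
  std::vector<int> areas;    // per-component pixel counts
  cc.GetStats(d_mask, d_score, scores, areas);

  std::vector<std::vector<cv::Point>> contours; // boundary pixels per component
  cc.GetContours(contours);

  (void)n_comp;
}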
#include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <curand.h> #include <stdio.h> #include <math.h> #include <float.h> #include "cudaCG_all.h" //#define NUM_THREADS 512 #define BLOCK 512 #define CEIL_DIV(num, denum) (num+denum-1)/denum #define IDX(b,l,t,m,i,cum,L) (i+2*(m+t*(2*l+1)+cum[l]+b*cum[L+1])) //cum[l] remembers the start of the middle channel for l (in (?, tm, 2)) #define WIDX(l,tOut,tMid,i,cum,tauMids) (i+2*(tMid+tauMids[l]*tOut+cum[l])) #define PLUSMINUS(k) ((k%2==1) ? -1 : 1) #define MAX_LMAX 512-1 #define MAX_(a,b) ((a<b)?b:a) #define MIN_(a,b) ((a>b)?b:a) __constant__ int RESERVED_T[MAX_LMAX+1]; // __constant__ int RESERVED_CUMU_TM[MAX_LMAX+2]; // namespace { //================================================================================================================== __global__ void cuda_FN_forward_job( //from cudaBatchNorm_forward_job const float* FF, float* moving_std, const int* t_FF, const int* cumu_t_FF, float cnt, float eps, int Lmax, int Batch_size, int update_std, int nthreads){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < nthreads){ int t_offset = 0, l=0; while(l<=Lmax){ t_offset+=t_FF[l]; if (t_offset <= global_threadId){ l++; } else { t_offset -= t_FF[l]; break; } } int tmid = global_threadId - t_offset; if (update_std){ //calculate mean double N = (double) Batch_size * (2*l+1); double mean = 0., mean_sq = 0., norm = 0.; for (int b = 0; b < Batch_size; b++){ for (int m = 0; m < 2*l+1; m++){ float realm = FF[IDX(b,l,tmid,m,0,cumu_t_FF,Lmax)]; float imagm = FF[IDX(b,l,tmid,m,1,cumu_t_FF,Lmax)]; norm = realm*realm+imagm*imagm; mean_sq += norm; mean += sqrt(norm); } } double std = mean_sq/N - (mean/N * mean/N); if (std <= 0){ //numerical stability std = 0.; } else{ std = sqrt(std); } moving_std[t_offset + tmid] *= cnt / (cnt + 1); moving_std[t_offset + tmid] += std / (cnt + 1); } } } __global__ void cudaWeightTransform_forward_job( const float* FF, const float* W, float* out, const int* t_FF, const int* cumu_tm_FF, const int* cumu_tm_O, const int* cumu_tt_W, const float* FN_stds, float eps, int Lmax) { int b = blockIdx.z; int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; if (global_threadId < cumu_tm_O[Lmax+1]){ //first, loop to get l int ltm = global_threadId; int l=0; while (cumu_tm_O[l] <= ltm){l++;} l--; int tmid_offset = 0; for (int templ = 0; templ< l; templ++){ tmid_offset += t_FF[templ]; } int tout = (ltm - cumu_tm_O[l]) / (2*l+1); int m = (ltm - cumu_tm_O[l]) % (2*l+1); float real=0.0, imag=0.0, divisor = 1.0; for (int tmid = 0; tmid < t_FF[l]; tmid++){ float realw = W[WIDX(l,tout,tmid,0,cumu_tt_W,t_FF)]; float imagw = W[WIDX(l,tout,tmid,1,cumu_tt_W,t_FF)]; float realm = FF[IDX(b,l,tmid,m,0,cumu_tm_FF,Lmax)]; float imagm = FF[IDX(b,l,tmid,m,1,cumu_tm_FF,Lmax)]; if (FN_stds){ divisor = MAX_(eps, FN_stds[tmid_offset+tmid]); } real += (realw * realm - imagw * imagm) / divisor; imag += (realw * imagm + imagw * realm) / divisor; } out[IDX(b,l,tout,m,0,cumu_tm_O,Lmax)] = real; out[IDX(b,l,tout,m,1,cumu_tm_O,Lmax)] = imag; } } __global__ void cudaWeightGrad1_backward_job( const float* FF, float* __restrict__ grad_W, const float* grad_out, const int* t_FF, const int* cumu_tm_FF, const int* cumu_tm_O, const int* cumu_tt_W, const float* FN_stds, float eps, int Lmax, int size_W){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.z; if (global_threadId < size_W){ int l=0; while (cumu_tt_W[l] <= global_threadId){l++;} l--; int tmid_offset = 0; for (int templ = 0; templ< 
l; templ++){ tmid_offset += t_FF[templ]; } int tout = (global_threadId - cumu_tt_W[l]) / t_FF[l]; int tmid = (global_threadId - cumu_tt_W[l]) % t_FF[l]; float real=0.0, imag=0.0, divisor = 1.0; if (FN_stds){ divisor = MAX_(eps, FN_stds[tmid_offset+tmid]); } for (int m = 0; m < 2*l+1; m++){ float realo = grad_out[IDX(b,l,tout,m,0,cumu_tm_O,Lmax)]; float imago = grad_out[IDX(b,l,tout,m,1,cumu_tm_O,Lmax)]; float realm = FF[IDX(b,l,tmid,m,0,cumu_tm_FF,Lmax)]; float imagm = FF[IDX(b,l,tmid,m,1,cumu_tm_FF,Lmax)]; real += (realm * realo + imagm * imago)/divisor; imag += (realm * imago - realo * imagm)/divisor; } atomicAdd(&(grad_W[WIDX(l,tout,tmid,0,cumu_tt_W,t_FF)]), real); atomicAdd(&(grad_W[WIDX(l,tout,tmid,1,cumu_tt_W,t_FF)]), imag); } } __global__ void cudaMiddleGrad_backward_job( float* grad_FF, const float* W, const float* grad_out, const float* moving_std, const int* t_FF, const int* cumu_tm_FF, const int* t_O, const int* cumu_tm_O, const int* cumu_tt_W, const float* FN_stds, float eps, int Lmax){ int global_threadId = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.z; if (global_threadId < cumu_tm_FF[Lmax+1]){ int l=0; while (cumu_tm_FF[l] <= global_threadId){l++;} l--; int tmid_offset = 0; for (int templ = 0; templ< l; templ++){ tmid_offset += t_FF[templ]; } int tm = global_threadId - cumu_tm_FF[l]; int tmid = tm / (2*l+1), m = tm % (2*l+1); float real=0.0, imag=0.0, divisor = 1.0; if (FN_stds){ divisor = MAX_(eps, FN_stds[tmid_offset+tmid]); } for (int tout = 0; tout < t_O[l]; tout++){ float realo = grad_out[IDX(b,l,tout,m,0,cumu_tm_O,Lmax)]; float imago = grad_out[IDX(b,l,tout,m,1,cumu_tm_O,Lmax)]; float realw = W[WIDX(l,tout,tmid,0,cumu_tt_W,t_FF)]; float imagw = W[WIDX(l,tout,tmid,1,cumu_tt_W,t_FF)]; real += realw * realo + imagw * imago; imag += realw * imago - realo * imagw; } grad_FF[IDX(b,l,tmid,m,0,cumu_tm_FF,Lmax)] = real / divisor; grad_FF[IDX(b,l,tmid,m,1,cumu_tm_FF,Lmax)] = imag / divisor; //} } } } // namespace void FN_WMM_forward_cuda(torch::Tensor FF_tensor, torch::Tensor O_tensor, torch::Tensor W_tensor, int L, int B, int size_FN, int size_WMM, int* d_t_FF, int* d_cumu_tm_FF, int* d_cumu_tm_O, int* d_cumu_tt_W, float* FN_stds, float FN_cnt, float FN_eps, int FN_flags){ float* FF = FF_tensor.data<float>(); float* O = O_tensor.data<float>(); float* W = W_tensor.data<float>(); dim3 DimBlock(BLOCK, 1, 1); int update_std = (FN_flags & 0x2) ? 
1 : 0; if (FN_stds && update_std){ dim3 DimGrid2(CEIL_DIV(size_FN, BLOCK), 1, 1); cuda_FN_forward_job<<<DimGrid2, DimBlock>>>(FF, FN_stds, d_t_FF, d_cumu_tm_FF, FN_cnt, FN_eps, L, B, update_std, size_FN); cudaDeviceSynchronize(); } dim3 DimGrid(CEIL_DIV(size_WMM, BLOCK), 1, B); cudaWeightTransform_forward_job<<<DimGrid, DimBlock>>>(FF, W, O, d_t_FF, d_cumu_tm_FF, d_cumu_tm_O, d_cumu_tt_W, FN_stds, FN_eps, L); cudaDeviceSynchronize(); } void FN_WMM_backward_cuda(torch::Tensor grad_out_tensor, torch::Tensor grad_FF_tensor, torch::Tensor grad_W_tensor, //inputs torch::Tensor FF_tensor, torch::Tensor W_tensor, int L, int B, int size_FF, int size_W, int* d_t_FF, int* d_cumu_tm_FF, int* d_t_O, int* d_cumu_tm_O, int* d_cumu_tt_W, float* FN_stds, float FN_cnt, float FN_eps, int FN_flags){ float* FF = FF_tensor.data<float>(); float* W = W_tensor.data<float>(); float* grad_FF = grad_FF_tensor.data<float>(); float* grad_out = grad_out_tensor.data<float>(); float* grad_W = grad_W_tensor.data<float>(); dim3 DimBlock(BLOCK, 1, 1); dim3 DimGrid(CEIL_DIV(size_W, BLOCK), 1, B); cudaWeightGrad1_backward_job<<<DimGrid, DimBlock>>>( FF, grad_W, grad_out, d_t_FF, d_cumu_tm_FF, d_cumu_tm_O, d_cumu_tt_W, FN_stds, FN_eps, L, size_W); cudaDeviceSynchronize(); dim3 DimGrid0(CEIL_DIV(size_FF, BLOCK), 1, B); cudaMiddleGrad_backward_job<<<DimGrid0, DimBlock>>>(grad_FF, W, grad_out, FN_stds, d_t_FF, d_cumu_tm_FF, d_t_O, d_cumu_tm_O, d_cumu_tt_W, FN_stds, FN_eps, L); cudaThreadSynchronize(); }
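// ---------------------------------------------------------------------------
// Reference restatement of the IDX() macro above (host-side, for readability
// only; not used by the kernels): fragments are packed per batch as contiguous
// (l, tau, m, re/im) blocks, where cumu_tm[l] is the running sum of
// tau[l'] * (2*l' + 1) for l' < l and cumu_tm[Lmax + 1] is the per-batch total.
// ---------------------------------------------------------------------------
static inline int flat_fragment_index(int b, int l, int t, int m, int i,
                                      const int *cumu_tm, int Lmax) {
  // i selects the real (0) or imaginary (1) part, exactly as IDX() does.
  return i + 2 * (m + t * (2 * l + 1) + cumu_tm[l] + b * cumu_tm[Lmax + 1]);
}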
#include <cuda.h> #include <string> #include "utils/blitz_math_function.h" #include "utils/blitz_gpu_function.h" namespace blitz { namespace kernels { scoped_ptr<CubinLoadModule> CubinModule::instance_(0); boost::once_flag CubinModule::flag_ = BOOST_ONCE_INIT; template<> void SassGemm( const float* A, const float* B, float* C, bool transa, bool transb, float alpha, float beta, size_t M, size_t N, size_t K) { CUfunction function; size_t lda, ldb, ldc = N; #ifdef BLITZ_PERFORMANCE float elapsed_time = 0.0f; CUevent event_start, event_stop; BLITZ_GPU_TIMER_START(elapsed_time, event_start, event_stop); #endif // BLITZ_PERFORMANCE // create kernel string kernel; if (transa == true && transb == false) { lda = M * 32; ldb = N * 32; if (M % 4 == 0 && N % 4 == 0) { kernel = "sgemm_tn_128x128_vec"; } else { kernel = "sgemm_tn_128x128"; } } else if (transa == false && transb == true) { lda = K; ldb = K; if (K % 4 == 0) { kernel = "sgemm_nt_128x128_vec"; } else { kernel = "sgemm_nt_128x128"; } } else if (transa == false && transb == false) { lda = K; ldb = N * 32; if (K % 4 == 0 && N % 4 == 0) { kernel = "sgemm_nn_128x128_vec"; } else { kernel = "sgemm_nn_128x128"; } } else { LOG(FATAL) << "Not support both matrice transport!"; } // kernel call, asynrhonize function = CubinModule::GetFunction(kernel); void* params[] = {&A, &B, &C, &alpha, &beta, &lda, &ldb, &ldc, (void*)&M, (void*)&N, (void*)&K}; // TODO(keren): multiple kernels size_t sizeA = 128, sizeB = 128; size_t gridA = M / sizeA + (M % sizeA != 0); size_t gridB = N / sizeB + (N % sizeB != 0); // TODO(keren): adjust number of threads size_t threads = 256; // lanuch kernel CUresult res = cuLaunchKernel(function, 1, gridA, gridB, threads, 1, 1, 0, 0, params, NULL); if (res != CUDA_SUCCESS) { LOG(FATAL) << "Error launching kernel "; } #ifdef BLITZ_PERFORMANCE double computations = 2 * M * N * K; BLITZ_GPU_TIMER_END(elapsed_time, event_start, event_stop); BLITZ_GPU_TIMER_INFO(computations, elapsed_time); #endif // BLITZ_PERFORMANCE } template<> void SassGemm( const double* A, const double* B, double* C, bool transa, bool transb, double alpha, double beta, size_t M, size_t N, size_t K) { LOG(FATAL) << "sass kernel dost not support double precision"; } template<> void SassConvolution2DForward( float* I, float* O, float* F, size_t N, size_t C, size_t H, size_t W, size_t R, size_t S, size_t K, size_t P, size_t Q, size_t pad_h, size_t pad_w, size_t str_h, size_t str_w) { float alpha = 1.0f; size_t D = 1, M = 1, T = 1; size_t str_d = 1; size_t pad_d = 0; size_t WN, HW, DHW, HWN, DHWN; size_t RS, RST, KRST, CRST; size_t PQ, QN, MPQ, PQN, MPQN; size_t magic_HW, shift_HW; size_t magic_W, shift_W; size_t magic_RST, shift_RST; size_t magic_RS, shift_RS; size_t magic_S, shift_S; size_t magic_PQ, shift_PQ; size_t magic_Q, shift_Q; size_t magic_PQu, shift_PQu; size_t magic_Qu, shift_Qu; size_t magic_str_w, shift_str_w; size_t magic_str_h, shift_str_h; size_t magic_str_d, shift_str_d; size_t grid_P = 1; size_t grid_Q = 1; size_t grid_PQ = grid_P * grid_Q; size_t grid_PQM = grid_PQ * M; // input WN = W * N; HW = H * W; DHW = D * HW; HWN = H * WN; DHWN = HWN; // filter RS = R * S; RST = RS; KRST = K * RST; CRST = C * RST; // output QN = Q * N; PQ = P * Q; PQN = P * QN; MPQ = PQ; MPQN = PQN; // magic numbers utils::Magic32(DHW, HW, magic_HW, shift_HW); utils::Magic32(HW, W, magic_W, shift_W); utils::Magic32(CRST, RST, magic_RST, shift_RST); utils::Magic32(RST + 32, RS, magic_RS, shift_RS); utils::Magic32(RS + 32, S, magic_S, shift_S); utils::Magic32(MPQ, PQ, 
magic_PQ, shift_PQ); utils::Magic32(PQ, Q, magic_Q, shift_Q); utils::Magic32(grid_PQM, grid_PQ, magic_PQu, shift_PQu); utils::Magic32(grid_PQ, grid_Q, magic_Qu, shift_Qu); utils::Magic32(W + S - pad_w - 2, str_w, magic_str_w, shift_str_w); utils::Magic32(H + R - pad_h - 2, str_h, magic_str_h, shift_str_h); utils::Magic32(D + T - pad_d - 2, str_d, magic_str_d, shift_str_d); // test param set up TODO(keren): erase float *test_param; // arguments size_t gridX, gridY, gridZ; CUresult result; CUfunction function; string kernel_name; void *args[37] = { &test_param, &O, &I, &F, &alpha, &N, &K, &D, &H, &W, &WN, &HWN, &DHWN, &C, &KRST, &RST, &RS, &magic_RS, &shift_RS, &S, &magic_S, &shift_S, &pad_d, &pad_h, &pad_w, &str_d, &str_h, &str_w, &Q, &PQ, &QN, &PQN, &MPQN, &magic_Q, &shift_Q, &magic_PQ, &shift_PQ}; if (K <= 64 || N <= 64) { gridX = MPQ; gridY = K / 64 + (K % 64 != 0); gridZ = N / 64 + (N % 64 != 0); kernel_name = "sconv_fprop_K64_N64"; function = CubinModule::GetFunction(kernel_name); result = cuLaunchKernel(function, gridX, gridY, gridZ, 64, 1, 1, 4 * RST * 2, 0, args, NULL); } else { gridX = MPQ; gridY = K / 128 + (K % 128 != 0); gridZ = N / 128 + (N % 128 != 0); kernel_name = "sconv_fprop_K128_N128"; function = CubinModule::GetFunction(kernel_name); result = cuLaunchKernel(function, gridX, gridY, gridZ, 256, 1, 1, 4 * RST * 2, 0, args, NULL); } if (result != CUDA_SUCCESS) { LOG(FATAL) << "Launch kernel: " << kernel_name << " error!"; } } template<> void SassConvolution2DBackward( float* I, float* O, float* F, size_t N, size_t C, size_t H, size_t W, size_t R, size_t S, size_t K, size_t P, size_t Q, size_t pad_h, size_t pad_w, size_t str_h, size_t str_w) { float alpha = 1.0f; size_t D = 1, M = 1, T = 1; size_t str_d = 1; size_t pad_d = 0; size_t WN, HW, DHW, HWN, DHWN; size_t RS, RST, CRST; size_t PQ, QN, MPQ, PQN, MPQN; size_t magic_HW, shift_HW; size_t magic_W, shift_W; size_t magic_RST, shift_RST; size_t magic_RS, shift_RS; size_t magic_S, shift_S; size_t magic_PQ, shift_PQ; size_t magic_Q, shift_Q; size_t magic_PQu, shift_PQu; size_t magic_Qu, shift_Qu; size_t magic_str_w, shift_str_w; size_t magic_str_h, shift_str_h; size_t magic_str_d, shift_str_d; size_t grid_P = 1; size_t grid_Q = 1; size_t grid_PQ = grid_P * grid_Q; size_t grid_PQM = grid_PQ * M; size_t CRST32, MPQN32; // input WN = W * N; HW = H * W; DHW = D * HW; HWN = H * WN; DHWN = HWN; // filter RS = R * S; RST = RS; CRST = C * RST; // output QN = Q * N; PQ = P * Q; PQN = P * QN; MPQ = PQ; MPQN = PQN; // special bprop CRST32 = 32 * CRST; MPQN32 = 32 * MPQN; // magic numbers utils::Magic32(DHW, HW, magic_HW, shift_HW); utils::Magic32(HW, W, magic_W, shift_W); utils::Magic32(CRST, RST, magic_RST, shift_RST); utils::Magic32(RST + 32, RS, magic_RS, shift_RS); utils::Magic32(RS + 32, S, magic_S, shift_S); utils::Magic32(MPQ, PQ, magic_PQ, shift_PQ); utils::Magic32(PQ, Q, magic_Q, shift_Q); utils::Magic32(grid_PQM, grid_PQ, magic_PQu, shift_PQu); utils::Magic32(grid_PQ, grid_Q, magic_Qu, shift_Qu); utils::Magic32(W + S - pad_w - 2, str_w, magic_str_w, shift_str_w); utils::Magic32(H + R - pad_h - 2, str_h, magic_str_h, shift_str_h); utils::Magic32(D + T - pad_d - 2, str_d, magic_str_d, shift_str_d); // test param set up TODO(keren): erase float *test_param; // arguments size_t gridX, gridY, gridZ; CUresult result; CUfunction function; string kernel_name; if (C % 64 == 0) { // C64 || C128 if (C > 64) { void *args[45] = { &test_param, &I, &O, &F, &alpha, &N, &C, &M, &P, &Q, &QN, &PQN, &MPQN, &K, &CRST, &RST, &RS, &magic_RS, 
&shift_RS, &S, &magic_S, &shift_S, &pad_d, &pad_h, &pad_w, &str_d, &str_h, &str_w, &W, &HW, &WN, &HWN, &DHWN, &magic_W, &shift_W, &magic_HW, &shift_HW, &R, &T, &magic_str_w, &shift_str_w, &magic_str_h, &shift_str_h, &magic_str_d, &shift_str_d}; gridX = DHW; gridY = C / 128 + (C % 128 != 0); gridZ = N / 128 + (N % 128 != 0); kernel_name = "sconv_bprop_C128_N128"; function = CubinModule::GetFunction(kernel_name); result = cuLaunchKernel(function, gridX, gridY, gridZ, 256, 1, 1, 4 * RST * 2, 0, args, NULL); if (result != CUDA_SUCCESS) { LOG(FATAL) << "Launch kernel: " << kernel_name << " error!"; } } else { void *args[45] = { &test_param, &I, &O, &F, &alpha, &N, &C, &M, &P, &Q, &QN, &PQN, &MPQN, &K, &CRST, &RST, &RS, &magic_RS, &shift_RS, &S, &magic_S, &shift_S, &pad_d, &pad_h, &pad_w, &str_d, &str_h, &str_w, &W, &HW, &WN, &HWN, &DHWN, &magic_W, &shift_W, &magic_HW, &shift_HW, &R, &T, &magic_str_w, &shift_str_w, &magic_str_h, &shift_str_h, &magic_str_d, &shift_str_d}; gridX = DHW; gridY = C / 64 + (C % 64 != 0); gridZ = N / 64 + (N % 64 != 0); kernel_name = "sconv_bprop_C64_N64"; function = CubinModule::GetFunction(kernel_name); result = cuLaunchKernel(function, gridX, gridY, gridZ, 64, 1, 1, 4 * RST * 2, 0, args, NULL); if (result != CUDA_SUCCESS) { LOG(FATAL) << "Launch kernel: " << kernel_name << " error!"; } } } else { // C1 void *args[41] = { &test_param, &I, &O, &F, &alpha, &N, &K, &D, &H, &W, &WN, &HWN, &DHWN, &C, &CRST, &RST, &magic_RST, &shift_RST, &RS, &magic_RS, &shift_RS, &S, &magic_S, &shift_S, &pad_d, &pad_h, &pad_w, &str_d, &str_h, &str_w, &Q, &PQ, &QN, &PQN, &MPQN, &magic_Q, &shift_Q, &magic_PQ, &shift_PQ, &CRST32, &MPQN32}; gridX = MPQ; gridY = CRST / 32 + (CRST % 32 != 0); gridZ = N / 64 + (N % 64 != 0); kernel_name = "sconv_bprop_C1_N64"; function = CubinModule::GetFunction(kernel_name); result = cuLaunchKernel(function, gridX, gridY, gridZ, 32, 1, 1, 4 * RST * 2, 0, args, NULL); if (result != CUDA_SUCCESS) { LOG(FATAL) << "Launch kernel: " << kernel_name << " error!"; } } } template<> void SassConvolution2DUpdate( float* I, float* O, float* F, size_t N, size_t C, size_t H, size_t W, size_t R, size_t S, size_t K, size_t P, size_t Q, size_t pad_h, size_t pad_w, size_t str_h, size_t str_w) { float alpha = 1.0f; size_t D = 1, M = 1, T = 1; size_t str_d = 1; size_t pad_d = 0; size_t WN, HW, DHW, HWN, DHWN; size_t RS, RST, CRST; size_t PQ, QN, MPQ, PQN, MPQN; size_t magic_HW, shift_HW; size_t magic_W, shift_W; size_t magic_RST, shift_RST; size_t magic_RS, shift_RS; size_t magic_S, shift_S; size_t magic_PQ, shift_PQ; size_t magic_Q, shift_Q; size_t magic_PQu, shift_PQu; size_t magic_Qu, shift_Qu; size_t magic_str_w, shift_str_w; size_t magic_str_h, shift_str_h; size_t magic_str_d, shift_str_d; size_t grid_P = 1; size_t grid_Q = 1; size_t grid_PQ = grid_P * grid_Q; size_t grid_PQM = grid_PQ * M; // input WN = W * N; HW = H * W; DHW = D * HW; HWN = H * WN; DHWN = HWN; // filter RS = R * S; RST = RS; CRST = C * RST; // output QN = Q * N; PQ = P * Q; PQN = P * QN; MPQ = PQ; MPQN = PQN; // magic numbers utils::Magic32(DHW, HW, magic_HW, shift_HW); utils::Magic32(HW, W, magic_W, shift_W); utils::Magic32(CRST, RST, magic_RST, shift_RST); utils::Magic32(RST + 32, RS, magic_RS, shift_RS); utils::Magic32(RS + 32, S, magic_S, shift_S); utils::Magic32(MPQ, PQ, magic_PQ, shift_PQ); utils::Magic32(PQ, Q, magic_Q, shift_Q); utils::Magic32(grid_PQM, grid_PQ, magic_PQu, shift_PQu); utils::Magic32(grid_PQ, grid_Q, magic_Qu, shift_Qu); utils::Magic32(W + S - pad_w - 2, str_w, magic_str_w, 
shift_str_w); utils::Magic32(H + R - pad_h - 2, str_h, magic_str_h, shift_str_h); utils::Magic32(D + T - pad_d - 2, str_d, magic_str_d, shift_str_d); // test param set up TODO(keren): erase float *test_param; // arguments size_t gridX, gridY, gridZ; CUresult result; CUfunction function; string kernel_name; void *args[43] = { &test_param, &F, &I, &O, &alpha, &N, &K, &D, &H, &W, &WN, &HWN, &DHWN, &C, &CRST, &RST, &magic_RST, &shift_RST, &RS, &magic_RS, &shift_RS, &S, &magic_S, &shift_S, &pad_d, &pad_h, &pad_w, &str_d, &str_h, &str_w, &P, &Q, &PQ, &QN, &PQN, &MPQN, &magic_Qu, &shift_Qu, &magic_PQu, &shift_PQu, &grid_P, &grid_Q, &grid_PQ}; gridX = grid_PQM; gridY = CRST / 128 + (CRST % 128 != 0); gridZ = K / 128 + (K % 128 != 0); kernel_name = "sconv_update_C128_K128"; function = CubinModule::GetFunction(kernel_name); result = cuLaunchKernel(function, gridX, gridY, gridZ, 256, 1, 1, 0, 0, args, NULL); if (result != CUDA_SUCCESS) { LOG(FATAL) << "Launch kernel: " << kernel_name << " error!"; } } template<> void SassConvolution2DForward( double* I, double* O, double* F, size_t N, size_t C, size_t H, size_t W, size_t R, size_t S, size_t K, size_t P, size_t Q, size_t pad_h, size_t pad_w, size_t str_h, size_t str_w) { LOG(FATAL) << "sass kernel dost not support double precision"; } template<> void SassConvolution2DBackward( double* I, double* O, double* F, size_t N, size_t C, size_t H, size_t W, size_t R, size_t S, size_t K, size_t P, size_t Q, size_t pad_h, size_t pad_w, size_t str_h, size_t str_w) { LOG(FATAL) << "sass kernel dost not support double precision"; } template<> void SassConvolution2DUpdate( double* I, double* O, double* F, size_t N, size_t C, size_t H, size_t W, size_t R, size_t S, size_t K, size_t P, size_t Q, size_t pad_h, size_t pad_w, size_t str_h, size_t str_w) { LOG(FATAL) << "sass kernel dost not support double precision"; } // shuffle // K * C * T * R * S // to // K * T * R * S * C template<typename DType> __global__ void GPUFilterShuffle( const DType* input, DType* output, size_t TRSC, size_t TRS, size_t RSC, size_t SC, size_t C, size_t K, size_t RS, size_t magic_RS, size_t shift_RS, size_t S, size_t magic_S, size_t shift_S) { // C * K __shared__ DType tile[32][33]; size_t tx = threadIdx.x; size_t ty = threadIdx.y; size_t bk = blockIdx.x; size_t bc = blockIdx.y; size_t trs = blockIdx.z; // t = trs % rs // r = rs % s // s = rs - r * s size_t t = magic_RS * trs; t >>= shift_RS; size_t rs = trs - t * RS; size_t r = magic_S * rs; r >>= shift_S; size_t s = rs - r * S; size_t k = bk * 32 + tx; size_t c = bc * 32 + ty; for (size_t i = 0; i < 32; i += 8) { size_t ci = c + i; if (ci < C && k < K) tile[ty + i][tx] = input[k * TRSC + ci * TRS + t * RS + r * S + s]; } __syncthreads(); k = bk * 32 + ty; c = bc * 32 + tx; for (size_t i = 0; i < 32; i += 8) { size_t ki = k + i; if (ki < K && c < C) output[ki * TRSC + t * RSC + r * SC + s * C + c] = tile[tx][ty + i]; } } template<> void Filter2DShuffle( const float* input, float* output, size_t K, size_t C, size_t R, size_t S) { size_t T = 1; size_t TRSC, RSC, SC; size_t RST, RS; size_t magic_RS, shift_RS; size_t magic_S, shift_S; // output SC = S * C; RSC = R * SC; TRSC = T * RSC; // filter RS = R * S; RST = T * RS; utils::Magic32(RST + 32, RS, magic_RS, shift_RS); utils::Magic32(RS + 32, S, magic_S, shift_S); const size_t gridX = K / 32 + (K % 32 != 0); const size_t gridY = C / 32 + (C % 32 != 0); dim3 grid_dim(gridX, gridY, RST); dim3 block_dim(32, 8, 1); GPUFilterShuffle<<<grid_dim, block_dim>>>( input, output, TRSC, RST, RSC, SC, C, K, 
RS, magic_RS, shift_RS, S, magic_S, shift_S); } template<> void Filter2DShuffle( const double* input, double* output, size_t K, size_t C, size_t R, size_t S) { LOG(FATAL) << "sass kernel does not support double precision"; } } // namespace kernels } // namespace blitz
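// ---------------------------------------------------------------------------
// Reference sketch of the magic-number division used by the kernels above.
// Call sites such as utils::Magic32(nmax, d, magic, shift) supply a pair so
// that (x * magic) >> shift == x / d for every 0 <= x <= nmax, replacing
// integer division in the kernels by a multiply and a shift. The brute-force
// finder below is NOT the library's utils::Magic32; it only demonstrates and
// verifies that contract.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>

// Find (magic, shift) valid for all x in [0, nmax]; the candidate multiplier
// floor(2^s / d) + 1 is checked exhaustively, so a returned pair is exact.
static bool find_magic(uint64_t nmax, uint64_t d, uint64_t *magic,
                       uint64_t *shift) {
  for (uint64_t s = 0; s < 40; ++s) {
    uint64_t m = ((uint64_t)1 << s) / d + 1;
    bool ok = true;
    for (uint64_t x = 0; x <= nmax; ++x) {
      if (((x * m) >> s) != x / d) { ok = false; break; }
    }
    if (ok) { *magic = m; *shift = s; return true; }
  }
  return false;
}

int main() {
  // Example mirroring Magic32(RST + 32, RS, ...) with R = S = 3, T = 1.
  const uint64_t RS = 9, nmax = RS + 32;
  uint64_t magic = 0, shift = 0;
  if (!find_magic(nmax, RS, &magic, &shift)) return 1;
  std::printf("x / %llu == (x * %llu) >> %llu for all x <= %llu\n",
              (unsigned long long)RS, (unsigned long long)magic,
              (unsigned long long)shift, (unsigned long long)nmax);
  return 0;
}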
#pragma once #include <iostream> #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/oprtr/oprtr.cuh> #include <gunrock/app/pr_nibble/pr_nibble_problem.cuh> namespace gunrock { namespace app { namespace pr_nibble { /** * @brief Speciflying parameters for pr_nibble Enactor * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_enactor(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(app::UseParameters_enactor(parameters)); return retval; } /** * @brief defination of pr_nibble iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct PRNibbleIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::Problem::GraphT::CsrT CsrT; typedef typename EnactorT::Problem::GraphT::GpT GpT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop; PRNibbleIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of pr_nibble, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { // -- // Alias variables auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &graph = data_slice.sub_graph[0]; auto &frontier = enactor_slice.frontier; auto &oprtr_parameters = enactor_slice.oprtr_parameters; auto &retval = enactor_stats.retval; auto &iteration = enactor_stats.iteration; // problem specific data alias auto &grad = data_slice.grad; auto &q = data_slice.q; auto &y = data_slice.y; auto &z = data_slice.z; auto &touched = data_slice.touched; auto &alpha = data_slice.alpha; auto &rho = data_slice.rho; auto &eps = data_slice.eps; auto &src_node = data_slice.src; auto &src_neib = data_slice.src_neib; auto &num_ref_nodes = data_slice.num_ref_nodes; auto &d_grad_scale = data_slice.d_grad_scale; auto &d_grad_scale_value = data_slice.d_grad_scale_value; // -- // Define operations // compute operation auto compute_op = [graph, iteration, src_node, src_neib, z, y, grad, q, alpha, rho, touched, num_ref_nodes] __host__ __device__(VertexT * v, const SizeT &i) { VertexT idx = v[i]; // ignore the neighbor on the first iteration if ((iteration == 0) && (idx == src_neib)) return; // Compute degrees SizeT idx_d = graph.GetNeighborListLength(idx); ValueT idx_d_sqrt = sqrt((ValueT)idx_d); ValueT idx_dn_sqrt = 1.0 / idx_d_sqrt; // this is at end in original implementation, but works // here after the first iteration (+ have to adjust for // it in StopCondition) if ((iteration > 0) && (idx == src_node)) { grad[idx] -= alpha / num_ref_nodes * idx_dn_sqrt; } z[idx] = y[idx] - grad[idx]; if (z[idx] == 0) return; ValueT q_old = q[idx]; ValueT thresh = rho * alpha * idx_d_sqrt; if (z[idx] >= thresh) { q[idx] = z[idx] - thresh; } else if (z[idx] <= -thresh) { q[idx] = z[idx] + thresh; } else { q[idx] = (ValueT)0; } if (iteration == 0) { y[idx] = q[idx]; } else { ValueT beta = (1 - sqrt(alpha)) / (1 + sqrt(alpha)); y[idx] = q[idx] + beta * (q[idx] - q_old); } touched[idx] = 0; grad[idx] = y[idx] * (1.0 + alpha) / 2; }; 
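// Note on compute_op (hedged reading, inferred from the code above): this
// appears to be one accelerated proximal-gradient step of the l1-regularized
// PageRank formulation behind pr_nibble. z = y - grad is the gradient step;
// q is the soft-threshold (prox) of z with per-vertex threshold
// rho * alpha * sqrt(deg); y = q + beta * (q - q_old) with
// beta = (1 - sqrt(alpha)) / (1 + sqrt(alpha)) is the momentum extrapolation;
// and grad is re-seeded with the diagonal term y * (1 + alpha) / 2 before the
// advance below accumulates the -(1 - alpha) / 2 neighbor contributions.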
GUARD_CU(frontier.V_Q()->ForAll(compute_op, frontier.queue_length, util::DEVICE, oprtr_parameters.stream)); // advance operation auto advance_op = [graph, touched, grad, y, alpha, iteration] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { ValueT src_dn_sqrt = 1.0 / sqrt((ValueT)graph.GetNeighborListLength(src)); ValueT dest_dn_sqrt = 1.0 / sqrt((ValueT)graph.GetNeighborListLength(dest)); ValueT src_y = Load<cub::LOAD_CG>(y + src); ValueT grad_update = -src_dn_sqrt * src_y * dest_dn_sqrt * (1.0 - alpha) / 2; ValueT last_grad = atomicAdd(grad + dest, grad_update); if (last_grad + grad_update == 0) return false; bool already_touched = atomicMax(touched + dest, 1) == 1; return !already_touched; }; // filter operation auto filter_op = [] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { return true; }; GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>( graph.csr(), frontier.V_Q(), frontier.Next_V_Q(), oprtr_parameters, advance_op, filter_op)); if (oprtr_parameters.advance_mode != "LB_CULL" && oprtr_parameters.advance_mode != "LB_LIGHT_CULL") { frontier.queue_reset = false; GUARD_CU(oprtr::Filter<oprtr::OprtrType_V2V>( graph.csr(), frontier.V_Q(), frontier.Next_V_Q(), oprtr_parameters, filter_op)); } GUARD_CU(frontier.work_progress.GetQueueLength( frontier.queue_index, frontier.queue_length, false, oprtr_parameters.stream, true)); // Convergence checking ValueT grad_thresh = rho * alpha * (1 + eps); GUARD_CU(cudaMemset(d_grad_scale, 0, 1 * sizeof(int))); GUARD_CU(cudaMemset(d_grad_scale_value, 0, 1 * sizeof(ValueT))); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); auto convergence_op = [graph, grad, d_grad_scale, d_grad_scale_value, grad_thresh, iteration, src_node, alpha, num_ref_nodes] __host__ __device__(VertexT & v) { ValueT v_dn_sqrt = 1.0 / sqrt((ValueT)graph.GetNeighborListLength(v)); ValueT val = grad[v]; if (v == src_node) val -= (alpha / num_ref_nodes) * v_dn_sqrt; val = abs(val * v_dn_sqrt); atomicMax(d_grad_scale_value, val); if (val > grad_thresh) { atomicMax(d_grad_scale, 1); } }; GUARD_CU(frontier.V_Q()->ForEach(convergence_op, frontier.queue_length, util::DEVICE, oprtr_parameters.stream)); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); cudaMemcpy(data_slice.h_grad_scale, d_grad_scale, 1 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(data_slice.h_grad_scale_value, d_grad_scale_value, 1 * sizeof(ValueT), cudaMemcpyDeviceToHost); // printf("data_slice.h_grad_scale=%d | h_val=%0.17g | // grad_thresh=%0.17g\n", // data_slice.h_grad_scale[0], data_slice.h_grad_scale_value[0], // grad_thresh); return retval; } bool Stop_Condition(int gpu_num = 0) { auto &enactor_slice = this->enactor->enactor_slices[0]; auto &enactor_stats = enactor_slice.enactor_stats; auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &iter = enactor_stats.iteration; // never break on first iteration if (iter == 0) { return false; } // max iterations if (iter >= data_slice.max_iter) { printf( "pr_nibble::Stop_Condition: reached max iterations. breaking at " "it=%d\n", iter); return true; } // gradient too small if (!(*data_slice.h_grad_scale)) { printf( "pr_nibble::Stop_Condition: gradient too small. 
breaking at it=%d\n", iter); return true; } return false; } /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each * transmition item, typed ValueT * @param received_length The numver of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { // ================ INCOMPLETE TEMPLATE - MULTIGPU ==================== auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; // auto iteration = enactor_slice.enactor_stats.iteration; // TODO: add problem specific data alias here, e.g.: // auto &distances = data_slice.distances; auto expand_op = [ // TODO: pass data used by the lambda, e.g.: // distances ] __host__ __device__(VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { // TODO: fill in the lambda to combine received and local data, e.g.: // ValueT in_val = value__associate_ins[in_pos]; // ValueT old_val = atomicMin(distances + key, in_val); // if (old_val <= in_val) // return false; return true; }; cudaError_t retval = BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, peer_, expand_op); return retval; } }; // end of PRNibbleIterationLoop /** * @brief Template enactor class. * @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase< typename _Problem::GraphT, typename _Problem::GraphT::VertexT, typename _Problem::GraphT::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: typedef _Problem Problem; typedef typename Problem::SizeT SizeT; typedef typename Problem::VertexT VertexT; typedef typename Problem::GraphT GraphT; typedef typename GraphT::VertexT LabelT; typedef typename GraphT::ValueT ValueT; typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef PRNibbleIterationLoop<EnactorT> IterationT; Problem *problem; IterationT *iterations; /** * @brief pr_nibble constructor */ Enactor() : BaseEnactor("pr_nibble"), problem(NULL) { // <OPEN> change according to algorithmic needs this->max_num_vertex_associates = 0; this->max_num_value__associates = 1; // </OPEN> } /** * @brief pr_nibble destructor */ virtual ~Enactor() { /*Release();*/ } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * @brief Initialize the problem. * @param[in] problem The problem object. 
* @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; // Lazy initialization GUARD_CU(BaseEnactor::Init( problem, Enactor_None, // <OPEN> change to how many frontier queues, and their types 2, NULL, // </OPEN> target, false)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0]; auto &graph = problem.sub_graphs[gpu]; GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges, this->queue_factors)); } iterations = new IterationT[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } /** * @brief one run of pr_nibble, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { gunrock::app::Iteration_Loop< // <OPEN> change to how many {VertexT, ValueT} data need to communicate // per element in the inter-GPU sub-frontiers 0, 1, // </OPEN> IterationT>(thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Reset enactor ... * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset( // <DONE> problem specific data if necessary, eg VertexT src, VertexT src_neib, // </DONE> util::Location target = util::DEVICE) { typedef typename GraphT::GpT GpT; cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Reset(target)); // <DONE> Initialize frontiers according to the algorithm: // In this case, we add a `src` + a neighbor to the frontier for (int gpu = 0; gpu < this->num_gpus; gpu++) { if ((this->num_gpus == 1) || (gpu == this->problem->org_graph->GpT::partition_table[src])) { this->thread_slices[gpu].init_size = 2; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { auto &frontier = this->enactor_slices[gpu * this->num_gpus + peer_].frontier; frontier.queue_length = (peer_ == 0) ? 2 : 0; if (peer_ == 0) { GUARD_CU(frontier.V_Q()->ForAll( [src, src_neib] __host__ __device__(VertexT * v, const SizeT &i) { v[i] = i == 0 ? src : src_neib; }, 2, target, 0)); } } } else { this->thread_slices[gpu].init_size = 0; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { this->enactor_slices[gpu * this->num_gpus + peer_] .frontier.queue_length = 0; } } } // </DONE> GUARD_CU(BaseEnactor::Sync()); return retval; } /** * @brief Enacts a pr_nibble computing on the specified graph. ... * \return cudaError_t error message(s), if any */ cudaError_t Enact( // <TODO> problem specific data if necessary, eg // VertexT src = 0 // </TODO> ) { cudaError_t retval = cudaSuccess; GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU Template Done.", this->flag & Debug); return retval; } }; } // namespace pr_nibble } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
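// The snippet below is a minimal, host-side sketch of the per-vertex update that
// compute_op performs inside Core(); it is written against plain std::vector state so the
// soft-threshold / momentum math can be checked in isolation. All names here
// (PrNibbleState, prnibble_vertex_update, degree, ...) are illustrative only and are not
// part of the Gunrock API; the source-node gradient adjustment and the touched[] reset
// are omitted for brevity.
#include <cmath>
#include <cstddef>
#include <vector>

struct PrNibbleState {
  std::vector<double> grad, q, y, z;  // one entry per vertex
};

// One proximal/momentum step for vertex v, mirroring compute_op.
inline void prnibble_vertex_update(PrNibbleState& s, std::size_t v, double degree,
                                   double alpha, double rho, bool first_iteration) {
  const double d_sqrt = std::sqrt(degree);
  s.z[v] = s.y[v] - s.grad[v];                  // gradient step
  if (s.z[v] == 0.0) return;

  const double q_old  = s.q[v];
  const double thresh = rho * alpha * d_sqrt;   // soft-threshold level
  if (s.z[v] >= thresh)        s.q[v] = s.z[v] - thresh;
  else if (s.z[v] <= -thresh)  s.q[v] = s.z[v] + thresh;
  else                         s.q[v] = 0.0;

  if (first_iteration) {
    s.y[v] = s.q[v];
  } else {
    const double beta = (1.0 - std::sqrt(alpha)) / (1.0 + std::sqrt(alpha));
    s.y[v] = s.q[v] + beta * (s.q[v] - q_old);  // momentum step
  }
  s.grad[v] = s.y[v] * (1.0 + alpha) / 2.0;     // re-seed grad before the advance phase
}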
#pragma once #include "hoomd/BoxDim.h" #include "hoomd/CachedAllocator.h" #include "hoomd/GPUPartition.cuh" #include "hoomd/HOOMDMath.h" #include "hoomd/Index1D.h" #include "hoomd/RNGIdentifiers.h" #include "hoomd/RandomNumbers.h" #include "hoomd/VectorMath.h" #include "hoomd/hpmc/Moves.h" #include <hip/hip_runtime.h> #include "hoomd/hpmc/HPMCCounters.h" #include "GPUHelpers.cuh" #include "HPMCMiscFunctions.h" // base data types #include "IntegratorHPMCMonoGPUTypes.cuh" #include <cassert> namespace hpmc { namespace gpu { #ifdef __HIP_PLATFORM_NVCC__ #define MAX_BLOCK_SIZE 1024 #define MIN_BLOCK_SIZE 32 #else #define MAX_BLOCK_SIZE 1024 #define MIN_BLOCK_SIZE 1024 // on AMD, we do not use __launch_bounds__ #endif #ifdef __HIPCC__ namespace kernel { //! Check narrow-phase overlaps template<class Shape, unsigned int max_threads> #ifdef __HIP_PLATFORM_NVCC__ __launch_bounds__(max_threads) #endif __global__ void hpmc_narrow_phase(const Scalar4* d_postype, const Scalar4* d_orientation, const Scalar4* d_trial_postype, const Scalar4* d_trial_orientation, const unsigned int* d_trial_move_type, const unsigned int* d_excell_idx, const unsigned int* d_excell_size, const Index2D excli, hpmc_counters_t* d_counters, const unsigned int num_types, const BoxDim box, const Scalar3 ghost_width, const uint3 cell_dim, const Index3D ci, const unsigned int N_local, const unsigned int* d_check_overlaps, const Index2D overlap_idx, const typename Shape::param_type* d_params, const unsigned int* d_update_order_by_ptl, const unsigned int* d_reject_in, unsigned int* d_reject_out, const unsigned int* d_reject_out_of_cell, const unsigned int max_extra_bytes, const unsigned int max_queue_size, const unsigned int work_offset, const unsigned int nwork) { __shared__ unsigned int s_overlap_checks; __shared__ unsigned int s_overlap_err_count; __shared__ unsigned int s_queue_size; __shared__ unsigned int s_still_searching; unsigned int group = threadIdx.y; unsigned int offset = threadIdx.z; unsigned int group_size = blockDim.z; bool master = (offset == 0) && threadIdx.x == 0; unsigned int n_groups = blockDim.y; // load the per type pair parameters into shared memory HIP_DYNAMIC_SHARED(char, s_data) typename Shape::param_type* s_params = (typename Shape::param_type*)(&s_data[0]); Scalar4* s_orientation_group = (Scalar4*)(s_params + num_types); Scalar3* s_pos_group = (Scalar3*)(s_orientation_group + n_groups); unsigned int* s_check_overlaps = (unsigned int*)(s_pos_group + n_groups); unsigned int* s_queue_j = (unsigned int*)(s_check_overlaps + overlap_idx.getNumElements()); unsigned int* s_queue_gid = (unsigned int*)(s_queue_j + max_queue_size); unsigned int* s_type_group = (unsigned int*)(s_queue_gid + max_queue_size); unsigned int* s_reject_group = (unsigned int*)(s_type_group + n_groups); { // copy over parameters one int per thread for fast loads unsigned int tidx = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z; unsigned int block_size = blockDim.x * blockDim.y * blockDim.z; unsigned int param_size = num_types * sizeof(typename Shape::param_type) / sizeof(int); for (unsigned int cur_offset = 0; cur_offset < param_size; cur_offset += block_size) { if (cur_offset + tidx < param_size) { ((int*)s_params)[cur_offset + tidx] = ((int*)d_params)[cur_offset + tidx]; } } unsigned int ntyppairs = overlap_idx.getNumElements(); for (unsigned int cur_offset = 0; cur_offset < ntyppairs; cur_offset += block_size) { if (cur_offset + tidx < ntyppairs) { s_check_overlaps[cur_offset + tidx] = 
d_check_overlaps[cur_offset + tidx]; } } } __syncthreads(); // initialize extra shared mem char* s_extra = (char*)(s_reject_group + n_groups); unsigned int available_bytes = max_extra_bytes; for (unsigned int cur_type = 0; cur_type < num_types; ++cur_type) s_params[cur_type].load_shared(s_extra, available_bytes); __syncthreads(); if (master && group == 0) { s_overlap_checks = 0; s_overlap_err_count = 0; s_queue_size = 0; s_still_searching = 1; } bool active = true; unsigned int idx = blockIdx.x * n_groups + group; if (idx >= nwork) active = false; idx += work_offset; unsigned int my_cell; unsigned int overlap_checks = 0; unsigned int overlap_err_count = 0; // if this particle is rejected a priori because it has left the cell, don't check overlaps // and avoid out of range memory access when computing the cell if (active && d_reject_out_of_cell[idx]) active = false; unsigned int update_order_i; if (active) { Scalar4 postype_i(d_trial_postype[idx]); vec3<Scalar> pos_i(postype_i); unsigned int type_i = __scalar_as_int(postype_i.w); // find the cell this particle should be in vec3<Scalar> pos_i_old(d_postype[idx]); my_cell = computeParticleCell(vec_to_scalar3(pos_i_old), box, ghost_width, cell_dim, ci, false); // the order of this particle in the chain update_order_i = d_update_order_by_ptl[idx]; if (master) { s_pos_group[group] = make_scalar3(pos_i.x, pos_i.y, pos_i.z); s_type_group[group] = type_i; s_orientation_group[group] = d_trial_orientation[idx]; } } if (master && active) { // load from output, this race condition is intentional and implements an // optional early exit flag between concurrently running kernels s_reject_group[group] = atomicCAS(&d_reject_out[idx], 0, 0); } // sync so that s_postype_group and s_orientation are available before other threads might // process overlap checks __syncthreads(); // counters to track progress through the loop over potential neighbors unsigned int excell_size; unsigned int k = offset; // true if we are checking against the old configuration if (active) { excell_size = d_excell_size[my_cell]; overlap_checks += excell_size; } // loop while still searching while (s_still_searching) { // stage 1, fill the queue. // loop through particles in the excell list and add them to the queue if they pass the // circumsphere check // active threads add to the queue if (active && !s_reject_group[group] && threadIdx.x == 0) { // prefetch j unsigned int j, next_j = 0; if (k < excell_size) { next_j = __ldg(&d_excell_idx[excli(k, my_cell)]); } // add to the queue as long as the queue is not full, and we have not yet reached the // end of our own list and as long as no overlaps have been found // every thread can add at most one element to the neighbor list while (s_queue_size < max_queue_size && k < excell_size) { // build some shapes, but we only need them to get diameters, so don't load // orientations build shape i from shared memory vec3<Scalar> pos_i(s_pos_group[group]); Shape shape_i(quat<Scalar>(), s_params[s_type_group[group]]); // prefetch next j j = next_j; k += group_size; if (k < excell_size) { next_j = __ldg(&d_excell_idx[excli(k, my_cell)]); } // has j been updated? 
ghost particles are not updated // these multiple gmem loads present a minor optimization opportunity for the future bool j_has_been_updated = j < N_local && d_update_order_by_ptl[j] < update_order_i && !d_reject_in[j] && d_trial_move_type[j]; // true if particle j is in the old configuration bool old = !j_has_been_updated; // check particle circumspheres // load particle j (always load ghosts from particle data) const Scalar4 postype_j = (old || j >= N_local) ? d_postype[j] : d_trial_postype[j]; unsigned int type_j = __scalar_as_int(postype_j.w); vec3<Scalar> pos_j(postype_j); Shape shape_j(quat<Scalar>(), s_params[type_j]); // place ourselves into the minimum image vec3<Scalar> r_ij = pos_j - pos_i; r_ij = box.minImage(r_ij); if (idx != j && (old || j < N_local) && check_circumsphere_overlap(r_ij, shape_i, shape_j)) { // add this particle to the queue unsigned int insert_point = atomicAdd(&s_queue_size, 1); if (insert_point < max_queue_size) { s_queue_gid[insert_point] = group; s_queue_j[insert_point] = (j << 1) | (old ? 1 : 0); } else { // or back up if the queue is already full // we will recheck and insert this on the next time through k -= group_size; } } } // end while (s_queue_size < max_queue_size && k < excell_size) } // end if active // sync to make sure all threads in the block are caught up __syncthreads(); // when we get here, all threads have either finished their list, or encountered a full // queue either way, it is time to process overlaps need to clear the still searching flag // and sync first if (master && group == 0) s_still_searching = 0; unsigned int tidx_1d = offset + group_size * group; // max_queue_size is always <= block size, so we just need an if here if (tidx_1d < min(s_queue_size, max_queue_size)) { // need to extract the overlap check to perform out of the shared mem queue unsigned int check_group = s_queue_gid[tidx_1d]; unsigned int check_j_flag = s_queue_j[tidx_1d]; bool check_old = check_j_flag & 1; unsigned int check_j = check_j_flag >> 1; Scalar4 postype_j; Scalar4 orientation_j; vec3<Scalar> r_ij; // build shape i from shared memory Scalar3 pos_i = s_pos_group[check_group]; unsigned int type_i = s_type_group[check_group]; Shape shape_i(quat<Scalar>(s_orientation_group[check_group]), s_params[type_i]); // build shape j from global memory postype_j = check_old ? d_postype[check_j] : d_trial_postype[check_j]; orientation_j = make_scalar4(1, 0, 0, 0); unsigned int type_j = __scalar_as_int(postype_j.w); Shape shape_j(quat<Scalar>(orientation_j), s_params[type_j]); if (shape_j.hasOrientation()) shape_j.orientation = check_old ? 
quat<Scalar>(d_orientation[check_j]) : quat<Scalar>(d_trial_orientation[check_j]); // put particle j into the coordinate system of particle i r_ij = vec3<Scalar>(postype_j) - vec3<Scalar>(pos_i); r_ij = vec3<Scalar>(box.minImage(vec_to_scalar3(r_ij))); if (s_check_overlaps[overlap_idx(type_i, type_j)] && test_overlap(r_ij, shape_i, shape_j, overlap_err_count)) { atomicAdd(&s_reject_group[check_group], 1); } } // threads that need to do more looking set the still_searching flag __syncthreads(); if (master && group == 0) s_queue_size = 0; if (active && (threadIdx.x == 0) && !s_reject_group[group] && k < excell_size) atomicAdd(&s_still_searching, 1); __syncthreads(); } // end while (s_still_searching) if (active && master) { // update reject flags in global mem if (s_reject_group[group]) atomicAdd(&d_reject_out[idx], 1); } if (master) { atomicAdd(&s_overlap_checks, overlap_checks); atomicAdd(&s_overlap_err_count, overlap_err_count); } __syncthreads(); if (master && group == 0) { // write out counters to global memory #if (__CUDA_ARCH__ >= 600) atomicAdd_system(&d_counters->overlap_err_count, s_overlap_err_count); atomicAdd_system(&d_counters->overlap_checks, s_overlap_checks); #else atomicAdd(&d_counters->overlap_err_count, s_overlap_err_count); atomicAdd(&d_counters->overlap_checks, s_overlap_checks); #endif } } //! Launcher for narrow phase kernel with templated launch bounds template<class Shape, unsigned int cur_launch_bounds> void narrow_phase_launcher(const hpmc_args_t& args, const typename Shape::param_type* params, unsigned int max_threads, detail::int2type<cur_launch_bounds>) { assert(params); if (max_threads == cur_launch_bounds * MIN_BLOCK_SIZE) { // determine the maximum block size and clamp the input block size down int max_block_size; hipFuncAttributes attr; constexpr unsigned int launch_bounds_nonzero = cur_launch_bounds > 0 ? 
cur_launch_bounds : 1; hipFuncGetAttributes( &attr, reinterpret_cast<const void*>( kernel::hpmc_narrow_phase<Shape, launch_bounds_nonzero * MIN_BLOCK_SIZE>)); max_block_size = attr.maxThreadsPerBlock; // choose a block size based on the max block size by regs (max_block_size) and include // dynamic shared memory usage unsigned int run_block_size = min(args.block_size, (unsigned int)max_block_size); unsigned int overlap_threads = args.overlap_threads; unsigned int tpp = min(args.tpp, run_block_size); while (overlap_threads * tpp > run_block_size || run_block_size % (overlap_threads * tpp) != 0) { tpp--; } tpp = std::min((unsigned int)args.devprop.maxThreadsDim[2], tpp); // clamp blockDim.z unsigned int n_groups = run_block_size / (tpp * overlap_threads); unsigned int max_queue_size = n_groups * tpp; const unsigned int min_shared_bytes = static_cast<unsigned int>(args.num_types * sizeof(typename Shape::param_type) + args.overlap_idx.getNumElements() * sizeof(unsigned int)); size_t shared_bytes = n_groups * (2 * sizeof(unsigned int) + sizeof(Scalar4) + sizeof(Scalar3)) + max_queue_size * 2 * sizeof(unsigned int) + min_shared_bytes; if (min_shared_bytes >= args.devprop.sharedMemPerBlock) throw std::runtime_error("Insufficient shared memory for HPMC kernel: reduce number of " "particle types or size of shape parameters"); while (shared_bytes + attr.sharedSizeBytes >= args.devprop.sharedMemPerBlock) { run_block_size -= args.devprop.warpSize; if (run_block_size == 0) throw std::runtime_error("Insufficient shared memory for HPMC kernel"); tpp = min(tpp, run_block_size); while (overlap_threads * tpp > run_block_size || run_block_size % (overlap_threads * tpp) != 0) { tpp--; } tpp = std::min((unsigned int)args.devprop.maxThreadsDim[2], tpp); // clamp blockDim.z n_groups = run_block_size / (tpp * overlap_threads); max_queue_size = n_groups * tpp; shared_bytes = n_groups * (2 * sizeof(unsigned int) + sizeof(Scalar4) + sizeof(Scalar3)) + max_queue_size * 2 * sizeof(unsigned int) + min_shared_bytes; } // determine dynamically allocated shared memory size unsigned int base_shared_bytes = static_cast<unsigned int>(shared_bytes + attr.sharedSizeBytes); unsigned int max_extra_bytes = static_cast<unsigned int>(args.devprop.sharedMemPerBlock - base_shared_bytes); char* ptr = (char*)nullptr; unsigned int available_bytes = max_extra_bytes; for (unsigned int i = 0; i < args.num_types; ++i) { params[i].allocate_shared(ptr, available_bytes); } unsigned int extra_bytes = max_extra_bytes - available_bytes; shared_bytes += extra_bytes; dim3 thread(overlap_threads, n_groups, tpp); for (int idev = args.gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = args.gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; const unsigned int num_blocks = nwork / n_groups + 1; dim3 grid(num_blocks, 1, 1); assert(args.d_postype); assert(args.d_orientation); assert(args.d_trial_postype); assert(args.d_trial_orientation); assert(args.d_excell_idx); assert(args.d_excell_size); assert(args.d_counters); assert(args.d_check_overlaps); assert(args.d_reject_in); assert(args.d_reject_out); assert(args.d_reject_out_of_cell); hipLaunchKernelGGL((hpmc_narrow_phase<Shape, launch_bounds_nonzero * MIN_BLOCK_SIZE>), grid, thread, shared_bytes, args.streams[idev], args.d_postype, args.d_orientation, args.d_trial_postype, args.d_trial_orientation, args.d_trial_move_type, args.d_excell_idx, args.d_excell_size, args.excli, args.d_counters + idev * args.counters_pitch, args.num_types, args.box, 
args.ghost_width, args.cell_dim, args.ci, args.N, args.d_check_overlaps, args.overlap_idx, params, args.d_update_order_by_ptl, args.d_reject_in, args.d_reject_out, args.d_reject_out_of_cell, max_extra_bytes, max_queue_size, range.first, nwork); } } else { narrow_phase_launcher<Shape>(args, params, max_threads, detail::int2type<cur_launch_bounds / 2>()); } } } // end namespace kernel //! Kernel driver for kernel::hpmc_narrow_phase template<class Shape> void hpmc_narrow_phase(const hpmc_args_t& args, const typename Shape::param_type* params) { assert(args.d_postype); assert(args.d_orientation); assert(args.d_counters); // select the kernel template according to the next power of two of the block size unsigned int launch_bounds = MIN_BLOCK_SIZE; while (launch_bounds < args.block_size) launch_bounds *= 2; kernel::narrow_phase_launcher<Shape>(args, params, launch_bounds, detail::int2type<MAX_BLOCK_SIZE / MIN_BLOCK_SIZE>()); } #endif #undef MAX_BLOCK_SIZE #undef MIN_BLOCK_SIZE } // end namespace gpu } // end namespace hpmc
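// The snippet below is a self-contained sketch of the launch-bounds dispatch pattern used
// by hpmc_narrow_phase / narrow_phase_launcher above: the driver rounds the requested
// block size up to a power-of-two multiple of MIN_BLOCK_SIZE and a recursive template walk
// then selects the matching __launch_bounds__ instantiation. The names here (sketch::,
// dispatch_by_bound, run_with_bound) are local to this sketch and not part of the HOOMD
// headers; run_with_bound stands in for the real hipLaunchKernelGGL call.
#include <cstdio>

namespace sketch {
constexpr unsigned MIN_BLOCK = 32;
constexpr unsigned MAX_BLOCK = 1024;

template <unsigned N> struct int2type {};  // compile-time tag, as in detail::int2type

// Recursion terminator: no bound matched (e.g. a block size above MAX_BLOCK).
inline void dispatch_by_bound(unsigned, unsigned, int2type<0>) {}

template <unsigned Bound>
void run_with_bound(unsigned block_size) {
  // Stand-in for launching a kernel compiled with __launch_bounds__(Bound).
  std::printf("launch_bounds=%u, block_size=%u\n", Bound, block_size);
}

template <unsigned CurBound>
void dispatch_by_bound(unsigned max_threads, unsigned block_size, int2type<CurBound>) {
  if (max_threads == CurBound * MIN_BLOCK)
    run_with_bound<CurBound * MIN_BLOCK>(block_size);
  else
    dispatch_by_bound(max_threads, block_size, int2type<CurBound / 2>());
}

// Round the requested block size up to the next power-of-two multiple of MIN_BLOCK,
// as the hpmc_narrow_phase driver does, then walk the candidate bounds.
inline void launch(unsigned block_size) {
  unsigned bound = MIN_BLOCK;
  while (bound < block_size) bound *= 2;
  dispatch_by_bound(bound, block_size, int2type<MAX_BLOCK / MIN_BLOCK>());
}
} // namespace sketch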
#include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/functional.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/scan.h> #include <thrust/transform.h> namespace { // anonymous static constexpr int BLOCK_SIZE = 256; template <int phase, bool replacement_has_nulls> __global__ void replace_nulls_strings(cudf::column_device_view input, cudf::column_device_view replacement, cudf::bitmask_type* output_valid, cudf::size_type* offsets, char* chars, cudf::size_type* valid_counter) { cudf::size_type nrows = input.size(); cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; uint32_t active_mask = 0xffffffff; active_mask = __ballot_sync(active_mask, i < nrows); auto const lane_id{threadIdx.x % cudf::detail::warp_size}; uint32_t valid_sum{0}; while (i < nrows) { bool input_is_valid = input.is_valid_nocheck(i); bool output_is_valid = true; if (replacement_has_nulls && !input_is_valid) { output_is_valid = replacement.is_valid_nocheck(i); } cudf::string_view out; if (input_is_valid) { out = input.element<cudf::string_view>(i); } else if (output_is_valid) { out = replacement.element<cudf::string_view>(i); } bool nonzero_output = (input_is_valid || output_is_valid); if (phase == 0) { offsets[i] = nonzero_output ? out.size_bytes() : 0; uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); if (0 == lane_id) { output_valid[cudf::word_index(i)] = bitmask; valid_sum += __popc(bitmask); } } else if (phase == 1) { if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes()); } i += blockDim.x * gridDim.x; active_mask = __ballot_sync(active_mask, i < nrows); } // Compute total valid count for this block and add it to global count uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum); // one thread computes and adds to output_valid_count if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); } } template <typename Type, bool replacement_has_nulls> __global__ void replace_nulls(cudf::column_device_view input, cudf::column_device_view replacement, cudf::mutable_column_device_view output, cudf::size_type* output_valid_count) { cudf::size_type nrows = input.size(); cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; uint32_t active_mask = 0xffffffff; active_mask = __ballot_sync(active_mask, i < nrows); auto const lane_id{threadIdx.x % cudf::detail::warp_size}; uint32_t valid_sum{0}; while (i < nrows) { bool input_is_valid = input.is_valid_nocheck(i); bool output_is_valid = true; if (input_is_valid) { output.data<Type>()[i] = input.element<Type>(i); } else { if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); } output.data<Type>()[i] = replacement.element<Type>(i); } /* output valid counts calculations*/ if (replacement_has_nulls) { uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); if (0 == lane_id) { output.set_mask_word(cudf::word_index(i), bitmask); valid_sum += __popc(bitmask); } } i += blockDim.x * gridDim.x; active_mask = __ballot_sync(active_mask, i < nrows); } if (replacement_has_nulls) { // Compute total valid count for this block and add it to global count uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum); // one thread computes and adds to output_valid_count if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); } } } /** * @brief Functor called by the 
`type_dispatcher` in order to invoke and instantiate * `replace_nulls` with the appropriate data types. */ struct replace_nulls_column_kernel_forwarder { template <typename col_type, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<col_type>())> std::unique_ptr<cudf::column> operator()(cudf::column_view const& input, cudf::column_view const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { cudf::size_type nrows = input.size(); cudf::detail::grid_1d grid{nrows, BLOCK_SIZE}; auto output = cudf::detail::allocate_like(input, input.size(), replacement.has_nulls() ? cudf::mask_allocation_policy::ALWAYS : cudf::mask_allocation_policy::NEVER, stream, mr); auto output_view = output->mutable_view(); auto replace = replace_nulls<col_type, false>; if (output_view.nullable()) replace = replace_nulls<col_type, true>; auto device_in = cudf::column_device_view::create(input); auto device_out = cudf::mutable_column_device_view::create(output_view); auto device_replacement = cudf::column_device_view::create(replacement); rmm::device_scalar<cudf::size_type> valid_counter(0, stream); cudf::size_type* valid_count = valid_counter.data(); replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>( *device_in, *device_replacement, *device_out, valid_count); if (output_view.nullable()) { output->set_null_count(output->size() - valid_counter.value(stream)); } return output; } template <typename col_type, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<col_type>())> std::unique_ptr<cudf::column> operator()(cudf::column_view const&, cudf::column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("No specialization exists for the given type."); } }; template <> std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::string_view>( cudf::column_view const& input, cudf::column_view const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { rmm::device_scalar<cudf::size_type> valid_counter(0, stream); cudf::size_type* valid_count = valid_counter.data(); auto replace_first = replace_nulls_strings<0, false>; auto replace_second = replace_nulls_strings<1, false>; if (replacement.has_nulls()) { replace_first = replace_nulls_strings<0, true>; replace_second = replace_nulls_strings<1, true>; } // Create new offsets column to use in kernel std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column( cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream); auto sizes_view = sizes->mutable_view(); auto device_in = cudf::column_device_view::create(input, stream); auto device_replacement = cudf::column_device_view::create(replacement, stream); rmm::device_buffer valid_bits = cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); // Call first pass kernel to get sizes in offsets cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1}; replace_first<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>( *device_in, *device_replacement, reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()), sizes_view.begin<cudf::size_type>(), nullptr, valid_count); std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column( sizes_view.begin<int32_t>(), sizes_view.end<int32_t>(), stream, mr); auto offsets_view = offsets->mutable_view(); auto const bytes = cudf::detail::get_value<int32_t>(offsets_view, offsets_view.size() - 1, stream); // Allocate chars array and output null mask std::unique_ptr<cudf::column> output_chars = 
cudf::strings::detail::create_chars_child_column(bytes, stream, mr); auto output_chars_view = output_chars->mutable_view(); replace_second<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>( *device_in, *device_replacement, reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()), offsets_view.begin<cudf::size_type>(), output_chars_view.data<char>(), valid_count); return cudf::make_strings_column(input.size(), std::move(offsets), std::move(output_chars), input.size() - valid_counter.value(stream), std::move(valid_bits)); } template <> std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::dictionary32>( cudf::column_view const& input, cudf::column_view const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { cudf::dictionary_column_view dict_input(input); cudf::dictionary_column_view dict_repl(replacement); return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr); } template <typename T> struct replace_nulls_functor { T const* value_it; replace_nulls_functor(T const* _value_it) : value_it(_value_it) {} __device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; } }; /** * @brief Functor called by the `type_dispatcher` in order to invoke and instantiate * `replace_nulls` with the appropriate data types. */ struct replace_nulls_scalar_kernel_forwarder { template <typename col_type, typename std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr> std::unique_ptr<cudf::column> operator()(cudf::column_view const& input, cudf::scalar const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); std::unique_ptr<cudf::column> output = cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr); auto output_view = output->mutable_view(); using ScalarType = cudf::scalar_type_t<col_type>; auto& s1 = static_cast<ScalarType const&>(replacement); auto device_in = cudf::column_device_view::create(input); auto func = replace_nulls_functor<col_type>{s1.data()}; thrust::transform(rmm::exec_policy(stream), input.data<col_type>(), input.data<col_type>() + input.size(), cudf::detail::make_validity_iterator(*device_in), output_view.data<col_type>(), func); return output; } template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr> std::unique_ptr<cudf::column> operator()(cudf::column_view const&, cudf::scalar const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("No specialization exists for the given type."); } }; template <> std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::string_view>( cudf::column_view const& input, cudf::scalar const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); cudf::strings_column_view input_s(input); const cudf::string_scalar& repl = static_cast<const cudf::string_scalar&>(replacement); return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr); } template <> std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::dictionary32>( cudf::column_view const& input, cudf::scalar const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { cudf::dictionary_column_view dict_input(input); return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr); } /** * @brief Function 
used by replace_nulls policy */ std::unique_ptr<cudf::column> replace_nulls_policy_impl(cudf::column_view const& input, cudf::replace_policy const& replace_policy, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto device_in = cudf::column_device_view::create(input); auto index = thrust::make_counting_iterator<cudf::size_type>(0); auto valid_it = cudf::detail::make_validity_iterator(*device_in); auto in_begin = thrust::make_zip_iterator(thrust::make_tuple(index, valid_it)); rmm::device_uvector<cudf::size_type> gather_map(input.size(), stream); auto gm_begin = thrust::make_zip_iterator( thrust::make_tuple(gather_map.begin(), thrust::make_discard_iterator())); auto func = cudf::detail::replace_policy_functor(); if (replace_policy == cudf::replace_policy::PRECEDING) { thrust::inclusive_scan( rmm::exec_policy(stream), in_begin, in_begin + input.size(), gm_begin, func); } else { auto in_rbegin = thrust::make_reverse_iterator(in_begin + input.size()); auto gm_rbegin = thrust::make_reverse_iterator(gm_begin + gather_map.size()); thrust::inclusive_scan( rmm::exec_policy(stream), in_rbegin, in_rbegin + input.size(), gm_rbegin, func); } auto output = cudf::detail::gather(cudf::table_view({input}), gather_map, cudf::out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED, stream, mr); return std::move(output->release()[0]); } } // end anonymous namespace namespace cudf { namespace detail { std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input, cudf::column_view const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch"); if (input.is_empty()) { return cudf::empty_like(input); } if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); } return cudf::type_dispatcher<dispatch_storage_type>( input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr); } std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input, cudf::scalar const& replacement, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) { return cudf::empty_like(input); } if (!input.has_nulls() || !replacement.is_valid(stream)) { return std::make_unique<cudf::column>(input, stream, mr); } return cudf::type_dispatcher<dispatch_storage_type>( input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr); } std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input, cudf::replace_policy const& replace_policy, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) { return cudf::empty_like(input); } if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); } return replace_nulls_policy_impl(input, replace_policy, stream, mr); } } // namespace detail std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input, cudf::column_view const& replacement, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr); } std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input, cudf::scalar const& replacement, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr); } std::unique_ptr<cudf::column> 
replace_nulls(column_view const& input, replace_policy const& replace_policy, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return cudf::detail::replace_nulls(input, replace_policy, rmm::cuda_stream_default, mr); } } // namespace cudf
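// The snippet below is a host-side sketch of the gather-map construction that
// replace_nulls_policy_impl performs on the device with thrust::inclusive_scan: the scan
// carries the index of the most recent valid row forward (PRECEDING policy) or backward
// (FOLLOWING policy), and gathering the input through that map realizes the fill. Plain
// std::vector stand-ins; preceding_gather_map is illustrative and not a cudf API.
#include <cstddef>
#include <vector>

// Build a PRECEDING-policy gather map: row i maps to the latest j <= i with valid[j] true.
// Leading nulls keep their own index and therefore remain null after the gather, matching
// a scan whose first elements have no valid predecessor. The FOLLOWING policy is the same
// computation run over reverse iterators.
inline std::vector<std::size_t> preceding_gather_map(const std::vector<bool>& valid) {
  std::vector<std::size_t> map(valid.size());
  std::size_t last_valid = 0;
  for (std::size_t i = 0; i < valid.size(); ++i) {
    if (valid[i]) last_valid = i;        // scan state: index of most recent valid row
    map[i] = valid[i] ? i : last_valid;
  }
  return map;
}

// Example: valid = {1, 0, 0, 1, 0} yields map = {0, 0, 0, 3, 3}; gathering the column
// through this map forward-fills the nulls at rows 1, 2 and 4.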
#pragma once #include <gunrock/util/sort_device.cuh> #include <gunrock/util/select_device.cuh> #include <gunrock/util/reduce_device.cuh> #include <gunrock/util/binary_search.cuh> #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/app/louvain/louvain_problem.cuh> #include <gunrock/oprtr/oprtr.cuh> namespace gunrock { namespace app { namespace louvain { /** * @brief Speciflying parameters for Louvain Enactor * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_enactor(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(app::UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<uint64_t>( "max-passes", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 10, "Maximum number of passes to run the louvain algorithm.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<uint64_t>( "max-iters", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 10, "Maximum number of iterations to run for each pass.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<double>( "pass-th", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 1e-4, "Modularity threshold to continue further passes.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<double>( "iter-th", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 1e-6, "Modularity threshold to continue further iterations within a pass.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<double>( "1st-th", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 1e-4, "Modularity threshold to continue further iterations in the first pass.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<double>( "neighborcomm-th", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, -1.0, "Threshold of number of vertex-community pairs changes to quick an " "iteration; " " value less than 0 will disable this feature", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "pass-stats", util::OPTIONAL_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, false, "Whether to show per-pass stats.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "iter-stats", util::OPTIONAL_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, false, "Whether to show per-iteration stats.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "unify-segments", util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, false, "Whether to use cub::RadixSort instead of cub::SegmentedRadixSort.", __FILE__, __LINE__)); return retval; } /** * @brief defination of Louvain iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct LouvainIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::Problem ProblemT; typedef typename EnactorT::Problem::EdgePairT EdgePairT; typedef typename EnactorT::Problem::GraphT GraphT; typedef typename EnactorT::Problem::GraphT::CsrT CsrT; typedef typename EnactorT::Problem::GraphT::GpT GpT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop; LouvainIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of Louvain, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return 
cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { // Data alias the enactor works on auto &enactor = this->enactor[0]; auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; // auto &graph = data_slice.sub_graph[0]; auto &frontier = enactor_slice.frontier; auto &oprtr_parameters = enactor_slice.oprtr_parameters; auto &retval = enactor_stats.retval; auto &pass_num = enactor_stats.iteration; auto &w_v2 = data_slice.w_v2; auto &w_v2self = data_slice.w_v2self; auto &w_c2 = data_slice.w_c2; auto &current_communities = data_slice.current_communities; auto &next_communities = data_slice.next_communities; auto &community_sizes = data_slice.community_sizes; // auto &edge_comms0 = data_slice.edge_comms0; auto &edge_comms0 = data_slice.edge_pairs0; // auto &edge_comms1 = data_slice.edge_comms1; auto &edge_comms1 = data_slice.edge_pairs1; auto &edge_weights0 = data_slice.edge_weights0; auto &edge_weights1 = data_slice.edge_weights1; auto &seg_offsets0 = data_slice.seg_offsets0; auto &seg_offsets1 = data_slice.seg_offsets1; auto &gain_bases = data_slice.gain_bases; auto &max_gains = data_slice.max_gains; auto &cub_temp_space = data_slice.cub_temp_space; auto &num_neighbor_comms = data_slice.num_neighbor_comms; auto &edge_pairs0 = data_slice.edge_pairs0; auto &edge_pairs1 = data_slice.edge_pairs1; auto unify_segments = enactor.unify_segments; auto &num_new_comms = data_slice.num_new_comms; auto &num_new_edges = data_slice.num_new_edges; auto &iter_gain = data_slice.iter_gain; cudaStream_t stream = oprtr_parameters.stream; auto target = util::DEVICE; util::Array1D<SizeT, VertexT> *null_frontier = NULL; util::CpuTimer iter_timer, pass_timer; auto graph_ptr = data_slice.sub_graph; if (enactor_stats.iteration != 0) graph_ptr = &(data_slice.new_graphs[enactor_stats.iteration % 2]); auto &graph = graph_ptr[0]; auto &weights = graph.CsrT::edge_values; if (enactor.pass_stats) pass_timer.Start(); if (enactor.iter_stats) iter_timer.Start(); // Pass initialization GUARD_CU(w_v2.ForAll( [w_v2self, current_communities, community_sizes] __host__ __device__( ValueT * w_v2_, const SizeT &v) { w_v2_[v] = 0; w_v2self[v] = 0; current_communities[v] = v; community_sizes[v] = 1; }, graph.nodes, target, stream)); // Accumulate edge values auto accu_op = [w_v2, w_v2self, weights] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { auto old_weight = atomicAdd(w_v2 + src, weights[edge_id]); // printf("w_v2[%d] : %lf -> %lf\n", // src, old_weight, old_weight + weights[edge_id]); if (src == dest) atomicAdd(w_v2self + src, weights[edge_id]); return false; }; frontier.queue_length = graph.nodes; frontier.queue_reset = true; // oprtr_parameters.advance_mode = ""; GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>( graph.csr(), null_frontier, null_frontier, oprtr_parameters, accu_op)); GUARD_CU(w_c2.ForAll( [w_v2] __host__ __device__(ValueT * w_c, const VertexT &v) { w_c[v] = w_v2[v]; // w_c = w_v; // printf("w_v2[%d] = %lf\n", // v, w_v2[v]); }, graph.nodes, target, stream)); if (enactor.iter_stats) { iter_timer.Stop(); util::PrintMsg("Pass " + std::to_string(pass_num) + ", pre-iter, elapsed = " + std::to_string(iter_timer.ElapsedMillis())); } int iter_num = 0; ValueT pass_gain = 0; bool to_continue = true; SizeT pervious_num_neighbor_comms 
= 0; // Iterations while (to_continue) { if (enactor.iter_stats) iter_timer.Start(); if (unify_segments) { GUARD_CU(edge_pairs0.ForAll( [edge_weights0, weights, current_communities, graph] __host__ __device__(EdgePairT * e_pairs, const SizeT &e) { VertexT src, dest; graph.GetEdgeSrcDest(e, src, dest); e_pairs[e] = ProblemT::MakePair(src, current_communities[dest]); // edge_weights0[e] = weights[e]; }, graph.edges, target, stream)); GUARD_CU(util::cubSortPairs(cub_temp_space, edge_pairs0, edge_pairs1, weights, edge_weights1, graph.edges, 0, sizeof(EdgePairT) * 8, stream)); GUARD_CU(seg_offsets0.Set(0, graph.edges + 1, target, stream)); GUARD_CU(seg_offsets0.ForAll( [edge_pairs1, graph] __host__ __device__(SizeT * offsets, const SizeT &e) { bool to_keep = false; if (e == 0 || e == graph.edges) to_keep = true; else { EdgePairT pair = edge_pairs1[e]; EdgePairT pervious_pair = edge_pairs1[e - 1]; if (ProblemT::GetFirst(pair) != ProblemT::GetFirst(pervious_pair) || ProblemT::GetSecond(pair) != ProblemT::GetSecond(pervious_pair)) to_keep = true; } offsets[e] = (to_keep) ? e : util::PreDefinedValues<SizeT>::InvalidValue; }, graph.edges + 1, target, stream)); } else { GUARD_CU(edge_comms0.ForAll( [edge_weights0, weights, current_communities, graph] __host__ __device__(EdgePairT * e_comms, const SizeT &e) { e_comms[e] = current_communities[graph.GetEdgeDest(e)]; edge_weights0[e] = weights[e]; }, graph.edges, target, stream)); GUARD_CU(util::cubSegmentedSortPairs( cub_temp_space, edge_comms0, edge_comms1, edge_weights0, edge_weights1, graph.edges, graph.nodes, graph.CsrT::row_offsets, 0, std::ceil(std::log2(graph.nodes)), stream)); GUARD_CU(seg_offsets0.Set(0, graph.edges + 1, target, stream)); GUARD_CU(graph.CsrT::row_offsets.ForAll( [seg_offsets0] __host__ __device__(SizeT * offsets, const SizeT &v) { seg_offsets0[offsets[v]] = 1; }, graph.nodes + 1, target, stream)); GUARD_CU(seg_offsets0.ForAll( [edge_comms1] __host__ __device__(SizeT * offsets, const SizeT &e) { bool to_keep = false; if (offsets[e] == 1) to_keep = true; else if (e == 0) to_keep = true; else if (edge_comms1[e] != edge_comms1[e - 1]) to_keep = true; offsets[e] = (to_keep) ? 
e : util::PreDefinedValues<SizeT>::InvalidValue; }, graph.edges + 1, target, stream)); } // Filter in order GUARD_CU(util::cubSelectIf( cub_temp_space, seg_offsets0, seg_offsets1, num_neighbor_comms, graph.edges + 1, [] __host__ __device__(const SizeT &e) { return util::isValid(e); }, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed."); GUARD_CU(num_neighbor_comms.Move(util::DEVICE, util::HOST, 1, 0, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed."); auto n_neighbor_comms = num_neighbor_comms[0] - 1; // util::PrintMsg("num_neigbhor_comms = " + // std::to_string(n_neighbor_comms)); // GUARD_CU(oprtr::Set(neighbor_comm_offsets.GetPointer(util::DEVICE) // + (num_neighbor_comms[0] + 1), graph.edges, // graph.edges - num_neighbor_comms[0] - 1, // target, stream)); GUARD_CU(util::SegmentedReduce( cub_temp_space, edge_weights1, edge_weights0, n_neighbor_comms, seg_offsets1, [] __host__ __device__(const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0, stream)); GUARD_CU(seg_offsets0.ForAll( [seg_offsets1, n_neighbor_comms, graph] __host__ __device__( SizeT * offsets, const VertexT &v) { if (v == graph.nodes) offsets[v] = n_neighbor_comms; else offsets[v] = util::BinarySearch_LeftMost( graph.GetNeighborListOffset(v), seg_offsets1, (SizeT)0, n_neighbor_comms + 1, false); //, //[] (const SizeT &a, const SizeT &b) //{ // return a < b; //}); // if (offsets[v] != graph.row_offsets[v]) // printf("offsets[%d] <- %d, row_offsets[v] = %d\n", // v, offsets[v], graph.row_offsets[v]); }, graph.nodes + 1, target, stream)); auto m2 = data_slice.m2; GUARD_CU(gain_bases.ForAll( [seg_offsets0, edge_weights0, edge_comms1, seg_offsets1, w_v2, current_communities, w_v2self, m2, w_c2, unify_segments, edge_pairs1] __host__ __device__(ValueT * bases, const VertexT &v) { SizeT start_pos = seg_offsets0[v]; SizeT end_pos = seg_offsets0[v + 1]; VertexT org_comm = current_communities[v]; ValueT w_v2c_org = 0; // printf("seg_range0[%d] = [%d, %d)\n", // v, start_pos, end_pos); for (SizeT pos = start_pos; pos < end_pos; pos++) { SizeT seg_start = seg_offsets1[pos]; VertexT comm = (unify_segments ? ProblemT::GetSecond(edge_pairs1[seg_start]) : edge_comms1[seg_start]); // printf("seg %d: v %d -> c %d, w_v2c = %f\n", // pos, v, comm, edge_weights0[pos]); if (org_comm == comm) { w_v2c_org = edge_weights0[pos]; break; } } ValueT w_v2_v = w_v2[v]; VertexT comm = current_communities[v]; bases[v] = w_v2self[v] - w_v2c_org - (w_v2_v - w_c2[comm]) * w_v2_v / m2; // printf("bases[%d] = %lf, w_v2[v] = %lf, comm = %d, " // "w_v2c_org = %lf, w_c2[comm] = %lf, m2 = %lf\n", // v, bases[v], w_v2_v, comm, w_v2c_org, w_c2[comm], m2); }, graph.nodes, target, stream)); GUARD_CU(max_gains.ForAll( [next_communities, current_communities] __host__ __device__( ValueT * gains, const VertexT &v) { gains[v] = 0; next_communities[v] = current_communities[v]; }, graph.nodes, target, stream)); GUARD_CU(edge_weights0.ForAll( [seg_offsets0, seg_offsets1, n_neighbor_comms, gain_bases, w_c2, w_v2, m2, current_communities, max_gains, next_communities, edge_comms1, graph, unify_segments, edge_pairs1] __host__ __device__(ValueT * w_v2c, const SizeT &pos) { VertexT v = util::BinarySearch_RightMost( pos, seg_offsets0, (SizeT)0, graph.nodes, false); //, //[] (const SizeT &a, const SizeT &b) //{ // return a < b; //}); // if (pos < seg_offsets0[v] && v > 0) // v--; // printf("seg %d: v = %d, seg_offsets0[v] = %d\n", // pos, v, seg_offsets0[v]); VertexT comm = unify_segments ? 
ProblemT::GetSecond(edge_pairs1[seg_offsets1[pos]]) : edge_comms1[seg_offsets1[pos]]; ValueT gain = 0; if (comm != current_communities[v]) { gain = gain_bases[v] + w_v2c[pos] - w_c2[comm] * w_v2[v] / m2; ValueT old_gain = atomicMax(max_gains + v, gain); // printf("seg %d: v %d -> c %d, gain = %lf, gain_bases = %lf, " // "w_v2c = %lf, w_c2 = %lf, w_v2 = %lf, old_gain = %lf\n", // pos, v, comm, gain, gain_bases[v], w_v2c[pos], // w_c2[comm], w_v2[v], old_gain); if (old_gain >= gain) gain = 0; // else // next_communities[v] = comm; } w_v2c[pos] = gain; }, n_neighbor_comms, target, stream)); GUARD_CU(edge_weights0.ForAll( [max_gains, next_communities, seg_offsets0, graph, seg_offsets1, edge_comms1, unify_segments, edge_pairs1] __host__ __device__(ValueT * gains, const SizeT &pos) { auto gain = gains[pos]; if (gain < 1e-8) return; VertexT v = util::BinarySearch_LeftMost(pos, seg_offsets0, (SizeT)0, graph.nodes, false); //, //[] (const SizeT &a, const SizeT &b) //{ // return a < b; //}); // if (pos < seg_offsets0[v] && v > 0) // v--; if (abs(max_gains[v] - gain) > 1e-8) return; next_communities[v] = unify_segments ? ProblemT::GetSecond(edge_pairs1[seg_offsets1[pos]]) : edge_comms1[seg_offsets1[pos]]; // if (next_communities[v] >= graph.nodes) // printf("Invalid comm: next_comm[%d] = %d, seg_offsets1[%d] = // %d\n", // v, next_communities[v], // pos, seg_offsets1[pos]); }, n_neighbor_comms, target, stream)); GUARD_CU(current_communities.ForAll( [next_communities, community_sizes, max_gains, w_v2, w_c2] __host__ __device__(VertexT * communities, const VertexT &v) { VertexT c_comm = communities[v]; VertexT n_comm = next_communities[v]; if (c_comm == v && community_sizes[v] + community_sizes[n_comm] <= 2) { if (n_comm > v && next_communities[n_comm] == v) { max_gains[v] = 0; return; } } if (c_comm == n_comm) { return; max_gains[v] = 0; } // printf("v %d : c %d -> c %d, gain = %lf\n", // v, c_comm, n_comm, max_gains[v]); atomicSub(community_sizes + c_comm, 1); atomicAdd(community_sizes + n_comm, 1); auto w_v2v = w_v2[v]; atomicAdd(w_c2 + n_comm, w_v2v); atomicAdd(w_c2 + c_comm, -1 * w_v2v); communities[v] = n_comm; }, graph.nodes, target, stream)); GUARD_CU(util::cubReduce( cub_temp_space, max_gains, iter_gain, graph.nodes, [] __host__ __device__(const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0, stream)); // GUARD_CU(iter_gain.ForEach( // [] __host__ __device__ (const ValueT &gain) // { // printf("iter_gain = %f\n", gain); // }, 1, target, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed."); // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); GUARD_CU(iter_gain.Move(util::DEVICE, util::HOST, 1, 0, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed."); // GUARD_CU2(cudaDeviceSynchronize(), // "cudaDeviceSynchronize failed."); // printf("gain0 = %f\n", (double)(iter_gain[0])); iter_gain[0] *= 2; // printf("gain1 = %lf\n", iter_gain[0]); iter_gain[0] /= data_slice.m2; // printf("gain2 = %lf\n", iter_gain[0]); data_slice.q += iter_gain[0]; pass_gain += iter_gain[0]; if (enactor.iter_stats) { iter_timer.Stop(); util::PrintMsg( "pass " + std::to_string(pass_num) + ", iter " + std::to_string(iter_num) + ", q = " + std::to_string(data_slice.q) + ", iter_gain = " + std::to_string(iter_gain[0]) + ", pass_gain = " + std::to_string(pass_gain) + ", #neighbor_comms = " + std::to_string(n_neighbor_comms) + ", elapsed = " + std::to_string(iter_timer.ElapsedMillis())); } iter_num++; if ((pass_num != 0 && iter_gain[0] < 
enactor.iter_gain_threshold) || (pass_num == 0 && iter_gain[0] < enactor.first_threshold) || iter_num >= enactor.max_iters || (enactor.neighborcomm_threshold > 0 && iter_num != 1 && pass_num == 0 && n_neighbor_comms > (1 - enactor.neighborcomm_threshold) * pervious_num_neighbor_comms)) to_continue = false; pervious_num_neighbor_comms = n_neighbor_comms; } data_slice.pass_gain = pass_gain; if (enactor.iter_stats) iter_timer.Start(); // Graph contraction GUARD_CU(edge_comms0.ForEach( [] __host__ __device__(EdgePairT & comm) { comm = util::PreDefinedValues<EdgePairT>::InvalidValue; }, graph.nodes, target, stream)); GUARD_CU(edge_comms0.ForAll( [current_communities] __host__ __device__(EdgePairT * comms0, const SizeT &v) { VertexT comm = current_communities[v]; comms0[comm] = comm; // if (comm == 8278) // printf("Comm[%d] = %d\n", v, comm); }, graph.nodes, target, stream)); GUARD_CU(util::cubSelectIf(cub_temp_space, edge_comms0, edge_comms1, num_new_comms, graph.nodes, [] __host__ __device__(EdgePairT & comm) { // if (comm == 8278) // printf("Comm %d, valid = %s\n", comm, // util::isValid(comm) ? "True" : // "False"); return (util::isValid(comm)); }, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed"); GUARD_CU(num_new_comms.Move(target, util::HOST, 1, 0, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed"); auto n_new_comms = num_new_comms[0]; // util::PrintMsg("#new_comms = " + std::to_string(n_new_comms)); GUARD_CU(edge_comms0.ForAll( [edge_comms1, n_new_comms] __host__ __device__(EdgePairT * comms0, const SizeT &new_comm) { comms0[edge_comms1[new_comm]] = new_comm; // if (edge_comms1[new_comm] == 8278) // printf("Comms0[%d] = %d\n", // edge_comms1[new_comm], new_comm); }, n_new_comms, target, stream)); // GUARD_CU(edge_comms0.ForAll( // [] __host__ __device__ (VertexT *comms0, VertexT &v) // { // printf("edge_Comms0[8278] = %d\n", // comms0[8278]); // }, 1, target, stream)); GUARD_CU(current_communities.ForAll( [edge_comms0, n_new_comms] __host__ __device__(VertexT * comms, const VertexT &v) { VertexT comm = comms[v]; comms[v] = edge_comms0[comm]; // if (comms[v] >= n_new_comms) // printf("Invalid comm: %d -> %d, edge_comms0 = %d\n", // comm, comms[v], edge_comms0[comm]); }, graph.nodes, target, stream)); auto null_ptr = &current_communities; null_ptr = NULL; frontier.queue_length = graph.nodes; frontier.queue_reset = true; GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>( graph.csr(), null_ptr, null_ptr, oprtr_parameters, [edge_comms0, current_communities, edge_pairs0] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { VertexT src_comm = current_communities[src]; // src_comm = edge_comms0[src_comm]; VertexT dest_comm = current_communities[dest]; // dest_comm = edge_comms0[dest_comm]; edge_pairs0[edge_id] = (((EdgePairT)src_comm) << (sizeof(VertexT) * 8)) + (EdgePairT)dest_comm; return false; })); GUARD_CU(util::cubSortPairs(cub_temp_space, edge_pairs0, edge_pairs1, graph.CsrT::edge_values, edge_weights1, graph.edges, 0, sizeof(EdgePairT) * 8, stream)); GUARD_CU(seg_offsets0.ForEach( [] __host__ __device__(SizeT & offset) { offset = util::PreDefinedValues<SizeT>::InvalidValue; }, graph.edges + 1, target, stream)); GUARD_CU(seg_offsets0.ForAll( [graph, edge_pairs1] __host__ __device__(SizeT * offsets0, const SizeT &e) { if (e != 0 && e != graph.edges) { auto edge = edge_pairs1[e]; auto pervious_edge = edge_pairs1[e - 1]; if 
(edge == pervious_edge) return; } offsets0[e] = e; }, graph.edges + 1, target, stream)); GUARD_CU(util::cubSelectIf(cub_temp_space, seg_offsets0, seg_offsets1, num_new_edges, graph.edges + 1, [] __host__ __device__(SizeT & offset) { return (util::isValid(offset)); }, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed"); GUARD_CU(num_new_edges.Move(target, util::HOST, 1, 0, stream)); GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed"); auto n_new_edges = num_new_edges[0] - 1; // util::PrintMsg("#new_edges = " + std::to_string(n_new_edges)); // GraphT *new_graph = new GraphT; auto &new_graph = data_slice.new_graphs[(pass_num + 1) % 2]; if (n_new_comms > new_graph.CsrT::row_offsets.GetSize() - 1 || n_new_edges > new_graph.CsrT::column_indices.GetSize()) { GUARD_CU(new_graph.CsrT::Allocate(n_new_comms * 1.1, n_new_edges * 1.1, util::DEVICE)); } // else { new_graph.nodes = n_new_comms; new_graph.edges = n_new_edges; new_graph.CsrT::nodes = n_new_comms; new_graph.CsrT::edges = n_new_edges; //} GUARD_CU(util::SegmentedReduce( cub_temp_space, edge_weights1, new_graph.CsrT::edge_values, n_new_edges, seg_offsets1, [] __host__ __device__(const ValueT &a, const ValueT &b) { return a + b; }, (ValueT)0, stream)); GUARD_CU(new_graph.CsrT::column_indices.ForAll( [edge_pairs1, seg_offsets1, n_new_comms] __host__ __device__( VertexT * indices, const SizeT &e) { indices[e] = edge_pairs1[seg_offsets1[e]] & util::PreDefinedValues<VertexT>::AllOnes; // if (indices[e] >= n_new_comms) // printf("Invalid dest: %d, e = %d, pair = %ld\n", // indices[e], e, edge_pairs1[seg_offsets1[e]]); }, n_new_edges, target, stream)); auto &new_row_offsets = new_graph.CsrT::row_offsets; GUARD_CU(seg_offsets1.ForAll( [new_row_offsets, edge_pairs1, n_new_comms, n_new_edges] __host__ __device__(SizeT * offsets, const SizeT &new_e) { VertexT src = 0, pervious_src = 0; if (new_e != n_new_edges) src = edge_pairs1[offsets[new_e]] >> (sizeof(VertexT) * 8); else src = n_new_comms; if (new_e != 0) pervious_src = edge_pairs1[offsets[new_e - 1]] >> (sizeof(VertexT) * 8); if (src == pervious_src) return; for (VertexT new_v = (new_e == 0 ? 
0 : (pervious_src + 1)); new_v <= src; new_v++) new_row_offsets[new_v] = new_e; }, n_new_edges + 1, target, stream)); GUARD_CU(new_row_offsets.ForAll( [] __host__ __device__(SizeT * offsets, const VertexT &v) { offsets[0] = 0; }, 1, target, stream)); if (enactor.iter_stats) { iter_timer.Stop(); util::PrintMsg("pass " + std::to_string(pass_num) + ", graph compaction, elapsed = " + std::to_string(iter_timer.ElapsedMillis())); } if (enactor.pass_stats) { pass_timer.Stop(); util::PrintMsg( "pass " + std::to_string(pass_num) + ", #v = " + std::to_string(graph.nodes) + " -> " + std::to_string(n_new_comms) + ", #e = " + std::to_string(graph.edges) + " -> " + std::to_string(n_new_edges) + ", #iter = " + std::to_string(iter_num) + ", q = " + std::to_string(data_slice.q) + ", pass_gain = " + std::to_string(pass_gain) + ", elapsed = " + std::to_string(pass_timer.ElapsedMillis())); } GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed"); // util::Array1D<SizeT, VertexT> &pass_comm = new util::Array1D<SizeT, // VertexT>; auto &pass_comm = data_slice.pass_communities[pass_num]; GUARD_CU(pass_comm.EnsureSize_(graph.nodes, util::HOST)); GUARD_CU2(cudaMemcpyAsync(pass_comm.GetPointer(util::HOST), current_communities.GetPointer(util::DEVICE), sizeof(VertexT) * graph.nodes, cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync failed."); data_slice.num_pass = pass_num; // pass_communities.push_back(pass_comm); GUARD_CU(new_graph.FromCsr(new_graph.csr(), target, stream, true, true)); // GUARD_CU(new_graph.csr().Move(target, util::HOST, stream)); if (enactor_stats.iteration != 0) { // util::PrintMsg("Release graph"); // GUARD_CU(data_slice.new_graph -> Release(target)); // delete data_slice.new_graph; } // data_slice.new_graph = new_graph; // GUARD_CU2(cudaStreamSynchronize(stream), // "cudaStreamSynchronize failed"); // GUARD_CU(new_graph -> csr().Display()); // util::PrintMsg("Pass finished"); return retval; } bool Stop_Condition(int gpu_num = 0) { int num_gpus = this->enactor->num_gpus; auto &enactor_slices = this->enactor->enactor_slices; for (int gpu = 0; gpu < num_gpus * num_gpus; gpu++) { auto &retval = enactor_slices[gpu].enactor_stats.retval; if (retval == cudaSuccess) continue; printf("(CUDA error %d @ GPU %d: %s\n", retval, gpu % num_gpus, cudaGetErrorString(retval)); fflush(stdout); return true; } auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor->enactor_slices[this->gpu_num * this->enactor->num_gpus]; ValueT pass_gain_threshold = this->enactor->problem->parameters.template Get<ValueT>("pass-th"); // printf("iter = %lld, pass_gain = %lf\n", // enactor_slice.enactor_stats.iteration, data_slice.pass_gain); if (enactor_slice.enactor_stats.iteration >= data_slice.max_iters) return true; if (enactor_slice.enactor_stats.iteration <= 1 || data_slice.pass_gain >= pass_gain_threshold) return false; return true; } /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each * transmition item, typed ValueT * @param received_length The numver of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { auto &data_slice = 
this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; // auto iteration = enactor_slice.enactor_stats.iteration; // TODO: add problem specific data alias here, e.g.: // auto &distances = data_slice.distances; auto expand_op = [ // TODO: pass data used by the lambda, e.g.: // distances ] __host__ __device__(VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { // TODO: fill in the lambda to combine received and local data, e.g.: // ValueT in_val = value__associate_ins[in_pos]; // ValueT old_val = atomicMin(distances + key, in_val); // if (old_val <= in_val) // return false; return true; }; cudaError_t retval = LouvainIterationLoop::template ExpandIncomingBase< NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>(received_length, peer_, expand_op); return retval; } }; // end of LouvainIteration /** * @brief Louvain enactor class. * @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase< typename _Problem::GraphT, typename _Problem::GraphT::VertexT, // TODO: change to other label // types used for the operators, // e.g.: typename // _Problem::LabelT, typename _Problem::GraphT::ValueT, // TODO: change to other value // types used for inter GPU // communication, e.g.: typename // _Problem::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: typedef _Problem Problem; typedef typename Problem::SizeT SizeT; typedef typename Problem::VertexT VertexT; typedef typename Problem::GraphT GraphT; typedef typename GraphT::VertexT LabelT; // e.g. typedef typename Problem::LabelT LabelT; typedef typename GraphT::ValueT ValueT; // e.g. typedef typename Problem::ValueT ValueT; typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef LouvainIterationLoop<EnactorT> IterationT; Problem *problem; IterationT *iterations; VertexT max_passes; VertexT max_iters; bool pass_stats; bool iter_stats; bool unify_segments; ValueT pass_gain_threshold; ValueT iter_gain_threshold; ValueT first_threshold; ValueT neighborcomm_threshold; /** * @brief LouvainEnactor constructor */ Enactor() : BaseEnactor("Louvain"), problem(NULL) { this->max_num_vertex_associates = 0; this->max_num_value__associates = 1; } /** * @brief LouvainEnactor destructor */ virtual ~Enactor() { // Release(); } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * \addtogroup PublicInterface * @{ */ /** * @brief Initialize the problem. * @param[in] parameters Running parameters. * @param[in] problem The problem object. 
* @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init( // util::Parameters &parameters, Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; max_passes = problem.parameters.template Get<VertexT>("max-passes"); max_iters = problem.parameters.template Get<VertexT>("max-iters"); pass_stats = problem.parameters.template Get<bool>("pass-stats"); iter_stats = problem.parameters.template Get<bool>("iter-stats"); pass_gain_threshold = problem.parameters.template Get<ValueT>("pass-th"); iter_gain_threshold = problem.parameters.template Get<ValueT>("iter-th"); first_threshold = problem.parameters.template Get<ValueT>("1st-th"); unify_segments = problem.parameters.template Get<bool>("unify-segments"); neighborcomm_threshold = problem.parameters.template Get<ValueT>("neighborcomm-th"); // Lazy initialization GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 0, NULL, target, false)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0]; auto &graph = problem.sub_graphs[gpu]; GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges, this->queue_factors)); } iterations = new IterationT[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } cudaError_t Check_Queue_Size(int peer_) { // no need to check queue size for PR return cudaSuccess; } /** * @brief one run of Louvain, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { gunrock::app::Iteration_Loop< // TODO: change to how many {VertexT, ValueT} data need to communicate // per element in the inter-GPU sub-frontiers 0, 0, IterationT>(thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Reset enactor * @param[in] src Source node to start primitive. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset( // TODO: add problem specific info, e.g.: // VertexT src, util::Location target = util::DEVICE) { typedef typename GraphT::GpT GpT; cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Reset(target)); // TODO: Initialize frontiers according to the algorithm, e.g.: for (int gpu = 0; gpu < this->num_gpus; gpu++) { // if ((this->num_gpus == 1) || // (gpu == this->problem->org_graph->GpT::partition_table[src])) // { // this -> thread_slices[gpu].init_size = 1; // for (int peer_ = 0; peer_ < this -> num_gpus; peer_++) // { // auto &frontier = this -> // enactor_slices[gpu * this -> num_gpus + peer_].frontier; // frontier.queue_length = (peer_ == 0) ? 1 : 0; // if (peer_ == 0) // { // GUARD_CU(frontier.V_Q() -> ForEach( // [src]__host__ __device__ (VertexT &v) // { // v = src; // }, 1, target, 0)); // } // } // } // // else { this->thread_slices[gpu].init_size = 1; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { this->enactor_slices[gpu * this->num_gpus + peer_] .frontier.queue_length = 1; } // } } GUARD_CU(BaseEnactor::Sync()); return retval; } /** * @brief Enacts a Louvain computing on the specified graph. * @param[in] src Source node to start primitive. 
* \return cudaError_t error message(s), if any */ cudaError_t Enact() { cudaError_t retval = cudaSuccess; GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU Louvain Done.", this->flag & Debug); return retval; } /** @} */ }; } // namespace louvain } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
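// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of Gunrock): the per-neighbor-community
// modularity gain that the ForAll kernels above evaluate and reduce with
// atomicMax. The parameter names are hypothetical and simply mirror the
// device arrays: gain_base ~ gain_bases[v], k_v2c ~ w_v2c[pos] (edge weight
// from vertex v into candidate community c), w_c2_target ~ w_c2[comm],
// k_v ~ w_v2[v] (weighted degree of v), and m2 ~ data_slice.m2 (twice the
// total edge weight).
// ---------------------------------------------------------------------------
static inline double modularity_gain(double gain_base, double k_v2c,
                                      double w_c2_target, double k_v,
                                      double m2) {
    // Mirrors: gain = gain_bases[v] + w_v2c[pos] - w_c2[comm] * w_v2[v] / m2
    return gain_base + k_v2c - w_c2_target * k_v / m2;
}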
namespace Saiga { namespace CUDA { template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void copyUnCoalesced(ArrayView<T> data, ArrayView<T> result) { CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop for (auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize) { T l[ElementSize]; auto localStart = ti.thread_id * ElementSize; for (int i = 0; i < ElementSize; ++i) { l[i] = data[localStart + i]; } for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { result[localStart + i] = l[i]; } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void copyFullCoalesced(ArrayView<T> data, ArrayView<T> result) { const int elementsPerWarp = ElementSize * SAIGA_WARP_SIZE; CUDA::ThreadInfo<BLOCK_SIZE> ti; auto N = data.size(); auto Nelements = N / ElementSize; auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); // grid stride loop for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { auto warpStart = wId * elementsPerWarp; for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto globalOffset = warpStart + e; if (globalOffset < N) { auto d = data[globalOffset]; d += 42; result[globalOffset] = d; } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryUnCoalesced(ArrayView<T> data, ArrayView<T> result) { __shared__ T buffer[BLOCK_SIZE][ElementSize + 0]; CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop for (auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize) { T l[ElementSize]; auto matrixId = ti.thread_id; auto globalOffset = matrixId * ElementSize; auto localMatrixId = ti.local_thread_id; // id in shared buffer // linear copy for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = data[globalOffset + i]; } for (int i = 0; i < ElementSize; ++i) { l[i] = buffer[localMatrixId][i]; } // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = l[i]; } // linear copy for (int i = 0; i < ElementSize; ++i) { result[globalOffset + i] = buffer[localMatrixId][i]; } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryCoalesced(ArrayView<T> data, ArrayView<T> result) { CUDA::ThreadInfo<BLOCK_SIZE> ti; const int elementsPerWarp = ElementSize * SAIGA_WARP_SIZE; auto N = data.size(); auto Nelements = N / ElementSize; auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); // __shared__ double buffer[elementsPerBlock]; __shared__ T buffer[BLOCK_SIZE][ElementSize + 0]; // grid stride loop for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { // for(auto id = ti.thread_id * ElementSize; id < N; id += ti.num_warps){ // for(auto id = ti.thread_id; id < Nelements; id += ti.grid_size ){ T l[ElementSize]; auto localMatrixId = ti.local_thread_id; // id in shared buffer auto warpStart = ti.warp_id * elementsPerWarp; // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / ElementSize; auto localOffset = e % ElementSize; auto globalOffset = warpStart + e; if (globalOffset < N) { buffer[localMatrix][localOffset] = data[globalOffset]; } } for (int i = 
0; i < ElementSize; ++i) { l[i] = buffer[localMatrixId][i]; } // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } for (int i = 0; i < ElementSize; ++i) { buffer[localMatrixId][i] = l[i]; } // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / ElementSize; auto localOffset = e % ElementSize; auto globalOffset = warpStart + e; if (globalOffset < N) { result[globalOffset] = buffer[localMatrix][localOffset]; } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE, typename VectorType = int2> __launch_bounds__(BLOCK_SIZE) __global__ static void sharedMemoryCoalesced2(ArrayView<T> data, ArrayView<T> result) { const int elementSize = sizeof(T) * ElementSize; const int fullVectorsPerElement = elementSize / sizeof(VectorType); #ifdef SAIGA_HAS_CONSTEXPR const int vectorsPerElement = CUDA::getBlockCount(elementSize, sizeof(VectorType)); static_assert(vectorsPerElement * sizeof(VectorType) == elementSize, "T cannot be loaded with VectorType"); #else const int vectorsPerElement = 1; #endif // const int vectorsPerWarp = fullVectorsPerElement * SAIGA_WARP_SIZE; const int tileSizeBytes = 64; const int tileSizeVectors = tileSizeBytes / sizeof(VectorType); const int fullVectorsPerTile = fullVectorsPerElement > tileSizeVectors ? tileSizeVectors : fullVectorsPerElement; const int vectorsPerTile = vectorsPerElement > tileSizeVectors ? tileSizeVectors : vectorsPerElement; // const int vectorsPerTile = N > 8 ? 8 : N; const int fullTiles = fullVectorsPerElement == 0 ? fullVectorsPerElement : fullVectorsPerElement / fullVectorsPerTile; // const int fullTiles = fullVectorsPerElement == 0 ? fullVectorsPerElement : fullVectorsPerElement / // fullVectorsPerTile; const int tiles = 2; const int elementsPerTile = N / tiles; const int fullVectorsPerBlock = fullVectorsPerElement * BLOCK_SIZE; // __shared__ double buffer[elementsPerBlock]; __shared__ VectorType buffer[BLOCK_SIZE][vectorsPerTile]; // __shared__ T buffer[BLOCK_SIZE][N]; T l[ElementSize]; auto N = data.size(); auto Nelements = N / ElementSize; auto NVectors = N * sizeof(T) / sizeof(VectorType); auto requiredWarps = CUDA::getBlockCount(Nelements, SAIGA_WARP_SIZE); VectorType* global = reinterpret_cast<VectorType*>(data.data()); VectorType* globalResult = reinterpret_cast<VectorType*>(result.data()); VectorType* local = reinterpret_cast<VectorType*>(l); CUDA::ThreadInfo<BLOCK_SIZE> ti; // grid stride loop // for(auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize){ for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { auto localMatrixId = ti.local_thread_id; // id in shared buffer // auto warpStart = ti.warp_id * vectorsPerWarp; auto blockStart = ti.block_id * fullVectorsPerBlock; auto warpOffset = ti.warp_lane * SAIGA_WARP_SIZE; // start matrix of this warp in block local shared memory #if 1 for (int t = 0; t < fullTiles; ++t) { auto tileOffset = t * fullVectorsPerTile; // strided copy for (auto e = ti.lane_id; e < fullVectorsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE) { auto localMatrix = warpOffset + e / fullVectorsPerTile; auto localOffset = e % fullVectorsPerTile; auto globalIndex = blockStart + localMatrix * fullVectorsPerElement + tileOffset + localOffset; if (globalIndex < NVectors) { buffer[localMatrix][localOffset] = global[globalIndex]; // printf("read %d %d %d \n",ti.thread_id,localMatrix,globalIndex); } } for (int i = 0; i < 
fullVectorsPerTile; ++i) { local[i + tileOffset] = buffer[localMatrixId][i]; } } #else // strided copy for (auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE) { auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / N; auto localOffset = e % N; buffer[localMatrix][localOffset] = data[warpStart + e]; } for (int i = 0; i < N; ++i) { l[i] = buffer[localMatrixId][i]; } #endif // add something so things don't get optimized away for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } // for(int t = 0 ; t < tiles ; ++t){ // auto tileOffset = t * elementsPerTile; // for(int i = 0; i < elementsPerTile; ++i){ // buffer[localMatrixId][i] = l[i + tileOffset]; // } // //strided copy // for(auto e = ti.lane_id; e < elementsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE){ // auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / elementsPerTile; // auto localOffset = e % elementsPerTile; // result[tileOffset+warpStart+e] = buffer[localMatrix][localOffset]; // } // } // for(int i = 0; i < N; ++i){ // buffer[localMatrixId][i] = l[i]; // } //// strided copy // for(auto e = ti.lane_id; e < elementsPerWarp; e += SAIGA_WARP_SIZE){ // auto localMatrix = ti.warp_lane * SAIGA_WARP_SIZE + e / N; // auto localOffset = e % N; // result[warpStart+e] = buffer[localMatrix][localOffset]; // } for (int t = 0; t < fullTiles; ++t) { auto tileOffset = t * fullVectorsPerTile; for (int i = 0; i < fullVectorsPerTile; ++i) { buffer[localMatrixId][i] = local[i + tileOffset]; } // strided copy for (auto e = ti.lane_id; e < fullVectorsPerTile * SAIGA_WARP_SIZE; e += SAIGA_WARP_SIZE) { auto localMatrix = warpOffset + e / fullVectorsPerTile; auto localOffset = e % fullVectorsPerTile; auto globalIndex = blockStart + localMatrix * fullVectorsPerElement + tileOffset + localOffset; if (globalIndex < NVectors) { globalResult[globalIndex] = buffer[localMatrix][localOffset]; // printf("write %d %d %d \n",ti.thread_id,localMatrix,globalIndex); } } } } } template <typename T, int ElementSize, unsigned int BLOCK_SIZE, typename VectorType = int4, int localWarpSize2 = -1> __launch_bounds__(BLOCK_SIZE) __global__ static void shuffleCopy(ArrayView<T> data, ArrayView<T> result) { const int localWarpSize = localWarpSize2 == -1 ? 
int(SAIGA_L2_CACHE_LINE_SIZE / sizeof(VectorType)) : localWarpSize2; const int vectorsPerElement = CUDA::getBlockCount(ElementSize * sizeof(T), sizeof(VectorType)); auto N = data.size(); auto Nelements = N / ElementSize; auto NVectors = N * sizeof(T) / sizeof(VectorType); auto requiredWarps = CUDA::getBlockCount(Nelements, localWarpSize); // const int localWarpSize = 2; CUDA::ThreadInfo<BLOCK_SIZE, localWarpSize> ti; // grid stride loop // for(auto id = ti.thread_id * ElementSize; id < data.size(); id += ti.grid_size * ElementSize){ for (auto wId = ti.warp_id; wId < requiredWarps; wId += ti.num_warps) { T l[ElementSize]; // auto matrixId = ti.thread_id; // auto globalOffset = matrixId * ElementSize; // auto localMatrixId = ti.local_thread_id; //id in shared buffer auto globalStart = wId * localWarpSize * vectorsPerElement; // printf("warp %d %d %d %d \n", wId,ti.lane_id,localWarpSize,Nelements); VectorType* global = reinterpret_cast<VectorType*>(data.data()); VectorType* globalResult = reinterpret_cast<VectorType*>(result.data()); VectorType* local = reinterpret_cast<VectorType*>(l); // loadShuffle<localWarpSize,sizeof(T)*ElementSize,VectorType>(data.data()+globalStart,local,ti.lane_id); loadShuffle<localWarpSize, sizeof(T) * ElementSize, VectorType>(global, local, ti.lane_id, globalStart, NVectors); for (int i = 0; i < ElementSize; ++i) { l[i] += 42; } storeShuffle<localWarpSize, sizeof(T) * ElementSize, VectorType>(globalResult, local, ti.lane_id, globalStart, NVectors); } } /* __global__ static void strangeLoop(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; for(int i = 0 ; i < 2 ; ++i){ if(lane == i) tmp = local[i]; } out[id] = tmp; } __global__ static void strangeUnrolled(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; //manually unrolled loop if(lane == 0) tmp = local[0]; if(lane == 1) tmp = local[1]; out[id] = tmp; } __global__ static void evenStrangerLoop(int* data, int* out, int N){ auto id = blockDim.x * blockIdx.x + threadIdx.x; auto lane = threadIdx.x % 2; if(id >= N) return; int local[2]; for(int i = 0 ; i < 2 ; ++i) local[i] = data[id * 2 + i]; for(int i = 0 ; i < 2 ; ++i) local[i] += 42; int tmp; for(int i = 0 ; i < 2 ; ++i){ if(lane >= i) tmp = local[i]; } out[id] = tmp; } */ // nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -lineinfo -src-in-ptx // -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr inverse_test.cu nvcc $CPPFLAGS -I // ~/Master/libs/data/include/eigen3/ -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 // --expt-relaxed-constexpr inverse_test.cu template <typename ElementType, int ElementSize> void coalescedCopyTest2(int ElementCount) { std::cout << "Bytes per element = " << sizeof(ElementType) * ElementSize << std::endl; size_t readWrites = ElementSize * ElementCount * sizeof(ElementType) * 2; CUDA::PerformanceTestHelper test("Coalesced processing test. 
ElementSize: " + std::to_string(ElementSize) + " ElementCount: " + std::to_string(ElementCount), readWrites); thrust::host_vector<ElementType> data(ElementSize * ElementCount, 42); thrust::host_vector<ElementType> result(ElementSize * ElementCount + 1, -1); thrust::host_vector<ElementType> ref(ElementSize * ElementCount + 1, -1); for (int i = 0; i < int(data.size()); ++i) { data[i] = rand() % 10; ref[i] = data[i] + 42; } thrust::device_vector<ElementType> d_data(data); thrust::device_vector<ElementType> d_result(result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); copyUnCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("copyUnCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); sharedMemoryUnCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("sharedMemoryUnCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); sharedMemoryCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("sharedMemoryCoalesced", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); sharedMemoryCoalesced2<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); // sharedMemoryCoalesced2<ElementType,ElementSize,BLOCK_SIZE> <<< // CUDA::getBlockCount(ElementCount,BLOCK_SIZE),BLOCK_SIZE >>>(d_data,d_result); } test.addMeassurement("sharedMemoryCoalesced2", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); copyFullCoalesced<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("copyFullCoalesced (no vector)", time); CUDA_SYNC_CHECK_ERROR(); } SAIGA_ASSERT(ref == d_result); { const int BLOCK_SIZE = 128; d_result = result; float time; { CUDA::ScopedTimer t(time); shuffleCopy<ElementType, ElementSize, BLOCK_SIZE> <<<CUDA::getBlockCount(ElementCount, BLOCK_SIZE), BLOCK_SIZE>>>(d_data, d_result); } test.addMeassurement("shuffleCopy", time); CUDA_SYNC_CHECK_ERROR(); } // for(int i = 0 ; i < ref.size() ; ++i){ // std::cout << ref[i] << " == " << d_result[i] << std::endl; // } SAIGA_ASSERT(ref == d_result); { result = result; float time; { CUDA::ScopedTimer t(time); cudaMemcpy(thrust::raw_pointer_cast(d_result.data()), thrust::raw_pointer_cast(d_data.data()), d_data.size() * sizeof(ElementType), cudaMemcpyDeviceToDevice); } test.addMeassurement("cudaMemcpy", time); CUDA_SYNC_CHECK_ERROR(); } return; } void coalescedCopyTest() { CUDA_SYNC_CHECK_ERROR(); // coalescedCopyTest2<int,4>(1); // coalescedCopyTest2<int,2>(1); // coalescedCopyTest2<int,16>(1); // coalescedCopyTest2<int,16>(3); // coalescedCopyTest2<int,16>(5); // coalescedCopyTest2<int,16>(1000 * 1000 + 1); // coalescedCopyTest2<int,16>(32); coalescedCopyTest2<int, 32>(1000 * 1000 + 1); coalescedCopyTest2<int, 64>(1000 * 1000 + 1); 
CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
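// ---------------------------------------------------------------------------
// Minimal standalone sketch (plain CUDA, no Saiga helpers assumed) of the two
// access patterns benchmarked above. In the uncoalesced variant each thread
// owns a contiguous run of ElementSize values, so neighboring lanes touch
// addresses ElementSize*sizeof(T) apart and their loads split into many
// memory transactions; in the coalesced variant neighboring lanes read
// neighboring addresses, so a warp's loads combine into a few wide
// transactions.
// ---------------------------------------------------------------------------
#include <cstddef>

template <typename T, int ElementSize>
__global__ void uncoalescedPattern(const T* in, T* out, size_t n)
{
    size_t base = (blockIdx.x * (size_t)blockDim.x + threadIdx.x) * ElementSize;
    for (int i = 0; i < ElementSize; ++i)
        if (base + i < n) out[base + i] = in[base + i] + 42;  // lanes stride by ElementSize
}

template <typename T>
__global__ void coalescedPattern(const T* in, T* out, size_t n)
{
    // grid-stride loop; consecutive lanes access consecutive elements
    for (size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x; i < n;
         i += (size_t)gridDim.x * blockDim.x)
        out[i] = in[i] + 42;
}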
#pragma once #include "RGBDOdometryCuda.h" #include <Cuda/Common/UtilsCuda.h> #include <Cuda/Geometry/ImageCudaDevice.cuh> #include <Cuda/Container/ArrayCudaDevice.cuh> namespace open3d { namespace cuda { /** * Server end */ template<size_t N> __device__ bool RGBDOdometryCudaDevice<N>::ComputePixelwiseCorrespondence( int x_source, int y_source, size_t level, int &x_target, int &y_target, Vector3f &X_source_on_target, float &d_target) { bool mask = false; Vector2f p_warpedf; Vector2i p_warped; if(odometry_type_ == OdometryType::FRAME_TO_MODEL) { /** Check 1: Is the source vertex and normal valid */ mask = !source_vertex_[level].at(x_source, y_source).IsNaN(); mask = mask && !source_normal_[level].at(x_source, y_source).IsNaN(); /* printf("%d, [%f, %f, %f] [%f, %f, %f]\n", mask, curr_vertex(0), curr_vertex(1), curr_vertex(2), */ /* curr_normal(0), curr_normal(1), curr_normal(2)); */ if (!mask) return false; /** Check 2: Is the projected vertex valid in image */ X_source_on_target = transform_source_to_target_ * source_vertex_[level].at(x_source, y_source); p_warpedf = intrinsics_[level].ProjectPoint(X_source_on_target); p_warped = Vector2i(int(p_warpedf(0) + 0.5f), int(p_warpedf(1) + 0.5f)); /* printf("X_source_on_target: [%f, %f, %f], [%d, %d]\n", X_source_on_target(0), X_source_on_target(1), */ /* X_source_on_target(2), p_warped(0), p_warped(1)); */ mask = intrinsics_[level].IsPixelValid(p_warped); if (!mask) return false; /** Check 3: Whether transformed point has reasonable z value */ mask = IsValidDepth(X_source_on_target(2)); if(!mask) return false; //! We don't use d_target d_target = 0.0f; } else { /** Check 1: depth valid in source? **/ float d_source = source_depth_[level].at(x_source, y_source)(0); mask = IsValidDepth(d_source); if (!mask) return false; /** Check 2: reprojected point in image? **/ X_source_on_target = transform_source_to_target_ * intrinsics_[level].InverseProjectPixel( Vector2i(x_source, y_source), d_source); p_warpedf = intrinsics_[level].ProjectPoint(X_source_on_target); p_warped = Vector2i(int(p_warpedf(0) + 0.5f), int(p_warpedf(1) + 0.5f)); mask = intrinsics_[level].IsPixelValid(p_warped); if (!mask) return false; /** Check 3: depth valid in target? Occlusion? -> 1ms **/ d_target = target_depth_[level].at(p_warped(0), p_warped(1))(0); mask = IsValidDepth(d_target) && IsValidDepthDiff(d_target - X_source_on_target(2)); if (!mask) return false; } x_target = p_warped(0); y_target = p_warped(1); return true; } template<size_t N> __device__ bool RGBDOdometryCudaDevice<N>::ComputePixelwiseJacobianAndResidual( int x_source, int y_source, int x_target, int y_target, size_t level, const Vector3f &X_source_on_target, const float &d_target, Vector6f &jacobian_I, Vector6f &jacobian_D, float &residual_I, float &residual_D) { /********** Phase 2: Build linear system **********/ /** Checks passed, let's rock! -> 3ms, can be 2ms faster if we don't use * interpolation * \partial D(p_warped) \partial p_warped: [dx_D, dy_D] at p_warped, 1x2 * \partial I(p_warped) \partial p_warped: [dx_I, dy_I] at p_warped, 1x2 * \partial X.z \partial X: [0, 0, 1], 1x3 * \partial p_warped \partial X: [fx/Z, 0, -fx X/Z^2; * 0, fy/Z, -fy Y/Z^2] 2x3 * \partial X \partial \xi: [I | -[X]^] = [1 0 0 0 Z -Y; * 0 1 0 -Z 0 X; * 0 0 1 Y -X 0] 3x6 * J_I = (d I(p_warped) / d p_warped) (d p_warped / d X) (d X / d \xi) * J_D = (d D(p_warped) / d p_warped) (d p_warped / d X) (d X / d \xi) * - (d X.z / d X) (d X / d \xi) */ //! 
Intensity computations const float kSobelFactor = 0.125f; float dx_I = kSobelFactor * target_intensity_dx_[level].at( x_target, y_target)(0); float dy_I = kSobelFactor * target_intensity_dy_[level].at( x_target, y_target)(0); float fx = intrinsics_[level].fx_; float fy = intrinsics_[level].fy_; float inv_Z = 1.0f / X_source_on_target(2); float fx_on_Z = fx * inv_Z; float fy_on_Z = fy * inv_Z; float c0 = dx_I * fx_on_Z; float c1 = dy_I * fy_on_Z; float c2 = -(c0 * X_source_on_target(0) + c1 * X_source_on_target(1)) * inv_Z; jacobian_I(0) = sqrt_coeff_I_ * (-X_source_on_target(2) * c1 + X_source_on_target(1) * c2); jacobian_I(1) = sqrt_coeff_I_ * ( X_source_on_target(2) * c0 - X_source_on_target(0) * c2); jacobian_I(2) = sqrt_coeff_I_ * (-X_source_on_target(1) * c0 + X_source_on_target(0) * c1); jacobian_I(3) = sqrt_coeff_I_ * c0; jacobian_I(4) = sqrt_coeff_I_ * c1; jacobian_I(5) = sqrt_coeff_I_ * c2; residual_I = sqrt_coeff_I_ * ( target_intensity_[level].at(x_target, y_target)(0) - source_intensity_[level].at(x_source, y_source)(0)); /* printf("- (%d, %d), (%d, %d) -> " */ /* "(%f %f %f %f %f %f) - %f\n", */ /* x_source, y_source, x_target, y_target, */ /* jacobian_I(0), jacobian_I(1), jacobian_I(2), */ /* jacobian_I(3), jacobian_I(4), jacobian_I(5), residual_I); */ //! Depth computations if(odometry_type_ == OdometryType::FRAME_TO_FRAME) { float dx_D = kSobelFactor * target_depth_dx_[level].at( x_target, y_target)(0); float dy_D = kSobelFactor * target_depth_dy_[level].at( x_target, y_target)(0); if (isnan(dx_D)) dx_D = 0; if (isnan(dy_D)) dy_D = 0; float d0 = dx_D * fx_on_Z; float d1 = dy_D * fy_on_Z; float d2 = -(d0 * X_source_on_target(0) + d1 * X_source_on_target(1)) * inv_Z; jacobian_D(0) = sqrt_coeff_D_ * ((-X_source_on_target(2) * d1 + X_source_on_target(1) * d2) - X_source_on_target(1)); jacobian_D(1) = sqrt_coeff_D_ * ((X_source_on_target(2) * d0 - X_source_on_target(0) * d2) + X_source_on_target(0)); jacobian_D(2) = sqrt_coeff_D_ * (-X_source_on_target(1) * d0 + X_source_on_target(0) * d1); jacobian_D(3) = sqrt_coeff_D_ * d0; jacobian_D(4) = sqrt_coeff_D_ * d1; jacobian_D(5) = sqrt_coeff_D_ * (d2 - 1.0f); residual_D = sqrt_coeff_D_ * (d_target - X_source_on_target(2)); } else { Vector3f normal_t = target_normal_[level].at(x_target, y_target); Vector3f vertex_t = target_vertex_[level].at(x_target, y_target); if(vertex_t.IsNaN() || normal_t.IsNaN()) return false; bool mask = IsValidDepth(vertex_t(2)); if(!mask) return false; jacobian_D(0) = sqrt_coeff_D_ * (-X_source_on_target(2) * normal_t(1) + X_source_on_target(1) * normal_t(2)); jacobian_D(1) = sqrt_coeff_D_ * ( X_source_on_target(2) * normal_t(0) - X_source_on_target(0) * normal_t(2)); jacobian_D(2) = sqrt_coeff_D_ * (-X_source_on_target(1) * normal_t(0) + X_source_on_target(0) * normal_t(1)); jacobian_D(3) = sqrt_coeff_D_ * normal_t(0); jacobian_D(4) = sqrt_coeff_D_ * normal_t(1); jacobian_D(5) = sqrt_coeff_D_ * normal_t(2); residual_D = sqrt_coeff_D_ * (X_source_on_target - vertex_t).dot(normal_t); } return true; } template<size_t N> __device__ bool RGBDOdometryCudaDevice<N>:: ComputePixelwiseCorrespondenceAndInformationJacobian( int x_source, int y_source, Vector6f &jacobian_x, Vector6f &jacobian_y, Vector6f &jacobian_z) { bool mask = false; Vector3f X_target; if(odometry_type_ == OdometryType::FRAME_TO_MODEL) { mask = IsValidDepth(source_vertex_[0].at(x_source, y_source)(2)); if (!mask) return false; Vector3f X_source_on_target = transform_source_to_target_ * source_vertex_[0].at(x_source, y_source); Vector2f p_warpedf = 
intrinsics_[0].ProjectPoint(X_source_on_target); Vector2i p_warped(int(p_warpedf(0) + 0.5f), int(p_warpedf(1) + 0.5f)); mask = intrinsics_[0].IsPixelValid(p_warpedf); if (!mask) return false; X_target = target_vertex_[0].at(p_warped(0), p_warped(1)); } else { /** Check 1: depth valid in source? **/ float d_source = source_depth_[0].at(x_source, y_source)(0); bool mask = IsValidDepth(d_source); if (!mask) return false; /** Check 2: reprojected point in image? **/ Vector3f X_source_on_target = transform_source_to_target_ * intrinsics_[0].InverseProjectPixel( Vector2i(x_source, y_source), d_source); Vector2f p_warpedf = intrinsics_[0].ProjectPoint(X_source_on_target); mask = intrinsics_[0].IsPixelValid(p_warpedf); if (!mask) return false; Vector2i p_warped(int(p_warpedf(0) + 0.5f), int(p_warpedf(1) + 0.5f)); /** Check 3: depth valid in target? Occlusion? -> 1ms **/ float d_target = target_depth_[0].at(p_warped(0), p_warped(1))(0); mask = IsValidDepth(d_target) && IsValidDepthDiff( d_target - X_source_on_target(2)); if (!mask) return false; X_target = intrinsics_[0].InverseProjectPixel(p_warped, d_target); } jacobian_x(0) = jacobian_x(4) = jacobian_x(5) = 0; jacobian_x(1) = X_target(2); jacobian_x(2) = -X_target(1); jacobian_x(3) = 1; jacobian_y(1) = jacobian_y(3) = jacobian_y(5) = 0; jacobian_y(0) = -X_target(2); jacobian_y(2) = X_target(0); jacobian_y(4) = 1.0f; jacobian_z(2) = jacobian_z(3) = jacobian_z(4) = 0; jacobian_z(0) = X_target(1); jacobian_z(1) = -X_target(0); jacobian_z(5) = 1.0f; return true; } } // cuda } // open3d
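// ---------------------------------------------------------------------------
// Hedged reference sketch (host-side, illustrative names, not Open3D API) of
// the point-to-plane term assembled in the FRAME_TO_MODEL branch above: for a
// warped source point X and a target vertex/normal pair (V, n), the residual
// is r = (X - V) . n and the Jacobian with respect to the twist
// (wx, wy, wz, tx, ty, tz) is [ (X x n), n ], which is what jacobian_D(0..5)
// stores (up to the sqrt_coeff_D_ weighting, omitted here).
// ---------------------------------------------------------------------------
struct Vec3f { float x, y, z; };

static inline Vec3f cross3(const Vec3f &a, const Vec3f &b) {
    return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}
static inline float dot3(const Vec3f &a, const Vec3f &b) {
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

// Fills J[0..5] and the residual r for one correspondence.
static inline void PointToPlaneTerm(const Vec3f &X, const Vec3f &V,
                                    const Vec3f &n, float J[6], float &r) {
    Vec3f c = cross3(X, n);                 // rotational block
    J[0] = c.x; J[1] = c.y; J[2] = c.z;
    J[3] = n.x; J[4] = n.y; J[5] = n.z;     // translational block
    r = dot3({X.x - V.x, X.y - V.y, X.z - V.z}, n);
}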
//PTX code for IDCT (GPUJPEG_IDCT_GPU_KERNEL_INPLACE macro) should be a bit faster //but maybe won't work for newer CCs #define GPUJPEG_IDCT_USE_ASM 0 /** Fast integer multiplication */ #define FMUL(x,y) (__mul24(x,y)) //#define FMUL(x,y) ((x)*(y)) // X block count which will be processed by one thread block #define GPUJPEG_DCT_BLOCK_COUNT_X 4 // Y block count which will be processed by one thread block #define GPUJPEG_DCT_BLOCK_COUNT_Y 4 // Thread block width #define GPUJPEG_DCT_THREAD_BLOCK_WIDTH (GPUJPEG_BLOCK_SIZE * GPUJPEG_DCT_BLOCK_COUNT_X) // Thread block height #define GPUJPEG_DCT_THREAD_BLOCK_HEIGHT (GPUJPEG_BLOCK_SIZE * GPUJPEG_DCT_BLOCK_COUNT_Y) // Stride of shared memory buffer (short kernel) #define GPUJPEG_DCT_THREAD_BLOCK_STRIDE (GPUJPEG_DCT_THREAD_BLOCK_WIDTH + 4) #define IMAD(a, b, c) ( ((a) * (b)) + (c) ) #define IMUL(a, b) ((a) * (b)) #define SIN_1_4 0x5A82 #define COS_1_4 0x5A82 #define SIN_1_8 0x30FC #define COS_1_8 0x7642 #define OSIN_1_16 0x063E #define OSIN_3_16 0x11C7 #define OSIN_5_16 0x1A9B #define OSIN_7_16 0x1F63 #define OCOS_1_16 0x1F63 #define OCOS_3_16 0x1A9B #define OCOS_5_16 0x11C7 #define OCOS_7_16 0x063E /** * Package of 2 shorts into 1 int - designed to perform i/o by integers to avoid bank conflicts */ union PackedInteger { struct __align__(8) { int16_t hShort1; int16_t hShort2; }; int32_t hInt; }; /** * Converts fixed point value to short value */ __device__ inline int16_t unfixh(int x) { return (int16_t)((x + 0x8000) >> 16); } /** * Converts fixed point value to short value */ __device__ inline int unfixo(int x) { return (x + 0x1000) >> 13; } /** * 1D 8point DCT, with optional level shift (must be premultiplied). * Based on based on Arai, Agui, and Nakajima's DCT algorithm. (Trans. IEICE E-71(11):1095) * Implementation inspired by Independent JPEG Group JPEG implementation, file jfdctflt.c, * but optimized for CUDA (cheap floating point MAD instructions). */ template <typename T> __device__ static inline void gpujpeg_dct_gpu(const T in0, const T in1, const T in2, const T in3, const T in4, const T in5, const T in6, const T in7, T & out0, T & out1, T & out2, T & out3, T & out4, T & out5, T & out6, T & out7, const float level_shift_8 = 0.0f) { const float diff0 = in0 + in7; const float diff1 = in1 + in6; const float diff2 = in2 + in5; const float diff3 = in3 + in4; const float diff4 = in3 - in4; const float diff5 = in2 - in5; const float diff6 = in1 - in6; const float diff7 = in0 - in7; const float even0 = diff0 + diff3; const float even1 = diff1 + diff2; const float even2 = diff1 - diff2; const float even3 = diff0 - diff3; const float even_diff = even2 + even3; const float odd0 = diff4 + diff5; const float odd1 = diff5 + diff6; const float odd2 = diff6 + diff7; const float odd_diff5 = (odd0 - odd2) * 0.382683433f; const float odd_diff4 = 1.306562965f * odd2 + odd_diff5; const float odd_diff3 = diff7 - odd1 * 0.707106781f; const float odd_diff2 = 0.541196100f * odd0 + odd_diff5; const float odd_diff1 = diff7 + odd1 * 0.707106781f; out0 = even0 + even1 + level_shift_8; out1 = odd_diff1 + odd_diff4; out2 = even3 + even_diff * 0.707106781f; out3 = odd_diff3 - odd_diff2; out4 = even0 - even1; out5 = odd_diff3 + odd_diff2; out6 = even3 - even_diff * 0.707106781f; out7 = odd_diff1 - odd_diff4; } /** Constant memory copy of transposed quantization table pre-divided with DCT output weights. 
*/ __constant__ float gpujpeg_dct_gpu_quantization_table_const[64]; /** * Performs 8x8 block-wise Forward Discrete Cosine Transform of the given * image plane and outputs result to the array of coefficients. Short implementation. * This kernel is designed to process image by blocks of blocks8x8 that * utilize maximum warps capacity, assuming that it is enough of 8 threads * per block8x8. * * @param source [IN] - Source coefficients * @param source_stride [IN] - Stride of source * @param output [OUT] - Source coefficients * @param output_stride [OUT] - Stride of source * @param quant_table [IN] - Quantization table, pre-divided with DCT output scales * @return None */ template <int WARP_COUNT> __global__ void gpujpeg_dct_gpu_kernel(int block_count_x, int block_count_y, uint8_t* source, const unsigned int source_stride, int16_t* output, int output_stride, const float * const quant_table) { // each warp processes 4 8x8 blocks (horizontally neighboring) const int block_idx_x = threadIdx.x >> 3; const int block_idx_y = threadIdx.y; // offset of threadblocks's blocks in the image (along both axes) const int block_offset_x = blockIdx.x * 4; const int block_offset_y = blockIdx.y * WARP_COUNT; // stop if thread's block is out of image const bool processing = block_offset_x + block_idx_x < block_count_x && block_offset_y + block_idx_y < block_count_y; if(!processing) { return; } // index of row/column processed by this thread within its 8x8 block const int dct_idx = threadIdx.x & 7; // data type of transformed coefficients typedef float dct_t; // dimensions of shared buffer (compile time constants) enum { // 4 8x8 blocks, padded to odd number of 4byte banks SHARED_STRIDE = ((32 * sizeof(dct_t)) | 4) / sizeof(dct_t), // number of shared buffer items needed for 1 warp SHARED_SIZE_WARP = SHARED_STRIDE * 8, // total number of items in shared buffer SHARED_SIZE_TOTAL = SHARED_SIZE_WARP * WARP_COUNT }; // buffer for transpositions of all blocks __shared__ dct_t s_transposition_all[SHARED_SIZE_TOTAL]; // pointer to begin of transposition buffer for thread's block dct_t * const s_transposition = s_transposition_all + block_idx_y * SHARED_SIZE_WARP + block_idx_x * 8; // input coefficients pointer (each thread loads 1 column of 8 coefficients from its 8x8 block) const int in_x = (block_offset_x + block_idx_x) * 8 + dct_idx; const int in_y = (block_offset_y + block_idx_y) * 8; const int in_offset = in_x + in_y * source_stride; const uint8_t * in = source + in_offset; // load all 8 coefficients of thread's column, but do NOT apply level shift now - will be applied as part of DCT dct_t src0 = *in; in += source_stride; dct_t src1 = *in; in += source_stride; dct_t src2 = *in; in += source_stride; dct_t src3 = *in; in += source_stride; dct_t src4 = *in; in += source_stride; dct_t src5 = *in; in += source_stride; dct_t src6 = *in; in += source_stride; dct_t src7 = *in; // destination pointer into shared transpose buffer (each thread saves one column) dct_t * const s_dest = s_transposition + dct_idx; // transform the column (vertically) and save it into the transpose buffer gpujpeg_dct_gpu(src0, src1, src2, src3, src4, src5, src6, src7, s_dest[SHARED_STRIDE * 0], s_dest[SHARED_STRIDE * 1], s_dest[SHARED_STRIDE * 2], s_dest[SHARED_STRIDE * 3], s_dest[SHARED_STRIDE * 4], s_dest[SHARED_STRIDE * 5], s_dest[SHARED_STRIDE * 6], s_dest[SHARED_STRIDE * 7], -1024.0f // = 8 * -128 ... 
level shift sum for all 8 coefficients ); // read coefficients back - each thread reads one row (no need to sync - only threads within same warp work on each block) // ... and transform the row horizontally volatile dct_t * s_src = s_transposition + SHARED_STRIDE * dct_idx; dct_t dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7; gpujpeg_dct_gpu(s_src[0], s_src[1], s_src[2], s_src[3], s_src[4], s_src[5], s_src[6], s_src[7], dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7); // apply quantization to the row of coefficients (quantization table is actually transposed in global memory for coalesced memory acceses) #if __CUDA_ARCH__ < 200 const float * const quantization_row = gpujpeg_dct_gpu_quantization_table_const + dct_idx; // Quantization table in constant memory for CCs < 2.0 #else const float * const quantization_row = quant_table + dct_idx; // Cached global memory reads for CCs >= 2.0 #endif const int out0 = rintf(dct0 * quantization_row[0 * 8]); const int out1 = rintf(dct1 * quantization_row[1 * 8]); const int out2 = rintf(dct2 * quantization_row[2 * 8]); const int out3 = rintf(dct3 * quantization_row[3 * 8]); const int out4 = rintf(dct4 * quantization_row[4 * 8]); const int out5 = rintf(dct5 * quantization_row[5 * 8]); const int out6 = rintf(dct6 * quantization_row[6 * 8]); const int out7 = rintf(dct7 * quantization_row[7 * 8]); // using single write, save output row packed into 16 bytes const int out_x = (block_offset_x + block_idx_x) * 64; // 64 coefficients per one transformed and quantized block const int out_y = (block_offset_y + block_idx_y) * output_stride; ((uint4*)(output + out_x + out_y))[dct_idx] = make_uint4( (out0 & 0xFFFF) + (out1 << 16), (out2 & 0xFFFF) + (out3 << 16), (out4 & 0xFFFF) + (out5 << 16), // ... & 0xFFFF keeps only lower 16 bits - useful for negative numbers, which have 1s in upper bits (out6 & 0xFFFF) + (out7 << 16) ); } /** Quantization table */ //TODO zmenit na float __constant__ uint16_t gpujpeg_idct_gpu_quantization_table[64]; #if !GPUJPEG_IDCT_USE_ASM /** * Performs in-place IDCT of vector of 8 elements (used to access rows * or columns in a vector). 
* With a use of a scheme presented in Jie Liang - Approximating the DCT * with the lifting scheme: systematic design and applications; online: * http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=910943 * * @param V8 [IN/OUT] - Pointer to the first element of vector * @return None */ __device__ void gpujpeg_idct_gpu_kernel_inplace(float* V8) { //costants which are used more than once const float koeficient[6] = {0.4142135623f, 0.3535533905f, 0.4619397662f, 0.1989123673f, 0.7071067811f, -2.0f}; V8[2] *= 0.5411961f; V8[4] *= 0.509795579f; V8[5] *= 0.601344887f; V8[1] = (V8[0] - V8[1]) * koeficient[1]; V8[0] = V8[0] * koeficient[4] - V8[1]; V8[3] = V8[2] * koeficient[1] + V8[3] * koeficient[2]; V8[2] = V8[3] * koeficient[0] - V8[2]; V8[6] = V8[5] * koeficient[2] + V8[6] * koeficient[0]; V8[5] = -0.6681786379f * V8[6] + V8[5]; V8[7] = V8[4] * koeficient[3] + V8[7] * 0.49039264f; V8[4] = V8[7] * koeficient[3] - V8[4]; //instead of float tmp = V8[1]; V8[1] = V8[2] + V8[1]; V8[2] = tmp - V8[2]; //we use this two operations (with a use of a multiply-add instruction) V8[1] = V8[2] + V8[1]; V8[2] = koeficient[5] * V8[2] + V8[1]; V8[4] = V8[5] + V8[4]; V8[5] = 2.0f * V8[5] - V8[4]; V8[7] = V8[6] + V8[7]; V8[6] = koeficient[5] * V8[6] + V8[7]; V8[0] = V8[3] + V8[0]; V8[3] = koeficient[5] * V8[3] + V8[0]; V8[5] = V8[6] * koeficient[0] + V8[5]; V8[6] = V8[5] * -koeficient[4] + V8[6]; V8[5] = V8[6] * koeficient[0] + V8[5]; V8[3] = V8[3] + V8[4]; V8[4] = koeficient[5] * V8[4] + V8[3]; V8[2] = V8[2] + V8[5]; V8[5] = koeficient[5] * V8[5] + V8[2]; V8[1] = V8[6] + V8[1]; V8[6] = koeficient[5] * V8[6] + V8[1]; V8[0] = V8[0] + V8[7]; V8[7] = koeficient[5] * V8[7] + V8[0]; } #else #if __CUDA_ARCH__ >= 200 #define MULTIPLY_ADD "fma.rn.f32 " #else #define MULTIPLY_ADD "mad.f32 " #endif //instead of float tmp = V8[1]; V8[1] = V8[2] + V8[1]; V8[2] = tmp - V8[2]; //we use this two operations (with a use of a multiply-add instruction) #define ASM_X_PLUS_Y_SIMULTANEOUSLY_WITH_X_MINUS_Y(x, y) \ "add.f32 " #x ", " #x ", " #y "; \n\t" \ MULTIPLY_ADD #y ", " #y ", 0fc0000000, " #x "; \n\t" /** * Performs in-place IDCT of 8 elements (rows or columns). A PTX implementation. * With a use of a scheme presented in Jie Liang - Approximating the DCT * with the lifting scheme: systematic design and applications; online: * http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=910943 */ #define GPUJPEG_IDCT_GPU_KERNEL_INPLACE(in0, in1, in2, in3, in4, in5, in6, in7, \ out0, out1, out2, out3, out4, out5, out6, out7) \ asm( \ /* negreg register used for negating variables (e.g. 
for */ \ /* a * b - c we neagte c into negreg and use multiply-add) */ \ "{.reg .f32 negreg; \n\t" \ \ "mul.f32 %9, %9, 0fbeb504f3; \n\t" \ MULTIPLY_ADD "%9, %8, 0f3eb504f3, %9; \n\t" \ "neg.f32 negreg, %9; \n\t" \ MULTIPLY_ADD "%8, %8, 0f3f3504f3, negreg; \n\t" \ \ "mul.f32 %10, %10, 0f3f0a8bd4; \n\t" \ "mul.f32 %11, %11, 0f3eec835e; \n\t" \ MULTIPLY_ADD "%11, %10, 0f3eb504f3, %11; \n\t" \ "neg.f32 %10, %10; \n\t" \ MULTIPLY_ADD "%10, %11, 0f3ed413cd, %10; \n\t" \ \ "mul.f32 %13, %13, 0f3f19f1bd; \n\t" \ "mul.f32 %14, %14, 0f3ed4db31; \n\t" \ MULTIPLY_ADD "%14, %13, 0f3eec835e, %14; \n\t" \ MULTIPLY_ADD "%13, %14, 0fbf2b0dc7, %13; \n\t" \ \ "mul.f32 %12, %12, 0f3f0281f7; \n\t" \ "mul.f32 %15, %15, 0f3efb14be; \n\t" \ MULTIPLY_ADD "%15, %12, 0f3e4bafaf, %15; \n\t" \ "neg.f32 %12, %12; \n\t" \ MULTIPLY_ADD "%12, %15, 0f3e4bafaf, %12; \n\t" \ \ ASM_X_PLUS_Y_SIMULTANEOUSLY_WITH_X_MINUS_Y(%9, %10) \ \ ASM_X_PLUS_Y_SIMULTANEOUSLY_WITH_X_MINUS_Y(%12, %13) \ \ ASM_X_PLUS_Y_SIMULTANEOUSLY_WITH_X_MINUS_Y(%8, %11) \ \ ASM_X_PLUS_Y_SIMULTANEOUSLY_WITH_X_MINUS_Y(%15, %14) \ \ MULTIPLY_ADD "%13, %14, 0fbed413db, %13; \n\t" \ MULTIPLY_ADD "%14, %13, 0f3f3504f3, %14; \n\t" \ "neg.f32 negreg, %13; \n\t" \ MULTIPLY_ADD "%13, %14, 0f3ed413cd, negreg; \n\t" \ \ /* writing into output registers */ \ "add.f32 %3, %11, %12; \n\t" \ "sub.f32 %4, %11, %12; \n\t" \ \ "add.f32 %2, %10, %13; \n\t" \ "sub.f32 %5, %10, %13; \n\t" \ \ "add.f32 %1, %14, %9; \n\t" \ "sub.f32 %6, %9, %14; \n\t" \ \ "add.f32 %0, %8, %15; \n\t" \ "sub.f32 %7, %8, %15; \n\t" \ "}" \ \ : "=f"((out0)), \ "=f"((out1)), \ "=f"((out2)), \ "=f"((out3)), \ "=f"((out4)), \ "=f"((out5)), \ "=f"((out6)), \ "=f"((out7)) \ : "f"((in0)), \ "f"((in1)), \ "f"((in2)), \ "f"((in3)), \ "f"((in4)), \ "f"((in5)), \ "f"((in6)), \ "f"((in7)) \ ); #endif /** * Performs 8x8 block-wise Inverse Discrete Cosine Transform of the given * image plane and outputs result to the array of coefficients. Float implementation. * This kernel is designed to process image by blocks of blocks8x8 that * utilize maximum warps capacity. Prepared for 8*8*2 threads in a block * * @param source [IN] - Source coefficients * @param output [OUT] - Result coefficients * @param output_stride [OUT] - Stride of result (image width) * @param quantization_table [IN] - Quantization table * @return None */ __global__ void gpujpeg_idct_gpu_kernel(int16_t* source, uint8_t* result, int output_stride, uint16_t* quantization_table) { //here the grid is assumed to be only in x - it saves a few operations; if a larger //block count is used (e. g. GPUJPEG_IDCT_BLOCK_Z == 1), it would need to be adjusted, //the blockIdx.x not to exceed 65535. In the current state this function is good //enough for a 67.1 MPix picture (8K is 33.1 MPix) //the first block of picture processed in this thread block unsigned int picBlockNumber = (blockIdx.x) * GPUJPEG_IDCT_BLOCK_Y * GPUJPEG_IDCT_BLOCK_X * GPUJPEG_IDCT_BLOCK_Z; //pointer to the begin of data for this thread block int16_t* sourcePtr = (int16_t*) (source) + picBlockNumber * 8; __shared__ float data[GPUJPEG_IDCT_BLOCK_Z][8][GPUJPEG_IDCT_BLOCK_Y][GPUJPEG_IDCT_BLOCK_X + 1]; //variables to be used later more times (only one multiplication here) unsigned int z64 = threadIdx.z * 64; unsigned int x8 = threadIdx.x * 8; //data copying global -> shared, type casting int16_t -> float and dequantization. 
//16b reading gives only 50% efficiency but other ways are too complicated //so this proves to be the fastest way #pragma unroll for (int i = 0; i < 8; i++) { data[threadIdx.z][i][threadIdx.x][threadIdx.y] = sourcePtr[x8 + threadIdx.y + i * GPUJPEG_IDCT_BLOCK_X * GPUJPEG_IDCT_BLOCK_Y + z64 * 8] * quantization_table[threadIdx.x * 8 + threadIdx.y]; } __syncthreads(); float x[8]; //the compiler was doing awful things - needlessly copying constants into //registers and so on; it was easier to write this in assembler than to try to //convince it not to do silly things; all constants are used directly by //value and are not needlessly pushed into registers //here the data are being processed by columns - each thread processes one column #if GPUJPEG_IDCT_USE_ASM GPUJPEG_IDCT_GPU_KERNEL_INPLACE(data[threadIdx.z][threadIdx.x][0][threadIdx.y], data[threadIdx.z][threadIdx.x][4][threadIdx.y], data[threadIdx.z][threadIdx.x][6][threadIdx.y], data[threadIdx.z][threadIdx.x][2][threadIdx.y], data[threadIdx.z][threadIdx.x][7][threadIdx.y], data[threadIdx.z][threadIdx.x][5][threadIdx.y], data[threadIdx.z][threadIdx.x][3][threadIdx.y], data[threadIdx.z][threadIdx.x][1][threadIdx.y], data[threadIdx.z][threadIdx.x][0][threadIdx.y], data[threadIdx.z][threadIdx.x][1][threadIdx.y], data[threadIdx.z][threadIdx.x][2][threadIdx.y], data[threadIdx.z][threadIdx.x][3][threadIdx.y], data[threadIdx.z][threadIdx.x][4][threadIdx.y], data[threadIdx.z][threadIdx.x][5][threadIdx.y], data[threadIdx.z][threadIdx.x][6][threadIdx.y], data[threadIdx.z][threadIdx.x][7][threadIdx.y]) #else x[0] = data[threadIdx.z][threadIdx.x][0][threadIdx.y]; x[1] = data[threadIdx.z][threadIdx.x][4][threadIdx.y]; x[2] = data[threadIdx.z][threadIdx.x][6][threadIdx.y]; x[3] = data[threadIdx.z][threadIdx.x][2][threadIdx.y]; x[4] = data[threadIdx.z][threadIdx.x][7][threadIdx.y]; x[5] = data[threadIdx.z][threadIdx.x][5][threadIdx.y]; x[6] = data[threadIdx.z][threadIdx.x][3][threadIdx.y]; x[7] = data[threadIdx.z][threadIdx.x][1][threadIdx.y]; gpujpeg_idct_gpu_kernel_inplace(x); data[threadIdx.z][threadIdx.x][0][threadIdx.y] = x[0]; data[threadIdx.z][threadIdx.x][1][threadIdx.y] = x[1]; data[threadIdx.z][threadIdx.x][2][threadIdx.y] = x[2]; data[threadIdx.z][threadIdx.x][3][threadIdx.y] = x[3]; data[threadIdx.z][threadIdx.x][4][threadIdx.y] = x[4]; data[threadIdx.z][threadIdx.x][5][threadIdx.y] = x[5]; data[threadIdx.z][threadIdx.x][6][threadIdx.y] = x[6]; data[threadIdx.z][threadIdx.x][7][threadIdx.y] = x[7]; #endif //between data writing and sync it's good to compute something useful // - the sync will be shorter.
//output pointer (the begin for this thread block) unsigned int firstByteOfActualBlock = x8 + z64 + picBlockNumber; //output pointer for this thread + output row shift; each thread writes 1 row of an //output block (8B), threads [0 - 7] in threadIdx.x write blocks next to each other, //threads [1 - 7] in threadIdx.y write next rows of a block; threads [0 - 1] in //threadIdx.z write next 8 blocks uint8_t* resultPtr = ((uint8_t*) result) + firstByteOfActualBlock + (threadIdx.y + ((firstByteOfActualBlock / output_stride) * 7)) * output_stride; __syncthreads(); #if GPUJPEG_IDCT_USE_ASM //here the data are being processed by rows - each thread processes one row GPUJPEG_IDCT_GPU_KERNEL_INPLACE(data[threadIdx.z][threadIdx.x][threadIdx.y][0], data[threadIdx.z][threadIdx.x][threadIdx.y][4], data[threadIdx.z][threadIdx.x][threadIdx.y][6], data[threadIdx.z][threadIdx.x][threadIdx.y][2], data[threadIdx.z][threadIdx.x][threadIdx.y][7], data[threadIdx.z][threadIdx.x][threadIdx.y][5], data[threadIdx.z][threadIdx.x][threadIdx.y][3], data[threadIdx.z][threadIdx.x][threadIdx.y][1], x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]) #else x[0] = data[threadIdx.z][threadIdx.x][threadIdx.y][0]; x[1] = data[threadIdx.z][threadIdx.x][threadIdx.y][4]; x[2] = data[threadIdx.z][threadIdx.x][threadIdx.y][6]; x[3] = data[threadIdx.z][threadIdx.x][threadIdx.y][2]; x[4] = data[threadIdx.z][threadIdx.x][threadIdx.y][7]; x[5] = data[threadIdx.z][threadIdx.x][threadIdx.y][5]; x[6] = data[threadIdx.z][threadIdx.x][threadIdx.y][3]; x[7] = data[threadIdx.z][threadIdx.x][threadIdx.y][1]; gpujpeg_idct_gpu_kernel_inplace(x); #endif //output will be written by 8B (one row) which is the most effective way uint64_t tempResult; uint64_t* tempResultP = &tempResult; #pragma unroll for (int i = 0; i < 8; i++) { //this would be faster but will work only for 100% quality otherwise some values overflow 255 //((uint8_t*) tempResultP)[i] = __float2uint_rz(x[i] + ((float) 128.0)); //cast float to uint8_t with saturation (.sat) which cuts values higher than //255 to 255 and smaller than 0 to 0; cuda can't use a reg smaller than 32b //(though it can convert to 8b for the saturation purposes and save to 32b reg) uint32_t save; asm("cvt.rni.u8.f32.sat %0, %1;" : "=r"(save) : "f"(x[i] + ((float) 128.0))); ((uint8_t*) tempResultP)[i] = save; } //writing result - one row of a picture block by a thread *((uint64_t*) resultPtr) = tempResult; } /* Documented at declaration */ int gpujpeg_dct_gpu(struct gpujpeg_encoder* encoder) { // Get coder struct gpujpeg_coder* coder = &encoder->coder; // Encode each component for ( int comp = 0; comp < coder->param_image.comp_count; comp++ ) { // Get component struct gpujpeg_component* component = &coder->component[comp]; // Get quantization table enum gpujpeg_component_type type = (comp == 0) ? 
GPUJPEG_COMPONENT_LUMINANCE : GPUJPEG_COMPONENT_CHROMINANCE; const float* const d_quantization_table = encoder->table_quantization[type].d_table_forward; // copy the quantization table into constant memory for devices of CC < 2.0 if( encoder->coder.cuda_cc_major < 2 ) { cudaMemcpyToSymbolAsync( gpujpeg_dct_gpu_quantization_table_const, d_quantization_table, sizeof(gpujpeg_dct_gpu_quantization_table_const), 0, cudaMemcpyDeviceToDevice, encoder->stream ); gpujpeg_cuda_check_error("Quantization table memcpy failed", return -1); } int roi_width = component->data_width; int roi_height = component->data_height; assert(GPUJPEG_BLOCK_SIZE == 8); int block_count_x = roi_width / GPUJPEG_BLOCK_SIZE; int block_count_y = roi_height / GPUJPEG_BLOCK_SIZE; enum { WARP_COUNT = 4 }; // Perform block-wise DCT processing dim3 dct_grid( gpujpeg_div_and_round_up(block_count_x, 4), gpujpeg_div_and_round_up(block_count_y, WARP_COUNT), 1 ); dim3 dct_block(4 * 8, WARP_COUNT); gpujpeg_dct_gpu_kernel<WARP_COUNT><<<dct_grid, dct_block, 0, encoder->stream>>>( block_count_x, block_count_y, component->d_data, component->data_width, component->d_data_quantized, component->data_width * GPUJPEG_BLOCK_SIZE, d_quantization_table ); gpujpeg_cuda_check_error("Quantization table memcpy failed", return -1); } return 0; } /* Documented at declaration */ int gpujpeg_idct_gpu(struct gpujpeg_decoder* decoder) { // Get coder struct gpujpeg_coder* coder = &decoder->coder; // Encode each component for ( int comp = 0; comp < coder->param_image.comp_count; comp++ ) { // Get component struct gpujpeg_component* component = &coder->component[comp]; int roi_width = component->data_width; int roi_height = component->data_height; assert(GPUJPEG_BLOCK_SIZE == 8); int block_count_x = roi_width / GPUJPEG_BLOCK_SIZE; int block_count_y = roi_height / GPUJPEG_BLOCK_SIZE; // Get quantization table uint16_t* d_quantization_table = decoder->table_quantization[decoder->comp_table_quantization_map[comp]].d_table; // Copy quantization table to constant memory cudaMemcpyToSymbolAsync( gpujpeg_idct_gpu_quantization_table, d_quantization_table, 64 * sizeof(uint16_t), 0, cudaMemcpyDeviceToDevice, decoder->stream ); gpujpeg_cuda_check_error("Copy IDCT quantization table to constant memory", return -1); dim3 dct_grid(gpujpeg_div_and_round_up(block_count_x * block_count_y, (GPUJPEG_IDCT_BLOCK_X * GPUJPEG_IDCT_BLOCK_Y * GPUJPEG_IDCT_BLOCK_Z) / GPUJPEG_BLOCK_SIZE), 1); dim3 dct_block(GPUJPEG_IDCT_BLOCK_X, GPUJPEG_IDCT_BLOCK_Y, GPUJPEG_IDCT_BLOCK_Z); gpujpeg_idct_gpu_kernel<<<dct_grid, dct_block, 0, decoder->stream>>>( component->d_data_quantized, component->d_data, component->data_width, d_quantization_table ); gpujpeg_cuda_check_error("Inverse Integer DCT failed", return -1); } return 0; }
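/*
 * Editor's note: a minimal reference sketch (not part of GPUJPEG) showing what the inline
 * PTX "cvt.rni.u8.f32.sat" used in the IDCT kernel above computes -- round the float to the
 * nearest integer and saturate it into [0, 255]. The helper name is illustrative only.
 */
__device__ __forceinline__ unsigned char gpujpeg_example_saturate_to_u8(float v)
{
    // rintf() rounds to nearest (ties to even), matching the .rni rounding mode of the PTX
    // instruction; fminf/fmaxf clamp the result into the uint8 range, matching .sat.
    return (unsigned char) fminf(fmaxf(rintf(v), 0.0f), 255.0f);
}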
using namespace std; //##define CUDA_CHECK(condition)\ // // do { // cudaError_t error = condition; // if (error != cudaSuccess) { // // } // } #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; #define maxn 510 const double eps=1E-8; __device__ inline int sig(float d){ return(d>eps)-(d<-eps); } // struct Point{ // double x,y; Point(){} // Point(double x,double y):x(x),y(y){} // bool operator==(const Point&p)const{ // return sig(x-p.x)==0&&sig(y-p.y)==0; // } // }; __device__ inline int point_eq(const float2 a, const float2 b) { return (sig(a.x - b.x) == 0) && (sig(a.y - b.y)==0); } __device__ inline void point_swap(float2 *a, float2 *b) { float2 temp = *a; *a = *b; *b = temp; } __device__ inline void point_reverse(float2 *first, float2* last) { while ((first!=last)&&(first!=--last)) { point_swap (first,last); ++first; } } // void point_reverse(Point* first, Point* last) // { // while ((first!=last)&&(first!=--last)) { // point_swap (first,last); // ++first; // } // } __device__ inline float cross(float2 o,float2 a,float2 b){ //cross product return(a.x-o.x)*(b.y-o.y)-(b.x-o.x)*(a.y-o.y); } __device__ inline float area(float2* ps,int n){ ps[n]=ps[0]; float res=0; for(int i=0;i<n;i++){ res+=ps[i].x*ps[i+1].y-ps[i].y*ps[i+1].x; } return res/2.0; } __device__ inline int lineCross(float2 a,float2 b,float2 c,float2 d,float2&p){ float s1,s2; s1=cross(a,b,c); s2=cross(a,b,d); if(sig(s1)==0&&sig(s2)==0) return 2; if(sig(s2-s1)==0) return 0; p.x=(c.x*s2-d.x*s1)/(s2-s1); p.y=(c.y*s2-d.y*s1)/(s2-s1); return 1; } //polygon clipping //clip polygon p with the line ab; the part lying to the left of the vector (a,b) is kept and the result is stored in place //if the polygon degenerates to a single point it is still returned, and n is then 1 // __device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b){ // // TODO: the static variable may be the reason why a single thread works but multiple threads do not // printf("polygon_cut, offset\n"); // static float2 pp[maxn]; // int m=0;p[n]=p[0]; // for(int i=0;i<n;i++){ // if(sig(cross(a,b,p[i]))>0) pp[m++]=p[i]; // if(sig(cross(a,b,p[i]))!=sig(cross(a,b,p[i+1]))) // lineCross(a,b,p[i],p[i+1],pp[m++]); // } // n=0; // for(int i=0;i<m;i++) // if(!i||!(point_eq(pp[i], pp[i-1]))) // p[n++]=pp[i]; // // while(n>1&&p[n-1]==p[0])n--; // while(n>1&&point_eq(p[n-1], p[0]))n--; // // int x = blockIdx.x * blockDim.x + threadIdx.x; // // // corresponding to k // // int y = blockIdx.y * blockDim.y + threadIdx.y; // // int offset = x * 1 + y; // // printf("polygon_cut, offset\n"); // } __device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b, float2* pp){ // TODO: the static variable may be the reason why a single thread works but multiple threads do not // printf("polygon_cut, offset\n"); // static float2 pp[maxn]; int m=0;p[n]=p[0]; for(int i=0;i<n;i++){ if(sig(cross(a,b,p[i]))>0) pp[m++]=p[i]; if(sig(cross(a,b,p[i]))!=sig(cross(a,b,p[i+1]))) lineCross(a,b,p[i],p[i+1],pp[m++]); } n=0; for(int i=0;i<m;i++) if(!i||!(point_eq(pp[i], pp[i-1]))) p[n++]=pp[i]; // while(n>1&&p[n-1]==p[0])n--; while(n>1&&point_eq(p[n-1], p[0]))n--; // int x = blockIdx.x * blockDim.x + threadIdx.x; // // corresponding to k // int y = blockIdx.y * blockDim.y + threadIdx.y; // int offset = x * 1 + y; // printf("polygon_cut, offset\n"); } //--------------- decorative separator ---------------// //returns the signed intersection area of triangles oab and ocd, where o is the origin// __device__ inline float intersectArea(float2
a,float2 b,float2 c,float2 d){ float2 o = make_float2(0,0); int s1=sig(cross(o,a,b)); int s2=sig(cross(o,c,d)); if(s1==0||s2==0)return 0.0;//degenerate case, the area is 0 // if(s1==-1) swap(a,b); // if(s2==-1) swap(c,d); // printf("before swap\n"); // printf("a.x %f, a.y %f\n", a.x, a.y); // printf("b.x %f, b.y %f\n", b.x, b.y); if(s1 == -1) point_swap(&a, &b); // printf("a.x %f, a.y %f\n", a.x, a.y); // printf("b.x %f, b.y %f\n", b.x, b.y); // printf("after swap\n"); if(s2 == -1) point_swap(&c, &d); float2 p[10]={o,a,b}; int n=3; // // manually implement polygon_cut(p, n, a, b) // float2 pp[maxn]; // // polygon_cut(p, n, o, c) // int m=0;p[n]=p[0]; // for(int i=0;i<n;i++){ // if(sig(cross(o,c,p[i]))>0) pp[m++]=p[i]; // if(sig(cross(o,c,p[i]))!=sig(cross(o,c,p[i+1]))) // lineCross(o,c,p[i],p[i+1],pp[m++]); // } // n=0; // for(int i=0;i<m;i++) // if(!i||!(point_eq(pp[i], pp[i-1]))) // p[n++]=pp[i]; // while(n>1&&point_eq(p[n-1], p[0]))n--; // // polygon_cut(p, n, c, d) // m=0;p[n]=p[0]; // for(int i=0;i<n;i++){ // if(sig(cross(c,d,p[i]))>0) pp[m++]=p[i]; // if(sig(cross(c,d,p[i]))!=sig(cross(c,d,p[i+1]))) // lineCross(c,d,p[i],p[i+1],pp[m++]); // } // n=0; // for(int i=0;i<m;i++) // if(!i||!(point_eq(pp[i], pp[i-1]))) // p[n++]=pp[i]; // while(n>1&&point_eq(p[n-1], p[0]))n--; // // polygon_cut(p, n, d, o) // m=0;p[n]=p[0]; // for(int i=0;i<n;i++){ // if(sig(cross(d,o,p[i]))>0) pp[m++]=p[i]; // if(sig(cross(d,o,p[i]))!=sig(cross(d,o,p[i+1]))) // lineCross(d,o,p[i],p[i+1],pp[m++]); // } // n=0; // for(int i=0;i<m;i++) // if(!i||!(point_eq(pp[i], pp[i-1]))) // p[n++]=pp[i]; // while(n>1&&point_eq(p[n-1], p[0]))n--; float2 pp[maxn]; polygon_cut(p,n,o,c,pp); polygon_cut(p,n,c,d,pp); polygon_cut(p,n,d,o,pp); float res=fabs(area(p,n)); int x = blockIdx.x * blockDim.x + threadIdx.x; // corresponding to k int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = x * 1 + y; // printf("intersectArea2, offset: %d, %f, %f, %f, %f, %f, %f, %f, %f, res: %f\n", offset, a.x, a.y, b.x, b.y, c.x, c.y, d.x, d.y, res); if(s1*s2==-1) res=-res;return res; } //compute the intersection area of two polygons // TODO: the input was changed here, this still needs to be debugged __device__ inline float intersectArea(float2*ps1,int n1,float2*ps2,int n2){ int x = blockIdx.x * blockDim.x + threadIdx.x; // corresponding to k int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = x * 1 + y; if(area(ps1,n1)<0) point_reverse(ps1,ps1+n1); if(area(ps2,n2)<0) point_reverse(ps2,ps2+n2); ps1[n1]=ps1[0]; ps2[n2]=ps2[0]; float res=0; for(int i=0;i<n1;i++){ for(int j=0;j<n2;j++){ // printf("offset: %d, %f, %f, %f, %f, %f, %f, %f, %f addArea: %f \n", // offset, ps1[i].x, ps1[i].y, ps1[i + 1].x, ps1[i + 1].y, ps2[j].x, ps2[j].y, // ps2[j + 1].x, ps2[j + 1].y, intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1])); // float2 a = ps1[i]; // float2 b = ps1[i+1]; // float2 c = ps2[j]; // float2 d = ps2[j+1]; // res+=intersectArea2(a,b,c,d); res+=intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1]); } } return res;//assume res is positive!
} //__device__ inline double iou_poly(vector<double> p, vector<double> q) { // Point ps1[maxn],ps2[maxn]; // int n1 = 4; // int n2 = 4; // for (int i = 0; i < 4; i++) { // ps1[i].x = p[i * 2]; // ps1[i].y = p[i * 2 + 1]; // // ps2[i].x = q[i * 2]; // ps2[i].y = q[i * 2 + 1]; // } // double inter_area = intersectArea(ps1, n1, ps2, n2); // double union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area; // double iou = inter_area / union_area; // //// cout << "inter_area:" << inter_area << endl; //// cout << "union_area:" << union_area << endl; //// cout << "iou:" << iou << endl; // // return iou; //} __device__ inline void RotBox2Poly(float const * const dbox, float2 * ps) { float cs = cos(dbox[4]); float ss = sin(dbox[4]); float w = dbox[2]; float h = dbox[3]; float x_ctr = dbox[0]; float y_ctr = dbox[1]; ps[0].x = x_ctr + cs * (w / 2.0) - ss * (-h / 2.0); ps[1].x = x_ctr + cs * (w / 2.0) - ss * (h / 2.0); ps[2].x = x_ctr + cs * (-w / 2.0) - ss * (h / 2.0); ps[3].x = x_ctr + cs * (-w / 2.0) - ss * (-h / 2.0); ps[0].y = y_ctr + ss * (w / 2.0) + cs * (-h / 2.0); ps[1].y = y_ctr + ss * (w / 2.0) + cs * (h / 2.0); ps[2].y = y_ctr + ss * (-w / 2.0) + cs * (h / 2.0); ps[3].y = y_ctr + ss * (-w / 2.0) + cs * (-h / 2.0); } __device__ inline float devPolyIoU(float const * const dbbox1, float const * const dbbox2) { float2 ps1[maxn], ps2[maxn]; int n1 = 4; int n2 = 4; RotBox2Poly(dbbox1, ps1); RotBox2Poly(dbbox2, ps2); // printf("ps1: %f, %f, %f, %f, %f, %f, %f, %f\n", ps1[0].x, ps1[0].y, ps1[1].x, ps1[1].y, ps1[2].x, ps1[2].y, ps1[3].x, ps1[3].y); // printf("ps2: %f, %f, %f, %f, %f, %f, %f, %f\n", ps2[0].x, ps2[0].y, ps2[1].x, ps2[1].y, ps2[2].x, ps2[2].y, ps2[3].x, ps2[3].y); float inter_area = intersectArea(ps1, n1, ps2, n2); //printf("inter_area: %f \n", inter_area); float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area; //printf("before union_area\n"); //printf("union_area: %f \n", union_area); float iou = 0; if (union_area == 0) { iou = (inter_area + 1) / (union_area + 1); } else { iou = inter_area / union_area; } // printf("iou: %f \n", iou); return iou; } __global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes, const float * dev_query_boxes, float* dev_overlaps) { // const int col_start = blockIdx.y; // const int row_start = blockIdx.x; // corresponding to n int x = blockIdx.x * blockDim.x + threadIdx.x; // corresponding to k int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < N) && (y < K)) { int offset = x * K + y; //printf // printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_boxes + x*5)[0], // (dev_boxes + x*5)[1], (dev_boxes + x*5)[2], (dev_boxes + x*5)[3], // (dev_boxes + x*5)[4] ); // printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_query_boxes + y*5)[0], // (dev_query_boxes + y*5)[1], (dev_query_boxes + y*5)[2], (dev_query_boxes + y*5)[3], // (dev_query_boxes + y*5)[4] ); dev_overlaps[offset] = devPolyIoU(dev_boxes + x * 5, dev_query_boxes + y * 5); } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(cudaSetDevice(device_id)); } void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) { _set_device(device_id); float* overlaps_dev = NULL; float* boxes_dev = NULL; float* query_boxes_dev = NULL; CUDA_CHECK(cudaMalloc(&boxes_dev, n * 5 * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes, n * 5 * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&query_boxes_dev, k * 5 * sizeof(float))); CUDA_CHECK(cudaMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&overlaps_dev, n * k * sizeof(float))); dim3 blocks(DIVUP(n, 32), DIVUP(k, 32)); dim3 threads(32, 32); overlaps_kernel<<<blocks, threads>>>(n, k, boxes_dev, query_boxes_dev, overlaps_dev); CUDA_CHECK(cudaMemcpy(overlaps, overlaps_dev, n * k * sizeof(float), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaFree(overlaps_dev)); CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(query_boxes_dev)); }
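/*
 * Editor's note: a hypothetical host-side usage sketch for _overlaps() defined above; the
 * variable names are assumptions, not from the original source. Each box is stored as five
 * floats [x_ctr, y_ctr, w, h, angle] (see RotBox2Poly), and the output is a row-major n x k
 * matrix of pairwise rotated-box IoU values (see "offset = x * K + y" in overlaps_kernel).
 */
void example_rotated_iou(int device_id)
{
    float boxes[5]       = { 10.f, 10.f, 4.f, 2.f, 0.5f };  // one rotated box
    float query_boxes[5] = { 10.f, 10.f, 4.f, 2.f, 0.5f };  // one identical query box
    float overlaps[1];                                      // output: overlaps[i * k + j]

    _overlaps(overlaps, boxes, query_boxes, 1, 1, device_id);
    // overlaps[0] should come out close to 1.0 for identical boxes
}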
#include <nvbio/basic/numbers.h> #include <nvbio/basic/algorithms.h> #include <nvbio/basic/priority_queue.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/transform_iterator.h> #include <nvbio/basic/vector_view.h> #include <nvbio/basic/primitives.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> using namespace nvbio; // compute the coverage for each chain in a set __global__ void chain_coverage_kernel( const uint32 n_chains, // the number of chains const uint32* chain_reads, // the chain reads const uint32* chain_offsets, // the chain offsets const uint32* chain_lengths, // the chain lengths const mem_state::mem_type* mems, // the MEMs for this chunk of reads const uint32* mems_index, // a sorting index into the MEMs specifying their processing order uint2* chain_ranges, // the output chain ranges uint64* chain_weights) // the output chain weights { const uint32 chain_id = threadIdx.x + blockIdx.x * blockDim.x; if (chain_id >= n_chains) return; const uint32 read = chain_reads[ chain_id ]; const uint32 begin = chain_offsets[ chain_id ]; const uint32 end = chain_lengths[ chain_id ] + begin; uint2 range = make_uint2( uint32(-1), 0u ); uint32 weight = 0; // NOTE: we assume here the MEMs of a chain appear sorted by their left coordinate for (uint32 i = begin; i < end; ++i) { const mem_state::mem_type seed = mems[ mems_index[i] ]; const uint2 span = seed.span(); if (span.x >= range.y) weight += span.y - span.x; else if (span.y > range.y) weight += span.y - range.y; range.x = nvbio::min( range.x, seed.span().x ); range.y = nvbio::max( range.y, seed.span().y ); } // write out the outputs chain_ranges[ chain_id ] = range; chain_weights[ chain_id ] = uint64( weight ) | (uint64( read ) << 32); } // filter the chains belonging to each read __global__ void chain_filter_kernel( const read_chunk chunk, // the current sub-batch const uint32 n_chains, // the number of chains const uint32* chain_reads, // the chain reads const uint32* chain_index, // the chain order const uint2* chain_ranges, // the chain ranges const uint64* chain_weights, // the chain weights const float mask_level, // input option const float chain_drop_ratio, // input option const uint32 min_seed_len, // input option uint8* chain_flags) // the output flags { const uint32 read_id = threadIdx.x + blockIdx.x * blockDim.x + chunk.read_begin; if (read_id >= chunk.read_end) return; const uint32 begin = uint32( nvbio::lower_bound( read_id, chain_reads, n_chains ) - chain_reads ); const uint32 end = uint32( nvbio::upper_bound( read_id, chain_reads, n_chains ) - chain_reads ); // skip pathological cases if (begin == end) return; // keep the first chain chain_flags[ chain_index[begin] ] = 1u; // mark to keep // and loop through all the rest to decide which ones to keep uint32 n = 1; for (uint32 i = begin + 1; i < end; ++i) { const uint2 i_span = chain_ranges[ chain_index[i] ]; const uint32 i_w = chain_weights[ i ] & 0xFFFFFFFFu; // already sorted as chain_index uint32 j; for (j = begin; j < begin + n; ++j) { const uint2 j_span = chain_ranges[ chain_index[j] ]; const uint32 j_w = chain_weights[ j ] & 0xFFFFFFFFu; // already sorted as chain_index const uint32 max_begin = nvbio::max( i_span.x, j_span.x ); const uint32 min_end = nvbio::min( i_span.y, j_span.y ); if (min_end > max_begin) // have overlap { const uint32 min_l = nvbio::min( i_span.y - i_span.x, j_span.y - j_span.x ); if (min_end - max_begin >= min_l * mask_level) // significant overlap { chain_flags[ 
chain_index[i] ] = 1u; // mark to keep if (i_w < j_w * chain_drop_ratio && j_w - i_w >= min_seed_len * 2) break; } } } if (j == n) // no significant overlap with better chains, keep it. { chain_flags[ chain_index[i] ] = 1u; // mark to keep ++n; } } } // filter chains for the current pipeline::chunk of reads void filter_chains(pipeline_state *pipeline, const io::SequenceDataDevice *reads) { const ScopedTimer<float> timer( &pipeline->stats.chain_time ); // keep track of the time spent here struct chains_state<device_tag> *chn = &pipeline->chn; const uint32 n_reads = pipeline->chunk.read_end - pipeline->chunk.read_begin; const uint32 n_mems = pipeline->chunk.mem_end - pipeline->chunk.mem_begin; // skip pathological cases if (n_mems == 0u) return; // extract the list of unique chain ids together with their counts, i.e. the chain lengths nvbio::vector<device_tag,uint64> unique_chains( n_mems ); nvbio::vector<device_tag,uint32> unique_counts( n_mems ); nvbio::vector<device_tag,uint8> temp_storage; const uint32 n_chains = runlength_encode( n_mems, chn->mems_chain.begin(), // the input chain ids, one per seed unique_chains.begin(), // the output "unique" chain ids unique_counts.begin(), // the output repetition counts, i.e. the chain lengths temp_storage ); // some temp storage // resize the chain vectors if needed uint32 reserved_space = uint32( chn->chain_lengths.size() ); if (n_chains > reserved_space) { chn->chain_lengths.clear(); chn->chain_lengths.resize( n_chains ); chn->chain_offsets.clear(); chn->chain_offsets.resize( n_chains ); chn->chain_reads.clear(); chn->chain_reads.resize( n_chains ); reserved_space = n_chains; } // copy their lengths thrust::copy( unique_counts.begin(), unique_counts.begin() + n_chains, chn->chain_lengths.begin() ); // find the offset to the beginning of each chain thrust::lower_bound( chn->mems_chain.begin(), // the beginning of the sorted list of keys to search in chn->mems_chain.begin() + n_mems, // the end of the sorted list of keys to search in unique_chains.begin(), // the beginning of the sequence of values to search unique_chains.begin() + n_chains, // the end of the sequence of values to search chn->chain_offsets.begin() ); // the output sequence // extract the read-id frome the chain ids thrust::transform( unique_chains.begin(), // the beginning of the input sequence to transform unique_chains.begin() + n_chains, // the end of the input sequence to transform chn->chain_reads.begin(), // the beginning othe output sequence nvbio::hi_bits_functor<uint32,uint64>() ); // the functor to apply, in this case a 32-bit left shift // debug check: make sure the chain offsets are sorted if (is_sorted<device_tag>( n_chains, chn->chain_offsets.begin() ) == false) { log_error(stderr, "filter_chains: chain offsets are not sorted!\n"); exit(0); } // debug check: make sure the chains are sorted by read if (is_sorted<device_tag>( n_chains, chn->chain_reads.begin() ) == false) { log_error(stderr, "filter_chains: chains are not sorted by read!\n"); exit(0); } nvbio::vector<device_tag,uint2> chain_ranges( n_chains ); nvbio::vector<device_tag,uint64> chain_weights( n_chains ); nvbio::vector<device_tag,uint32> chain_index( reserved_space ); // potentially a little bigger because we'll reuse // it for the final filtering... 
optional_device_synchronize(); cuda::check_error("chain-coverage-init"); // compute chain coverages { const uint32 block_dim = 128; const uint32 n_blocks = util::divide_ri( n_chains, block_dim ); chain_coverage_kernel<<<n_blocks, block_dim>>>( n_chains, nvbio::plain_view( chn->chain_reads ), nvbio::plain_view( chn->chain_offsets ), nvbio::plain_view( chn->chain_lengths ), nvbio::plain_view( chn->mems ), nvbio::plain_view( chn->mems_index ), nvbio::plain_view( chain_ranges ), nvbio::plain_view( chain_weights ) ); optional_device_synchronize(); cuda::check_error("chain-coverage kernel"); } // sort the chains by weight thrust::copy( thrust::make_counting_iterator<uint32>(0u), thrust::make_counting_iterator<uint32>(0u) + n_chains, chain_index.begin() ); thrust::sort_by_key( // TODO: this is slow, switch to nvbio::cuda::SortEnactor chain_weights.begin(), chain_weights.begin() + n_chains, chain_index.begin() ); nvbio::vector<device_tag,uint8> chain_flags( n_chains ); thrust::fill( chain_flags.begin(), chain_flags.begin() + n_chains, 0u ); // filter chains: set the flags for the chains to be kept { const uint32 block_dim = 128; const uint32 n_blocks = util::divide_ri( n_reads, block_dim ); chain_filter_kernel<<<n_blocks, block_dim>>>( pipeline->chunk, n_chains, nvbio::plain_view( chn->chain_reads ), nvbio::plain_view( chain_index ), nvbio::plain_view( chain_ranges ), nvbio::plain_view( chain_weights ), command_line_options.mask_level, command_line_options.chain_drop_ratio, command_line_options.min_seed_len, nvbio::plain_view( chain_flags ) ); optional_device_synchronize(); cuda::check_error("chain-filter kernel"); } // filter chain_reads const uint32 n_filtered_chains = copy_flagged( n_chains, // the number of input elements chn->chain_reads.begin(), // the input sequence of flagged elements to copy chain_flags.begin(), // the input sequence of flags chain_index.begin(), // the output sequence of copied elements temp_storage ); // some temporary storage chn->chain_reads.swap( chain_index ); // debug check: make sure the chains are sorted by read if (is_sorted<device_tag>( n_filtered_chains, chn->chain_reads.begin() ) == false) { log_error(stderr, "filter_chains: filtered chains are not sorted by read!\n"); exit(0); } // filter chain_offsets cuda::copy_flagged( n_chains, // the number of input elements chn->chain_offsets.begin(), // the input sequence of flagged elements to copy chain_flags.begin(), // the input sequence of flags chain_index.begin(), // the output sequence of copied elements temp_storage ); // some temporary storage chn->chain_offsets.swap( chain_index ); // filter chain_lengths cuda::copy_flagged( n_chains, // the number of input elements chn->chain_lengths.begin(), // the input sequence of flagged elements to copy chain_flags.begin(), // the input sequence of flags chain_index.begin(), // the output sequence of copied elements temp_storage ); // some temporary storage chn->chain_lengths.swap( chain_index ); // assign the output number of chains chn->n_chains = n_filtered_chains; // keep stats pipeline->stats.n_chains += n_filtered_chains; }
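/*
 * Editor's note: a small sketch (helper names are illustrative, not part of nvbio) of the
 * 64-bit key layout used above. chain_coverage_kernel packs the owning read id into the high
 * 32 bits and the coverage weight into the low 32 bits, so sorting by this key groups chains
 * by read and orders them by weight within each read, while chain_filter_kernel recovers the
 * weight with a simple mask.
 */
__host__ __device__ inline uint64 example_pack_chain_key(const uint32 read, const uint32 weight)
{
    return uint64( weight ) | (uint64( read ) << 32);   // same packing as chain_coverage_kernel
}
__host__ __device__ inline uint32 example_chain_key_weight(const uint64 key)
{
    return uint32( key & 0xFFFFFFFFu );                 // same unpacking as chain_filter_kernel
}
__host__ __device__ inline uint32 example_chain_key_read(const uint64 key)
{
    return uint32( key >> 32 );
}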
#include <thrust/sort.h> #include <thrust/system/cuda/experimental/pinned_allocator.h> #include <thrust/unique.h> #include <algorithm> #include <functional> #include <unordered_set> #include <vector> #include "core/cuda_helpers.h" #include "cub/cub.cuh" #include "io.h" namespace arboretum { namespace io { using namespace std; #define ITEMS 8 struct ValueIndexSegment { bool border; float value; unsigned count; }; class ValueSegmentIterator { public: // Required iterator traits typedef ValueSegmentIterator self_type; ///< My own type typedef ptrdiff_t difference_type; ///< Type to express the result of ///< subtracting one iterator from another typedef ValueIndexSegment value_type; ///< The type of the element the iterator can point to typedef value_type * pointer; ///< The type of a pointer to an element the iterator can point to typedef value_type reference; ///< The type of a reference to an element the ///< iterator can point to typedef typename thrust::detail::iterator_facade_category< thrust::any_system_tag, thrust::random_access_traversal_tag, value_type, reference>::type iterator_category; ///< The iterator category private: const unsigned segment_size; const float *itr; difference_type offset; public: /// Constructor __host__ __device__ __forceinline__ ValueSegmentIterator( const unsigned segment_size, const float *itr, ///< Input iterator to wrap difference_type offset = 0) ///< OffsetT (in items) from \p itr denoting ///< the position of the iterator : segment_size(segment_size), itr(itr), offset(offset) {} /// Postfix increment __host__ __device__ __forceinline__ self_type operator++(int) { self_type retval = *this; offset++; return retval; } /// Prefix increment __host__ __device__ __forceinline__ self_type operator++() { offset++; return *this; } /// Indirection __host__ __device__ __forceinline__ reference operator*() const { difference_type prev_offset = offset > 0 ? 
offset - 1 : 0; value_type retval; retval.value = (itr[offset] + itr[prev_offset]) * 0.5; retval.border = (itr[offset] != itr[prev_offset]); retval.count = retval.border; return retval; } /// Addition template <typename Distance> __host__ __device__ __forceinline__ self_type operator+(Distance n) const { self_type retval(segment_size, itr, offset + n); return retval; } /// Addition assignment template <typename Distance> __host__ __device__ __forceinline__ self_type &operator+=(Distance n) { offset += n; return *this; } /// Subtraction template <typename Distance> __host__ __device__ __forceinline__ self_type operator-(Distance n) const { self_type retval(itr, offset - n); return retval; } /// Subtraction assignment template <typename Distance> __host__ __device__ __forceinline__ self_type &operator-=(Distance n) { offset -= n; return *this; } /// Distance __host__ __device__ __forceinline__ difference_type operator-(self_type other) const { return offset - other.offset; } /// Array subscript template <typename Distance> __host__ __device__ __forceinline__ reference operator[](Distance n) const { self_type offset = (*this) + n; return *offset; } /// Equal to __host__ __device__ __forceinline__ bool operator==(const self_type &rhs) { return ((itr == rhs.itr) && (offset == rhs.offset)); } /// Not equal to __host__ __device__ __forceinline__ bool operator!=(const self_type &rhs) { return ((itr != rhs.itr) || (offset != rhs.offset)); } /// Normalize __host__ __device__ __forceinline__ void normalize() { itr += offset; offset = 0; } /// ostream operator friend std::ostream &operator<<(std::ostream &os, const self_type & /*itr*/) { return os; } }; struct SegmentLength { __device__ __forceinline__ ValueIndexSegment operator()(const ValueIndexSegment &a, const ValueIndexSegment &b) const { ValueIndexSegment ret; ret.value = b.value; ret.count = a.count + b.count; ret.border = b.border; return ret; } }; class ThresholdOutputIterator { public: // Required iterator traits typedef ThresholdOutputIterator self_type; ///< My own type typedef ptrdiff_t difference_type; ///< Type to express the result of ///< subtracting one iterator from another typedef void value_type; ///< The type of the element the iterator can point to typedef void pointer; ///< The type of a pointer to an element the iterator can point to typedef void reference; ///< The type of a reference to an element the ///< iterator can point to typedef typename thrust::detail::iterator_facade_category< thrust::any_system_tag, thrust::random_access_traversal_tag, value_type, reference>::type iterator_category; ///< The iterator category private: float *thresholds; unsigned size; difference_type offset; public: /// Constructor __host__ __device__ __forceinline__ ThresholdOutputIterator(float *thresholds, unsigned size, difference_type offset = 0) ///< Base offset : thresholds(thresholds), size(size), offset(offset) {} /// Postfix increment __host__ __device__ __forceinline__ self_type operator++(int) { self_type retval = *this; offset++; return retval; } /// Prefix increment __host__ __device__ __forceinline__ self_type operator++() { offset++; return *this; } /// Indirection __host__ __device__ __forceinline__ self_type &operator*() { // return self reference, which can be assigned to anything return *this; } /// Addition template <typename Distance> __host__ __device__ __forceinline__ self_type operator+(Distance n) const { self_type retval(thresholds, size, offset + n); return retval; } /// Addition assignment template <typename Distance> 
__host__ __device__ __forceinline__ self_type &operator+=(Distance n) { offset += n; return *this; } /// Subtraction template <typename Distance> __host__ __device__ __forceinline__ self_type operator-(Distance n) const { self_type retval(thresholds, size, offset - n); return retval; } /// Subtraction assignment template <typename Distance> __host__ __device__ __forceinline__ self_type &operator-=(Distance n) { offset -= n; return *this; } /// Distance __host__ __device__ __forceinline__ difference_type operator-(self_type other) const { return offset - other.offset; } /// Array subscript template <typename Distance> __host__ __device__ __forceinline__ self_type &operator[](Distance n) { // return self reference, which can be assigned to anything self_type retval(thresholds, size, offset + n); return *retval; } /// Structure dereference __host__ __device__ __forceinline__ pointer operator->() { return; } /// Assignment to self (no-op) __host__ __device__ __forceinline__ void operator=(self_type const &other) { offset = other.offset; size = other.size; } /// Assignment to anything else (no-op) __device__ __forceinline__ void operator=(ValueIndexSegment const &value) { if (value.border) { int segment = int(min(value.count + 1, unsigned(offset / size + 1))) - 2; if (segment >= 0) { union fi { float f; int i; }; fi loc, test; loc.f = value.value; test.f = thresholds[segment]; while (loc.f < test.f) test.i = atomicCAS(((int *)thresholds) + segment, test.i, loc.i); } } } /// Cast to void* operator __host__ __device__ __forceinline__ operator void *() const { return NULL; } /// Equal to __host__ __device__ __forceinline__ bool operator==(const self_type &rhs) { return (offset == rhs.offset); } /// Not equal to __host__ __device__ __forceinline__ bool operator!=(const self_type &rhs) { return (offset != rhs.offset); } /// ostream operator friend std::ostream &operator<<(std::ostream &os, const self_type &itr) { os << "[" << itr.offset << "]"; return os; } }; template <typename T, int ITEMS_PER_THREAD> __global__ void build_histogram(T *bin, const float *threshold, const float *fvalue, const int hist_size, const int unique_size, const size_t n) { extern __shared__ float values[]; const int size = min(unique_size, hist_size); if (threadIdx.x < hist_size) { values[threadIdx.x] = INFINITY; if (threadIdx.x < size - 1) values[threadIdx.x] = threshold[threadIdx.x]; } __syncthreads(); #pragma unroll for (unsigned i = 0; i < ITEMS_PER_THREAD; ++i) { unsigned idx = blockDim.x * blockIdx.x * ITEMS_PER_THREAD + i * blockDim.x + threadIdx.x; if (idx < n) bin[idx] = lower_bound<float>(values, fvalue[idx], size); } } DataMatrix::DataMatrix(int rows, int columns, int columns_category) : rows(rows), columns(columns + columns_category), columns_dense(columns), columns_category(columns_category) { _init = false; data.resize(columns); data_category_device.resize(columns_category); data_reduced_u8.resize(columns); data_reduced_u16.resize(columns); data_reduced_u32.resize(columns); data_reduced_u8_device.resize(columns); data_reduced_u16_device.resize(columns); data_reduced_u32_device.resize(columns); reduced_size.resize(columns); category_size.resize(columns_category); data_reduced_mapping.resize(columns); data_categories.resize(columns_category); for (int i = 0; i < columns; ++i) { data[i].resize(rows); } for (int i = 0; i < columns_category; ++i) { data_categories[i].resize(rows); } } void DataMatrix::InitHist(int hist_size, bool verbose) { if (hist_size < (1 << 8)) this->InitHistInternal<unsigned char>(hist_size, 
verbose); else this->InitHistInternal<unsigned short>(hist_size, verbose); } template <typename T> void DataMatrix::InitHistInternal(int hist_size, bool verbose) { if (!_init) { thrust::host_vector<thrust::host_vector<float>> thresholds(columns_dense); thrust::device_vector<float> d_data(rows); thrust::device_vector<float> d_data_sorted(rows); thrust::device_vector<T> bin(rows); thrust::device_vector<float> d_threshold(hist_size); void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortKeys( d_temp_storage, temp_storage_bytes, thrust::raw_pointer_cast(d_data.data()), thrust::raw_pointer_cast(d_data_sorted.data()), rows); size_t temp_storage_bytes_scan = 0; OK(cub::DeviceScan::InclusiveScan( NULL, temp_storage_bytes_scan, ValueSegmentIterator(0, nullptr, 0), ThresholdOutputIterator(nullptr, 0), SegmentLength(), rows)); temp_storage_bytes = max(temp_storage_bytes_scan, temp_storage_bytes); cudaMalloc(&d_temp_storage, temp_storage_bytes); unsigned segment_size = rows / hist_size; segment_size = max(1, segment_size); for (size_t i = 0; i < columns_dense; ++i) { thrust::copy(data[i].begin(), data[i].end(), d_data.begin()); cub::DeviceRadixSort::SortKeys( d_temp_storage, temp_storage_bytes, thrust::raw_pointer_cast(d_data.data()), thrust::raw_pointer_cast(d_data_sorted.data()), rows); thrust::fill(d_threshold.begin(), d_threshold.end(), INFINITY); ValueSegmentIterator in(segment_size, thrust::raw_pointer_cast(d_data_sorted.data())); ThresholdOutputIterator out(thrust::raw_pointer_cast(d_threshold.data()), segment_size); SegmentLength op; OK(cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, in, out, op, rows)); data_reduced_mapping[i].resize(hist_size); thrust::copy(d_threshold.begin(), d_threshold.end(), data_reduced_mapping[i].begin()); int unique_size = 0; for (unique_size = 0; unique_size < data_reduced_mapping[i].size() && !std::isinf(data_reduced_mapping[i][unique_size]); unique_size++) { } unique_size++; int size = std::min(unique_size, hist_size); reduced_size[i] = 32 - __builtin_clz(size); int grid_size = (rows + 1024 * ITEMS - 1) / (1024 * ITEMS); build_histogram<T, ITEMS><<<grid_size, 1024, hist_size * sizeof(float)>>>( thrust::raw_pointer_cast(bin.data()), thrust::raw_pointer_cast(d_threshold.data()), thrust::raw_pointer_cast(d_data.data()), hist_size, unique_size, rows); OK(cudaDeviceSynchronize()); GetHostData<T>(i).resize(rows); thrust::copy(bin.begin(), bin.end(), GetHostData<T>(i).begin()); } OK(cudaFree(d_temp_storage)); for (size_t i = 0; i < columns_dense && verbose; ++i) { printf("feature %lu has been reduced to %u bits \n", i, reduced_size[i]); } max_reduced_size = max_feature_size = *std::max_element(reduced_size.begin(), reduced_size.end()); if (verbose) printf("max feature size %u \n", max_reduced_size); this->_init = true; } } void DataMatrix::InitExact(bool verbose) { if (!_init) { data_reduced_u32.resize(columns); #pragma omp parallel for for (size_t i = 0; i < columns_dense; ++i) { data_reduced_u32[i].resize(rows); std::unordered_set<float> s; for (float v : data[i]) s.insert(v); data_reduced_mapping[i].assign(s.begin(), s.end()); std::sort(data_reduced_mapping[i].begin(), data_reduced_mapping[i].end()); reduced_size[i] = 32 - __builtin_clz(data_reduced_mapping[i].size()); for (size_t j = 0; j < rows; ++j) { vector<float>::iterator indx = std::lower_bound(data_reduced_mapping[i].begin(), data_reduced_mapping[i].end(), data[i][j]); unsigned int idx = indx - data_reduced_mapping[i].begin(); data_reduced_u32[i][j] = idx; } } 
#pragma omp parallel for for (size_t i = 0; i < columns_category; ++i) { unsigned int m = *std::max_element(data_categories[i].begin(), data_categories[i].end()); category_size[i] = 32 - __builtin_clz(m); } for (size_t i = 0; i < columns_dense && verbose; ++i) { printf("feature %lu has been reduced to %u bits \n", i, reduced_size[i]); } max_reduced_size = *std::max_element(reduced_size.begin(), reduced_size.end()); if (verbose) printf("max feature size %u \n", max_reduced_size); if (columns_category == 0) max_feature_size = max_reduced_size; else { max_category_size = *std::max_element(category_size.begin(), category_size.end()); max_feature_size = std::max(max_reduced_size, max_category_size); } _init = true; } } void DataMatrix::UpdateGrad() {} void DataMatrix::TransferToGPU(size_t free, bool verbose) { if (this->max_feature_size <= sizeof(unsigned char) * CHAR_BIT) { this->TransferToGPUInternal<unsigned char>(free, verbose); } else if (this->max_feature_size <= sizeof(unsigned short) * CHAR_BIT) { this->TransferToGPUInternal<unsigned short>(free, verbose); } else { this->TransferToGPUInternal<unsigned int>(free, verbose); } } template <typename T> void DataMatrix::TransferToGPUInternal(size_t free, bool verbose) { size_t data_size = sizeof(T) * rows; size_t copy_count = std::min(free / data_size, columns_dense); for (size_t i = 0; i < copy_count; ++i) { this->GetDeviceData<T>(i).resize(rows); thrust::copy(this->GetHostData<T>(i).begin(), this->GetHostData<T>(i).end(), this->GetDeviceData<T>(i).begin()); } if (verbose) printf("copied features data %ld from %ld \n", copy_count, columns_dense); // free -= copy_count * data_size; // copy_count = 0; // for (size_t i = 0; i < columns_category; ++i) { // if (rows * sizeof(unsigned int) < free) { // copy_count++; // data_category_device[i].resize(rows); // thrust::copy(data_categories[i].begin(), data_categories[i].end(), // data_category_device[i].begin()); // free -= rows * sizeof(unsigned int); // } else { // break; // } // } // if (verbose) // printf("copied category features %ld from %ld \n", copy_count, // columns_category); } template <> thrust::host_vector<unsigned int, thrust::cuda::experimental::pinned_allocator<unsigned int>> &DataMatrix::GetHostData(int column) { return data_reduced_u32[column]; } template <> thrust::host_vector< unsigned short, thrust::cuda::experimental::pinned_allocator<unsigned short>> &DataMatrix::GetHostData(int column) { return data_reduced_u16[column]; } template <> thrust::host_vector<unsigned char, thrust::cuda::experimental::pinned_allocator<unsigned char>> &DataMatrix::GetHostData(int column) { return data_reduced_u8[column]; } template <> thrust::device_vector<unsigned int> &DataMatrix::GetDeviceData(int column) { return data_reduced_u32_device[column]; } template <> thrust::device_vector<unsigned short> &DataMatrix::GetDeviceData(int column) { return data_reduced_u16_device[column]; } template <> thrust::device_vector<unsigned char> &DataMatrix::GetDeviceData(int column) { return data_reduced_u8_device[column]; } } // namespace io } // namespace arboretum
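/*
 * Editor's note: a standalone illustration (not part of arboretum) of the bit-width
 * computation used by InitHistInternal/InitExact above: a feature whose values fall into
 * `count` bins needs 32 - __builtin_clz(count) bits, i.e. the 1-based position of the
 * highest set bit of the bin count.
 */
static unsigned example_bits_needed(unsigned count) {
  return count ? 32u - __builtin_clz(count) : 0u;  // guard: __builtin_clz(0) is undefined
}
// example_bits_needed(2) == 2, example_bits_needed(255) == 8, example_bits_needed(256) == 9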
using namespace std; typedef uint8_t uint8; typedef unsigned int uint32; typedef unsigned long long int uint64; #define STREAM_BLOCK 16 #define BLOCK_SIZE 32 #define BLOCK_D_SIZE 64 #define INTEGRAL_BLOCK_SIZE 8 #define XDIM_MAX_THREADS 1024 #define XDIM_H_THREADS 512 #define XDIM_Q_THREADS 256 #define SHARED_MEMORY 49152 #define INIT_BLOCK 8 __global__ void NCC(float* left, float* right, double* cost, const int rows, const int cols,const int ndisp){ const int Col =blockIdx.x*blockDim.x + threadIdx.x-blockIdx.x*3; extern __shared__ __align__(sizeof(double)) unsigned char ncc_shared[]; uint64 * Ar_sm = reinterpret_cast<uint64 *>(&ncc_shared[0]); double * Cr_sm = reinterpret_cast<double *>(&ncc_shared[(XDIM_H_THREADS+ndisp)*sizeof(double)]); float * l_im_sm = reinterpret_cast<float *>(&ncc_shared[ 2*(XDIM_H_THREADS+ndisp)*sizeof(double)]); float * r_im_sm = reinterpret_cast<float *>(&ncc_shared[ 2*(XDIM_H_THREADS+ndisp)*sizeof(double) + 3*(XDIM_H_THREADS)*sizeof(float) ]); int wh,ww; uint64 Al=0; uint64 temp1,temp2,p; double Cl; float l_im_0,l_im_1,l_im_2,l_im_3,l_im_4,l_im_5,l_im_6,l_im_7,l_im_8; int threaddispl = 0; if(blockIdx.x >0){ threaddispl=ndisp; } if(blockIdx.x >0 && threadIdx.x < ndisp && Col-ndisp >=0 ){ #pragma unroll for(wh=0; wh<3;wh++){ r_im_sm[wh*(XDIM_H_THREADS+ndisp)+threadIdx.x] =right[(blockIdx.y+wh)*cols+Col-ndisp]; } } if(Col < cols){ #pragma unroll for(wh=0; wh<3;wh++){ r_im_sm[wh*(XDIM_H_THREADS+ndisp)+threaddispl+ threadIdx.x] =right[(blockIdx.y+wh)*cols+Col]; } #pragma unroll for(wh=0; wh<3;wh++){ l_im_sm[wh*XDIM_H_THREADS+threadIdx.x] =left[(blockIdx.y+wh)*cols+Col]; } } __syncthreads(); if(blockIdx.x >0 && threadIdx.x < ndisp && Col-ndisp >=0 ){ temp1=0; temp2=0; #pragma unroll for(ww=0; ww<3;ww++){ for (wh=0; wh<3;wh++){ p = r_im_sm[wh*(XDIM_H_THREADS+ndisp)+threadIdx.x+ww]; temp1 += p; temp2 += p*p; } } Ar_sm[threadIdx.x]=temp1; Cr_sm[threadIdx.x] = 1/(sqrt(9*temp2 - (double)( temp1 )*( temp1) )); } __syncthreads(); if(Col < cols-3 && threadIdx.x < blockDim.x-3){ temp1 =0; temp2 =0; #pragma unroll for(ww=0; ww<3;ww++){ for (wh=0; wh<3;wh++){ p = r_im_sm[wh*(XDIM_H_THREADS+ndisp)+threaddispl+ threadIdx.x+ww]; temp1 += p; temp2 += p*p; } } Ar_sm[threaddispl+ threadIdx.x]=temp1; Cr_sm[threaddispl+ threadIdx.x] = 1/(sqrt(9*temp2 - (double)( temp1 )*( temp1) )); } __syncthreads(); if(Col < cols-3 && threadIdx.x < blockDim.x-3 ){ l_im_0 = l_im_sm[threadIdx.x]; l_im_1 = l_im_sm[threadIdx.x+1]; l_im_2 = l_im_sm[threadIdx.x+2]; l_im_3 = l_im_sm[XDIM_H_THREADS+threadIdx.x]; l_im_4 = l_im_sm[XDIM_H_THREADS+threadIdx.x+1]; l_im_5 = l_im_sm[XDIM_H_THREADS+threadIdx.x+2]; l_im_6 = l_im_sm[2*XDIM_H_THREADS+threadIdx.x]; l_im_7 = l_im_sm[2*XDIM_H_THREADS+threadIdx.x+1]; l_im_8 = l_im_sm[2*XDIM_H_THREADS+threadIdx.x+2]; Al = l_im_0+l_im_1+l_im_2+l_im_3+l_im_4+l_im_5 +l_im_6+l_im_7+l_im_8; Cl = 1/(sqrt(9*(l_im_0*l_im_0+ l_im_1*l_im_1+ l_im_2*l_im_2+ l_im_3*l_im_3+ l_im_4*l_im_4+ l_im_5*l_im_5+ l_im_6*l_im_6+ l_im_7*l_im_7+ l_im_8*l_im_8) - (double)( Al*Al ))); } if(Col < cols-3 && threadIdx.x < blockDim.x-3){ for(int d=0; d < ndisp; d++){ double ncccost =2; if(Col-d>=0){ if( isfinite(Cl) && isfinite(Cr_sm[ threaddispl+ threadIdx.x-d])){ double D = l_im_0 * r_im_sm[threaddispl+ threadIdx.x-d] + l_im_1 * r_im_sm[threaddispl+ threadIdx.x-d+1]+ l_im_2 * r_im_sm[threaddispl+ threadIdx.x-d+2]+ l_im_3 * r_im_sm[XDIM_H_THREADS+ndisp+threaddispl+ threadIdx.x-d]+ l_im_4 * r_im_sm[XDIM_H_THREADS+ndisp+threaddispl+ threadIdx.x-d+1]+ l_im_5 * r_im_sm[XDIM_H_THREADS+ndisp+threaddispl+ 
threadIdx.x-d+2]+ l_im_6 * r_im_sm[2*(XDIM_H_THREADS+ndisp)+threaddispl+ threadIdx.x-d]+ l_im_7 * r_im_sm[2*(XDIM_H_THREADS+ndisp)+threaddispl+ threadIdx.x-d+1]+ l_im_8 * r_im_sm[2*(XDIM_H_THREADS+ndisp)+threaddispl+ threadIdx.x-d+2]; ncccost = 1- ((double)(9*D- Al * Ar_sm[threaddispl+ threadIdx.x-d] )*Cl*Cr_sm[ threaddispl+ threadIdx.x-d]); } } cost[d*rows*cols+(blockIdx.y+1)*cols + (Col+1)]=ncccost; } } } void usage(void){ std::cout << "NCC fixed window CUDA implementation" << std::endl; std::cout << "Arguments" << std::endl; std::cout << "-l:\t\t Left image | File containing names of the left images" << std::endl; std::cout << "-r:\t\t Right image | File containing the names of the right images" << std::endl; std::cout << "-ndisp:\t\t Number of Disparities" << std::endl; std::cout << "-dopost:\t Default false. If set, activates sgm cost optimization" << std::endl; std::cout << "-list:\t\t Default is single file. If set, left and right files should be lists of images." << std::endl; std::cout << "-out:\t\t Output directory for disparity images." << std::endl; std::cout << "-out_type:\t Output image type. Supports pgm|pfm|png|disp(uint16 png format)." << std::endl; std::cout << "-postconf:\t Optional configuration file for post-processing." << std::endl; std::cout << "-h:\t\t Prints this help" << std::endl; } int main(int argc, char* argv[]){ string leftfile; string rightfile; string out=string("."); string out_t=string("disp"); int ndisp=256; bool post=false; bool single=true; int argsassigned = 0; int required=0; int wsize=3; postparams params; //sgm params params.pi1=1.32; params.pi2=24.25; params.tau_so=1; params.alpha1=2; params.sgm_q1=3; params.sgm_q2=2; params.alpha2=6; params.sigma = 5.99; params.kernel_size=5; int direction =-1; for(int i=0; i<argc; i++){ if( !strcmp(argv[i], "-l") ){ leftfile = string(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i],"-r") ){ rightfile = string(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i],"-ndisp") ){ ndisp= atoi(argv[++i]); argsassigned++; required++; }else if( !strcmp(argv[i], "-dopost") ){ post= true; argsassigned++; }else if(!strcmp(argv[i],"-list")){ single=false; argsassigned++; }else if(!strcmp(argv[i],"-out")){ out=string(argv[++i]); argsassigned++; }else if(!strcmp(argv[i],"-out_type")){ out_t=string(argv[++i]); argsassigned++; }else if(!strcmp(argv[i],"-postconf")){ parseConf(params ,string(argv[++i])); argsassigned++; }else if(!strcmp(argv[i],"-h")){ usage(); return 0; } } if(argsassigned == 0){ usage(); return 0; } if(argsassigned ==1){ leftfile = string("../../leftimg.txt"); rightfile = string("../../rightimg.txt"); } else if( required < 3 ){ usage(); return 0; } std::vector<string> limg; std::vector<string> rimg; if (single){ limg.push_back(leftfile); rimg.push_back(rightfile); }else{ limg = getImages(leftfile); rimg = getImages(rightfile); } imgio* imgutil = new imgio(); imgutil->read_image_meta(limg[0].c_str()); //######################### Allocate memory on the device ###########################################// float* imgl; size_t ibytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float); cudaMallocHost( (void**) &imgl, ibytes ); float* imgr; cudaMallocHost( (void**) &imgr, ibytes ); int width = imgutil->getWidth(); int height = imgutil->getHeight(); int wdiv = ceil((float)width/32); cudaStream_t stream1; cudaStream_t stream2; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); double* cost_d; size_t bytes = height*width*ndisp*sizeof(double); cudaMalloc( (void**) &cost_d, bytes ); 
double* post_cost_d; cudaMalloc( (void**) &post_cost_d, bytes ); float* disp_h; size_t dbytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float); cudaMallocHost( (void**) &disp_h, dbytes ); float * disp_d; cudaMalloc(&disp_d, dbytes); float * disp_tmp; cudaMalloc(&disp_tmp, dbytes); float* imgl_d; cudaMalloc(&imgl_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float)); float* imgr_d; cudaMalloc(&imgr_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float)); uint64* l_integral_d; uint64* r_integral_d; uint64* l_integral_d_t; uint64* r_integral_d_t; cudaMalloc(&l_integral_d, height*width*sizeof(uint64)); cudaMemsetAsync(l_integral_d, 0,height*width*sizeof(uint64),stream1); cudaMalloc(&r_integral_d, height*width*sizeof(uint64)); cudaMemsetAsync(r_integral_d, 0,height*width*sizeof(uint64),stream2); cudaMalloc(&l_integral_d_t, height*width*sizeof(uint64)); cudaMemsetAsync(l_integral_d_t, 0,height*width*sizeof(uint64),stream1); cudaMalloc(&r_integral_d_t, height*width*sizeof(uint64)); cudaMemsetAsync(r_integral_d_t, 0,height*width*sizeof(uint64),stream2); unsigned long long int * l_sq_integral_d; unsigned long long int * r_sq_integral_d; cudaMalloc(&l_sq_integral_d, height*width*sizeof(unsigned long long int)); cudaMemsetAsync(l_sq_integral_d, 0,height*width*sizeof(unsigned long long int),stream1); cudaMalloc(&r_sq_integral_d, height*width*sizeof(unsigned long long int)); cudaMemsetAsync(r_sq_integral_d, 0,height*width*sizeof(unsigned long long int),stream2); uint64 * Al_d; uint64 * Ar_d; double* Cl_d; double* Cr_d; cudaMalloc(&Al_d, height*width*sizeof(uint64)); cudaMalloc(&Ar_d, height*width*sizeof(uint64)); cudaMalloc(&Cl_d, height*width*sizeof(double)); cudaMalloc(&Cr_d, height*width*sizeof(double)); cudaMemsetAsync(Al_d,0,height*width*sizeof(uint64)); cudaMemsetAsync(Ar_d,0,height*width*sizeof(uint64)); cudaMemsetAsync(Cl_d,0,height*width*sizeof(double)); cudaMemsetAsync(Cr_d,0,height*width*sizeof(double)); int size1 = height*ndisp; int size2 = width*ndisp; dim3 argGridSGM1((size1 - 1) / ndisp + 1,width); dim3 argGridSGM2((size2 - 1) / ndisp + 1,height); float * tmp_d; cudaMalloc(&tmp_d, width*ndisp*sizeof(float)); cudaMemsetAsync(tmp_d,0 , width*ndisp*sizeof(float),0); float* left_cross; cudaMalloc(&left_cross, 4*height*width*sizeof(float)); cudaMemsetAsync(left_cross,0 , 4*height*width*sizeof(float),0); float* right_cross; cudaMalloc(&right_cross, 4*height*width*sizeof(float)); cudaMemsetAsync(right_cross,0 , 4*height*width*sizeof(float),0); int kr = ceil(params.sigma*3); int ks = kr*2+1; float * kernel = (float*)calloc(ks*ks,sizeof(float)); for (int i=0; i<ks; i++){ for(int j=0; j<ks; j++){ int y= (i-1)-kr; int x= (j-1)-kr; kernel[i*ks+j] = exp( -(x*x+y*y)/(2*params.sigma*params.sigma) ); } } float *kernel_d; cudaMalloc(&kernel_d, ks*ks*sizeof(float)); cudaMemcpy( kernel_d, kernel, ks*ks*sizeof(float), cudaMemcpyHostToDevice); dim3 swapBlock(BLOCK_D_SIZE,16,1); dim3 swapGrid(ceil((float)imgutil->getWidth()*imgutil->getHeight()/BLOCK_D_SIZE),ceil((float) ndisp/BLOCK_D_SIZE )); dim3 argBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 argGrid(ceil((float) imgutil->getWidth() / BLOCK_SIZE),ceil( (float)imgutil->getHeight()/ BLOCK_SIZE)); dim3 dimBlockNCC(XDIM_H_THREADS); dim3 dimGridNCC(ceil((float) imgutil->getWidth() / XDIM_H_THREADS),imgutil->getHeight()-wsize); //################################################################################################################# for(size_t i=0; i<limg.size(); i++){ imgutil->read_image(limg[i],imgl); imgutil->read_image(rimg[i],imgr); 
cudaMemsetAsync(cost_d,0 , height*width*ndisp*sizeof(double),stream1); cudaMemcpyAsync( imgl_d, imgl, width*height*sizeof(float), cudaMemcpyHostToDevice,stream1); cudaMemcpyAsync( imgr_d, imgr, width*height*sizeof(float), cudaMemcpyHostToDevice,stream2); NCC<<<dimGridNCC, dimBlockNCC,(2*(XDIM_H_THREADS+ndisp )*sizeof(double) + 3*(XDIM_H_THREADS)*sizeof(float)+ 3*(XDIM_H_THREADS+ndisp)*sizeof(float) )>>>( imgl_d, imgr_d,cost_d,height,width,ndisp); if(post){ swap_axis<<< swapGrid, swapBlock >>>( cost_d, post_cost_d,height,width,ndisp ); cudaMemset(cost_d,0 , height*width*ndisp*sizeof(double)); for (int step = 0; step < width; step++) { sgm_loop<0><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } for (int step = 0; step < width; step++) { sgm_loop<1><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } for (int step = 0; step < height; step++) { sgm_loop<2><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } for (int step = 0; step < height; step++) { sgm_loop<3><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>( imgl_d, imgr_d, post_cost_d, cost_d, tmp_d, params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction, height, width, ndisp, step); } argmin<<<argGrid, argBlock>>>( disp_d, cost_d, height, width,ndisp ); subpixel_enchancement<<<(height*width - 1) / TB + 1, TB>>>( disp_d, cost_d, disp_tmp, height*width, height*width, ndisp); median2d<<<(height*width - 1) / TB + 1, TB>>>( disp_tmp, disp_d, height*width, height, width, params.kernel_size / 2); mean2d<<<(height*width - 1) / TB + 1, TB>>>( disp_d, kernel_d, disp_tmp, height*width, ks / 2, height, width, params.alpha2); }else{ argmin_d<<<argGrid, argBlock>>>( disp_tmp, cost_d, height, width,ndisp ); } cudaMemcpy( disp_h, disp_tmp, height*width*sizeof(float), cudaMemcpyDeviceToHost ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); imgutil->write_image(out + string("/") +limg[i].substr(limg[i].find_last_of("/")+1) ,disp_h,out_t); } cudaFreeHost(imgl); cudaFreeHost(imgr); cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); cudaFree(left_cross); cudaFree(right_cross); cudaFree(tmp_d); cudaFreeHost(imgl); cudaFreeHost(imgr); cudaFreeHost(disp_h); cudaFree(disp_d); cudaFree(disp_tmp); cudaFree(imgl_d); cudaFree(imgr_d); cudaFree(cost_d); cudaFree(post_cost_d); cudaFree(l_integral_d); cudaFree(r_integral_d); cudaFree(l_sq_integral_d); cudaFree(r_sq_integral_d); cudaFree(Al_d); cudaFree(Ar_d); cudaFree(Cl_d); cudaFree(Cr_d); delete imgutil; }
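/*
 * Editor's note: a reference sketch (not part of the original file) of the per-window cost
 * the NCC kernel above evaluates for each candidate disparity, written out for one pair of
 * 3x3 patches. With N = 9 samples the kernel effectively computes
 *   ncc  = (N*sum(l*r) - sum(l)*sum(r)) / (sqrt(N*sum(l^2) - sum(l)^2) * sqrt(N*sum(r^2) - sum(r)^2))
 * and stores cost = 1 - ncc, so identical patches cost 0, anti-correlated patches cost 2,
 * and flat (zero-variance) patches fall back to the default cost of 2.
 */
__host__ __device__ inline double example_ncc_cost_3x3(const float l[9], const float r[9])
{
    double sl = 0, sr = 0, sll = 0, srr = 0, slr = 0;
    for (int i = 0; i < 9; i++) {
        sl  += l[i];                 sr  += r[i];
        sll += (double)l[i] * l[i];  srr += (double)r[i] * r[i];
        slr += (double)l[i] * r[i];
    }
    // The kernel forms Cl = 1/sqrt(9*sll - sl*sl) and Cr analogously and checks isfinite();
    // multiplying the two square roots and requiring denom > 0 is equivalent here.
    const double denom = sqrt(9.0 * sll - sl * sl) * sqrt(9.0 * srr - sr * sr);
    if (!(denom > 0.0)) return 2.0;                     // zero-variance patch: keep default cost
    return 1.0 - (9.0 * slr - sl * sr) / denom;
}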
* \brief Define CUDA implementation of nbnxn_gpu.h * * \author Szilard Pall <pall.szilard@gmail.com> */ #include "gmxpre.h" #include "config.h" #include <assert.h> #include <stdlib.h> #include "gromacs/nbnxm/nbnxm_gpu.h" #if defined(_MSVC) # include <limits> #endif #include "nbnxm_cuda.h" #include "gromacs/gpu_utils/gpu_utils.h" #include "gromacs/gpu_utils/gpueventsynchronizer.h" #include "gromacs/gpu_utils/typecasts.cuh" #include "gromacs/gpu_utils/vectype_ops.cuh" #include "gromacs/hardware/device_information.h" #include "gromacs/mdtypes/simulation_workload.h" #include "gromacs/nbnxm/atomdata.h" #include "gromacs/nbnxm/gpu_common.h" #include "gromacs/nbnxm/gpu_common_utils.h" #include "gromacs/nbnxm/gpu_data_mgmt.h" #include "gromacs/nbnxm/grid.h" #include "gromacs/nbnxm/nbnxm.h" #include "gromacs/nbnxm/pairlist.h" #include "gromacs/timing/gpu_timing.h" #include "gromacs/utility/cstringutil.h" #include "gromacs/utility/gmxassert.h" #include "nbnxm_cuda_types.h" /***** The kernel declarations/definitions come here *****/ /* Top-level kernel declaration generation: will generate through multiple * inclusion the following flavors for all kernel declarations: * - force-only output; * - force and energy output; * - force-only with pair list pruning; * - force and energy output with pair list pruning. */ #define FUNCTION_DECLARATION_ONLY /** Force only **/ #include "nbnxm_cuda_kernels.cuh" /** Force & energy **/ #define CALC_ENERGIES #include "nbnxm_cuda_kernels.cuh" #undef CALC_ENERGIES /*** Pair-list pruning kernels ***/ /** Force only **/ #define PRUNE_NBL #include "nbnxm_cuda_kernels.cuh" /** Force & energy **/ #define CALC_ENERGIES #include "nbnxm_cuda_kernels.cuh" #undef CALC_ENERGIES #undef PRUNE_NBL /* Prune-only kernels */ #include "nbnxm_cuda_kernel_pruneonly.cuh" #undef FUNCTION_DECLARATION_ONLY /* Now generate the function definitions if we are using a single compilation unit. */ #if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT # include "nbnxm_cuda_kernel_F_noprune.cu" # include "nbnxm_cuda_kernel_F_prune.cu" # include "nbnxm_cuda_kernel_VF_noprune.cu" # include "nbnxm_cuda_kernel_VF_prune.cu" # include "nbnxm_cuda_kernel_pruneonly.cu" #endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */ namespace Nbnxm { /*! Nonbonded kernel function pointer type */ typedef void (*nbnxn_cu_kfunc_ptr_t)(const NBAtomDataGpu, const NBParamGpu, const gpu_plist, bool); /*********************************/ /*! Returns the number of blocks to be used for the nonbonded GPU kernel. */ static inline int calc_nb_kernel_nblock(int nwork_units, const DeviceInformation* deviceInfo) { int max_grid_x_size; assert(deviceInfo); /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an empty domain) and that case should be handled before this point. */ assert(nwork_units > 0); max_grid_x_size = deviceInfo->prop.maxGridSize[0]; /* do we exceed the grid x dimension limit? */ if (nwork_units > max_grid_x_size) { gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n" "The number of nonbonded work units (=number of super-clusters) exceeds the" "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size); } return nwork_units; } /* Constant arrays listing all kernel function pointers and enabling selection of a kernel in an elegant manner. */ /*! Pointers to the non-bonded kernels organized in 2-dim arrays by: * electrostatics and VDW type. 
* * Note that the row- and column-order of function pointers has to match the * order of corresponding enumerated electrostatics and vdw types, resp., * defined in nbnxn_cuda_types.h. */ /*! Force-only kernel function pointers. */ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = { { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda } }; /*! Force + energy kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[c_numElecTypes][c_numVdwTypes] = { { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda } }; /*! Force + pruning kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[c_numElecTypes][c_numVdwTypes] = { { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda } }; /*! Force + energy + pruning kernel function pointers. 
*/ static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[c_numElecTypes][c_numVdwTypes] = { { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda }, { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda } }; /*! Return a pointer to the kernel version to be executed at the current step. */ static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(enum ElecType elecType, enum VdwType vdwType, bool bDoEne, bool bDoPrune, const DeviceInformation gmx_unused* deviceInfo) { const int elecTypeIdx = static_cast<int>(elecType); const int vdwTypeIdx = static_cast<int>(vdwType); GMX_ASSERT(elecTypeIdx < c_numElecTypes, "The electrostatics type requested is not implemented in the CUDA kernels."); GMX_ASSERT(vdwTypeIdx < c_numVdwTypes, "The VdW type requested is not implemented in the CUDA kernels."); /* assert assumptions made by the kernels */ GMX_ASSERT(c_nbnxnGpuClusterSize * c_nbnxnGpuClusterSize / c_nbnxnGpuClusterpairSplit == deviceInfo->prop.warpSize, "The CUDA kernels require the " "cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size " "of the architecture targeted."); if (bDoEne) { if (bDoPrune) { return nb_kfunc_ener_prune_ptr[elecTypeIdx][vdwTypeIdx]; } else { return nb_kfunc_ener_noprune_ptr[elecTypeIdx][vdwTypeIdx]; } } else { if (bDoPrune) { return nb_kfunc_noener_prune_ptr[elecTypeIdx][vdwTypeIdx]; } else { return nb_kfunc_noener_noprune_ptr[elecTypeIdx][vdwTypeIdx]; } } } /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. 
*/ static inline int calc_shmem_required_nonbonded(const int num_threads_z, const DeviceInformation gmx_unused* deviceInfo, const NBParamGpu* nbp) { int shmem; assert(deviceInfo); /* size of shmem (force-buffers/xq/atom type preloading) */ /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */ /* i-atom x+q in shared memory */ shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4); /* cj in shared memory, for each warp separately */ shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int); if (nbp->vdwType == VdwType::CutCombGeom || nbp->vdwType == VdwType::CutCombLB) { /* i-atom LJ combination parameters in shared memory */ shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float2); } else { /* i-atom types in shared memory */ shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); } return shmem; } /*! As we execute nonbonded workload in separate streams, before launching the kernel we need to make sure that he following operations have completed: - atomdata allocation and related H2D transfers (every nstlist step); - pair list H2D transfer (every nstlist step); - shift vector H2D transfer (every nstlist step); - force (+shift force and energy) output clearing (every step). These operations are issued in the local stream at the beginning of the step and therefore always complete before the local kernel launch. The non-local kernel is launched after the local on the same device/context hence it is inherently scheduled after the operations in the local stream (including the above "misc_ops") on pre-GK110 devices with single hardware queue, but on later devices with multiple hardware queues the dependency needs to be enforced. We use the misc_ops_and_local_H2D_done event to record the point where the local x+q H2D (and all preceding) tasks are complete and synchronize with this event in the non-local stream before launching the non-bonded kernel. */ void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const InteractionLocality iloc) { NBAtomDataGpu* adat = nb->atdat; NBParamGpu* nbp = nb->nbparam; gpu_plist* plist = nb->plist[iloc]; Nbnxm::GpuTimers* timers = nb->timers; const DeviceStream& deviceStream = *nb->deviceStreams[iloc]; bool bDoTime = nb->bDoTime; /* Don't launch the non-local kernel if there is no work to do. Doing the same for the local kernel is more complicated, since the local part of the force array also depends on the non-local kernel. So to avoid complicating the code and to reduce the risk of bugs, we always call the local kernel, and later (not in this function) the stream wait, local f copyback and the f buffer clearing. All these operations, except for the local interaction kernel, are needed for the non-local interactions. The skip of the local kernel call is taken care of later in this function. */ if (canSkipNonbondedWork(*nb, iloc)) { plist->haveFreshList = false; return; } if (nbp->useDynamicPruning && plist->haveFreshList) { /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false (TODO: ATM that's the way the timing accounting can distinguish between separate prune kernel and combined force+prune, maybe we need a better way?). 
*/ gpu_launch_kernel_pruneonly(nb, iloc, 1); } if (plist->nsci == 0) { /* Don't launch an empty local kernel (not allowed with CUDA) */ return; } /* beginning of timed nonbonded calculation section */ if (bDoTime) { timers->interaction[iloc].nb_k.openTimingRegion(deviceStream); } /* Kernel launch config: * - The thread block dimensions match the size of i-clusters, j-clusters, * and j-cluster concurrency, in x, y, and z, respectively. * - The 1D block-grid contains as many blocks as super-clusters. */ int num_threads_z = 1; if (nb->deviceContext_->deviceInfo().prop.major == 3 && nb->deviceContext_->deviceInfo().prop.minor == 7) { num_threads_z = 2; } int nblock = calc_nb_kernel_nblock(plist->nsci, &nb->deviceContext_->deviceInfo()); KernelLaunchConfig config; config.blockSize[0] = c_clSize; config.blockSize[1] = c_clSize; config.blockSize[2] = num_threads_z; config.gridSize[0] = nblock; config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, &nb->deviceContext_->deviceInfo(), nbp); if (debug) { fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t" "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n" "\tShMem: %zu\n", config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0], config.gridSize[1], plist->nsci * c_nbnxnGpuNumClusterPerSupercluster, c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize); } auto* timingEvent = bDoTime ? timers->interaction[iloc].nb_k.fetchNextEvent() : nullptr; const auto kernel = select_nbnxn_kernel(nbp->elecType, nbp->vdwType, stepWork.computeEnergy, (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune), &nb->deviceContext_->deviceInfo()); const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &stepWork.computeVirial); launchGpuKernel(kernel, config, deviceStream, timingEvent, "k_calc_nb", kernelArgs); if (bDoTime) { timers->interaction[iloc].nb_k.closeTimingRegion(deviceStream); } if (GMX_NATIVE_WINDOWS) { /* Windows: force flushing WDDM queue */ cudaStreamQuery(deviceStream.stream()); } } /*! Calculates the amount of shared memory required by the CUDA kernel in use. */ static inline int calc_shmem_required_prune(const int num_threads_z) { int shmem; /* i-atom x in shared memory */ shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float4); /* cj in shared memory, for each warp separately */ shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int); return shmem; } void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts) { NBAtomDataGpu* adat = nb->atdat; NBParamGpu* nbp = nb->nbparam; gpu_plist* plist = nb->plist[iloc]; Nbnxm::GpuTimers* timers = nb->timers; const DeviceStream& deviceStream = *nb->deviceStreams[iloc]; bool bDoTime = nb->bDoTime; if (plist->haveFreshList) { GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part"); /* Set rollingPruningNumParts to signal that it is not set */ plist->rollingPruningNumParts = 0; plist->rollingPruningPart = 0; } else { if (plist->rollingPruningNumParts == 0) { plist->rollingPruningNumParts = numParts; } else { GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps"); } } /* Use a local variable for part and update in plist, so we can return here * without duplicating the part increment code. 
*/ int part = plist->rollingPruningPart; plist->rollingPruningPart++; if (plist->rollingPruningPart >= plist->rollingPruningNumParts) { plist->rollingPruningPart = 0; } /* Compute the number of list entries to prune in this pass */ int numSciInPart = (plist->nsci - part) / numParts; /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */ if (numSciInPart <= 0) { plist->haveFreshList = false; return; } GpuRegionTimer* timer = nullptr; if (bDoTime) { timer = &(plist->haveFreshList ? timers->interaction[iloc].prune_k : timers->interaction[iloc].rollingPrune_k); } /* beginning of timed prune calculation section */ if (bDoTime) { timer->openTimingRegion(deviceStream); } /* Kernel launch config: * - The thread block dimensions match the size of i-clusters, j-clusters, * and j-cluster concurrency, in x, y, and z, respectively. * - The 1D block-grid contains as many blocks as super-clusters. */ int num_threads_z = c_pruneKernelJ4Concurrency; int nblock = calc_nb_kernel_nblock(numSciInPart, &nb->deviceContext_->deviceInfo()); KernelLaunchConfig config; config.blockSize[0] = c_clSize; config.blockSize[1] = c_clSize; config.blockSize[2] = num_threads_z; config.gridSize[0] = nblock; config.sharedMemorySize = calc_shmem_required_prune(num_threads_z); if (debug) { fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t" "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n" "\tShMem: %zu\n", config.blockSize[0], config.blockSize[1], config.blockSize[2], config.gridSize[0], config.gridSize[1], numSciInPart * c_nbnxnGpuNumClusterPerSupercluster, c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize); } auto* timingEvent = bDoTime ? timer->fetchNextEvent() : nullptr; constexpr char kernelName[] = "k_pruneonly"; const auto kernel = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>; const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part); launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs); /* TODO: consider a more elegant way to track which kernel has been called (combined or separate 1st pass prune, rolling prune). */ if (plist->haveFreshList) { plist->haveFreshList = false; /* Mark that pruning has been done */ nb->timers->interaction[iloc].didPrune = true; } else { /* Mark that rolling pruning has been done */ nb->timers->interaction[iloc].didRollingPrune = true; } if (bDoTime) { timer->closeTimingRegion(deviceStream); } if (GMX_NATIVE_WINDOWS) { /* Windows: force flushing WDDM queue */ cudaStreamQuery(deviceStream.stream()); } } void cuda_set_cacheconfig() { cudaError_t stat; for (int i = 0; i < c_numElecTypes; i++) { for (int j = 0; j < c_numVdwTypes; j++) { /* Default kernel 32/32 kB Shared/L1 */ cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual); cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual); cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual); stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual); CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed"); } } } } // namespace Nbnxm
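/*
 * A minimal standalone sketch of the shared-memory arithmetic performed by
 * calc_shmem_required_nonbonded() above. The constants used here (c_clSize == 8,
 * c_nbnxnGpuNumClusterPerSupercluster == 8, c_nbnxnGpuClusterpairSplit == 2,
 * c_nbnxnGpuJgroupSize == 4) are assumed typical values for illustration only; the real
 * values come from the nbnxm headers and may differ for a given build.
 */
static inline int example_shmem_nonbonded(int num_threads_z, bool useLjCombRule)
{
    const int clSize = 8, clustersPerSupercluster = 8, clusterpairSplit = 2, jGroupSize = 4;
    /* i-atom x+q preload: 8 * 8 * sizeof(float4) = 1024 bytes */
    int shmem = clustersPerSupercluster * clSize * static_cast<int>(4 * sizeof(float));
    /* cj preload, stored separately for each warp: num_threads_z * 2 * 4 * sizeof(int) bytes */
    shmem += num_threads_z * clusterpairSplit * jGroupSize * static_cast<int>(sizeof(int));
    /* i-atom LJ combination parameters (float2) or atom types (int) */
    shmem += clustersPerSupercluster * clSize
             * static_cast<int>(useLjCombRule ? 2 * sizeof(float) : sizeof(int));
    /* e.g. num_threads_z == 1 with atom types: 1024 + 32 + 256 = 1312 bytes */
    return shmem;
}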
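/*
 * A small sketch, under the same assumptions as the code above, of the rolling-pruning
 * bookkeeping in gpu_launch_kernel_pruneonly(): the pair list is pruned over numParts
 * passes, the pass index wraps around after a full cycle, and the number of super-cluster
 * entries handled per pass follows the same formula used before the kernel launch. The
 * function name and the use of printf are illustrative only.
 */
#include <cstdio>

static void example_rolling_prune_schedule(int nsci, int numParts)
{
    int part = 0; /* plays the role of plist->rollingPruningPart */
    for (int step = 0; step < 2 * numParts; step++)
    {
        int numSciInPart = (nsci - part) / numParts; /* entries pruned in this pass */
        std::printf("pass %d: part %d prunes %d of %d sci entries\n", step, part, numSciInPart, nsci);
        part++;
        if (part >= numParts) /* wrap around, as done with plist->rollingPruningPart */
        {
            part = 0;
        }
    }
}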
extern "C" { #include "metis.h" } #include "AggMIS_Types.h" #include "AggMIS_Aggregation_GPU.h" #include "AggMIS_MIS_GPU.h" #include "AggMIS_MIS_CPU.h" #include "AggMIS_MergeSplitConditioner.h" #include "AggMIS_MergeSplitConditioner_CPU.h" #include "AggMIS_GraphHelpers.h" #include <smoothedMG/aggregators/Timer.h> #include <iostream> #include <fstream> #include <string> #include <sstream> #include "Logger.h" namespace misHelpers { double totalAggregationTime = 0; int totalAggregationCalls = 0; namespace CP { void OldMIS(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int parameters, int part_max_size, bool verbose) { int numNodesIn = adjIndexesIn.size() - 1; // Size of input graph int fineDepth = parameters % 100; // The MIS depth for the first aggregation int coarseDepth = (parameters / 100) % 100; // The MIS depth for the second aggregation int minAggregateSize = (parameters / 10000) % 10; // The minimum acceptable size for an aggregate IdxVector_d fineAggregate(numNodesIn, 0); // The partition label for the fine partition IdxVector_d fineAggregateSort; // The copy of the fine partition label that is sorted IdxVector_d finePartSizes; // Vector with sizes of fine partitions IdxVector_d neighborCountsIn; // Vector to hold the sizes for each nodes adjacency IdxVector_d permutedAdjIndexesIn; // Vector to hold the indices for the initial adjacency permutation IdxVector_d permutedAdjacencyIn; // Holds the permuted initial adjacency IdxVector_d neighborCountsOut; // Holds the counts of neighbors for the induced graph IdxVector_d coarseAggregate; // Holds the partition label for the coarse partition IdxVector_d aggregateRemapId; // Holds the current id of each aggregate IdxVector_d iAggregateRemapId; // Holds the permutation to remap the aggregate id' IdxVector_d aggregateRemapIndex; // Holds the start index of each aggregate IdxVector_d inducedNodeWeights; // Holds the sizes of the fine aggregates AggMIS::Types::JTimer timmy; if (verbose) std::cout << "Finished initializing IdxVector_d elements." << std::endl; partitionLabel = IdxVector_d(numNodesIn, 2); // Holds the partition each vertex is located in if (verbose) std::cout << "Finished with partitionLabel creation." << std::endl; timmy.start(); misHelpers::aggregateGraph(minAggregateSize, fineDepth, adjIndexesIn, adjacencyIn, fineAggregate, verbose); if (verbose) std::cout << "Finished with aggregateGraph." << std::endl; timmy.stop(); totalAggregationTime += timmy.getElapsedTimeInSec(true); if (verbose) printf("Fine conditioning time: %3.3fs\n", timmy.getElapsedTimeInSec(true)); totalAggregationCalls++; Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Fine Aggregation"); // Setting the permutation array to have values equal to element indices permutation = IdxVector_d(numNodesIn); misHelpers::fillWithIndex(permutation); // Sorting arrays together: fineAggregateSort = fineAggregate; thrust::sort_by_key(fineAggregateSort.begin(), fineAggregateSort.end(), permutation.begin()); if (verbose) std::cout << "Finished with fineAggregateSort." << std::endl; // Building the permutation array: misHelpers::getInversePermutation(permutation, ipermutation); if (verbose) std::cout << "Got permutation array." 
<< std::endl; // Getting the aggregate indices and node weights for the induced graph misHelpers::getPartSizes(fineAggregateSort, inducedNodeWeights, aggregateIdx); if (verbose) std::cout << "Got partition sizes." << std::endl; // Getting the induced graph: misHelpers::getInducedGraph(adjIndexesIn, adjacencyIn, fineAggregate, adjIndexesOut, adjacencyOut); if (verbose) std::cout << "Got induced graph." << std::endl; // Doing the coarse aggregation: int maxSize = part_max_size; int fullSize = adjIndexesIn.size() - 1; coarseAggregate = IdxVector_d(fullSize, 1); int inducedGraphSize = adjIndexesOut.size() - 1; timmy.start(); misHelpers::aggregateWeightedGraph(maxSize, fullSize, coarseDepth, adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, verbose); timmy.stop(); if (verbose) std::cout << "Finished aggregateWeightedGraph." << std::endl; Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Coarse Aggregation"); // Performing new version of getting induced graph misHelpers::remapInducedGraph(adjIndexesOut, adjacencyOut, coarseAggregate); if (verbose) std::cout << "Finished remapInducedGraph." << std::endl; // Filling in the partitionLabel: misHelpers::fillPartitionLabel(coarseAggregate, fineAggregateSort, partitionLabel); if (verbose) std::cout << "Finished fillPartitionLabel." << std::endl; // Do a stable sort by key with the partitionLabel as the key: thrust::stable_sort_by_key(partitionLabel.begin(), partitionLabel.end(), thrust::make_zip_iterator(thrust::make_tuple(fineAggregateSort.begin(), permutation.begin()))); if (verbose) std::cout << "Finished thrust::stable_sort_by_key." << std::endl; // Remapping the aggregate id's: aggregateRemapId = IdxVector_d(aggregateIdx.size() - 1, 0); aggregateRemapIndex = IdxVector_d(aggregateIdx.size() - 1, 0); misHelpers::fillWithIndex(aggregateRemapId); misHelpers::getAggregateStartIndices(fineAggregateSort, aggregateRemapIndex); thrust::stable_sort_by_key(aggregateRemapIndex.begin(), aggregateRemapIndex.end(), aggregateRemapId.begin()); misHelpers::getInversePermutation(aggregateRemapId, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregateSort, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregate, iAggregateRemapId); // Sort the coarseAggregate for indices and permutation: thrust::sort(coarseAggregate.begin(), coarseAggregate.end()); misHelpers::getPartIndices(coarseAggregate, partitionIdx); // Get indices for the fine aggregates misHelpers::getPartIndices(fineAggregateSort, aggregateIdx); // Putting in the right permutation vectors for the output: ipermutation = permutation; misHelpers::getInversePermutation(ipermutation, permutation); } void MetisBottomUp(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int parameters, int part_max_size, bool verbose) { int numNodesIn = adjIndexesIn.size() - 1; // Size of input graph IdxVector_d fineAggregate(numNodesIn, 0); // The partition label for the fine partition IdxVector_d fineAggregateSort; // The copy of the fine partition label that is sorted IdxVector_d finePartSizes; // Vector with sizes of fine partitions IdxVector_d neighborCountsIn; // Vector to hold the sizes for each nodes adjacency IdxVector_d permutedAdjIndexesIn; // Vector to hold the indices for the initial adjacency permutation IdxVector_d permutedAdjacencyIn; // Holds the permuted 
initial adjacency IdxVector_d neighborCountsOut; // Holds the counts of neighbors for the induced graph IdxVector_d coarseAggregate; // Holds the partition label for the coarse partition IdxVector_d aggregateRemapId; // Holds the current id of each aggregate IdxVector_d iAggregateRemapId; // Holds the permutation to remap the aggregate id' IdxVector_d aggregateRemapIndex; // Holds the start index of each aggregate IdxVector_d inducedNodeWeights; // Holds the sizes of the fine aggregates partitionLabel = IdxVector_d(numNodesIn, 2); // Holds the partition each vertex is located in int fineSize, coarseSize; coarseSize = part_max_size % 1000; fineSize = (part_max_size / 1000) % 1000; fineSize = fineSize <= 0 ? 1 : fineSize; // Getting the fine aggregation with Metis AT::IntVector_h indices(adjIndexesIn.size()); thrust::copy(adjIndexesIn.begin(), adjIndexesIn.end(), indices.begin()); AT::IntVector_h adjacency(adjacencyIn.size()); thrust::copy(adjacencyIn.begin(), adjacencyIn.end(), adjacency.begin()); AT::IntVector_h result(numNodesIn); Help::GetMetisAggregation(indices, adjacency, result, fineSize); thrust::copy(result.begin(), result.end(), fineAggregate.begin()); Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Fine Aggregation"); // Setting the permutation array to have values equal to element indices permutation = IdxVector_d(numNodesIn); misHelpers::fillWithIndex(permutation); // Sorting arrays together: fineAggregateSort = fineAggregate; thrust::sort_by_key(fineAggregateSort.begin(), fineAggregateSort.end(), permutation.begin()); // Building the permutation array: misHelpers::getInversePermutation(permutation, ipermutation); // Getting the aggregate indices and node weights for the induced graph misHelpers::getPartSizes(fineAggregateSort, inducedNodeWeights, aggregateIdx); // Getting the induced graph: misHelpers::getInducedGraph(adjIndexesIn, adjacencyIn, fineAggregate, adjIndexesOut, adjacencyOut); int inducedGraphSize = adjIndexesOut.size() - 1; // Doing the coarse aggregation (assuming Metis parts are close enough in size to ignore weighting) indices.assign(adjIndexesOut.begin(), adjIndexesOut.end()); adjacency.assign(adjacencyOut.begin(), adjacencyOut.end()); Help::GetMetisAggregation(indices, adjacency, result, coarseSize); coarseAggregate.assign(result.begin(), result.end()); Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Coarse Aggregation"); // Performing new version of getting induced graph misHelpers::remapInducedGraph(adjIndexesOut, adjacencyOut, coarseAggregate); // Filling in the partitionLabel: misHelpers::fillPartitionLabel(coarseAggregate, fineAggregateSort, partitionLabel); // Do a stable sort by key with the partitionLabel as the key: thrust::stable_sort_by_key(partitionLabel.begin(), partitionLabel.end(), thrust::make_zip_iterator(thrust::make_tuple(fineAggregateSort.begin(), permutation.begin()))); // Remapping the aggregate id's: aggregateRemapId = IdxVector_d(aggregateIdx.size() - 1, 0); aggregateRemapIndex = IdxVector_d(aggregateIdx.size() - 1, 0); misHelpers::fillWithIndex(aggregateRemapId); misHelpers::getAggregateStartIndices(fineAggregateSort, aggregateRemapIndex); thrust::stable_sort_by_key(aggregateRemapIndex.begin(), aggregateRemapIndex.end(), aggregateRemapId.begin()); misHelpers::getInversePermutation(aggregateRemapId, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregateSort, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregate, iAggregateRemapId); // Sort the coarseAggregate 
for indices and permutation: thrust::sort(coarseAggregate.begin(), coarseAggregate.end()); misHelpers::getPartIndices(coarseAggregate, partitionIdx); // Get indices for the fine aggregates misHelpers::getPartIndices(fineAggregateSort, aggregateIdx); // Putting in the right permutation vectors for the output: ipermutation = permutation; misHelpers::getInversePermutation(ipermutation, permutation); // Clean up temp vectors indices.clear(); adjacency.clear(); result.clear(); if (verbose) printf("Total aggregation time (Metis): %3.4fs for %d calls\n", totalAggregationTime, totalAggregationCalls); } void MetisTopDown(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int parameters, int part_max_size, bool verbose) { int numNodesIn = adjIndexesIn.size() - 1; // Size of input graph int fineDepth = parameters % 100; // The MIS depth for the first aggregation int coarseDepth = (parameters / 100) % 100; // The MIS depth for the second aggregation int minAggregateSize = (parameters / 10000) % 10; // The minimum acceptable size for an aggregate IdxVector_d fineAggregate(numNodesIn, 0); // The partition label for the fine partition IdxVector_d fineAggregateSort; // The copy of the fine partition label that is sorted IdxVector_d finePartSizes; // Vector with sizes of fine partitions IdxVector_d neighborCountsIn; // Vector to hold the sizes for each nodes adjacency IdxVector_d permutedAdjIndexesIn; // Vector to hold the indices for the initial adjacency permutation IdxVector_d permutedAdjacencyIn; // Holds the permuted initial adjacency IdxVector_d neighborCountsOut; // Holds the counts of neighbors for the induced graph IdxVector_d coarseAggregate; // Holds the partition label for the coarse partition IdxVector_d aggregateRemapId; // Holds the current id of each aggregate IdxVector_d iAggregateRemapId; // Holds the permutation to remap the aggregate id' IdxVector_d aggregateRemapIndex; // Holds the start index of each aggregate IdxVector_d inducedNodeWeights; // Holds the sizes of the fine aggregates partitionLabel = IdxVector_d(numNodesIn, 2); // Holds the partition each vertex is located in misHelpers::aggregateGraph(minAggregateSize, fineDepth, adjIndexesIn, adjacencyIn, fineAggregate, verbose); // Setting the permutation array to have values equal to element indices permutation = IdxVector_d(numNodesIn); misHelpers::fillWithIndex(permutation); // Sorting arrays together: fineAggregateSort = fineAggregate; thrust::sort_by_key(fineAggregateSort.begin(), fineAggregateSort.end(), permutation.begin()); // Building the permutation array: misHelpers::getInversePermutation(permutation, ipermutation); // Getting the aggregate indices and node weights for the induced graph misHelpers::getPartSizes(fineAggregateSort, inducedNodeWeights, aggregateIdx); // Getting the induced graph: misHelpers::getInducedGraph(adjIndexesIn, adjacencyIn, fineAggregate, adjIndexesOut, adjacencyOut); // Doing the coarse aggregation: int maxSize = part_max_size; //400; int fullSize = adjIndexesIn.size() - 1; coarseAggregate = IdxVector_d(fullSize, 1); misHelpers::aggregateWeightedGraph(maxSize, fullSize, coarseDepth, adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, verbose); // Performing new version of getting induced graph misHelpers::remapInducedGraph(adjIndexesOut, adjacencyOut, coarseAggregate); // Filling in the 
partitionLabel: misHelpers::fillPartitionLabel(coarseAggregate, fineAggregateSort, partitionLabel); // Do a stable sort by key with the partitionLabel as the key: thrust::stable_sort_by_key(partitionLabel.begin(), partitionLabel.end(), thrust::make_zip_iterator(thrust::make_tuple(fineAggregateSort.begin(), permutation.begin()))); // Remapping the aggregate id's: aggregateRemapId = IdxVector_d(aggregateIdx.size() - 1, 0); aggregateRemapIndex = IdxVector_d(aggregateIdx.size() - 1, 0); misHelpers::fillWithIndex(aggregateRemapId); misHelpers::getAggregateStartIndices(fineAggregateSort, aggregateRemapIndex); thrust::stable_sort_by_key(aggregateRemapIndex.begin(), aggregateRemapIndex.end(), aggregateRemapId.begin()); misHelpers::getInversePermutation(aggregateRemapId, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregateSort, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregate, iAggregateRemapId); // Sort the coarseAggregate for indices and permutation: thrust::sort(coarseAggregate.begin(), coarseAggregate.end()); misHelpers::getPartIndices(coarseAggregate, partitionIdx); // Get indices for the fine aggregates misHelpers::getPartIndices(fineAggregateSort, aggregateIdx); // Putting in the right permutation vectors for the output: ipermutation = permutation; misHelpers::getInversePermutation(ipermutation, permutation); } void NewMIS(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int parameters, int part_max_size, bool verbose) { int numNodesIn = adjIndexesIn.size() - 1; // Size of input graph int fineDepth = parameters % 100; // The MIS depth for the first aggregation int coarseDepth = (parameters / 100) % 100; IdxVector_d fineAggregate(numNodesIn, 0); // The partition label for the fine partition IdxVector_d fineAggregateSort; // The copy of the fine partition label that is sorted IdxVector_d finePartSizes; // Vector with sizes of fine partitions IdxVector_d neighborCountsIn; // Vector to hold the sizes for each nodes adjacency IdxVector_d permutedAdjIndexesIn; // Vector to hold the indices for the initial adjacency permutation IdxVector_d permutedAdjacencyIn; // Holds the permuted initial adjacency IdxVector_d neighborCountsOut; // Holds the counts of neighbors for the induced graph IdxVector_d coarseAggregate; // Holds the partition label for the coarse partition IdxVector_d aggregateRemapId; // Holds the current id of each aggregate IdxVector_d iAggregateRemapId; // Holds the permutation to remap the aggregate id' IdxVector_d aggregateRemapIndex; // Holds the start index of each aggregate IdxVector_d inducedNodeWeights; // Holds the sizes of the fine aggregates AggMIS::Types::JTimer jimmy; AggMIS::Types::JTimer iTime; int fineMin, fineMax, coarseMin, coarseMax; fineMax = parameters % 1000; fineMin = (parameters / 1000) % 1000; coarseMax = part_max_size % 1000; coarseMin = (part_max_size / 1000) % 1000; coarseDepth = (parameters / 1000000) % 10; fineDepth = (parameters / 10000000) % 10; partitionLabel = IdxVector_d(numNodesIn, 2); // Holds the partition each vertex is located in // First transfer in the graph AT::Graph_d fineGraph; fineGraph.indices->swap(adjIndexesIn); fineGraph.adjacency->swap(adjacencyIn); jimmy.start(); // Now get an MIS of the graph iTime.start(); IntVector_d *fineMIS = AggMIS::MIS::RandomizedMIS(fineDepth, fineGraph); iTime.stop(); // Aggregate to nearest 
iTime.start(); IntVector_d *fineAgg = AggMIS::Aggregation::AggregateToNearest(fineGraph, *fineMIS); iTime.stop(); // Getting a conditioner iTime.start(); AggMIS::MergeSplitGPU::MergeSplitConditionerGPU fineConditioner(fineGraph, *fineAgg); int desiredSize = (fineMin + fineMax) / 2; fineConditioner.SetSizeBounds(fineMin, fineMax); fineConditioner.Condition(desiredSize, true, .1, .1, 10); iTime.stop(); jimmy.stop(); // Getting the count of the MIS int misCount = thrust::count(fineMIS->begin(), fineMIS->end(), 1); // DataRecorder::Add("Fine MIS Count", misCount); fineGraph.indices->swap(adjIndexesIn); fineGraph.adjacency->swap(adjacencyIn); fineAgg->swap(fineAggregate); // Record initial aggregation stats Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Initial Fine Aggregation"); fineAgg->swap(fineAggregate); // Swap out the aggregation and graph fineConditioner.GetAggregation()->swap(fineAggregate); // Record final aggregation stats Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Fine Aggregation"); // Clear temp stuff fineMIS->clear(); delete fineMIS; fineAgg->clear(); delete fineAgg; // Setting the permutation array to have values equal to element indices permutation = IdxVector_d(numNodesIn); misHelpers::fillWithIndex(permutation); // Sorting arrays together: fineAggregateSort = fineAggregate; thrust::sort_by_key(fineAggregateSort.begin(), fineAggregateSort.end(), permutation.begin()); // Building the permutation array: misHelpers::getInversePermutation(permutation, ipermutation); // Getting the aggregate indices and node weights for the induced graph misHelpers::getPartSizes(fineAggregateSort, inducedNodeWeights, aggregateIdx); // Getting the induced graph: misHelpers::getInducedGraph(adjIndexesIn, adjacencyIn, fineAggregate, adjIndexesOut, adjacencyOut); int inducedGraphSize = adjIndexesOut.size() - 1; // Doing the coarse aggregation with AggMIS // Swapping in the graph data and weights AT::Graph_d coarseGraph; coarseGraph.indices->swap(adjIndexesOut); coarseGraph.adjacency->swap(adjacencyOut); IntVector_d nodeWeights; nodeWeights.swap(inducedNodeWeights); jimmy.start(); // Getting an MIS iTime.start(); IntVector_d *coarseMIS = AggMIS::MIS::RandomizedMIS(coarseDepth, coarseGraph); iTime.stop(); // Getting initial aggregation iTime.start(); IntVector_d *coarseAgg = AggMIS::Aggregation::AggregateToNearest(coarseGraph, *coarseMIS); iTime.stop(); // Getting a conditioner iTime.start(); AggMIS::MergeSplitGPU::MergeSplitConditionerGPU coarseConditioner(coarseGraph, *coarseAgg); coarseConditioner.SetNodeWeights(nodeWeights); coarseConditioner.SetSizeBounds(coarseMin, coarseMax); desiredSize = (coarseMin + coarseMax) / 2; coarseConditioner.Condition(desiredSize, true, .1, .1, 10); jimmy.stop(); iTime.stop(); misCount = thrust::count(coarseMIS->begin(), coarseMIS->end(), 1); // Swap out the aggregation, graph, and node weights coarseGraph.indices->swap(adjIndexesOut); coarseGraph.adjacency->swap(adjacencyOut); coarseConditioner.GetNodeWeights()->swap(inducedNodeWeights); coarseAgg->swap(coarseAggregate); // Record initial aggregation stats Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Initial Coarse Aggregation"); coarseAgg->swap(coarseAggregate); coarseConditioner.GetAggregation()->swap(coarseAggregate); // Record final aggregation stats Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Coarse Aggregation"); // Clear temp stuff coarseMIS->clear(); delete coarseMIS; coarseAgg->clear(); 
delete coarseAgg; // Performing new version of getting induced graph misHelpers::remapInducedGraph(adjIndexesOut, adjacencyOut, coarseAggregate); // Filling in the partitionLabel: misHelpers::fillPartitionLabel(coarseAggregate, fineAggregateSort, partitionLabel); // Do a stable sort by key with the partitionLabel as the key: thrust::stable_sort_by_key(partitionLabel.begin(), partitionLabel.end(), thrust::make_zip_iterator(thrust::make_tuple(fineAggregateSort.begin(), permutation.begin()))); // Remapping the aggregate id's: aggregateRemapId = IdxVector_d(aggregateIdx.size() - 1, 0); aggregateRemapIndex = IdxVector_d(aggregateIdx.size() - 1, 0); misHelpers::fillWithIndex(aggregateRemapId); misHelpers::getAggregateStartIndices(fineAggregateSort, aggregateRemapIndex); thrust::stable_sort_by_key(aggregateRemapIndex.begin(), aggregateRemapIndex.end(), aggregateRemapId.begin()); misHelpers::getInversePermutation(aggregateRemapId, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregateSort, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregate, iAggregateRemapId); // Sort the coarseAggregate for indices and permutation: thrust::sort(coarseAggregate.begin(), coarseAggregate.end()); misHelpers::getPartIndices(coarseAggregate, partitionIdx); // Get indices for the fine aggregates misHelpers::getPartIndices(fineAggregateSort, aggregateIdx); // Putting in the right permutation vectors for the output: ipermutation = permutation; misHelpers::getInversePermutation(ipermutation, permutation); } void NewMIS_CPU(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int parameters, int part_max_size, bool verbose) { int numNodesIn = adjIndexesIn.size() - 1; // Size of input graph IdxVector_d fineAggregate(numNodesIn, 0); // The partition label for the fine partition IdxVector_d fineAggregateSort; // The copy of the fine partition label that is sorted IdxVector_d finePartSizes; // Vector with sizes of fine partitions IdxVector_d neighborCountsIn; // Vector to hold the sizes for each nodes adjacency IdxVector_d permutedAdjIndexesIn; // Vector to hold the indices for the initial adjacency permutation IdxVector_d permutedAdjacencyIn; // Holds the permuted initial adjacency IdxVector_d neighborCountsOut; // Holds the counts of neighbors for the induced graph IdxVector_d coarseAggregate; // Holds the partition label for the coarse partition IdxVector_d aggregateRemapId; // Holds the current id of each aggregate IdxVector_d iAggregateRemapId; // Holds the permutation to remap the aggregate id' IdxVector_d aggregateRemapIndex; // Holds the start index of each aggregate IdxVector_d inducedNodeWeights; // Holds the sizes of the fine aggregates // Creating timer AggMIS::Types::JTimer jimmy; AggMIS::Types::JTimer iTime; // Parsing the parameters int fineMin, fineMax, coarseMin, coarseMax, fineDepth, coarseDepth; fineMax = parameters % 1000; fineMin = (parameters / 1000) % 1000; coarseMax = part_max_size % 1000; coarseMin = (part_max_size / 1000) % 1000; coarseDepth = (parameters / 1000000) % 10; fineDepth = (parameters / 10000000) % 10; // Initialize the partitionLabel array partitionLabel = IdxVector_d(numNodesIn, 2); // Holds the partition each vertex is located in // Getting aggregation of graph with AggMIS AT::Graph_d fineGraph; fineGraph.indices->swap(adjIndexesIn); fineGraph.adjacency->swap(adjacencyIn); // 
Getting a host version of the graph AT::Graph_h fineGraph_h(fineGraph); jimmy.start(); // Now get an MIS of the graph iTime.start(); AT::IntVector_h *fineMIS = AggMIS::MIS::FloodFillMIS(fineDepth, fineGraph_h); iTime.stop(); // Aggregate to nearest iTime.start(); AT::IntVector_h *fineAgg = AggMIS::Aggregation::AggregateToNearest(fineGraph_h, *fineMIS); iTime.stop(); // Getting a conditioner iTime.start(); AggMIS::MergeSplitCPU::MergeSplitConditionerCPU fineConditioner(fineGraph_h, *fineAgg); int desiredSize = (fineMin + fineMax) / 2; fineConditioner.SetSizeBounds(fineMin, fineMax); fineConditioner.Condition(desiredSize, true, .1, .1, 10); jimmy.stop(); iTime.stop(); // Getting the count of the MIS int misCount = thrust::count(fineMIS->begin(), fineMIS->end(), 1); // DataRecorder::Add("Fine MIS Count", misCount); // Swap out the aggregation and graph fineGraph.indices->swap(adjIndexesIn); fineGraph.adjacency->swap(adjacencyIn); fineAggregate.assign(fineAgg->begin(), fineAgg->end()); Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Initial Fine Aggregation"); fineAggregate.assign(fineConditioner.GetAggregation()->begin(), fineConditioner.GetAggregation()->end()); Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Fine Aggregation"); // Clear temp stuff fineMIS->clear(); delete fineMIS; fineAgg->clear(); delete fineAgg; // Setting the permutation array to have values equal to element indices permutation = IdxVector_d(numNodesIn); misHelpers::fillWithIndex(permutation); // Sorting arrays together: fineAggregateSort = fineAggregate; thrust::sort_by_key(fineAggregateSort.begin(), fineAggregateSort.end(), permutation.begin()); // Building the permutation array: misHelpers::getInversePermutation(permutation, ipermutation); // Getting the aggregate indices and node weights for the induced graph misHelpers::getPartSizes(fineAggregateSort, inducedNodeWeights, aggregateIdx); // Getting the induced graph: misHelpers::getInducedGraph(adjIndexesIn, adjacencyIn, fineAggregate, adjIndexesOut, adjacencyOut); int inducedGraphSize = adjIndexesOut.size() - 1; // Doing the coarse aggregation with AggMIS // Swapping in the graph data and weights AT::Graph_d coarseGraph; coarseGraph.indices->swap(adjIndexesOut); coarseGraph.adjacency->swap(adjacencyOut); AT::Graph_h coarseGraph_h(coarseGraph); AT::IntVector_h nodeWeights_h(inducedNodeWeights.begin(), inducedNodeWeights.end()); jimmy.start(); // Getting an MIS iTime.start(); AT::IntVector_h *coarseMIS = AggMIS::MIS::FloodFillMIS(coarseDepth, coarseGraph_h); iTime.stop(); // Getting initial aggregation iTime.start(); AT::IntVector_h *coarseAgg = AggMIS::Aggregation::AggregateToNearest(coarseGraph_h, *coarseMIS); iTime.stop(); // Getting a conditioner iTime.start(); AggMIS::MergeSplitCPU::MergeSplitConditionerCPU coarseConditioner(coarseGraph_h, *coarseAgg); coarseConditioner.SetNodeWeights(nodeWeights_h); coarseConditioner.SetSizeBounds(coarseMin, coarseMax); desiredSize = (coarseMin + coarseMax) / 2; coarseConditioner.Condition(desiredSize, true, .1, .1, 10); jimmy.stop(); iTime.stop(); misCount = thrust::count(coarseMIS->begin(), coarseMIS->end(), 1); // Swap out the aggregation, graph, and node weights coarseGraph.indices->swap(adjIndexesOut); coarseGraph.adjacency->swap(adjacencyOut); coarseAggregate.assign(coarseAgg->begin(), coarseAgg->end()); Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Initial Coarse Aggregation"); coarseAggregate.assign(coarseConditioner.GetAggregation()->begin(), 
coarseConditioner.GetAggregation()->end()); Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Coarse Aggregation"); // Clear temp stuff coarseMIS->clear(); delete coarseMIS; coarseAgg->clear(); delete coarseAgg; // Performing new version of getting induced graph misHelpers::remapInducedGraph(adjIndexesOut, adjacencyOut, coarseAggregate); // Filling in the partitionLabel: misHelpers::fillPartitionLabel(coarseAggregate, fineAggregateSort, partitionLabel); // Do a stable sort by key with the partitionLabel as the key: thrust::stable_sort_by_key(partitionLabel.begin(), partitionLabel.end(), thrust::make_zip_iterator(thrust::make_tuple(fineAggregateSort.begin(), permutation.begin()))); // Remapping the aggregate id's: aggregateRemapId = IdxVector_d(aggregateIdx.size() - 1, 0); aggregateRemapIndex = IdxVector_d(aggregateIdx.size() - 1, 0); misHelpers::fillWithIndex(aggregateRemapId); misHelpers::getAggregateStartIndices(fineAggregateSort, aggregateRemapIndex); thrust::stable_sort_by_key(aggregateRemapIndex.begin(), aggregateRemapIndex.end(), aggregateRemapId.begin()); misHelpers::getInversePermutation(aggregateRemapId, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregateSort, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregate, iAggregateRemapId); // Sort the coarseAggregate for indices and permutation: thrust::sort(coarseAggregate.begin(), coarseAggregate.end()); misHelpers::getPartIndices(coarseAggregate, partitionIdx); // Get indices for the fine aggregates misHelpers::getPartIndices(fineAggregateSort, aggregateIdx); // Putting in the right permutation vectors for the output: ipermutation = permutation; misHelpers::getInversePermutation(ipermutation, permutation); if (verbose) printf("Total aggregation time (Conditioned MIS CPU): %3.4fs for %d calls\n", totalAggregationTime, totalAggregationCalls); } void LightMIS_CPU(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int parameters, int part_max_size, bool verbose) { int numNodesIn = adjIndexesIn.size() - 1; // Size of input graph IdxVector_d fineAggregate(numNodesIn, 0); // The partition label for the fine partition IdxVector_d fineAggregateSort; // The copy of the fine partition label that is sorted IdxVector_d finePartSizes; // Vector with sizes of fine partitions IdxVector_d neighborCountsIn; // Vector to hold the sizes for each nodes adjacency IdxVector_d permutedAdjIndexesIn; // Vector to hold the indices for the initial adjacency permutation IdxVector_d permutedAdjacencyIn; // Holds the permuted initial adjacency IdxVector_d neighborCountsOut; // Holds the counts of neighbors for the induced graph IdxVector_d coarseAggregate; // Holds the partition label for the coarse partition IdxVector_d aggregateRemapId; // Holds the current id of each aggregate IdxVector_d iAggregateRemapId; // Holds the permutation to remap the aggregate id' IdxVector_d aggregateRemapIndex; // Holds the start index of each aggregate IdxVector_d inducedNodeWeights; // Holds the sizes of the fine aggregates // Creating timer AggMIS::Types::JTimer jimmy; AggMIS::Types::JTimer iTime; // Parsing the parameters int fineDepth, coarseDepth; int maxPart = parameters % 1000; coarseDepth = (parameters / 1000000) % 10; fineDepth = (parameters / 10000000) % 10; // Initialize the partitionLabel array partitionLabel = 
IdxVector_d(numNodesIn, 2); // Holds the partition each vertex is located in // Getting aggregation of graph with AggMIS AT::Graph_d fineGraph; fineGraph.indices->swap(adjIndexesIn); fineGraph.adjacency->swap(adjacencyIn); // Getting a host version of the graph AT::Graph_h fineGraph_h(fineGraph); jimmy.start(); // Now get an MIS of the graph iTime.start(); AT::IntVector_h *fineMIS = AggMIS::MIS::FloodFillMIS(fineDepth, fineGraph_h); iTime.stop(); // Aggregate to nearest iTime.start(); AT::IntVector_h *fineAgg = AggMIS::Aggregation::AggregateToNearest(fineGraph_h, *fineMIS); iTime.stop(); // Getting a conditioner iTime.start(); AggMIS::MergeSplitCPU::MergeSplitConditionerCPU fineConditioner(fineGraph_h, *fineAgg); jimmy.stop(); iTime.stop(); // Getting the count of the MIS int misCount = thrust::count(fineMIS->begin(), fineMIS->end(), 1); // Swap out the aggregation and graph fineGraph.indices->swap(adjIndexesIn); fineGraph.adjacency->swap(adjacencyIn); fineAggregate.assign(fineAgg->begin(), fineAgg->end()); Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Initial Fine Aggregation"); fineAggregate.assign(fineConditioner.GetAggregation()->begin(), fineConditioner.GetAggregation()->end()); Help::RecordAllStats(adjIndexesIn, adjacencyIn, fineAggregate, "Fine Aggregation"); // Clear temp stuff fineMIS->clear(); delete fineMIS; fineAgg->clear(); delete fineAgg; // Setting the permutation array to have values equal to element indices permutation = IdxVector_d(numNodesIn); misHelpers::fillWithIndex(permutation); // Sorting arrays together: fineAggregateSort = fineAggregate; thrust::sort_by_key(fineAggregateSort.begin(), fineAggregateSort.end(), permutation.begin()); // Building the permutation array: misHelpers::getInversePermutation(permutation, ipermutation); // Getting the aggregate indices and node weights for the induced graph // finePartCount = fineAggregateSort[fineAggregateSort.size() - 1]; misHelpers::getPartSizes(fineAggregateSort, inducedNodeWeights, aggregateIdx); // Getting the induced graph: misHelpers::getInducedGraph(adjIndexesIn, adjacencyIn, fineAggregate, adjIndexesOut, adjacencyOut); int inducedGraphSize = adjIndexesOut.size() - 1; // Doing the coarse aggregation with AggMIS // Swapping in the graph data and weights AT::Graph_d coarseGraph; coarseGraph.indices->swap(adjIndexesOut); coarseGraph.adjacency->swap(adjacencyOut); AT::Graph_h coarseGraph_h(coarseGraph); AT::IntVector_h nodeWeights_h(inducedNodeWeights.begin(), inducedNodeWeights.end()); jimmy.start(); // Getting an MIS iTime.start(); AT::IntVector_h *coarseMIS = AggMIS::MIS::FloodFillMIS(coarseDepth, coarseGraph_h); iTime.stop(); // Getting initial aggregation iTime.start(); AT::IntVector_h *coarseAgg = AggMIS::Aggregation::AggregateToNearest(coarseGraph_h, *coarseMIS); iTime.stop(); // Getting a conditioner iTime.start(); AggMIS::MergeSplitCPU::MergeSplitConditionerCPU coarseConditioner(coarseGraph_h, *coarseAgg); coarseConditioner.SetNodeWeights(nodeWeights_h); coarseConditioner.SetSizeBounds(maxPart / 2, maxPart); coarseConditioner.CycleSplits(true); coarseConditioner.CycleMerges(false); jimmy.stop(); iTime.stop(); misCount = thrust::count(coarseMIS->begin(), coarseMIS->end(), 1); // Swap out the aggregation, graph, and node weights coarseGraph.indices->swap(adjIndexesOut); coarseGraph.adjacency->swap(adjacencyOut); coarseAggregate.assign(coarseAgg->begin(), coarseAgg->end()); Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Initial Coarse Aggregation"); 
coarseAggregate.assign(coarseConditioner.GetAggregation()->begin(), coarseConditioner.GetAggregation()->end()); Help::RecordAllStats(adjIndexesOut, adjacencyOut, coarseAggregate, inducedNodeWeights, "Coarse Aggregation"); // Clear temp stuff coarseMIS->clear(); delete coarseMIS; coarseAgg->clear(); delete coarseAgg; // Performing new version of getting induced graph misHelpers::remapInducedGraph(adjIndexesOut, adjacencyOut, coarseAggregate); // Filling in the partitionLabel: misHelpers::fillPartitionLabel(coarseAggregate, fineAggregateSort, partitionLabel); // Do a stable sort by key with the partitionLabel as the key: thrust::stable_sort_by_key(partitionLabel.begin(), partitionLabel.end(), thrust::make_zip_iterator( thrust::make_tuple( fineAggregateSort.begin(), permutation.begin()))); // Remapping the aggregate id's: aggregateRemapId = IdxVector_d(aggregateIdx.size() - 1, 0); aggregateRemapIndex = IdxVector_d(aggregateIdx.size() - 1, 0); misHelpers::fillWithIndex(aggregateRemapId); misHelpers::getAggregateStartIndices(fineAggregateSort, aggregateRemapIndex); thrust::stable_sort_by_key(aggregateRemapIndex.begin(), aggregateRemapIndex.end(), aggregateRemapId.begin()); misHelpers::getInversePermutation(aggregateRemapId, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregateSort, iAggregateRemapId); misHelpers::remapAggregateIdx(fineAggregate, iAggregateRemapId); // Sort the coarseAggregate for indices and permutation: thrust::sort(coarseAggregate.begin(), coarseAggregate.end()); misHelpers::getPartIndices(coarseAggregate, partitionIdx); // Get indices for the fine aggregates misHelpers::getPartIndices(fineAggregateSort, aggregateIdx); // Putting in the right permutation vectors for the output: ipermutation = permutation; misHelpers::getInversePermutation(ipermutation, permutation); } } namespace Help { namespace AT = AggMIS::Types; int GetMetisAggregation(AT::IntVector_h &indices, AT::IntVector_h &adjacency, AT::IntVector_h &result, int partSize, bool verbose) { // Getting size of graph int graphSize = indices.size() - 1; // Making sure result is sized correctly result.resize(graphSize); // Setting up for Metis call: int nparts, edgecut; int *npart = &result[0]; nparts = (graphSize / partSize); if (nparts < 8192) { if (nparts < 2) nparts = 2; int options[10], pnumflag = 0, wgtflag = 0; for (int i = 0; i < 10; i++) options[i] = 0; AggMIS::Types::JTimer jimmy; jimmy.start(); METIS_PartGraphKway(&graphSize, &indices[0], &adjacency[0], NULL, NULL, &wgtflag, &pnumflag, &nparts, options, &edgecut, npart); jimmy.stop(); if (verbose) printf("Metis call for graph of %d nodes into parts of size %d took %3.4fs (host) %3.4fs (cuda)\n", graphSize, partSize, jimmy.getElapsedTimeInSec(true), jimmy.getElapsedTimeInSec(false)); totalAggregationTime += jimmy.getElapsedTimeInSec(true); totalAggregationCalls++; // Output timing to file std::ofstream outputFile; outputFile.open("TimingResults.csv", std::ofstream::app); if (totalAggregationCalls == 1) outputFile << "\n\nNote,Metis Aggregation Time, Total Calls, Total Time\n"; outputFile << "Metis call on graph with " << graphSize << " nodes into parts of size " << partSize << ","; outputFile << jimmy.getElapsedTimeInSec(true) << "," << totalAggregationCalls << "," << totalAggregationTime << "\n"; return EnsureConnectedAndNonEmpty(indices, adjacency, result); } else { int count = GetMetisAggregation_Large(indices, adjacency, result, partSize); return count; } } int GetMetisAggregation_Large(AT::IntVector_h &indices, AT::IntVector_h &adjacency, 
AT::IntVector_h &result, int partSize, bool verbose) { // Getting size of graph int graphSize = indices.size() - 1; // Getting a partitioning with four parts to create subgraphs int subGraphSize = graphSize / 4; GetMetisAggregation(indices, adjacency, result, subGraphSize); // Getting the subgraphs from the partitioning AT::IntVector_h_ptr subIndices, subAdjacencies, subNodeMaps; GetSubGraphs(indices, adjacency, result, subIndices, subAdjacencies, subNodeMaps); int subGraphCount = subIndices.size(); // Getting aggregation of each subgraph and mapping to original graph int offset = 0; for (int i = 0; i < subGraphCount; i++) { AT::IntVector_h agg; AT::IntVector_h &nodeMap = *(subNodeMaps[i]); AT::IntVector_h &ind = *(subIndices[i]); AT::IntVector_h &adj = *(subAdjacencies[i]); int aggCount = GetMetisAggregation(ind, adj, agg, partSize); for (int n = 0; n < agg.size(); n++) { // Look up original node Id int node = nodeMap[n]; // Set the aggregate to the subgraph's plus current offset result[node] = agg[n] + offset; } offset += aggCount; agg.clear(); ind.clear(); adj.clear(); nodeMap.clear(); } // Cleaning up subIndices.clear(); subAdjacencies.clear(); subNodeMaps.clear(); // Return number of aggregates return offset; } void GetSubGraphs(AT::IntVector_h &indices, AT::IntVector_h &adjacency, AT::IntVector_h &partition, AT::IntVector_h_ptr &newIndices, AT::IntVector_h_ptr &newAdjacencies, AT::IntVector_h_ptr &nodeMaps, bool verbose) { // Getting a map from old graph id to subgraph id AT::IntVector_h mapToSubGraphId(adjacency.size() - 1); // Getting separate node maps for all partitions nodeMaps.clear(); int minPart = partition[0]; int maxPart = partition[0]; for (int i = 0; i < partition.size(); i++) { int partId = partition[i]; minPart = std::min(minPart, partId); maxPart = std::max(maxPart, partId); while (partId + 1 > nodeMaps.size()) nodeMaps.push_back(new AT::IntVector_h()); nodeMaps[partId]->push_back(i); mapToSubGraphId[i] = nodeMaps[partId]->size() - 1; } int graphCount = nodeMaps.size(); // Creating the new subgraph indices and adjacency vectors newIndices.resize(graphCount); newAdjacencies.resize(graphCount); for (int i = 0; i < graphCount; i++) { newIndices[i] = new AT::IntVector_h(nodeMaps[i]->size() + 1); AT::IntVector_h *ptr = newIndices[i]; newAdjacencies[i] = new AT::IntVector_h(); } // Filling the subgraphs in for (int i = 0; i < graphCount; i++) { AT::IntVector_h &nodes = *nodeMaps[i]; AT::IntVector_h &ind = *newIndices[i]; AT::IntVector_h &adj = *newAdjacencies[i]; int insertAt = 0; (*newIndices[0])[0] = 0; for (int nIt = 0; nIt < nodes.size(); nIt++) { int node = nodes[nIt]; if (partition[node] != i) { int p = partition[node]; if (verbose) printf("Node %d found in node list %d but marked as in partition %d\n", node, i, p); std::cin >> p; } int start = indices[node]; int end = indices[node + 1]; for (int n = start; n < end; n++) { int neighbor = adjacency[n]; if (partition[neighbor] == i) { newAdjacencies[i]->push_back(mapToSubGraphId[neighbor]); insertAt++; } } ind[nIt + 1] = insertAt; } } // Cleaning up mapToSubGraphId.clear(); } int EnsureConnectedAndNonEmpty(AT::IntVector_h &indices, AT::IntVector_h &adjacency, AT::IntVector_h &aggregation) { AT::IntVector_h temp(aggregation.size()); // Flood fill aggregates with node indices for (int i = 0; i < temp.size(); i++) temp[i] = i; bool changed = true; while (changed) { changed = false; for (int root = 0; root < aggregation.size(); root++) { int rootValue = temp[root]; int rootAggregate = aggregation[root]; int start = 
indices[root]; int end = indices[root + 1]; for (int nIt = start; nIt < end; nIt++) { int neighbor = adjacency[nIt]; int neighborAggregate = aggregation[neighbor]; int neighborValue = temp[neighbor]; if (rootAggregate == neighborAggregate && neighborValue > rootValue) rootValue = neighborValue; } if (rootValue > temp[root]) { temp[root] = rootValue; changed = true; } } } // Making a copy of the filled aggregation AT::IntVector_h mapping(temp.size()); thrust::copy(temp.begin(), temp.end(), mapping.begin()); // Sort the values thrust::sort(mapping.begin(), mapping.end()); // Get just unique values int newSize = thrust::unique(mapping.begin(), mapping.end()) - mapping.begin(); mapping.resize(newSize); // Remap aggregation for (int i = 0; i < aggregation.size(); i++) aggregation[i] = BinarySearch(temp[i], mapping); // Get rid of temporary vectors mapping.clear(); temp.clear(); // Return count of aggregates return newSize; } int BinarySearch(int value, AT::IntVector_h &array) { int imin = 0; int imax = array.size() - 1; while (imin < imax) { int imid = (imax + imin) / 2; if (array[imid] < value) imin = imid + 1; else imax = imid; } if (imax == imin && array[imin] == value) return imin; else return -1; } void RecordAllStats(IdxVector_d& indices, IdxVector_d& adjacency, IdxVector_d& aggregation, std::string prefix) { IdxVector_d dummy; RecordAllStats(indices, adjacency, aggregation, dummy, prefix); } void RecordAllStats(IdxVector_d& indices, IdxVector_d& adjacency, IdxVector_d& aggregation, IdxVector_d& nodeWeights, std::string prefix) { // Recording aggregation stats if (nodeWeights.size() == 0) RecordAggregationStats(aggregation, prefix + ":Parts"); else RecordAggregationStats(aggregation, nodeWeights, prefix + ":Parts"); // Recording Valence stats RecordValenceStats(indices, adjacency, prefix + ":Valence"); // Recording Edge cut ratio RecordEdgeCut(indices, adjacency, aggregation, prefix); } void RecordAggregationStats(IdxVector_d& aggregation, std::string prefix) { AT::IntVector_d agg; agg.swap(aggregation); AT::IntVector_d partSizes; // Get the part sizes AggMIS::GraphHelpers::getPartSizes(agg, partSizes); // Find the largest and smallest parts thrust::sort(partSizes.begin(), partSizes.end()); int smallest = partSizes[0]; int largest = partSizes.back(); // Get the mean, median, and std deviation double meanSize = (double)agg.size() / partSizes.size(); int medianSize = partSizes[(partSizes.size() - 1) / 2]; double std = thrust::transform_reduce(partSizes.begin(), partSizes.end(), AggMIS::MergeSplitGPU::Functors::SquaredDifference(meanSize), 0.0, thrust::plus<double>()); std = sqrt(std / partSizes.size()); agg.swap(aggregation); } void RecordAggregationStats(IdxVector_d& aggregation, IdxVector_d& nodeWeights, std::string prefix) { AT::IntVector_d agg; agg.swap(aggregation); AT::IntVector_d nw; nw.swap(nodeWeights); AT::IntVector_d partSizes; // Get the part sizes AggMIS::GraphHelpers::getPartSizes(agg, partSizes, nw); // Find the largest and smallest parts thrust::sort(partSizes.begin(), partSizes.end()); int smallest = partSizes[0]; int largest = partSizes.back(); // Get the mean, median, and std deviation int totalWeight = thrust::reduce(nw.begin(), nw.end()); int medianSize = partSizes[(partSizes.size() - 1) / 2]; double meanSize = (double)totalWeight / partSizes.size(); double std = thrust::transform_reduce(partSizes.begin(), partSizes.end(), AggMIS::MergeSplitGPU::Functors::SquaredDifference(meanSize), 0.0, thrust::plus<double>()); std = sqrt(std / partSizes.size()); 
agg.swap(aggregation); nw.swap(nodeWeights); } void RecordValenceStats(IdxVector_d& indices, IdxVector_d& adjacency, std::string prefix) { // Get a graph object to use AT::Graph_d g; g.adjacency->swap(adjacency); g.indices->swap(indices); // Get the valences from the graph AT::IntVector_d* valences = AggMIS::GraphHelpers::GetValences(g); // Compute the stats thrust::sort(valences->begin(), valences->end()); int smallest = valences->data()[0]; int largest = valences->back(); // Get the mean, median, and std deviation int totalValence = thrust::reduce(valences->begin(), valences->end()); double meanSize = (double)totalValence / valences->size(); int medianSize = valences->data()[(valences->size() - 1) / 2]; double std = thrust::transform_reduce(valences->begin(), valences->end(), AggMIS::MergeSplitGPU::Functors::SquaredDifference(meanSize), 0.0, thrust::plus<double>()); std = sqrt(std / valences->size()); g.adjacency->swap(adjacency); g.indices->swap(indices); } void RecordEdgeCut(IdxVector_d& indices, IdxVector_d& adjacency, IdxVector_d& aggregation, std::string prefix) { // Get a graph AT::Graph_d g; g.indices->swap(indices); g.adjacency->swap(adjacency); // Get an IntVector for aggregation AT::IntVector_d agg; agg.swap(aggregation); // Swapping back data g.indices->swap(indices); g.adjacency->swap(adjacency); agg.swap(aggregation); } } }
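// Note: the flood fill in Help::EnsureConnectedAndNonEmpty above leaves every
// connected aggregate labeled by its largest member node id, so the labels are
// sparse; the subsequent sort / unique / BinarySearch pass compacts them into a
// dense 0..k-1 range. Below is a minimal host-only sketch of that remapping
// idea, using std::vector in place of AT::IntVector_h (all names here are
// illustrative and not part of the library above).
#include <algorithm>
#include <cassert>
#include <vector>

// Lower-bound binary search mirroring Help::BinarySearch: index of `value`
// in the sorted `array`, or -1 if it is not present.
static int LowerBoundIndex(int value, const std::vector<int>& array) {
    int imin = 0, imax = static_cast<int>(array.size()) - 1;
    while (imin < imax) {
        int imid = (imin + imax) / 2;
        if (array[imid] < value) imin = imid + 1; else imax = imid;
    }
    return (imin == imax && array[imin] == value) ? imin : -1;
}

// Compact sparse aggregate labels into 0..k-1 and return k.
static int CompactLabels(std::vector<int>& labels) {
    std::vector<int> mapping = labels;                        // copy of the raw labels
    std::sort(mapping.begin(), mapping.end());                // sort ...
    mapping.erase(std::unique(mapping.begin(), mapping.end()), mapping.end()); // ... and deduplicate
    for (int& l : labels)                                     // remap each label to its dense index
        l = LowerBoundIndex(l, mapping);
    return static_cast<int>(mapping.size());
}

int main() {
    std::vector<int> labels = {7, 7, 3, 9, 3, 9, 9};
    int count = CompactLabels(labels);                        // labels become {1, 1, 0, 2, 0, 2, 2}
    assert(count == 3);
    return 0;
}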
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <cfloat> #include <time.h> #include <thrust/extrema.h> #include <Eigen/Geometry> #include <cublas_v2.h> #define VERTEX_CHANNELS 3 #define MAX_ROI 128 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __device__ inline float angle_distance(int cx, int cy, int x, int y, float u, float v) { float dx = cx - x; float dy = cy - y; float n1 = sqrt(u * u + v * v); float n2 = sqrt(dx * dx + dy * dy); float dot = u * dx + v * dy; float distance = dot / (n1 * n2); return distance; } __device__ inline float angle_distance_label(int cx, int cy, int x, int y, float u, float v, int cls, const int height, const int width, const int* labelmap) { float dx = cx - x; float dy = cy - y; float n1 = sqrt(u * u + v * v); float n2 = sqrt(dx * dx + dy * dy); float dot = u * dx + v * dy; float distance = dot / (n1 * n2); int num = 10; int count = 0; for (int i = 1; i <= num; i++) { float step = float(i) / float(num); int px = int(x + step * dx); int py = int(y + step * dy); if (px >= 0 && px < width && py >= 0 && py < height) { if (labelmap[py * width + px] == cls) count++; } } if ((float)count / float(num) < 0.5) distance = 0; return distance; } __device__ inline void project_box(int cls, const float* extents, const float* meta_data, float distance, float factor, float* threshold) { float xHalf = extents[cls * 3 + 0] * 0.5; float yHalf = extents[cls * 3 + 1] * 0.5; float zHalf = extents[cls * 3 + 2] * 0.5; float bb3D[24]; bb3D[0] = xHalf; bb3D[1] = yHalf; bb3D[2] = zHalf + distance; bb3D[3] = -xHalf; bb3D[4] = yHalf; bb3D[5] = zHalf + distance; bb3D[6] = xHalf; bb3D[7] = -yHalf; bb3D[8] = zHalf + distance; bb3D[9] = -xHalf; bb3D[10] = -yHalf; bb3D[11] = zHalf + distance; bb3D[12] = xHalf; bb3D[13] = yHalf; bb3D[14] = -zHalf + distance; bb3D[15] = -xHalf; bb3D[16] = yHalf; bb3D[17] = -zHalf + distance; bb3D[18] = xHalf; bb3D[19] = -yHalf; bb3D[20] = -zHalf + distance; bb3D[21] = -xHalf; bb3D[22] = -yHalf; bb3D[23] = -zHalf + distance; float fx = meta_data[0]; float fy = meta_data[4]; float px = meta_data[2]; float py = meta_data[5]; float minX = 1e8; float maxX = -1e8; float minY = 1e8; float maxY = -1e8; for (int i = 0; i < 8; i++) { float x = fx * (bb3D[i * 3] / bb3D[i * 3 + 2]) + px; float y = fy * (bb3D[i * 3 + 1] / bb3D[i * 3 + 2]) + py; minX = fmin(minX, x); minY = fmin(minY, y); maxX = fmax(maxX, x); maxY = fmax(maxY, y); } float width = maxX - minX + 1; float height = maxY - minY + 1; *threshold = fmax(width, height) * factor; } __global__ void compute_arrays_kernel(const int nthreads, const int* labelmap, int* arrays, int* array_size, const int height, const int width) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int cls = labelmap[index]; if (cls > 0) { int size = atomicAdd(array_size + cls, 1); int offset = cls * height * width + size; arrays[offset] = index; } } } __global__ void compute_hough_kernel(const int nthreads, float* hough_space, float* hough_data, const int* labelmap, const float* vertmap, const float* extents, const float* meta_data, int* arrays, int* array_size, int* class_indexes, const int height, const int width, const int num_classes, const int count, const float inlierThreshold, const int skip_pixels) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (cls, cx, cy) is an element in the hough space int ind = index / (height * width); int cls = class_indexes[ind]; int n = index % (height * width); int cx = n % width; int cy = n / width; int size = 
array_size[cls]; float distance = 0; float threshold; for (int i = 0; i < size; i += skip_pixels) { int offset = cls * height * width + i; int location = arrays[offset]; int x = location % width; int y = location / width; // read the direction offset = VERTEX_CHANNELS * cls * height * width + y * width + x; float u = vertmap[offset]; offset = VERTEX_CHANNELS * cls * height * width + height * width + y * width + x; float v = vertmap[offset]; offset = VERTEX_CHANNELS * cls * height * width + 2 * height * width + y * width + x; float d = exp(vertmap[offset]); // vote if (angle_distance_label(cx, cy, x, y, u, v, cls, height, width, labelmap) > inlierThreshold) { project_box(cls, extents, meta_data, d, 0.6, &threshold); float dx = fabsf(x - cx); float dy = fabsf(y - cy); if (dx < threshold && dy < threshold) { hough_space[index]++; distance += d; } } } if (hough_space[index] > 0) { distance /= hough_space[index]; float bb_width = -1; float bb_height = -1; for (int i = 0; i < size; i += skip_pixels) { int offset = cls * height * width + i; int location = arrays[offset]; int x = location % width; int y = location / width; // read the direction offset = VERTEX_CHANNELS * cls * height * width + y * width + x; float u = vertmap[offset]; offset = VERTEX_CHANNELS * cls * height * width + height * width + y * width + x; float v = vertmap[offset]; // vote if (angle_distance_label(cx, cy, x, y, u, v, cls, height, width, labelmap) > inlierThreshold) { project_box(cls, extents, meta_data, distance, 0.6, &threshold); float dx = fabsf(x - cx); float dy = fabsf(y - cy); if (dx > bb_width && dx < threshold && dy < threshold) bb_width = dx; if (dy > bb_height && dx < threshold && dy < threshold) bb_height = dy; } } int offset = ind * height * width * 3 + 3 * (cy * width + cx); hough_data[offset] = distance; hough_data[offset + 1] = 2 * bb_height; hough_data[offset + 2] = 2 * bb_width; } } } __global__ void compute_max_indexes_kernel(const int nthreads, int* max_indexes, int index_size, int* num_max, float* hough_space, float* hough_data, int height, int width, float threshold, float perThreshold, const int is_train) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (ind, cx, cy) is an element in the hough space int ind = index / (height * width); int n = index % (height * width); int cx = n % width; int cy = n / width; int kernel_size = 3; int offset = ind * height * width * 3 + 3 * (cy * width + cx); float bb_height = hough_data[offset + 1]; float bb_width = hough_data[offset + 2]; if (hough_space[index] > threshold && bb_height > 0 && bb_width > 0) { // check if the location is local maximum int flag = 0; for (int x = cx - kernel_size; x <= cx + kernel_size; x++) { for (int y = cy - kernel_size; y <= cy + kernel_size; y++) { if (x >= 0 && x < width && y >= 0 && y < height) { if (hough_space[ind * height * width + y * width + x] > hough_space[index]) { flag = 1; break; } if (is_train == 0 && hough_space[ind * height * width + y * width + x] == hough_space[index]) { if (ind * height * width + y * width + x > index) { flag = 1; break; } } } } // check the percentage of voting if (hough_space[index] / (bb_height * bb_width) < perThreshold) flag = 1; } if (flag == 0) { // add the location to max_indexes int max_index = atomicAdd(num_max, 1); if (max_index < index_size) max_indexes[max_index] = index; } } } } __global__ void compute_rois_kernel(const int nthreads, float* top_box, float* top_pose, const float* meta_data, float* hough_space, float* hough_data, int* max_indexes, int* class_indexes, int batch_index, const 
int height, const int width, const int num_classes, int* num_rois, const int is_train) { CUDA_1D_KERNEL_LOOP(index, nthreads) { float scale = 0.0; int max_index = max_indexes[index]; int ind = max_index / (height * width); int cls = class_indexes[ind]; int n = max_index % (height * width); int x = n % width; int y = n / width; float fx = meta_data[0]; float fy = meta_data[4]; float px = meta_data[2]; float py = meta_data[5]; float rx = (x - px) / fx; float ry = (y - py) / fy; int offset = ind * height * width * 3 + 3 * (y * width + x); float bb_distance = hough_data[offset]; float bb_height = hough_data[offset + 1]; float bb_width = hough_data[offset + 2]; if (is_train) { int roi_index = atomicAdd(num_rois, 9); top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x - bb_width * (0.5 + scale); top_box[roi_index * 7 + 3] = y - bb_height * (0.5 + scale); top_box[roi_index * 7 + 4] = x + bb_width * (0.5 + scale); top_box[roi_index * 7 + 5] = y + bb_height * (0.5 + scale); top_box[roi_index * 7 + 6] = hough_space[max_index]; for (int j = 0; j < 9; j++) { top_pose[(roi_index + j) * 7 + 0] = 1; top_pose[(roi_index + j) * 7 + 1] = 0; top_pose[(roi_index + j) * 7 + 2] = 0; top_pose[(roi_index + j) * 7 + 3] = 0; top_pose[(roi_index + j) * 7 + 4] = rx; top_pose[(roi_index + j) * 7 + 5] = ry; top_pose[(roi_index + j) * 7 + 6] = bb_distance; } // add jittering boxes float x1 = top_box[roi_index * 7 + 2]; float y1 = top_box[roi_index * 7 + 3]; float x2 = top_box[roi_index * 7 + 4]; float y2 = top_box[roi_index * 7 + 5]; float ww = x2 - x1; float hh = y2 - y1; // (-1, -1) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1 - 0.05 * ww; top_box[roi_index * 7 + 3] = y1 - 0.05 * hh; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (+1, -1) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1 + 0.05 * ww; top_box[roi_index * 7 + 3] = y1 - 0.05 * hh; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (-1, +1) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1 - 0.05 * ww; top_box[roi_index * 7 + 3] = y1 + 0.05 * hh; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (+1, +1) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1 + 0.05 * ww; top_box[roi_index * 7 + 3] = y1 + 0.05 * hh; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (0, -1) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1; top_box[roi_index * 7 + 3] = y1 - 0.05 * hh; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (-1, 0) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; 
top_box[roi_index * 7 + 2] = x1 - 0.05 * ww; top_box[roi_index * 7 + 3] = y1; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (0, +1) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1; top_box[roi_index * 7 + 3] = y1 + 0.05 * hh; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; // (+1, 0) roi_index++; top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x1 + 0.05 * ww; top_box[roi_index * 7 + 3] = y1; top_box[roi_index * 7 + 4] = top_box[roi_index * 7 + 2] + ww; top_box[roi_index * 7 + 5] = top_box[roi_index * 7 + 3] + hh; top_box[roi_index * 7 + 6] = hough_space[max_index]; } else { int roi_index = atomicAdd(num_rois, 1); top_box[roi_index * 7 + 0] = batch_index; top_box[roi_index * 7 + 1] = cls; top_box[roi_index * 7 + 2] = x - bb_width * (0.5 + scale); top_box[roi_index * 7 + 3] = y - bb_height * (0.5 + scale); top_box[roi_index * 7 + 4] = x + bb_width * (0.5 + scale); top_box[roi_index * 7 + 5] = y + bb_height * (0.5 + scale); top_box[roi_index * 7 + 6] = hough_space[max_index]; top_pose[roi_index * 7 + 0] = 1; top_pose[roi_index * 7 + 1] = 0; top_pose[roi_index * 7 + 2] = 0; top_pose[roi_index * 7 + 3] = 0; top_pose[roi_index * 7 + 4] = rx; top_pose[roi_index * 7 + 5] = ry; top_pose[roi_index * 7 + 6] = bb_distance; } } } std::vector<at::Tensor> hough_voting_cuda_forward( at::Tensor bottom_label, at::Tensor bottom_vertex, at::Tensor bottom_meta_data, at::Tensor extents, int is_train, int skip_pixels, int labelThreshold, float inlierThreshold, float votingThreshold, float perThreshold) { const int kThreadsPerBlock = 1024; int output_size; cudaError_t err; const int batch_size = bottom_vertex.size(0); const int num_classes = bottom_vertex.size(1) / VERTEX_CHANNELS; const int height = bottom_vertex.size(2); const int width = bottom_vertex.size(3); const int num_meta_data = bottom_meta_data.size(1); const int index_size = MAX_ROI / batch_size; auto top_box = at::zeros({MAX_ROI * 9, 7}, bottom_vertex.options()); auto top_pose = at::zeros({MAX_ROI * 9, 7}, bottom_vertex.options()); auto num_rois = at::zeros({1}, bottom_label.options()); for (int batch_index = 0; batch_index < batch_size; batch_index++) { const int* labelmap = bottom_label.data<int>() + batch_index * height * width; const float* vertmap = bottom_vertex.data<float>() + batch_index * height * width * VERTEX_CHANNELS * num_classes; const float* meta_data = bottom_meta_data.data<float>() + batch_index * num_meta_data; // step 1: compute a label index array for each class auto arrays = at::zeros({num_classes, height * width}, bottom_label.options()); auto array_sizes = at::zeros({num_classes}, bottom_label.options()); output_size = height * width; compute_arrays_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, labelmap, arrays.data<int>(), array_sizes.data<int>(), height, width); cudaThreadSynchronize(); // compute class indexes int* array_sizes_host = (int*)malloc(num_classes * sizeof(int)); int* class_indexes_host = (int*)malloc(num_classes * sizeof(int)); cudaMemcpy(array_sizes_host, array_sizes.data<int>(), num_classes * sizeof(int), cudaMemcpyDeviceToHost); int count = 0; for (int c = 1; c < 
num_classes; c++) { if (array_sizes_host[c] > labelThreshold) { class_indexes_host[count] = c; count++; } // else // printf("class %d with only pixels %d\n", c, array_sizes_host[c]); } if (count == 0) { free(array_sizes_host); free(class_indexes_host); continue; } auto class_indexes = at::zeros({count}, bottom_label.options()); cudaMemcpy(class_indexes.data<int>(), class_indexes_host, count * sizeof(int), cudaMemcpyHostToDevice); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute label index: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // step 2: compute the hough space auto hough_space = at::zeros({count, height, width}, bottom_vertex.options()); auto hough_data = at::zeros({count, height, width, 3}, bottom_vertex.options()); output_size = count * height * width; compute_hough_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, hough_space.data<float>(), hough_data.data<float>(), labelmap, vertmap, extents.data<float>(), meta_data, arrays.data<int>(), array_sizes.data<int>(), class_indexes.data<int>(), height, width, num_classes, count, inlierThreshold, skip_pixels); cudaThreadSynchronize(); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute hough space: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // step 3: find the maximum in hough space auto num_max = at::zeros({1}, bottom_label.options()); auto max_indexes = at::zeros({index_size}, bottom_label.options()); if (votingThreshold > 0) { output_size = count * height * width; compute_max_indexes_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, max_indexes.data<int>(), index_size, num_max.data<int>(), hough_space.data<float>(), hough_data.data<float>(), height, width, votingThreshold, perThreshold, is_train); cudaThreadSynchronize(); } else { int* max_indexes_host = (int*)malloc(count * sizeof(int)); memset(max_indexes_host, 0, count * sizeof(int)); for (int i = 0; i < count; i++) { float *hmax = thrust::max_element(thrust::device, hough_space.data<float>() + i * height * width, hough_space.data<float>() + (i+1) * height * width); max_indexes_host[i] = hmax - hough_space.data<float>(); } cudaMemcpy(num_max.data<int>(), &count, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(max_indexes.data<int>(), max_indexes_host, count * sizeof(int), cudaMemcpyHostToDevice); free(max_indexes_host); } err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute maximum: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // step 4: compute outputs int num_max_host; cudaMemcpy(&num_max_host, num_max.data<int>(), sizeof(int), cudaMemcpyDeviceToHost); if (num_max_host >= index_size) { printf("hough voting num_max: %d exceeds capacity %d\n", num_max_host, index_size); num_max_host = index_size; } if (num_max_host > 0) { output_size = num_max_host; compute_rois_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, top_box.data<float>(), top_pose.data<float>(), meta_data, hough_space.data<float>(), hough_data.data<float>(), max_indexes.data<int>(), class_indexes.data<int>(), batch_index, height, width, num_classes, num_rois.data<int>(), is_train); cudaThreadSynchronize(); } // clean up free(array_sizes_host); free(class_indexes_host); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed compute outputs: %s\n", cudaGetErrorString( 
err ) ); exit( -1 ); } } // copy outputs int num_rois_host; cudaMemcpy(&num_rois_host, num_rois.data<int>(), sizeof(int), cudaMemcpyDeviceToHost); if (num_rois_host == 0) num_rois_host = 1; auto top_box_final = at::zeros({num_rois_host, 7}, bottom_vertex.options()); auto top_pose_final = at::zeros({num_rois_host, 7}, bottom_vertex.options()); cudaMemcpy(top_box_final.data<float>(), top_box.data<float>(), num_rois_host * 7 * sizeof(float), cudaMemcpyDeviceToDevice); cudaMemcpy(top_pose_final.data<float>(), top_pose.data<float>(), num_rois_host * 7 * sizeof(float), cudaMemcpyDeviceToDevice); return {top_box_final, top_pose_final}; }
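// Note: project_box above derives the per-class inlier window by placing the
// object's 3D extent at the voted depth and projecting its eight corners
// through the pinhole intrinsics stored in meta_data (fx, fy, px, py). Below
// is a minimal host-side sketch of that same projection; the cube size, depth
// and intrinsics are made-up illustrative numbers, not values from the code
// above.
#include <algorithm>
#include <cstdio>

// Project the 8 corners of an axis-aligned box with half-extents (xh, yh, zh),
// centered on the optical axis at depth `distance`, and return the pixel
// threshold max(width, height) * factor, as computed by project_box().
static float BoxPixelThreshold(float xh, float yh, float zh, float distance,
                               float fx, float fy, float px, float py, float factor) {
    float minX = 1e8f, maxX = -1e8f, minY = 1e8f, maxY = -1e8f;
    for (int i = 0; i < 8; ++i) {
        float X = (i & 1) ? -xh : xh;
        float Y = (i & 2) ? -yh : yh;
        float Z = ((i & 4) ? -zh : zh) + distance;
        float u = fx * (X / Z) + px;                 // pinhole projection
        float v = fy * (Y / Z) + py;
        minX = std::min(minX, u); maxX = std::max(maxX, u);
        minY = std::min(minY, v); maxY = std::max(maxY, v);
    }
    float width  = maxX - minX + 1.0f;
    float height = maxY - minY + 1.0f;
    return std::max(width, height) * factor;
}

int main() {
    // 6.4 cm cube, 1 m away, fx = fy = 500, principal point (320, 240).
    float t = BoxPixelThreshold(0.032f, 0.032f, 0.032f, 1.0f,
                                500.0f, 500.0f, 320.0f, 240.0f, 0.6f);
    std::printf("inlier window: %.1f pixels\n", t);  // roughly 20 pixels for these numbers
    return 0;
}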
using namespace std; #define TMPMEMNUM 10353 #define Nstreams 16 __global__ void RL( const unsigned* __restrict__ sym_c_ptr_dev, const unsigned* __restrict__ sym_r_idx_dev, REAL* __restrict__ val_dev, const unsigned* __restrict__ l_col_ptr_dev, const unsigned* __restrict__ csr_r_ptr_dev, const unsigned* __restrict__ csr_c_idx_dev, const unsigned* __restrict__ csr_diag_ptr_dev, const int* __restrict__ level_idx_dev, REAL* __restrict__ tmpMem, const unsigned n, const int levelHead, const int inLevPos) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int wid = threadIdx.x / 32; const unsigned currentCol = level_idx_dev[levelHead+inLevPos+bid]; const unsigned currentLColSize = sym_c_ptr_dev[currentCol + 1] - l_col_ptr_dev[currentCol] - 1; const unsigned currentLPos = l_col_ptr_dev[currentCol] + tid + 1; extern __shared__ REAL s[]; //update current col int offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; val_dev[currentLPos + offset] /= val_dev[l_col_ptr_dev[currentCol]]; tmpMem[bid*n + ridx]= val_dev[currentLPos + offset]; } offset += blockDim.x; } __syncthreads(); //broadcast to submatrix const unsigned subColPos = csr_diag_ptr_dev[currentCol] + wid + 1; const unsigned subMatSize = csr_r_ptr_dev[currentCol + 1] - csr_diag_ptr_dev[currentCol] - 1; unsigned subCol; const int tidInWarp = threadIdx.x % 32; unsigned subColElem = 0; int woffset = 0; while (subMatSize > woffset) { if (wid + woffset < subMatSize) { offset = 0; subCol = csr_c_idx_dev[subColPos + woffset]; while(offset < sym_c_ptr_dev[subCol + 1] - sym_c_ptr_dev[subCol]) { if (tidInWarp + offset < sym_c_ptr_dev[subCol + 1] - sym_c_ptr_dev[subCol]) { subColElem = sym_c_ptr_dev[subCol] + tidInWarp + offset; unsigned ridx = sym_r_idx_dev[subColElem]; if (ridx == currentCol) { s[wid] = val_dev[subColElem]; } //Threads in a warp are always synchronized //__syncthreads(); if (ridx > currentCol) { //elem in currentCol same row with subColElem might be 0, so //clearing tmpMem is necessary atomicAdd(&val_dev[subColElem], -tmpMem[ridx+n*bid]*s[wid]); } } offset += 32; } } woffset += blockDim.x/32; } __syncthreads(); //Clear tmpMem offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; tmpMem[bid*n + ridx]= 0; } offset += blockDim.x; } } __global__ void RL_perturb( const unsigned* __restrict__ sym_c_ptr_dev, const unsigned* __restrict__ sym_r_idx_dev, REAL* __restrict__ val_dev, const unsigned* __restrict__ l_col_ptr_dev, const unsigned* __restrict__ csr_r_ptr_dev, const unsigned* __restrict__ csr_c_idx_dev, const unsigned* __restrict__ csr_diag_ptr_dev, const int* __restrict__ level_idx_dev, REAL* __restrict__ tmpMem, const unsigned n, const int levelHead, const int inLevPos, const float pert) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int wid = threadIdx.x / 32; const unsigned currentCol = level_idx_dev[levelHead+inLevPos+bid]; const unsigned currentLColSize = sym_c_ptr_dev[currentCol + 1] - l_col_ptr_dev[currentCol] - 1; const unsigned currentLPos = l_col_ptr_dev[currentCol] + tid + 1; extern __shared__ REAL s[]; //update current col int offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; if (abs(val_dev[l_col_ptr_dev[currentCol]]) < pert) val_dev[l_col_ptr_dev[currentCol]] = pert; val_dev[currentLPos + offset] /= val_dev[l_col_ptr_dev[currentCol]]; 
tmpMem[bid*n + ridx]= val_dev[currentLPos + offset]; } offset += blockDim.x; } __syncthreads(); //broadcast to submatrix const unsigned subColPos = csr_diag_ptr_dev[currentCol] + wid + 1; const unsigned subMatSize = csr_r_ptr_dev[currentCol + 1] - csr_diag_ptr_dev[currentCol] - 1; unsigned subCol; const int tidInWarp = threadIdx.x % 32; unsigned subColElem = 0; int woffset = 0; while (subMatSize > woffset) { if (wid + woffset < subMatSize) { offset = 0; subCol = csr_c_idx_dev[subColPos + woffset]; while(offset < sym_c_ptr_dev[subCol + 1] - sym_c_ptr_dev[subCol]) { if (tidInWarp + offset < sym_c_ptr_dev[subCol + 1] - sym_c_ptr_dev[subCol]) { subColElem = sym_c_ptr_dev[subCol] + tidInWarp + offset; unsigned ridx = sym_r_idx_dev[subColElem]; if (ridx == currentCol) { s[wid] = val_dev[subColElem]; } //Threads in a warp are always synchronized //__syncthreads(); if (ridx > currentCol) { //elem in currentCol same row with subColElem might be 0, so //clearing tmpMem is necessary atomicAdd(&val_dev[subColElem], -tmpMem[ridx+n*bid]*s[wid]); } } offset += 32; } } woffset += blockDim.x/32; } __syncthreads(); //Clear tmpMem offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; tmpMem[bid*n + ridx]= 0; } offset += blockDim.x; } } __global__ void RL_onecol_factorizeCurrentCol( const unsigned* __restrict__ sym_c_ptr_dev, const unsigned* __restrict__ sym_r_idx_dev, REAL* __restrict__ val_dev, const unsigned* __restrict__ l_col_ptr_dev, const unsigned currentCol, REAL* __restrict__ tmpMem, const int stream, const unsigned n) { const int tid = threadIdx.x; const unsigned currentLColSize = sym_c_ptr_dev[currentCol + 1] - l_col_ptr_dev[currentCol] - 1; const unsigned currentLPos = l_col_ptr_dev[currentCol] + tid + 1; //update current col int offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; val_dev[currentLPos + offset] /= val_dev[l_col_ptr_dev[currentCol]]; tmpMem[stream * n + ridx]= val_dev[currentLPos + offset]; } offset += blockDim.x; } } __global__ void RL_onecol_factorizeCurrentCol_perturb( const unsigned* __restrict__ sym_c_ptr_dev, const unsigned* __restrict__ sym_r_idx_dev, REAL* __restrict__ val_dev, const unsigned* __restrict__ l_col_ptr_dev, const unsigned currentCol, REAL* __restrict__ tmpMem, const int stream, const unsigned n, const float pert) { const int tid = threadIdx.x; const unsigned currentLColSize = sym_c_ptr_dev[currentCol + 1] - l_col_ptr_dev[currentCol] - 1; const unsigned currentLPos = l_col_ptr_dev[currentCol] + tid + 1; //update current col int offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; if (abs(val_dev[l_col_ptr_dev[currentCol]]) < pert) val_dev[l_col_ptr_dev[currentCol]] = pert; val_dev[currentLPos + offset] /= val_dev[l_col_ptr_dev[currentCol]]; tmpMem[stream * n + ridx]= val_dev[currentLPos + offset]; } offset += blockDim.x; } } __global__ void RL_onecol_updateSubmat( const unsigned* __restrict__ sym_c_ptr_dev, const unsigned* __restrict__ sym_r_idx_dev, REAL* __restrict__ val_dev, const unsigned* __restrict__ csr_c_idx_dev, const unsigned* __restrict__ csr_diag_ptr_dev, const unsigned currentCol, REAL* __restrict__ tmpMem, const int stream, const unsigned n) { const int tid = threadIdx.x; const int bid = blockIdx.x; __shared__ REAL s; //broadcast to submatrix const unsigned subColPos = csr_diag_ptr_dev[currentCol] 
+ bid + 1; unsigned subCol; unsigned subColElem = 0; int offset = 0; subCol = csr_c_idx_dev[subColPos]; while(offset < sym_c_ptr_dev[subCol + 1] - sym_c_ptr_dev[subCol]) { if (tid + offset < sym_c_ptr_dev[subCol + 1] - sym_c_ptr_dev[subCol]) { subColElem = sym_c_ptr_dev[subCol] + tid + offset; unsigned ridx = sym_r_idx_dev[subColElem]; if (ridx == currentCol) { s = val_dev[subColElem]; } __syncthreads(); if (ridx > currentCol) { atomicAdd(&val_dev[subColElem], -tmpMem[stream * n + ridx] * s); } } offset += blockDim.x; } } __global__ void RL_onecol_cleartmpMem( const unsigned* __restrict__ sym_c_ptr_dev, const unsigned* __restrict__ sym_r_idx_dev, const unsigned* __restrict__ l_col_ptr_dev, const unsigned currentCol, REAL* __restrict__ tmpMem, const int stream, const unsigned n) { const int tid = threadIdx.x; const unsigned currentLColSize = sym_c_ptr_dev[currentCol + 1] - l_col_ptr_dev[currentCol] - 1; const unsigned currentLPos = l_col_ptr_dev[currentCol] + tid + 1; unsigned offset = 0; while (currentLColSize > offset) { if (tid + offset < currentLColSize) { unsigned ridx = sym_r_idx_dev[currentLPos + offset]; tmpMem[stream * n + ridx]= 0; } offset += blockDim.x; } } void LUonDevice(Symbolic_Matrix &A_sym, ostream &out, ostream &err, bool PERTURB) { unsigned n = A_sym.n; unsigned nnz = A_sym.nnz; unsigned num_lev = A_sym.num_lev; unsigned *sym_c_ptr_dev, *sym_r_idx_dev, *l_col_ptr_dev; REAL *val_dev, *tmpMem; unsigned *csr_r_ptr_dev, *csr_c_idx_dev, *csr_diag_ptr_dev; int *level_idx_dev; cudaMalloc((void**)&sym_c_ptr_dev, (n + 1) * sizeof(unsigned)); cudaMalloc((void**)&sym_r_idx_dev, nnz * sizeof(unsigned)); cudaMalloc((void**)&val_dev, nnz * sizeof(REAL)); cudaMalloc((void**)&l_col_ptr_dev, n * sizeof(unsigned)); cudaMalloc((void**)&csr_r_ptr_dev, (n + 1) * sizeof(unsigned)); cudaMalloc((void**)&csr_c_idx_dev, nnz * sizeof(unsigned)); cudaMalloc((void**)&csr_diag_ptr_dev, n * sizeof(unsigned)); cudaMalloc((void**)&level_idx_dev, n * sizeof(int)); cudaMemcpy(sym_c_ptr_dev, &(A_sym.sym_c_ptr[0]), (n + 1) * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(sym_r_idx_dev, &(A_sym.sym_r_idx[0]), nnz * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(val_dev, &(A_sym.val[0]), nnz * sizeof(REAL), cudaMemcpyHostToDevice); cudaMemcpy(l_col_ptr_dev, &(A_sym.l_col_ptr[0]), n * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(csr_r_ptr_dev, &(A_sym.csr_r_ptr[0]), (n + 1) * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(csr_c_idx_dev, &(A_sym.csr_c_idx[0]), nnz * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(csr_diag_ptr_dev, &(A_sym.csr_diag_ptr[0]), n * sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(level_idx_dev, &(A_sym.level_idx[0]), n * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&tmpMem, TMPMEMNUM*n*sizeof(REAL)); cudaMemset(tmpMem, 0, TMPMEMNUM*n*sizeof(REAL)); // calculate 1-norm of A and perturbation value for perturbation float pert = 0; if (PERTURB) { float norm_A = 0; for (unsigned i = 0; i < n; ++i) { float tmp = 0; for (unsigned j = A_sym.sym_c_ptr[i]; j < A_sym.sym_c_ptr[i+1]; ++j) tmp += abs(A_sym.val[j]); if (norm_A < tmp) norm_A = tmp; } pert = 3.45e-4 * norm_A; out << "Gaussian elimination with static pivoting (GESP)..." 
<< endl; out << "1-Norm of A matrix is " << norm_A << ", Perturbation value is " << pert << endl; } cudaDeviceSynchronize(); Timer t; double utime; t.start(); for (unsigned i = 0; i < num_lev; ++i) { int lev_size = A_sym.level_ptr[i + 1] - A_sym.level_ptr[i]; if (lev_size > 896) { //3584 / 4 unsigned WarpsPerBlock = 2; dim3 dimBlock(WarpsPerBlock * 32, 1); size_t MemSize = WarpsPerBlock * sizeof(REAL); unsigned j = 0; while(lev_size > 0) { unsigned restCol = lev_size > TMPMEMNUM ? TMPMEMNUM : lev_size; dim3 dimGrid(restCol, 1); if (!PERTURB) RL<<<dimGrid, dimBlock, MemSize>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, csr_r_ptr_dev, csr_c_idx_dev, csr_diag_ptr_dev, level_idx_dev, tmpMem, n, A_sym.level_ptr[i], j*TMPMEMNUM); else RL_perturb<<<dimGrid, dimBlock, MemSize>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, csr_r_ptr_dev, csr_c_idx_dev, csr_diag_ptr_dev, level_idx_dev, tmpMem, n, A_sym.level_ptr[i], j*TMPMEMNUM, pert); j++; lev_size -= TMPMEMNUM; } } else if (lev_size > 448) { unsigned WarpsPerBlock = 4; dim3 dimBlock(WarpsPerBlock * 32, 1); size_t MemSize = WarpsPerBlock * sizeof(REAL); unsigned j = 0; while(lev_size > 0) { unsigned restCol = lev_size > TMPMEMNUM ? TMPMEMNUM : lev_size; dim3 dimGrid(restCol, 1); if (!PERTURB) RL<<<dimGrid, dimBlock, MemSize>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, csr_r_ptr_dev, csr_c_idx_dev, csr_diag_ptr_dev, level_idx_dev, tmpMem, n, A_sym.level_ptr[i], j*TMPMEMNUM); else RL_perturb<<<dimGrid, dimBlock, MemSize>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, csr_r_ptr_dev, csr_c_idx_dev, csr_diag_ptr_dev, level_idx_dev, tmpMem, n, A_sym.level_ptr[i], j*TMPMEMNUM, pert); j++; lev_size -= TMPMEMNUM; } } else if (lev_size > Nstreams) { dim3 dimBlock(256, 1); size_t MemSize = 32 * sizeof(REAL); unsigned j = 0; while(lev_size > 0) { unsigned restCol = lev_size > TMPMEMNUM ? 
TMPMEMNUM : lev_size; dim3 dimGrid(restCol, 1); if (!PERTURB) RL<<<dimGrid, dimBlock, MemSize>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, csr_r_ptr_dev, csr_c_idx_dev, csr_diag_ptr_dev, level_idx_dev, tmpMem, n, A_sym.level_ptr[i], j*TMPMEMNUM); else RL_perturb<<<dimGrid, dimBlock, MemSize>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, csr_r_ptr_dev, csr_c_idx_dev, csr_diag_ptr_dev, level_idx_dev, tmpMem, n, A_sym.level_ptr[i], j*TMPMEMNUM, pert); j++; lev_size -= TMPMEMNUM; } } else { // "Big" levels for (int offset = 0; offset < lev_size; offset += Nstreams) { for (int j = 0; j < Nstreams; j++) { if (j + offset < lev_size) { const unsigned currentCol = A_sym.level_idx[A_sym.level_ptr[i] + j + offset]; const unsigned subMatSize = A_sym.csr_r_ptr[currentCol + 1] - A_sym.csr_diag_ptr[currentCol] - 1; if (!PERTURB) RL_onecol_factorizeCurrentCol<<<1, 256>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, currentCol, tmpMem, j, n); else RL_onecol_factorizeCurrentCol_perturb<<<1, 256>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, l_col_ptr_dev, currentCol, tmpMem, j, n, pert); if (subMatSize > 0) RL_onecol_updateSubmat<<<subMatSize, 256>>>(sym_c_ptr_dev, sym_r_idx_dev, val_dev, csr_c_idx_dev, csr_diag_ptr_dev, currentCol, tmpMem, j, n); RL_onecol_cleartmpMem<<<1, 256>>>(sym_c_ptr_dev, sym_r_idx_dev, l_col_ptr_dev, currentCol, tmpMem, j, n); } } } } } //copy LU val back to main mem cudaMemcpy(&(A_sym.val[0]), val_dev, nnz * sizeof(REAL), cudaMemcpyDeviceToHost); t.elapsedUserTime(utime); out << "Total LU kernel loop time: " << utime << " ms" << std::endl; #ifdef VERIFY //check NaN elements unsigned err_find = 0; for(unsigned i = 0; i < nnz; i++) if(isnan(A_sym.val[i]) || isinf(A_sym.val[i])) err_find++; if (err_find != 0) err << "LU data check: " << " NaN found!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl; #endif cudaFree(sym_c_ptr_dev); cudaFree(sym_r_idx_dev); cudaFree(val_dev); cudaFree(l_col_ptr_dev); cudaFree(csr_c_idx_dev); cudaFree(csr_r_ptr_dev); cudaFree(csr_diag_ptr_dev); cudaFree(level_idx_dev); }
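// Note: each RL / RL_perturb block above factors one column of its level: it
// divides the sub-diagonal entries by the pivot, scatters them into the
// block-private tmpMem row, and then applies a rank-1 update to the columns
// to its right with atomicAdd. The dense, single-threaded analogue of that
// per-column step is sketched below (a host-only illustration, not the sparse
// kernel itself; names are illustrative).
#include <cmath>
#include <vector>

// One right-looking LU step without pivoting on a dense row-major n x n matrix.
// Column k of L and the trailing submatrix are updated in place, mirroring what
// RL()/RL_perturb() do for one sparse column.
static void RightLookingStep(std::vector<double>& A, int n, int k, double pert = 0.0) {
    double pivot = A[k * n + k];
    if (pert > 0.0 && std::abs(pivot) < pert)   // static pivot perturbation, as in RL_perturb
        pivot = A[k * n + k] = pert;
    for (int i = k + 1; i < n; ++i)
        A[i * n + k] /= pivot;                  // L(i,k) = A(i,k) / pivot
    for (int j = k + 1; j < n; ++j)             // rank-1 update of the trailing block
        for (int i = k + 1; i < n; ++i)
            A[i * n + j] -= A[i * n + k] * A[k * n + j];
}

int main() {
    int n = 3;
    std::vector<double> A = {4, 3, 2,
                             2, 4, 1,
                             1, 2, 3};
    for (int k = 0; k < n; ++k)
        RightLookingStep(A, n, k);
    // A now holds L (unit diagonal, strictly below) and U (on and above the diagonal).
    return 0;
}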
using namespace std; /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxEnergies: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * top5Probs: (1, numCases) (*out) * * target: (1, numCases) * */ __global__ void kMultiSoftmaxCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, float* top5Probs, const int numCases, const int numOut, const int setSize) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { const int label = int(labels[tx]); const float maxp = maxProbs[tx]; const float labelp = probs[label * numCases + tx]; labelLogProbs[tx] = __logf(labelp); int numBiggerProbs = 0, numEqualsProbs = 0; for (int i = 0; i < numOut; ++i) { numBiggerProbs += probs[i * numCases + tx] > labelp; numEqualsProbs += probs[i * numCases + tx] == labelp; } const int slotsLeft = setSize - numBiggerProbs; top5Probs[tx] = slotsLeft <= 0.0f ? 0.0f : (numEqualsProbs <= slotsLeft ? 1.0f : float(slotsLeft) / numEqualsProbs); correctProbs[tx] = labelp != maxp ? 0.0f : 1.0f / float(numEqualsProbs); } } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * top5Probs: (1, numCases) (*out) * * target: (1, numCases) == log(y_l[labels,:] */ void computeMultiSoftmaxCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& maxProbs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out, NVMatrix& top5Probs_out, int setSize) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.getNumElements() == numCases); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); // NVMatrix& maxProbs = probs.max(0); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); top5Probs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); cudaStream_t stream = NVMatrix::getDefaultStream(); cudaFuncSetCacheConfig(kMultiSoftmaxCost, cudaFuncCachePreferL1); kMultiSoftmaxCost<<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), top5Probs_out.getDevData(), numCases, numOut, setSize); getLastCudaError("kMultiSoftmaxCost: Kernel execution failed"); // cudaThreadSynchronize(); } /* * E = sum(p_l * log(y_l)) * probs: (numOut, numCases) * labels: (numOut, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ __global__ void kCrossEntCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { probs += tx; labels += tx; maxProbs += tx; labelLogProbs += tx; correctProbs += tx; const float maxp = maxProbs[0]; /* * Compute the probability of guessing the correct case if you take the most-probable label. * * This is done like this: * * - If the most probable label is not equal to the true label, then the probability is zero. * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum). * * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned * maximum probability. 
But it's a safety measure to prevent over-estimating your accuracy. * Though it could never happen in reality. Well it could. But it wouldn't. Cool? */ float crossEnt = 0.0f; int numMax = 0; bool correctLabel = false; for (int i = 0; i < numOut; i++) { const float label_prob = labels[i * numCases]; const float model_prob = probs[i * numCases]; numMax += model_prob == maxp; crossEnt += label_prob * safelog(model_prob); correctLabel |= model_prob == maxp && label_prob > 0.0f; } labelLogProbs[0] = crossEnt; if (!correctLabel) { correctProbs[0] = 0.0f; } else { correctProbs[0] = 1.0f / float(numMax); } } } /* * E = sum(p_l * log(y_l)) * y_l: (numOut, numCases) * labels: (numOut, numCases) * * dE_dy_l: (numOut, numCases) */ template <bool add> __global__ void kCrossEntGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const float label_prob = labels[tidx]; const float model_prob = y_l[tidx]; const float v = gradCoeff * __fdividef(label_prob, model_prob); if (add) { dE_dy_l[tidx] += v; } else { dE_dy_l[tidx] = v; } } } /* * E = sum(p_l * log(y_l)) * y_l: (numOut, numCases) * labels: (numOut, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kCrossEntSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const float model_prob = y_l[tidx]; const float label_prob = labels[tidx]; float v = gradCoeff * (label_prob - model_prob); if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ __global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { const int label = int(labels[tx]); const float maxp = maxProbs[tx]; const float labelp = probs[label * numCases + tx]; labelLogProbs[tx] = __logf(labelp); /* * Compute the probability of guessing the correct case if you take the most-probable label. * * This is done like this: * * - If the most probable label is not equal to the true label, then the probability is zero. * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum). * * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned * maximum probability. But it's a safety measure to prevent over-estimating your accuracy. * Though it could never happen in reality. Well it could. But it wouldn't. Cool? 
*/ if (labelp != maxp) { correctProbs[tx] = 0; } else { int numMax = 0; for (int i = 0; i < numOut; i++) { numMax += probs[i * numCases + tx] == maxp; } correctProbs[tx] = 1.0f / float(numMax); } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dy_l: (numOut, numCases) */ template <bool add> __global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * (label == ty); v = __fdividef(v, y_l[tidx]); if (add) { dE_dy_l[tidx] += v; } else { dE_dy_l[tidx] = v; } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * ((label == ty) - y_l[tidx]); if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } /* * dE_dy_l: (numOut, numCases) * y_l: (numOut, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut, const float scaleTarget, const float scaleGrad) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { float v = 0; for (int j = 0; j < numOut; j++) { v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]); } v *= y_l[tidx]; if (add) { dE_dx_l[tidx] = scaleTarget * dE_dx_l[tidx] + scaleGrad * v; } else { dE_dx_l[tidx] = scaleGrad * v; } } } template <int B_X, bool add> __global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target, const int numElements) { for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) { if (add) { target[i] += actGrad[i] * (output[i] == input[i]); } else { target[i] = actGrad[i] * (output[i] == input[i]); } } } void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) { assert(actGrad.isContiguous()); assert(output.isContiguous()); assert(input.isContiguous()); assert(actGrad.isSameDims(input)); assert(actGrad.isSameDims(output)); dim3 blocks(DIVUP(actGrad.getNumElements(), 128)); dim3 threads(128); cudaStream_t stream = NVMatrix::getDefaultStream(); if (add) { assert(actGrad.isSameDims(target)); cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, cudaFuncCachePreferL1); kEltwiseMaxGrad<128, true><<<blocks, threads, 0, stream>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } else { target.resize(actGrad); cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, cudaFuncCachePreferL1); kEltwiseMaxGrad<128, false><<<blocks, threads, 0, stream>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } 
getLastCudaError("computeEltwiseMaxGrad: Kernel execution failed"); } /* * E = sum_i{-p_i*log(y_i)} * probs: (numOut, numCases) * labels: (numOut, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ void computeCrossEntCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.isSameDims(probs)); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); NVMatrix& maxProbs = probs.max(0); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); cudaStream_t stream = NVMatrix::getDefaultStream(); cudaFuncSetCacheConfig(kCrossEntCost, cudaFuncCachePreferL1); kCrossEntCost<<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), numCases, numOut); getLastCudaError("kCrossEntCost: Kernel execution failed"); delete &maxProbs; } void computeCrossEntGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.isSameDims(probs)); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(!labels.isTrans()); assert(!probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); cudaStream_t stream = NVMatrix::getDefaultStream(); if (!add) { target.resize(probs); kCrossEntGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kCrossEntGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } getLastCudaError("kCrossEntGrad: Kernel execution failed"); } void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, float scaleTarget, float scaleGrad) { int numCases = acts.getLeadingDim(); int numOut = acts.getFollowingDim(); assert(acts.isSameDims(actsGrad)); assert(acts.isContiguous()); assert(actsGrad.isContiguous()); assert(target.isContiguous()); assert(acts.isTrans()); assert(actsGrad.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); cudaStream_t stream = NVMatrix::getDefaultStream(); if (scaleTarget == 0) { target.resize(acts); kSoftmaxGrad<false><<<blocks, threads, 0, stream>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut, scaleTarget, scaleGrad); } else { kSoftmaxGrad<true><<<blocks, threads, 0, stream>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut, scaleTarget, scaleGrad); } getLastCudaError("computeSoftmaxGrad: Kernel execution failed"); } void computeCrossEntSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getLeadingDim() == probs.getLeadingDim() && labels.getFollowingDim() == probs.getFollowingDim()); assert(probs.isContiguous()); assert(target.isContiguous()); 
assert(labels.isContiguous()); assert(probs.isTrans()); assert(!labels.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); cudaStream_t stream = NVMatrix::getDefaultStream(); if (!add) { target.resize(probs); cudaFuncSetCacheConfig(kCrossEntSoftmaxGrad<false>, cudaFuncCachePreferL1); kCrossEntSoftmaxGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { cudaFuncSetCacheConfig(kCrossEntSoftmaxGrad<true>, cudaFuncCachePreferL1); kCrossEntSoftmaxGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } getLastCudaError("kCrossEntSoftmaxGrad: Kernel execution failed"); } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) == log(y_l[labels,:] */ void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& maxProbs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.getNumElements() == numCases); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); cudaStream_t stream = NVMatrix::getDefaultStream(); cudaFuncSetCacheConfig(kLogregCost, cudaFuncCachePreferL1); kLogregCost<<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), numCases, numOut); getLastCudaError("computeLogregCost: Kernel execution failed"); } void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(!labels.isTrans()); assert(!probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); cudaStream_t stream = NVMatrix::getDefaultStream(); if (!add) { target.resize(probs); kLogregCostGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kLogregCostGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } getLastCudaError("computeLogregGrad: Kernel execution failed"); } void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); cudaStream_t stream = NVMatrix::getDefaultStream(); if (!add) { target.resize(probs); 
kLogregSoftmaxGrad<false><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kLogregSoftmaxGrad<true><<<blocks, threads, 0, stream>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } getLastCudaError("computeLogregSoftmaxGrad: Kernel execution failed"); }
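// Note: kLogregSoftmaxGrad above folds the softmax Jacobian into the
// log-likelihood term, so for each case the value written per output i is
// simply gradCoeff * (1[i == label] - softmax(x)[i]), i.e. the gradient of
// gradCoeff * log(softmax(x)[label]) with respect to the logits. The CPU
// reference below evaluates the same closed form for a single case (a sketch
// for checking the kernel, not part of the library above).
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<float> SoftmaxNllGrad(const std::vector<float>& logits,
                                         int label, float gradCoeff) {
    float maxLogit = logits[0];
    for (float v : logits) maxLogit = std::max(maxLogit, v);
    float sum = 0.0f;
    std::vector<float> y(logits.size());
    for (size_t i = 0; i < logits.size(); ++i) {            // numerically stable softmax
        y[i] = std::exp(logits[i] - maxLogit);
        sum += y[i];
    }
    std::vector<float> grad(logits.size());
    for (size_t i = 0; i < logits.size(); ++i) {
        y[i] /= sum;
        grad[i] = gradCoeff * ((static_cast<int>(i) == label ? 1.0f : 0.0f) - y[i]);
    }
    return grad;
}

int main() {
    std::vector<float> g = SoftmaxNllGrad({1.0f, 2.0f, 0.5f}, /*label=*/1, /*gradCoeff=*/1.0f);
    std::printf("%f %f %f\n", g[0], g[1], g[2]);            // only the label entry is positive
    return 0;
}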
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <Eigen/Core> #include <Eigen/Dense> #include <sophus/se3.hpp> #include <vector> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) inline __device__ __host__ float lerp(float a, float b, float t) { return a + t*(b-a); } __device__ __host__ float3 operator+(const float3 &a, const float3 &b) { return make_float3(a.x+b.x, a.y+b.y, a.z+b.z); } __device__ __host__ float3 operator-(const float3 &a, const float3 &b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); } template <typename Dtype> inline __device__ __host__ const Dtype & getValue(const int3 & v, const int3 & dim, const Dtype* sdf_grids) { return sdf_grids[v.x * dim.y * dim.z + v.y * dim.z + v.z]; } template <typename Dtype> inline __device__ __host__ Dtype getValueInterpolated(const float3 & pGrid, const int3 & dim, const Dtype* sdf_grids) { const int x0 = (int)(pGrid.x - 0.5); const float fx = (pGrid.x - 0.5) - x0; const int y0 = (int)(pGrid.y - 0.5); const float fy = (pGrid.y - 0.5) - y0; const int z0 = (int)(pGrid.z - 0.5); const float fz = (pGrid.z - 0.5) - z0; const int x1 = x0 + 1; const int y1 = y0 + 1; const int z1 = z0 + 1; if ( !(x0 >= 0 && x1 < dim.x && y0 >= 0 && y1 < dim.y && z0 >=0 && z1 < dim.z) ) return 0.1; const float dx00 = lerp( getValue(make_int3(x0,y0,z0), dim, sdf_grids), getValue(make_int3(x1,y0,z0), dim, sdf_grids), fx); const float dx01 = lerp( getValue(make_int3(x0,y0,z1), dim, sdf_grids), getValue(make_int3(x1,y0,z1), dim, sdf_grids), fx); const float dx10 = lerp( getValue(make_int3(x0,y1,z0), dim, sdf_grids), getValue(make_int3(x1,y1,z0), dim, sdf_grids), fx); const float dx11 = lerp( getValue(make_int3(x0,y1,z1), dim, sdf_grids), getValue(make_int3(x1,y1,z1), dim, sdf_grids), fx); const float dxy0 = lerp( dx00, dx10, fy ); const float dxy1 = lerp( dx01, dx11, fy ); float dxyz = lerp( dxy0, dxy1, fz ); // penalize inside objects // if (dxyz < 0) // dxyz *= 10; return dxyz; } template <typename Dtype> inline __device__ __host__ float3 getGradientInterpolated(const float3 & pGrid, const int3 & dim, const Dtype* sdf_grids) { const float3 delta_x = make_float3(1,0,0); const float3 delta_y = make_float3(0,1,0); const float3 delta_z = make_float3(0,0,1); Dtype f_px = getValueInterpolated(pGrid + delta_x, dim, sdf_grids); Dtype f_py = getValueInterpolated(pGrid + delta_y, dim, sdf_grids); Dtype f_pz = getValueInterpolated(pGrid + delta_z, dim, sdf_grids); Dtype f_mx = getValueInterpolated(pGrid - delta_x, dim, sdf_grids); Dtype f_my = getValueInterpolated(pGrid - delta_y, dim, sdf_grids); Dtype f_mz = getValueInterpolated(pGrid - delta_z, dim, sdf_grids); float3 grad; grad.x = 0.5*(f_px - f_mx); grad.y = 0.5*(f_py - f_my); grad.z = 0.5*(f_pz - f_mz); return grad; } /*******************************************/ /* pose_delta: num_objects x 6 */ /* pose_init: num_objects x 4 x 4 */ /* sdf_grid: num_classes x c x h x w */ /* sdf_limits: num_classes x 9 */ /* points: num_points x 5 */ /*******************************************/ template <typename Dtype> __global__ void SDFdistanceForward(const int nthreads, const Dtype* pose_delta, const Dtype* pose_init, const Dtype* sdf_grids, const Dtype* sdf_limits, const Dtype* points, const int num_points, Dtype* losses, Dtype* top_values, Dtype* diffs, Dtype* JTJ, Dtype* top_se3) { typedef Sophus::SE3<Dtype> SE3; typedef Eigen::Matrix<Dtype,3,1,Eigen::DontAlign> Vec3; // index is the index of 
point CUDA_1D_KERNEL_LOOP(index, nthreads) { int cls_index = int(points[5 * index + 3]); int obj_index = int(points[5 * index + 4]); int start_index; // convert delta pose Eigen::Matrix<Dtype,6,1> deltaPose; start_index = 6 * obj_index; deltaPose << pose_delta[start_index + 0], pose_delta[start_index + 1], pose_delta[start_index + 2], pose_delta[start_index + 3], pose_delta[start_index + 4], pose_delta[start_index + 5]; SE3 deltaPoseMatrix = SE3::exp(deltaPose); // convert initial pose Eigen::Matrix<Dtype,4,4> initialPose; start_index = 16 * obj_index; initialPose << pose_init[start_index + 0], pose_init[start_index + 1], pose_init[start_index + 2], pose_init[start_index + 3], pose_init[start_index + 4], pose_init[start_index + 5], pose_init[start_index + 6], pose_init[start_index + 7], pose_init[start_index + 8], pose_init[start_index + 9], pose_init[start_index + 10], pose_init[start_index + 11], pose_init[start_index + 12], pose_init[start_index + 13], pose_init[start_index + 14], pose_init[start_index + 15]; SE3 initialPoseMatrix = SE3(initialPose); // start point of a new object if (index == 0 || int(points[5 * (index-1) + 4]) != obj_index) { SE3 pose = deltaPoseMatrix * initialPoseMatrix; Eigen::Matrix<Dtype,3,4> matrix = pose.matrix3x4(); int count = 0; start_index = 16 * obj_index; for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) top_se3[start_index + count++] = matrix(i, j); } top_se3[start_index + 15] = 1.0; } // convert point Vec3 point; point << points[5 * index], points[5 * index + 1], points[5 * index + 2]; // transform the point const Vec3 updatedPoint = deltaPoseMatrix * initialPoseMatrix * point; // obtain sdf value start_index = 9 * cls_index; int d0 = int(sdf_limits[start_index + 6]); int d1 = int(sdf_limits[start_index + 7]); int d2 = int(sdf_limits[start_index + 8]); float px = (updatedPoint(0) - sdf_limits[start_index + 0]) / (sdf_limits[start_index + 3] - sdf_limits[start_index + 0]) * d0; float py = (updatedPoint(1) - sdf_limits[start_index + 1]) / (sdf_limits[start_index + 4] - sdf_limits[start_index + 1]) * d1; float pz = (updatedPoint(2) - sdf_limits[start_index + 2]) / (sdf_limits[start_index + 5] - sdf_limits[start_index + 2]) * d2; float3 pGrid = make_float3(px, py, pz); int3 dim = make_int3(d0, d1, d2); Dtype value = getValueInterpolated(pGrid, dim, sdf_grids + cls_index * d0 * d1 * d2); // L2 loss int flag = 1; if (value < 0) flag = -1; value *= flag; losses[index] = 0.5 * value * value; top_values[index] = losses[index]; // L2 penalty on translation // float lambda = 0.1; // losses[index] += 0.5 * lambda * (pose_delta[0] * pose_delta[0] + pose_delta[1] * pose_delta[1] + pose_delta[2] * pose_delta[2]); // compute gradient float3 grad = getGradientInterpolated(pGrid, dim, sdf_grids + cls_index * d0 * d1 * d2); Vec3 sdfUpdate; sdfUpdate << grad.x, grad.y, grad.z; Eigen::Matrix<Dtype,3,6> dUpdate; dUpdate << 1, 0, 0, 0, updatedPoint(2), -updatedPoint(1), 0, 1, 0, -updatedPoint(2), 0, updatedPoint(0), 0, 0, 1, updatedPoint(1), -updatedPoint(0), 0; Eigen::Matrix<Dtype,1,6> J = flag * sdfUpdate.transpose() * dUpdate; // assign gradient for (int i = 0; i < 6; i++) diffs[6 * index + i] = value * J(i); // L2 penalty on translation // diffs[6 * index + 0] += lambda * pose_delta[0]; // diffs[6 * index + 1] += lambda * pose_delta[1]; // diffs[6 * index + 2] += lambda * pose_delta[2]; // compute JTJ Eigen::Matrix<Dtype,6,6> result = J.transpose() * J; for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) JTJ[36 * index + i * 6 + j] = result(i, j); } } } /* 
diffs: num_points x num_channels */ /* bottom_diff: num_objects x num_channels */ template <typename Dtype> __global__ void sum_gradients(const int nthreads, const Dtype* diffs, const int num_channels, const Dtype* points, Dtype* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int p = index / num_channels; int c = index % num_channels; int obj_index = int(points[5 * p + 4]); atomicAdd(bottom_diff + obj_index * num_channels + c, diffs[index]); } } /*******************************************/ /* pose_delta: num_objects x 6 */ /* pose_init: num_objects x 4 x 4 */ /* sdf_grid: num_classes x c x h x w */ /* sdf_limits: num_classes x 9 */ /* points: num_points x 5 */ /*******************************************/ std::vector<at::Tensor> sdf_loss_cuda_forward( at::Tensor pose_delta, at::Tensor pose_init, at::Tensor sdf_grids, at::Tensor sdf_limits, at::Tensor points, at::Tensor regularization) { // run kernels cudaError_t err; const int kThreadsPerBlock = 512; const int num_channels = 6; int output_size; // sizes const int num_objects = pose_delta.size(0); const int num_classes = sdf_grids.size(0); const int num_points = points.size(0); // temp losses auto losses = at::zeros({num_points}, points.options()); auto top_values = at::zeros({num_points}, points.options()); auto top_data = at::zeros({1}, points.options()); auto top_se3 = at::zeros({num_objects, 4, 4}, points.options()); // temp diffs auto diffs = at::zeros({num_points, num_channels}, points.options()); auto JTJ = at::zeros({num_points, num_channels, num_channels}, points.options()); auto bottom_diff = at::zeros({num_objects, num_channels}, points.options()); auto bottom_JTJ = at::zeros({num_objects, num_channels, num_channels}, points.options()); // compute the losses and gradients output_size = num_points; SDFdistanceForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, pose_delta.data<float>(), pose_init.data<float>(), sdf_grids.data<float>(), sdf_limits.data<float>(), points.data<float>(), num_points, losses.data<float>(), top_values.data<float>(), diffs.data<float>(), JTJ.data<float>(), top_se3.data<float>()); cudaDeviceSynchronize(); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // sum the diffs output_size = num_points * num_channels; sum_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, diffs.data<float>(), num_channels, points.data<float>(), bottom_diff.data<float>()); output_size = num_points * num_channels * num_channels; sum_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, JTJ.data<float>(), num_channels * num_channels, points.data<float>(), bottom_JTJ.data<float>()); cudaDeviceSynchronize(); // sum the loss thrust::device_ptr<float> losses_ptr(losses.data<float>()); float loss = thrust::reduce(losses_ptr, losses_ptr + num_points) / num_points; cudaMemcpy(top_data.data<float>(), &loss, sizeof(float), cudaMemcpyHostToDevice); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) ); exit( -1 ); } // compute Gauss Newton update float* bottom_diff_host = (float*)malloc(num_objects * num_channels * sizeof(float)); float* regularization_host = (float*)malloc(num_channels * sizeof(float)); float* bottom_JTJ_host = (float*)malloc(num_objects * num_channels * num_channels * sizeof(float)); 
cudaMemcpy(bottom_diff_host, bottom_diff.data<float>(), num_objects * num_channels * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(regularization_host, regularization.data<float>(), num_channels * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(bottom_JTJ_host, bottom_JTJ.data<float>(), num_objects * num_channels * num_channels * sizeof(float), cudaMemcpyDeviceToHost); Eigen::Matrix<float,6,1> J_eigen; Eigen::Matrix<float,6,6> JTJ_eigen; float* dalpha_all = (float*)malloc(num_objects * num_channels * sizeof(float)); for (int k = 0; k < num_objects; k++) { for (int i = 0; i < num_channels; i++) { J_eigen(i) = bottom_diff_host[k * num_channels + i]; for (int j = 0; j < num_channels; j++) JTJ_eigen(i, j) = bottom_JTJ_host[k * num_channels * num_channels + i * num_channels + j]; JTJ_eigen(i, i) += regularization_host[i]; } Eigen::Matrix<float,6,1> dalpha = JTJ_eigen.ldlt().solve(J_eigen); for (int i = 0; i < num_channels; i++) dalpha_all[k * num_channels + i] = dalpha(i); } auto bottom_delta = at::zeros({num_objects, num_channels}, points.options()); cudaMemcpy(bottom_delta.data<float>(), dalpha_all, num_objects * num_channels * sizeof(float), cudaMemcpyHostToDevice); free(bottom_diff_host); free(regularization_host); free(bottom_JTJ_host); free(dalpha_all); return {top_data, top_values, top_se3, bottom_delta, bottom_diff}; } template <typename Dtype> __global__ void SDFdistanceBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_diff, Dtype* output) { CUDA_1D_KERNEL_LOOP(index, nthreads) { output[index] = top_diff[0] * bottom_diff[index]; } } std::vector<at::Tensor> sdf_loss_cuda_backward( at::Tensor grad_loss, at::Tensor bottom_diff) { cudaError_t err; const int kThreadsPerBlock = 512; int output_size; const int batch_size = bottom_diff.size(0); const int num_channels = bottom_diff.size(1); auto grad_pose = at::zeros({batch_size, num_channels}, bottom_diff.options()); output_size = batch_size * num_channels; SDFdistanceBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, grad_loss.data<float>(), bottom_diff.data<float>(), grad_pose.data<float>()); cudaDeviceSynchronize(); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return {grad_pose}; }
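// Illustrative sketch (assumed, not from the original source): sdf_loss_cuda_forward and
// sdf_loss_cuda_backward above follow the usual ATen CUDA-extension pattern, so a thin
// pybind11 binding like the one below is typically how they reach Python. The module
// name, the exported names, and the assumption that this is built with
// torch.utils.cpp_extension are illustrative only.
#include <torch/extension.h>
#include <vector>

std::vector<at::Tensor> sdf_loss_cuda_forward(
    at::Tensor pose_delta, at::Tensor pose_init, at::Tensor sdf_grids,
    at::Tensor sdf_limits, at::Tensor points, at::Tensor regularization);

std::vector<at::Tensor> sdf_loss_cuda_backward(
    at::Tensor grad_loss, at::Tensor bottom_diff);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    // forward returns {loss, per-point losses, updated SE3 poses,
    //                  Gauss-Newton step, summed per-object gradient}
    m.def("sdf_loss_forward", &sdf_loss_cuda_forward, "SDF loss forward (CUDA)");
    // backward scales the cached per-object gradient by grad_loss
    m.def("sdf_loss_backward", &sdf_loss_cuda_backward, "SDF loss backward (CUDA)");
}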
#include <curand_kernel.h> // required for curandState, curand_init and curand_uniform used by InitialiseRandomStates and FillWithProbabilityMask_V below
#include <stdio.h> __global__ void Sub_V_S(const float *a, const float b, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = a[i] - b; } } __global__ void Sub_S_V(const float a, float* b, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = a - b[i]; } } __global__ void Add_V_S(const float* a, const float b, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = a[i] + b; } } __global__ void Add_V_V_InPlace(const float* a, int aOffset, float* b, int bOffset, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { b[i + bOffset] = a[i + aOffset] + b[i + bOffset]; } } __global__ void Mul_Had_V_V(const float* a, const float* b, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = a[i] * b[i]; } } __global__ void Div_S_V(const float a, const float* b, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = a / b[i]; } } __global__ void Div_V_V(const float* a, const float* b, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = a[i] / b[i]; } } __global__ void Exp_V(const float* a, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = __expf(a[i]); } } __global__ void Sqrt_V(const float* a, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = sqrtf(a[i]); } } __global__ void Sign_V(const float* a, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = copysignf(1.0f, a[i]); } } __global__ void Rel_V(const float* a, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = (fabsf(a[i]) + a[i]) / 2.0f; } } __global__ void Log_V(const float* a, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = __logf(a[i]); } } __global__ void Sigmoid_V(const float* a, float* out, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { out[i] = 1.0f / (1.0f + __expf(-a[i])); } } __global__ void Sum_V(const float* a, float* partial_sums, const int n) { extern __shared__ float sdata[]; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; int ti = threadIdx.x; // move global input data to shared memory, pad with zeros float x = 0.0f; if (i < n) { x = a[i]; } sdata[ti] = x; __syncthreads(); // use parallel reduction to contiguously reduce to partial sums for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (ti < offset) { sdata[ti] += sdata[ti + offset]; } __syncthreads(); } if (ti == 0) { partial_sums[blockIdx.x] = sdata[0]; } } __global__ void Softmax_Rowwise_M(const float* a, float* maxPerRow, float* maxPerRowIndices, float* sumPerRow, const int rows, const 
int cols, const int cols2, float* out, const int n) { extern __shared__ float sdata[]; float* rowBuffer = &sdata[blockDim.x]; int rowsPerBlock = blockDim.x / cols; int usedPerBlock = rowsPerBlock * cols; int unusedPerBlock = blockDim.x - usedPerBlock; int ti = threadIdx.x; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x - (unusedPerBlock * blockId); int ri = i / cols; int riLocal = ri % rowsPerBlock; int tiLocal = ti - riLocal * cols; bool inData = i < n && ti < usedPerBlock; float x = 0.0f; if (inData) { x = a[i]; } sdata[ti] = rowBuffer[ti] = x; __syncthreads(); // find each rows max value for (int offset = cols2 / 2; offset > 0; offset >>= 1) { if (tiLocal < offset) { float currentMax = rowBuffer[ti]; float other = (ti + offset) / cols == riLocal ? rowBuffer[ti + offset] : 0.0f; rowBuffer[ti] = other > currentMax ? other : currentMax; } __syncthreads(); } // subtract each value from that row's maximum if (inData) { sdata[ti] = __expf(sdata[ti] - rowBuffer[riLocal * cols]); if (tiLocal == 0) { maxPerRow[ri] = rowBuffer[riLocal * cols]; } } rowBuffer[ti] = sdata[ti]; __syncthreads(); // write out max index if (maxPerRow[ri] == a[i]) { maxPerRowIndices[ri] = tiLocal; } // calculate each rows sum for (int offset = cols2 / 2; offset > 0; offset >>= 1) { if (tiLocal < offset) { float other = (ti + offset) / cols == riLocal ? rowBuffer[ti + offset] : 0.0f; rowBuffer[ti] = rowBuffer[ti] + other; } __syncthreads(); } if (inData) { out[i] = sdata[ti] / rowBuffer[riLocal * cols]; if (tiLocal == 0) { sumPerRow[ri] = rowBuffer[riLocal * cols]; } } } __global__ void Softmax_Rowwise_M_Backward(const float* origin, const float* adjoint, const float* primal, const float* prevMaxs, const float* prevMaxIndices, const float* prevSums, float* out, const int rows, const int cols, const int cols2, const int n) { extern __shared__ float sdata[]; float* rowBuffer = sdata; float* originData = &sdata[blockDim.x]; float* adjointData = &sdata[blockDim.x * 2]; float* primalData = &sdata[blockDim.x * 3]; float* outData = &sdata[blockDim.x * 4]; int rowsPerBlock = blockDim.x / cols; int usedPerBlock = rowsPerBlock * cols; int unusedPerBlock = blockDim.x - usedPerBlock; int ti = threadIdx.x; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x - (unusedPerBlock * blockId); int ri = i / cols; int riLocal = ri % rowsPerBlock; int tiLocal = ti - riLocal * cols; bool inData = i < n && ti < usedPerBlock; float prevMax = prevMaxs[ri]; int prevMaxIndex = prevMaxIndices[ri]; float prevSum = prevSums[ri]; if (inData) { originData[ti] = origin[i]; adjointData[ti] = adjoint[i]; primalData[ti] = primal[i]; } // Div_DM_D DM (direct) D (indirect via Sum_DM) rowBuffer[ti] = adjointData[ti] / prevSum + adjointData[ti] * (originData[ti] / (prevSum * prevSum)); // Exp_DM DM (direct) rowBuffer[ti] = rowBuffer[ti] * __expf(originData[ti] - prevMax); outData[ti] = rowBuffer[ti]; __syncthreads(); // calculate each rows derivatives (in rowBuffer) sum for (int offset = cols2 / 2; offset > 0; offset >>= 1) { if (tiLocal < offset) { float other = (ti + offset) / cols == riLocal ? 
rowBuffer[ti + offset] : 0.0f; rowBuffer[ti] = rowBuffer[ti] + other; } __syncthreads(); } // Item_DM D (indirect via Max op via Sub_DM_D op (left part for DM is just passthrough of gradient, so nothing to do there)) if (tiLocal == prevMaxIndex) { outData[ti] = outData[ti] - rowBuffer[riLocal * cols]; } if (inData) { out[i] = outData[ti]; } } __global__ void Sum_M_Rowwise(const float* a, const int rows, const int cols, const int cols2, float* sumPerRowPerBlock, const int n) { extern __shared__ float sdata[]; int rowsPerBlock = blockDim.x / cols; int usedPerBlock = rowsPerBlock * cols; int unusedPerBlock = blockDim.x - usedPerBlock; int ti = threadIdx.x; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x - (unusedPerBlock * blockId); int ri = i / cols; int riLocal = ri % rowsPerBlock; int tiLocal = ti - riLocal * cols; bool inData = i < n && ti < usedPerBlock; float x = 0.0f; if (inData) { x = a[i]; } sdata[ti] = x; __syncthreads(); // calculate each rows derivatives (in sdata) sum for (int offset = cols2 / 2; offset > 0; offset >>= 1) { if (tiLocal < offset) { float other = (ti + offset) / cols == riLocal ? sdata[ti + offset] : 0.0f; sdata[ti] = sdata[ti] + other; } __syncthreads(); } if (tiLocal == 0) { sumPerRowPerBlock[blockIdx.x * rowsPerBlock + riLocal] = sdata[riLocal * cols]; } } __global__ void Add_M_Rowwise_V_InPlace(const float* a, const int rows, const int cols, const int cols2, float* b, const int n) { extern __shared__ float sdata[]; int rowsPerBlock = blockDim.x / cols; int usedPerBlock = rowsPerBlock * cols; int unusedPerBlock = blockDim.x - usedPerBlock; int ti = threadIdx.x; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x - (unusedPerBlock * blockId); int ri = i / cols; int riLocal = ri % rowsPerBlock; int tiLocal = ti - riLocal * cols; bool inData = i < n && ti < usedPerBlock; float x = 0.0f; if (inData) { x = a[i]; } sdata[ti] = x; __syncthreads(); // calculate each rows derivatives (in sdata) sum for (int offset = cols2 / 2; offset > 0; offset >>= 1) { if (tiLocal < offset) { float other = (ti + offset) / cols == riLocal ? 
sdata[ti + offset] : 0.0f; sdata[ti] = sdata[ti] + other; } __syncthreads(); } if (tiLocal == 0) { b[ri] = b[ri] + sdata[riLocal * cols]; } } __global__ void RepeatReshapeCopy_V_MRows(const float* a, float* b, const int rows, const int cols, const int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < cols) { float value = a[i]; while (i < n) { b[i] = value; i += cols; } } } __global__ void Permute_M(const float* a, const float* permutedDimensions, const float* originalStrides, float* out, const float* permutedStrides, const int rank, const int n) { extern __shared__ float sdata[]; int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; float* bufferIndices = &sdata[threadIdx.x * rank * 2]; float* resultIndices = &bufferIndices[rank]; if (i < n) { int flatIndex = i; for (int y = 0; y < rank; y++) { bufferIndices[y] = (int) (flatIndex / originalStrides[y]); flatIndex -= bufferIndices[y] * originalStrides[y]; } for (int y = 0; y < rank; y++) { resultIndices[y] = bufferIndices[(int) permutedDimensions[y]]; } int permutedIndex = 0; for (int y = 0; y < rank; y++) { permutedIndex += resultIndices[y] * permutedStrides[y]; } out[permutedIndex] = a[i]; } } __device__ curandState randomStates[256]; __global__ void InitialiseRandomStates(int seed) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < 256) { curand_init(seed + i, i, 0, &randomStates[i]); } } __global__ void FillWithProbabilityMask_V(float* a, const float probability, int n) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int i = blockId * blockDim.x + threadIdx.x; if (i < n) { float rand = curand_uniform(&randomStates[i % 256]); a[i] = rand < probability ? 1 : 0; } } int main() { // do nothing return 0; }
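// Illustrative sketch (assumed, not from the original source): a minimal host-side driver
// showing how the elementwise kernels above are typically launched. They compute a flat
// block id as blockIdx.y * gridDim.x + blockIdx.x, so a plain 1-D grid is sufficient for
// moderate n; a 2-D grid is only needed once gridDim.x would overflow. The helper name
// and buffer handling are illustrative assumptions.
#include <cuda_runtime.h>

static void launchAddScalarExample(const float* h_in, float* h_out,
                                   int n, float scalar)
{
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid  = (n + block - 1) / block;        // 1-D grid: blockIdx.y stays 0
    Add_V_S<<<grid, block>>>(d_in, scalar, d_out, n); // out[i] = in[i] + scalar

    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}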
#ifndef CH_SYSTEMFSI_IMPL_H_ #define CH_SYSTEMFSI_IMPL_H_ #include "chrono/ChConfig.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/detail/normal_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/tuple.h> #include "chrono_fsi/physics/ChFsiGeneral.h" #include "chrono_fsi/math/custom_math.h" #include "chrono_fsi/utils/ChUtilsDevice.cuh" namespace chrono { namespace fsi { /// @addtogroup fsi_physics /// @{ /// typedef device iterators for shorthand SPH operation of thrust vectors of Real3 typedef thrust::device_vector<Real3>::iterator r3IterD; /// typedef device iterators for shorthand SPH operation of thrust vectors of Real4 typedef thrust::device_vector<Real4>::iterator r4IterD; /// typedef device tuple for holding SPH data pos,vel,[rho,pressure,mu,type] typedef thrust::tuple<r4IterD, r3IterD, r4IterD, r3IterD, r3IterD> iterTupleSphD; typedef thrust::zip_iterator<iterTupleSphD> zipIterSphD; /// typedef host iterators for shorthand SPH operation of thrust vectors of Real3 typedef thrust::host_vector<Real3>::iterator r3IterH; /// typedef host iterators for shorthand SPH operation of thrust vectors of Real4 typedef thrust::host_vector<Real4>::iterator r4IterH; /// typedef host tuple for holding SPH data pos,vel,[rho,pressure,mu,type] typedef thrust::tuple<r4IterH, r3IterH, r4IterH, r3IterH, r3IterH> iterTupleH; typedef thrust::zip_iterator<iterTupleH> zipIterSphH; /// typedef device iterators for shorthand rigid body states: /// pos,orientation in position, velocity and acceleration level typedef thrust::tuple<r3IterD, r4IterD, r3IterD, r4IterD, r3IterD, r3IterD> iterTupleRigidD; typedef thrust::zip_iterator<iterTupleRigidD> zipIterRigidD; /// typedef host iterators for shorthand rigid body states: /// pos,orientation in position, velocity and acceleration level typedef thrust::tuple<r3IterH, r4IterH, r3IterH, r4IterH, r3IterH, r3IterH> iterTupleRigidH; typedef thrust::zip_iterator<iterTupleRigidH> zipIterRigidH; /// typedef device iterators for shorthand chrono bodies operations typedef thrust::tuple<r3IterH, r3IterH, r3IterH, r4IterH, r3IterH, r3IterH> iterTupleChronoBodiesH; typedef thrust::zip_iterator<iterTupleChronoBodiesH> zipIterChronoBodiesH; /// Struct to store the information of SPH particles on the device struct SphMarkerDataD { thrust::device_vector<Real4> posRadD; ///< Vector of the positions of particles + characteristic radius thrust::device_vector<Real3> velMasD; ///< Vector of the velocities of particles thrust::device_vector<Real4> rhoPresMuD; ///< Vector of the rho+pressure+mu+type of particles thrust::device_vector<Real3> tauXxYyZzD; ///< Vector of the shear stress (diagonal) of particles thrust::device_vector<Real3> tauXyXzYzD; ///< Vector of the shear stress (off-diagonal) of particles zipIterSphD iterator(); void resize(size_t s); }; /// Struct to store the information of SPH particles on the host struct SphMarkerDataH { thrust::host_vector<Real4> posRadH; ///< Vector of the positions of particles thrust::host_vector<Real3> velMasH; ///< Vector of the velocities of particles thrust::host_vector<Real4> rhoPresMuH; ///< Vector of the rho+pressure+mu+type of particles thrust::host_vector<Real3> tauXxYyZzH; ///< Vector of the shear stress (diagonal) of particles thrust::host_vector<Real3> tauXyXzYzH; ///< Vector of the shear stress (off-diagonal) of particles zipIterSphH iterator(); void resize(size_t s); }; /// Struct to store the information of rigid bodies 
on the host struct FsiBodiesDataH { thrust::host_vector<Real3> posRigid_fsiBodies_H; ///< Vector of the linear positions of rigid bodies thrust::host_vector<Real4> velMassRigid_fsiBodies_H; ///< Vector of the linear velocities of rigid bodies thrust::host_vector<Real3> accRigid_fsiBodies_H; ///< Vector of the linear acceleration of rigid bodies thrust::host_vector<Real4> q_fsiBodies_H; ///< Vector of the orientations (Euler parameters as Quaternion) of rigid bodies thrust::host_vector<Real3> omegaVelLRF_fsiBodies_H; ///< Vector of the angular velocities of rigid bodies thrust::host_vector<Real3> omegaAccLRF_fsiBodies_H; ///< Vector of the angular acceleration of rigid bodies zipIterRigidH iterator(); void resize(size_t s); }; /// Struct to store the information of rigid bodies on the device struct FsiBodiesDataD { thrust::device_vector<Real3> posRigid_fsiBodies_D; ///< Vector of the linear positions of rigid bodies thrust::device_vector<Real4> velMassRigid_fsiBodies_D; ///< Vector of the linear velocities of rigid bodies thrust::device_vector<Real3> accRigid_fsiBodies_D; ///< Vector of the linear acceleration of rigid bodies thrust::device_vector<Real4> q_fsiBodies_D; ///< Vector of the orientations (Euler parameters as Quaternion) of rigid bodies thrust::device_vector<Real3> omegaVelLRF_fsiBodies_D; ///< Vector of the angular velocities of rigid bodies thrust::device_vector<Real3> omegaAccLRF_fsiBodies_D; ///< Vector of the angular acceleration of rigid bodies zipIterRigidD iterator(); void CopyFromH(const FsiBodiesDataH& other); FsiBodiesDataD& operator=(const FsiBodiesDataD& other); void resize(size_t s); }; /// Struct to store the information of mesh on the host struct FsiMeshDataH { thrust::host_vector<Real3> pos_fsi_fea_H; ///< Vector of the mesh position thrust::host_vector<Real3> vel_fsi_fea_H; ///< Vector of the mesh velocity thrust::host_vector<Real3> acc_fsi_fea_H; ///< Vector of the mesh acceleration // zipIterFlexH iterator(); void resize(size_t s); size_t size() { return pos_fsi_fea_H.size(); }; }; /// Struct to store the information of mesh on the device struct FsiMeshDataD { thrust::device_vector<Real3> pos_fsi_fea_D; ///< Vector of the mesh position thrust::device_vector<Real3> vel_fsi_fea_D; ///< Vector of the mesh velocity thrust::device_vector<Real3> acc_fsi_fea_D; ///< Vector of the mesh acceleration // zipIterFlexD iterator(); void CopyFromH(const FsiMeshDataH& other); FsiMeshDataD& operator=(const FsiMeshDataD& other); void resize(size_t s); }; /// Struct to store the information of shell elements on the host struct FsiShellsDataH { thrust::host_vector<Real3> posFlex_fsiBodies_nA_H; ///< Vector of the node A position thrust::host_vector<Real3> posFlex_fsiBodies_nB_H; ///< Vector of the node B position thrust::host_vector<Real3> posFlex_fsiBodies_nC_H; ///< Vector of the node B position thrust::host_vector<Real3> posFlex_fsiBodies_nD_H; ///< Vector of the node D position thrust::host_vector<Real3> velFlex_fsiBodies_nA_H; ///< Vector of the node A velocity thrust::host_vector<Real3> velFlex_fsiBodies_nB_H; ///< Vector of the node B velocity thrust::host_vector<Real3> velFlex_fsiBodies_nC_H; ///< Vector of the node C velocity thrust::host_vector<Real3> velFlex_fsiBodies_nD_H; ///< Vector of the node D velocity thrust::host_vector<Real3> accFlex_fsiBodies_nA_H; ///< Vector of the node A acceleration thrust::host_vector<Real3> accFlex_fsiBodies_nB_H; ///< Vector of the node B acceleration thrust::host_vector<Real3> accFlex_fsiBodies_nC_H; ///< Vector of the node C 
acceleration thrust::host_vector<Real3> accFlex_fsiBodies_nD_H; ///< Vector of the node D acceleration // zipIterFlexH iterator(); void resize(size_t s); }; /// Struct to store the information of shell elements on the device struct FsiShellsDataD { thrust::device_vector<Real3> posFlex_fsiBodies_nA_D; ///< Vector of the node A position thrust::device_vector<Real3> posFlex_fsiBodies_nB_D; ///< Vector of the node B position thrust::device_vector<Real3> posFlex_fsiBodies_nC_D; ///< Vector of the node C position thrust::device_vector<Real3> posFlex_fsiBodies_nD_D; ///< Vector of the node D position thrust::device_vector<Real3> velFlex_fsiBodies_nA_D; ///< Vector of the node A velocity thrust::device_vector<Real3> velFlex_fsiBodies_nB_D; ///< Vector of the node B velocity thrust::device_vector<Real3> velFlex_fsiBodies_nC_D; ///< Vector of the node C velocity thrust::device_vector<Real3> velFlex_fsiBodies_nD_D; ///< Vector of the node D velocity thrust::device_vector<Real3> accFlex_fsiBodies_nA_D; ///< Vector of the node A acceleration thrust::device_vector<Real3> accFlex_fsiBodies_nB_D; ///< Vector of the node B acceleration thrust::device_vector<Real3> accFlex_fsiBodies_nC_D; ///< Vector of the node C acceleration thrust::device_vector<Real3> accFlex_fsiBodies_nD_D; ///< Vector of the node D acceleration // zipIterFlexD iterator(); void CopyFromH(const FsiShellsDataH& other); FsiShellsDataD& operator=(const FsiShellsDataD& other); void resize(size_t s); }; /// Struct to store neighbor search information on the device struct ProximityDataD { thrust::device_vector<uint> gridMarkerHashD; ///< gridMarkerHash=s(i,j,k)= k*n_x*n_y + j*n_x + i (numAllMarkers); thrust::device_vector<uint> gridMarkerIndexD; ///< (numAllMarkers); thrust::device_vector<uint> cellStartD; ///< Index of the particle starts a cell in sorted list (m_numGridCells) thrust::device_vector<uint> cellEndD; ///< Index of the particle ends a cell in sorted list (m_numGridCells) thrust::device_vector<uint> mapOriginalToSorted; ///< Index mapping from the original to the sorted void resize(size_t numAllMarkers); }; /// Struct to store Chrono rigid bodies information on the host struct ChronoBodiesDataH { ChronoBodiesDataH() {} ChronoBodiesDataH(size_t s); thrust::host_vector<Real3> pos_ChSystemH; ///< Vector of the linear positions of rigid bodies thrust::host_vector<Real3> vel_ChSystemH; ///< Vector of the linear velocities of rigid bodies thrust::host_vector<Real3> acc_ChSystemH; ///< Vector of the linear accelerations of rigid bodies thrust::host_vector<Real4> quat_ChSystemH; ///< Vector of the orientations (Euler parameters as Quaternion) of rigid bodies thrust::host_vector<Real3> omegaVelGRF_ChSystemH; ///< Vector of the angular velocities of rigid bodies thrust::host_vector<Real3> omegaAccGRF_ChSystemH; ///< Vector of the angular acceleraion of rigid bodies zipIterChronoBodiesH iterator(); void resize(size_t s); }; /// Struct to store Chrono shell elements information on the host struct ChronoShellsDataH { ChronoShellsDataH() {} ChronoShellsDataH(size_t s); // zipIterChronoShellsH iterator(); thrust::host_vector<Real3> posFlex_ChSystemH_nA_H; ///< Vector of the node A position thrust::host_vector<Real3> posFlex_ChSystemH_nB_H; ///< Vector of the node B position thrust::host_vector<Real3> posFlex_ChSystemH_nC_H; ///< Vector of the node C position thrust::host_vector<Real3> posFlex_ChSystemH_nD_H; ///< Vector of the node D position thrust::host_vector<Real3> velFlex_ChSystemH_nA_H; ///< Vector of the node A velocity 
thrust::host_vector<Real3> velFlex_ChSystemH_nB_H; ///< Vector of the node B velocity thrust::host_vector<Real3> velFlex_ChSystemH_nC_H; ///< Vector of the node C velocity thrust::host_vector<Real3> velFlex_ChSystemH_nD_H; ///< Vector of the node D velocity thrust::host_vector<Real3> accFlex_ChSystemH_nA_H; ///< Vector of the node A acceleration thrust::host_vector<Real3> accFlex_ChSystemH_nB_H; ///< Vector of the node B acceleration thrust::host_vector<Real3> accFlex_ChSystemH_nC_H; ///< Vector of the node C acceleration thrust::host_vector<Real3> accFlex_ChSystemH_nD_H; ///< Vector of the node D acceleration void resize(size_t s); }; /// Struct to store Chrono mesh information on the host struct ChronoMeshDataH { ChronoMeshDataH() {} ChronoMeshDataH(size_t s); thrust::host_vector<Real3> posFlex_ChSystemH_H; ///< Vector of the mesh position thrust::host_vector<Real3> velFlex_ChSystemH_H; ///< Vector of the mesh velocity thrust::host_vector<Real3> accFlex_ChSystemH_H; ///< Vector of the mesh acceleration void resize(size_t s); }; /// Struct to store fluid/granular system information that need to be passed to Chrono struct FsiGeneralData { // ---------------- // host // ---------------- // fluidfsiBodiesIndex thrust::host_vector<int4> referenceArray; ///< Holds information of each phase in the array of SPH particles thrust::host_vector<int4> referenceArray_FEA; ///< Holds information of each phase in the array of SPH particles for Flexible elements // ---------------- // device // ---------------- // fluid thrust::device_vector<Real4> derivVelRhoD; ///< dv/dt and d(rho)/dt for particles thrust::device_vector<Real4> derivVelRhoD_old; ///< dv/dt and d(rho)/dt for particles thrust::device_vector<Real3> derivTauXxYyZzD; ///< d(tau)/dt for particles thrust::device_vector<Real3> derivTauXyXzYzD; ///< d(tau)/dt for particles thrust::device_vector<Real3> vel_XSPH_D; ///< XSPH velocity for particles thrust::device_vector<Real3> vis_vel_SPH_D; ///< IISPH velocity for particles thrust::device_vector<Real4> sr_tau_I_mu_i; ///< I2SPH strain-rate, stress, inertia number, friction // BCE thrust::device_vector<Real3> rigidSPH_MeshPos_LRF_D; ///< Position of a particle attached to a rigid body in a local thrust::device_vector<Real3> FlexSPH_MeshPos_LRF_D; ///< Position of a particle attached to a mesh in a local on device thrust::host_vector<Real3> FlexSPH_MeshPos_LRF_H; ///< Position of a particle attached to a mesh in a local on host thrust::device_vector<uint> rigidIdentifierD; ///< Identifies which rigid body a particle belongs to thrust::device_vector<uint> FlexIdentifierD; ///< Identifies which flexible body a particle belongs to // fsi bodies thrust::device_vector<Real3> rigid_FSI_ForcesD; ///< Vector of the surface-integrated forces to rigid bodies thrust::device_vector<Real3> rigid_FSI_TorquesD; ///< Vector of the surface-integrated torques to rigid bodies thrust::device_vector<Real3> Flex_FSI_ForcesD; ///< Vector of the surface-integrated force on FEA nodes thrust::host_vector<int2> CableElementsNodesH; ///< Vector of the cable elements dodes on host thrust::device_vector<int2> CableElementsNodes; ///< Vector of the cable elements dodes on device thrust::host_vector<int4> ShellElementsNodesH; ///< Vector of the shell elements dodes on host thrust::device_vector<int4> ShellElementsNodes; ///< Vector of the shell elements dodes on device }; /// @brief Data related function implementations for FSI system class ChSystemFsi_impl : public ChFsiGeneral { public: ChSystemFsi_impl(); virtual 
~ChSystemFsi_impl(); /// Add an SPH particle given its position, physical properties, velocity, and stress void AddSphMarker(Real4 pos, Real4 rhoPresMu, Real3 vel = mR3(0.0), Real3 tauXxYyZz = mR3(0.0), Real3 tauXyXzYz = mR3(0.0)); /// Resize the simulation data based on the FSI system constructed. void ResizeDataManager(int numNode = 0); std::shared_ptr<NumberOfObjects> numObjects; ///< Number of objects (SPH particles, BCE particles, rigid bodies and such). std::shared_ptr<SphMarkerDataD> sphMarkersD1; ///< Information of SPH particles at state 1 on device std::shared_ptr<SphMarkerDataD> sphMarkersD2; ///< Information of SPH particles at state 2 on device std::shared_ptr<SphMarkerDataD> sortedSphMarkersD; ///< Sorted information of SPH particles at state 1 on device std::shared_ptr<SphMarkerDataH> sphMarkersH; ///< Information of SPH particles on host std::shared_ptr<FsiBodiesDataD> fsiBodiesD1; ///< Information of rigid bodies at state 1 on device std::shared_ptr<FsiBodiesDataD> fsiBodiesD2; ///< Information of rigid bodies at state 2 on device std::shared_ptr<FsiBodiesDataH> fsiBodiesH; ///< Information of rigid bodies at state 1 on host std::shared_ptr<FsiMeshDataD> fsiMeshD; ///< Information of mesh on device std::shared_ptr<FsiMeshDataH> fsiMeshH; ///< Information of mesh on host std::shared_ptr<FsiGeneralData> fsiGeneralData; ///< General FSI data needed in the simulation std::shared_ptr<ProximityDataD> markersProximityD; ///< Information of neighbor search on the device private: void ArrangeDataManager(); void ConstructReferenceArray(); void InitNumObjects(); void CalcNumObjects(); ///< Calculates the number of rigid bodies, flexible bodies, etc. based on the type of particles public: /// Base class of FSI system. friend class ChSystemFsi; }; /// @} fsi_physics } // end namespace fsi } // end namespace chrono #endif
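// Illustrative sketch (assumed, not from the original source): the iterator() members
// declared above are implemented elsewhere in Chrono::FSI; the usual pattern is to zip
// the per-particle vectors in the order fixed by iterTupleSphD (posRad, velMas,
// rhoPresMu, tauXxYyZz, tauXyXzYz). The free helper below is a hypothetical illustration
// of that pattern, not the library's actual implementation.
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

inline chrono::fsi::zipIterSphD
makeSphZipIteratorExample(chrono::fsi::SphMarkerDataD& markers) {
    return thrust::make_zip_iterator(thrust::make_tuple(
        markers.posRadD.begin(),      // Real4: position + characteristic radius
        markers.velMasD.begin(),      // Real3: velocity
        markers.rhoPresMuD.begin(),   // Real4: rho, pressure, mu, type
        markers.tauXxYyZzD.begin(),   // Real3: diagonal shear stress
        markers.tauXyXzYzD.begin())); // Real3: off-diagonal shear stress
}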
#pragma once #include "hoomd/BoxDim.h" #include "hoomd/GPUPartition.cuh" #include "hoomd/HOOMDMath.h" #include "hoomd/Index1D.h" #include "hoomd/RNGIdentifiers.h" #include "hoomd/RandomNumbers.h" #include "hoomd/VectorMath.h" #include "hoomd/hpmc/HPMCCounters.h" #include "hoomd/hpmc/Moves.h" #include <hip/hip_runtime.h> #include "GPUHelpers.cuh" // base data types #include "IntegratorHPMCMonoGPUTypes.cuh" namespace hpmc { namespace gpu { #ifdef __HIPCC__ namespace kernel { //! Propose trial moves template<class Shape, unsigned int dim> __global__ void hpmc_gen_moves(const Scalar4* d_postype, const Scalar4* d_orientation, const Scalar4* d_vel, const unsigned int N, const Index3D ci, const uint3 cell_dim, const Scalar3 ghost_width, const unsigned int num_types, const unsigned int seed, const unsigned int rank, const Scalar* d_d, const Scalar* d_a, const unsigned int move_ratio, const uint64_t timestep, const BoxDim box, const unsigned int select, const Scalar3 ghost_fraction, const bool domain_decomposition, const bool have_auxilliary_variable, Scalar4* d_trial_postype, Scalar4* d_trial_orientation, Scalar4* d_trial_vel, unsigned int* d_trial_move_type, unsigned int* d_reject_out_of_cell, const typename Shape::param_type* d_params) { // load the per type pair parameters into shared memory HIP_DYNAMIC_SHARED(char, s_data) typename Shape::param_type* s_params = (typename Shape::param_type*)(&s_data[0]); Scalar* s_d = (Scalar*)(s_params + num_types); Scalar* s_a = (Scalar*)(s_d + num_types); // copy over parameters one int per thread for fast loads { unsigned int tidx = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z; unsigned int block_size = blockDim.x * blockDim.y * blockDim.z; unsigned int param_size = num_types * sizeof(typename Shape::param_type) / sizeof(int); for (unsigned int cur_offset = 0; cur_offset < param_size; cur_offset += block_size) { if (cur_offset + tidx < param_size) { ((int*)s_params)[cur_offset + tidx] = ((int*)d_params)[cur_offset + tidx]; } } for (unsigned int cur_offset = 0; cur_offset < num_types; cur_offset += block_size) { if (cur_offset + tidx < num_types) { s_a[cur_offset + tidx] = d_a[cur_offset + tidx]; s_d[cur_offset + tidx] = d_d[cur_offset + tidx]; } } } __syncthreads(); // identify the particle that this thread handles unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // return early if we are not handling a particle if (idx >= N) return; // read in the position and orientation of our particle. 
Scalar4 postype_i = d_postype[idx]; Scalar4 orientation_i = make_scalar4(1, 0, 0, 0); unsigned int typ_i = __scalar_as_int(postype_i.w); Shape shape_i(quat<Scalar>(orientation_i), s_params[typ_i]); if (shape_i.hasOrientation()) orientation_i = d_orientation[idx]; shape_i.orientation = quat<Scalar>(orientation_i); vec3<Scalar> pos_i = vec3<Scalar>(postype_i); unsigned int old_cell = computeParticleCell(vec_to_scalar3(pos_i), box, ghost_width, cell_dim, ci, true); // for domain decomposition simulations, we need to leave all particles in the inactive region // alone in order to avoid even more divergence, this is done by setting the move_active flag // overlap checks are still processed, but the final move acceptance will be skipped bool move_active = true; if (domain_decomposition && !isActive(make_scalar3(postype_i.x, postype_i.y, postype_i.z), box, ghost_fraction)) move_active = false; // make the move hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::HPMCMonoTrialMove, timestep, seed), hoomd::Counter(idx, select, rank)); // do not move particles that are outside the boundaries unsigned int reject = old_cell >= ci.getNumElements(); unsigned int move_type_select = hoomd::UniformIntDistribution(0xffff)(rng); bool move_type_translate = !shape_i.hasOrientation() || (move_type_select < move_ratio); if (move_active) { if (move_type_translate) { move_translate(pos_i, rng, s_d[typ_i], dim); // need to reject any move that puts the particle in the inactive region if (domain_decomposition && !isActive(vec_to_scalar3(pos_i), box, ghost_fraction)) move_active = false; } else { move_rotate<dim>(shape_i.orientation, rng, s_a[typ_i]); } } if (move_active && move_type_translate) { // check if the particle remains in its cell Scalar3 xnew_i = make_scalar3(pos_i.x, pos_i.y, pos_i.z); unsigned int new_cell = computeParticleCell(xnew_i, box, ghost_width, cell_dim, ci, true); if (new_cell != old_cell) reject = 1; } if (have_auxilliary_variable) { // generate a new random auxillary variable unsigned int seed_i_new = hoomd::detail::generate_u32(rng); // store it in the velocity .x field Scalar4 vel = d_vel[idx]; vel.x = __int_as_scalar(seed_i_new); d_trial_vel[idx] = vel; } // stash the trial move in global memory d_trial_postype[idx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(typ_i)); d_trial_orientation[idx] = quat_to_scalar4(shape_i.orientation); // 0==inactive, 1==translation, 2==rotation d_trial_move_type[idx] = move_active ? (move_type_translate ? 1 : 2) : 0; // initialize reject flag d_reject_out_of_cell[idx] = reject; } //! 
Kernel to update particle data and statistics after acceptance template<class Shape> __global__ void hpmc_update_pdata(Scalar4* d_postype, Scalar4* d_orientation, Scalar4* d_vel, hpmc_counters_t* d_counters, const unsigned int nwork, const unsigned int offset, const bool have_auxilliary_variable, const Scalar4* d_trial_postype, const Scalar4* d_trial_orientation, const Scalar4* d_trial_vel, const unsigned int* d_trial_move_type, const unsigned int* d_reject, const typename Shape::param_type* d_params) { // determine which update step we are handling unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // shared arrays for per type pair parameters __shared__ unsigned int s_translate_accept_count; __shared__ unsigned int s_translate_reject_count; __shared__ unsigned int s_rotate_accept_count; __shared__ unsigned int s_rotate_reject_count; // initialize the shared memory array for communicating overlaps if (threadIdx.x == 0) { s_translate_accept_count = 0; s_translate_reject_count = 0; s_rotate_accept_count = 0; s_rotate_reject_count = 0; } __syncthreads(); if (idx < nwork) { idx += offset; unsigned int move_type = d_trial_move_type[idx]; bool move_active = move_type > 0; bool move_type_translate = move_type == 1; bool accept = !d_reject[idx]; unsigned int type_i = __scalar_as_int(d_postype[idx].w); Shape shape_i(quat<Scalar>(), d_params[type_i]); bool ignore_stats = shape_i.ignoreStatistics(); // update the data if accepted if (move_active) { if (accept) { // write out the updated position and orientation d_postype[idx] = d_trial_postype[idx]; d_orientation[idx] = d_trial_orientation[idx]; if (have_auxilliary_variable) d_vel[idx] = d_trial_vel[idx]; } if (!ignore_stats && accept && move_type_translate) atomicAdd(&s_translate_accept_count, 1); if (!ignore_stats && accept && !move_type_translate) atomicAdd(&s_rotate_accept_count, 1); if (!ignore_stats && !accept && move_type_translate) atomicAdd(&s_translate_reject_count, 1); if (!ignore_stats && !accept && !move_type_translate) atomicAdd(&s_rotate_reject_count, 1); } } __syncthreads(); // final tally into global mem if (threadIdx.x == 0) { #if (__CUDA_ARCH__ >= 600) atomicAdd_system(&d_counters->translate_accept_count, s_translate_accept_count); atomicAdd_system(&d_counters->translate_reject_count, s_translate_reject_count); atomicAdd_system(&d_counters->rotate_accept_count, s_rotate_accept_count); atomicAdd_system(&d_counters->rotate_reject_count, s_rotate_reject_count); #else atomicAdd(&d_counters->translate_accept_count, s_translate_accept_count); atomicAdd(&d_counters->translate_reject_count, s_translate_reject_count); atomicAdd(&d_counters->rotate_accept_count, s_rotate_accept_count); atomicAdd(&d_counters->rotate_reject_count, s_rotate_reject_count); #endif } } } // end namespace kernel //! 
Kernel driver for kernel::hpmc_gen_moves template<class Shape> void hpmc_gen_moves(const hpmc_args_t& args, const typename Shape::param_type* params) { assert(args.d_postype); assert(args.d_orientation); assert(args.d_d); assert(args.d_a); if (args.dim == 2) { // determine the maximum block size and clamp the input block size down int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_gen_moves<Shape, 2>)); max_block_size = attr.maxThreadsPerBlock; // choose a block size based on the max block size by regs (max_block_size) and include // dynamic shared memory usage unsigned int block_size = min(args.block_size, (unsigned int)max_block_size); size_t shared_bytes = args.num_types * (sizeof(typename Shape::param_type) + 2 * sizeof(Scalar)); if (shared_bytes + attr.sharedSizeBytes >= args.devprop.sharedMemPerBlock) throw std::runtime_error("hpmc::kernel::gen_moves() exceeds shared memory limits"); // setup the grid to run the kernel dim3 threads(block_size, 1, 1); dim3 grid(args.N / block_size + 1, 1, 1); hipLaunchKernelGGL((kernel::hpmc_gen_moves<Shape, 2>), grid, threads, shared_bytes, 0, args.d_postype, args.d_orientation, args.d_vel, args.N, args.ci, args.cell_dim, args.ghost_width, args.num_types, args.seed, args.rank, args.d_d, args.d_a, args.move_ratio, args.timestep, args.box, args.select, args.ghost_fraction, args.domain_decomposition, args.have_auxilliary_variable, args.d_trial_postype, args.d_trial_orientation, args.d_trial_vel, args.d_trial_move_type, args.d_reject_out_of_cell, params); } else { // determine the maximum block size and clamp the input block size down int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_gen_moves<Shape, 3>)); max_block_size = attr.maxThreadsPerBlock; // choose a block size based on the max block size by regs (max_block_size) and include // dynamic shared memory usage unsigned int block_size = min(args.block_size, (unsigned int)max_block_size); size_t shared_bytes = args.num_types * (sizeof(typename Shape::param_type) + 2 * sizeof(Scalar)); if (shared_bytes + attr.sharedSizeBytes >= args.devprop.sharedMemPerBlock) throw std::runtime_error("hpmc::kernel::gen_moves() exceeds shared memory limits"); // setup the grid to run the kernel dim3 threads(block_size, 1, 1); dim3 grid(args.N / block_size + 1, 1, 1); hipLaunchKernelGGL((kernel::hpmc_gen_moves<Shape, 3>), grid, threads, shared_bytes, 0, args.d_postype, args.d_orientation, args.d_vel, args.N, args.ci, args.cell_dim, args.ghost_width, args.num_types, args.seed, args.rank, args.d_d, args.d_a, args.move_ratio, args.timestep, args.box, args.select, args.ghost_fraction, args.domain_decomposition, args.have_auxilliary_variable, args.d_trial_postype, args.d_trial_orientation, args.d_trial_vel, args.d_trial_move_type, args.d_reject_out_of_cell, params); } } //! 
Driver for kernel::hpmc_update_pdata() template<class Shape> void hpmc_update_pdata(const hpmc_update_args_t& args, const typename Shape::param_type* params) { // determine the maximum block size and clamp the input block size down int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_update_pdata<Shape>)); max_block_size = attr.maxThreadsPerBlock; unsigned int block_size = min(args.block_size, (unsigned int)max_block_size); for (int idev = args.gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = args.gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; const unsigned int num_blocks = nwork / block_size + 1; hipLaunchKernelGGL((kernel::hpmc_update_pdata<Shape>), dim3(num_blocks), dim3(block_size), 0, 0, args.d_postype, args.d_orientation, args.d_vel, args.d_counters + idev * args.counters_pitch, nwork, range.first, args.have_auxilliary_variable, args.d_trial_postype, args.d_trial_orientation, args.d_trial_vel, args.d_trial_move_type, args.d_reject, params); } } #endif } // end namespace gpu } // end namespace hpmc
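// Illustrative sketch (assumed, not from the original source): both drivers above are
// templated on the shape class, so HPMC normally emits one device-compiled translation
// unit per shape that explicitly instantiates them. The include path and the choice of
// ShapeSphere below are assumptions for illustration; such a unit must be compiled by
// the device compiler so that __HIPCC__ is defined and the kernel bodies above exist.
#ifdef __HIPCC__
#include "ShapeSphere.h" // assumed path; any shape class exposing param_type works
namespace hpmc {
namespace gpu {
// Explicit instantiations of the two kernel drivers for ShapeSphere.
template void hpmc_gen_moves<ShapeSphere>(const hpmc_args_t& args,
                                          const ShapeSphere::param_type* params);
template void hpmc_update_pdata<ShapeSphere>(const hpmc_update_args_t& args,
                                             const ShapeSphere::param_type* params);
    } // end namespace gpu
} // end namespace hpmc
#endif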
#include "../utils/DeviceUtils.h" #include "../utils/MathOperators.cuh" #include "../utils/Tensor.cuh" #include "../utils/StaticUtils.h" namespace faiss { namespace gpu { template <typename T, int kRowsPerBlock, int kRowUnroll, int kColLoad> __global__ void sumAlongColumns(Tensor<T, 1, true> input, Tensor<T, 2, true> output) { static_assert(kRowsPerBlock % kRowUnroll == 0, "must fit rows"); // blockIdx.x: which chunk of rows we are responsible for updating // blockIdx.y: which chunk of columns we are responsible for // updating int rowStart = blockIdx.x * kRowsPerBlock; int rowEnd = rowStart + kRowsPerBlock; int colStart = blockIdx.y * blockDim.x * kColLoad; // FIXME: if we have exact multiples, don't need this bool endRow = (blockIdx.x == gridDim.x - 1); bool endCol = (blockIdx.y == gridDim.y - 1); if (endRow) { if (output.getSize(0) % kRowsPerBlock == 0) { endRow = false; } } if (endCol) { for (int col = colStart + threadIdx.x; col < input.getSize(0); col += blockDim.x) { T val = input[col]; if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { T out = output[row][col]; out = Math<T>::add(out, val); output[row][col] = out; } } else { T rows[kRowUnroll]; for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { rows[i] = output[row + i][col]; } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { rows[i] = Math<T>::add(rows[i], val); } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { output[row + i][col] = rows[i]; } } } } } else { int col = colStart + threadIdx.x; T val[kColLoad]; #pragma unroll for (int i = 0; i < kColLoad; ++i) { val[i] = input[col + i * blockDim.x]; } if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { #pragma unroll for (int i = 0; i < kColLoad; ++i) { T out = output[row][col + i * blockDim.x]; out = Math<T>::add(out, val[i]); output[row][col + i * blockDim.x] = out; } } } else { T rows[kRowUnroll * kColLoad]; for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { rows[i * kColLoad + j] = output[row + i][col + j * blockDim.x]; } } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { rows[i * kColLoad + j] = Math<T>::add(rows[i * kColLoad + j], val[j]); } } #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { output[row + i][col + j * blockDim.x] = rows[i * kColLoad + j]; } } } } } } template <typename T, int kRowsPerBlock, int kRowUnroll, int kColLoad> __global__ void assignAlongColumns(Tensor<T, 1, true> input, Tensor<T, 2, true> output) { static_assert(kRowsPerBlock % kRowUnroll == 0, "must fit rows"); // blockIdx.x: which chunk of rows we are responsible for updating // blockIdx.y: which chunk of columns we are responsible for // updating int rowStart = blockIdx.x * kRowsPerBlock; int rowEnd = rowStart + kRowsPerBlock; int colStart = blockIdx.y * blockDim.x * kColLoad; // FIXME: if we have exact multiples, don't need this bool endRow = (blockIdx.x == gridDim.x - 1); bool endCol = (blockIdx.y == gridDim.y - 1); if (endRow) { if (output.getSize(0) % kRowsPerBlock == 0) { endRow = false; } } if (endCol) { for (int col = colStart + threadIdx.x; col < input.getSize(0); col += blockDim.x) { T val = input[col]; if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { output[row][col] = val; } } else { for (int row = rowStart; row < rowEnd; 
row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { output[row + i][col] = val; } } } } } else { int col = colStart + threadIdx.x; T val[kColLoad]; #pragma unroll for (int i = 0; i < kColLoad; ++i) { val[i] = input[col + i * blockDim.x]; } if (endRow) { for (int row = rowStart; row < output.getSize(0); ++row) { #pragma unroll for (int i = 0; i < kColLoad; ++i) { output[row][col + i * blockDim.x] = val[i]; } } } else { for (int row = rowStart; row < rowEnd; row += kRowUnroll) { #pragma unroll for (int i = 0; i < kRowUnroll; ++i) { #pragma unroll for (int j = 0; j < kColLoad; ++j) { output[row + i][col + j * blockDim.x] = val[j]; } } } } } } template <typename T, typename TVec> __global__ void sumAlongRows(Tensor<T, 1, true> input, Tensor<TVec, 2, true> output) { __shared__ T sval; int row = blockIdx.x; if (threadIdx.x == 0) { sval = input[row]; } __syncthreads(); T val = sval; // FIXME: speed up for (int i = threadIdx.x; i < output.getSize(1); i += blockDim.x) { TVec out = output[row][i]; out = Math<TVec>::add(out, val); output[row][i] = out; } } template <typename T, typename TVec> void runSumAlongColumns(Tensor<T, 1, true>& input, Tensor<T, 2, true>& output, cudaStream_t stream) { FAISS_ASSERT(input.getSize(0) == output.getSize(1)); int threadsPerBlock = 256; constexpr int kRowUnroll = 4; constexpr int kRowsPerBlock = kRowUnroll * 4; constexpr int kColLoad = 4; auto block = dim3(threadsPerBlock); if (input.template canCastResize<TVec>() && output.template canCastResize<TVec>()) { auto inputV = input.template castResize<TVec>(); auto outputV = output.template castResize<TVec>(); auto grid = dim3(utils::divUp(outputV.getSize(0), kRowsPerBlock), utils::divUp(outputV.getSize(1), threadsPerBlock * kColLoad)); sumAlongColumns<TVec, kRowsPerBlock, kRowUnroll, kColLoad> <<<grid, block, 0, stream>>>(inputV, outputV); } else { auto grid = dim3(utils::divUp(output.getSize(0), kRowsPerBlock), utils::divUp(output.getSize(1), threadsPerBlock * kColLoad)); sumAlongColumns<T, kRowsPerBlock, kRowUnroll, kColLoad> <<<grid, block, 0, stream>>>(input, output); } CUDA_TEST_ERROR(); } void runSumAlongColumns(Tensor<float, 1, true>& input, Tensor<float, 2, true>& output, cudaStream_t stream) { runSumAlongColumns<float, float4>(input, output, stream); } #ifdef FAISS_USE_FLOAT16 void runSumAlongColumns(Tensor<half, 1, true>& input, Tensor<half, 2, true>& output, cudaStream_t stream) { runSumAlongColumns<half, half2>(input, output, stream); } #endif template <typename T, typename TVec> void runAssignAlongColumns(Tensor<T, 1, true>& input, Tensor<T, 2, true>& output, cudaStream_t stream) { FAISS_ASSERT(input.getSize(0) == output.getSize(1)); int threadsPerBlock = 256; constexpr int kRowUnroll = 4; constexpr int kRowsPerBlock = kRowUnroll * 4; constexpr int kColLoad = 4; auto block = dim3(threadsPerBlock); if (input.template canCastResize<TVec>() && output.template canCastResize<TVec>()) { auto inputV = input.template castResize<TVec>(); auto outputV = output.template castResize<TVec>(); auto grid = dim3(utils::divUp(outputV.getSize(0), kRowsPerBlock), utils::divUp(outputV.getSize(1), threadsPerBlock * kColLoad)); assignAlongColumns<TVec, kRowsPerBlock, kRowUnroll, kColLoad> <<<grid, block, 0, stream>>>(inputV, outputV); } else { auto grid = dim3(utils::divUp(output.getSize(0), kRowsPerBlock), utils::divUp(output.getSize(1), threadsPerBlock * kColLoad)); assignAlongColumns<T, kRowsPerBlock, kRowUnroll, kColLoad> <<<grid, block, 0, stream>>>(input, output); } CUDA_TEST_ERROR(); } void 
runAssignAlongColumns(Tensor<float, 1, true>& input, Tensor<float, 2, true>& output, cudaStream_t stream) { runAssignAlongColumns<float, float4>(input, output, stream); } #ifdef FAISS_USE_FLOAT16 void runAssignAlongColumns(Tensor<half, 1, true>& input, Tensor<half, 2, true>& output, cudaStream_t stream) { runAssignAlongColumns<half, half2>(input, output, stream); } #endif template <typename T, typename TVec> void runSumAlongRows(Tensor<T, 1, true>& input, Tensor<T, 2, true>& output, cudaStream_t stream) { FAISS_ASSERT(input.getSize(0) == output.getSize(0)); if (output.template canCastResize<TVec>()) { auto outputV = output.template castResize<TVec>(); int threadsPerBlock = std::min(outputV.getSize(1), getMaxThreadsCurrentDevice()); auto grid = dim3(outputV.getSize(0)); auto block = dim3(threadsPerBlock); sumAlongRows<T, TVec><<<grid, block, 0, stream>>>(input, outputV); } else { int threadsPerBlock = std::min(output.getSize(1), getMaxThreadsCurrentDevice()); auto grid = dim3(output.getSize(0)); auto block = dim3(threadsPerBlock); sumAlongRows<T, T><<<grid, block, 0, stream>>>(input, output); } CUDA_TEST_ERROR(); } void runSumAlongRows(Tensor<float, 1, true>& input, Tensor<float, 2, true>& output, cudaStream_t stream) { runSumAlongRows<float, float4>(input, output, stream); } #ifdef FAISS_USE_FLOAT16 void runSumAlongRows(Tensor<half, 1, true>& input, Tensor<half, 2, true>& output, cudaStream_t stream) { runSumAlongRows<half, half2>(input, output, stream); } #endif } } // namespace
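// Illustrative sketch (assumed, not from the original source): minimal host-side use of
// runSumAlongColumns, broadcast-adding a length-d vector to every row of an m x d
// matrix (output[i][j] += input[j]). Wrapping raw device pointers in Tensor views via
// the pointer + size-list constructor is an assumption about the surrounding code base.
#include <cuda_runtime.h>

namespace faiss { namespace gpu {

void exampleBroadcastSumAlongColumns(float* d_vec /* [d] */,
                                     float* d_mat /* [m x d], row-major */,
                                     int m, int d, cudaStream_t stream) {
    // Non-owning views over caller-allocated device memory.
    Tensor<float, 1, true> input(d_vec, {d});
    Tensor<float, 2, true> output(d_mat, {m, d});
    runSumAlongColumns(input, output, stream); // adds input[j] to each row of output
}

} } // namespace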
#include "common.h" #include "gpu_util.cuh" using std::max; using std::min; template <typename Dtype> __global__ void PSROIPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, // total number of channels for one image, e.g. C*N*N const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, // the number of class, e.g. C Dtype* top_data, int* mapping_channel) { // DEBUG //printf("[INIT c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_rois[0], bottom_rois[1], bottom_rois[2], bottom_rois[3], bottom_rois[4]); // DEBUG //printf("[INIT-DATA c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_data[0], bottom_data[1], bottom_data[2], bottom_data[3], bottom_data[4]); CUDA_KERNEL_LOOP(index, nthreads){ // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; // DEBUG //printf("[c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_rois[0], bottom_rois[1], bottom_rois[2], bottom_rois[3], bottom_rois[4]); // DEBUG //printf("spatial_scale=%.3f\n", spatial_scale); int roi_batch_ind = bottom_rois[0] - 1; // -1 is due to the Lua/C conversion //Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; //Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; //Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; //Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(bottom_rois[3] + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(bottom_rois[4] + 1.) 
* spatial_scale; bool roi_is_empty = (roi_end_h <= roi_start_h) || (roi_end_w <= roi_start_w); // DEBUG //printf("[hs=%.2f,ws=%.2f,he=%.2f,we=%.2f]\n", roi_start_h, roi_start_w, roi_end_h, roi_end_w); // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = roi_is_empty || (hend <= hstart) || (wend <= wstart); // DEBUG //printf("[%d,%d,%d,%d]\n", wstart+1, hstart+1, wend, hend); int gw = pw; int gh = ph; int c = ctop * pooled_width * pooled_height + gh * pooled_width + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } // DEBUG //if (is_empty) { // printf("empty\n"); //} else { // printf("non-empty\n"); //} Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. : out_sum/bin_area; mapping_channel[index] = c; } } extern "C" void PSROIPooling_updateOutput(THCState *state, THCudaTensor *output, THCudaTensor *indices, THCudaTensor *data, THCudaTensor* rois, int height, int width, int pooled_height, int pooled_width, int output_dim, double spatial_scale) { // DEBUG //printf("PSROIPooling_updateOutput, spatial_scale=%.3f\n", spatial_scale); //printf("PSROIPooling_updateOutput, height=%d\n", height); //printf("PSROIPooling_updateOutput, width=%d\n", width); //printf("PSROIPooling_updateOutput, pooled_height=%d\n", pooled_height); //printf("PSROIPooling_updateOutput, pooled_width=%d\n", pooled_width); //printf("PSROIPooling_updateOutput, output_dim=%d\n", output_dim); THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; int channels = pooled_height * pooled_width * output_dim; THCudaTensor_zero(state, output); THCudaTensor_zero(state, indices); THCudaTensor_resize4d(state, output, num_rois, output_dim, pooled_height, pooled_width); THCudaTensor_resize4d(state, indices, num_rois, output_dim, pooled_height, pooled_width); long count = THCudaTensor_nElement(state, output); PSROIPoolingForward<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >> >(count, THCudaTensor_data(state, data), spatial_scale, channels, height, width, pooled_height, pooled_width, THCudaTensor_data(state, rois), output_dim, THCudaTensor_data(state, output), (int*)THCudaTensor_data(state, indices)); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in PSROIPooling_updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void PSROIPoolingBackwardAtomic( const 
int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0] - 1; // -1 is due to the Lua/C conversion //Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; //Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; //Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; //Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(bottom_rois[3] + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(bottom_rois[4] + 1.) * spatial_scale; bool roi_is_empty = (roi_end_h <= roi_start_h) || (roi_end_w <= roi_start_w); // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = roi_is_empty || (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } extern "C" void PSROIPooling_updateGradInputAtomic(THCState *state, THCudaTensor *gradInput, THCudaTensor *gradOutput, THCudaTensor *data, THCudaTensor* rois, THCudaTensor *indices, int height, int width, int pooled_height, int pooled_width, int output_dim, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; int channels = pooled_height * pooled_width * output_dim; THCudaTensor_resizeAs(state, gradInput, data); THCudaTensor_zero(state, gradInput); long count = THCudaTensor_nElement(state, gradOutput); PSROIPoolingBackwardAtomic<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >> >(count, THCudaTensor_data(state, gradOutput), (int*)THCudaTensor_data(state, indices), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, THCudaTensor_data(state, gradInput), THCudaTensor_data(state, rois)); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in PSROIPooling_updateGradInputAtomic: %s\n", cudaGetErrorString(err)); THError("aborting"); } }
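The forward and backward kernels above share the same per-bin arithmetic: scale the ROI into feature-map coordinates, force a minimum extent of 0.1 to avoid empty bins, split the ROI into pooled_height x pooled_width bins, and clip each bin to the feature map. A host-side mirror of that arithmetic can be handy for spot-checking a single (ph, pw, ctop) bin against the kernels; the helper below is an editor's sketch, and the struct and function names are hypothetical.

// Editor's illustrative sketch, not part of the original source.
#include <algorithm>
#include <cmath>

struct PSRoiBin { int hstart, hend, wstart, wend, c; };

inline PSRoiBin psRoiBin(const float roi[5], float spatial_scale,
                         int height, int width,
                         int pooled_height, int pooled_width,
                         int ph, int pw, int ctop) {
    float roi_start_w = roi[1] * spatial_scale;
    float roi_start_h = roi[2] * spatial_scale;
    float roi_end_w   = (roi[3] + 1.f) * spatial_scale;
    float roi_end_h   = (roi[4] + 1.f) * spatial_scale;
    float roi_w = std::max(roi_end_w - roi_start_w, 0.1f);   // avoid zero-sized ROIs
    float roi_h = std::max(roi_end_h - roi_start_h, 0.1f);
    float bin_h = roi_h / pooled_height;
    float bin_w = roi_w / pooled_width;
    PSRoiBin b;
    b.hstart = std::min(std::max((int)std::floor(ph * bin_h + roi_start_h), 0), height);
    b.hend   = std::min(std::max((int)std::ceil((ph + 1) * bin_h + roi_start_h), 0), height);
    b.wstart = std::min(std::max((int)std::floor(pw * bin_w + roi_start_w), 0), width);
    b.wend   = std::min(std::max((int)std::ceil((pw + 1) * bin_w + roi_start_w), 0), width);
    // position-sensitive channel: output channel ctop reads its own (ph, pw) score map
    b.c = (ctop * pooled_height + ph) * pooled_width + pw;
    return b;
}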
#include <cuml/cluster/kmeans_mg.hpp> #include <cuml/common/logger.hpp> #include <cuml/metrics/metrics.hpp> #include <ml_cuda_utils.h> #include <common/tensor.hpp> #include <linalg/reduce_cols_by_key.cuh> #include <linalg/reduce_rows_by_key.cuh> #include <matrix/gather.cuh> #include <random/permute.cuh> #include <raft/cudart_utils.h> #include <raft/comms/comms.hpp> #include <raft/distance/fused_l2_nn.hpp> #include <raft/linalg/binary_op.cuh> #include <raft/linalg/matrix_vector_op.cuh> #include <raft/linalg/mean_squared_error.cuh> #include <raft/linalg/reduce.cuh> #include <raft/random/rng.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <random/permute.cuh> #include <random> #include <thrust/equal.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/scan.h> #include <ml_cuda_utils.h> #include <common/tensor.hpp> #include <cuml/cluster/kmeans_mg.hpp> #include <cuml/common/logger.hpp> #include <cuml/metrics/metrics.hpp> #include <linalg/reduce_cols_by_key.cuh> #include <linalg/reduce_rows_by_key.cuh> #include <matrix/gather.cuh> #include <fstream> #include <numeric> #include <random> #include <vector> namespace ML { #define LOG(handle, fmt, ...) \ do { \ bool isRoot = true; \ if (handle.comms_initialized()) { \ const auto& comm = handle.get_comms(); \ const int my_rank = comm.get_rank(); \ isRoot = my_rank == 0; \ } \ if (isRoot) { CUML_LOG_DEBUG(fmt, ##__VA_ARGS__); } \ } while (0) namespace kmeans { namespace detail { template <typename LabelT, typename DataT> struct FusedL2NNReduceOp { LabelT offset; FusedL2NNReduceOp(LabelT _offset) : offset(_offset){}; typedef typename cub::KeyValuePair<LabelT, DataT> KVP; DI void operator()(LabelT rit, KVP* out, const KVP& other) { if (other.value < out->value) { out->key = offset + other.key; out->value = other.value; } } DI void operator()(LabelT rit, DataT* out, const KVP& other) { if (other.value < *out) { *out = other.value; } } DI void init(DataT* out, DataT maxVal) { *out = maxVal; } DI void init(KVP* out, DataT maxVal) { out->key = -1; out->value = maxVal; } }; template <typename DataT> struct SamplingOp { DataT* rnd; int* flag; DataT cluster_cost; double oversampling_factor; int n_clusters; CUB_RUNTIME_FUNCTION __forceinline__ SamplingOp(DataT c, double l, int k, DataT* rand, int* ptr) : cluster_cost(c), oversampling_factor(l), n_clusters(k), rnd(rand), flag(ptr) { } __host__ __device__ __forceinline__ bool operator()( const cub::KeyValuePair<ptrdiff_t, DataT>& a) const { DataT prob_threshold = (DataT)rnd[a.key]; DataT prob_x = ((oversampling_factor * n_clusters * a.value) / cluster_cost); return !flag[a.key] && (prob_x > prob_threshold); } }; template <typename IndexT, typename DataT> struct KeyValueIndexOp { __host__ __device__ __forceinline__ IndexT operator()(const cub::KeyValuePair<IndexT, DataT>& a) const { return a.key; } }; template <typename CountT> CountT getDataBatchSize(const KMeansParams& params, CountT n_samples) { auto minVal = std::min(params.batch_samples, n_samples); return (minVal == 0) ? n_samples : minVal; } template <typename CountT> CountT getCentroidsBatchSize(const KMeansParams& params, CountT n_local_clusters) { auto minVal = std::min(params.batch_centroids, n_local_clusters); return (minVal == 0) ? 
n_local_clusters : minVal; } // Computes the intensity histogram from a sequence of labels template <typename SampleIteratorT, typename CounterT> void countLabels(const raft::handle_t& handle, SampleIteratorT labels, CounterT* count, int n_samples, int n_clusters, rmm::device_uvector<char>& workspace, cudaStream_t stream) { int num_levels = n_clusters + 1; int lower_level = 0; int upper_level = n_clusters; size_t temp_storage_bytes = 0; CUDA_CHECK(cub::DeviceHistogram::HistogramEven(nullptr, temp_storage_bytes, labels, count, num_levels, lower_level, upper_level, n_samples, stream)); workspace.resize(temp_storage_bytes, stream); CUDA_CHECK(cub::DeviceHistogram::HistogramEven(workspace.data(), temp_storage_bytes, labels, count, num_levels, lower_level, upper_level, n_samples, stream)); } template <typename DataT, typename IndexT> Tensor<DataT, 2, IndexT> sampleCentroids(const raft::handle_t& handle, Tensor<DataT, 2, IndexT>& X, Tensor<DataT, 1, IndexT>& minClusterDistance, Tensor<int, 1, IndexT>& isSampleCentroid, typename kmeans::detail::SamplingOp<DataT>& select_op, rmm::device_uvector<char>& workspace, cudaStream_t stream) { int n_local_samples = X.getSize(0); int n_features = X.getSize(1); Tensor<int, 1> nSelected({1}, stream); cub::ArgIndexInputIterator<DataT*> ip_itr(minClusterDistance.data()); Tensor<cub::KeyValuePair<ptrdiff_t, DataT>, 1> sampledMinClusterDistance({n_local_samples}, stream); size_t temp_storage_bytes = 0; CUDA_CHECK(cub::DeviceSelect::If(nullptr, temp_storage_bytes, ip_itr, sampledMinClusterDistance.data(), nSelected.data(), n_local_samples, select_op, stream)); workspace.resize(temp_storage_bytes, stream); CUDA_CHECK(cub::DeviceSelect::If(workspace.data(), temp_storage_bytes, ip_itr, sampledMinClusterDistance.data(), nSelected.data(), n_local_samples, select_op, stream)); int nPtsSampledInRank = 0; raft::copy(&nPtsSampledInRank, nSelected.data(), nSelected.numElements(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); int* rawPtr_isSampleCentroid = isSampleCentroid.data(); thrust::for_each_n(handle.get_thrust_policy(), sampledMinClusterDistance.begin(), nPtsSampledInRank, [=] __device__(cub::KeyValuePair<ptrdiff_t, DataT> val) { rawPtr_isSampleCentroid[val.key] = 1; }); Tensor<DataT, 2, IndexT> inRankCp({nPtsSampledInRank, n_features}, stream); MLCommon::Matrix::gather( X.data(), X.getSize(1), X.getSize(0), sampledMinClusterDistance.data(), nPtsSampledInRank, inRankCp.data(), [=] __device__(cub::KeyValuePair<ptrdiff_t, DataT> val) { // MapTransformOp return val.key; }, stream); return inRankCp; } template <typename DataT, typename IndexT, typename ReductionOpT> void computeClusterCost(const raft::handle_t& handle, Tensor<DataT, 1, IndexT>& minClusterDistance, rmm::device_uvector<char>& workspace, DataT* clusterCost, ReductionOpT reduction_op, cudaStream_t stream) { size_t temp_storage_bytes = 0; CUDA_CHECK(cub::DeviceReduce::Reduce(nullptr, temp_storage_bytes, minClusterDistance.data(), clusterCost, minClusterDistance.numElements(), reduction_op, DataT(), stream)); workspace.resize(temp_storage_bytes, stream); CUDA_CHECK(cub::DeviceReduce::Reduce(workspace.data(), temp_storage_bytes, minClusterDistance.data(), clusterCost, minClusterDistance.numElements(), reduction_op, DataT(), stream)); } // calculate pairwise distance between 'dataset[n x d]' and 'centroids[k x d]', // result will be stored in 'pairwiseDistance[n x k]' template <typename DataT, typename IndexT> void pairwise_distance(const raft::handle_t& handle, Tensor<DataT, 2, IndexT>& X, Tensor<DataT, 2, 
IndexT>& centroids, Tensor<DataT, 2, IndexT>& pairwiseDistance, rmm::device_uvector<char>& workspace, raft::distance::DistanceType metric, cudaStream_t stream) { auto n_samples = X.getSize(0); auto n_features = X.getSize(1); auto n_clusters = centroids.getSize(0); ASSERT(X.getSize(1) == centroids.getSize(1), "# features in dataset and centroids are different (must be same)"); ML::Metrics::pairwise_distance(handle, X.data(), centroids.data(), pairwiseDistance.data(), n_samples, n_clusters, n_features, metric); } // Calculates a <key, value> pair for every sample in input 'X' where key is an // index to an sample in 'centroids' (index of the nearest centroid) and 'value' // is the distance between the sample and the 'centroid[key]' template <typename DataT, typename IndexT> void minClusterAndDistance( const raft::handle_t& handle, const KMeansParams& params, Tensor<DataT, 2, IndexT>& X, Tensor<DataT, 2, IndexT>& centroids, Tensor<cub::KeyValuePair<IndexT, DataT>, 1, IndexT>& minClusterAndDistance, Tensor<DataT, 1, IndexT>& L2NormX, rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf, rmm::device_uvector<char>& workspace, raft::distance::DistanceType metric, cudaStream_t stream) { auto n_samples = X.getSize(0); auto n_features = X.getSize(1); auto n_clusters = centroids.getSize(0); auto dataBatchSize = kmeans::detail::getDataBatchSize(params, n_samples); auto centroidsBatchSize = kmeans::detail::getCentroidsBatchSize(params, n_clusters); if (metric == raft::distance::DistanceType::L2Expanded || metric == raft::distance::DistanceType::L2SqrtExpanded) { L2NormBuf_OR_DistBuf.resize(n_clusters, stream); raft::linalg::rowNorm(L2NormBuf_OR_DistBuf.data(), centroids.data(), centroids.getSize(1), centroids.getSize(0), raft::linalg::L2Norm, true, stream); } else { L2NormBuf_OR_DistBuf.resize(dataBatchSize * centroidsBatchSize, stream); } // Note - pairwiseDistance and centroidsNorm share the same buffer // centroidsNorm [n_clusters] - tensor wrapper around centroids L2 Norm Tensor<DataT, 1> centroidsNorm(L2NormBuf_OR_DistBuf.data(), {n_clusters}); // pairwiseDistance[ns x nc] - tensor wrapper around the distance buffer Tensor<DataT, 2, IndexT> pairwiseDistance(L2NormBuf_OR_DistBuf.data(), {dataBatchSize, centroidsBatchSize}); cub::KeyValuePair<IndexT, DataT> initial_value(0, std::numeric_limits<DataT>::max()); thrust::fill(handle.get_thrust_policy(), minClusterAndDistance.begin(), minClusterAndDistance.end(), initial_value); // tile over the input dataset for (auto dIdx = 0; dIdx < n_samples; dIdx += dataBatchSize) { // # of samples for the current batch auto ns = std::min(dataBatchSize, n_samples - dIdx); // datasetView [ns x n_features] - view representing the current batch of // input dataset auto datasetView = X.template view<2>({ns, n_features}, {dIdx, 0}); // minClusterAndDistanceView [ns x n_clusters] auto minClusterAndDistanceView = minClusterAndDistance.template view<1>({ns}, {dIdx}); auto L2NormXView = L2NormX.template view<1>({ns}, {dIdx}); // tile over the centroids for (auto cIdx = 0; cIdx < n_clusters; cIdx += centroidsBatchSize) { // # of centroids for the current batch auto nc = std::min(centroidsBatchSize, n_clusters - cIdx); // centroidsView [nc x n_features] - view representing the current batch // of centroids auto centroidsView = centroids.template view<2>({nc, n_features}, {cIdx, 0}); if (metric == raft::distance::DistanceType::L2Expanded || metric == raft::distance::DistanceType::L2SqrtExpanded) { auto centroidsNormView = centroidsNorm.template view<1>({nc}, {cIdx}); 
workspace.resize((sizeof(int)) * ns, stream); FusedL2NNReduceOp<IndexT, DataT> redOp(cIdx); raft::distance::KVPMinReduce<IndexT, DataT> pairRedOp; raft::distance::fusedL2NN<DataT, cub::KeyValuePair<IndexT, DataT>, IndexT>( minClusterAndDistanceView.data(), datasetView.data(), centroidsView.data(), L2NormXView.data(), centroidsNormView.data(), ns, nc, n_features, (void*)workspace.data(), redOp, pairRedOp, (metric == raft::distance::DistanceType::L2Expanded) ? false : true, false, stream); } else { // pairwiseDistanceView [ns x nc] - view representing the pairwise // distance for current batch auto pairwiseDistanceView = pairwiseDistance.template view<2>({ns, nc}, {0, 0}); // calculate pairwise distance between current tile of cluster centroids // and input dataset kmeans::detail::pairwise_distance( handle, datasetView, centroidsView, pairwiseDistanceView, workspace, metric, stream); // argmin reduction returning <index, value> pair // calculates the closest centroid and the distance to the closest // centroid raft::linalg::coalescedReduction( minClusterAndDistanceView.data(), pairwiseDistanceView.data(), pairwiseDistanceView.getSize(1), pairwiseDistanceView.getSize(0), initial_value, stream, true, [=] __device__(const DataT val, const IndexT i) { cub::KeyValuePair<IndexT, DataT> pair; pair.key = cIdx + i; pair.value = val; return pair; }, [=] __device__(cub::KeyValuePair<IndexT, DataT> a, cub::KeyValuePair<IndexT, DataT> b) { return (b.value < a.value) ? b : a; }, [=] __device__(cub::KeyValuePair<IndexT, DataT> pair) { return pair; }); } } } } template <typename DataT, typename IndexT> void minClusterDistance(const raft::handle_t& handle, const KMeansParams& params, Tensor<DataT, 2, IndexT>& X, Tensor<DataT, 2, IndexT>& centroids, Tensor<DataT, 1, IndexT>& minClusterDistance, Tensor<DataT, 1, IndexT>& L2NormX, rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf, rmm::device_uvector<char>& workspace, raft::distance::DistanceType metric, cudaStream_t stream) { auto n_samples = X.getSize(0); auto n_features = X.getSize(1); auto n_clusters = centroids.getSize(0); auto dataBatchSize = kmeans::detail::getDataBatchSize(params, n_samples); auto centroidsBatchSize = kmeans::detail::getCentroidsBatchSize(params, n_clusters); if (metric == raft::distance::DistanceType::L2Expanded || metric == raft::distance::DistanceType::L2SqrtExpanded) { L2NormBuf_OR_DistBuf.resize(n_clusters, stream); raft::linalg::rowNorm(L2NormBuf_OR_DistBuf.data(), centroids.data(), centroids.getSize(1), centroids.getSize(0), raft::linalg::L2Norm, true, stream); } else { L2NormBuf_OR_DistBuf.resize(dataBatchSize * centroidsBatchSize, stream); } // Note - pairwiseDistance and centroidsNorm share the same buffer // centroidsNorm [n_clusters] - tensor wrapper around centroids L2 Norm Tensor<DataT, 1> centroidsNorm(L2NormBuf_OR_DistBuf.data(), {n_clusters}); // pairwiseDistance[ns x nc] - tensor wrapper around the distance buffer Tensor<DataT, 2, IndexT> pairwiseDistance(L2NormBuf_OR_DistBuf.data(), {dataBatchSize, centroidsBatchSize}); thrust::fill(handle.get_thrust_policy(), minClusterDistance.begin(), minClusterDistance.end(), std::numeric_limits<DataT>::max()); // tile over the input data and calculate distance matrix [n_samples x // n_clusters] for (int dIdx = 0; dIdx < n_samples; dIdx += dataBatchSize) { // # of samples for the current batch auto ns = std::min(dataBatchSize, n_samples - dIdx); // datasetView [ns x n_features] - view representing the current batch of // input dataset auto datasetView = X.template view<2>({ns, 
n_features}, {dIdx, 0}); // minClusterDistanceView [ns x n_clusters] auto minClusterDistanceView = minClusterDistance.template view<1>({ns}, {dIdx}); auto L2NormXView = L2NormX.template view<1>({ns}, {dIdx}); // tile over the centroids for (auto cIdx = 0; cIdx < n_clusters; cIdx += centroidsBatchSize) { // # of centroids for the current batch auto nc = std::min(centroidsBatchSize, n_clusters - cIdx); // centroidsView [nc x n_features] - view representing the current batch // of centroids auto centroidsView = centroids.template view<2>({nc, n_features}, {cIdx, 0}); if (metric == raft::distance::DistanceType::L2Expanded || metric == raft::distance::DistanceType::L2SqrtExpanded) { auto centroidsNormView = centroidsNorm.template view<1>({nc}, {cIdx}); workspace.resize((sizeof(int)) * ns, stream); FusedL2NNReduceOp<IndexT, DataT> redOp(cIdx); raft::distance::KVPMinReduce<IndexT, DataT> pairRedOp; raft::distance::fusedL2NN<DataT, DataT, IndexT>( minClusterDistanceView.data(), datasetView.data(), centroidsView.data(), L2NormXView.data(), centroidsNormView.data(), ns, nc, n_features, (void*)workspace.data(), redOp, pairRedOp, (metric == raft::distance::DistanceType::L2Expanded) ? false : true, false, stream); } else { // pairwiseDistanceView [ns x nc] - view representing the pairwise // distance for current batch auto pairwiseDistanceView = pairwiseDistance.template view<2>({ns, nc}, {0, 0}); // calculate pairwise distance between current tile of cluster centroids // and input dataset kmeans::detail::pairwise_distance( handle, datasetView, centroidsView, pairwiseDistanceView, workspace, metric, stream); raft::linalg::coalescedReduction( minClusterDistanceView.data(), pairwiseDistanceView.data(), pairwiseDistanceView.getSize(1), pairwiseDistanceView.getSize(0), std::numeric_limits<DataT>::max(), stream, true, [=] __device__(DataT val, int i) { // MainLambda return val; }, [=] __device__(DataT a, DataT b) { // ReduceLambda return (b < a) ? b : a; }, [=] __device__(DataT val) { // FinalLambda return val; }); } } } } // shuffle and randomly select 'n_samples_to_gather' from input 'in' and stores // in 'out' does not modify the input template <typename DataT, typename IndexT> void shuffleAndGather(const raft::handle_t& handle, const Tensor<DataT, 2, IndexT>& in, Tensor<DataT, 2, IndexT>& out, size_t n_samples_to_gather, int seed, cudaStream_t stream, rmm::device_uvector<char>* workspace = nullptr) { auto n_samples = in.getSize(0); auto n_features = in.getSize(1); Tensor<IndexT, 1> indices({n_samples}, stream); if (workspace) { // shuffle indices on device using ml-prims MLCommon::Random::permute<DataT>( indices.data(), nullptr, nullptr, in.getSize(1), in.getSize(0), true, stream); } else { // shuffle indices on host and copy to device... 
std::vector<IndexT> ht_indices(n_samples); std::iota(ht_indices.begin(), ht_indices.end(), 0); std::mt19937 gen(seed); std::shuffle(ht_indices.begin(), ht_indices.end(), gen); raft::copy(indices.data(), ht_indices.data(), indices.numElements(), stream); } MLCommon::Matrix::gather(in.data(), in.getSize(1), in.getSize(0), indices.data(), n_samples_to_gather, out.data(), stream); } template <typename DataT, typename IndexT> void countSamplesInCluster(const raft::handle_t& handle, const KMeansParams& params, Tensor<DataT, 2, IndexT>& X, Tensor<DataT, 1, IndexT>& L2NormX, Tensor<DataT, 2, IndexT>& centroids, rmm::device_uvector<char>& workspace, raft::distance::DistanceType metric, Tensor<DataT, 1, IndexT>& sampleCountInCluster, cudaStream_t stream) { auto n_samples = X.getSize(0); auto n_features = X.getSize(1); auto n_clusters = centroids.getSize(0); // stores (key, value) pair corresponding to each sample where // - key is the index of nearest cluster // - value is the distance to the nearest cluster Tensor<cub::KeyValuePair<IndexT, DataT>, 1, IndexT> minClusterAndDistance({n_samples}, stream); // temporary buffer to store distance matrix, destructor releases the resource rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream); // computes minClusterAndDistance[0:n_samples) where minClusterAndDistance[i] // is a <key, value> pair where // 'key' is index to an sample in 'centroids' (index of the nearest // centroid) and 'value' is the distance between the sample 'X[i]' and the // 'centroid[key]' kmeans::detail::minClusterAndDistance(handle, params, X, centroids, minClusterAndDistance, L2NormX, L2NormBuf_OR_DistBuf, workspace, metric, stream); // Using TransformInputIteratorT to dereference an array of cub::KeyValuePair // and converting them to just return the Key to be used in reduce_rows_by_key // prims kmeans::detail::KeyValueIndexOp<IndexT, DataT> conversion_op; cub::TransformInputIterator<IndexT, kmeans::detail::KeyValueIndexOp<IndexT, DataT>, cub::KeyValuePair<IndexT, DataT>*> itr(minClusterAndDistance.data(), conversion_op); // count # of samples in each cluster kmeans::detail::countLabels( handle, itr, sampleCountInCluster.data(), n_samples, n_clusters, workspace, stream); } /* * @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm. * @note This is the algorithm described in * "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S. * ACM-SIAM symposium on Discrete algorithms. 
* * Scalable kmeans++ pseudocode * 1: C = sample a point uniformly at random from X * 2: while |C| < k * 3: Sample x in X with probability p_x = d^2(x, C) / phi_X (C) * 4: C = C U {x} * 5: end for */ template <typename DataT, typename IndexT> void kmeansPlusPlus(const raft::handle_t& handle, const KMeansParams& params, Tensor<DataT, 2, IndexT>& X, raft::distance::DistanceType metric, rmm::device_uvector<char>& workspace, rmm::device_uvector<DataT>& centroidsRawData, cudaStream_t stream) { auto n_samples = X.getSize(0); auto n_features = X.getSize(1); auto n_clusters = params.n_clusters; // number of seeding trials for each center (except the first) auto n_trials = 2 + static_cast<int>(std::ceil(log(n_clusters))); LOG(handle, "Run sequential k-means++ to select %d centroids from %d input samples " "(%d seeding trials per iterations)", n_clusters, n_samples, n_trials); auto dataBatchSize = kmeans::detail::getDataBatchSize(params, n_samples); // temporary buffers std::vector<DataT> h_wt(n_samples); rmm::device_uvector<DataT> distBuffer(n_trials * n_samples, stream); Tensor<DataT, 2, IndexT> centroidCandidates({n_trials, n_features}, stream); Tensor<DataT, 1, IndexT> costPerCandidate({n_trials}, stream); Tensor<DataT, 1, IndexT> minClusterDistance({n_samples}, stream); rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream); rmm::device_scalar<DataT> clusterCost(stream); rmm::device_scalar<cub::KeyValuePair<int, DataT>> minClusterIndexAndDistance(stream); // L2 norm of X: ||c||^2 Tensor<DataT, 1> L2NormX({n_samples}, stream); if (metric == raft::distance::DistanceType::L2Expanded || metric == raft::distance::DistanceType::L2SqrtExpanded) { raft::linalg::rowNorm( L2NormX.data(), X.data(), X.getSize(1), X.getSize(0), raft::linalg::L2Norm, true, stream); } std::mt19937 gen(params.seed); std::uniform_int_distribution<> dis(0, n_samples - 1); // <<< Step-1 >>>: C <-- sample a point uniformly at random from X auto initialCentroid = X.template view<2>({1, n_features}, {dis(gen), 0}); int n_clusters_picked = 1; // reset buffer to store the chosen centroid centroidsRawData.resize(initialCentroid.numElements(), stream); raft::copy( centroidsRawData.begin(), initialCentroid.data(), initialCentroid.numElements(), stream); // C = initial set of centroids Tensor<DataT, 2, IndexT> centroids(centroidsRawData.data(), {initialCentroid.getSize(0), initialCentroid.getSize(1)}); // <<< End of Step-1 >>> // Calculate cluster distance, d^2(x, C), for all the points x in X to the nearest centroid kmeans::detail::minClusterDistance(handle, params, X, centroids, minClusterDistance, L2NormX, L2NormBuf_OR_DistBuf, workspace, metric, stream); LOG(handle, " k-means++ - Sampled %d/%d centroids", n_clusters_picked, n_clusters); // <<<< Step-2 >>> : while |C| < k while (n_clusters_picked < n_clusters) { // <<< Step-3 >>> : Sample x in X with probability p_x = d^2(x, C) / phi_X (C) // Choose 'n_trials' centroid candidates from X with probability proportional to the squared // distance to the nearest existing cluster raft::copy(h_wt.data(), minClusterDistance.data(), minClusterDistance.numElements(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // Note - n_trials is relative small here, we don't need MLCommon::gather call std::discrete_distribution<> d(h_wt.begin(), h_wt.end()); for (int cIdx = 0; cIdx < n_trials; ++cIdx) { auto rand_idx = d(gen); auto randCentroid = X.template view<2>({1, n_features}, {rand_idx, 0}); raft::copy(centroidCandidates.data() + cIdx * n_features, randCentroid.data(), 
randCentroid.numElements(), stream); } // Calculate pairwise distance between X and the centroid candidates // Output - pwd [n_trails x n_samples] Tensor<DataT, 2, IndexT> pwd(distBuffer.data(), {n_trials, n_samples}); kmeans::detail::pairwise_distance( handle, centroidCandidates, X, pwd, workspace, metric, stream); // Update nearest cluster distance for each centroid candidate // Note pwd and minDistBuf points to same buffer which currently holds pairwise distance values. // Outputs minDistanceBuf[m_trails x n_samples] where minDistance[i, :] contains updated // minClusterDistance that includes candidate-i Tensor<DataT, 2, IndexT> minDistBuf(distBuffer.data(), {n_trials, n_samples}); raft::linalg::matrixVectorOp( minDistBuf.data(), pwd.data(), minClusterDistance.data(), pwd.getSize(1), pwd.getSize(0), true, true, [=] __device__(DataT mat, DataT vec) { return vec <= mat ? vec : mat; }, stream); // Calculate costPerCandidate[n_trials] where costPerCandidate[i] is the cluster cost when using // centroid candidate-i raft::linalg::reduce(costPerCandidate.data(), minDistBuf.data(), minDistBuf.getSize(1), minDistBuf.getSize(0), static_cast<DataT>(0), true, true, stream); // Greedy Choice - Choose the candidate that has minimum cluster cost // ArgMin operation below identifies the index of minimum cost in costPerCandidate { // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceReduce::ArgMin(nullptr, temp_storage_bytes, costPerCandidate.data(), minClusterIndexAndDistance.data(), costPerCandidate.getSize(0)); // Allocate temporary storage workspace.resize(temp_storage_bytes, stream); // Run argmin-reduction cub::DeviceReduce::ArgMin(workspace.data(), temp_storage_bytes, costPerCandidate.data(), minClusterIndexAndDistance.data(), costPerCandidate.getSize(0)); int bestCandidateIdx = -1; raft::copy(&bestCandidateIdx, &minClusterIndexAndDistance.data()->key, 1, stream); /// <<< End of Step-3 >>> /// <<< Step-4 >>>: C = C U {x} // Update minimum cluster distance corresponding to the chosen centroid candidate raft::copy(minClusterDistance.data(), minDistBuf.data() + bestCandidateIdx * n_samples, n_samples, stream); raft::copy(centroidsRawData.data() + n_clusters_picked * n_features, centroidCandidates.data() + bestCandidateIdx * n_features, n_features, stream); ++n_clusters_picked; /// <<< End of Step-4 >>> } LOG(handle, " k-means++ - Sampled %d/%d centroids", n_clusters_picked, n_clusters); } /// <<<< Step-5 >>> } template <typename DataT, typename IndexT> void checkWeights(const raft::handle_t& handle, rmm::device_uvector<char>& workspace, Tensor<DataT, 1, IndexT>& weight, cudaStream_t stream) { rmm::device_scalar<DataT> wt_aggr(stream); int n_samples = weight.getSize(0); size_t temp_storage_bytes = 0; CUDA_CHECK(cub::DeviceReduce::Sum( nullptr, temp_storage_bytes, weight.data(), wt_aggr.data(), n_samples, stream)); workspace.resize(temp_storage_bytes, stream); CUDA_CHECK(cub::DeviceReduce::Sum( workspace.data(), temp_storage_bytes, weight.data(), wt_aggr.data(), n_samples, stream)); DataT wt_sum = 0; raft::copy(&wt_sum, wt_aggr.data(), 1, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); if (wt_sum != n_samples) { LOG(handle, "[Warning!] 
KMeans: normalizing the user provided sample weights to " "sum up to %d samples", n_samples); DataT scale = n_samples / wt_sum; raft::linalg::unaryOp( weight.data(), weight.data(), weight.numElements(), [=] __device__(const DataT& wt) { return wt * scale; }, stream); } } }; // namespace detail }; // namespace kmeans }; // namespace ML
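The kmeansPlusPlus routine above implements the seeding loop described in its comment, tiled and batched for the GPU and extended with n_trials greedy candidates per iteration. For intuition, the underlying D^2-weighted sampling reduces to a few lines on the CPU; the sketch below is an editor's illustration only, and all names in it are hypothetical.

// Editor's illustrative sketch, not part of the original cuML source.
#include <limits>
#include <random>
#include <vector>

inline float sqL2(const float* a, const float* b, int d) {
    float s = 0.f;
    for (int i = 0; i < d; ++i) { float t = a[i] - b[i]; s += t * t; }
    return s;
}

// X is n x d, row-major; returns k seed row indices chosen with D^2 weighting.
inline std::vector<int> kmeansPlusPlusSeedsCPU(const float* X, int n, int d,
                                               int k, unsigned seed) {
    std::mt19937 gen(seed);
    std::vector<int> centers;
    centers.push_back(std::uniform_int_distribution<>(0, n - 1)(gen));   // step 1: uniform pick
    std::vector<float> minDist(n, std::numeric_limits<float>::max());
    while ((int)centers.size() < k) {                                    // step 2: while |C| < k
        const float* c = X + (size_t)centers.back() * d;
        for (int i = 0; i < n; ++i)                                      // refresh d^2(x, C)
            minDist[i] = std::min(minDist[i], sqL2(X + (size_t)i * d, c, d));
        // step 3: sample x with probability proportional to d^2(x, C)
        std::discrete_distribution<> pick(minDist.begin(), minDist.end());
        centers.push_back(pick(gen));                                    // step 4: C = C U {x}
    }
    return centers;
}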
#include <miner.h> #include <cuda_helper.h> #include <cuda_vectors.h> #include <cuda_vector_uint2x4.h> #include "skunk/streebog_arrays.cuh" //#define FULL_UNROLL __device__ __forceinline__ static void GOST_FS(const uint2 shared[8][256],const uint2 *const __restrict__ state,uint2* return_state) { return_state[0] = __ldg(&T02[__byte_perm(state[7].x,0,0x44440)]) ^ shared[1][__byte_perm(state[6].x,0,0x44440)] ^ shared[2][__byte_perm(state[5].x,0,0x44440)] ^ shared[3][__byte_perm(state[4].x,0,0x44440)] ^ shared[4][__byte_perm(state[3].x,0,0x44440)] ^ shared[5][__byte_perm(state[2].x,0,0x44440)] ^ shared[6][__byte_perm(state[1].x,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44440)]); return_state[1] = __ldg(&T02[__byte_perm(state[7].x,0,0x44441)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44441)]) ^ shared[2][__byte_perm(state[5].x,0,0x44441)] ^ shared[3][__byte_perm(state[4].x,0,0x44441)] ^ shared[4][__byte_perm(state[3].x,0,0x44441)] ^ shared[5][__byte_perm(state[2].x,0,0x44441)] ^ shared[6][__byte_perm(state[1].x,0,0x44441)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44441)]); return_state[2] = __ldg(&T02[__byte_perm(state[7].x,0,0x44442)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44442)]) ^ shared[2][__byte_perm(state[5].x,0,0x44442)] ^ shared[3][__byte_perm(state[4].x,0,0x44442)] ^ shared[4][__byte_perm(state[3].x,0,0x44442)] ^ shared[5][__byte_perm(state[2].x,0,0x44442)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44442)]) ^ shared[6][__byte_perm(state[1].x,0,0x44442)]; return_state[3] = __ldg(&T02[__byte_perm(state[7].x,0,0x44443)]) ^ shared[1][__byte_perm(state[6].x,0,0x44443)] ^ shared[2][__byte_perm(state[5].x,0,0x44443)] ^ shared[3][__byte_perm(state[4].x,0,0x44443)] ^ __ldg(&T42[__byte_perm(state[3].x,0,0x44443)]) ^ shared[5][__byte_perm(state[2].x,0,0x44443)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44443)]) ^ shared[6][__byte_perm(state[1].x,0,0x44443)]; return_state[4] = __ldg(&T02[__byte_perm(state[7].y,0,0x44440)]) ^ shared[1][__byte_perm(state[6].y,0,0x44440)] ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44440)]) ^ shared[3][__byte_perm(state[4].y,0,0x44440)] ^ shared[4][__byte_perm(state[3].y,0,0x44440)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44440)]) ^ shared[5][__byte_perm(state[2].y,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44440)]); return_state[5] = __ldg(&T02[__byte_perm(state[7].y,0,0x44441)]) ^ shared[2][__byte_perm(state[5].y,0,0x44441)] ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44441)]) ^ shared[3][__byte_perm(state[4].y,0,0x44441)] ^ shared[4][__byte_perm(state[3].y,0,0x44441)] ^ shared[5][__byte_perm(state[2].y,0,0x44441)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44441)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44441)]); return_state[6] = __ldg(&T02[__byte_perm(state[7].y,0,0x44442)]) ^ shared[1][__byte_perm(state[6].y,0,0x44442)] ^ shared[2][__byte_perm(state[5].y,0,0x44442)] ^ shared[3][__byte_perm(state[4].y,0,0x44442)] ^ shared[4][__byte_perm(state[3].y,0,0x44442)] ^ shared[5][__byte_perm(state[2].y,0,0x44442)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44442)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44442)]); return_state[7] = __ldg(&T02[__byte_perm(state[7].y,0,0x44443)]) ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44443)]) ^ shared[2][__byte_perm(state[5].y,0,0x44443)] ^ shared[3][__byte_perm(state[4].y,0,0x44443)] ^ shared[4][__byte_perm(state[3].y,0,0x44443)] ^ shared[5][__byte_perm(state[2].y,0,0x44443)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44443)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44443)]); } __device__ __forceinline__ static void GOST_FS_LDG(const 
uint2 shared[8][256],const uint2 *const __restrict__ state,uint2* return_state) { return_state[0] = __ldg(&T02[__byte_perm(state[7].x,0,0x44440)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44440)]) ^ shared[2][__byte_perm(state[5].x,0,0x44440)] ^ shared[3][__byte_perm(state[4].x,0,0x44440)] ^ shared[4][__byte_perm(state[3].x,0,0x44440)] ^ shared[5][__byte_perm(state[2].x,0,0x44440)] ^ shared[6][__byte_perm(state[1].x,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44440)]); return_state[1] = __ldg(&T02[__byte_perm(state[7].x,0,0x44441)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44441)]) ^ shared[2][__byte_perm(state[5].x,0,0x44441)] ^ shared[3][__byte_perm(state[4].x,0,0x44441)] ^ shared[4][__byte_perm(state[3].x,0,0x44441)] ^ shared[5][__byte_perm(state[2].x,0,0x44441)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44441)]) ^ shared[6][__byte_perm(state[1].x,0,0x44441)]; return_state[2] = __ldg(&T02[__byte_perm(state[7].x,0,0x44442)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44442)]) ^ shared[2][__byte_perm(state[5].x,0,0x44442)] ^ shared[3][__byte_perm(state[4].x,0,0x44442)] ^ shared[4][__byte_perm(state[3].x,0,0x44442)] ^ shared[5][__byte_perm(state[2].x,0,0x44442)] ^ shared[6][__byte_perm(state[1].x,0,0x44442)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44442)]); return_state[3] = __ldg(&T02[__byte_perm(state[7].x,0,0x44443)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44443)]) ^ shared[2][__byte_perm(state[5].x,0,0x44443)] ^ shared[3][__byte_perm(state[4].x,0,0x44443)] ^ shared[4][__byte_perm(state[3].x,0,0x44443)] ^ shared[5][__byte_perm(state[2].x,0,0x44443)] ^ shared[6][__byte_perm(state[1].x,0,0x44443)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44443)]); return_state[4] = __ldg(&T02[__byte_perm(state[7].y,0,0x44440)]) ^ shared[1][__byte_perm(state[6].y,0,0x44440)] ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44440)]) ^ shared[3][__byte_perm(state[4].y,0,0x44440)] ^ shared[4][__byte_perm(state[3].y,0,0x44440)] ^ shared[5][__byte_perm(state[2].y,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44440)]) ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44440)]); return_state[5] = __ldg(&T02[__byte_perm(state[7].y,0,0x44441)]) ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44441)]) ^ shared[2][__byte_perm(state[5].y,0,0x44441)] ^ shared[3][__byte_perm(state[4].y,0,0x44441)] ^ shared[4][__byte_perm(state[3].y,0,0x44441)] ^ shared[5][__byte_perm(state[2].y,0,0x44441)] ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44441)]) ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44441)]); return_state[6] = __ldg(&T02[__byte_perm(state[7].y,0,0x44442)]) ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44442)]) ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44442)]) ^ shared[3][__byte_perm(state[4].y,0,0x44442)] ^ shared[4][__byte_perm(state[3].y,0,0x44442)] ^ shared[5][__byte_perm(state[2].y,0,0x44442)] ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44442)]) ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44442)]); return_state[7] = __ldg(&T02[__byte_perm(state[7].y,0,0x44443)]) ^ shared[1][__byte_perm(state[6].y,0,0x44443)] ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44443)]) ^ shared[3][__byte_perm(state[4].y,0,0x44443)] ^ shared[4][__byte_perm(state[3].y,0,0x44443)] ^ shared[5][__byte_perm(state[2].y,0,0x44443)] ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44443)]) ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44443)]); } __device__ __forceinline__ static void GOST_E12(const uint2 shared[8][256],uint2 *const __restrict__ K, uint2 *const __restrict__ state) { uint2 t[ 8]; //#pragma unroll 12 for(int i=0; i<12; i++){ GOST_FS(shared,state, t); #pragma unroll 8 for(int j=0;j<8;j++) K[ j] 
^= *(uint2*)&CC[i][j]; #pragma unroll 8 for(int j=0;j<8;j++) state[ j] = t[ j]; GOST_FS_LDG(shared,K, t); #pragma unroll 8 for(int j=0;j<8;j++) state[ j]^= t[ j]; #pragma unroll 8 for(int j=0;j<8;j++) K[ j] = t[ j]; } } __constant__ uint64_t target64[4]; __host__ void skunk_set_target(uint32_t* ptarget) { cudaMemcpyToSymbol(target64, ptarget, 4*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); } #define TPB 256 __global__ __launch_bounds__(TPB, 2) void skunk_streebog_gpu_final_64(uint64_t *g_hash, uint32_t* resNonce) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2 buf[8], t[8], temp[8], K0[8], hash[8]; __shared__ uint2 shared[8][256]; shared[0][threadIdx.x] = __ldg(&T02[threadIdx.x]); shared[1][threadIdx.x] = __ldg(&T12[threadIdx.x]); shared[2][threadIdx.x] = __ldg(&T22[threadIdx.x]); shared[3][threadIdx.x] = __ldg(&T32[threadIdx.x]); shared[4][threadIdx.x] = __ldg(&T42[threadIdx.x]); shared[5][threadIdx.x] = __ldg(&T52[threadIdx.x]); shared[6][threadIdx.x] = __ldg(&T62[threadIdx.x]); shared[7][threadIdx.x] = __ldg(&T72[threadIdx.x]); // if (thread < threads) // { uint64_t* inout = &g_hash[thread<<3]; *(uint2x4*)&hash[0] = __ldg4((uint2x4*)&inout[0]); *(uint2x4*)&hash[4] = __ldg4((uint2x4*)&inout[4]); __threadfence_block(); K0[0] = vectorize(0x74a5d4ce2efc83b3); #pragma unroll 8 for(uint32_t i=0;i<8;i++){ buf[ i] = hash[ i] ^ K0[ 0]; } //#pragma unroll 12 for(int i=0; i<12; i++){ GOST_FS(shared, buf, temp); #pragma unroll 8 for(uint32_t j=0;j<8;j++){ buf[ j] = temp[ j] ^ *(uint2*)&precomputed_values[i][j]; } } #pragma unroll 8 for(int j=0;j<8;j++){ buf[ j]^= hash[ j]; } #pragma unroll 8 for(int j=0;j<8;j++){ K0[ j] = buf[ j]; } K0[7].y ^= 0x00020000; GOST_FS(shared, K0, t); #pragma unroll 8 for(uint32_t i=0;i<8;i++) K0[ i] = t[ i]; t[7].y ^= 0x01000000; GOST_E12(shared, K0, t); #pragma unroll 8 for(int j=0;j<8;j++) buf[ j] ^= t[ j]; buf[7].y ^= 0x01000000; GOST_FS(shared, buf,K0); buf[7].y ^= 0x00020000; #pragma unroll 8 for(uint32_t j=0;j<8;j++) t[ j] = K0[ j]; t[7].y ^= 0x00020000; GOST_E12(shared, K0, t); #pragma unroll 8 for(uint32_t j=0;j<8;j++) buf[ j] ^= t[ j]; GOST_FS(shared, buf,K0); // K = F(h) hash[7]+= vectorize(0x0100000000000000); #pragma unroll 8 for(uint32_t j=0;j<8;j++) t[ j] = K0[ j] ^ hash[ j]; // #pragma unroll for(uint32_t i=0; i<10; i++){ GOST_FS(shared, t, temp); #pragma unroll 8 for(uint32_t j=0;j<8;j++){ t[ j] = temp[ j]; K0[ j] = K0[ j] ^ *(uint2*)&CC[ i][ j]; } GOST_FS(shared, K0, temp); #pragma unroll 8 for(uint32_t j=0;j<8;j++){ K0[ j] = temp[ j]; t[ j]^= temp[ j]; } } GOST_FS(shared, t, temp); #pragma unroll 8 for(uint32_t j=0;j<8;j++){ t[ j] = temp[ j]; K0[ j] = K0[ j] ^ *(uint2*)&CC[10][ j]; } GOST_FS(shared, K0, temp); #pragma unroll 8 for(int i=7;i>=0;i--){ t[i].x = t[i].x ^ temp[i].x; temp[i].x = temp[i].x ^ ((uint32_t*)&CC[11])[i<<1]; } uint2 last[2]; #define T0(x) shared[0][x] #define T1(x) shared[1][x] #define T2(x) shared[2][x] #define T3(x) shared[3][x] #define T4(x) shared[4][x] #define T5(x) shared[5][x] #define T6(x) shared[6][x] #define T7(x) shared[7][x] last[ 0] = T0(__byte_perm(t[7].x,0,0x44443)) ^ T1(__byte_perm(t[6].x,0,0x44443)) ^ T2(__byte_perm(t[5].x,0,0x44443)) ^ T3(__byte_perm(t[4].x,0,0x44443)) ^ T4(__byte_perm(t[3].x,0,0x44443)) ^ T5(__byte_perm(t[2].x,0,0x44443)) ^ T6(__byte_perm(t[1].x,0,0x44443)) ^ T7(__byte_perm(t[0].x,0,0x44443)); last[ 1] = T0(__byte_perm(temp[7].x,0,0x44443)) ^ T1(__byte_perm(temp[6].x,0,0x44443)) ^ T2(__byte_perm(temp[5].x,0,0x44443)) ^ T3(__byte_perm(temp[4].x,0,0x44443)) ^ 
T4(__byte_perm(temp[3].x,0,0x44443)) ^ T5(__byte_perm(temp[2].x,0,0x44443)) ^ T6(__byte_perm(temp[1].x,0,0x44443)) ^ T7(__byte_perm(temp[0].x,0,0x44443)); if(devectorize(buf[3] ^ hash[3] ^ last[ 0] ^ last[ 1]) <= target64[3]){ uint32_t tmp = atomicExch(&resNonce[0], thread); if (tmp != UINT32_MAX) resNonce[1] = tmp; } } __host__ void skunk_cuda_streebog(int thr_id, uint32_t threads, uint32_t *d_hash, uint32_t* d_resNonce) { dim3 grid((threads + TPB-1) / TPB); dim3 block(TPB); skunk_streebog_gpu_final_64 <<< grid, block >>> ((uint64_t*)d_hash, d_resNonce); }
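The final-round kernel above reports winning nonces without a lock: a thread whose hash meets the target swaps its index into resNonce[0] with atomicExch and, if another candidate was already stored there, spills that earlier value into resNonce[1]. The standalone sketch below isolates that idiom; it assumes the host initializes resNonce to UINT32_MAX before the launch, and the kernel name is illustrative only.

// Editor's illustrative sketch, not part of the original miner source.
#include <cuda_runtime.h>
#include <stdint.h>

__global__ void findBelowTarget(const uint64_t* __restrict__ values,
                                uint64_t target, uint32_t n,
                                uint32_t* resNonce) {
    uint32_t thread = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread >= n) return;
    if (values[thread] <= target) {
        // publish this candidate; if a previous candidate was already stored,
        // demote it to the second result slot (races are tolerated, any valid
        // candidate is acceptable)
        uint32_t prev = atomicExch(&resNonce[0], thread);
        if (prev != UINT32_MAX)
            resNonce[1] = prev;
    }
}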
#define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "../NeuralNetwork/Activation/ActivationFunction.cu" extern "C" { typedef enum MyBackPropMethod { SGD = 0, RMSProp = 1, } MyBackPropMethod; __device__ float Clip(float value, float clip) { return (clip == 0) * value + (clip != 0) * ((value > clip) * clip + (value < -clip) * -clip + (value >= -clip && value <= clip) * value); /* avoids thread divergence, equivalent to: if (clip == 0) return value; else if (value > clip) return clip; else if (value < -clip) return -clip; else return value; */ } __device__ void SGDWeightUpdate(float trainingRate, float momentum, float clipGradient, float *weights, float *weightDeltas, int weightId, float gradient) { float weightDelta = trainingRate * Clip(gradient, clipGradient) + momentum * weightDeltas[weightId]; weightDeltas[weightId] = weightDelta; weights[weightId] -= weightDelta; } __device__ void RMSPropWeightUpdate(float trainingRate, float momentum, float smoothingFactor, float clipGradient, float *weights, float *weightDeltas, float *weightMeanSquares, int weightId, float gradient) { float rmsGradient = Clip(gradient, clipGradient) + momentum * weightDeltas[weightId]; weightDeltas[weightId] = rmsGradient; float weightMeanSquare = smoothingFactor * weightMeanSquares[weightId] + (1.0f - smoothingFactor) * rmsGradient * rmsGradient; if (weightMeanSquare != 0) rmsGradient /= sqrtf(weightMeanSquare); weightMeanSquares[weightId] = weightMeanSquare; weights[weightId] -= trainingRate * rmsGradient; } __global__ void LSTMUpdateGateWeightsKernelBPTT( float *inputGateWeights, float *inputGateWeightDeltas, float *inputGateWeightMeanSquares, float *forgetGateWeights, float *forgetGateWeightDeltas, float *forgetGateWeightMeanSquares, float *outputGateWeights, float *outputGateWeightDeltas, float *outputGateWeightMeanSquares, float* outputGateWeightGradient, float* inputGateWeightGradient, float* forgetGateWeightGradient, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, outputGateWeights, outputGateWeightDeltas, outputGateWeightMeanSquares, weightId, outputGateWeightGradient[weightId]); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, inputGateWeights, inputGateWeightDeltas, inputGateWeightMeanSquares, weightId, inputGateWeightGradient[weightId]); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, forgetGateWeights, forgetGateWeightDeltas, forgetGateWeightMeanSquares, weightId, forgetGateWeightGradient[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, outputGateWeights, outputGateWeightDeltas, weightId, outputGateWeightGradient[weightId]); SGDWeightUpdate(trainingRate, momentum, clipGradient, inputGateWeights, inputGateWeightDeltas, weightId, inputGateWeightGradient[weightId]); SGDWeightUpdate(trainingRate, momentum, clipGradient, 
forgetGateWeights, forgetGateWeightDeltas, weightId, forgetGateWeightGradient[weightId]); } } } __global__ void LSTMUpdateCellWeightsKernelBPTT( float *cellInputWeights, float *cellInputWeightDeltas, float *cellInputWeightMeanSquares, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, float *cellInputWeightGradient, int inputCount, int previousOutputCount ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; int cellStatesCount = previousOutputCount; if (weightId < weightsPerCell * cellStatesCount) { int cellId = weightId / weightsPerCell; if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, cellInputWeights, cellInputWeightDeltas, cellInputWeightMeanSquares, weightId, cellInputWeightGradient[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, cellInputWeights, cellInputWeightDeltas, weightId, cellInputWeightGradient[weightId]); } } } /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /* /* ORIGINAL FROM KAREL */ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ __global__ void LSTMUpdateGateWeightsKernel( float *input, float *previousOutput, float *cellStates, float *cellStateErrors, float *outputGateDeltas, float *inputGateWeights, float *inputGateWeightDeltas, float *inputGateWeightMeanSquares, float *forgetGateWeights, float *forgetGateWeightDeltas, float *forgetGateWeightMeanSquares, float *outputGateWeights, float *outputGateWeightDeltas, float *outputGateWeightMeanSquares, float *inputGateWeightsRTRLPartials, float *forgetGateWeightsRTRLPartials, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { int fromId = weightId % weightsPerGate; int toId = weightId / weightsPerGate; //calculate output gate weight gradient int isFromInputUnit = fromId >= 0 && fromId < inputCount; int isFromPreviousOutputUnit = (fromId >= inputCount) && 
(fromId < inputCount + previousOutputCount); int isPeephole = (fromId >= inputCount + previousOutputCount) && (fromId < inputCount + previousOutputCount + cellsPerBlock); int isFromBiasUnit = fromId == (inputCount + previousOutputCount + cellsPerBlock); float inputFromWeight = isFromInputUnit * input[isFromInputUnit * fromId] + isFromPreviousOutputUnit * previousOutput[isFromPreviousOutputUnit * (fromId - inputCount)] + isPeephole * cellStates[isPeephole * (toId * cellsPerBlock + (fromId - inputCount - previousOutputCount))] + isFromBiasUnit * 1; float outputGateWeightGradient = outputGateDeltas[toId] * inputFromWeight; //calculate input and forget gate weight gradients float inputGateWeightGradient = 0; float forgetGateWeightGradient = 0; //loop through cells for (int cellId = toId * cellsPerBlock; cellId < (toId + 1) * cellsPerBlock; cellId++) { inputGateWeightGradient += cellStateErrors[cellId] * inputGateWeightsRTRLPartials[cellId * weightsPerGate + fromId]; forgetGateWeightGradient += cellStateErrors[cellId] * forgetGateWeightsRTRLPartials[cellId * weightsPerGate + fromId]; } //update gate weights if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, outputGateWeights, outputGateWeightDeltas, outputGateWeightMeanSquares, weightId, outputGateWeightGradient); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, inputGateWeights, inputGateWeightDeltas, inputGateWeightMeanSquares, weightId, inputGateWeightGradient); RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, forgetGateWeights, forgetGateWeightDeltas, forgetGateWeightMeanSquares, weightId, forgetGateWeightGradient); } else // SGD { SGDWeightUpdate(trainingRate, momentum, clipGradient, outputGateWeights, outputGateWeightDeltas, weightId, outputGateWeightGradient); SGDWeightUpdate(trainingRate, momentum, clipGradient, inputGateWeights, inputGateWeightDeltas, weightId, inputGateWeightGradient); SGDWeightUpdate(trainingRate, momentum, clipGradient, forgetGateWeights, forgetGateWeightDeltas, weightId, forgetGateWeightGradient); } } } __global__ void LSTMUpdateCellWeightsKernel( float *input, float *previousOutput, float *cellStateErrors, float *cellInputWeights, float *cellInputWeightDeltas, float *cellInputWeightMeanSquares, float *cellWeightsRTRLPartials, MyBackPropMethod backPropMethod, float trainingRate, float momentum, float smoothingFactor, float clipGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; if (weightId < weightsPerCell * previousOutputCount) { int cellId = weightId / weightsPerCell; if (backPropMethod == RMSProp) { RMSPropWeightUpdate(trainingRate, momentum, smoothingFactor, clipGradient, cellInputWeights, cellInputWeightDeltas, cellInputWeightMeanSquares, weightId, cellStateErrors[cellId] * cellWeightsRTRLPartials[weightId]); } else { SGDWeightUpdate(trainingRate, momentum, clipGradient, cellInputWeights, cellInputWeightDeltas, weightId, cellStateErrors[cellId] * cellWeightsRTRLPartials[weightId]); } } } }
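Both the BPTT and RTRL kernels above funnel their gradients through the same two update rules, SGDWeightUpdate and RMSPropWeightUpdate. A scalar, host-side mirror of the RMSProp rule, useful for unit-testing the arithmetic against the device version, might look like the sketch below; the function name is hypothetical, and clipGrad keeps the Clip() convention that a value of 0 disables clipping.

// Editor's illustrative sketch, not part of the original kernels.
#include <cmath>

inline void rmsPropStep(float lr, float momentum, float smoothing, float clipGrad,
                        float& weight, float& delta, float& meanSquare, float grad) {
    if (clipGrad != 0.f)
        grad = std::fmax(-clipGrad, std::fmin(clipGrad, grad));   // same effect as Clip()
    float g = grad + momentum * delta;            // momentum applied to the clipped gradient
    delta = g;
    meanSquare = smoothing * meanSquare + (1.f - smoothing) * g * g;
    if (meanSquare != 0.f)
        g /= std::sqrt(meanSquare);               // normalize by RMS of recent gradients
    weight -= lr * g;
}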
* \file * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. */ #pragma once #include "../../util_arch.cuh" #include "../../util_ptx.cuh" #include "../../warp/warp_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. */ template < typename T, int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockScanWarpScans { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, /// Number of warp threads INNER_WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), OUTER_WARP_THREADS = BLOCK_THREADS / INNER_WARP_THREADS, /// Number of outer scan warps OUTER_WARPS = INNER_WARP_THREADS }; /// Outer WarpScan utility type typedef WarpScan<T, OUTER_WARP_THREADS, PTX_ARCH> OuterWarpScanT; /// Inner WarpScan utility type typedef WarpScan<T, INNER_WARP_THREADS, PTX_ARCH> InnerWarpScanT; typedef typename OuterWarpScanT::TempStorage OuterScanArray[OUTER_WARPS]; /// Shared memory storage layout type struct _TempStorage { union Aliasable { Uninitialized<OuterScanArray> outer_warp_scan; ///< Buffer for warp-synchronous outer scans typename InnerWarpScanT::TempStorage inner_warp_scan; ///< Buffer for warp-synchronous inner scan } aliasable; T warp_aggregates[OUTER_WARPS]; T block_aggregate; ///< Shared prefix for the entire thread block }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; unsigned int warp_id; unsigned int lane_id; //--------------------------------------------------------------------- // Constructors //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ BlockScanWarpScans( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), warp_id((OUTER_WARPS == 1) ? 0 : linear_tid / OUTER_WARP_THREADS), lane_id((OUTER_WARPS == 1) ? linear_tid : linear_tid % OUTER_WARP_THREADS) {} //--------------------------------------------------------------------- // Exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { // Compute block-wide exclusive scan. 
The exclusive output from tid0 is invalid. T block_aggregate; ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).Scan( input, inclusive_output, exclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; T outer_warp_exclusive; InnerWarpScanT(temp_storage.aliasable.inner_warp_scan).ExclusiveScan( outer_warp_input, outer_warp_exclusive, scan_op, block_aggregate); temp_storage.block_aggregate = block_aggregate; temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; } CTA_SYNC(); if (warp_id != 0) { // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); if (lane_id == 0) exclusive_output = outer_warp_exclusive; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
T inclusive_output; OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).Scan( input, inclusive_output, exclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) { temp_storage.warp_aggregates[warp_id] = inclusive_output; } CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; T outer_warp_exclusive; InnerWarpScanT(temp_storage.aliasable.inner_warp_scan).ExclusiveScan( outer_warp_input, outer_warp_exclusive, initial_value, scan_op, block_aggregate); temp_storage.block_aggregate = block_aggregate; temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; } CTA_SYNC(); // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); if (lane_id == 0) exclusive_output = outer_warp_exclusive; } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. The call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).Scan( input, inclusive_output, exclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { InnerWarpScanT inner_scan(temp_storage.aliasable.inner_warp_scan); T upsweep = temp_storage.warp_aggregates[linear_tid]; T downsweep_prefix, block_aggregate; inner_scan.ExclusiveScan(upsweep, downsweep_prefix, scan_op, block_aggregate); // Use callback functor to get block prefix in lane0 and then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = inner_scan.Broadcast(block_prefix, 0); downsweep_prefix = scan_op(block_prefix, downsweep_prefix); if (linear_tid == 0) downsweep_prefix = block_prefix; temp_storage.warp_aggregates[linear_tid] = downsweep_prefix; } CTA_SYNC(); // Apply warp prefix to our lane's partial (or assign it if partial is invalid) T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); if (lane_id == 0) exclusive_output = outer_warp_exclusive; } //--------------------------------------------------------------------- // Inclusive scans //--------------------------------------------------------------------- /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; InclusiveScan(input, inclusive_output, scan_op, block_aggregate); } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).InclusiveScan( input, inclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; T outer_warp_exclusive; InnerWarpScanT(temp_storage.aliasable.inner_warp_scan).ExclusiveScan( outer_warp_input, outer_warp_exclusive, scan_op, block_aggregate); temp_storage.block_aggregate = block_aggregate; temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; } CTA_SYNC(); if (warp_id != 0) { // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; inclusive_output = scan_op(outer_warp_exclusive, inclusive_output); } } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).InclusiveScan( input, inclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { InnerWarpScanT inner_scan(temp_storage.aliasable.inner_warp_scan); T upsweep = temp_storage.warp_aggregates[linear_tid]; T downsweep_prefix, block_aggregate; inner_scan.ExclusiveScan(upsweep, downsweep_prefix, scan_op, block_aggregate); // Use callback functor to get block prefix in lane0 and then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = inner_scan.Broadcast(block_prefix, 0); downsweep_prefix = scan_op(block_prefix, downsweep_prefix); if (linear_tid == 0) downsweep_prefix = block_prefix; temp_storage.warp_aggregates[linear_tid] = downsweep_prefix; } CTA_SYNC(); // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; inclusive_output = scan_op(outer_warp_exclusive, inclusive_output); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
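//------------------------------------------------------------------------------
// Illustrative usage sketch (not part of CUB). The specialization above is
// normally reached through the public cub::BlockScan front-end by selecting the
// BLOCK_SCAN_WARP_SCANS algorithm; the warp-scans variant then performs the
// per-warp scans plus the inner scan over warp aggregates shown above. The
// kernel name ExampleExclusiveSum, the d_in/d_out buffers, and the 128-thread
// block size are assumptions made only for this example.
//------------------------------------------------------------------------------
#include <cub/cub.cuh>

__global__ void ExampleExclusiveSum(const int *d_in, int *d_out)
{
    using BlockScanT = cub::BlockScan<int, 128, cub::BLOCK_SCAN_WARP_SCANS>;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    // One item per thread; aliasing of input/output registers is allowed.
    int thread_in  = d_in[blockIdx.x * 128 + threadIdx.x];
    int thread_out;
    int block_aggregate;
    BlockScanT(temp_storage).ExclusiveSum(thread_in, thread_out, block_aggregate);

    d_out[blockIdx.x * 128 + threadIdx.x] = thread_out;
}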
//==============================================================================
// New source file: CUDA Monte-Carlo Expected Improvement kernels (optimal_learning)
//==============================================================================
#include <stdint.h> #include <curand_kernel.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <algorithm> #include <vector> /*!\rst Macro to stringify the expansion of a macro. For example, say we are on line 53: * ``#__LINE__ --> "__LINE__"`` * ``OL_CUDA_STRINGIFY_EXPANSION(__LINE__) --> "53"`` ``OL_CUDA_STRINGIFY_EXPANSION_INNER`` is not meant to be used directly; but we need ``#x`` in a macro for this expansion to work. This is a standard trick; see bottom of: http://gcc.gnu.org/onlinedocs/cpp/Stringification.html \endrst*/ #define OL_CUDA_STRINGIFY_EXPANSION_INNER(x) #x #define OL_CUDA_STRINGIFY_EXPANSION(x) OL_CUDA_STRINGIFY_EXPANSION_INNER(x) /*!\rst Macro to stringify and format the current file and line number. For example, if the macro is invoked from line 893 of file gpp_foo.cpp, this macro produces the compile-time string-constant: ``(gpp_foo.cpp: 893)`` \endrst*/ #define OL_CUDA_STRINGIFY_FILE_AND_LINE "(" __FILE__ ": " OL_CUDA_STRINGIFY_EXPANSION(__LINE__) ")" /*!\rst Macro that checks error message (with type cudaError_t) returned by CUDA API functions, and if there is error occurred, the macro produces a C struct containing error message, function name where error occured, file name and line info, and then terminate the function. \endrst*/ #define OL_CUDA_ERROR_RETURN(X) do {cudaError_t _error_code = (X); if (_error_code != cudaSuccess) {CudaError _err = {_error_code, OL_CUDA_STRINGIFY_FILE_AND_LINE, __func__}; return _err;}} while (0) namespace optimal_learning { namespace { // functions run on gpu device /*!\rst Special case of GeneralMatrixVectorMultiply. As long as A has zeros in the strict upper-triangle, GeneralMatrixVectorMultiply will work too (but take ``>= 2x`` as long). Computes results IN-PLACE. Avoids accessing the strict upper triangle of A. Should be equivalent to BLAS call: ``dtrmv('L', trans, 'N', size_m, A, size_m, x, 1);`` \endrst*/ __device__ void CudaTriangularMatrixVectorMultiply(double const * restrict A, int size_m, double * restrict x) { double temp; A += size_m * (size_m-1); for (int j = size_m-1; j >= 0; --j) { // i.e., j >= 0 temp = x[j]; for (int i = size_m-1; i >= j+1; --i) { // handles sub-diagonal contributions from j-th column x[i] += temp*A[i]; } x[j] *= A[j]; // handles j-th on-diagonal component A -= size_m; } } /*!\rst This is reduced version of GeneralMatrixVectorMultiply(...) in gpp_linear_algebra.cpp, and this function computes y = y - A * x (aka alpha = -1.0, beta = 1.0) \endrst*/ __device__ void CudaGeneralMatrixVectorMultiply(double const * restrict A, double const * restrict x, int size_m, int size_n, int lda, double * restrict y) { double temp; for (int i = 0; i < size_n; ++i) { temp = -1.0 * x[i]; for (int j = 0; j < size_m; ++j) { y[j] += A[j]*temp; } A += lda; } } /*!\rst This inline function copies [begin, begin+1, ..., end-1] elements from one array to the other, if bound < end, then end = bound \endrst*/ __forceinline__ __device__ void CudaCopyElements(int begin, int end, int bound, double const * restrict origin, double * restrict destination) { int local_end = end < bound ? end : bound; for (int idx = begin; idx < local_end; ++idx) { destination[idx] = origin[idx]; } } /*!\rst GPU kernel function of computing Expected Improvement using Monte-Carlo. **Shared Memory Requirements** This method requires the caller to allocate 3 arrays: chol_var_local, mu_local and normals, with ``(num_union * num_union + num_union + num_union * num_threads)`` doubles in total in shared memory. 
The order of the arrays placed in this shared memory is like ``[chol_var_local, mu_local, normals]`` Currently size of shared memory per block is set to 48K, to give you a sense, that is approximately 6144 doubles, for example, this caller works when num_union = 22 without blowing up shared memory (currently num_threads = 256). :chol_var_local[num_union][num_union]: copy of chol_var in shared memory for each block :mu_local[num_union]: copy of mu in shared memory for each block :normals[num_union][num_threads]: shared memory for storage of normal random numbers for each block \param :mu[num_union]: the mean of the GP evaluated at points interested :chol_var[num_union][num_union]: cholesky factorization of the GP variance evaluated at points interested :num_union: number of the points interested :num_iteration: number of iterations performed on each thread for MC evaluation :best: best function evaluation obtained so far :base_seed: base seed for the GPU's RNG; will be offset by GPU thread index (see curand) :configure_for_test: whether record random_number_ei or not \output :gpu_random_number_ei[num_union][num_iteration][num_threads][num_blocks]: array storing random numbers used for computing EI, for testing only :ei_storage[num_threads][num_blocks]: each thread's computed EI component written to its corresponding position \endrst*/ __global__ void CudaComputeEIGpu(double const * restrict mu, double const * restrict chol_var, int num_union, int num_iteration, double best, uint64_t base_seed, bool configure_for_test, double * restrict gpu_random_number_ei, double * restrict ei_storage) { // copy mu, chol_var to shared memory mu_local & chol_var_local // For multiple dynamically sized arrays in a single kernel, declare a single extern unsized array, and use // pointers into it to divide it into multiple arrays // refer to http://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/ extern __shared__ double storage[]; double * restrict chol_var_local = storage; double * restrict mu_local = chol_var_local + num_union * num_union; const int idx = threadIdx.x; const int IDX = threadIdx.x + blockDim.x * blockIdx.x; int chunk_size = (num_union * num_union - 1)/ blockDim.x + 1; CudaCopyElements(chunk_size * idx, chunk_size * (idx + 1), num_union * num_union, chol_var, chol_var_local); chunk_size = (num_union - 1)/ blockDim.x + 1; CudaCopyElements(chunk_size * idx, chunk_size * (idx + 1), num_union, mu, mu_local); __syncthreads(); double * restrict normals = mu_local + num_union + idx * num_union; // MC start // RNG setup uint64_t local_seed = base_seed + IDX; curandState random_state; // seed a random number generator curand_init(local_seed, 0, 0, &random_state); double agg = 0.0; double improvement_this_step; double EI; for (int mc = 0; mc < num_iteration; ++mc) { improvement_this_step = 0.0; for (int i = 0; i < num_union; ++i) { normals[i] = curand_normal_double(&random_state); // If configure_for_test is true, random numbers used in MC computations will be saved as output. // In fact we will let EI computation on CPU use the same sequence of random numbers saved here, // so that EI computation on CPU & GPU can be compared directly for unit testing. 
if (configure_for_test) { gpu_random_number_ei[IDX * num_iteration * num_union + mc * num_union + i] = normals[i]; } } CudaTriangularMatrixVectorMultiply(chol_var_local, num_union, normals); for (int i = 0; i < num_union; ++i) { EI = best - (mu_local[i] + normals[i]); improvement_this_step = fmax(EI, improvement_this_step); } agg += improvement_this_step; } ei_storage[IDX] = agg / static_cast<double>(num_iteration); } /*!\rst Device code to compute Gradient of Expected Improvement by Monte-Carlo on GPU. **Shared Memory Requirements** This method requires the caller to allocate 5 arrays: mu_local, chol_var_local, grad_mu_local, grad_chol_var_local and normals, with ``(num_union + num_union * num_union + dim * num_to_sample + dim * num_union * num_union * num_to_sample + 2 * num_union * num_threads)`` doubles in total in shared memory. The order of the arrays placed in this shared memory is like ``[mu_local, chol_var_local, grad_mu_local, grad_chol_var_local, normals]`` Currently size of shared memory per block is set to 48K, to give you a sense, that is approximately 6144 doubles, for example, this caller works for num_union = num_to_sample = 8, dim = 3 without blowing up shared memory (currently num_threads = 256). :mu_local[num_union]: copy of mu in shared memory for each block :chol_var_local[num_union][num_union]: copy of chol_var in shared memory for each block :grad_mu_local[dim][num_to_sample]: copy of grad_mu in shared memory for each block :grad_chol_var_local[dim][num_union][num_union][num_to_sample]: copy of grad_chol_var in shared memory for each block :normals[2 * num_union][num_threads]: shared memory for storage of normal random numbers for each block, and for each thread it gets 2 * num_union normal random numbers, with one set of normals occupying the first num_union doubles, and we store a copy of them in the rest of the spaces. 
\param :mu[num_union]: the mean of the GP evaluated at points interested :grad_mu[dim][num_to_sample]: the gradient of mean of the GP evaluated at points interested :chol_var[num_union][num_union]: cholesky factorization of the GP variance evaluated at points interested :grad_chol_var[dim][num_union][num_union][num_to_sample]: gradient of cholesky factorization of the GP variance evaluated at points interested :num_union: number of the union of points (aka q+p) :num_to_sample: number of points to sample (aka q) :dim: dimension of point space :num_iteration: number of iterations performed on each thread for MC evaluation :best: best function evaluation obtained so far :base_seed: base seed for the GPU's RNG; will be offset by GPU thread index (see curand) :configure_for_test: whether record random_number_grad_ei or not \output :gpu_random_number_grad_ei[num_union][num_itreration][num_threads][num_blocks]: array storing random numbers used for computing gradEI, for testing only :grad_ei_storage[dim][num_to_sample][num_threads][num_blocks]: each thread write result of grad_ei to its corresponding positions \endrst*/ __global__ void CudaComputeGradEIGpu(double const * restrict mu, double const * restrict grad_mu, double const * restrict chol_var, double const * restrict grad_chol_var, int num_union, int num_to_sample, int dim, int num_iteration, double best, uint64_t base_seed, bool configure_for_test, double * restrict gpu_random_number_grad_ei, double * restrict grad_ei_storage) { // copy mu, chol_var, grad_mu, grad_chol_var to shared memory extern __shared__ double storage[]; double * restrict mu_local = storage; double * restrict chol_var_local = mu_local + num_union; double * restrict grad_mu_local = chol_var_local + num_union * num_union; double * restrict grad_chol_var_local = grad_mu_local + num_to_sample * dim; const int idx = threadIdx.x; const int IDX = threadIdx.x + blockDim.x * blockIdx.x; int chunk_size = (num_to_sample * num_union * num_union * dim - 1)/ blockDim.x + 1; CudaCopyElements(chunk_size * idx, chunk_size * (idx + 1), num_to_sample * num_union * num_union * dim, grad_chol_var, grad_chol_var_local); chunk_size = (num_union * num_union - 1)/ blockDim.x + 1; CudaCopyElements(chunk_size * idx, chunk_size * (idx + 1), num_union * num_union, chol_var, chol_var_local); chunk_size = (num_to_sample * dim - 1)/ blockDim.x + 1; CudaCopyElements(chunk_size * idx, chunk_size * (idx + 1), num_to_sample * dim, grad_mu, grad_mu_local); chunk_size = (num_union - 1)/ blockDim.x + 1; CudaCopyElements(chunk_size * idx, chunk_size * (idx + 1), num_union, mu, mu_local); __syncthreads(); double * restrict normals = grad_chol_var_local + num_union * num_union * num_to_sample * dim + idx * num_union * 2; double * restrict normals_copy = normals + num_union; int i, k, mc, winner; double EI, improvement_this_step; // RNG setup uint64_t local_seed = base_seed + IDX; curandState random_state; curand_init(local_seed, 0, 0, &random_state); // initialize grad_ei_storage for (int i = 0; i < (num_to_sample * dim); ++i) { grad_ei_storage[IDX*num_to_sample*dim + i] = 0.0; } // MC step start for (mc = 0; mc < num_iteration; ++mc) { improvement_this_step = 0.0; winner = -1; for (i = 0; i < num_union; ++i) { normals[i] = curand_normal_double(&random_state); normals_copy[i] = normals[i]; // If configure_for_test is true, random numbers used in MC computations will be saved as output. 
// In fact we will let gradEI computation on CPU use the same sequence of random numbers saved here, // so that gradEI computation on CPU & GPU can be compared directly for unit testing. if (configure_for_test) { gpu_random_number_grad_ei[IDX * num_iteration * num_union + mc * num_union + i] = normals[i]; } } CudaTriangularMatrixVectorMultiply(chol_var_local, num_union, normals); for (i = 0; i < num_union; ++i) { EI = best - (mu_local[i] + normals[i]); if (EI > improvement_this_step) { improvement_this_step = EI; winner = i; } } if (improvement_this_step > 0.0) { if (winner < num_to_sample) { for (k = 0; k < dim; ++k) { grad_ei_storage[IDX*num_to_sample*dim + winner * dim + k] -= grad_mu_local[winner * dim + k]; } } for (i = 0; i < num_to_sample; ++i) { // derivative w.r.t ith point CudaGeneralMatrixVectorMultiply(grad_chol_var_local + i*num_union*num_union*dim + winner*num_union*dim, normals_copy, dim, num_union, dim, grad_ei_storage + IDX*num_to_sample*dim + i*dim); } } } for (int i = 0; i < num_to_sample*dim; ++i) { grad_ei_storage[IDX*num_to_sample*dim + i] /= static_cast<double>(num_iteration); } } } // end unnamed namespace CudaError CudaGetEI(double const * restrict mu, double const * restrict chol_var, int num_union, int num_mc, double best, uint64_t base_seed, bool configure_for_test, double * restrict gpu_mu, double * restrict gpu_chol_var, double * restrict random_number_ei, double * restrict gpu_random_number_ei, double * restrict gpu_ei_storage, double * restrict ei_val) { // We assign kEINumBlocks blocks and kEINumThreads threads/block for EI computation, so there are // (kEINumBlocks * kEINumThreads) threads in total to execute kernel function in parallel dim3 threads(kEINumThreads); dim3 grid(kEINumBlocks); std::vector<double> ei_storage(kEINumThreads * kEINumBlocks); int num_iteration = num_mc / (kEINumThreads * kEINumBlocks) + 1; // make sure num_iteration is always >= 1 int mem_size_mu = num_union * sizeof(*mu); int mem_size_chol_var = num_union * num_union * sizeof(*mu); int mem_size_ei_storage = kEINumThreads * kEINumBlocks * sizeof(*mu); // copy mu, chol_var to GPU OL_CUDA_ERROR_RETURN(cudaMemcpy(gpu_mu, mu, mem_size_mu, cudaMemcpyHostToDevice)); OL_CUDA_ERROR_RETURN(cudaMemcpy(gpu_chol_var, chol_var, mem_size_chol_var, cudaMemcpyHostToDevice)); // execute kernel CudaComputeEIGpu <<< grid, threads, num_union*sizeof(*mu)+num_union*num_union*sizeof(*mu)+num_union*kEINumThreads*sizeof(*mu) >>> (gpu_mu, gpu_chol_var, num_union, num_iteration, best, base_seed, configure_for_test, gpu_random_number_ei, gpu_ei_storage); OL_CUDA_ERROR_RETURN(cudaPeekAtLastError()); // copy gpu_ei_storage back to CPU OL_CUDA_ERROR_RETURN(cudaMemcpy(ei_storage.data(), gpu_ei_storage, mem_size_ei_storage, cudaMemcpyDeviceToHost)); // copy gpu_random_number_ei back to CPU if configure_for_test is on if (configure_for_test) { int mem_size_random_number_ei = num_iteration * kEINumThreads * kEINumBlocks * num_union * sizeof(*mu); OL_CUDA_ERROR_RETURN(cudaMemcpy(random_number_ei, gpu_random_number_ei, mem_size_random_number_ei, cudaMemcpyDeviceToHost)); } // average ei_storage double ave = 0.0; for (int i = 0; i < (kEINumThreads*kEINumBlocks); ++i) { ave += ei_storage[i]; } *ei_val = ave / static_cast<double>(kEINumThreads*kEINumBlocks); return kCudaSuccess; } CudaError CudaGetGradEI(double const * restrict mu, double const * restrict grad_mu, double const * restrict chol_var, double const * restrict grad_chol_var, int num_union, int num_to_sample, int dim, int num_mc, double best, uint64_t base_seed, 
bool configure_for_test, double * restrict gpu_mu, double * restrict gpu_grad_mu, double * restrict gpu_chol_var, double * restrict gpu_grad_chol_var, double * restrict random_number_grad_ei, double * restrict gpu_random_number_grad_ei, double * restrict gpu_grad_ei_storage, double * restrict grad_ei) { std::vector<double> grad_ei_storage(num_to_sample * dim * kGradEINumThreads * kGradEINumBlocks); // We assign kGradEINumBlocks blocks and kGradEINumThreads threads/block for grad_ei computation, // so there are (kGradEINumBlocks * kGradEINumThreads) threads in total to execute kernel function // in parallel dim3 threads(kGradEINumThreads); dim3 grid(kGradEINumBlocks); int num_iteration = num_mc / (kGradEINumThreads * kGradEINumBlocks) + 1; // make sure num_iteration is always >= 1 int mem_size_mu = num_union * sizeof(*mu); int mem_size_grad_mu = num_to_sample * dim * sizeof(*mu); int mem_size_chol_var = num_union * num_union *sizeof(*mu); int mem_size_grad_chol_var = num_to_sample * num_union * num_union * dim * sizeof(*mu); int mem_size_grad_ei_storage = kGradEINumThreads * kGradEINumBlocks * num_to_sample * dim * sizeof(*mu); OL_CUDA_ERROR_RETURN(cudaMemcpy(gpu_mu, mu, mem_size_mu, cudaMemcpyHostToDevice)); OL_CUDA_ERROR_RETURN(cudaMemcpy(gpu_grad_mu, grad_mu, mem_size_grad_mu, cudaMemcpyHostToDevice)); OL_CUDA_ERROR_RETURN(cudaMemcpy(gpu_chol_var, chol_var, mem_size_chol_var, cudaMemcpyHostToDevice)); OL_CUDA_ERROR_RETURN(cudaMemcpy(gpu_grad_chol_var, grad_chol_var, mem_size_grad_chol_var, cudaMemcpyHostToDevice)); // execute kernel // inputs: gpu_mu, gpu_chol_var, gpu_grad_mu, gpu_grad_chol_var, best, num_union, num_to_sample, dim, num_iteration, base_seed // output: gpu_grad_ei_storage CudaComputeGradEIGpu <<< grid, threads, mem_size_mu+mem_size_chol_var+mem_size_grad_mu+mem_size_grad_chol_var+num_union*kGradEINumThreads*2*sizeof(*mu) >>> (gpu_mu, gpu_grad_mu, gpu_chol_var, gpu_grad_chol_var, num_union, num_to_sample, dim, num_iteration, best, base_seed, configure_for_test, gpu_random_number_grad_ei, gpu_grad_ei_storage); OL_CUDA_ERROR_RETURN(cudaPeekAtLastError()); OL_CUDA_ERROR_RETURN(cudaMemcpy(grad_ei_storage.data(), gpu_grad_ei_storage, mem_size_grad_ei_storage, cudaMemcpyDeviceToHost)); // copy gpu_random_number_grad_ei back to CPU if configure_for_test is on if (configure_for_test) { int mem_size_random_number_grad_ei = num_iteration * kGradEINumThreads * kGradEINumBlocks * num_union * sizeof(*mu); OL_CUDA_ERROR_RETURN(cudaMemcpy(random_number_grad_ei, gpu_random_number_grad_ei, mem_size_random_number_grad_ei, cudaMemcpyDeviceToHost)); } // The code block below extracts grad_ei from grad_ei_storage, which is output from the function // "CudaGetGradEI" run on gpu. The way to do that is for each component of grad_ei, we find all // the threads calculating the corresponding component and average over the threads. 
std::fill(grad_ei, grad_ei + num_to_sample * dim, 0.0); for (int n = 0; n < (kGradEINumThreads*kGradEINumBlocks); ++n) { for (int i = 0; i < num_to_sample*dim; ++i) { grad_ei[i] += grad_ei_storage[n*num_to_sample*dim + i]; } } for (int i = 0; i < num_to_sample*dim; ++i) { grad_ei[i] /= static_cast<double>(kGradEINumThreads*kGradEINumBlocks); } return kCudaSuccess; } CudaError CudaMallocDeviceMemory(size_t size, void ** restrict address_of_ptr_to_gpu_memory) { OL_CUDA_ERROR_RETURN(cudaMalloc(address_of_ptr_to_gpu_memory, size)); return kCudaSuccess; } CudaError CudaFreeDeviceMemory(void * restrict ptr_to_gpu_memory) { OL_CUDA_ERROR_RETURN(cudaFree(ptr_to_gpu_memory)); return kCudaSuccess; } CudaError CudaSetDevice(int devID) { OL_CUDA_ERROR_RETURN(cudaSetDevice(devID)); // Cuda API to set memory config preference: in our code we prefer to use more shared memory OL_CUDA_ERROR_RETURN(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared)); return kCudaSuccess; } } // end namespace optimal_learning
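//------------------------------------------------------------------------------
// Host-side reference sketch (not part of the library above). It restates the
// Monte-Carlo estimator that CudaComputeEIGpu evaluates per thread: draw
// z ~ N(0, I), form mu + L*z with the lower-triangular Cholesky factor L
// (stored column-major, as consumed by CudaTriangularMatrixVectorMultiply),
// take the improvement max_i(best - (mu_i + (L*z)_i), 0) and average over
// iterations. The function name HostExpectedImprovementMC and the use of
// std::mt19937_64 in place of curand are assumptions for illustration; this is
// a cross-check sketch, not the library's CPU implementation.
// (<stdint.h>, <algorithm> and <vector> are already included at the top of
// this file.)
//------------------------------------------------------------------------------
#include <random>

inline double HostExpectedImprovementMC(double const * mu, double const * chol_var,
                                        int num_union, int num_mc, double best,
                                        uint64_t seed) {
  std::mt19937_64 gen(seed);
  std::normal_distribution<double> normal(0.0, 1.0);
  std::vector<double> z(num_union);
  double aggregate = 0.0;
  for (int mc = 0; mc < num_mc; ++mc) {
    for (int i = 0; i < num_union; ++i) z[i] = normal(gen);
    // z <- L * z, in-place, mirroring CudaTriangularMatrixVectorMultiply.
    for (int j = num_union - 1; j >= 0; --j) {
      double temp = z[j];
      for (int i = num_union - 1; i >= j + 1; --i) {
        z[i] += temp * chol_var[j * num_union + i];
      }
      z[j] *= chol_var[j * num_union + j];
    }
    double improvement = 0.0;
    for (int i = 0; i < num_union; ++i) {
      improvement = std::max(improvement, best - (mu[i] + z[i]));
    }
    aggregate += improvement;
  }
  return aggregate / static_cast<double>(num_mc);
}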
//==============================================================================
// New source file: rotated-box IoU and rotated NMS CUDA kernels
//==============================================================================
#include <cmath> #include <vector> #include "nms/kernel.h" template <typename T> struct RotatedBox { T x_ctr, y_ctr, w, h, a; }; template <typename T> struct Point { T x, y; __host__ __device__ __forceinline__ Point(const T &px = 0, const T &py = 0) : x(px), y(py) {} __host__ __device__ __forceinline__ Point operator+(const Point &p) const { return Point(x + p.x, y + p.y); } __host__ __device__ __forceinline__ Point &operator+=(const Point &p) { x += p.x; y += p.y; return *this; } __host__ __device__ __forceinline__ Point operator-(const Point &p) const { return Point(x - p.x, y - p.y); } __host__ __device__ __forceinline__ Point operator*(const T coeff) const { return Point(x * coeff, y * coeff); } }; template <typename T> __host__ __device__ __forceinline__ T dot_2d(const Point<T> &A, const Point<T> &B) { return A.x * B.x + A.y * B.y; } template <typename T> __host__ __device__ __forceinline__ T cross_2d(const Point<T> &A, const Point<T> &B) { return A.x * B.y - B.x * A.y; } template <typename T> __host__ __device__ __forceinline__ void get_rotated_vertices(const RotatedBox<T> &box, Point<T> (&pts)[4]) { // M_PI / 180. == 0.01745329251 // double theta = box.a * 0.01745329251; // MODIFIED double theta = box.a; T cosTheta2 = (T)cos(theta) * 0.5f; T sinTheta2 = (T)sin(theta) * 0.5f; // y: top --> down; x: left --> right pts[0].x = box.x_ctr - sinTheta2 * box.h - cosTheta2 * box.w; pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; pts[1].x = box.x_ctr + sinTheta2 * box.h - cosTheta2 * box.w; pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; pts[2].x = 2 * box.x_ctr - pts[0].x; pts[2].y = 2 * box.y_ctr - pts[0].y; pts[3].x = 2 * box.x_ctr - pts[1].x; pts[3].y = 2 * box.y_ctr - pts[1].y; } template <typename T> __host__ __device__ __forceinline__ int get_intersection_points(const Point<T> (&pts1)[4], const Point<T> (&pts2)[4], Point<T> (&intersections)[24]) { // Line vector // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] Point<T> vec1[4], vec2[4]; for (int i = 0; i < 4; i++) { vec1[i] = pts1[(i + 1) % 4] - pts1[i]; vec2[i] = pts2[(i + 1) % 4] - pts2[i]; } // Line test - test all line combos for intersection int num = 0; // number of intersections for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { // Solve for 2x2 Ax=b T det = cross_2d<T>(vec2[j], vec1[i]); // This takes care of parallel lines if (fabs(det) <= 1e-14) { continue; } auto vec12 = pts2[j] - pts1[i]; T t1 = cross_2d<T>(vec2[j], vec12) / det; T t2 = cross_2d<T>(vec1[i], vec12) / det; if (t1 >= 0.0f && t1 <= 1.0f && t2 >= 0.0f && t2 <= 1.0f) { intersections[num++] = pts1[i] + vec1[i] * t1; } } } // Check for vertices of rect1 inside rect2 { const auto &AB = vec2[0]; const auto &DA = vec2[3]; auto ABdotAB = dot_2d<T>(AB, AB); auto ADdotAD = dot_2d<T>(DA, DA); for (int i = 0; i < 4; i++) { // assume ABCD is the rectangle, and P is the point to be judged // P is inside ABCD iff. 
P's projection on AB lies within AB // and P's projection on AD lies within AD auto AP = pts1[i] - pts2[0]; auto APdotAB = dot_2d<T>(AP, AB); auto APdotAD = -dot_2d<T>(AP, DA); if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && (APdotAD <= ADdotAD)) { intersections[num++] = pts1[i]; } } } // Reverse the check - check for vertices of rect2 inside rect1 { const auto &AB = vec1[0]; const auto &DA = vec1[3]; auto ABdotAB = dot_2d<T>(AB, AB); auto ADdotAD = dot_2d<T>(DA, DA); for (int i = 0; i < 4; i++) { auto AP = pts2[i] - pts1[0]; auto APdotAB = dot_2d<T>(AP, AB); auto APdotAD = -dot_2d<T>(AP, DA); if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && (APdotAD <= ADdotAD)) { intersections[num++] = pts2[i]; } } } return num; } template <typename T> __host__ __device__ __forceinline__ int convex_hull_graham(const Point<T> (&p)[24], const int &num_in, Point<T> (&q)[24], bool shift_to_zero = false) { assert(num_in >= 2); // Step 1: // Find point with minimum y // if more than 1 points have the same minimum y, // pick the one with the minimum x. int t = 0; for (int i = 1; i < num_in; i++) { if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { t = i; } } auto &start = p[t]; // starting point // Step 2: // Subtract starting point from every points (for sorting in the next step) for (int i = 0; i < num_in; i++) { q[i] = p[i] - start; } // Swap the starting point to position 0 auto tmp = q[0]; q[0] = q[t]; q[t] = tmp; // Step 3: // Sort point 1 ~ num_in according to their relative cross-product values // (essentially sorting according to angles) // If the angles are the same, sort according to their distance to origin T dist[24]; for (int i = 0; i < num_in; i++) { dist[i] = dot_2d<T>(q[i], q[i]); } for (int i = 1; i < num_in - 1; i++) { for (int j = i + 1; j < num_in; j++) { T crossProduct = cross_2d<T>(q[i], q[j]); if ((crossProduct < -1e-6) || (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { auto q_tmp = q[i]; q[i] = q[j]; q[j] = q_tmp; auto dist_tmp = dist[i]; dist[i] = dist[j]; dist[j] = dist_tmp; } } } // Step 4: // Make sure there are at least 2 points (that don't overlap with each other) // in the stack int k; // index of the non-overlapped second point for (k = 1; k < num_in; k++) { if (dist[k] > 1e-8) { break; } } if (k == num_in) { // We reach the end, which means the convex hull is just one point q[0] = p[t]; return 1; } q[1] = q[k]; int m = 2; // 2 points in the stack // Step 5: // Finally we can start the scanning process. // When a non-convex relationship between the 3 points is found // (either concave shape or duplicated points), // we pop the previous point from the stack // until the 3-point relationship is convex again, or // until the stack only contains two points for (int i = k + 1; i < num_in; i++) { while (m > 1 && cross_2d<T>(q[i] - q[m - 2], q[m - 1] - q[m - 2]) >= 0) { m--; } q[m++] = q[i]; } // Step 6 (Optional): // In general sense we need the original coordinates, so we // need to shift the points back (reverting Step 2) // But if we're only interested in getting the area/perimeter of the shape // We can simply return. 
if (!shift_to_zero) { for (int i = 0; i < m; i++) { q[i] += start; } } return m; } template <typename T> __host__ __device__ __forceinline__ T polygon_area(const Point<T> (&q)[24], const int &m) { if (m <= 2) { return 0; } T area = 0; for (int i = 1; i < m - 1; i++) { area += fabs(cross_2d<T>(q[i] - q[0], q[i + 1] - q[0])); } return area / 2.0; } template <typename T> __host__ __device__ __forceinline__ T rotated_boxes_intersection(const RotatedBox<T> &box1, const RotatedBox<T> &box2) { // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned // from rotated_rect_intersection_pts Point<T> intersectPts[24], orderedPts[24]; Point<T> pts1[4]; Point<T> pts2[4]; get_rotated_vertices<T>(box1, pts1); get_rotated_vertices<T>(box2, pts2); int num = get_intersection_points<T>(pts1, pts2, intersectPts); if (num <= 2) { return 0.0; } // Convex Hull to order the intersection points in clockwise order and find // the contour area. int num_convex = convex_hull_graham<T>(intersectPts, num, orderedPts, true); return polygon_area<T>(orderedPts, num_convex); } template <typename T> __host__ __device__ __forceinline__ T single_box_iou_rotated(T const *const box1_raw, T const *const box2_raw) { // shift center to the middle point to achieve higher precision in result RotatedBox<T> box1, box2; auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; box1.x_ctr = box1_raw[0] - center_shift_x; box1.y_ctr = box1_raw[1] - center_shift_y; box1.w = box1_raw[2]; box1.h = box1_raw[3]; box1.a = box1_raw[4]; box2.x_ctr = box2_raw[0] - center_shift_x; box2.y_ctr = box2_raw[1] - center_shift_y; box2.w = box2_raw[2]; box2.h = box2_raw[3]; box2.a = box2_raw[4]; const T area1 = box1.w * box1.h; const T area2 = box2.w * box2.h; if (area1 < 1e-14 || area2 < 1e-14) { return 0.f; } const T intersection = rotated_boxes_intersection<T>(box1, box2); T baseS = 1.0; baseS = (area1 + area2 - intersection); const T iou = intersection / baseS; return iou; } /********** new NMS for only score and index array **********/ template <typename T_SCORE, typename T_BBOX, int TSIZE> __global__ void allClassRotatedNMS_kernel(const int num, const int num_classes, const int num_preds_per_class, const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized, T_BBOX *bbox_data, // bbox_data should be float to // preserve location information T_SCORE *beforeNMS_scores, int *beforeNMS_index_array, T_SCORE *afterNMS_scores, int *afterNMS_index_array) { //__shared__ bool kept_bboxinfo_flag[CAFFE_CUDA_NUM_THREADS * TSIZE]; extern __shared__ bool kept_bboxinfo_flag[]; for (int i = 0; i < num; i++) { const int offset = i * num_classes * num_preds_per_class + blockIdx.x * num_preds_per_class; const int max_idx = offset + top_k; // put top_k bboxes into NMS calculation const int bbox_idx_offset = share_location ? (i * num_preds_per_class) : (i * num_classes * num_preds_per_class); // local thread data int loc_bboxIndex[TSIZE]; T_BBOX loc_bbox[TSIZE * 5]; // initialize Bbox, Bboxinfo, kept_bboxinfo_flag // Eliminate shared memory RAW hazard __syncthreads(); #pragma unroll for (int t = 0; t < TSIZE; t++) { const int cur_idx = threadIdx.x + blockDim.x * t; const int item_idx = offset + cur_idx; if (item_idx < max_idx) { loc_bboxIndex[t] = beforeNMS_index_array[item_idx]; if (loc_bboxIndex[t] >= 0) // if (loc_bboxIndex[t] != -1) { const int bbox_data_idx = share_location ? 
(loc_bboxIndex[t] % num_preds_per_class + bbox_idx_offset) : loc_bboxIndex[t]; memcpy(&loc_bbox[t * 5], &bbox_data[bbox_data_idx * 5], 5 * sizeof(T_BBOX)); kept_bboxinfo_flag[cur_idx] = true; } else { kept_bboxinfo_flag[cur_idx] = false; } } else { kept_bboxinfo_flag[cur_idx] = false; } } // filter out overlapped boxes with lower scores int ref_item_idx = offset; int ref_bbox_idx = share_location ? (beforeNMS_index_array[ref_item_idx] % num_preds_per_class + bbox_idx_offset) : beforeNMS_index_array[ref_item_idx]; while ((ref_bbox_idx != -1) && ref_item_idx < max_idx) { T_BBOX ref_bbox[5]; memcpy(&ref_bbox[0], &bbox_data[ref_bbox_idx * 5], 5 * sizeof(T_BBOX)); // Eliminate shared memory RAW hazard __syncthreads(); for (int t = 0; t < TSIZE; t++) { const int cur_idx = threadIdx.x + blockDim.x * t; const int item_idx = offset + cur_idx; if ((kept_bboxinfo_flag[cur_idx]) && (item_idx > ref_item_idx)) { // TODO: may need to add bool normalized as argument, HERE true means // normalized if (single_box_iou_rotated(&ref_bbox[0], loc_bbox + t * 5) > nms_threshold) { kept_bboxinfo_flag[cur_idx] = false; } } } __syncthreads(); do { ref_item_idx++; } while (ref_item_idx < max_idx && !kept_bboxinfo_flag[ref_item_idx - offset]); ref_bbox_idx = share_location ? (beforeNMS_index_array[ref_item_idx] % num_preds_per_class + bbox_idx_offset) : beforeNMS_index_array[ref_item_idx]; } // store data for (int t = 0; t < TSIZE; t++) { const int cur_idx = threadIdx.x + blockDim.x * t; const int read_item_idx = offset + cur_idx; const int write_item_idx = (i * num_classes * top_k + blockIdx.x * top_k) + cur_idx; /* * If not keeping the bbox * Set the score to 0 * Set the bounding box index to -1 */ if (read_item_idx < max_idx) { afterNMS_scores[write_item_idx] = kept_bboxinfo_flag[cur_idx] ? beforeNMS_scores[read_item_idx] : 0.0f; afterNMS_index_array[write_item_idx] = kept_bboxinfo_flag[cur_idx] ? 
loc_bboxIndex[t] : -1; } } } } template <typename T_SCORE, typename T_BBOX> pluginStatus_t allClassRotatedNMS_gpu(cudaStream_t stream, const int num, const int num_classes, const int num_preds_per_class, const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized, void *bbox_data, void *beforeNMS_scores, void *beforeNMS_index_array, void *afterNMS_scores, void *afterNMS_index_array) { #define P(tsize) allClassRotatedNMS_kernel<T_SCORE, T_BBOX, (tsize)> void (*kernel[10])(const int, const int, const int, const int, const float, const bool, const bool, float *, T_SCORE *, int *, T_SCORE *, int *) = { P(1), P(2), P(3), P(4), P(5), P(6), P(7), P(8), P(9), P(10), }; const int BS = 512; const int GS = num_classes; const int t_size = (top_k + BS - 1) / BS; kernel[t_size - 1]<<<GS, BS, BS * t_size * sizeof(bool), stream>>>( num, num_classes, num_preds_per_class, top_k, nms_threshold, share_location, isNormalized, (T_BBOX *)bbox_data, (T_SCORE *)beforeNMS_scores, (int *)beforeNMS_index_array, (T_SCORE *)afterNMS_scores, (int *)afterNMS_index_array); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // allClassNMS LAUNCH CONFIG typedef pluginStatus_t (*rotatedNmsFunc)(cudaStream_t, const int, const int, const int, const int, const float, const bool, const bool, void *, void *, void *, void *, void *); struct rotatedNmsLaunchConfig { DataType t_score; DataType t_bbox; rotatedNmsFunc function; rotatedNmsLaunchConfig(DataType t_score, DataType t_bbox) : t_score(t_score), t_bbox(t_bbox) {} rotatedNmsLaunchConfig(DataType t_score, DataType t_bbox, rotatedNmsFunc function) : t_score(t_score), t_bbox(t_bbox), function(function) {} bool operator==(const rotatedNmsLaunchConfig &other) { return t_score == other.t_score && t_bbox == other.t_bbox; } }; static std::vector<rotatedNmsLaunchConfig> rotatedNmsFuncVec; bool rotatedNmsInit() { rotatedNmsFuncVec.push_back(rotatedNmsLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, allClassRotatedNMS_gpu<float, float>)); return true; } static bool initialized = rotatedNmsInit(); pluginStatus_t allClassRotatedNMS(cudaStream_t stream, const int num, const int num_classes, const int num_preds_per_class, const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized, const DataType DT_SCORE, const DataType DT_BBOX, void *bbox_data, void *beforeNMS_scores, void *beforeNMS_index_array, void *afterNMS_scores, void *afterNMS_index_array, bool) { auto __cuda_arch__ = get_cuda_arch(0); // assume there is only one arch 7.2 device if (__cuda_arch__ == 720 && top_k >= 1000) { printf("Warning: pre_top_k need to be reduced for devices with arch 7.2, got pre_top_k=%d\n", top_k); } rotatedNmsLaunchConfig lc(DT_SCORE, DT_BBOX); for (unsigned i = 0; i < rotatedNmsFuncVec.size(); ++i) { if (lc == rotatedNmsFuncVec[i]) { DEBUG_PRINTF("all class rotated nms kernel %d\n", i); return rotatedNmsFuncVec[i].function(stream, num, num_classes, num_preds_per_class, top_k, nms_threshold, share_location, isNormalized, bbox_data, beforeNMS_scores, beforeNMS_index_array, afterNMS_scores, afterNMS_index_array); } } return STATUS_BAD_PARAM; }
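//------------------------------------------------------------------------------
// Illustrative host-side check (not part of the plugin). Because
// single_box_iou_rotated is marked __host__ __device__, it can be exercised
// directly from CPU code when this translation unit is compiled with nvcc.
// The two boxes below use the layout {x_ctr, y_ctr, w, h, angle-in-radians,
// per the MODIFIED note in get_rotated_vertices} and are made-up values chosen
// only so the expected result is easy to verify by hand; the function name
// rotatedIoUSmokeTest is an assumption for this sketch.
//------------------------------------------------------------------------------
#include <cstdio>

static void rotatedIoUSmokeTest() {
  const double box1[5] = {0.0, 0.0, 2.0, 2.0, 0.0};  // covers [-1, 1] x [-1, 1]
  const double box2[5] = {1.0, 0.0, 2.0, 2.0, 0.0};  // covers [ 0, 2] x [-1, 1]
  double iou = single_box_iou_rotated<double>(box1, box2);
  // Overlap area is 2 and the union is 6, so iou should be close to 1/3.
  printf("rotated IoU = %f (expected ~0.3333)\n", iou);
}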
//==============================================================================
// New source file: CUDA memcopy bandwidth test (bandwidthTest)
//==============================================================================
#ifdef _GLIBCXX_ATOMIC_BUILTINS #undef _GLIBCXX_ATOMIC_BUILTINS #endif /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This is a simple test program to measure the memcopy bandwidth of the GPU. * It can measure device to device copy bandwidth, host to device copy bandwidth * for pageable and pinned memory, and device to host copy bandwidth for pageable * and pinned memory. * * Usage: * ./bandwidthTest [option]... */ // includes #include <cutil_inline.h> #include <shrUtils.h> #include <cuda.h> // defines, project #define MEMCOPY_ITERATIONS 10 #define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M #define DEFAULT_INCREMENT (1 << 22) //4 M #define CACHE_CLEAR_SIZE (1 << 24) //16 M //shmoo mode defines #define SHMOO_MEMSIZE_MAX (1 << 26) //64 M #define SHMOO_MEMSIZE_START (1 << 10) //1 KB #define SHMOO_INCREMENT_1KB (1 << 10) //1 KB #define SHMOO_INCREMENT_2KB (1 << 11) //2 KB #define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB #define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_INCREMENT_1MB (1 << 20) //1 MB #define SHMOO_INCREMENT_2MB (1 << 21) //2 MB #define SHMOO_INCREMENT_4MB (1 << 22) //4 MB #define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB #define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB #define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_LIMIT_1MB (1 << 20) //1 MB #define SHMOO_LIMIT_16MB (1 << 24) //16 MB #define SHMOO_LIMIT_32MB (1 << 25) //32 MB //enums, project enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE }; enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE }; enum printMode { USER_READABLE, CSV }; enum memoryMode { PINNED, PAGEABLE }; // if true, use CPU based timing for everything static bool bDontUseGPUTiming; //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(const int argc, const char **argv); void testBandwidth( unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testDeviceToDeviceTransfer(unsigned int memSize); void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printHelp(void); //////////////////////////////////////////////////////////////////////////////// // Program main 
//////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { printf("[bandwidthTest]\n"); // set logfile name and start logs shrSetLogFileName ("bandwidthTest.txt"); shrLog("%s Starting...\n\n", argv[0]); int iRetVal = runTest(argc, (const char**)argv); if (iRetVal != -1) { shrLog("\n[bandwidthTest] - Test results:\n%s\n\n", (iRetVal == 0) ? "PASSED" : "FAILED"); } // finish shrEXIT(argc, (const char**)argv); } /////////////////////////////////////////////////////////////////////////////// //Parse args, run the appropriate tests /////////////////////////////////////////////////////////////////////////////// int runTest(const int argc, const char **argv) { int start = DEFAULT_SIZE; int end = DEFAULT_SIZE; int startDevice = 0; int endDevice = 0; int increment = DEFAULT_INCREMENT; testMode mode = QUICK_MODE; bool htod = false; bool dtoh = false; bool dtod = false; bool wc = false; char *modeStr; char *device = NULL; printMode printmode = USER_READABLE; char *memModeStr = NULL; memoryMode memMode = PAGEABLE; //process command line args if(shrCheckCmdLineFlag( argc, argv, "help")) { printHelp(); return -1; } if(shrCheckCmdLineFlag( argc, argv, "csv")) { printmode = CSV; } if( shrGetCmdLineArgumentstr(argc, argv, "memory", &memModeStr) ) { if( strcmp(memModeStr, "pageable") == 0 ) { memMode = PAGEABLE; } else if( strcmp(memModeStr, "pinned") == 0) { memMode = PINNED; } else { shrLog("Invalid memory mode - valid modes are pageable or pinned\n"); shrLog("See --help for more information\n"); return -1000; } } else { //default - pageable memory memMode = PAGEABLE; } if( shrGetCmdLineArgumentstr(argc, argv, "device", &device) ) { int deviceCount; cudaGetDeviceCount(&deviceCount); if( deviceCount == 0 ) { shrLog("!!!!!No devices found!!!!!\n"); return -2000; } if( strcmp (device, "all") == 0 ) { printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n"); startDevice = 0; endDevice = deviceCount-1; } else { startDevice = endDevice = atoi(device); if( startDevice >= deviceCount || startDevice < 0) { shrLog("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0); startDevice = endDevice = 0; } } } shrLog("Running on...\n\n"); for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaDeviceProp deviceProp; if (cudaGetDeviceProperties(&deviceProp, currentDevice) == cudaSuccess) shrLog(" Device %d: %s\n", currentDevice, deviceProp.name); } if( shrGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) ) { //figure out the mode if( strcmp(modeStr, "quick") == 0 ) { shrLog(" Quick Mode\n\n"); mode = QUICK_MODE; } else if( strcmp(modeStr, "shmoo") == 0 ) { shrLog(" Shmoo Mode\n\n"); mode = SHMOO_MODE; } else if( strcmp(modeStr, "range") == 0 ) { shrLog(" Range Mode\n\n"); mode = RANGE_MODE; } else { shrLog("Invalid mode - valid modes are quick, range, or shmoo\n"); shrLog("See --help for more information\n"); return -3000; } } else { //default mode - quick shrLog(" Quick Mode\n\n"); mode = QUICK_MODE; } if(shrCheckCmdLineFlag( argc, argv, "htod")) htod = true; if(shrCheckCmdLineFlag( argc, argv, "dtoh")) dtoh = true; if(shrCheckCmdLineFlag( argc, argv, "dtod")) dtod = true; #if CUDART_VERSION >= 2020 if(shrCheckCmdLineFlag( argc, argv, "wc")) wc = true; #endif if(shrCheckCmdLineFlag( argc, argv, "cputiming")) bDontUseGPUTiming = true; if( !htod && !dtoh && !dtod ) { //default: All htod = true; dtoh = true; dtod = true; } if( RANGE_MODE == mode ) { if( 
shrGetCmdLineArgumenti( argc, argv, "start", &start) ) { if( start <= 0 ) { shrLog("Illegal argument - start must be greater than zero\n"); return -4000; } } else { shrLog("Must specify a starting size in range mode\n"); shrLog("See --help for more information\n"); return -5000; } if( shrGetCmdLineArgumenti( argc, argv, "end", &end) ) { if( end <= 0 ) { shrLog("Illegal argument - end must be greater than zero\n"); return -6000; } if( start > end ) { shrLog("Illegal argument - start is greater than end\n"); return -7000; } } else { shrLog("Must specify an end size in range mode.\n"); shrLog("See --help for more information\n"); return -8000; } if( shrGetCmdLineArgumenti( argc, argv, "increment", &increment) ) { if( increment <= 0 ) { shrLog("Illegal argument - increment must be greater than zero\n"); return -9000; } } else { shrLog("Must specify an increment in user mode\n"); shrLog("See --help for more information\n"); return -10000; } } if( htod ) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } if( dtoh ) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc); } if( dtod ) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } shrFree( memModeStr); return 0; } /////////////////////////////////////////////////////////////////////////////// // Run a bandwidth test /////////////////////////////////////////////////////////////////////////////// void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { switch( mode ) { case QUICK_MODE: testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc ); break; case RANGE_MODE: testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc); break; case SHMOO_MODE: testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc); break; default: break; } } ////////////////////////////////////////////////////////////////////// // Run a quick mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc); } /////////////////////////////////////////////////////////////////////// // Run a range mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies we're going to run unsigned int count = 1 + ((end - start) / increment); unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) ); double *bandwidths = ( double * ) malloc( count * sizeof(double) ); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) bandwidths[i] = 0.0; // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { 
cudaSetDevice(currentDevice); //run each of the copies for(unsigned int i = 0; i < count; i++) { memSizes[i] = start + i * increment; switch(kind) { case DEVICE_TO_HOST: bandwidths[i] = testDeviceToHostTransfer( memSizes[i], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[i] = testHostToDeviceTransfer( memSizes[i], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[i] = testDeviceToDeviceTransfer( memSizes[i] ); break; } } cudaThreadExit(); } // Complete the bandwidth computation on all the devices //print results if(printmode == CSV) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } ////////////////////////////////////////////////////////////////////////////// // Intense shmoo mode - covers a large range of values with varying increments ////////////////////////////////////////////////////////////////////////////// void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies to make unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB) + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB) + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB) + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB) + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB) + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB) + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB); unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) ); double *bandwidths = ( double * ) malloc( count * sizeof(double) ); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) bandwidths[i] = 0.0; // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //Run the shmoo int iteration = 0; unsigned int memSize = 0; while( memSize <= SHMOO_MEMSIZE_MAX ) { if( memSize < SHMOO_LIMIT_20KB ) { memSize += SHMOO_INCREMENT_1KB; } else if( memSize < SHMOO_LIMIT_50KB ) { memSize += SHMOO_INCREMENT_2KB; }else if( memSize < SHMOO_LIMIT_100KB ) { memSize += SHMOO_INCREMENT_10KB; }else if( memSize < SHMOO_LIMIT_1MB ) { memSize += SHMOO_INCREMENT_100KB; }else if( memSize < SHMOO_LIMIT_16MB ) { memSize += SHMOO_INCREMENT_1MB; }else if( memSize < SHMOO_LIMIT_32MB ) { memSize += SHMOO_INCREMENT_2MB; }else { memSize += SHMOO_INCREMENT_4MB; } memSizes[iteration] = memSize; switch(kind) { case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc ); break; case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc ); break; case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] ); break; } iteration++; shrLog("."); } } // Complete the bandwidth computation on all the devices //print results shrLog("\n"); if( CSV == printmode) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } /////////////////////////////////////////////////////////////////////////////// // 
test the bandwidth of a device to host memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc) { unsigned int timer = 0; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; unsigned char *h_idata = NULL; unsigned char *h_odata = NULL; cudaEvent_t start, stop; cutilCheckError( cutCreateTimer( &timer ) ); cutilSafeCall ( cudaEventCreate( &start ) ); cutilSafeCall ( cudaEventCreate( &stop ) ); //allocate host memory if( PINNED == memMode ) { //pinned memory mode - use special function to get OS-pinned memory #if CUDART_VERSION >= 2020 cutilSafeCall( cudaHostAlloc( (void**)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) ); cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) ); #else cutilSafeCall( cudaMallocHost( (void**)&h_idata, memSize ) ); cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) ); #endif } else { //pageable memory mode - use malloc h_idata = (unsigned char *)malloc( memSize ); h_odata = (unsigned char *)malloc( memSize ); } //initialize the memory for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char) (i & 0xff); } // allocate device memory unsigned char* d_idata; cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize)); //initialize the device memory cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize, cudaMemcpyHostToDevice) ); //copy data from GPU to Host cutilCheckError( cutStartTimer( timer)); cutilSafeCall( cudaEventRecord( start, 0 ) ); if( PINNED == memMode ) { for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ ) { cutilSafeCall( cudaMemcpyAsync( h_odata, d_idata, memSize, cudaMemcpyDeviceToHost, 0) ); } } else { for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ ) { cutilSafeCall( cudaMemcpy( h_odata, d_idata, memSize, cudaMemcpyDeviceToHost) ); } } cutilSafeCall( cudaEventRecord( stop, 0 ) ); // make sure GPU has finished copying cutilSafeCall( cudaThreadSynchronize() ); //get the the total elapsed time in ms cutilCheckError( cutStopTimer( timer)); cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) ); if( PINNED != memMode || bDontUseGPUTiming ) { elapsedTimeInMs = cutGetTimerValue( timer); } //calculate bandwidth in MB/s bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory cutilSafeCall( cudaEventDestroy(stop) ); cutilSafeCall( cudaEventDestroy(start) ); cutilCheckError( cutDeleteTimer( timer)); if( PINNED == memMode ) { cutilSafeCall( cudaFreeHost(h_idata) ); cutilSafeCall( cudaFreeHost(h_odata) ); } else { free(h_idata); free(h_odata); } cutilSafeCall(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a host to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc) { unsigned int timer = 0; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; cutilCheckError( cutCreateTimer( &timer ) ); cutilSafeCall( cudaEventCreate( &start ) ); cutilSafeCall( cudaEventCreate( &stop ) ); //allocate host memory unsigned char *h_odata = NULL; if( PINNED == memMode ) { #if CUDART_VERSION >= 2020 //pinned memory mode - use special function to get OS-pinned memory cutilSafeCall( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) ); #else //pinned memory mode - use special function to get OS-pinned memory cutilSafeCall( cudaMallocHost( (void**)&h_odata, memSize ) ); #endif } else { //pageable memory mode - use malloc h_odata = (unsigned char *)malloc( memSize ); } unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE ); unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE ); //initialize the memory for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_odata[i] = (unsigned char) (i & 0xff); } for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++) { h_cacheClear1[i] = (unsigned char) (i & 0xff); h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff)); } //allocate device memory unsigned char* d_idata; cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize)); cutilCheckError( cutStartTimer( timer)); cutilSafeCall( cudaEventRecord( start, 0 ) ); //copy host memory to device memory if( PINNED == memMode ) { for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { cutilSafeCall( cudaMemcpyAsync( d_idata, h_odata, memSize, cudaMemcpyHostToDevice, 0) ); } } else { for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { cutilSafeCall( cudaMemcpy( d_idata, h_odata, memSize, cudaMemcpyHostToDevice) ); } } cutilSafeCall( cudaEventRecord( stop, 0 ) ); cutilSafeCall( cudaThreadSynchronize() ); //total elapsed time in ms cutilCheckError( cutStopTimer( timer)); cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) ); if ( PINNED != memMode || bDontUseGPUTiming ) { elapsedTimeInMs = cutGetTimerValue( timer); } cutilCheckError( cutResetTimer( timer)); //calculate bandwidth in MB/s bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory cutilSafeCall( cudaEventDestroy(stop) ); cutilSafeCall( cudaEventDestroy(start) ); cutilCheckError( cutDeleteTimer( timer)); if( PINNED == memMode ) { cutilSafeCall( cudaFreeHost(h_odata) ); } else { free(h_odata); } free(h_cacheClear1); free(h_cacheClear2); cutilSafeCall(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a device to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToDeviceTransfer(unsigned int memSize) { unsigned int timer = 0; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; cutilCheckError( cutCreateTimer( &timer ) ); cutilSafeCall( cudaEventCreate( &start ) ); cutilSafeCall( cudaEventCreate( &stop ) ); //allocate host memory unsigned char *h_idata = (unsigned char *)malloc( memSize ); //initialize the host memory for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char) (i & 0xff); } //allocate device memory unsigned char *d_idata; cutilSafeCall( cudaMalloc( (void**) &d_idata, memSize)); unsigned char *d_odata; cutilSafeCall( cudaMalloc( (void**) &d_odata, memSize)); //initialize memory cutilSafeCall( cudaMemcpy( d_idata, h_idata, memSize, cudaMemcpyHostToDevice) ); //run the memcopy cutilCheckError( cutStartTimer( timer)); cutilSafeCall( cudaEventRecord( start, 0 ) ); for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ ) { cutilSafeCall( cudaMemcpy( d_odata, d_idata, memSize, cudaMemcpyDeviceToDevice) ); } cutilSafeCall( cudaEventRecord( stop, 0 ) ); //Since device to device memory copies are non-blocking, //cudaThreadSynchronize() is required in order to get //proper timing. cutilSafeCall( cudaThreadSynchronize() ); //get the the total elapsed time in ms cutilCheckError( cutStopTimer( timer)); cutilSafeCall( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) ); if ( bDontUseGPUTiming ) { elapsedTimeInMs = cutGetTimerValue( timer); } //calculate bandwidth in MB/s bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory cutilCheckError( cutDeleteTimer( timer)); free(h_idata); cutilSafeCall(cudaEventDestroy(stop)); cutilSafeCall(cudaEventDestroy(start)); cutilSafeCall(cudaFree(d_idata)); cutilSafeCall(cudaFree(d_odata)); return bandwidthInMBs; } ///////////////////////////////////////////////////////// //print results in an easily read format //////////////////////////////////////////////////////// void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { // log config information if (kind == DEVICE_TO_DEVICE) { shrLog(" Device to Device Bandwidth, %i Device(s)\n", iNumDevs); } else { if (kind == DEVICE_TO_HOST) { shrLog(" Device to Host Bandwidth, %i Device(s), ", iNumDevs); } else if (kind == HOST_TO_DEVICE) { shrLog(" Host to Device Bandwidth, %i Device(s), ", iNumDevs); } if(memMode == PAGEABLE) { shrLog("Paged memory\n"); } else if (memMode == PINNED) { shrLog("Pinned memory"); if (wc) { shrLog(", Write-Combined Memory Enabled"); } shrLog("\n"); } } shrLog(" Transfer Size (Bytes)\tBandwidth(MB/s)\n"); unsigned int i; for(i = 0; i < (count - 1); i++) { shrLog(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]); } shrLog(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? 
"\t" : "", bandwidths[i]); } /////////////////////////////////////////////////////////////////////////// //print results in a database format /////////////////////////////////////////////////////////////////////////// void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { std::string sConfig; // log config information if (kind == DEVICE_TO_DEVICE) { sConfig += "D2D"; } else { if (kind == DEVICE_TO_HOST) { sConfig += "D2H"; } else if (kind == HOST_TO_DEVICE) { sConfig += "H2D"; } if(memMode == PAGEABLE) { sConfig += "-Paged"; } else if (memMode == PINNED) { sConfig += "-Pinned"; if (wc) { sConfig += "-WriteCombined"; } } } unsigned int i; double dSeconds = 0.0; for(i = 0; i < count; i++) { dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20)); shrLogEx(LOGBOTH | MASTER, 0, "bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n", sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs); } } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { shrLog("Usage: bandwidthTest [OPTION]...\n"); shrLog("Test the bandwidth for device to host, host to device, and device to device transfers\n"); shrLog("\n"); shrLog("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n"); shrLog("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n"); shrLog("\n"); shrLog("Options:\n"); shrLog("--help\tDisplay this help menu\n"); shrLog("--csv\tPrint results as a CSV\n"); shrLog("--device=[deviceno]\tSpecify the device device to be used\n"); shrLog(" all - compute cumulative bandwidth on all the devices\n"); shrLog(" 0,1,2,...,n - Specify any particular device to be used\n"); shrLog("--memory=[MEMMODE]\tSpecify which memory mode to use\n"); shrLog(" pageable - pageable memory\n"); shrLog(" pinned - non-pageable system memory\n"); shrLog("--mode=[MODE]\tSpecify the mode to use\n"); shrLog(" quick - performs a quick measurement\n"); shrLog(" range - measures a user-specified range of values\n"); shrLog(" shmoo - performs an intense shmoo of a large range of values\n"); shrLog("--htod\tMeasure host to device transfers\n"); shrLog("--dtoh\tMeasure device to host transfers\n"); shrLog("--dtod\tMeasure device to device transfers\n"); #if CUDART_VERSION >= 2020 shrLog("--wc\tAllocate pinned memory as write-combined\n"); #endif shrLog("--cputiming\tForce CPU-based timing always\n"); shrLog("Range mode options\n"); shrLog("--start=[SIZE]\tStarting transfer size in bytes\n"); shrLog("--end=[SIZE]\tEnding transfer size in bytes\n"); shrLog("--increment=[SIZE]\tIncrement size in bytes\n"); }
#include "JobWrapper.h" #include "SignalProcessingFitterQueue.h" #include "GpuMultiFlowFitControl.h" #include "DarkHalo.h" using namespace std; //////////////////////////////////////// // workset (represents one job WorkSet::WorkSet( int flow_key, int flow_block_size) { setFlow( flow_key, flow_block_size ); _maxFrames = 0; // only set if we don't want to determine the mem sizes for a specific number of frames or no item is set _maxBeads = 0; // only set if we don't want to determine the mem sizes for a specific number of beads or no item is set _info = NULL; } WorkSet::WorkSet(BkgModelWorkInfo * i) { setData( i ); _maxFrames = 0; // only set if we don't want to determine the mem sizes for a specific number of frames or no item is set _maxBeads = 0; // only set if we don't want to determine the mem sizes for a specific number of beads or no item is set } WorkSet::~WorkSet() { } void WorkSet::setMaxFrames(int frames) { _maxFrames = frames; } int WorkSet::getMaxFrames() const { int maxFrames; maxFrames = (_maxFrames != 0)?(_maxFrames):(GpuMultiFlowFitControl::GetMaxFrames()); assert(maxFrames); return maxFrames; } int WorkSet::getUncompressedFrames() const { return _info->img->raw->uncompFrames; } int WorkSet::getImageFrames() const { return _info->img->raw->frames; } void WorkSet::setMaxBeads(int beads) { _maxBeads = beads; } void WorkSet::setFlow(int flow_key, int flow_block_size) { _flow_block_size = flow_block_size; _flow_key = flow_key; if ( _flow_block_size > MAX_NUM_FLOWS_IN_BLOCK_GPU ) { // I'm sorry, Dave, I can't do that. fprintf( stderr, "GPU acceleration requires that the number of flows in a block be less than %d.\n" "This limit is set at compile time in MAX_NUM_FLOWS_IN_BLOCK_GPU.\n", MAX_NUM_FLOWS_IN_BLOCK_GPU ); exit( -1 ); } _multiFlowFitControl.SetFlowParams( flow_key, flow_block_size ); } int WorkSet::getFlowBlockSize() const { // We shouldn't be asking for this if some code path didn't set it. 
assert( _flow_block_size ); return _flow_block_size; } int WorkSet::getFlowKey() const { return _flow_key; } int WorkSet::getMaxBeads() const { int maxBeads = (_maxBeads != 0)?(_maxBeads):(GpuMultiFlowFitControl::GetMaxBeads()); assert(maxBeads); return maxBeads; } void WorkSet::setData(BkgModelWorkInfo * i) { _info = i; setFlow( i->flow_key, i->inception_state-> bkg_control.signal_chunks.flow_block_sequence.BlockAtFlow( i->flow )->size() ); } bool WorkSet::isSet() const { return (_info != NULL)?(true):(false); } int WorkSet::getNumBeads() const { if(isSet()){ return _info->bkgObj->region_data->my_beads.numLBeads; } return getMaxBeads();// GpuMultiFlowFitControl::GetMaxBeads(); } int WorkSet::getNumFrames() const { if(isSet()){ return _info->bkgObj->region_data->time_c.GetTimeCompressedFrames(); } return getMaxFrames(); } int WorkSet::getImgHeight() const { return _info->img->GetRows(); } int WorkSet::getImgWidth() const { return _info->img->GetCols(); } int WorkSet::getMaxRegionWidth() const { return _info->inception_state->loc_context.regionXSize; //return _info->bkgObj->region_data->region->w; } int WorkSet::getMaxRegionHeight() const { return _info->inception_state->loc_context.regionYSize; //return _info->bkgObj->region_data->region->h; } /* int WorkSet::getRegionWidth() const { return _info->inception_state->loc_context.regionXSize; //return _info->bkgObj->region_data->region->w; } int WorkSet::getRegionHeight() const { return _info->inception_state->loc_context.regionYSize; //return _info->bkgObj->region_data->region->h; } */ int WorkSet::getMaxSteps() { return _multiFlowFitControl.GetMaxSteps(); } int WorkSet::getMaxParams() { return _multiFlowFitControl.GetMaxParamsToFit(); } int WorkSet::getNumSteps(int fit_index) { return _fd[fit_index]->GetNumSteps(); } int WorkSet::getNumParams(int fit_index) { return _fd[fit_index]->GetNumParamsToFit(); } int WorkSet::getAbsoluteFlowNum() { FlowBlockSequence::const_iterator flow_block = _info->inception_state->bkg_control.signal_chunks.flow_block_sequence. 
BlockAtFlow( _info->flow ); return flow_block->begin(); } reg_params * WorkSet::getRegionParams() { return &_info->bkgObj->region_data->my_regions.rp; } reg_params * WorkSet::getRegionParamMinBounds() { return &_info->bkgObj->region_data->my_regions.rp_low; } reg_params * WorkSet::getRegionParamMaxBounds() { return &_info->bkgObj->region_data->my_regions.rp_high; } BeadTracker * WorkSet::getBeadTracker(){ return &_info->bkgObj->region_data->my_beads; } BeadParams * WorkSet::getBeadParams(){ return &_info->bkgObj->region_data->my_beads.params_nn[0]; } bead_state * WorkSet::getBeadState(){ return &_info->bkgObj->region_data->my_beads.all_status[0]; } float * WorkSet::getEmphVec(){ return &_info->bkgObj->region_data->emphasis_data.emphasis_vector_storage[0]; } float * WorkSet::getDarkMatter(){ return &_info->bkgObj->region_data->my_regions.missing_mass.dark_matter_compensator[0]; } int * WorkSet::getFlowIdxMap(){ return _info->bkgObj->region_data_extras.my_flow->flow_ndx_map; } FG_BUFFER_TYPE * WorkSet::getFgBuffer(){ return _info->bkgObj->region_data->my_trace.fg_buffers; } float * WorkSet::getDeltaFrames(){ return &_info->bkgObj->region_data->time_c.deltaFrame[0]; } int * WorkSet::getFineNucStart(){ return _info->bkgObj->region_data->my_regions.cache_step.i_start_fine_step; } int * WorkSet::getCoarseNucStart(){ return _info->bkgObj->region_data->my_regions.cache_step.i_start_coarse_step; } float * WorkSet::getShiftedBackground(){ _info->bkgObj->region_data->my_scratch.FillShiftedBkg (*_info->bkgObj->region_data->emptytrace, _info->bkgObj->region_data->my_regions.rp.tshift, _info->bkgObj->region_data->time_c, true, getFlowBlockSize() ); return _info->bkgObj->region_data->my_scratch.shifted_bkg; } void WorkSet::calculateFineNucRise(){ _info->bkgObj->region_data->my_regions.cache_step.Unlock(); _info->bkgObj->region_data->my_regions.cache_step.CalculateNucRiseFineStep (&_info->bkgObj->region_data->my_regions.rp, _info->bkgObj->region_data->time_c, *_info->bkgObj->region_data_extras.my_flow); // the same for the whole region because time-shift happens per well } float *WorkSet::getFineNucRise() { return _info->bkgObj->region_data->my_regions.cache_step.nuc_rise_fine_step; } void WorkSet::calculateCoarseNucRise() { _info->bkgObj->region_data->my_regions.cache_step.Unlock(); _info->bkgObj->region_data->my_regions.cache_step.CalculateNucRiseCoarseStep (&_info->bkgObj->region_data->my_regions.rp, _info->bkgObj->region_data->time_c, *_info->bkgObj->region_data_extras.my_flow); } float *WorkSet::getCoarseNucRise() { return _info->bkgObj->region_data->my_regions.cache_step.nuc_rise_coarse_step; } void WorkSet::setUpFineEmphasisVectors() { _info->bkgObj->region_data->SetFineEmphasisVectors(); } void WorkSet::setUpCrudeEmphasisVectors() { _info->bkgObj->region_data->SetCrudeEmphasisVectors(); } float WorkSet::getAmpLowLimit() { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.AmplLowerLimit; } float WorkSet::getkmultLowLimit() { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.single_flow_master.kmult_low_limit; } float WorkSet::getkmultHighLimit() { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.single_flow_master.kmult_hi_limit; } float WorkSet::getkmultAdj() { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.single_flow_master.krate_adj_limit; } bool WorkSet::fitkmultAlways() { return false; } float* WorkSet::getClonalCallScale() { return 
_info->bkgObj->getGlobalDefaultsForBkgModel().fitter_defaults.clonal_call_scale; } float WorkSet::getClonalCallPenalty() { return _info->bkgObj->getGlobalDefaultsForBkgModel().fitter_defaults.clonal_call_penalty; } bound_params * WorkSet::getBeadParamsMax() { return &_info->bkgObj->region_data->my_beads.params_high; } bound_params * WorkSet::getBeadParamsMin() { return &_info->bkgObj->region_data->my_beads.params_low; } float WorkSet::getMaxEmphasis() { return _info->bkgObj->region_data->my_beads.max_emphasis; }; bool WorkSet::useDynamicEmphasis() { return true; } CpuStep* WorkSet::getPartialDerivSteps(int fit_index) { return _fd[fit_index]->GetPartialDerivSteps(); } unsigned int* WorkSet::getJTJMatrixMap(int fit_index) { return _fd[fit_index]->GetJTJMatrixMapForDotProductComputation(); } unsigned int* WorkSet::getBeadParamIdxMap(int fit_index) { return _fd[fit_index]->GetParamIdxMap(); } float * WorkSet::getFrameNumber() { return &_info->bkgObj->region_data->time_c.frameNumber[0]; } ////////////////////////////////////////////////////////////////////////////////////// ///SIZES: //// N int WorkSet::getBeadParamsSize(bool padded) { int size = sizeof(BeadParams); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getBeadStateSize(bool padded) { int size = sizeof(bead_state); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getFgBufferSize(bool padded) { return getFlxFxB(padded); } int WorkSet::getFgBufferSizeShort(bool padded) { int size = sizeof(FG_BUFFER_TYPE)*getNumFrames()*getFlowBlockSize(); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getReusedFgBufferPartialDerivsSize(bool padded) { // We reuse the same buffer for both _hdFgBuffer and _dPartialDerivsOutput + _dDelta. // Make sure that it's the size of the bigger of the two. 
size_t dPartialDerivsOutputSize = sizeof(float) * getMaxSteps() * getPaddedN() * getNumFrames(); size_t dDeltaSize = getParamRHSMaxSize(padded); size_t fgBufferSize = getFgBufferSizeShort(padded); return max( fgBufferSize, dPartialDerivsOutputSize + dDeltaSize ); } int WorkSet::getFlxFxB(bool padded) { int size = sizeof(float)*getFlowBlockSize()*getNumFrames(); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getFxB(bool padded) { int size = sizeof(float)*getNumFrames(); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getFlxB(bool padded) { int size = sizeof(float)*getFlowBlockSize(); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getFloatPerBead(bool padded) { int size = sizeof(float); return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } ///// non-N int WorkSet::getRegionParamsSize(bool padded) { int size = sizeof(reg_params); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getEmphVecSize(bool padded) { int size = sizeof(float)*(MAX_POISSON_TABLE_COL)*getNumFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getDarkMatterSize(bool padded) { int size = sizeof(float)*NUMNUC*getNumFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getShiftedBackgroundSize(bool padded) { int size = sizeof(float)*getFlowBlockSize()*getNumFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getFlowIdxMapSize(bool padded) { int size = sizeof(int)*getFlowBlockSize(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getDeltaFramesSize(bool padded) { int size = sizeof(float)*getNumFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getFineNucRiseSize(bool padded) { int size = sizeof(float) * ISIG_SUB_STEPS_SINGLE_FLOW * getNumFrames() * getFlowBlockSize(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getCoarseNucRiseSize(bool padded) { int size = sizeof(float) * ISIG_SUB_STEPS_MULTI_FLOW * getNumFrames() * getFlowBlockSize(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getStartNucSize(bool padded) { int size = sizeof(int) * getFlowBlockSize(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getBeadParamsMaxSize(bool padded) { int size = sizeof(bound_params); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getBeadParamsMinSize(bool padded) { int size = sizeof(bound_params); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getClonalCallScaleSize(bool padded) { int size = sizeof(float)*MAGIC_CLONAL_CALL_ARRAY_SIZE; return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getPartialDerivStepsSize(int fit_index, bool padded) { int size = sizeof(CpuStep)*_fd[fit_index]->GetNumSteps(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getJTJMatrixMapSize(int fit_index, bool padded) { int size = sizeof(unsigned int) * _fd[fit_index]->GetNumParamsToFit()*_fd[fit_index]->GetNumParamsToFit(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getBeadParamIdxMapSize(int fit_index, bool padded) { int size = sizeof(unsigned int) * _fd[fit_index]->GetNumParamsToFit(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getParamMatrixSize(int fit_index, bool padded) { int size = ((_fd[fit_index]->GetNumParamsToFit()*_fd[fit_index]->GetNumParamsToFit()+ 1)/2)*sizeof(float); return size * ((!padded)?(getNumBeads()):(getPaddedN())); } int WorkSet::getParamRHSSize(int fit_index, bool padded) { int size = 
_fd[fit_index]->GetNumParamsToFit() *sizeof(float); return size * ((!padded)?(getNumBeads()):(getPaddedN())); } int WorkSet::getPartialDerivStepsMaxSize(bool padded) { int size = sizeof(CpuStep)*getMaxSteps(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getJTJMatrixMapMaxSize(bool padded) { int size = sizeof(unsigned int) * getMaxParams()*getMaxParams(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getBeadParamIdxMapMaxSize(bool padded) { int size = sizeof(unsigned int) * getMaxParams(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::getParamMatrixMaxSize(bool padded) { int size = ((getMaxParams()*(getMaxParams() + 1))/2)*sizeof(float); return size * ((!padded)?(getNumBeads()):(getPaddedN())); } int WorkSet::getParamRHSMaxSize(bool padded) { int size = getMaxParams() *sizeof(float); return size * ((!padded)?(getNumBeads()):(getPaddedN())); } int WorkSet::getFrameNumberSize(bool padded) { int size = sizeof(float)*getNumFrames(); return (!padded)?(size):(padTo128Bytes(size)); } ////////////////////////////////////////////////////////////////////////////////////// /// int WorkSet::getPaddedN() const{ return ((getNumBeads()+32-1)/32)*32; } int WorkSet::getPaddedGenericXtalkSample() const{ return (((GENERIC_SIMPLE_XTALK_SAMPLE)+32-1)/32)*32; } int WorkSet::padTo128Bytes(int size){ return ((size+128-1)/128)*128; } bool WorkSet::DataAvailalbe() { if (!isSet()) return false; if (getNumBeads() <= 0) return false; return true; } bool WorkSet::ValidJob() { if (!DataAvailalbe()) return false; if (_info->bkgObj->region_data->fitters_applied == -1 ) return false; return true; } void WorkSet::KeyNormalize() { _info->bkgObj->region_data->my_beads.my_mean_copy_count = _info->bkgObj->region_data->my_beads.KeyNormalizeReads(true, false, getFlowBlockSize()); } void WorkSet::PerformePCA() { FlowBlockSequence::const_iterator flow_block = _info->inception_state->bkg_control.signal_chunks.flow_block_sequence. BlockAtFlow( _info->flow ); _info->bkgObj->CPU_DarkMatterPCA( getFlowBlockSize(), flow_block->begin() ); } void WorkSet::setJobToPostFitStep() { _info->type = POST_FIT_STEPS; _info->bkgObj->region_data->fitters_applied=TIME_TO_DO_PREWELL; } void WorkSet::setJobToRemainRegionFit() { _info->type = INITIAL_FLOW_BLOCK_REMAIN_REGIONAL_FIT; _info->bkgObj->region_data->fitters_applied=TIME_TO_DO_REMAIN_MULTI_FLOW_FIT_STEPS; } void WorkSet::putJobToCPU(WorkerInfoQueueItem item) { //_info->pq->GetCpuQueue()->PutItem(item); _info->QueueControl->GetQueue()->PutItem(item); } void WorkSet::putJobToGPU(WorkerInfoQueueItem item) { //_info->pq->GetGpuQueue()->PutItem(item); _info->QueueControl->GetGpuQueue()->PutItem(item); } void WorkSet::printJobSummary() { if( ValidJob() ) { cout << " | Job Summary:" << endl << " | max beads: " << GpuMultiFlowFitControl::GetMaxBeads() << " max frames: " << GpuMultiFlowFitControl::GetMaxFrames() << endl << " | live beads: " << getNumBeads() <<" padded: "<< getPaddedN() << endl << " | num frames: " << getNumFrames() << endl << " | flow num: " << getAbsoluteFlowNum() << endl ; } else{ cout << "| No Valid Job Set" << endl; } } int WorkSet::getXtalkNeiIdxMapSize(bool padded) { int size = sizeof(int) * MAX_XTALK_NEIGHBOURS; return size*( (!padded)?(getNumBeads()):(getPaddedN()) ); } int WorkSet::getXtalkSampleNeiIdxMapSize(bool padded) { int size = sizeof(int) * MAX_XTALK_NEIGHBOURS; return size*((!padded) ? 
(GENERIC_SIMPLE_XTALK_SAMPLE) : getPaddedGenericXtalkSample()); } bool WorkSet::IsSimpleTraceLevelXtalk() const { return _info->bkgObj->getXtalkExecute().xtalk_spec_p->simple_model; } int WorkSet::getNumXtalkNeighbours() { return _info->bkgObj->getXtalkExecute().xtalk_spec_p->nei_affected; } const int* WorkSet::getNeiIdxMapForXtalk() { return _info->bkgObj->getXtalkExecute().GetNeighborIndexMap(); } const int* WorkSet::getSampleNeiIdxMapForXtalk() { return _info->bkgObj->getXtalkExecute().GetNeighbourIndexmapForSampleLocations(); } int* WorkSet::getXtalkNeiXCoords() { return &_info->bkgObj->getXtalkExecute().xtalk_spec_p->cx[0]; } int* WorkSet::getXtalkNeiYCoords() { return &_info->bkgObj->getXtalkExecute().xtalk_spec_p->cy[0]; } float* WorkSet::getXtalkNeiMultiplier() { return &_info->bkgObj->getXtalkExecute().xtalk_spec_p->multiplier[0]; } float* WorkSet::getXtalkNeiTauTop() { return &_info->bkgObj->getXtalkExecute().xtalk_spec_p->tau_top[0]; } float* WorkSet::getXtalkNeiTauFluid() { return &_info->bkgObj->getXtalkExecute().xtalk_spec_p->tau_fluid[0]; } void WorkSet::calculateCPUXtalkForBead(int ibd, float* buf) { FlowBlockSequence::const_iterator flow_block = _info->inception_state->bkg_control.signal_chunks.flow_block_sequence. BlockAtFlow( _info->flow ); _info->bkgObj->getXtalkExecute().ExecuteXtalkFlux(ibd, buf, flow_block->size(), flow_block->begin() ); } bool WorkSet::performCrossTalkCorrection() const { if(isSet()) { return _info->bkgObj->getTraceXTalkSpecs().do_xtalk_correction; } return false; } bool WorkSet::performWellsLevelXTalkCorrection() const { if(isSet()) { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.enable_well_xtalk_correction; } return false; } bool WorkSet::performPolyClonalFilter() const{ if(isSet()) { return _info->inception_state->bkg_control.polyclonal_filter.enable; } return false; } bool WorkSet::performPostFitHandshake() const { return _info->inception_state->bkg_control.gpuControl.postFitHandshakeWorker; } bool WorkSet::performExpTailFitting() { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.exp_tail_fit; } bool WorkSet::performCalcPCADarkMatter() { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.pca_dark_matter; } bool WorkSet::useDarkMatterPCA() { return ( performCalcPCADarkMatter() && _info->bkgObj->region_data->my_regions.missing_mass.mytype == PCAVector)?(true):(false) ; } bool WorkSet::InitializeAmplitude() { if (_info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.regional_sampling) return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.amp_guess_on_gpu; else return false; } int WorkSet::GetNumUnCompressedFrames() { if(isSet()) return _info->bkgObj->region_data->time_c.GetUncompressedFrames(); return MAX_UNCOMPRESSED_FRAMES_GPU; } int WorkSet::GetNumStdCompressedFrames() { if(isSet()) return _info->bkgObj->region_data->time_c.GetStdFrames(); return getMaxFrames(); } int WorkSet::GetNumETFCompressedFrames() { if(isSet()) return _info->bkgObj->region_data->time_c.GetETFFrames(); return getMaxFrames(); } int* WorkSet::GetStdFramesPerPoint() { return &((_info->bkgObj->region_data->time_c.GetStdFramesPerPoint())[0]); } int* WorkSet::GetETFFramesPerPoint() { return &((_info->bkgObj->region_data->time_c.GetETFFramesPerPoint())[0]); } int* WorkSet::GetETFInterpolationFrames() { return &((_info->bkgObj->region_data->time_c.GetETFInterpolationFrame())[0]); } float* WorkSet::GetETFInterpolationMul() { return 
&((_info->bkgObj->region_data->time_c.GetETFInterpolationMul())[0]); } int WorkSet::GetETFStartFrame() { return _info->bkgObj->region_data->time_c.GetETFStartFrame(); } int WorkSet::GetStdFramesPerPointSize(bool padded) { int size = sizeof(int)*GetNumStdCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::GetETFFramesPerPointSize(bool padded) { int size = sizeof(int)*GetNumETFCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::GetETFInterpolationFrameSize(bool padded) { int size = sizeof(int)*GetNumUnCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::GetETFInterpolationMulSize(bool padded) { int size = sizeof(float)*GetNumUnCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } float * WorkSet::GetStdTimeCompEmphasis() { return &_info->bkgObj->region_data->std_time_comp_emphasis.emphasis_vector_storage[0]; } int WorkSet::GetStdTimeCompEmphasisSize(bool padded) { int size = sizeof(float)*(MAX_POISSON_TABLE_COL)*GetNumStdCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } float * WorkSet::GetStdTimeCompNucRise() { nucRise.Alloc(GetNumStdCompressedFrames(), getFlowBlockSize()); nucRise.CalculateNucRiseFineStep ( &_info->bkgObj->region_data->my_regions.rp, GetNumStdCompressedFrames(), _info->bkgObj->region_data->time_c.GetStdFrameNumber(), *_info->bkgObj->region_data_extras.my_flow); // the same for the whole region because time-shift happens per well return nucRise.nuc_rise_fine_step; } int WorkSet::GetStdTimeCompNucRiseSize(bool padded) { int size = sizeof(float) * ISIG_SUB_STEPS_SINGLE_FLOW * GetNumStdCompressedFrames() * getFlowBlockSize(); return (!padded)?(size):(padTo128Bytes(size)); } float* WorkSet::GetStdTimeCompDeltaFrame() { return &((_info->bkgObj->region_data->time_c.GetStdDeltaFrame())[0]); } float* WorkSet::GetStdTimeCompFrameNumber() { return &((_info->bkgObj->region_data->time_c.GetStdFrameNumber())[0]); } int WorkSet::GetStdTimeCompDeltaFrameSize(bool padded) { int size = sizeof(float)*GetNumStdCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } int WorkSet::GetStdTimeCompFrameNumberSize(bool padded) { int size = sizeof(float)*GetNumStdCompressedFrames(); return (!padded)?(size):(padTo128Bytes(size)); } void WorkSet::setUpFineEmphasisVectorsForStdCompression() { _info->bkgObj->region_data->GenerateFineEmphasisForStdTimeCompression(); } int* WorkSet::GetNonZeroEmphasisFrames() { return &_info->bkgObj->region_data->emphasis_data.nonZeroEmphasisFrames[0]; } int* WorkSet::GetNonZeroEmphasisFramesForStdCompression() { return &_info->bkgObj->region_data->std_time_comp_emphasis.nonZeroEmphasisFrames[0]; } int WorkSet::GetNonZeroEmphasisFramesVecSize(bool padded) { int size = sizeof(int) * MAX_POISSON_TABLE_COL; return (!padded) ? 
(size) : (padTo128Bytes(size)); } int WorkSet::getRegCol(){return _info->bkgObj->region_data->region->col;} int WorkSet::getRegRow(){return _info->bkgObj->region_data->region->row;} void ** WorkSet::getSampleCollectionPtr(){return _info->SampleCollection;} float WorkSet::getCTimeStart(){return _info->bkgObj->region_data->time_c.time_start; } float WorkSet::getTMidNuc() {return _info->bkgObj->region_data->my_regions.rp.nuc_shape.AccessTMidNuc()[0];} float WorkSet::getSigma() { return _info->bkgObj->region_data->my_regions.rp.nuc_shape.sigma;} float WorkSet::getTShift(){ return _info->bkgObj->region_data->my_regions.rp.tshift; } int WorkSet::getNucIdForFlow(int flow) { return _info->bkgObj->getGlobalDefaultsForBkgModel().flow_global.GetNucNdx(flow); } const EmphasisClass& WorkSet::getEmphasisData() { return _info->bkgObj->region_data->emphasis_data; } bool WorkSet::performTauAdjInExpTailFit() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.exp_tail_tau_adj; } bool WorkSet::performBkgAdjInExpTailFit() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.exp_tail_bkg_adj; } float WorkSet::expTailFitBkgAdjLimit() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.exp_tail_bkg_limit; } float WorkSet::expTailFitBkgDcLowerLimit() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.exp_tail_bkg_lower; } bool WorkSet::useSlowKmultInit() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.always_start_slow; } int WorkSet::getPostKeyFitAllWellsTrainingLevel() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.post_key_train; } int WorkSet::getPostKeyFitAllWellsTrainingStep() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.post_key_step; } void WorkSet::performPreFitStepsForMultiFitStream() { if (_info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.revert_regional_sampling) return; KeyNormalize(); if (_info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.skipFirstFlowBlockRegFitting) return; _info->bkgObj->region_data->my_beads.SetCopyCountOnUnSampledBeads(getFlowBlockSize()); _info->bkgObj->region_data->my_beads.SetBufferingRatioOnUnSampledBeads(); } void WorkSet::prepareMultiFlowFitMatrixConfig() { _fd[0] = _multiFlowFitControl.GetMatrixConfig("FitWellAmplBuffering", getLevMarSparseMatrices()); if (_info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.fit_region_kmult) _fd[1] = _multiFlowFitControl.GetMatrixConfig("FitWellAll", getLevMarSparseMatrices()); else _fd[1] = _multiFlowFitControl.GetMatrixConfig("FitWellPostKey", getLevMarSparseMatrices()); } const master_fit_type_table* WorkSet::getLevMarSparseMatrices() const { return _info->table; } bool WorkSet::fitTmidNucShift() const { return _info->bkgObj->getGlobalDefaultsForBkgModel().signal_process_control.per_flow_t_mid_nuc_tracking; }
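///////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (editor's addition, not part of the original source).
// The size helpers above all use the same integer round-up idiom,
//     padded = ((value + align - 1) / align) * align,
// with align = 32 beads in getPaddedN()/getPaddedGenericXtalkSample() and
// align = 128 bytes in padTo128Bytes(). The hypothetical stand-alone version
// below (roundUpTo is not a name used in the original code) shows the idiom by
// itself.
///////////////////////////////////////////////////////////////////////////////
static inline int roundUpTo(int value, int align)
{
    // Integer division truncates, so adding (align - 1) first rounds up to the
    // next multiple of align (value assumed non-negative, align > 0).
    return ((value + align - 1) / align) * align;
}
// e.g. roundUpTo(numLiveBeads, 32) reproduces getPaddedN(), and
//      roundUpTo(sizeInBytes, 128) reproduces padTo128Bytes(sizeInBytes).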
#undef isnan #undef isfinite #include <iostream> #include <TooN/TooN.h> #include <TooN/se3.h> #include <TooN/GR_SVD.h> #define INVALID -2 // this is used to mark invalid entries in normal or vertex maps using namespace std; __global__ void initVolume( Volume volume, const float2 val ){ uint3 pos = make_uint3(thr2pos2()); for(pos.z = 0; pos.z < volume.size.z; ++pos.z) volume.set(pos, val); } __global__ void raycast( Image<float3> pos3D, Image<float3> normal, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){ const uint2 pos = thr2pos2(); const float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep ); if(hit.w > 0){ pos3D[pos] = make_float3(hit); float3 surfNorm = volume.grad(make_float3(hit)); if(length(surfNorm) == 0){ normal[pos].x = INVALID; } else { normal[pos] = normalize(surfNorm); } } else { pos3D[pos] = make_float3(0); normal[pos] = make_float3(INVALID, 0, 0); } } __forceinline__ __device__ float sq( const float x ){ return x*x; } __global__ void integrate( Volume vol, const Image<float> depth, const Matrix4 invTrack, const Matrix4 K, const float mu, const float maxweight){ uint3 pix = make_uint3(thr2pos2()); float3 pos = invTrack * vol.pos(pix); float3 cameraX = K * pos; const float3 delta = rotate(invTrack, make_float3(0,0, vol.dim.z / vol.size.z)); const float3 cameraDelta = rotate(K, delta); for(pix.z = 0; pix.z < vol.size.z; ++pix.z, pos += delta, cameraX += cameraDelta){ if(pos.z < 0.0001f) // some near plane constraint continue; const float2 pixel = make_float2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f); if(pixel.x < 0 || pixel.x > depth.size.x-1 || pixel.y < 0 || pixel.y > depth.size.y-1) continue; const uint2 px = make_uint2(pixel.x, pixel.y); if(depth[px] == 0) continue; const float diff = (depth[px] - cameraX.z) * sqrt(1+sq(pos.x/pos.z) + sq(pos.y/pos.z)); if(diff > -mu){ const float sdf = fminf(1.f, diff/mu); float2 data = vol[pix]; data.x = clamp((data.y*data.x + sdf)/(data.y + 1), -1.f, 1.f); data.y = fminf(data.y+1, maxweight); vol.set(pix, data); } } } __global__ void depth2vertex( Image<float3> vertex, const Image<float> depth, const Matrix4 invK ){ const uint2 pixel = thr2pos2(); if(pixel.x >= depth.size.x || pixel.y >= depth.size.y ) return; if(depth[pixel] > 0){ vertex[pixel] = depth[pixel] * (rotate(invK, make_float3(pixel.x, pixel.y, 1.f))); } else { vertex[pixel] = make_float3(0); } } __global__ void vertex2normal( Image<float3> normal, const Image<float3> vertex ){ const uint2 pixel = thr2pos2(); if(pixel.x >= vertex.size.x || pixel.y >= vertex.size.y ) return; const float3 left = vertex[make_uint2(max(int(pixel.x)-1,0), pixel.y)]; const float3 right = vertex[make_uint2(min(pixel.x+1,vertex.size.x-1), pixel.y)]; const float3 up = vertex[make_uint2(pixel.x, max(int(pixel.y)-1,0))]; const float3 down = vertex[make_uint2(pixel.x, min(pixel.y+1,vertex.size.y-1))]; if(left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) { normal[pixel].x = INVALID; return; } const float3 dxv = right - left; const float3 dyv = down - up; normal[pixel] = normalize(cross(dyv, dxv)); // switched dx and dy to get factor -1 } template <int HALFSAMPLE> __global__ void mm2meters( Image<float> depth, const Image<ushort> in ){ const uint2 pixel = thr2pos2(); depth[pixel] = in[pixel * (HALFSAMPLE+1)] / 1000.0f; } //column pass using coalesced global memory reads __global__ void bilateral_filter(Image<float> out, const Image<float> in, const Image<float> gaussian, const float 
e_d, const int r) { const uint2 pos = thr2pos2(); if(in[pos] == 0){ out[pos] = 0; return; } float sum = 0.0f; float t = 0.0f; const float center = in[pos]; for(int i = -r; i <= r; ++i) { for(int j = -r; j <= r; ++j) { const float curPix = in[make_uint2(clamp(pos.x + i, 0u, in.size.x-1), clamp(pos.y + j, 0u, in.size.y-1))]; if(curPix > 0){ const float mod = sq(curPix - center); const float factor = gaussian[make_uint2(i + r, 0)] * gaussian[make_uint2(j + r, 0)] * __expf(-mod / (2 * e_d * e_d)); t += factor * curPix; sum += factor; } } } out[pos] = t / sum; } // filter and halfsample __global__ void halfSampleRobust( Image<float> out, const Image<float> in, const float e_d, const int r){ const uint2 pixel = thr2pos2(); const uint2 centerPixel = 2 * pixel; if(pixel.x >= out.size.x || pixel.y >= out.size.y ) return; float sum = 0.0f; float t = 0.0f; const float center = in[centerPixel]; for(int i = -r + 1; i <= r; ++i){ for(int j = -r + 1; j <= r; ++j){ float current = in[make_uint2(clamp(make_int2(centerPixel.x + j, centerPixel.y + i), make_int2(0), make_int2(in.size.x - 1, in.size.y - 1)))]; // TODO simplify this! if(fabsf(current - center) < e_d){ sum += 1.0f; t += current; } } } out[pixel] = t / sum; } __global__ void generate_gaussian(Image<float> out, float delta, int radius) { int x = threadIdx.x - radius; out[make_uint2(threadIdx.x,0)] = __expf(-(x * x) / (2 * delta * delta)); } __global__ void track( Image<TrackData> output, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ) { const uint2 pixel = thr2pos2(); if(pixel.x >= inVertex.size.x || pixel.y >= inVertex.size.y ) return; TrackData & row = output[pixel]; if(inNormal[pixel].x == INVALID ){ row.result = -1; return; } const float3 projectedVertex = Ttrack * inVertex[pixel]; const float3 projectedPos = view * projectedVertex; const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f); if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){ row.result = -2; return; } const uint2 refPixel = make_uint2(projPixel.x, projPixel.y); const float3 referenceNormal = refNormal[refPixel]; if(referenceNormal.x == INVALID){ row.result = -3; return; } const float3 diff = refVertex[refPixel] - projectedVertex; const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]); if(length(diff) > dist_threshold ){ row.result = -4; return; } if(dot(projectedNormal, referenceNormal) < normal_threshold){ row.result = -5; return; } row.result = 1; row.error = dot(referenceNormal, diff); ((float3 *)row.J)[0] = referenceNormal; ((float3 *)row.J)[1] = cross(projectedVertex, referenceNormal); } __global__ void reduce( float * out, const Image<TrackData> J, const uint2 size){ __shared__ float S[112][32]; // this is for the final accumulation const uint sline = threadIdx.x; float sums[32]; float * jtj = sums + 7; float * info = sums + 28; for(uint i = 0; i < 32; ++i) sums[i] = 0; for(uint y = blockIdx.x; y < size.y; y += gridDim.x){ for(uint x = sline; x < size.x; x += blockDim.x ){ const TrackData & row = J[make_uint2(x, y)]; if(row.result < 1){ info[1] += row.result == -4 ? 1 : 0; info[2] += row.result == -5 ? 1 : 0; info[3] += row.result > -4 ? 
1 : 0; continue; } // Error part sums[0] += row.error * row.error; // JTe part for(int i = 0; i < 6; ++i) sums[i+1] += row.error * row.J[i]; // JTJ part, unfortunatly the double loop is not unrolled well... jtj[0] += row.J[0] * row.J[0]; jtj[1] += row.J[0] * row.J[1]; jtj[2] += row.J[0] * row.J[2]; jtj[3] += row.J[0] * row.J[3]; jtj[4] += row.J[0] * row.J[4]; jtj[5] += row.J[0] * row.J[5]; jtj[6] += row.J[1] * row.J[1]; jtj[7] += row.J[1] * row.J[2]; jtj[8] += row.J[1] * row.J[3]; jtj[9] += row.J[1] * row.J[4]; jtj[10] += row.J[1] * row.J[5]; jtj[11] += row.J[2] * row.J[2]; jtj[12] += row.J[2] * row.J[3]; jtj[13] += row.J[2] * row.J[4]; jtj[14] += row.J[2] * row.J[5]; jtj[15] += row.J[3] * row.J[3]; jtj[16] += row.J[3] * row.J[4]; jtj[17] += row.J[3] * row.J[5]; jtj[18] += row.J[4] * row.J[4]; jtj[19] += row.J[4] * row.J[5]; jtj[20] += row.J[5] * row.J[5]; // extra info here info[0] += 1; } } for(int i = 0; i < 32; ++i) // copy over to shared memory S[sline][i] = sums[i]; __syncthreads(); // wait for everyone to finish if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads for(unsigned i = 1; i < blockDim.x; ++i) S[0][sline] += S[i][sline]; out[sline+blockIdx.x*32] = S[0][sline]; } } __global__ void trackAndReduce( float * out, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ){ __shared__ float S[112][32]; // this is for the final accumulation const uint sline = threadIdx.x; float sums[32]; float * jtj = sums + 7; float * info = sums + 28; for(uint i = 0; i < 32; ++i) sums[i] = 0; float J[6]; for(uint y = blockIdx.x; y < inVertex.size.y; y += gridDim.x){ for(uint x = sline; x < inVertex.size.x; x += blockDim.x ){ const uint2 pixel = make_uint2(x,y); if(inNormal[pixel].x == INVALID){ continue; } const float3 projectedVertex = Ttrack * inVertex[pixel]; const float3 projectedPos = view * projectedVertex; const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f); if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){ info[3] += 1; continue; } const uint2 refPixel = make_uint2(projPixel.x, projPixel.y); if(refNormal[refPixel].x == INVALID){ info[3] += 1; continue; } const float3 referenceNormal = refNormal[refPixel]; const float3 diff = refVertex[refPixel] - projectedVertex; const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]); if(length(diff) > dist_threshold ){ info[1] += 1; continue; } if(dot(projectedNormal, referenceNormal) < normal_threshold){ info[2] += 1; continue; } const float error = dot(referenceNormal, diff); ((float3 *)J)[0] = referenceNormal; ((float3 *)J)[1] = cross(projectedVertex, referenceNormal); // Error part sums[0] += error * error; // JTe part for(int i = 0; i < 6; ++i) sums[i+1] += error * J[i]; // JTJ part jtj[0] += J[0] * J[0]; jtj[1] += J[0] * J[1]; jtj[2] += J[0] * J[2]; jtj[3] += J[0] * J[3]; jtj[4] += J[0] * J[4]; jtj[5] += J[0] * J[5]; jtj[6] += J[1] * J[1]; jtj[7] += J[1] * J[2]; jtj[8] += J[1] * J[3]; jtj[9] += J[1] * J[4]; jtj[10] += J[1] * J[5]; jtj[11] += J[2] * J[2]; jtj[12] += J[2] * J[3]; jtj[13] += J[2] * J[4]; jtj[14] += J[2] * J[5]; jtj[15] += J[3] * J[3]; jtj[16] += J[3] * J[4]; jtj[17] += J[3] * J[5]; jtj[18] += J[4] * J[4]; jtj[19] += J[4] * J[5]; jtj[20] += J[5] * J[5]; // extra info here info[0] += 1; } } for(int i = 0; i 
< 32; ++i) // copy over to shared memory S[sline][i] = sums[i]; __syncthreads(); // wait for everyone to finish if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads for(unsigned i = 1; i < blockDim.x; ++i) S[0][sline] += S[i][sline]; out[sline+blockIdx.x*32] = S[0][sline]; } } void KFusion::Init( const KFusionConfig & config ) { configuration = config; cudaSetDeviceFlags(cudaDeviceMapHost); integration.init(config.volumeSize, config.volumeDimensions); reduction.alloc(config.inputSize); vertex.alloc(config.inputSize); normal.alloc(config.inputSize); rawDepth.alloc(config.inputSize); inputDepth.resize(config.iterations.size()); inputVertex.resize(config.iterations.size()); inputNormal.resize(config.iterations.size()); for(int i = 0; i < config.iterations.size(); ++i){ inputDepth[i].alloc(config.inputSize >> i); inputVertex[i].alloc(config.inputSize >> i); inputNormal[i].alloc(config.inputSize >> i); } gaussian.alloc(make_uint2(config.radius * 2 + 1, 1)); output.alloc(make_uint2(32,8)); //generate gaussian array generate_gaussian<<< 1, gaussian.size.x>>>(gaussian, config.delta, config.radius); Reset(); } void KFusion::Reset(){ dim3 block(32,16); dim3 grid = divup(dim3(integration.size.x, integration.size.y), block); initVolume<<<grid, block>>>(integration, make_float2(1.0f, 0.0f)); } void KFusion::Clear(){ integration.release(); } void KFusion::setPose( const Matrix4 & p ){ pose = p; } void KFusion::setKinectDeviceDepth( const Image<uint16_t> & in){ if(configuration.inputSize.x == in.size.x) mm2meters<0><<<divup(rawDepth.size, configuration.imageBlock), configuration.imageBlock>>>(rawDepth, in); else if(configuration.inputSize.x == in.size.x / 2 ) mm2meters<1><<<divup(rawDepth.size, configuration.imageBlock), configuration.imageBlock>>>(rawDepth, in); else assert(false); } Matrix4 operator*( const Matrix4 & A, const Matrix4 & B){ Matrix4 R; TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::wrapMatrix<4,4>(&A.data[0].x) * TooN::wrapMatrix<4,4>(&B.data[0].x); return R; } template<typename P> inline Matrix4 toMatrix4( const TooN::SE3<P> & p){ const TooN::Matrix<4, 4, float> I = TooN::Identity; Matrix4 R; TooN::wrapMatrix<4,4>(&R.data[0].x) = p * I; return R; } Matrix4 inverse( const Matrix4 & A ){ static TooN::Matrix<4, 4, float> I = TooN::Identity; TooN::Matrix<4,4,float> temp = TooN::wrapMatrix<4,4>(&A.data[0].x); Matrix4 R; TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::gaussian_elimination(temp , I ); return R; } std::ostream & operator<<( std::ostream & out, const Matrix4 & m ){ for(unsigned i = 0; i < 4; ++i) out << m.data[i].x << " " << m.data[i].y << " " << m.data[i].z << " " << m.data[i].w << "\n"; return out; } template <typename P, typename A> TooN::Matrix<6> makeJTJ( const TooN::Vector<21, P, A> & v ){ TooN::Matrix<6> C = TooN::Zeros; C[0] = v.template slice<0,6>(); C[1].template slice<1,5>() = v.template slice<6,5>(); C[2].template slice<2,4>() = v.template slice<11,4>(); C[3].template slice<3,3>() = v.template slice<15,3>(); C[4].template slice<4,2>() = v.template slice<18,2>(); C[5][5] = v[20]; for(int r = 1; r < 6; ++r) for(int c = 0; c < r; ++c) C[r][c] = C[c][r]; return C; } template <typename T, typename A> TooN::Vector<6> solve( const TooN::Vector<27, T, A> & vals ){ const TooN::Vector<6> b = vals.template slice<0,6>(); const TooN::Matrix<6> C = makeJTJ(vals.template slice<6,21>()); TooN::GR_SVD<6,6> svd(C); return svd.backsub(b, 1e6); } void KFusion::Raycast(){ // raycast integration volume into the depth, vertex, normal buffers raycastPose = pose; 
raycast<<<divup(configuration.inputSize, configuration.raycastBlock), configuration.raycastBlock>>>(vertex, normal, integration, raycastPose * getInverseCameraMatrix(configuration.camera), configuration.nearPlane, configuration.farPlane, configuration.stepSize(), 0.75f * configuration.mu); } bool KFusion::Track() { const Matrix4 invK = getInverseCameraMatrix(configuration.camera); vector<dim3> grids; for(int i = 0; i < configuration.iterations.size(); ++i) grids.push_back(divup(configuration.inputSize >> i, configuration.imageBlock)); // filter the input depth map bilateral_filter<<<grids[0], configuration.imageBlock>>>(inputDepth[0], rawDepth, gaussian, configuration.e_delta, configuration.radius); // half sample the input depth maps into the pyramid levels for(int i = 1; i < configuration.iterations.size(); ++i) halfSampleRobust<<<grids[i], configuration.imageBlock>>>(inputDepth[i], inputDepth[i-1], configuration.e_delta * 3, 1); // prepare the 3D information from the input depth maps for(int i = 0; i < configuration.iterations.size(); ++i){ depth2vertex<<<grids[i], configuration.imageBlock>>>( inputVertex[i], inputDepth[i], getInverseCameraMatrix(configuration.camera / float(1 << i))); // inverse camera matrix depends on level vertex2normal<<<grids[i], configuration.imageBlock>>>( inputNormal[i], inputVertex[i] ); } const Matrix4 oldPose = pose; const Matrix4 projectReference = getCameraMatrix(configuration.camera) * inverse(raycastPose); TooN::Matrix<8, 32, float, TooN::Reference::RowMajor> values(output.data()); for(int level = configuration.iterations.size()-1; level >= 0; --level){ for(int i = 0; i < configuration.iterations[level]; ++i){ if(configuration.combinedTrackAndReduce){ trackAndReduce<<<8, 112>>>( output.getDeviceImage().data(), inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold ); } else { track<<<grids[level], configuration.imageBlock>>>( reduction, inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold); reduce<<<8, 112>>>( output.getDeviceImage().data(), reduction, inputVertex[level].size ); // compute the linear system to solve } cudaDeviceSynchronize(); // important due to async nature of kernel call for(int j = 1; j < 8; ++j) values[0] += values[j]; TooN::Vector<6> x = solve(values[0].slice<1,27>()); TooN::SE3<> delta(x); pose = toMatrix4( delta ) * pose; if(norm(x) < 1e-5) break; } } // test on both RSME per pixel and percent of pixels tracked if((sqrt(values(0,0) / values(0,28)) > 2e-2) || (values(0,28) / (rawDepth.size.x * rawDepth.size.y) < configuration.track_threshold) ){ pose = oldPose; return false; } return true; } void KFusion::Integrate() { integrate<<<divup(dim3(integration.size.x, integration.size.y), configuration.imageBlock), configuration.imageBlock>>>( integration, rawDepth, inverse(pose), getCameraMatrix(configuration.camera), configuration.mu, configuration.maxweight ); } int printCUDAError() { cudaError_t error = cudaGetLastError(); if(error) std::cout << cudaGetErrorString(error) << std::endl; return error; }
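///////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (editor's addition, not part of the original source).
// The reduce/trackAndReduce kernels above accumulate the symmetric 6x6 JTJ
// matrix as its 21 upper-triangular entries (row-major, diagonal included),
// which makeJTJ() later mirrors into a full TooN::Matrix<6>. The hypothetical
// helpers below (triIndex/unpackJTJ are not names used in the original code)
// show the same packing on plain arrays, independent of TooN.
///////////////////////////////////////////////////////////////////////////////
inline int triIndex(int r, int c)   // 0-based, requires r <= c < 6
{
    // Entries stored before row r: 6 + 5 + ... + (6 - r + 1) = r*6 - r*(r-1)/2.
    return r * 6 - r * (r - 1) / 2 + (c - r);
}
inline void unpackJTJ(const float tri[21], float full[6][6])
{
    for (int r = 0; r < 6; ++r)
        for (int c = r; c < 6; ++c)
            full[r][c] = full[c][r] = tri[triIndex(r, c)];  // mirror lower half
}
// e.g. tri[0] holds J[0]*J[0], tri[6] holds J[1]*J[1], tri[20] holds J[5]*J[5],
// matching the jtj[] indexing used in the reduction kernels above.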
#include "core/pack/Pack.h" #include "core/pack/Load.h" #include "core/pack/Load_Chunks.h" #include "core/pack/Load_Indref.h" #include "core/pack/Call.h" #include "core/pack/GetInds.h" #include "core/pack/GetDims.h" #include "core/mapreduce/broadcast_batch_dimensions.h" #include "core/utils/CudaErrorCheck.cu" #include "core/utils/TypesUtils.h" #include "Chunk_Mode_Constants.h" namespace keops { template < int USE_CHUNK_MODE, int BLOCKSIZE_CHUNKS, class FUN_GLOBAL=void, class VARFINAL=void > struct GpuConv1DOnDevice_ranges {}; #if USE_FINAL_CHUNKS==1 template < class FUN_GLOBAL, class VARFINAL, int DIMFINALCHUNK_CURR, typename TYPE > __device__ void do_finalchunk_sub_ranges(TYPE *acc, int i, int j, int jstart, int start_y, int chunk, int end_x, int end_y, int nbatchdims, int *indices_j, TYPE **args, TYPE *fout, TYPE *yj, TYPE *out) { static const int DIMOUT = VARFINAL::DIM; VectAssign<DIMFINALCHUNK>(acc,0.0f); //typename FUN::template InitializeReduction<__TYPEACC__, TYPE >()(acc); // acc = 0 TYPE *yjrel = yj; if (j < end_y) // we load yj from device global memory only if j<end_y if (nbatchdims==0) load_chunks < pack<VARFINAL::N>, DIMFINALCHUNK, DIMFINALCHUNK_CURR, VARFINAL::DIM > (j, chunk, yj + threadIdx.x * DIMFINALCHUNK, args); else load_chunks < pack<VARFINAL::N>, typename FUN_GLOBAL::INDSJ, DIMFINALCHUNK, DIMFINALCHUNK_CURR, VARFINAL::DIM > (j-start_y, chunk, yj + threadIdx.x * DIMFINALCHUNK, args, indices_j); __syncthreads(); for (int jrel = 0; (jrel < blockDim.x) && (jrel < end_y - jstart); jrel++, yjrel += DIMFINALCHUNK) { if (i < end_x) { // we compute only if needed #pragma unroll for (int k=0; k<DIMFINALCHUNK_CURR; k++) acc[k] += yjrel[k] * fout[jrel]; } __syncthreads(); } if (i < end_x) { //typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, out + i * DIMOUT, i); #pragma unroll for (int k=0; k<DIMFINALCHUNK_CURR; k++) out[i*DIMOUT+chunk*DIMFINALCHUNK+k] += acc[k]; } __syncthreads(); } template < int BLOCKSIZE_CHUNKS, class VARFINAL, typename TYPE, class FUN_GLOBAL, class FUN > __global__ void GpuConv1DOnDevice_ranges_FinalChunks(FUN_GLOBAL fun_global, FUN fun, int nx, int ny, int nbatchdims, int *shapes, int *offsets_d, __INDEX__* lookup_d, __INDEX__* slices_x, __INDEX__* ranges_y, TYPE *out, TYPE **args) { // Buffers for the "broadcasted indices" ----------------------------------- typedef typename FUN_GLOBAL::VARSI VARSI_GLOBAL; typedef typename FUN_GLOBAL::VARSJ VARSJ_GLOBAL; typedef typename FUN_GLOBAL::VARSP VARSP_GLOBAL; const int SIZEI_GLOBAL = VARSI_GLOBAL::SIZE; const int SIZEJ_GLOBAL = VARSJ_GLOBAL::SIZE; const int SIZEP_GLOBAL = VARSP_GLOBAL::SIZE; const int SIZEVARS_GLOBAL = SIZEI_GLOBAL + SIZEJ_GLOBAL + SIZEP_GLOBAL; int offsets[SIZEVARS_GLOBAL]; int *indices_i = offsets, *indices_j = offsets + SIZEI_GLOBAL, *indices_p = offsets + SIZEI_GLOBAL + SIZEJ_GLOBAL; if (nbatchdims > 0) { for (int k = 0; k < SIZEVARS_GLOBAL; k++) { offsets[k] = offsets_d[ SIZEVARS_GLOBAL * blockIdx.x + k ]; } } // Retrieve our position along the laaaaarge [1,~nx] axis: ----------------- __INDEX__ range_id= (lookup_d)[3*blockIdx.x] ; __INDEX__ start_x = (lookup_d)[3*blockIdx.x+1] ; __INDEX__ end_x = (lookup_d)[3*blockIdx.x+2] ; // The "slices_x" vector encodes a set of cutting points in // the "ranges_y" array of ranges. // As discussed in the Genred docstring, the first "0" is implicit: __INDEX__ start_slice = range_id < 1 ? 
0 : slices_x[range_id-1]; __INDEX__ end_slice = slices_x[range_id]; // get the index of the current thread int i = start_x + threadIdx.x; // declare shared mem extern __shared__ TYPE yj[]; const int NCHUNKS = 1 + (VARFINAL::DIM-1) / DIMFINALCHUNK; const int DIMLASTFINALCHUNK = VARFINAL::DIM - (NCHUNKS-1)*DIMFINALCHUNK; // get templated dimensions : typedef typename FUN::DIMSX DIMSX; // DIMSX is a "vector" of templates giving dimensions of xi variables typedef typename FUN::DIMSY DIMSY; // DIMSY is a "vector" of templates giving dimensions of yj variables typedef typename FUN::DIMSP DIMSP; // DIMSP is a "vector" of templates giving dimensions of parameters variables typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMX = DIMSX::SUM; // DIMX is sum of dimensions for xi variables const int DIMY = DIMSY::SUM; // DIMY is sum of dimensions for yj variables const int DIMP = DIMSP::SUM; // DIMP is sum of dimensions for parameters variables const int DIMOUT = VARFINAL::DIM; // dimension of output variable const int DIMFOUT = FUN::F::DIM; // DIMFOUT is dimension of output variable of inner function static_assert(DIMFOUT==1,"DIMFOUT should be 1"); static_assert(SUM_SCHEME==BLOCK_SUM,"only BLOCK_SUM available"); // load parameter(s) TYPE param_loc[DIMP < 1 ? 1 : DIMP]; if (nbatchdims == 0) load<DIMSP,INDSP>(0, param_loc, args); // load parameters variables from global memory to local thread memory else load<DIMSP,INDSP>(0, param_loc, args, indices_p); // Possibly, with offsets as we support broadcasting over batch dimensions TYPE fout[DIMFOUT*BLOCKSIZE_CHUNKS]; // get the value of variable (index with i) TYPE xi[DIMX < 1 ? 1 : DIMX]; if (i < end_x) { if (nbatchdims == 0) load<DIMSX, INDSI>(i, xi, args); // load xi variables from global memory to local thread memory else load< DIMSX, INDSI>(threadIdx.x, xi, args, indices_i); // Possibly, with offsets as we support broadcasting over batch dimensions #pragma unroll for (int k=0; k<DIMOUT; k++) { out[i*DIMOUT+k] = 0.0f; } } __TYPEACC__ acc[DIMFINALCHUNK]; __INDEX__ start_y = ranges_y[2*start_slice], end_y = 0; for( __INDEX__ index = start_slice ; index < end_slice ; index++ ) { if( (index+1 >= end_slice) || (ranges_y[2*index+2] != ranges_y[2*index+1]) ) { end_y = ranges_y[2*index+1]; for (int jstart = start_y, tile = 0; jstart < end_y; jstart += blockDim.x, tile++) { // get the current column int j = jstart + threadIdx.x; if (j < end_y) { // we load yj from device global memory only if j<end_y if (nbatchdims == 0) load<DIMSY,INDSJ>(j, yj + threadIdx.x * DIMY, args); // load yj variables from global memory to shared memory else load<DIMSY,INDSJ>(j-start_y, yj+threadIdx.x*DIMY, args, indices_j); // Possibly, with offsets as we support broadcasting over batch dimensions } __syncthreads(); if (i < end_x) { // we compute x1i only if needed TYPE * yjrel = yj; // Loop on the columns of the current block. 
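// Each thread evaluates the scalar inner formula (DIMFOUT == 1) against every
// yj cached in shared memory for this tile, keeping one value per column in
// fout; do_finalchunk_sub_ranges below then multiplies those scalars by the
// matching VARFINAL entries and accumulates into out, DIMFINALCHUNK
// components of VARFINAL at a time.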
for (int jrel = 0; (jrel < BLOCKSIZE_CHUNKS) && (jrel < end_y - jstart); jrel++, yjrel += DIMY) call<DIMSX, DIMSY, DIMSP>(fun, fout+jrel*DIMFOUT, xi, yjrel, param_loc); // Call the function, which outputs results in fout } __syncthreads(); for (int chunk=0; chunk<NCHUNKS-1; chunk++) do_finalchunk_sub_ranges < FUN_GLOBAL, VARFINAL, DIMFINALCHUNK > (acc, i, j, jstart, start_y, chunk, end_x, end_y, nbatchdims, indices_j, args, fout, yj, out); do_finalchunk_sub_ranges < FUN_GLOBAL, VARFINAL, DIMLASTFINALCHUNK > (acc, i, j, jstart, start_y, NCHUNKS-1, end_x, end_y, nbatchdims, indices_j, args, fout, yj, out); } if(index+1 < end_slice) start_y = ranges_y[2*index+2] ; } } } template < int BLOCKSIZE_CHUNKS, class FUN_GLOBAL, class VARFINAL > struct GpuConv1DOnDevice_ranges<2,BLOCKSIZE_CHUNKS,FUN_GLOBAL,VARFINAL> { template < typename TYPE, class FUN > static void Eval(dim3 gridSize, dim3 blockSize, size_t SharedMem, FUN fun, int nx, int ny, int nbatchdims, int *shapes, int *offsets_d, __INDEX__* lookup_d, __INDEX__* slices_x, __INDEX__* ranges_y, TYPE *out, TYPE **args) { GpuConv1DOnDevice_ranges_FinalChunks < BLOCKSIZE_CHUNKS, VARFINAL > <<< gridSize, blockSize, SharedMem >>> (FUN_GLOBAL(), fun, nx, ny, nbatchdims, shapes, offsets_d, lookup_d, slices_x, ranges_y, out, args); } }; #endif template < class FUN, class FUN_CHUNKED_CURR, int DIMCHUNK_CURR, typename TYPE > __device__ void do_chunk_sub_ranges(TYPE *acc, int tile, int i, int j, int jstart, int start_y, int chunk, int end_x, int end_y, int nbatchdims, int *indices_i, int *indices_j, TYPE **args, TYPE *fout, TYPE *xi, TYPE *yj, TYPE *param_loc) { using CHK = Chunk_Mode_Constants<FUN>; TYPE fout_tmp_chunk[CHK::FUN_CHUNKED::DIM]; if (i < end_x) { if (nbatchdims == 0) { load_chunks < typename CHK::INDSI_CHUNKED, DIMCHUNK, DIMCHUNK_CURR, CHK::DIM_ORG > (i, chunk, xi + CHK::DIMX_NOTCHUNKED, args); } else { load_chunks < typename CHK::INDSI_CHUNKED, typename FUN::INDSI, DIMCHUNK, DIMCHUNK_CURR, CHK::DIM_ORG > (threadIdx.x, chunk, xi + CHK::DIMX_NOTCHUNKED, args, indices_i); } } if (j < end_y) { // we load yj from device global memory only if j<ny if (nbatchdims == 0) { load_chunks < typename CHK::INDSJ_CHUNKED, DIMCHUNK, DIMCHUNK_CURR, CHK::DIM_ORG > (j, chunk, yj + threadIdx.x * CHK::DIMY + CHK::DIMY_NOTCHUNKED, args); } else { load_chunks < typename CHK::INDSJ_CHUNKED, typename FUN::INDSJ, DIMCHUNK, DIMCHUNK_CURR, CHK::DIM_ORG > (j-start_y, chunk, yj + threadIdx.x * CHK::DIMY + CHK::DIMY_NOTCHUNKED, args, indices_j); } } __syncthreads(); if (i < end_x) { // we compute only if needed TYPE *yjrel = yj; // Loop on the columns of the current block. 
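// For every yj column cached in this tile, evaluate the chunked sub-formula
// on the current DIMCHUNK_CURR-wide slice of the chunked variables and fold
// the partial result into foutj, the per-column accumulator that is carried
// across all chunks of this tile.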
for (int jrel = 0; (jrel < blockDim.x) && (jrel < end_y - jstart); jrel++, yjrel += CHK::DIMY) { TYPE *foutj = fout+jrel*CHK::FUN_CHUNKED::DIM; call < CHK::DIMSX, CHK::DIMSY, CHK::DIMSP > (FUN_CHUNKED_CURR::template EvalFun<CHK::INDS>(), fout_tmp_chunk, xi, yjrel, param_loc); CHK::FUN_CHUNKED::acc_chunk(foutj, fout_tmp_chunk); } } __syncthreads(); } template < int BLOCKSIZE_CHUNKS, typename TYPE, class FUN > __global__ void GpuConv1DOnDevice_ranges_Chunks(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int *offsets_d, __INDEX__* lookup_d, __INDEX__* slices_x, __INDEX__* ranges_y, TYPE *out, TYPE **args) { using CHK = Chunk_Mode_Constants<FUN>; // Buffers for the "broadcasted indices" ----------------------------------- typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; const int SIZEVARS = SIZEI + SIZEJ + SIZEP; int offsets[SIZEVARS]; int *indices_i = offsets, *indices_j = offsets + SIZEI, *indices_p = offsets + SIZEI + SIZEJ; if (nbatchdims > 0) { for (int k = 0; k < SIZEVARS; k++) { offsets[k] = offsets_d[ SIZEVARS * blockIdx.x + k ]; } } // Retrieve our position along the laaaaarge [1,~nx] axis: ----------------- __INDEX__ range_id= (lookup_d)[3*blockIdx.x] ; __INDEX__ start_x = (lookup_d)[3*blockIdx.x+1] ; __INDEX__ end_x = (lookup_d)[3*blockIdx.x+2] ; // The "slices_x" vector encodes a set of cutting points in // the "ranges_y" array of ranges. // As discussed in the Genred docstring, the first "0" is implicit: __INDEX__ start_slice = range_id < 1 ? 0 : slices_x[range_id-1]; __INDEX__ end_slice = slices_x[range_id]; // get the index of the current thread int i = start_x + threadIdx.x; // declare shared mem extern __shared__ TYPE yj[]; TYPE param_loc[CHK::DIMP < 1 ? 
1 : CHK::DIMP]; if (nbatchdims == 0) { load<CHK::DIMSP,CHK::INDSP>(0, param_loc, args); // load parameters variables from global memory to local thread memory } else { load<CHK::DIMSP,CHK::INDSP>(0, param_loc, args, indices_p); // Possibly, with offsets as we support broadcasting over batch dimensions } __TYPEACC__ acc[CHK::DIMRED]; #if SUM_SCHEME == BLOCK_SUM // additional tmp vector to store intermediate results from each block TYPE tmp[CHK::DIMRED]; #elif SUM_SCHEME == KAHAN_SCHEME // additional tmp vector to accumulate errors static const int DIM_KAHAN = FUN::template KahanScheme<__TYPEACC__,TYPE>::DIMACC; TYPE tmp[DIM_KAHAN]; #endif if (i < end_x) { typename FUN::template InitializeReduction<__TYPEACC__, TYPE >()(acc); // acc = 0 #if SUM_SCHEME == KAHAN_SCHEME VectAssign<DIM_KAHAN>(tmp,0.0f); #endif } TYPE xi[CHK::DIMX]; TYPE fout_chunk[BLOCKSIZE_CHUNKS*CHK::DIMOUT_CHUNK]; if (i < end_x) { if (nbatchdims == 0) { load < CHK::DIMSX_NOTCHUNKED, CHK::INDSI_NOTCHUNKED > (i, xi, args); // load xi variables from global memory to local thread memory } else { load_indref < CHK::DIMSX_NOTCHUNKED, CHK::INDSI_NOTCHUNKED, FUN::INDSI > (threadIdx.x, xi, args, indices_i); // load xi variables from global memory to local thread memory } } __INDEX__ start_y = ranges_y[2*start_slice], end_y = 0; for( __INDEX__ index = start_slice ; index < end_slice ; index++ ) { if( (index+1 >= end_slice) || (ranges_y[2*index+2] != ranges_y[2*index+1]) ) { //start_y = ranges_y[2*index] ; end_y = ranges_y[2*index+1]; for (int jstart = start_y, tile = 0; jstart < end_y; jstart += blockDim.x, tile++) { // get the current column int j = jstart + threadIdx.x; if (j < end_y) { if (nbatchdims == 0) { load < CHK::DIMSY_NOTCHUNKED, CHK::INDSJ_NOTCHUNKED > (j, yj + threadIdx.x * CHK::DIMY, args); } else { load_indref < CHK::DIMSY_NOTCHUNKED, CHK::INDSJ_NOTCHUNKED, FUN::INDSJ > (j-start_y, yj + threadIdx.x * CHK::DIMY, args, indices_j); } } __syncthreads(); if (i < end_x) { // we compute only if needed for (int jrel = 0; (jrel < blockDim.x) && (jrel < end_y - jstart); jrel++) CHK::FUN_CHUNKED::initacc_chunk(fout_chunk+jrel*CHK::DIMOUT_CHUNK); #if SUM_SCHEME == BLOCK_SUM typename FUN::template InitializeReduction<TYPE,TYPE>()(tmp); // tmp = 0 #endif } // looping on chunks (except the last) #pragma unroll for (int chunk=0; chunk<CHK::NCHUNKS-1; chunk++) do_chunk_sub_ranges < FUN, CHK::FUN_CHUNKED, DIMCHUNK > (acc, tile, i, j, jstart, start_y, chunk, end_x, end_y, nbatchdims, indices_i, indices_j, args, fout_chunk, xi, yj, param_loc); // last chunk do_chunk_sub_ranges < FUN, CHK::FUN_LASTCHUNKED, CHK::DIMLASTCHUNK > (acc, tile, i, j, jstart, start_y, CHK::NCHUNKS-1, end_x,end_y, nbatchdims, indices_i, indices_j, args, fout_chunk, xi, yj, param_loc); if (i < end_x) { TYPE *yjrel = yj; // Loop on the columns of the current block. 
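// All chunks of this tile have been processed, so fout_chunk now holds the
// chunk-reduced value of every cached column. The loop below applies the
// post-chunk part of the formula to each column and folds its output into the
// running reduction (into tmp for BLOCK_SUM, otherwise into acc, with error
// compensation under KAHAN_SCHEME); the two branches differ only in the
// column index handed to ranked reductions (start_y is added when
// nbatchdims == 0).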
if (nbatchdims == 0) { for (int jrel = 0; (jrel < blockDim.x) && (jrel <end_y - jstart); jrel++, yjrel += CHK::DIMY) { #if SUM_SCHEME != KAHAN_SCHEME int ind = jrel + tile * blockDim.x + start_y; #endif TYPE *foutj = fout_chunk + jrel*CHK::DIMOUT_CHUNK; TYPE fout_tmp[CHK::DIMFOUT]; call<CHK::DIMSX, CHK::DIMSY, CHK::DIMSP, pack<CHK::DIMOUT_CHUNK> > (typename CHK::FUN_POSTCHUNK::template EvalFun<ConcatPacks<typename CHK::INDS,pack<FUN::NMINARGS>>>(), fout_tmp,xi, yjrel, param_loc, foutj); #if SUM_SCHEME == BLOCK_SUM #if USE_HALF typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout_tmp, __floats2half2_rn(2*ind,2*ind+1)); // tmp += fout_tmp #else typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout_tmp, ind); // tmp += fout_tmp #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, fout_tmp, tmp); #else #if USE_HALF typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout_tmp, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout_tmp #else typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout_tmp, ind); // acc += fout_tmp #endif #endif } } else { for (int jrel = 0; (jrel < blockDim.x) && (jrel <end_y - jstart); jrel++, yjrel += CHK::DIMY) { #if SUM_SCHEME != KAHAN_SCHEME int ind = jrel + tile * blockDim.x; #endif TYPE *foutj = fout_chunk + jrel*CHK::DIMOUT_CHUNK; TYPE fout_tmp[CHK::DIMFOUT]; call<CHK::DIMSX, CHK::DIMSY, CHK::DIMSP, pack<CHK::DIMOUT_CHUNK> > (typename CHK::FUN_POSTCHUNK::template EvalFun<ConcatPacks<typename CHK::INDS,pack<FUN::NMINARGS>>>(), fout_tmp,xi, yjrel, param_loc, foutj); #if SUM_SCHEME == BLOCK_SUM #if USE_HALF typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout_tmp, __floats2half2_rn(2*ind,2*ind+1)); // tmp += fout_tmp #else typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout_tmp, ind); // tmp += fout_tmp #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, fout_tmp, tmp); #else #if USE_HALF typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout_tmp, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout_tmp #else typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout_tmp, ind); // acc += fout_tmp #endif #endif } } #if SUM_SCHEME == BLOCK_SUM typename FUN::template ReducePair<__TYPEACC__,TYPE>()(acc, tmp); // acc += tmp #endif } __syncthreads(); } if(index+1 < end_slice) { start_y = ranges_y[2*index+2] ; } } } if (i < end_x) typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, out + i * CHK::DIMOUT, i); } template < int BLOCKSIZE_CHUNKS > struct GpuConv1DOnDevice_ranges<1,BLOCKSIZE_CHUNKS> { template < typename TYPE, class FUN > static void Eval(dim3 gridSize, dim3 blockSize, size_t SharedMem, FUN fun, int nx, int ny, int nbatchdims, int *shapes, int *offsets_d, __INDEX__* lookup_d, __INDEX__* slices_x, __INDEX__* ranges_y, TYPE *out, TYPE **args) { GpuConv1DOnDevice_ranges_Chunks < BLOCKSIZE_CHUNKS > <<< gridSize, blockSize, SharedMem >>> (fun, nx, ny, nbatchdims, shapes, offsets_d, lookup_d, slices_x, ranges_y, out, args); } }; template < typename TYPE, class FUN > __global__ void GpuConv1DOnDevice_ranges_NoChunks(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int *offsets_d, __INDEX__* lookup_d, __INDEX__* slices_x, __INDEX__* ranges_y, TYPE *out, TYPE **args) { // Buffers for the "broadcasted indices" ----------------------------------- typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE; 
const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; const int SIZEVARS = SIZEI + SIZEJ + SIZEP; int offsets[SIZEVARS]; int *indices_i = offsets, *indices_j = offsets + SIZEI, *indices_p = offsets + SIZEI + SIZEJ; if (nbatchdims > 0) { for (int k = 0; k < SIZEVARS; k++) { offsets[k] = offsets_d[ SIZEVARS * blockIdx.x + k ]; } } // Retrieve our position along the laaaaarge [1,~nx] axis: ----------------- __INDEX__ range_id= (lookup_d)[3*blockIdx.x] ; __INDEX__ start_x = (lookup_d)[3*blockIdx.x+1] ; __INDEX__ end_x = (lookup_d)[3*blockIdx.x+2] ; // The "slices_x" vector encodes a set of cutting points in // the "ranges_y" array of ranges. // As discussed in the Genred docstring, the first "0" is implicit: __INDEX__ start_slice = range_id < 1 ? 0 : slices_x[range_id-1]; __INDEX__ end_slice = slices_x[range_id]; // get the index of the current thread int i = start_x + threadIdx.x; // declare shared mem extern __shared__ TYPE yj[]; // get templated dimensions : typedef typename FUN::DIMSX DIMSX; // DIMSX is a "vector" of templates giving dimensions of xi variables typedef typename FUN::DIMSY DIMSY; // DIMSY is a "vector" of templates giving dimensions of yj variables typedef typename FUN::DIMSP DIMSP; // DIMSP is a "vector" of templates giving dimensions of parameters variables typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMX = DIMSX::SUM; // DIMX is sum of dimensions for xi variables const int DIMY = DIMSY::SUM; // DIMY is sum of dimensions for yj variables const int DIMP = DIMSP::SUM; // DIMP is sum of dimensions for parameters variables const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int DIMFOUT = FUN::F::DIM; // DIMFOUT is dimension of output variable of inner function // load parameter(s) TYPE param_loc[DIMP < 1 ? 1 : DIMP]; if (nbatchdims == 0) { load<DIMSP, INDSP>(0, param_loc, args); // load parameters variables from global memory to local thread memory } else { load<DIMSP,INDSP>(0, param_loc, args, indices_p); // Possibly, with offsets as we support broadcasting over batch dimensions } TYPE fout[DIMFOUT < 1 ? 1 : DIMFOUT]; // get the value of variable (index with i) TYPE xi[DIMX < 1 ? 
1 : DIMX]; __TYPEACC__ acc[DIMRED]; #if SUM_SCHEME == BLOCK_SUM // additional tmp vector to store intermediate results from each block TYPE tmp[DIMRED]; #elif SUM_SCHEME == KAHAN_SCHEME // additional tmp vector to accumulate errors const int DIM_KAHAN = FUN::template KahanScheme<__TYPEACC__,TYPE>::DIMACC; TYPE tmp[DIM_KAHAN]; #endif if(i<end_x) { typename FUN::template InitializeReduction<__TYPEACC__,TYPE>()(acc); // acc = 0 #if SUM_SCHEME == KAHAN_SCHEME VectAssign<DIM_KAHAN>(tmp,0.0f); #endif if (nbatchdims == 0) { load< DIMSX, INDSI>(i, xi, args); // load xi variables from global memory to local thread memory } else { load< DIMSX, INDSI>(threadIdx.x, xi, args, indices_i); // Possibly, with offsets as we support broadcasting over batch dimensions } } __INDEX__ start_y = ranges_y[2*start_slice], end_y = 0; for( __INDEX__ index = start_slice ; index < end_slice ; index++ ) { if( (index+1 >= end_slice) || (ranges_y[2*index+2] != ranges_y[2*index+1]) ) { //start_y = ranges_y[2*index] ; end_y = ranges_y[2*index+1]; for(int jstart = start_y, tile = 0; jstart < end_y; jstart += blockDim.x, tile++) { // get the current column int j = jstart + threadIdx.x; if(j<end_y) { // we load yj from device global memory only if j<end_y if (nbatchdims == 0) { load<DIMSY,INDSJ>(j, yj+threadIdx.x*DIMY, args); // load yj variables from global memory to shared memory } else { load<DIMSY,INDSJ>(j-start_y, yj+threadIdx.x*DIMY, args, indices_j); // Possibly, with offsets as we support broadcasting over batch dimensions } } __syncthreads(); if(i<end_x) { // we compute x1i only if needed TYPE* yjrel = yj; // Loop on the columns of the current block. #if SUM_SCHEME == BLOCK_SUM typename FUN::template InitializeReduction<TYPE,TYPE>()(tmp); // tmp = 0 #endif if (nbatchdims == 0) { for(int jrel = 0; (jrel < blockDim.x) && (jrel<end_y-jstart); jrel++, yjrel+=DIMY) { call<DIMSX,DIMSY,DIMSP>(fun,fout,xi,yjrel,param_loc); // Call the function, which outputs results in xi[0:DIMX1] #if SUM_SCHEME == BLOCK_SUM #if USE_HALF int ind = jrel+tile*blockDim.x + start_y; typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout, __floats2half2_rn(2*ind,2*ind+1)); // tmp += fout #else typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout, jrel+tile*blockDim.x + start_y); // tmp += fout #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, fout, tmp); #else #if USE_HALF int ind = jrel+tile*blockDim.x + start_y; typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout #else typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout, jrel+tile*blockDim.x + start_y); // acc += fout #endif #endif } } else { for(int jrel = 0; (jrel < blockDim.x) && (jrel<end_y-jstart); jrel++, yjrel+=DIMY) { call<DIMSX,DIMSY,DIMSP>(fun,fout,xi,yjrel,param_loc); // Call the function, which outputs results in fout #if SUM_SCHEME == BLOCK_SUM #if USE_HALF int ind = jrel+tile*blockDim.x; typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout, __floats2half2_rn(2*ind,2*ind+1)); // tmp += fout #else typename FUN::template ReducePairShort<TYPE,TYPE>()(tmp, fout, jrel+tile*blockDim.x); // tmp += fout #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, fout, tmp); #else #if USE_HALF int ind = jrel+tile*blockDim.x; typename FUN::template ReducePairShort<__TYPEACC__,TYPE>()(acc, fout, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout #else typename FUN::template 
ReducePairShort<__TYPEACC__,TYPE>()(acc, fout, jrel+tile*blockDim.x); // acc += fout #endif #endif } } #if SUM_SCHEME == BLOCK_SUM typename FUN::template ReducePair<__TYPEACC__,TYPE>()(acc, tmp); // acc += tmp #endif } __syncthreads(); } if(index+1 < end_slice) { start_y = ranges_y[2*index+2] ; } } } if(i<end_x) { typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, out+i*DIMOUT, i); } } template < int DUMMY > struct GpuConv1DOnDevice_ranges<0,DUMMY> { template < typename TYPE, class FUN > static void Eval(dim3 gridSize, dim3 blockSize, size_t SharedMem, FUN fun, int nx, int ny, int nbatchdims, int *shapes, int *offsets_d, __INDEX__* lookup_d, __INDEX__* slices_x, __INDEX__* ranges_y, TYPE *out, TYPE **args) { GpuConv1DOnDevice_ranges_NoChunks <<< gridSize, blockSize, SharedMem >>> (fun, nx, ny, nbatchdims, shapes, offsets_d, lookup_d, slices_x, ranges_y, out, args); } }; template < class FUN > int* build_offset_tables( int nbatchdims, int *shapes, int nblocks, __INDEX__ *lookup_h ) { // Support for broadcasting over batch dimensions ============================================= typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; const int SIZEI = VARSI::SIZE; const int SIZEJ = VARSJ::SIZE; const int SIZEP = VARSP::SIZE; const int SIZEVARS = SIZEI + SIZEJ + SIZEP; // Separate and store the shapes of the "i" and "j" variables + parameters -------------- // // shapes is an array of size (1+nargs)*(nbatchdims+3), which looks like: // [ A, .., B, M, N, D_out] -> output // [ A, .., B, M, 1, D_1 ] -> "i" variable // [ A, .., B, 1, N, D_2 ] -> "j" variable // [ A, .., B, 1, 1, D_3 ] -> "parameter" // [ A, .., 1, M, 1, D_4 ] -> N.B.: we support broadcasting on the batch dimensions! // [ 1, .., 1, M, 1, D_5 ] -> (we'll just ask users to fill in the shapes with *explicit* ones) int shapes_i[SIZEI*(nbatchdims+1)], shapes_j[SIZEJ*(nbatchdims+1)], shapes_p[SIZEP*(nbatchdims+1)]; // First, we fill shapes_i with the "relevant" shapes of the "i" variables, // making it look like, say: // [ A, .., B, M] // [ A, .., 1, M] // [ A, .., A, M] // Then, we do the same for shapes_j, but with "N" instead of "M". // And finally for the parameters, with "1" instead of "M". fill_shapes<FUN>(nbatchdims, shapes, shapes_i, shapes_j, shapes_p); const int tagIJ = FUN::tagJ; // 1 if the reduction is made "over j", 0 if it is made "over i" int M = shapes[nbatchdims], N = shapes[nbatchdims+1]; // We create a lookup table, "offsets", of shape (nblocks, SIZEVARS) -------- int *offsets_h = NULL, *offsets_d = NULL; offsets_h = new int[nblocks * SIZEVARS] ; for (int k=0; k < nblocks; k++) { int range_id = (int) lookup_h[3*k] ; int start_x = tagIJ ? range_id * M : range_id * N; int start_y = tagIJ ? 
range_id * N : range_id * M; int patch_offset = (int) (lookup_h[3*k+1]-start_x); vect_broadcast_index(start_x, nbatchdims, SIZEI, shapes, shapes_i, offsets_h + k*SIZEVARS, patch_offset); vect_broadcast_index(start_y, nbatchdims, SIZEJ, shapes, shapes_j, offsets_h + k*SIZEVARS + SIZEI); vect_broadcast_index(range_id, nbatchdims, SIZEP, shapes, shapes_p, offsets_h + k*SIZEVARS + SIZEI + SIZEJ); } CudaSafeCall(cudaMalloc((int**)&offsets_d, sizeof(int)*nblocks*SIZEVARS)); CudaSafeCall(cudaMemcpy(offsets_d, offsets_h, sizeof(int)*nblocks*SIZEVARS, cudaMemcpyHostToDevice)); delete [] offsets_h; return offsets_d; } struct GpuConv1D_ranges_FromHost { template < typename TYPE, class FUN > static int Eval_(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, int nredranges_x, int nredranges_y, __INDEX__ **ranges, TYPE *out, TYPE **args_h) { typedef typename FUN::DIMSX DIMSX; typedef typename FUN::DIMSY DIMSY; typedef typename FUN::DIMSP DIMSP; typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMOUT = FUN::DIM; // dimension of output variable const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; static const int NMINARGS = FUN::NMINARGS; // Compute the memory footprints of all (broadcasted?) variables =========== typedef typename FUN::VARSI VARSI; typedef typename FUN::VARSJ VARSJ; typedef typename FUN::VARSP VARSP; // Separate and store the shapes of the "i" and "j" variables + parameters -------------- // // shapes is an array of size (1+nargs)*(nbatchdims+3), which looks like: // [ A, .., B, M, N, D_out] -> output // [ A, .., B, M, 1, D_1 ] -> "i" variable // [ A, .., B, 1, N, D_2 ] -> "j" variable // [ A, .., B, 1, 1, D_3 ] -> "parameter" // [ A, .., 1, M, 1, D_4 ] -> N.B.: we support broadcasting on the batch dimensions! // [ 1, .., 1, M, 1, D_5 ] -> (we'll just ask users to fill in the shapes with *explicit* ones) int shapes_i[(SIZEI)*(nbatchdims+1)], shapes_j[SIZEJ*(nbatchdims+1)], shapes_p[SIZEP*(nbatchdims+1)]; // First, we fill shapes_i with the "relevant" shapes of the "i" variables, // making it look like, say: // [ A, .., B, M] // [ A, .., 1, M] // [ A, .., A, M] // Then, we do the same for shapes_j, but with "N" instead of "M". // And finally for the parameters, with "1" instead of "M". fill_shapes<FUN>(nbatchdims, shapes, shapes_i, shapes_j, shapes_p); int total_footprint_x = 0, total_footprint_y = 0, total_footprint_p = 0; int footprints_x[SIZEI], footprints_y[SIZEJ], footprints_p[SIZEP]; int tmp = 0; // Footprints of the "x" variables: ---------------------------------------- for (int k=0; k < SIZEI; k++) { // For the actual variables: tmp = DIMSX::VAL(k); // use the product of the vector dimension... for (int l=0; l < nbatchdims+1; l++) { tmp *= shapes_i[ k*(nbatchdims+1) + l]; // with all the shape's dimensions } footprints_x[k] = tmp; total_footprint_x += tmp; } // Footprints of the "y" variables: ---------------------------------------- for (int k=0; k < SIZEJ; k++) { // For the actual variables: tmp = DIMSY::VAL(k); // use the product of the vector dimension... for (int l=0; l < nbatchdims+1; l++) { tmp *= shapes_j[ k*(nbatchdims+1) + l]; // with all the shape's dimensions } footprints_y[k] = tmp; total_footprint_y += tmp; } // Footprints of the "parameters": ----------------------------------------- for (int k=0; k < SIZEP; k++) { // For the actual variables: tmp = DIMSP::VAL(k); // use the product of the vector dimension... 
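// (The per-variable footprints computed in these loops are later used to size
// the single device allocation and the host-to-device copy of each argument.)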
for (int l=0; l < nbatchdims+1; l++) { tmp *= shapes_p[ k*(nbatchdims+1) + l]; // with all the shape's dimensions } footprints_p[k] = tmp; total_footprint_p += tmp; } // Load data on the device ================================================= // Setup pointers, allocate memory ----------------------------------------- // pointer to device output array TYPE *out_d; // array of pointers to device input arrays TYPE **args_d; // single cudaMalloc void *p_data; CudaSafeCall(cudaMalloc(&p_data, sizeof(TYPE*) * NMINARGS // pointers to the start of each variable + sizeof(TYPE) * ( nx*DIMOUT // output + total_footprint_p // parameters + total_footprint_x // "i" variables if tagIJ==1, "j" otherwise + total_footprint_y ))); // "j" variables if tagIJ==1, "i" otherwise // Now, fill in our big, contiguous array: --------------------------------- // In the head, the pointer to the data: args_d = (TYPE **) p_data; // In the tail, the actual data: TYPE *dataloc = (TYPE *) (args_d + NMINARGS); // Beware: Instead of storing TYPE*, we now store TYPE out_d = dataloc; dataloc += nx*DIMOUT; // host array of pointers to device data TYPE *ph[NMINARGS]; for (int k = 0; k < SIZEP; k++) { int indk = INDSP::VAL(k); int nvals = footprints_p[k]; CudaSafeCall(cudaMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } for (int k = 0; k < SIZEI; k++) { int indk = INDSI::VAL(k); int nvals = footprints_x[k]; CudaSafeCall(cudaMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } for (int k = 0; k < SIZEJ; k++) { int indk = INDSJ::VAL(k); int nvals = footprints_y[k]; CudaSafeCall(cudaMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } // Load on the device the pointer arrays: ---------------------------------- CudaSafeCall(cudaMemcpy(args_d, ph, NMINARGS * sizeof(TYPE *), cudaMemcpyHostToDevice)); // Setup the compute properties ============================================================== // Compute on device : grid and block are both 1d int dev = -1; CudaSafeCall(cudaGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; #if USE_FINAL_CHUNKS==1 static const int USE_CHUNK_MODE = 2; using FUN_INTERNAL = Sum_Reduction<typename FUN::F::ARG1,FUN::tagI>; using VARFINAL = typename FUN::F::ARG2; #else static const int USE_CHUNK_MODE = ENABLECHUNK && ( FUN::F::template CHUNKED_FORMULAS<DIMCHUNK>::SIZE == 1 ); #endif static const int DIMY_SHARED = Get_DIMY_SHARED<FUN,USE_CHUNK_MODE>::Value; static const int BLOCKSIZE_CHUNKS = ::std::min(CUDA_BLOCK_SIZE, ::std::min(1024, (int) (49152 / ::std::max(1, (int) ( DIMY_SHARED * sizeof(TYPE)))))); int blocksize_nochunks = ::std::min(CUDA_BLOCK_SIZE, ::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int) ( DIMY_SHARED * sizeof(TYPE)))))); blockSize.x = USE_CHUNK_MODE ? BLOCKSIZE_CHUNKS : blocksize_nochunks; // Ranges pre-processing... ================================================================== // N.B.: In the following code, we assume that the x-ranges do not overlap. // Otherwise, we'd have to assume that DIMRED == DIMOUT // or allocate a buffer of size nx * DIMRED. This may be done in the future. // Cf. reduction.h: // FUN::tagJ = 1 for a reduction over j, result indexed by i // FUN::tagJ = 0 for a reduction over i, result indexed by j int nranges = FUN::tagJ ? nranges_x : nranges_y ; int nredranges = FUN::tagJ ? 
nredranges_y : nredranges_x ; __INDEX__ *ranges_x = FUN::tagJ ? ranges[0] : ranges[3] ; __INDEX__ *slices_x = FUN::tagJ ? ranges[1] : ranges[4] ; __INDEX__ *ranges_y = FUN::tagJ ? ranges[2] : ranges[5] ; // Computes the number of blocks needed --------------------------------------------- int nblocks = 0, len_range = 0; for(int i=0; i<nranges ; i++){ len_range = ranges_x[2*i+1] - ranges_x[2*i] ; nblocks += (len_range/blockSize.x) + (len_range%blockSize.x==0 ? 0 : 1) ; } // Create a lookup table for the blocks -------------------------------------------- __INDEX__ *lookup_h = NULL; lookup_h = new __INDEX__[3*nblocks] ; int index = 0; for(int i=0; i<nranges ; i++){ len_range = ranges_x[2*i+1] - ranges_x[2*i] ; for(int j=0; j<len_range ; j+=blockSize.x) { lookup_h[3*index] = i; lookup_h[3*index+1] = ranges_x[2*i] + j; lookup_h[3*index+2] = ranges_x[2*i] + j + ::std::min((int) blockSize.x, len_range-j ) ; index++; } } // Load the table on the device ----------------------------------------------------- __INDEX__ *lookup_d = NULL; CudaSafeCall(cudaMalloc((__INDEX__**)&lookup_d, sizeof(__INDEX__)*3*nblocks)); CudaSafeCall(cudaMemcpy(lookup_d, lookup_h, sizeof(__INDEX__)*3*nblocks, cudaMemcpyHostToDevice)); // Load copies of slices_x and ranges_y on the device: __INDEX__ *slices_x_d = NULL, *ranges_y_d = NULL; // Send data from host device: CudaSafeCall(cudaMalloc((__INDEX__**) &slices_x_d, sizeof(__INDEX__)*2*nranges)); CudaSafeCall(cudaMemcpy(slices_x_d, slices_x, sizeof(__INDEX__)*2*nranges, cudaMemcpyHostToDevice)); CudaSafeCall(cudaMalloc((__INDEX__**) &ranges_y_d, sizeof(__INDEX__)*2*nredranges)); CudaSafeCall(cudaMemcpy(ranges_y_d, ranges_y, sizeof(__INDEX__)*2*nredranges, cudaMemcpyHostToDevice)); // Support for broadcasting over batch dimensions ============================================= // We create a lookup table, "offsets", of shape (nblock, SIZEVARS): int *offsets_d = NULL; if (nbatchdims > 0) { offsets_d = build_offset_tables<FUN>( nbatchdims, shapes, nblocks, lookup_h ); } // ============================================================================================ dim3 gridSize; gridSize.x = nblocks; //nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1); #if USE_FINAL_CHUNKS==1 GpuConv1DOnDevice_ranges<USE_CHUNK_MODE,BLOCKSIZE_CHUNKS,FUN,VARFINAL>::Eval(gridSize, blockSize, blockSize.x * DIMY_SHARED * sizeof(TYPE), FUN_INTERNAL(), nx, ny, nbatchdims,shapes, offsets_d, lookup_d,slices_x_d,ranges_y_d, out_d, args_d); #else GpuConv1DOnDevice_ranges<USE_CHUNK_MODE,BLOCKSIZE_CHUNKS>::Eval(gridSize, blockSize, blockSize.x * DIMY_SHARED * sizeof(TYPE), fun, nx, ny, nbatchdims,shapes, offsets_d, lookup_d,slices_x_d,ranges_y_d, out_d, args_d); #endif // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); // Send data from device to host. CudaSafeCall(cudaMemcpy(out, out_d, sizeof(TYPE)*(nx*DIMOUT),cudaMemcpyDeviceToHost)); // Free memory. CudaSafeCall(cudaFree(p_data)); // Free the block lookup table : delete [] lookup_h; CudaSafeCall(cudaFree(lookup_d)); CudaSafeCall(cudaFree(slices_x_d)); CudaSafeCall(cudaFree(ranges_y_d)); if (nbatchdims > 0) { CudaSafeCall(cudaFree(offsets_d)); } return 0; } // and use getlist to enroll them into "pointers arrays" px and py. template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, int nredranges_x, int nredranges_y, __INDEX__ **ranges, int device_id, TYPE *out, Args... 
args) { if(device_id!=-1) CudaSafeCall(cudaSetDevice(device_id)); static const int Nargs = sizeof...(Args); TYPE *pargs[Nargs]; unpack(pargs, args...); return Eval_(fun,nx,ny,nbatchdims,shapes,nranges_x,nranges_y,nredranges_x,nredranges_y,ranges,out, pargs); } // same without the device_id argument template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, int nredranges_x, int nredranges_y, __INDEX__ **ranges, TYPE *out, Args... args) { return Eval(fun, nx, ny, nbatchdims, shapes, nranges_x, nranges_y, nredranges_x, nredranges_y, ranges, -1, out, args...); } // Idem, but with args given as an array of arrays, instead of an explicit list of arrays template < typename TYPE, class FUN > static int Eval(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, int nredranges_x, int nredranges_y, __INDEX__ **ranges, TYPE* out, TYPE** pargs, int device_id=-1) { // We set the GPU device on which computations will be performed if(device_id!=-1) CudaSafeCall(cudaSetDevice(device_id)); return Eval_(fun,nx,ny,nbatchdims,shapes,nranges_x,nranges_y,nredranges_x,nredranges_y,ranges,out,pargs); } }; struct GpuConv1D_ranges_FromDevice { template < typename TYPE, class FUN > static int Eval_(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, __INDEX__ **ranges, TYPE *out, TYPE** args) { static const int NMINARGS = FUN::NMINARGS; // device array of pointers to device data TYPE **args_d; // single cudaMalloc CudaSafeCall(cudaMalloc(&args_d, sizeof(TYPE*)*NMINARGS)); CudaSafeCall(cudaMemcpy(args_d, args, NMINARGS * sizeof(TYPE *), cudaMemcpyHostToDevice)); // Compute on device : grid and block are both 1d int dev = -1; CudaSafeCall(cudaGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; #if USE_FINAL_CHUNKS==1 static const int USE_CHUNK_MODE = 2; using FUN_INTERNAL = Sum_Reduction<typename FUN::F::ARG1,FUN::tagI>; using VARFINAL = typename FUN::F::ARG2; #else static const int USE_CHUNK_MODE = ENABLECHUNK && ( FUN::F::template CHUNKED_FORMULAS<DIMCHUNK>::SIZE == 1 ); #endif static const int DIMY_SHARED = Get_DIMY_SHARED<FUN,USE_CHUNK_MODE>::Value; static const int BLOCKSIZE_CHUNKS = ::std::min(CUDA_BLOCK_SIZE, ::std::min(1024, (int) (49152 / ::std::max(1, (int) ( DIMY_SHARED * sizeof(TYPE)))))); int blocksize_nochunks = ::std::min(CUDA_BLOCK_SIZE, ::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int) ( DIMY_SHARED * sizeof(TYPE)))))); blockSize.x = USE_CHUNK_MODE ? BLOCKSIZE_CHUNKS : blocksize_nochunks; // Ranges pre-processing... ================================================================== // N.B.: In the following code, we assume that the x-ranges do not overlap. // Otherwise, we'd have to assume that DIMRED == DIMOUT // or allocate a buffer of size nx * DIMRED. This may be done in the future. // Cf. reduction.h: // FUN::tagJ = 1 for a reduction over j, result indexed by i // FUN::tagJ = 0 for a reduction over i, result indexed by j int nranges = FUN::tagJ ? nranges_x : nranges_y ; __INDEX__ *ranges_x = FUN::tagJ ? ranges[0] : ranges[3] ; __INDEX__ *slices_x = FUN::tagJ ? ranges[1] : ranges[4] ; __INDEX__ *ranges_y = FUN::tagJ ? ranges[2] : ranges[5] ; __INDEX__ *ranges_x_h = NULL, *slices_x_d = NULL, *ranges_y_d = NULL; // The code below needs a pointer to ranges_x on *host* memory, ------------------- // as well as pointers to slices_x and ranges_y on *device* memory. 
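// (ranges_x is only read on the host, to count the blocks and fill the lookup
// table, whereas slices_x and ranges_y are dereferenced inside the kernels.)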
// -> Depending on the "ranges" location, we'll copy ranges_x *or* slices_x and ranges_y // to the appropriate memory: bool ranges_on_device = (nbatchdims==0); // N.B.: We only support Host ranges with Device data when these ranges were created // to emulate block-sparse reductions. if ( ranges_on_device ) { // The ranges are on the device ranges_x_h = new __INDEX__[2*nranges] ; // Send data from device to host. CudaSafeCall(cudaMemcpy(ranges_x_h, ranges_x, sizeof(__INDEX__)*2*nranges, cudaMemcpyDeviceToHost)); slices_x_d = slices_x; ranges_y_d = ranges_y; } else { // The ranges are on host memory; this is typically what happens with **batch processing**, // with ranges generated by keops_io.h: ranges_x_h = ranges_x; // Copy "slices_x" to the device: CudaSafeCall(cudaMalloc((__INDEX__**)&slices_x_d, sizeof(__INDEX__)*nranges)); CudaSafeCall(cudaMemcpy(slices_x_d, slices_x, sizeof(__INDEX__)*nranges, cudaMemcpyHostToDevice)); // Copy "redranges_y" to the device: with batch processing, we KNOW that they have the same shape as ranges_x CudaSafeCall(cudaMalloc((__INDEX__**)&ranges_y_d, sizeof(__INDEX__)*2*nranges)); CudaSafeCall(cudaMemcpy(ranges_y_d, ranges_y, sizeof(__INDEX__)*2*nranges, cudaMemcpyHostToDevice)); } // Computes the number of blocks needed --------------------------------------------- int nblocks = 0, len_range = 0; for(int i=0; i<nranges ; i++){ len_range = ranges_x_h[2*i+1] - ranges_x_h[2*i] ; nblocks += (len_range/blockSize.x) + (len_range%blockSize.x==0 ? 0 : 1) ; } // Create a lookup table for the blocks -------------------------------------------- __INDEX__ *lookup_h = NULL; lookup_h = new __INDEX__[3*nblocks] ; int index = 0; for(int i=0; i<nranges ; i++){ len_range = ranges_x_h[2*i+1] - ranges_x_h[2*i] ; for(int j=0; j<len_range ; j+=blockSize.x) { lookup_h[3*index] = i; lookup_h[3*index+1] = ranges_x_h[2*i] + j; lookup_h[3*index+2] = ranges_x_h[2*i] + j + ::std::min((int) blockSize.x, len_range-j ) ; index++; } } // Load the table on the device ----------------------------------------------------- __INDEX__ *lookup_d = NULL; CudaSafeCall(cudaMalloc((__INDEX__**)&lookup_d, sizeof(__INDEX__)*3*nblocks)); CudaSafeCall(cudaMemcpy(lookup_d, lookup_h, sizeof(__INDEX__)*3*nblocks, cudaMemcpyHostToDevice)); // Support for broadcasting over batch dimensions ============================================= // We create a lookup table, "offsets", of shape (nblock, SIZEVARS): int *offsets_d = NULL; if (nbatchdims > 0) { offsets_d = build_offset_tables<FUN>( nbatchdims, shapes, nblocks, lookup_h ); } // ============================================================================================ dim3 gridSize; gridSize.x = nblocks ; //nx / blockSize.x + (nx%blockSize.x==0 ? 
0 : 1); #if USE_FINAL_CHUNKS==1 GpuConv1DOnDevice_ranges<USE_CHUNK_MODE,BLOCKSIZE_CHUNKS,FUN,VARFINAL>::Eval(gridSize, blockSize, blockSize.x * DIMY_SHARED * sizeof(TYPE), FUN_INTERNAL(), nx, ny, nbatchdims,shapes, offsets_d, lookup_d,slices_x_d,ranges_y_d, out, args_d); #else GpuConv1DOnDevice_ranges<USE_CHUNK_MODE,BLOCKSIZE_CHUNKS>::Eval(gridSize, blockSize, blockSize.x * DIMY_SHARED * sizeof(TYPE), fun, nx, ny, nbatchdims,shapes, offsets_d, lookup_d,slices_x_d,ranges_y_d, out, args_d); #endif // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); CudaSafeCall(cudaFree(args_d)); // Free the block lookup table : delete [] lookup_h; CudaSafeCall(cudaFree(lookup_d)); // Free the host or device "ranges" copies: if (ranges_on_device) { delete [] ranges_x_h; } else { CudaSafeCall(cudaFree(slices_x_d)); CudaSafeCall(cudaFree(ranges_y_d)); } if (nbatchdims > 0) { CudaSafeCall(cudaFree(offsets_d)); } return 0; } // Same wrappers, but for data located on the device template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, __INDEX__ **ranges, int device_id, TYPE* out, Args... args) { // device_id is provided, so we set the GPU device accordingly // Warning : is has to be consistent with location of data CudaSafeCall(cudaSetDevice(device_id)); static const int Nargs = sizeof...(Args); TYPE *pargs[Nargs]; unpack(pargs, args...); return Eval_(fun,nx,ny,nbatchdims,shapes,nranges_x,nranges_y,ranges,out, pargs); } // same without the device_id argument template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, __INDEX__ **ranges, TYPE* out, Args... args) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. we only check location of out which is the output vector // so we assume that input data is on the same GPU // note : cudaPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (first function above) cudaPointerAttributes attributes; CudaSafeCall(cudaPointerGetAttributes(&attributes,out)); return Eval(fun, nx, ny, nbatchdims, shapes, nranges_x,nranges_y,ranges, attributes.device, out, args...); } template < typename TYPE, class FUN > static int Eval(FUN fun, int nx, int ny, int nbatchdims, int *shapes, int nranges_x, int nranges_y, __INDEX__ **ranges, TYPE* out, TYPE** args, int device_id=-1) { if(device_id==-1) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : cudaPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (else statement below) cudaPointerAttributes attributes; CudaSafeCall(cudaPointerGetAttributes(&attributes,out)); CudaSafeCall(cudaSetDevice(attributes.device)); } else // device_id is provided, so we use it. Warning : is has to be consistent with location of data CudaSafeCall(cudaSetDevice(device_id)); return Eval_(fun,nx,ny,nbatchdims,shapes,nranges_x,nranges_y,ranges,out,args); } }; }
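// --------------------------------------------------------------------------
// Illustrative sketch (not part of the KeOps sources): both Eval_ routines
// above turn the host-side x-ranges into a per-block lookup table with one
// (range_id, start_x, end_x) triplet per CUDA block, so that block b reads its
// slice of the "i" axis from lookup_d[3*b .. 3*b+2]. The standalone helper
// below restates that construction with std::vector; the name
// build_block_lookup and the index_t alias are hypothetical stand-ins.
#include <algorithm>
#include <cstdint>
#include <vector>

using index_t = std::int64_t;   // stands in for __INDEX__

// ranges_x holds nranges pairs [begin, end); block_size plays the role of
// blockSize.x. Returns a flat array of 3 * nblocks entries:
//   { range_id, start_x, end_x }, { range_id, start_x, end_x }, ...
inline std::vector<index_t> build_block_lookup(const index_t* ranges_x,
                                               int nranges, int block_size)
{
    std::vector<index_t> lookup;
    for (int i = 0; i < nranges; ++i) {
        const index_t begin = ranges_x[2 * i];
        const index_t len   = ranges_x[2 * i + 1] - begin;
        // One block per block_size-wide slice of the range (the last slice may be short):
        for (index_t j = 0; j < len; j += block_size) {
            lookup.push_back(i);                                                   // range_id
            lookup.push_back(begin + j);                                           // start_x
            lookup.push_back(begin + j + std::min<index_t>(block_size, len - j));  // end_x
        }
    }
    return lookup;  // lookup.size() / 3 == number of blocks to launch
}

// Example: ranges {0,5} and {10,13} with block_size 4 yield the triplets
// {0,0,4}, {0,4,5}, {1,10,13}, i.e. three blocks.
// --------------------------------------------------------------------------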